Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/Makefile | 109
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 140
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 272
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 156
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 282
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 318
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 206
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 139
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 60
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 84
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 90
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_7.c | 129
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_10.c | 78
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_7.c | 63
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 771
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 21
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 71
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 78
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 89
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 238
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 361
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 53
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 147
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 262
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 66
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c | 47
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c | 53
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 318
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 59
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 90
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c | 118
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 96
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 203
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h | 99
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 135
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/include/link_service_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h | 4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h | 8
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h | 3
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 5
-rw-r--r--  drivers/gpu/drm/amd/include/mes_v11_api_def.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c | 25
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h | 17
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 100
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 12
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 8
-rw-r--r--  drivers/gpu/drm/ast/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 6
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 34
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 5
-rw-r--r--  drivers/gpu/drm/ast/ast_mm.c | 14
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 499
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511.h | 3
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 28
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7533.c | 45
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-anx6345.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 13
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/chrontel-ch7033.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/cros-ec-anx7688.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/ite-it6505.c | 168
-rw-r--r--  drivers/gpu/drm/bridge/ite-it66121.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt8912b.c | 42
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9211.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9611.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9611uxc.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 8
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8622.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8640.c | 9
-rw-r--r--  drivers/gpu/drm/bridge/sii902x.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/sii9234.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/tc358768.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/tc358775.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/ti-dlpc3433.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi83.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/ti-tfp410.c | 4
-rw-r--r--  drivers/gpu/drm/display/Makefile | 14
-rw-r--r--  drivers/gpu/drm/display/drm_dp_helper.c | 22
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 60
-rw-r--r--  drivers/gpu/drm/drm_atomic_state_helper.c | 6
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c | 4
-rw-r--r--  drivers/gpu/drm/drm_client.c | 10
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 6
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 39
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 15
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 12
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 714
-rw-r--r--  drivers/gpu/drm/drm_edid_load.c | 109
-rw-r--r--  drivers/gpu/drm/drm_format_helper.c | 11
-rw-r--r--  drivers/gpu/drm/drm_framebuffer.c | 66
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 41
-rw-r--r--  drivers/gpu/drm/drm_gem_dma_helper.c | 6
-rw-r--r--  drivers/gpu/drm/drm_gem_framebuffer_helper.c | 6
-rw-r--r--  drivers/gpu/drm/drm_gem_ttm_helper.c | 9
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 4
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 2
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c | 3
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 18
-rw-r--r--  drivers/gpu/drm/drm_plane_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 26
-rw-r--r--  drivers/gpu/drm/drm_print.c | 48
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_rect.c | 2
-rw-r--r--  drivers/gpu/drm/drm_simple_kms_helper.c | 6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dma.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.c | 11
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 1
-rw-r--r--  drivers/gpu/drm/gud/gud_connector.c | 2
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 16
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h | 3
-rw-r--r--  drivers/gpu/drm/hyperv/hyperv_drm_drv.c | 12
-rw-r--r--  drivers/gpu/drm/hyperv/hyperv_drm_proto.c | 2
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i2c/sil164_drv.c | 7
-rw-r--r--  drivers/gpu/drm/i2c/tda9950.c | 4
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/hsw_ips.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 21
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 58
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 656
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio_regs.h | 87
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 89
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 360
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.c | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 83
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 166
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_core.h | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 309
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 27
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dkl_phy.c | 106
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dkl_phy.h | 24
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h | 204
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 106
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_hdcp.c | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 46
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 125
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_drrs.c | 112
-rw-r--r--  drivers/gpu/drm/i915/display/intel_drrs.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 33
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 25
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c | 216
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lpe_audio.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c | 48
-rw-r--r--  drivers/gpu/drm/i915/display/intel_mg_phy_regs.h (renamed from drivers/gpu/drm/i915/display/intel_tc_phy_regs.h) | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_modeset_setup.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_modeset_verify.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 34
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pipe_crc.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 114
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 198
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 78
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 87
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_regs.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 46
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 205
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 212
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 270
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 204
-rw-r--r--  drivers/gpu/drm/i915/i915_scatterlist.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 80
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt_mmio_table.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 262
-rw-r--r--  drivers/gpu/drm/i915/intel_region_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_step.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_step.h | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.h | 8
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_session.c | 1
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_tee.c | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c | 2
-rw-r--r--  drivers/gpu/drm/i915/vlv_suspend.c | 28
-rw-r--r--  drivers/gpu/drm/lima/lima_sched.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 24
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 14
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.h | 7
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_cvbs.c | 13
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_cvbs.h | 1
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_hdmi.c | 13
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_hdmi.h | 1
-rw-r--r--  drivers/gpu/drm/meson/meson_plane.c | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_viu.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 6
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_g200se.c | 3
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 11
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/mxsfb/lcdif_kms.c | 253
-rw-r--r--  drivers/gpu/drm/mxsfb/lcdif_regs.h | 41
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo0039.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo5039.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo74c1.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo85b5.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo9039.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo90b5.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_boa0b5.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dmem.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 4
-rw-r--r--  drivers/gpu/drm/panel/panel-edp.c | 33
-rw-r--r--  drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c | 4
-rw-r--r--  drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c | 4
-rw-r--r--  drivers/gpu/drm/panel/panel-samsung-db7430.c | 7
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 44
-rw-r--r--  drivers/gpu/drm/panel/panel-sitronix-st7701.c | 70
-rw-r--r--  drivers/gpu/drm/panel/panel-tpo-tpg110.c | 7
-rw-r--r--  drivers/gpu/drm/panel/panel-widechips-ws2401.c | 7
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_devfreq.c | 11
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_dump.c | 40
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_perfcnt.c | 6
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c | 8
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_kms.c | 4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c | 17
-rw-r--r--  drivers/gpu/drm/qxl/qxl_prime.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_trace.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-core.c | 5
-rw-r--r--  drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c | 8
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 43
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fb.h | 6
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 4
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 6
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_lvds.c | 10
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop2_reg.c | 2
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c | 29
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 100
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x-i2c.c | 4
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x.c | 50
-rw-r--r--  drivers/gpu/drm/tegra/fb.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 17
-rw-r--r--  drivers/gpu/drm/tests/Makefile | 14
-rw-r--r--  drivers/gpu/drm/tests/drm_buddy_test.c | 8
-rw-r--r--  drivers/gpu/drm/tests/drm_dp_mst_helper_test.c | 445
-rw-r--r--  drivers/gpu/drm/tests/drm_format_helper_test.c | 190
-rw-r--r--  drivers/gpu/drm/tests/drm_mm_test.c | 10
-rw-r--r--  drivers/gpu/drm/tests/drm_plane_helper_test.c | 467
-rw-r--r--  drivers/gpu/drm/tiny/Kconfig | 13
-rw-r--r--  drivers/gpu/drm/tiny/Makefile | 1
-rw-r--r--  drivers/gpu/drm/tiny/bochs.c | 1
-rw-r--r--  drivers/gpu/drm/tiny/ofdrm.c | 1424
-rw-r--r--  drivers/gpu/drm/tiny/simpledrm.c | 64
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 23
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_range_manager.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c | 14
-rw-r--r--  drivers/gpu/drm/udl/Makefile | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 139
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.h | 15
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 42
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c | 566
-rw-r--r--  drivers/gpu/drm/udl/udl_proto.h | 68
-rw-r--r--  drivers/gpu/drm/udl/udl_transfer.c | 7
-rw-r--r--  drivers/gpu/drm/vc4/vc4_bo.c | 4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c | 1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h | 16
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 56
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.h | 8
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hvs.c | 26
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c | 13
-rw-r--r--  drivers/gpu/drm/vc4/vc4_vec.c | 8
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_object.c | 123
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_object.h | 20
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 62
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 31
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 129
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 53
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 17
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 831
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c | 199
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h | 83
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 669
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 31
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mksstat.h | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 59
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 31
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 26
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 14
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 55
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.h | 26
-rw-r--r--  drivers/gpu/drm/xlnx/Makefile | 2
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_disp.c | 646
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_disp.h | 48
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_dp.c | 476
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_dp.h | 4
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_dpsub.c | 300
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_dpsub.h | 46
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_kms.c | 534
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_kms.h | 46
510 files changed, 13637 insertions(+), 10486 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 198ba846d34b..34f5a092c99e 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -51,6 +51,18 @@ config DRM_DEBUG_MM
If in doubt, say "N".
+config DRM_USE_DYNAMIC_DEBUG
+ bool "use dynamic debug to implement drm.debug"
+ default y
+ depends on DRM
+ depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
+ depends on JUMP_LABEL
+ help
+ Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
+ Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
+ bytes per callsite, the .data costs can be substantial, and
+ are therefore configurable.
+
config DRM_KUNIT_TEST
tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
depends on DRM && KUNIT
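The new DRM_USE_DYNAMIC_DEBUG option trades .data space for zero-cost disabled callsites. A simplified sketch of the two callsite shapes it selects between (hypothetical macro names; the real definitions live in drm_print.h and dynamic_debug.h):

#ifdef CONFIG_DRM_USE_DYNAMIC_DEBUG
/* dyndbg path: each callsite carries its own struct _ddebug descriptor in
 * .data (~56 bytes) behind a jump label, so no bitmask is tested at
 * runtime; pr_debug() compiles to a dynamic-debug callsite here. */
#define drm_dbg_sketch(fmt, ...) \
	pr_debug("[drm] " fmt, ##__VA_ARGS__)
#else
/* classic path: every callsite tests the drm.debug bitmask at runtime */
#define drm_dbg_sketch(fmt, ...) do { \
	if (drm_debug_enabled(DRM_UT_CORE)) \
		printk(KERN_DEBUG "[drm] " fmt, ##__VA_ARGS__); \
} while (0)
#endif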
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 25d0ba310509..6e55c47288e4 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -3,32 +3,73 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-drm-y := drm_aperture.o drm_auth.o drm_cache.o \
- drm_file.o drm_gem.o drm_ioctl.o \
- drm_drv.o \
- drm_sysfs.o drm_mm.o \
- drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o drm_displayid.o \
- drm_trace_points.o drm_prime.o \
- drm_vma_manager.o \
- drm_modeset_lock.o drm_atomic.o drm_bridge.o \
- drm_framebuffer.o drm_connector.o drm_blend.o \
- drm_encoder.o drm_mode_object.o drm_property.o \
- drm_plane.o drm_color_mgmt.o drm_print.o \
- drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
- drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
- drm_client_modeset.o drm_atomic_uapi.o \
- drm_managed.o drm_vblank_work.o
-drm-$(CONFIG_DRM_LEGACY) += drm_agpsupport.o drm_bufs.o drm_context.o drm_dma.o \
- drm_hashtab.o drm_irq.o drm_legacy_misc.o drm_lock.o \
- drm_memory.o drm_scatter.o drm_vm.o
+CFLAGS-$(CONFIG_DRM_USE_DYNAMIC_DEBUG) += -DDYNAMIC_DEBUG_MODULE
+
+drm-y := \
+ drm_aperture.o \
+ drm_atomic.o \
+ drm_atomic_uapi.o \
+ drm_auth.o \
+ drm_blend.o \
+ drm_bridge.o \
+ drm_cache.o \
+ drm_client.o \
+ drm_client_modeset.o \
+ drm_color_mgmt.o \
+ drm_connector.o \
+ drm_crtc.o \
+ drm_displayid.o \
+ drm_drv.o \
+ drm_dumb_buffers.o \
+ drm_edid.o \
+ drm_encoder.o \
+ drm_file.o \
+ drm_fourcc.o \
+ drm_framebuffer.o \
+ drm_gem.o \
+ drm_ioctl.o \
+ drm_lease.o \
+ drm_managed.o \
+ drm_mm.o \
+ drm_mode_config.o \
+ drm_mode_object.o \
+ drm_modes.o \
+ drm_modeset_lock.o \
+ drm_plane.o \
+ drm_prime.o \
+ drm_print.o \
+ drm_property.o \
+ drm_syncobj.o \
+ drm_sysfs.o \
+ drm_trace_points.o \
+ drm_vblank.o \
+ drm_vblank_work.o \
+ drm_vma_manager.o \
+ drm_writeback.o
+drm-$(CONFIG_DRM_LEGACY) += \
+ drm_agpsupport.o \
+ drm_bufs.o \
+ drm_context.o \
+ drm_dma.o \
+ drm_hashtab.o \
+ drm_irq.o \
+ drm_legacy_misc.o \
+ drm_lock.o \
+ drm_memory.o \
+ drm_scatter.o \
+ drm_vm.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_PCI) += drm_pci.o
-drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
+drm-$(CONFIG_DEBUG_FS) += \
+ drm_debugfs.o \
+ drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
-drm-$(CONFIG_DRM_PRIVACY_SCREEN) += drm_privacy_screen.o drm_privacy_screen_x86.o
+drm-$(CONFIG_DRM_PRIVACY_SCREEN) += \
+ drm_privacy_screen.o \
+ drm_privacy_screen_x86.o
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_NOMODESET) += drm_nomodeset.o
@@ -57,16 +98,24 @@ obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
# Modesetting helpers
#
-drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o \
- drm_encoder_slave.o drm_flip_work.o \
- drm_probe_helper.o \
- drm_plane_helper.o drm_atomic_helper.o \
- drm_kms_helper_common.o \
- drm_simple_kms_helper.o drm_modeset_helper.o \
- drm_gem_atomic_helper.o \
- drm_gem_framebuffer_helper.o \
- drm_atomic_state_helper.o drm_damage_helper.o \
- drm_format_helper.o drm_self_refresh_helper.o drm_rect.o
+drm_kms_helper-y := \
+ drm_atomic_helper.o \
+ drm_atomic_state_helper.o \
+ drm_bridge_connector.o \
+ drm_crtc_helper.o \
+ drm_damage_helper.o \
+ drm_encoder_slave.o \
+ drm_flip_work.o \
+ drm_format_helper.o \
+ drm_gem_atomic_helper.o \
+ drm_gem_framebuffer_helper.o \
+ drm_kms_helper_common.o \
+ drm_modeset_helper.o \
+ drm_plane_helper.o \
+ drm_probe_helper.o \
+ drm_rect.o \
+ drm_self_refresh_helper.o \
+ drm_simple_kms_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ae9371b172e3..8639a4f9c6e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -274,9 +274,6 @@ extern int amdgpu_vcnfw_log;
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)
-#define AMDGPU_RESET_LEVEL_SOFT_RECOVERY (1 << 0)
-#define AMDGPU_RESET_LEVEL_MODE2 (1 << 1)
-
/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128
@@ -1065,7 +1062,6 @@ struct amdgpu_device {
struct work_struct reset_work;
- uint32_t amdgpu_reset_level_mask;
bool job_hang;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 3da27436922c..b14800ac179e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1053,6 +1053,10 @@ bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
return false;
+
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index c7b1a2dfde13..0561812aa0a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -75,9 +75,6 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
return;
adev->kfd.dev = kgd2kfd_probe(adev, vf);
-
- if (adev->kfd.dev)
- amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}
/**
@@ -137,7 +134,6 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
@@ -201,6 +197,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
adev_to_drm(adev), &gpu_resources);
+ amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
+
INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work);
}
}
@@ -210,6 +208,7 @@ void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev)
if (adev->kfd.dev) {
kgd2kfd_device_exit(adev->kfd.dev);
adev->kfd.dev = NULL;
+ amdgpu_amdkfd_total_mem_size -= adev->gmc.real_vram_size;
}
}
@@ -756,11 +755,7 @@ void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bo
{
struct ras_err_data err_data = {0, 0, 0, NULL};
- /* CPU MCA will handle page retirement if connected_to_cpu is 1 */
- if (!adev->gmc.xgmi.connected_to_cpu)
- amdgpu_umc_poison_handler(adev, &err_data, reset);
- else if (reset)
- amdgpu_amdkfd_gpu_reset(adev);
+ amdgpu_umc_poison_handler(adev, &err_data, reset);
}
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
index 0b0a72ca5695..7e80caa05060 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
@@ -111,7 +111,7 @@ static int init_interrupts_v11(struct amdgpu_device *adev, uint32_t pipe_id)
lock_srbm(adev, mec, pipe, 0, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, regCPC_INT_CNTL),
+ WREG32_SOC15(GC, 0, regCPC_INT_CNTL,
CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
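The WREG32_SOC15() helper used above is shorthand for the open-coded offset lookup it replaces; a simplified sketch (the real macro in soc15_common.h also handles register remapping):

#define WREG32_SOC15_SKETCH(ip, inst, reg, value) \
	WREG32(SOC15_REG_OFFSET(ip, inst, reg), (value))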
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 6066aebf491c..de61a85c4b02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1954,8 +1954,6 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
return PTR_ERR(ent);
}
- debugfs_create_u32("amdgpu_reset_level", 0600, root, &adev->amdgpu_reset_level_mask);
-
/* Register debugfs entries for amdgpu_ttm */
amdgpu_ttm_debugfs_init(adev);
amdgpu_debugfs_pm_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c04ea7f1e819..e0445e8cc342 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2928,6 +2928,14 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+ /*
+ * Per the PMFW team's suggestion, the driver needs to disable the
+ * gfxoff and df cstate features for gpu reset (e.g. Mode1Reset)
+ * scenarios. Add the missing df cstate disablement here.
+ */
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ dev_warn(adev->dev, "Failed to disallow df cstate");
+
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -3154,7 +3162,8 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
@@ -4069,12 +4078,20 @@ static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
{
struct amdgpu_device *adev = drm_to_adev(dev);
+ int r = 0;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
adev->in_suspend = true;
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ r = amdgpu_virt_request_full_gpu(adev, false);
+ if (r)
+ return r;
+ }
+
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
DRM_WARN("smart shift update failed\n");
@@ -4098,6 +4115,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
amdgpu_device_ip_suspend_phase2(adev);
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, false);
+
return 0;
}
@@ -4116,6 +4136,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
struct amdgpu_device *adev = drm_to_adev(dev);
int r = 0;
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ if (r)
+ return r;
+ }
+
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -4130,6 +4156,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
}
r = amdgpu_device_ip_resume(adev);
+
+ /* no matter what r is, we always need to properly release the full GPU */
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_init_data_exchange(adev);
+ amdgpu_virt_release_full_gpu(adev, true);
+ }
+
if (r) {
dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
return r;
@@ -5185,7 +5218,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
reset_context->job = job;
reset_context->hive = hive;
-
/*
* Build list of devices to reset.
* In case we are in XGMI hive mode, resort the device list
@@ -5312,11 +5344,8 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
amdgpu_ras_resume(adev);
} else {
r = amdgpu_do_asic_reset(device_list_handle, reset_context);
- if (r && r == -EAGAIN) {
- set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags);
- adev->asic_reset_res = 0;
+ if (r && r == -EAGAIN)
goto retry;
- }
if (!r && gpu_reset_for_dev_remove)
goto recover_end;
@@ -5576,9 +5605,9 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
resource_size_t aper_limit =
adev->gmc.aper_base + adev->gmc.aper_size - 1;
- bool p2p_access = !adev->gmc.xgmi.connected_to_cpu &&
- !(pci_p2pdma_distance_many(adev->pdev,
- &peer_adev->dev, 1, true) < 0);
+ bool p2p_access =
+ !adev->gmc.xgmi.connected_to_cpu &&
+ !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
@@ -5752,7 +5781,6 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
reset_context.reset_req_dev = adev;
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
- set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
adev->no_hw_access = true;
r = amdgpu_device_pre_asic_reset(adev, &reset_context);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 9fa2a5ceb77d..3993e6134914 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -229,7 +229,7 @@ static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, ui
return r;
}
- memcpy((u8 *)binary, (u8 *)fw->data, adev->mman.discovery_tmr_size);
+ memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
release_firmware(fw);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 23998f727c7f..1a06b8d724f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -38,8 +38,6 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_damage_helper.h>
-#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
@@ -500,12 +498,6 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.create_handle = drm_gem_fb_create_handle,
};
-static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
- .destroy = drm_gem_fb_destroy,
- .create_handle = drm_gem_fb_create_handle,
- .dirty = drm_atomic_helper_dirtyfb,
-};
-
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
uint64_t bo_flags)
{
@@ -1108,10 +1100,8 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
if (ret)
goto err;
- if (drm_drv_uses_atomic_modeset(dev))
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
- else
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+
if (ret)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 782cbca37538..7bd8e33b14be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -58,7 +58,7 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
- if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
+ if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
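This hunk and the amdgpu_device.c one above move from pci_p2pdma_distance_many() to the single-client pci_p2pdma_distance(). A minimal sketch of the check, assuming the <linux/pci-p2pdma.h> signature used here (a negative return means no usable P2P path; the final bool selects verbose logging of the reason):

#include <linux/pci-p2pdma.h>

static bool p2p_usable_sketch(struct pci_dev *provider, struct device *client)
{
	return pci_p2pdma_distance(provider, client, false) >= 0;
}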
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 16f6a313335e..3c9fecdd6b2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -38,6 +38,8 @@
#include <linux/mmu_notifier.h>
#include <linux/suspend.h>
#include <linux/cc_platform.h>
+#include <linux/fb.h>
+#include <linux/dynamic_debug.h>
#include "amdgpu.h"
#include "amdgpu_irq.h"
@@ -186,6 +188,18 @@ int amdgpu_vcnfw_log;
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
.delayed_reset_work = __DELAYED_WORK_INITIALIZER(
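The classmap above binds the ten DRM_UT_* names to consecutive dyndbg class ids starting at 0, matching their drm.debug bit positions (DRM_UT_CORE is bit 0, DRM_UT_KMS bit 2, and so on). A hedged usage sketch: any drm_dbg_kms() callsite in the module then belongs to class DRM_UT_KMS and can be toggled through drm.debug or the dynamic_debug control file:

static void probe_dbg_sketch(struct drm_device *drm)
{
	/* class DRM_UT_KMS: enabled by drm.debug bit 2, or by writing
	 * "class DRM_UT_KMS +p" to <debugfs>/dynamic_debug/control */
	drm_dbg_kms(drm, "example KMS debug message\n");
}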
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 8adeb7469f1e..d0d99ed607dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -400,7 +400,6 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
/* We are not protected by ring lock when reading the last sequence
* but it's ok to report slightly wrong fence count here.
*/
- amdgpu_fence_process(ring);
emitted = 0x100000000ull;
emitted -= atomic_read(&ring->fence_drv.last_seq);
emitted += READ_ONCE(ring->fence_drv.sync_seq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index ceb91469958a..9546adc8a76f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -23,6 +23,7 @@
*
*/
+#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
@@ -865,3 +866,142 @@ int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
}
return amdgpu_num_kcq;
}
+
+void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
+ uint32_t ucode_id)
+{
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
+ const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
+ struct amdgpu_firmware_info *info = NULL;
+ const struct firmware *ucode_fw;
+ unsigned int fw_size;
+
+ switch (ucode_id) {
+ case AMDGPU_UCODE_ID_CP_PFP:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.pfp_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.pfp_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_PFP:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw_version =
+ le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
+ adev->gfx.pfp_feature_version =
+ le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
+ ucode_fw = adev->gfx.pfp_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.pfp_fw->data;
+ ucode_fw = adev->gfx.pfp_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_ME:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.me_fw->data;
+ adev->gfx.me_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.me_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.me_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_ME:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.me_fw->data;
+ adev->gfx.me_fw_version =
+ le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
+ adev->gfx.me_feature_version =
+ le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
+ ucode_fw = adev->gfx.me_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.me_fw->data;
+ ucode_fw = adev->gfx.me_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_CE:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.ce_fw->data;
+ adev->gfx.ce_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.ce_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.ce_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
+ le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1_JT:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec_fw->data;
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC2:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec2_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.mec2_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
+ le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC2_JT:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ ucode_fw = adev->gfx.mec2_fw;
+ fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw_version =
+ le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
+ adev->gfx.mec_feature_version =
+ le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.mec_fw->data;
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ break;
+ default:
+ break;
+ }
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ info = &adev->firmware.ucode[ucode_id];
+ info->ucode_id = ucode_id;
+ info->fw = ucode_fw;
+ adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
+ }
+}
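The new helper centralizes the per-ucode firmware-header parsing that each gfx IP file previously open-coded. A hypothetical caller sketch (firmware name and flow are assumptions, mirroring how the gfx IP code requests firmware before registering it for PSP loading):

#include <linux/firmware.h>

static int pfp_init_sketch(struct amdgpu_device *adev, const char *fw_name)
{
	int r;

	r = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (r)
		return r;
	/* records version/feature info and, for PSP front-door loading,
	 * registers the image in adev->firmware.ucode[] and adds its
	 * page-aligned size to adev->firmware.fw_size */
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
	return 0;
}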
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index b91ab919ee70..832b3807f1d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -426,4 +426,6 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
+void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index aebc384531ac..34233a74248c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -572,45 +572,15 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
{
struct amdgpu_gmc *gmc = &adev->gmc;
-
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(9, 0, 1):
- case IP_VERSION(9, 3, 0):
- case IP_VERSION(9, 4, 0):
- case IP_VERSION(9, 4, 1):
- case IP_VERSION(9, 4, 2):
- case IP_VERSION(10, 3, 3):
- case IP_VERSION(10, 3, 4):
- case IP_VERSION(10, 3, 5):
- case IP_VERSION(10, 3, 6):
- case IP_VERSION(10, 3, 7):
- /*
- * noretry = 0 will cause kfd page fault tests fail
- * for some ASICs, so set default to 1 for these ASICs.
- */
- if (amdgpu_noretry == -1)
- gmc->noretry = 1;
- else
- gmc->noretry = amdgpu_noretry;
- break;
- default:
- /* Raven currently has issues with noretry
- * regardless of what we decide for other
- * asics, we should leave raven with
- * noretry = 0 until we root cause the
- * issues.
- *
- * default this to 0 for now, but we may want
- * to change this in the future for certain
- * GPUs as it can increase performance in
- * certain cases.
- */
- if (amdgpu_noretry == -1)
- gmc->noretry = 0;
- else
- gmc->noretry = amdgpu_noretry;
- break;
- }
+ uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
+ bool noretry_default = (gc_ver == IP_VERSION(9, 0, 1) ||
+ gc_ver == IP_VERSION(9, 3, 0) ||
+ gc_ver == IP_VERSION(9, 4, 0) ||
+ gc_ver == IP_VERSION(9, 4, 1) ||
+ gc_ver == IP_VERSION(9, 4, 2) ||
+ gc_ver >= IP_VERSION(10, 3, 0));
+
+ gmc->noretry = (amdgpu_noretry == -1) ? noretry_default : amdgpu_noretry;
}
void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 1f3302aebeff..44367f03316f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -144,7 +144,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
node->base.start = node->mm_nodes[0].start;
} else {
node->mm_nodes[0].start = 0;
- node->mm_nodes[0].size = node->base.num_pages;
+ node->mm_nodes[0].size = PFN_UP(node->base.size);
node->base.start = AMDGPU_BO_INVALID_OFFSET;
}
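This hunk follows the same TTM conversion as the later amdgpu_object.c and amdgpu_res_cursor.h changes: ttm_resource now carries a byte-based size rather than num_pages, and PFN_UP() from <linux/pfn.h>, effectively ((x) + PAGE_SIZE - 1) >> PAGE_SHIFT, converts back to a page count where one is still needed:

#include <linux/pfn.h>

static unsigned long res_pages_sketch(struct ttm_resource *res)
{
	return PFN_UP(res->size);	/* bytes -> pages, rounding up */
}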
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 46c99331d7f1..cd968e781077 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -72,7 +72,6 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 7b46f6bf4187..ad980f4b66e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -222,6 +222,8 @@ struct mes_add_queue_input {
uint64_t tba_addr;
uint64_t tma_addr;
uint32_t is_kfd_process;
+ uint32_t is_aql_queue;
+ uint32_t queue_size;
};
struct mes_remove_queue_input {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e6a9b9fc9e0b..974e85d8b6cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -542,6 +542,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
/* GWS and OA don't need any alignment. */
page_align = bp->byte_align;
size <<= PAGE_SHIFT;
+
} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
/* Both size and alignment must be a multiple of 4. */
page_align = ALIGN(bp->byte_align, 4);
@@ -688,13 +689,16 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
* num of amdgpu_vm_pt entries.
*/
BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
- bp->destroy = &amdgpu_bo_vm_destroy;
r = amdgpu_bo_create(adev, bp, &bo_ptr);
if (r)
return r;
*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
+ /* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
+ * is initialized.
+ */
+ bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
return r;
}
@@ -773,7 +777,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
return 0;
}
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index e55f106621ef..a4b47e1bd111 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1950,7 +1950,6 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
}
@@ -2268,6 +2267,25 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
{
+ if (amdgpu_sriov_vf(adev)) {
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(13, 0, 2):
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ if (adev->asic_type == CHIP_IP_DISCOVERY) {
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 10):
+ return true;
+ default:
+ return false;
+ }
+ }
+
return adev->asic_type == CHIP_VEGA10 ||
adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_ARCTURUS ||
@@ -2311,11 +2329,6 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
!amdgpu_ras_asic_supported(adev))
return;
- /* If driver run on sriov guest side, only enable ras for aldebaran */
- if (amdgpu_sriov_vf(adev) &&
- adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2))
- return;
-
if (!adev->gmc.xgmi.connected_to_cpu) {
if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
dev_info(adev->dev, "MEM ECC is active.\n");
@@ -2720,7 +2733,8 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
/* Need disable ras on all IPs here before ip [hw/sw]fini */
- amdgpu_ras_disable_all_features(adev, 0);
+ if (con->features)
+ amdgpu_ras_disable_all_features(adev, 0);
amdgpu_ras_recovery_fini(adev);
return 0;
}
@@ -2833,11 +2847,8 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
struct mce *m = (struct mce *)data;
struct amdgpu_device *adev = NULL;
uint32_t gpu_id = 0;
- uint32_t umc_inst = 0;
- uint32_t ch_inst, channel_index = 0;
+ uint32_t umc_inst = 0, ch_inst = 0;
struct ras_err_data err_data = {0, 0, 0, NULL};
- struct eeprom_table_record err_rec;
- uint64_t retired_page;
/*
* If the error was generated in UMC_V2, which belongs to GPU UMCs,
@@ -2876,21 +2887,22 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
umc_inst, ch_inst);
+ err_data.err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ if (!err_data.err_addr) {
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error record in mca notifier!\n");
+ return NOTIFY_DONE;
+ }
+
/*
* Translate UMC channel address to Physical address
*/
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num
- + ch_inst];
-
- retired_page = ADDR_OF_8KB_BLOCK(m->addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(m->addr);
-
- memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
- err_data.err_addr = &err_rec;
- amdgpu_umc_fill_error_record(&err_data, m->addr,
- retired_page, channel_index, umc_inst);
+ if (adev->umc.ras &&
+ adev->umc.ras->convert_ras_error_address)
+ adev->umc.ras->convert_ras_error_address(adev,
+ &err_data, m->addr, ch_inst, umc_inst);
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
@@ -2898,6 +2910,7 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
amdgpu_ras_save_bad_pages(adev);
}
+ kfree(err_data.err_addr);
return NOTIFY_OK;
}
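
Note: two changes land together here — err_data.err_addr becomes a heap allocation sized for max_ras_err_cnt_per_query records (freed once the bad pages are saved), and the UMC-version-specific address math moves behind the new convert_ras_error_address callback. A hedged sketch of what an implementation of that hook could look like; translate_to_soc_pa() is a placeholder for the per-IP translation, not a real function:

    /* Sketch: a per-UMC hook turns an MCA address into retired-page records. */
    static void my_umc_convert_ras_error_address(struct amdgpu_device *adev,
                                                 struct ras_err_data *err_data,
                                                 uint64_t err_addr,
                                                 uint32_t ch_inst, uint32_t umc_inst)
    {
        uint64_t retired_page = translate_to_soc_pa(adev, err_addr,
                                                    ch_inst, umc_inst);

        amdgpu_umc_fill_error_record(err_data, err_addr, retired_page,
                                     ch_inst, umc_inst);
    }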
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 6546552e596c..5c4f93ee0c57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -62,7 +62,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
if (!res)
goto fallback;
- BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
+ BUG_ON(start + size > res->size);
cur->mem_type = res->mem_type;
@@ -110,7 +110,7 @@ fallback:
cur->size = size;
cur->remaining = size;
cur->node = NULL;
- WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
+ WARN_ON(res && start + size > res->size);
return;
}
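
Note: these two hunks (like the kmap one above) are part of the ttm_resource conversion from a page count to a byte size. Assuming res->size carries the full byte size, the old and new forms relate as:

    u64 bytes = res->size;                   /* was: (u64)res->num_pages << PAGE_SHIFT */
    unsigned long pages = PFN_UP(res->size); /* rounds a partial final page up */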
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index 9da5ead50c90..f778466bb9db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -37,8 +37,6 @@ int amdgpu_reset_init(struct amdgpu_device *adev)
{
int ret = 0;
- adev->amdgpu_reset_level_mask = 0x1;
-
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 2):
ret = aldebaran_reset_init(adev);
@@ -76,12 +74,6 @@ int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
{
struct amdgpu_reset_handler *reset_handler = NULL;
- if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2))
- return -ENOSYS;
-
- if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags))
- return -ENOSYS;
-
if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
reset_handler = adev->reset_cntl->get_reset_handler(
adev->reset_cntl, reset_context);
@@ -98,12 +90,6 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
int ret;
struct amdgpu_reset_handler *reset_handler = NULL;
- if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2))
- return -ENOSYS;
-
- if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags))
- return -ENOSYS;
-
if (adev->reset_cntl)
reset_handler = adev->reset_cntl->get_reset_handler(
adev->reset_cntl, reset_context);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index dc43fcb93eac..f4a501ff87d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -30,8 +30,7 @@ enum AMDGPU_RESET_FLAGS {
AMDGPU_NEED_FULL_RESET = 0,
AMDGPU_SKIP_HW_RESET = 1,
- AMDGPU_SKIP_MODE2_RESET = 2,
- AMDGPU_RESET_FOR_DEVICE_REMOVE = 3,
+ AMDGPU_RESET_FOR_DEVICE_REMOVE = 2,
};
struct amdgpu_reset_context {
@@ -113,7 +112,8 @@ static inline bool amdgpu_reset_get_reset_domain(struct amdgpu_reset_domain *dom
static inline void amdgpu_reset_put_reset_domain(struct amdgpu_reset_domain *domain)
{
- kref_put(&domain->refcount, amdgpu_reset_destroy_reset_domain);
+ if (domain)
+ kref_put(&domain->refcount, amdgpu_reset_destroy_reset_domain);
}
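
Note: the NULL check lets error paths call the put helper unconditionally, even when the reset domain was never allocated — the usual kernel teardown idiom, sketched here with a hypothetical struct thing:

    static void put_thing(struct thing *t)
    {
        if (t)    /* tolerate NULL so callers can unwind blindly */
            kref_put(&t->refcount, thing_release);
    }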
static inline bool amdgpu_reset_domain_schedule(struct amdgpu_reset_domain *domain,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 3e316b013fd9..d3558c34d406 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -405,9 +405,6 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
{
ktime_t deadline = ktime_add_us(ktime_get(), 10000);
- if (!(ring->adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_SOFT_RECOVERY))
- return false;
-
if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index 6373bfb47d55..012b72d00e04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -272,3 +272,275 @@ void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
&adev->gfx.rlc.cp_table_gpu_addr,
(void **)&adev->gfx.rlc.cp_table_ptr);
}
+
+static int amdgpu_gfx_rlc_init_microcode_v2_0(struct amdgpu_device *adev)
+{
+ const struct common_firmware_header *common_hdr;
+ const struct rlc_firmware_header_v2_0 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+ unsigned int *tmp;
+ unsigned int i;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+
+ adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+ adev->gfx.rlc.save_and_restore_offset =
+ le32_to_cpu(rlc_hdr->save_and_restore_offset);
+ adev->gfx.rlc.clear_state_descriptor_offset =
+ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
+ adev->gfx.rlc.avail_scratch_ram_locations =
+ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
+ adev->gfx.rlc.reg_restore_list_size =
+ le32_to_cpu(rlc_hdr->reg_restore_list_size);
+ adev->gfx.rlc.reg_list_format_start =
+ le32_to_cpu(rlc_hdr->reg_list_format_start);
+ adev->gfx.rlc.reg_list_format_separate_start =
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
+ adev->gfx.rlc.starting_offsets_start =
+ le32_to_cpu(rlc_hdr->starting_offsets_start);
+ adev->gfx.rlc.reg_list_format_size_bytes =
+ le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
+ adev->gfx.rlc.reg_list_size_bytes =
+ le32_to_cpu(rlc_hdr->reg_list_size_bytes);
+ adev->gfx.rlc.register_list_format =
+ kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
+ adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
+ if (!adev->gfx.rlc.register_list_format) {
+ dev_err(adev->dev, "failed to allocate memory for rlc register_list_format\n");
+ return -ENOMEM;
+ }
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
+
+ adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
+ info->fw = adev->gfx.rlc_fw;
+ if (info->fw) {
+ common_hdr = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(common_hdr->ucode_size_bytes), PAGE_SIZE);
+ }
+ }
+
+ return 0;
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_1(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_1 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
+ adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
+ adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
+ adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
+ adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
+ adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
+ adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
+ adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
+ adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
+ adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
+ adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
+ adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
+ adev->gfx.rlc.reg_list_format_direct_reg_list_length =
+ le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.save_restore_list_cntl_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.save_restore_list_gpm_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.save_restore_list_srm_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_2(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_2 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
+ adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.rlc_iram_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_3(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_3 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlcp_ucode_version = le32_to_cpu(rlc_hdr->rlcp_ucode_version);
+ adev->gfx.rlcp_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcp_ucode_feature_version);
+ adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
+ adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);
+
+ adev->gfx.rlcv_ucode_version = le32_to_cpu(rlc_hdr->rlcv_ucode_version);
+ adev->gfx.rlcv_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcv_ucode_feature_version);
+ adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
+ adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_4(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_4 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
+ uint16_t version_major,
+ uint16_t version_minor)
+{
+ int err;
+
+ if (version_major < 2) {
+ /* only support rlc_hdr v2.x and onwards */
+ dev_err(adev->dev, "unsupported rlc fw hdr\n");
+ return -EINVAL;
+ }
+
+ /* is_rlc_v2_1 is still used in APU code path */
+ if (version_major == 2 && version_minor == 1)
+ adev->gfx.rlc.is_rlc_v2_1 = true;
+
+ if (version_minor >= 0) {
+ err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
+ if (err) {
+ dev_err(adev->dev, "failed to init rlc v2_0 microcode\n");
+ return err;
+ }
+ }
+ if (version_minor >= 1)
+ amdgpu_gfx_rlc_init_microcode_v2_1(adev);
+ if (version_minor >= 2)
+ amdgpu_gfx_rlc_init_microcode_v2_2(adev);
+ if (version_minor == 3)
+ amdgpu_gfx_rlc_init_microcode_v2_3(adev);
+ if (version_minor == 4)
+ amdgpu_gfx_rlc_init_microcode_v2_4(adev);
+
+ return 0;
+}
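
Note on the dispatch above: v2.0 parsing always runs, the v2.1 and v2.2 sections are cumulative, while v2.3 and v2.4 are mutually exclusive leaf layouts (hence == rather than >=). Callers read the version out of the common header first, as the gfx_v10_0/gfx_v11_0 hunks below do; roughly:

    rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
    version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
    version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
    err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);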
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index 03ac36b2c2cf..23f060db9255 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -267,5 +267,7 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
-
+int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
+ uint16_t version_major,
+ uint16_t version_minor);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 42c1f050542f..ea5278f094c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -21,6 +21,7 @@
*
*/
+#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
@@ -150,3 +151,158 @@ int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
+
+static int amdgpu_sdma_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
+{
+ int err = 0;
+ uint16_t version_major;
+ const struct common_firmware_header *header = NULL;
+ const struct sdma_firmware_header_v1_0 *hdr;
+ const struct sdma_firmware_header_v2_0 *hdr_v2;
+
+ err = amdgpu_ucode_validate(sdma_inst->fw);
+ if (err)
+ return err;
+
+ header = (const struct common_firmware_header *)
+ sdma_inst->fw->data;
+ version_major = le16_to_cpu(header->header_version_major);
+
+ switch (version_major) {
+ case 1:
+ hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
+ sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
+ sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
+ break;
+ case 2:
+ hdr_v2 = (const struct sdma_firmware_header_v2_0 *)sdma_inst->fw->data;
+ sdma_inst->fw_version = le32_to_cpu(hdr_v2->header.ucode_version);
+ sdma_inst->feature_version = le32_to_cpu(hdr_v2->ucode_feature_version);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (sdma_inst->feature_version >= 20)
+ sdma_inst->burst_nop = true;
+
+ return 0;
+}
+
+void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
+ bool duplicate)
+{
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ if (duplicate)
+ break;
+ }
+
+ memset((void *)adev->sdma.instance, 0,
+ sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
+}
+
+int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
+ char *fw_name, u32 instance,
+ bool duplicate)
+{
+ struct amdgpu_firmware_info *info = NULL;
+ const struct common_firmware_header *header = NULL;
+ int err = 0, i;
+ const struct sdma_firmware_header_v2_0 *sdma_hdr;
+ uint16_t version_major;
+
+ err = request_firmware(&adev->sdma.instance[instance].fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ header = (const struct common_firmware_header *)
+ adev->sdma.instance[instance].fw->data;
+ version_major = le16_to_cpu(header->header_version_major);
+
+ if ((duplicate && instance) || (!duplicate && version_major > 1)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = amdgpu_sdma_init_inst_ctx(&adev->sdma.instance[instance]);
+ if (err)
+ goto out;
+
+ if (duplicate) {
+ for (i = 1; i < adev->sdma.num_instances; i++)
+ memcpy((void *)&adev->sdma.instance[i],
+ (void *)&adev->sdma.instance[0],
+ sizeof(struct amdgpu_sdma_instance));
+ }
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ DRM_DEBUG("psp_load == '%s'\n",
+ adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ switch (version_major) {
+ case 1:
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (!duplicate && (instance != i))
+ continue;
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
+ info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
+ info->fw = adev->sdma.instance[i].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ }
+ break;
+ case 2:
+ sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
+ adev->sdma.instance[0].fw->data;
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH0];
+ info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH0;
+ info->fw = adev->sdma.instance[0].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes), PAGE_SIZE);
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH1];
+ info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH1;
+ info->fw = adev->sdma.instance[0].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ }
+
+out:
+ if (err) {
+ DRM_ERROR("SDMA: Failed to init firmware \"%s\"\n", fw_name);
+ amdgpu_sdma_destroy_inst_ctx(adev, duplicate);
+ }
+ return err;
+}
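
Note: duplicate means a single firmware image is shared by every SDMA instance, which is only legal for instance 0 and v1 headers; v2 headers instead carry two threads (context and control) registered as TH0/TH1. A hedged usage sketch — the firmware name is illustrative only:

    /* Sketch: load one image and fan it out across all instances. */
    err = amdgpu_sdma_init_microcode(adev, "amdgpu/foo_sdma.bin",
                                     0 /* instance */, true /* duplicate */);
    if (err)
        return err;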
+
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *sdma;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->sdma.has_page_queue) {
+ sdma = &adev->sdma.instance[i].page;
+ if (adev->mman.buffer_funcs_ring == sdma) {
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ break;
+ }
+ }
+ sdma = &adev->sdma.instance[i].ring;
+ if (adev->mman.buffer_funcs_ring == sdma) {
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ break;
+ }
+ }
+}
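
Note: this helper replaces the per-ASIC open-coded "is the buffer-funcs ring one of ours?" checks, including the optional page queue; the cik_sdma hunk below shows the typical call site. Every sdma_*_gfx_stop() path reduces to:

    amdgpu_sdma_unset_buffer_funcs_helper(adev);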
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 53ac3ebae8d6..7d99205c2e01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -124,4 +124,10 @@ int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
+int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
+ char *fw_name, u32 instance, bool duplicate);
+void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
+ bool duplicate);
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 5e6ddc7e101c..677ad2016976 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -127,7 +127,7 @@ TRACE_EVENT(amdgpu_bo_create,
TP_fast_assign(
__entry->bo = bo;
- __entry->pages = bo->tbo.resource->num_pages;
+ __entry->pages = PFN_UP(bo->tbo.resource->size);
__entry->type = bo->tbo.resource->mem_type;
__entry->prefer = bo->preferred_domains;
__entry->allow = bo->allowed_domains;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b1c455329023..7d647993fd7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -381,7 +381,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
dst.offset = 0;
r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
- new_mem->num_pages << PAGE_SHIFT,
+ new_mem->size,
amdgpu_bo_encrypted(abo),
bo->base.resv, &fence);
if (r)
@@ -424,8 +424,9 @@ error:
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
struct ttm_resource *mem)
{
- uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
+ u64 mem_size = (u64)mem->size;
struct amdgpu_res_cursor cursor;
+ u64 end;
if (mem->mem_type == TTM_PL_SYSTEM ||
mem->mem_type == TTM_PL_TT)
@@ -434,12 +435,21 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
return false;
amdgpu_res_first(mem, 0, mem_size, &cursor);
+ end = cursor.start + cursor.size;
+ while (cursor.remaining) {
+ amdgpu_res_next(&cursor, cursor.size);
- /* ttm_resource_ioremap only supports contiguous memory */
- if (cursor.size != mem_size)
- return false;
+ if (!cursor.remaining)
+ break;
+
+ /* ttm_resource_ioremap only supports contiguous memory */
+ if (end != cursor.start)
+ return false;
+
+ end = cursor.start + cursor.size;
+ }
- return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
+ return end <= adev->gmc.visible_vram_size;
}
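
Note: the rewritten check walks every cursor chunk instead of assuming the first chunk spans the whole resource; a resource is ioremap-able only if its chunks tile it with no holes. The invariant, restated as a sketch over illustrative chunk[]/nchunks arrays:

    u64 end = chunk[0].start + chunk[0].size;

    for (i = 1; i < nchunks; i++) {
        if (chunk[i].start != end)
            return false;    /* hole or reorder: not contiguous */
        end = chunk[i].start + chunk[i].size;
    }
    return end <= visible_vram_size;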
/*
@@ -561,7 +571,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+ size_t bus_size = (size_t)mem->size;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 3629d8f292ef..e46439274f3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -51,6 +51,9 @@ struct amdgpu_umc_ras {
struct amdgpu_ras_block_object ras_block;
void (*err_cnt_init)(struct amdgpu_device *adev);
bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
+ void (*convert_ras_error_address)(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst);
void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f36e4f08db6d..0b52af415b28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -191,7 +191,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
fw_name = FIRMWARE_VCN4_0_2;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = false;
+ adev->vcn.indirect_sram = true;
break;
case IP_VERSION(4, 0, 4):
fw_name = FIRMWARE_VCN4_0_4;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 80b7a6cfd026..253ea6b159df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -161,7 +161,8 @@
#define AMDGPU_VCN_SW_RING_FLAG (1 << 9)
#define AMDGPU_VCN_FW_LOGGING_FLAG (1 << 10)
#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11)
-#define AMDGPU_VCN_VF_RB_SETUP_FLAG (1 << 12)
+#define AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG (1 << 11)
+#define AMDGPU_VCN_VF_RB_SETUP_FLAG (1 << 14)
#define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER 0x00000001
#define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER 0x00000001
@@ -171,6 +172,9 @@
#define VCN_CODEC_DISABLE_MASK_HEVC (1 << 2)
#define VCN_CODEC_DISABLE_MASK_H264 (1 << 3)
+#define AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU (0)
+#define AMDGPU_VCN_SMU_DPM_INTERFACE_APU (1)
+
enum fw_queue_mode {
FW_QUEUE_RING_RESET = 1,
FW_QUEUE_DPG_HOLD_OFF = 2,
@@ -335,7 +339,9 @@ struct amdgpu_vcn4_fw_shared {
struct amdgpu_fw_shared_unified_queue_struct sq;
uint8_t pad1[8];
struct amdgpu_fw_shared_fw_logging fw_log;
+ uint8_t pad2[20];
struct amdgpu_fw_shared_rb_setup rb_setup;
+ struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
};
struct amdgpu_vcn_fwlog {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index e4af40b9a8aa..9c765b04aae3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -726,6 +726,12 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
+ if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
+ /* VF MMIO access (except the mailbox range) from the CPU
+ * will be blocked during SR-IOV runtime
+ */
+ adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
+
/* we have the ability to check now */
if (amdgpu_sriov_vf(adev)) {
switch (adev->asic_type) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index d94c31e68a14..49c4347d154c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -31,6 +31,7 @@
#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* the whole GPU is passed through for VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */
+#define AMDGPU_VF_MMIO_ACCESS_PROTECT (1 << 5) /* MMIO write access is not allowed in sriov runtime */
/* flags for indirect register access path supported by rlcg for sriov */
#define AMDGPU_RLCG_GC_WRITE_LEGACY (0x8 << 28)
@@ -297,6 +298,9 @@ struct amdgpu_video_codec_info;
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
+#define amdgpu_sriov_vf_mmio_access_protection(adev) \
+((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT)
+
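Note: the first consumer of this macro is the amdgpu_vm.c hunk further down, which keeps page-table updates on the SDMA ring when CPU MMIO writes would be blocked. Roughly, with use_cpu_for_updates()/use_sdma_for_updates() as hypothetical stand-ins for the vm_update_mode assignment:

    if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
        !amdgpu_sriov_vf_mmio_access_protection(adev))
        use_cpu_for_updates();
    else
        use_sdma_for_updates();
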
static inline bool is_virtual_machine(void)
{
#if defined(CONFIG_X86)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index f4b5301ea2a0..09dec2561adf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -498,8 +498,6 @@ static int amdgpu_vkms_sw_init(void *handle)
adev_to_drm(adev)->mode_config.preferred_depth = 24;
adev_to_drm(adev)->mode_config.prefer_shadow = 1;
- adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
-
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 83b0c5d86e48..2291aa14d888 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2338,7 +2338,11 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
*/
#ifdef CONFIG_X86_64
if (amdgpu_vm_update_mode == -1) {
- if (amdgpu_gmc_vram_full_visible(&adev->gmc))
+ /* For ASICs with VF MMIO access protection,
+ * avoid using the CPU for VM table updates.
+ */
+ if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ !amdgpu_sriov_vf_mmio_access_protection(adev))
adev->vm_manager.vm_update_mode =
AMDGPU_VM_USE_CPU_FOR_COMPUTE;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 2b0669c464f6..69e105fa41f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -116,8 +116,15 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
DMA_RESV_USAGE_BOOKKEEP);
}
- if (fence && !p->immediate)
+ if (fence && !p->immediate) {
+ /*
+ * Most hw generations now have a separate queue for page table
+ * updates, but when the queue is shared with userspace we need
+ * the extra CPU round trip to correctly flush the TLB.
+ */
+ set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags);
swap(*fence, f);
+ }
dma_fence_put(f);
return 0;
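
Note: DRM_SCHED_FENCE_DONT_PIPELINE makes dependency resolution fall back to the finished fence instead of optimizing to the scheduled fence, so the TLB flush cannot be pipelined behind a queue shared with userspace. Simplified from how drm_sched_entity treats dependencies (a sketch, not the exact scheduler code):

    s_fence = to_drm_sched_fence(fence);
    if (s_fence && s_fence->sched == sched &&
        !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags))
        /* safe to pipeline: wait only for the job to be scheduled */
        fence = dma_fence_get(&s_fence->scheduled);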
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 73a517bcf5c1..18c1a173d187 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -439,7 +439,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
/* Allocate blocks in desired range */
vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
- remaining_size = (u64)vres->base.num_pages << PAGE_SHIFT;
+ remaining_size = (u64)vres->base.size;
mutex_lock(&mgr->lock);
while (remaining_size) {
@@ -498,7 +498,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
LIST_HEAD(temp);
trim_list = &vres->blocks;
- original_size = (u64)vres->base.num_pages << PAGE_SHIFT;
+ original_size = (u64)vres->base.size;
/*
* If size value is rounded up to min_block_size, trim the last
@@ -533,8 +533,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
amdgpu_vram_mgr_block_size(block);
start >>= PAGE_SHIFT;
- if (start > vres->base.num_pages)
- start -= vres->base.num_pages;
+ if (start > PFN_UP(vres->base.size))
+ start -= PFN_UP(vres->base.size);
else
start = 0;
vres->base.start = max(vres->base.start, start);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 5647f13b98d4..cbca9866645c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -309,14 +309,10 @@ static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
*/
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 288fce7dc0ed..05051d5d2ec3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2800,8 +2800,6 @@ static int dce_v10_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
- adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
-
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index cbe5250b31cb..c928bc9eb202 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2918,8 +2918,6 @@ static int dce_v11_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
- adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
-
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b1c44fab074f..62315fd5a05f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2675,7 +2675,6 @@ static int dce_v6_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.preferred_depth = 24;
adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
- adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index a22b45c92792..87d5e4c21cb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2701,8 +2701,6 @@ static int dce_v8_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
- adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
-
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index e4dde41f2f68..af94ac580d3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3943,56 +3943,6 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
DRM_WARN_ONCE("CP firmware version too old, please update!");
}
-
-static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_1 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
- adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
- adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
- adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
- adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
- adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
- adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
- adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
- adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
- adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
- adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
- adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
- adev->gfx.rlc.reg_list_format_direct_reg_list_length =
- le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
-}
-
-static void gfx_v10_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_2 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
- adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
- adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
- adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
-}
-
-static void gfx_v10_0_init_tap_delays_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_4 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes);
- adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes);
- adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes);
- adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes);
- adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes);
-}
-
static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
{
bool ret = false;
@@ -4028,12 +3978,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
char fw_name[40];
char *wks = "";
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct gfx_firmware_header_v1_0 *cp_hdr;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
- unsigned int *tmp = NULL;
- unsigned int i = 0;
uint16_t version_major;
uint16_t version_minor;
@@ -4091,9 +4036,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -4102,9 +4045,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.me_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -4113,69 +4054,27 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.ce_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
if (!amdgpu_sriov_vf(adev)) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
+ /* don't fail hard on validation: there are apparently firmwares
+ * in the wild with an incorrect size in the header
+ */
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err)
+ dev_dbg(adev->dev,
+ "gfx10: amdgpu_ucode_validate() failed \"%s\"\n",
+ fw_name);
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
-
- adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
- adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
- adev->gfx.rlc.save_and_restore_offset =
- le32_to_cpu(rlc_hdr->save_and_restore_offset);
- adev->gfx.rlc.clear_state_descriptor_offset =
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
- adev->gfx.rlc.avail_scratch_ram_locations =
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
- adev->gfx.rlc.reg_restore_list_size =
- le32_to_cpu(rlc_hdr->reg_restore_list_size);
- adev->gfx.rlc.reg_list_format_start =
- le32_to_cpu(rlc_hdr->reg_list_format_start);
- adev->gfx.rlc.reg_list_format_separate_start =
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
- adev->gfx.rlc.starting_offsets_start =
- le32_to_cpu(rlc_hdr->starting_offsets_start);
- adev->gfx.rlc.reg_list_format_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
- adev->gfx.rlc.reg_list_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_size_bytes);
- adev->gfx.rlc.register_list_format =
- kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
- adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
- if (!adev->gfx.rlc.register_list_format) {
- err = -ENOMEM;
+ err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
+ if (err)
goto out;
- }
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
- adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
-
- adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
- adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
-
- if (version_major == 2) {
- if (version_minor >= 1)
- gfx_v10_0_init_rlc_ext_microcode(adev);
- if (version_minor >= 2)
- gfx_v10_0_init_rlc_iram_dram_microcode(adev);
- if (version_minor == 4) {
- gfx_v10_0_init_tap_delays_microcode(adev);
- }
- }
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
@@ -4185,9 +4084,8 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.mec_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
@@ -4195,164 +4093,18 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)
- adev->gfx.mec2_fw->data;
- adev->gfx.mec2_fw_version =
- le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec2_feature_version =
- le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
} else {
err = 0;
adev->gfx.mec2_fw = NULL;
}
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
- info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
- info->fw = adev->gfx.ce_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
- info->fw = adev->gfx.rlc_fw;
- if (info->fw) {
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
- if (adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
- adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
- adev->gfx.rlc.save_restore_list_srm_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
-
- if (adev->gfx.rlc.rlc_iram_ucode_size_bytes &&
- adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
- }
-
- }
-
- if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
- }
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) -
- le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
- info->fw = adev->gfx.mec_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- if (adev->gfx.mec2_fw) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- info->fw = adev->gfx.mec2_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) -
- le32_to_cpu(cp_hdr->jt_size) * 4,
- PAGE_SIZE);
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
- info->fw = adev->gfx.mec2_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
- PAGE_SIZE);
- }
- }
-
gfx_v10_0_check_fw_write_wait(adev);
out:
if (err) {
dev_err(adev->dev,
- "gfx10: Failed to load firmware \"%s\"\n",
+ "gfx10: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index ce8c792cef1a..671ca5a0f208 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -438,66 +438,12 @@ static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
kfree(adev->gfx.rlc.register_list_format);
}
-static void gfx_v11_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_1 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
- adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
- adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
- adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
- adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
- adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
- adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
- adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
- adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
- adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
- adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
- adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
- adev->gfx.rlc.reg_list_format_direct_reg_list_length =
- le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
-}
-
-static void gfx_v11_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_2 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
- adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
- adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
- adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
-}
-
-static void gfx_v11_0_init_rlcp_rlcv_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_3 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlcp_ucode_version = le32_to_cpu(rlc_hdr->rlcp_ucode_version);
- adev->gfx.rlcp_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcp_ucode_feature_version);
- adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
- adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);
-
- adev->gfx.rlcv_ucode_version = le32_to_cpu(rlc_hdr->rlcv_ucode_version);
- adev->gfx.rlcv_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcv_ucode_feature_version);
- adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
- adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);
-}
-
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[40];
char ucode_prefix[30];
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct gfx_firmware_header_v1_0 *cp_hdr;
- const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
- unsigned int *tmp = NULL;
- unsigned int i = 0;
uint16_t version_major;
uint16_t version_minor;
@@ -518,14 +464,11 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_fw->data, 2, 0);
if (adev->gfx.rs64_enable) {
dev_info(adev->dev, "CP RS64 enable\n");
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.pfp_fw->data;
- adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
-
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK);
} else {
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
@@ -536,14 +479,11 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
if (adev->gfx.rs64_enable) {
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.me_fw->data;
- adev->gfx.me_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
-
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK);
} else {
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
}
if (!amdgpu_sriov_vf(adev)) {
@@ -552,58 +492,14 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err)
+ goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
-
- adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
- adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
- adev->gfx.rlc.save_and_restore_offset =
- le32_to_cpu(rlc_hdr->save_and_restore_offset);
- adev->gfx.rlc.clear_state_descriptor_offset =
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
- adev->gfx.rlc.avail_scratch_ram_locations =
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
- adev->gfx.rlc.reg_restore_list_size =
- le32_to_cpu(rlc_hdr->reg_restore_list_size);
- adev->gfx.rlc.reg_list_format_start =
- le32_to_cpu(rlc_hdr->reg_list_format_start);
- adev->gfx.rlc.reg_list_format_separate_start =
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
- adev->gfx.rlc.starting_offsets_start =
- le32_to_cpu(rlc_hdr->starting_offsets_start);
- adev->gfx.rlc.reg_list_format_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
- adev->gfx.rlc.reg_list_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_size_bytes);
- adev->gfx.rlc.register_list_format =
- kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
- adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
- if (!adev->gfx.rlc.register_list_format) {
- err = -ENOMEM;
+ err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
+ if (err)
goto out;
- }
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
- adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
-
- adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
- adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
-
- if (version_major == 2) {
- if (version_minor >= 1)
- gfx_v11_0_init_rlc_ext_microcode(adev);
- if (version_minor >= 2)
- gfx_v11_0_init_rlc_iram_dram_microcode(adev);
- if (version_minor == 3)
- gfx_v11_0_init_rlcp_rlcv_microcode(adev);
- }
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
@@ -614,190 +510,23 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
if (adev->gfx.rs64_enable) {
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
- adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
- adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
-
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
} else {
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
}
/* only one MEC for gfx 11.0.0. */
adev->gfx.mec2_fw = NULL;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- if (adev->gfx.rs64_enable) {
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.pfp_fw->data;
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.me_fw->data;
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
- } else {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) -
- le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
- info->fw = adev->gfx.mec_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
- }
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
- info->fw = adev->gfx.rlc_fw;
- if (info->fw) {
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
- if (adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
- adev->gfx.rlc.save_restore_list_srm_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlc_iram_ucode_size_bytes &&
- adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
- }
- }
-
out:
if (err) {
dev_err(adev->dev,
- "gfx11: Failed to load firmware \"%s\"\n",
+ "gfx11: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
@@ -1842,7 +1571,7 @@ static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);
/* Enable trap for each kfd vmid. */
- data = RREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL));
+ data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
}
soc21_grbm_select(adev, 0, 0, 0, 0);
@@ -5245,6 +4974,8 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
u32 reg, data;
+ amdgpu_gfx_off_ctrl(adev, false);
+
reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(reg);
@@ -5258,6 +4989,8 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
else
WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
+
+ amdgpu_gfx_off_ctrl(adev, true);
}
static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
@@ -5343,6 +5076,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle,
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
gfx_v11_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
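
The gfx11 hunks above replace per-file CP header parsing with the shared amdgpu_gfx_cp_init_microcode() helper, whose body lives in amdgpu_gfx.c and is not part of this diff. A minimal sketch of what the v1-header path presumably does, assuming the gfx_firmware_header_v1_0 layout referenced above (the real helper also covers the v2/RS64 headers and the PSP ucode bookkeeping); the example_ name is hypothetical:

/* Sketch only; example_ names are hypothetical. */
static void example_cp_init_microcode_v1(struct amdgpu_device *adev,
					 const struct firmware *fw,
					 uint32_t *fw_version,
					 uint32_t *feature_version)
{
	const struct gfx_firmware_header_v1_0 *hdr;

	/* firmware blobs are little-endian on disk */
	hdr = (const struct gfx_firmware_header_v1_0 *)fw->data;
	*fw_version = le32_to_cpu(hdr->header.ucode_version);
	*feature_version = le32_to_cpu(hdr->ucode_feature_version);
}
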
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 1d6d3a852a0b..0320be4a5fc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1091,27 +1091,6 @@ static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
kfree(adev->gfx.rlc.register_list_format);
}
-static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_1 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
- adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
- adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
- adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
- adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
- adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
- adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
- adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
- adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
- adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
- adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
- adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
- adev->gfx.rlc.reg_list_format_direct_reg_list_length =
- le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
-}
-
static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
{
adev->gfx.me_fw_write_wait = false;
@@ -1273,9 +1252,6 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
{
char fw_name[30];
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct gfx_firmware_header_v1_0 *cp_hdr;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
@@ -1284,9 +1260,7 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -1295,9 +1269,7 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_validate(adev->gfx.me_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -1306,37 +1278,12 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_validate(adev->gfx.ce_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
- info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
- info->fw = adev->gfx.ce_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
out:
if (err) {
dev_err(adev->dev,
- "gfx9: Failed to load firmware \"%s\"\n",
+ "gfx9: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
@@ -1353,11 +1300,7 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
{
char fw_name[30];
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
- unsigned int *tmp = NULL;
- unsigned int i = 0;
uint16_t version_major;
uint16_t version_minor;
uint32_t smu_version;
@@ -1386,92 +1329,17 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err)
+ goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
- if (version_major == 2 && version_minor == 1)
- adev->gfx.rlc.is_rlc_v2_1 = true;
-
- adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
- adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
- adev->gfx.rlc.save_and_restore_offset =
- le32_to_cpu(rlc_hdr->save_and_restore_offset);
- adev->gfx.rlc.clear_state_descriptor_offset =
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
- adev->gfx.rlc.avail_scratch_ram_locations =
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
- adev->gfx.rlc.reg_restore_list_size =
- le32_to_cpu(rlc_hdr->reg_restore_list_size);
- adev->gfx.rlc.reg_list_format_start =
- le32_to_cpu(rlc_hdr->reg_list_format_start);
- adev->gfx.rlc.reg_list_format_separate_start =
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
- adev->gfx.rlc.starting_offsets_start =
- le32_to_cpu(rlc_hdr->starting_offsets_start);
- adev->gfx.rlc.reg_list_format_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
- adev->gfx.rlc.reg_list_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_size_bytes);
- adev->gfx.rlc.register_list_format =
- kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
- adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
- if (!adev->gfx.rlc.register_list_format) {
- err = -ENOMEM;
- goto out;
- }
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
- adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
-
- adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
- adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
-
- if (adev->gfx.rlc.is_rlc_v2_1)
- gfx_v9_0_init_rlc_ext_microcode(adev);
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
- info->fw = adev->gfx.rlc_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- if (adev->gfx.rlc.is_rlc_v2_1 &&
- adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
- adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
- adev->gfx.rlc.save_restore_list_srm_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
- }
- }
-
+ err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
if (err) {
dev_err(adev->dev,
- "gfx9: Failed to load firmware \"%s\"\n",
+ "gfx9: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.rlc_fw);
adev->gfx.rlc_fw = NULL;
@@ -1494,9 +1362,6 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
{
char fw_name[30];
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct gfx_firmware_header_v1_0 *cp_hdr;
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec.bin", chip_name);
@@ -1509,10 +1374,8 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_validate(adev->gfx.mec_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
-
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
@@ -1525,12 +1388,8 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)
- adev->gfx.mec2_fw->data;
- adev->gfx.mec2_fw_version =
- le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec2_feature_version =
- le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
} else {
err = 0;
adev->gfx.mec2_fw = NULL;
@@ -1540,49 +1399,12 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
}
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
- info->fw = adev->gfx.mec_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- if (adev->gfx.mec2_fw) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- info->fw = adev->gfx.mec2_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- /* TODO: Determine if MEC2 JT FW loading can be removed
- for all GFX V9 asic and above */
- if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
- info->fw = adev->gfx.mec2_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
- PAGE_SIZE);
- }
- }
- }
-
out:
gfx_v9_0_check_if_need_gfxoff(adev);
gfx_v9_0_check_fw_write_wait(adev);
if (err) {
dev_err(adev->dev,
- "gfx9: Failed to load firmware \"%s\"\n",
+ "gfx9: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.mec_fw);
adev->gfx.mec_fw = NULL;
@@ -5607,7 +5429,7 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
BUG_ON(offset > ring->buf_mask);
BUG_ON(ring->ring[offset] != 0x55aa55aa);
- cur = (ring->wptr & ring->buf_mask) - 1;
+ cur = (ring->wptr - 1) & ring->buf_mask;
if (likely(cur > offset))
ring->ring[offset] = cur - offset;
else
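
Both gfx9 (above) and gfx11 now hand the parsed RLC header version to amdgpu_gfx_rlc_init_microcode(). Its body is outside this diff; judging from the version checks removed in both files, the dispatch plausibly layers the v2.1/v2.2/v2.3 extensions on top of the v2.0 base parsing, roughly:

/* Sketch under the assumption that the shared helper mirrors the
 * removed per-IP code; the rlc_init_v2_x_* names are hypothetical. */
static int example_rlc_init_microcode(struct amdgpu_device *adev,
				      uint16_t version_major,
				      uint16_t version_minor)
{
	int err;

	if (version_major != 2)
		return -EINVAL; /* only v2.x RLC headers expected here */

	err = rlc_init_v2_0_microcode(adev); /* base header + reg lists */
	if (err)
		return err;

	if (version_minor >= 1)
		rlc_init_v2_1_microcode(adev); /* save/restore lists */
	if (version_minor >= 2)
		rlc_init_v2_2_microcode(adev); /* RLC IRAM/DRAM images */
	if (version_minor >= 3)
		rlc_init_v2_3_microcode(adev); /* RLCP/RLCV images */

	return 0;
}
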
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 846ccb6cf07d..66dfb574cc7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -186,6 +186,10 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
/* Use register 17 for GART */
const unsigned eng = 17;
unsigned int i;
+ unsigned char hub_ip = 0;
+
+ hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
+ GC_HWIP : MMHUB_HWIP;
spin_lock(&adev->gmc.invalidate_lock);
/*
@@ -199,8 +203,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
if (use_semaphore) {
for (i = 0; i < adev->usec_timeout; i++) {
/* a read return value of 1 means semaphore acquire */
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
- hub->eng_distance * eng);
+ tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
+ hub->eng_distance * eng, hub_ip);
if (tmp & 0x1)
break;
udelay(1);
@@ -210,12 +214,12 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
}
- WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
+ WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);
/* Wait for ACK with a delay. */
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
- hub->eng_distance * eng);
+ tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
+ hub->eng_distance * eng, hub_ip);
tmp &= 1 << vmid;
if (tmp)
break;
@@ -229,8 +233,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
* add semaphore release after invalidation,
* write with 0 means semaphore release
*/
- WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
- hub->eng_distance * eng, 0);
+ WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
+ hub->eng_distance * eng, 0, hub_ip);
/* Issue additional private vm invalidation to MMHUB */
if ((vmhub != AMDGPU_GFXHUB_0) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index b64cd46a159a..fef7d020bc5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -187,6 +187,19 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
mes_add_queue_pkt.trap_en = 1;
+
+ if (!(((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 4) &&
+ (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) &&
+ (adev->ip_versions[GC_HWIP][0] <= IP_VERSION(11, 0, 3))))
+ mes_add_queue_pkt.trap_en = 1;
+
+ /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
+ mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
+ mes_add_queue_pkt.gds_size = input->queue_size;
+
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
offsetof(union MESAPI__ADD_QUEUE, api_status));
@@ -1143,6 +1156,42 @@ static int mes_v11_0_sw_fini(void *handle)
return 0;
}
+static void mes_v11_0_kiq_dequeue_sched(struct amdgpu_device *adev)
+{
+ uint32_t data;
+ int i;
+
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, 3, AMDGPU_MES_SCHED_PIPE, 0, 0);
+
+ /* disable the queue if it's active */
+ if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
+ WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ }
+ data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+ data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_EN, 0);
+ data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_HIT, 1);
+ WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data);
+
+ WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+ WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 0);
+ WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 0);
+ WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 0);
+
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+
+ adev->mes.ring.sched.ready = false;
+}
+
static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring)
{
uint32_t tmp;
@@ -1194,6 +1243,9 @@ failure:
static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
{
+ if (adev->mes.ring.sched.ready)
+ mes_v11_0_kiq_dequeue_sched(adev);
+
mes_v11_0_enable(adev, false);
return 0;
}
@@ -1249,9 +1301,6 @@ failure:
static int mes_v11_0_hw_fini(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- adev->mes.ring.sched.ready = false;
return 0;
}
@@ -1283,7 +1332,8 @@ static int mes_v11_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!amdgpu_in_reset(adev))
+ if (!amdgpu_in_reset(adev) &&
+ (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
amdgpu_mes_self_test(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index a2f04b249132..12906ba74462 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -290,7 +290,6 @@ flr_done:
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index a977f0027928..e07757eea7ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -317,7 +317,6 @@ flr_done:
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index fd14fa9b9cd7..288c414babdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -529,7 +529,6 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 6bdffdc1c0b9..c52d246a1d96 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -342,14 +342,10 @@ static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
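
This is the first of several sdma_*_gfx_stop() conversions below: instead of comparing a hard-coded pair (or quartet) of instance rings against the TTM transfer ring, they all call amdgpu_sdma_unset_buffer_funcs_helper(). Assuming it simply generalizes the removed checks over num_instances, a sketch (example_ name hypothetical):

/* Sketch only: drop the TTM buffer funcs once if any SDMA
 * instance currently backs the transfer ring. */
static void example_unset_buffer_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (adev->mman.buffer_funcs_ring ==
		    &adev->sdma.instance[i].ring) {
			amdgpu_ttm_set_buffer_funcs_status(adev, false);
			break;
		}
	}
}
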
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 2584fa3cb13e..486d9b5c1b9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -516,14 +516,10 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 7fe8bf3417db..1122bd4eae98 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -561,44 +561,6 @@ static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
}
}
-static int sdma_v4_0_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
-{
- int err = 0;
- const struct sdma_firmware_header_v1_0 *hdr;
-
- err = amdgpu_ucode_validate(sdma_inst->fw);
- if (err)
- return err;
-
- hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
- sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
- sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
-
- if (sdma_inst->feature_version >= 20)
- sdma_inst->burst_nop = true;
-
- return 0;
-}
-
-static void sdma_v4_0_destroy_inst_ctx(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
-
- /* arcturus shares the same FW memory across
- all SDMA instances */
- if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
- adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))
- break;
- }
-
- memset((void *)adev->sdma.instance, 0,
- sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
-}
-
/**
* sdma_v4_0_init_microcode - load ucode images from disk
*
@@ -615,9 +577,7 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[30];
- int err = 0, i;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
+ int ret, i;
DRM_DEBUG("\n");
@@ -656,58 +616,25 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
-
- err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[0]);
- if (err)
- goto out;
-
- for (i = 1; i < adev->sdma.num_instances; i++) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (i == 0)
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) {
/* Arcturus & Aldebaran will leverage the same FW memory
for every SDMA instance */
- memcpy((void *)&adev->sdma.instance[i],
- (void *)&adev->sdma.instance[0],
- sizeof(struct amdgpu_sdma_instance));
- }
- else {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
-
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[i]);
- if (err)
- goto out;
- }
- }
-
- DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
- info->fw = adev->sdma.instance[i].fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ ret = amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
+ break;
+ } else {
+ ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);
+ if (ret)
+ return ret;
}
}
-out:
- if (err) {
- DRM_ERROR("sdma_v4_0: Failed to load firmware \"%s\"\n", fw_name);
- sdma_v4_0_destroy_inst_ctx(adev);
- }
- return err;
+ return ret;
}
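
The rewritten loop above relies on amdgpu_sdma_init_microcode(adev, fw_name, instance, duplicate); the helper itself is not in this diff. From its call sites here and in the v5.x/v6.0 files below, its contract appears to be: request and validate one image, and when duplicate is true fan instance 0's context out to all instances (the Arcturus/Aldebaran shared-image case). A sketch, with the example_ name hypothetical:

/* Sketch inferred from the call sites; PSP front-door ucode
 * registration and header parsing are elided. */
static int example_sdma_init_microcode(struct amdgpu_device *adev,
				       char *fw_name, u32 instance,
				       bool duplicate)
{
	int err, i;

	err = request_firmware(&adev->sdma.instance[instance].fw,
			       fw_name, adev->dev);
	if (err)
		return err;

	err = amdgpu_ucode_validate(adev->sdma.instance[instance].fw);
	if (err)
		return err;

	/* ... parse the sdma firmware header, set fw/feature_version ... */

	if (duplicate)
		for (i = 1; i < adev->sdma.num_instances; i++)
			memcpy(&adev->sdma.instance[i],
			       &adev->sdma.instance[0],
			       sizeof(struct amdgpu_sdma_instance));

	return 0;
}
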
/**
@@ -988,18 +915,12 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
- int i, unset = 0;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sdma[i] = &adev->sdma.instance[i].ring;
+ int i;
- if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = 1;
- }
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
@@ -1030,20 +951,12 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
*/
static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
int i;
- bool unset = false;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sdma[i] = &adev->sdma.instance[i].page;
-
- if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
- (!unset)) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = true;
- }
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
RB_ENABLE, 0);
@@ -1504,11 +1417,6 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
WREG32_SDMA(i, mmSDMA0_CNTL, temp);
if (!amdgpu_sriov_vf(adev)) {
- ring = &adev->sdma.instance[i].ring;
- adev->nbio.funcs->sdma_doorbell_range(adev, i,
- ring->use_doorbell, ring->doorbell_index,
- adev->doorbell_index.sdma_doorbell_range);
-
/* unhalt engine */
temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
@@ -2000,7 +1908,11 @@ static int sdma_v4_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->sdma.instance[i].page);
}
- sdma_v4_0_destroy_inst_ctx(adev);
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0) ||
+ adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))
+ amdgpu_sdma_destroy_inst_ctx(adev, true);
+ else
+ amdgpu_sdma_destroy_inst_ctx(adev, false);
return 0;
}
@@ -2023,8 +1935,11 @@ static int sdma_v4_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index a019ac92edb7..d4d9f196db83 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -240,10 +240,7 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[40];
- int err = 0, i;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct sdma_firmware_header_v1_0 *hdr;
+ int ret, i;
if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5)))
return 0;
@@ -272,38 +269,12 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
- if (err)
- goto out;
- hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
- adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
- adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
- if (adev->sdma.instance[i].feature_version >= 20)
- adev->sdma.instance[i].burst_nop = true;
- DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
- info->fw = adev->sdma.instance[i].fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
+ ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);
+ if (ret)
+ return ret;
}
-out:
- if (err) {
- DRM_ERROR("sdma_v5_0: Failed to load firmware \"%s\"\n", fw_name);
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
- }
- return err;
+
+ return ret;
}
static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring)
@@ -613,14 +584,10 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -1465,12 +1432,10 @@ static int sdma_v5_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
-
+ for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
- }
+
+ amdgpu_sdma_destroy_inst_ctx(adev, false);
return 0;
}
@@ -1491,8 +1456,11 @@ static int sdma_v5_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
sdma_v5_0_ctx_switch_enable(adev, false);
sdma_v5_0_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 95689ef4be10..809eca54fc61 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -89,33 +89,6 @@ static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
return base + internal_offset;
}
-static int sdma_v5_2_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
-{
- int err = 0;
- const struct sdma_firmware_header_v1_0 *hdr;
-
- err = amdgpu_ucode_validate(sdma_inst->fw);
- if (err)
- return err;
-
- hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
- sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
- sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
-
- if (sdma_inst->feature_version >= 20)
- sdma_inst->burst_nop = true;
-
- return 0;
-}
-
-static void sdma_v5_2_destroy_inst_ctx(struct amdgpu_device *adev)
-{
- release_firmware(adev->sdma.instance[0].fw);
-
- memset((void *)adev->sdma.instance, 0,
- sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
-}
-
/**
* sdma_v5_2_init_microcode - load ucode images from disk
*
@@ -132,9 +105,6 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[40];
- int err = 0, i;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
DRM_DEBUG("\n");
@@ -169,42 +139,7 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[0]);
- if (err)
- goto out;
-
- for (i = 1; i < adev->sdma.num_instances; i++)
- memcpy((void *)&adev->sdma.instance[i],
- (void *)&adev->sdma.instance[0],
- sizeof(struct amdgpu_sdma_instance));
-
- if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 0)))
- return 0;
-
- DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
- info->fw = adev->sdma.instance[i].fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
- }
-
-out:
- if (err) {
- DRM_ERROR("sdma_v5_2: Failed to load firmware \"%s\"\n", fw_name);
- sdma_v5_2_destroy_inst_ctx(adev);
- }
- return err;
+ return amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
}
static unsigned sdma_v5_2_ring_init_cond_exec(struct amdgpu_ring *ring)
@@ -479,18 +414,10 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
- struct amdgpu_ring *sdma2 = &adev->sdma.instance[2].ring;
- struct amdgpu_ring *sdma3 = &adev->sdma.instance[3].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1) ||
- (adev->mman.buffer_funcs_ring == sdma2) ||
- (adev->mman.buffer_funcs_ring == sdma3))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -1406,7 +1333,7 @@ static int sdma_v5_2_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
- sdma_v5_2_destroy_inst_ctx(adev);
+ amdgpu_sdma_destroy_inst_ctx(adev, true);
return 0;
}
@@ -1422,8 +1349,11 @@ static int sdma_v5_2_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
sdma_v5_2_ctx_switch_enable(adev, false);
sdma_v5_2_enable(adev, false);
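
sdma_v5_2 (and sdma_v6_0 below) tear firmware state down through amdgpu_sdma_destroy_inst_ctx(adev, duplicate). Matching the init helper, the duplicate flag presumably tells it the instances alias one firmware object, so only a single release_firmware() call is needed; a sketch (example_ name hypothetical):

/* Sketch only: release per-instance firmware unless the
 * instances share a single image. */
static void example_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
					  bool duplicate)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		if (duplicate)
			break;
	}

	memset(adev->sdma.instance, 0,
	       sizeof(struct amdgpu_sdma_instance) *
	       AMDGPU_MAX_SDMA_INSTANCES);
}
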
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 3a0b14be1a57..da3beb0bf2fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -78,33 +78,6 @@ static u32 sdma_v6_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
return base + internal_offset;
}
-static int sdma_v6_0_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
-{
- int err = 0;
- const struct sdma_firmware_header_v2_0 *hdr;
-
- err = amdgpu_ucode_validate(sdma_inst->fw);
- if (err)
- return err;
-
- hdr = (const struct sdma_firmware_header_v2_0 *)sdma_inst->fw->data;
- sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
- sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
-
- if (sdma_inst->feature_version >= 20)
- sdma_inst->burst_nop = true;
-
- return 0;
-}
-
-static void sdma_v6_0_destroy_inst_ctx(struct amdgpu_device *adev)
-{
- release_firmware(adev->sdma.instance[0].fw);
-
- memset((void*)adev->sdma.instance, 0,
- sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
-}
-
/**
* sdma_v6_0_init_microcode - load ucode images from disk
*
@@ -114,16 +87,10 @@ static void sdma_v6_0_destroy_inst_ctx(struct amdgpu_device *adev)
* the driver (not loaded into hw).
* Returns 0 on success, error on failure.
*/
-
-// emulation only, won't work on real chip
-// sdma 6.0.0 real chip need to use PSP to load firmware
static int sdma_v6_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[30];
char ucode_prefix[30];
- int err = 0, i;
- struct amdgpu_firmware_info *info = NULL;
- const struct sdma_firmware_header_v2_0 *sdma_hdr;
DRM_DEBUG("\n");
@@ -131,43 +98,7 @@ static int sdma_v6_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
- err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v6_0_init_inst_ctx(&adev->sdma.instance[0]);
- if (err)
- goto out;
-
- for (i = 1; i < adev->sdma.num_instances; i++) {
- memcpy((void*)&adev->sdma.instance[i],
- (void*)&adev->sdma.instance[0],
- sizeof(struct amdgpu_sdma_instance));
- }
-
- DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- sdma_hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data;
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH0];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH0;
- info->fw = adev->sdma.instance[0].fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes), PAGE_SIZE);
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH1];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH1;
- info->fw = adev->sdma.instance[0].fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
- }
-
-out:
- if (err) {
- DRM_ERROR("sdma_v6_0: Failed to load firmware \"%s\"\n", fw_name);
- sdma_v6_0_destroy_inst_ctx(adev);
- }
- return err;
+ return amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
}
static unsigned sdma_v6_0_ring_init_cond_exec(struct amdgpu_ring *ring)
@@ -467,14 +398,10 @@ static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
@@ -484,9 +411,6 @@ static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
}
-
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
}
/**
@@ -915,7 +839,8 @@ static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd,
m->sdmax_rlcx_rb_cntl =
order_base_2(prop->queue_size / 4) << SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
- 4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+ 4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
+ 1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;
m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);
@@ -1370,7 +1295,7 @@ static int sdma_v6_0_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
- sdma_v6_0_destroy_inst_ctx(adev);
+ amdgpu_sdma_destroy_inst_ctx(adev, true);
return 0;
}
@@ -1386,8 +1311,11 @@ static int sdma_v6_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
sdma_v6_0_ctx_switch_enable(adev, false);
sdma_v6_0_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index f675111ace20..4d5e718540aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -116,15 +116,14 @@ static void si_dma_stop(struct amdgpu_device *adev)
u32 rb_cntl;
unsigned i;
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
+
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
/* dma0 */
rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
rb_cntl &= ~DMA_RB_ENABLE;
WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
-
- if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
index 7aa570c1ce4a..81a6d5b94987 100644
--- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
@@ -31,12 +31,23 @@
#include "amdgpu_psp.h"
#include "amdgpu_xgmi.h"
+static bool sienna_cichlid_is_mode2_default(struct amdgpu_reset_control *reset_ctl)
+{
+#if 0
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7) &&
+ adev->pm.fw_version >= 0x3a5500 && !amdgpu_sriov_vf(adev))
+ return true;
+#endif
+ return false;
+}
+
static struct amdgpu_reset_handler *
sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
struct amdgpu_reset_handler *handler;
- struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
if (reset_context->method != AMD_RESET_METHOD_NONE) {
list_for_each_entry(handler, &reset_ctl->reset_handlers,
@@ -44,15 +55,13 @@ sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
if (handler->reset_method == reset_context->method)
return handler;
}
- } else {
- list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ }
+
+ if (sienna_cichlid_is_mode2_default(reset_ctl)) {
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
handler_list) {
- if (handler->reset_method == AMD_RESET_METHOD_MODE2 &&
- adev->pm.fw_version >= 0x3a5500 &&
- !amdgpu_sriov_vf(adev)) {
- reset_context->method = AMD_RESET_METHOD_MODE2;
+ if (handler->reset_method == AMD_RESET_METHOD_MODE2)
return handler;
- }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 183024d7c184..e3b2b6b4f1a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1211,6 +1211,20 @@ static int soc15_common_sw_fini(void *handle)
return 0;
}
+static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev)
+{
+ int i;
+
+ /* sdma doorbell range is programmed by the hypervisor */
+ if (!amdgpu_sriov_vf(adev)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->nbio.funcs->sdma_doorbell_range(adev, i,
+ true, adev->doorbell_index.sdma_engine[i] << 1,
+ adev->doorbell_index.sdma_doorbell_range);
+ }
+ }
+}
+
static int soc15_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1230,6 +1244,13 @@ static int soc15_common_hw_init(void *handle)
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
+ /* HW doorbell routing policy: doorbell writes that do not
+ * fall in the SDMA/IH/MM/ACV ranges are routed to CP. So the
+ * SDMA doorbell range must be initialized prior to CP IP
+ * block init and ring test. IH init already happens before
+ * CP.
+ */
+ soc15_sdma_doorbell_range_init(adev);
return 0;
}
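The routing comment above encodes an ordering requirement that is easy to violate. Here is a minimal, self-contained sketch of the required call order, with stub functions standing in for the real amdgpu steps; only the order is the point, not the bodies:

/* Ordering sketch for the doorbell routing comment above; the stubs
 * exist only to make the sequence compile and run. */
static void enable_doorbell_aperture(void) { /* aperture on first */ }
static void sdma_doorbell_range_init(void) { /* route SDMA doorbells */ }
static int  cp_init_and_ring_test(void)    { return 0; /* CP comes last */ }

static int hw_init_order_sketch(void)
{
	enable_doorbell_aperture();
	/* SDMA ranges must be claimed before CP init, or CP will swallow
	 * the SDMA doorbell writes during the ring test */
	sdma_doorbell_range_init();
	return cp_init_and_ring_test();
}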
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 5f0d6983714a..e08044008186 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -423,6 +423,7 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 0):
return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
return false;
default:
return true;
@@ -629,12 +630,18 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags =
AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
break;
case IP_VERSION(11, 0, 3):
adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
- AMD_CG_SUPPORT_JPEG_MGCG;
+ AMD_CG_SUPPORT_JPEG_MGCG |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_REPEATER_FGCG |
+ AMD_CG_SUPPORT_GFX_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 939cb203f7ad..f17d297b594b 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -327,10 +327,9 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
return;
}
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
/* the lowest lsb bits should be ignored */
@@ -343,10 +342,7 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
ADDR_OF_256B_BLOCK(channel_index) |
OFFSET_IN_256B_BLOCK(err_addr);
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1)
- amdgpu_umc_fill_error_record(err_data, err_addr,
+ amdgpu_umc_fill_error_record(err_data, err_addr,
retired_page, channel_index, umc_inst);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index bf7524f16b66..5d5d031c9e7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -187,20 +187,51 @@ static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
}
}
+static void umc_v6_7_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst)
+{
+ uint32_t channel_index;
+ uint64_t soc_pa, retired_page, column;
+
+ channel_index =
+ adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+ /* translate umc channel address to soc pa, 3 parts are included */
+ soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ /* The umc channel bits are not the original values; they are hashed */
+ SET_CHANNEL_HASH(channel_index, soc_pa);
+
+ /* clear [C4 C3 C2] in soc physical address */
+ soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
+
+ /* loop for all possibilities of [C4 C3 C2] */
+ for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
+ retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+
+ /* shift R14 bit */
+ retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+ }
+}
+
static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t ch_inst,
uint32_t umc_inst)
{
- uint64_t mc_umc_status, err_addr, soc_pa, retired_page, column;
- uint32_t channel_index;
+ uint64_t mc_umc_status, err_addr;
uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (mc_umc_status == 0)
@@ -209,42 +240,15 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
if (!err_data->err_addr)
return;
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- /* translate umc channel address to soc pa, 3 parts are included */
- soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* The umc channel bits are not original values, they are hashed */
- SET_CHANNEL_HASH(channel_index, soc_pa);
-
- /* clear [C4 C3 C2] in soc physical address */
- soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1) {
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
-
- /* shift R14 bit */
- retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
- }
- }
+ umc_v6_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
}
@@ -452,14 +456,11 @@ static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev,
static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
- uint32_t umc_reg_offset,
- uint32_t ch_inst,
+ uint32_t umc_reg_offset, uint32_t ch_inst,
uint32_t umc_inst)
{
uint32_t mc_umc_status_addr;
- uint32_t channel_index;
- uint64_t mc_umc_status, mc_umc_addrt0;
- uint64_t err_addr, soc_pa, retired_page, column;
+ uint64_t mc_umc_status = 0, mc_umc_addrt0, err_addr;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -477,45 +478,15 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
return;
}
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
-
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
- err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+ err_addr =
+ REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- /* translate umc channel address to soc pa, 3 parts are included */
- soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* The umc channel bits are not original values, they are hashed */
- SET_CHANNEL_HASH(channel_index, soc_pa);
-
- /* clear [C4 C3 C2] in soc physical address */
- soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1) {
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
-
- /* shift R14 bit */
- retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
- }
- }
+ umc_v6_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
/* clear umc status */
@@ -540,8 +511,7 @@ static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
ch_inst);
umc_v6_7_query_error_address(adev,
err_data,
- umc_reg_offset,
- ch_inst,
+ umc_reg_offset, ch_inst,
umc_inst);
}
}
@@ -583,4 +553,5 @@ struct amdgpu_umc_ras umc_v6_7_ras = {
.query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
.ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
.ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,
+ .convert_ras_error_address = umc_v6_7_convert_error_address,
};
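The new convert_error_address helper fans one UMC error address out to every page that may alias it: all combinations of the [C4 C3 C2] column bits, each with R14 in both states. A standalone sketch of that fan-out, with illustrative bit positions standing in for the real UMC_V6_7_* constants:

/* Standalone sketch of the page-retirement fan-out performed by
 * umc_v6_7_convert_error_address(); the bit positions are assumed
 * stand-ins, not the real register definitions. */
#include <stdint.h>
#include <stdio.h>

#define PA_C2_BIT     15	/* assumed position of C2 in the soc PA */
#define PA_R14_BIT    34	/* assumed position of R14 in the soc PA */
#define NA_MAP_PA_NUM 8		/* all combinations of [C4 C3 C2] */

static void fan_out_retired_pages(uint64_t soc_pa)
{
	uint64_t page, column;

	soc_pa &= ~(0x7ULL << PA_C2_BIT);	/* clear [C4 C3 C2] */
	for (column = 0; column < NA_MAP_PA_NUM; column++) {
		page = soc_pa | (column << PA_C2_BIT);
		printf("retire 0x%llx\n", (unsigned long long)page);
		page ^= 0x1ULL << PA_R14_BIT;	/* flip R14 */
		printf("retire 0x%llx\n", (unsigned long long)page);
	}
}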
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index a8cbda81828d..91235df54e22 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -208,7 +208,10 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
{
uint64_t mc_umc_status_addr;
uint64_t mc_umc_status, err_addr;
- uint32_t channel_index;
+ uint64_t mc_umc_addrt0, na_err_addr_base;
+ uint64_t na_err_addr, retired_page_addr;
+ uint32_t channel_index, addr_lsb, col = 0;
+ int ret = 0;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -229,13 +232,10 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
umc_inst * adev->umc.channel_inst_num +
ch_inst];
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
- uint32_t addr_lsb;
- uint64_t mc_umc_addrt0;
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
mc_umc_addrt0 = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
@@ -243,32 +243,24 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
/* the lowest lsb bits should be ignored */
addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);
-
err_addr &= ~((0x1ULL << addr_lsb) - 1);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
- uint64_t na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
- uint64_t na_err_addr, retired_page_addr;
- uint32_t col = 0;
- int ret = 0;
-
- /* loop for all possibilities of [C6 C5] in normal address. */
- for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
- na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
-
- /* Mapping normal error address to retired soc physical address. */
- ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
- na_err_addr, &retired_page_addr);
- if (ret) {
- dev_err(adev->dev, "Failed to map pa from umc na.\n");
- break;
- }
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
- retired_page_addr);
- amdgpu_umc_fill_error_record(err_data, na_err_addr,
- retired_page_addr, channel_index, umc_inst);
+ na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
+
+ /* loop for all possibilities of [C6 C5] in normal address. */
+ for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
+ na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
+
+ /* Map the normal error address to the retired soc physical address. */
+ ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
+ na_err_addr, &retired_page_addr);
+ if (ret) {
+ dev_err(adev->dev, "Failed to map pa from umc na.\n");
+ break;
}
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
+ retired_page_addr);
+ amdgpu_umc_fill_error_record(err_data, na_err_addr,
+ retired_page_addr, channel_index, umc_inst);
}
}
@@ -338,6 +330,31 @@ static void umc_v8_10_err_cnt_init(struct amdgpu_device *adev)
}
}
+static uint32_t umc_v8_10_query_ras_poison_mode_per_channel(
+ struct amdgpu_device *adev,
+ uint32_t umc_reg_offset)
+{
+ uint32_t ecc_ctrl_addr, ecc_ctrl;
+
+ ecc_ctrl_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccCtrl);
+ ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr +
+ umc_reg_offset) * 4);
+
+ return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_GeccCtrl, UCFatalEn);
+}
+
+static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev)
+{
+ uint32_t umc_reg_offset = 0;
+
+ /* A device with fatal error reporting enabled in umc node0
+ * instance0 channel0 is considered to be in fatal error mode
+ */
+ umc_reg_offset = get_umc_v8_10_reg_offset(adev, 0, 0, 0);
+ return !umc_v8_10_query_ras_poison_mode_per_channel(adev, umc_reg_offset);
+}
+
const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
.query_ras_error_count = umc_v8_10_query_ras_error_count,
.query_ras_error_address = umc_v8_10_query_ras_error_address,
@@ -348,4 +365,5 @@ struct amdgpu_umc_ras umc_v8_10_ras = {
.hw_ops = &umc_v8_10_ras_hw_ops,
},
.err_cnt_init = umc_v8_10_err_cnt_init,
+ .query_ras_poison_mode = umc_v8_10_query_ras_poison_mode,
};
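The poison-mode query above boils down to one field read: if UCFatalEn is set on node0 instance0 channel0 the device reports fatal errors, otherwise it is assumed to run in poison (page-retirement) mode. A sketch of that decision with an assumed field position:

/* Sketch of umc_v8_10_query_ras_poison_mode(); bit 0 is an assumed
 * stand-in for the UCFatalEn field position in GeccCtrl. */
#include <stdbool.h>
#include <stdint.h>

static bool poison_mode_sketch(uint32_t gecc_ctrl)
{
	bool uc_fatal_en = gecc_ctrl & 0x1;	/* assumed field position */

	return !uc_fatal_en;	/* poison mode == fatal reporting off */
}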
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
index f35253e0eaa6..b717fdaa46e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -108,20 +108,35 @@ static void umc_v8_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
}
}
+static void umc_v8_7_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst)
+{
+ uint64_t retired_page;
+ uint32_t channel_index;
+
+ channel_index =
+ adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+ /* translate umc channel address to soc pa, 3 parts are included */
+ retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+}
+
static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t ch_inst,
uint32_t umc_inst)
{
- uint64_t mc_umc_status, err_addr, retired_page;
- uint32_t channel_index;
+ uint64_t mc_umc_status, err_addr;
uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (mc_umc_status == 0)
@@ -130,24 +145,15 @@ static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
if (!err_data->err_addr)
return;
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- /* translate umc channel address to soc pa, 3 parts are included */
- retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1)
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
+ umc_v8_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
}
@@ -324,14 +330,12 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
uint32_t umc_inst)
{
uint32_t lsb, mc_umc_status_addr;
- uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
- uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+ uint64_t mc_umc_status, err_addr, mc_umc_addrt0;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
mc_umc_addrt0 =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
-
mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if (mc_umc_status == 0)
@@ -343,10 +347,9 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
return;
}
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
/* the lowest lsb bits should be ignored */
@@ -354,16 +357,8 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
err_addr &= ~((0x1ULL << lsb) - 1);
- /* translate umc channel address to soc pa, 3 parts are included */
- retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1)
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
+ umc_v8_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
/* clear umc status */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index b6f73b87c47e..897a5ce9c9da 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -150,6 +150,10 @@ static int vcn_v4_0_sw_init(void *handle)
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
fw_shared->sq.is_enabled = 1;
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
+ fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
+ AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
+
if (amdgpu_sriov_vf(adev))
fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 60a81649cf12..c7118843db05 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -742,7 +742,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbf88fffe, 0x877aff7f,
0x04000000, 0x8f7a857a,
0x886d7a6d, 0xb97b02dc,
- 0x8f7b997b, 0xb97a2a05,
+ 0x8f7b997b, 0xb97a3a05,
0x807a817a, 0xbf0d997b,
0xbf850002, 0x8f7a897a,
0xbf820001, 0x8f7a8a7a,
@@ -819,7 +819,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbefe037c, 0xbefc0370,
0xf4611c7a, 0xf8000000,
0x80708470, 0xbefc037e,
- 0xb9702a05, 0x80708170,
+ 0xb9703a05, 0x80708170,
0xbf0d9973, 0xbf850002,
0x8f708970, 0xbf820001,
0x8f708a70, 0xb97a1e06,
@@ -1069,7 +1069,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xb9f9f816, 0x876f7bff,
0xfffff800, 0x906f8b6f,
0xb9efa2c3, 0xb9f3f801,
- 0xb96e2a05, 0x806e816e,
+ 0xb96e3a05, 0x806e816e,
0xbf0d9972, 0xbf850002,
0x8f6e896e, 0xbf820001,
0x8f6e8a6e, 0xb96f1e06,
@@ -2114,7 +2114,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x007a0000, 0x7e000280,
0xbefe037a, 0xbeff037b,
0xb97b02dc, 0x8f7b997b,
- 0xb97a2a05, 0x807a817a,
+ 0xb97a3a05, 0x807a817a,
0xbf0d997b, 0xbf850002,
0x8f7a897a, 0xbf820001,
0x8f7a8a7a, 0xb97b1e06,
@@ -2157,7 +2157,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x01000000, 0xe0704100,
0x705d0100, 0xe0704200,
0x705d0200, 0xe0704300,
- 0x705d0300, 0xb9702a05,
+ 0x705d0300, 0xb9703a05,
0x80708170, 0xbf0d9973,
0xbf850002, 0x8f708970,
0xbf820001, 0x8f708a70,
@@ -2189,7 +2189,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbefe03ff, 0x0000ffff,
0xbeff0380, 0xe0704000,
0x705d0200, 0xbefe03c1,
- 0xb9702a05, 0x80708170,
+ 0xb9703a05, 0x80708170,
0xbf0d9973, 0xbf850002,
0x8f708970, 0xbf820001,
0x8f708a70, 0xb97a1e06,
@@ -2475,7 +2475,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xb9ef4803, 0x876f7bff,
0xfffff800, 0x906f8b6f,
0xb9efa2c3, 0xb9f3f801,
- 0xb96e2a05, 0x806e816e,
+ 0xb96e3a05, 0x806e816e,
0xbf0d9972, 0xbf850002,
0x8f6e896e, 0xbf820001,
0x8f6e8a6e, 0xb96f1e06,
@@ -2494,438 +2494,441 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
};
-
static const uint32_t cwsr_trap_gfx11_hex[] = {
- 0xbfa00001, 0xbfa0021b,
+ 0xbfa00001, 0xbfa0021e,
0xb0804006, 0xb8f8f802,
- 0x91788678, 0xb8fbf803,
- 0x8b6eff78, 0x00002000,
- 0xbfa10009, 0x8b6eff6d,
- 0x00ff0000, 0xbfa2001e,
- 0x8b6eff7b, 0x00000400,
- 0xbfa20041, 0xbf830010,
- 0xb8fbf803, 0xbfa0fffa,
- 0x8b6eff7b, 0x00000900,
- 0xbfa20015, 0x8b6eff7b,
- 0x000071ff, 0xbfa10008,
- 0x8b6fff7b, 0x00007080,
- 0xbfa10001, 0xbeee1287,
- 0xb8eff801, 0x846e8c6e,
- 0x8b6e6f6e, 0xbfa2000a,
+ 0x9178ff78, 0x00020006,
+ 0xb8fbf803, 0xbf0d9f6d,
+ 0xbfa20006, 0x8b6eff78,
+ 0x00002000, 0xbfa10009,
0x8b6eff6d, 0x00ff0000,
- 0xbfa20007, 0xb8eef801,
- 0x8b6eff6e, 0x00000800,
- 0xbfa20003, 0x8b6eff7b,
- 0x00000400, 0xbfa20026,
- 0xbefa4d82, 0xbf89fc07,
- 0x84fa887a, 0xf4005bbd,
- 0xf8000010, 0xbf89fc07,
- 0x846e976e, 0x9177ff77,
- 0x00800000, 0x8c776e77,
- 0xf4045bbd, 0xf8000000,
- 0xbf89fc07, 0xf4045ebd,
- 0xf8000008, 0xbf89fc07,
- 0x8bee6e6e, 0xbfa10001,
- 0xbe80486e, 0x8b6eff6d,
- 0x01ff0000, 0xbfa20005,
- 0x8c78ff78, 0x00002000,
- 0x80ec886c, 0x82ed806d,
- 0xbfa00005, 0x8b6eff6d,
- 0x01000000, 0xbfa20002,
- 0x806c846c, 0x826d806d,
- 0x8b6dff6d, 0x0000ffff,
- 0x8bfe7e7e, 0x8bea6a6a,
- 0xb978f802, 0xbe804a6c,
- 0x8b6dff6d, 0x0000ffff,
- 0xbefa0080, 0xb97a0283,
- 0xbeee007e, 0xbeef007f,
- 0xbefe0180, 0xbefe4d84,
- 0xbf89fc07, 0x8b7aff7f,
- 0x04000000, 0x847a857a,
- 0x8c6d7a6d, 0xbefa007e,
- 0x8b7bff7f, 0x0000ffff,
- 0xbefe00c1, 0xbeff00c1,
- 0xdca6c000, 0x007a0000,
- 0x7e000280, 0xbefe007a,
- 0xbeff007b, 0xb8fb02dc,
- 0x847b997b, 0xb8fa3b05,
- 0x807a817a, 0xbf0d997b,
- 0xbfa20002, 0x847a897a,
- 0xbfa00001, 0x847a8a7a,
- 0xb8fb1e06, 0x847b8a7b,
- 0x807a7b7a, 0x8b7bff7f,
- 0x0000ffff, 0x807aff7a,
- 0x00000200, 0x807a7e7a,
- 0x827b807b, 0xd7610000,
- 0x00010870, 0xd7610000,
- 0x00010a71, 0xd7610000,
- 0x00010c72, 0xd7610000,
- 0x00010e73, 0xd7610000,
- 0x00011074, 0xd7610000,
- 0x00011275, 0xd7610000,
- 0x00011476, 0xd7610000,
- 0x00011677, 0xd7610000,
- 0x00011a79, 0xd7610000,
- 0x00011c7e, 0xd7610000,
- 0x00011e7f, 0xbefe00ff,
- 0x00003fff, 0xbeff0080,
- 0xdca6c040, 0x007a0000,
- 0xd760007a, 0x00011d00,
- 0xd760007b, 0x00011f00,
+ 0xbfa2001e, 0x8b6eff7b,
+ 0x00000400, 0xbfa20041,
+ 0xbf830010, 0xb8fbf803,
+ 0xbfa0fffa, 0x8b6eff7b,
+ 0x00000900, 0xbfa20015,
+ 0x8b6eff7b, 0x000071ff,
+ 0xbfa10008, 0x8b6fff7b,
+ 0x00007080, 0xbfa10001,
+ 0xbeee1287, 0xb8eff801,
+ 0x846e8c6e, 0x8b6e6f6e,
+ 0xbfa2000a, 0x8b6eff6d,
+ 0x00ff0000, 0xbfa20007,
+ 0xb8eef801, 0x8b6eff6e,
+ 0x00000800, 0xbfa20003,
+ 0x8b6eff7b, 0x00000400,
+ 0xbfa20026, 0xbefa4d82,
+ 0xbf89fc07, 0x84fa887a,
+ 0xf4005bbd, 0xf8000010,
+ 0xbf89fc07, 0x846e976e,
+ 0x9177ff77, 0x00800000,
+ 0x8c776e77, 0xf4045bbd,
+ 0xf8000000, 0xbf89fc07,
+ 0xf4045ebd, 0xf8000008,
+ 0xbf89fc07, 0x8bee6e6e,
+ 0xbfa10001, 0xbe80486e,
+ 0x8b6eff6d, 0x01ff0000,
+ 0xbfa20005, 0x8c78ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbfa00005,
+ 0x8b6eff6d, 0x01000000,
+ 0xbfa20002, 0x806c846c,
+ 0x826d806d, 0x8b6dff6d,
+ 0x0000ffff, 0x8bfe7e7e,
+ 0x8bea6a6a, 0xb978f802,
+ 0xbe804a6c, 0x8b6dff6d,
+ 0x0000ffff, 0xbefa0080,
+ 0xb97a0283, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbefe4d84, 0xbf89fc07,
+ 0x8b7aff7f, 0x04000000,
+ 0x847a857a, 0x8c6d7a6d,
+ 0xbefa007e, 0x8b7bff7f,
+ 0x0000ffff, 0xbefe00c1,
+ 0xbeff00c1, 0xdca6c000,
+ 0x007a0000, 0x7e000280,
0xbefe007a, 0xbeff007b,
- 0xbef4007e, 0x8b75ff7f,
- 0x0000ffff, 0x8c75ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x10807fac,
- 0xbef1007d, 0xbef00080,
- 0xb8f302dc, 0x84739973,
- 0xbefe00c1, 0x857d9973,
- 0x8b7d817d, 0xbf06817d,
- 0xbfa20002, 0xbeff0080,
- 0xbfa00002, 0xbeff00c1,
- 0xbfa00009, 0xbef600ff,
- 0x01000000, 0xe0685080,
- 0x701d0100, 0xe0685100,
- 0x701d0200, 0xe0685180,
- 0x701d0300, 0xbfa00008,
+ 0xb8fb02dc, 0x847b997b,
+ 0xb8fa3b05, 0x807a817a,
+ 0xbf0d997b, 0xbfa20002,
+ 0x847a897a, 0xbfa00001,
+ 0x847a8a7a, 0xb8fb1e06,
+ 0x847b8a7b, 0x807a7b7a,
+ 0x8b7bff7f, 0x0000ffff,
+ 0x807aff7a, 0x00000200,
+ 0x807a7e7a, 0x827b807b,
+ 0xd7610000, 0x00010870,
+ 0xd7610000, 0x00010a71,
+ 0xd7610000, 0x00010c72,
+ 0xd7610000, 0x00010e73,
+ 0xd7610000, 0x00011074,
+ 0xd7610000, 0x00011275,
+ 0xd7610000, 0x00011476,
+ 0xd7610000, 0x00011677,
+ 0xd7610000, 0x00011a79,
+ 0xd7610000, 0x00011c7e,
+ 0xd7610000, 0x00011e7f,
+ 0xbefe00ff, 0x00003fff,
+ 0xbeff0080, 0xdca6c040,
+ 0x007a0000, 0xd760007a,
+ 0x00011d00, 0xd760007b,
+ 0x00011f00, 0xbefe007a,
+ 0xbeff007b, 0xbef4007e,
+ 0x8b75ff7f, 0x0000ffff,
+ 0x8c75ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x10807fac, 0xbef1007d,
+ 0xbef00080, 0xb8f302dc,
+ 0x84739973, 0xbefe00c1,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20002,
+ 0xbeff0080, 0xbfa00002,
+ 0xbeff00c1, 0xbfa00009,
0xbef600ff, 0x01000000,
- 0xe0685100, 0x701d0100,
- 0xe0685200, 0x701d0200,
- 0xe0685300, 0x701d0300,
+ 0xe0685080, 0x701d0100,
+ 0xe0685100, 0x701d0200,
+ 0xe0685180, 0x701d0300,
+ 0xbfa00008, 0xbef600ff,
+ 0x01000000, 0xe0685100,
+ 0x701d0100, 0xe0685200,
+ 0x701d0200, 0xe0685300,
+ 0x701d0300, 0xb8f03b05,
+ 0x80708170, 0xbf0d9973,
+ 0xbfa20002, 0x84708970,
+ 0xbfa00001, 0x84708a70,
+ 0xb8fa1e06, 0x847a8a7a,
+ 0x80707a70, 0x8070ff70,
+ 0x00000200, 0xbef600ff,
+ 0x01000000, 0x7e000280,
+ 0x7e020280, 0x7e040280,
+ 0xbefd0080, 0xd7610002,
+ 0x0000fa71, 0x807d817d,
+ 0xd7610002, 0x0000fa6c,
+ 0x807d817d, 0x917aff6d,
+ 0x80000000, 0xd7610002,
+ 0x0000fa7a, 0x807d817d,
+ 0xd7610002, 0x0000fa6e,
+ 0x807d817d, 0xd7610002,
+ 0x0000fa6f, 0x807d817d,
+ 0xd7610002, 0x0000fa78,
+ 0x807d817d, 0xb8faf803,
+ 0xd7610002, 0x0000fa7a,
+ 0x807d817d, 0xd7610002,
+ 0x0000fa7b, 0x807d817d,
+ 0xb8f1f801, 0xd7610002,
+ 0x0000fa71, 0x807d817d,
+ 0xb8f1f814, 0xd7610002,
+ 0x0000fa71, 0x807d817d,
+ 0xb8f1f815, 0xd7610002,
+ 0x0000fa71, 0x807d817d,
+ 0xbefe00ff, 0x0000ffff,
+ 0xbeff0080, 0xe0685000,
+ 0x701d0200, 0xbefe00c1,
0xb8f03b05, 0x80708170,
0xbf0d9973, 0xbfa20002,
0x84708970, 0xbfa00001,
0x84708a70, 0xb8fa1e06,
0x847a8a7a, 0x80707a70,
- 0x8070ff70, 0x00000200,
0xbef600ff, 0x01000000,
- 0x7e000280, 0x7e020280,
- 0x7e040280, 0xbefd0080,
- 0xd7610002, 0x0000fa71,
- 0x807d817d, 0xd7610002,
- 0x0000fa6c, 0x807d817d,
- 0x917aff6d, 0x80000000,
- 0xd7610002, 0x0000fa7a,
- 0x807d817d, 0xd7610002,
- 0x0000fa6e, 0x807d817d,
- 0xd7610002, 0x0000fa6f,
- 0x807d817d, 0xd7610002,
- 0x0000fa78, 0x807d817d,
- 0xb8faf803, 0xd7610002,
- 0x0000fa7a, 0x807d817d,
- 0xd7610002, 0x0000fa7b,
- 0x807d817d, 0xb8f1f801,
- 0xd7610002, 0x0000fa71,
- 0x807d817d, 0xb8f1f814,
- 0xd7610002, 0x0000fa71,
- 0x807d817d, 0xb8f1f815,
- 0xd7610002, 0x0000fa71,
- 0x807d817d, 0xbefe00ff,
- 0x0000ffff, 0xbeff0080,
- 0xe0685000, 0x701d0200,
- 0xbefe00c1, 0xb8f03b05,
- 0x80708170, 0xbf0d9973,
- 0xbfa20002, 0x84708970,
- 0xbfa00001, 0x84708a70,
- 0xb8fa1e06, 0x847a8a7a,
- 0x80707a70, 0xbef600ff,
- 0x01000000, 0xbef90080,
- 0xbefd0080, 0xbf800000,
- 0xbe804100, 0xbe824102,
- 0xbe844104, 0xbe864106,
- 0xbe884108, 0xbe8a410a,
- 0xbe8c410c, 0xbe8e410e,
- 0xd7610002, 0x0000f200,
- 0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
- 0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
+ 0xbef90080, 0xbefd0080,
+ 0xbf800000, 0xbe804100,
+ 0xbe824102, 0xbe844104,
+ 0xbe864106, 0xbe884108,
+ 0xbe8a410a, 0xbe8c410c,
+ 0xbe8e410e, 0xd7610002,
+ 0x0000f200, 0x80798179,
+ 0xd7610002, 0x0000f201,
0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
+ 0x0000f202, 0x80798179,
+ 0xd7610002, 0x0000f203,
0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
+ 0x0000f204, 0x80798179,
+ 0xd7610002, 0x0000f205,
0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
+ 0x0000f206, 0x80798179,
+ 0xd7610002, 0x0000f207,
0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xd7610002, 0x0000f20c,
+ 0x0000f208, 0x80798179,
+ 0xd7610002, 0x0000f209,
0x80798179, 0xd7610002,
- 0x0000f20d, 0x80798179,
- 0xd7610002, 0x0000f20e,
+ 0x0000f20a, 0x80798179,
+ 0xd7610002, 0x0000f20b,
0x80798179, 0xd7610002,
- 0x0000f20f, 0x80798179,
- 0xbf06a079, 0xbfa10006,
- 0xe0685000, 0x701d0200,
- 0x8070ff70, 0x00000080,
- 0xbef90080, 0x7e040280,
- 0x807d907d, 0xbf0aff7d,
- 0x00000060, 0xbfa2ffbc,
- 0xbe804100, 0xbe824102,
- 0xbe844104, 0xbe864106,
- 0xbe884108, 0xbe8a410a,
- 0xd7610002, 0x0000f200,
+ 0x0000f20c, 0x80798179,
+ 0xd7610002, 0x0000f20d,
0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
+ 0x0000f20e, 0x80798179,
+ 0xd7610002, 0x0000f20f,
+ 0x80798179, 0xbf06a079,
+ 0xbfa10006, 0xe0685000,
+ 0x701d0200, 0x8070ff70,
+ 0x00000080, 0xbef90080,
+ 0x7e040280, 0x807d907d,
+ 0xbf0aff7d, 0x00000060,
+ 0xbfa2ffbc, 0xbe804100,
+ 0xbe824102, 0xbe844104,
+ 0xbe864106, 0xbe884108,
+ 0xbe8a410a, 0xd7610002,
+ 0x0000f200, 0x80798179,
+ 0xd7610002, 0x0000f201,
0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
+ 0x0000f202, 0x80798179,
+ 0xd7610002, 0x0000f203,
0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
+ 0x0000f204, 0x80798179,
+ 0xd7610002, 0x0000f205,
0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
+ 0x0000f206, 0x80798179,
+ 0xd7610002, 0x0000f207,
0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
+ 0x0000f208, 0x80798179,
+ 0xd7610002, 0x0000f209,
0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xe0685000, 0x701d0200,
+ 0x0000f20a, 0x80798179,
+ 0xd7610002, 0x0000f20b,
+ 0x80798179, 0xe0685000,
+ 0x701d0200, 0xbefe00c1,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20002,
+ 0xbeff0080, 0xbfa00001,
+ 0xbeff00c1, 0xb8fb4306,
+ 0x8b7bc17b, 0xbfa10044,
+ 0xbfbd0000, 0x8b7aff6d,
+ 0x80000000, 0xbfa10040,
+ 0x847b867b, 0x847b827b,
+ 0xbef6007b, 0xb8f03b05,
+ 0x80708170, 0xbf0d9973,
+ 0xbfa20002, 0x84708970,
+ 0xbfa00001, 0x84708a70,
+ 0xb8fa1e06, 0x847a8a7a,
+ 0x80707a70, 0x8070ff70,
+ 0x00000200, 0x8070ff70,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xd71f0000,
+ 0x000100c1, 0xd7200000,
+ 0x000200c1, 0x16000084,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbefd0080,
+ 0xbfa20012, 0xbe8300ff,
+ 0x00000080, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf890000, 0xe0685000,
+ 0x701d0100, 0x807d037d,
+ 0x80700370, 0xd5250000,
+ 0x0001ff00, 0x00000080,
+ 0xbf0a7b7d, 0xbfa2fff4,
+ 0xbfa00011, 0xbe8300ff,
+ 0x00000100, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf890000, 0xe0685000,
+ 0x701d0100, 0x807d037d,
+ 0x80700370, 0xd5250000,
+ 0x0001ff00, 0x00000100,
+ 0xbf0a7b7d, 0xbfa2fff4,
0xbefe00c1, 0x857d9973,
0x8b7d817d, 0xbf06817d,
- 0xbfa20002, 0xbeff0080,
- 0xbfa00001, 0xbeff00c1,
- 0xb8fb4306, 0x8b7bc17b,
- 0xbfa10044, 0xbfbd0000,
- 0x8b7aff6d, 0x80000000,
- 0xbfa10040, 0x847b867b,
- 0x847b827b, 0xbef6007b,
- 0xb8f03b05, 0x80708170,
- 0xbf0d9973, 0xbfa20002,
- 0x84708970, 0xbfa00001,
- 0x84708a70, 0xb8fa1e06,
- 0x847a8a7a, 0x80707a70,
- 0x8070ff70, 0x00000200,
- 0x8070ff70, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xd71f0000, 0x000100c1,
- 0xd7200000, 0x000200c1,
- 0x16000084, 0x857d9973,
+ 0xbfa20004, 0xbef000ff,
+ 0x00000200, 0xbeff0080,
+ 0xbfa00003, 0xbef000ff,
+ 0x00000400, 0xbeff00c1,
+ 0xb8fb3b05, 0x807b817b,
+ 0x847b827b, 0x857d9973,
0x8b7d817d, 0xbf06817d,
- 0xbefd0080, 0xbfa20012,
- 0xbe8300ff, 0x00000080,
- 0xbf800000, 0xbf800000,
- 0xbf800000, 0xd8d80000,
- 0x01000000, 0xbf890000,
- 0xe0685000, 0x701d0100,
- 0x807d037d, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000080, 0xbf0a7b7d,
- 0xbfa2fff4, 0xbfa00011,
- 0xbe8300ff, 0x00000100,
- 0xbf800000, 0xbf800000,
- 0xbf800000, 0xd8d80000,
- 0x01000000, 0xbf890000,
- 0xe0685000, 0x701d0100,
- 0x807d037d, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000100, 0xbf0a7b7d,
- 0xbfa2fff4, 0xbefe00c1,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbfa20004,
- 0xbef000ff, 0x00000200,
- 0xbeff0080, 0xbfa00003,
- 0xbef000ff, 0x00000400,
- 0xbeff00c1, 0xb8fb3b05,
- 0x807b817b, 0x847b827b,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbfa20017,
+ 0xbfa20017, 0xbef600ff,
+ 0x01000000, 0xbefd0084,
+ 0xbf0a7b7d, 0xbfa10037,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
+ 0xe0685000, 0x701d0000,
+ 0xe0685080, 0x701d0100,
+ 0xe0685100, 0x701d0200,
+ 0xe0685180, 0x701d0300,
+ 0x807d847d, 0x8070ff70,
+ 0x00000200, 0xbf0a7b7d,
+ 0xbfa2ffef, 0xbfa00025,
0xbef600ff, 0x01000000,
0xbefd0084, 0xbf0a7b7d,
- 0xbfa10037, 0x7e008700,
+ 0xbfa10011, 0x7e008700,
0x7e028701, 0x7e048702,
0x7e068703, 0xe0685000,
- 0x701d0000, 0xe0685080,
- 0x701d0100, 0xe0685100,
- 0x701d0200, 0xe0685180,
+ 0x701d0000, 0xe0685100,
+ 0x701d0100, 0xe0685200,
+ 0x701d0200, 0xe0685300,
0x701d0300, 0x807d847d,
- 0x8070ff70, 0x00000200,
+ 0x8070ff70, 0x00000400,
0xbf0a7b7d, 0xbfa2ffef,
- 0xbfa00025, 0xbef600ff,
- 0x01000000, 0xbefd0084,
- 0xbf0a7b7d, 0xbfa10011,
- 0x7e008700, 0x7e028701,
- 0x7e048702, 0x7e068703,
+ 0xb8fb1e06, 0x8b7bc17b,
+ 0xbfa1000c, 0x847b837b,
+ 0x807b7d7b, 0xbefe00c1,
+ 0xbeff0080, 0x7e008700,
0xe0685000, 0x701d0000,
- 0xe0685100, 0x701d0100,
- 0xe0685200, 0x701d0200,
- 0xe0685300, 0x701d0300,
- 0x807d847d, 0x8070ff70,
- 0x00000400, 0xbf0a7b7d,
- 0xbfa2ffef, 0xb8fb1e06,
- 0x8b7bc17b, 0xbfa1000c,
- 0x847b837b, 0x807b7d7b,
- 0xbefe00c1, 0xbeff0080,
- 0x7e008700, 0xe0685000,
- 0x701d0000, 0x807d817d,
- 0x8070ff70, 0x00000080,
- 0xbf0a7b7d, 0xbfa2fff8,
- 0xbfa00141, 0xbef4007e,
- 0x8b75ff7f, 0x0000ffff,
- 0x8c75ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x10807fac, 0xb8f202dc,
- 0x84729972, 0x8b6eff7f,
- 0x04000000, 0xbfa1003a,
+ 0x807d817d, 0x8070ff70,
+ 0x00000080, 0xbf0a7b7d,
+ 0xbfa2fff8, 0xbfa00146,
+ 0xbef4007e, 0x8b75ff7f,
+ 0x0000ffff, 0x8c75ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x10807fac,
+ 0xb8f202dc, 0x84729972,
+ 0x8b6eff7f, 0x04000000,
+ 0xbfa1003a, 0xbefe00c1,
+ 0x857d9972, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20002,
+ 0xbeff0080, 0xbfa00001,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x8b6fc16f, 0xbfa1002f,
+ 0x846f866f, 0x846f826f,
+ 0xbef6006f, 0xb8f83b05,
+ 0x80788178, 0xbf0d9972,
+ 0xbfa20002, 0x84788978,
+ 0xbfa00001, 0x84788a78,
+ 0xb8ee1e06, 0x846e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa2000c,
+ 0xe0500000, 0x781d0000,
+ 0xbf8903f7, 0xdac00000,
+ 0x00000000, 0x807dff7d,
+ 0x00000080, 0x8078ff78,
+ 0x00000080, 0xbf0a6f7d,
+ 0xbfa2fff5, 0xbfa0000b,
+ 0xe0500000, 0x781d0000,
+ 0xbf8903f7, 0xdac00000,
+ 0x00000000, 0x807dff7d,
+ 0x00000100, 0x8078ff78,
+ 0x00000100, 0xbf0a6f7d,
+ 0xbfa2fff5, 0xbef80080,
0xbefe00c1, 0x857d9972,
0x8b7d817d, 0xbf06817d,
0xbfa20002, 0xbeff0080,
0xbfa00001, 0xbeff00c1,
- 0xb8ef4306, 0x8b6fc16f,
- 0xbfa1002f, 0x846f866f,
- 0x846f826f, 0xbef6006f,
- 0xb8f83b05, 0x80788178,
- 0xbf0d9972, 0xbfa20002,
- 0x84788978, 0xbfa00001,
- 0x84788a78, 0xb8ee1e06,
- 0x846e8a6e, 0x80786e78,
+ 0xb8ef3b05, 0x806f816f,
+ 0x846f826f, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20024, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
0x8078ff78, 0x00000200,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbefd0080,
- 0xbfa2000c, 0xe0500000,
- 0x781d0000, 0xbf8903f7,
- 0xdac00000, 0x00000000,
- 0x807dff7d, 0x00000080,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7d, 0xbfa2fff5,
- 0xbfa0000b, 0xe0500000,
- 0x781d0000, 0xbf8903f7,
- 0xdac00000, 0x00000000,
- 0x807dff7d, 0x00000100,
- 0x8078ff78, 0x00000100,
- 0xbf0a6f7d, 0xbfa2fff5,
- 0xbef80080, 0xbefe00c1,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8ef3b05,
- 0x806f816f, 0x846f826f,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20024,
- 0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000200, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10050,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10050, 0xe0505000,
+ 0x781d0000, 0xe0505080,
+ 0x781d0100, 0xe0505100,
+ 0x781d0200, 0xe0505180,
+ 0x781d0300, 0xbf8903f7,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7d,
+ 0xbfa2ffee, 0xe0505000,
+ 0x6e1d0000, 0xe0505080,
+ 0x6e1d0100, 0xe0505100,
+ 0x6e1d0200, 0xe0505180,
+ 0x6e1d0300, 0xbf8903f7,
+ 0xbfa00034, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10012, 0xe0505000,
+ 0x781d0000, 0xe0505100,
+ 0x781d0100, 0xe0505200,
+ 0x781d0200, 0xe0505300,
+ 0x781d0300, 0xbf8903f7,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7d,
+ 0xbfa2ffee, 0xb8ef1e06,
+ 0x8b6fc16f, 0xbfa1000e,
+ 0x846f836f, 0x806f7d6f,
+ 0xbefe00c1, 0xbeff0080,
0xe0505000, 0x781d0000,
- 0xe0505080, 0x781d0100,
- 0xe0505100, 0x781d0200,
- 0xe0505180, 0x781d0300,
0xbf8903f7, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
- 0x8078ff78, 0x00000200,
- 0xbf0a6f7d, 0xbfa2ffee,
+ 0x807d817d, 0x8078ff78,
+ 0x00000080, 0xbf0a6f7d,
+ 0xbfa2fff7, 0xbeff00c1,
0xe0505000, 0x6e1d0000,
- 0xe0505080, 0x6e1d0100,
- 0xe0505100, 0x6e1d0200,
- 0xe0505180, 0x6e1d0300,
- 0xbf8903f7, 0xbfa00034,
- 0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10012,
- 0xe0505000, 0x781d0000,
- 0xe0505100, 0x781d0100,
- 0xe0505200, 0x781d0200,
- 0xe0505300, 0x781d0300,
- 0xbf8903f7, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7d, 0xbfa2ffee,
- 0xb8ef1e06, 0x8b6fc16f,
- 0xbfa1000e, 0x846f836f,
- 0x806f7d6f, 0xbefe00c1,
- 0xbeff0080, 0xe0505000,
- 0x781d0000, 0xbf8903f7,
- 0x7e008500, 0x807d817d,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7d, 0xbfa2fff7,
- 0xbeff00c1, 0xe0505000,
- 0x6e1d0000, 0xe0505100,
- 0x6e1d0100, 0xe0505200,
- 0x6e1d0200, 0xe0505300,
- 0x6e1d0300, 0xbf8903f7,
+ 0xe0505100, 0x6e1d0100,
+ 0xe0505200, 0x6e1d0200,
+ 0xe0505300, 0x6e1d0300,
+ 0xbf8903f7, 0xb8f83b05,
+ 0x80788178, 0xbf0d9972,
+ 0xbfa20002, 0x84788978,
+ 0xbfa00001, 0x84788a78,
+ 0xb8ee1e06, 0x846e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x80f8ff78,
+ 0x00000050, 0xbef600ff,
+ 0x01000000, 0xbefd00ff,
+ 0x0000006c, 0x80f89078,
+ 0xf428403a, 0xf0000000,
+ 0xbf89fc07, 0x80fd847d,
+ 0xbf800000, 0xbe804300,
+ 0xbe824302, 0x80f8a078,
+ 0xf42c403a, 0xf0000000,
+ 0xbf89fc07, 0x80fd887d,
+ 0xbf800000, 0xbe804300,
+ 0xbe824302, 0xbe844304,
+ 0xbe864306, 0x80f8c078,
+ 0xf430403a, 0xf0000000,
+ 0xbf89fc07, 0x80fd907d,
+ 0xbf800000, 0xbe804300,
+ 0xbe824302, 0xbe844304,
+ 0xbe864306, 0xbe884308,
+ 0xbe8a430a, 0xbe8c430c,
+ 0xbe8e430e, 0xbf06807d,
+ 0xbfa1fff0, 0xb980f801,
+ 0x00000000, 0xbfbd0000,
0xb8f83b05, 0x80788178,
0xbf0d9972, 0xbfa20002,
0x84788978, 0xbfa00001,
0x84788a78, 0xb8ee1e06,
0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0x80f8ff78, 0x00000050,
0xbef600ff, 0x01000000,
- 0xbefd00ff, 0x0000006c,
- 0x80f89078, 0xf428403a,
- 0xf0000000, 0xbf89fc07,
- 0x80fd847d, 0xbf800000,
- 0xbe804300, 0xbe824302,
- 0x80f8a078, 0xf42c403a,
- 0xf0000000, 0xbf89fc07,
- 0x80fd887d, 0xbf800000,
- 0xbe804300, 0xbe824302,
- 0xbe844304, 0xbe864306,
- 0x80f8c078, 0xf430403a,
- 0xf0000000, 0xbf89fc07,
- 0x80fd907d, 0xbf800000,
- 0xbe804300, 0xbe824302,
- 0xbe844304, 0xbe864306,
- 0xbe884308, 0xbe8a430a,
- 0xbe8c430c, 0xbe8e430e,
- 0xbf06807d, 0xbfa1fff0,
- 0xb980f801, 0x00000000,
- 0xbfbd0000, 0xb8f83b05,
- 0x80788178, 0xbf0d9972,
- 0xbfa20002, 0x84788978,
- 0xbfa00001, 0x84788a78,
- 0xb8ee1e06, 0x846e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0xbef600ff,
- 0x01000000, 0xf4205bfa,
+ 0xf4205bfa, 0xf0000000,
+ 0x80788478, 0xf4205b3a,
0xf0000000, 0x80788478,
- 0xf4205b3a, 0xf0000000,
- 0x80788478, 0xf4205b7a,
+ 0xf4205b7a, 0xf0000000,
+ 0x80788478, 0xf4205c3a,
0xf0000000, 0x80788478,
- 0xf4205c3a, 0xf0000000,
- 0x80788478, 0xf4205c7a,
+ 0xf4205c7a, 0xf0000000,
+ 0x80788478, 0xf4205eba,
0xf0000000, 0x80788478,
- 0xf4205eba, 0xf0000000,
- 0x80788478, 0xf4205efa,
+ 0xf4205efa, 0xf0000000,
+ 0x80788478, 0xf4205e7a,
0xf0000000, 0x80788478,
- 0xf4205e7a, 0xf0000000,
- 0x80788478, 0xf4205cfa,
+ 0xf4205cfa, 0xf0000000,
+ 0x80788478, 0xf4205bba,
0xf0000000, 0x80788478,
+ 0xbf89fc07, 0xb96ef814,
0xf4205bba, 0xf0000000,
0x80788478, 0xbf89fc07,
- 0xb96ef814, 0xf4205bba,
- 0xf0000000, 0x80788478,
- 0xbf89fc07, 0xb96ef815,
- 0xbefd006f, 0xbefe0070,
- 0xbeff0071, 0x8b6f7bff,
- 0x000003ff, 0xb96f4803,
- 0x8b6f7bff, 0xfffff800,
- 0x856f8b6f, 0xb96fa2c3,
- 0xb973f801, 0xb8ee3b05,
- 0x806e816e, 0xbf0d9972,
- 0xbfa20002, 0x846e896e,
- 0xbfa00001, 0x846e8a6e,
- 0xb8ef1e06, 0x846f8a6f,
- 0x806e6f6e, 0x806eff6e,
- 0x00000200, 0x806e746e,
- 0x826f8075, 0x8b6fff6f,
- 0x0000ffff, 0xf4085c37,
- 0xf8000050, 0xf4085d37,
- 0xf8000060, 0xf4005e77,
- 0xf8000074, 0xbf89fc07,
- 0x8b6dff6d, 0x0000ffff,
- 0x8bfe7e7e, 0x8bea6a6a,
+ 0xb96ef815, 0xbefd006f,
+ 0xbefe0070, 0xbeff0071,
+ 0x8b6f7bff, 0x000003ff,
+ 0xb96f4803, 0x8b6f7bff,
+ 0xfffff800, 0x856f8b6f,
+ 0xb96fa2c3, 0xb973f801,
+ 0xb8ee3b05, 0x806e816e,
+ 0xbf0d9972, 0xbfa20002,
+ 0x846e896e, 0xbfa00001,
+ 0x846e8a6e, 0xb8ef1e06,
+ 0x846f8a6f, 0x806e6f6e,
+ 0x806eff6e, 0x00000200,
+ 0x806e746e, 0x826f8075,
+ 0x8b6fff6f, 0x0000ffff,
+ 0xf4085c37, 0xf8000050,
+ 0xf4085d37, 0xf8000060,
+ 0xf4005e77, 0xf8000074,
+ 0xbf89fc07, 0x8b6dff6d,
+ 0x0000ffff, 0x8bfe7e7e,
+ 0x8bea6a6a, 0xb8eef802,
+ 0xbf0d866e, 0xbfa20002,
+ 0xb97af802, 0xbe80486c,
0xb97af802, 0xbe804a6c,
0xbfb00000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index 250ab007399b..0f81670f6f9c 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -43,12 +43,14 @@
#define HAVE_XNACK (ASIC_FAMILY < CHIP_SIENNA_CICHLID)
#define HAVE_SENDMSG_RTN (ASIC_FAMILY >= CHIP_PLUM_BONITO)
#define HAVE_BUFFER_LDS_LOAD (ASIC_FAMILY < CHIP_PLUM_BONITO)
+#define SW_SA_TRAP (ASIC_FAMILY >= CHIP_PLUM_BONITO)
var SINGLE_STEP_MISSED_WORKAROUND = 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
var SQ_WAVE_STATUS_HALT_MASK = 0x2000
var SQ_WAVE_STATUS_ECC_ERR_MASK = 0x20000
+var SQ_WAVE_STATUS_TRAP_EN_SHIFT = 6
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
@@ -183,6 +185,13 @@ L_SKIP_RESTORE:
s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+#if SW_SA_TRAP
+ // If ttmp1[31] is set then trap may occur early.
+ // Spin wait until SAVECTX exception is raised.
+ s_bitcmp1_b32 s_save_pc_hi, 31
+ s_cbranch_scc1 L_CHECK_SAVE
+#endif
+
s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
s_cbranch_scc0 L_NOT_HALTED
@@ -1061,8 +1070,20 @@ L_RESTORE_HWREG:
s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+
+#if SW_SA_TRAP
+ // If traps are enabled then return to the shader with PRIV=0.
+ // Otherwise retain PRIV=1 for subsequent context save requests.
+ s_getreg_b32 s_restore_tmp, hwreg(HW_REG_STATUS)
+ s_bitcmp1_b32 s_restore_tmp, SQ_WAVE_STATUS_TRAP_EN_SHIFT
+ s_cbranch_scc1 L_RETURN_WITHOUT_PRIV
+
s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+ s_setpc_b64 [s_restore_pc_lo, s_restore_pc_hi]
+L_RETURN_WITHOUT_PRIV:
+#endif
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
L_END_PGM:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 56f7307c21d2..5feaba6a77de 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1584,6 +1584,8 @@ static int kfd_ioctl_smi_events(struct file *filep,
return kfd_smi_event_open(pdd->dev, &args->anon_fd);
}
+#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
+
static int kfd_ioctl_set_xnack_mode(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -1594,22 +1596,29 @@ static int kfd_ioctl_set_xnack_mode(struct file *filep,
if (args->xnack_enabled >= 0) {
if (!list_empty(&p->pqm.queues)) {
pr_debug("Process has user queues running\n");
- mutex_unlock(&p->mutex);
- return -EBUSY;
+ r = -EBUSY;
+ goto out_unlock;
}
- if (args->xnack_enabled && !kfd_process_xnack_mode(p, true))
+
+ if (p->xnack_enabled == args->xnack_enabled)
+ goto out_unlock;
+
+ if (args->xnack_enabled && !kfd_process_xnack_mode(p, true)) {
r = -EPERM;
- else
- p->xnack_enabled = args->xnack_enabled;
+ goto out_unlock;
+ }
+
+ r = svm_range_switch_xnack_reserve_mem(p, args->xnack_enabled);
} else {
args->xnack_enabled = p->xnack_enabled;
}
+
+out_unlock:
mutex_unlock(&p->mutex);
return r;
}
-#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
{
struct kfd_ioctl_svm_args *args = data;
@@ -1629,6 +1638,11 @@ static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
return r;
}
#else
+static int kfd_ioctl_set_xnack_mode(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ return -EPERM;
+}
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
{
return -EPERM;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e83725a28106..ecb4c3abc629 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -205,6 +205,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
}
queue_input.is_kfd_process = 1;
+ queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
+ queue_input.queue_size = q->properties.queue_size >> 2;
queue_input.paging = false;
queue_input.tba_addr = qpd->tba_addr;
@@ -1240,6 +1242,24 @@ static void init_interrupts(struct device_queue_manager *dqm)
dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i);
}
+static void init_sdma_bitmaps(struct device_queue_manager *dqm)
+{
+ unsigned int num_sdma_queues =
+ min_t(unsigned int, sizeof(dqm->sdma_bitmap)*8,
+ get_num_sdma_queues(dqm));
+ unsigned int num_xgmi_sdma_queues =
+ min_t(unsigned int, sizeof(dqm->xgmi_sdma_bitmap)*8,
+ get_num_xgmi_sdma_queues(dqm));
+
+ if (num_sdma_queues)
+ dqm->sdma_bitmap = GENMASK_ULL(num_sdma_queues-1, 0);
+ if (num_xgmi_sdma_queues)
+ dqm->xgmi_sdma_bitmap = GENMASK_ULL(num_xgmi_sdma_queues-1, 0);
+
+ dqm->sdma_bitmap &= ~get_reserved_sdma_queues_bitmap(dqm);
+ pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap);
+}
+
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
int pipe, queue;
@@ -1268,11 +1288,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
- dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
- dqm->sdma_bitmap &= ~(get_reserved_sdma_queues_bitmap(dqm));
- pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap);
-
- dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
+ init_sdma_bitmaps(dqm);
return 0;
}
@@ -1450,9 +1466,6 @@ static int set_sched_resources(struct device_queue_manager *dqm)
static int initialize_cpsch(struct device_queue_manager *dqm)
{
- uint64_t num_sdma_queues;
- uint64_t num_xgmi_sdma_queues;
-
pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
mutex_init(&dqm->lock_hidden);
@@ -1461,24 +1474,10 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->active_cp_queue_count = 0;
dqm->gws_queue_count = 0;
dqm->active_runlist = false;
-
- num_sdma_queues = get_num_sdma_queues(dqm);
- if (num_sdma_queues >= BITS_PER_TYPE(dqm->sdma_bitmap))
- dqm->sdma_bitmap = ULLONG_MAX;
- else
- dqm->sdma_bitmap = (BIT_ULL(num_sdma_queues) - 1);
-
- dqm->sdma_bitmap &= ~(get_reserved_sdma_queues_bitmap(dqm));
- pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap);
-
- num_xgmi_sdma_queues = get_num_xgmi_sdma_queues(dqm);
- if (num_xgmi_sdma_queues >= BITS_PER_TYPE(dqm->xgmi_sdma_bitmap))
- dqm->xgmi_sdma_bitmap = ULLONG_MAX;
- else
- dqm->xgmi_sdma_bitmap = (BIT_ULL(num_xgmi_sdma_queues) - 1);
-
INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
+ init_sdma_bitmaps(dqm);
+
return 0;
}
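init_sdma_bitmaps() exists because the old expression ~0ULL >> (64 - n) is undefined when n is 0 (a shift by 64). Clamping to the bitmap width first and building the mask GENMASK_ULL-style is safe for any queue count; a sketch assuming a 64-bit bitmap:

/* Sketch of why the bitmap setup was rewritten: handle n == 0 and
 * n >= 64 explicitly instead of shifting ~0ULL by an invalid amount. */
#include <stdint.h>

static uint64_t sdma_bitmap_sketch(unsigned int n)
{
	if (n == 0)
		return 0;		/* old code shifted by 64 here: UB */
	if (n >= 64)
		return ~0ULL;		/* saturate at the bitmap width */
	return (1ULL << n) - 1;		/* GENMASK_ULL(n - 1, 0) */
}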
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index a6fcbeeb7428..0d53f6067422 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -350,11 +350,11 @@ static void event_interrupt_wq_v11(struct kfd_dev *dev,
print_sq_intr_info_inst(context_id0, context_id1);
sq_int_priv = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV);
- if (sq_int_priv /*&& (kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ /*if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_CTXID0_DOORBELL_ID(context_id0),
KFD_CTXID0_TRAP_CODE(context_id0),
- NULL, 0))*/)
- return;
+ NULL, 0)))
+ return;*/
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
print_sq_intr_info_error(context_id0, context_id1);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index c70c026c9a93..2797029bd500 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -223,7 +223,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
page = pfn_to_page(pfn);
svm_range_bo_ref(prange->svm_bo);
page->zone_device_data = prange->svm_bo;
- lock_page(page);
+ zone_device_page_init(page);
}
static void
@@ -410,7 +410,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
uint64_t npages = (end - start) >> PAGE_SHIFT;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
- struct migrate_vma migrate;
+ struct migrate_vma migrate = { 0 };
unsigned long cpages = 0;
dma_addr_t *scratch;
void *buf;
@@ -666,7 +666,7 @@ out_oom:
static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start, uint64_t end,
- uint32_t trigger)
+ uint32_t trigger, struct page *fault_page)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
@@ -674,7 +674,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
unsigned long cpages = 0;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
- struct migrate_vma migrate;
+ struct migrate_vma migrate = { 0 };
dma_addr_t *scratch;
void *buf;
int r = -ENOMEM;
@@ -697,6 +697,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.src = buf;
migrate.dst = migrate.src + npages;
+ migrate.fault_page = fault_page;
scratch = (dma_addr_t *)(migrate.dst + npages);
kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
@@ -764,7 +765,7 @@ out:
* 0 - OK, otherwise error code
*/
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
- uint32_t trigger)
+ uint32_t trigger, struct page *fault_page)
{
struct amdgpu_device *adev;
struct vm_area_struct *vma;
@@ -805,7 +806,8 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
}
next = min(vma->vm_end, end);
- r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger);
+ r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger,
+ fault_page);
if (r < 0) {
pr_debug("failed %ld to migrate prange %p\n", r, prange);
break;
@@ -849,7 +851,7 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);
do {
- r = svm_migrate_vram_to_ram(prange, mm, trigger);
+ r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
if (r)
return r;
} while (prange->actual_loc && --retries);
@@ -950,7 +952,8 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
}
r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU);
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
+ vmf->page);
if (r)
pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
r, prange->svms, prange, prange->start, prange->last);
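Zero-initializing struct migrate_vma matters here because the structure keeps growing: a caller that does not set the new fault_page member must still hand the core a NULL. A toy illustration with a cut-down stand-in struct, not the kernel one:

/* Sketch of why `struct migrate_vma migrate = { 0 };` is used above:
 * newly added optional fields default to NULL/0 for old callers. */
#include <stddef.h>

struct migrate_vma_sketch {
	unsigned long *src, *dst;
	void *fault_page;	/* newer optional field */
};

static int zero_init_sketch(void)
{
	struct migrate_vma_sketch m = { 0 };	/* fault_page == NULL */

	return m.fault_page == NULL;	/* 1: safe default */
}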
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
index b3f0754b32fa..a5d7e6d22264 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
@@ -43,7 +43,7 @@ enum MIGRATION_COPY_DIR {
int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
struct mm_struct *mm, uint32_t trigger);
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
- uint32_t trigger);
+ uint32_t trigger, struct page *fault_page);
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index d982c154537e..4f6390f3236e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -126,6 +126,10 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
@@ -329,7 +333,8 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
<< SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
q->vmid << SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT |
1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
- 6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+ 6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
+ 1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;
m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
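The update_mqd_sdma() change above ORs one more single-bit field into RB_CNTL. A sketch of the same field packing with illustrative shift values; the real positions come from the SDMA0_QUEUE0_RB_CNTL__*__SHIFT register headers:

/* Sketch of the RB_CNTL packing above; all shift values are assumed
 * stand-ins for the generated register header constants. */
#include <stdint.h>

#define RB_SIZE_SHIFT              1	/* assumed */
#define RB_VMID_SHIFT              24	/* assumed */
#define RPTR_WB_ENABLE_SHIFT       12	/* assumed */
#define RPTR_WB_TIMER_SHIFT        16	/* assumed */
#define F32_WPTR_POLL_ENABLE_SHIFT 23	/* assumed */

static uint32_t pack_rb_cntl_sketch(uint32_t order, uint32_t vmid)
{
	return order << RB_SIZE_SHIFT |
	       vmid << RB_VMID_SHIFT |
	       1 << RPTR_WB_ENABLE_SHIFT |
	       6 << RPTR_WB_TIMER_SHIFT |
	       1 << F32_WPTR_POLL_ENABLE_SHIFT;	/* the newly added bit */
}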
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index cf5b4005534c..64fdf63093a0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -278,7 +278,7 @@ static void svm_range_free(struct svm_range *prange, bool update_mem_usage)
svm_range_free_dma_mappings(prange);
if (update_mem_usage && !p->xnack_enabled) {
- pr_debug("unreserve mem limit: %lld\n", size);
+ pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
}
@@ -2913,13 +2913,15 @@ retry_write_locked:
*/
if (prange->actual_loc)
r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
+ NULL);
else
r = 0;
}
} else {
r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
+ NULL);
}
if (r) {
pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
@@ -2956,6 +2958,64 @@ out:
return r;
}
+int
+svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
+{
+ struct svm_range *prange, *pchild;
+ uint64_t reserved_size = 0;
+ uint64_t size;
+ int r = 0;
+
+ pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
+
+ mutex_lock(&p->svms.lock);
+
+ list_for_each_entry(prange, &p->svms.list, list) {
+ svm_range_lock(prange);
+ list_for_each_entry(pchild, &prange->child_list, child_list) {
+ size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
+ if (xnack_enabled) {
+ amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ } else {
+ r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ if (r)
+ goto out_unlock;
+ reserved_size += size;
+ }
+ }
+
+ size = (prange->last - prange->start + 1) << PAGE_SHIFT;
+ if (xnack_enabled) {
+ amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ } else {
+ r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ if (r)
+ goto out_unlock;
+ reserved_size += size;
+ }
+out_unlock:
+ svm_range_unlock(prange);
+ if (r)
+ break;
+ }
+
+ if (r)
+ amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ else
+		/* Changing the xnack mode must be done inside the svms lock, to avoid
+		 * racing with svm_range_deferred_list_work unreserving memory in parallel.
+		 */
+ p->xnack_enabled = xnack_enabled;
+
+ mutex_unlock(&p->svms.lock);
+ return r;
+}
+
void svm_range_list_fini(struct kfd_process *p)
{
struct svm_range *prange;
@@ -3220,7 +3280,8 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
return 0;
if (!best_loc) {
- r = svm_migrate_vram_to_ram(prange, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
+ r = svm_migrate_vram_to_ram(prange, mm,
+ KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
*migrated = !r;
return r;
}
@@ -3281,7 +3342,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
mutex_lock(&prange->migrate_mutex);
do {
r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_TTM_EVICTION);
+ KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
} while (!r && prange->actual_loc && --retries);
if (!r && prange->actual_loc)
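[Editor's sketch] svm_range_switch_xnack_reserve_mem (added above) walks every range and child range, converting the userptr memory-limit accounting to the new xnack mode and rolling the reservations back if any one of them fails. A hedged sketch of a caller, e.g. a set-xnack-mode ioctl path; the real call site is outside this diff:

/* Hypothetical caller: only flip the mode via the helper, so the
 * accounting can never go out of sync with p->xnack_enabled (which the
 * helper itself updates under svms.lock). */
static int kfd_set_xnack_mode_sketch(struct kfd_process *p, bool enable)
{
	if (p->xnack_enabled == enable)
		return 0;

	return svm_range_switch_xnack_reserve_mem(p, enable);
}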
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 012c53729516..7a33b93f9df6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -203,6 +203,7 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
void svm_range_set_max_pages(struct amdgpu_device *adev);
+int svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled);
#else
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4c73727e0b7d..3c072754738d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1110,7 +1110,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
hw_params.fb[i] = &fb_info->fb[i];
switch (adev->ip_versions[DCE_HWIP][0]) {
- case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
hw_params.dpia_supported = true;
hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
break;
@@ -3815,8 +3816,6 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
/* indicates support for immediate flip */
adev_to_drm(adev)->mode_config.async_page_flip = true;
- adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
-
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
@@ -6109,7 +6108,6 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
aconnector->base.name);
aconnector->base.force = DRM_FORCE_OFF;
- aconnector->base.override_edid = false;
return;
}
@@ -6144,8 +6142,6 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
}
-
- aconnector->base.override_edid = true;
create_eml_sink(aconnector);
}
@@ -7478,15 +7474,15 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
* We also need vupdate irq for the actual core vblank handling
* at end of vblank.
*/
- dm_set_vupdate_irq(new_state->base.crtc, true);
- drm_crtc_vblank_get(new_state->base.crtc);
+ WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0);
+ WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
__func__, new_state->base.crtc->base.id);
} else if (old_vrr_active && !new_vrr_active) {
/* Transition VRR active -> inactive:
* Allow vblank irq disable again for fixed refresh rate.
*/
- dm_set_vupdate_irq(new_state->base.crtc, false);
+ WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0);
drm_crtc_vblank_put(new_state->base.crtc);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
__func__, new_state->base.crtc->base.id);
@@ -8242,23 +8238,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
mutex_unlock(&dm->dc_lock);
}
- /* Count number of newly disabled CRTCs for dropping PM refs later. */
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- if (old_crtc_state->active && !new_crtc_state->active)
- crtc_disable_count++;
-
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
-
- /* For freesync config update on crtc state and params for irq */
- update_stream_irq_parameters(dm, dm_new_crtc_state);
-
- /* Handle vrr on->off / off->on transitions */
- amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
- dm_new_crtc_state);
- }
-
/**
* Enable interrupts for CRTCs that are newly enabled or went through
* a modeset. It was intentionally deferred until after the front end
@@ -8268,16 +8247,29 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
- bool configure_crc = false;
enum amdgpu_dm_pipe_crc_source cur_crc_src;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
+ struct crc_rd_work *crc_rd_wrk;
+#endif
+#endif
+ /* Count number of newly disabled CRTCs for dropping PM refs later. */
+ if (old_crtc_state->active && !new_crtc_state->active)
+ crtc_disable_count++;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ /* For freesync config update on crtc state and params for irq */
+ update_stream_irq_parameters(dm, dm_new_crtc_state);
+
+#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ crc_rd_wrk = dm->crc_rd_wrk;
#endif
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
cur_crc_src = acrtc->dm_irq_params.crc_src;
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
#endif
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (new_crtc_state->active &&
(!old_crtc_state->active ||
@@ -8285,16 +8277,19 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_stream_retain(dm_new_crtc_state->stream);
acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
manage_dm_interrupts(adev, acrtc, true);
+ }
+ /* Handle vrr on->off / off->on transitions */
+ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
#ifdef CONFIG_DEBUG_FS
+ if (new_crtc_state->active &&
+ (!old_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
/**
* Frontend may have changed so reapply the CRC capture
* settings for the stream.
*/
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-
if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
- configure_crc = true;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
@@ -8306,14 +8301,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#endif
- }
-
- if (configure_crc)
if (amdgpu_dm_crtc_configure_crc_source(
crtc, dm_new_crtc_state, cur_crc_src))
DRM_DEBUG_DRIVER("Failed to configure crc source");
-#endif
+ }
}
+#endif
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
@@ -9392,10 +9385,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
}
- if (!pre_validate_dsc(state, &dm_state, vars)) {
- ret = -EINVAL;
- goto fail;
- }
}
#endif
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -9529,6 +9518,15 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ if (!pre_validate_dsc(state, &dm_state, vars)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+#endif
+
/* Run this here since we want to validate the streams we created */
ret = drm_atomic_helper_check_planes(dev, state);
if (ret) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index c8da18e45b0e..26291db0a3cf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -60,11 +60,15 @@ static bool link_supports_psrsu(struct dc_link *link)
*/
void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
- if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+ if (!(link->connector_signal & SIGNAL_TYPE_EDP)) {
+ link->psr_settings.psr_feature_enabled = false;
return;
+ }
- if (link->type == dc_connection_none)
+ if (link->type == dc_connection_none) {
+ link->psr_settings.psr_feature_enabled = false;
return;
+ }
if (link->dpcd_caps.psr_info.psr_version == 0) {
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
@@ -170,7 +174,13 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
&stream, 1,
&params);
- power_opt |= psr_power_opt_z10_static_screen;
+ /*
+	 * Only enable static-screen optimizations for PSR1. For PSR SU, this
+	 * causes issues with the vstartup interrupt, which amdgpu_dm relies
+	 * on to send vblank events.
+ */
+ if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+ power_opt |= psr_power_opt_z10_static_screen;
return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 5d70f9901d13..ee0456b5e14e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -24,6 +24,7 @@
*/
#include "dm_services.h"
+#include "core_types.h"
#include "ObjectID.h"
#include "atomfirmware.h"
@@ -50,13 +51,6 @@
#define LAST_RECORD_TYPE 0xff
#define SMU9_SYSPLL0_ID 0
-struct i2c_id_config_access {
- uint8_t bfI2C_LineMux:4;
- uint8_t bfHW_EngineID:3;
- uint8_t bfHW_Capable:1;
- uint8_t ucAccess;
-};
-
static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
struct atom_i2c_record *record,
struct graphics_object_i2c_info *info);
@@ -1374,7 +1368,7 @@ static enum bp_result bios_parser_get_lttpr_interop(
default:
break;
}
-
+ DC_LOG_BIOS("DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE: %d tbl_revision.major = %d tbl_revision.minor = %d\n", *dce_caps, tbl_revision.major, tbl_revision.minor);
return result;
}
@@ -1390,6 +1384,7 @@ static enum bp_result bios_parser_get_lttpr_caps(
if (!DATA_TABLES(dce_info))
return BP_RESULT_UNSUPPORTED;
+ *dce_caps = 0;
header = GET_IMAGE(struct atom_common_table_header,
DATA_TABLES(dce_info));
get_atom_data_table_revision(header, &tbl_revision);
@@ -1423,7 +1418,11 @@ static enum bp_result bios_parser_get_lttpr_caps(
default:
break;
}
-
+ DC_LOG_BIOS("DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE: %d tbl_revision.major = %d tbl_revision.minor = %d\n", *dce_caps, tbl_revision.major, tbl_revision.minor);
+ if (dcb->ctx->dc->config.force_bios_enable_lttpr && *dce_caps == 0) {
+ *dce_caps = 1;
+ DC_LOG_BIOS("DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE: forced enabled");
+ }
return result;
}
@@ -2994,13 +2993,22 @@ static enum bp_result construct_integrated_info(
info->ext_disp_conn_info.path[i].ext_encoder_obj_id.id,
info->ext_disp_conn_info.path[i].caps
);
+ if (info->ext_disp_conn_info.path[i].caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)
+ DC_LOG_BIOS("BIOS EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
+ else if (bp->base.ctx->dc->config.force_bios_fixed_vs) {
+ info->ext_disp_conn_info.path[i].caps |= EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN;
+ DC_LOG_BIOS("driver forced EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
+ }
}
-
// Log the Checksum and Voltage Swing
DC_LOG_BIOS("Integrated info table CHECKSUM: %d\n"
"Integrated info table FIX_DP_VOLTAGE_SWING: %d\n",
info->ext_disp_conn_info.checksum,
info->ext_disp_conn_info.fixdpvoltageswing);
+ if (bp->base.ctx->dc->config.force_bios_fixed_vs && info->ext_disp_conn_info.fixdpvoltageswing == 0) {
+ info->ext_disp_conn_info.fixdpvoltageswing = bp->base.ctx->dc->config.force_bios_fixed_vs & 0xF;
+ DC_LOG_BIOS("driver forced fixdpvoltageswing = %d\n", info->ext_disp_conn_info.fixdpvoltageswing);
+ }
}
/* Sort voltage table from low to high*/
for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
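[Editor's note] The force_bios_fixed_vs debug option does double duty in this hunk: any non-zero value forces the FIXED_VS path capability on, and its low nibble supplies the forced voltage swing. A worked example of the nibble packing (value chosen purely for illustration):

/* force_bios_fixed_vs = 0x5 (hypothetical):
 *   non-zero       -> EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN forced on per path
 *   0x5 & 0xF = 5  -> fixdpvoltageswing forced to 5
 */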
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 0d30d1d9d67e..650f3b4b562e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -179,7 +179,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
} else if (dispclk_wdivider == 127 && current_dispclk_wdivider != 127) {
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, 126);
- REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 100);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
@@ -206,7 +206,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
- REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 1000);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
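[Editor's note] These REG_WAIT changes only widen the poll budget. Assuming DC's usual REG_WAIT(reg, field, value, delay_between_poll_us, max_tries) semantics, the worst case becomes:

/* DENTIST_DISPCLK_CHG_DONE worst-case wait:
 *   before: 50 us * 100  =   5 ms (first site), 50 us * 1000 = 50 ms (second)
 *   after:  50 us * 2000 = 100 ms at both sites
 * The DENTIST_DPPCLK_CHG_DONE wait (5 us * 100) is unchanged.
 */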
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 193a0f3de18d..1131c6d73f6c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -137,11 +137,20 @@ static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;
+
if (disable) {
+ if (stream_enc && stream_enc->funcs->disable_fifo)
+ pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc);
+
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
- } else
+ } else {
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+
+ if (stream_enc && stream_enc->funcs->enable_fifo)
+ pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc);
+ }
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
index 897105d1c111..ef0795b14a1f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
@@ -339,29 +339,24 @@ void dcn314_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zs
if (!clk_mgr->smu_present)
return;
- if (!clk_mgr->base.ctx->dc->debug.enable_z9_disable_interface &&
- (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY))
- support = DCN_ZSTATE_SUPPORT_DISALLOW;
-
-
// Arg[15:0] = 8/9/0 for Z8/Z9/disallow -> existing bits
// Arg[16] = Disallow Z9 -> new bit
switch (support) {
case DCN_ZSTATE_SUPPORT_ALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
- param = 9;
+ param = (1 << 10) | (1 << 9) | (1 << 8);
break;
case DCN_ZSTATE_SUPPORT_DISALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
- param = 8;
+ param = 0;
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
- param = 0x00010008;
+ param = (1 << 10);
break;
default: //DCN_ZSTATE_SUPPORT_UNKNOWN
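[Editor's note] The z-state message argument switches from magic decimal values to an explicit bitmask. Comparing the three cases suggests the per-bit meaning below; these names are inferred from this hunk alone, not taken from a header, so treat them as assumptions:

/* Assumed bit layout of the VBIOSSMC_MSG_AllowZstatesEntry argument: */
#define ALLOW_Z8_SKETCH		(1 << 8)
#define ALLOW_Z9_SKETCH		(1 << 9)
#define ALLOW_Z10_SKETCH	(1 << 10)

/* DCN_ZSTATE_SUPPORT_ALLOW          -> ALLOW_Z8 | ALLOW_Z9 | ALLOW_Z10
 * DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY -> ALLOW_Z10
 * DCN_ZSTATE_SUPPORT_DISALLOW       -> 0
 */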
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index c6785969eb1a..1c612ccf1944 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -156,12 +156,14 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
unsigned int num_levels;
+ struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
clk_mgr_base->clks.p_state_change_support = true;
clk_mgr_base->clks.prev_p_state_change_support = true;
clk_mgr_base->clks.fclk_prev_p_state_change_support = true;
clk_mgr->smu_present = false;
+ clk_mgr->dpm_present = false;
if (!clk_mgr_base->bw_params)
return;
@@ -178,22 +180,29 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
/* DCFCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
- &num_levels);
+ &num_entries_per_clk->num_dcfclk_levels);
/* SOCCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
- &num_levels);
+ &num_entries_per_clk->num_socclk_levels);
+
/* DTBCLK */
if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch)
dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
- &num_levels);
+ &num_entries_per_clk->num_dtbclk_levels);
/* DISPCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
- &num_levels);
+ &num_entries_per_clk->num_dispclk_levels);
+ num_levels = num_entries_per_clk->num_dispclk_levels;
+
+ if (num_entries_per_clk->num_dcfclk_levels &&
+ num_entries_per_clk->num_dtbclk_levels &&
+ num_entries_per_clk->num_dispclk_levels)
+ clk_mgr->dpm_present = true;
if (clk_mgr_base->ctx->dc->debug.min_disp_clk_khz) {
unsigned int i;
@@ -325,6 +334,21 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
if (enter_display_off == safe_to_lower)
dcn30_smu_set_num_of_displays(clk_mgr, display_count);
+ clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
+
+ total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+ fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
+
+ if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) {
+ clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
+
+ /* To enable FCLK P-state switching, send FCLK_PSTATE_SUPPORTED message to PMFW */
+ if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && clk_mgr_base->clks.fclk_p_state_change_support) {
+ /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
+ dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_SUPPORTED);
+ }
+ }
+
if (dc->debug.force_min_dcfclk_mhz > 0)
new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
@@ -344,7 +368,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
clk_mgr_base->clks.prev_num_ways = clk_mgr_base->clks.num_ways;
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
@@ -353,27 +376,25 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
dcn32_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
}
- total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+
p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
- fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.p_state_change_support = p_state_change_support;
/* to disable P-State switching, set UCLK min = max */
if (!clk_mgr_base->clks.p_state_change_support)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
}
- if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support) &&
- clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21) {
- clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
+ /* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
+ if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
+ update_fclk = true;
+ }
- /* To disable FCLK P-state switching, send FCLK_PSTATE_NOTSUPPORTED message to PMFW */
- if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support) {
- /* Handle code for sending a message to PMFW that FCLK P-state change is not supported */
- dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_NOTSUPPORTED);
- }
+ if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) {
+ /* Handle code for sending a message to PMFW that FCLK P-state change is not supported */
+ dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_NOTSUPPORTED);
}
/* Always update saved value, even if new value not set due to P-State switching unsupported */
@@ -382,21 +403,11 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
update_uclk = true;
}
- /* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
- if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
- update_fclk = true;
- }
-
/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
if (clk_mgr_base->clks.p_state_change_support &&
(update_uclk || !clk_mgr_base->clks.prev_p_state_change_support))
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
- if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) {
- /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
- dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_SUPPORTED);
- }
-
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
clk_mgr_base->clks.num_ways = new_clocks->num_ways;
@@ -624,7 +635,7 @@ static void dcn32_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current
khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
else
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
} else {
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
@@ -640,24 +651,42 @@ static void dcn32_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
return;
dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
}
/* Get current memclk states, update bounding box */
static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
unsigned int num_levels;
if (!clk_mgr->smu_present)
return;
- /* Refresh memclk states */
+ /* Refresh memclk and fclk states */
dcn32_init_single_clock(clk_mgr, PPCLK_UCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz,
- &num_levels);
+ &num_entries_per_clk->num_memclk_levels);
+
+ dcn32_init_single_clock(clk_mgr, PPCLK_FCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
+ &num_entries_per_clk->num_fclk_levels);
+
+ if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
+ num_levels = num_entries_per_clk->num_memclk_levels;
+ } else {
+ num_levels = num_entries_per_clk->num_fclk_levels;
+ }
+
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
+ if (clk_mgr->dpm_present && !num_levels)
+ clk_mgr->dpm_present = false;
+
+ if (!clk_mgr->dpm_present)
+ dcn32_patch_dpm_table(clk_mgr_base->bw_params);
+
DC_FP_START();
/* Refresh bounding box */
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
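[Editor's sketch] The thrust of the dcn32_clk_mgr rework: each PPCLK now records its own DPM level count instead of sharing one num_levels, the merged clock table is sized by the larger of the UCLK and FCLK counts, and dpm_present tracks whether the SMU reported levels for the core clocks at all. A condensed sketch of that bookkeeping, reusing the field names from the hunks above (dcn32_patch_dpm_table() internals are outside this diff):

static void refresh_table_size_sketch(struct clk_bw_params *bw_params,
				      struct clk_mgr_internal *clk_mgr)
{
	struct clk_limit_num_entries *n = &bw_params->clk_table.num_entries_per_clk;
	unsigned int num_levels = max(n->num_memclk_levels, n->num_fclk_levels);

	/* merged table spans the longer of the two per-clock lists */
	bw_params->clk_table.num_entries = num_levels ? num_levels : 1;

	/* no levels reported -> fall back to a patched, static table */
	if (!num_levels)
		clk_mgr->dpm_present = false;
	if (!clk_mgr->dpm_present)
		dcn32_patch_dpm_table(bw_params);
}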
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 19eb960d75d8..997ab031f816 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1201,7 +1201,7 @@ static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
int count = 0;
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->plane_state)
+ if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
continue;
/* Timeout 100 ms */
@@ -1734,10 +1734,20 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
int i, k, l;
struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
struct dc_state *old_state;
+ bool subvp_prev_use = false;
dc_z10_restore(dc);
dc_allow_idle_optimizations(dc, false);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ /* Check old context for SubVP */
+ subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+ if (subvp_prev_use)
+ break;
+ }
+
for (i = 0; i < context->stream_count; i++)
dc_streams[i] = context->streams[i];
@@ -1750,6 +1760,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->stream_count == 0)
dc->hwss.prepare_bandwidth(dc, context);
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, false);
+
disable_dangling_plane(dc, context);
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
@@ -1774,6 +1787,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
}
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
+
result = dc->hwss.apply_ctx_to_hw(dc, context);
if (result != DC_OK) {
@@ -1791,6 +1807,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.interdependent_update_lock(dc, context, false);
dc->hwss.post_unlock_program_front_end(dc, context);
}
+
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
+
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
@@ -1840,6 +1862,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.optimize_bandwidth(dc, context);
}
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, true);
+
if (dc->ctx->dce_version >= DCE_VERSION_MAX)
TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
else
@@ -2003,6 +2028,9 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.optimize_bandwidth(dc, context);
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, true);
+
dc->optimized_required = false;
dc->wm_optimized_required = false;
}
@@ -2322,9 +2350,13 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
type = get_scaling_info_update_type(u);
elevate_update_type(&overall_type, type);
- if (u->flip_addr)
+ if (u->flip_addr) {
update_flags->bits.addr_update = 1;
-
+ if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
+ update_flags->bits.tmz_changed = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ }
+ }
if (u->in_transfer_func)
update_flags->bits.in_transfer_func_change = 1;
@@ -2759,11 +2791,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->abm_level)
stream->abm_level = *update->abm_level;
- if (update->periodic_interrupt0)
- stream->periodic_interrupt0 = *update->periodic_interrupt0;
-
- if (update->periodic_interrupt1)
- stream->periodic_interrupt1 = *update->periodic_interrupt1;
+ if (update->periodic_interrupt)
+ stream->periodic_interrupt = *update->periodic_interrupt;
if (update->gamut_remap)
stream->gamut_remap_matrix = *update->gamut_remap;
@@ -2917,6 +2946,12 @@ static bool update_planes_and_stream_state(struct dc *dc,
dc_resource_state_copy_construct(
dc->current_state, context);
+ /* For each full update, remove all existing phantom pipes first.
+ * Ensures that we have enough pipes for newly added MPO planes
+ */
+ if (dc->res_pool->funcs->remove_phantom_pipes)
+ dc->res_pool->funcs->remove_phantom_pipes(dc, context);
+
/*remove old surfaces from context */
if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
@@ -2983,13 +3018,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
- if (stream_update->periodic_interrupt0 &&
- dc->hwss.setup_periodic_interrupt)
- dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
-
- if (stream_update->periodic_interrupt1 &&
- dc->hwss.setup_periodic_interrupt)
- dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
+ if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
+ dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
stream_update->vrr_infopacket ||
@@ -3198,6 +3228,9 @@ static void commit_planes_for_stream(struct dc *dc,
if (get_seamless_boot_stream_count(context) == 0)
dc->hwss.prepare_bandwidth(dc, context);
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, false);
+
context_clock_trace(dc, context);
}
@@ -3326,8 +3359,14 @@ static void commit_planes_for_stream(struct dc *dc,
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
* move the SubVP lock to after the phantom pipes have been setup
*/
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ } else {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ }
+
return;
}
@@ -3487,6 +3526,9 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type != UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
+ if (update_type != UPDATE_TYPE_FAST)
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
if (update_type != UPDATE_TYPE_FAST)
if (dc->hwss.commit_subvp_config)
@@ -3521,11 +3563,91 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
+/* Determines if the incoming context requires applying a transition state with unnecessary
+ * pipe splitting and ODM disabled, due to hardware limitations. In cases where
+ * the OPP associated with an MPCC might change due to plane additions, this function
+ * returns true.
+ */
+static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
+ struct dc_stream_state *stream,
+ int surface_count,
+ bool *is_plane_addition)
+{
+
+ struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
+ bool force_minimal_pipe_splitting = false;
+ uint32_t i;
+
+ *is_plane_addition = false;
+
+ if (cur_stream_status &&
+ dc->current_state->stream_count > 0 &&
+ dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
+ /* determine if minimal transition is required due to MPC*/
+ if (surface_count > 0) {
+ if (cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+ }
+
+ if (cur_stream_status &&
+ dc->current_state->stream_count == 1 &&
+ dc->debug.enable_single_display_2to1_odm_policy) {
+ /* determine if minimal transition is required due to dynamic ODM*/
+ if (surface_count > 0) {
+ if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+ }
+
+ /* For SubVP pipe split case when adding MPO video
+ * we need to add a minimal transition. In this case
+ * there will be 2 streams (1 main stream, 1 phantom
+ * stream).
+ */
+ if (cur_stream_status &&
+ dc->current_state->stream_count == 2 &&
+ stream->mall_stream_config.type == SUBVP_MAIN) {
+ bool is_pipe_split = false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream &&
+ (dc->current_state->res_ctx.pipe_ctx[i].bottom_pipe ||
+ dc->current_state->res_ctx.pipe_ctx[i].next_odm_pipe)) {
+ is_pipe_split = true;
+ break;
+ }
+ }
+
+ /* determine if minimal transition is required due to SubVP*/
+ if (surface_count > 0 && is_pipe_split) {
+ if (cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+ }
+
+ return force_minimal_pipe_splitting;
+}
+
static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context)
{
struct dc_state *transition_context = dc_create_state(dc);
- enum pipe_split_policy tmp_policy;
+ enum pipe_split_policy tmp_mpc_policy;
+ bool temp_dynamic_odm_policy;
+ bool temp_subvp_policy;
enum dc_status ret = DC_ERROR_UNEXPECTED;
unsigned int i, j;
@@ -3533,10 +3655,16 @@ static bool commit_minimal_transition_state(struct dc *dc,
return false;
if (!dc->config.is_vmin_only_asic) {
- tmp_policy = dc->debug.pipe_split_policy;
+ tmp_mpc_policy = dc->debug.pipe_split_policy;
dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
}
+ temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
+ dc->debug.enable_single_display_2to1_odm_policy = false;
+
+ temp_subvp_policy = dc->debug.force_disable_subvp;
+ dc->debug.force_disable_subvp = true;
+
dc_resource_state_copy_construct(transition_base_context, transition_context);
//commit minimal state
@@ -3557,20 +3685,23 @@ static bool commit_minimal_transition_state(struct dc *dc,
ret = dc_commit_state_no_check(dc, transition_context);
}
- //always release as dc_commit_state_no_check retains in good case
+ /*always release as dc_commit_state_no_check retains in good case*/
dc_release_state(transition_context);
- //restore previous pipe split policy
+ /*restore previous pipe split and odm policy*/
if (!dc->config.is_vmin_only_asic)
- dc->debug.pipe_split_policy = tmp_policy;
+ dc->debug.pipe_split_policy = tmp_mpc_policy;
+
+ dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
+ dc->debug.force_disable_subvp = temp_subvp_policy;
if (ret != DC_OK) {
- //this should never happen
+ /*this should never happen*/
BREAK_TO_DEBUGGER();
return false;
}
- //force full surface update
+ /*force full surface update*/
for (i = 0; i < dc->current_state->stream_count; i++) {
for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
@@ -3593,24 +3724,14 @@ bool dc_update_planes_and_stream(struct dc *dc,
* cause underflow. Apply stream configuration with minimal pipe
* split first to avoid unsupported transitions for active pipes.
*/
- bool force_minimal_pipe_splitting = false;
- bool is_plane_addition = false;
+ bool force_minimal_pipe_splitting;
+ bool is_plane_addition;
- struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
-
- if (cur_stream_status &&
- dc->current_state->stream_count > 0 &&
- dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
- /* determine if minimal transition is required */
- if (surface_count > 0) {
- if (cur_stream_status->plane_count > surface_count) {
- force_minimal_pipe_splitting = true;
- } else if (cur_stream_status->plane_count < surface_count) {
- force_minimal_pipe_splitting = true;
- is_plane_addition = true;
- }
- }
- }
+ force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
+ dc,
+ stream,
+ surface_count,
+ &is_plane_addition);
/* on plane addition, minimal state is the current one */
if (force_minimal_pipe_splitting && is_plane_addition &&
@@ -3627,7 +3748,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
&context))
return false;
- /* on plane addition, minimal state is the new one */
+ /* on plane removal, minimal state is the new one */
if (force_minimal_pipe_splitting && !is_plane_addition) {
if (!commit_minimal_transition_state(dc, context)) {
dc_release_state(context);
@@ -4014,7 +4135,7 @@ struct dc_sink *dc_link_add_remote_sink(
* Treat device as no EDID device if EDID
* parsing fails
*/
- if (edid_status != EDID_OK) {
+ if (edid_status != EDID_OK && edid_status != EDID_PARTIAL_VALID) {
dc_sink->dc_edid.length = 0;
dm_error("Bad EDID, status%d!\n", edid_status);
}
@@ -4536,6 +4657,37 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
}
/**
+ * dc_process_dmub_dpia_hpd_int_enable - submit the dpia hpd int enable command to dmub
+ * @dc: dc structure
+ * @hpd_int_enable: 1 for hpd int enable, 0 to disable
+ *
+ * Submits the dpia hpd int enable command to dmub via inbox message.
+ */
+void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+ uint32_t hpd_int_enable)
+{
+ union dmub_rb_cmd cmd = {0};
+ struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
+
+ cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
+ cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
+
+ dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dmub_srv);
+ dc_dmub_srv_wait_idle(dmub_srv);
+
+ DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
+}
+
+/**
* dc_disable_accelerated_mode - disable accelerated mode
* @dc: dc structure
*/
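[Editor's sketch] dc_process_dmub_dpia_hpd_int_enable follows DC's standard synchronous inbox sequence, the same three calls used by other dc_dmub_srv clients; sketched generically:

/* Queue, kick, and drain a DMUB inbox command (synchronous pattern). */
static void dmub_send_and_wait_sketch(struct dc_dmub_srv *dmub_srv,
				      union dmub_rb_cmd *cmd)
{
	dc_dmub_srv_cmd_queue(dmub_srv, cmd);	/* copy into the inbox ring */
	dc_dmub_srv_cmd_execute(dmub_srv);	/* notify DMCUB */
	dc_dmub_srv_wait_idle(dmub_srv);	/* wait for firmware to drain it */
}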
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 3d19fb92333b..d7b1ace6328a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1307,7 +1307,10 @@ static bool detect_link_and_local_sink(struct dc_link *link,
}
if (link->connector_signal == SIGNAL_TYPE_EDP) {
- // Init dc_panel_config
+ /* Init dc_panel_config by HW config */
+ if (dc_ctx->dc->res_pool->funcs->get_panel_config_defaults)
+ dc_ctx->dc->res_pool->funcs->get_panel_config_defaults(&link->panel_config);
+ /* Pickup base DM settings */
dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink);
// Override dc_panel_config if system has specific settings
dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
@@ -3143,7 +3146,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
- if (allow_active && link->type == dc_connection_none) {
+ if ((allow_active != NULL) && (*allow_active == true) && (link->type == dc_connection_none)) {
// Don't enter PSR if panel is not connected
return false;
}
@@ -3375,8 +3378,8 @@ bool dc_link_setup_psr(struct dc_link *link,
case FAMILY_YELLOW_CARP:
case AMDGPU_FAMILY_GC_10_3_6:
case AMDGPU_FAMILY_GC_11_0_1:
- if(!dc->debug.disable_z10)
- psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false;
+ if (dc->debug.disable_z10)
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
break;
default:
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index d01d2eeed813..651231387043 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -35,6 +35,8 @@
#include "dc_link_ddc.h"
#include "dce/dce_aux.h"
#include "dmub/inc/dmub_cmd.h"
+#include "link_dpcd.h"
+#include "include/dal_asic_id.h"
#define DC_LOGGER_INIT(logger)
@@ -683,6 +685,21 @@ bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,
bool result = false;
struct ddc *ddc_pin = ddc->ddc_pin;
+ if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ !ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa &&
+ ASICREV_IS_YELLOW_CARP(ddc->ctx->asic_id.hw_internal_rev)) {
+ /* Fixed VS workaround for AUX timeout */
+ const uint32_t fixed_vs_address = 0xF004F;
+ const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc};
+
+ core_link_write_dpcd(ddc->link,
+ fixed_vs_address,
+ fixed_vs_data,
+ sizeof(fixed_vs_data));
+
+ timeout = 3072;
+ }
+
/* Do not try to access nonexistent DDC pin. */
if (ddc->link->ep_type != DISPLAY_ENDPOINT_PHY)
return true;
@@ -691,6 +708,7 @@ bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,
ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout);
result = true;
}
+
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 9eab6f48de1a..1254d38f1778 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -526,9 +526,9 @@ uint8_t dc_dp_initialize_scrambling_data_symbols(
return disable_scrabled_data_symbols;
}
-static inline bool is_repeater(struct dc_link *link, uint32_t offset)
+static inline bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset)
{
- return (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0);
+ return (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0);
}
static void dpcd_set_lt_pattern_and_lane_settings(
@@ -545,7 +545,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
bool edp_workaround = false; /* TODO link_prop.INTERNAL */
dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET;
- if (is_repeater(link, offset))
+ if (is_repeater(lt_settings, offset))
dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
@@ -561,7 +561,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
= dpcd_pattern.raw;
- if (is_repeater(link, offset)) {
+ if (is_repeater(lt_settings, offset)) {
DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
__func__,
offset,
@@ -584,7 +584,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
lt_settings->dpcd_lane_settings,
size_in_bytes);
- if (is_repeater(link, offset)) {
+ if (is_repeater(lt_settings, offset)) {
if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
DP_128b_132b_ENCODING)
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
@@ -873,7 +873,7 @@ enum dc_status dp_get_lane_status_and_lane_adjust(
uint32_t lane;
enum dc_status status;
- if (is_repeater(link, offset)) {
+ if (is_repeater(link_training_setting, offset)) {
lane01_status_address =
DP_LANE0_1_STATUS_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
@@ -906,7 +906,7 @@ enum dc_status dp_get_lane_status_and_lane_adjust(
ln_align->raw = dpcd_buf[2];
- if (is_repeater(link, offset)) {
+ if (is_repeater(link_training_setting, offset)) {
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
" 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
__func__,
@@ -944,6 +944,23 @@ enum dc_status dp_get_lane_status_and_lane_adjust(
return status;
}
+static enum dc_status dpcd_128b_132b_set_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting)
+{
+ enum dc_status status = core_link_write_dpcd(link,
+ DP_TRAINING_LANE0_SET,
+ (uint8_t *)(link_training_setting->dpcd_lane_settings),
+ sizeof(link_training_setting->dpcd_lane_settings));
+
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ DP_TRAINING_LANE0_SET,
+ link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ return status;
+}
+
+
enum dc_status dpcd_set_lane_settings(
struct dc_link *link,
const struct link_training_settings *link_training_setting,
@@ -954,7 +971,7 @@ enum dc_status dpcd_set_lane_settings(
lane0_set_address = DP_TRAINING_LANE0_SET;
- if (is_repeater(link, offset))
+ if (is_repeater(link_training_setting, offset))
lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
@@ -963,17 +980,7 @@ enum dc_status dpcd_set_lane_settings(
(uint8_t *)(link_training_setting->dpcd_lane_settings),
link_training_setting->link_settings.lane_count);
- if (is_repeater(link, offset)) {
- if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_128b_132b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
- " 0x%X TX_FFE_PRESET_VALUE = %x\n",
- __func__,
- offset,
- lane0_set_address,
- link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
- else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_8b_10b_ENCODING)
+ if (is_repeater(link_training_setting, offset)) {
DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
" 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
@@ -985,14 +992,6 @@ enum dc_status dpcd_set_lane_settings(
link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
} else {
- if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_128b_132b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
- __func__,
- lane0_set_address,
- link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
- else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_8b_10b_ENCODING)
DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
lane0_set_address,
@@ -1172,7 +1171,7 @@ static enum link_training_result perform_channel_equalization_sequence(
/* Note: also check that TPS4 is a supported feature*/
tr_pattern = lt_settings->pattern_for_eq;
- if (is_repeater(link, offset) && dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
+ if (is_repeater(lt_settings, offset) && dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
dp_set_hw_training_pattern(link, link_res, tr_pattern, offset);
@@ -1198,7 +1197,7 @@ static enum link_training_result perform_channel_equalization_sequence(
/* 3. wait for receiver to lock-on*/
wait_time_microsec = lt_settings->eq_pattern_time;
- if (is_repeater(link, offset))
+ if (is_repeater(lt_settings, offset))
wait_time_microsec =
dp_translate_training_aux_read_interval(
link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
@@ -1469,7 +1468,6 @@ static inline void decide_8b_10b_training_settings(
*/
lt_settings->link_settings.link_spread = link->dp_ss_off ?
LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ;
- lt_settings->lttpr_mode = link->lttpr_mode;
lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
@@ -1478,6 +1476,7 @@ static inline void decide_8b_10b_training_settings(
lt_settings->should_set_fec_ready = true;
lt_settings->disallow_per_lane_settings = true;
lt_settings->always_match_dpcd_with_hw_lane_settings = true;
+ lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
@@ -1501,9 +1500,8 @@ static inline void decide_128b_132b_training_settings(struct dc_link *link,
lt_settings->cds_pattern_time = 2500;
lt_settings->cds_wait_time_limit = (dp_convert_to_count(
link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000;
- lt_settings->lttpr_mode = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) ?
- LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_TRANSPARENT;
lt_settings->disallow_per_lane_settings = true;
+ lt_settings->lttpr_mode = dp_decide_128b_132b_lttpr_mode(link);
dp_hw_to_dpcd_lane_settings(lt_settings,
lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
@@ -1543,7 +1541,7 @@ static void override_training_settings(
lt_settings->ffe_preset = overrides->ffe_preset;
/* Override HW lane settings with BIOS forced values if present */
if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
- link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING;
lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS;
lt_settings->always_match_dpcd_with_hw_lane_settings = false;
@@ -1584,6 +1582,15 @@ static void override_training_settings(
if (link->preferred_training_settings.fec_enable != NULL)
lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* Check DP tunnel LTTPR mode debug option. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr)
+ lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+
+#endif
+ dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
+
}
uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count)
@@ -1649,7 +1656,7 @@ static enum dc_status configure_lttpr_mode_non_transparent(
link->dpcd_caps.lttpr_caps.mode = repeater_mode;
}
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__);
@@ -2015,7 +2022,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
result = DP_128b_132b_LT_FAILED;
} else {
dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
- dpcd_set_lane_settings(link, lt_settings, DPRX);
+ dpcd_128b_132b_set_lane_settings(link, lt_settings);
}
loop_count++;
}
@@ -2099,7 +2106,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
/* 1. set link rate, lane count and spread. */
dpcd_set_link_settings(link, lt_settings);
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
/* 2. perform link training (set link training done
* to false is done as well)
@@ -2216,7 +2223,7 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq
link->vendor_specific_lttpr_link_rate_wa = target_rate;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
/* 2. perform link training (set link training done
* to false is done as well)
@@ -2288,7 +2295,7 @@ static enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
ASSERT(dp_get_link_encoding_format(&lt_settings->link_settings) ==
DP_8b_10b_ENCODING);
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings);
return status;
}
@@ -2635,6 +2642,7 @@ enum link_training_result dc_link_dp_perform_link_training(
link,
link_settings,
&lt_settings);
+
override_training_settings(
link,
&link->preferred_training_settings,
@@ -2652,7 +2660,7 @@ enum link_training_result dc_link_dp_perform_link_training(
* Per DP specs starting from here, DPTX device shall not issue
* Non-LT AUX transactions inside training mode.
*/
- if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && encoding == DP_8b_10b_ENCODING)
status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
else if (encoding == DP_8b_10b_ENCODING)
status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
@@ -3086,7 +3094,7 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
* account for lttpr repeaters cap
* notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
*/
- if (link->lttpr_mode != LTTPR_MODE_NON_LTTPR) {
+ if (dp_is_lttpr_present(link)) {
if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
lttpr_max_link_rate = get_lttpr_max_link_rate(link);
@@ -3240,7 +3248,7 @@ static bool dp_verify_link_cap(
cur_link_settings = max_link_settings;
/* Grant extended timeout request */
- if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) {
+ if (dp_is_lttpr_present(link) && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
@@ -4101,8 +4109,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
&dpcd_lane_adjustment[0].raw,
sizeof(dpcd_lane_adjustment));
+ /* prepare link training settings */
+ link_training_settings.link_settings = link->cur_link_settings;
+
+ link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings);
+
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT)
dp_fixed_vs_pe_read_lane_adjust(
link,
link_training_settings.dpcd_lane_settings);
@@ -4209,9 +4222,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
test_pattern_size);
}
- /* prepare link training settings */
- link_training_settings.link_settings = link->cur_link_settings;
-
for (lane = 0; lane <
(unsigned int)(link->cur_link_settings.lane_count);
lane++) {
@@ -5021,133 +5031,151 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
return true;
}
-/* Logic to determine LTTPR mode */
-static void determine_lttpr_mode(struct dc_link *link)
+bool dp_retrieve_lttpr_cap(struct dc_link *link)
{
- bool allow_lttpr_non_transparent_mode = 0;
- bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
+ uint8_t lttpr_dpcd_data[8];
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ bool is_lttpr_present = false;
+
+ /* Logic to determine LTTPR support */
bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
- if (link->ctx->dc->debug.lttpr_mode_override != 0) {
- link->lttpr_mode = link->ctx->dc->debug.lttpr_mode_override;
- return;
- }
+ if (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support)
+ return false;
- if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
- link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
- allow_lttpr_non_transparent_mode = 1;
- } else if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
- !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
- allow_lttpr_non_transparent_mode = 1;
+ /* By reading LTTPR capability, RX assumes that we will enable
+ * LTTPR extended aux timeout if LTTPR is present.
+ */
+ status = core_link_read_dpcd(link,
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
+ lttpr_dpcd_data,
+ sizeof(lttpr_dpcd_data));
+
+ link->dpcd_caps.lttpr_caps.revision.raw =
+ lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_link_rate =
+ lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
+ lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_lane_count =
+ lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.mode =
+ lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_ext_timeout =
+ lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
+ lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
+ lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ /* If this chip cap is set, at least one retimer must exist in the chain.
+ * Override the count to 1 if we receive a known bad count (0 or an invalid value).
+ */
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
+ (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
+ ASSERT(0);
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+ DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
}
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
- if (vbios_lttpr_enable && vbios_lttpr_interop)
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- else if (!vbios_lttpr_enable && vbios_lttpr_interop) {
- if (allow_lttpr_non_transparent_mode)
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- else
- link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
- } else if (!vbios_lttpr_enable && !vbios_lttpr_interop) {
- if (!allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support)
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
- else
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- }
+ /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
+ is_lttpr_present = dp_is_lttpr_present(link);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- /* Check DP tunnel LTTPR mode debug option. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
- link->dc->debug.dpia_debug.bits.force_non_lttpr)
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
-#endif
+ if (is_lttpr_present)
+ CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+
+ DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
+ return is_lttpr_present;
}
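
A note on the cap parsing above: the driver reads one 8-byte DPCD block and locates each field by subtracting the block base from the field's register address. A minimal sketch of that idiom (the helper name is illustrative, not part of the patch):

static uint8_t lttpr_cap_byte(const uint8_t lttpr_dpcd_data[8], uint32_t dpcd_addr)
{
	/* lttpr_dpcd_data[0] holds the byte at the block base, so any field
	 * in the block sits at (field address - base address).
	 */
	return lttpr_dpcd_data[dpcd_addr - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
}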
-bool dp_retrieve_lttpr_cap(struct dc_link *link)
+bool dp_is_lttpr_present(struct dc_link *link)
{
- uint8_t lttpr_dpcd_data[8];
- enum dc_status status = DC_ERROR_UNEXPECTED;
- bool is_lttpr_present = false;
+ return (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
+ link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
+}
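
The presence test above assumes dp_convert_to_count() decodes the DPCD PHY_REPEATER_CNT encoding, where N repeaters are reported as 0x80 >> (N - 1). A hedged sketch of that decode (the real helper lives elsewhere in dc):

static uint8_t convert_phy_repeater_count(uint8_t repeater_cnt)
{
	/* Valid encodings have exactly one bit set: 0x80 = 1 hop, ..., 0x01 = 8 hops. */
	if (repeater_cnt == 0 || (repeater_cnt & (repeater_cnt - 1)) != 0)
		return 0; /* 0 or any multi-bit value is invalid */
	return 8 - __builtin_ctz(repeater_cnt);
}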
- memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
+enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting)
+{
+ enum dp_link_encoding encoding = dp_get_link_encoding_format(link_setting);
- /* Logic to determine LTTPR mode*/
- determine_lttpr_mode(link);
+ if (encoding == DP_8b_10b_ENCODING)
+ return dp_decide_8b_10b_lttpr_mode(link);
+ else if (encoding == DP_128b_132b_ENCODING)
+ return dp_decide_128b_132b_lttpr_mode(link);
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- !link->dc->debug.disable_fixed_vs_aux_timeout_wa) {
- /* Fixed VS workaround for AUX timeout */
- const uint32_t fixed_vs_address = 0xF004F;
- const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc};
+ ASSERT(0);
+ return LTTPR_MODE_NON_LTTPR;
+}
- core_link_write_dpcd(
- link,
- fixed_vs_address,
- fixed_vs_data,
- sizeof(fixed_vs_data));
- }
+void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override)
+{
+ if (!dp_is_lttpr_present(link))
+ return;
- /* By reading LTTPR capability, RX assumes that we will enable
- * LTTPR extended aux timeout if LTTPR is present.
- */
- status = core_link_read_dpcd(
- link,
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
- lttpr_dpcd_data,
- sizeof(lttpr_dpcd_data));
-
- link->dpcd_caps.lttpr_caps.revision.raw =
- lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_link_rate =
- lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
- lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_lane_count =
- lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.mode =
- lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_ext_timeout =
- lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
- link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
- lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
- lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- /* If this chip cap is set, at least one retimer must exist in the chain
- * Override count to 1 if we receive a known bad count (0 or an invalid value) */
- if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
- (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
- ASSERT(0);
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+ if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_TRANSPARENT) {
+ *override = LTTPR_MODE_TRANSPARENT;
+ } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_TRANSPARENT) {
+ *override = LTTPR_MODE_NON_TRANSPARENT;
+ } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) {
+ *override = LTTPR_MODE_NON_LTTPR;
+ }
+ DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override));
+}
+
+enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
+{
+ bool is_lttpr_present = dp_is_lttpr_present(link);
+ bool vbios_lttpr_force_non_transparent = link->dc->caps.vbios_lttpr_enable;
+ bool vbios_lttpr_aware = link->dc->caps.vbios_lttpr_aware;
+
+ if (!is_lttpr_present)
+ return LTTPR_MODE_NON_LTTPR;
+
+ if (vbios_lttpr_aware) {
+ if (vbios_lttpr_force_non_transparent) {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
+ return LTTPR_MODE_NON_TRANSPARENT;
+ } else {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default due to VBIOS not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
+ return LTTPR_MODE_TRANSPARENT;
}
+ }
- /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
- is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
- link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
- link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
- if (is_lttpr_present) {
- CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
- configure_lttpr_mode_transparent(link);
- } else
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+ if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
+ link->dc->caps.extended_aux_timeout_support) {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default and dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A set to 1.\n");
+ return LTTPR_MODE_NON_TRANSPARENT;
}
- return is_lttpr_present;
+
+ DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n");
+ return LTTPR_MODE_NON_LTTPR;
+}
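
Summarized, the branches above reduce to the following decision table (a reading aid, not normative):

/*  LTTPR present | VBIOS aware | VBIOS force NT | DP1_4A + ext. AUX timeout | result
 *  no            |      -      |        -       |             -             | NON_LTTPR
 *  yes           |     yes     |       yes      |             -             | NON_TRANSPARENT
 *  yes           |     yes     |       no       |             -             | TRANSPARENT
 *  yes           |     no      |        -       |            yes            | NON_TRANSPARENT
 *  yes           |     no      |        -       |            no             | NON_LTTPR
 */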
+
+enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link)
+{
+ enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR;
+
+ if (dp_is_lttpr_present(link))
+ mode = LTTPR_MODE_NON_TRANSPARENT;
+
+ DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode);
+ return mode;
}
static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
@@ -5165,9 +5193,10 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx(
link->dc, link->link_enc->transmitter);
if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) &&
- cmd.cable_id.header.ret_status == 1)
+ cmd.cable_id.header.ret_status == 1) {
cable_id->raw = cmd.cable_id.data.output_raw;
-
+ DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw);
+ }
return cmd.cable_id.header.ret_status == 1;
}
@@ -5209,13 +5238,17 @@ static enum dc_status wa_try_to_wake_dprx(struct dc_link *link, uint64_t timeout
uint64_t current_ts = 0;
uint64_t time_taken_ms = 0;
enum dc_connection_type type = dc_connection_none;
+ bool lttpr_present;
+ bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
- determine_lttpr_mode(link);
+ lttpr_present = dp_is_lttpr_present(link) ||
+ (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support);
+ DC_LOG_DC("lttpr_present = %d.\n", lttpr_present ? 1 : 0);
/* Issue an AUX read to test DPRX responsiveness. If LTTPR is supported the first read is expected to
* be to determine LTTPR capabilities. Otherwise trying to read power state should be an innocuous AUX read.
*/
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ if (lttpr_present)
status = core_link_read_dpcd(
link,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
@@ -5345,6 +5378,10 @@ static bool retrieve_link_cap(struct dc_link *link)
}
is_lttpr_present = dp_retrieve_lttpr_cap(link);
+
+ if (is_lttpr_present)
+ configure_lttpr_mode_transparent(link);
+
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
@@ -5774,7 +5811,7 @@ void detect_edp_sink_caps(struct dc_link *link)
* Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
*/
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
- (link->dc->debug.optimize_edp_link_rate ||
+ (link->panel_config.ilr.optimize_edp_link_rate ||
link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
// Read DPCD 00010h - 0001Fh 16 bytes at one shot
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
@@ -6096,7 +6133,7 @@ bool dc_link_dp_set_test_pattern(
/* Set DPCD Lane Settings before running test pattern */
if (p_link_settings != NULL) {
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ p_link_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
dp_fixed_vs_pe_set_retimer_lane_settings(
link,
p_link_settings->dpcd_lane_settings,
@@ -6723,7 +6760,7 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin
ASSERT(link || crtc_timing); // invalid input
if (link->dpcd_caps.edp_supported_link_rates_count == 0 ||
- !link->dc->debug.optimize_edp_link_rate)
+ !link->panel_config.ilr.optimize_edp_link_rate)
return false;
@@ -7218,7 +7255,7 @@ void dp_set_hw_lane_settings(
{
const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
- if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset))
+ if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset))
return;
if (link_hwss->ext.set_dp_lane_settings)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
index 468e39589ed8..74e36b34d3f7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
@@ -115,12 +115,14 @@ static enum link_training_result dpia_configure_link(
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) configuring\n - LTTPR mode(%d)\n",
__func__,
link->link_id.enum_id - ENUM_ID_1,
- link->lttpr_mode);
+ lt_settings->lttpr_mode);
dp_decide_training_settings(link,
link_setting,
lt_settings);
+ dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
+
status = dpcd_configure_channel_coding(link, lt_settings);
if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
@@ -178,7 +180,7 @@ static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type,
switch (type) {
case DPIA_SET_CFG_SET_LINK:
- data.set_link.mode = link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT ? 1 : 0;
+ data.set_link.mode = lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT ? 1 : 0;
break;
case DPIA_SET_CFG_SET_PHY_TEST_MODE:
break;
@@ -553,7 +555,7 @@ static enum link_training_result dpia_training_cr_phase(
{
enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
result = dpia_training_cr_non_transparent(link, link_res, lt_settings, hop);
else
result = dpia_training_cr_transparent(link, link_res, lt_settings);
@@ -830,7 +832,7 @@ static enum link_training_result dpia_training_eq_phase(
{
enum link_training_result result;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
result = dpia_training_eq_non_transparent(link, link_res, lt_settings, hop);
else
result = dpia_training_eq_transparent(link, link_res, lt_settings);
@@ -870,13 +872,14 @@ static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop)
* @param hop The Hop in display path. DPRX = 0.
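+ * @param lt_settings Link training settings carrying the decided LTTPR mode.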
*/
static enum link_training_result dpia_training_end(struct dc_link *link,
+ struct link_training_settings *lt_settings,
uint32_t hop)
{
enum link_training_result result = LINK_TRAINING_SUCCESS;
uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
enum dc_status status;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
if (hop == repeater_cnt) { /* DPTX-to-DPIA */
@@ -916,7 +919,7 @@ static enum link_training_result dpia_training_end(struct dc_link *link,
link->link_id.enum_id - ENUM_ID_1,
hop,
result,
- link->lttpr_mode);
+ lt_settings->lttpr_mode);
return result;
}
@@ -928,7 +931,9 @@ static enum link_training_result dpia_training_end(struct dc_link *link,
* @param link DPIA link being trained.
* @param hop The Hop in display path. DPRX = 0.
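+ * @param lt_settings Link training settings carrying the decided LTTPR mode.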
*/
-static void dpia_training_abort(struct dc_link *link, uint32_t hop)
+static void dpia_training_abort(struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t hop)
{
uint8_t data = 0;
uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;
@@ -936,7 +941,7 @@ static void dpia_training_abort(struct dc_link *link, uint32_t hop)
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) aborting\n - LTTPR mode(%d)\n - HPD(%d)\n",
__func__,
link->link_id.enum_id - ENUM_ID_1,
- link->lttpr_mode,
+ lt_settings->lttpr_mode,
link->is_hpd_pending);
/* Abandon clean-up if sink unplugged. */
@@ -964,12 +969,16 @@ enum link_training_result dc_link_dpia_perform_link_training(
uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
int8_t repeater_id; /* Current hop. */
+ struct dc_link_settings link_settings = *link_setting; /* non-const copy to pass in */
+
+ lt_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link_settings);
+
/* Configure link as prescribed in link_setting and set LTTPR mode. */
result = dpia_configure_link(link, link_res, link_setting, &lt_settings);
if (result != LINK_TRAINING_SUCCESS)
return result;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ if (lt_settings.lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
/* Train each hop in turn starting with the one closest to DPTX.
@@ -987,7 +996,7 @@ enum link_training_result dc_link_dpia_perform_link_training(
break;
/* Stop training hop. */
- result = dpia_training_end(link, repeater_id);
+ result = dpia_training_end(link, &lt_settings, repeater_id);
if (result != LINK_TRAINING_SUCCESS)
break;
}
@@ -1001,9 +1010,9 @@ enum link_training_result dc_link_dpia_perform_link_training(
msleep(5);
result = dp_check_link_loss_status(link, &lt_settings);
} else if (result == LINK_TRAINING_ABORT) {
- dpia_training_abort(link, repeater_id);
+ dpia_training_abort(link, &lt_settings, repeater_id);
} else {
- dpia_training_end(link, repeater_id);
+ dpia_training_end(link, &lt_settings, repeater_id);
}
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 8ee0d946bb2f..fd8db482e56f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1747,7 +1747,6 @@ bool dc_remove_plane_from_context(
for (i = 0; i < stream_status->plane_count; i++) {
if (stream_status->plane_states[i] == plane_state) {
-
dc_plane_state_release(stream_status->plane_states[i]);
break;
}
@@ -3683,4 +3682,56 @@ bool is_h_timing_divisible_by_2(struct dc_stream_state *stream)
(stream->timing.h_sync_width % 2 == 0);
}
return divisible;
+}
+
+bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
+ const struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pri_pipe,
+ struct pipe_ctx *sec_pipe,
+ bool odm)
+{
+ int pipe_idx = sec_pipe->pipe_idx;
+ struct pipe_ctx *sec_top, *sec_bottom, *sec_next, *sec_prev;
+ const struct resource_pool *pool = dc->res_pool;
+
+ sec_top = sec_pipe->top_pipe;
+ sec_bottom = sec_pipe->bottom_pipe;
+ sec_next = sec_pipe->next_odm_pipe;
+ sec_prev = sec_pipe->prev_odm_pipe;
+
+ *sec_pipe = *pri_pipe;
+
+ sec_pipe->top_pipe = sec_top;
+ sec_pipe->bottom_pipe = sec_bottom;
+ sec_pipe->next_odm_pipe = sec_next;
+ sec_pipe->prev_odm_pipe = sec_prev;
+
+ sec_pipe->pipe_idx = pipe_idx;
+ sec_pipe->plane_res.mi = pool->mis[pipe_idx];
+ sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
+ sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
+ sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
+ sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
+ sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
+ sec_pipe->stream_res.dsc = NULL;
+ if (odm) {
+ if (!sec_pipe->top_pipe)
+ sec_pipe->stream_res.opp = pool->opps[pipe_idx];
+ else
+ sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
+ if (sec_pipe->stream->timing.flags.DSC == 1) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
+#endif
+ ASSERT(sec_pipe->stream_res.dsc);
+ if (sec_pipe->stream_res.dsc == NULL)
+ return false;
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dcn20_build_mapped_resource(dc, state, sec_pipe->stream);
+#endif
+ }
+
+ return true;
}
\ No newline at end of file
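
The function above leans on whole-struct assignment: *sec_pipe = *pri_pipe copies every field, so the pipe-linkage pointers are saved first and restored afterwards. Reduced to a single pointer, the pattern looks like this (illustrative only):

	struct pipe_ctx *saved_top = sec_pipe->top_pipe; /* save linkage */

	*sec_pipe = *pri_pipe;          /* bulk copy of all fields */
	sec_pipe->top_pipe = saved_top; /* restore linkage */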
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 0c85ab5933b4..38d71b5c1f2d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -30,6 +30,7 @@
#include "resource.h"
#include "ipp.h"
#include "timing_generator.h"
+#include "dc_dmub_srv.h"
#define DC_LOGGER dc->ctx->logger
@@ -275,6 +276,8 @@ static void program_cursor_attributes(
}
dc->hwss.set_cursor_attribute(pipe_ctx);
+
+ dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
if (dc->hwss.set_cursor_sdr_white_level)
dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
@@ -381,6 +384,8 @@ static void program_cursor_position(
}
dc->hwss.set_cursor_position(pipe_ctx);
+
+ dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
}
if (pipe_to_program)
@@ -521,7 +526,7 @@ bool dc_stream_remove_writeback(struct dc *dc,
/* remove writeback info for disabled writeback pipes from stream */
for (i = 0, j = 0; i < stream->num_wb_info; i++) {
if (stream->writeback_info[i].wb_enabled) {
- if (i != j)
+ if (j < i)
/* trim the array */
stream->writeback_info[j] = stream->writeback_info[i];
j++;
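
This is the standard in-place compaction idiom; writing the guard as j < i (rather than i != j) makes explicit that entries only ever move toward the front. A generic sketch, with keep() standing in for the wb_enabled test:

	for (i = 0, j = 0; i < count; i++) {
		if (keep(&arr[i])) {
			if (j < i) /* copy only when a removed slot was skipped */
				arr[j] = arr[i];
			j++;
		}
	}
	count = j; /* surviving entries are arr[0 .. count - 1] */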
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 659c71778a64..bfc5474c0f4c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.204"
+#define DC_VER "3.2.207"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -406,6 +406,9 @@ struct dc_config {
bool ignore_dpref_ss;
bool enable_mipi_converter_optimization;
bool use_default_clock_table;
+ bool force_bios_enable_lttpr;
+ uint8_t force_bios_fixed_vs;
+
};
enum visual_confirm {
@@ -818,7 +821,6 @@ struct dc_debug_options {
/* Enable dmub aux for legacy ddc */
bool enable_dmub_aux_for_legacy_ddc;
bool disable_fams;
- bool optimize_edp_link_rate; /* eDP ILR */
/* FEC/PSR1 sequence enable delay in 100us */
uint8_t fec_enable_delay_in100us;
bool enable_driver_sequence_debug;
@@ -832,6 +834,7 @@ struct dc_debug_options {
bool force_subvp_mclk_switch;
bool allow_sw_cursor_fallback;
unsigned int force_subvp_num_ways;
+ unsigned int force_mall_ss_num_ways;
bool alloc_extra_way_for_cursor;
bool force_usr_allow;
/* uses value at boot and disables switch */
@@ -846,6 +849,7 @@ struct dc_debug_options {
bool use_legacy_soc_bb_mechanism;
bool exit_idle_opt_for_cursor_updates;
bool enable_single_display_2to1_odm_policy;
+ bool enable_double_buffered_dsc_pg_support;
bool enable_dp_dig_pixel_rate_div_policy;
enum lttpr_mode lttpr_mode_override;
};
@@ -1117,6 +1121,7 @@ union surface_update_flags {
uint32_t clock_change:1;
uint32_t stereo_format_change:1;
uint32_t lut_3d:1;
+ uint32_t tmz_changed:1;
uint32_t full_update:1;
} bits;
@@ -1186,6 +1191,8 @@ struct dc_plane_state {
enum dc_irq_source irq_source;
struct kref refcount;
struct tg_color visual_confirm_color;
+
+ bool is_statically_allocated;
};
struct dc_plane_info {
@@ -1605,6 +1612,9 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
uint8_t mst_alloc_slots,
uint8_t *mst_slots_in_use);
+void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+ uint32_t hpd_int_enable);
+
/*******************************************************************************
* DSC Interfaces
******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 89d7d3fd3321..0541e87e4f38 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -30,6 +30,7 @@
#include "dc_hw_types.h"
#include "core_types.h"
#include "../basics/conversion.h"
+#include "cursor_reg_cache.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -780,7 +781,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
// Store the original watermark value for this SubVP config so we can lower it when the
// MCLK switch starts
wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns *
- dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 / 1000;
+ (dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000;
cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
}
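
The added parentheses change the evaluation order so the intermediate product stays small. A worked example under assumed magnitudes (pstate_change_ns = 400,000, dchub_ref_clock_inKhz = 100,000):

/* old order: 400000 * 100000 = 4e10, overflowing a 32-bit intermediate
 * new order: 400000 * (100000 / 1000) / 1000 = 400000 * 100 / 1000 = 40000
 */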
@@ -880,3 +881,147 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
diag_data.is_cw0_enabled,
diag_data.is_cw6_enabled);
}
+
+static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state != NULL) {
+ if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ return false;
+ }
+
+ if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
+ pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) &&
+ pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
+ return true;
+
+ return false;
+}
+
+static void dc_build_cursor_update_payload0(
+ struct pipe_ctx *pipe_ctx, uint8_t p_idx,
+ struct dmub_cmd_update_cursor_payload0 *payload)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ unsigned int panel_inst = 0;
+
+ if (!dc_get_edp_link_panel_inst(hubp->ctx->dc,
+ pipe_ctx->stream->link, &panel_inst))
+ return;
+
+ /* Payload: Cursor Rect is built from position & attribute;
+ * x & y are obtained from position
+ */
+ payload->cursor_rect.x = hubp->cur_rect.x;
+ payload->cursor_rect.y = hubp->cur_rect.y;
+ /* w & h are obtained from attribute */
+ payload->cursor_rect.width = hubp->cur_rect.w;
+ payload->cursor_rect.height = hubp->cur_rect.h;
+
+ payload->enable = hubp->pos.cur_ctl.bits.cur_enable;
+ payload->pipe_idx = p_idx;
+ payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
+ payload->panel_inst = panel_inst;
+}
+
+static void dc_send_cmd_to_dmu(struct dc_dmub_srv *dmub_srv,
+ union dmub_rb_cmd *cmd)
+{
+ dc_dmub_srv_cmd_queue(dmub_srv, cmd);
+ dc_dmub_srv_cmd_execute(dmub_srv);
+ dc_dmub_srv_wait_idle(dmub_srv);
+}
+
+static void dc_build_cursor_position_update_payload0(
+ struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
+ const struct hubp *hubp, const struct dpp *dpp)
+{
+ /* Hubp */
+ pl->position_cfg.pHubp.cur_ctl.raw = hubp->pos.cur_ctl.raw;
+ pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw;
+ pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw;
+ pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw;
+
+ /* dpp */
+ pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw;
+ pl->position_cfg.pipe_idx = p_idx;
+}
+
+static void dc_build_cursor_attribute_update_payload1(
+ struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx,
+ const struct hubp *hubp, const struct dpp *dpp)
+{
+ /* Hubp */
+ pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH;
+ pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR;
+ pl_A->aHubp.cur_ctl.raw = hubp->att.cur_ctl.raw;
+ pl_A->aHubp.size.raw = hubp->att.size.raw;
+ pl_A->aHubp.settings.raw = hubp->att.settings.raw;
+
+ /* dpp */
+ pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw;
+}
+
+/**
+ * dc_send_update_cursor_info_to_dmu - Populate the DMCUB cursor update info command
+ * @pCtx: pipe context
+ * @pipe_idx: pipe index
+ *
+ * Store the cursor-related information and pass it to the DMUB.
+ */
+void dc_send_update_cursor_info_to_dmu(
+ struct pipe_ctx *pCtx, uint8_t pipe_idx)
+{
+ union dmub_rb_cmd cmd = { 0 };
+ union dmub_cmd_update_cursor_info_data *update_cursor_info =
+ &cmd.update_cursor_info.update_cursor_info_data;
+
+ if (!dc_dmub_should_update_cursor_data(pCtx))
+ return;
+ /*
+ * Since we use multi_cmd_pending for the dmub command, the 2nd command
+ * is assigned only to store the cursor attributes info.
+ * The 1st command can be viewed as 2 parts: one for PSR/Replay data, the
+ * other to store the cursor position info.
+ *
+ * The command header type must be the same when using multi_cmd_pending.
+ * Besides, while processing the 2nd command, the DMU ignores the sub-type,
+ * so it is meaningless to pass a sub-type header of a different type.
+ */
+
+ {
+ /* Build Payload#0 Header */
+ cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
+ cmd.update_cursor_info.header.payload_bytes =
+ sizeof(cmd.update_cursor_info.update_cursor_info_data);
+ cmd.update_cursor_info.header.multi_cmd_pending = 1; /* 1st of the combined dmub cmds */
+
+ /* Prepare Payload */
+ dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info->payload0);
+
+ dc_build_cursor_position_update_payload0(&update_cursor_info->payload0, pipe_idx,
+ pCtx->plane_res.hubp, pCtx->plane_res.dpp);
+ /* Queue the update_cursor_info command */
+ dc_dmub_srv_cmd_queue(pCtx->stream->ctx->dmub_srv, &cmd);
+ }
+ {
+ /* Build Payload#1 Header */
+ memset(update_cursor_info, 0, sizeof(union dmub_cmd_update_cursor_info_data));
+ cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
+ cmd.update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
+ cmd.update_cursor_info.header.multi_cmd_pending = 0; /* Indicate it's the last command. */
+
+ dc_build_cursor_attribute_update_payload1(
+ &cmd.update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
+ pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);
+
+ /* Send the combined update_cursor_info commands to the DMU */
+ dc_send_cmd_to_dmu(pCtx->stream->ctx->dmub_srv, &cmd);
+ }
+}
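
The two blocks above follow the DMUB multi-command handshake: every command except the last is queued with multi_cmd_pending = 1, and only the final command (multi_cmd_pending = 0) is followed by execute/wait. A hedged sketch of the general shape (dmub_send_chained is illustrative; it assumes the cmd_common view of union dmub_rb_cmd):

static void dmub_send_chained(struct dc_dmub_srv *srv, union dmub_rb_cmd *cmds, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		/* all but the last command stay pending in the ring */
		cmds[i].cmd_common.header.multi_cmd_pending = (i < n - 1) ? 1 : 0;
		dc_dmub_srv_cmd_queue(srv, &cmds[i]);
	}
	dc_dmub_srv_cmd_execute(srv);
	dc_dmub_srv_wait_idle(srv);
}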
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 7e438345b1a8..d34f5563df2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -88,4 +88,5 @@ bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmu
void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, bool enable);
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
+void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index d890a03910c7..caf0c7af2d0b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -127,11 +127,21 @@ struct dc_panel_config {
unsigned int extra_t12_ms;
unsigned int extra_post_OUI_ms;
} pps;
+ // ABM
+ struct varib {
+ unsigned int varibright_feature_enable;
+ unsigned int def_varibright_level;
+ unsigned int abm_config_setting;
+ } varib;
// edp DSC
struct dsc {
bool disable_dsc_edp;
unsigned int force_dsc_edp_policy;
} dsc;
+ /* eDP ILR */
+ struct ilr {
+ bool optimize_edp_link_rate; /* eDP ILR */
+ } ilr;
};
/*
* A link contains one or more sinks and their connected status.
@@ -151,7 +161,6 @@ struct dc_link {
bool link_state_valid;
bool aux_access_disabled;
bool sync_lt_in_progress;
- enum lttpr_mode lttpr_mode;
bool is_internal_display;
/* TODO: Rename. Flag an endpoint as having a programmable mapping to a
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 9fcf9dc5bce4..9e6025c98db9 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -212,8 +212,7 @@ struct dc_stream_state {
/* DMCU info */
unsigned int abm_level;
- struct periodic_interrupt_config periodic_interrupt0;
- struct periodic_interrupt_config periodic_interrupt1;
+ struct periodic_interrupt_config periodic_interrupt;
/* from core_stream struct */
struct dc_context *ctx;
@@ -281,8 +280,7 @@ struct dc_stream_update {
struct dc_info_packet *hdr_static_metadata;
unsigned int *abm_level;
- struct periodic_interrupt_config *periodic_interrupt0;
- struct periodic_interrupt_config *periodic_interrupt1;
+ struct periodic_interrupt_config *periodic_interrupt;
struct dc_info_packet *vrr_infopacket;
struct dc_info_packet *vsc_infopacket;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 32782ef9ef77..140297c8ff55 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -942,10 +942,6 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
case AUX_RET_ERROR_ENGINE_ACQUIRE:
case AUX_RET_ERROR_UNKNOWN:
default:
- DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
- LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: Failure: operation_result=%d",
- (int)operation_result);
goto fail;
}
}
@@ -953,14 +949,11 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
fail:
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
LOG_FLAG_Error_I2cAux,
- "dce_aux_transfer_with_retries: FAILURE");
+ "%s: Failure: operation_result=%d",
+ __func__,
+ (int)operation_result);
if (!payload_reply)
payload->reply = NULL;
- DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
- WPP_BIT_FLAG_DC_ERROR,
- "AUX transaction failed. Result: %d",
- operation_result);
-
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 2341982ee0a5..d260eaa1509e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2178,8 +2178,7 @@ static void dce110_setup_audio_dto(
continue;
if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
continue;
- if (pipe_ctx->stream_res.audio != NULL &&
- pipe_ctx->stream_res.audio->enabled == false) {
+ if (pipe_ctx->stream_res.audio != NULL) {
struct audio_output audio_output;
build_audio_output(context, pipe_ctx, &audio_output);
@@ -2219,8 +2218,7 @@ static void dce110_setup_audio_dto(
if (!dc_is_dp_signal(pipe_ctx->stream->signal))
continue;
- if (pipe_ctx->stream_res.audio != NULL &&
- pipe_ctx->stream_res.audio->enabled == false) {
+ if (pipe_ctx->stream_res.audio != NULL) {
struct audio_output audio_output;
build_audio_output(context, pipe_ctx, &audio_output);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 897f412f539e..b9765b3899e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -469,6 +469,7 @@ void dpp1_set_cursor_position(
REG_UPDATE(CURSOR0_CONTROL,
CUR0_ENABLE, cur_en);
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
}
void dpp1_cnv_set_optional_cursor_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 287fdecc0b10..11e4c4e46947 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2244,6 +2244,9 @@ void dcn10_enable_timing_synchronization(
DC_SYNC_INFO("Setting up OTG reset trigger\n");
for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
opp = grouped_pipes[i]->stream_res.opp;
tg = grouped_pipes[i]->stream_res.tg;
tg->funcs->get_otg_active_size(tg, &width, &height);
@@ -2254,13 +2257,21 @@ void dcn10_enable_timing_synchronization(
for (i = 0; i < group_size; i++) {
if (grouped_pipes[i]->stream == NULL)
continue;
+
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
grouped_pipes[i]->stream->vblank_synchronized = false;
}
- for (i = 1; i < group_size; i++)
+ for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
grouped_pipes[i]->stream_res.tg,
grouped_pipes[0]->stream_res.tg->inst);
+ }
DC_SYNC_INFO("Waiting for trigger\n");
@@ -2268,12 +2279,21 @@ void dcn10_enable_timing_synchronization(
* synchronized. Look at last pipe programmed to reset.
*/
- wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
- for (i = 1; i < group_size; i++)
+ if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
+ wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
+
+ for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
grouped_pipes[i]->stream_res.tg);
+ }
for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
opp = grouped_pipes[i]->stream_res.opp;
tg = grouped_pipes[i]->stream_res.tg;
tg->funcs->get_otg_active_size(tg, &width, &height);
@@ -3005,6 +3025,7 @@ void dcn10_prepare_bandwidth(
{
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3027,8 +3048,11 @@ void dcn10_prepare_bandwidth(
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
DC_FP_START();
- dcn_bw_notify_pplib_of_wm_ranges(dc);
+ dcn_get_soc_clks(
+ dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
+ dcn_bw_notify_pplib_of_wm_ranges(
+ dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
if (dc->debug.sanity_checks)
@@ -3041,6 +3065,7 @@ void dcn10_optimize_bandwidth(
{
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3064,8 +3089,11 @@ void dcn10_optimize_bandwidth(
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
DC_FP_START();
- dcn_bw_notify_pplib_of_wm_ranges(dc);
+ dcn_get_soc_clks(
+ dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
+ dcn_bw_notify_pplib_of_wm_ranges(
+ dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
if (dc->debug.sanity_checks)
@@ -3344,127 +3372,6 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
return false;
}
-static bool dcn10_dmub_should_update_cursor_data(
- struct pipe_ctx *pipe_ctx,
- struct dc_debug_options *debug)
-{
- if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
- return false;
-
- if (dcn10_can_pipe_disable_cursor(pipe_ctx))
- return false;
-
- if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 || pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
- && pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
- return true;
-
- return false;
-}
-
-static void dcn10_dmub_update_cursor_data(
- struct pipe_ctx *pipe_ctx,
- struct hubp *hubp,
- const struct dc_cursor_mi_param *param,
- const struct dc_cursor_position *cur_pos,
- const struct dc_cursor_attributes *cur_attr)
-{
- union dmub_rb_cmd cmd;
- struct dmub_cmd_update_cursor_info_data *update_cursor_info;
- const struct dc_cursor_position *pos;
- const struct dc_cursor_attributes *attr;
- int src_x_offset = 0;
- int src_y_offset = 0;
- int x_hotspot = 0;
- int cursor_height = 0;
- int cursor_width = 0;
- uint32_t cur_en = 0;
- unsigned int panel_inst = 0;
-
- struct dc_debug_options *debug = &hubp->ctx->dc->debug;
-
- if (!dcn10_dmub_should_update_cursor_data(pipe_ctx, debug))
- return;
- /**
- * if cur_pos == NULL means the caller is from cursor_set_attribute
- * then driver use previous cursor position data
- * if cur_attr == NULL means the caller is from cursor_set_position
- * then driver use previous cursor attribute
- * if cur_pos or cur_attr is not NULL then update it
- */
- if (cur_pos != NULL)
- pos = cur_pos;
- else
- pos = &hubp->curs_pos;
-
- if (cur_attr != NULL)
- attr = cur_attr;
- else
- attr = &hubp->curs_attr;
-
- if (!dc_get_edp_link_panel_inst(hubp->ctx->dc, pipe_ctx->stream->link, &panel_inst))
- return;
-
- src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
- src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
- x_hotspot = pos->x_hotspot;
- cursor_height = (int)attr->height;
- cursor_width = (int)attr->width;
- cur_en = pos->enable ? 1:0;
-
- // Rotated cursor width/height and hotspots tweaks for offset calculation
- if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
- swap(cursor_height, cursor_width);
- if (param->rotation == ROTATION_ANGLE_90) {
- src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
- src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
- }
- } else if (param->rotation == ROTATION_ANGLE_180) {
- src_x_offset = pos->x - param->viewport.x;
- src_y_offset = pos->y - param->viewport.y;
- }
-
- if (param->mirror) {
- x_hotspot = param->viewport.width - x_hotspot;
- src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
- }
-
- if (src_x_offset >= (int)param->viewport.width)
- cur_en = 0; /* not visible beyond right edge*/
-
- if (src_x_offset + cursor_width <= 0)
- cur_en = 0; /* not visible beyond left edge*/
-
- if (src_y_offset >= (int)param->viewport.height)
- cur_en = 0; /* not visible beyond bottom edge*/
-
- if (src_y_offset + cursor_height <= 0)
- cur_en = 0; /* not visible beyond top edge*/
-
- // Cursor bitmaps have different hotspot values
- // There's a possibility that the above logic returns a negative value, so we clamp them to 0
- if (src_x_offset < 0)
- src_x_offset = 0;
- if (src_y_offset < 0)
- src_y_offset = 0;
-
- memset(&cmd, 0x0, sizeof(cmd));
- cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
- cmd.update_cursor_info.header.payload_bytes =
- sizeof(cmd.update_cursor_info.update_cursor_info_data);
- update_cursor_info = &cmd.update_cursor_info.update_cursor_info_data;
- update_cursor_info->cursor_rect.x = src_x_offset + param->viewport.x;
- update_cursor_info->cursor_rect.y = src_y_offset + param->viewport.y;
- update_cursor_info->cursor_rect.width = attr->width;
- update_cursor_info->cursor_rect.height = attr->height;
- update_cursor_info->enable = cur_en;
- update_cursor_info->pipe_idx = pipe_ctx->pipe_idx;
- update_cursor_info->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
- update_cursor_info->panel_inst = panel_inst;
- dc_dmub_srv_cmd_queue(pipe_ctx->stream->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(pipe_ctx->stream->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(pipe_ctx->stream->ctx->dmub_srv);
-}
-
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -3699,7 +3606,6 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
}
- dcn10_dmub_update_cursor_data(pipe_ctx, hubp, &param, &pos_cpy, NULL);
hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
@@ -3707,25 +3613,6 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
- struct dc_cursor_mi_param param = { 0 };
-
- /**
- * If enter PSR without cursor attribute update
- * the cursor attribute of dmub_restore_plane
- * are initial value. call dmub to exit PSR and
- * restore plane then update cursor attribute to
- * avoid override with initial value
- */
- if (pipe_ctx->plane_state != NULL) {
- param.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
- param.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz;
- param.viewport = pipe_ctx->plane_res.scl_data.viewport;
- param.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz;
- param.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert;
- param.rotation = pipe_ctx->plane_state->rotation;
- param.mirror = pipe_ctx->plane_state->horizontal_mirror;
- dcn10_dmub_update_cursor_data(pipe_ctx, pipe_ctx->plane_res.hubp, &param, NULL, attributes);
- }
pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
pipe_ctx->plane_res.hubp, attributes);
@@ -3810,81 +3697,56 @@ void dcn10_calc_vupdate_position(
uint32_t *start_line,
uint32_t *end_line)
{
- const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
- int vline_int_offset_from_vupdate =
- pipe_ctx->stream->periodic_interrupt0.lines_offset;
- int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
- int start_position;
+ const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
- if (vline_int_offset_from_vupdate > 0)
- vline_int_offset_from_vupdate--;
- else if (vline_int_offset_from_vupdate < 0)
- vline_int_offset_from_vupdate++;
-
- start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
-
- if (start_position >= 0)
- *start_line = start_position;
+ if (vupdate_pos >= 0)
+ *start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
else
- *start_line = dc_crtc_timing->v_total + start_position - 1;
-
- *end_line = *start_line + 2;
-
- if (*end_line >= dc_crtc_timing->v_total)
- *end_line = 2;
+ *start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
+ *end_line = (*start_line + 2) % timing->v_total;
}
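
Both branches map vupdate_pos into the [0, v_total) range. Worked examples with an assumed v_total of 1125 (note the negative branch intentionally lands one line earlier than a plain modulo, matching the old code's v_total + start_position - 1):

/* vupdate_pos = 2250: 2250 - (2250 / 1125) * 1125        -> start_line = 0
 * vupdate_pos =   -3:   -3 + ((3 / 1125) + 1) * 1125 - 1 -> start_line = 1121
 * end_line = (start_line + 2) % 1125 in both cases
 */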
static void dcn10_cal_vline_position(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
- enum vline_select vline,
uint32_t *start_line,
uint32_t *end_line)
{
- enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
+ const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;
- if (vline == VLINE0)
- ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
- else if (vline == VLINE1)
- ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
+ if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
+ if (vline_pos > 0)
+ vline_pos--;
+ else if (vline_pos < 0)
+ vline_pos++;
- switch (ref_point) {
- case START_V_UPDATE:
- dcn10_calc_vupdate_position(
- dc,
- pipe_ctx,
- start_line,
- end_line);
- break;
- case START_V_SYNC:
- // Suppose to do nothing because vsync is 0;
- break;
- default:
+ vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
+ if (vline_pos >= 0)
+ *start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
+ else
+ *start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
+ *end_line = (*start_line + 2) % timing->v_total;
+ } else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
+ // vsync is line 0 so start_line is just the requested line offset
+ *start_line = vline_pos;
+ *end_line = (*start_line + 2) % timing->v_total;
+ } else
ASSERT(0);
- break;
- }
}
void dcn10_setup_periodic_interrupt(
struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum vline_select vline)
+ struct pipe_ctx *pipe_ctx)
{
struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ uint32_t start_line = 0;
+ uint32_t end_line = 0;
- if (vline == VLINE0) {
- uint32_t start_line = 0;
- uint32_t end_line = 0;
-
- dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
+ dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
- tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
-
- } else if (vline == VLINE1) {
- pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
- tg,
- pipe_ctx->stream->periodic_interrupt1.lines_offset);
- }
+ tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
}
void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 9ae07c77fdc0..0ef7bf7ddb75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -175,8 +175,7 @@ void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx);
void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx);
void dcn10_setup_periodic_interrupt(
struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum vline_select vline);
+ struct pipe_ctx *pipe_ctx);
enum dc_status dcn10_set_clock(struct dc *dc,
enum dc_clock_type clock_type,
uint32_t clk_khz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index ea7739255119..33d780218790 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -207,10 +207,7 @@ void optc1_program_timing(
/* In case of V_TOTAL_CONTROL is on, make sure OTG_V_TOTAL_MAX and
* OTG_V_TOTAL_MIN are equal to V_TOTAL.
*/
- REG_SET(OTG_V_TOTAL_MAX, 0,
- OTG_V_TOTAL_MAX, v_total);
- REG_SET(OTG_V_TOTAL_MIN, 0,
- OTG_V_TOTAL_MIN, v_total);
+ optc->funcs->set_vtotal_min_max(optc, v_total, v_total);
/* v_sync_start = 0, v_sync_end = v_sync_width */
v_sync_end = patched_crtc_timing.v_sync_width;
@@ -649,13 +646,6 @@ uint32_t optc1_get_vblank_counter(struct timing_generator *optc)
void optc1_lock(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t regval = 0;
-
- regval = REG_READ(OTG_CONTROL);
-
- /* otg is not running, do not need to be locked */
- if ((regval & 0x1) == 0x0)
- return;
REG_SET(OTG_GLOBAL_CONTROL0, 0,
OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
@@ -663,12 +653,10 @@ void optc1_lock(struct timing_generator *optc)
OTG_MASTER_UPDATE_LOCK, 1);
/* Should be fast, status does not update on maximus */
- if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) {
-
+ if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
REG_WAIT(OTG_MASTER_UPDATE_LOCK,
UPDATE_LOCK_STATUS, 1,
1, 10);
- }
}
void optc1_unlock(struct timing_generator *optc)
@@ -679,16 +667,6 @@ void optc1_unlock(struct timing_generator *optc)
OTG_MASTER_UPDATE_LOCK, 0);
}
-bool optc1_is_locked(struct timing_generator *optc)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t locked;
-
- REG_GET(OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, &locked);
-
- return (locked == 1);
-}
-
void optc1_get_position(struct timing_generator *optc,
struct crtc_position *position)
{
@@ -941,11 +919,7 @@ void optc1_set_drr(
}
- REG_SET(OTG_V_TOTAL_MAX, 0,
- OTG_V_TOTAL_MAX, params->vertical_total_max - 1);
-
- REG_SET(OTG_V_TOTAL_MIN, 0,
- OTG_V_TOTAL_MIN, params->vertical_total_min - 1);
+ optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
OTG_V_TOTAL_MIN_SEL, 1,
@@ -964,11 +938,7 @@ void optc1_set_drr(
OTG_V_TOTAL_MAX_SEL, 0,
OTG_FORCE_LOCK_ON_EVENT, 0);
- REG_SET(OTG_V_TOTAL_MIN, 0,
- OTG_V_TOTAL_MIN, 0);
-
- REG_SET(OTG_V_TOTAL_MAX, 0,
- OTG_V_TOTAL_MAX, 0);
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
}
}
@@ -1583,11 +1553,11 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc1_lock,
- .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = NULL,
+ .set_vtotal_min_max = optc1_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.set_test_pattern = optc1_set_test_pattern,
.program_stereo = optc1_program_stereo,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 6323ca6dc3b3..88ac5f6f4c96 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -654,7 +654,6 @@ void optc1_set_blank(struct timing_generator *optc,
bool enable_blanking);
bool optc1_is_blanked(struct timing_generator *optc);
-bool optc1_is_locked(struct timing_generator *optc);
void optc1_program_blank_color(
struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 831080b9eb87..56d30baf12df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1336,6 +1336,21 @@ static noinline void dcn10_resource_construct_fp(
}
}
+static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks)
+{
+ int i;
+
+ if (clks->num_levels == 0)
+ return false;
+
+ for (i = 0; i < clks->num_levels; i++)
+ /* Ensure that the result is sane */
+ if (clks->data[i].clocks_in_khz == 0)
+ return false;
+
+ return true;
+}
+
static bool dcn10_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
@@ -1345,6 +1360,9 @@ static bool dcn10_resource_construct(
int j;
struct dc_context *ctx = dc->ctx;
uint32_t pipe_fuses = read_pipe_fuses(ctx);
+ struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
+ bool res;
ctx->dc_bios->regs = &bios_regs;
@@ -1523,15 +1541,53 @@ static bool dcn10_resource_construct(
&& pool->base.pp_smu->rv_funcs.set_pme_wa_enable != NULL)
dc->debug.az_endpoint_mute_only = false;
- DC_FP_START();
- if (!dc->debug.disable_pplib_clock_request)
- dcn_bw_update_from_pplib(dc);
+
+ if (!dc->debug.disable_pplib_clock_request) {
+ /*
+ * TODO: This is not the proper way to obtain
+ * fabric_and_dram_bandwidth, should be min(fclk, memclk).
+ */
+ res = dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
+
+ DC_FP_START();
+
+ if (res)
+ res = verify_clock_values(&fclks);
+
+ if (res)
+ dcn_bw_update_from_pplib_fclks(dc, &fclks);
+ else
+ BREAK_TO_DEBUGGER();
+
+ DC_FP_END();
+
+ res = dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
+
+ DC_FP_START();
+
+ if (res)
+ res = verify_clock_values(&dcfclks);
+
+ if (res)
+ dcn_bw_update_from_pplib_dcfclks(dc, &dcfclks);
+ else
+ BREAK_TO_DEBUGGER();
+
+ DC_FP_END();
+ }
+
dcn_bw_sync_calcs_and_dml(dc);
if (!dc->debug.disable_pplib_wm_range) {
dc->res_pool = &pool->base;
- dcn_bw_notify_pplib_of_wm_ranges(dc);
+ DC_FP_START();
+ dcn_get_soc_clks(
+ dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
+ DC_FP_END();
+ dcn_bw_notify_pplib_of_wm_ranges(
+ dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
- DC_FP_END();
{
struct irq_service_init_data init_data;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index b1ec0e6f7f58..4996d2810edb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -617,6 +617,17 @@ void hubp2_cursor_set_attributes(
CURSOR0_DST_Y_OFFSET, 0,
/* used to shift the cursor chunk request deadline */
CURSOR0_CHUNK_HDL_ADJUST, 3);
+
+ hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part;
+ hubp->att.SURFACE_ADDR = attr->address.low_part;
+ hubp->att.size.bits.width = attr->width;
+ hubp->att.size.bits.height = attr->height;
+ hubp->att.cur_ctl.bits.mode = attr->color_format;
+ hubp->att.cur_ctl.bits.pitch = hw_pitch;
+ hubp->att.cur_ctl.bits.line_per_chunk = lpc;
+ hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION;
+ hubp->att.settings.bits.dst_y_offset = 0;
+ hubp->att.settings.bits.chunk_hdl_adjust = 3;
}
void hubp2_dmdata_set_attributes(
@@ -1033,6 +1044,25 @@ void hubp2_cursor_set_position(
REG_SET(CURSOR_DST_OFFSET, 0,
CURSOR_DST_X_OFFSET, dst_x_offset);
/* TODO Handle surface pixel formats other than 4:4:4 */
+ /* Cursor Position Register Config */
+ hubp->pos.cur_ctl.bits.cur_enable = cur_en;
+ hubp->pos.position.bits.x_pos = pos->x;
+ hubp->pos.position.bits.y_pos = pos->y;
+ hubp->pos.hot_spot.bits.x_hot = x_hotspot;
+ hubp->pos.hot_spot.bits.y_hot = y_hotspot;
+ hubp->pos.dst_offset.bits.dst_x_offset = dst_x_offset;
+ /* Cursor Rectangle Cache
+ * Cursor bitmaps have different hotspot values
+ * The offset math above can produce negative values,
+ * so we clamp them to 0.
+ */
+ if (src_x_offset < 0)
+ src_x_offset = 0;
+ if (src_y_offset < 0)
+ src_y_offset = 0;
+ /* Save the necessary cursor info: x and y position here; w and h are saved in the attribute func. */
+ hubp->cur_rect.x = src_x_offset + param->viewport.x;
+ hubp->cur_rect.y = src_y_offset + param->viewport.y;
}
void hubp2_clk_cntl(struct hubp *hubp, bool enable)
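The new assignments in the two hunps above keep a software copy of everything written to the cursor registers (hubp->att and hubp->pos), so later code can re-apply the state without reading the registers back, for example after the block is re-enabled. A generic, self-contained sketch of that shadow-and-replay pattern (names here are illustrative, not DC API):

#include <stdint.h>

struct cursor_shadow {
	uint32_t addr_hi, addr_lo;
	uint16_t width, height;
};

static struct cursor_shadow shadow;

/* program the hardware (elided) and remember what was written */
static void cursor_program(uint32_t hi, uint32_t lo, uint16_t w, uint16_t h)
{
	shadow.addr_hi = hi;
	shadow.addr_lo = lo;
	shadow.width = w;
	shadow.height = h;
}

/* after the block powers back up, push the shadow copy out again */
static void cursor_replay(void)
{
	cursor_program(shadow.addr_hi, shadow.addr_lo,
		       shadow.width, shadow.height);
}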
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index e1d271fe9e64..d732b6f031a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1862,24 +1862,6 @@ void dcn20_post_unlock_program_front_end(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *mpcc_pipe;
-
- if (pipe->vtp_locked) {
- dc->hwseq->funcs.wait_for_blank_complete(pipe->stream_res.opp);
- pipe->plane_res.hubp->funcs->set_blank(pipe->plane_res.hubp, true);
- pipe->vtp_locked = false;
-
- for (mpcc_pipe = pipe->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
- mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, true);
-
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
- dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
- }
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
/* If an active, non-phantom pipe is being transitioned into a phantom
@@ -2018,6 +2000,10 @@ void dcn20_optimize_bandwidth(
context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+ /* increase compbuf size */
+ if (hubbub->funcs->program_compbuf_size)
+ hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
+
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
@@ -2033,9 +2019,6 @@ void dcn20_optimize_bandwidth(
pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
}
}
- /* increase compbuf size */
- if (hubbub->funcs->program_compbuf_size)
- hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
}
bool dcn20_update_bandwidth(
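The two dcn20_optimize_bandwidth() hunks above move the compbuf-size increase from after the per-pipe loop to before update_clocks(). A condensed view of the resulting ordering (summarizing this patch, not new API):

/*
 * optimize_bandwidth after this patch:
 *   raise dc-mode memclk softmax if the new dramclk fits under it
 *   -> program_compbuf_size(..., safe_to_increase = true)
 *   -> update_clocks()
 *   -> program optimized_min_dst_y_next_start per pipe
 */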
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 0340fdd3f5fb..a08c335b7383 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -529,6 +529,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+ .set_vtotal_min_max = optc1_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index 5752271f22df..c5e200d09038 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -67,15 +67,9 @@ static uint32_t convert_and_clamp(
void dcn21_dchvm_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
- uint32_t riommu_active, prefetch_done;
+ uint32_t riommu_active;
int i;
- REG_GET(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, &prefetch_done);
-
- if (prefetch_done) {
- hubbub->riommu_active = true;
- return;
- }
//Init DCHVM block
REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 7cb35bb1c0f1..887081472c0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -657,7 +657,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.usbc_combo_phy_reset_wa = true,
.dmub_command_table = true,
.use_max_lb = true,
- .optimize_edp_link_rate = true
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -677,6 +676,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
@@ -1367,6 +1372,11 @@ static struct panel_cntl *dcn21_panel_cntl_create(const struct panel_cntl_init_d
return &panel_cntl->base;
}
+static void dcn21_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
#define CTX ctx
#define REG(reg_name) \
@@ -1408,6 +1418,7 @@ static const struct resource_funcs dcn21_res_pool_funcs = {
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.update_bw_bounding_box = dcn21_update_bw_bounding_box,
+ .get_panel_config_defaults = dcn21_get_panel_config_defaults,
};
static bool dcn21_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 4a668d6563df..e5b7ef7422b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -372,6 +372,10 @@ void dpp3_set_cursor_attributes(
REG_UPDATE(CURSOR0_COLOR1,
CUR0_COLOR1, 0xFFFFFFFF);
}
+
+ dpp_base->att.cur0_ctl.bits.expansion_mode = 0;
+ dpp_base->att.cur0_ctl.bits.cur0_rom_en = cur_rom_en;
+ dpp_base->att.cur0_ctl.bits.mode = color_format;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 1782b9c26cf4..892d3c4d01a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -319,13 +319,13 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc3_lock,
- .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+ .set_vtotal_min_max = optc3_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
@@ -366,4 +366,3 @@ void dcn30_timing_generator_init(struct optc *optc1)
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index f6f3878c99b8..020f512e9690 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -724,7 +724,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
.disable_psr = false,
- .use_max_lb = true
+ .use_max_lb = true,
+ .exit_idle_opt_for_cursor_updates = true
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1654,6 +1655,9 @@ noinline bool dcn30_internal_validate_bw(
if (!pipes)
return false;
+ context->bw_ctx.dml.vba.maxMpcComb = 0;
+ context->bw_ctx.dml.vba.VoltageLevel = 0;
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
@@ -1872,6 +1876,7 @@ noinline bool dcn30_internal_validate_bw(
if (repopulate_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ context->bw_ctx.dml.vba.VoltageLevel = vlevel;
*vlevel_out = vlevel;
*pipe_cnt_out = pipe_cnt;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 0c2b15a0f3a7..f04595b750ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -700,6 +700,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.dwb_fi_phase = -1, // -1 = disable
.dmub_command_table = true,
.use_max_lb = false,
+ .exit_idle_opt_for_cursor_updates = true
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -851,7 +852,7 @@ static struct hubbub *dcn301_hubbub_create(struct dc_context *ctx)
vmid->masks = &vmid_masks;
}
- hubbub3->num_vmid = res_cap_dcn301.num_vmid;
+ hubbub3->num_vmid = res_cap_dcn301.num_vmid;
return &hubbub3->base;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 4fab537e822f..b925b6ddde5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -93,7 +93,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
- .use_max_lb = true
+ .use_max_lb = true,
+ .exit_idle_opt_for_cursor_updates = true
};
static const struct dc_debug_options debug_defaults_diags = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index d97076648acb..527d5c902878 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -77,6 +77,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
+ .exit_idle_opt_for_cursor_updates = true,
.disable_idle_power_optimizations = false,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 23621ff08c90..814f401db3b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -150,9 +150,9 @@ static void dcn31_hpo_dp_stream_enc_dp_blank(
* 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode +
* a little more because we may not trust delay accuracy.
*/
- //REG_WAIT(DP_SYM32_ENC_VID_STREAM_CONTROL,
- // VID_STREAM_STATUS, 0,
- // 10, 5000);
+ REG_WAIT(DP_SYM32_ENC_VID_STREAM_CONTROL,
+ VID_STREAM_STATUS, 0,
+ 10, 5000);
/* Disable SDP transmission */
REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
@@ -197,7 +197,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
uint32_t h_back_porch;
uint32_t h_width;
uint32_t v_height;
- unsigned long long v_freq;
+ uint64_t v_freq;
uint8_t misc0 = 0;
uint8_t misc1 = 0;
uint8_t hsp;
@@ -360,7 +360,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom;
hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80;
vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0 : 0x80;
- v_freq = hw_crtc_timing.pix_clk_100hz * 100;
+ v_freq = (uint64_t)hw_crtc_timing.pix_clk_100hz * 100;
/* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1
*
@@ -436,32 +436,28 @@ static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(
{
struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
uint32_t dmdata_packet_enabled = 0;
- bool sdp_stream_enable = false;
- if (info_frame->vsc.valid) {
+ if (info_frame->vsc.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
0, /* packetIndex */
&info_frame->vsc,
true);
- sdp_stream_enable = true;
- }
- if (info_frame->spd.valid) {
+
+ if (info_frame->spd.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
2, /* packetIndex */
&info_frame->spd,
true);
- sdp_stream_enable = true;
- }
- if (info_frame->hdrsmd.valid) {
+
+ if (info_frame->hdrsmd.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
3, /* packetIndex */
&info_frame->hdrsmd,
true);
- sdp_stream_enable = true;
- }
+
/* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame
*/
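The v_freq fix above is a classic C integer-promotion pitfall: pix_clk_100hz is 32-bit and counts 100 Hz units, and without the cast the multiply by 100 is evaluated in 32 bits before being assigned to the 64-bit v_freq. A standalone demonstration of the wrap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* a 5 GHz pixel clock expressed in 100 Hz units */
	uint32_t pix_clk_100hz = 50000000;

	uint32_t wrapped = pix_clk_100hz * 100;		  /* 32-bit multiply wraps */
	uint64_t correct = (uint64_t)pix_clk_100hz * 100; /* widened before multiplying */

	/* prints 705032704 vs 5000000000 */
	printf("%u vs %llu\n", wrapped, (unsigned long long)correct);
	return 0;
}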
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
index 2f7404a97479..63a677c8ee27 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -201,7 +201,6 @@ void optc31_set_drr(
// Setup manual flow control for EOF via TRIG_A
optc->funcs->setup_manual_trigger(optc);
-
} else {
REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
OTG_SET_V_TOTAL_MIN_MASK, 0,
@@ -260,7 +259,6 @@ static struct timing_generator_funcs dcn31_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc3_lock,
- .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 8c1a6fb36306..fddc21a5a04c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -888,9 +888,8 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.disable_z10 = true,
- .optimize_edp_link_rate = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
- .dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE,
+ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -911,6 +910,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1803,6 +1808,11 @@ validate_out:
return out;
}
+static void dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1829,6 +1839,7 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn31_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn31_get_panel_config_defaults,
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
index 36630d532c18..1bd7e0f327d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
@@ -45,6 +45,48 @@
#define DC_LOGGER \
dccg->ctx->logger
+static void dccg314_get_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div *k1,
+ enum pixel_rate_div *k2)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA;
+
+ *k1 = PIXEL_RATE_DIV_NA;
+ *k2 = PIXEL_RATE_DIV_NA;
+
+ switch (otg_inst) {
+ case 0:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG0_PIXEL_RATE_DIVK1, &val_k1,
+ OTG0_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 1:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG1_PIXEL_RATE_DIVK1, &val_k1,
+ OTG1_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 2:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG2_PIXEL_RATE_DIVK1, &val_k1,
+ OTG2_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 3:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG3_PIXEL_RATE_DIVK1, &val_k1,
+ OTG3_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ *k1 = (enum pixel_rate_div)val_k1;
+ *k2 = (enum pixel_rate_div)val_k2;
+}
+
static void dccg314_set_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
@@ -52,6 +94,11 @@ static void dccg314_set_pixel_rate_div(
enum pixel_rate_div k2)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
+
+ dccg314_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
+ if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA || (k1 == cur_k1 && k2 == cur_k2))
+ return;
switch (otg_inst) {
case 0:
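dccg314_get_pixel_rate_div() (and the matching dccg32 version later in this patch) exists so that set_pixel_rate_div can skip writes that would not change the K1/K2 fields, returning early when the requested dividers match what is already programmed. The guard reduces to a read-compare-write pattern; a self-contained sketch, with fake_reg standing in for the hardware register:

#include <stdint.h>

static uint32_t fake_reg;	/* stands in for OTG_PIXEL_RATE_DIV */

static void write_if_changed(uint32_t new_val)
{
	uint32_t cur = fake_reg;	/* read back the current setting */

	if (cur == new_val)
		return;			/* already programmed: skip the write */

	fake_reg = new_val;		/* only touch the register on change */
}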
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index 914c5da737ed..7e773bf7b895 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -56,7 +56,8 @@ static void enc314_enable_fifo(struct stream_encoder *enc)
/* TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON */
REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);
- REG_UPDATE_2(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1, DIG_FIFO_READ_START_LEVEL, 0x7);
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 10, 5000);
@@ -261,6 +262,16 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
return two_pix;
}
+static void enc314_stream_encoder_dp_blank(
+ struct dc_link *link,
+ struct stream_encoder *enc)
+{
+ /* New to DCN314 - disable the FIFO before VID stream disable. */
+ enc314_disable_fifo(enc);
+
+ enc1_stream_encoder_dp_blank(link, enc);
+}
+
static void enc314_stream_encoder_dp_unblank(
struct dc_link *link,
struct stream_encoder *enc,
@@ -316,15 +327,11 @@ static void enc314_stream_encoder_dp_unblank(
/* switch DP encoder to CRTC data, but reset the fifo first. It may happen
* that it overflows during mode transition, and sometimes doesn't recover.
*/
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
udelay(10);
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
- /* DIG Resync FIFO now needs to be explicitly enabled. */
- enc314_enable_fifo(enc);
-
/* wait 100us for DIG/DP logic to prime
* (i.e. a few video lines)
*/
@@ -340,6 +347,12 @@ static void enc314_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+ /*
+ * DIG Resync FIFO now needs to be explicitly enabled.
+ * This should come after DP_VID_STREAM_ENABLE per HW docs.
+ */
+ enc314_enable_fifo(enc);
+
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
@@ -408,7 +421,7 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = {
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
.dp_blank =
- enc1_stream_encoder_dp_blank,
+ enc314_stream_encoder_dp_blank,
.dp_unblank =
enc314_stream_encoder_dp_unblank,
.audio_mute_control = enc3_audio_mute_control,
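Taken together, the DCN3.14 stream-encoder hunks above reorder the resync-FIFO handling around blank and unblank. A condensed view of the resulting sequences (summarizing this patch, not new API):

/*
 * unblank: reset DP_STEER_FIFO -> wait -> DP_VID_STREAM_ENABLE
 *          -> enc314_enable_fifo()   (FIFO last, per HW docs)
 *
 * blank:   enc314_disable_fifo()     (FIFO first, new to DCN314)
 *          -> enc1_stream_encoder_dp_blank()
 */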
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 24ec71cbd3e3..d0ad72caead2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -881,7 +881,8 @@ static const struct dc_plane_cap plane_cap = {
};
static const struct dc_debug_options debug_defaults_drv = {
- .disable_z10 = true, /*hw not support it*/
+ .disable_z10 = false,
+ .enable_z9_disable_interface = true,
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = false,
@@ -914,7 +915,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .optimize_edp_link_rate = true,
.seamless_boot_odm_combine = true
};
@@ -936,6 +936,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1675,6 +1681,11 @@ static void dcn314_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
DC_FP_END();
}
+static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct resource_funcs dcn314_res_pool_funcs = {
.destroy = dcn314_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
@@ -1697,6 +1708,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn314_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn314_get_panel_config_defaults,
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index eebb42c9ddd6..58746c437554 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -885,7 +885,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .optimize_edp_link_rate = true,
.psr_power_use_phy_fsm = 0,
};
@@ -907,6 +906,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1708,6 +1713,11 @@ static int dcn315_populate_dml_pipes_from_context(
return pipe_cnt;
}
+static void dcn315_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1721,7 +1731,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.panel_cntl_create = dcn31_panel_cntl_create,
.validate_bandwidth = dcn31_validate_bandwidth,
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
- .update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
+ .update_soc_for_wm_a = dcn315_update_soc_for_wm_a,
.populate_dml_pipes = dcn315_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
@@ -1734,6 +1744,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn315_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn315_get_panel_config_defaults,
};
static bool dcn315_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index f4b52a35ad84..6b40a11ac83a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -885,7 +885,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .optimize_edp_link_rate = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -906,6 +905,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1710,6 +1715,11 @@ static int dcn316_populate_dml_pipes_from_context(
return pipe_cnt;
}
+static void dcn316_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1736,6 +1746,7 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn316_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn316_get_panel_config_defaults,
};
static bool dcn316_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
index 26eb04ea472c..e4daed44ef5f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
@@ -42,6 +42,48 @@
#define DC_LOGGER \
dccg->ctx->logger
+static void dccg32_get_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div *k1,
+ enum pixel_rate_div *k2)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA;
+
+ *k1 = PIXEL_RATE_DIV_NA;
+ *k2 = PIXEL_RATE_DIV_NA;
+
+ switch (otg_inst) {
+ case 0:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG0_PIXEL_RATE_DIVK1, &val_k1,
+ OTG0_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 1:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG1_PIXEL_RATE_DIVK1, &val_k1,
+ OTG1_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 2:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG2_PIXEL_RATE_DIVK1, &val_k1,
+ OTG2_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 3:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG3_PIXEL_RATE_DIVK1, &val_k1,
+ OTG3_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ *k1 = (enum pixel_rate_div)val_k1;
+ *k2 = (enum pixel_rate_div)val_k2;
+}
+
static void dccg32_set_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
@@ -50,6 +92,17 @@ static void dccg32_set_pixel_rate_div(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
+
+ // Don't program 0xF into the register field. Not valid since
+ // K1 / K2 field is only 1 / 2 bits wide
+ if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA)
+ return;
+
+ dccg32_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
+ if (k1 == cur_k1 && k2 == cur_k2)
+ return;
+
switch (otg_inst) {
case 0:
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index fdae6aa89908..076969d928af 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -150,12 +150,6 @@ static void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
}
-void enc32_set_dig_output_mode(struct link_encoder *enc, uint8_t pix_per_container)
-{
- struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, pix_per_container);
-}
-
static const struct link_encoder_funcs dcn32_link_enc_funcs = {
.read_state = link_enc2_read_state,
.validate_output_with_stream =
@@ -186,7 +180,6 @@ static const struct link_encoder_funcs dcn32_link_enc_funcs = {
.is_in_alt_mode = dcn32_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn32_link_encoder_get_max_link_cap,
.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
- .set_dig_output_mode = enc32_set_dig_output_mode,
};
void dcn32_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
index 749a1e8cb811..bbcfce06bec0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
@@ -53,8 +53,4 @@ void dcn32_link_encoder_enable_dp_output(
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source);
-void enc32_set_dig_output_mode(
- struct link_encoder *enc,
- uint8_t pix_per_container);
-
#endif /* __DC_LINK_ENCODER__DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
index 0e9dce414641..d19fc93dbc75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
@@ -243,6 +243,39 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
return two_pix;
}
+static bool is_h_timing_divisible_by_2(const struct dc_crtc_timing *timing)
+{
+ /* Math borrowed from the function of the same name in inc/resource;
+ * checks whether the horizontal timing is divisible by 2.
+ */
+
+ bool divisible = false;
+ uint16_t h_blank_start = 0;
+ uint16_t h_blank_end = 0;
+
+ if (timing) {
+ h_blank_start = timing->h_total - timing->h_front_porch;
+ h_blank_end = h_blank_start - timing->h_addressable;
+
+ /* HTOTAL, Hblank start/end, and Hsync start/end all must be
+ * divisible by 2 in order for the horizontal timing params
+ * to be considered divisible by 2. Hsync start is always 0.
+ */
+ divisible = (timing->h_total % 2 == 0) &&
+ (h_blank_start % 2 == 0) &&
+ (h_blank_end % 2 == 0) &&
+ (timing->h_sync_width % 2 == 0);
+ }
+ return divisible;
+}
+
+static bool is_dp_dig_pixel_rate_div_policy(struct dc *dc, const struct dc_crtc_timing *timing)
+{
+ /* should be functionally the same as dcn32_is_dp_dig_pixel_rate_div_policy for DP encoders */
+ return is_h_timing_divisible_by_2(timing) &&
+ dc->debug.enable_dp_dig_pixel_rate_div_policy;
+}
+
static void enc32_stream_encoder_dp_unblank(
struct dc_link *link,
struct stream_encoder *enc,
@@ -259,7 +292,7 @@ static void enc32_stream_encoder_dp_unblank(
/* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
if (is_two_pixels_per_containter(&param->timing) || param->opp_cnt > 1
- || dc->debug.enable_dp_dig_pixel_rate_div_policy) {
+ || is_dp_dig_pixel_rate_div_policy(dc, &param->timing)) {
/*this logic should be the same in get_pixel_clock_parameters() */
n_multiply = 1;
}
@@ -355,7 +388,7 @@ static void enc32_dp_set_dsc_config(struct stream_encoder *enc,
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode);
+ REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode == OPTC_DSC_DISABLED ? 0 : 1);
}
/* this function read dsc related register fields to be logged later in dcn10_log_hw_state
@@ -378,24 +411,6 @@ static void enc32_read_state(struct stream_encoder *enc, struct enc_state *s)
}
}
-static void enc32_stream_encoder_reset_fifo(struct stream_encoder *enc)
-{
- struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- uint32_t fifo_enabled;
-
- REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &fifo_enabled);
-
- if (fifo_enabled == 0) {
- /* reset DIG resync FIFO */
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
- /* TODO: fix timeout when wait for DIG_FIFO_RESET_DONE */
- //REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 1, 100);
- udelay(1);
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);
- REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 1, 100);
- }
-}
-
static void enc32_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -425,8 +440,6 @@ static const struct stream_encoder_funcs dcn32_str_enc_funcs = {
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
- .reset_fifo =
- enc32_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
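For a concrete feel of is_h_timing_divisible_by_2(), consider a standard 1080p CEA-861 timing (illustrative numbers): h_total 2200, h_front_porch 88, h_addressable 1920, h_sync_width 44. Then h_blank_start = 2200 - 88 = 2112 and h_blank_end = 2112 - 1920 = 192; all four checked values are even, so the div-by-2 policy may apply. A standalone check:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t h_total = 2200, h_front_porch = 88;
	uint16_t h_addressable = 1920, h_sync_width = 44;

	uint16_t h_blank_start = h_total - h_front_porch;	/* 2112 */
	uint16_t h_blank_end = h_blank_start - h_addressable;	/* 192  */

	bool divisible = (h_total % 2 == 0) && (h_blank_start % 2 == 0) &&
			 (h_blank_end % 2 == 0) && (h_sync_width % 2 == 0);

	printf("divisible by 2: %s\n", divisible ? "yes" : "no");	/* yes */
	return 0;
}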
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
index 250d9a341cf6..ecd041a446d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
@@ -71,7 +71,9 @@
SRI(DP_MSE_RATE_UPDATE, DP, id), \
SRI(DP_PIXEL_FORMAT, DP, id), \
SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_SEC_CNTL1, DP, id), \
SRI(DP_SEC_CNTL2, DP, id), \
+ SRI(DP_SEC_CNTL5, DP, id), \
SRI(DP_SEC_CNTL6, DP, id), \
SRI(DP_STEER_FIFO, DP, id), \
SRI(DP_VID_M, DP, id), \
@@ -93,7 +95,7 @@
SRI(DIG_FIFO_CTRL0, DIG, id)
-#define SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh)\
+#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_PER_CYCLE_PROCESSING_MODE, mask_sh),\
@@ -106,6 +108,7 @@
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, mask_sh),\
SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
@@ -244,15 +247,6 @@
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh),\
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
-#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
- SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh),\
- SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, mask_sh)
-#else
-#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
- SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh)
-#endif
-
void dcn32_dio_stream_encoder_construct(
struct dcn10_stream_encoder *enc1,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
index 9db1323e1933..176b1537d2a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
@@ -47,6 +47,7 @@
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL1, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL2, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL3, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE, TP_SQ_PULSE_WIDTH, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_STREAM_SOURCE, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_SLOT_COUNT, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_X, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index 0b70247a5d36..9fbb72369c10 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -98,9 +98,13 @@ void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int d
default:
break;
}
- /* Should never be hit, if it is we have an erroneous hw config*/
- ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
- + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);
+ if (hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
+ + hubbub2->det3_size + hubbub2->compbuf_size_segments > hubbub2->crb_size_segs) {
+ /* This may happen during a seamless transition from ODM 2:1 to ODM 4:1 */
+ DC_LOG_WARNING("CRB Config Warning: DET size (%d,%d,%d,%d) + Compbuf size (%d) > CRB segments (%d)\n",
+ hubbub2->det0_size, hubbub2->det1_size, hubbub2->det2_size, hubbub2->det3_size,
+ hubbub2->compbuf_size_segments, hubbub2->crb_size_segs);
+ }
}
static void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
@@ -932,6 +936,7 @@ static const struct hubbub_funcs hubbub32_funcs = {
.program_watermarks = hubbub32_program_watermarks,
.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+ .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
.force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes,
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub32_init_watermarks,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
index 2038cbda33f7..ac1c6458dd55 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
@@ -79,6 +79,8 @@ void hubp32_phantom_hubp_post_enable(struct hubp *hubp)
uint32_t reg_val;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ /* For phantom pipe enable, disable GSL */
+ REG_UPDATE(DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, 0);
REG_UPDATE(DCHUBP_CNTL, HUBP_BLANK_EN, 1);
reg_val = REG_READ(DCHUBP_CNTL);
if (reg_val) {
@@ -179,12 +181,12 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_init = hubp3_init,
.set_unbounded_requesting = hubp31_set_unbounded_requesting,
.hubp_soft_reset = hubp31_soft_reset,
+ .hubp_set_flip_int = hubp1_set_flip_int,
.hubp_in_blank = hubp1_in_blank,
.hubp_update_force_pstate_disallow = hubp32_update_force_pstate_disallow,
.phantom_hubp_post_enable = hubp32_phantom_hubp_post_enable,
.hubp_update_mall_sel = hubp32_update_mall_sel,
- .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
- .hubp_set_flip_int = hubp1_set_flip_int
+ .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering
};
bool hubp32_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index 7544ac8e33ac..cf5bd9713f54 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -49,6 +49,7 @@
#include "dcn20/dcn20_optc.h"
#include "dmub_subvp_state.h"
#include "dce/dmub_hw_lock_mgr.h"
+#include "dcn32_resource.h"
#include "dc_link_dp.h"
#include "dmub/inc/dmub_subvp_state.h"
@@ -198,42 +199,6 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
return false;
}
-/* This function takes in the start address and surface size to be cached in CAB
- * and calculates the total number of cache lines required to store the surface.
- * The number of cache lines used for each surface is calculated independently of
- * one another. For example, if there is a primary surface(1), meta surface(2), and
- * cursor(3), this function should be called 3 times to calculate the number of cache
- * lines used for each of those surfaces.
- */
-static uint32_t dcn32_cache_lines_for_surface(struct dc *dc, uint32_t surface_size, uint64_t start_address)
-{
- uint32_t lines_used = 1;
- uint32_t num_cached_bytes = 0;
- uint32_t remaining_size = 0;
- uint32_t cache_line_size = dc->caps.cache_line_size;
- uint32_t remainder = 0;
-
- /* 1. Calculate surface size minus the number of bytes stored
- * in the first cache line (all bytes in first cache line might
- * not be fully used).
- */
- div_u64_rem(start_address, cache_line_size, &remainder);
- num_cached_bytes = cache_line_size - remainder;
- remaining_size = surface_size - num_cached_bytes;
-
- /* 2. Calculate number of cache lines that will be fully used with
- * the remaining number of bytes to be stored.
- */
- lines_used += (remaining_size / cache_line_size);
-
- /* 3. Check if we need an extra line due to the remaining size not being
- * a multiple of CACHE_LINE_SIZE.
- */
- if (remaining_size % cache_line_size > 0)
- lines_used++;
-
- return lines_used;
-}
/* This function loops through every surface that needs to be cached in CAB for SS,
* and calculates the total number of ways required to store all surfaces (primary,
@@ -241,96 +206,115 @@ static uint32_t dcn32_cache_lines_for_surface(struct dc *dc, uint32_t surface_si
*/
static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
{
- uint8_t i, j;
+ int i, j;
struct dc_stream_state *stream = NULL;
struct dc_plane_state *plane = NULL;
- uint32_t surface_size = 0;
uint32_t cursor_size = 0;
- uint32_t cache_lines_used = 0;
uint32_t total_lines = 0;
uint32_t lines_per_way = 0;
- uint32_t num_ways = 0;
- uint32_t prev_addr_low = 0;
+ uint8_t num_ways = 0;
+ uint8_t bytes_per_pixel = 0;
+ uint8_t cursor_bpp = 0;
+ uint16_t mblk_width = 0;
+ uint16_t mblk_height = 0;
+ uint16_t mall_alloc_width_blk_aligned = 0;
+ uint16_t mall_alloc_height_blk_aligned = 0;
+ uint16_t num_mblks = 0;
+ uint32_t bytes_in_mall = 0;
+ uint32_t cache_lines_used = 0;
+ uint32_t cache_lines_per_plane = 0;
- for (i = 0; i < ctx->stream_count; i++) {
- stream = ctx->streams[i];
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- // Don't include PSR surface in the total surface size for CAB allocation
- if (stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED)
+ if (!pipe->stream || !pipe->plane_state ||
+ pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED ||
+ pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
continue;
- if (ctx->stream_status[i].plane_count == 0)
- continue;
+ bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
+ mblk_width = DCN3_2_MBLK_WIDTH;
+ mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
- // For each stream, loop through each plane to calculate the number of cache
- // lines required to store the surface in CAB
- for (j = 0; j < ctx->stream_status[i].plane_count; j++) {
- plane = ctx->stream_status[i].plane_states[j];
+ /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
+ * FLOOR(vp_x_start, blk_width)
+ *
+ * mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c
+ */
+ mall_alloc_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
+ pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) -
+ (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
+
+ /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
+ * FLOOR(vp_y_start, blk_height)
+ *
+ * mall_alloc_height_blk_aligned_l/c = full_vp_height_blk_aligned_l/c
+ */
+ mall_alloc_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
+ pipe->plane_res.scl_data.viewport.height + mblk_height - 1) / mblk_height * mblk_height) -
+ (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
- // Calculate total surface size
- if (prev_addr_low != plane->address.grph.addr.u.low_part) {
- /* if plane address are different from prev FB, then userspace allocated separate FBs*/
- surface_size += plane->plane_size.surface_pitch *
- plane->plane_size.surface_size.height *
- (plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+ num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
+ ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
- prev_addr_low = plane->address.grph.addr.u.low_part;
- } else {
- /* We have the same fb for all the planes.
- * Xorg always creates one giant fb that holds all surfaces,
- * so allocating it once is sufficient.
- * */
- continue;
- }
- // Convert surface size + starting address to number of cache lines required
- // (alignment accounted for)
- cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
- plane->address.grph.addr.quad_part);
-
- if (plane->address.grph.meta_addr.quad_part) {
- // Meta surface
- cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
- plane->address.grph.meta_addr.quad_part);
- }
- }
+ /* For DCC:
+ * meta_num_mblk = CEILING(full_mblk_width_ub_l*full_mblk_height_ub_l*Bpe/256/mblk_bytes, 1)
+ */
+ if (pipe->plane_state->dcc.enable)
+ num_mblks += (mall_alloc_width_blk_aligned * mall_alloc_height_blk_aligned * bytes_per_pixel +
+ (256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES);
- // Include cursor size for CAB allocation
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j];
- struct hubp *hubp = pipe->plane_res.hubp;
+ bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
- if (pipe->stream && pipe->plane_state && hubp)
- /* Find the cursor plane and use the exact size instead of
- * using the max for calculation
- */
- if (hubp->curs_attr.width > 0) {
- // Round cursor width to next multiple of 64
- cursor_size = (((hubp->curs_attr.width + 63) / 64) * 64) * hubp->curs_attr.height;
- break;
- }
- }
+ /* Cache lines used is total bytes / cache_line_size; add 2 for worst-case alignment
+ * (MALL is 64-byte aligned)
+ */
+ cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2;
+ cache_lines_used += cache_lines_per_plane;
+ }
- switch (stream->cursor_attributes.color_format) {
- case CURSOR_MODE_MONO:
- cursor_size /= 2;
- break;
- case CURSOR_MODE_COLOR_1BIT_AND:
- case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
- case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
- cursor_size *= 4;
- break;
+ // Include cursor size for CAB allocation
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j];
+ struct hubp *hubp = pipe->plane_res.hubp;
- case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
- case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
- cursor_size *= 8;
- break;
- }
+ if (pipe->stream && pipe->plane_state && hubp)
+ /* Find the cursor plane and use the exact size instead of
+ * using the max for calculation */
- if (stream->cursor_position.enable && !dc->debug.alloc_extra_way_for_cursor &&
- cursor_size > 16384) {
- cache_lines_used += dcn32_cache_lines_for_surface(dc, cursor_size,
- plane->address.grph.cursor_cache_addr.quad_part);
- }
+ if (hubp->curs_attr.width > 0) {
+ // Round cursor width to next multiple of 64
+ cursor_size = (((hubp->curs_attr.width + 63) / 64) * 64) * hubp->curs_attr.height;
+
+ switch (pipe->stream->cursor_attributes.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ cursor_bpp = 4;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ cursor_bpp = 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ cursor_size *= 8;
+ cursor_bpp = 8;
+ break;
+ }
+
+ if (pipe->stream->cursor_position.enable && !dc->debug.alloc_extra_way_for_cursor &&
+ cursor_size > 16384) {
+ /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_height*cursor_Bpe/mblk_bytes, 1)
+ */
+ cache_lines_used += (((hubp->curs_attr.width * hubp->curs_attr.height * cursor_bpp +
+ DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES) *
+ DCN3_2_MALL_MBLK_SIZE_BYTES) / dc->caps.cache_line_size + 2;
+ }
+ break;
+ }
}
// Convert number of cache lines required to number of ways
@@ -360,7 +344,9 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
}
}
}
-
+ if (dc->debug.force_mall_ss_num_ways > 0) {
+ num_ways = dc->debug.force_mall_ss_num_ways;
+ }
return num_ways;
}
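The block-alignment math in dcn32_calculate_cab_allocation() is easier to see with numbers. Assuming the DCN3_2_* geometry from dcn32_resource.h (128-pixel-wide blocks are assumed for this sketch), a viewport starting at x = 100 with width 1920 spans pixel columns 100..2019, i.e. blocks 0..15, so the aligned width is 16 * 128 = 2048:

#include <stdio.h>

int main(void)
{
	unsigned int vp_x = 100, vp_w = 1920, blk_w = 128;

	/* FLOOR(vp_x + vp_w + blk_w - 1, blk_w) - FLOOR(vp_x, blk_w) */
	unsigned int aligned_w = ((vp_x + vp_w + blk_w - 1) / blk_w) * blk_w -
				 (vp_x / blk_w) * blk_w;

	printf("aligned width: %u\n", aligned_w);	/* 2048 */
	return 0;
}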
@@ -369,7 +355,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
union dmub_rb_cmd cmd;
uint8_t ways, i;
int j;
- bool stereo_in_use = false;
+ bool mall_ss_unsupported = false;
struct dc_plane_state *plane = NULL;
if (!dc->ctx->dmub_srv)
@@ -400,22 +386,23 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
*/
ways = dcn32_calculate_cab_allocation(dc, dc->current_state);
- /* MALL not supported with Stereo3D. If any plane is using stereo,
- * don't try to enter MALL.
+ /* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo,
+ * or TMZ surface, don't try to enter MALL.
*/
for (i = 0; i < dc->current_state->stream_count; i++) {
for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
plane = dc->current_state->stream_status[i].plane_states[j];
- if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO) {
- stereo_in_use = true;
+ if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO ||
+ plane->address.tmz_surface) {
+ mall_ss_unsupported = true;
break;
}
}
- if (stereo_in_use)
+ if (mall_ss_unsupported)
break;
}
- if (ways <= dc->caps.cache_num_ways && !stereo_in_use) {
+ if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
memset(&cmd, 0, sizeof(cmd));
cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
@@ -642,10 +629,9 @@ bool dcn32_set_input_transfer_func(struct dc *dc,
params = &dpp_base->degamma_params;
}
- result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
+ dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
- if (result &&
- pipe_ctx->stream_res.opp &&
+ if (pipe_ctx->stream_res.opp &&
pipe_ctx->stream_res.opp->ctx &&
hws->funcs.set_mcm_luts)
result = hws->funcs.set_mcm_luts(pipe_ctx, plane_state);
@@ -773,7 +759,8 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
hubp->funcs->hubp_update_mall_sel(hubp,
num_ways <= dc->caps.cache_num_ways &&
pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
- pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO ? 2 : 0,
+ pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO &&
+ !pipe->plane_state->address.tmz_surface ? 2 : 0,
cache_cursor);
}
}
@@ -1002,6 +989,10 @@ void dcn32_init_hw(struct dc *dc)
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
}
+
+ /* Enable support for ODM and windowed MPO if policy flag is set */
+ if (dc->debug.enable_single_display_2to1_odm_policy)
+ dc->config.enable_windowed_mpo_odm = true;
}
static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
@@ -1156,23 +1147,25 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
true);
}
- // Don't program pixel clock after link is already enabled
-/* if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
- pipe_ctx->clock_source,
- &pipe_ctx->stream_res.pix_clk_params,
- &pipe_ctx->pll_settings)) {
- BREAK_TO_DEBUGGER();
- }*/
+ if (pipe_ctx->stream_res.dsc) {
+ struct pipe_ctx *current_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
- if (pipe_ctx->stream_res.dsc)
update_dsc_on_stream(pipe_ctx, pipe_ctx->stream->timing.flags.DSC);
+
+ /* If this pipe is no longer used for ODM, the DSC on that pipe must be disconnected */
+ if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
+ current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
+ struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;
+ /* disconnect DSC block from stream */
+ dsc->funcs->dsc_disconnect(dsc);
+ }
+ }
}
unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
{
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
bool two_pix_per_container = false;
// For phantom pipes, use the same programming as the main pipes
@@ -1200,7 +1193,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
} else {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_4;
- if ((odm_combine_factor == 2) || dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
@@ -1237,7 +1230,6 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
struct pipe_ctx *odm_pipe;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
uint32_t pix_per_cycle = 1;
params.opp_cnt = 1;
@@ -1256,7 +1248,7 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
pipe_ctx->stream_res.tg->inst);
} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1
- || dc->debug.enable_dp_dig_pixel_rate_div_policy) {
+ || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx)) {
params.timing.pix_clk_100hz /= 2;
pix_per_cycle = 2;
}
@@ -1273,6 +1265,9 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
+ return false;
+
if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
dc->debug.enable_dp_dig_pixel_rate_div_policy)
return true;
@@ -1303,7 +1298,7 @@ static void apply_symclk_on_tx_off_wa(struct dc_link *link)
if (link->phy_state.symclk_ref_cnts.otg > 0) {
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
+ if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
@@ -1375,3 +1370,58 @@ void dcn32_update_phantom_vp_position(struct dc *dc,
}
}
}
+
+bool dcn32_dsc_pg_status(
+ struct dce_hwseq *hws,
+ unsigned int dsc_inst)
+{
+ uint32_t pwr_status = 0;
+
+ switch (dsc_inst) {
+ case 0: /* DSC0 */
+ REG_GET(DOMAIN16_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
+ break;
+ case 1: /* DSC1 */
+ REG_GET(DOMAIN17_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
+ break;
+ case 2: /* DSC2 */
+ REG_GET(DOMAIN18_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
+ break;
+ case 3: /* DSC3 */
+ REG_GET(DOMAIN19_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ return pwr_status == 0;
+}
+
+void dcn32_update_dsc_pg(struct dc *dc,
+ struct dc_state *context,
+ bool safe_to_disable)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
+ struct display_stream_compressor *dsc = dc->res_pool->dscs[i];
+ bool is_dsc_ungated = hws->funcs.dsc_pg_status(hws, dsc->inst);
+
+ if (context->res_ctx.is_dsc_acquired[i]) {
+ if (!is_dsc_ungated) {
+ hws->funcs.dsc_pg_control(hws, dsc->inst, true);
+ }
+ } else if (safe_to_disable) {
+ if (is_dsc_ungated) {
+ hws->funcs.dsc_pg_control(hws, dsc->inst, false);
+ }
+ }
+ }
+}
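dcn32_update_dsc_pg() above reduces to a small decision per DSC instance: ungate anything the new context has acquired, and gate unused instances only when safe_to_disable. The same logic, restated as a standalone helper for illustration (not a DC API):

#include <stdbool.h>

enum pg_action { PG_NONE, PG_UNGATE, PG_GATE };

static enum pg_action dsc_pg_action(bool acquired, bool ungated,
				    bool safe_to_disable)
{
	if (acquired && !ungated)
		return PG_UNGATE;	/* context needs it: power it up */
	if (!acquired && ungated && safe_to_disable)
		return PG_GATE;		/* unused and safe: power it down */
	return PG_NONE;			/* leave the domain as-is */
}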
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
index 86c018b2f132..ac3657a5b9ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
@@ -92,4 +92,12 @@ void dcn32_update_phantom_vp_position(struct dc *dc,
struct dc_state *context,
struct pipe_ctx *phantom_pipe);
+bool dcn32_dsc_pg_status(
+ struct dce_hwseq *hws,
+ unsigned int dsc_inst);
+
+void dcn32_update_dsc_pg(struct dc *dc,
+ struct dc_state *context,
+ bool safe_to_disable);
+
#endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index c66dfe14909a..45a949ba6f3f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -109,6 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
.update_phantom_vp_position = dcn32_update_phantom_vp_position,
+ .update_dsc_pg = dcn32_update_dsc_pg,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {
@@ -138,6 +139,7 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn32_update_odm,
.dsc_pg_control = dcn32_dsc_pg_control,
+ .dsc_pg_status = dcn32_dsc_pg_status,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
.wait_for_blank_complete = dcn20_wait_for_blank_complete,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index ec3989d37086..2b33eeb213e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -151,7 +151,7 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
/* CRTC disabled, so disable clock. */
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
- 1, 100000);
+ 1, 150000);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index f8c6afacf633..a88dd7b3d1c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -718,6 +718,9 @@ static const struct dc_debug_options debug_defaults_drv = {
.force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
+
+ /* Must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions */
+ .enable_double_buffered_dsc_pg_support = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
.allow_sw_cursor_fallback = false,
.alloc_extra_way_for_cursor = true,
@@ -1677,6 +1680,8 @@ static void dcn32_enable_phantom_plane(struct dc *dc,
phantom_plane->clip_rect.y = 0;
phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
+ phantom_plane->is_phantom = true;
+
dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);
curr_pipe = curr_pipe->bottom_pipe;
@@ -1746,6 +1751,10 @@ bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
pipe->stream->mall_stream_config.type = SUBVP_NONE;
pipe->stream->mall_stream_config.paired_stream = NULL;
}
+
+ if (pipe->plane_state) {
+ pipe->plane_state->is_phantom = false;
+ }
}
return removed_pipe;
}
@@ -1795,14 +1804,39 @@ bool dcn32_validate_bandwidth(struct dc *dc,
int vlevel = 0;
int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ struct mall_temp_config mall_temp_config;
+
+ /* To handle FreeSync properly, reset the FreeSync DML parameters
+ * to their default state for the first stage of validation
+ */
+ context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
+ context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true;
+
DC_LOGGER_INIT(dc->ctx->logger);
+ /* For fast validation, there are situations where a shallow copy
+ * of the dc->current_state is created for the validation. In this
+ * case we want to save and restore the mall config because we always
+ * tear down subvp at the beginning of validation (and don't attempt
+ * to add it back if it's fast validation). If we don't restore the
+ * subvp config in cases of fast validation + shallow copy of the
+ * dc->current_state, the dc->current_state will have a partially
+ * removed subvp state when we did not intend to remove it.
+ */
+ if (fast_validate) {
+ memset(&mall_temp_config, 0, sizeof(mall_temp_config));
+ dcn32_save_mall_state(dc, context, &mall_temp_config);
+ }
+
BW_VAL_TRACE_COUNT();
DC_FP_START();
out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
DC_FP_END();
+ if (fast_validate)
+ dcn32_restore_mall_state(dc, context, &mall_temp_config);
+
if (pipe_cnt == 0)
goto validate_out;
@@ -1846,7 +1880,7 @@ int dcn32_populate_dml_pipes_from_context(
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
bool subvp_in_use = false;
- int plane_count = 0;
+ uint8_t is_pipe_split_expected[MAX_PIPES] = {0};
struct dc_crtc_timing *timing;
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
@@ -1865,12 +1899,13 @@ int dcn32_populate_dml_pipes_from_context(
timing = &pipe->stream->timing;
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
- if (context->stream_count == 1 && !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
- is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream)) {
- if (dc->debug.enable_single_display_2to1_odm_policy) {
- if (!((plane_count > 2) && pipe->top_pipe))
- pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
- }
+ if (context->stream_count == 1 &&
+ context->stream_status[0].plane_count <= 1 &&
+ !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
+ is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&
+ pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ &&
+ dc->debug.enable_single_display_2to1_odm_policy) {
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
}
pipe_cnt++;
}
@@ -1927,12 +1962,10 @@ int dcn32_populate_dml_pipes_from_context(
}
}
- /* Calculate the number of planes we have so we can determine
- * whether to apply ODM 2to1 policy or not
- */
- if (pipe->stream && !pipe->prev_odm_pipe &&
- (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
- ++plane_count;
+ DC_FP_START();
+ is_pipe_split_expected[i] = dcn32_predict_pipe_split(context, &pipes[pipe_cnt]);
+ DC_FP_END();
+
pipe_cnt++;
}
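
The reworked policy above applies dm_odm_combine_policy_2to1 only when every gate holds at once: a single stream with at most one plane, a non-HDMI signal, horizontal timing divisible by 2, a pixel clock above DCN3_2_VMIN_DISPCLK_HZ, and the debug policy enabled. A compact sketch of that predicate on flattened, illustrative inputs:

#include <stdbool.h>
#include <stdint.h>

#define DCN3_2_VMIN_DISPCLK_HZ 717000000

/* Hypothetical flattened inputs for one candidate stream. */
struct odm_policy_in {
	int stream_count;
	int plane_count;
	bool is_hdmi;
	bool h_timing_divisible_by_2;
	uint64_t pix_clk_hz;		/* pix_clk_100hz * 100 in the driver */
	bool policy_enabled;		/* dc->debug.enable_single_display_2to1_odm_policy */
};

/* True when the dm_odm_combine_policy_2to1 override should be applied. */
static bool use_2to1_odm(const struct odm_policy_in *in)
{
	return in->stream_count == 1 &&
	       in->plane_count <= 1 &&
	       !in->is_hdmi &&
	       in->h_timing_divisible_by_2 &&
	       in->pix_clk_hz > DCN3_2_VMIN_DISPCLK_HZ &&
	       in->policy_enabled;
}

int main(void)
{
	struct odm_policy_in in = {1, 1, false, true, 900000000ULL, true};

	return use_2to1_odm(&in) ? 0 : 1;
}

The pixel-clock floor keeps ODM combine for modes that actually need the per-pipe clock halved; below Vmin DISPCLK a single pipe already suffices.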
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 40e9211eec1e..f76120e67c16 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -37,6 +37,7 @@
#define DCN3_2_MBLK_WIDTH 128
#define DCN3_2_MBLK_HEIGHT_4BPE 128
#define DCN3_2_MBLK_HEIGHT_8BPE 64
+#define DCN3_2_VMIN_DISPCLK_HZ 717000000
#define TO_DCN32_RES_POOL(pool)\
container_of(pool, struct dcn32_resource_pool, base)
@@ -44,6 +45,17 @@
extern struct _vcs_dpi_ip_params_st dcn3_2_ip;
extern struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc;
+/* Temp struct used to save and restore MALL config
+ * during validation.
+ *
+ * TODO: Move MALL config into dc_state instead of stream struct
+ * to avoid needing to save/restore.
+ */
+struct mall_temp_config {
+ struct mall_stream_config mall_stream_config[MAX_PIPES];
+ bool is_phantom_plane[MAX_PIPES];
+};
+
struct dcn32_resource_pool {
struct resource_pool base;
};
@@ -107,6 +119,8 @@ bool dcn32_subvp_in_use(struct dc *dc,
bool dcn32_mpo_in_use(struct dc_state *context);
+bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
+
struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
struct dc_state *state,
const struct resource_pool *pool,
@@ -119,6 +133,15 @@ void dcn32_determine_det_override(struct dc *dc,
void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes);
+
+void dcn32_save_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config);
+
+void dcn32_restore_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 46ba6eee69ea..d51d0c40ae5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -233,6 +233,23 @@ bool dcn32_mpo_in_use(struct dc_state *context)
return false;
}
+
+bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->plane_state && pipe->plane_state->rotation != ROTATION_ANGLE_0)
+ return true;
+ }
+ return false;
+}
+
/**
* *******************************************************************************************
* dcn32_determine_det_override: Determine DET allocation for each pipe
@@ -278,7 +295,7 @@ void dcn32_determine_det_override(struct dc *dc,
}
}
- if (context->stream_count > 0) {
+ if (stream_count > 0) {
stream_segments = 18 / stream_count;
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
@@ -363,3 +380,74 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
} else
dcn32_determine_det_override(dc, context, pipes);
}
+
+/**
+ * *******************************************************************************************
+ * dcn32_save_mall_state: Save MALL (SubVP) state for fast validation cases
+ *
+ * This function saves the MALL (SubVP) state for fast validation cases. For fast validation,
+ * there are situations where a shallow copy of the dc->current_state is created for the
+ * validation. In this case we want to save and restore the mall config because we always
+ * tear down subvp at the beginning of validation (and don't attempt to add it back if it's
+ * fast validation). If we don't restore the subvp config in cases of fast validation +
+ * shallow copy of the dc->current_state, the dc->current_state will have a partially
+ * removed subvp state when we did not intend to remove it.
+ *
+ * NOTE: This function ONLY works if the streams are not moved to a different pipe in the
+ * validation. We don't expect this to happen in fast_validation=1 cases.
+ *
+ * @param [in]: dc: Current DC state
+ * @param [in]: context: New DC state to be programmed
+ * @param [out]: temp_config: struct used to cache the existing MALL state
+ *
+ * @return: void
+ *
+ * *******************************************************************************************
+ */
+void dcn32_save_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream)
+ temp_config->mall_stream_config[i] = pipe->stream->mall_stream_config;
+
+ if (pipe->plane_state)
+ temp_config->is_phantom_plane[i] = pipe->plane_state->is_phantom;
+ }
+}
+
+/**
+ * *******************************************************************************************
+ * dcn32_restore_mall_state: Restore MALL (SubVP) state for fast validation cases
+ *
+ * Restore the MALL state based on the previously saved state from dcn32_save_mall_state
+ *
+ * @param [in]: dc: Current DC state
+ * @param [in/out]: context: New DC state to be programmed, restore MALL state into here
+ * @param [in]: temp_config: struct that has the cached MALL state
+ *
+ * @return: void
+ *
+ * *******************************************************************************************
+ */
+void dcn32_restore_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream)
+ pipe->stream->mall_stream_config = temp_config->mall_stream_config[i];
+
+ if (pipe->plane_state)
+ pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i];
+ }
+}
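
The expected calling pattern for these two helpers is save, then validate (which always tears subvp down), then restore, and only on the fast-validation path where context may be a shallow copy of dc->current_state. A reduced, self-contained sketch of that bracket (per-pipe state collapsed to a single int; all names illustrative):

#include <string.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_PIPES 6

struct state { int mall_config[MAX_PIPES]; };

static void save_mall_state(const struct state *ctx, struct state *tmp)
{
	memcpy(tmp->mall_config, ctx->mall_config, sizeof(tmp->mall_config));
}

static void restore_mall_state(struct state *ctx, const struct state *tmp)
{
	memcpy(ctx->mall_config, tmp->mall_config, sizeof(ctx->mall_config));
}

/* Validation always tears subvp down first (mirrors dcn32_internal_validate_bw). */
static bool validate(struct state *ctx)
{
	memset(ctx->mall_config, 0, sizeof(ctx->mall_config));
	return true;
}

static bool validate_bandwidth(struct state *ctx, bool fast_validate)
{
	struct state tmp = {0};
	bool out;

	if (fast_validate)
		save_mall_state(ctx, &tmp);
	out = validate(ctx);
	if (fast_validate)
		restore_mall_state(ctx, &tmp);	/* ctx may alias dc->current_state */
	return out;
}

int main(void)
{
	struct state ctx = {{1, 2, 0, 0, 0, 0}};

	validate_bandwidth(&ctx, true);
	printf("pipe0 config after fast validate: %d\n", ctx.mall_config[0]);
	return 0;
}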
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
index 49682a31ecbd..fa9b6603cfd3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
@@ -91,7 +91,6 @@ static const struct link_encoder_funcs dcn321_link_enc_funcs = {
.is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
- .set_dig_output_mode = enc32_set_dig_output_mode,
};
void dcn321_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index c772ef962194..61087f2385a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -94,8 +94,6 @@
#include "dcn20/dcn20_vmid.h"
#define DC_LOGGER_INIT(logger)
-#define fixed16_to_double(x) (((double)x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
enum dcn321_clk_src_array_id {
DCN321_CLK_SRC_PLL0,
@@ -718,6 +716,9 @@ static const struct dc_debug_options debug_defaults_drv = {
.force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
+
+ /* Must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions */
+ .enable_double_buffered_dsc_pg_support = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
.allow_sw_cursor_fallback = false,
.alloc_extra_way_for_cursor = true,
@@ -1603,7 +1604,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.validate_bandwidth = dcn32_validate_bandwidth,
.calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg,
.populate_dml_pipes = dcn32_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_idle_pipe_for_head_pipe_in_layer = dcn32_acquire_idle_pipe_for_head_pipe_in_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -1653,7 +1654,7 @@ static bool dcn321_resource_construct(
#undef REG_STRUCT
#define REG_STRUCT dccg_regs
- dccg_regs_init();
+ dccg_regs_init();
ctx->dc_bios->regs = &bios_regs;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index cb81ed2fbd53..ca7d24000621 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -34,7 +34,7 @@ dml_ccflags := -mhard-float -maltivec
endif
ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+ifneq ($(call gcc-min-version, 70100),y)
IS_OLD_GCC = 1
endif
endif
@@ -77,7 +77,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_rq_dlg_calc_32.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn321/dcn321_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index d46adc849d2a..e73f089c84bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -1444,81 +1444,67 @@ unsigned int dcn_find_dcfclk_suits_all(
return dcf_clk;
}
-static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks)
+void dcn_bw_update_from_pplib_fclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *fclks)
{
- int i;
-
- if (clks->num_levels == 0)
- return false;
-
- for (i = 0; i < clks->num_levels; i++)
- /* Ensure that the result is sane */
- if (clks->data[i].clocks_in_khz == 0)
- return false;
+ unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
- return true;
+ ASSERT(fclks->num_levels);
+
+ vmin0p65_idx = 0;
+ vmid0p72_idx = fclks->num_levels -
+ (fclks->num_levels > 2 ? 3 : (fclks->num_levels > 1 ? 2 : 1));
+ vnom0p8_idx = fclks->num_levels - (fclks->num_levels > 1 ? 2 : 1);
+ vmax0p9_idx = fclks->num_levels - 1;
+
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
+ 32 * (fclks->data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
+ dc->dcn_soc->number_of_channels *
+ (fclks->data[vmid0p72_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
+ dc->dcn_soc->number_of_channels *
+ (fclks->data[vnom0p8_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
+ dc->dcn_soc->number_of_channels *
+ (fclks->data[vmax0p9_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
}
-void dcn_bw_update_from_pplib(struct dc *dc)
+void dcn_bw_update_from_pplib_dcfclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *dcfclks)
{
- struct dc_context *ctx = dc->ctx;
- struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
- bool res;
- unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
-
- /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
- res = dm_pp_get_clock_levels_by_type_with_voltage(
- ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
-
- if (res)
- res = verify_clock_values(&fclks);
-
- if (res) {
- ASSERT(fclks.num_levels);
-
- vmin0p65_idx = 0;
- vmid0p72_idx = fclks.num_levels -
- (fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1));
- vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 2 : 1);
- vmax0p9_idx = fclks.num_levels - 1;
-
- dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
- 32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
- dc->dcn_soc->number_of_channels *
- (fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
- dc->dcn_soc->number_of_channels *
- (fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
- dc->dcn_soc->number_of_channels *
- (fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- } else
- BREAK_TO_DEBUGGER();
-
- res = dm_pp_get_clock_levels_by_type_with_voltage(
- ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
-
- if (res)
- res = verify_clock_values(&dcfclks);
+ if (dcfclks->num_levels >= 3) {
+ dc->dcn_soc->dcfclkv_min0p65 = dcfclks->data[0].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_mid0p72 = dcfclks->data[dcfclks->num_levels - 3].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_nom0p8 = dcfclks->data[dcfclks->num_levels - 2].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_max0p9 = dcfclks->data[dcfclks->num_levels - 1].clocks_in_khz / 1000.0;
+ }
+}
- if (res && dcfclks.num_levels >= 3) {
- dc->dcn_soc->dcfclkv_min0p65 = dcfclks.data[0].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_mid0p72 = dcfclks.data[dcfclks.num_levels - 3].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_nom0p8 = dcfclks.data[dcfclks.num_levels - 2].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_max0p9 = dcfclks.data[dcfclks.num_levels - 1].clocks_in_khz / 1000.0;
- } else
- BREAK_TO_DEBUGGER();
+void dcn_get_soc_clks(
+ struct dc *dc,
+ int *min_fclk_khz,
+ int *min_dcfclk_khz,
+ int *socclk_khz)
+{
+ *min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
+ *min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
+ *socclk_khz = dc->dcn_soc->socclk * 1000;
}
-void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
+void dcn_bw_notify_pplib_of_wm_ranges(
+ struct dc *dc,
+ int min_fclk_khz,
+ int min_dcfclk_khz,
+ int socclk_khz)
{
struct pp_smu_funcs_rv *pp = NULL;
struct pp_smu_wm_range_sets ranges = {0};
- int min_fclk_khz, min_dcfclk_khz, socclk_khz;
const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
if (dc->res_pool->pp_smu)
@@ -1526,10 +1512,6 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
if (!pp || !pp->set_wm_ranges)
return;
- min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
- min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
- socclk_khz = dc->dcn_soc->socclk * 1000;
-
/* Now notify PPLib/SMU about which Watermarks sets they should select
* depending on DPM state they are in. And update BW MGR GFX Engine and
* Memory clock member variables for Watermarks calculations for each
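
With verify_clock_values() removed, checking the PPLib levels for emptiness or zero entries is now the caller's job before invoking the split update helpers. A standalone sketch of that check together with the voltage-corner index math that dcn_bw_update_from_pplib_fclks() keeps (sample clock values are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct clock_levels {
	int num_levels;
	int clocks_in_khz[8];
};

/* Mirror of the deleted verify_clock_values(): reject empty or zero entries. */
static bool verify_clock_values(const struct clock_levels *clks)
{
	if (clks->num_levels == 0)
		return false;
	for (int i = 0; i < clks->num_levels; i++)
		if (clks->clocks_in_khz[i] == 0)
			return false;
	return true;
}

int main(void)
{
	struct clock_levels fclks = {4, {400000, 800000, 1200000, 1600000}};

	if (!verify_clock_values(&fclks))
		return 1;

	int vmin0p65 = 0;
	int vmid0p72 = fclks.num_levels -
		(fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1));
	int vnom0p8 = fclks.num_levels - (fclks.num_levels > 1 ? 2 : 1);
	int vmax0p9 = fclks.num_levels - 1;

	/* With 4 levels this selects indices 0, 1, 2, 3 respectively. */
	printf("vmin=%d vmid=%d vnom=%d vmax=%d\n",
	       vmin0p65, vmid0p72, vnom0p8, vmax0p9);
	return 0;
}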
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index b6e99eefe869..7dd0845d1bd9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -292,6 +292,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
.num_chans = 4,
+ .dummy_pstate_latency_us = 10.0
};
struct _vcs_dpi_ip_params_st dcn3_16_ip = {
@@ -459,13 +460,30 @@ void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
}
}
+void dcn315_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+{
+ dc_assert_fp_enabled();
+
+ if (dc->clk_mgr->bw_params->wm_table.entries[WM_A].valid) {
+ /* For 315, pstate change is only supported if possible in vactive */
+ if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[context->bw_ctx.dml.vba.VoltageLevel][context->bw_ctx.dml.vba.maxMpcComb] != dm_dram_clock_change_vactive)
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
+ else
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us =
+ dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us =
+ dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_exit_time_us;
+ }
+}
+
void dcn31_calculate_wm_and_dlg_fp(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel)
{
- int i, pipe_idx;
+ int i, pipe_idx, active_dpp_count = 0;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
dc_assert_fp_enabled();
@@ -486,72 +504,6 @@ void dcn31_calculate_wm_and_dlg_fp(
pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
-#if 0 // TODO
- /* Set B:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
- if (vlevel == 0) {
- pipes[0].clks_cfg.voltage = 1;
- pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
- }
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
-
- /* Set C:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- /* Set D:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
-
/* Set A:
* All clocks min required
*
@@ -568,16 +520,17 @@ void dcn31_calculate_wm_and_dlg_fp(
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- /* TODO: remove: */
context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
- /* end remove*/
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ active_dpp_count++;
+
pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
@@ -594,6 +547,9 @@ void dcn31_calculate_wm_and_dlg_fp(
}
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ /* For 31x APUs, pstate change is only supported if possible in vactive or if there are no active dpps */
+ context->bw_ctx.bw.dcn.clk.p_state_change_support =
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive || !active_dpp_count;
}
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
@@ -739,7 +695,7 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31);
+ dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN315);
else
dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31_FPGA);
}
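
The p_state_change_support rule added to dcn31_calculate_wm_and_dlg_fp() is a two-term predicate: DML must report that the switch fits in vactive, or there must be no active DPPs at all (a blank display can always switch). Standalone:

#include <stdbool.h>

enum dram_clock_change {
	DM_DRAM_CLOCK_CHANGE_VACTIVE,
	DM_DRAM_CLOCK_CHANGE_VBLANK,
	DM_DRAM_CLOCK_CHANGE_UNSUPPORTED,
};

/* 31x APU: pstate change only when possible in vactive, or with no active DPPs. */
static bool p_state_change_support(enum dram_clock_change support, int active_dpp_count)
{
	return support == DM_DRAM_CLOCK_CHANGE_VACTIVE || active_dpp_count == 0;
}

int main(void)
{
	/* VBLANK-only support is still allowed when no planes are active. */
	return p_state_change_support(DM_DRAM_CLOCK_CHANGE_VBLANK, 0) ? 0 : 1;
}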
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
index 4372f17b55d4..fd58b2561ec9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -35,6 +35,7 @@ void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
int pipe_cnt);
void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
+void dcn315_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
void dcn31_calculate_wm_and_dlg_fp(
struct dc *dc, struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 8dfe639b6508..b612edb14417 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -43,6 +43,8 @@
#define BPP_BLENDED_PIPE 0xffffffff
#define DCN31_MAX_DSC_IMAGE_WIDTH 5184
#define DCN31_MAX_FMT_420_BUFFER_WIDTH 4096
+#define DCN3_15_MIN_COMPBUF_SIZE_KB 128
+#define DCN3_15_MAX_DET_SIZE 384
// For DML-C changes that haven't been propagated to VBA yet
//#define __DML_VBA_ALLOW_DELTA__
@@ -3775,6 +3777,17 @@ static noinline void CalculatePrefetchSchedulePerPlane(
&v->VReadyOffsetPix[k]);
}
+static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int *DETBufferSizeInKByte)
+{
+ int i, total_pipes = 0;
+ for (i = 0; i < NumberOfActivePlanes; i++)
+ total_pipes += NoOfDPPThisState[i];
+ *DETBufferSizeInKByte = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
+ if (*DETBufferSizeInKByte > DCN3_15_MAX_DET_SIZE)
+ *DETBufferSizeInKByte = DCN3_15_MAX_DET_SIZE;
+}
+
void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
@@ -4533,6 +4546,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k];
}
+ if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315)
+ PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, &v->DETBufferSizeInKByte[0]);
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
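
PatchDETBufferSizeInKByte() splits the return buffer, minus the 128 KB compressed-buffer floor, evenly across all allocated DPPs, rounds down to 64 KB segments, and clamps at 384 KB. A standalone copy with a worked example (the 1792 KB return buffer size is an assumed value for illustration only):

#include <stdio.h>

#define DCN3_15_MIN_COMPBUF_SIZE_KB 128
#define DCN3_15_MAX_DET_SIZE 384

static unsigned int patch_det_size_kb(int total_pipes, unsigned int return_buf_kb)
{
	unsigned int det = ((return_buf_kb - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;

	if (det > DCN3_15_MAX_DET_SIZE)
		det = DCN3_15_MAX_DET_SIZE;
	return det;
}

int main(void)
{
	/* e.g. 3 pipes: (1792 - 128) / 64 / 3 = 8 segments -> 512 KB, clamped to 384 KB */
	printf("%u KB per pipe\n", patch_det_size_kb(3, 1792));
	return 0;
}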
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index fe0770038a90..819de0f11012 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -121,8 +121,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
},
},
.num_states = 1,
- .sr_exit_time_us = 20.16,
- .sr_enter_plus_exit_time_us = 27.13,
+ .sr_exit_time_us = 42.97,
+ .sr_enter_plus_exit_time_us = 49.94,
.sr_exit_z8_time_us = 285.0,
.sr_enter_plus_exit_z8_time_us = 320,
.writeback_latency_us = 12.0,
@@ -243,7 +243,7 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
}
-/**
+/*
* Finds dummy_latency_index when MCLK switching using firmware based
* vblank stretch is enabled. This function will iterate through the
* table of dummy pstate latencies until the lowest value that allows
@@ -290,15 +290,14 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
/**
* dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes
* and populate pipe_ctx with those params.
- *
- * This function must be called AFTER the phantom pipes are added to context
- * and run through DML (so that the DLG params for the phantom pipes can be
- * populated), and BEFORE we program the timing for the phantom pipes.
- *
* @dc: [in] current dc state
* @context: [in] new dc state
* @pipes: [in] DML pipe params array
* @pipe_cnt: [in] DML pipe count
+ *
+ * This function must be called AFTER the phantom pipes are added to context
+ * and run through DML (so that the DLG params for the phantom pipes can be
+ * populated), and BEFORE we program the timing for the phantom pipes.
*/
void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
struct dc_state *context,
@@ -331,8 +330,9 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
}
/**
- * *******************************************************************************************
- * dcn32_predict_pipe_split: Predict if pipe split will occur for a given DML pipe
+ * dcn32_predict_pipe_split - Predict if pipe split will occur for a given DML pipe
+ * @context: [in] New DC state to be programmed
+ * @pipe_e2e: [in] DML pipe end to end context
*
* This function takes in a DML pipe (pipe_e2e) and predicts if pipe split is required (both
* ODM and MPC). For pipe split, ODM combine is determined by the ODM mode, and MPC combine is
@@ -343,12 +343,7 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
* - MPC combine is only chosen if there is no ODM combine requirements / policy in place, and
* MPC is required
*
- * @param [in]: context: New DC state to be programmed
- * @param [in]: pipe_e2e: DML pipe end to end context
- *
- * @return: Number of splits expected (1 for 2:1 split, 3 for 4:1 split, 0 for no splits).
- *
- * *******************************************************************************************
+ * Return: Number of splits expected (1 for 2:1 split, 3 for 4:1 split, 0 for no splits).
*/
uint8_t dcn32_predict_pipe_split(struct dc_state *context,
display_e2e_pipe_params_st *pipe_e2e)
@@ -504,7 +499,14 @@ void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
}
/**
- * dcn32_set_phantom_stream_timing: Set timing params for the phantom stream
+ * dcn32_set_phantom_stream_timing - Set timing params for the phantom stream
+ * @dc: current dc state
+ * @context: new dc state
+ * @ref_pipe: Main pipe for the phantom stream
+ * @phantom_stream: target phantom stream state
+ * @pipes: DML pipe params
+ * @pipe_cnt: number of DML pipes
+ * @dc_pipe_idx: DC pipe index for the main pipe (i.e. ref_pipe)
*
* Set timing params of the phantom stream based on calculated output from DML.
* This function first gets the DML pipe index using the DC pipe index, then
@@ -517,13 +519,6 @@ void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
* that separately.
*
* - Set phantom backporch = vstartup of main pipe
- *
- * @dc: current dc state
- * @context: new dc state
- * @ref_pipe: Main pipe for the phantom stream
- * @pipes: DML pipe params
- * @pipe_cnt: number of DML pipes
- * @dc_pipe_idx: DC pipe index for the main pipe (i.e. ref_pipe)
*/
void dcn32_set_phantom_stream_timing(struct dc *dc,
struct dc_state *context,
@@ -592,16 +587,14 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
}
/**
- * dcn32_get_num_free_pipes: Calculate number of free pipes
+ * dcn32_get_num_free_pipes - Calculate number of free pipes
+ * @dc: current dc state
+ * @context: new dc state
*
* This function assumes that a "used" pipe is a pipe that has
* both a stream and a plane assigned to it.
*
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * Number of free pipes available in the context
+ * Return: Number of free pipes available in the context
*/
static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *context)
{
@@ -625,7 +618,10 @@ static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *con
}
/**
- * dcn32_assign_subvp_pipe: Function to decide which pipe will use Sub-VP.
+ * dcn32_assign_subvp_pipe - Function to decide which pipe will use Sub-VP.
+ * @dc: current dc state
+ * @context: new dc state
+ * @index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned
*
* We enter this function if we are Sub-VP capable (i.e. enough pipes available)
* and regular P-State switching (i.e. VACTIVE/VBLANK) is not supported, or if
@@ -639,12 +635,7 @@ static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *con
* for determining which should be the SubVP pipe (need a way to determine if a pipe / plane doesn't
* support MCLK switching naturally [i.e. ACTIVE or VBLANK]).
*
- * @param dc: current dc state
- * @param context: new dc state
- * @param index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned
- *
- * Return:
- * True if a valid pipe assignment was found for Sub-VP. Otherwise false.
+ * Return: True if a valid pipe assignment was found for Sub-VP. Otherwise false.
*/
static bool dcn32_assign_subvp_pipe(struct dc *dc,
struct dc_state *context,
@@ -674,9 +665,10 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* - Not able to switch in vactive naturally (switching in active means the
* DET provides enough buffer to hide the P-State switch latency -- trying
* to combine this with SubVP can cause issues with the scheduling).
+ * - Not TMZ surface
*/
if (pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 &&
+ pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface &&
vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
while (pipe) {
num_pipes++;
@@ -710,7 +702,9 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
}
/**
- * dcn32_enough_pipes_for_subvp: Function to check if there are "enough" pipes for SubVP.
+ * dcn32_enough_pipes_for_subvp - Function to check if there are "enough" pipes for SubVP.
+ * @dc: current dc state
+ * @context: new dc state
*
* This function returns true if there are enough free pipes
* to create the required phantom pipes for any given stream
@@ -722,9 +716,6 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* pipe which can be used as the phantom pipe for the non pipe
* split pipe.
*
- * @dc: current dc state
- * @context: new dc state
- *
* Return:
* True if there are enough free pipes to assign phantom pipes to at least one
* stream that does not already have phantom pipes assigned. Otherwise false.
@@ -763,7 +754,9 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
}
/**
- * subvp_subvp_schedulable: Determine if SubVP + SubVP config is schedulable
+ * subvp_subvp_schedulable - Determine if SubVP + SubVP config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
*
* High level algorithm:
* 1. Find longest microschedule length (in us) between the two SubVP pipes
@@ -771,11 +764,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
* pipes still allows for the maximum microschedule to fit in the active
* region for both pipes.
*
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * bool - True if the SubVP + SubVP config is schedulable, false otherwise
+ * Return: True if the SubVP + SubVP config is schedulable, false otherwise
*/
static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
{
@@ -835,7 +824,10 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
}
/**
- * subvp_drr_schedulable: Determine if SubVP + DRR config is schedulable
+ * subvp_drr_schedulable - Determine if SubVP + DRR config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
+ * @drr_pipe: DRR pipe_ctx for the SubVP + DRR config
*
* High level algorithm:
* 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
@@ -844,12 +836,7 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
* 3. If (SubVP Active - Prefetch > Stretched DRR frame + max(MALL region, Stretched DRR frame))
* then report the configuration as supported
*
- * @dc: current dc state
- * @context: new dc state
- * @drr_pipe: DRR pipe_ctx for the SubVP + DRR config
- *
- * Return:
- * bool - True if the SubVP + DRR config is schedulable, false otherwise
+ * Return: True if the SubVP + DRR config is schedulable, false otherwise
*/
static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struct pipe_ctx *drr_pipe)
{
@@ -913,7 +900,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
/**
- * subvp_vblank_schedulable: Determine if SubVP + VBLANK config is schedulable
+ * subvp_vblank_schedulable - Determine if SubVP + VBLANK config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
*
* High level algorithm:
* 1. Get timing for SubVP pipe, phantom pipe, and VBLANK pipe
@@ -921,11 +910,7 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
* then report the configuration as supported
* 3. If the VBLANK display is DRR, then take the DRR static schedulability path
*
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * bool - True if the SubVP + VBLANK/DRR config is schedulable, false otherwise
+ * Return: True if the SubVP + VBLANK/DRR config is schedulable, false otherwise
*/
static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
{
@@ -1002,20 +987,18 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
}
/**
- * subvp_validate_static_schedulability: Check which SubVP case is calculated and handle
- * static analysis based on the case.
+ * subvp_validate_static_schedulability - Check which SubVP case is calculated
+ * and handle static analysis based on the case.
+ * @dc: current dc state
+ * @context: new dc state
+ * @vlevel: Voltage level calculated by DML
*
* Three cases:
* 1. SubVP + SubVP
* 2. SubVP + VBLANK (DRR checked internally)
* 3. SubVP + VACTIVE (currently unsupported)
*
- * @dc: current dc state
- * @context: new dc state
- * @vlevel: Voltage level calculated by DML
- *
- * Return:
- * bool - True if statically schedulable, false otherwise
+ * Return: True if statically schedulable, false otherwise
*/
static bool subvp_validate_static_schedulability(struct dc *dc,
struct dc_state *context,
@@ -1114,12 +1097,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
*/
if (!dc->debug.force_disable_subvp && dcn32_all_pipes_have_stream_and_plane(dc, context) &&
- !dcn32_mpo_in_use(context) && (*vlevel == context->bw_ctx.dml.soc.num_states ||
+ !dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) &&
+ (*vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
dc->debug.force_subvp_mclk_switch)) {
dcn32_merge_pipes_for_subvp(dc, context);
- // to re-initialize viewport after the pipe merge
+ memset(merge, 0, MAX_PIPES * sizeof(bool));
+
+ /* to re-initialize viewport after the pipe merge */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -1588,6 +1574,33 @@ bool dcn32_internal_validate_bw(struct dc *dc,
if (pipe->next_odm_pipe)
pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
+ /* 2:1 ODM + MPC split MPO to single pipe + MPC split MPO */
+ if (pipe->bottom_pipe) {
+ if (pipe->bottom_pipe->prev_odm_pipe || pipe->bottom_pipe->next_odm_pipe) {
+ /* MPC split rules will handle this case */
+ pipe->bottom_pipe->top_pipe = NULL;
+ } else {
+ /* when merging ODM pipes, the bottom MPC pipe must now point to
+ * the previous ODM pipe and its associated stream assets
+ */
+ if (pipe->prev_odm_pipe->bottom_pipe) {
+ /* 3 plane MPO*/
+ pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe->bottom_pipe;
+ pipe->prev_odm_pipe->bottom_pipe->bottom_pipe = pipe->bottom_pipe;
+ } else {
+ /* 2 plane MPO*/
+ pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe;
+ pipe->prev_odm_pipe->bottom_pipe = pipe->bottom_pipe;
+ }
+
+ memcpy(&pipe->bottom_pipe->stream_res, &pipe->bottom_pipe->top_pipe->stream_res, sizeof(struct stream_resource));
+ }
+ }
+
+ if (pipe->top_pipe) {
+ pipe->top_pipe->bottom_pipe = NULL;
+ }
+
pipe->bottom_pipe = NULL;
pipe->next_odm_pipe = NULL;
pipe->plane_state = NULL;
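
The merge logic added above is pointer surgery on two intrusive lists: top_pipe/bottom_pipe for MPC and prev_odm_pipe/next_odm_pipe for ODM. A reduced sketch of the 2-plane MPO case, re-parenting the bottom MPC pipe onto the surviving ODM pipe (the stream_res copy and 3-plane branch are omitted; field set trimmed to the links involved):

#include <stdio.h>
#include <stddef.h>

struct pipe {
	const char *name;
	struct pipe *top_pipe, *bottom_pipe;
	struct pipe *prev_odm_pipe, *next_odm_pipe;
};

/* Merge 'pipe' (an ODM secondary) back into its prev_odm_pipe, 2-plane MPO case. */
static void merge_odm_pipe(struct pipe *pipe)
{
	if (pipe->bottom_pipe && !pipe->bottom_pipe->prev_odm_pipe &&
	    !pipe->bottom_pipe->next_odm_pipe && !pipe->prev_odm_pipe->bottom_pipe) {
		pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe;
		pipe->prev_odm_pipe->bottom_pipe = pipe->bottom_pipe;
	}
	pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
	pipe->bottom_pipe = NULL;
	pipe->next_odm_pipe = NULL;
}

int main(void)
{
	struct pipe head = {"head"}, odm = {"odm"}, mpc = {"mpc"};

	head.next_odm_pipe = &odm;
	odm.prev_odm_pipe = &head;
	odm.bottom_pipe = &mpc;
	mpc.top_pipe = &odm;

	merge_odm_pipe(&odm);
	printf("mpc now under: %s\n", mpc.top_pipe->name);	/* -> head */
	return 0;
}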
@@ -1756,6 +1769,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
int i, pipe_idx, vlevel_temp = 0;
double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
dm_dram_clock_change_unsupported;
unsigned int dummy_latency_index = 0;
@@ -1791,7 +1805,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
- dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] !=
dm_dram_clock_change_unsupported;
}
@@ -1877,6 +1891,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_fw_based_mclk_switching;
+ }
+
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
min_dram_speed_mts_margin = 160;
@@ -2019,6 +2037,45 @@ static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st
memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st));
}
+void dcn32_patch_dpm_table(struct clk_bw_params *bw_params)
+{
+ int i;
+ unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0,
+ max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0;
+
+ for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
+ max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+ if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz)
+ max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+ if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz)
+ max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
+ if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+ if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+ if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
+ max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+ if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz)
+ max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
+ }
+
+	/* Scan through the clock values we currently have; if any of them are 0,
+	 * populate them from the dcn3_2_soc.clock_limits[] values.
+	 *
+	 * Do this for DCFCLK, DISPCLK, DTBCLK and UCLK, as any of those being
+	 * 0 will cause the clock table build to be skipped.
+	 */
+ if (max_dcfclk_mhz == 0)
+ bw_params->clk_table.entries[0].dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
+ if (max_dispclk_mhz == 0)
+ bw_params->clk_table.entries[0].dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz;
+ if (max_dtbclk_mhz == 0)
+ bw_params->clk_table.entries[0].dtbclk_mhz = dcn3_2_soc.clock_limits[0].dtbclk_mhz;
+ if (max_uclk_mhz == 0)
+ bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16;
+}
+
static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
{
@@ -2211,7 +2268,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
return 0;
}
-/**
+/*
* dcn32_update_bw_bounding_box
*
* This would override some dcn3_2 ip_or_soc initial parameters hardcoded from
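
dcn32_patch_dpm_table() only inspects the per-clock column maxima and only seeds entry 0 of a clock whose column is all zero. Its behavior for a single clock, in isolation (the 1564 MHz fallback is an assumed stand-in for dcn3_2_soc.clock_limits[0].dcfclk_mhz):

#include <stdio.h>

#define MAX_NUM_DPM_LVL 8
#define FALLBACK_DCFCLK_MHZ 1564	/* assumed soc bounding-box fallback */

struct clk_entry { unsigned int dcfclk_mhz; };

static void patch_dpm_table(struct clk_entry *entries)
{
	unsigned int max_dcfclk_mhz = 0;

	for (int i = 0; i < MAX_NUM_DPM_LVL; i++)
		if (entries[i].dcfclk_mhz > max_dcfclk_mhz)
			max_dcfclk_mhz = entries[i].dcfclk_mhz;

	/* An all-zero column would abort the clock-table build; seed level 0. */
	if (max_dcfclk_mhz == 0)
		entries[0].dcfclk_mhz = FALLBACK_DCFCLK_MHZ;
}

int main(void)
{
	struct clk_entry table[MAX_NUM_DPM_LVL] = {0};

	patch_dpm_table(table);
	printf("level 0 dcfclk: %u MHz\n", table[0].dcfclk_mhz);
	return 0;
}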
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
index 732ed7fadb8a..3a3dc2ce4c73 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
@@ -71,4 +71,6 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
int pipe_cnt,
int vlevel);
+void dcn32_patch_dpm_table(struct clk_bw_params *bw_params);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 75be1e1ce543..5b91660a6496 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -733,6 +733,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
mode_lib->vba.FCLKChangeLatency, v->UrgentLatency,
mode_lib->vba.SREnterPlusExitTime);
+ memset(&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, 0, sizeof(DmlPipe));
+
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.Dppclk = mode_lib->vba.DPPCLK[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.Dispclk = mode_lib->vba.DISPCLK;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.PixelClock = mode_lib->vba.PixelClock[k];
@@ -2252,9 +2254,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0
|| mode_lib->vba.DSCInputBitPerComponent[k] == 10.0
- || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0
- || mode_lib->vba.DSCInputBitPerComponent[k] >
- mode_lib->vba.MaximumDSCBitsPerComponent)) {
+ || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)
+ || mode_lib->vba.DSCInputBitPerComponent[k] > mode_lib->vba.MaximumDSCBitsPerComponent) {
mode_lib->vba.NonsupportedDSCInputBPC = true;
}
}
@@ -2330,16 +2331,15 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if (mode_lib->vba.OutputMultistreamId[k] == k && mode_lib->vba.ForcedOutputLinkBPP[k] == 0)
mode_lib->vba.BPPForMultistreamNotIndicated = true;
for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j) {
- if (mode_lib->vba.OutputMultistreamId[k] == j && mode_lib->vba.OutputMultistreamEn[k]
+ if (mode_lib->vba.OutputMultistreamId[k] == j
&& mode_lib->vba.ForcedOutputLinkBPP[k] == 0)
mode_lib->vba.BPPForMultistreamNotIndicated = true;
}
}
if ((mode_lib->vba.Output[k] == dm_edp || mode_lib->vba.Output[k] == dm_hdmi)) {
- if (mode_lib->vba.OutputMultistreamId[k] == k && mode_lib->vba.OutputMultistreamEn[k])
+ if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == k)
mode_lib->vba.MultistreamWithHDMIOreDP = true;
-
for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j) {
if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == j)
mode_lib->vba.MultistreamWithHDMIOreDP = true;
@@ -2478,8 +2478,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PixelClock[k], mode_lib->vba.PixelClockBackEnd[k]);
}
- m = 0;
-
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
for (m = 0; m <= mode_lib->vba.NumberOfActiveSurfaces - 1; m++) {
for (j = 0; j <= mode_lib->vba.NumberOfActiveSurfaces - 1; j++) {
@@ -2856,8 +2854,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- m = 0;
-
//Calculate Return BW
for (i = 0; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
@@ -3618,11 +3614,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.ModeIsSupported = mode_lib->vba.ModeSupport[i][0] == true
|| mode_lib->vba.ModeSupport[i][1] == true;
- if (mode_lib->vba.ModeSupport[i][0] == true) {
+ if (mode_lib->vba.ModeSupport[i][0] == true)
MaximumMPCCombine = 0;
- } else {
+ else
MaximumMPCCombine = 1;
- }
}
}
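
The DSC bits-per-component change above fixes an operator-precedence bug: the old `> MaximumDSCBitsPerComponent` term sat inside the negated OR, so a BPC of exactly 8, 10 or 12 was accepted even when it exceeded the cap. The corrected predicate, standalone:

#include <stdbool.h>

/* BPC is unsupported unless it is exactly 8, 10 or 12, and never above the max. */
static bool dsc_input_bpc_unsupported(double bpc, double max_bpc)
{
	return !(bpc == 12.0 || bpc == 10.0 || bpc == 8.0) || bpc > max_bpc;
}

int main(void)
{
	/* 12 bpc with a 10 bpc cap is now correctly rejected. */
	return dsc_input_bpc_unsupported(12.0, 10.0) ? 0 : 1;
}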
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index f5400eda07a5..4125d3d111d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -114,6 +114,7 @@ void dml_init_instance(struct display_mode_lib *lib,
break;
case DML_PROJECT_DCN31:
case DML_PROJECT_DCN31_FPGA:
+ case DML_PROJECT_DCN315:
lib->funcs = dml31_funcs;
break;
case DML_PROJECT_DCN314:
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index b1878a1440e2..3d643d50c3eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -40,6 +40,7 @@ enum dml_project {
DML_PROJECT_DCN21,
DML_PROJECT_DCN30,
DML_PROJECT_DCN31,
+ DML_PROJECT_DCN315,
DML_PROJECT_DCN31_FPGA,
DML_PROJECT_DCN314,
DML_PROJECT_DCN32,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 8919a2092ac5..9498105c98ab 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -39,6 +39,8 @@
#include "panel_cntl.h"
#define MAX_CLOCK_SOURCES 7
+#define MAX_SVP_PHANTOM_STREAMS 2
+#define MAX_SVP_PHANTOM_PLANES 2
void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
uint32_t controller_id);
@@ -232,6 +234,7 @@ struct resource_funcs {
unsigned int index);
bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context);
+ void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
};
struct audio_support{
@@ -438,7 +441,6 @@ struct pipe_ctx {
union pipe_update_flags update_flags;
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
- bool vtp_locked;
};
/* Data used for dynamic link encoder assignment.
@@ -492,6 +494,8 @@ struct dcn_bw_output {
struct dcn_watermark_set watermarks;
struct dcn_bw_writeback bw_writeback;
int compbuf_size_kb;
+ unsigned int legacy_svp_drr_stream_index;
+ bool legacy_svp_drr_stream_index_valid;
};
union bw_output {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 6682d9e181c6..b304d450b038 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -194,6 +194,11 @@ enum dc_status dpcd_configure_lttpr_mode(
enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings);
bool dp_retrieve_lttpr_cap(struct dc_link *link);
+bool dp_is_lttpr_present(struct dc_link *link);
+enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting);
+void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override);
+enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link);
+enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link);
bool dpcd_write_128b_132b_sst_payload_allocation_table(
const struct dc_stream_state *stream,
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 806f3041db14..9e4ddc985240 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -628,8 +628,23 @@ unsigned int dcn_find_dcfclk_suits_all(
const struct dc *dc,
struct dc_clocks *clocks);
-void dcn_bw_update_from_pplib(struct dc *dc);
-void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
+void dcn_get_soc_clks(
+ struct dc *dc,
+ int *min_fclk_khz,
+ int *min_dcfclk_khz,
+ int *socclk_khz);
+
+void dcn_bw_update_from_pplib_fclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *fclks);
+void dcn_bw_update_from_pplib_dcfclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *dcfclks);
+void dcn_bw_notify_pplib_of_wm_ranges(
+ struct dc *dc,
+ int min_fclk_khz,
+ int min_dcfclk_khz,
+ int socclk_khz);
void dcn_bw_sync_calcs_and_dml(struct dc *dc);
enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index d9f1b0a4fbd4..591ab1389e3b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -95,10 +95,23 @@ struct clk_limit_table_entry {
unsigned int wck_ratio;
};
+struct clk_limit_num_entries {
+ unsigned int num_dcfclk_levels;
+ unsigned int num_fclk_levels;
+ unsigned int num_memclk_levels;
+ unsigned int num_socclk_levels;
+ unsigned int num_dtbclk_levels;
+ unsigned int num_dispclk_levels;
+ unsigned int num_dppclk_levels;
+ unsigned int num_phyclk_levels;
+ unsigned int num_phyclk_d18_levels;
+};
+
/* This table is contiguous */
struct clk_limit_table {
struct clk_limit_table_entry entries[MAX_NUM_DPM_LVL];
- unsigned int num_entries;
+ struct clk_limit_num_entries num_entries_per_clk;
+ unsigned int num_entries; /* highest populated dpm level, kept for backwards compatibility */
};
struct wm_range_table_entry {
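The split above is worth noting: num_entries_per_clk lets each clock domain report its own DPM level count, while the legacy num_entries stays for existing consumers. A minimal sketch of how the legacy count could be kept consistent with the per-clock counts (standalone C; the helper name is made up and only a subset of the domains is shown):

#include <stdio.h>

/* Mirrors a subset of struct clk_limit_num_entries above. */
struct clk_limit_num_entries {
	unsigned int num_dcfclk_levels;
	unsigned int num_fclk_levels;
	unsigned int num_memclk_levels;
	unsigned int num_socclk_levels;
};

/* Hypothetical helper: the legacy count is the deepest DPM table. */
static unsigned int legacy_num_entries(const struct clk_limit_num_entries *n)
{
	unsigned int max = n->num_dcfclk_levels;

	if (n->num_fclk_levels > max)
		max = n->num_fclk_levels;
	if (n->num_memclk_levels > max)
		max = n->num_memclk_levels;
	if (n->num_socclk_levels > max)
		max = n->num_socclk_levels;
	return max;
}

int main(void)
{
	struct clk_limit_num_entries n = { 4, 4, 3, 4 }; /* example counts */

	printf("legacy num_entries = %u\n", legacy_num_entries(&n));
	return 0;
}

Keeping the old field means no caller has to change while the per-clock counts are adopted incrementally.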
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 68c2ed434d2c..cff5fd55a0ad 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -340,6 +340,8 @@ struct clk_mgr_internal {
bool smu_present;
void *wm_range_table;
long long wm_range_table_addr;
+
+ bool dpm_present;
};
struct clk_mgr_internal_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
new file mode 100644
index 000000000000..45645f9fd86c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2022 Advanced Micro Devices, Inc. All rights reserved. */
+
+#ifndef __DAL_CURSOR_CACHE_H__
+#define __DAL_CURSOR_CACHE_H__
+
+union reg_cursor_control_cfg {
+ struct {
+ uint32_t cur_enable: 1;
+ uint32_t reser0: 3;
+ uint32_t cur_2x_magnify: 1;
+ uint32_t reser1: 3;
+ uint32_t mode: 3;
+ uint32_t reser2: 5;
+ uint32_t pitch: 2;
+ uint32_t reser3: 6;
+ uint32_t line_per_chunk: 5;
+ uint32_t reser4: 3;
+ } bits;
+ uint32_t raw;
+};
+struct cursor_position_cache_hubp {
+ union reg_cursor_control_cfg cur_ctl;
+ union reg_position_cfg {
+ struct {
+ uint32_t x_pos: 16;
+ uint32_t y_pos: 16;
+ } bits;
+ uint32_t raw;
+ } position;
+ union reg_hot_spot_cfg {
+ struct {
+ uint32_t x_hot: 16;
+ uint32_t y_hot: 16;
+ } bits;
+ uint32_t raw;
+ } hot_spot;
+ union reg_dst_offset_cfg {
+ struct {
+ uint32_t dst_x_offset: 13;
+ uint32_t reserved: 19;
+ } bits;
+ uint32_t raw;
+ } dst_offset;
+};
+
+struct cursor_attribute_cache_hubp {
+ uint32_t SURFACE_ADDR_HIGH;
+ uint32_t SURFACE_ADDR;
+ union reg_cursor_control_cfg cur_ctl;
+ union reg_cursor_size_cfg {
+ struct {
+ uint32_t width: 16;
+ uint32_t height: 16;
+ } bits;
+ uint32_t raw;
+ } size;
+ union reg_cursor_settings_cfg {
+ struct {
+ uint32_t dst_y_offset: 8;
+ uint32_t chunk_hdl_adjust: 2;
+ uint32_t reserved: 22;
+ } bits;
+ uint32_t raw;
+ } settings;
+};
+
+struct cursor_rect {
+ uint32_t x;
+ uint32_t y;
+ uint32_t w;
+ uint32_t h;
+};
+
+union reg_cur0_control_cfg {
+ struct {
+ uint32_t cur0_enable: 1;
+ uint32_t expansion_mode: 1;
+ uint32_t reser0: 1;
+ uint32_t cur0_rom_en: 1;
+ uint32_t mode: 3;
+ uint32_t reserved: 25;
+ } bits;
+ uint32_t raw;
+};
+struct cursor_position_cache_dpp {
+ union reg_cur0_control_cfg cur0_ctl;
+};
+
+struct cursor_attribute_cache_dpp {
+ union reg_cur0_control_cfg cur0_ctl;
+};
+
+struct cursor_attributes_cfg {
+ struct cursor_attribute_cache_hubp aHubp;
+ struct cursor_attribute_cache_dpp aDpp;
+};
+
+#endif
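The register caches in this new header all follow one pattern: a union pairs a bits view that mirrors the hardware field layout with a raw 32-bit view, so a cached value can be compared against or written to a register in a single access. A standalone sketch of the pattern (field widths copied from reg_cursor_control_cfg above; the programmed values are made up):

#include <stdint.h>
#include <stdio.h>

/* Same layout as union reg_cursor_control_cfg above. */
union cursor_control {
	struct {
		uint32_t cur_enable: 1;
		uint32_t reser0: 3;
		uint32_t cur_2x_magnify: 1;
		uint32_t reser1: 3;
		uint32_t mode: 3;
		uint32_t reser2: 5;
		uint32_t pitch: 2;
		uint32_t reser3: 6;
		uint32_t line_per_chunk: 5;
		uint32_t reser4: 3;
	} bits;	/* bitfield order is compiler-dependent; the driver targets a known ABI */
	uint32_t raw;
};

int main(void)
{
	union cursor_control ctl = { .raw = 0 };

	/* Program individual fields... */
	ctl.bits.cur_enable = 1;
	ctl.bits.mode = 2;	/* example value only */

	/* ...then the raw view yields the word for one register write. */
	printf("register word: 0x%08x\n", (unsigned)ctl.raw);
	return 0;
}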
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 3ef7faa92052..dcb80c4747b0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -28,6 +28,7 @@
#define __DAL_DPP_H__
#include "transform.h"
+#include "cursor_reg_cache.h"
union defer_reg_writes {
struct {
@@ -58,6 +59,9 @@ struct dpp {
struct pwl_params shaper_params;
bool cm_bypass_mode;
+
+ struct cursor_position_cache_dpp pos;
+ struct cursor_attribute_cache_dpp att;
};
struct dpp_input_csc_matrix {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 44c4578193a3..d5ea7545583e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -27,6 +27,7 @@
#define __DAL_HUBP_H__
#include "mem_input.h"
+#include "cursor_reg_cache.h"
#define OPP_ID_INVALID 0xf
#define MAX_TTU 0xffffff
@@ -65,6 +66,10 @@ struct hubp {
struct dc_cursor_attributes curs_attr;
struct dc_cursor_position curs_pos;
bool power_gated;
+
+ struct cursor_position_cache_hubp pos;
+ struct cursor_attribute_cache_hubp att;
+ struct cursor_rect cur_rect;
};
struct surface_flip_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 72eef7a5ed83..25a1df45b264 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -209,7 +209,6 @@ struct timing_generator_funcs {
void (*set_blank)(struct timing_generator *tg,
bool enable_blanking);
bool (*is_blanked)(struct timing_generator *tg);
- bool (*is_locked)(struct timing_generator *tg);
void (*set_overscan_blank_color) (struct timing_generator *tg, const struct tg_color *color);
void (*set_blank_color)(struct timing_generator *tg, const struct tg_color *color);
void (*set_colors)(struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 416b700a8d5a..d04b68dad413 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -32,11 +32,6 @@
#include "inc/hw/link_encoder.h"
#include "core_status.h"
-enum vline_select {
- VLINE0,
- VLINE1
-};
-
struct pipe_ctx;
struct dc_state;
struct dc_stream_status;
@@ -89,6 +84,7 @@ struct hw_sequencer_funcs {
struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
void (*power_down)(struct dc *dc);
+ void (*update_dsc_pg)(struct dc *dc, struct dc_state *context, bool safe_to_disable);
/* Pipe Lock Related */
void (*pipe_control_lock)(struct dc *dc,
@@ -117,8 +113,7 @@ struct hw_sequencer_funcs {
int group_index, int group_size,
struct pipe_ctx *grouped_pipes[]);
void (*setup_periodic_interrupt)(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum vline_select vline);
+ struct pipe_ctx *pipe_ctx);
void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
struct dc_crtc_timing_adjust adjust);
void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index 1cdea0efe5c1..a4d61bb724b6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -124,6 +124,8 @@ struct hwseq_private_funcs {
void (*dsc_pg_control)(struct dce_hwseq *hws,
unsigned int dsc_inst,
bool power_on);
+ bool (*dsc_pg_status)(struct dce_hwseq *hws,
+ unsigned int dsc_inst);
void (*update_odm)(struct dc *dc, struct dc_state *context,
struct pipe_ctx *pipe_ctx);
void (*program_all_writeback_pipes_in_tree)(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index c37d1141febe..5040836f404d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -230,4 +230,10 @@ const struct link_hwss *get_link_hwss(const struct dc_link *link,
bool is_h_timing_divisible_by_2(struct dc_stream_state *stream);
+bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
+ const struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pri_pipe,
+ struct pipe_ctx *sec_pipe,
+ bool odm);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
index 7d3147175ca2..153a88381f2c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
@@ -111,7 +111,7 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);
dto_params.otg_inst = tg->inst;
- dto_params.pixclk_khz = pipe_ctx->stream->phy_pix_clk;
+ dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
index 9522fe0b36c9..4f7f99156897 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
@@ -37,7 +37,7 @@ void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx)
{
}
-void virtual_disable_link_output(struct dc_link *link,
+static void virtual_disable_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index f34c45b19fcb..eb5b7eb292ef 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -248,6 +248,7 @@ struct dmub_srv_hw_params {
bool disable_dpia;
bool usb4_cm_version;
bool fw_in_system_memory;
+ bool dpia_hpd_int_enable_supported;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 6b9a529e9f12..7a8f61517424 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -400,8 +400,9 @@ union dmub_fw_boot_options {
uint32_t diag_env: 1; /* 1 if diagnostic environment */
uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/
uint32_t usb4_cm_version: 1; /**< 1 CM support */
+ uint32_t dpia_hpd_int_enable_supported: 1; /**< 1 if DPIA HPD interrupt enable is supported */
- uint32_t reserved : 17; /**< reserved */
+ uint32_t reserved : 16; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
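Note the bookkeeping in this hunk: the new 1-bit flag is paid for by shrinking reserved from 17 to 16, so the bits struct still fills exactly 32 bits and stays in sync with the all view. A compile-time check of that invariant (a sketch; the union here is abbreviated, not the real layout):

#include <assert.h>
#include <stdint.h>

union fw_boot_options {
	struct {
		uint32_t usb4_cm_version: 1;
		uint32_t dpia_hpd_int_enable_supported: 1; /* the new flag */
		uint32_t reserved: 30; /* abbreviated; pads to 32 bits */
	} bits;
	uint32_t all;
};

/* If a flag is added without shrinking 'reserved', this fails to build. */
static_assert(sizeof(union fw_boot_options) == sizeof(uint32_t),
	      "boot options must stay one 32-bit word");

int main(void)
{
	return 0;
}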
@@ -728,6 +729,12 @@ enum dmub_cmd_type {
/**
* Command type used for all VBIOS interface commands.
*/
+
+ /**
+ * Command type used to set DPIA HPD interrupt state
+ */
+ DMUB_CMD__DPIA_HPD_INT_ENABLE = 86,
+
DMUB_CMD__VBIOS = 128,
};
@@ -1256,6 +1263,14 @@ struct dmub_rb_cmd_set_mst_alloc_slots {
};
/**
+ * DMUB command structure for DPIA HPD int enable control.
+ */
+struct dmub_rb_cmd_dpia_hpd_int_enable {
+ struct dmub_cmd_header header; /**< header */
+ uint32_t enable; /**< DPIA HPD interrupt enable */
+};
+
+/**
* struct dmub_rb_cmd_dpphy_init - DPPHY init.
*/
struct dmub_rb_cmd_dpphy_init {
@@ -2084,7 +2099,99 @@ struct dmub_rb_cmd_update_dirty_rect {
/**
* Data passed from driver to FW in a DMUB_CMD__UPDATE_CURSOR_INFO command.
*/
-struct dmub_cmd_update_cursor_info_data {
+union dmub_reg_cursor_control_cfg {
+ struct {
+ uint32_t cur_enable: 1;
+ uint32_t reser0: 3;
+ uint32_t cur_2x_magnify: 1;
+ uint32_t reser1: 3;
+ uint32_t mode: 3;
+ uint32_t reser2: 5;
+ uint32_t pitch: 2;
+ uint32_t reser3: 6;
+ uint32_t line_per_chunk: 5;
+ uint32_t reser4: 3;
+ } bits;
+ uint32_t raw;
+};
+struct dmub_cursor_position_cache_hubp {
+ union dmub_reg_cursor_control_cfg cur_ctl;
+ union dmub_reg_position_cfg {
+ struct {
+ uint32_t cur_x_pos: 16;
+ uint32_t cur_y_pos: 16;
+ } bits;
+ uint32_t raw;
+ } position;
+ union dmub_reg_hot_spot_cfg {
+ struct {
+ uint32_t hot_x: 16;
+ uint32_t hot_y: 16;
+ } bits;
+ uint32_t raw;
+ } hot_spot;
+ union dmub_reg_dst_offset_cfg {
+ struct {
+ uint32_t dst_x_offset: 13;
+ uint32_t reserved: 19;
+ } bits;
+ uint32_t raw;
+ } dst_offset;
+};
+
+union dmub_reg_cur0_control_cfg {
+ struct {
+ uint32_t cur0_enable: 1;
+ uint32_t expansion_mode: 1;
+ uint32_t reser0: 1;
+ uint32_t cur0_rom_en: 1;
+ uint32_t mode: 3;
+ uint32_t reserved: 25;
+ } bits;
+ uint32_t raw;
+};
+struct dmub_cursor_position_cache_dpp {
+ union dmub_reg_cur0_control_cfg cur0_ctl;
+};
+struct dmub_cursor_position_cfg {
+ struct dmub_cursor_position_cache_hubp pHubp;
+ struct dmub_cursor_position_cache_dpp pDpp;
+ uint8_t pipe_idx;
+ /* Padding required to keep the structure 4-byte aligned. */
+ uint8_t padding[3];
+};
+
+struct dmub_cursor_attribute_cache_hubp {
+ uint32_t SURFACE_ADDR_HIGH;
+ uint32_t SURFACE_ADDR;
+ union dmub_reg_cursor_control_cfg cur_ctl;
+ union dmub_reg_cursor_size_cfg {
+ struct {
+ uint32_t width: 16;
+ uint32_t height: 16;
+ } bits;
+ uint32_t raw;
+ } size;
+ union dmub_reg_cursor_settings_cfg {
+ struct {
+ uint32_t dst_y_offset: 8;
+ uint32_t chunk_hdl_adjust: 2;
+ uint32_t reserved: 22;
+ } bits;
+ uint32_t raw;
+ } settings;
+};
+struct dmub_cursor_attribute_cache_dpp {
+ union dmub_reg_cur0_control_cfg cur0_ctl;
+};
+struct dmub_cursor_attributes_cfg {
+ struct dmub_cursor_attribute_cache_hubp aHubp;
+ struct dmub_cursor_attribute_cache_dpp aDpp;
+};
+
+struct dmub_cmd_update_cursor_payload0 {
/**
* Cursor dirty rects.
*/
@@ -2111,6 +2218,20 @@ struct dmub_cmd_update_cursor_info_data {
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
+ /**
+ * Cursor Position Register.
+ * Registers contains Hubp & Dpp modules
+ */
+ struct dmub_cursor_position_cfg position_cfg;
+};
+
+struct dmub_cmd_update_cursor_payload1 {
+ struct dmub_cursor_attributes_cfg attribute_cfg;
+};
+
+union dmub_cmd_update_cursor_info_data {
+ struct dmub_cmd_update_cursor_payload0 payload0;
+ struct dmub_cmd_update_cursor_payload1 payload1;
};
/**
* Definition of a DMUB_CMD__UPDATE_CURSOR_INFO command.
@@ -2123,7 +2244,7 @@ struct dmub_rb_cmd_update_cursor_info {
/**
* Data passed from driver to FW in a DMUB_CMD__UPDATE_CURSOR_INFO command.
*/
- struct dmub_cmd_update_cursor_info_data update_cursor_info_data;
+ union dmub_cmd_update_cursor_info_data update_cursor_info_data;
};
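Turning update_cursor_info_data into a union of payload0 (dirty rects plus the new position registers) and payload1 (attributes) presumably lets the enlarged cursor state be sent as two consecutive commands while the ring-buffer command itself only reserves space for the larger payload. The idea in isolation (struct contents reduced to placeholders):

#include <stdint.h>
#include <stdio.h>

struct payload0 { uint32_t position_words[16]; };	/* placeholder sizes */
struct payload1 { uint32_t attribute_words[12]; };

union cursor_info_data {
	struct payload0 p0;
	struct payload1 p1;
};

int main(void)
{
	/* The command reserves space for the larger payload only. */
	printf("union size: %zu bytes (max of %zu and %zu)\n",
	       sizeof(union cursor_info_data),
	       sizeof(struct payload0), sizeof(struct payload1));
	return 0;
}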
/**
@@ -2820,11 +2941,7 @@ struct dmub_rb_cmd_get_visual_confirm_color {
struct dmub_optc_state {
uint32_t v_total_max;
uint32_t v_total_min;
- uint32_t v_total_mid;
- uint32_t v_total_mid_frame_num;
uint32_t tg_inst;
- uint32_t enable_manual_trigger;
- uint32_t clear_force_vsync;
};
struct dmub_rb_cmd_drr_update {
@@ -3230,6 +3347,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__QUERY_HPD_STATE command.
*/
struct dmub_rb_cmd_query_hpd_state query_hpd;
+ /**
+ * Definition of a DMUB_CMD__DPIA_HPD_INT_ENABLE command.
+ */
+ struct dmub_rb_cmd_dpia_hpd_int_enable dpia_hpd_int_enable;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index c7bd7e216710..c90b9ee42e12 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -350,6 +350,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.dpia_supported = params->dpia_supported;
boot_options.bits.enable_dpia = params->disable_dpia ? 0 : 1;
boot_options.bits.usb4_cm_version = params->usb4_cm_version;
+ boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index d76ab72baf0c..d1e91d31d151 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -83,6 +83,7 @@ enum link_training_result {
};
enum lttpr_mode {
+ LTTPR_MODE_UNKNOWN,
LTTPR_MODE_NON_LTTPR,
LTTPR_MODE_TRANSPARENT,
LTTPR_MODE_NON_TRANSPARENT,
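LTTPR_MODE_UNKNOWN added here pairs with the dp_decide_*_lttpr_mode declarations earlier in the series: it gives the link a defined "no decision yet" state, and dp_decide_lttpr_mode presumably dispatches to the 8b/10b or 128b/132b variant based on the link encoding. A sketch of that dispatch shape (my reading of the declarations, not confirmed by this hunk; the real functions take a dc_link):

#include <stdio.h>

enum lttpr_mode {
	LTTPR_MODE_UNKNOWN,	/* new default before a decision is made */
	LTTPR_MODE_NON_LTTPR,
	LTTPR_MODE_TRANSPARENT,
	LTTPR_MODE_NON_TRANSPARENT,
};

enum dp_link_encoding { DP_8B_10B_ENCODING, DP_128B_132B_ENCODING };

/* Hypothetical per-encoding deciders, stubbed on LTTPR presence only. */
static enum lttpr_mode decide_8b_10b(int lttpr_present)
{
	return lttpr_present ? LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_NON_LTTPR;
}

static enum lttpr_mode decide_128b_132b(int lttpr_present)
{
	return lttpr_present ? LTTPR_MODE_TRANSPARENT : LTTPR_MODE_NON_LTTPR;
}

static enum lttpr_mode decide_lttpr_mode(enum dp_link_encoding enc, int present)
{
	if (enc == DP_128B_132B_ENCODING)
		return decide_128b_132b(present);
	return decide_8b_10b(present);
}

int main(void)
{
	printf("mode = %d\n", decide_lttpr_mode(DP_8B_10B_ENCODING, 1));
	return 0;
}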
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 04f7656906ca..447a0ec9cbe2 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1692,7 +1692,7 @@ static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma
struct pwl_float_data_ex *rgb = rgb_regamma;
const struct hw_x_point *coord_x = coordinates_x;
- build_coefficients(&coeff, true);
+ build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB);
i = 0;
while (i != hw_points_num + 1) {
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
index d8632ccf3494..c488d4a50cf4 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
@@ -4409,6 +4409,10 @@
#define mmVMSHAREDPF0_MC_VM_XGMI_LFB_SIZE_BASE_IDX 1
#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL 0x0af9
#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL_BASE_IDX 1
+#define mmMC_VM_XGMI_LFB_CNTL 0x0823
+#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0
+#define mmMC_VM_XGMI_LFB_SIZE 0x0824
+#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0
// addressBlock: mmhub_utcl2_vmsharedvcdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
index 111a71b434e2..2969fbf282b7 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
@@ -26728,6 +26728,14 @@
//VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL
#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE__SHIFT 0x0
#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE_MASK 0x00000001L
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x3
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000038L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL
// addressBlock: mmhub_utcl2_vmsharedvcdec
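The new __SHIFT/_MASK pairs follow the usual decode pattern: mask out the field, then shift it down. Extracting both fields from an MC_VM_XGMI_LFB_CNTL readback, in isolation (standalone C; the register value is made up):

#include <stdint.h>
#include <stdio.h>

#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x3
#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK   0x00000007L
#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK   0x00000038L

/* Generic field decode: isolate the bits, then normalize to bit 0. */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t reg = 0x0000001a;	/* example readback, not real hardware */

	printf("PF_LFB_REGION = %u\n",
	       get_field(reg, MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK,
			 MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT));
	printf("PF_MAX_REGION = %u\n",
	       get_field(reg, MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK,
			 MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT));
	return 0;
}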
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
index b798cf5a2c39..38adde3cae5a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
@@ -29,5 +29,7 @@
#define regMCA_UMC_UMC0_MCUMC_STATUST0_BASE_IDX 2
#define regMCA_UMC_UMC0_MCUMC_ADDRT0 0x03c4
#define regMCA_UMC_UMC0_MCUMC_ADDRT0_BASE_IDX 2
+#define regUMCCH0_0_GeccCtrl 0x0053
+#define regUMCCH0_0_GeccCtrl_BASE_IDX 2
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
index bd99b431247f..4dbec524f943 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
@@ -90,5 +90,8 @@
#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr__SHIFT 0x0
#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved__SHIFT 0x38
#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL
+//UMCCH0_0_GeccCtrl
+#define UMCCH0_0_GeccCtrl__UCFatalEn__SHIFT 0xd
+#define UMCCH0_0_GeccCtrl__UCFatalEn_MASK 0x00002000L
#endif
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index e85364dff4e0..5cb3e8634739 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -262,8 +262,9 @@ struct kfd2kgd_calls {
uint32_t queue_id);
int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd,
- uint32_t reset_type, unsigned int timeout,
- uint32_t pipe_id, uint32_t queue_id);
+ enum kfd_preempt_type reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id);
bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd);
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index 50bfa513cb35..7e85cdc5bd34 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -269,7 +269,8 @@ union MESAPI__ADD_QUEUE {
uint32_t map_kiq_utility_queue : 1;
uint32_t is_kfd_process : 1;
uint32_t trap_en : 1;
- uint32_t reserved : 21;
+ uint32_t is_aql_queue : 1;
+ uint32_t reserved : 20;
};
struct MES_API_STATUS api_status;
uint64_t tma_addr;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 948cc75376f8..236657eece47 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -3362,11 +3362,11 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if (adev->pm.sysfs_initialized)
return 0;
+ INIT_LIST_HEAD(&adev->pm.pm_attr_list);
+
if (adev->pm.dpm_enabled == 0)
return 0;
- INIT_LIST_HEAD(&adev->pm.pm_attr_list);
-
adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
DRIVER_NAME, adev,
hwmon_groups);
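Moving INIT_LIST_HEAD() ahead of the dpm_enabled early return keeps pm_attr_list in a defined state even when DPM never comes up, which presumably protects the teardown path when it later walks the list. The hazard in miniature (a sketch with a toy list, not the kernel's list API):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void init_list_head(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

struct device_pm {
	int dpm_enabled;
	struct list_head attr_list;
};

static int sysfs_init(struct device_pm *pm)
{
	init_list_head(&pm->attr_list);	/* init first, before any early return */

	if (!pm->dpm_enabled)
		return 0;	/* list is still valid, just empty */
	/* ... register attributes onto attr_list ... */
	return 0;
}

static void sysfs_fini(struct device_pm *pm)
{
	/* Safe even if init returned early: an initialized list is empty. */
	if (!list_empty(&pm->attr_list))
		printf("removing attributes\n");
}

int main(void)
{
	struct device_pm pm = { .dpm_enabled = 0 };

	sysfs_init(&pm);
	sysfs_fini(&pm);
	return 0;
}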
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 8fd0782a2b20..f5e08b60f66e 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -1384,13 +1384,16 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
static void kv_dpm_disable(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
+ int err;
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
- amdgpu_kv_smc_bapm_enable(adev, false);
+ err = amdgpu_kv_smc_bapm_enable(adev, false);
+ if (err)
+ DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
if (adev->asic_type == CHIP_MULLINS)
kv_enable_nb_dpm(adev, false);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 1eb4e613b27a..ec055858eb95 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1485,6 +1485,7 @@ static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
struct pp_hwmgr *hwmgr = handle;
struct amdgpu_device *adev = hwmgr->adev;
+ int err;
if (!addr || !size)
return -EINVAL;
@@ -1492,7 +1493,9 @@ static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
*addr = NULL;
*size = 0;
if (adev->pm.smu_prv_buffer) {
- amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
+ err = amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
+ if (err)
+ return err;
*size = adev->pm.smu_prv_buffer_size;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index e4fcbf8a7eb5..7ef7e81525a3 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -3603,7 +3603,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
return -EINVAL);
PP_ASSERT_WITH_CODE(
- (smu7_power_state->performance_level_count <=
+ (smu7_power_state->performance_level_count <
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
"Performance levels exceeds Driver limit!",
return -EINVAL);
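The <= to < change tightens an off-by-one: a level is appended right after this check, so a count already equal to the platform limit would write one entry past the level array. The corrected bounds check in miniature (a standalone sketch, not the hwmgr code):

#include <stdio.h>

#define MAX_LEVELS 8

struct power_state {
	unsigned int performance_level_count;
	int levels[MAX_LEVELS];
};

/* Append only if one more level still fits: count < MAX, not count <= MAX. */
static int add_level(struct power_state *ps, int value)
{
	if (!(ps->performance_level_count < MAX_LEVELS))
		return -1;	/* would overflow levels[] */
	ps->levels[ps->performance_level_count++] = value;
	return 0;
}

int main(void)
{
	struct power_state ps = { 0 };
	int i;

	for (i = 0; i < 10; i++)
		if (add_level(&ps, i))
			printf("rejected level %d\n", i);
	return 0;
}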
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 99bfe5efe171..c8c9fb827bda 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -3155,7 +3155,7 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
return -1);
PP_ASSERT_WITH_CODE(
- (vega10_ps->performance_level_count <=
+ (vega10_ps->performance_level_count <
hwmgr->platform_descriptor.
hardwareActivityPerformanceLevels),
"Performance levels exceeds Driver limit!",
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
index 190af79f3236..dad3e3741a4e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
@@ -67,21 +67,22 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t *speed)
{
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t duty100, duty;
- uint64_t tmp64;
+ uint32_t current_rpm;
+ uint32_t percent = 0;
- duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
- CG_FDO_CTRL1, FMAX_DUTY100);
- duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
- CG_THERMAL_STATUS, FDO_PWM_DUTY);
+ if (hwmgr->thermal_controller.fanInfo.bNoFan)
+ return 0;
- if (!duty100)
- return -EINVAL;
+ if (vega10_get_current_rpm(hwmgr, &current_rpm))
+ return -1;
+
+ if (hwmgr->thermal_controller.
+ advanceFanControlParameters.usMaxFanRPM != 0)
+ percent = current_rpm * 255 /
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.usMaxFanRPM;
- tmp64 = (uint64_t)duty * 255;
- do_div(tmp64, duty100);
- *speed = MIN((uint32_t)tmp64, 255);
+ *speed = MIN(percent, 255);
return 0;
}
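The rewritten getter derives the PWM value from a tachometer reading instead of the duty-cycle registers: the current RPM is scaled against the board's maximum RPM and mapped onto the 0-255 range hwmon expects. The arithmetic in isolation (a sketch with made-up numbers):

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Map an RPM reading onto the 0..255 PWM scale used by hwmon. */
static uint32_t rpm_to_pwm(uint32_t current_rpm, uint32_t max_rpm)
{
	uint32_t percent = 0;

	if (max_rpm != 0)
		percent = current_rpm * 255 / max_rpm;
	return MIN(percent, 255);	/* clamp readings above max_rpm */
}

int main(void)
{
	/* Example: 1650 RPM on a fan rated for 3300 RPM -> half scale. */
	printf("pwm = %u\n", rpm_to_pwm(1650, 3300));
	return 0;
}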
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 13c5c7f1ecb9..4fe75dd2b329 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1314,8 +1314,8 @@ static int smu_smc_hw_setup(struct smu_context *smu)
ret = smu_enable_thermal_alert(smu);
if (ret) {
- dev_err(adev->dev, "Failed to enable thermal alert!\n");
- return ret;
+ dev_err(adev->dev, "Failed to enable thermal alert!\n");
+ return ret;
}
ret = smu_notify_display_change(smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
index ae2d337158f3..f77401709d83 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 5
+#define PMFW_DRIVER_IF_VERSION 7
typedef struct {
int32_t value;
@@ -163,8 +163,8 @@ typedef struct {
uint16_t DclkFrequency; //[MHz]
uint16_t MemclkFrequency; //[MHz]
uint16_t spare; //[centi]
- uint16_t UvdActivity; //[centi]
uint16_t GfxActivity; //[centi]
+ uint16_t UvdActivity; //[centi]
uint16_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_SOC
uint16_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_SOC
@@ -199,6 +199,19 @@ typedef struct {
uint16_t DeviceState;
uint16_t CurTemp; //[centi-Celsius]
uint16_t spare2;
+
+ uint16_t AverageGfxclkFrequency;
+ uint16_t AverageFclkFrequency;
+ uint16_t AverageGfxActivity;
+ uint16_t AverageSocclkFrequency;
+ uint16_t AverageVclkFrequency;
+ uint16_t AverageVcnActivity;
+ uint16_t AverageDRAMReads; //Filtered DF Bandwidth::DRAM Reads
+ uint16_t AverageDRAMWrites; //Filtered DF Bandwidth::DRAM Writes
+ uint16_t AverageSocketPower; //Filtered value of CurrentSocketPower
+ uint16_t AverageCorePower; //Filtered of [sum of CorePower[8]])
+ uint16_t AverageCoreC0Residency[8]; //Filtered of [average C0 residency % per core]
+ uint32_t MetricsCounter; //Counts the # of metrics table parameter reads per update to the metrics table, i.e. if the metrics table update happens every 1 second, this value could be up to 1000 if the smu collected metrics data every cycle, or as low as 0 if the smu was asleep the whole time. Reset to 0 after writing.
} SmuMetrics_t;
typedef struct {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 9d62ea2af132..8f72202aea8e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -28,7 +28,7 @@
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 445005571f76..9cd005131f56 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -2242,9 +2242,17 @@ static void arcturus_get_unique_id(struct smu_context *smu)
static int arcturus_set_df_cstate(struct smu_context *smu,
enum pp_df_cstate state)
{
+ struct amdgpu_device *adev = smu->adev;
uint32_t smu_version;
int ret;
+ /*
+ * Arcturus does not need the cstate disablement
+ * prerequisite for gpu reset.
+ */
+ if (amdgpu_in_reset(adev) || adev->in_suspend)
+ return 0;
+
ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (ret) {
dev_err(smu->adev->dev, "Failed to get smu version!\n");
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 619aee51b123..d30ec3005ea1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1640,6 +1640,15 @@ static bool aldebaran_is_baco_supported(struct smu_context *smu)
static int aldebaran_set_df_cstate(struct smu_context *smu,
enum pp_df_cstate state)
{
+ struct amdgpu_device *adev = smu->adev;
+
+ /*
+ * Aldebaran does not need the cstate disablement
+ * prerequisite for gpu reset.
+ */
+ if (amdgpu_in_reset(adev) || adev->in_suspend)
+ return 0;
+
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
}
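Both the Arcturus and Aldebaran hunks add the same early-out: when the GPU is mid-reset or suspending, DF C-state control is skipped rather than sent to the SMU, since these ASICs do not need the disablement prerequisite there. The guard-before-message ordering, in isolation (a sketch with stubbed predicates standing in for amdgpu_in_reset() and adev->in_suspend):

#include <stdbool.h>
#include <stdio.h>

static bool in_reset;
static bool in_suspend;

static int send_df_cstate_msg(int state)
{
	printf("SMU message: DFCstateControl(%d)\n", state);
	return 0;
}

static int set_df_cstate(int state)
{
	/*
	 * Skip the message during reset/suspend: this ASIC does not need
	 * the C-state disablement prerequisite for GPU reset.
	 */
	if (in_reset || in_suspend)
		return 0;
	return send_df_cstate_msg(state);
}

int main(void)
{
	in_reset = true;
	set_df_cstate(0);	/* silently skipped */
	in_reset = false;
	set_df_cstate(0);	/* reaches the SMU */
	return 0;
}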
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 93fffdbab4f0..c4552ade8d44 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -211,7 +211,8 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
return 0;
if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) ||
- (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)))
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) ||
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)))
return 0;
/* override pptable_id from driver parameter */
@@ -454,9 +455,6 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
dev_info(adev->dev, "override pptable id %d\n", pptable_id);
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
-
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
- pptable_id = 6666;
}
/* force using vbios pptable in sriov mode */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 096327513dd0..29529328152d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -119,6 +119,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
+ MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -239,82 +240,47 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
{
struct amdgpu_device *adev = smu->adev;
+ u32 smu_version;
if (num > 2)
return -EINVAL;
- memset(feature_mask, 0, sizeof(uint32_t) * num);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DATA_READ_BIT);
+ memset(feature_mask, 0xff, sizeof(uint32_t) * num);
- if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
+ if (!(adev->pm.pp_feature & PP_SCLK_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_IMU_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_VCN_BIT);
-
- if ((adev->pg_flags & AMD_PG_SUPPORT_ATHUB) &&
- (adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
-
- if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
-
-#if 0
- if (adev->pm.pp_feature & PP_GFXOFF_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
-#endif
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_THROTTLERS_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FAN_CONTROL_BIT);
+ if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
+ !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DF_CSTATE_BIT);
+ if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
- if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
+ /* PMFW 78.58 contains a critical fix for the gfxoff feature */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if ((smu_version < 0x004e3a00) ||
+ !(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);
+
+ if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT);
-
- if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_MPCLK_DS_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT);
+ if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_DCFCLK_BIT);
-
- if (adev->pm.pp_feature & PP_PCIE_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
+ if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_UCLK_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT);
-
- if (adev->pm.pp_feature & PP_ULV_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+ if (!(adev->pm.pp_feature & PP_ULV_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);
return 0;
}
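The rewrite inverts the policy: instead of starting from zero and OR-ing in every supported feature, the mask now starts all-ones and bits are cleared only when a feature is known-unavailable (unset pp_feature flags, missing PG support, or PMFW older than the gfxoff fix). The shape of that opt-out logic (a sketch with local definitions; the bit positions are illustrative, not the real SMU13 layout):

#include <stdint.h>
#include <stdio.h>

#define FEATURE_MASK(bit)	(1ULL << (bit))

enum { FEATURE_DPM_GFXCLK_BIT = 0, FEATURE_GFXOFF_BIT = 1 };

#define PP_SCLK_DPM_MASK	(1u << 0)
#define PP_GFXOFF_MASK		(1u << 1)

static uint64_t allowed_features(uint32_t pp_feature, uint32_t fw_version)
{
	uint64_t mask = ~0ULL;	/* opt-out: assume everything allowed */

	if (!(pp_feature & PP_SCLK_DPM_MASK))
		mask &= ~FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);

	/* Firmware below the fixed release must not advertise gfxoff. */
	if (fw_version < 0x004e3a00 || !(pp_feature & PP_GFXOFF_MASK))
		mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);

	return mask;
}

int main(void)
{
	printf("mask = 0x%016llx\n", (unsigned long long)
	       allowed_features(PP_SCLK_DPM_MASK, 0x004e3a00));
	return 0;
}

The opt-out form is shorter and means newly added firmware features default to enabled rather than silently off.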
@@ -1788,6 +1754,15 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_0_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_DFCstateControl,
+ state,
+ NULL);
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -1857,6 +1832,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
.mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_0_set_mp1_state,
+ .set_df_cstate = smu_v13_0_0_set_df_cstate,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index c422bf8a09b1..c4102cfb734c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -121,6 +121,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
+ MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -1587,6 +1588,16 @@ static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
return true;
}
+
+static int smu_v13_0_7_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_DFCstateControl,
+ state,
+ NULL);
+}
+
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -1649,6 +1660,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
.mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_7_set_mp1_state,
+ .set_df_cstate = smu_v13_0_7_set_df_cstate,
};
void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 5430265ad458..26d10065d534 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -66,8 +66,8 @@ void armada_gem_free_object(struct drm_gem_object *obj)
if (dobj->obj.import_attach) {
/* We only ever display imported data */
if (dobj->sgt)
- dma_buf_unmap_attachment(dobj->obj.import_attach,
- dobj->sgt, DMA_TO_DEVICE);
+ dma_buf_unmap_attachment_unlocked(dobj->obj.import_attach,
+ dobj->sgt, DMA_TO_DEVICE);
drm_prime_gem_destroy(&dobj->obj, NULL);
}
@@ -539,8 +539,8 @@ int armada_gem_map_import(struct armada_gem_object *dobj)
{
int ret;
- dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
- DMA_TO_DEVICE);
+ dobj->sgt = dma_buf_map_attachment_unlocked(dobj->obj.import_attach,
+ DMA_TO_DEVICE);
if (IS_ERR(dobj->sgt)) {
ret = PTR_ERR(dobj->sgt);
dobj->sgt = NULL;
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index fbcf2f45cef5..d367a90cd3de 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -2,10 +2,8 @@
config DRM_AST
tristate "AST server chips"
depends on DRM && PCI && MMU
+ select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
- select DRM_VRAM_HELPER
- select DRM_TTM
- select DRM_TTM_HELPER
help
Say yes for experimental AST GPU driver. Do not enable
this driver without having a working -modesetting,
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 760b27971557..bbeb5defc8f5 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -33,13 +33,13 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include "ast_drv.h"
-int ast_modeset = -1;
+static int ast_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, ast_modeset, int, 0400);
@@ -63,7 +63,7 @@ static const struct drm_driver ast_driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- DRM_GEM_VRAM_DRIVER
+ DRM_GEM_SHMEM_DRIVER_OPS
};
/*
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 2e44b971c3a6..74f41282444f 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -87,7 +87,7 @@ enum ast_tx_chip {
#define AST_DRAM_8Gx16 8
/*
- * Cursor plane
+ * Hardware cursor
*/
#define AST_MAX_HWC_WIDTH 64
@@ -96,8 +96,6 @@ enum ast_tx_chip {
#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH * AST_MAX_HWC_HEIGHT * 2)
#define AST_HWC_SIGNATURE_SIZE 32
-#define AST_DEFAULT_HWC_NUM 2
-
/* define for signature structure */
#define AST_HWC_SIGNATURE_CHECKSUM 0x00
#define AST_HWC_SIGNATURE_SizeX 0x04
@@ -107,22 +105,21 @@ enum ast_tx_chip {
#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
-struct ast_cursor_plane {
- struct drm_plane base;
+/*
+ * Planes
+ */
- struct {
- struct drm_gem_vram_object *gbo;
- struct iosys_map map;
- u64 off;
- } hwc[AST_DEFAULT_HWC_NUM];
+struct ast_plane {
+ struct drm_plane base;
- unsigned int next_hwc_index;
+ void __iomem *vaddr;
+ u64 offset;
+ unsigned long size;
};
-static inline struct ast_cursor_plane *
-to_ast_cursor_plane(struct drm_plane *plane)
+static inline struct ast_plane *to_ast_plane(struct drm_plane *plane)
{
- return container_of(plane, struct ast_cursor_plane, base);
+ return container_of(plane, struct ast_plane, base);
}
/*
@@ -175,8 +172,13 @@ struct ast_private {
uint32_t dram_type;
uint32_t mclk;
- struct drm_plane primary_plane;
- struct ast_cursor_plane cursor_plane;
+ void __iomem *vram;
+ unsigned long vram_base;
+ unsigned long vram_size;
+ unsigned long vram_fb_available;
+
+ struct ast_plane primary_plane;
+ struct ast_plane cursor_plane;
struct drm_crtc crtc;
struct {
struct {
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 067453266897..bffa310a0431 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -32,7 +32,6 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
#include "ast_drv.h"
@@ -461,8 +460,8 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
/* map reserved buffer */
ast->dp501_fw_buf = NULL;
- if (dev->vram_mm->vram_size < pci_resource_len(pdev, 0)) {
- ast->dp501_fw_buf = pci_iomap_range(pdev, 0, dev->vram_mm->vram_size, 0);
+ if (ast->vram_size < pci_resource_len(pdev, 0)) {
+ ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
if (!ast->dp501_fw_buf)
drm_info(dev, "failed to map reserved buffer!\n");
}
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 6e999408dda9..248284a4b3ff 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -28,7 +28,6 @@
#include <linux/pci.h>
-#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
@@ -80,7 +79,6 @@ int ast_mm_init(struct ast_private *ast)
struct pci_dev *pdev = to_pci_dev(dev->dev);
resource_size_t base, size;
u32 vram_size;
- int ret;
base = pci_resource_start(pdev, 0);
size = pci_resource_len(pdev, 0);
@@ -91,11 +89,13 @@ int ast_mm_init(struct ast_private *ast)
vram_size = ast_get_vram_size(ast);
- ret = drmm_vram_helper_init(dev, base, vram_size);
- if (ret) {
- drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
- return ret;
- }
+ ast->vram = devm_ioremap_wc(dev->dev, base, vram_size);
+ if (!ast->vram)
+ return -ENOMEM;
+
+ ast->vram_base = base;
+ ast->vram_size = vram_size;
+ ast->vram_fb_available = vram_size;
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 1bc0220e6783..c7443317c747 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -36,11 +36,13 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -48,6 +50,8 @@
#include "ast_drv.h"
#include "ast_tables.h"
+#define AST_LUT_SIZE 256
+
static inline void ast_load_palette_index(struct ast_private *ast,
u8 index, u8 red, u8 green,
u8 blue)
@@ -62,20 +66,46 @@ static inline void ast_load_palette_index(struct ast_private *ast,
ast_io_read8(ast, AST_IO_SEQ_PORT);
}
-static void ast_crtc_load_lut(struct ast_private *ast, struct drm_crtc *crtc)
+static void ast_crtc_set_gamma_linear(struct ast_private *ast,
+ const struct drm_format_info *format)
{
- u16 *r, *g, *b;
int i;
- if (!crtc->enabled)
- return;
+ switch (format->format) {
+ case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB8888:
+ for (i = 0; i < AST_LUT_SIZE; i++)
+ ast_load_palette_index(ast, i, i, i, i);
+ break;
+ default:
+ drm_warn_once(&ast->base, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
+}
- r = crtc->gamma_store;
- g = r + crtc->gamma_size;
- b = g + crtc->gamma_size;
+static void ast_crtc_set_gamma(struct ast_private *ast,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
+{
+ int i;
- for (i = 0; i < 256; i++)
- ast_load_palette_index(ast, i, *r++ >> 8, *g++ >> 8, *b++ >> 8);
+ switch (format->format) {
+ case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB8888:
+ for (i = 0; i < AST_LUT_SIZE; i++)
+ ast_load_palette_index(ast, i,
+ lut[i].red >> 8,
+ lut[i].green >> 8,
+ lut[i].blue >> 8);
+ break;
+ default:
+ drm_warn_once(&ast->base, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
}
static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
@@ -538,6 +568,29 @@ static void ast_wait_for_vretrace(struct ast_private *ast)
}
/*
+ * Planes
+ */
+
+static int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
+ void __iomem *vaddr, u64 offset, unsigned long size,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type)
+{
+ struct drm_plane *plane = &ast_plane->base;
+
+ ast_plane->vaddr = vaddr;
+ ast_plane->offset = offset;
+ ast_plane->size = size;
+
+ return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+ formats, format_count, format_modifiers,
+ type, NULL);
+}
+
+/*
* Primary plane
*/
@@ -550,52 +603,62 @@ static const uint32_t ast_primary_plane_formats[] = {
static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_crtc_state *crtc_state;
- struct ast_crtc_state *ast_crtc_state;
+ struct drm_device *dev = plane->dev;
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc_state *new_crtc_state = NULL;
+ struct ast_crtc_state *new_ast_crtc_state;
int ret;
- if (!new_plane_state->crtc)
- return 0;
-
- crtc_state = drm_atomic_get_new_crtc_state(state,
- new_plane_state->crtc);
+ if (new_plane_state->crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
- ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
false, true);
- if (ret)
+ if (ret) {
return ret;
+ } else if (!new_plane_state->visible) {
+ if (drm_WARN_ON(dev, new_plane_state->crtc)) /* cannot legally happen */
+ return -EINVAL;
+ else
+ return 0;
+ }
- if (!new_plane_state->visible)
- return 0;
-
- ast_crtc_state = to_ast_crtc_state(crtc_state);
+ new_ast_crtc_state = to_ast_crtc_state(new_crtc_state);
- ast_crtc_state->format = new_plane_state->fb->format;
+ new_ast_crtc_state->format = new_plane_state->fb->format;
return 0;
}
-static void
-ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
+static void ast_handle_damage(struct ast_plane *ast_plane, struct iosys_map *src,
+ struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
+{
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR(ast_plane->vaddr);
+
+ iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
+ drm_fb_memcpy(&dst, fb->pitches, src, fb, clip);
+}
+
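ast_handle_damage() above copies only the damaged clip: the destination map is advanced by the byte offset of the clip inside the framebuffer, which for linear formats is rows times pitch plus columns times bytes-per-pixel. That offset on its own (a sketch mirroring what drm_fb_clip_offset() computes for simple linear formats):

#include <stdint.h>
#include <stdio.h>

struct rect { uint32_t x1, y1, x2, y2; };

/* Byte offset of a clip rectangle within a linear framebuffer. */
static uint32_t clip_offset(uint32_t pitch, uint32_t cpp, const struct rect *clip)
{
	return clip->y1 * pitch + clip->x1 * cpp;
}

int main(void)
{
	/* 1024x768 XRGB8888: pitch 4096 bytes, 4 bytes per pixel. */
	struct rect damage = { .x1 = 100, .y1 = 50, .x2 = 200, .y2 = 80 };

	printf("copy starts at byte %u\n", clip_offset(4096, 4, &damage));
	return 0;
}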
+static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
- plane);
struct drm_device *dev = plane->dev;
struct ast_private *ast = to_ast_private(dev);
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_gem_vram_object *gbo;
- s64 gpu_addr;
- struct drm_framebuffer *fb = new_state->fb;
- struct drm_framebuffer *old_fb = old_state->fb;
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_framebuffer *old_fb = old_plane_state->fb;
+ struct ast_plane *ast_plane = to_ast_plane(plane);
+ struct drm_rect damage;
+ struct drm_atomic_helper_damage_iter iter;
if (!old_fb || (fb->format != old_fb->format)) {
- struct drm_crtc_state *crtc_state = new_state->crtc->state;
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
@@ -603,20 +666,28 @@ ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
ast_set_vbios_color_reg(ast, fb->format, vbios_mode_info);
}
- gbo = drm_gem_vram_of_gem(fb->obj[0]);
- gpu_addr = drm_gem_vram_offset(gbo);
- if (drm_WARN_ON_ONCE(dev, gpu_addr < 0))
- return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */
-
- ast_set_offset_reg(ast, fb);
- ast_set_start_address_crt1(ast, (u32)gpu_addr);
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ ast_handle_damage(ast_plane, shadow_plane_state->data, fb, &damage);
+ }
- ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
+ /*
+ * Some BMCs stop scanning out the video signal after the driver
+ * has reprogrammed the offset or scanout address. This stalls
+ * display output for several seconds and makes the display
+ * unusable. Therefore, update the offset only when it changes,
+ * and reprogram the address only after enabling the plane.
+ */
+ if (!old_fb || old_fb->pitches[0] != fb->pitches[0])
+ ast_set_offset_reg(ast, fb);
+ if (!old_fb) {
+ ast_set_start_address_crt1(ast, (u32)ast_plane->offset);
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
+ }
}
-static void
-ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
+static void ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
struct ast_private *ast = to_ast_private(plane->dev);
@@ -624,7 +695,7 @@ ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = {
- DRM_GEM_VRAM_PLANE_HELPER_FUNCS,
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = ast_primary_plane_helper_atomic_check,
.atomic_update = ast_primary_plane_helper_atomic_update,
.atomic_disable = ast_primary_plane_helper_atomic_disable,
@@ -634,27 +705,30 @@ static const struct drm_plane_funcs ast_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
};
static int ast_primary_plane_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
- struct drm_plane *primary_plane = &ast->primary_plane;
+ struct ast_plane *ast_primary_plane = &ast->primary_plane;
+ struct drm_plane *primary_plane = &ast_primary_plane->base;
+ void __iomem *vaddr = ast->vram;
+ u64 offset = ast->vram_base;
+ unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
+ unsigned long size = ast->vram_fb_available - cursor_size;
int ret;
- ret = drm_universal_plane_init(dev, primary_plane, 0x01,
- &ast_primary_plane_funcs,
- ast_primary_plane_formats,
- ARRAY_SIZE(ast_primary_plane_formats),
- NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ ret = ast_plane_init(dev, ast_primary_plane, vaddr, offset, size,
+ 0x01, &ast_primary_plane_funcs,
+ ast_primary_plane_formats, ARRAY_SIZE(ast_primary_plane_formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY);
if (ret) {
- drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ drm_err(dev, "ast_plane_init() failed: %d\n", ret);
return ret;
}
drm_plane_helper_add(primary_plane, &ast_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
return 0;
}
@@ -774,99 +848,79 @@ static const uint32_t ast_cursor_plane_formats[] = {
static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_framebuffer *fb = new_plane_state->fb;
- struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_crtc_state *new_crtc_state = NULL;
int ret;
- if (!new_plane_state->crtc)
- return 0;
-
- crtc_state = drm_atomic_get_new_crtc_state(state,
- new_plane_state->crtc);
+ if (new_plane_state->crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
- ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
true, true);
- if (ret)
+ if (ret || !new_plane_state->visible)
return ret;
- if (!new_plane_state->visible)
- return 0;
-
- if (fb->width > AST_MAX_HWC_WIDTH || fb->height > AST_MAX_HWC_HEIGHT)
+ if (new_fb->width > AST_MAX_HWC_WIDTH || new_fb->height > AST_MAX_HWC_HEIGHT)
return -EINVAL;
return 0;
}
-static void
-ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
+static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct ast_cursor_plane *ast_cursor_plane = to_ast_cursor_plane(plane);
- struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
- plane);
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state);
- struct drm_framebuffer *fb = new_state->fb;
+ struct ast_plane *ast_plane = to_ast_plane(plane);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct ast_private *ast = to_ast_private(plane->dev);
- struct iosys_map dst_map =
- ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].map;
- u64 dst_off =
- ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].off;
struct iosys_map src_map = shadow_plane_state->data[0];
+ struct drm_rect damage;
+ const u8 *src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
+ u64 dst_off = ast_plane->offset;
+ u8 __iomem *dst = ast_plane->vaddr; /* TODO: Use mapping abstraction properly */
+ u8 __iomem *sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
unsigned int offset_x, offset_y;
u16 x, y;
u8 x_offset, y_offset;
- u8 __iomem *dst;
- u8 __iomem *sig;
- const u8 *src;
-
- src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
- dst = dst_map.vaddr_iomem; /* TODO: Use mapping abstraction properly */
- sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
/*
- * Do data transfer to HW cursor BO. If a new cursor image was installed,
- * point the scanout engine to dst_gbo's offset and page-flip the HWC buffers.
+	 * Transfer the cursor image to the hardware buffer and point the
+	 * scanout engine at its offset.
*/
- ast_update_cursor_image(dst, src, fb->width, fb->height);
-
- if (new_state->fb != old_state->fb) {
+ if (drm_atomic_helper_damage_merged(old_plane_state, plane_state, &damage)) {
+ ast_update_cursor_image(dst, src, fb->width, fb->height);
ast_set_cursor_base(ast, dst_off);
-
- ++ast_cursor_plane->next_hwc_index;
- ast_cursor_plane->next_hwc_index %= ARRAY_SIZE(ast_cursor_plane->hwc);
}
/*
* Update location in HWC signature and registers.
*/
- writel(new_state->crtc_x, sig + AST_HWC_SIGNATURE_X);
- writel(new_state->crtc_y, sig + AST_HWC_SIGNATURE_Y);
+ writel(plane_state->crtc_x, sig + AST_HWC_SIGNATURE_X);
+ writel(plane_state->crtc_y, sig + AST_HWC_SIGNATURE_Y);
offset_x = AST_MAX_HWC_WIDTH - fb->width;
offset_y = AST_MAX_HWC_HEIGHT - fb->height;
- if (new_state->crtc_x < 0) {
- x_offset = (-new_state->crtc_x) + offset_x;
+ if (plane_state->crtc_x < 0) {
+ x_offset = (-plane_state->crtc_x) + offset_x;
x = 0;
} else {
x_offset = offset_x;
- x = new_state->crtc_x;
+ x = plane_state->crtc_x;
}
- if (new_state->crtc_y < 0) {
- y_offset = (-new_state->crtc_y) + offset_y;
+ if (plane_state->crtc_y < 0) {
+ y_offset = (-plane_state->crtc_y) + offset_y;
y = 0;
} else {
y_offset = offset_y;
- y = new_state->crtc_y;
+ y = plane_state->crtc_y;
}
ast_set_cursor_location(ast, x, y, x_offset, y_offset);
@@ -875,9 +929,8 @@ ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
ast_set_cursor_enabled(ast, true);
}
-static void
-ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
+static void ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
struct ast_private *ast = to_ast_private(plane->dev);
@@ -891,41 +944,22 @@ static const struct drm_plane_helper_funcs ast_cursor_plane_helper_funcs = {
.atomic_disable = ast_cursor_plane_helper_atomic_disable,
};
-static void ast_cursor_plane_destroy(struct drm_plane *plane)
-{
- struct ast_cursor_plane *ast_cursor_plane = to_ast_cursor_plane(plane);
- size_t i;
- struct drm_gem_vram_object *gbo;
- struct iosys_map map;
-
- for (i = 0; i < ARRAY_SIZE(ast_cursor_plane->hwc); ++i) {
- gbo = ast_cursor_plane->hwc[i].gbo;
- map = ast_cursor_plane->hwc[i].map;
- drm_gem_vram_vunmap(gbo, &map);
- drm_gem_vram_unpin(gbo);
- drm_gem_vram_put(gbo);
- }
-
- drm_plane_cleanup(plane);
-}
-
static const struct drm_plane_funcs ast_cursor_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = ast_cursor_plane_destroy,
+ .destroy = drm_plane_cleanup,
DRM_GEM_SHADOW_PLANE_FUNCS,
};
static int ast_cursor_plane_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
- struct ast_cursor_plane *ast_cursor_plane = &ast->cursor_plane;
+ struct ast_plane *ast_cursor_plane = &ast->cursor_plane;
struct drm_plane *cursor_plane = &ast_cursor_plane->base;
- size_t size, i;
- struct drm_gem_vram_object *gbo;
- struct iosys_map map;
+ size_t size;
+ void __iomem *vaddr;
+ u64 offset;
int ret;
- s64 off;
/*
* Allocate backing storage for cursors. The BOs are permanently
@@ -934,60 +968,26 @@ static int ast_cursor_plane_init(struct ast_private *ast)
size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
- for (i = 0; i < ARRAY_SIZE(ast_cursor_plane->hwc); ++i) {
- gbo = drm_gem_vram_create(dev, size, 0);
- if (IS_ERR(gbo)) {
- ret = PTR_ERR(gbo);
- goto err_hwc;
- }
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
- DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
- if (ret)
- goto err_drm_gem_vram_put;
- ret = drm_gem_vram_vmap(gbo, &map);
- if (ret)
- goto err_drm_gem_vram_unpin;
- off = drm_gem_vram_offset(gbo);
- if (off < 0) {
- ret = off;
- goto err_drm_gem_vram_vunmap;
- }
- ast_cursor_plane->hwc[i].gbo = gbo;
- ast_cursor_plane->hwc[i].map = map;
- ast_cursor_plane->hwc[i].off = off;
- }
+ if (ast->vram_fb_available < size)
+ return -ENOMEM;
- /*
- * Create the cursor plane. The plane's destroy callback will release
- * the backing storages' BO memory.
- */
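+	/* Place the cursor buffer at the high end of the available framebuffer memory. */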
+ vaddr = ast->vram + ast->vram_fb_available - size;
+ offset = ast->vram_base + ast->vram_fb_available - size;
- ret = drm_universal_plane_init(dev, cursor_plane, 0x01,
- &ast_cursor_plane_funcs,
- ast_cursor_plane_formats,
- ARRAY_SIZE(ast_cursor_plane_formats),
- NULL, DRM_PLANE_TYPE_CURSOR, NULL);
+ ret = ast_plane_init(dev, ast_cursor_plane, vaddr, offset, size,
+ 0x01, &ast_cursor_plane_funcs,
+ ast_cursor_plane_formats, ARRAY_SIZE(ast_cursor_plane_formats),
+ NULL, DRM_PLANE_TYPE_CURSOR);
if (ret) {
- drm_err(dev, "drm_universal_plane failed(): %d\n", ret);
- goto err_hwc;
+ drm_err(dev, "ast_plane_init() failed: %d\n", ret);
+ return ret;
}
drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(cursor_plane);
- return 0;
+ ast->vram_fb_available -= size;
-err_hwc:
- while (i) {
- --i;
- gbo = ast_cursor_plane->hwc[i].gbo;
- map = ast_cursor_plane->hwc[i].map;
-err_drm_gem_vram_vunmap:
- drm_gem_vram_vunmap(gbo, &map);
-err_drm_gem_vram_unpin:
- drm_gem_vram_unpin(gbo);
-err_drm_gem_vram_put:
- drm_gem_vram_put(gbo);
- }
- return ret;
+ return 0;
}
/*
@@ -1026,9 +1026,11 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
ast_set_color_reg(ast, format);
ast_set_vbios_color_reg(ast, format, vbios_mode_info);
+ if (crtc->state->gamma_lut)
+ ast_crtc_set_gamma(ast, format, crtc->state->gamma_lut->data);
+ else
+ ast_crtc_set_gamma_linear(ast, format);
}
-
- ast_crtc_load_lut(ast, crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
@@ -1123,47 +1125,50 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
struct drm_device *dev = crtc->dev;
struct ast_crtc_state *ast_state;
const struct drm_format_info *format;
bool succ;
int ret;
- ret = drm_atomic_helper_check_crtc_state(crtc_state, false);
+ if (!crtc_state->enable)
+ return 0;
+
+ ret = drm_atomic_helper_check_crtc_primary_plane(crtc_state);
if (ret)
return ret;
- if (!crtc_state->enable)
- goto out;
-
ast_state = to_ast_crtc_state(crtc_state);
format = ast_state->format;
if (drm_WARN_ON_ONCE(dev, !format))
return -EINVAL; /* BUG: We didn't set format in primary check(). */
+ /*
+ * The gamma LUT has to be reloaded after changing the primary
+ * plane's color format.
+ */
+ if (old_ast_crtc_state->format != format)
+ crtc_state->color_mgmt_changed = true;
+
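+	/* Userspace gamma LUTs must have exactly AST_LUT_SIZE entries. */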
+ if (crtc_state->color_mgmt_changed && crtc_state->gamma_lut) {
+ if (crtc_state->gamma_lut->length !=
+ AST_LUT_SIZE * sizeof(struct drm_color_lut)) {
+ drm_err(dev, "Wrong size for gamma_lut %zu\n",
+ crtc_state->gamma_lut->length);
+ return -EINVAL;
+ }
+ }
+
succ = ast_get_vbios_mode_info(format, &crtc_state->mode,
&crtc_state->adjusted_mode,
&ast_state->vbios_mode_info);
if (!succ)
return -EINVAL;
-out:
- return drm_atomic_add_affected_planes(state, crtc);
-}
-
-static void ast_crtc_helper_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state)
-{
- struct drm_device *dev = crtc->dev;
- struct ast_private *ast = to_ast_private(dev);
-
- /*
- * Concurrent operations could possibly trigger a call to
- * drm_connector_helper_funcs.get_modes by trying to read the
- * display modes. Protect access to I/O registers by acquiring
- * the I/O-register lock. Released in atomic_flush().
- */
- mutex_lock(&ast->ioregs_lock);
+ return 0;
}
static void
@@ -1172,35 +1177,34 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
- struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
- crtc);
struct drm_device *dev = crtc->dev;
struct ast_private *ast = to_ast_private(dev);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
- struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
/*
* The gamma LUT has to be reloaded after changing the primary
* plane's color format.
*/
- if (old_ast_crtc_state->format != ast_crtc_state->format)
- ast_crtc_load_lut(ast, crtc);
+ if (crtc_state->enable && crtc_state->color_mgmt_changed) {
+ if (crtc_state->gamma_lut)
+ ast_crtc_set_gamma(ast,
+ ast_crtc_state->format,
+ crtc_state->gamma_lut->data);
+ else
+ ast_crtc_set_gamma_linear(ast, ast_crtc_state->format);
+ }
	// Set Aspeed Display-Port
if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
ast_dp_set_mode(crtc, vbios_mode_info);
-
- mutex_unlock(&ast->ioregs_lock);
}
-static void
-ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
+static void ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct ast_private *ast = to_ast_private(dev);
- struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_vbios_mode_info *vbios_mode_info =
&ast_crtc_state->vbios_mode_info;
@@ -1217,12 +1221,9 @@ ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
ast_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}
-static void
-ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
+static void ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
- struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
- crtc);
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
struct drm_device *dev = crtc->dev;
struct ast_private *ast = to_ast_private(dev);
@@ -1250,7 +1251,6 @@ ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.mode_valid = ast_crtc_helper_mode_valid,
.atomic_check = ast_crtc_helper_atomic_check,
- .atomic_begin = ast_crtc_helper_atomic_begin,
.atomic_flush = ast_crtc_helper_atomic_flush,
.atomic_enable = ast_crtc_helper_atomic_enable,
.atomic_disable = ast_crtc_helper_atomic_disable,
@@ -1317,13 +1317,15 @@ static int ast_crtc_init(struct drm_device *dev)
struct drm_crtc *crtc = &ast->crtc;
int ret;
- ret = drm_crtc_init_with_planes(dev, crtc, &ast->primary_plane,
+ ret = drm_crtc_init_with_planes(dev, crtc, &ast->primary_plane.base,
&ast->cursor_plane.base, &ast_crtc_funcs,
NULL);
if (ret)
return ret;
- drm_mode_crtc_set_gamma_size(crtc, 256);
+ drm_mode_crtc_set_gamma_size(crtc, AST_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, AST_LUT_SIZE);
+
drm_crtc_helper_add(crtc, &ast_crtc_helper_funcs);
return 0;
@@ -1718,13 +1720,46 @@ static int ast_astdp_output_init(struct ast_private *ast)
* Mode config
*/
+static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct ast_private *ast = to_ast_private(state->dev);
+
+ /*
+	 * Concurrent operations could possibly trigger a call to
+	 * drm_connector_helper_funcs.get_modes by trying to read the
+	 * display modes. Protect access to I/O registers by acquiring
+	 * the I/O-register lock around the whole commit.
+ */
+ mutex_lock(&ast->ioregs_lock);
+ drm_atomic_helper_commit_tail_rpm(state);
+ mutex_unlock(&ast->ioregs_lock);
+}
+
static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs = {
- .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+ .atomic_commit_tail = ast_mode_config_helper_atomic_commit_tail,
};
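+/*
+ * Reject display modes whose primary framebuffer would not fit into the
+ * VRAM left after the cursor allocation; assume the worst case of 4
+ * bytes per pixel (XRGB8888).
+ */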
+static enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+{
+ static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
+ struct ast_private *ast = to_ast_private(dev);
+ unsigned long fbsize, fbpages, max_fbpages;
+
+ max_fbpages = (ast->vram_fb_available) >> PAGE_SHIFT;
+
+ fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
+ fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
+
+ if (fbpages > max_fbpages)
+ return MODE_MEM;
+
+ return MODE_OK;
+}
+
static const struct drm_mode_config_funcs ast_mode_config_funcs = {
- .fb_create = drm_gem_fb_create,
- .mode_valid = drm_vram_helper_mode_valid,
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .mode_valid = ast_mode_config_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -1732,7 +1767,6 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
int ast_mode_config_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
ret = drmm_mode_config_init(dev);
@@ -1743,8 +1777,6 @@ int ast_mode_config_init(struct ast_private *ast)
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.preferred_depth = 24;
- dev->mode_config.prefer_shadow = 1;
- dev->mode_config.fb_base = pci_resource_start(pdev, 0);
if (ast->chip == AST2100 ||
ast->chip == AST2200 ||
@@ -1761,7 +1793,6 @@ int ast_mode_config_init(struct ast_private *ast)
dev->mode_config.helper_private = &ast_mode_config_helper_funcs;
-
ret = ast_primary_plane_init(ast);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 94de73cbeb2d..17445800248d 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -402,7 +402,8 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
void adv7533_dsi_power_on(struct adv7511 *adv);
void adv7533_dsi_power_off(struct adv7511 *adv);
-void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode);
+enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
+ const struct drm_display_mode *mode);
int adv7533_patch_registers(struct adv7511 *adv);
int adv7533_patch_cec_registers(struct adv7511 *adv);
int adv7533_attach_dsi(struct adv7511 *adv);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 6031bdd92342..e7a6e456ed0d 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -697,7 +697,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
}
static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
@@ -791,9 +791,6 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
regmap_update_bits(adv7511->regmap, 0x17,
0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
- if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
- adv7533_mode_set(adv7511, adj_mode);
-
drm_mode_copy(&adv7511->curr_mode, adj_mode);
/*
@@ -913,6 +910,18 @@ static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
adv7511_mode_set(adv, mode, adj_mode);
}
+static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ if (adv->type == ADV7533 || adv->type == ADV7535)
+ return adv7533_mode_valid(adv, mode);
+ else
+ return adv7511_mode_valid(adv, mode);
+}
+
static int adv7511_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
@@ -960,6 +969,7 @@ static const struct drm_bridge_funcs adv7511_bridge_funcs = {
.enable = adv7511_bridge_enable,
.disable = adv7511_bridge_disable,
.mode_set = adv7511_bridge_mode_set,
+ .mode_valid = adv7511_bridge_mode_valid,
.attach = adv7511_bridge_attach,
.detect = adv7511_bridge_detect,
.get_edid = adv7511_bridge_get_edid,
@@ -1209,10 +1219,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
return ret;
ret = adv7511_init_regulators(adv7511);
- if (ret) {
- dev_err(dev, "failed to init regulators\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init regulators\n");
/*
* The power down GPIO is optional. If present, toggle it from active to
@@ -1336,7 +1344,7 @@ uninit_regulators:
return ret;
}
-static int adv7511_remove(struct i2c_client *i2c)
+static void adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
@@ -1352,8 +1360,6 @@ static int adv7511_remove(struct i2c_client *i2c)
i2c_unregister_device(adv7511->i2c_packet);
i2c_unregister_device(adv7511->i2c_edid);
-
- return 0;
}
static const struct i2c_device_id adv7511_i2c_ids[] = {
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index ef6270806d1d..fdfeadcefe80 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -100,26 +100,27 @@ void adv7533_dsi_power_off(struct adv7511 *adv)
regmap_write(adv->regmap_cec, 0x27, 0x0b);
}
-void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode)
+enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
+ const struct drm_display_mode *mode)
{
+ int lanes;
struct mipi_dsi_device *dsi = adv->dsi;
- int lanes, ret;
-
- if (adv->num_dsi_lanes != 4)
- return;
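+	/* Pixel clocks above 80 MHz require all four DSI lanes. */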
if (mode->clock > 80000)
lanes = 4;
else
lanes = 3;
- if (lanes != dsi->lanes) {
- mipi_dsi_detach(dsi);
- dsi->lanes = lanes;
- ret = mipi_dsi_attach(dsi);
- if (ret)
- dev_err(&dsi->dev, "failed to change host lanes\n");
- }
+ /*
+	 * TODO: add support for dynamic switching of lanes
+	 * by using the bridge pre_enable() op. Until then, filter
+	 * out the modes that need a different number of lanes
+	 * than was configured in the device tree.
+ */
+ if (lanes != dsi->lanes)
+ return MODE_BAD;
+
+ return MODE_OK;
}
int adv7533_patch_registers(struct adv7511 *adv)
@@ -148,16 +149,14 @@ int adv7533_attach_dsi(struct adv7511 *adv)
};
host = of_find_mipi_dsi_host_by_node(adv->host_node);
- if (!host) {
- dev_err(dev, "failed to find dsi host\n");
- return -EPROBE_DEFER;
- }
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "failed to find dsi host\n");
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
- if (IS_ERR(dsi)) {
- dev_err(dev, "failed to create dsi device\n");
- return PTR_ERR(dsi);
- }
+ if (IS_ERR(dsi))
+ return dev_err_probe(dev, PTR_ERR(dsi),
+ "failed to create dsi device\n");
adv->dsi = dsi;
@@ -167,10 +166,8 @@ int adv7533_attach_dsi(struct adv7511 *adv)
MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE;
ret = devm_mipi_dsi_attach(dev, dsi);
- if (ret < 0) {
- dev_err(dev, "failed to attach dsi to host\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to attach dsi to host\n");
return 0;
}
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index ae3d6e9a606c..660a54857929 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -787,7 +787,7 @@ err_unregister_i2c:
return err;
}
-static int anx6345_i2c_remove(struct i2c_client *client)
+static void anx6345_i2c_remove(struct i2c_client *client)
{
struct anx6345 *anx6345 = i2c_get_clientdata(client);
@@ -798,8 +798,6 @@ static int anx6345_i2c_remove(struct i2c_client *client)
kfree(anx6345->edid);
mutex_destroy(&anx6345->lock);
-
- return 0;
}
static const struct i2c_device_id anx6345_id[] = {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index d2fc8676fab6..5997049fde5b 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -1357,7 +1357,7 @@ err_unregister_i2c:
return err;
}
-static int anx78xx_i2c_remove(struct i2c_client *client)
+static void anx78xx_i2c_remove(struct i2c_client *client)
{
struct anx78xx *anx78xx = i2c_get_clientdata(client);
@@ -1366,8 +1366,6 @@ static int anx78xx_i2c_remove(struct i2c_client *client)
unregister_i2c_dummy_clients(anx78xx);
kfree(anx78xx->edid);
-
- return 0;
}
static const struct i2c_device_id anx78xx_id[] = {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 8aadcc0aa90b..df9370e0ff23 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1864,12 +1864,6 @@ EXPORT_SYMBOL_GPL(analogix_dp_remove);
int analogix_dp_suspend(struct analogix_dp_device *dp)
{
clk_disable_unprepare(dp->clock);
-
- if (dp->plat_data->panel) {
- if (drm_panel_unprepare(dp->plat_data->panel))
- DRM_ERROR("failed to turnoff the panel\n");
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(analogix_dp_suspend);
@@ -1884,13 +1878,6 @@ int analogix_dp_resume(struct analogix_dp_device *dp)
return ret;
}
- if (dp->plat_data->panel) {
- if (drm_panel_prepare(dp->plat_data->panel)) {
- DRM_ERROR("failed to setup the panel\n");
- return -EBUSY;
- }
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(analogix_dp_resume);
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 0c323b5a1c99..b0ff1ecb80a5 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2715,7 +2715,7 @@ free_hdcp_wq:
return ret;
}
-static int anx7625_i2c_remove(struct i2c_client *client)
+static void anx7625_i2c_remove(struct i2c_client *client)
{
struct anx7625_data *platform = i2c_get_clientdata(client);
@@ -2735,8 +2735,6 @@ static int anx7625_i2c_remove(struct i2c_client *client)
if (platform->pdata.audio_en)
anx7625_unregister_audio(platform);
-
- return 0;
}
static const struct i2c_device_id anx7625_id[] = {
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index ba060277c3fd..b94f39a86846 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -583,14 +583,12 @@ static int ch7033_probe(struct i2c_client *client,
return 0;
}
-static int ch7033_remove(struct i2c_client *client)
+static void ch7033_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ch7033_priv *priv = dev_get_drvdata(dev);
drm_bridge_remove(&priv->bridge);
-
- return 0;
}
static const struct of_device_id ch7033_dt_ids[] = {
diff --git a/drivers/gpu/drm/bridge/cros-ec-anx7688.c b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
index 0f6d907432e3..fa91bdeddef0 100644
--- a/drivers/gpu/drm/bridge/cros-ec-anx7688.c
+++ b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
@@ -159,13 +159,11 @@ static int cros_ec_anx7688_bridge_probe(struct i2c_client *client)
return 0;
}
-static int cros_ec_anx7688_bridge_remove(struct i2c_client *client)
+static void cros_ec_anx7688_bridge_remove(struct i2c_client *client)
{
struct cros_ec_anx7688 *anx7688 = i2c_get_clientdata(client);
drm_bridge_remove(&anx7688->bridge);
-
- return 0;
}
static const struct of_device_id cros_ec_anx7688_bridge_match_table[] = {
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 2767b70fa2cb..21a9b8422bda 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -412,6 +412,7 @@ struct it6505 {
	 * Mutex protects extcon and interrupt functions from interfering
	 * with each other.
*/
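+	/* Serializes the threaded interrupt handler with power-state changes. */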
+ struct mutex irq_lock;
struct mutex extcon_lock;
struct mutex mode_lock; /* used to bridge_detect */
struct mutex aux_lock; /* used to aux data transfers */
@@ -421,6 +422,7 @@ struct it6505 {
struct notifier_block event_nb;
struct extcon_dev *extcon;
struct work_struct extcon_wq;
+ int extcon_state;
enum drm_connector_status connector_status;
enum link_train_status link_state;
struct work_struct link_works;
@@ -439,7 +441,7 @@ struct it6505 {
enum hdcp_state hdcp_status;
struct delayed_work hdcp_work;
struct work_struct hdcp_wait_ksv_list;
- struct completion wait_edid_complete;
+ struct completion extcon_completion;
u8 auto_train_retry;
bool hdcp_desired;
bool is_repeater;
@@ -724,28 +726,6 @@ static void it6505_calc_video_info(struct it6505 *it6505)
DRM_MODE_ARG(&it6505->video_info));
}
-static int it6505_drm_dp_link_probe(struct drm_dp_aux *aux,
- struct it6505_drm_dp_link *link)
-{
- u8 values[3];
- int err;
-
- memset(link, 0, sizeof(*link));
-
- err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values));
- if (err < 0)
- return err;
-
- link->revision = values[0];
- link->rate = drm_dp_bw_code_to_link_rate(values[1]);
- link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK;
-
- if (values[2] & DP_ENHANCED_FRAME_CAP)
- link->capabilities = DP_ENHANCED_FRAME_CAP;
-
- return 0;
-}
-
static int it6505_drm_dp_link_set_power(struct drm_dp_aux *aux,
struct it6505_drm_dp_link *link,
u8 mode)
@@ -1455,11 +1435,19 @@ static void it6505_parse_link_capabilities(struct it6505 *it6505)
int bcaps;
if (it6505->dpcd[0] == 0) {
- it6505_aux_on(it6505);
- it6505_get_dpcd(it6505, DP_DPCD_REV, it6505->dpcd,
- ARRAY_SIZE(it6505->dpcd));
+		dev_err(dev, "DPCD is not initialized\n");
+ return;
}
+ memset(link, 0, sizeof(*link));
+
+ link->revision = it6505->dpcd[0];
+ link->rate = drm_dp_bw_code_to_link_rate(it6505->dpcd[1]);
+ link->num_lanes = it6505->dpcd[2] & DP_MAX_LANE_COUNT_MASK;
+
+ if (it6505->dpcd[2] & DP_ENHANCED_FRAME_CAP)
+ link->capabilities = DP_ENHANCED_FRAME_CAP;
+
DRM_DEV_DEBUG_DRIVER(dev, "DPCD Rev.: %d.%d",
link->revision >> 4, link->revision & 0x0F);
@@ -2322,19 +2310,32 @@ static int it6505_process_hpd_irq(struct it6505 *it6505)
static void it6505_irq_hpd(struct it6505 *it6505)
{
struct device *dev = &it6505->client->dev;
+ int dp_sink_count;
it6505->hpd_state = it6505_get_sink_hpd_status(it6505);
DRM_DEV_DEBUG_DRIVER(dev, "hpd change interrupt, change to %s",
it6505->hpd_state ? "high" : "low");
- if (it6505->bridge.dev)
- drm_helper_hpd_irq_event(it6505->bridge.dev);
- DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count: %d",
- it6505->sink_count);
-
if (it6505->hpd_state) {
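+		/* Wait for the extcon work to power on the bridge before any AUX access. */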
- wait_for_completion_timeout(&it6505->wait_edid_complete,
- msecs_to_jiffies(6000));
+ wait_for_completion_timeout(&it6505->extcon_completion,
+ msecs_to_jiffies(1000));
+ it6505_aux_on(it6505);
+ if (it6505->dpcd[0] == 0) {
+ it6505_get_dpcd(it6505, DP_DPCD_REV, it6505->dpcd,
+ ARRAY_SIZE(it6505->dpcd));
+ it6505_variable_config(it6505);
+ it6505_parse_link_capabilities(it6505);
+ }
+ it6505->auto_train_retry = AUTO_TRAIN_RETRY;
+
+ it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
+ DP_SET_POWER_D0);
+ dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
+ it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count: %d",
+ it6505->sink_count);
+
it6505_lane_termination_on(it6505);
it6505_lane_power_on(it6505);
@@ -2362,6 +2363,9 @@ static void it6505_irq_hpd(struct it6505 *it6505)
it6505_lane_off(it6505);
it6505_link_reset_step_train(it6505);
}
+
+ if (it6505->bridge.dev)
+ drm_helper_hpd_irq_event(it6505->bridge.dev);
}
static void it6505_irq_hpd_irq(struct it6505 *it6505)
@@ -2490,8 +2494,7 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
};
int int_status[3], i;
- msleep(100);
- mutex_lock(&it6505->extcon_lock);
+ mutex_lock(&it6505->irq_lock);
if (it6505->enable_drv_hold || !it6505->powered)
goto unlock;
@@ -2521,7 +2524,7 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
}
unlock:
- mutex_unlock(&it6505->extcon_lock);
+ mutex_unlock(&it6505->irq_lock);
return IRQ_HANDLED;
}
@@ -2624,26 +2627,14 @@ static enum drm_connector_status it6505_detect(struct it6505 *it6505)
goto unlock;
if (it6505->enable_drv_hold) {
- status = it6505_get_sink_hpd_status(it6505) ?
- connector_status_connected :
- connector_status_disconnected;
+ status = it6505->hpd_state ? connector_status_connected :
+ connector_status_disconnected;
goto unlock;
}
- if (it6505_get_sink_hpd_status(it6505)) {
- it6505_aux_on(it6505);
- it6505_drm_dp_link_probe(&it6505->aux, &it6505->link);
+ if (it6505->hpd_state) {
it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
DP_SET_POWER_D0);
- it6505->auto_train_retry = AUTO_TRAIN_RETRY;
-
- if (it6505->dpcd[0] == 0) {
- it6505_get_dpcd(it6505, DP_DPCD_REV, it6505->dpcd,
- ARRAY_SIZE(it6505->dpcd));
- it6505_variable_config(it6505);
- it6505_parse_link_capabilities(it6505);
- }
-
dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d branch:%d",
@@ -2685,31 +2676,44 @@ static void it6505_extcon_work(struct work_struct *work)
{
struct it6505 *it6505 = container_of(work, struct it6505, extcon_wq);
struct device *dev = &it6505->client->dev;
- int state = extcon_get_state(it6505->extcon, EXTCON_DISP_DP);
- unsigned int pwroffretry = 0;
+ int state, ret;
if (it6505->enable_drv_hold)
return;
mutex_lock(&it6505->extcon_lock);
+ state = extcon_get_state(it6505->extcon, EXTCON_DISP_DP);
DRM_DEV_DEBUG_DRIVER(dev, "EXTCON_DISP_DP = 0x%02x", state);
- if (state > 0) {
+
+ if (state == it6505->extcon_state || unlikely(state < 0))
+ goto unlock;
+ it6505->extcon_state = state;
+ if (state) {
DRM_DEV_DEBUG_DRIVER(dev, "start to power on");
msleep(100);
- it6505_poweron(it6505);
+ ret = pm_runtime_get_sync(dev);
+
+ /*
+ * On system resume, extcon_work can be triggered before
+ * pm_runtime_force_resume re-enables runtime power management.
+ * Handling the error here to make sure the bridge is powered on.
+ */
+ if (ret < 0)
+ it6505_poweron(it6505);
+
+ complete_all(&it6505->extcon_completion);
} else {
DRM_DEV_DEBUG_DRIVER(dev, "start to power off");
- while (it6505_poweroff(it6505) && pwroffretry++ < 5) {
- DRM_DEV_DEBUG_DRIVER(dev, "power off fail %d times",
- pwroffretry);
- }
+ pm_runtime_put_sync(dev);
+ reinit_completion(&it6505->extcon_completion);
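+		/* Re-arm the completion so the next connect waits for power-up again. */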
drm_helper_hpd_irq_event(it6505->bridge.dev);
memset(it6505->dpcd, 0, sizeof(it6505->dpcd));
DRM_DEV_DEBUG_DRIVER(dev, "power off it6505 success!");
}
+unlock:
mutex_unlock(&it6505->extcon_lock);
}
@@ -2860,10 +2864,7 @@ static int it6505_bridge_attach(struct drm_bridge *bridge,
}
/* Register aux channel */
- it6505->aux.name = "DP-AUX";
- it6505->aux.dev = dev;
it6505->aux.drm_dev = bridge->dev;
- it6505->aux.transfer = it6505_aux_transfer;
ret = drm_dp_aux_register(&it6505->aux);
@@ -2980,6 +2981,28 @@ static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
}
}
+static void it6505_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_state)
+{
+ struct it6505 *it6505 = bridge_to_it6505(bridge);
+ struct device *dev = &it6505->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "start");
+
+ pm_runtime_get_sync(dev);
+}
+
+static void it6505_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_state)
+{
+ struct it6505 *it6505 = bridge_to_it6505(bridge);
+ struct device *dev = &it6505->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "start");
+
+ pm_runtime_put_sync(dev);
+}
+
static enum drm_connector_status
it6505_bridge_detect(struct drm_bridge *bridge)
{
@@ -3014,6 +3037,8 @@ static const struct drm_bridge_funcs it6505_bridge_funcs = {
.mode_valid = it6505_bridge_mode_valid,
.atomic_enable = it6505_bridge_atomic_enable,
.atomic_disable = it6505_bridge_atomic_disable,
+ .atomic_pre_enable = it6505_bridge_atomic_pre_enable,
+ .atomic_post_disable = it6505_bridge_atomic_post_disable,
.detect = it6505_bridge_detect,
.get_edid = it6505_bridge_get_edid,
};
@@ -3032,8 +3057,10 @@ static __maybe_unused int it6505_bridge_suspend(struct device *dev)
return it6505_poweroff(it6505);
}
-static SIMPLE_DEV_PM_OPS(it6505_bridge_pm_ops, it6505_bridge_suspend,
- it6505_bridge_resume);
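+/* System sleep reuses the runtime-PM callbacks via force suspend/resume. */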
+static const struct dev_pm_ops it6505_bridge_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(it6505_bridge_suspend, it6505_bridge_resume, NULL)
+};
static int it6505_init_pdata(struct it6505 *it6505)
{
@@ -3250,6 +3277,7 @@ static int it6505_i2c_probe(struct i2c_client *client,
if (!it6505)
return -ENOMEM;
+ mutex_init(&it6505->irq_lock);
mutex_init(&it6505->extcon_lock);
mutex_init(&it6505->mode_lock);
mutex_init(&it6505->aux_lock);
@@ -3305,7 +3333,7 @@ static int it6505_i2c_probe(struct i2c_client *client,
INIT_WORK(&it6505->link_works, it6505_link_training_work);
INIT_WORK(&it6505->hdcp_wait_ksv_list, it6505_hdcp_wait_ksv_list);
INIT_DELAYED_WORK(&it6505->hdcp_work, it6505_hdcp_work);
- init_completion(&it6505->wait_edid_complete);
+ init_completion(&it6505->extcon_completion);
memset(it6505->dpcd, 0, sizeof(it6505->dpcd));
it6505->powered = false;
it6505->enable_drv_hold = DEFAULT_DRV_HOLD;
@@ -3315,6 +3343,12 @@ static int it6505_i2c_probe(struct i2c_client *client,
DRM_DEV_DEBUG_DRIVER(dev, "it6505 device name: %s", dev_name(dev));
debugfs_init(it6505);
+ pm_runtime_enable(dev);
+
+ it6505->aux.name = "DP-AUX";
+ it6505->aux.dev = dev;
+ it6505->aux.transfer = it6505_aux_transfer;
+ drm_dp_aux_init(&it6505->aux);
it6505->bridge.funcs = &it6505_bridge_funcs;
it6505->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -3325,7 +3359,7 @@ static int it6505_i2c_probe(struct i2c_client *client,
return 0;
}
-static int it6505_i2c_remove(struct i2c_client *client)
+static void it6505_i2c_remove(struct i2c_client *client)
{
struct it6505 *it6505 = i2c_get_clientdata(client);
@@ -3333,8 +3367,6 @@ static int it6505_i2c_remove(struct i2c_client *client)
drm_dp_aux_unregister(&it6505->aux);
it6505_debugfs_remove(it6505);
it6505_poweroff(it6505);
-
- return 0;
}
static const struct i2c_device_id it6505_id[] = {
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 44278d54d35d..4f6f1deba28c 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -1623,15 +1623,13 @@ static int it66121_probe(struct i2c_client *client,
return 0;
}
-static int it66121_remove(struct i2c_client *client)
+static void it66121_remove(struct i2c_client *client)
{
struct it66121_ctx *ctx = i2c_get_clientdata(client);
ite66121_power_off(ctx);
drm_bridge_remove(&ctx->bridge);
mutex_destroy(&ctx->lock);
-
- return 0;
}
static const struct of_device_id it66121_dt_match[] = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 28bad30dc4e5..a98efef0ba0e 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -165,30 +165,38 @@ static int lt8912_write_rxlogicres_config(struct lt8912 *lt)
return ret;
};
+/* Enable the LVDS output with a hardcoded configuration; not required for HDMI output. */
static int lt8912_write_lvds_config(struct lt8912 *lt)
{
const struct reg_sequence seq[] = {
+ // lvds power up
{0x44, 0x30},
{0x51, 0x05},
- {0x50, 0x24},
- {0x51, 0x2d},
- {0x52, 0x04},
- {0x69, 0x0e},
+
+ // core pll bypass
+ {0x50, 0x24}, // cp=50uA
+ {0x51, 0x2d}, // Pix_clk as reference, second order passive LPF PLL
+ {0x52, 0x04}, // loopdiv=0, use second-order PLL
+ {0x69, 0x0e}, // CP_PRESET_DIV_RATIO
{0x69, 0x8e},
{0x6a, 0x00},
- {0x6c, 0xb8},
+ {0x6c, 0xb8}, // RGD_CP_SOFT_K_EN,RGD_CP_SOFT_K[13:8]
{0x6b, 0x51},
- {0x04, 0xfb},
+
+ {0x04, 0xfb}, // core pll reset
{0x04, 0xff},
- {0x7f, 0x00},
- {0xa8, 0x13},
- {0x02, 0xf7},
+
+ // scaler bypass
+ {0x7f, 0x00}, // disable scaler
+ {0xa8, 0x13}, // 0x13: JEIDA, 0x33: VESA
+
+ {0x02, 0xf7}, // lvds pll reset
{0x02, 0xff},
{0x03, 0xcf},
{0x03, 0xff},
};
- return regmap_multi_reg_write(lt->regmap[I2C_CEC_DSI], seq, ARRAY_SIZE(seq));
+ return regmap_multi_reg_write(lt->regmap[I2C_MAIN], seq, ARRAY_SIZE(seq));
};
static inline struct lt8912 *bridge_to_lt8912(struct drm_bridge *b)
@@ -268,7 +276,7 @@ static int lt8912_video_setup(struct lt8912 *lt)
u32 hactive, h_total, hpw, hfp, hbp;
u32 vactive, v_total, vpw, vfp, vbp;
u8 settle = 0x08;
- int ret;
+ int ret, hsync_activehigh, vsync_activehigh;
if (!lt)
return -EINVAL;
@@ -278,12 +286,14 @@ static int lt8912_video_setup(struct lt8912 *lt)
hpw = lt->mode.hsync_len;
hbp = lt->mode.hback_porch;
h_total = hactive + hfp + hpw + hbp;
+ hsync_activehigh = lt->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH;
vactive = lt->mode.vactive;
vfp = lt->mode.vfront_porch;
vpw = lt->mode.vsync_len;
vbp = lt->mode.vback_porch;
v_total = vactive + vfp + vpw + vbp;
+ vsync_activehigh = lt->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH;
if (vactive <= 600)
settle = 0x04;
@@ -317,6 +327,13 @@ static int lt8912_video_setup(struct lt8912 *lt)
ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3e, hfp & 0xff);
ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3f, hfp >> 8);
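+	/* Program the sync polarities and HDMI/DVI selection in the main register page. */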
+ ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xab, BIT(0),
+ vsync_activehigh ? BIT(0) : 0);
+ ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xab, BIT(1),
+ hsync_activehigh ? BIT(1) : 0);
+ ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xb2, BIT(0),
+ lt->connector.display_info.is_hdmi ? BIT(0) : 0);
+
return ret;
}
@@ -714,7 +731,7 @@ err_dt_parse:
return ret;
}
-static int lt8912_remove(struct i2c_client *client)
+static void lt8912_remove(struct i2c_client *client)
{
struct lt8912 *lt = i2c_get_clientdata(client);
@@ -722,7 +739,6 @@ static int lt8912_remove(struct i2c_client *client)
drm_bridge_remove(&lt->bridge);
lt8912_free_i2c(lt);
lt8912_put_dt(lt);
- return 0;
}
static const struct of_device_id lt8912_dt_match[] = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 9a3e90427d12..933ca028d612 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -766,13 +766,11 @@ static int lt9211_probe(struct i2c_client *client,
return ret;
}
-static int lt9211_remove(struct i2c_client *client)
+static void lt9211_remove(struct i2c_client *client)
{
struct lt9211 *ctx = i2c_get_clientdata(client);
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static struct i2c_device_id lt9211_id[] = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 5fccacc159f0..7c0a99173b39 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -1217,7 +1217,7 @@ err_of_put:
return ret;
}
-static int lt9611_remove(struct i2c_client *client)
+static void lt9611_remove(struct i2c_client *client)
{
struct lt9611 *lt9611 = i2c_get_clientdata(client);
@@ -1229,8 +1229,6 @@ static int lt9611_remove(struct i2c_client *client)
of_node_put(lt9611->dsi1_node);
of_node_put(lt9611->dsi0_node);
-
- return 0;
}
static struct i2c_device_id lt9611_id[] = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index fdf12d4c6416..fa1ee6264d92 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -978,7 +978,7 @@ err_of_put:
return ret;
}
-static int lt9611uxc_remove(struct i2c_client *client)
+static void lt9611uxc_remove(struct i2c_client *client)
{
struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
@@ -993,8 +993,6 @@ static int lt9611uxc_remove(struct i2c_client *client)
of_node_put(lt9611uxc->dsi1_node);
of_node_put(lt9611uxc->dsi0_node);
-
- return 0;
}
static struct i2c_device_id lt9611uxc_id[] = {
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 72248a565579..97359f807bfc 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -357,11 +357,9 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
return ge_b850v3_register();
}
-static int stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c)
+static void stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c)
{
ge_b850v3_lvds_remove();
-
- return 0;
}
static const struct i2c_device_id stdp4028_ge_b850v3_fw_i2c_table[] = {
@@ -407,11 +405,9 @@ static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c,
return ge_b850v3_register();
}
-static int stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c)
+static void stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c)
{
ge_b850v3_lvds_remove();
-
- return 0;
}
static const struct i2c_device_id stdp2690_ge_b850v3_fw_i2c_table[] = {
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 1ab91f4e057b..0851101a8c72 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -315,13 +315,11 @@ static int ptn3460_probe(struct i2c_client *client,
return 0;
}
-static int ptn3460_remove(struct i2c_client *client)
+static void ptn3460_remove(struct i2c_client *client)
{
struct ptn3460_bridge *ptn_bridge = i2c_get_clientdata(client);
drm_bridge_remove(&ptn_bridge->bridge);
-
- return 0;
}
static const struct i2c_device_id ptn3460_i2c_table[] = {
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index b5750e5f71d7..309de802863d 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -520,14 +520,12 @@ static int ps8622_probe(struct i2c_client *client,
return 0;
}
-static int ps8622_remove(struct i2c_client *client)
+static void ps8622_remove(struct i2c_client *client)
{
struct ps8622_bridge *ps8622 = i2c_get_clientdata(client);
backlight_device_unregister(ps8622->bl);
drm_bridge_remove(&ps8622->bridge);
-
- return 0;
}
static const struct i2c_device_id ps8622_i2c_table[] = {
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index d7483c13c569..5be6562c2a19 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -286,7 +286,6 @@ static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux,
}
switch (data & SWAUX_STATUS_MASK) {
- /* Ignore the DEFER cases as they are already handled in hardware */
case SWAUX_STATUS_NACK:
case SWAUX_STATUS_I2C_NACK:
/*
@@ -303,6 +302,14 @@ static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux,
case SWAUX_STATUS_ACKM:
len = data & SWAUX_M_MASK;
break;
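+	/*
+	 * Forward DEFER replies to the DP AUX core so that it retries the
+	 * transfer instead of failing it.
+	 */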
+ case SWAUX_STATUS_DEFER:
+ case SWAUX_STATUS_I2C_DEFER:
+ if (is_native_aux)
+ msg->reply |= DP_AUX_NATIVE_REPLY_DEFER;
+ else
+ msg->reply |= DP_AUX_I2C_REPLY_DEFER;
+ len = data & SWAUX_M_MASK;
+ break;
case SWAUX_STATUS_INVALID:
return -EOPNOTSUPP;
case SWAUX_STATUS_TIMEOUT:
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 7ab38d734ad6..878fb7d3732b 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -1145,7 +1145,7 @@ static int sii902x_probe(struct i2c_client *client,
return ret;
}
-static int sii902x_remove(struct i2c_client *client)
+static void sii902x_remove(struct i2c_client *client)
{
struct sii902x *sii902x = i2c_get_clientdata(client);
@@ -1154,8 +1154,6 @@ static int sii902x_remove(struct i2c_client *client)
drm_bridge_remove(&sii902x->bridge);
regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
sii902x->supplies);
-
- return 0;
}
static const struct of_device_id sii902x_dt_ids[] = {
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index 15c98a7bd81c..5b3061d4b5c3 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -936,14 +936,12 @@ static int sii9234_probe(struct i2c_client *client,
return 0;
}
-static int sii9234_remove(struct i2c_client *client)
+static void sii9234_remove(struct i2c_client *client)
{
struct sii9234 *ctx = i2c_get_clientdata(client);
sii9234_cable_out(ctx);
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static const struct of_device_id sii9234_dt_match[] = {
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index ab0bce4a988c..511982a1cedb 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2346,7 +2346,7 @@ static int sii8620_probe(struct i2c_client *client,
return 0;
}
-static int sii8620_remove(struct i2c_client *client)
+static void sii8620_remove(struct i2c_client *client)
{
struct sii8620 *ctx = i2c_get_clientdata(client);
@@ -2360,8 +2360,6 @@ static int sii8620_remove(struct i2c_client *client)
sii8620_cable_out(ctx);
}
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static const struct of_device_id sii8620_dt_match[] = {
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 89e060b273ef..2a58eb271f70 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -2184,13 +2184,11 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
return 0;
}
-static int tc_remove(struct i2c_client *client)
+static void tc_remove(struct i2c_client *client)
{
struct tc_data *tc = i2c_get_clientdata(client);
drm_bridge_remove(&tc->bridge);
-
- return 0;
}
static const struct i2c_device_id tc358767_i2c_ids[] = {
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index fd585bf925fe..4c4b77ce8aba 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -1072,13 +1072,11 @@ static int tc358768_i2c_probe(struct i2c_client *client,
return mipi_dsi_host_register(&priv->dsi_host);
}
-static int tc358768_i2c_remove(struct i2c_client *client)
+static void tc358768_i2c_remove(struct i2c_client *client)
{
struct tc358768_priv *priv = i2c_get_clientdata(client);
mipi_dsi_host_unregister(&priv->dsi_host);
-
- return 0;
}
static struct i2c_driver tc358768_driver = {
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index f1c6e62b0e1d..3ceb0e9f9bdc 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -408,7 +408,7 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
(val >> 8) & 0xFF, val & 0xFF);
d2l_write(tc->i2c, SYSRST, SYS_RST_REG | SYS_RST_DSIRX | SYS_RST_BM |
- SYS_RST_LCD | SYS_RST_I2CM | SYS_RST_I2CS);
+ SYS_RST_LCD | SYS_RST_I2CM);
usleep_range(30000, 40000);
d2l_write(tc->i2c, PPI_TX_RX_TA, TTA_GET | TTA_SURE);
@@ -704,13 +704,11 @@ err_bridge_remove:
return ret;
}
-static int tc_remove(struct i2c_client *client)
+static void tc_remove(struct i2c_client *client)
{
struct tc_data *tc = i2c_get_clientdata(client);
drm_bridge_remove(&tc->bridge);
-
- return 0;
}
static const struct i2c_device_id tc358775_i2c_ids[] = {
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index cef454862b67..186a9e2ff24d 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -379,14 +379,12 @@ err_remove_bridge:
return ret;
}
-static int dlpc3433_remove(struct i2c_client *client)
+static void dlpc3433_remove(struct i2c_client *client)
{
struct dlpc *dlpc = i2c_get_clientdata(client);
drm_bridge_remove(&dlpc->bridge);
of_node_put(dlpc->host_node);
-
- return 0;
}
static const struct i2c_device_id dlpc3433_id[] = {
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 14e7aa77e758..7ba9467fff12 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -708,13 +708,11 @@ err_remove_bridge:
return ret;
}
-static int sn65dsi83_remove(struct i2c_client *client)
+static void sn65dsi83_remove(struct i2c_client *client)
{
struct sn65dsi83 *ctx = i2c_get_clientdata(client);
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static struct i2c_device_id sn65dsi83_id[] = {
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 401fe61217c7..b9635abbad16 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -394,11 +394,9 @@ static int tfp410_i2c_probe(struct i2c_client *client,
return tfp410_init(&client->dev, true);
}
-static int tfp410_i2c_remove(struct i2c_client *client)
+static void tfp410_i2c_remove(struct i2c_client *client)
{
tfp410_fini(&client->dev);
-
- return 0;
}
static const struct i2c_device_id tfp410_i2c_ids[] = {
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index 52cdda1180d9..17ac4a1006a8 100644
--- a/drivers/gpu/drm/display/Makefile
+++ b/drivers/gpu/drm/display/Makefile
@@ -3,13 +3,15 @@
obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o
drm_display_helper-y := drm_display_helper_mod.o
-drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += drm_dp_dual_mode_helper.o \
- drm_dp_helper.o \
- drm_dp_mst_topology.o \
- drm_dsc_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
+ drm_dp_dual_mode_helper.o \
+ drm_dp_helper.o \
+ drm_dp_mst_topology.o \
+ drm_dsc_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
-drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += drm_hdmi_helper.o \
- drm_scdc_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
+ drm_hdmi_helper.o \
+ drm_scdc_helper.o
drm_display_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
drm_display_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 92990a3d577a..16565a0a5da6 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -30,6 +30,7 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string_helpers.h>
+#include <linux/dynamic_debug.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
@@ -40,6 +41,18 @@
#include "drm_dp_helper_internal.h"
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
struct dp_aux_backlight {
struct backlight_device *base;
struct drm_dp_aux *aux;
@@ -2670,17 +2683,8 @@ int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
struct drm_dp_phy_test_params *data, u8 dp_rev)
{
int err, i;
- u8 link_config[2];
u8 test_pattern;
- link_config[0] = drm_dp_link_rate_to_bw_code(data->link_rate);
- link_config[1] = data->num_lanes;
- if (data->enhanced_frame_cap)
- link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, link_config, 2);
- if (err < 0)
- return err;
-
test_pattern = data->phy_pattern;
if (dp_rev < 0x12) {
test_pattern = (test_pattern << 2) &
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 98cc3137c062..1a586b3c454b 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -924,59 +924,35 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
/**
- * drm_atomic_helper_check_crtc_state() - Check CRTC state for validity
+ * drm_atomic_helper_check_crtc_primary_plane() - Check CRTC state for primary plane
* @crtc_state: CRTC state to check
- * @can_disable_primary_planes: can the CRTC be enabled without a primary plane?
*
- * Checks that a desired CRTC update is valid. Drivers that provide
- * their own CRTC handling rather than helper-provided implementations may
- * still wish to call this function to avoid duplication of error checking
- * code.
- *
- * Note that @can_disable_primary_planes only tests if the CRTC can be
- * enabled without a primary plane. To test if a primary plane can be updated
- * without a CRTC, use drm_atomic_helper_check_plane_state() in the plane's
- * atomic check.
+ * Checks that a CRTC has at least one primary plane attached to it, which is
+ * a requirement on some hardware. Note that this only involves the CRTC side
+ * of the test. To test if the primary plane is visible or if it can be updated
+ * without the CRTC being enabled, use drm_atomic_helper_check_plane_state() in
+ * the plane's atomic check.
*
* RETURNS:
- * Zero if update appears valid, error code on failure
+ * 0 if a primary plane is attached to the CRTC, or an error code otherwise
*/
-int drm_atomic_helper_check_crtc_state(struct drm_crtc_state *crtc_state,
- bool can_disable_primary_planes)
+int drm_atomic_helper_check_crtc_primary_plane(struct drm_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc_state->crtc->dev;
- struct drm_atomic_state *state = crtc_state->state;
-
- if (!crtc_state->enable)
- return 0;
+ struct drm_crtc *crtc = crtc_state->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_plane *plane;
/* needs at least one primary plane to be enabled */
- if (!can_disable_primary_planes) {
- bool has_primary_plane = false;
- struct drm_plane *plane;
-
- drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
- struct drm_plane_state *plane_state;
-
- if (plane->type != DRM_PLANE_TYPE_PRIMARY)
- continue;
- plane_state = drm_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
- if (plane_state->fb && plane_state->crtc) {
- has_primary_plane = true;
- break;
- }
- }
- if (!has_primary_plane) {
- drm_dbg_kms(dev, "Cannot enable CRTC without a primary plane.\n");
- return -EINVAL;
- }
+ drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ return 0;
}
- return 0;
+ drm_dbg_atomic(dev, "[CRTC:%d:%s] primary plane missing\n", crtc->base.id, crtc->name);
+
+ return -EINVAL;
}
-EXPORT_SYMBOL(drm_atomic_helper_check_crtc_state);
+EXPORT_SYMBOL(drm_atomic_helper_check_crtc_primary_plane);
/**
* drm_atomic_helper_check_planes - validate state object for planes changes
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index bf31b9d92094..dfb57217253b 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -464,12 +464,12 @@ void drm_atomic_helper_connector_reset(struct drm_connector *connector)
EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
/**
- * drm_atomic_helper_connector_tv_reset - Resets TV connector properties
+ * drm_atomic_helper_connector_tv_margins_reset - Resets TV connector margin properties
* @connector: DRM connector
*
* Resets the TV margin properties attached to a connector.
*/
-void drm_atomic_helper_connector_tv_reset(struct drm_connector *connector)
+void drm_atomic_helper_connector_tv_margins_reset(struct drm_connector *connector)
{
struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
struct drm_connector_state *state = connector->state;
@@ -479,7 +479,7 @@ void drm_atomic_helper_connector_tv_reset(struct drm_connector *connector)
state->tv.margins.top = cmdline->tv_margins.top;
state->tv.margins.bottom = cmdline->tv_margins.bottom;
}
-EXPORT_SYMBOL(drm_atomic_helper_connector_tv_reset);
+EXPORT_SYMBOL(drm_atomic_helper_connector_tv_margins_reset);
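
A hedged sketch of where the renamed helper sits in a driver's connector reset path; foo_connector_reset() is an assumed name:

	static void foo_connector_reset(struct drm_connector *connector)
	{
		drm_atomic_helper_connector_reset(connector);
		/* Re-apply the video= command-line TV margins on the fresh state. */
		drm_atomic_helper_connector_tv_margins_reset(connector);
	}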
/**
* __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 79730fa1dd8e..c06d0639d552 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -687,6 +687,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
*/
return -EINVAL;
} else if (property == config->tv_select_subconnector_property) {
+ state->tv.select_subconnector = val;
+ } else if (property == config->tv_subconnector_property) {
state->tv.subconnector = val;
} else if (property == config->tv_left_margin_property) {
state->tv.margins.left = val;
@@ -795,6 +797,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
else
*val = connector->dpms;
} else if (property == config->tv_select_subconnector_property) {
+ *val = state->tv.select_subconnector;
+ } else if (property == config->tv_subconnector_property) {
*val = state->tv.subconnector;
} else if (property == config->tv_left_margin_property) {
*val = state->tv.margins.left;
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 2b230b4d6942..fd67efe37c63 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -235,10 +235,10 @@ static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
struct drm_device *dev = buffer->client->dev;
- drm_gem_vunmap(buffer->gem, &buffer->map);
-
- if (buffer->gem)
+ if (buffer->gem) {
+ drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
drm_gem_object_put(buffer->gem);
+ }
if (buffer->handle)
drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file);
@@ -323,7 +323,7 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer,
* fd_install step out of the driver backend hooks, to make that
* final step optional for internal users.
*/
- ret = drm_gem_vmap(buffer->gem, map);
+ ret = drm_gem_vmap_unlocked(buffer->gem, map);
if (ret)
return ret;
@@ -345,7 +345,7 @@ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
{
struct iosys_map *map = &buffer->map;
- drm_gem_vunmap(buffer->gem, map);
+ drm_gem_vunmap_unlocked(buffer->gem, map);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index e3142c8142b3..547356e00341 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -274,6 +274,7 @@ static int __drm_connector_init(struct drm_device *dev,
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
+ mutex_init(&connector->edid_override_mutex);
connector->edid_blob_ptr = NULL;
connector->epoch_counter = 0;
connector->tile_blob_ptr = NULL;
@@ -435,7 +436,7 @@ int drmm_connector_init(struct drm_device *dev,
if (drm_WARN_ON(dev, funcs && funcs->destroy))
return -EINVAL;
- ret = __drm_connector_init(dev, connector, funcs, connector_type, NULL);
+ ret = __drm_connector_init(dev, connector, funcs, connector_type, ddc);
if (ret)
return ret;
@@ -582,6 +583,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
mutex_destroy(&connector->mutex);
memset(connector, 0, sizeof(*connector));
+
+ if (dev->registered)
+ drm_sysfs_hotplug_event(dev);
}
EXPORT_SYMBOL(drm_connector_cleanup);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 457448cc60f7..f5fb22e0d033 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -32,6 +32,7 @@
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
+#include <linux/dynamic_debug.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -50,6 +51,18 @@
#include "drm_crtc_helper_internal.h"
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
/**
* DOC: overview
*
@@ -421,6 +434,32 @@ done:
}
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
+/**
+ * drm_crtc_helper_atomic_check() - Helper to check CRTC atomic-state
+ * @crtc: CRTC to check
+ * @state: atomic state object
+ *
+ * Provides a default CRTC-state check handler for CRTCs that only have
+ * one primary plane attached to them.
+ *
+ * This is often the case for the CRTC of simple framebuffers. See also
+ * drm_plane_helper_atomic_check() for the corresponding plane-state check
+ * helper function.
+ *
+ * RETURNS:
+ * Zero on success, or an errno code otherwise.
+ */
+int drm_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ if (!new_crtc_state->enable)
+ return 0;
+
+ return drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
+}
+EXPORT_SYMBOL(drm_crtc_helper_atomic_check);
+
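For simple pipelines the helper can be wired up directly; a sketch with an assumed foo_crtc_helper_funcs:

	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
		/* Rejects an enabled CRTC that has no primary plane attached. */
		.atomic_check = drm_crtc_helper_atomic_check,
	};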
static void
drm_crtc_helper_disable(struct drm_crtc *crtc)
{
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 56041b604881..501a10edd0e1 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -56,9 +56,10 @@ struct drm_plane;
struct drm_plane_state;
struct drm_property;
struct edid;
+struct fwnode_handle;
struct kref;
+struct seq_file;
struct work_struct;
-struct fwnode_handle;
/* drm_crtc.c */
int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
@@ -286,5 +287,17 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
/* drm_edid.c */
void drm_mode_fixup_1366x768(struct drm_display_mode *mode);
+int drm_edid_override_show(struct drm_connector *connector, struct seq_file *m);
int drm_edid_override_set(struct drm_connector *connector, const void *edid, size_t size);
int drm_edid_override_reset(struct drm_connector *connector);
+
+/* drm_edid_load.c */
+#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
+const struct drm_edid *drm_edid_load_firmware(struct drm_connector *connector);
+#else
+static inline const struct drm_edid *
+drm_edid_load_firmware(struct drm_connector *connector)
+{
+ return ERR_PTR(-ENOENT);
+}
+#endif
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 493922069c90..ee445f4605ba 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -328,13 +328,7 @@ static ssize_t connector_write(struct file *file, const char __user *ubuf,
static int edid_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct drm_property_blob *edid = connector->edid_blob_ptr;
-
- if (connector->override_edid && edid)
- seq_write(m, edid->data, edid->length);
-
- return 0;
+ return drm_edid_override_show(m->private, m);
}
static int edid_open(struct inode *inode, struct file *file)
@@ -377,8 +371,8 @@ static int vrr_range_show(struct seq_file *m, void *data)
if (connector->status != connector_status_connected)
return -ENODEV;
- seq_printf(m, "Min: %u\n", (u8)connector->display_info.monitor_range.min_vfreq);
- seq_printf(m, "Max: %u\n", (u8)connector->display_info.monitor_range.max_vfreq);
+ seq_printf(m, "Min: %u\n", connector->display_info.monitor_range.min_vfreq);
+ seq_printf(m, "Max: %u\n", connector->display_info.monitor_range.max_vfreq);
return 0;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 90a5e26eafa8..b2d61c05f559 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1572,15 +1572,6 @@ struct drm_edid {
const struct edid *edid;
};
-static bool version_greater(const struct drm_edid *drm_edid,
- u8 version, u8 revision)
-{
- const struct edid *edid = drm_edid->edid;
-
- return edid->version > version ||
- (edid->version == version && edid->revision > revision);
-}
-
static int edid_hfeeodb_extension_block_count(const struct edid *edid);
static int edid_hfeeodb_block_count(const struct edid *edid)
@@ -1622,7 +1613,8 @@ static const void *edid_extension_block_data(const struct edid *edid, int index)
return edid_block_data(edid, index + 1);
}
-static int drm_edid_block_count(const struct drm_edid *drm_edid)
+/* EDID block count indicated in EDID, may exceed allocated size */
+static int __drm_edid_block_count(const struct drm_edid *drm_edid)
{
int num_blocks;
@@ -1642,12 +1634,18 @@ static int drm_edid_block_count(const struct drm_edid *drm_edid)
num_blocks = eeodb;
}
- /* Limit by allocated size */
- num_blocks = min(num_blocks, (int)drm_edid->size / EDID_LENGTH);
-
return num_blocks;
}
+/* EDID block count, limited by allocated size */
+static int drm_edid_block_count(const struct drm_edid *drm_edid)
+{
+ /* Limit by allocated size */
+ return min(__drm_edid_block_count(drm_edid),
+ (int)drm_edid->size / EDID_LENGTH);
+}
+
+/* EDID extension block count, limited by allocated size */
static int drm_edid_extension_block_count(const struct drm_edid *drm_edid)
{
return drm_edid_block_count(drm_edid) - 1;
@@ -1981,7 +1979,7 @@ bool drm_edid_block_valid(u8 *_block, int block_num, bool print_bad_edid,
status = edid_block_check(block, is_base_block);
if (status == EDID_BLOCK_HEADER_REPAIR) {
- DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+ DRM_DEBUG_KMS("Fixing EDID header, your hardware may be failing\n");
edid_header_fix(block);
/* Retry with fixed header, update status if that worked. */
@@ -2042,6 +2040,36 @@ bool drm_edid_is_valid(struct edid *edid)
}
EXPORT_SYMBOL(drm_edid_is_valid);
+/**
+ * drm_edid_valid - sanity check EDID data
+ * @drm_edid: EDID data
+ *
+ * Sanity check an EDID. Cross check block count against allocated size and
+ * checksum the blocks.
+ *
+ * Return: True if the EDID data is valid, false otherwise.
+ */
+bool drm_edid_valid(const struct drm_edid *drm_edid)
+{
+ int i;
+
+ if (!drm_edid)
+ return false;
+
+ if (edid_size_by_blocks(__drm_edid_block_count(drm_edid)) != drm_edid->size)
+ return false;
+
+ for (i = 0; i < drm_edid_block_count(drm_edid); i++) {
+ const void *block = drm_edid_block_data(drm_edid, i);
+
+ if (!edid_block_valid(block, i == 0))
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(drm_edid_valid);
+
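The intended calling pattern for drm_edid_valid() mirrors the override path below: wrap raw bytes with drm_edid_alloc(), validate, and free on failure. A sketch, with buf/size assumed caller-provided:

	const struct drm_edid *drm_edid = drm_edid_alloc(buf, size);

	if (!drm_edid_valid(drm_edid)) {	/* NULL-safe: rejects a failed alloc */
		drm_edid_free(drm_edid);	/* NULL-safe as well */
		return -EINVAL;
	}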
static struct edid *edid_filter_invalid_blocks(struct edid *edid,
size_t *alloc_size)
{
@@ -2168,58 +2196,91 @@ static void connector_bad_edid(struct drm_connector *connector,
if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
return;
- drm_dbg_kms(connector->dev, "%s: EDID is invalid:\n", connector->name);
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] EDID is invalid:\n",
+ connector->base.id, connector->name);
for (i = 0; i < num_blocks; i++)
edid_block_dump(KERN_DEBUG, edid + i, i);
}
/* Get override or firmware EDID */
-static struct edid *drm_get_override_edid(struct drm_connector *connector,
- size_t *alloc_size)
+static const struct drm_edid *drm_edid_override_get(struct drm_connector *connector)
{
- struct edid *override = NULL;
+ const struct drm_edid *override = NULL;
- if (connector->override_edid)
- override = drm_edid_duplicate(connector->edid_blob_ptr->data);
+ mutex_lock(&connector->edid_override_mutex);
- if (!override)
- override = drm_load_edid_firmware(connector);
+ if (connector->edid_override)
+ override = drm_edid_dup(connector->edid_override);
- /* FIXME: Get alloc size from deeper down the stack */
- if (!IS_ERR_OR_NULL(override) && alloc_size)
- *alloc_size = edid_size(override);
+ mutex_unlock(&connector->edid_override_mutex);
+
+ if (!override)
+ override = drm_edid_load_firmware(connector);
return IS_ERR(override) ? NULL : override;
}
/* For debugfs edid_override implementation */
+int drm_edid_override_show(struct drm_connector *connector, struct seq_file *m)
+{
+ const struct drm_edid *drm_edid;
+
+ mutex_lock(&connector->edid_override_mutex);
+
+ drm_edid = connector->edid_override;
+ if (drm_edid)
+ seq_write(m, drm_edid->edid, drm_edid->size);
+
+ mutex_unlock(&connector->edid_override_mutex);
+
+ return 0;
+}
+
+/* For debugfs edid_override implementation */
int drm_edid_override_set(struct drm_connector *connector, const void *edid,
size_t size)
{
- int ret;
+ const struct drm_edid *drm_edid;
- if (size < EDID_LENGTH || edid_size(edid) > size)
+ drm_edid = drm_edid_alloc(edid, size);
+ if (!drm_edid_valid(drm_edid)) {
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] EDID override invalid\n",
+ connector->base.id, connector->name);
+ drm_edid_free(drm_edid);
return -EINVAL;
+ }
- connector->override_edid = false;
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] EDID override set\n",
+ connector->base.id, connector->name);
- ret = drm_connector_update_edid_property(connector, edid);
- if (!ret)
- connector->override_edid = true;
+ mutex_lock(&connector->edid_override_mutex);
- return ret;
+ drm_edid_free(connector->edid_override);
+ connector->edid_override = drm_edid;
+
+ mutex_unlock(&connector->edid_override_mutex);
+
+ return 0;
}
/* For debugfs edid_override implementation */
int drm_edid_override_reset(struct drm_connector *connector)
{
- connector->override_edid = false;
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] EDID override reset\n",
+ connector->base.id, connector->name);
+
+ mutex_lock(&connector->edid_override_mutex);
+
+ drm_edid_free(connector->edid_override);
+ connector->edid_override = NULL;
- return drm_connector_update_edid_property(connector, NULL);
+ mutex_unlock(&connector->edid_override_mutex);
+
+ return 0;
}
/**
- * drm_add_override_edid_modes - add modes from override/firmware EDID
+ * drm_edid_override_connector_update - add modes from override/firmware EDID
* @connector: connector we're probing
*
* Add modes from the override/firmware EDID, if available. Only to be used from
@@ -2229,24 +2290,25 @@ int drm_edid_override_reset(struct drm_connector *connector)
*
* Return: The number of modes added or 0 if we couldn't find any.
*/
-int drm_add_override_edid_modes(struct drm_connector *connector)
+int drm_edid_override_connector_update(struct drm_connector *connector)
{
- struct edid *override;
+ const struct drm_edid *override;
int num_modes = 0;
- override = drm_get_override_edid(connector, NULL);
+ override = drm_edid_override_get(connector);
if (override) {
- drm_connector_update_edid_property(connector, override);
- num_modes = drm_add_edid_modes(connector, override);
- kfree(override);
+ num_modes = drm_edid_connector_update(connector, override);
+
+ drm_edid_free(override);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] adding %d modes via fallback override/firmware EDID\n",
- connector->base.id, connector->name, num_modes);
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] adding %d modes via fallback override/firmware EDID\n",
+ connector->base.id, connector->name, num_modes);
}
return num_modes;
}
-EXPORT_SYMBOL(drm_add_override_edid_modes);
+EXPORT_SYMBOL(drm_edid_override_connector_update);
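
A hedged sketch of the probe-side fallback this export enables; foo_connector_get_modes() is hypothetical, and only the disconnected-with-override case is shown:

	static int foo_connector_get_modes(struct drm_connector *connector)
	{
		/* No sink detected: fall back to override/firmware EDID, if any. */
		if (connector->status == connector_status_disconnected)
			return drm_edid_override_connector_update(connector);

		return drm_connector_helper_get_modes(connector);
	}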
typedef int read_block_fn(void *context, u8 *buf, unsigned int block, size_t len);
@@ -2289,12 +2351,19 @@ static struct edid *_drm_do_get_edid(struct drm_connector *connector,
{
enum edid_block_status status;
int i, num_blocks, invalid_blocks = 0;
+ const struct drm_edid *override;
struct edid *edid, *new;
size_t alloc_size = EDID_LENGTH;
- edid = drm_get_override_edid(connector, &alloc_size);
- if (edid)
+ override = drm_edid_override_get(connector);
+ if (override) {
+ alloc_size = override->size;
+ edid = kmemdup(override->edid, alloc_size, GFP_KERNEL);
+ drm_edid_free(override);
+ if (!edid)
+ return NULL;
goto ok;
+ }
edid = kmalloc(alloc_size, GFP_KERNEL);
if (!edid)
@@ -2397,7 +2466,7 @@ fail:
* adapter and use drm_get_edid() instead of abusing this function.
*
* The EDID may be overridden using debugfs override_edid or firmware EDID
- * (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
+ * (drm_edid_load_firmware() and drm.edid_firmware parameter), in this priority
* order. Having either of them bypasses actual EDID reads.
*
* Return: Pointer to valid EDID or NULL if we couldn't find any.
@@ -2575,7 +2644,7 @@ EXPORT_SYMBOL(drm_get_edid);
* this function.
*
* The EDID may be overridden using debugfs override_edid or firmware EDID
- * (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
+ * (drm_edid_load_firmware() and drm.edid_firmware parameter), in this priority
* order. Having either of them bypasses actual EDID reads.
*
* The returned pointer must be freed using drm_edid_free().
@@ -2613,7 +2682,7 @@ EXPORT_SYMBOL(drm_edid_read_custom);
* Read EDID using the given I2C adapter.
*
* The EDID may be overridden using debugfs override_edid or firmware EDID
- * (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
+ * (drm_edid_load_firmware() and drm.edid_firmware parameter), in this priority
* order. Having either of them bypasses actual EDID reads.
*
* Prefer initializing connector->ddc with drm_connector_init_with_ddc() and
@@ -2649,7 +2718,7 @@ EXPORT_SYMBOL(drm_edid_read_ddc);
* Read EDID using the connector's I2C adapter.
*
* The EDID may be overridden using debugfs override_edid or firmware EDID
- * (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
+ * (drm_edid_load_firmware() and drm.edid_firmware parameter), in this priority
* order. Having either of them bypasses actual EDID reads.
*
* The returned pointer must be freed using drm_edid_free().
@@ -2984,7 +3053,7 @@ is_rb(const struct detailed_timing *descriptor, void *data)
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.cvt.flags) != 15);
if (descriptor->data.other_data.data.range.flags == DRM_EDID_CVT_SUPPORT_FLAG &&
- descriptor->data.other_data.data.range.formula.cvt.flags & 0x10)
+ descriptor->data.other_data.data.range.formula.cvt.flags & DRM_EDID_CVT_FLAGS_REDUCED_BLANKING)
*res = true;
}
@@ -3012,7 +3081,7 @@ find_gtf2(const struct detailed_timing *descriptor, void *data)
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.flags) != 10);
- if (descriptor->data.other_data.data.range.flags == 0x02)
+ if (descriptor->data.other_data.data.range.flags == DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG)
*res = descriptor;
}
@@ -3077,20 +3146,53 @@ drm_gtf2_2j(const struct drm_edid *drm_edid)
return descriptor ? descriptor->data.other_data.data.range.formula.gtf2.j : 0;
}
+static void
+get_timing_level(const struct detailed_timing *descriptor, void *data)
+{
+ int *res = data;
+
+ if (!is_display_descriptor(descriptor, EDID_DETAIL_MONITOR_RANGE))
+ return;
+
+ BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.flags) != 10);
+
+ switch (descriptor->data.other_data.data.range.flags) {
+ case DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG:
+ *res = LEVEL_GTF;
+ break;
+ case DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG:
+ *res = LEVEL_GTF2;
+ break;
+ case DRM_EDID_CVT_SUPPORT_FLAG:
+ *res = LEVEL_CVT;
+ break;
+ default:
+ break;
+ }
+}
+
/* Get standard timing level (CVT/GTF/DMT). */
static int standard_timing_level(const struct drm_edid *drm_edid)
{
const struct edid *edid = drm_edid->edid;
- if (edid->revision >= 2) {
- if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
- return LEVEL_CVT;
- if (drm_gtf2_hbreak(drm_edid))
- return LEVEL_GTF2;
- if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
- return LEVEL_GTF;
+ if (edid->revision >= 4) {
+ /*
+ * If the range descriptor doesn't
+ * indicate otherwise, default to CVT
+ */
+ int ret = LEVEL_CVT;
+
+ drm_for_each_detailed_block(drm_edid, get_timing_level, &ret);
+
+ return ret;
+ } else if (edid->revision >= 3 && drm_gtf2_hbreak(drm_edid)) {
+ return LEVEL_GTF2;
+ } else if (edid->revision >= 2) {
+ return LEVEL_GTF;
+ } else {
+ return LEVEL_DMT;
}
- return LEVEL_DMT;
}
/*
@@ -3113,6 +3215,35 @@ static int drm_mode_hsync(const struct drm_display_mode *mode)
return DIV_ROUND_CLOSEST(mode->clock, mode->htotal);
}
+static struct drm_display_mode *
+drm_gtf2_mode(struct drm_device *dev,
+ const struct drm_edid *drm_edid,
+ int hsize, int vsize, int vrefresh_rate)
+{
+ struct drm_display_mode *mode;
+
+ /*
+ * This is potentially wrong if there's ever a monitor with
+ * more than one ranges section, each claiming a different
+ * secondary GTF curve. Please don't do that.
+ */
+ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ if (!mode)
+ return NULL;
+
+ if (drm_mode_hsync(mode) > drm_gtf2_hbreak(drm_edid)) {
+ drm_mode_destroy(dev, mode);
+ mode = drm_gtf_mode_complex(dev, hsize, vsize,
+ vrefresh_rate, 0, 0,
+ drm_gtf2_m(drm_edid),
+ drm_gtf2_2c(drm_edid),
+ drm_gtf2_k(drm_edid),
+ drm_gtf2_2j(drm_edid));
+ }
+
+ return mode;
+}
+
/*
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT/GTF/DMT.
@@ -3201,23 +3332,7 @@ static struct drm_display_mode *drm_mode_std(struct drm_connector *connector,
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
break;
case LEVEL_GTF2:
- /*
- * This is potentially wrong if there's ever a monitor with
- * more than one ranges section, each claiming a different
- * secondary GTF curve. Please don't do that.
- */
- mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
- if (!mode)
- return NULL;
- if (drm_mode_hsync(mode) > drm_gtf2_hbreak(drm_edid)) {
- drm_mode_destroy(dev, mode);
- mode = drm_gtf_mode_complex(dev, hsize, vsize,
- vrefresh_rate, 0, 0,
- drm_gtf2_m(drm_edid),
- drm_gtf2_2c(drm_edid),
- drm_gtf2_k(drm_edid),
- drm_gtf2_2j(drm_edid));
- }
+ mode = drm_gtf2_mode(dev, drm_edid, hsize, vsize, vrefresh_rate);
break;
case LEVEL_CVT:
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
@@ -3274,11 +3389,12 @@ drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
* timing block contains enough info for us to create and return a new struct
* drm_display_mode.
*/
-static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connector,
const struct drm_edid *drm_edid,
const struct detailed_timing *timing,
u32 quirks)
{
+ struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
const struct detailed_pixel_timing *pt = &timing->data.pixel_data;
unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
@@ -3295,17 +3411,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
return NULL;
if (pt->misc & DRM_EDID_PT_STEREO) {
- DRM_DEBUG_KMS("stereo mode not supported\n");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Stereo mode not supported\n",
+ connector->base.id, connector->name);
return NULL;
}
if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
- DRM_DEBUG_KMS("composite sync not supported\n");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Composite sync not supported\n",
+ connector->base.id, connector->name);
}
/* it is incorrect if hsync/vsync width is zero */
if (!hsync_pulse_width || !vsync_pulse_width) {
- DRM_DEBUG_KMS("Incorrect Detailed timing. "
- "Wrong Hsync/Vsync pulse width\n");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Incorrect Detailed timing. Wrong Hsync/Vsync pulse width\n",
+ connector->base.id, connector->name);
return NULL;
}
@@ -3415,7 +3533,7 @@ range_pixel_clock(const struct edid *edid, const u8 *t)
return 0;
/* 1.4 with CVT support gives us real precision, yay */
- if (edid->revision >= 4 && t[10] == 0x04)
+ if (edid->revision >= 4 && t[10] == DRM_EDID_CVT_SUPPORT_FLAG)
return (t[9] * 10000) - ((t[12] >> 2) * 250);
/* 1.3 is pathetic, so fuzz up a bit */
@@ -3441,7 +3559,7 @@ static bool mode_in_range(const struct drm_display_mode *mode,
return false;
/* 1.4 max horizontal check */
- if (edid->revision >= 4 && t[10] == 0x04)
+ if (edid->revision >= 4 && t[10] == DRM_EDID_CVT_SUPPORT_FLAG)
if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
return false;
@@ -3533,6 +3651,35 @@ static int drm_gtf_modes_for_range(struct drm_connector *connector,
return modes;
}
+static int drm_gtf2_modes_for_range(struct drm_connector *connector,
+ const struct drm_edid *drm_edid,
+ const struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
+ const struct minimode *m = &extra_modes[i];
+
+ newmode = drm_gtf2_mode(dev, drm_edid, m->w, m->h, m->r);
+ if (!newmode)
+ return modes;
+
+ drm_mode_fixup_1366x768(newmode);
+ if (!mode_in_range(newmode, drm_edid, timing) ||
+ !valid_inferred_mode(connector, newmode)) {
+ drm_mode_destroy(dev, newmode);
+ continue;
+ }
+
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+ return modes;
+}
+
static int drm_cvt_modes_for_range(struct drm_connector *connector,
const struct drm_edid *drm_edid,
const struct detailed_timing *timing)
@@ -3577,25 +3724,29 @@ do_inferred_modes(const struct detailed_timing *timing, void *c)
closure->drm_edid,
timing);
- if (!version_greater(closure->drm_edid, 1, 1))
+ if (closure->drm_edid->edid->revision < 2)
return; /* GTF not defined yet */
switch (range->flags) {
- case 0x02: /* secondary gtf, XXX could do more */
- case 0x00: /* default gtf */
+ case DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG:
+ closure->modes += drm_gtf2_modes_for_range(closure->connector,
+ closure->drm_edid,
+ timing);
+ break;
+ case DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG:
closure->modes += drm_gtf_modes_for_range(closure->connector,
closure->drm_edid,
timing);
break;
- case 0x04: /* cvt, only in 1.4+ */
- if (!version_greater(closure->drm_edid, 1, 3))
+ case DRM_EDID_CVT_SUPPORT_FLAG:
+ if (closure->drm_edid->edid->revision < 4)
break;
closure->modes += drm_cvt_modes_for_range(closure->connector,
closure->drm_edid,
timing);
break;
- case 0x01: /* just the ranges, no formula */
+ case DRM_EDID_RANGE_LIMITS_ONLY_FLAG:
default:
break;
}
@@ -3609,7 +3760,7 @@ static int add_inferred_modes(struct drm_connector *connector,
.drm_edid = drm_edid,
};
- if (version_greater(drm_edid, 1, 0))
+ if (drm_edid->edid->revision >= 1)
drm_for_each_detailed_block(drm_edid, do_inferred_modes, &closure);
return closure.modes;
@@ -3686,7 +3837,7 @@ static int add_established_modes(struct drm_connector *connector,
}
}
- if (version_greater(drm_edid, 1, 0))
+ if (edid->revision >= 1)
drm_for_each_detailed_block(drm_edid, do_established_modes,
&closure);
@@ -3741,7 +3892,7 @@ static int add_standard_modes(struct drm_connector *connector,
}
}
- if (version_greater(drm_edid, 1, 0))
+ if (drm_edid->edid->revision >= 1)
drm_for_each_detailed_block(drm_edid, do_standard_modes,
&closure);
@@ -3821,7 +3972,7 @@ add_cvt_modes(struct drm_connector *connector, const struct drm_edid *drm_edid)
.drm_edid = drm_edid,
};
- if (version_greater(drm_edid, 1, 2))
+ if (drm_edid->edid->revision >= 3)
drm_for_each_detailed_block(drm_edid, do_cvt_mode, &closure);
/* XXX should also look for CVT codes in VTB blocks */
@@ -3829,7 +3980,8 @@ add_cvt_modes(struct drm_connector *connector, const struct drm_edid *drm_edid)
return closure.modes;
}
-static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode);
+static void fixup_detailed_cea_mode_clock(struct drm_connector *connector,
+ struct drm_display_mode *mode);
static void
do_detailed_mode(const struct detailed_timing *timing, void *c)
@@ -3840,7 +3992,7 @@ do_detailed_mode(const struct detailed_timing *timing, void *c)
if (!is_detailed_timing_descriptor(timing))
return;
- newmode = drm_mode_detailed(closure->connector->dev,
+ newmode = drm_mode_detailed(closure->connector,
closure->drm_edid, timing,
closure->quirks);
if (!newmode)
@@ -3854,7 +4006,7 @@ do_detailed_mode(const struct detailed_timing *timing, void *c)
* so fix up anything that looks like CEA/HDMI mode, but the clock
* is just slightly off.
*/
- fixup_detailed_cea_mode_clock(newmode);
+ fixup_detailed_cea_mode_clock(closure->connector, newmode);
drm_mode_probed_add(closure->connector, newmode);
closure->modes++;
@@ -3873,13 +4025,14 @@ static int add_detailed_modes(struct drm_connector *connector,
struct detailed_mode_closure closure = {
.connector = connector,
.drm_edid = drm_edid,
- .preferred = true,
.quirks = quirks,
};
- if (closure.preferred && !version_greater(drm_edid, 1, 3))
+ if (drm_edid->edid->revision >= 4)
+ closure.preferred = true; /* first detailed timing is always preferred */
+ else
closure.preferred =
- (drm_edid->edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+ drm_edid->edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING;
drm_for_each_detailed_block(drm_edid, do_detailed_mode, &closure);
@@ -4515,7 +4668,8 @@ static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
struct drm_display_mode *newmode;
if (!drm_valid_hdmi_vic(vic)) {
- DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
+ drm_err(connector->dev, "[CONNECTOR:%d:%s] Unknown HDMI VIC: %d\n",
+ connector->base.id, connector->name, vic);
return 0;
}
@@ -5122,7 +5276,8 @@ static int add_cea_modes(struct drm_connector *connector,
return modes;
}
-static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
+static void fixup_detailed_cea_mode_clock(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
const struct drm_display_mode *cea_mode;
int clock1, clock2, clock;
@@ -5160,8 +5315,10 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
if (mode->clock == clock)
return;
- DRM_DEBUG("detailed mode matches %s VIC %d, adjusting clock %d -> %d\n",
- type, vic, mode->clock, clock);
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] detailed mode matches %s VIC %d, adjusting clock %d -> %d\n",
+ connector->base.id, connector->name,
+ type, vic, mode->clock, clock);
mode->clock = clock;
}
@@ -5269,15 +5426,12 @@ drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
if (len >= 12)
connector->audio_latency[1] = db[12];
- DRM_DEBUG_KMS("HDMI: latency present %d %d, "
- "video latency %d %d, "
- "audio latency %d %d\n",
- connector->latency_present[0],
- connector->latency_present[1],
- connector->video_latency[0],
- connector->video_latency[1],
- connector->audio_latency[0],
- connector->audio_latency[1]);
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] HDMI: latency present %d %d, video latency %d %d, audio latency %d %d\n",
+ connector->base.id, connector->name,
+ connector->latency_present[0], connector->latency_present[1],
+ connector->video_latency[0], connector->video_latency[1],
+ connector->audio_latency[0], connector->audio_latency[1]);
}
static void
@@ -5375,7 +5529,9 @@ static void drm_edid_to_eld(struct drm_connector *connector,
return;
mnl = get_monitor_name(drm_edid, &eld[DRM_ELD_MONITOR_NAME_STRING]);
- DRM_DEBUG_KMS("ELD monitor %s\n", &eld[DRM_ELD_MONITOR_NAME_STRING]);
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD monitor %s\n",
+ connector->base.id, connector->name,
+ &eld[DRM_ELD_MONITOR_NAME_STRING]);
eld[DRM_ELD_CEA_EDID_VER_MNL] = info->cea_rev << DRM_ELD_CEA_EDID_VER_SHIFT;
eld[DRM_ELD_CEA_EDID_VER_MNL] |= mnl;
@@ -5429,8 +5585,9 @@ static void drm_edid_to_eld(struct drm_connector *connector,
eld[DRM_ELD_BASELINE_ELD_LEN] =
DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
- DRM_DEBUG_KMS("ELD size %d, SAD count %d\n",
- drm_eld_size(eld), total_sad_count);
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD size %d, SAD count %d\n",
+ connector->base.id, connector->name,
+ drm_eld_size(eld), total_sad_count);
}
static int _drm_edid_to_sad(const struct drm_edid *drm_edid,
@@ -5701,7 +5858,8 @@ static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db)
{
struct drm_display_info *info = &connector->display_info;
- DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", db[2]);
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] CEA VCDB 0x%02x\n",
+ connector->base.id, connector->name, db[2]);
if (db[2] & EDID_CEA_VCDB_QS)
info->rgb_quant_range_selectable = true;
@@ -5752,12 +5910,87 @@ static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
hdmi->y420_dc_modes = dc_mask;
}
+static void drm_parse_dsc_info(struct drm_hdmi_dsc_cap *hdmi_dsc,
+ const u8 *hf_scds)
+{
+ hdmi_dsc->v_1p2 = hf_scds[11] & DRM_EDID_DSC_1P2;
+
+ if (!hdmi_dsc->v_1p2)
+ return;
+
+ hdmi_dsc->native_420 = hf_scds[11] & DRM_EDID_DSC_NATIVE_420;
+ hdmi_dsc->all_bpp = hf_scds[11] & DRM_EDID_DSC_ALL_BPP;
+
+ if (hf_scds[11] & DRM_EDID_DSC_16BPC)
+ hdmi_dsc->bpc_supported = 16;
+ else if (hf_scds[11] & DRM_EDID_DSC_12BPC)
+ hdmi_dsc->bpc_supported = 12;
+ else if (hf_scds[11] & DRM_EDID_DSC_10BPC)
+ hdmi_dsc->bpc_supported = 10;
+ else
+ /* Supports min 8 BPC if DSC 1.2 is supported */
+ hdmi_dsc->bpc_supported = 8;
+
+ if (cea_db_payload_len(hf_scds) >= 12 && hf_scds[12]) {
+ u8 dsc_max_slices;
+ u8 dsc_max_frl_rate;
+
+ dsc_max_frl_rate = (hf_scds[12] & DRM_EDID_DSC_MAX_FRL_RATE_MASK) >> 4;
+ drm_get_max_frl_rate(dsc_max_frl_rate, &hdmi_dsc->max_lanes,
+ &hdmi_dsc->max_frl_rate_per_lane);
+
+ dsc_max_slices = hf_scds[12] & DRM_EDID_DSC_MAX_SLICES;
+
+ switch (dsc_max_slices) {
+ case 1:
+ hdmi_dsc->max_slices = 1;
+ hdmi_dsc->clk_per_slice = 340;
+ break;
+ case 2:
+ hdmi_dsc->max_slices = 2;
+ hdmi_dsc->clk_per_slice = 340;
+ break;
+ case 3:
+ hdmi_dsc->max_slices = 4;
+ hdmi_dsc->clk_per_slice = 340;
+ break;
+ case 4:
+ hdmi_dsc->max_slices = 8;
+ hdmi_dsc->clk_per_slice = 340;
+ break;
+ case 5:
+ hdmi_dsc->max_slices = 8;
+ hdmi_dsc->clk_per_slice = 400;
+ break;
+ case 6:
+ hdmi_dsc->max_slices = 12;
+ hdmi_dsc->clk_per_slice = 400;
+ break;
+ case 7:
+ hdmi_dsc->max_slices = 16;
+ hdmi_dsc->clk_per_slice = 400;
+ break;
+ case 0:
+ default:
+ hdmi_dsc->max_slices = 0;
+ hdmi_dsc->clk_per_slice = 0;
+ }
+ }
+
+ if (cea_db_payload_len(hf_scds) >= 13 && hf_scds[13])
+ hdmi_dsc->total_chunk_kbytes = hf_scds[13] & DRM_EDID_DSC_TOTAL_CHUNK_KBYTES;
+}
+
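A short sketch of how a driver might consult the capabilities drm_parse_dsc_info() fills in; foo_sink_supports_dsc() is an assumed name, and clk_per_slice is in MHz per the values assigned above:

	static bool foo_sink_supports_dsc(const struct drm_connector *connector)
	{
		const struct drm_hdmi_dsc_cap *dsc =
			&connector->display_info.hdmi.dsc_cap;

		/* v_1p2 gates every other field parsed above. */
		if (!dsc->v_1p2)
			return false;

		/* e.g. code 6 above yields max_slices = 12 at 400 MHz per slice */
		return dsc->max_slices > 0 && dsc->clk_per_slice > 0;
	}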
/* Sink Capability Data Structure */
static void drm_parse_hdmi_forum_scds(struct drm_connector *connector,
const u8 *hf_scds)
{
struct drm_display_info *display = &connector->display_info;
struct drm_hdmi_info *hdmi = &display->hdmi;
+ struct drm_hdmi_dsc_cap *hdmi_dsc = &hdmi->dsc_cap;
+ int max_tmds_clock = 0;
+ u8 max_frl_rate = 0;
+ bool dsc_support = false;
display->has_hdmi_infoframe = true;
@@ -5777,14 +6010,13 @@ static void drm_parse_hdmi_forum_scds(struct drm_connector *connector,
*/
if (hf_scds[5]) {
- /* max clock is 5000 KHz times block value */
- u32 max_tmds_clock = hf_scds[5] * 5000;
struct drm_scdc *scdc = &hdmi->scdc;
+ /* max clock is 5000 KHz times block value */
+ max_tmds_clock = hf_scds[5] * 5000;
+
if (max_tmds_clock > 340000) {
display->max_tmds_clock = max_tmds_clock;
- DRM_DEBUG_KMS("HF-VSDB: max TMDS clock %d kHz\n",
- display->max_tmds_clock);
}
if (scdc->supported) {
@@ -5797,74 +6029,22 @@ static void drm_parse_hdmi_forum_scds(struct drm_connector *connector,
}
if (hf_scds[7]) {
- u8 max_frl_rate;
- u8 dsc_max_frl_rate;
- u8 dsc_max_slices;
- struct drm_hdmi_dsc_cap *hdmi_dsc = &hdmi->dsc_cap;
-
- DRM_DEBUG_KMS("hdmi_21 sink detected. parsing edid\n");
max_frl_rate = (hf_scds[7] & DRM_EDID_MAX_FRL_RATE_MASK) >> 4;
drm_get_max_frl_rate(max_frl_rate, &hdmi->max_lanes,
&hdmi->max_frl_rate_per_lane);
- hdmi_dsc->v_1p2 = hf_scds[11] & DRM_EDID_DSC_1P2;
-
- if (hdmi_dsc->v_1p2) {
- hdmi_dsc->native_420 = hf_scds[11] & DRM_EDID_DSC_NATIVE_420;
- hdmi_dsc->all_bpp = hf_scds[11] & DRM_EDID_DSC_ALL_BPP;
-
- if (hf_scds[11] & DRM_EDID_DSC_16BPC)
- hdmi_dsc->bpc_supported = 16;
- else if (hf_scds[11] & DRM_EDID_DSC_12BPC)
- hdmi_dsc->bpc_supported = 12;
- else if (hf_scds[11] & DRM_EDID_DSC_10BPC)
- hdmi_dsc->bpc_supported = 10;
- else
- hdmi_dsc->bpc_supported = 0;
-
- dsc_max_frl_rate = (hf_scds[12] & DRM_EDID_DSC_MAX_FRL_RATE_MASK) >> 4;
- drm_get_max_frl_rate(dsc_max_frl_rate, &hdmi_dsc->max_lanes,
- &hdmi_dsc->max_frl_rate_per_lane);
- hdmi_dsc->total_chunk_kbytes = hf_scds[13] & DRM_EDID_DSC_TOTAL_CHUNK_KBYTES;
-
- dsc_max_slices = hf_scds[12] & DRM_EDID_DSC_MAX_SLICES;
- switch (dsc_max_slices) {
- case 1:
- hdmi_dsc->max_slices = 1;
- hdmi_dsc->clk_per_slice = 340;
- break;
- case 2:
- hdmi_dsc->max_slices = 2;
- hdmi_dsc->clk_per_slice = 340;
- break;
- case 3:
- hdmi_dsc->max_slices = 4;
- hdmi_dsc->clk_per_slice = 340;
- break;
- case 4:
- hdmi_dsc->max_slices = 8;
- hdmi_dsc->clk_per_slice = 340;
- break;
- case 5:
- hdmi_dsc->max_slices = 8;
- hdmi_dsc->clk_per_slice = 400;
- break;
- case 6:
- hdmi_dsc->max_slices = 12;
- hdmi_dsc->clk_per_slice = 400;
- break;
- case 7:
- hdmi_dsc->max_slices = 16;
- hdmi_dsc->clk_per_slice = 400;
- break;
- case 0:
- default:
- hdmi_dsc->max_slices = 0;
- hdmi_dsc->clk_per_slice = 0;
- }
- }
}
drm_parse_ycbcr420_deep_color_info(connector, hf_scds);
+
+ if (cea_db_payload_len(hf_scds) >= 11 && hf_scds[11]) {
+ drm_parse_dsc_info(hdmi_dsc, hf_scds);
+ dsc_support = true;
+ }
+
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] HF-VSDB: max TMDS clock: %d KHz, HDMI 2.1 support: %s, DSC 1.2 support: %s\n",
+ connector->base.id, connector->name,
+ max_tmds_clock, str_yes_no(max_frl_rate), str_yes_no(dsc_support));
}
static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
@@ -5882,39 +6062,39 @@ static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
dc_bpc = 10;
info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_30;
- DRM_DEBUG("%s: HDMI sink does deep color 30.\n",
- connector->name);
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI sink does deep color 30.\n",
+ connector->base.id, connector->name);
}
if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
dc_bpc = 12;
info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_36;
- DRM_DEBUG("%s: HDMI sink does deep color 36.\n",
- connector->name);
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI sink does deep color 36.\n",
+ connector->base.id, connector->name);
}
if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
dc_bpc = 16;
info->edid_hdmi_rgb444_dc_modes |= DRM_EDID_HDMI_DC_48;
- DRM_DEBUG("%s: HDMI sink does deep color 48.\n",
- connector->name);
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI sink does deep color 48.\n",
+ connector->base.id, connector->name);
}
if (dc_bpc == 0) {
- DRM_DEBUG("%s: No deep color support on this HDMI sink.\n",
- connector->name);
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] No deep color support on this HDMI sink.\n",
+ connector->base.id, connector->name);
return;
}
- DRM_DEBUG("%s: Assigning HDMI sink color depth as %d bpc.\n",
- connector->name, dc_bpc);
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Assigning HDMI sink color depth as %d bpc.\n",
+ connector->base.id, connector->name, dc_bpc);
info->bpc = dc_bpc;
/* YCRCB444 is optional according to spec. */
if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
info->edid_hdmi_ycbcr444_dc_modes = info->edid_hdmi_rgb444_dc_modes;
- DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
- connector->name);
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI sink does YCRCB444 in deep color.\n",
+ connector->base.id, connector->name);
}
/*
@@ -5922,8 +6102,8 @@ static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
* then deep color 36 bit must be supported.
*/
if (!(hdmi[6] & DRM_EDID_HDMI_DC_36)) {
- DRM_DEBUG("%s: HDMI sink should do DC_36, but does not!\n",
- connector->name);
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI sink should do DC_36, but does not!\n",
+ connector->base.id, connector->name);
}
}
@@ -5940,10 +6120,9 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
if (len >= 7)
info->max_tmds_clock = db[7] * 5000;
- DRM_DEBUG_KMS("HDMI: DVI dual %d, "
- "max TMDS clock %d kHz\n",
- info->dvi_dual,
- info->max_tmds_clock);
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI: DVI dual %d, max TMDS clock %d kHz\n",
+ connector->base.id, connector->name,
+ info->dvi_dual, info->max_tmds_clock);
drm_parse_hdmi_deep_color_info(connector, db);
}
@@ -5963,8 +6142,9 @@ static void drm_parse_microsoft_vsdb(struct drm_connector *connector,
if (version == 1 || version == 2 || (version == 3 && !desktop_usage))
info->non_desktop = true;
- drm_dbg_kms(connector->dev, "HMD or specialized display VSDB version %u: 0x%02x\n",
- version, db[5]);
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] HMD or specialized display VSDB version %u: 0x%02x\n",
+ connector->base.id, connector->name, version, db[5]);
}
static void drm_parse_cea_ext(struct drm_connector *connector,
@@ -5985,8 +6165,10 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
info->cea_rev = edid_ext[1];
if (info->cea_rev != edid_ext[1])
- DRM_DEBUG_KMS("CEA extension version mismatch %u != %u\n",
- info->cea_rev, edid_ext[1]);
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] CEA extension version mismatch %u != %u\n",
+ connector->base.id, connector->name,
+ info->cea_rev, edid_ext[1]);
/* The existence of a CTA extension should imply RGB support */
info->color_formats = DRM_COLOR_FORMAT_RGB444;
@@ -6020,43 +6202,62 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
}
static
-void get_monitor_range(const struct detailed_timing *timing,
- void *info_monitor_range)
+void get_monitor_range(const struct detailed_timing *timing, void *c)
{
- struct drm_monitor_range_info *monitor_range = info_monitor_range;
+ struct detailed_mode_closure *closure = c;
+ struct drm_display_info *info = &closure->connector->display_info;
+ struct drm_monitor_range_info *monitor_range = &info->monitor_range;
const struct detailed_non_pixel *data = &timing->data.other_data;
const struct detailed_data_monitor_range *range = &data->data.range;
+ const struct edid *edid = closure->drm_edid->edid;
if (!is_display_descriptor(timing, EDID_DETAIL_MONITOR_RANGE))
return;
/*
- * Check for flag range limits only. If flag == 1 then
- * no additional timing information provided.
- * Default GTF, GTF Secondary curve and CVT are not
- * supported
+ * These limits are used to determine the VRR refresh
+ * rate range. Only the "range limits only" variant
+ * of the range descriptor seems to guarantee that
+ * any and all timings are accepted by the sink, as
+ * opposed to just timings conforming to the indicated
+ * formula (GTF/GTF2/CVT). Thus other variants of the
+ * range descriptor are not accepted here.
*/
if (range->flags != DRM_EDID_RANGE_LIMITS_ONLY_FLAG)
return;
monitor_range->min_vfreq = range->min_vfreq;
monitor_range->max_vfreq = range->max_vfreq;
+
+ if (edid->revision >= 4) {
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
+ monitor_range->min_vfreq += 255;
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
+ monitor_range->max_vfreq += 255;
+ }
}
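
For illustration: an EDID 1.4 range descriptor storing max_vfreq = 145 with DRM_EDID_RANGE_OFFSET_MAX_VFREQ set in pad2 describes a 400 Hz upper bound (145 + 255), which no longer fits in a u8; hence the casts dropped from vrr_range_show() in the drm_debugfs.c hunk earlier. The arithmetic, on hypothetical descriptor values:

	monitor_range->min_vfreq = 48;		/*  48 Hz, no offset flag set   */
	monitor_range->max_vfreq = 145 + 255;	/* 400 Hz, exceeds a u8's range */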
static void drm_get_monitor_range(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
- struct drm_display_info *info = &connector->display_info;
+ const struct drm_display_info *info = &connector->display_info;
+ struct detailed_mode_closure closure = {
+ .connector = connector,
+ .drm_edid = drm_edid,
+ };
- if (!version_greater(drm_edid, 1, 1))
+ if (drm_edid->edid->revision < 4)
return;
- drm_for_each_detailed_block(drm_edid, get_monitor_range,
- &info->monitor_range);
+ if (!(drm_edid->edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ))
+ return;
+
+ drm_for_each_detailed_block(drm_edid, get_monitor_range, &closure);
- DRM_DEBUG_KMS("Supported Monitor Refresh rate range is %d Hz - %d Hz\n",
- info->monitor_range.min_vfreq,
- info->monitor_range.max_vfreq);
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] Supported Monitor Refresh rate range is %d Hz - %d Hz\n",
+ connector->base.id, connector->name,
+ info->monitor_range.min_vfreq, info->monitor_range.max_vfreq);
}
static void drm_parse_vesa_mso_data(struct drm_connector *connector,
@@ -6067,8 +6268,9 @@ static void drm_parse_vesa_mso_data(struct drm_connector *connector,
struct drm_display_info *info = &connector->display_info;
if (block->num_bytes < 3) {
- drm_dbg_kms(connector->dev, "Unexpected vendor block size %u\n",
- block->num_bytes);
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] Unexpected vendor block size %u\n",
+ connector->base.id, connector->name, block->num_bytes);
return;
}
@@ -6076,13 +6278,16 @@ static void drm_parse_vesa_mso_data(struct drm_connector *connector,
return;
if (sizeof(*vesa) != sizeof(*block) + block->num_bytes) {
- drm_dbg_kms(connector->dev, "Unexpected VESA vendor block size\n");
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] Unexpected VESA vendor block size\n",
+ connector->base.id, connector->name);
return;
}
switch (FIELD_GET(DISPLAYID_VESA_MSO_MODE, vesa->mso)) {
default:
- drm_dbg_kms(connector->dev, "Reserved MSO mode value\n");
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Reserved MSO mode value\n",
+ connector->base.id, connector->name);
fallthrough;
case 0:
info->mso_stream_count = 0;
@@ -6102,12 +6307,16 @@ static void drm_parse_vesa_mso_data(struct drm_connector *connector,
info->mso_pixel_overlap = FIELD_GET(DISPLAYID_VESA_MSO_OVERLAP, vesa->mso);
if (info->mso_pixel_overlap > 8) {
- drm_dbg_kms(connector->dev, "Reserved MSO pixel overlap value %u\n",
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] Reserved MSO pixel overlap value %u\n",
+ connector->base.id, connector->name,
info->mso_pixel_overlap);
info->mso_pixel_overlap = 8;
}
- drm_dbg_kms(connector->dev, "MSO stream count %u, pixel overlap %u\n",
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] MSO stream count %u, pixel overlap %u\n",
+ connector->base.id, connector->name,
info->mso_stream_count, info->mso_pixel_overlap);
}
@@ -6190,8 +6399,9 @@ static u32 update_display_info(struct drm_connector *connector,
if (info->bpc == 0 && edid->revision == 3 &&
edid->input & DRM_EDID_DIGITAL_DFP_1_X) {
info->bpc = 8;
- DRM_DEBUG("%s: Assigning DFP sink color depth as %d bpc.\n",
- connector->name, info->bpc);
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] Assigning DFP sink color depth as %d bpc.\n",
+ connector->base.id, connector->name, info->bpc);
}
/* Only defined for 1.4 with digital displays */
@@ -6223,8 +6433,9 @@ static u32 update_display_info(struct drm_connector *connector,
break;
}
- DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
- connector->name, info->bpc);
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
+ connector->base.id, connector->name, info->bpc);
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCBCR444;
@@ -6235,7 +6446,8 @@ static u32 update_display_info(struct drm_connector *connector,
out:
if (quirks & EDID_QUIRK_NON_DESKTOP) {
- drm_dbg_kms(connector->dev, "Non-desktop display%s\n",
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Non-desktop display%s\n",
+ connector->base.id, connector->name,
info->non_desktop ? " (redundant quirk)" : "");
info->non_desktop = true;
}
@@ -6378,7 +6590,7 @@ static int _drm_edid_connector_update(struct drm_connector *connector,
num_modes += add_cea_modes(connector, drm_edid);
num_modes += add_alternate_cea_modes(connector, drm_edid);
num_modes += add_displayid_detailed_modes(connector, drm_edid);
- if (drm_edid->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+ if (drm_edid->edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ)
num_modes += add_inferred_modes(connector, drm_edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
@@ -6470,23 +6682,6 @@ int drm_edid_connector_update(struct drm_connector *connector,
{
int count;
- /*
- * FIXME: Reconcile the differences in override_edid handling between
- * this and drm_connector_update_edid_property().
- *
- * If override_edid is set, and the EDID passed in here originates from
- * drm_edid_read() and friends, it will be the override EDID, and there
- * are no issues. drm_connector_update_edid_property() ignoring requests
- * to set the EDID dates back to a time when override EDID was not
- * handled at the low level EDID read.
- *
- * The only way the EDID passed in here can be different from the
- * override EDID is when a driver passes in an EDID that does *not*
- * originate from drm_edid_read() and friends, or passes in a stale
- * cached version. This, in turn, is a question of when an override EDID
- * set via debugfs should take effect.
- */
-
count = _drm_edid_connector_update(connector, drm_edid);
_drm_update_tile_info(connector, drm_edid);
@@ -6501,10 +6696,6 @@ EXPORT_SYMBOL(drm_edid_connector_update);
static int _drm_connector_update_edid_property(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
- /* ignore requests to set edid when overridden */
- if (connector->override_edid)
- return 0;
-
/*
* Set the display info, using edid if available, otherwise resetting
* the values to defaults. This duplicates the work done in
@@ -6567,8 +6758,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
struct drm_edid drm_edid;
if (edid && !drm_edid_is_valid(edid)) {
- drm_warn(connector->dev, "%s: EDID invalid.\n",
- connector->name);
+ drm_warn(connector->dev, "[CONNECTOR:%d:%s] EDID invalid.\n",
+ connector->base.id, connector->name);
edid = NULL;
}
@@ -6825,7 +7016,7 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
* by non-zero YQ when receiving RGB. There doesn't seem to be any
* good way to tell which version of CEA-861 the sink supports, so
* we limit non-zero YQ to HDMI 2.0 sinks only as HDMI 2.0 is based
- * on on CEA-861-F.
+ * on CEA-861-F.
*/
if (!is_hdmi2_sink(connector) ||
rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
@@ -6944,11 +7135,14 @@ static void drm_parse_tiled_block(struct drm_connector *connector,
connector->tile_h_size = w + 1;
connector->tile_v_size = h + 1;
- DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
- DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
- DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
- num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
- DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] tile cap 0x%x, size %dx%d, num tiles %dx%d, location %dx%d, vend %c%c%c",
+ connector->base.id, connector->name,
+ tile->tile_cap,
+ connector->tile_h_size, connector->tile_v_size,
+ connector->num_h_tile, connector->num_v_tile,
+ connector->tile_h_loc, connector->tile_v_loc,
+ tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
if (!tg)
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 37d8ba3ddb46..ef4ab59d6935 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -11,12 +11,13 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
+#include "drm_crtc_internal.h"
+
static char edid_firmware[PATH_MAX];
module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
@@ -159,23 +160,12 @@ static const u8 generic_edid[GENERIC_EDIDS][128] = {
},
};
-static int edid_size(const u8 *edid, int data_size)
-{
- if (data_size < EDID_LENGTH)
- return 0;
-
- return (edid[0x7e] + 1) * EDID_LENGTH;
-}
-
-static void *edid_load(struct drm_connector *connector, const char *name,
- const char *connector_name)
+static const struct drm_edid *edid_load(struct drm_connector *connector, const char *name)
{
const struct firmware *fw = NULL;
const u8 *fwdata;
- u8 *edid;
+ const struct drm_edid *drm_edid;
int fwsize, builtin;
- int i, valid_extensions = 0;
- bool print_bad_edid = !connector->bad_edid_counter || drm_debug_enabled(DRM_UT_KMS);
builtin = match_string(generic_edid_name, GENERIC_EDIDS, name);
if (builtin >= 0) {
@@ -185,18 +175,22 @@ static void *edid_load(struct drm_connector *connector, const char *name,
struct platform_device *pdev;
int err;
- pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
+ pdev = platform_device_register_simple(connector->name, -1, NULL, 0);
if (IS_ERR(pdev)) {
- DRM_ERROR("Failed to register EDID firmware platform device "
- "for connector \"%s\"\n", connector_name);
+ drm_err(connector->dev,
+ "[CONNECTOR:%d:%s] Failed to register EDID firmware platform device for connector \"%s\"\n",
+ connector->base.id, connector->name,
+ connector->name);
return ERR_CAST(pdev);
}
err = request_firmware(&fw, name, &pdev->dev);
platform_device_unregister(pdev);
if (err) {
- DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
- name, err);
+ drm_err(connector->dev,
+ "[CONNECTOR:%d:%s] Requesting EDID firmware \"%s\" failed (err=%d)\n",
+ connector->base.id, connector->name,
+ name, err);
return ERR_PTR(err);
}
@@ -204,70 +198,26 @@ static void *edid_load(struct drm_connector *connector, const char *name,
fwsize = fw->size;
}
- if (edid_size(fwdata, fwsize) != fwsize) {
- DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
- "(expected %d, got %d\n", name,
- edid_size(fwdata, fwsize), (int)fwsize);
- edid = ERR_PTR(-EINVAL);
- goto out;
- }
-
- edid = kmemdup(fwdata, fwsize, GFP_KERNEL);
- if (edid == NULL) {
- edid = ERR_PTR(-ENOMEM);
- goto out;
- }
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Loaded %s firmware EDID \"%s\"\n",
+ connector->base.id, connector->name,
+ builtin >= 0 ? "built-in" : "external", name);
- if (!drm_edid_block_valid(edid, 0, print_bad_edid,
- &connector->edid_corrupt)) {
- connector->bad_edid_counter++;
- DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
- name);
- kfree(edid);
- edid = ERR_PTR(-EINVAL);
- goto out;
+ drm_edid = drm_edid_alloc(fwdata, fwsize);
+ if (!drm_edid_valid(drm_edid)) {
+ drm_err(connector->dev, "Invalid firmware EDID \"%s\"\n", name);
+ drm_edid_free(drm_edid);
+ drm_edid = ERR_PTR(-EINVAL);
}
- for (i = 1; i <= edid[0x7e]; i++) {
- if (i != valid_extensions + 1)
- memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
- edid + i * EDID_LENGTH, EDID_LENGTH);
- if (drm_edid_block_valid(edid + i * EDID_LENGTH, i,
- print_bad_edid,
- NULL))
- valid_extensions++;
- }
-
- if (valid_extensions != edid[0x7e]) {
- u8 *new_edid;
-
- edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
- DRM_INFO("Found %d valid extensions instead of %d in EDID data "
- "\"%s\" for connector \"%s\"\n", valid_extensions,
- edid[0x7e], name, connector_name);
- edid[0x7e] = valid_extensions;
-
- new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
- GFP_KERNEL);
- if (new_edid)
- edid = new_edid;
- }
-
- DRM_INFO("Got %s EDID base block and %d extension%s from "
- "\"%s\" for connector \"%s\"\n", (builtin >= 0) ? "built-in" :
- "external", valid_extensions, valid_extensions == 1 ? "" : "s",
- name, connector_name);
-
-out:
release_firmware(fw);
- return edid;
+
+ return drm_edid;
}
-struct edid *drm_load_edid_firmware(struct drm_connector *connector)
+const struct drm_edid *drm_edid_load_firmware(struct drm_connector *connector)
{
- const char *connector_name = connector->name;
char *edidname, *last, *colon, *fwstr, *edidstr, *fallback = NULL;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
if (edid_firmware[0] == '\0')
return ERR_PTR(-ENOENT);
@@ -288,7 +238,7 @@ struct edid *drm_load_edid_firmware(struct drm_connector *connector)
while ((edidname = strsep(&edidstr, ","))) {
colon = strchr(edidname, ':');
if (colon != NULL) {
- if (strncmp(connector_name, edidname, colon - edidname))
+ if (strncmp(connector->name, edidname, colon - edidname))
continue;
edidname = colon + 1;
break;
@@ -310,8 +260,9 @@ struct edid *drm_load_edid_firmware(struct drm_connector *connector)
if (*last == '\n')
*last = '\0';
- edid = edid_load(connector, edidname, connector_name);
+ drm_edid = edid_load(connector, edidname);
+
kfree(fwstr);
- return edid;
+ return drm_edid;
}
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 4afc4ac27342..653a5821dd53 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -553,6 +553,7 @@ void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *d
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
drm_fb_xrgb8888_to_xrgb2101010_line);
}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010);
static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
@@ -659,6 +660,11 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, false);
return 0;
}
+ } else if (dst_format == (DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN)) {
+ if (fb_format == DRM_FORMAT_RGB565) {
+ drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
+ return 0;
+ }
} else if (dst_format == DRM_FORMAT_RGB888) {
if (fb_format == DRM_FORMAT_XRGB8888) {
drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip);
@@ -677,6 +683,11 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip);
return 0;
}
+ } else if (dst_format == DRM_FORMAT_BGRX8888) {
+ if (fb_format == DRM_FORMAT_XRGB8888) {
+ drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
+ return 0;
+ }
}
drm_warn_once(fb->dev, "No conversion helper from %p4cc to %p4cc found.\n",
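
The two new drm_fb_swab() paths reduce to a per-pixel byte swap of an otherwise identical layout. A minimal, runnable illustration for the RGB565 -> RGB565 | DRM_FORMAT_BIG_ENDIAN case:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t le = 0xF800;	/* pure red in little-endian RGB565 */
		uint16_t be = (uint16_t)((le << 8) | (le >> 8));

		printf("le=%04x be=%04x\n", le, be); /* le=f800 be=00f8 */
		return 0;
	}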
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 185b04762e2c..2dd97473ca10 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -87,13 +87,13 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
src_x > fb_width - src_w ||
src_h > fb_height ||
src_y > fb_height - src_h) {
- DRM_DEBUG_KMS("Invalid source coordinates "
- "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
- src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
- src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
- src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
- src_y >> 16, ((src_y & 0xffff) * 15625) >> 10,
- fb->width, fb->height);
+ drm_dbg_kms(fb->dev, "Invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
+ src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
+ src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
+ src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
+ src_y >> 16, ((src_y & 0xffff) * 15625) >> 10,
+ fb->width, fb->height);
return -ENOSPC;
}
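
The %u.%06u conversions above split a 16.16 fixed-point source coordinate into integer and microunit parts: frac * 1000000 / 65536 simplifies to frac * 15625 >> 10, which stays within 32 bits. A runnable check:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t v = (123 << 16) | 0x8000;	/* 123.5 in 16.16 fixed point */

		printf("%u.%06u\n", (unsigned)(v >> 16),
		       (unsigned)(((v & 0xffff) * 15625) >> 10)); /* 123.500000 */
		return 0;
	}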
@@ -125,7 +125,7 @@ int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
r.pixel_format = drm_driver_legacy_fb_format(dev, or->bpp, or->depth);
if (r.pixel_format == DRM_FORMAT_INVALID) {
- DRM_DEBUG("bad {bpp:%d, depth:%d}\n", or->bpp, or->depth);
+ drm_dbg_kms(dev, "bad {bpp:%d, depth:%d}\n", or->bpp, or->depth);
return -EINVAL;
}
@@ -177,18 +177,18 @@ static int framebuffer_check(struct drm_device *dev,
/* check if the format is supported at all */
if (!__drm_format_info(r->pixel_format)) {
- DRM_DEBUG_KMS("bad framebuffer format %p4cc\n",
- &r->pixel_format);
+ drm_dbg_kms(dev, "bad framebuffer format %p4cc\n",
+ &r->pixel_format);
return -EINVAL;
}
if (r->width == 0) {
- DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
+ drm_dbg_kms(dev, "bad framebuffer width %u\n", r->width);
return -EINVAL;
}
if (r->height == 0) {
- DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
+ drm_dbg_kms(dev, "bad framebuffer height %u\n", r->height);
return -EINVAL;
}
@@ -202,12 +202,12 @@ static int framebuffer_check(struct drm_device *dev,
u64 min_pitch = drm_format_info_min_pitch(info, i, width);
if (!block_size && (r->modifier[i] == DRM_FORMAT_MOD_LINEAR)) {
- DRM_DEBUG_KMS("Format requires non-linear modifier for plane %d\n", i);
+ drm_dbg_kms(dev, "Format requires non-linear modifier for plane %d\n", i);
return -EINVAL;
}
if (!r->handles[i]) {
- DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
+ drm_dbg_kms(dev, "no buffer object handle for plane %d\n", i);
return -EINVAL;
}
@@ -218,20 +218,20 @@ static int framebuffer_check(struct drm_device *dev,
return -ERANGE;
if (block_size && r->pitches[i] < min_pitch) {
- DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
+ drm_dbg_kms(dev, "bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
- DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
- r->modifier[i], i);
+ drm_dbg_kms(dev, "bad fb modifier %llu for plane %d\n",
+ r->modifier[i], i);
return -EINVAL;
}
if (r->flags & DRM_MODE_FB_MODIFIERS &&
r->modifier[i] != r->modifier[0]) {
- DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
- r->modifier[i], i);
+ drm_dbg_kms(dev, "bad fb modifier %llu for plane %d\n",
+ r->modifier[i], i);
return -EINVAL;
}
@@ -244,7 +244,7 @@ static int framebuffer_check(struct drm_device *dev,
if (r->pixel_format != DRM_FORMAT_NV12 ||
width % 128 || height % 32 ||
r->pitches[i] % 128) {
- DRM_DEBUG_KMS("bad modifier data for plane %d\n", i);
+ drm_dbg_kms(dev, "bad modifier data for plane %d\n", i);
return -EINVAL;
}
break;
@@ -256,7 +256,7 @@ static int framebuffer_check(struct drm_device *dev,
for (i = info->num_planes; i < 4; i++) {
if (r->modifier[i]) {
- DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
+ drm_dbg_kms(dev, "non-zero modifier for unused plane %d\n", i);
return -EINVAL;
}
@@ -265,17 +265,17 @@ static int framebuffer_check(struct drm_device *dev,
continue;
if (r->handles[i]) {
- DRM_DEBUG_KMS("buffer object handle for unused plane %d\n", i);
+ drm_dbg_kms(dev, "buffer object handle for unused plane %d\n", i);
return -EINVAL;
}
if (r->pitches[i]) {
- DRM_DEBUG_KMS("non-zero pitch for unused plane %d\n", i);
+ drm_dbg_kms(dev, "non-zero pitch for unused plane %d\n", i);
return -EINVAL;
}
if (r->offsets[i]) {
- DRM_DEBUG_KMS("non-zero offset for unused plane %d\n", i);
+ drm_dbg_kms(dev, "non-zero offset for unused plane %d\n", i);
return -EINVAL;
}
}
@@ -293,24 +293,24 @@ drm_internal_framebuffer_create(struct drm_device *dev,
int ret;
if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
- DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+ drm_dbg_kms(dev, "bad framebuffer flags 0x%08x\n", r->flags);
return ERR_PTR(-EINVAL);
}
if ((config->min_width > r->width) || (r->width > config->max_width)) {
- DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
- r->width, config->min_width, config->max_width);
+ drm_dbg_kms(dev, "bad framebuffer width %d, should be >= %d && <= %d\n",
+ r->width, config->min_width, config->max_width);
return ERR_PTR(-EINVAL);
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
- DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
- r->height, config->min_height, config->max_height);
+ drm_dbg_kms(dev, "bad framebuffer height %d, should be >= %d && <= %d\n",
+ r->height, config->min_height, config->max_height);
return ERR_PTR(-EINVAL);
}
if (r->flags & DRM_MODE_FB_MODIFIERS &&
dev->mode_config.fb_modifiers_not_supported) {
- DRM_DEBUG_KMS("driver does not support fb modifiers\n");
+ drm_dbg_kms(dev, "driver does not support fb modifiers\n");
return ERR_PTR(-EINVAL);
}
@@ -320,7 +320,7 @@ drm_internal_framebuffer_create(struct drm_device *dev,
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
- DRM_DEBUG_KMS("could not create framebuffer\n");
+ drm_dbg_kms(dev, "could not create framebuffer\n");
return fb;
}
@@ -356,7 +356,7 @@ int drm_mode_addfb2(struct drm_device *dev,
if (IS_ERR(fb))
return PTR_ERR(fb);
- DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ drm_dbg_kms(dev, "[FB:%d]\n", fb->base.id);
r->fb_id = fb->base.id;
/* Transfer ownership to the filp for reaping on close */
@@ -384,7 +384,7 @@ int drm_mode_addfb2_ioctl(struct drm_device *dev,
* then. So block it to make userspace fallback to
* ADDFB.
*/
- DRM_DEBUG_KMS("addfb2 broken on bigendian");
+ drm_dbg_kms(dev, "addfb2 broken on bigendian");
return -EOPNOTSUPP;
}
#endif
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 33f1d66dda60..b8db675e7fb5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -169,21 +169,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_private_object_init);
-static void
-drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
-{
- /*
- * Note: obj->dma_buf can't disappear as long as we still hold a
- * handle reference in obj->handle_count.
- */
- mutex_lock(&filp->prime.lock);
- if (obj->dma_buf) {
- drm_prime_remove_buf_handle_locked(&filp->prime,
- obj->dma_buf);
- }
- mutex_unlock(&filp->prime.lock);
-}
-
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
@@ -254,7 +239,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
if (obj->funcs->close)
obj->funcs->close(obj, file_priv);
- drm_gem_remove_prime_handles(obj, file_priv);
+ drm_prime_remove_buf_handle(&file_priv->prime, id);
drm_vma_node_revoke(&obj->vma_node, file_priv);
drm_gem_object_handle_put_unlocked(obj);
@@ -1173,6 +1158,8 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
+ dma_resv_assert_held(obj->resv);
+
if (!obj->funcs->vmap)
return -EOPNOTSUPP;
@@ -1188,6 +1175,8 @@ EXPORT_SYMBOL(drm_gem_vmap);
void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
+ dma_resv_assert_held(obj->resv);
+
if (iosys_map_is_null(map))
return;
@@ -1199,6 +1188,26 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
}
EXPORT_SYMBOL(drm_gem_vunmap);
+int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
+{
+ int ret;
+
+ dma_resv_lock(obj->resv, NULL);
+ ret = drm_gem_vmap(obj, map);
+ dma_resv_unlock(obj->resv);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_vmap_unlocked);
+
+void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
+{
+ dma_resv_lock(obj->resv, NULL);
+ drm_gem_vunmap(obj, map);
+ dma_resv_unlock(obj->resv);
+}
+EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
+
/**
* drm_gem_lock_reservations - Sets up the ww context and acquires
* the lock on an array of GEM objects.
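
A minimal usage sketch of the new wrappers, assuming a GEM object "obj" obtained elsewhere (kernel context, not compilable stand-alone):

	/* vmap a GEM object from a context that does not already
	 * hold the reservation lock */
	static int example_vmap_access(struct drm_gem_object *obj)
	{
		struct iosys_map map;
		int ret;

		ret = drm_gem_vmap_unlocked(obj, &map);	/* takes obj->resv internally */
		if (ret)
			return ret;

		/* ... CPU access through "map" ... */

		drm_gem_vunmap_unlocked(obj, &map);	/* takes obj->resv internally */
		return 0;
	}

Callers that already hold the reservation lock keep using drm_gem_vmap()/drm_gem_vunmap(), which now assert it.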
diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index f6901ff97bbb..1e658c448366 100644
--- a/drivers/gpu/drm/drm_gem_dma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -230,7 +230,7 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
if (gem_obj->import_attach) {
if (dma_obj->vaddr)
- dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
+ dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
} else if (dma_obj->vaddr) {
if (dma_obj->map_noncoherent)
@@ -581,7 +581,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
struct iosys_map map;
int ret;
- ret = dma_buf_vmap(attach->dmabuf, &map);
+ ret = dma_buf_vmap_unlocked(attach->dmabuf, &map);
if (ret) {
DRM_ERROR("Failed to vmap PRIME buffer\n");
return ERR_PTR(ret);
@@ -589,7 +589,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
obj = drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj)) {
- dma_buf_vunmap(attach->dmabuf, &map);
+ dma_buf_vunmap_unlocked(attach->dmabuf, &map);
return obj;
}
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 880a4975507f..e35e224e6303 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -354,7 +354,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
ret = -EINVAL;
goto err_drm_gem_vunmap;
}
- ret = drm_gem_vmap(obj, &map[i]);
+ ret = drm_gem_vmap_unlocked(obj, &map[i]);
if (ret)
goto err_drm_gem_vunmap;
}
@@ -376,7 +376,7 @@ err_drm_gem_vunmap:
obj = drm_gem_fb_get_obj(fb, i);
if (!obj)
continue;
- drm_gem_vunmap(obj, &map[i]);
+ drm_gem_vunmap_unlocked(obj, &map[i]);
}
return ret;
}
@@ -403,7 +403,7 @@ void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map)
continue;
if (iosys_map_is_null(&map[i]))
continue;
- drm_gem_vunmap(obj, &map[i]);
+ drm_gem_vunmap_unlocked(obj, &map[i]);
}
}
EXPORT_SYMBOL(drm_gem_fb_vunmap);
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index e5fc875990c4..d5962a34c01d 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -64,13 +64,8 @@ int drm_gem_ttm_vmap(struct drm_gem_object *gem,
struct iosys_map *map)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
- int ret;
-
- dma_resv_lock(gem->resv, NULL);
- ret = ttm_bo_vmap(bo, map);
- dma_resv_unlock(gem->resv);
- return ret;
+ return ttm_bo_vmap(bo, map);
}
EXPORT_SYMBOL(drm_gem_ttm_vmap);
@@ -87,9 +82,7 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
- dma_resv_lock(gem->resv, NULL);
ttm_bo_vunmap(bo, map);
- dma_resv_unlock(gem->resv);
}
EXPORT_SYMBOL(drm_gem_ttm_vunmap);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 1fbbc19f1ac0..7bb98e6a446d 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -74,8 +74,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf);
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle);
/* drm_drv.c */
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 3ec02748d56f..497ef4b6a90a 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -606,7 +606,7 @@ int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi)
EXPORT_SYMBOL(mipi_dsi_turn_on_peripheral);
/*
- * mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of the
+ * mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of
* the payload in a long packet transmitted from the peripheral back to the
* host processor
* @dsi: DSI peripheral device
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 939d621c9ad4..688c8afe0bf1 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -151,9 +151,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->registration_state != DRM_CONNECTOR_REGISTERED)
- continue;
-
/* only expose writeback connectors if userspace understands them */
if (!file_priv->writeback_connectors &&
(connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK))
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 304004fb80aa..5d4ac79381c4 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1801,20 +1801,24 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
name = mode_option;
+ /* Locate the start of named options */
+ options_ptr = strchr(name, ',');
+ if (options_ptr)
+ options_off = options_ptr - name;
+ else
+ options_off = strlen(name);
+
/* Try to locate the bpp and refresh specifiers, if any */
- bpp_ptr = strchr(name, '-');
+ bpp_ptr = strnchr(name, options_off, '-');
+ while (bpp_ptr && !isdigit(bpp_ptr[1]))
+ bpp_ptr = strnchr(bpp_ptr + 1, options_off, '-');
if (bpp_ptr)
bpp_off = bpp_ptr - name;
- refresh_ptr = strchr(name, '@');
+ refresh_ptr = strnchr(name, options_off, '@');
if (refresh_ptr)
refresh_off = refresh_ptr - name;
- /* Locate the start of named options */
- options_ptr = strchr(name, ',');
- if (options_ptr)
- options_off = options_ptr - name;
-
/* Locate the end of the name / resolution, and parse it */
if (bpp_ptr) {
mode_end = bpp_off;
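
Bounding the '-' and '@' searches to the text before the first ',' keeps characters inside named options from being misread as bpp or refresh markers. A user-space rendition of the bounded lookup (strnchr() is kernel-only, so memchr() stands in):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *opt = "1920x1080@60,rotate=180";
		const char *comma = strchr(opt, ',');
		size_t options_off = comma ? (size_t)(comma - opt) : strlen(opt);
		const char *refresh = memchr(opt, '@', options_off);

		printf("refresh spec at offset %td\n", refresh - opt); /* 9 */
		return 0;
	}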
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 865bd999b187..ba6a9136a065 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -298,7 +298,9 @@ EXPORT_SYMBOL(drm_plane_helper_destroy);
* scale and positioning are not expected to change since the plane is always
* a fullscreen scanout buffer.
*
- * This is often the case for the primary plane of simple framebuffers.
+ * This is often the case for the primary plane of simple framebuffers. See
+ * also drm_crtc_helper_atomic_check() for the respective CRTC-state check
+ * helper function.
*
* RETURNS:
* Zero on success, or an errno code otherwise.
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index a3f180653b8b..20e109a802ae 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -190,29 +190,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
return -ENOENT;
}
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle)
{
struct rb_node *rb;
- rb = prime_fpriv->dmabufs.rb_node;
+ mutex_lock(&prime_fpriv->lock);
+
+ rb = prime_fpriv->handles.rb_node;
while (rb) {
struct drm_prime_member *member;
- member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
- if (member->dma_buf == dma_buf) {
+ member = rb_entry(rb, struct drm_prime_member, handle_rb);
+ if (member->handle == handle) {
rb_erase(&member->handle_rb, &prime_fpriv->handles);
rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
- dma_buf_put(dma_buf);
+ dma_buf_put(member->dma_buf);
kfree(member);
- return;
- } else if (member->dma_buf < dma_buf) {
+ break;
+ } else if (member->handle < handle) {
rb = rb->rb_right;
} else {
rb = rb->rb_left;
}
}
+
+ mutex_unlock(&prime_fpriv->lock);
}
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
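
The removal path now descends the handle-keyed tree instead of the dmabuf-keyed one. The same descend-by-key shape, with a plain binary search tree standing in for the kernel rbtree (runnable user-space C):

	#include <stdio.h>

	struct node {
		unsigned int handle;
		struct node *left, *right;
	};

	static struct node *find(struct node *n, unsigned int handle)
	{
		while (n) {
			if (n->handle == handle)
				return n;
			n = (n->handle < handle) ? n->right : n->left;
		}
		return NULL;
	}

	int main(void)
	{
		struct node c = { 3, NULL, NULL };
		struct node a = { 1, NULL, NULL };
		struct node b = { 2, &a, &c };

		printf("%u\n", find(&b, 3)->handle); /* 3 */
		return 0;
	}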
@@ -936,7 +940,7 @@ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
get_dma_buf(dma_buf);
- sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto fail_detach;
@@ -954,7 +958,7 @@ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
return obj;
fail_unmap:
- dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+ dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
@@ -1052,7 +1056,7 @@ void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
attach = obj->import_attach;
if (sg)
- dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+ dma_buf_unmap_attachment_unlocked(attach, sg, DMA_BIDIRECTIONAL);
dma_buf = attach->dmabuf;
dma_buf_detach(attach->dmabuf, attach);
/* remove the reference */
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index f783d4963d4b..5b93c11895bb 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -23,14 +23,13 @@
* Rob Clark <robdclark@gmail.com>
*/
-#define DEBUG /* for pr_debug() */
-
#include <linux/stdarg.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/dynamic_debug.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
@@ -40,7 +39,7 @@
* __drm_debug: Enable debug output.
* Bitmask of DRM_UT_x. See include/drm/drm_print.h for details.
*/
-unsigned int __drm_debug;
+unsigned long __drm_debug;
EXPORT_SYMBOL(__drm_debug);
MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
@@ -52,7 +51,30 @@ MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug cat
"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n"
"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
-module_param_named(debug, __drm_debug, int, 0600);
+
+#if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+module_param_named(debug, __drm_debug, ulong, 0600);
+#else
+/* classnames must match vals of enum drm_debug_category */
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
+static struct ddebug_class_param drm_debug_bitmap = {
+ .bits = &__drm_debug,
+ .flags = "p",
+ .map = &drm_debug_classes,
+};
+module_param_cb(debug, &param_ops_dyndbg_classes, &drm_debug_bitmap, 0600);
+#endif
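
Either way the knob keeps its bitmask semantics: one bit per drm_debug_category. A tiny demo of the category test, using the CORE/DRIVER/KMS bit values from the parameter description above:

	#include <stdio.h>

	enum { DRM_UT_CORE = 0x1, DRM_UT_DRIVER = 0x2, DRM_UT_KMS = 0x4 };

	int main(void)
	{
		unsigned long drm_debug = 0x6;	/* as if booted with drm.debug=0x6 */

		printf("core:%d kms:%d\n",
		       !!(drm_debug & DRM_UT_CORE),
		       !!(drm_debug & DRM_UT_KMS)); /* core:0 kms:1 */
		return 0;
	}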
void __drm_puts_coredump(struct drm_printer *p, const char *str)
{
@@ -162,7 +184,8 @@ EXPORT_SYMBOL(__drm_printfn_info);
void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
{
- pr_debug("%s %pV", p->prefix, vaf);
+ /* pr_debug callsite decorations are unhelpful here */
+ printk(KERN_DEBUG "%s %pV", p->prefix, vaf);
}
EXPORT_SYMBOL(__drm_printfn_debug);
@@ -256,15 +279,16 @@ void drm_dev_printk(const struct device *dev, const char *level,
}
EXPORT_SYMBOL(drm_dev_printk);
-void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
- const char *format, ...)
+void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
+ enum drm_debug_category category, const char *format, ...)
{
struct va_format vaf;
va_list args;
- if (!drm_debug_enabled(category))
+ if (!__drm_debug_enabled(category))
return;
+ /* we know we are printing for either syslog, tracefs, or both */
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
@@ -278,14 +302,14 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
va_end(args);
}
-EXPORT_SYMBOL(drm_dev_dbg);
+EXPORT_SYMBOL(__drm_dev_dbg);
-void __drm_dbg(enum drm_debug_category category, const char *format, ...)
+void ___drm_dbg(struct _ddebug *desc, enum drm_debug_category category, const char *format, ...)
{
struct va_format vaf;
va_list args;
- if (!drm_debug_enabled(category))
+ if (!__drm_debug_enabled(category))
return;
va_start(args, format);
@@ -297,7 +321,7 @@ void __drm_dbg(enum drm_debug_category category, const char *format, ...)
va_end(args);
}
-EXPORT_SYMBOL(__drm_dbg);
+EXPORT_SYMBOL(___drm_dbg);
void __drm_err(const char *format, ...)
{
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 69b0b2b9cc1c..2fc21df709bc 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -367,7 +367,7 @@ static int drm_helper_probe_get_modes(struct drm_connector *connector)
* override/firmware EDID.
*/
if (count == 0 && connector->status == connector_status_connected)
- count = drm_add_override_edid_modes(connector);
+ count = drm_edid_override_connector_update(connector);
return count;
}
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index 0460e874896e..85c79a38c13a 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -80,7 +80,7 @@ static u32 clip_scaled(int src, int dst, int *clip)
* @dst: destination window rectangle
* @clip: clip rectangle
*
- * Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the
+ * Clip rectangle @dst by rectangle @clip. Clip rectangle @src by
* the corresponding amounts, retaining the vertical and horizontal scaling
* factors from @src to @dst.
*
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index e9f782119d3d..31233c6ae3c4 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -102,10 +102,14 @@ static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
int ret;
- ret = drm_atomic_helper_check_crtc_state(crtc_state, false);
+ if (!crtc_state->enable)
+ goto out;
+
+ ret = drm_atomic_helper_check_crtc_primary_plane(crtc_state);
if (ret)
return ret;
+out:
return drm_atomic_add_affected_planes(state, crtc);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 3fa2da149639..7031db145a77 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -65,7 +65,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr);
if (etnaviv_obj->vaddr)
- dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf, &map);
+ dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index bf33c3084cb4..a971590b8132 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -4,7 +4,6 @@
// Author: Inki Dae <inki.dae@samsung.com>
// Author: Andrzej Hajda <a.hajda@samsung.com>
-#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index aa3ecf771fd3..5f502a0048ab 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -286,7 +286,7 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
info->fbops = &psbfb_unaccel_ops;
- info->fix.smem_start = dev->mode_config.fb_base;
+ info->fix.smem_start = dev_priv->fb_base;
info->fix.smem_len = size;
info->fix.ywrapstep = 0;
info->fix.ypanstep = 0;
@@ -296,7 +296,7 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
info->screen_size = size;
if (dev_priv->gtt.stolen_size) {
- info->apertures->ranges[0].base = dev->mode_config.fb_base;
+ info->apertures->ranges[0].base = dev_priv->fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
}
@@ -527,7 +527,7 @@ void psb_modeset_init(struct drm_device *dev)
/* set memory base */
/* Oaktrail and Poulsbo should use BAR 2*/
- pci_read_config_dword(pdev, PSB_BSM, (u32 *)&(dev->mode_config.fb_base));
+ pci_read_config_dword(pdev, PSB_BSM, (u32 *)&(dev_priv->fb_base));
/* num pipes is 2 for PSB but 1 for Mrst */
for (i = 0; i < dev_priv->num_pipe; i++)
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index dffe37490206..4b7627a72637 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -112,12 +112,12 @@ static void psb_gem_free_object(struct drm_gem_object *obj)
{
struct psb_gem_object *pobj = to_psb_gem_object(obj);
- drm_gem_object_release(obj);
-
/* Undo the mmap pin if we are destroying the object */
if (pobj->mmapping)
psb_gem_unpin(pobj);
+ drm_gem_object_release(obj);
+
WARN_ON(pobj->in_gart && !pobj->stolen);
release_resource(&pobj->resource);
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index b0ea911b27de..fe7b8436f87a 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -532,15 +532,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc,
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
gma_crtc->page_flip_event = event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
/* Call this locked if we want an event at vblank interrupt. */
ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
if (ret) {
- gma_crtc->page_flip_event = NULL;
- drm_crtc_vblank_put(crtc);
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (gma_crtc->page_flip_event) {
+ gma_crtc->page_flip_event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
} else {
ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index ae544b69fc47..a5df6d2f2cab 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -523,6 +523,7 @@ struct drm_psb_private {
uint32_t blc_adj2;
struct drm_fb_helper *fb_helper;
+ resource_size_t fb_base;
bool dsr_enable;
u32 dsr_fb_update;
diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c
index d0addd478815..fa636206f232 100644
--- a/drivers/gpu/drm/gud/gud_connector.c
+++ b/drivers/gpu/drm/gud/gud_connector.c
@@ -355,7 +355,7 @@ static void gud_connector_reset(struct drm_connector *connector)
drm_atomic_helper_connector_reset(connector);
connector->state->tv = gconn->initial_tv_state;
/* Set margins from command line */
- drm_atomic_helper_connector_tv_reset(connector);
+ drm_atomic_helper_connector_tv_margins_reset(connector);
if (gconn->initial_brightness >= 0)
connector->state->tv.brightness = gconn->initial_brightness;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 073adfe438dd..4e41c144a290 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -2,6 +2,7 @@
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
depends on DRM && PCI && (ARM64 || COMPILE_TEST)
+ depends on MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index fe4269c5aa0a..5a2e1cac06b2 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -105,7 +105,6 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
dev->mode_config.max_width = 1920;
dev->mode_config.max_height = 1200;
- dev->mode_config.fb_base = priv->fb_base;
dev->mode_config.preferred_depth = 32;
dev->mode_config.prefer_shadow = 1;
@@ -212,7 +211,7 @@ static int hibmc_hw_map(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- resource_size_t addr, size, ioaddr, iosize;
+ resource_size_t ioaddr, iosize;
ioaddr = pci_resource_start(pdev, 1);
iosize = pci_resource_len(pdev, 1);
@@ -222,16 +221,6 @@ static int hibmc_hw_map(struct hibmc_drm_private *priv)
return -ENOMEM;
}
- addr = pci_resource_start(pdev, 0);
- size = pci_resource_len(pdev, 0);
- priv->fb_map = devm_ioremap(dev->dev, addr, size);
- if (!priv->fb_map) {
- drm_err(dev, "Cannot map framebuffer\n");
- return -ENOMEM;
- }
- priv->fb_base = addr;
- priv->fb_size = size;
-
return 0;
}
@@ -271,7 +260,8 @@ static int hibmc_load(struct drm_device *dev)
if (ret)
goto err;
- ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), priv->fb_size);
+ ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
if (ret) {
drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
goto err;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 7d263f4d7078..4a0cd22c10e2 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -32,9 +32,6 @@ struct hibmc_connector {
struct hibmc_drm_private {
/* hw */
void __iomem *mmio;
- void __iomem *fb_map;
- resource_size_t fb_base;
- resource_size_t fb_size;
/* drm */
struct drm_device dev;
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index 6d11e7938c83..ca127ff797f7 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -23,9 +23,6 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
-
DEFINE_DRM_GEM_FOPS(hv_fops);
static struct drm_driver hyperv_driver = {
@@ -133,7 +130,6 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
}
ret = hyperv_setup_vram(hv, hdev);
-
if (ret)
goto err_vmbus_close;
@@ -146,22 +142,22 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
if (ret)
drm_warn(dev, "Failed to update vram location.\n");
- hv->dirt_needed = true;
-
ret = hyperv_mode_config_init(hv);
if (ret)
- goto err_vmbus_close;
+ goto err_free_mmio;
ret = drm_dev_register(dev, 0);
if (ret) {
drm_err(dev, "Failed to register drm driver.\n");
- goto err_vmbus_close;
+ goto err_free_mmio;
}
drm_fbdev_generic_setup(dev, 0);
return 0;
+err_free_mmio:
+ vmbus_free_mmio(hv->mem->start, hv->fb_size);
err_vmbus_close:
vmbus_close(hdev->channel);
err_hv_set_drv_data:
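
The added err_free_mmio label follows the usual goto-unwind ordering: a failure at step N jumps to a label that undoes exactly steps N-1..1, in reverse. A stand-alone illustration of the pattern:

	#include <stdio.h>

	static int step(int n, int fail)
	{
		if (n == fail)
			return -1;
		printf("do %d\n", n);
		return 0;
	}

	static void undo(int n) { printf("undo %d\n", n); }

	int main(void)
	{
		int fail = 3;

		if (step(1, fail)) goto err0;
		if (step(2, fail)) goto err1;
		if (step(3, fail)) goto err2;
		return 0;
	err2:	undo(2);
	err1:	undo(1);
	err0:	return 1;	/* prints: do 1, do 2, undo 2, undo 1 */
	}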
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
index 76a182a9a765..013a7829182d 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
@@ -208,7 +208,7 @@ static inline int hyperv_sendpacket(struct hv_device *hdev, struct synthvid_msg
VM_PKT_DATA_INBAND, 0);
if (ret)
- drm_err(&hv->dev, "Unable to send packet via vmbus\n");
+ drm_err_ratelimited(&hv->dev, "Unable to send packet via vmbus; error %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index b91e48d2190d..578b738859b9 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -417,11 +417,9 @@ fail:
return -ENODEV;
}
-static int ch7006_remove(struct i2c_client *client)
+static void ch7006_remove(struct i2c_client *client)
{
ch7006_dbg(client, "\n");
-
- return 0;
}
static int ch7006_resume(struct device *dev)
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index 741886b54419..1bc0b5de4499 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -370,12 +370,6 @@ sil164_probe(struct i2c_client *client, const struct i2c_device_id *id)
return 0;
}
-static int
-sil164_remove(struct i2c_client *client)
-{
- return 0;
-}
-
static struct i2c_client *
sil164_detect_slave(struct i2c_client *client)
{
@@ -427,7 +421,6 @@ MODULE_DEVICE_TABLE(i2c, sil164_ids);
static struct drm_i2c_encoder_driver sil164_driver = {
.i2c_driver = {
.probe = sil164_probe,
- .remove = sil164_remove,
.driver = {
.name = "sil164",
},
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 5b03fdd1eaa4..9ed54e7ccff2 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -478,14 +478,12 @@ static int tda9950_probe(struct i2c_client *client,
return 0;
}
-static int tda9950_remove(struct i2c_client *client)
+static void tda9950_remove(struct i2c_client *client)
{
struct tda9950_priv *priv = i2c_get_clientdata(client);
cec_notifier_cec_adap_unregister(priv->notify, priv->adap);
cec_unregister_adapter(priv->adap);
-
- return 0;
}
static struct i2c_device_id tda9950_ids[] = {
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index f8eb6f69be05..d444e7fffb54 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -2076,11 +2076,10 @@ tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
return ret;
}
-static int tda998x_remove(struct i2c_client *client)
+static void tda998x_remove(struct i2c_client *client)
{
component_del(&client->dev, &tda998x_ops);
tda998x_destroy(&client->dev);
- return 0;
}
#ifdef CONFIG_OF
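
The ch7006/sil164/tda9950/tda998x conversions above all follow the i2c remove() callback's switch to a void return. A minimal sketch of the new shape, with a hypothetical driver-private struct and teardown helper:

	/* "struct example_priv" and example_teardown() are illustrative,
	 * not part of any of the drivers in this series */
	static void example_remove(struct i2c_client *client)
	{
		struct example_priv *priv = i2c_get_clientdata(client);

		example_teardown(priv);
		/* no return value: remove() can no longer report failure */
	}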
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 2535593ab379..51704b54317c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -285,6 +285,7 @@ i915-y += \
display/intel_ddi.o \
display/intel_ddi_buf_trans.o \
display/intel_display_trace.o \
+ display/intel_dkl_phy.o \
display/intel_dp.o \
display/intel_dp_aux.o \
display/intel_dp_aux_backlight.o \
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 5fbd2ae95869..8aadf96fa5e9 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -120,7 +120,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
pipe_config->hw.adjusted_mode.flags |= flags;
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
- dotclock = pipe_config->port_clock * 2 / 3;
+ dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 2, 3);
else
dotclock = pipe_config->port_clock;
@@ -585,7 +585,7 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
} else {
intel_encoder->pipe_mask = ~0;
}
- intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
+ intel_encoder->cloneable = BIT(INTEL_OUTPUT_ANALOG);
intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
/*
* BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems
@@ -593,7 +593,7 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
* only one port anyway, nothing is lost by allowing it.
*/
if (IS_G4X(dev_priv))
- intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
+ intel_encoder->cloneable |= BIT(INTEL_OUTPUT_HDMI);
dig_port->hdmi.hdmi_reg = hdmi_reg;
dig_port->dp.output_reg = INVALID_MMIO_REG;
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index a5be4af792cb..83aa3800245f 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -104,8 +104,7 @@ static bool hsw_ips_need_disable(struct intel_atomic_state *state,
* Disable IPS before we program the LUT.
*/
if (IS_HASWELL(i915) &&
- (new_crtc_state->uapi.color_mgmt_changed ||
- new_crtc_state->update_pipe) &&
+ intel_crtc_needs_color_update(new_crtc_state) &&
new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
return true;
@@ -146,8 +145,7 @@ static bool hsw_ips_need_enable(struct intel_atomic_state *state,
* Re-enable IPS after the LUT has been programmed.
*/
if (IS_HASWELL(i915) &&
- (new_crtc_state->uapi.color_mgmt_changed ||
- new_crtc_state->update_pipe) &&
+ intel_crtc_needs_color_update(new_crtc_state) &&
new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
return true;
@@ -155,7 +153,7 @@ static bool hsw_ips_need_enable(struct intel_atomic_state *state,
* We can't read out IPS on broadwell, assume the worst and
* forcibly enable IPS on the first fastset.
*/
- if (new_crtc_state->update_pipe && old_crtc_state->inherited)
+ if (intel_crtc_needs_fastset(new_crtc_state) && old_crtc_state->inherited)
return true;
return !old_crtc_state->ips_enabled;
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index ed4d93942dbd..e05e7cd6c412 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1974,16 +1974,8 @@ static void icl_dsi_add_properties(struct intel_connector *connector)
{
const struct drm_display_mode *fixed_mode =
intel_panel_preferred_fixed_mode(connector);
- u32 allowed_scalers;
- allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) |
- BIT(DRM_MODE_SCALE_FULLSCREEN) |
- BIT(DRM_MODE_SCALE_CENTER);
-
- drm_connector_attach_scaling_mode_property(&connector->base,
- allowed_scalers);
-
- connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
+ intel_attach_scaling_mode_property(&connector->base);
drm_connector_set_panel_orientation_with_quirk(&connector->base,
intel_dsi_get_panel_orientation(connector),
@@ -1993,7 +1985,6 @@ static void icl_dsi_add_properties(struct intel_connector *connector)
void icl_dsi_init(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_dsi *intel_dsi;
struct intel_encoder *encoder;
struct intel_connector *intel_connector;
@@ -2018,7 +2009,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
connector = &intel_connector->base;
/* register DSI encoder with DRM subsystem */
- drm_encoder_init(dev, &encoder->base, &gen11_dsi_encoder_funcs,
+ drm_encoder_init(&dev_priv->drm, &encoder->base, &gen11_dsi_encoder_funcs,
DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
@@ -2042,12 +2033,10 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->is_clock_enabled = gen11_dsi_is_clock_enabled;
/* register DSI connector with DRM subsystem */
- drm_connector_init(dev, connector, &gen11_dsi_connector_funcs,
+ drm_connector_init(&dev_priv->drm, connector, &gen11_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
intel_connector->get_hw_state = intel_connector_get_hw_state;
/* attach connector to encoder */
@@ -2055,9 +2044,9 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev_priv->drm.mode_config.mutex);
intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev_priv->drm.mode_config.mutex);
if (!intel_panel_preferred_fixed_mode(intel_connector)) {
drm_err(&dev_priv->drm, "DSI fixed mode info missing\n");
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 18f0a5ae3bac..6621aa245caf 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -252,6 +252,11 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
if (crtc_state->hw.gamma_lut)
drm_property_blob_get(crtc_state->hw.gamma_lut);
+ if (crtc_state->pre_csc_lut)
+ drm_property_blob_get(crtc_state->pre_csc_lut);
+ if (crtc_state->post_csc_lut)
+ drm_property_blob_get(crtc_state->post_csc_lut);
+
crtc_state->update_pipe = false;
crtc_state->disable_lp_wm = false;
crtc_state->disable_cxsr = false;
@@ -274,6 +279,9 @@ static void intel_crtc_put_color_blobs(struct intel_crtc_state *crtc_state)
drm_property_blob_put(crtc_state->hw.degamma_lut);
drm_property_blob_put(crtc_state->hw.gamma_lut);
drm_property_blob_put(crtc_state->hw.ctm);
+
+ drm_property_blob_put(crtc_state->pre_csc_lut);
+ drm_property_blob_put(crtc_state->post_csc_lut);
}
void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index aaa6708256d5..bcf0239b9533 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -425,6 +425,47 @@ static bool intel_plane_do_async_flip(struct intel_plane *plane,
return DISPLAY_VER(i915) < 13 || old_crtc_state->uapi.async_flip;
}
+static bool i9xx_must_disable_cxsr(const struct intel_crtc_state *new_crtc_state,
+ const struct intel_plane_state *old_plane_state,
+ const struct intel_plane_state *new_plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
+ bool old_visible = old_plane_state->uapi.visible;
+ bool new_visible = new_plane_state->uapi.visible;
+ u32 old_ctl = old_plane_state->ctl;
+ u32 new_ctl = new_plane_state->ctl;
+ bool modeset, turn_on, turn_off;
+
+ if (plane->id == PLANE_CURSOR)
+ return false;
+
+ modeset = intel_crtc_needs_modeset(new_crtc_state);
+ turn_off = old_visible && (!new_visible || modeset);
+ turn_on = new_visible && (!old_visible || modeset);
+
+ /* Must disable CxSR around plane enable/disable */
+ if (turn_on || turn_off)
+ return true;
+
+ if (!old_visible || !new_visible)
+ return false;
+
+ /*
+ * Most plane control register updates are blocked while in CxSR.
+ *
+ * Tiling mode is one exception where the primary plane can
+ * apparently handle it, whereas the sprites can not (the
+ * sprite issue being only relevant on VLV/CHV where CxSR
+ * is actually possible with a sprite enabled).
+ */
+ if (plane->id == PLANE_PRIMARY) {
+ old_ctl &= ~DISP_TILED;
+ new_ctl &= ~DISP_TILED;
+ }
+
+ return old_ctl != new_ctl;
+}
+
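
For the primary plane the tiling bit is masked out of both control values before comparing, so a pure tiling change alone does not force CxSR off. A small demo of that comparison (the DISP_TILED bit position below is illustrative, not the real register layout):

	#include <stdint.h>
	#include <stdio.h>

	#define DISP_TILED (1u << 10)	/* illustrative bit position */

	int main(void)
	{
		uint32_t old_ctl = 0x80000000 | DISP_TILED;
		uint32_t new_ctl = 0x80000000;

		old_ctl &= ~DISP_TILED;
		new_ctl &= ~DISP_TILED;
		printf("needs cxsr off: %d\n", old_ctl != new_ctl); /* 0 */
		return 0;
	}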
static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
@@ -482,17 +523,9 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
if (turn_on) {
if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
new_crtc_state->update_wm_pre = true;
-
- /* must disable cxsr around plane enable/disable */
- if (plane->id != PLANE_CURSOR)
- new_crtc_state->disable_cxsr = true;
} else if (turn_off) {
if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
new_crtc_state->update_wm_post = true;
-
- /* must disable cxsr around plane enable/disable */
- if (plane->id != PLANE_CURSOR)
- new_crtc_state->disable_cxsr = true;
} else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
/* FIXME bollocks */
@@ -504,6 +537,10 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
if (visible || was_visible)
new_crtc_state->fb_bits |= plane->frontbuffer_bit;
+ if (HAS_GMCH(dev_priv) &&
+ i9xx_must_disable_cxsr(new_crtc_state, old_plane_state, new_plane_state))
+ new_crtc_state->disable_cxsr = true;
+
/*
* ILK/SNB DVSACNTR/Sprite Enable
* IVB SPR_CTL/Sprite Enable
@@ -1005,7 +1042,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
*/
if (intel_crtc_needs_modeset(crtc_state)) {
ret = i915_sw_fence_await_reservation(&state->commit_ready,
- old_obj->base.resv, NULL,
+ old_obj->base.resv,
false, 0,
GFP_KERNEL);
if (ret < 0)
@@ -1039,8 +1076,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
struct dma_fence *fence;
ret = i915_sw_fence_await_reservation(&state->commit_ready,
- obj->base.resv, NULL,
- false,
+ obj->base.resv, false,
i915_fence_timeout(dev_priv),
GFP_KERNEL);
if (ret < 0)
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index aacbc6da84ef..c3176c9c89a6 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -250,7 +250,7 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int i;
@@ -260,17 +260,17 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
break;
}
- if (DISPLAY_VER(dev_priv) < 12 && adjusted_mode->crtc_clock > 148500)
+ if (DISPLAY_VER(i915) < 12 && adjusted_mode->crtc_clock > 148500)
i = ARRAY_SIZE(hdmi_audio_clock);
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
adjusted_mode->crtc_clock);
i = 1;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Configuring HDMI audio for pixel clock %d (0x%08x)\n",
hdmi_audio_clock[i].clock,
hdmi_audio_clock[i].config);
@@ -304,96 +304,67 @@ static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
return 0;
}
-static bool intel_eld_uptodate(struct drm_connector *connector,
- i915_reg_t reg_eldv, u32 bits_eldv,
- i915_reg_t reg_elda, u32 bits_elda,
- i915_reg_t reg_edid)
+/* ELD buffer size in dwords */
+static int g4x_eld_buffer_size(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- const u8 *eld = connector->eld;
u32 tmp;
- int i;
-
- tmp = intel_de_read(dev_priv, reg_eldv);
- tmp &= bits_eldv;
-
- if (!tmp)
- return false;
- tmp = intel_de_read(dev_priv, reg_elda);
- tmp &= ~bits_elda;
- intel_de_write(dev_priv, reg_elda, tmp);
+ tmp = intel_de_read(i915, G4X_AUD_CNTL_ST);
- for (i = 0; i < drm_eld_size(eld) / 4; i++)
- if (intel_de_read(dev_priv, reg_edid) != *((const u32 *)eld + i))
- return false;
-
- return true;
+ return REG_FIELD_GET(G4X_ELD_BUFFER_SIZE_MASK, tmp);
}
static void g4x_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 eldv, tmp;
-
- tmp = intel_de_read(dev_priv, G4X_AUD_VID_DID);
- if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
- eldv = G4X_ELDV_DEVCL_DEVBLC;
- else
- eldv = G4X_ELDV_DEVCTG;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
/* Invalidate ELD */
- tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
- tmp &= ~eldv;
- intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
+ intel_de_rmw(i915, G4X_AUD_CNTL_ST,
+ G4X_ELD_VALID, 0);
+
+ intel_crtc_wait_for_next_vblank(crtc);
+ intel_crtc_wait_for_next_vblank(crtc);
}
static void g4x_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_connector *connector = conn_state->connector;
- const u8 *eld = connector->eld;
- u32 eldv;
- u32 tmp;
- int len, i;
+ const u32 *eld = (const u32 *)connector->eld;
+ int eld_buffer_size, len, i;
- tmp = intel_de_read(dev_priv, G4X_AUD_VID_DID);
- if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
- eldv = G4X_ELDV_DEVCL_DEVBLC;
- else
- eldv = G4X_ELDV_DEVCTG;
+ intel_crtc_wait_for_next_vblank(crtc);
- if (intel_eld_uptodate(connector,
- G4X_AUD_CNTL_ST, eldv,
- G4X_AUD_CNTL_ST, G4X_ELD_ADDR_MASK,
- G4X_HDMIW_HDMIEDID))
- return;
+ intel_de_rmw(i915, G4X_AUD_CNTL_ST,
+ G4X_ELD_VALID | G4X_ELD_ADDRESS_MASK, 0);
- tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
- tmp &= ~(eldv | G4X_ELD_ADDR_MASK);
- len = (tmp >> 9) & 0x1f; /* ELD buffer size */
- intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
+ eld_buffer_size = g4x_eld_buffer_size(i915);
+ len = min(drm_eld_size(connector->eld) / 4, eld_buffer_size);
- len = min(drm_eld_size(eld) / 4, len);
for (i = 0; i < len; i++)
- intel_de_write(dev_priv, G4X_HDMIW_HDMIEDID,
- *((const u32 *)eld + i));
+ intel_de_write(i915, G4X_HDMIW_HDMIEDID, eld[i]);
+ for (; i < eld_buffer_size; i++)
+ intel_de_write(i915, G4X_HDMIW_HDMIEDID, 0);
- tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
- tmp |= eldv;
- intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
+ drm_WARN_ON(&i915->drm,
+ (intel_de_read(i915, G4X_AUD_CNTL_ST) & G4X_ELD_ADDRESS_MASK) != 0);
+
+ intel_de_rmw(i915, G4X_AUD_CNTL_ST,
+ 0, G4X_ELD_VALID);
}
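
g4x_eld_buffer_size() uses a REG_FIELD_GET()-style extraction: mask out the field, then divide by the mask's lowest set bit to shift it down. A runnable rendition, with an illustrative mask (a 5-bit field at bit 9, matching the old open-coded "(tmp >> 9) & 0x1f"):

	#include <stdint.h>
	#include <stdio.h>

	#define FIELD_GET(mask, reg) (((reg) & (mask)) / ((mask) & -(mask)))

	int main(void)
	{
		uint32_t mask = 0x1f << 9;
		uint32_t reg = 21u << 9;

		printf("eld buffer dwords: %u\n",
		       (unsigned)FIELD_GET(mask, reg)); /* 21 */
		return 0;
	}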
static void
hsw_dp_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->display.audio.component;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct i915_audio_component *acomp = i915->display.audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
const struct dp_aud_n_m *nm;
@@ -403,12 +374,12 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
rate = acomp ? acomp->aud_sample_rate[port] : 0;
nm = audio_config_dp_get_n_m(crtc_state, rate);
if (nm)
- drm_dbg_kms(&dev_priv->drm, "using Maud %u, Naud %u\n", nm->m,
+ drm_dbg_kms(&i915->drm, "using Maud %u, Naud %u\n", nm->m,
nm->n);
else
- drm_dbg_kms(&dev_priv->drm, "using automatic Maud, Naud\n");
+ drm_dbg_kms(&i915->drm, "using automatic Maud, Naud\n");
- tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
+ tmp = intel_de_read(i915, HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
@@ -420,9 +391,9 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
tmp |= AUD_CONFIG_N_PROG_ENABLE;
}
- intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
+ intel_de_write(i915, HSW_AUD_CFG(cpu_transcoder), tmp);
- tmp = intel_de_read(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+ tmp = intel_de_read(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
tmp &= ~AUD_CONFIG_M_MASK;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
@@ -433,15 +404,15 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
tmp |= AUD_M_CTS_M_PROG_ENABLE;
}
- intel_de_write(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+ intel_de_write(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->display.audio.component;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct i915_audio_component *acomp = i915->display.audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
int n, rate;
@@ -449,7 +420,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
rate = acomp ? acomp->aud_sample_rate[port] : 0;
- tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
+ tmp = intel_de_read(i915, HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
@@ -457,25 +428,25 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
n = audio_config_hdmi_get_n(crtc_state, rate);
if (n != 0) {
- drm_dbg_kms(&dev_priv->drm, "using N %d\n", n);
+ drm_dbg_kms(&i915->drm, "using N %d\n", n);
tmp &= ~AUD_CONFIG_N_MASK;
tmp |= AUD_CONFIG_N(n);
tmp |= AUD_CONFIG_N_PROG_ENABLE;
} else {
- drm_dbg_kms(&dev_priv->drm, "using automatic N\n");
+ drm_dbg_kms(&i915->drm, "using automatic N\n");
}
- intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
+ intel_de_write(i915, HSW_AUD_CFG(cpu_transcoder), tmp);
/*
* Let's disable "Enable CTS or M Prog bit"
* and let HW calculate the value
*/
- tmp = intel_de_read(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+ tmp = intel_de_read(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
- intel_de_write(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+ intel_de_write(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
@@ -488,33 +459,48 @@ hsw_audio_config_update(struct intel_encoder *encoder,
hsw_hdmi_audio_config_update(encoder, crtc_state);
}
+/* ELD buffer size in dwords */
+static int hsw_eld_buffer_size(struct drm_i915_private *i915,
+ enum transcoder cpu_transcoder)
+{
+ u32 tmp;
+
+ tmp = intel_de_read(i915, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
+
+ return REG_FIELD_GET(IBX_ELD_BUFFER_SIZE_MASK, tmp);
+}
+
static void hsw_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- u32 tmp;
- mutex_lock(&dev_priv->display.audio.mutex);
+ mutex_lock(&i915->display.audio.mutex);
/* Disable timestamps */
- tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
- tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
- tmp |= AUD_CONFIG_N_PROG_ENABLE;
- tmp &= ~AUD_CONFIG_UPPER_N_MASK;
- tmp &= ~AUD_CONFIG_LOWER_N_MASK;
- if (intel_crtc_has_dp_encoder(old_crtc_state))
- tmp |= AUD_CONFIG_N_VALUE_INDEX;
- intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
+ intel_de_rmw(i915, HSW_AUD_CFG(cpu_transcoder),
+ AUD_CONFIG_N_VALUE_INDEX |
+ AUD_CONFIG_UPPER_N_MASK |
+ AUD_CONFIG_LOWER_N_MASK,
+ AUD_CONFIG_N_PROG_ENABLE |
+ (intel_crtc_has_dp_encoder(old_crtc_state) ?
+ AUD_CONFIG_N_VALUE_INDEX : 0));
/* Invalidate ELD */
- tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
- tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
- tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder);
- intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
+ AUDIO_ELD_VALID(cpu_transcoder), 0);
+
+ intel_crtc_wait_for_next_vblank(crtc);
+ intel_crtc_wait_for_next_vblank(crtc);
- mutex_unlock(&dev_priv->display.audio.mutex);
+ /* Disable audio presence detect */
+ intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
+ AUDIO_OUTPUT_ENABLE(cpu_transcoder), 0);
+
+ mutex_unlock(&i915->display.audio.mutex);
}
static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
@@ -626,178 +612,190 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder,
intel_de_write(i915, AUD_CONFIG_BE, val);
}
-#undef ROUNDING_FACTOR
-
static void hsw_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_connector *connector = conn_state->connector;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- const u8 *eld = connector->eld;
- u32 tmp;
- int len, i;
+ const u32 *eld = (const u32 *)connector->eld;
+ int eld_buffer_size, len, i;
- mutex_lock(&dev_priv->display.audio.mutex);
+ mutex_lock(&i915->display.audio.mutex);
	/* Enable Audio WA for 4k DSC use cases */
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
enable_audio_dsc_wa(encoder, crtc_state);
- /* Enable audio presence detect, invalidate ELD */
- tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
- tmp |= AUDIO_OUTPUT_ENABLE(cpu_transcoder);
- tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
- intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ /* Enable audio presence detect */
+ intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
+ 0, AUDIO_OUTPUT_ENABLE(cpu_transcoder));
- /*
- * FIXME: We're supposed to wait for vblank here, but we have vblanks
- * disabled during the mode set. The proper fix would be to push the
- * rest of the setup into a vblank work item, queued here, but the
- * infrastructure is not there yet.
- */
+ intel_crtc_wait_for_next_vblank(crtc);
+
+ /* Invalidate ELD */
+ intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
+ AUDIO_ELD_VALID(cpu_transcoder), 0);
+
+ /* Reset ELD address */
+ intel_de_rmw(i915, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder),
+ IBX_ELD_ADDRESS_MASK, 0);
- /* Reset ELD write address */
- tmp = intel_de_read(dev_priv, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
- tmp &= ~IBX_ELD_ADDRESS_MASK;
- intel_de_write(dev_priv, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder), tmp);
+ eld_buffer_size = hsw_eld_buffer_size(i915, cpu_transcoder);
+ len = min(drm_eld_size(connector->eld) / 4, eld_buffer_size);
- /* Up to 84 bytes of hw ELD buffer */
- len = min(drm_eld_size(eld), 84);
- for (i = 0; i < len / 4; i++)
- intel_de_write(dev_priv, HSW_AUD_EDID_DATA(cpu_transcoder),
- *((const u32 *)eld + i));
+ for (i = 0; i < len; i++)
+ intel_de_write(i915, HSW_AUD_EDID_DATA(cpu_transcoder), eld[i]);
+ for (; i < eld_buffer_size; i++)
+ intel_de_write(i915, HSW_AUD_EDID_DATA(cpu_transcoder), 0);
+
+ drm_WARN_ON(&i915->drm,
+ (intel_de_read(i915, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder)) &
+ IBX_ELD_ADDRESS_MASK) != 0);
/* ELD valid */
- tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
- tmp |= AUDIO_ELD_VALID(cpu_transcoder);
- intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
+ 0, AUDIO_ELD_VALID(cpu_transcoder));
/* Enable timestamps */
hsw_audio_config_update(encoder, crtc_state);
- mutex_unlock(&dev_priv->display.audio.mutex);
+ mutex_unlock(&i915->display.audio.mutex);
+}
+
+struct ilk_audio_regs {
+ i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
+};
+
+static void ilk_audio_regs_init(struct drm_i915_private *i915,
+ enum pipe pipe,
+ struct ilk_audio_regs *regs)
+{
+ if (HAS_PCH_IBX(i915)) {
+ regs->hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+ regs->aud_config = IBX_AUD_CFG(pipe);
+ regs->aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
+ regs->aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ regs->hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
+ regs->aud_config = VLV_AUD_CFG(pipe);
+ regs->aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
+ regs->aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+ } else {
+ regs->hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+ regs->aud_config = CPT_AUD_CFG(pipe);
+ regs->aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
+ regs->aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+ }
+}
+
+/* ELD buffer size in dwords */
+static int ilk_eld_buffer_size(struct drm_i915_private *i915,
+ enum pipe pipe)
+{
+ struct ilk_audio_regs regs;
+ u32 tmp;
+
+ ilk_audio_regs_init(i915, pipe, &regs);
+
+ tmp = intel_de_read(i915, regs.aud_cntl_st);
+
+ return REG_FIELD_GET(IBX_ELD_BUFFER_SIZE_MASK, tmp);
}
static void ilk_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- enum pipe pipe = crtc->pipe;
enum port port = encoder->port;
- u32 tmp, eldv;
- i915_reg_t aud_config, aud_cntrl_st2;
+ enum pipe pipe = crtc->pipe;
+ struct ilk_audio_regs regs;
- if (drm_WARN_ON(&dev_priv->drm, port == PORT_A))
+ if (drm_WARN_ON(&i915->drm, port == PORT_A))
return;
- if (HAS_PCH_IBX(dev_priv)) {
- aud_config = IBX_AUD_CFG(pipe);
- aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- aud_config = VLV_AUD_CFG(pipe);
- aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
- } else {
- aud_config = CPT_AUD_CFG(pipe);
- aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
- }
+ ilk_audio_regs_init(i915, pipe, &regs);
- /* Disable timestamps */
- tmp = intel_de_read(dev_priv, aud_config);
- tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
- tmp |= AUD_CONFIG_N_PROG_ENABLE;
- tmp &= ~AUD_CONFIG_UPPER_N_MASK;
- tmp &= ~AUD_CONFIG_LOWER_N_MASK;
- if (intel_crtc_has_dp_encoder(old_crtc_state))
- tmp |= AUD_CONFIG_N_VALUE_INDEX;
- intel_de_write(dev_priv, aud_config, tmp);
+ mutex_lock(&i915->display.audio.mutex);
- eldv = IBX_ELD_VALID(port);
+ /* Disable timestamps */
+ intel_de_rmw(i915, regs.aud_config,
+ AUD_CONFIG_N_VALUE_INDEX |
+ AUD_CONFIG_UPPER_N_MASK |
+ AUD_CONFIG_LOWER_N_MASK,
+ AUD_CONFIG_N_PROG_ENABLE |
+ (intel_crtc_has_dp_encoder(old_crtc_state) ?
+ AUD_CONFIG_N_VALUE_INDEX : 0));
/* Invalidate ELD */
- tmp = intel_de_read(dev_priv, aud_cntrl_st2);
- tmp &= ~eldv;
- intel_de_write(dev_priv, aud_cntrl_st2, tmp);
+ intel_de_rmw(i915, regs.aud_cntrl_st2,
+ IBX_ELD_VALID(port), 0);
+
+ mutex_unlock(&i915->display.audio.mutex);
+
+ intel_crtc_wait_for_next_vblank(crtc);
+ intel_crtc_wait_for_next_vblank(crtc);
}
static void ilk_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_connector *connector = conn_state->connector;
- enum pipe pipe = crtc->pipe;
+ const u32 *eld = (const u32 *)connector->eld;
enum port port = encoder->port;
- const u8 *eld = connector->eld;
- u32 tmp, eldv;
- int len, i;
- i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
+ enum pipe pipe = crtc->pipe;
+ int eld_buffer_size, len, i;
+ struct ilk_audio_regs regs;
- if (drm_WARN_ON(&dev_priv->drm, port == PORT_A))
+ if (drm_WARN_ON(&i915->drm, port == PORT_A))
return;
- /*
- * FIXME: We're supposed to wait for vblank here, but we have vblanks
- * disabled during the mode set. The proper fix would be to push the
- * rest of the setup into a vblank work item, queued here, but the
- * infrastructure is not there yet.
- */
+ intel_crtc_wait_for_next_vblank(crtc);
- if (HAS_PCH_IBX(dev_priv)) {
- hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
- aud_config = IBX_AUD_CFG(pipe);
- aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
- aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
- } else if (IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)) {
- hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
- aud_config = VLV_AUD_CFG(pipe);
- aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
- aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
- } else {
- hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
- aud_config = CPT_AUD_CFG(pipe);
- aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
- aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
- }
+ ilk_audio_regs_init(i915, pipe, &regs);
- eldv = IBX_ELD_VALID(port);
+ mutex_lock(&i915->display.audio.mutex);
/* Invalidate ELD */
- tmp = intel_de_read(dev_priv, aud_cntrl_st2);
- tmp &= ~eldv;
- intel_de_write(dev_priv, aud_cntrl_st2, tmp);
+ intel_de_rmw(i915, regs.aud_cntrl_st2,
+ IBX_ELD_VALID(port), 0);
- /* Reset ELD write address */
- tmp = intel_de_read(dev_priv, aud_cntl_st);
- tmp &= ~IBX_ELD_ADDRESS_MASK;
- intel_de_write(dev_priv, aud_cntl_st, tmp);
+ /* Reset ELD address */
+ intel_de_rmw(i915, regs.aud_cntl_st,
+ IBX_ELD_ADDRESS_MASK, 0);
- /* Up to 84 bytes of hw ELD buffer */
- len = min(drm_eld_size(eld), 84);
- for (i = 0; i < len / 4; i++)
- intel_de_write(dev_priv, hdmiw_hdmiedid,
- *((const u32 *)eld + i));
+ eld_buffer_size = ilk_eld_buffer_size(i915, pipe);
+ len = min(drm_eld_size(connector->eld) / 4, eld_buffer_size);
+
+ for (i = 0; i < len; i++)
+ intel_de_write(i915, regs.hdmiw_hdmiedid, eld[i]);
+ for (; i < eld_buffer_size; i++)
+ intel_de_write(i915, regs.hdmiw_hdmiedid, 0);
+
+ drm_WARN_ON(&i915->drm,
+ (intel_de_read(i915, regs.aud_cntl_st) & IBX_ELD_ADDRESS_MASK) != 0);
/* ELD valid */
- tmp = intel_de_read(dev_priv, aud_cntrl_st2);
- tmp |= eldv;
- intel_de_write(dev_priv, aud_cntrl_st2, tmp);
+ intel_de_rmw(i915, regs.aud_cntrl_st2,
+ 0, IBX_ELD_VALID(port));
/* Enable timestamps */
- tmp = intel_de_read(dev_priv, aud_config);
- tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
- tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
- if (intel_crtc_has_dp_encoder(crtc_state))
- tmp |= AUD_CONFIG_N_VALUE_INDEX;
- else
- tmp |= audio_config_hdmi_pixel_clock(crtc_state);
- intel_de_write(dev_priv, aud_config, tmp);
+ intel_de_rmw(i915, regs.aud_config,
+ AUD_CONFIG_N_VALUE_INDEX |
+ AUD_CONFIG_N_PROG_ENABLE |
+ AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK,
+ (intel_crtc_has_dp_encoder(crtc_state) ?
+ AUD_CONFIG_N_VALUE_INDEX :
+ audio_config_hdmi_pixel_clock(crtc_state)));
+
+ mutex_unlock(&i915->display.audio.mutex);
}
/**
@@ -813,8 +811,8 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->display.audio.component;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct i915_audio_component *acomp = i915->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_connector *connector = conn_state->connector;
const struct drm_display_mode *adjusted_mode =
@@ -825,30 +823,30 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
if (!crtc_state->has_audio)
return;
- drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on pipe %c, %u bytes ELD\n",
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on pipe %c, %u bytes ELD\n",
connector->base.id, connector->name,
encoder->base.base.id, encoder->base.name,
pipe_name(pipe), drm_eld_size(connector->eld));
/* FIXME precompute the ELD in .compute_config() */
if (!connector->eld[0])
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Bogus ELD on [CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
- if (dev_priv->display.funcs.audio)
- dev_priv->display.funcs.audio->audio_codec_enable(encoder,
+ if (i915->display.funcs.audio)
+ i915->display.funcs.audio->audio_codec_enable(encoder,
crtc_state,
conn_state);
- mutex_lock(&dev_priv->display.audio.mutex);
+ mutex_lock(&i915->display.audio.mutex);
encoder->audio_connector = connector;
	/* referenced in audio callbacks */
- dev_priv->display.audio.encoder_map[pipe] = encoder;
- mutex_unlock(&dev_priv->display.audio.mutex);
+ i915->display.audio.encoder_map[pipe] = encoder;
+ mutex_unlock(&i915->display.audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@@ -859,7 +857,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
(int) port, (int) pipe);
}
- intel_lpe_audio_notify(dev_priv, pipe, port, connector->eld,
+ intel_lpe_audio_notify(i915, pipe, port, connector->eld,
crtc_state->port_clock,
intel_crtc_has_dp_encoder(crtc_state));
}
@@ -877,8 +875,8 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->display.audio.component;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct i915_audio_component *acomp = i915->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_connector *connector = old_conn_state->connector;
enum port port = encoder->port;
@@ -887,19 +885,19 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
if (!old_crtc_state->has_audio)
return;
- drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on pipe %c\n",
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on pipe %c\n",
connector->base.id, connector->name,
encoder->base.base.id, encoder->base.name, pipe_name(pipe));
- if (dev_priv->display.funcs.audio)
- dev_priv->display.funcs.audio->audio_codec_disable(encoder,
+ if (i915->display.funcs.audio)
+ i915->display.funcs.audio->audio_codec_disable(encoder,
old_crtc_state,
old_conn_state);
- mutex_lock(&dev_priv->display.audio.mutex);
+ mutex_lock(&i915->display.audio.mutex);
encoder->audio_connector = NULL;
- dev_priv->display.audio.encoder_map[pipe] = NULL;
- mutex_unlock(&dev_priv->display.audio.mutex);
+ i915->display.audio.encoder_map[pipe] = NULL;
+ mutex_unlock(&i915->display.audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@@ -910,7 +908,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
(int) port, (int) pipe);
}
- intel_lpe_audio_notify(dev_priv, pipe, port, NULL, 0, false);
+ intel_lpe_audio_notify(i915, pipe, port, NULL, 0, false);
}
static const struct intel_audio_funcs g4x_audio_funcs = {
@@ -930,19 +928,18 @@ static const struct intel_audio_funcs hsw_audio_funcs = {
/**
* intel_audio_hooks_init - Set up chip specific audio hooks
- * @dev_priv: device private
+ * @i915: device private
*/
-void intel_audio_hooks_init(struct drm_i915_private *dev_priv)
+void intel_audio_hooks_init(struct drm_i915_private *i915)
{
- if (IS_G4X(dev_priv)) {
- dev_priv->display.funcs.audio = &g4x_audio_funcs;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.funcs.audio = &ilk_audio_funcs;
- } else if (IS_HASWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 8) {
- dev_priv->display.funcs.audio = &hsw_audio_funcs;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->display.funcs.audio = &ilk_audio_funcs;
- }
+ if (IS_G4X(i915))
+ i915->display.funcs.audio = &g4x_audio_funcs;
+ else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ i915->display.funcs.audio = &ilk_audio_funcs;
+ else if (IS_HASWELL(i915) || DISPLAY_VER(i915) >= 8)
+ i915->display.funcs.audio = &hsw_audio_funcs;
+ else if (HAS_PCH_SPLIT(i915))
+ i915->display.funcs.audio = &ilk_audio_funcs;
}
struct aud_ts_cdclk_m_n {
@@ -1000,7 +997,7 @@ static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state,
return drm_atomic_commit(&state->base);
}
-static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
+static void glk_force_audio_cdclk(struct drm_i915_private *i915,
bool enable)
{
struct drm_modeset_acquire_ctx ctx;
@@ -1008,13 +1005,13 @@ static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
struct intel_crtc *crtc;
int ret;
- crtc = intel_first_crtc(dev_priv);
+ crtc = intel_first_crtc(i915);
if (!crtc)
return;
drm_modeset_acquire_init(&ctx, 0);
- state = drm_atomic_state_alloc(&dev_priv->drm);
- if (drm_WARN_ON(&dev_priv->drm, !state))
+ state = drm_atomic_state_alloc(&i915->drm);
+ if (drm_WARN_ON(&i915->drm, !state))
return;
state->acquire_ctx = &ctx;
@@ -1028,7 +1025,7 @@ retry:
goto retry;
}
- drm_WARN_ON(&dev_priv->drm, ret);
+ drm_WARN_ON(&i915->drm, ret);
drm_atomic_state_put(state);
@@ -1038,30 +1035,30 @@ retry:
static unsigned long i915_audio_component_get_power(struct device *kdev)
{
- struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
intel_wakeref_t ret;
/* Catch potential impedance mismatches before they occur! */
BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long));
- ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO_PLAYBACK);
+ ret = intel_display_power_get(i915, POWER_DOMAIN_AUDIO_PLAYBACK);
- if (dev_priv->display.audio.power_refcount++ == 0) {
- if (DISPLAY_VER(dev_priv) >= 9) {
- intel_de_write(dev_priv, AUD_FREQ_CNTRL,
- dev_priv->display.audio.freq_cntrl);
- drm_dbg_kms(&dev_priv->drm,
+ if (i915->display.audio.power_refcount++ == 0) {
+ if (DISPLAY_VER(i915) >= 9) {
+ intel_de_write(i915, AUD_FREQ_CNTRL,
+ i915->display.audio.freq_cntrl);
+ drm_dbg_kms(&i915->drm,
"restored AUD_FREQ_CNTRL to 0x%x\n",
- dev_priv->display.audio.freq_cntrl);
+ i915->display.audio.freq_cntrl);
}
/* Force CDCLK to 2*BCLK as long as we need audio powered. */
- if (IS_GEMINILAKE(dev_priv))
- glk_force_audio_cdclk(dev_priv, true);
+ if (IS_GEMINILAKE(i915))
+ glk_force_audio_cdclk(i915, true);
- if (DISPLAY_VER(dev_priv) >= 10)
- intel_de_write(dev_priv, AUD_PIN_BUF_CTL,
- (intel_de_read(dev_priv, AUD_PIN_BUF_CTL) | AUD_PIN_BUF_ENABLE));
+ if (DISPLAY_VER(i915) >= 10)
+ intel_de_rmw(i915, AUD_PIN_BUF_CTL,
+ 0, AUD_PIN_BUF_ENABLE);
}
return ret;
@@ -1070,24 +1067,23 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
static void i915_audio_component_put_power(struct device *kdev,
unsigned long cookie)
{
- struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
- if (--dev_priv->display.audio.power_refcount == 0)
- if (IS_GEMINILAKE(dev_priv))
- glk_force_audio_cdclk(dev_priv, false);
+ if (--i915->display.audio.power_refcount == 0)
+ if (IS_GEMINILAKE(i915))
+ glk_force_audio_cdclk(i915, false);
- intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO_PLAYBACK, cookie);
+ intel_display_power_put(i915, POWER_DOMAIN_AUDIO_PLAYBACK, cookie);
}
static void i915_audio_component_codec_wake_override(struct device *kdev,
bool enable)
{
- struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
unsigned long cookie;
- u32 tmp;
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(i915) < 9)
return;
cookie = i915_audio_component_get_power(kdev);
@@ -1096,15 +1092,13 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
* Enable/disable generating the codec wake signal, overriding the
* internal logic to generate the codec wake to controller.
*/
- tmp = intel_de_read(dev_priv, HSW_AUD_CHICKENBIT);
- tmp &= ~SKL_AUD_CODEC_WAKE_SIGNAL;
- intel_de_write(dev_priv, HSW_AUD_CHICKENBIT, tmp);
+ intel_de_rmw(i915, HSW_AUD_CHICKENBIT,
+ SKL_AUD_CODEC_WAKE_SIGNAL, 0);
usleep_range(1000, 1500);
if (enable) {
- tmp = intel_de_read(dev_priv, HSW_AUD_CHICKENBIT);
- tmp |= SKL_AUD_CODEC_WAKE_SIGNAL;
- intel_de_write(dev_priv, HSW_AUD_CHICKENBIT, tmp);
+ intel_de_rmw(i915, HSW_AUD_CHICKENBIT,
+ 0, SKL_AUD_CODEC_WAKE_SIGNAL);
usleep_range(1000, 1500);
}
@@ -1114,12 +1108,12 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
/* Get CDCLK in kHz */
static int i915_audio_component_get_cdclk_freq(struct device *kdev)
{
- struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DDI(dev_priv)))
+ if (drm_WARN_ON_ONCE(&i915->drm, !HAS_DDI(i915)))
return -ENODEV;
- return dev_priv->display.cdclk.hw.cdclk;
+ return i915->display.cdclk.hw.cdclk;
}
/*
@@ -1132,18 +1126,18 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
* will get the right intel_encoder with port matched
* Non-MST & (pipe < 0): get the right intel_encoder with port matched
*/
-static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
- int port, int pipe)
+static struct intel_encoder *get_saved_enc(struct drm_i915_private *i915,
+ int port, int pipe)
{
struct intel_encoder *encoder;
/* MST */
if (pipe >= 0) {
- if (drm_WARN_ON(&dev_priv->drm,
- pipe >= ARRAY_SIZE(dev_priv->display.audio.encoder_map)))
+ if (drm_WARN_ON(&i915->drm,
+ pipe >= ARRAY_SIZE(i915->display.audio.encoder_map)))
return NULL;
- encoder = dev_priv->display.audio.encoder_map[pipe];
+ encoder = i915->display.audio.encoder_map[pipe];
/*
* when bootup, audio driver may not know it is
* MST or not. So it will poll all the port & pipe
@@ -1158,8 +1152,8 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
if (pipe > 0)
return NULL;
- for_each_pipe(dev_priv, pipe) {
- encoder = dev_priv->display.audio.encoder_map[pipe];
+ for_each_pipe(i915, pipe) {
+ encoder = i915->display.audio.encoder_map[pipe];
if (encoder == NULL)
continue;
@@ -1176,23 +1170,23 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
int pipe, int rate)
{
- struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
- struct i915_audio_component *acomp = dev_priv->display.audio.component;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct i915_audio_component *acomp = i915->display.audio.component;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
unsigned long cookie;
int err = 0;
- if (!HAS_DDI(dev_priv))
+ if (!HAS_DDI(i915))
return 0;
cookie = i915_audio_component_get_power(kdev);
- mutex_lock(&dev_priv->display.audio.mutex);
+ mutex_lock(&i915->display.audio.mutex);
/* 1. get the pipe */
- encoder = get_saved_enc(dev_priv, port, pipe);
+ encoder = get_saved_enc(i915, port, pipe);
if (!encoder || !encoder->base.crtc) {
- drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n",
+ drm_dbg_kms(&i915->drm, "Not valid for port %c\n",
port_name(port));
err = -ENODEV;
goto unlock;
@@ -1206,7 +1200,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
hsw_audio_config_update(encoder, crtc->config);
unlock:
- mutex_unlock(&dev_priv->display.audio.mutex);
+ mutex_unlock(&i915->display.audio.mutex);
i915_audio_component_put_power(kdev, cookie);
return err;
}
@@ -1215,18 +1209,18 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
int pipe, bool *enabled,
unsigned char *buf, int max_bytes)
{
- struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
struct intel_encoder *intel_encoder;
const u8 *eld;
int ret = -EINVAL;
- mutex_lock(&dev_priv->display.audio.mutex);
+ mutex_lock(&i915->display.audio.mutex);
- intel_encoder = get_saved_enc(dev_priv, port, pipe);
+ intel_encoder = get_saved_enc(i915, port, pipe);
if (!intel_encoder) {
- drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n",
+ drm_dbg_kms(&i915->drm, "Not valid for port %c\n",
port_name(port));
- mutex_unlock(&dev_priv->display.audio.mutex);
+ mutex_unlock(&i915->display.audio.mutex);
return ret;
}
@@ -1238,7 +1232,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
memcpy(buf, eld, min(max_bytes, ret));
}
- mutex_unlock(&dev_priv->display.audio.mutex);
+ mutex_unlock(&i915->display.audio.mutex);
return ret;
}
@@ -1256,25 +1250,25 @@ static int i915_audio_component_bind(struct device *i915_kdev,
struct device *hda_kdev, void *data)
{
struct i915_audio_component *acomp = data;
- struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
int i;
- if (drm_WARN_ON(&dev_priv->drm, acomp->base.ops || acomp->base.dev))
+ if (drm_WARN_ON(&i915->drm, acomp->base.ops || acomp->base.dev))
return -EEXIST;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(&i915->drm,
!device_link_add(hda_kdev, i915_kdev,
DL_FLAG_STATELESS)))
return -ENOMEM;
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(&i915->drm);
acomp->base.ops = &i915_audio_component_ops;
acomp->base.dev = i915_kdev;
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
- dev_priv->display.audio.component = acomp;
- drm_modeset_unlock_all(&dev_priv->drm);
+ i915->display.audio.component = acomp;
+ drm_modeset_unlock_all(&i915->drm);
return 0;
}
@@ -1283,19 +1277,19 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
struct device *hda_kdev, void *data)
{
struct i915_audio_component *acomp = data;
- struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(&i915->drm);
acomp->base.ops = NULL;
acomp->base.dev = NULL;
- dev_priv->display.audio.component = NULL;
- drm_modeset_unlock_all(&dev_priv->drm);
+ i915->display.audio.component = NULL;
+ drm_modeset_unlock_all(&i915->drm);
device_link_remove(hda_kdev, i915_kdev);
- if (dev_priv->display.audio.power_refcount)
- drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n",
- dev_priv->display.audio.power_refcount);
+ if (i915->display.audio.power_refcount)
+ drm_err(&i915->drm, "audio power refcount %d after unbind\n",
+ i915->display.audio.power_refcount);
}
static const struct component_ops i915_audio_component_bind_ops = {
@@ -1314,7 +1308,7 @@ static const struct component_ops i915_audio_component_bind_ops = {
/**
* i915_audio_component_init - initialize and register the audio component
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* This will register with the component framework a child component which
* will bind dynamically to the snd_hda_intel driver's corresponding master
@@ -1328,83 +1322,83 @@ static const struct component_ops i915_audio_component_bind_ops = {
* We ignore any error during registration and continue with reduced
* functionality (i.e. without HDMI audio).
*/
-static void i915_audio_component_init(struct drm_i915_private *dev_priv)
+static void i915_audio_component_init(struct drm_i915_private *i915)
{
u32 aud_freq, aud_freq_init;
int ret;
- ret = component_add_typed(dev_priv->drm.dev,
+ ret = component_add_typed(i915->drm.dev,
&i915_audio_component_bind_ops,
I915_COMPONENT_AUDIO);
if (ret < 0) {
- drm_err(&dev_priv->drm,
+ drm_err(&i915->drm,
"failed to add audio component (%d)\n", ret);
/* continue with reduced functionality */
return;
}
- if (DISPLAY_VER(dev_priv) >= 9) {
- aud_freq_init = intel_de_read(dev_priv, AUD_FREQ_CNTRL);
+ if (DISPLAY_VER(i915) >= 9) {
+ aud_freq_init = intel_de_read(i915, AUD_FREQ_CNTRL);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(i915) >= 12)
aud_freq = AUD_FREQ_GEN12;
else
aud_freq = aud_freq_init;
/* use BIOS provided value for TGL and RKL unless it is a known bad value */
- if ((IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv)) &&
+ if ((IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) &&
aud_freq_init != AUD_FREQ_TGL_BROKEN)
aud_freq = aud_freq_init;
- drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
+ drm_dbg_kms(&i915->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
aud_freq, aud_freq_init);
- dev_priv->display.audio.freq_cntrl = aud_freq;
+ i915->display.audio.freq_cntrl = aud_freq;
}
/* init with current cdclk */
- intel_audio_cdclk_change_post(dev_priv);
+ intel_audio_cdclk_change_post(i915);
- dev_priv->display.audio.component_registered = true;
+ i915->display.audio.component_registered = true;
}
/**
* i915_audio_component_cleanup - deregister the audio component
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* Deregisters the audio component, breaking any existing binding to the
* corresponding snd_hda_intel driver's master component.
*/
-static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
+static void i915_audio_component_cleanup(struct drm_i915_private *i915)
{
- if (!dev_priv->display.audio.component_registered)
+ if (!i915->display.audio.component_registered)
return;
- component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
- dev_priv->display.audio.component_registered = false;
+ component_del(i915->drm.dev, &i915_audio_component_bind_ops);
+ i915->display.audio.component_registered = false;
}
/**
* intel_audio_init() - Initialize the audio driver either using
* component framework or using lpe audio bridge
- * @dev_priv: the i915 drm device private data
+ * @i915: the i915 drm device private data
*
*/
-void intel_audio_init(struct drm_i915_private *dev_priv)
+void intel_audio_init(struct drm_i915_private *i915)
{
- if (intel_lpe_audio_init(dev_priv) < 0)
- i915_audio_component_init(dev_priv);
+ if (intel_lpe_audio_init(i915) < 0)
+ i915_audio_component_init(i915);
}
/**
* intel_audio_deinit() - deinitialize the audio driver
- * @dev_priv: the i915 drm device private data
+ * @i915: the i915 drm device private data
*
*/
-void intel_audio_deinit(struct drm_i915_private *dev_priv)
+void intel_audio_deinit(struct drm_i915_private *i915)
{
- if (dev_priv->display.audio.lpe.platdev != NULL)
- intel_lpe_audio_teardown(dev_priv);
+ if (i915->display.audio.lpe.platdev != NULL)
+ intel_lpe_audio_teardown(i915);
else
- i915_audio_component_cleanup(dev_priv);
+ i915_audio_component_cleanup(i915);
}
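[Editor's note: both enable paths in this file now share the same ELD programming pattern: query the hardware buffer size, write the ELD in dwords through an auto-incrementing data register, zero-fill the remainder so stale data never leaks, and WARN if the write address failed to wrap back to 0. A self-contained sketch; eld_write() is a hypothetical stand-in for intel_de_write() to HSW_AUD_EDID_DATA / hdmiw_hdmiedid:

	static void eld_program_sketch(const u8 *eld, int eld_size_bytes,
				       int buffer_size_dw,
				       void (*eld_write)(u32 val))
	{
		const u32 *data = (const u32 *)eld;
		int len = min(eld_size_bytes / 4, buffer_size_dw);
		int i;

		for (i = 0; i < len; i++)
			eld_write(data[i]);	/* valid ELD dwords */
		for (; i < buffer_size_dw; i++)
			eld_write(0);		/* zero-fill the rest */
		/* after buffer_size_dw writes the hw address wraps to 0 */
	}
]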
diff --git a/drivers/gpu/drm/i915/display/intel_audio_regs.h b/drivers/gpu/drm/i915/display/intel_audio_regs.h
index d1e5844e3484..4f432c2eb543 100644
--- a/drivers/gpu/drm/i915/display/intel_audio_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_audio_regs.h
@@ -8,16 +8,11 @@
#include "i915_reg_defs.h"
-#define G4X_AUD_VID_DID _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020)
-#define INTEL_AUDIO_DEVCL 0x808629FB
-#define INTEL_AUDIO_DEVBLC 0x80862801
-#define INTEL_AUDIO_DEVCTG 0x80862802
-
#define G4X_AUD_CNTL_ST _MMIO(0x620B4)
-#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
-#define G4X_ELDV_DEVCTG (1 << 14)
-#define G4X_ELD_ADDR_MASK (0xf << 5)
-#define G4X_ELD_ACK (1 << 4)
+#define G4X_ELD_VALID REG_BIT(14)
+#define G4X_ELD_BUFFER_SIZE_MASK REG_GENMASK(13, 9)
+#define G4X_ELD_ADDRESS_MASK REG_GENMASK(8, 5)
+#define G4X_ELD_ACK REG_BIT(4)
#define G4X_HDMIW_HDMIEDID _MMIO(0x6210C)
#define _IBX_HDMIW_HDMIEDID_A 0xE2050
@@ -28,12 +23,12 @@
#define _IBX_AUD_CNTL_ST_B 0xE21B4
#define IBX_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CNTL_ST_A, \
_IBX_AUD_CNTL_ST_B)
-#define IBX_ELD_BUFFER_SIZE_MASK (0x1f << 10)
-#define IBX_ELD_ADDRESS_MASK (0x1f << 5)
-#define IBX_ELD_ACK (1 << 4)
+#define IBX_ELD_BUFFER_SIZE_MASK REG_GENMASK(14, 10)
+#define IBX_ELD_ADDRESS_MASK REG_GENMASK(9, 5)
+#define IBX_ELD_ACK REG_BIT(4)
#define IBX_AUD_CNTL_ST2 _MMIO(0xE20C0)
-#define IBX_CP_READY(port) ((1 << 1) << (((port) - 1) * 4))
-#define IBX_ELD_VALID(port) ((1 << 0) << (((port) - 1) * 4))
+#define IBX_CP_READY(port) REG_BIT(((port) - 1) * 4 + 1)
+#define IBX_ELD_VALID(port) REG_BIT(((port) - 1) * 4 + 0)
#define _CPT_HDMIW_HDMIEDID_A 0xE5050
#define _CPT_HDMIW_HDMIEDID_B 0xE5150
@@ -60,34 +55,30 @@
#define _VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
#define _VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
#define VLV_AUD_CFG(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CONFIG_A, _VLV_AUD_CONFIG_B)
-
-#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
-#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
-#define AUD_CONFIG_UPPER_N_SHIFT 20
-#define AUD_CONFIG_UPPER_N_MASK (0xff << 20)
-#define AUD_CONFIG_LOWER_N_SHIFT 4
-#define AUD_CONFIG_LOWER_N_MASK (0xfff << 4)
-#define AUD_CONFIG_N_MASK (AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK)
-#define AUD_CONFIG_N(n) \
- (((((n) >> 12) & 0xff) << AUD_CONFIG_UPPER_N_SHIFT) | \
- (((n) & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT))
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 (1 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 (2 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 (3 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 (4 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 (5 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 (6 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 (10 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 (11 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 (12 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 (13 << 16)
-#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
+#define AUD_CONFIG_N_VALUE_INDEX REG_BIT(29)
+#define AUD_CONFIG_N_PROG_ENABLE REG_BIT(28)
+#define AUD_CONFIG_UPPER_N_MASK REG_GENMASK(27, 20)
+#define AUD_CONFIG_LOWER_N_MASK REG_GENMASK(15, 4)
+#define AUD_CONFIG_N_MASK (AUD_CONFIG_UPPER_N_MASK | \
+ AUD_CONFIG_LOWER_N_MASK)
+#define AUD_CONFIG_N(n) (REG_FIELD_PREP(AUD_CONFIG_UPPER_N_MASK, (n) >> 12) | \
+ REG_FIELD_PREP(AUD_CONFIG_LOWER_N_MASK, (n) & 0xfff))
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK REG_GENMASK(19, 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 REG_FIELD_PREP(AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK, 0)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 REG_FIELD_PREP(AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK, 1)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 REG_FIELD_PREP(AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK, 2)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 REG_FIELD_PREP(AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK, 3)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 REG_FIELD_PREP(AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK, 4)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 REG_FIELD_PREP(AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK, 5)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 REG_FIELD_PREP(AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK, 6)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 REG_FIELD_PREP(AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK, 7)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 REG_FIELD_PREP(AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK, 8)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 REG_FIELD_PREP(AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK, 9)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 REG_FIELD_PREP(AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK, 10)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 REG_FIELD_PREP(AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK, 11)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 REG_FIELD_PREP(AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK, 12)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 REG_FIELD_PREP(AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK, 13)
+#define AUD_CONFIG_DISABLE_NCTS REG_BIT(3)
#define _HSW_AUD_CONFIG_A 0x65000
#define _HSW_AUD_CONFIG_B 0x65100
@@ -100,9 +91,9 @@
#define _HSW_AUD_M_CTS_ENABLE_A 0x65028
#define _HSW_AUD_M_CTS_ENABLE_B 0x65128
#define HSW_AUD_M_CTS_ENABLE(trans) _MMIO_TRANS(trans, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
-#define AUD_M_CTS_M_VALUE_INDEX (1 << 21)
-#define AUD_M_CTS_M_PROG_ENABLE (1 << 20)
-#define AUD_CONFIG_M_MASK 0xfffff
+#define AUD_M_CTS_M_VALUE_INDEX REG_BIT(21)
+#define AUD_M_CTS_M_PROG_ENABLE REG_BIT(20)
+#define AUD_CONFIG_M_MASK REG_GENMASK(19, 0)
#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
@@ -130,11 +121,11 @@
#define AUD_DP_2DOT0_CTRL(trans) _MMIO_TRANS(trans, _AUD_TCA_DP_2DOT0_CTRL, _AUD_TCB_DP_2DOT0_CTRL)
#define AUD_ENABLE_SDP_SPLIT REG_BIT(31)
-#define HSW_AUD_CHICKENBIT _MMIO(0x65f10)
-#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
+#define HSW_AUD_CHICKENBIT _MMIO(0x65f10)
+#define SKL_AUD_CODEC_WAKE_SIGNAL REG_BIT(15)
#define AUD_FREQ_CNTRL _MMIO(0x65900)
-#define AUD_PIN_BUF_CTL _MMIO(0x48414)
+#define AUD_PIN_BUF_CTL _MMIO(0x48414)
#define AUD_PIN_BUF_ENABLE REG_BIT(31)
#define AUD_TS_CDCLK_M _MMIO(0x65ea0)
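[Editor's note: the REG_FIELD_PREP() form makes the split N encoding explicit; a worked example with a hypothetical 20-bit value:

	/*
	 * AUD_CONFIG_N(0x12345):
	 *   upper 8 bits:  REG_FIELD_PREP(AUD_CONFIG_UPPER_N_MASK, 0x12345 >> 12)
	 *                  = 0x12 << 20  = 0x01200000
	 *   lower 12 bits: REG_FIELD_PREP(AUD_CONFIG_LOWER_N_MASK, 0x12345 & 0xfff)
	 *                  = 0x345 << 4  = 0x00003450
	 *   result: 0x01203450, identical to the old open-coded shift form.
	 */
]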
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 28bdb936cd1f..c2987f2c2b2e 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -2188,7 +2188,7 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
const u8 *ddc_pin_map;
int n_entries;
- if (IS_ALDERLAKE_P(i915)) {
+ if (HAS_PCH_MTP(i915) || IS_ALDERLAKE_P(i915)) {
ddc_pin_map = adlp_ddc_pin_map;
n_entries = ARRAY_SIZE(adlp_ddc_pin_map);
} else if (IS_ALDERLAKE_S(i915)) {
@@ -2676,6 +2676,14 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
drm_dbg_kms(&i915->drm,
"Port %c VBT DP max link rate: %d\n",
port_name(port), dp_max_link_rate);
+
+ /*
+ * FIXME need to implement support for VBT
+ * vswing/preemph tables should this ever trigger.
+ */
+ drm_WARN(&i915->drm, child->use_vbt_vswing,
+ "Port %c asks to use VBT vswing/preemph tables\n",
+ port_name(port));
}
static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index ed05070b7307..eada931cb1c8 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1220,11 +1220,6 @@ static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
-static bool has_cdclk_squasher(struct drm_i915_private *i915)
-{
- return IS_DG2(i915);
-}
-
struct intel_cdclk_vals {
u32 cdclk;
u16 refclk;
@@ -1520,7 +1515,7 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
return;
}
- if (has_cdclk_squasher(dev_priv))
+ if (HAS_CDCLK_SQUASH(dev_priv))
squash_ctl = intel_de_read(dev_priv, CDCLK_SQUASH_CTL);
if (squash_ctl & CDCLK_SQUASH_ENABLE) {
@@ -1689,6 +1684,38 @@ static u32 cdclk_squash_waveform(struct drm_i915_private *dev_priv,
return 0xffff;
}
+static void icl_cdclk_pll_update(struct drm_i915_private *i915, int vco)
+{
+ if (i915->display.cdclk.hw.vco != 0 &&
+ i915->display.cdclk.hw.vco != vco)
+ icl_cdclk_pll_disable(i915);
+
+ if (i915->display.cdclk.hw.vco != vco)
+ icl_cdclk_pll_enable(i915, vco);
+}
+
+static void bxt_cdclk_pll_update(struct drm_i915_private *i915, int vco)
+{
+ if (i915->display.cdclk.hw.vco != 0 &&
+ i915->display.cdclk.hw.vco != vco)
+ bxt_de_pll_disable(i915);
+
+ if (i915->display.cdclk.hw.vco != vco)
+ bxt_de_pll_enable(i915, vco);
+}
+
+static void dg2_cdclk_squash_program(struct drm_i915_private *i915,
+ u16 waveform)
+{
+ u32 squash_ctl = 0;
+
+ if (waveform)
+ squash_ctl = CDCLK_SQUASH_ENABLE |
+ CDCLK_SQUASH_WINDOW_SIZE(0xf) | waveform;
+
+ intel_de_write(i915, CDCLK_SQUASH_CTL, squash_ctl);
+}
+
static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
@@ -1724,21 +1751,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0) {
if (dev_priv->display.cdclk.hw.vco != vco)
adlp_cdclk_pll_crawl(dev_priv, vco);
- } else if (DISPLAY_VER(dev_priv) >= 11) {
- if (dev_priv->display.cdclk.hw.vco != 0 &&
- dev_priv->display.cdclk.hw.vco != vco)
- icl_cdclk_pll_disable(dev_priv);
-
- if (dev_priv->display.cdclk.hw.vco != vco)
- icl_cdclk_pll_enable(dev_priv, vco);
- } else {
- if (dev_priv->display.cdclk.hw.vco != 0 &&
- dev_priv->display.cdclk.hw.vco != vco)
- bxt_de_pll_disable(dev_priv);
-
- if (dev_priv->display.cdclk.hw.vco != vco)
- bxt_de_pll_enable(dev_priv, vco);
- }
+ } else if (DISPLAY_VER(dev_priv) >= 11)
+ icl_cdclk_pll_update(dev_priv, vco);
+ else
+ bxt_cdclk_pll_update(dev_priv, vco);
waveform = cdclk_squash_waveform(dev_priv, cdclk);
@@ -1747,15 +1763,8 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
else
clock = cdclk;
- if (has_cdclk_squasher(dev_priv)) {
- u32 squash_ctl = 0;
-
- if (waveform)
- squash_ctl = CDCLK_SQUASH_ENABLE |
- CDCLK_SQUASH_WINDOW_SIZE(0xf) | waveform;
-
- intel_de_write(dev_priv, CDCLK_SQUASH_CTL, squash_ctl);
- }
+ if (HAS_CDCLK_SQUASH(dev_priv))
+ dg2_cdclk_squash_program(dev_priv, waveform);
val = bxt_cdclk_cd2x_div_sel(dev_priv, clock, vco) |
bxt_cdclk_cd2x_pipe(dev_priv, pipe) |
@@ -1845,7 +1854,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
expected = skl_cdclk_decimal(cdclk);
/* Figure out what CD2X divider we should be using for this cdclk */
- if (has_cdclk_squasher(dev_priv))
+ if (HAS_CDCLK_SQUASH(dev_priv))
clock = dev_priv->display.cdclk.hw.vco / 2;
else
clock = dev_priv->display.cdclk.hw.cdclk;
@@ -1976,7 +1985,7 @@ static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv,
* the moment all platforms with squasher use a fixed cd2x
* divider.
*/
- if (!has_cdclk_squasher(dev_priv))
+ if (!HAS_CDCLK_SQUASH(dev_priv))
return false;
return a->cdclk != b->cdclk &&
@@ -2028,7 +2037,7 @@ static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv,
* the moment all platforms with squasher use a fixed cd2x
* divider.
*/
- if (has_cdclk_squasher(dev_priv))
+ if (HAS_CDCLK_SQUASH(dev_priv))
return false;
return a->cdclk != b->cdclk &&
@@ -2464,10 +2473,6 @@ static int bdw_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
if (min_cdclk < 0)
return min_cdclk;
- /*
- * FIXME should also account for plane ratio
- * once 64bpp pixel formats are supported.
- */
cdclk = bdw_calc_cdclk(min_cdclk);
cdclk_state->logical.cdclk = cdclk;
@@ -2534,10 +2539,6 @@ static int skl_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
vco = skl_dpll0_vco(cdclk_state);
- /*
- * FIXME should also account for plane ratio
- * once 64bpp pixel formats are supported.
- */
cdclk = skl_calc_cdclk(min_cdclk, vco);
cdclk_state->logical.vco = vco;
@@ -2762,12 +2763,12 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
&old_cdclk_state->actual,
&new_cdclk_state->actual)) {
drm_dbg_kms(&dev_priv->drm,
- "Can change cdclk via squasher\n");
+ "Can change cdclk via squashing\n");
} else if (intel_cdclk_can_crawl(dev_priv,
&old_cdclk_state->actual,
&new_cdclk_state->actual)) {
drm_dbg_kms(&dev_priv->drm,
- "Can change cdclk via crawl\n");
+ "Can change cdclk via crawling\n");
} else if (pipe != INVALID_PIPE) {
new_cdclk_state->pipe = pipe;
@@ -2777,7 +2778,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
} else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual,
&new_cdclk_state->actual)) {
/* All pipes must be switched off while we change the cdclk. */
- ret = intel_modeset_all_pipes(state);
+ ret = intel_modeset_all_pipes(state, "CDCLK change");
if (ret)
return ret;
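[Editor's note: with the squasher the PLL stays at a fixed VCO and the effective CDCLK comes from gating cd2x clock cycles with the 16-bit waveform. A hedged sketch of the resulting frequency, mirroring the hweight16() scaling the driver uses when reading hardware state back; cd2x_clock is an assumed name for vco / 2:

	#include <linux/bitops.h>	/* hweight16() */
	#include <linux/math.h>		/* DIV_ROUND_CLOSEST() */

	static int squashed_cdclk_sketch(int cd2x_clock, u16 waveform)
	{
		int window = 16;	/* CDCLK_SQUASH_WINDOW_SIZE(0xf) + 1 */

		/* each set bit passes one cd2x cycle per 16-cycle window */
		return DIV_ROUND_CLOSEST(hweight16(waveform) * cd2x_clock, window);
	}
]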
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 6bda4274eae9..4bb113c39f4b 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -25,9 +25,7 @@
#include "intel_color.h"
#include "intel_de.h"
#include "intel_display_types.h"
-#include "intel_dpll.h"
#include "intel_dsb.h"
-#include "vlv_dsi_pll.h"
struct intel_color_funcs {
int (*color_check)(struct intel_crtc_state *crtc_state);
@@ -559,6 +557,32 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
crtc_state->csc_mode);
}
+static struct drm_property_blob *
+create_linear_lut(struct drm_i915_private *i915, int lut_size)
+{
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+ int i;
+
+ blob = drm_property_create_blob(&i915->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return blob;
+
+ lut = blob->data;
+
+ for (i = 0; i < lut_size; i++) {
+ u16 val = 0xffff * i / (lut_size - 1);
+
+ lut[i].red = val;
+ lut[i].green = val;
+ lut[i].blue = val;
+ }
+
+ return blob;
+}
+
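[Editor's note: a worked example of the ramp built by create_linear_lut() above, for a hypothetical lut_size:

	/*
	 * lut_size = 33 (i.e. 32 segments):
	 *   val = 0xffff * i / 32  ->  0x0000, 0x07ff, 0x0fff, ..., 0xffff
	 * an identity ramp, so the pipe CSC stays usable even when
	 * userspace supplied no degamma LUT.
	 */
]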
static void i9xx_load_lut_8(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -580,12 +604,9 @@ static void i9xx_load_lut_8(struct intel_crtc *crtc,
static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
- assert_pll_enabled(dev_priv, crtc->pipe);
-
- i9xx_load_lut_8(crtc, gamma_lut);
+ i9xx_load_lut_8(crtc, post_csc_lut);
}
static void i965_load_lut_10p6(struct intel_crtc *crtc,
@@ -611,18 +632,12 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc,
static void i965_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- assert_dsi_pll_enabled(dev_priv);
- else
- assert_pll_enabled(dev_priv, crtc->pipe);
+ const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- i9xx_load_lut_8(crtc, gamma_lut);
+ i9xx_load_lut_8(crtc, post_csc_lut);
else
- i965_load_lut_10p6(crtc, gamma_lut);
+ i965_load_lut_10p6(crtc, post_csc_lut);
}
static void ilk_load_lut_8(struct intel_crtc *crtc,
@@ -659,14 +674,16 @@ static void ilk_load_lut_10(struct intel_crtc *crtc,
static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
+ const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
+ const struct drm_property_blob *blob = post_csc_lut ?: pre_csc_lut;
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
- ilk_load_lut_8(crtc, gamma_lut);
+ ilk_load_lut_8(crtc, blob);
break;
case GAMMA_MODE_MODE_10BIT:
- ilk_load_lut_10(crtc, gamma_lut);
+ ilk_load_lut_10(crtc, blob);
break;
default:
MISSING_CASE(crtc_state->gamma_mode);
@@ -773,19 +790,19 @@ static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state)
static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
- const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
- const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
+ const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
+ const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
+ const struct drm_property_blob *blob = post_csc_lut ?: pre_csc_lut;
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
ilk_load_lut_8(crtc, blob);
break;
case GAMMA_MODE_MODE_SPLIT:
- ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
+ ivb_load_lut_10(crtc, pre_csc_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc_state);
- ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
+ ivb_load_lut_10(crtc, post_csc_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(512));
break;
case GAMMA_MODE_MODE_10BIT:
@@ -802,19 +819,19 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
- const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
- const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
+ const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
+ const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
+ const struct drm_property_blob *blob = post_csc_lut ?: pre_csc_lut;
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
ilk_load_lut_8(crtc, blob);
break;
case GAMMA_MODE_MODE_SPLIT:
- bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
+ bdw_load_lut_10(crtc, pre_csc_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc_state);
- bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
+ bdw_load_lut_10(crtc, post_csc_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(512));
break;
case GAMMA_MODE_MODE_10BIT:
@@ -837,13 +854,14 @@ static int glk_degamma_lut_size(struct drm_i915_private *i915)
return 35;
}
-static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
+static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
- int i, lut_size = INTEL_INFO(dev_priv)->display.color.degamma_lut_size;
- const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data;
/*
* When setting the auto-increment bit, the hardware seems to
@@ -879,59 +897,21 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
}
-static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- int i, lut_size = INTEL_INFO(dev_priv)->display.color.degamma_lut_size;
-
- /*
- * When setting the auto-increment bit, the hardware seems to
- * ignore the index bits, so we need to reset it to index 0
- * separately.
- */
- intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
- intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
- PRE_CSC_GAMC_AUTO_INCREMENT);
-
- for (i = 0; i < lut_size; i++) {
- u32 v = (i << 16) / (lut_size - 1);
-
- intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), v);
- }
-
- /* Clamp values > 1.0. */
- while (i++ < 35)
- intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
-
- intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
-}
-
static void glk_load_luts(const struct intel_crtc_state *crtc_state)
{
- const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
+ const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- /*
- * On GLK+ both pipe CSC and degamma LUT are controlled
- * by csc_enable. Hence for the cases where the CSC is
- * needed but degamma LUT is not we need to load a
- * linear degamma LUT. In fact we'll just always load
- * the degama LUT so that we don't have to reload
- * it every time the pipe CSC is being enabled.
- */
- if (crtc_state->hw.degamma_lut)
- glk_load_degamma_lut(crtc_state);
- else
- glk_load_degamma_lut_linear(crtc_state);
+ if (pre_csc_lut)
+ glk_load_degamma_lut(crtc_state, pre_csc_lut);
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
- ilk_load_lut_8(crtc, gamma_lut);
+ ilk_load_lut_8(crtc, post_csc_lut);
break;
case GAMMA_MODE_MODE_10BIT:
- bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
+ bdw_load_lut_10(crtc, post_csc_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc_state);
break;
default:
@@ -971,7 +951,7 @@ static void
icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *blob = crtc_state->post_csc_lut;
const struct drm_color_lut *lut = blob->data;
enum pipe pipe = crtc->pipe;
int i;
@@ -1000,7 +980,7 @@ static void
icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *blob = crtc_state->post_csc_lut;
const struct drm_color_lut *lut = blob->data;
const struct drm_color_lut *entry;
enum pipe pipe = crtc->pipe;
@@ -1054,22 +1034,23 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
- const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
+ const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- if (crtc_state->hw.degamma_lut)
- glk_load_degamma_lut(crtc_state);
+ if (pre_csc_lut)
+ glk_load_degamma_lut(crtc_state, pre_csc_lut);
switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
case GAMMA_MODE_MODE_8BIT:
- ilk_load_lut_8(crtc, gamma_lut);
+ ilk_load_lut_8(crtc, post_csc_lut);
break;
case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
icl_program_gamma_superfine_segment(crtc_state);
icl_program_gamma_multi_segment(crtc_state);
break;
case GAMMA_MODE_MODE_10BIT:
- bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
+ bdw_load_lut_10(crtc, post_csc_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc_state);
break;
default:
@@ -1145,18 +1126,18 @@ static void chv_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
- const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
+ const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
const struct drm_property_blob *ctm = crtc_state->hw.ctm;
if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC)
chv_load_cgm_csc(crtc, ctm);
if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA)
- chv_load_cgm_degamma(crtc, degamma_lut);
+ chv_load_cgm_degamma(crtc, pre_csc_lut);
if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
- chv_load_cgm_gamma(crtc, gamma_lut);
+ chv_load_cgm_gamma(crtc, post_csc_lut);
else
i965_load_luts(crtc_state);
@@ -1194,8 +1175,8 @@ static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
- return !old_crtc_state->hw.gamma_lut &&
- !old_crtc_state->hw.degamma_lut;
+ return !old_crtc_state->post_csc_lut &&
+ !old_crtc_state->pre_csc_lut;
}
static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
@@ -1214,25 +1195,7 @@ static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
if (old_crtc_state->cgm_mode || new_crtc_state->cgm_mode)
return false;
- return !old_crtc_state->hw.gamma_lut;
-}
-
-static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct intel_atomic_state *state =
- to_intel_atomic_state(new_crtc_state->uapi.state);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
-
- /*
- * The hardware degamma is active whenever the pipe
- * CSC is active. Thus even if the old state has no
- * software degamma we need to avoid clobbering the
- * linear hardware degamma mid scanout.
- */
- return !old_crtc_state->csc_enable &&
- !old_crtc_state->hw.gamma_lut;
+ return !old_crtc_state->post_csc_lut;
}
int intel_color_check(struct intel_crtc_state *crtc_state)
@@ -1295,6 +1258,10 @@ intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
return PTR_ERR(plane_state);
new_crtc_state->update_planes |= BIT(plane->id);
+
+ /* plane control register changes blocked by CxSR */
+ if (HAS_GMCH(dev_priv))
+ new_crtc_state->disable_cxsr = true;
}
return 0;
@@ -1361,6 +1328,40 @@ static u32 i9xx_gamma_mode(struct intel_crtc_state *crtc_state)
return GAMMA_MODE_MODE_10BIT; /* i965+ only */
}
+void intel_color_assert_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ /* make sure {pre,post}_csc_lut were correctly assigned */
+ if (DISPLAY_VER(i915) >= 11 || HAS_GMCH(i915)) {
+ drm_WARN_ON(&i915->drm,
+ crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut);
+ drm_WARN_ON(&i915->drm,
+ crtc_state->post_csc_lut != crtc_state->hw.gamma_lut);
+ } else if (DISPLAY_VER(i915) == 10) {
+ drm_WARN_ON(&i915->drm,
+ crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut &&
+ crtc_state->pre_csc_lut != i915->display.color.glk_linear_degamma_lut);
+ drm_WARN_ON(&i915->drm,
+ crtc_state->post_csc_lut != crtc_state->hw.gamma_lut);
+ } else {
+ drm_WARN_ON(&i915->drm,
+ crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut &&
+ crtc_state->pre_csc_lut != crtc_state->hw.gamma_lut);
+ drm_WARN_ON(&i915->drm,
+ crtc_state->post_csc_lut != crtc_state->hw.degamma_lut &&
+ crtc_state->post_csc_lut != crtc_state->hw.gamma_lut);
+ }
+}
+
+static void intel_assign_luts(struct intel_crtc_state *crtc_state)
+{
+ drm_property_replace_blob(&crtc_state->pre_csc_lut,
+ crtc_state->hw.degamma_lut);
+ drm_property_replace_blob(&crtc_state->post_csc_lut,
+ crtc_state->hw.gamma_lut);
+}
+
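Aside: intel_assign_luts() leans on drm_property_replace_blob(), which drops the reference held by the destination slot and takes a new one on the incoming blob. A minimal user-space sketch of that replace pattern (the toy struct and helpers below are illustrative stand-ins, not the DRM API):

    #include <stdio.h>

    /* Toy refcounted blob, standing in for struct drm_property_blob. */
    struct blob { int refcount; const char *name; };

    static struct blob *blob_get(struct blob *b)
    {
        if (b)
            b->refcount++;
        return b;
    }

    static void blob_put(struct blob *b)
    {
        if (b && --b->refcount == 0)
            printf("freeing %s\n", b->name);
    }

    /* Mirrors drm_property_replace_blob(): take the new ref, drop the old. */
    static void replace_blob(struct blob **slot, struct blob *new)
    {
        struct blob *old = *slot;

        *slot = blob_get(new);
        blob_put(old);
    }

    int main(void)
    {
        struct blob degamma = { 1, "degamma" }, gamma = { 1, "gamma" };
        struct blob *pre_csc_lut = NULL, *post_csc_lut = NULL;

        /* What intel_assign_luts() does for the simple platforms. */
        replace_blob(&pre_csc_lut, &degamma);
        replace_blob(&post_csc_lut, &gamma);
        printf("pre=%s post=%s\n", pre_csc_lut->name, post_csc_lut->name);
        return 0;
    }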
static int i9xx_color_check(struct intel_crtc_state *crtc_state)
{
int ret;
@@ -1379,6 +1380,8 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ intel_assign_luts(crtc_state);
+
crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
return 0;
@@ -1433,6 +1436,8 @@ static int chv_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ intel_assign_luts(crtc_state);
+
crtc_state->preload_luts = chv_can_preload_luts(crtc_state);
return 0;
@@ -1458,10 +1463,29 @@ static u32 ilk_csc_mode(const struct intel_crtc_state *crtc_state)
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
return CSC_BLACK_SCREEN_OFFSET;
+ if (crtc_state->hw.degamma_lut)
+ return CSC_MODE_YUV_TO_RGB;
+
return CSC_MODE_YUV_TO_RGB |
CSC_POSITION_BEFORE_GAMMA;
}
+static void ilk_assign_luts(struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->hw.degamma_lut ||
+ crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) {
+ drm_property_replace_blob(&crtc_state->pre_csc_lut,
+ crtc_state->hw.degamma_lut);
+ drm_property_replace_blob(&crtc_state->post_csc_lut,
+ crtc_state->hw.gamma_lut);
+ } else {
+ drm_property_replace_blob(&crtc_state->pre_csc_lut,
+ crtc_state->hw.gamma_lut);
+ drm_property_replace_blob(&crtc_state->post_csc_lut,
+ NULL);
+ }
+}
+
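The routing choice in ilk_assign_luts() follows from where the CSC sits relative to the single gamma LUT: if a degamma LUT exists, or the CSC was placed before gamma, the gamma LUT ends up post-CSC; otherwise the CSC sits after gamma and the LUT is effectively pre-CSC. A standalone model of that decision (my reading of the code above, reduced to two booleans):

    #include <stdbool.h>
    #include <stdio.h>

    /* Models ilk_assign_luts(): which slot does the one gamma LUT land in? */
    static const char *gamma_slot(bool have_degamma, bool csc_before_gamma)
    {
        /* degamma present, or CSC ahead of gamma: gamma LUT is post-CSC */
        if (have_degamma || csc_before_gamma)
            return "post_csc_lut";
        /* otherwise the CSC sits after gamma, so the LUT is pre-CSC */
        return "pre_csc_lut";
    }

    int main(void)
    {
        printf("%s\n", gamma_slot(false, false)); /* -> pre_csc_lut */
        printf("%s\n", gamma_slot(true, false));  /* -> post_csc_lut */
        return 0;
    }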
static int ilk_color_check(struct intel_crtc_state *crtc_state)
{
int ret;
@@ -1489,6 +1513,8 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ ilk_assign_luts(crtc_state);
+
crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
return 0;
@@ -1556,6 +1582,8 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ ilk_assign_luts(crtc_state);
+
crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
return 0;
@@ -1570,6 +1598,23 @@ static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state)
return GAMMA_MODE_MODE_10BIT;
}
+static void glk_assign_luts(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ intel_assign_luts(crtc_state);
+
+ /*
+ * On GLK+ both pipe CSC and degamma LUT are controlled
+ * by csc_enable. Hence, for the cases where the CSC is
+ * needed but the degamma LUT is not, we need to load a
+ * linear degamma LUT.
+ */
+ if (crtc_state->csc_enable && !crtc_state->pre_csc_lut)
+ drm_property_replace_blob(&crtc_state->pre_csc_lut,
+ i915->display.color.glk_linear_degamma_lut);
+}
+
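create_linear_lut(), added elsewhere in this series, presumably builds an identity ramp sized to the platform's degamma LUT. A hedged sketch of what such a fill might look like (the struct layout and 16-bit scaling here are assumptions, not the actual helper):

    #include <stdint.h>
    #include <stdio.h>

    struct color_lut { uint16_t red, green, blue; };

    /* Fill a LUT with the identity ramp: entry i maps to i scaled to 16 bits. */
    static void fill_linear_lut(struct color_lut *lut, int size)
    {
        for (int i = 0; i < size; i++) {
            uint16_t v = (uint32_t)i * 0xffff / (size - 1);

            lut[i].red = lut[i].green = lut[i].blue = v;
        }
    }

    int main(void)
    {
        struct color_lut lut[33];

        fill_linear_lut(lut, 33);
        printf("lut[0]=%d lut[32]=%d\n", lut[0].red, lut[32].red); /* 0 .. 65535 */
        return 0;
    }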
static int glk_color_check(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
@@ -1604,7 +1649,9 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
- crtc_state->preload_luts = glk_can_preload_luts(crtc_state);
+ glk_assign_luts(crtc_state);
+
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
return 0;
}
@@ -1664,6 +1711,8 @@ static int icl_color_check(struct intel_crtc_state *crtc_state)
crtc_state->csc_mode = icl_csc_mode(crtc_state);
+ intel_assign_luts(crtc_state);
+
crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
return 0;
@@ -1869,7 +1918,7 @@ static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
if (!crtc_state->gamma_enable)
return;
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
+ crtc_state->post_csc_lut = i9xx_read_lut_8(crtc);
}
static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
@@ -1910,9 +1959,9 @@ static void i965_read_luts(struct intel_crtc_state *crtc_state)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
+ crtc_state->post_csc_lut = i9xx_read_lut_8(crtc);
else
- crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc);
+ crtc_state->post_csc_lut = i965_read_lut_10p6(crtc);
}
static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
@@ -1946,7 +1995,7 @@ static void chv_read_luts(struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
- crtc_state->hw.gamma_lut = chv_read_cgm_gamma(crtc);
+ crtc_state->post_csc_lut = chv_read_cgm_gamma(crtc);
else
i965_read_luts(crtc_state);
}
@@ -2013,10 +2062,10 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state)
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
- crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
+ crtc_state->post_csc_lut = ilk_read_lut_8(crtc);
break;
case GAMMA_MODE_MODE_10BIT:
- crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc);
+ crtc_state->post_csc_lut = ilk_read_lut_10(crtc);
break;
default:
MISSING_CASE(crtc_state->gamma_mode);
@@ -2068,10 +2117,10 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state)
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
- crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
+ crtc_state->post_csc_lut = ilk_read_lut_8(crtc);
break;
case GAMMA_MODE_MODE_10BIT:
- crtc_state->hw.gamma_lut = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
+ crtc_state->post_csc_lut = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
break;
default:
MISSING_CASE(crtc_state->gamma_mode);
@@ -2126,13 +2175,13 @@ static void icl_read_luts(struct intel_crtc_state *crtc_state)
switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
case GAMMA_MODE_MODE_8BIT:
- crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
+ crtc_state->post_csc_lut = ilk_read_lut_8(crtc);
break;
case GAMMA_MODE_MODE_10BIT:
- crtc_state->hw.gamma_lut = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
+ crtc_state->post_csc_lut = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
break;
case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
- crtc_state->hw.gamma_lut = icl_read_lut_multi_segment(crtc);
+ crtc_state->post_csc_lut = icl_read_lut_multi_segment(crtc);
break;
default:
MISSING_CASE(crtc_state->gamma_mode);
@@ -2217,41 +2266,58 @@ static const struct intel_color_funcs ilk_color_funcs = {
.read_luts = ilk_read_luts,
};
-void intel_color_init(struct intel_crtc *crtc)
+void intel_color_crtc_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool has_ctm = INTEL_INFO(dev_priv)->display.color.degamma_lut_size != 0;
drm_mode_crtc_set_gamma_size(&crtc->base, 256);
- if (HAS_GMCH(dev_priv)) {
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.funcs.color = &chv_color_funcs;
- } else if (DISPLAY_VER(dev_priv) >= 4) {
- dev_priv->display.funcs.color = &i965_color_funcs;
- } else {
- dev_priv->display.funcs.color = &i9xx_color_funcs;
- }
- } else {
- if (DISPLAY_VER(dev_priv) >= 11)
- dev_priv->display.funcs.color = &icl_color_funcs;
- else if (DISPLAY_VER(dev_priv) == 10)
- dev_priv->display.funcs.color = &glk_color_funcs;
- else if (DISPLAY_VER(dev_priv) == 9)
- dev_priv->display.funcs.color = &skl_color_funcs;
- else if (DISPLAY_VER(dev_priv) == 8)
- dev_priv->display.funcs.color = &bdw_color_funcs;
- else if (DISPLAY_VER(dev_priv) == 7) {
- if (IS_HASWELL(dev_priv))
- dev_priv->display.funcs.color = &hsw_color_funcs;
- else
- dev_priv->display.funcs.color = &ivb_color_funcs;
- } else
- dev_priv->display.funcs.color = &ilk_color_funcs;
- }
-
drm_crtc_enable_color_mgmt(&crtc->base,
INTEL_INFO(dev_priv)->display.color.degamma_lut_size,
has_ctm,
INTEL_INFO(dev_priv)->display.color.gamma_lut_size);
}
+
+int intel_color_init(struct drm_i915_private *i915)
+{
+ struct drm_property_blob *blob;
+
+ if (DISPLAY_VER(i915) != 10)
+ return 0;
+
+ blob = create_linear_lut(i915, INTEL_INFO(i915)->display.color.degamma_lut_size);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
+
+ i915->display.color.glk_linear_degamma_lut = blob;
+
+ return 0;
+}
+
+void intel_color_init_hooks(struct drm_i915_private *i915)
+{
+ if (HAS_GMCH(i915)) {
+ if (IS_CHERRYVIEW(i915))
+ i915->display.funcs.color = &chv_color_funcs;
+ else if (DISPLAY_VER(i915) >= 4)
+ i915->display.funcs.color = &i965_color_funcs;
+ else
+ i915->display.funcs.color = &i9xx_color_funcs;
+ } else {
+ if (DISPLAY_VER(i915) >= 11)
+ i915->display.funcs.color = &icl_color_funcs;
+ else if (DISPLAY_VER(i915) == 10)
+ i915->display.funcs.color = &glk_color_funcs;
+ else if (DISPLAY_VER(i915) == 9)
+ i915->display.funcs.color = &skl_color_funcs;
+ else if (DISPLAY_VER(i915) == 8)
+ i915->display.funcs.color = &bdw_color_funcs;
+ else if (IS_HASWELL(i915))
+ i915->display.funcs.color = &hsw_color_funcs;
+ else if (DISPLAY_VER(i915) == 7)
+ i915->display.funcs.color = &ivb_color_funcs;
+ else
+ i915->display.funcs.color = &ilk_color_funcs;
+ }
+}
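The split leaves three entry points with distinct roles: intel_color_init_hooks() installs the platform vtable, intel_color_init() allocates the GLK linear degamma blob once per device, and intel_color_crtc_init() registers the per-CRTC color management properties. A rough sketch of the expected ordering, compiled against the i915 tree rather than standalone (the wrapper function is hypothetical; error unwinding trimmed):

    /* Hypothetical caller showing the new init ordering. */
    static int example_display_init(struct drm_i915_private *i915)
    {
        struct intel_crtc *crtc;
        int ret;

        intel_color_init_hooks(i915);   /* pick chv/i965/.../icl vtable */

        ret = intel_color_init(i915);   /* allocate glk_linear_degamma_lut */
        if (ret)
            return ret;

        for_each_intel_crtc(&i915->drm, crtc)
            intel_color_crtc_init(crtc);    /* per-CRTC color_mgmt props */

        return 0;
    }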
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index fd873425e082..2a5ada67774d 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -10,9 +10,12 @@
struct intel_crtc_state;
struct intel_crtc;
+struct drm_i915_private;
struct drm_property_blob;
-void intel_color_init(struct intel_crtc *crtc);
+void intel_color_init_hooks(struct drm_i915_private *i915);
+int intel_color_init(struct drm_i915_private *i915);
+void intel_color_crtc_init(struct intel_crtc *crtc);
int intel_color_check(struct intel_crtc_state *crtc_state);
void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state);
void intel_color_commit_arm(const struct intel_crtc_state *crtc_state);
@@ -22,5 +25,6 @@ int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_stat
bool intel_color_lut_equal(struct drm_property_blob *blob1,
struct drm_property_blob *blob2,
u32 gamma_mode, u32 bit_precision);
+void intel_color_assert_luts(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_COLOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 64890f39c3cc..71d7aece1dc6 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -53,7 +53,6 @@ static const struct icl_procmon {
static const struct icl_procmon *
icl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
{
- const struct icl_procmon *procmon;
u32 val;
val = intel_de_read(dev_priv, ICL_PORT_COMP_DW3(phy));
@@ -62,23 +61,16 @@ icl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
MISSING_CASE(val);
fallthrough;
case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
- procmon = &icl_procmon_values[PROCMON_0_85V_DOT_0];
- break;
+ return &icl_procmon_values[PROCMON_0_85V_DOT_0];
case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
- procmon = &icl_procmon_values[PROCMON_0_95V_DOT_0];
- break;
+ return &icl_procmon_values[PROCMON_0_95V_DOT_0];
case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
- procmon = &icl_procmon_values[PROCMON_0_95V_DOT_1];
- break;
+ return &icl_procmon_values[PROCMON_0_95V_DOT_1];
case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
- procmon = &icl_procmon_values[PROCMON_1_05V_DOT_0];
- break;
+ return &icl_procmon_values[PROCMON_1_05V_DOT_0];
case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
- procmon = &icl_procmon_values[PROCMON_1_05V_DOT_1];
- break;
+ return &icl_procmon_values[PROCMON_1_05V_DOT_1];
}
-
- return procmon;
}
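Returning directly from each case lets the now-removed procmon local go away and keeps every path obviously initialized. The same pattern in a self-contained example:

    #include <stdio.h>

    struct procmon { int voltage_mv; };

    static const struct procmon table[] = {
        { 850 }, { 950 }, { 1050 },
    };

    /* Returning straight from each case: no local, no breaks, and the
     * compiler can still warn if a case falls off the end. */
    static const struct procmon *lookup(int sel)
    {
        switch (sel) {
        default:
        case 0: return &table[0];
        case 1: return &table[1];
        case 2: return &table[2];
        }
    }

    int main(void)
    {
        printf("%d mV\n", lookup(1)->voltage_mv);
        return 0;
    }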
static void icl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 6d5cbeb8df4d..6205ddd3ded0 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -293,3 +293,21 @@ intel_attach_dp_colorspace_property(struct drm_connector *connector)
if (!drm_mode_create_dp_colorspace_property(connector))
drm_connector_attach_colorspace_property(connector);
}
+
+void
+intel_attach_scaling_mode_property(struct drm_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ u32 scaling_modes;
+
+ scaling_modes = BIT(DRM_MODE_SCALE_ASPECT) |
+ BIT(DRM_MODE_SCALE_FULLSCREEN);
+
+ /* On GMCH platforms borders are only possible on the LVDS port */
+ if (!HAS_GMCH(i915) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ scaling_modes |= BIT(DRM_MODE_SCALE_CENTER);
+
+ drm_connector_attach_scaling_mode_property(connector, scaling_modes);
+
+ connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
+}
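Connector init paths can now call this helper instead of open-coding the property attach; a hedged sketch of a caller (the function name is made up):

    /* e.g. from a hypothetical panel connector init: */
    static void example_panel_init(struct drm_connector *connector)
    {
        /* Attaches ASPECT + FULLSCREEN everywhere, CENTER where borders
         * are possible, and defaults the state to DRM_MODE_SCALE_ASPECT. */
        intel_attach_scaling_mode_property(connector);
    }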
diff --git a/drivers/gpu/drm/i915/display/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h
index 661a37a3c6d8..7d7b588d2286 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.h
+++ b/drivers/gpu/drm/i915/display/intel_connector.h
@@ -32,5 +32,6 @@ void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
void intel_attach_aspect_ratio_property(struct drm_connector *connector);
void intel_attach_hdmi_colorspace_property(struct drm_connector *connector);
void intel_attach_dp_colorspace_property(struct drm_connector *connector);
+void intel_attach_scaling_mode_property(struct drm_connector *connector);
#endif /* __INTEL_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 4a8ff2f97608..94d0a5e1dd03 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -1044,17 +1044,14 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(intel_connector, &crt->base);
crt->base.type = INTEL_OUTPUT_ANALOG;
- crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
+ crt->base.cloneable = BIT(INTEL_OUTPUT_DVO) | BIT(INTEL_OUTPUT_HDMI);
if (IS_I830(dev_priv))
crt->base.pipe_mask = BIT(PIPE_A);
else
crt->base.pipe_mask = ~0;
- if (DISPLAY_VER(dev_priv) == 2)
- connector->interlace_allowed = 0;
- else
- connector->interlace_allowed = 1;
- connector->doublescan_allowed = 0;
+ if (DISPLAY_VER(dev_priv) != 2)
+ connector->interlace_allowed = true;
crt->adpa_reg = adpa_reg;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 6792a9056f46..037fc140b585 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -365,9 +365,8 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
BIT(DRM_SCALING_FILTER_DEFAULT) |
BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
- intel_color_init(crtc);
-
- intel_crtc_drrs_init(crtc);
+ intel_color_crtc_init(crtc);
+ intel_drrs_crtc_init(crtc);
intel_crtc_crc_init(crtc);
cpu_latency_qos_add_request(&crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE);
@@ -387,8 +386,7 @@ static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_sta
return crtc_state->hw.active &&
!intel_crtc_needs_modeset(crtc_state) &&
!crtc_state->preload_luts &&
- (crtc_state->uapi.color_mgmt_changed ||
- crtc_state->update_pipe);
+ intel_crtc_needs_color_update(crtc_state);
}
static void intel_crtc_vblank_work(struct kthread_work *base)
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index e9212f69c360..e3273fe8ddac 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -298,11 +298,13 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
pipe_config->csc_mode, pipe_config->gamma_mode,
pipe_config->gamma_enable, pipe_config->csc_enable);
- drm_dbg_kms(&i915->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
- pipe_config->hw.degamma_lut ?
- drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
- pipe_config->hw.gamma_lut ?
- drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
+ drm_dbg_kms(&i915->drm, "pre csc lut: %s%d entries, post csc lut: %d entries\n",
+ pipe_config->pre_csc_lut && pipe_config->pre_csc_lut ==
+ i915->display.color.glk_linear_degamma_lut ? "(linear) " : "",
+ pipe_config->pre_csc_lut ?
+ drm_color_lut_size(pipe_config->pre_csc_lut) : 0,
+ pipe_config->post_csc_lut ?
+ drm_color_lut_size(pipe_config->post_csc_lut) : 0);
dump_planes:
if (!state)
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 87899e89b3a7..96422c98656a 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -631,8 +631,10 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
*
* FIXME bigjoiner fastpath would be good
*/
- if (!crtc_state->hw.active || intel_crtc_needs_modeset(crtc_state) ||
- crtc_state->update_pipe || crtc_state->bigjoiner_pipes)
+ if (!crtc_state->hw.active ||
+ intel_crtc_needs_modeset(crtc_state) ||
+ intel_crtc_needs_fastset(crtc_state) ||
+ crtc_state->bigjoiner_pipes)
goto slow;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index da8472cdc135..e95bde5cf060 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -43,6 +43,8 @@
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
+#include "intel_dkl_phy.h"
+#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
@@ -55,13 +57,13 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
+#include "intel_mg_phy_regs.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_snps_phy.h"
#include "intel_sprite.h"
#include "intel_tc.h"
-#include "intel_tc_phy_regs.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
#include "skl_scaler.h"
@@ -1262,33 +1264,30 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
for (ln = 0; ln < 2; ln++) {
int level;
- intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
- HIP_INDEX_VAL(tc_port, ln));
-
- intel_de_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), 0);
+ intel_dkl_phy_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port, ln), 0);
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
- intel_de_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port),
- DKL_TX_PRESHOOT_COEFF_MASK |
- DKL_TX_DE_EMPAHSIS_COEFF_MASK |
- DKL_TX_VSWING_CONTROL_MASK,
- DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) |
- DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) |
- DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing));
+ intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port, ln),
+ DKL_TX_PRESHOOT_COEFF_MASK |
+ DKL_TX_DE_EMPAHSIS_COEFF_MASK |
+ DKL_TX_VSWING_CONTROL_MASK,
+ DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) |
+ DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) |
+ DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing));
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
- intel_de_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port),
- DKL_TX_PRESHOOT_COEFF_MASK |
- DKL_TX_DE_EMPAHSIS_COEFF_MASK |
- DKL_TX_VSWING_CONTROL_MASK,
- DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) |
- DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) |
- DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing));
+ intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port, ln),
+ DKL_TX_PRESHOOT_COEFF_MASK |
+ DKL_TX_DE_EMPAHSIS_COEFF_MASK |
+ DKL_TX_VSWING_CONTROL_MASK,
+ DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) |
+ DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) |
+ DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing));
- intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port),
- DKL_TX_DP20BITMODE, 0);
+ intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port, ln),
+ DKL_TX_DP20BITMODE, 0);
if (IS_ALDERLAKE_P(dev_priv)) {
u32 val;
@@ -1306,10 +1305,10 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(0);
}
- intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port),
- DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK |
- DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK,
- val);
+ intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port, ln),
+ DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK |
+ DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK,
+ val);
}
}
}
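The intel_dkl_phy_*() helpers fold the HIP_INDEX_REG bank select and the data access into one operation, serialized by the new dkl.phy_lock, so two threads can no longer interleave index writes and data reads. A standalone model of that index-register pattern (mock registers, with a pthread mutex standing in for the spinlock):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mock "hardware": an index register selecting one of two lane banks. */
    static uint32_t hip_index;
    static uint32_t lane_regs[2];
    static pthread_mutex_t phy_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Modeled on intel_dkl_phy_rmw(): select the lane and do the
     * read-modify-write under one lock, so the index can't be clobbered
     * by a concurrent accessor in between. */
    static void dkl_phy_rmw(int ln, uint32_t clear, uint32_t set)
    {
        pthread_mutex_lock(&phy_lock);
        hip_index = ln;                            /* bank select */
        lane_regs[hip_index] = (lane_regs[hip_index] & ~clear) | set;
        pthread_mutex_unlock(&phy_lock);
    }

    int main(void)
    {
        for (int ln = 0; ln < 2; ln++)
            dkl_phy_rmw(ln, 0xf, 0x5);
        printf("%x %x\n", lane_regs[0], lane_regs[1]); /* 5 5 */
        return 0;
    }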
@@ -2019,12 +2018,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
return;
if (DISPLAY_VER(dev_priv) >= 12) {
- intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
- HIP_INDEX_VAL(tc_port, 0x0));
- ln0 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port));
- intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
- HIP_INDEX_VAL(tc_port, 0x1));
- ln1 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port));
+ ln0 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port, 0));
+ ln1 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port, 1));
} else {
ln0 = intel_de_read(dev_priv, MG_DP_MODE(0, tc_port));
ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port));
@@ -2085,12 +2080,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
}
if (DISPLAY_VER(dev_priv) >= 12) {
- intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
- HIP_INDEX_VAL(tc_port, 0x0));
- intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln0);
- intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
- HIP_INDEX_VAL(tc_port, 0x1));
- intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln1);
+ intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port, 0), ln0);
+ intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port, 1), ln1);
} else {
intel_de_write(dev_priv, MG_DP_MODE(0, tc_port), ln0);
intel_de_write(dev_priv, MG_DP_MODE(1, tc_port), ln1);
@@ -3094,10 +3085,8 @@ static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder)
enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
int ln;
- for (ln = 0; ln < 2; ln++) {
- intel_de_write(i915, HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
- intel_de_rmw(i915, DKL_PCS_DW5(tc_port), DKL_PCS_DW5_CORE_SOFTRESET, 0);
- }
+ for (ln = 0; ln < 2; ln++)
+ intel_dkl_phy_rmw(i915, DKL_PCS_DW5(tc_port, ln), DKL_PCS_DW5_CORE_SOFTRESET, 0);
}
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
@@ -3536,7 +3525,7 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
if (drm_WARN_ON(&i915->drm, !pll))
return;
- if (intel_get_shared_dpll_id(i915, pll) == DPLL_ID_ICL_TBTPLL)
+ if (pll->info->id == DPLL_ID_ICL_TBTPLL)
port_dpll_id = ICL_PORT_DPLL_DEFAULT;
else
port_dpll_id = ICL_PORT_DPLL_MG_PHY;
@@ -3549,7 +3538,7 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
icl_set_active_port_dpll(crtc_state, port_dpll_id);
- if (intel_get_shared_dpll_id(i915, crtc_state->shared_dpll) == DPLL_ID_ICL_TBTPLL)
+ if (crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
crtc_state->port_clock = icl_calc_tbt_pll_link(i915, encoder->port);
else
crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll,
@@ -3591,7 +3580,7 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
enum phy phy = intel_port_to_phy(i915, encoder->port);
if (intel_phy_is_tc(i915, phy))
- intel_tc_port_sanitize(enc_to_dig_port(encoder));
+ intel_tc_port_sanitize_mode(enc_to_dig_port(encoder));
if (crtc_state && intel_crtc_has_dp_encoder(crtc_state))
intel_dp_sync_state(encoder, crtc_state);
@@ -3801,11 +3790,17 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
static void intel_ddi_encoder_reset(struct drm_encoder *encoder)
{
+ struct drm_i915_private *i915 = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
+ struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
intel_dp->reset_link_params = true;
intel_pps_encoder_reset(intel_dp);
+
+ if (intel_phy_is_tc(i915, phy))
+ intel_tc_port_init_mode(dig_port);
}
static const struct drm_encoder_funcs intel_ddi_funcs = {
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index dd008ba8afe3..b9393f9fc764 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -831,13 +831,27 @@ intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
}
static int
+intel_display_commit_duplicated_state(struct intel_atomic_state *state,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ int ret;
+
+ ret = drm_atomic_helper_commit_duplicated_state(&state->base, ctx);
+
+ drm_WARN_ON(&i915->drm, ret == -EDEADLK);
+
+ return ret;
+}
+
+static int
__intel_display_resume(struct drm_i915_private *i915,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
- int i, ret;
+ int i;
intel_modeset_setup_hw_state(i915, ctx);
intel_vga_redisable(i915);
@@ -863,11 +877,7 @@ __intel_display_resume(struct drm_i915_private *i915,
if (!HAS_GMCH(i915))
to_intel_atomic_state(state)->skip_intermediate_wm = true;
- ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
-
- drm_WARN_ON(&i915->drm, ret == -EDEADLK);
-
- return ret;
+ return intel_display_commit_duplicated_state(to_intel_atomic_state(state), ctx);
}
static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
@@ -878,7 +888,6 @@ static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
struct drm_atomic_state *state;
int ret;
@@ -906,10 +915,10 @@ void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
* Need mode_config.mutex so that we don't
* trample ongoing ->detect() and whatnot.
*/
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev_priv->drm.mode_config.mutex);
drm_modeset_acquire_init(ctx, 0);
while (1) {
- ret = drm_modeset_lock_all_ctx(dev, ctx);
+ ret = drm_modeset_lock_all_ctx(&dev_priv->drm, ctx);
if (ret != -EDEADLK)
break;
@@ -919,7 +928,7 @@ void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
*/
- state = drm_atomic_helper_duplicate_state(dev, ctx);
+ state = drm_atomic_helper_duplicate_state(&dev_priv->drm, ctx);
if (IS_ERR(state)) {
ret = PTR_ERR(state);
drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
@@ -927,7 +936,7 @@ void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
return;
}
- ret = drm_atomic_helper_disable_all(dev, ctx);
+ ret = drm_atomic_helper_disable_all(&dev_priv->drm, ctx);
if (ret) {
drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
ret);
@@ -959,7 +968,7 @@ void intel_display_finish_reset(struct drm_i915_private *i915)
/* reset doesn't touch the display */
if (!gpu_reset_clobbers_display(i915)) {
/* for testing only restore the display */
- ret = __intel_display_resume(i915, state, ctx);
+ ret = intel_display_commit_duplicated_state(to_intel_atomic_state(state), ctx);
if (ret)
drm_err(&i915->drm,
"Restoring old state failed with %i\n", ret);
@@ -1252,8 +1261,6 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (needs_cursorclk_wa(old_crtc_state) &&
!needs_cursorclk_wa(new_crtc_state))
icl_wa_cursorclkgating(dev_priv, pipe, false);
-
- intel_drrs_activate(new_crtc_state);
}
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
@@ -4572,8 +4579,8 @@ static bool encoders_cloneable(const struct intel_encoder *a,
const struct intel_encoder *b)
{
/* masks could be asymmetric, so check both ways */
- return a == b || (a->cloneable & (1 << b->type) &&
- b->cloneable & (1 << a->type));
+ return a == b || (a->cloneable & BIT(b->type) &&
+ b->cloneable & BIT(a->type));
}
static bool check_single_encoder_cloning(struct intel_atomic_state *state,
@@ -4824,14 +4831,14 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- bool mode_changed = intel_crtc_needs_modeset(crtc_state);
int ret;
if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
- mode_changed && !crtc_state->hw.active)
+ intel_crtc_needs_modeset(crtc_state) &&
+ !crtc_state->hw.active)
crtc_state->update_wm_post = true;
- if (mode_changed) {
+ if (intel_crtc_needs_modeset(crtc_state)) {
ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
if (ret)
return ret;
@@ -4844,8 +4851,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
if (c8_planes_changed(crtc_state))
crtc_state->uapi.color_mgmt_changed = true;
- if (mode_changed || crtc_state->update_pipe ||
- crtc_state->uapi.color_mgmt_changed) {
+ if (intel_crtc_needs_color_update(crtc_state)) {
ret = intel_color_check(crtc_state);
if (ret)
return ret;
@@ -4871,7 +4877,8 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
}
if (DISPLAY_VER(dev_priv) >= 9) {
- if (mode_changed || crtc_state->update_pipe) {
+ if (intel_crtc_needs_modeset(crtc_state) ||
+ intel_crtc_needs_fastset(crtc_state)) {
ret = skl_update_scaler_crtc(crtc_state);
if (ret)
return ret;
@@ -5637,39 +5644,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(name.y2); \
} while (0)
-/* This is required for BDW+ where there is only one set of registers for
- * switching between high and low RR.
- * This macro can be used whenever a comparison has to be made between one
- * hw state and multiple sw state variables.
- */
-#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
- if (!intel_compare_link_m_n(&current_config->name, \
- &pipe_config->name) && \
- !intel_compare_link_m_n(&current_config->alt_name, \
- &pipe_config->name)) { \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
- "(expected tu %i data %i/%i link %i/%i, " \
- "or tu %i data %i/%i link %i/%i, " \
- "found tu %i, data %i/%i link %i/%i)", \
- current_config->name.tu, \
- current_config->name.data_m, \
- current_config->name.data_n, \
- current_config->name.link_m, \
- current_config->name.link_n, \
- current_config->alt_name.tu, \
- current_config->alt_name.data_m, \
- current_config->alt_name.data_n, \
- current_config->alt_name.link_m, \
- current_config->alt_name.link_n, \
- pipe_config->name.tu, \
- pipe_config->name.data_m, \
- pipe_config->name.data_n, \
- pipe_config->name.link_m, \
- pipe_config->name.link_n); \
- ret = false; \
- } \
-} while (0)
-
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
@@ -5738,7 +5712,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
if (!fastset || !pipe_config->seamless_m_n)
- PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
+ PIPE_CONF_CHECK_M_N(dp_m_n);
} else {
PIPE_CONF_CHECK_M_N(dp_m_n);
PIPE_CONF_CHECK_M_N(dp_m2_n2);
@@ -5815,7 +5789,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
if (bp_gamma)
- PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
+ PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, post_csc_lut, bp_gamma);
if (current_config->active_planes) {
PIPE_CONF_CHECK_BOOL(has_psr);
@@ -5937,7 +5911,8 @@ intel_verify_planes(struct intel_atomic_state *state)
plane_state->uapi.visible);
}
-int intel_modeset_all_pipes(struct intel_atomic_state *state)
+int intel_modeset_all_pipes(struct intel_atomic_state *state,
+ const char *reason)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
@@ -5958,7 +5933,11 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state)
drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
continue;
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
+ crtc->base.base.id, crtc->base.name, reason);
+
crtc_state->uapi.mode_changed = true;
+ crtc_state->update_pipe = false;
ret = drm_atomic_add_affected_connectors(&state->base,
&crtc->base);
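Callers of intel_modeset_all_pipes() now have to say why they are forcing the full modeset, and the reason lands in the debug log above. A hedged example of a converted call site (the reason string is made up):

    /* e.g. from a hypothetical CDCLK path: */
    ret = intel_modeset_all_pipes(state, "CDCLK change");
    if (ret)
        return ret;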
@@ -6134,7 +6113,8 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
return;
new_crtc_state->uapi.mode_changed = false;
- new_crtc_state->update_pipe = true;
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ new_crtc_state->update_pipe = true;
}
static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
@@ -6906,12 +6886,19 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
+ intel_color_assert_luts(new_crtc_state);
+
ret = intel_async_flip_check_hw(state, crtc);
if (ret)
goto fail;
+ /* Either full modeset or fastset (or neither), never both */
+ drm_WARN_ON(&dev_priv->drm,
+ intel_crtc_needs_modeset(new_crtc_state) &&
+ intel_crtc_needs_fastset(new_crtc_state));
+
if (!intel_crtc_needs_modeset(new_crtc_state) &&
- !new_crtc_state->update_pipe)
+ !intel_crtc_needs_fastset(new_crtc_state))
continue;
intel_crtc_state_dump(new_crtc_state, state,
@@ -6947,12 +6934,8 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
return ret;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- bool mode_changed = intel_crtc_needs_modeset(crtc_state);
-
- if (mode_changed || crtc_state->update_pipe ||
- crtc_state->uapi.color_mgmt_changed) {
+ if (intel_crtc_needs_color_update(crtc_state))
intel_dsb_prepare(crtc_state);
- }
}
return 0;
@@ -7033,14 +7016,13 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
* CRTC was enabled.
*/
if (!modeset) {
- if (new_crtc_state->uapi.color_mgmt_changed ||
- new_crtc_state->update_pipe)
+ if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_arm(new_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
bdw_set_pipemisc(new_crtc_state);
- if (new_crtc_state->update_pipe)
+ if (intel_crtc_needs_fastset(new_crtc_state))
intel_pipe_fastset(old_crtc_state, new_crtc_state);
}
@@ -7099,25 +7081,23 @@ static void intel_update_crtc(struct intel_atomic_state *state,
if (!modeset) {
if (new_crtc_state->preload_luts &&
- (new_crtc_state->uapi.color_mgmt_changed ||
- new_crtc_state->update_pipe))
+ intel_crtc_needs_color_update(new_crtc_state))
intel_color_load_luts(new_crtc_state);
intel_pre_plane_update(state, crtc);
- if (new_crtc_state->update_pipe)
+ if (intel_crtc_needs_fastset(new_crtc_state))
intel_encoders_update_pipe(state, crtc);
if (DISPLAY_VER(i915) >= 11 &&
- new_crtc_state->update_pipe)
+ intel_crtc_needs_fastset(new_crtc_state))
icl_set_pipe_chicken(new_crtc_state);
}
intel_fbc_update(state, crtc);
if (!modeset &&
- (new_crtc_state->uapi.color_mgmt_changed ||
- new_crtc_state->update_pipe))
+ intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_noarm(new_crtc_state);
intel_crtc_planes_update_noarm(state, crtc);
@@ -7139,7 +7119,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
* valid pipe configuration from the BIOS we need to take care
* of enabling them on the CRTC's first fastset.
*/
- if (new_crtc_state->update_pipe && !modeset &&
+ if (intel_crtc_needs_fastset(new_crtc_state) && !modeset &&
old_crtc_state->inherited)
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
@@ -7162,9 +7142,7 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
intel_fbc_disable(crtc);
intel_disable_shared_dpll(old_crtc_state);
- /* FIXME unify this for all platforms */
- if (!new_crtc_state->hw.active &&
- !HAS_GMCH(dev_priv))
+ if (!new_crtc_state->hw.active)
intel_initial_watermarks(state, crtc);
}
@@ -7499,9 +7477,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (intel_crtc_needs_modeset(new_crtc_state) ||
- new_crtc_state->update_pipe) {
+ intel_crtc_needs_fastset(new_crtc_state))
intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
- }
}
intel_commit_modeset_disables(state);
@@ -7605,6 +7582,12 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
/*
+ * Activate DRRS after state readout to avoid
+ * dp_m_n vs. dp_m2_n2 confusion on BDW+.
+ */
+ intel_drrs_activate(new_crtc_state);
+
+ /*
* DSB cleanup is done in cleanup_work aligning with framebuffer
* cleanup. So copy and reset the dsb structure to sync with
* commit_done and later do dsb cleanup in cleanup_work.
@@ -8130,6 +8113,17 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
+static int max_dotclock(struct drm_i915_private *i915)
+{
+ int max_dotclock = i915->max_dotclk_freq;
+
+ /* icl+ might use bigjoiner */
+ if (DISPLAY_VER(i915) >= 11)
+ max_dotclock *= 2;
+
+ return max_dotclock;
+}
+
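Worked example: on a hypothetical ICL+ part with max_dotclk_freq of 650000 kHz, max_dotclock() returns 1300000 kHz to account for a bigjoiner split across two pipes, so a mode with a 1200000 kHz clock passes this gate while anything above 1300000 kHz is rejected early with MODE_CLOCK_HIGH.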
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
@@ -8167,6 +8161,13 @@ intel_mode_valid(struct drm_device *dev,
DRM_MODE_FLAG_CLKDIV2))
return MODE_BAD;
+ /*
+ * Reject clearly excessive dotclocks early to
+ * avoid having to worry about huge integers later.
+ */
+ if (mode->clock > max_dotclock(dev_priv))
+ return MODE_CLOCK_HIGH;
+
/* Transcoder timing limits */
if (DISPLAY_VER(dev_priv) >= 11) {
hdisplay_max = 16384;
@@ -8326,6 +8327,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
+ intel_color_init_hooks(dev_priv);
intel_init_cdclk_hooks(dev_priv);
intel_audio_hooks_init(dev_priv);
@@ -8656,6 +8658,10 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
+ ret = intel_color_init(i915);
+ if (ret)
+ goto cleanup_vga_client_pw_domain_dmc;
+
ret = intel_dbuf_init(i915);
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 884e8e67b17c..c803330a276d 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -469,10 +469,6 @@ enum hpd_pin {
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
for_each_if((intel_encoder)->base.crtc == (__crtc))
-#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
- list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
- for_each_if((intel_connector)->base.encoder == (__encoder))
-
#define for_each_old_intel_plane_in_state(__state, plane, old_plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_total_plane && \
@@ -683,7 +679,8 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915);
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915);
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915);
void intel_display_resume(struct drm_device *dev);
-int intel_modeset_all_pipes(struct intel_atomic_state *state);
+int intel_modeset_all_pipes(struct intel_atomic_state *state,
+ const char *reason);
void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
struct intel_power_domain_mask *old_domains);
void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 96cf994b0ad1..337d8e08ba43 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -28,6 +28,7 @@
struct drm_i915_private;
struct drm_property;
+struct drm_property_blob;
struct i915_audio_component;
struct i915_hdcp_comp_master;
struct intel_atomic_state;
@@ -309,6 +310,10 @@ struct intel_display {
} cdclk;
struct {
+ struct drm_property_blob *glk_linear_degamma_lut;
+ } color;
+
+ struct {
/* The current hardware dbuf configuration */
u8 enabled_slices;
@@ -316,6 +321,14 @@ struct intel_display {
} dbuf;
struct {
+ /*
+ * dkl.phy_lock protects against concurrent access to the
+ * Dekel TypeC PHYs.
+ */
+ spinlock_t phy_lock;
+ } dkl;
+
+ struct {
/* VLV/CHV/BXT/GLK DSI MMIO register base address */
u32 mmio_base;
} dsi;
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 7c7253a2541c..cfc056a05bbf 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -22,6 +22,7 @@
#include "intel_fbdev.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
+#include "intel_hotplug.h"
#include "intel_panel.h"
#include "intel_pm.h"
#include "intel_psr.h"
@@ -127,7 +128,6 @@ static int i915_vbt(struct seq_file *m, void *unused)
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
struct intel_framebuffer *fbdev_fb = NULL;
struct drm_framebuffer *drm_fb;
@@ -146,8 +146,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
}
#endif
- mutex_lock(&dev->mode_config.fb_lock);
- drm_for_each_fb(drm_fb, dev) {
+ mutex_lock(&dev_priv->drm.mode_config.fb_lock);
+ drm_for_each_fb(drm_fb, &dev_priv->drm) {
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
if (fb == fbdev_fb)
continue;
@@ -162,7 +162,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
seq_putc(m, '\n');
}
- mutex_unlock(&dev->mode_config.fb_lock);
+ mutex_unlock(&dev_priv->drm.mode_config.fb_lock);
return 0;
}
@@ -897,7 +897,6 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
static int i915_display_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
@@ -905,22 +904,22 @@ static int i915_display_info(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- drm_modeset_lock_all(dev);
+ drm_modeset_lock_all(&dev_priv->drm);
seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n");
- for_each_intel_crtc(dev, crtc)
+ for_each_intel_crtc(&dev_priv->drm, crtc)
intel_crtc_info(m, crtc);
seq_printf(m, "\n");
seq_printf(m, "Connector info\n");
seq_printf(m, "--------------\n");
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
intel_connector_info(m, connector);
drm_connector_list_iter_end(&conn_iter);
- drm_modeset_unlock_all(dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
@@ -930,10 +929,9 @@ static int i915_display_info(struct seq_file *m, void *unused)
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
int i;
- drm_modeset_lock_all(dev);
+ drm_modeset_lock_all(&dev_priv->drm);
seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
dev_priv->display.dpll.ref_clks.nssc,
@@ -978,7 +976,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
pll->state.hw_state.mg_pll_tdc_coldst_bias);
}
- drm_modeset_unlock_all(dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
return 0;
}
@@ -986,14 +984,13 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_ddb_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
struct skl_ddb_entry *entry;
struct intel_crtc *crtc;
if (DISPLAY_VER(dev_priv) < 9)
return -ENODEV;
- drm_modeset_lock_all(dev);
+ drm_modeset_lock_all(&dev_priv->drm);
seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
@@ -1017,53 +1014,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
entry->end, skl_ddb_entry_size(entry));
}
- drm_modeset_unlock_all(dev);
-
- return 0;
-}
-
-static int i915_drrs_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_connector_list_iter conn_iter;
- struct intel_connector *connector;
- struct intel_crtc *crtc;
-
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
- for_each_intel_connector_iter(connector, &conn_iter) {
- seq_printf(m, "[CONNECTOR:%d:%s] DRRS type: %s\n",
- connector->base.base.id, connector->base.name,
- intel_drrs_type_str(intel_panel_drrs_type(connector)));
- }
- drm_connector_list_iter_end(&conn_iter);
-
- seq_puts(m, "\n");
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- seq_printf(m, "[CRTC:%d:%s]:\n",
- crtc->base.base.id, crtc->base.name);
-
- mutex_lock(&crtc->drrs.mutex);
-
- /* DRRS Supported */
- seq_printf(m, "\tDRRS Enabled: %s\n",
- str_yes_no(crtc_state->has_drrs));
-
- seq_printf(m, "\tDRRS Active: %s\n",
- str_yes_no(intel_drrs_is_active(crtc)));
-
- seq_printf(m, "\tBusy_frontbuffer_bits: 0x%X\n",
- crtc->drrs.busy_frontbuffer_bits);
-
- seq_printf(m, "\tDRRS refresh rate: %s\n",
- crtc->drrs.refresh_rate == DRRS_REFRESH_RATE_LOW ?
- "low" : "high");
-
- mutex_unlock(&crtc->drrs.mutex);
- }
+ drm_modeset_unlock_all(&dev_priv->drm);
return 0;
}
@@ -1107,13 +1058,12 @@ static int i915_lpsp_status(struct seq_file *m, void *unused)
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *intel_encoder;
struct intel_digital_port *dig_port;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
continue;
@@ -1200,12 +1150,11 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
struct intel_dp *intel_dp;
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct intel_encoder *encoder;
@@ -1250,12 +1199,11 @@ static const struct file_operations i915_displayport_test_active_fops = {
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
struct intel_dp *intel_dp;
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct intel_encoder *encoder;
@@ -1304,12 +1252,11 @@ DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
struct intel_dp *intel_dp;
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct intel_encoder *encoder;
@@ -1336,7 +1283,6 @@ DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
int level;
int num_levels;
@@ -1349,7 +1295,7 @@ static void wm_latency_show(struct seq_file *m, const u16 wm[8])
else
num_levels = ilk_wm_max_level(dev_priv) + 1;
- drm_modeset_lock_all(dev);
+ drm_modeset_lock_all(&dev_priv->drm);
for (level = 0; level < num_levels; level++) {
unsigned int latency = wm[level];
@@ -1370,7 +1316,7 @@ static void wm_latency_show(struct seq_file *m, const u16 wm[8])
level, wm[level], latency / 10, latency % 10);
}
- drm_modeset_unlock_all(dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
}
static int pri_wm_latency_show(struct seq_file *m, void *data)
@@ -1453,7 +1399,6 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
u16 new[8] = { 0 };
int num_levels;
int level;
@@ -1483,12 +1428,12 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
if (ret != num_levels)
return -EINVAL;
- drm_modeset_lock_all(dev);
+ drm_modeset_lock_all(&dev_priv->drm);
for (level = 0; level < num_levels; level++)
wm[level] = new[level];
- drm_modeset_unlock_all(dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
return len;
}
@@ -1566,209 +1511,6 @@ static const struct file_operations i915_cur_wm_latency_fops = {
.write = cur_wm_latency_write
};
-static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
-
- /* Synchronize with everything first in case there's been an HPD
- * storm, but we haven't finished handling it in the kernel yet
- */
- intel_synchronize_irq(dev_priv);
- flush_work(&dev_priv->display.hotplug.dig_port_work);
- flush_delayed_work(&dev_priv->display.hotplug.hotplug_work);
-
- seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
- seq_printf(m, "Detected: %s\n",
- str_yes_no(delayed_work_pending(&hotplug->reenable_work)));
-
- return 0;
-}
-
-static ssize_t i915_hpd_storm_ctl_write(struct file *file,
- const char __user *ubuf, size_t len,
- loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
- unsigned int new_threshold;
- int i;
- char *newline;
- char tmp[16];
-
- if (len >= sizeof(tmp))
- return -EINVAL;
-
- if (copy_from_user(tmp, ubuf, len))
- return -EFAULT;
-
- tmp[len] = '\0';
-
- /* Strip newline, if any */
- newline = strchr(tmp, '\n');
- if (newline)
- *newline = '\0';
-
- if (strcmp(tmp, "reset") == 0)
- new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
- else if (kstrtouint(tmp, 10, &new_threshold) != 0)
- return -EINVAL;
-
- if (new_threshold > 0)
- drm_dbg_kms(&dev_priv->drm,
- "Setting HPD storm detection threshold to %d\n",
- new_threshold);
- else
- drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
-
- spin_lock_irq(&dev_priv->irq_lock);
- hotplug->hpd_storm_threshold = new_threshold;
- /* Reset the HPD storm stats so we don't accidentally trigger a storm */
- for_each_hpd_pin(i)
- hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
-
- return len;
-}
-
-static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
-}
-
-static const struct file_operations i915_hpd_storm_ctl_fops = {
- .owner = THIS_MODULE,
- .open = i915_hpd_storm_ctl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_hpd_storm_ctl_write
-};
-
-static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
-
- seq_printf(m, "Enabled: %s\n",
- str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled));
-
- return 0;
-}
-
-static int
-i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, i915_hpd_short_storm_ctl_show,
- inode->i_private);
-}
-
-static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
- const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
- char *newline;
- char tmp[16];
- int i;
- bool new_state;
-
- if (len >= sizeof(tmp))
- return -EINVAL;
-
- if (copy_from_user(tmp, ubuf, len))
- return -EFAULT;
-
- tmp[len] = '\0';
-
- /* Strip newline, if any */
- newline = strchr(tmp, '\n');
- if (newline)
- *newline = '\0';
-
- /* Reset to the "default" state for this system */
- if (strcmp(tmp, "reset") == 0)
- new_state = !HAS_DP_MST(dev_priv);
- else if (kstrtobool(tmp, &new_state) != 0)
- return -EINVAL;
-
- drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
- new_state ? "En" : "Dis");
-
- spin_lock_irq(&dev_priv->irq_lock);
- hotplug->hpd_short_storm_enabled = new_state;
- /* Reset the HPD storm stats so we don't accidentally trigger a storm */
- for_each_hpd_pin(i)
- hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
-
- return len;
-}
-
-static const struct file_operations i915_hpd_short_storm_ctl_fops = {
- .owner = THIS_MODULE,
- .open = i915_hpd_short_storm_ctl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_hpd_short_storm_ctl_write,
-};
-
-static int i915_drrs_ctl_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(dev, crtc) {
- struct intel_crtc_state *crtc_state;
- struct drm_crtc_commit *commit;
- int ret;
-
- ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
- if (ret)
- return ret;
-
- crtc_state = to_intel_crtc_state(crtc->base.state);
-
- if (!crtc_state->hw.active ||
- !crtc_state->has_drrs)
- goto out;
-
- commit = crtc_state->uapi.commit;
- if (commit) {
- ret = wait_for_completion_interruptible(&commit->hw_done);
- if (ret)
- goto out;
- }
-
- drm_dbg(&dev_priv->drm,
- "Manually %sactivating DRRS\n", val ? "" : "de");
-
- if (val)
- intel_drrs_activate(crtc_state);
- else
- intel_drrs_deactivate(crtc_state);
-
-out:
- drm_modeset_unlock(&crtc->base.mutex);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
-
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
const char __user *ubuf,
@@ -1776,7 +1518,6 @@ i915_fifo_underrun_reset_write(struct file *filp,
{
struct drm_i915_private *dev_priv = filp->private_data;
struct intel_crtc *crtc;
- struct drm_device *dev = &dev_priv->drm;
int ret;
bool reset;
@@ -1787,7 +1528,7 @@ i915_fifo_underrun_reset_write(struct file *filp,
if (!reset)
return cnt;
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
struct drm_crtc_commit *commit;
struct intel_crtc_state *crtc_state;
@@ -1842,7 +1583,6 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_ddb_info", i915_ddb_info, 0},
- {"i915_drrs_status", i915_drrs_status, 0},
{"i915_lpsp_status", i915_lpsp_status, 0},
};
@@ -1857,9 +1597,6 @@ static const struct {
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
{"i915_dp_test_active", &i915_displayport_test_active_fops},
- {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
- {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
- {"i915_drrs_ctl", &i915_drrs_ctl_fops},
{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};
@@ -1882,6 +1619,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
intel_dmc_debugfs_register(i915);
intel_fbc_debugfs_register(i915);
+ intel_hpd_debugfs_register(i915);
skl_watermark_ipc_debugfs_register(i915);
}
@@ -2195,6 +1933,8 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
if (!root)
return;
+ intel_drrs_connector_debugfs_add(intel_connector);
+
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
debugfs_create_file("i915_panel_timings", S_IRUGO, root,
connector, &i915_panel_fops);
@@ -2247,6 +1987,7 @@ void intel_crtc_debugfs_add(struct drm_crtc *crtc)
return;
crtc_updates_add(crtc);
+ intel_drrs_crtc_debugfs_add(to_intel_crtc(crtc));
intel_fbc_crtc_debugfs_add(to_intel_crtc(crtc));
debugfs_create_file("i915_current_bpc", 0444, crtc->debugfs_entry, crtc,
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 1e608b9e5055..4c1de91e56ff 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -1148,10 +1148,9 @@ static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc;
- for_each_intel_crtc(dev, crtc)
+ for_each_intel_crtc(&dev_priv->drm, crtc)
I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index df7ee4969ef1..8710dd41ffd4 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -12,6 +12,8 @@
#include "intel_de.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
+#include "intel_dkl_phy.h"
+#include "intel_dkl_phy_regs.h"
#include "intel_dmc.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
@@ -529,11 +531,9 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
enum tc_port tc_port;
tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);
- intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
- HIP_INDEX_VAL(tc_port, 0x2));
- if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
- DKL_CMN_UC_DW27_UC_HEALTH, 1))
+ if (wait_for(intel_dkl_phy_read(dev_priv, DKL_CMN_UC_DW_27(tc_port)) &
+ DKL_CMN_UC_DW27_UC_HEALTH, 1))
drm_warn(&dev_priv->drm,
"Timeout waiting TC uC health\n");
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 298d00a11f47..7f18c052ec16 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1001,11 +1001,15 @@ struct intel_crtc_state {
*/
struct {
bool active, enable;
+ /* logical state of LUTs */
struct drm_property_blob *degamma_lut, *gamma_lut, *ctm;
struct drm_display_mode mode, pipe_mode, adjusted_mode;
enum drm_scaling_filter scaling_filter;
} hw;
+ /* actual state of LUTs */
+ struct drm_property_blob *pre_csc_lut, *post_csc_lut;
+
/**
* quirks - bitfield with hw state readout quirks
*
@@ -2040,15 +2044,16 @@ static inline bool
intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
enum intel_output_type type)
{
- return crtc_state->output_types & (1 << type);
+ return crtc_state->output_types & BIT(type);
}
+
static inline bool
intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
{
return crtc_state->output_types &
- ((1 << INTEL_OUTPUT_DP) |
- (1 << INTEL_OUTPUT_DP_MST) |
- (1 << INTEL_OUTPUT_EDP));
+ (BIT(INTEL_OUTPUT_DP) |
+ BIT(INTEL_OUTPUT_DP_MST) |
+ BIT(INTEL_OUTPUT_EDP));
}
static inline bool
@@ -2057,6 +2062,20 @@ intel_crtc_needs_modeset(const struct intel_crtc_state *crtc_state)
return drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
}
+static inline bool
+intel_crtc_needs_fastset(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->update_pipe;
+}
+
+static inline bool
+intel_crtc_needs_color_update(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->uapi.color_mgmt_changed ||
+ intel_crtc_needs_fastset(crtc_state) ||
+ intel_crtc_needs_modeset(crtc_state);
+}
+
static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state)
{
return i915_ggtt_offset(plane_state->ggtt_vma);
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.c b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
new file mode 100644
index 000000000000..57cc3edba016
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+
+#include "intel_de.h"
+#include "intel_display.h"
+#include "intel_dkl_phy.h"
+#include "intel_dkl_phy_regs.h"
+
+static void
+dkl_phy_set_hip_idx(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg)
+{
+ enum tc_port tc_port = DKL_REG_TC_PORT(reg);
+
+ drm_WARN_ON(&i915->drm, tc_port < TC_PORT_1 || tc_port >= I915_MAX_TC_PORTS);
+
+ intel_de_write(i915,
+ HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, reg.bank_idx));
+}
+
+/**
+ * intel_dkl_phy_read - read a Dekel PHY register
+ * @i915: i915 device instance
+ * @reg: Dekel PHY register
+ *
+ * Read the @reg Dekel PHY register.
+ *
+ * Returns the read value.
+ */
+u32
+intel_dkl_phy_read(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg)
+{
+ u32 val;
+
+ spin_lock(&i915->display.dkl.phy_lock);
+
+ dkl_phy_set_hip_idx(i915, reg);
+ val = intel_de_read(i915, DKL_REG_MMIO(reg));
+
+ spin_unlock(&i915->display.dkl.phy_lock);
+
+ return val;
+}
+
+/**
+ * intel_dkl_phy_write - write a Dekel PHY register
+ * @i915: i915 device instance
+ * @reg: Dekel PHY register
+ * @val: value to write
+ *
+ * Write @val to the @reg Dekel PHY register.
+ */
+void
+intel_dkl_phy_write(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg, u32 val)
+{
+ spin_lock(&i915->display.dkl.phy_lock);
+
+ dkl_phy_set_hip_idx(i915, reg);
+ intel_de_write(i915, DKL_REG_MMIO(reg), val);
+
+ spin_unlock(&i915->display.dkl.phy_lock);
+}
+
+/**
+ * intel_dkl_phy_rmw - read-modify-write a Dekel PHY register
+ * @i915: i915 device instance
+ * @reg: Dekel PHY register
+ * @clear: mask to clear
+ * @set: mask to set
+ *
+ * Read the @reg Dekel PHY register, clear the bits in @clear, set the bits in @set, and write
+ * the result back to the register if it differs from the value read.
+ */
+void
+intel_dkl_phy_rmw(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg, u32 clear, u32 set)
+{
+ spin_lock(&i915->display.dkl.phy_lock);
+
+ dkl_phy_set_hip_idx(i915, reg);
+ intel_de_rmw(i915, DKL_REG_MMIO(reg), clear, set);
+
+ spin_unlock(&i915->display.dkl.phy_lock);
+}
+
+/**
+ * intel_dkl_phy_posting_read - do a posting read from a Dekel PHY register
+ * @i915: i915 device instance
+ * @reg: Dekel PHY register
+ *
+ * Read the @reg Dekel PHY register without returning the read value.
+ */
+void
+intel_dkl_phy_posting_read(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg)
+{
+ spin_lock(&i915->display.dkl.phy_lock);
+
+ dkl_phy_set_hip_idx(i915, reg);
+ intel_de_posting_read(i915, DKL_REG_MMIO(reg));
+
+ spin_unlock(&i915->display.dkl.phy_lock);
+}
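For context, a minimal sketch of how a caller is expected to use these accessors in place of the old open-coded HIP_INDEX_REG write. DKL_PCS_DW5 and DKL_PCS_DW5_CORE_SOFTRESET come from the register header added below; the wrapper function itself is hypothetical:

/* Hypothetical helper: pulse the PCS soft-reset bit on lane 0 of a TC port */
static void demo_dkl_pcs_softreset(struct drm_i915_private *i915, enum tc_port tc_port)
{
	/* each call re-programs the HIP index under display.dkl.phy_lock */
	intel_dkl_phy_rmw(i915, DKL_PCS_DW5(tc_port, 0), 0, DKL_PCS_DW5_CORE_SOFTRESET);
	intel_dkl_phy_rmw(i915, DKL_PCS_DW5(tc_port, 0), DKL_PCS_DW5_CORE_SOFTRESET, 0);
}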
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.h b/drivers/gpu/drm/i915/display/intel_dkl_phy.h
new file mode 100644
index 000000000000..570ee36f9386
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DKL_PHY_H__
+#define __INTEL_DKL_PHY_H__
+
+#include <linux/types.h>
+
+#include "intel_dkl_phy_regs.h"
+
+struct drm_i915_private;
+
+u32
+intel_dkl_phy_read(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg);
+void
+intel_dkl_phy_write(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg, u32 val);
+void
+intel_dkl_phy_rmw(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg, u32 clear, u32 set);
+void
+intel_dkl_phy_posting_read(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg);
+
+#endif /* __INTEL_DKL_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h b/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h
new file mode 100644
index 000000000000..56085b32956d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DKL_PHY_REGS__
+#define __INTEL_DKL_PHY_REGS__
+
+#include <linux/types.h>
+
+struct intel_dkl_phy_reg {
+ u32 reg:24;
+ u32 bank_idx:4;
+};
+
+#define _DKL_PHY1_BASE 0x168000
+#define _DKL_PHY2_BASE 0x169000
+#define _DKL_PHY3_BASE 0x16A000
+#define _DKL_PHY4_BASE 0x16B000
+#define _DKL_PHY5_BASE 0x16C000
+#define _DKL_PHY6_BASE 0x16D000
+
+#define DKL_REG_TC_PORT(__reg) \
+ (TC_PORT_1 + ((__reg).reg - _DKL_PHY1_BASE) / (_DKL_PHY2_BASE - _DKL_PHY1_BASE))
+
+/* DEKEL PHY MMIO Address = Phy base + (internal address & ~index_mask) */
+#define DKL_REG_MMIO(__reg) _MMIO((__reg).reg)
+
+#define _DKL_REG_PHY_BASE(tc_port) _PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE)
+
+#define _DKL_BANK_SHIFT 12
+#define _DKL_REG_BANK_OFFSET(phy_offset) \
+ ((phy_offset) & ((1 << _DKL_BANK_SHIFT) - 1))
+#define _DKL_REG_BANK_IDX(phy_offset) \
+ (((phy_offset) >> _DKL_BANK_SHIFT) & 0xf)
+
+#define _DKL_REG(tc_port, phy_offset) \
+ ((const struct intel_dkl_phy_reg) { \
+ .reg = _DKL_REG_PHY_BASE(tc_port) + \
+ _DKL_REG_BANK_OFFSET(phy_offset), \
+ .bank_idx = _DKL_REG_BANK_IDX(phy_offset), \
+ })
+
+#define _DKL_REG_LN(tc_port, ln_idx, ln0_offs, ln1_offs) \
+ _DKL_REG(tc_port, (ln0_offs) + (ln_idx) * ((ln1_offs) - (ln0_offs)))
+
+#define _DKL_PCS_DW5_LN0 0x0014
+#define _DKL_PCS_DW5_LN1 0x1014
+#define DKL_PCS_DW5(tc_port, ln) _DKL_REG_LN(tc_port, ln, \
+ _DKL_PCS_DW5_LN0, \
+ _DKL_PCS_DW5_LN1)
+#define DKL_PCS_DW5_CORE_SOFTRESET REG_BIT(11)
+
+#define _DKL_PLL_DIV0 0x2200
+#define DKL_PLL_DIV0(tc_port) _DKL_REG(tc_port, \
+ _DKL_PLL_DIV0)
+#define DKL_PLL_DIV0_AFC_STARTUP_MASK REG_GENMASK(27, 25)
+#define DKL_PLL_DIV0_AFC_STARTUP(val) REG_FIELD_PREP(DKL_PLL_DIV0_AFC_STARTUP_MASK, (val))
+#define DKL_PLL_DIV0_INTEG_COEFF(x) ((x) << 16)
+#define DKL_PLL_DIV0_INTEG_COEFF_MASK (0x1F << 16)
+#define DKL_PLL_DIV0_PROP_COEFF(x) ((x) << 12)
+#define DKL_PLL_DIV0_PROP_COEFF_MASK (0xF << 12)
+#define DKL_PLL_DIV0_FBPREDIV_SHIFT (8)
+#define DKL_PLL_DIV0_FBPREDIV(x) ((x) << DKL_PLL_DIV0_FBPREDIV_SHIFT)
+#define DKL_PLL_DIV0_FBPREDIV_MASK (0xF << DKL_PLL_DIV0_FBPREDIV_SHIFT)
+#define DKL_PLL_DIV0_FBDIV_INT(x) ((x) << 0)
+#define DKL_PLL_DIV0_FBDIV_INT_MASK (0xFF << 0)
+#define DKL_PLL_DIV0_MASK (DKL_PLL_DIV0_INTEG_COEFF_MASK | \
+ DKL_PLL_DIV0_PROP_COEFF_MASK | \
+ DKL_PLL_DIV0_FBPREDIV_MASK | \
+ DKL_PLL_DIV0_FBDIV_INT_MASK)
+
+#define _DKL_PLL_DIV1 0x2204
+#define DKL_PLL_DIV1(tc_port) _DKL_REG(tc_port, \
+ _DKL_PLL_DIV1)
+#define DKL_PLL_DIV1_IREF_TRIM(x) ((x) << 16)
+#define DKL_PLL_DIV1_IREF_TRIM_MASK (0x1F << 16)
+#define DKL_PLL_DIV1_TDC_TARGET_CNT(x) ((x) << 0)
+#define DKL_PLL_DIV1_TDC_TARGET_CNT_MASK (0xFF << 0)
+
+#define _DKL_PLL_SSC 0x2210
+#define DKL_PLL_SSC(tc_port) _DKL_REG(tc_port, \
+ _DKL_PLL_SSC)
+#define DKL_PLL_SSC_IREF_NDIV_RATIO(x) ((x) << 29)
+#define DKL_PLL_SSC_IREF_NDIV_RATIO_MASK (0x7 << 29)
+#define DKL_PLL_SSC_STEP_LEN(x) ((x) << 16)
+#define DKL_PLL_SSC_STEP_LEN_MASK (0xFF << 16)
+#define DKL_PLL_SSC_STEP_NUM(x) ((x) << 11)
+#define DKL_PLL_SSC_STEP_NUM_MASK (0x7 << 11)
+#define DKL_PLL_SSC_EN (1 << 9)
+
+#define _DKL_PLL_BIAS 0x2214
+#define DKL_PLL_BIAS(tc_port) _DKL_REG(tc_port, \
+ _DKL_PLL_BIAS)
+#define DKL_PLL_BIAS_FRAC_EN_H (1 << 30)
+#define DKL_PLL_BIAS_FBDIV_SHIFT (8)
+#define DKL_PLL_BIAS_FBDIV_FRAC(x) ((x) << DKL_PLL_BIAS_FBDIV_SHIFT)
+#define DKL_PLL_BIAS_FBDIV_FRAC_MASK (0x3FFFFF << DKL_PLL_BIAS_FBDIV_SHIFT)
+
+#define _DKL_PLL_TDC_COLDST_BIAS 0x2218
+#define DKL_PLL_TDC_COLDST_BIAS(tc_port) _DKL_REG(tc_port, \
+ _DKL_PLL_TDC_COLDST_BIAS)
+#define DKL_PLL_TDC_SSC_STEP_SIZE(x) ((x) << 8)
+#define DKL_PLL_TDC_SSC_STEP_SIZE_MASK (0xFF << 8)
+#define DKL_PLL_TDC_FEED_FWD_GAIN(x) ((x) << 0)
+#define DKL_PLL_TDC_FEED_FWD_GAIN_MASK (0xFF << 0)
+
+#define _DKL_REFCLKIN_CTL 0x212C
+#define DKL_REFCLKIN_CTL(tc_port) _DKL_REG(tc_port, \
+ _DKL_REFCLKIN_CTL)
+/* Bits are the same as MG_REFCLKIN_CTL */
+
+#define _DKL_CLKTOP2_HSCLKCTL 0x20D4
+#define DKL_CLKTOP2_HSCLKCTL(tc_port) _DKL_REG(tc_port, \
+ _DKL_CLKTOP2_HSCLKCTL)
+/* Bits are the same as MG_CLKTOP2_HSCLKCTL */
+
+#define _DKL_CLKTOP2_CORECLKCTL1 0x20D8
+#define DKL_CLKTOP2_CORECLKCTL1(tc_port) _DKL_REG(tc_port, \
+ _DKL_CLKTOP2_CORECLKCTL1)
+/* Bits are the same as MG_CLKTOP2_CORECLKCTL1 */
+
+#define _DKL_TX_DPCNTL0_LN0 0x02C0
+#define _DKL_TX_DPCNTL0_LN1 0x12C0
+#define DKL_TX_DPCNTL0(tc_port, ln) _DKL_REG_LN(tc_port, ln, \
+ _DKL_TX_DPCNTL0_LN0, \
+ _DKL_TX_DPCNTL0_LN1)
+#define DKL_TX_PRESHOOT_COEFF(x) ((x) << 13)
+#define DKL_TX_PRESHOOT_COEFF_MASK (0x1f << 13)
+#define DKL_TX_DE_EMPHASIS_COEFF(x) ((x) << 8)
+#define DKL_TX_DE_EMPAHSIS_COEFF_MASK (0x1f << 8)
+#define DKL_TX_VSWING_CONTROL(x) ((x) << 0)
+#define DKL_TX_VSWING_CONTROL_MASK (0x7 << 0)
+
+#define _DKL_TX_DPCNTL1_LN0 0x02C4
+#define _DKL_TX_DPCNTL1_LN1 0x12C4
+#define DKL_TX_DPCNTL1(tc_port, ln) _DKL_REG_LN(tc_port, ln, \
+ _DKL_TX_DPCNTL1_LN0, \
+ _DKL_TX_DPCNTL1_LN1)
+/* Bits are the same as DKL_TX_DPCNTRL0 */
+
+#define _DKL_TX_DPCNTL2_LN0 0x02C8
+#define _DKL_TX_DPCNTL2_LN1 0x12C8
+#define DKL_TX_DPCNTL2(tc_port, ln) _DKL_REG_LN(tc_port, ln, \
+ _DKL_TX_DPCNTL2_LN0, \
+ _DKL_TX_DPCNTL2_LN1)
+#define DKL_TX_DP20BITMODE REG_BIT(2)
+#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK REG_GENMASK(4, 3)
+#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK, (val))
+#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK REG_GENMASK(6, 5)
+#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, (val))
+
+#define _DKL_TX_FW_CALIB_LN0 0x02F8
+#define _DKL_TX_FW_CALIB_LN1 0x12F8
+#define DKL_TX_FW_CALIB(tc_port, ln) _DKL_REG_LN(tc_port, ln, \
+ _DKL_TX_FW_CALIB_LN0, \
+ _DKL_TX_FW_CALIB_LN1)
+#define DKL_TX_CFG_DISABLE_WAIT_INIT (1 << 7)
+
+#define _DKL_TX_PMD_LANE_SUS_LN0 0x0D00
+#define _DKL_TX_PMD_LANE_SUS_LN1 0x1D00
+#define DKL_TX_PMD_LANE_SUS(tc_port, ln) _DKL_REG_LN(tc_port, ln, \
+ _DKL_TX_PMD_LANE_SUS_LN0, \
+ _DKL_TX_PMD_LANE_SUS_LN1)
+
+#define _DKL_TX_DW17_LN0 0x0DC4
+#define _DKL_TX_DW17_LN1 0x1DC4
+#define DKL_TX_DW17(tc_port, ln) _DKL_REG_LN(tc_port, ln, \
+ _DKL_TX_DW17_LN0, \
+ _DKL_TX_DW17_LN1)
+
+#define _DKL_TX_DW18_LN0 0x0DC8
+#define _DKL_TX_DW18_LN1 0x1DC8
+#define DKL_TX_DW18(tc_port, ln) _DKL_REG_LN(tc_port, ln, \
+ _DKL_TX_DW18_LN0, \
+ _DKL_TX_DW18_LN1)
+
+#define _DKL_DP_MODE_LN0 0x00A0
+#define _DKL_DP_MODE_LN1 0x10A0
+#define DKL_DP_MODE(tc_port, ln) _DKL_REG_LN(tc_port, ln, \
+ _DKL_DP_MODE_LN0, \
+ _DKL_DP_MODE_LN1)
+
+#define _DKL_CMN_UC_DW27 0x236C
+#define DKL_CMN_UC_DW_27(tc_port) _DKL_REG(tc_port, \
+ _DKL_CMN_UC_DW27)
+#define DKL_CMN_UC_DW27_UC_HEALTH (0x1 << 15)
+
+/*
+ * Each Dekel PHY is addressed through a 4KB aperture. Each PHY has more than
+ * 4KB of register space, so a separate index is programmed in HIP_INDEX_REG0
+ * or HIP_INDEX_REG1, based on the port number, to set the upper 2 address
+ * bits that point the 4KB window into the full PHY register space.
+ */
+#define _HIP_INDEX_REG0 0x1010A0
+#define _HIP_INDEX_REG1 0x1010A4
+#define HIP_INDEX_REG(tc_port) _MMIO((tc_port) < 4 ? _HIP_INDEX_REG0 \
+ : _HIP_INDEX_REG1)
+#define _HIP_INDEX_SHIFT(tc_port) (8 * ((tc_port) % 4))
+#define HIP_INDEX_VAL(tc_port, val) ((val) << _HIP_INDEX_SHIFT(tc_port))
+
+#endif /* __INTEL_DKL_PHY_REGS__ */
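A worked example of the windowing math described in the comment above, as a standalone illustrative sketch (the constants mirror _DKL_BANK_SHIFT and _DKL_PHY1_BASE):

#include <stdint.h>
#include <stdio.h>

#define BANK_SHIFT 12		/* mirrors _DKL_BANK_SHIFT */
#define PHY1_BASE  0x168000	/* mirrors _DKL_PHY1_BASE */

int main(void)
{
	uint32_t phy_offset = 0x2200;	/* _DKL_PLL_DIV0 */
	/* upper bits become the bank index, programmed via HIP_INDEX_REG -> 2 */
	uint32_t bank_idx = (phy_offset >> BANK_SHIFT) & 0xf;
	/* lower bits stay as the offset inside the 4KB window -> 0x168200 */
	uint32_t mmio = PHY1_BASE + (phy_offset & ((1 << BANK_SHIFT) - 1));

	printf("bank_idx=%u mmio=0x%x\n", bank_idx, mmio);
	return 0;
}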
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index e52ecc0738a6..081a4d0083b1 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -1065,12 +1065,13 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
seq_printf(m, "fw loaded: %s\n",
str_yes_no(intel_dmc_has_payload(i915)));
seq_printf(m, "path: %s\n", dmc->fw_path);
- seq_printf(m, "Pipe A fw support: %s\n",
+ seq_printf(m, "Pipe A fw needed: %s\n",
str_yes_no(GRAPHICS_VER(i915) >= 12));
seq_printf(m, "Pipe A fw loaded: %s\n",
str_yes_no(dmc->dmc_info[DMC_FW_PIPEA].payload));
- seq_printf(m, "Pipe B fw support: %s\n",
- str_yes_no(IS_ALDERLAKE_P(i915)));
+ seq_printf(m, "Pipe B fw needed: %s\n",
+ str_yes_no(IS_ALDERLAKE_P(i915) ||
+ DISPLAY_VER(i915) >= 14));
seq_printf(m, "Pipe B fw loaded: %s\n",
str_yes_no(dmc->dmc_info[DMC_FW_PIPEB].payload));
@@ -1081,22 +1082,19 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
DMC_VERSION_MINOR(dmc->version));
if (DISPLAY_VER(i915) >= 12) {
- if (IS_DGFX(i915)) {
+ i915_reg_t dc3co_reg;
+
+ if (IS_DGFX(i915) || DISPLAY_VER(i915) >= 14) {
+ dc3co_reg = DG1_DMC_DEBUG3;
dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
} else {
+ dc3co_reg = TGL_DMC_DEBUG3;
dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
}
- /*
- * NOTE: DMC_DEBUG3 is a general purpose reg.
- * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
- * reg for DC3CO debugging and validation,
- * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
- */
seq_printf(m, "DC3CO count: %d\n",
- intel_de_read(i915, IS_DGFX(i915) ?
- DG1_DMC_DEBUG3 : TGL_DMC_DEBUG3));
+ intel_de_read(i915, dc3co_reg));
} else {
dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT :
SKL_DMC_DC3_DC5_COUNT;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index c9be61d2348e..7400d6b4c587 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -2306,6 +2306,7 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ bool fastset = true;
/*
* If BIOS has set an unsupported or non-standard link rate for some
@@ -2313,9 +2314,10 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
*/
if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
crtc_state->port_clock) < 0) {
- drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n");
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to unsupported link rate\n",
+ encoder->base.base.id, encoder->base.name);
crtc_state->uapi.connectors_changed = true;
- return false;
+ fastset = false;
}
/*
@@ -2326,18 +2328,20 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
* Remove once we have readout for DSC.
*/
if (crtc_state->dsc.compression_enable) {
- drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n");
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to DSC being enabled\n",
+ encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
- return false;
+ fastset = false;
}
if (CAN_PSR(intel_dp)) {
- drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute PSR state\n",
+ encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
- return false;
+ fastset = false;
}
- return true;
+ return fastset;
}
static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
@@ -2686,7 +2690,6 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
str_enable_disable(tmp));
}
-
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
u8 dprx = 0;
@@ -3957,6 +3960,8 @@ intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);
+ intel_dp->frl.is_trained = false;
+
/* Restart FRL training or fall back to TMDS mode */
intel_dp_check_frl_training(intel_dp);
}
@@ -5172,19 +5177,6 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
if (has_gamut_metadata_dip(dev_priv, port))
drm_connector_attach_hdr_output_metadata_property(connector);
- if (intel_dp_is_edp(intel_dp)) {
- u32 allowed_scalers;
-
- allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
- if (!HAS_GMCH(dev_priv))
- allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
-
- drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
-
- connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
-
- }
-
if (HAS_VRR(dev_priv))
drm_connector_attach_vrr_capable_property(connector);
}
@@ -5197,8 +5189,7 @@ intel_edp_add_properties(struct intel_dp *intel_dp)
const struct drm_display_mode *fixed_mode =
intel_panel_preferred_fixed_mode(connector);
- if (!fixed_mode)
- return;
+ intel_attach_scaling_mode_property(&connector->base);
drm_connector_set_panel_orientation_with_quirk(&connector->base,
i915->display.vbt.orientation,
@@ -5206,16 +5197,43 @@ intel_edp_add_properties(struct intel_dp *intel_dp)
fixed_mode->vdisplay);
}
+static void intel_edp_backlight_setup(struct intel_dp *intel_dp,
+ struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ enum pipe pipe = INVALID_PIPE;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ /*
+ * Figure out the current pipe for the initial backlight setup.
+ * If the current pipe isn't valid, try the PPS pipe, and if that
+ * fails just assume pipe A.
+ */
+ pipe = vlv_active_pipe(intel_dp);
+
+ if (pipe != PIPE_A && pipe != PIPE_B)
+ pipe = intel_dp->pps.pps_pipe;
+
+ if (pipe != PIPE_A && pipe != PIPE_B)
+ pipe = PIPE_A;
+
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] using pipe %c for initial backlight setup\n",
+ connector->base.base.id, connector->base.name,
+ pipe_name(pipe));
+ }
+
+ intel_backlight_setup(connector, pipe);
+}
+
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector = &intel_connector->base;
struct drm_display_mode *fixed_mode;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool has_dpcd;
- enum pipe pipe = INVALID_PIPE;
struct edid *edid;
if (!intel_dp_is_edp(intel_dp))
@@ -5228,7 +5246,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* with an already powered-on LVDS power sequencer.
*/
if (intel_get_lvds_encoder(dev_priv)) {
- drm_WARN_ON(dev,
+ drm_WARN_ON(&dev_priv->drm,
!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
drm_info(&dev_priv->drm,
"LVDS was detected, not registering eDP\n");
@@ -5244,11 +5262,12 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!has_dpcd) {
/* if this fails, presume the device is a ghost */
drm_info(&dev_priv->drm,
- "failed to retrieve link info, disabling eDP\n");
+ "[ENCODER:%d:%s] failed to retrieve link info, disabling eDP\n",
+ encoder->base.base.id, encoder->base.name);
goto out_vdd_off;
}
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev_priv->drm.mode_config.mutex);
edid = drm_get_edid(connector, &intel_dp->aux.ddc);
if (!edid) {
/* Fallback to EDID from ACPI OpRegion, if any */
@@ -5273,9 +5292,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_bios_init_panel(dev_priv, &intel_connector->panel,
encoder->devdata, IS_ERR(edid) ? NULL : edid);
- intel_panel_add_edid_fixed_modes(intel_connector,
- intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE,
- intel_vrr_is_capable(intel_connector));
+ intel_panel_add_edid_fixed_modes(intel_connector, true);
/* MSO requires information from the EDID */
intel_edp_mso_init(intel_dp);
@@ -5288,30 +5305,18 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!intel_panel_preferred_fixed_mode(intel_connector))
intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev_priv->drm.mode_config.mutex);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- /*
- * Figure out the current pipe for the initial backlight setup.
- * If the current pipe isn't valid, try the PPS pipe, and if that
- * fails just assume pipe A.
- */
- pipe = vlv_active_pipe(intel_dp);
-
- if (pipe != PIPE_A && pipe != PIPE_B)
- pipe = intel_dp->pps.pps_pipe;
-
- if (pipe != PIPE_A && pipe != PIPE_B)
- pipe = PIPE_A;
-
- drm_dbg_kms(&dev_priv->drm,
- "using pipe %c for initial backlight setup\n",
- pipe_name(pipe));
+ if (!intel_panel_preferred_fixed_mode(intel_connector)) {
+ drm_info(&dev_priv->drm,
+ "[ENCODER:%d:%s] failed to find fixed mode for the panel, disabling eDP\n",
+ encoder->base.base.id, encoder->base.name);
+ goto out_vdd_off;
}
intel_panel_init(intel_connector);
- intel_backlight_setup(intel_connector, pipe);
+ intel_edp_backlight_setup(intel_dp, intel_connector);
intel_edp_add_properties(intel_dp);
@@ -5413,7 +5418,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
if (!HAS_GMCH(dev_priv))
connector->interlace_allowed = true;
- connector->doublescan_allowed = 0;
intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 88689124c013..35360dd543ac 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -19,28 +19,20 @@
#include "intel_hdcp.h"
#include "intel_hdcp_regs.h"
-static unsigned int transcoder_to_stream_enc_status(enum transcoder cpu_transcoder)
+static u32 transcoder_to_stream_enc_status(enum transcoder cpu_transcoder)
{
- u32 stream_enc_mask;
-
switch (cpu_transcoder) {
case TRANSCODER_A:
- stream_enc_mask = HDCP_STATUS_STREAM_A_ENC;
- break;
+ return HDCP_STATUS_STREAM_A_ENC;
case TRANSCODER_B:
- stream_enc_mask = HDCP_STATUS_STREAM_B_ENC;
- break;
+ return HDCP_STATUS_STREAM_B_ENC;
case TRANSCODER_C:
- stream_enc_mask = HDCP_STATUS_STREAM_C_ENC;
- break;
+ return HDCP_STATUS_STREAM_C_ENC;
case TRANSCODER_D:
- stream_enc_mask = HDCP_STATUS_STREAM_D_ENC;
- break;
+ return HDCP_STATUS_STREAM_D_ENC;
default:
- stream_enc_mask = 0;
+ return 0;
}
-
- return stream_enc_mask;
}
static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 03604a37931c..cd4e61026d98 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -793,7 +793,35 @@ static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
return false;
}
-static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop)
+static int intel_dp_mst_add_properties(struct intel_dp *intel_dp,
+ struct drm_connector *connector,
+ const char *pathprop)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+
+ drm_object_attach_property(&connector->base,
+ i915->drm.mode_config.path_property, 0);
+ drm_object_attach_property(&connector->base,
+ i915->drm.mode_config.tile_property, 0);
+
+ intel_attach_force_audio_property(connector);
+ intel_attach_broadcast_rgb_property(connector);
+
+ /*
+ * Reuse the prop from the SST connector because we're
+ * not allowed to create new props after device registration.
+ */
+ connector->max_bpc_property =
+ intel_dp->attached_connector->base.max_bpc_property;
+ if (connector->max_bpc_property)
+ drm_connector_attach_max_bpc_property(connector, 6, 12);
+
+ return drm_connector_set_path_property(connector, pathprop);
+}
+
+static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ const char *pathprop)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -833,28 +861,14 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
goto err;
}
- drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
- drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
-
- ret = drm_connector_set_path_property(connector, pathprop);
+ ret = intel_dp_mst_add_properties(intel_dp, connector, pathprop);
if (ret)
goto err;
- intel_attach_force_audio_property(connector);
- intel_attach_broadcast_rgb_property(connector);
-
ret = intel_dp_hdcp_init(dig_port, intel_connector);
if (ret)
drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
connector->name, connector->base.id);
- /*
- * Reuse the prop from the SST connector because we're
- * not allowed to create new props after device registration.
- */
- connector->max_bpc_property =
- intel_dp->attached_connector->base.max_bpc_property;
- if (connector->max_bpc_property)
- drm_connector_attach_max_bpc_property(connector, 6, 12);
return connector;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index e5fb66a5dd02..7c6c094a0a01 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -25,12 +25,14 @@
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dkl_phy.h"
+#include "intel_dkl_phy_regs.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
+#include "intel_mg_phy_regs.h"
#include "intel_pch_refclk.h"
#include "intel_tc.h"
-#include "intel_tc_phy_regs.h"
/**
* DOC: Display PLLs
@@ -152,28 +154,6 @@ intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
return &dev_priv->display.dpll.shared_dplls[id];
}
-/**
- * intel_get_shared_dpll_id - get the id of a DPLL
- * @dev_priv: i915 device instance
- * @pll: the DPLL
- *
- * Returns:
- * The id of @pll
- */
-enum intel_dpll_id
-intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
-{
- long pll_idx = pll - dev_priv->display.dpll.shared_dplls;
-
- if (drm_WARN_ON(&dev_priv->drm,
- pll_idx < 0 ||
- pll_idx >= dev_priv->display.dpll.num_shared_dpll))
- return -1;
-
- return pll_idx;
-}
-
/* For ILK+ */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
@@ -384,20 +364,30 @@ intel_reference_shared_dpll(struct intel_atomic_state *state,
if (shared_dpll[id].pipe_mask == 0)
shared_dpll[id].hw_state = *pll_state;
- drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
- pipe_name(crtc->pipe));
+ drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) != 0);
shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
+
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
+ crtc->base.base.id, crtc->base.name, pll->info->name);
}
static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
const struct intel_crtc *crtc,
const struct intel_shared_dpll *pll)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_shared_dpll_state *shared_dpll;
+ const enum intel_dpll_id id = pll->info->id;
shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
- shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
+
+ drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) == 0);
+
+ shared_dpll[id].pipe_mask &= ~BIT(crtc->pipe);
+
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
+ crtc->base.base.id, crtc->base.name, pll->info->name);
}
static void intel_put_dpll(struct intel_atomic_state *state,
@@ -708,8 +698,6 @@ struct hsw_wrpll_rnp {
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
- unsigned budget;
-
switch (clock) {
case 25175000:
case 25200000:
@@ -742,21 +730,18 @@ static unsigned hsw_wrpll_get_budget_for_freq(int clock)
case 222750000:
case 296703000:
case 297000000:
- budget = 0;
- break;
+ return 0;
case 233500000:
case 245250000:
case 247750000:
case 253250000:
case 298000000:
- budget = 1500;
- break;
+ return 1500;
case 169128000:
case 169500000:
case 179500000:
case 202000000:
- budget = 2000;
- break;
+ return 2000;
case 256250000:
case 262500000:
case 270000000:
@@ -766,18 +751,13 @@ static unsigned hsw_wrpll_get_budget_for_freq(int clock)
case 281250000:
case 286000000:
case 291750000:
- budget = 4000;
- break;
+ return 4000;
case 267250000:
case 268500000:
- budget = 5000;
- break;
+ return 5000;
default:
- budget = 1000;
- break;
+ return 1000;
}
-
- return budget;
}
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
@@ -3508,15 +3488,12 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
* All registers read here have the same HIP_INDEX_REG even though
* they are on different building blocks
*/
- intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
- HIP_INDEX_VAL(tc_port, 0x2));
-
- hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
- DKL_REFCLKIN_CTL(tc_port));
+ hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv,
+ DKL_REFCLKIN_CTL(tc_port));
hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
hw_state->mg_clktop2_hsclkctl =
- intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
+ intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
hw_state->mg_clktop2_hsclkctl &=
MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
@@ -3524,32 +3501,32 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
hw_state->mg_clktop2_coreclkctl1 =
- intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
hw_state->mg_clktop2_coreclkctl1 &=
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
- hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port));
val = DKL_PLL_DIV0_MASK;
if (dev_priv->display.vbt.override_afc_startup)
val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
hw_state->mg_pll_div0 &= val;
- hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
+ hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
- hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
+ hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
DKL_PLL_SSC_STEP_LEN_MASK |
DKL_PLL_SSC_STEP_NUM_MASK |
DKL_PLL_SSC_EN);
- hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
+ hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
DKL_PLL_BIAS_FBDIV_FRAC_MASK);
hw_state->mg_pll_tdc_coldst_bias =
- intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
@@ -3737,61 +3714,58 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv,
* All registers programmed here have the same HIP_INDEX_REG even
* though on different building block
*/
- intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
- HIP_INDEX_VAL(tc_port, 0x2));
-
/* All the registers are RMW */
- val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
+ val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
val |= hw_state->mg_refclkin_ctl;
- intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
+ intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
- val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
val |= hw_state->mg_clktop2_coreclkctl1;
- intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
+ intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
- val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
+ val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
val |= hw_state->mg_clktop2_hsclkctl;
- intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
+ intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
val = DKL_PLL_DIV0_MASK;
if (dev_priv->display.vbt.override_afc_startup)
val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
- intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
- hw_state->mg_pll_div0);
+ intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
+ hw_state->mg_pll_div0);
- val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
+ val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
val |= hw_state->mg_pll_div1;
- intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
+ intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
- val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
+ val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
DKL_PLL_SSC_STEP_LEN_MASK |
DKL_PLL_SSC_STEP_NUM_MASK |
DKL_PLL_SSC_EN);
val |= hw_state->mg_pll_ssc;
- intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);
+ intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), val);
- val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
+ val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
DKL_PLL_BIAS_FBDIV_FRAC_MASK);
val |= hw_state->mg_pll_bias;
- intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
+ intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
- val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
val |= hw_state->mg_pll_tdc_coldst_bias;
- intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
+ intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
- intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
@@ -4193,6 +4167,8 @@ void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
const struct dpll_info *dpll_info;
int i;
+ mutex_init(&dev_priv->display.dpll.lock);
+
if (IS_DG2(dev_priv))
/* No shared DPLLs on DG2; port PLLs are part of the PHY */
dpll_mgr = NULL;
@@ -4237,7 +4213,6 @@ void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
dev_priv->display.dpll.mgr = dpll_mgr;
dev_priv->display.dpll.num_shared_dpll = i;
- mutex_init(&dev_priv->display.dpll.lock);
}
/**
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 3247dc300ae4..3854f1b4299a 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -328,9 +328,6 @@ struct intel_shared_dpll {
struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
enum intel_dpll_id id);
-enum intel_dpll_id
-intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
bool state);
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 7da4a9cbe4ba..e27408efaae2 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -284,16 +284,124 @@ void intel_drrs_flush(struct drm_i915_private *dev_priv,
}
/**
- * intel_crtc_drrs_init - Init DRRS for CRTC
+ * intel_drrs_crtc_init - Init DRRS for CRTC
* @crtc: crtc
*
* This function is called only once at driver load to initialize basic
* DRRS stuff.
*
*/
-void intel_crtc_drrs_init(struct intel_crtc *crtc)
+void intel_drrs_crtc_init(struct intel_crtc *crtc)
{
INIT_DELAYED_WORK(&crtc->drrs.work, intel_drrs_downclock_work);
mutex_init(&crtc->drrs.mutex);
crtc->drrs.cpu_transcoder = INVALID_TRANSCODER;
}
+
+static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
+{
+ struct intel_crtc *crtc = m->private;
+ const struct intel_crtc_state *crtc_state;
+ int ret;
+
+ ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ mutex_lock(&crtc->drrs.mutex);
+
+ seq_printf(m, "DRRS enabled: %s\n",
+ str_yes_no(crtc_state->has_drrs));
+
+ seq_printf(m, "DRRS active: %s\n",
+ str_yes_no(intel_drrs_is_active(crtc)));
+
+ seq_printf(m, "DRRS refresh rate: %s\n",
+ crtc->drrs.refresh_rate == DRRS_REFRESH_RATE_LOW ?
+ "low" : "high");
+
+ seq_printf(m, "DRRS busy frontbuffer bits: 0x%x\n",
+ crtc->drrs.busy_frontbuffer_bits);
+
+ mutex_unlock(&crtc->drrs.mutex);
+
+ drm_modeset_unlock(&crtc->base.mutex);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(intel_drrs_debugfs_status);
+
+static int intel_drrs_debugfs_ctl_set(void *data, u64 val)
+{
+ struct intel_crtc *crtc = data;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state;
+ struct drm_crtc_commit *commit;
+ int ret;
+
+ ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (!crtc_state->hw.active ||
+ !crtc_state->has_drrs)
+ goto out;
+
+ commit = crtc_state->uapi.commit;
+ if (commit) {
+ ret = wait_for_completion_interruptible(&commit->hw_done);
+ if (ret)
+ goto out;
+ }
+
+ drm_dbg(&i915->drm,
+ "Manually %sactivating DRRS\n", val ? "" : "de");
+
+ if (val)
+ intel_drrs_activate(crtc_state);
+ else
+ intel_drrs_deactivate(crtc_state);
+
+out:
+ drm_modeset_unlock(&crtc->base.mutex);
+
+ return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(intel_drrs_debugfs_ctl_fops,
+ NULL, intel_drrs_debugfs_ctl_set, "%llu\n");
+
+void intel_drrs_crtc_debugfs_add(struct intel_crtc *crtc)
+{
+ debugfs_create_file("i915_drrs_status", 0444, crtc->base.debugfs_entry,
+ crtc, &intel_drrs_debugfs_status_fops);
+
+ debugfs_create_file("i915_drrs_ctl", 0644, crtc->base.debugfs_entry,
+ crtc, &intel_drrs_debugfs_ctl_fops);
+}
+
+static int intel_drrs_debugfs_type_show(struct seq_file *m, void *unused)
+{
+ struct intel_connector *connector = m->private;
+
+ seq_printf(m, "DRRS type: %s\n",
+ intel_drrs_type_str(intel_panel_drrs_type(connector)));
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(intel_drrs_debugfs_type);
+
+void intel_drrs_connector_debugfs_add(struct intel_connector *connector)
+{
+ if (intel_panel_drrs_type(connector) == DRRS_TYPE_NONE)
+ return;
+
+ debugfs_create_file("i915_drrs_type", 0444, connector->base.debugfs_entry,
+ connector, &intel_drrs_debugfs_type_fops);
+}
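The per-CRTC control node reuses the kernel's simple-attribute machinery; as a reminder of how a write to such a node reaches the set callback, here is a minimal sketch with hypothetical names:

/* Hypothetical module-local state and setter */
static u64 demo_val;

static int demo_set(void *data, u64 val)
{
	demo_val = val;		/* invoked for each write, after "%llu" parsing */
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(demo_fops, NULL, demo_set, "%llu\n");

/* registration: debugfs_create_file("demo", 0644, parent, NULL, &demo_fops); */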
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.h b/drivers/gpu/drm/i915/display/intel_drrs.h
index 3ad1be1ad9c1..8ef5f93a80ff 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.h
+++ b/drivers/gpu/drm/i915/display/intel_drrs.h
@@ -23,6 +23,8 @@ void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
void intel_drrs_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
-void intel_crtc_drrs_init(struct intel_crtc *crtc);
+void intel_drrs_crtc_init(struct intel_crtc *crtc);
+void intel_drrs_crtc_debugfs_add(struct intel_crtc *crtc);
+void intel_drrs_connector_debugfs_add(struct intel_connector *connector);
#endif /* __INTEL_DRRS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 5572e43026e4..595087288922 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -491,8 +491,8 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
intel_encoder->pipe_mask = ~0;
if (dvo->type != INTEL_DVO_CHIP_LVDS)
- intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
- (1 << INTEL_OUTPUT_DVO);
+ intel_encoder->cloneable = BIT(INTEL_OUTPUT_ANALOG) |
+ BIT(INTEL_OUTPUT_DVO);
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
@@ -515,8 +515,6 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
drm_connector_helper_add(connector,
&intel_dvo_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
intel_connector_attach_encoder(intel_connector, intel_encoder);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index eefa33c555ac..63137ae5ab21 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -301,6 +301,19 @@ static bool plane_caps_contain_all(u8 caps, u8 mask)
}
/**
+ * intel_fb_is_tiled_modifier: Check if a modifier is a tiled modifier type
+ * @modifier: Modifier to check
+ *
+ * Returns:
+ * Returns %true if @modifier is a tiled modifier.
+ */
+bool intel_fb_is_tiled_modifier(u64 modifier)
+{
+ return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps,
+ INTEL_PLANE_CAP_TILING_MASK);
+}
+
+/**
* intel_fb_is_ccs_modifier: Check if a modifier is a CCS modifier type
* @modifier: Modifier to check
*
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index 12386f13a4e0..4662b812b934 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -29,6 +29,7 @@ struct intel_plane_state;
#define INTEL_PLANE_CAP_TILING_Yf BIT(5)
#define INTEL_PLANE_CAP_TILING_4 BIT(6)
+bool intel_fb_is_tiled_modifier(u64 modifier);
bool intel_fb_is_ccs_modifier(u64 modifier);
bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier);
bool intel_fb_is_mc_ccs_modifier(u64 modifier);
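A hedged usage sketch for the new predicate; the caller and its policy are hypothetical, only the helper and the drm_framebuffer field are real:

/* Hypothetical predicate built on the new helper */
static bool demo_fb_needs_tiling_path(const struct drm_framebuffer *fb)
{
	/* true for X/Y/Yf/4-tiled modifiers, false for linear */
	return intel_fb_is_tiled_modifier(fb->modifier);
}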
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index f38175304928..3f24f326b989 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -670,6 +670,7 @@ static void intel_fbc_nuke(struct intel_fbc *fbc)
{
struct drm_i915_private *i915 = fbc->i915;
+ lockdep_assert_held(&fbc->lock);
drm_WARN_ON(&i915->drm, fbc->flip_pending);
trace_intel_fbc_nuke(fbc->state.plane);
@@ -679,6 +680,8 @@ static void intel_fbc_nuke(struct intel_fbc *fbc)
static void intel_fbc_activate(struct intel_fbc *fbc)
{
+ lockdep_assert_held(&fbc->lock);
+
intel_fbc_hw_activate(fbc);
intel_fbc_nuke(fbc);
@@ -687,9 +690,7 @@ static void intel_fbc_activate(struct intel_fbc *fbc)
static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason)
{
- struct drm_i915_private *i915 = fbc->i915;
-
- drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
+ lockdep_assert_held(&fbc->lock);
if (fbc->active)
intel_fbc_hw_deactivate(fbc);
@@ -1009,7 +1010,8 @@ static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- /* The use of a CPU fence is one of two ways to detect writes by the
+ /*
+ * The use of a CPU fence is one of two ways to detect writes by the
* CPU to the scanout and trigger updates to the FBC.
*
* The other method is by software tracking (see
@@ -1019,12 +1021,6 @@ static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
* Note that it is possible for a tiled surface to be unmappable (and
* so have no fence associated with it) due to aperture constraints
* at the time of pinning.
- *
- * FIXME with 90/270 degree rotation we should use the fence on
- * the normal GTT view (the rotated view doesn't even have a
- * fence). Would need changes to the FBC fence Y offset as well.
- * For now this will effectively disable FBC with 90/270 degree
- * rotation.
*/
return DISPLAY_VER(i915) >= 9 ||
(plane_state->flags & PLANE_HAS_FENCE &&
@@ -1227,6 +1223,8 @@ static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
struct intel_fbc *fbc = plane->fbc;
bool need_vblank_wait = false;
+ lockdep_assert_held(&fbc->lock);
+
fbc->flip_pending = true;
if (intel_fbc_can_flip_nuke(state, crtc, plane))
@@ -1284,7 +1282,7 @@ static void __intel_fbc_disable(struct intel_fbc *fbc)
struct drm_i915_private *i915 = fbc->i915;
struct intel_plane *plane = fbc->state.plane;
- drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
+ lockdep_assert_held(&fbc->lock);
drm_WARN_ON(&i915->drm, fbc->active);
drm_dbg_kms(&i915->drm, "Disabling FBC on [PLANE:%d:%s]\n",
@@ -1299,9 +1297,9 @@ static void __intel_fbc_disable(struct intel_fbc *fbc)
static void __intel_fbc_post_update(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ lockdep_assert_held(&fbc->lock);
- drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
+ fbc->flip_pending = false;
if (!fbc->busy_bits)
intel_fbc_activate(fbc);
@@ -1324,10 +1322,8 @@ void intel_fbc_post_update(struct intel_atomic_state *state,
mutex_lock(&fbc->lock);
- if (fbc->state.plane == plane) {
- fbc->flip_pending = false;
+ if (fbc->state.plane == plane)
__intel_fbc_post_update(fbc);
- }
mutex_unlock(&fbc->lock);
}
@@ -1437,6 +1433,8 @@ static void __intel_fbc_enable(struct intel_atomic_state *state,
intel_atomic_get_new_plane_state(state, plane);
struct intel_fbc *fbc = plane->fbc;
+ lockdep_assert_held(&fbc->lock);
+
if (fbc->state.plane) {
if (fbc->state.plane != plane)
return;
@@ -1522,7 +1520,8 @@ void intel_fbc_update(struct intel_atomic_state *state,
mutex_lock(&fbc->lock);
- if (crtc_state->update_pipe && plane_state->no_fbc_reason) {
+ if (intel_crtc_needs_fastset(crtc_state) &&
+ plane_state->no_fbc_reason) {
if (fbc->state.plane == plane)
__intel_fbc_disable(fbc);
} else {
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 112aa0447a0d..ab385d18ddcc 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -175,7 +175,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
}
if (IS_ERR(obj)) {
- drm_err(&dev_priv->drm, "failed to allocate framebuffer\n");
+ drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj);
return PTR_ERR(obj);
}
@@ -256,7 +256,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
- drm_err(&dev_priv->drm, "Failed to allocate fb_info\n");
+ drm_err(&dev_priv->drm, "Failed to allocate fb_info (%pe)\n", info);
ret = PTR_ERR(info);
goto out_unpin;
}
@@ -291,7 +291,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
drm_err(&dev_priv->drm,
- "Failed to remap framebuffer into virtual memory\n");
+ "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
ret = PTR_ERR(vaddr);
goto out_unpin;
}
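The switch to "%pe" makes the error pointer itself part of the message: %pe prints the symbolic errno name of an ERR_PTR-encoded value. A minimal sketch, with the surrounding function hypothetical:

#include <linux/err.h>
#include <linux/printk.h>

static void demo_report(void)
{
	void *obj = ERR_PTR(-ENOMEM);	/* stand-in for a failed allocation */

	if (IS_ERR(obj))
		pr_err("failed to allocate framebuffer (%pe)\n", obj);	/* logs "(-ENOMEM)" */
}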
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 7816b2a33fee..02f8374ea51f 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -2355,7 +2355,7 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
}
static void
-intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
+intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
@@ -2371,16 +2371,10 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
* CONFIG1 pin, but no such luck on our hardware.
*
* The only method left to us is to check the VBT to see
- * if the port is a dual mode capable DP port. But let's
- * only do that when we sucesfully read the EDID, to avoid
- * confusing log messages about DP dual mode adaptors when
- * there's nothing connected to the port.
+ * if the port is a dual mode capable DP port.
*/
if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
- /* An overridden EDID imply that we want this port for testing.
- * Make sure not to set limits for that port.
- */
- if (has_edid && !connector->override_edid &&
+ if (!connector->force &&
intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
drm_dbg_kms(&dev_priv->drm,
"Assuming DP dual mode adaptor presence based on VBT\n");
@@ -2435,18 +2429,18 @@ intel_hdmi_set_edid(struct drm_connector *connector)
intel_gmbus_force_bit(i2c, false);
}
- intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
-
to_intel_connector(connector)->detect_edid = edid;
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ intel_hdmi_dp_dual_mode_detect(connector);
+
connected = true;
}
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
+
cec_notifier_set_phys_addr_from_edid(intel_hdmi->cec_notifier, edid);
return connected;
@@ -2956,9 +2950,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
ddc);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
- connector->interlace_allowed = 1;
- connector->doublescan_allowed = 0;
- connector->stereo_allowed = 1;
+ connector->interlace_allowed = true;
+ connector->stereo_allowed = true;
if (DISPLAY_VER(dev_priv) >= 10)
connector->ycbcr_420_allowed = true;
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index f7a2f485b177..907ab7526cb4 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -90,6 +90,9 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
+/* Threshold == 5 for long IRQs, 50 for short */
+#define HPD_STORM_DEFAULT_THRESHOLD 50
+
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
#define HPD_RETRY_DELAY 1000
@@ -175,14 +178,13 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
bool hpd_disabled = false;
lockdep_assert_held(&dev_priv->irq_lock);
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@@ -208,7 +210,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
- drm_kms_helper_poll_enable(dev);
+ drm_kms_helper_poll_enable(&dev_priv->drm);
mod_delayed_work(system_wq, &dev_priv->display.hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
@@ -219,7 +221,6 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
display.hotplug.reenable_work.work);
- struct drm_device *dev = &dev_priv->drm;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
intel_wakeref_t wakeref;
@@ -229,7 +230,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
@@ -367,14 +368,13 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
display.hotplug.hotplug_work.work);
- struct drm_device *dev = &dev_priv->drm;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
u32 changed = 0, retry = 0;
u32 hpd_event_bits;
u32 hpd_retry_bits;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev_priv->drm.mode_config.mutex);
drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");
spin_lock_irq(&dev_priv->irq_lock);
@@ -389,7 +389,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
spin_unlock_irq(&dev_priv->irq_lock);
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
u32 hpd_bit;
@@ -426,10 +426,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
}
}
drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev_priv->drm.mode_config.mutex);
if (changed)
- drm_kms_helper_hotplug_event(dev);
+ drm_kms_helper_hotplug_event(&dev_priv->drm);
/* Remove shared HPD pins that have changed */
retry &= ~changed;
@@ -612,16 +612,15 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
display.hotplug.poll_init_work);
- struct drm_device *dev = &dev_priv->drm;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
bool enabled;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev_priv->drm.mode_config.mutex);
enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled);
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@@ -638,16 +637,16 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
drm_connector_list_iter_end(&conn_iter);
if (enabled)
- drm_kms_helper_poll_enable(dev);
+ drm_kms_helper_poll_enable(&dev_priv->drm);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev_priv->drm.mode_config.mutex);
/*
* We might have missed hotplug events that occurred while we were
* in the middle of disabling polling
*/
if (!enabled)
- drm_helper_hpd_irq_event(dev);
+ drm_helper_hpd_irq_event(&dev_priv->drm);
}
/**
@@ -711,14 +710,23 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
schedule_work(&dev_priv->display.hotplug.poll_init_work);
}
-void intel_hpd_init_work(struct drm_i915_private *dev_priv)
+void intel_hpd_init_early(struct drm_i915_private *i915)
{
- INIT_DELAYED_WORK(&dev_priv->display.hotplug.hotplug_work,
+ INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work,
i915_hotplug_work_func);
- INIT_WORK(&dev_priv->display.hotplug.dig_port_work, i915_digport_work_func);
- INIT_WORK(&dev_priv->display.hotplug.poll_init_work, i915_hpd_poll_init_work);
- INIT_DELAYED_WORK(&dev_priv->display.hotplug.reenable_work,
+ INIT_WORK(&i915->display.hotplug.dig_port_work, i915_digport_work_func);
+ INIT_WORK(&i915->display.hotplug.poll_init_work, i915_hpd_poll_init_work);
+ INIT_DELAYED_WORK(&i915->display.hotplug.reenable_work,
intel_hpd_irq_storm_reenable_work);
+
+ i915->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ /* If we have MST support, we want to avoid doing short HPD IRQ storm
+ * detection, as short HPD storms will occur as a natural part of
+ * sideband messaging with MST.
+ * On older platforms however, IRQ storms can occur with both long and
+ * short pulses, as seen on some G4x systems.
+ */
+ i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915);
}
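
For context, the storm machinery these work items feed is threshold counting: HPD events per pin are tallied inside a short detection window and the pin is declared stormy once the tally passes hpd_storm_threshold. Below is a minimal userspace sketch of that idea, not the driver's code; the one-second window and the numbers are assumptions for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Assumed one-second detection window, for illustration only. */
#define STORM_WINDOW_SEC 1

struct pin_stats {
	time_t window_start;
	unsigned int count;
};

/* True once more than 'threshold' events land in one window;
 * a threshold of 0 disables detection entirely. */
static bool storm_detect(struct pin_stats *s, unsigned int threshold)
{
	time_t now = time(NULL);

	if (now - s->window_start > STORM_WINDOW_SEC) {
		s->window_start = now;	/* stale window: restart the count */
		s->count = 0;
	}
	s->count++;

	return threshold && s->count > threshold;
}

int main(void)
{
	struct pin_stats pin = { .window_start = time(NULL) };

	for (int i = 0; i < 6; i++)
		if (storm_detect(&pin, 5))
			printf("storm on event %d: fall back to polling\n", i + 1);
	return 0;
}
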
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
@@ -767,3 +775,169 @@ void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
spin_unlock_irq(&dev_priv->irq_lock);
}
+
+static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+
+ /* Synchronize with everything first in case there's been an HPD
+ * storm that we haven't finished handling in the kernel yet
+ */
+ intel_synchronize_irq(dev_priv);
+ flush_work(&dev_priv->display.hotplug.dig_port_work);
+ flush_delayed_work(&dev_priv->display.hotplug.hotplug_work);
+
+ seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
+ seq_printf(m, "Detected: %s\n",
+ str_yes_no(delayed_work_pending(&hotplug->reenable_work)));
+
+ return 0;
+}
+
+static ssize_t i915_hpd_storm_ctl_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ unsigned int new_threshold;
+ int i;
+ char *newline;
+ char tmp[16];
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ /* Strip newline, if any */
+ newline = strchr(tmp, '\n');
+ if (newline)
+ *newline = '\0';
+
+ if (strcmp(tmp, "reset") == 0)
+ new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ else if (kstrtouint(tmp, 10, &new_threshold) != 0)
+ return -EINVAL;
+
+ if (new_threshold > 0)
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting HPD storm detection threshold to %d\n",
+ new_threshold);
+ else
+ drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ hotplug->hpd_storm_threshold = new_threshold;
+ /* Reset the HPD storm stats so we don't accidentally trigger a storm */
+ for_each_hpd_pin(i)
+ hotplug->stats[i].count = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* Re-enable hpd immediately if we were in an irq storm */
+ flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
+
+ return len;
+}
+
+static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
+}
+
+static const struct file_operations i915_hpd_storm_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_hpd_storm_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
.write = i915_hpd_storm_ctl_write,
+};
+
+static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+
+ seq_printf(m, "Enabled: %s\n",
+ str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled));
+
+ return 0;
+}
+
+static int
+i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_hpd_short_storm_ctl_show,
+ inode->i_private);
+}
+
+static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ char *newline;
+ char tmp[16];
+ int i;
+ bool new_state;
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ /* Strip newline, if any */
+ newline = strchr(tmp, '\n');
+ if (newline)
+ *newline = '\0';
+
+ /* Reset to the "default" state for this system */
+ if (strcmp(tmp, "reset") == 0)
+ new_state = !HAS_DP_MST(dev_priv);
+ else if (kstrtobool(tmp, &new_state) != 0)
+ return -EINVAL;
+
+ drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
+ new_state ? "En" : "Dis");
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ hotplug->hpd_short_storm_enabled = new_state;
+ /* Reset the HPD storm stats so we don't accidentally trigger a storm */
+ for_each_hpd_pin(i)
+ hotplug->stats[i].count = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* Re-enable hpd immediately if we were in an irq storm */
+ flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
+
+ return len;
+}
+
+static const struct file_operations i915_hpd_short_storm_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_hpd_short_storm_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_hpd_short_storm_ctl_write,
+};
+
+void intel_hpd_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root,
+ i915, &i915_hpd_storm_ctl_fops);
+ debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
+ i915, &i915_hpd_short_storm_ctl_fops);
+}
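
As a usage illustration for the two files registered here, a userspace program can write "reset" or a number to them, per the write handlers above; the DRM minor index 0 in the path is an assumption about the target system.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path: debugfs mounted, i915 on DRM minor 0. */
	const char *path = "/sys/kernel/debug/dri/0/i915_hpd_storm_ctl";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* "reset" restores HPD_STORM_DEFAULT_THRESHOLD; "0" disables detection. */
	if (write(fd, "reset\n", strlen("reset\n")) < 0)
		perror("write");
	close(fd);
	return 0;
}
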
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index b87e95d606e6..424ae5dbf5a0 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -22,11 +22,12 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port);
void intel_hpd_init(struct drm_i915_private *dev_priv);
-void intel_hpd_init_work(struct drm_i915_private *dev_priv);
+void intel_hpd_init_early(struct drm_i915_private *i915);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
enum port port);
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
+void intel_hpd_debugfs_register(struct drm_i915_private *i915);
#endif /* __INTEL_HOTPLUG_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 389ccdc46a1e..22ca8754ea96 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -80,8 +80,7 @@
static struct platform_device *
lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct platform_device_info pinfo = {};
struct resource *rsc;
struct platform_device *platdev;
@@ -108,7 +107,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
rsc[1].flags = IORESOURCE_MEM;
rsc[1].name = "hdmi-lpe-audio-mmio";
- pinfo.parent = dev->dev;
+ pinfo.parent = dev_priv->drm.dev;
pinfo.name = "hdmi-lpe-audio";
pinfo.id = -1;
pinfo.res = rsc;
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 9aa38e8141b5..246787bbf5ef 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -78,9 +78,9 @@ struct intel_lvds_encoder {
struct intel_connector *attached_connector;
};
-static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
+static struct intel_lvds_encoder *to_lvds_encoder(struct intel_encoder *encoder)
{
- return container_of(encoder, struct intel_lvds_encoder, base.base);
+ return container_of(encoder, struct intel_lvds_encoder, base);
}
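
The narrowed cast works because container_of() is plain pointer arithmetic: subtract the member's offset from the member pointer. A self-contained re-creation with stand-in types (simplified macro, no type checking), showing why taking the intel_encoder directly still recovers the wrapper:

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct encoder_demo { int id; };	/* stand-in for intel_encoder */
struct lvds_demo {
	struct encoder_demo base;	/* embedded, as in intel_lvds_encoder */
	int is_dual_link;
};

int main(void)
{
	struct lvds_demo lvds = { .base = { .id = 7 }, .is_dual_link = 1 };
	struct encoder_demo *enc = &lvds.base;

	/* Recover the outer struct from a pointer to its embedded member. */
	assert(container_of(enc, struct lvds_demo, base) == &lvds);
	return 0;
}
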
bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
@@ -103,7 +103,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
intel_wakeref_t wakeref;
bool ret;
@@ -123,7 +123,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
u32 tmp, flags = 0;
pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS);
@@ -229,7 +229,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -312,7 +312,7 @@ static void intel_enable_lvds(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
- struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
struct drm_i915_private *dev_priv = to_i915(dev);
intel_de_write(dev_priv, lvds_encoder->reg,
@@ -334,7 +334,7 @@ static void intel_disable_lvds(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
intel_de_write(dev_priv, PP_CONTROL(0),
@@ -413,7 +413,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
{
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder =
- to_lvds_encoder(&intel_encoder->base);
+ to_lvds_encoder(intel_encoder);
struct intel_connector *intel_connector =
lvds_encoder->attached_connector;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -775,7 +775,7 @@ bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder = intel_get_lvds_encoder(dev_priv);
- return encoder && to_lvds_encoder(&encoder->base)->is_dual_link;
+ return encoder && to_lvds_encoder(encoder)->is_dual_link;
}
static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
@@ -814,6 +814,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
+static void intel_lvds_add_properties(struct drm_connector *connector)
+{
+ intel_attach_scaling_mode_property(connector);
+}
+
/**
* intel_lvds_init - setup LVDS connectors on this device
* @dev_priv: i915 device
@@ -823,7 +828,6 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
*/
void intel_lvds_init(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_lvds_encoder *lvds_encoder;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
@@ -833,11 +837,10 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
i915_reg_t lvds_reg;
u32 lvds;
u8 pin;
- u32 allowed_scalers;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
- drm_WARN(dev, !dev_priv->display.vbt.int_lvds_support,
+ drm_WARN(&dev_priv->drm, !dev_priv->display.vbt.int_lvds_support,
"Useless DMI match. Internal LVDS support disabled by VBT\n");
return;
}
@@ -886,10 +889,10 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
intel_encoder = &lvds_encoder->base;
encoder = &intel_encoder->base;
connector = &intel_connector->base;
- drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
+ drm_connector_init(&dev_priv->drm, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
+ drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS, "LVDS");
intel_encoder->enable = intel_enable_lvds;
@@ -920,17 +923,10 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
lvds_encoder->reg = lvds_reg;
- /* create the scaling mode property */
- allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT);
- allowed_scalers |= BIT(DRM_MODE_SCALE_FULLSCREEN);
- allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
- drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
- connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
+ intel_lvds_add_properties(connector);
intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps);
lvds_encoder->init_lvds_val = lvds;
@@ -947,7 +943,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev_priv->drm.mode_config.mutex);
if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC)
edid = drm_get_edid_switcheroo(connector,
intel_gmbus_get_adapter(dev_priv, pin));
@@ -971,9 +967,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
IS_ERR(edid) ? NULL : edid);
/* Try EDID first */
- intel_panel_add_edid_fixed_modes(intel_connector,
- intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE,
- false);
+ intel_panel_add_edid_fixed_modes(intel_connector, true);
/* Failed to get EDID, what about VBT? */
if (!intel_panel_preferred_fixed_mode(intel_connector))
@@ -987,7 +981,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
if (!intel_panel_preferred_fixed_mode(intel_connector))
intel_panel_add_encoder_fixed_mode(intel_connector, intel_encoder);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev_priv->drm.mode_config.mutex);
/* If we still don't have a mode after all that, give up. */
if (!intel_panel_preferred_fixed_mode(intel_connector))
diff --git a/drivers/gpu/drm/i915/display/intel_tc_phy_regs.h b/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h
index 5a545086f959..07978f8d5fb7 100644
--- a/drivers/gpu/drm/i915/display/intel_tc_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h
@@ -3,8 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
-#ifndef __INTEL_TC_PHY_REGS__
-#define __INTEL_TC_PHY_REGS__
+#ifndef __INTEL_MG_PHY_REGS__
+#define __INTEL_MG_PHY_REGS__
#include "i915_reg_defs.h"
@@ -277,4 +277,4 @@
_MG_PLL_TDC_COLDST_BIAS_PORT1, \
_MG_PLL_TDC_COLDST_BIAS_PORT2)
-#endif /* __INTEL_TC_PHY_REGS__ */
+#endif /* __INTEL_MG_PHY_REGS__ */
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index cbfabd58b75a..9d8ca230be39 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -155,6 +155,12 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
+ /* assume 1:1 mapping */
+ drm_property_replace_blob(&crtc_state->hw.degamma_lut,
+ crtc_state->pre_csc_lut);
+ drm_property_replace_blob(&crtc_state->hw.gamma_lut,
+ crtc_state->post_csc_lut);
+
drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
crtc_state->hw.degamma_lut);
drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
@@ -205,13 +211,21 @@ static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
+ struct intel_connector *found_connector = NULL;
- for_each_connector_on_encoder(dev, &encoder->base, connector)
- return connector;
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ if (&encoder->base == connector->base.encoder) {
+ found_connector = connector;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
- return NULL;
+ return found_connector;
}
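
One detail worth noting in this rewrite: the iterator must always be closed with drm_connector_list_iter_end(), even when the loop breaks early, hence the found_connector temporary instead of a return from inside the loop. A generic userspace sketch of the same begin/iterate/end discipline; the iterator here is illustrative, not the DRM API:

#include <stdio.h>

struct iter { int *items; int n, pos; };

static void iter_begin(struct iter *it, int *items, int n)
{
	it->items = items; it->n = n; it->pos = 0;
	printf("iter: acquired\n");	/* stands in for taking a reference */
}

static int *iter_next(struct iter *it)
{
	return it->pos < it->n ? &it->items[it->pos++] : NULL;
}

static void iter_end(struct iter *it)
{
	(void)it;
	printf("iter: released\n");	/* must run even after an early break */
}

int main(void)
{
	int vals[] = { 1, 2, 3 }, *v, *found = NULL;
	struct iter it;

	iter_begin(&it, vals, 3);
	while ((v = iter_next(&it))) {
		if (*v == 2) {
			found = v;
			break;		/* break out, don't return here */
		}
	}
	iter_end(&it);			/* always paired with iter_begin() */
	return found ? 0 : 1;
}
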
static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index 0fdcf2e6d57f..842d70f0dfd2 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -227,7 +227,8 @@ void intel_modeset_verify_crtc(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
- if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
+ if (!intel_crtc_needs_modeset(new_crtc_state) &&
+ !intel_crtc_needs_fastset(new_crtc_state))
return;
intel_wm_state_verify(crtc, new_crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index caa07ef34f21..e0184745632c 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -463,7 +463,6 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
struct opregion_asle *asle = dev_priv->display.opregion.asle;
- struct drm_device *dev = &dev_priv->drm;
drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp);
@@ -480,7 +479,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
if (bclp > 255)
return ASLC_BACKLIGHT_FAILED;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
/*
* Update backlight on all connectors that support backlight (usually
@@ -488,13 +487,13 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
*/
drm_dbg_kms(&dev_priv->drm, "updating opregion backlight %d/255\n",
bclp);
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter)
intel_backlight_set_acpi(connector->base.state, bclp, 255);
drm_connector_list_iter_end(&conn_iter);
asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index a3a3f9fe4342..69ce77711b7c 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -85,9 +85,10 @@ static bool is_alt_drrs_mode(const struct drm_display_mode *mode,
static bool is_alt_fixed_mode(const struct drm_display_mode *mode,
const struct drm_display_mode *preferred_mode)
{
- return drm_mode_match(mode, preferred_mode,
- DRM_MODE_MATCH_FLAGS |
- DRM_MODE_MATCH_3D_FLAGS) &&
+ u32 sync_flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC;
+
+ return (mode->flags & ~sync_flags) == (preferred_mode->flags & ~sync_flags) &&
mode->hdisplay == preferred_mode->hdisplay &&
mode->vdisplay == preferred_mode->vdisplay;
}
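
The open-coded comparison above deliberately ignores the four sync-polarity bits. A worked example of the mask arithmetic; the flag values are illustrative stand-ins for the DRM_MODE_FLAG_* definitions:

#include <assert.h>
#include <stdint.h>

/* Illustrative bit values; the real ones live in drm_mode.h. */
#define FLAG_PHSYNC    (1u << 0)
#define FLAG_NHSYNC    (1u << 1)
#define FLAG_PVSYNC    (1u << 2)
#define FLAG_NVSYNC    (1u << 3)
#define FLAG_INTERLACE (1u << 4)

int main(void)
{
	uint32_t sync = FLAG_PHSYNC | FLAG_NHSYNC | FLAG_PVSYNC | FLAG_NVSYNC;
	uint32_t a = FLAG_PHSYNC | FLAG_PVSYNC;		/* +hsync +vsync */
	uint32_t b = FLAG_NHSYNC | FLAG_NVSYNC;		/* -hsync -vsync */
	uint32_t c = FLAG_PHSYNC | FLAG_INTERLACE;	/* differs beyond sync */

	assert((a & ~sync) == (b & ~sync));	/* equal once polarity is masked */
	assert((a & ~sync) != (c & ~sync));	/* interlace still distinguishes */
	return 0;
}
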
@@ -147,12 +148,24 @@ int intel_panel_get_modes(struct intel_connector *connector)
return num_modes;
}
-enum drrs_type intel_panel_drrs_type(struct intel_connector *connector)
+static bool has_drrs_modes(struct intel_connector *connector)
{
- if (list_empty(&connector->panel.fixed_modes) ||
- list_is_singular(&connector->panel.fixed_modes))
- return DRRS_TYPE_NONE;
+ const struct drm_display_mode *mode1;
+
+ list_for_each_entry(mode1, &connector->panel.fixed_modes, head) {
+ const struct drm_display_mode *mode2 = mode1;
+
+ list_for_each_entry_continue(mode2, &connector->panel.fixed_modes, head) {
+ if (is_alt_drrs_mode(mode1, mode2))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+enum drrs_type intel_panel_drrs_type(struct intel_connector *connector)
+{
return connector->panel.vbt.drrs_type;
}
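
has_drrs_modes() visits every unordered pair in the fixed-mode list by resuming the inner iteration from the outer cursor (list_for_each_entry_continue). The same all-pairs idiom over a plain array, with a stand-in predicate assumed to mirror is_alt_drrs_mode (same timings, different clock):

#include <stdbool.h>
#include <stdio.h>

struct mode { int w, h, clock; };

/* Stand-in: same resolution, different pixel clock. */
static bool is_alt_pair(const struct mode *a, const struct mode *b)
{
	return a->w == b->w && a->h == b->h && a->clock != b->clock;
}

static bool has_alt_pair(const struct mode *m, int n)
{
	for (int i = 0; i < n; i++)
		for (int j = i + 1; j < n; j++)	/* inner walk resumes past i */
			if (is_alt_pair(&m[i], &m[j]))
				return true;
	return false;
}

int main(void)
{
	struct mode m[] = { { 1920, 1080, 148500 }, { 1920, 1080, 74250 } };

	printf("%s\n", has_alt_pair(m, 2) ? "DRRS-capable pair" : "no pair");
	return 0;
}
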
@@ -254,10 +267,10 @@ static void intel_panel_destroy_probed_modes(struct intel_connector *connector)
}
void intel_panel_add_edid_fixed_modes(struct intel_connector *connector,
- bool has_drrs, bool has_vrr)
+ bool use_alt_fixed_modes)
{
intel_panel_add_edid_preferred_mode(connector);
- if (intel_panel_preferred_fixed_mode(connector) && (has_drrs || has_vrr))
+ if (intel_panel_preferred_fixed_mode(connector) && use_alt_fixed_modes)
intel_panel_add_edid_alt_fixed_modes(connector);
intel_panel_destroy_probed_modes(connector);
}
@@ -653,6 +666,9 @@ int intel_panel_init(struct intel_connector *connector)
intel_backlight_init_funcs(panel);
+ if (!has_drrs_modes(connector))
+ connector->panel.vbt.drrs_type = DRRS_TYPE_NONE;
+
drm_dbg_kms(connector->base.dev,
"[CONNECTOR:%d:%s] DRRS type: %s\n",
connector->base.base.id, connector->base.name,
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index eff3ffd3d082..5c5b5b7f95b6 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -44,7 +44,7 @@ int intel_panel_fitting(struct intel_crtc_state *crtc_state,
int intel_panel_compute_config(struct intel_connector *connector,
struct drm_display_mode *adjusted_mode);
void intel_panel_add_edid_fixed_modes(struct intel_connector *connector,
- bool has_drrs, bool has_vrr);
+ bool use_alt_fixed_modes);
void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector);
void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector);
void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 8ac263f471be..1c74388c60d7 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -75,7 +75,6 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
struct intel_digital_port *dig_port;
@@ -83,8 +82,8 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
- drm_modeset_lock_all(dev);
- for_each_intel_encoder(dev, encoder) {
+ drm_modeset_lock_all(&dev_priv->drm);
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
if (!encoder->base.crtc)
continue;
@@ -111,7 +110,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
*source = INTEL_PIPE_CRC_SOURCE_DP_D;
break;
default:
- drm_WARN(dev, 1, "nonexisting DP port %c\n",
+ drm_WARN(&dev_priv->drm, 1, "nonexistent DP port %c\n",
port_name(dig_port->base.port));
break;
}
@@ -120,7 +119,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
break;
}
}
- drm_modeset_unlock_all(dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 9def8d9fade6..904a1049eff3 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -116,34 +116,56 @@ static bool psr2_global_enabled(struct intel_dp *intel_dp)
}
}
+static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
+ EDP_PSR_ERROR(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
+ EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
+ EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
+ EDP_PSR_MASK(intel_dp->psr.transcoder);
+}
+
static void psr_irq_control(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum transcoder trans_shift;
i915_reg_t imr_reg;
u32 mask, val;
- /*
- * gen12+ has registers relative to transcoder and one per transcoder
- * using the same bit definition: handle it as TRANSCODER_EDP to force
- * 0 shift in bit definition
- */
- if (DISPLAY_VER(dev_priv) >= 12) {
- trans_shift = 0;
+ if (DISPLAY_VER(dev_priv) >= 12)
imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
- } else {
- trans_shift = intel_dp->psr.transcoder;
+ else
imr_reg = EDP_PSR_IMR;
- }
- mask = EDP_PSR_ERROR(trans_shift);
+ mask = psr_irq_psr_error_bit_get(intel_dp);
if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
- mask |= EDP_PSR_POST_EXIT(trans_shift) |
- EDP_PSR_PRE_ENTRY(trans_shift);
+ mask |= psr_irq_post_exit_bit_get(intel_dp) |
+ psr_irq_pre_entry_bit_get(intel_dp);
- /* Warning: it is masking/setting reserved bits too */
val = intel_de_read(dev_priv, imr_reg);
- val &= ~EDP_PSR_TRANS_MASK(trans_shift);
+ val &= ~psr_irq_mask_get(intel_dp);
val |= ~mask;
intel_de_write(dev_priv, imr_reg, val);
}
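
The IMR update keeps the longstanding behaviour: wipe the transcoder's PSR field, then set every bit outside 'mask', leaving only the selected interrupts unmasked (a set IMR bit masks its interrupt). A worked bit-level example with assumed 8-bit toy values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Toy register: bits 0-3 stand in for the per-transcoder PSR field. */
	uint8_t field = 0x0f;	/* analogue of psr_irq_mask_get() */
	uint8_t mask  = 0x05;	/* bits we want left unmasked */
	uint8_t val   = 0xa3;	/* prior IMR contents */

	val &= (uint8_t)~field;	/* wipe the PSR field  -> 0xa0 */
	val |= (uint8_t)~mask;	/* mask all but 'mask' -> 0xfa */

	assert((val & mask) == 0);			/* selected bits unmasked */
	assert((val & (field & ~mask)) == (field & ~mask)); /* rest masked */
	return 0;
}
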
@@ -191,25 +213,21 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ktime_t time_ns = ktime_get();
- enum transcoder trans_shift;
i915_reg_t imr_reg;
- if (DISPLAY_VER(dev_priv) >= 12) {
- trans_shift = 0;
+ if (DISPLAY_VER(dev_priv) >= 12)
imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
- } else {
- trans_shift = intel_dp->psr.transcoder;
+ else
imr_reg = EDP_PSR_IMR;
- }
- if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
+ if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
intel_dp->psr.last_entry_attempt = time_ns;
drm_dbg_kms(&dev_priv->drm,
"[transcoder %s] PSR entry attempt in 2 vblanks\n",
transcoder_name(cpu_transcoder));
}
- if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
+ if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
intel_dp->psr.last_exit = time_ns;
drm_dbg_kms(&dev_priv->drm,
"[transcoder %s] PSR exit completed\n",
@@ -226,7 +244,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
}
}
- if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
+ if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
u32 val;
drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
@@ -243,7 +261,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
* or unset irq_aux_error.
*/
val = intel_de_read(dev_priv, imr_reg);
- val |= EDP_PSR_ERROR(trans_shift);
+ val |= psr_irq_psr_error_bit_get(intel_dp);
intel_de_write(dev_priv, imr_reg, val);
schedule_work(&intel_dp->psr.work);
@@ -515,7 +533,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
- if (!IS_ALDERLAKE_P(dev_priv))
+ if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
val |= EDP_SU_TRACK_ENABLE;
if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
@@ -598,7 +616,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
- if (IS_ALDERLAKE_P(dev_priv))
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
return trans == TRANSCODER_A || trans == TRANSCODER_B;
else if (DISPLAY_VER(dev_priv) >= 12)
return trans == TRANSCODER_A;
@@ -678,7 +696,7 @@ dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum port port = dig_port->base.port;
- if (IS_ALDERLAKE_P(dev_priv))
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
return pipe <= PIPE_B && port <= PORT_B;
else
return pipe == PIPE_A && port == PORT_A;
@@ -777,11 +795,11 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
return intel_dp->psr.su_y_granularity == 4;
/*
- * adl_p has 1 line granularity. For other platforms with SW tracking we
- * can adjust the y coordinates to match sink requirement if multiple of
- * 4.
+ * adl_p and display 14+ platforms have 1 line granularity.
+ * For other platforms with SW tracking we can adjust the y coordinates
+ * to match the sink's requirement if it is a multiple of 4.
*/
- if (IS_ALDERLAKE_P(dev_priv))
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
y_granularity = intel_dp->psr.su_y_granularity;
else if (intel_dp->psr.su_y_granularity <= 2)
y_granularity = 4;
@@ -865,7 +883,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* resolution requires DSC to be enabled, priority is given to DSC
* over PSR2.
*/
- if (crtc_state->dsc.compression_enable) {
+ if (crtc_state->dsc.compression_enable &&
+ (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 cannot be enabled since DSC is enabled\n");
return false;
@@ -1194,14 +1213,12 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
* first time that PSR HW tries to activate so lets keep PSR disabled
* to avoid any rendering problems.
*/
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12)
val = intel_de_read(dev_priv,
TRANS_PSR_IIR(intel_dp->psr.transcoder));
- val &= EDP_PSR_ERROR(0);
- } else {
+ else
val = intel_de_read(dev_priv, EDP_PSR_IIR);
- val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
- }
+ val &= psr_irq_psr_error_bit_get(intel_dp);
if (val) {
intel_dp->psr.sink_not_reliable = true;
drm_dbg_kms(&dev_priv->drm,
@@ -1458,7 +1475,7 @@ static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
{
- return IS_ALDERLAKE_P(dev_priv) ?
+ return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
}
@@ -1611,7 +1628,7 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
if (clip->y1 == -1)
goto exit;
- if (IS_ALDERLAKE_P(dev_priv)) {
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
} else {
@@ -1648,7 +1665,15 @@ static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *c
struct drm_rect *pipe_clip)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- const u16 y_alignment = crtc_state->su_y_granularity;
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ u16 y_alignment;
+
+ /* ADLP and display 14+ align the SU region to the DSC slice height when DSC is enabled */
+ if (crtc_state->dsc.compression_enable &&
+ (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
+ y_alignment = vdsc_cfg->slice_height;
+ else
+ y_alignment = crtc_state->su_y_granularity;
pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
if (pipe_clip->y2 % y_alignment)
@@ -2038,13 +2063,12 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
struct drm_connector_list_iter conn_iter;
- struct drm_device *dev = &dev_priv->drm;
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
struct drm_connector *conn;
int err = 0;
- state = drm_atomic_state_alloc(dev);
+ state = drm_atomic_state_alloc(&dev_priv->drm);
if (!state)
return -ENOMEM;
@@ -2053,7 +2077,7 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
retry:
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
drm_for_each_connector_iter(conn, &conn_iter) {
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index f5b744bef18f..48b7b1aa37b2 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -199,7 +199,7 @@ to_intel_sdvo_connector(struct drm_connector *connector)
container_of((conn_state), struct intel_sdvo_connector_state, base.base)
static bool
-intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags);
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo);
static bool
intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
@@ -1297,13 +1297,28 @@ static bool intel_sdvo_limited_color_range(struct intel_encoder *encoder,
return intel_hdmi_limited_color_range(crtc_state, conn_state);
}
+static bool intel_sdvo_has_audio(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+
+ if (!crtc_state->has_hdmi_sink)
+ return false;
+
+ if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+ return intel_sdvo->has_hdmi_audio;
+ else
+ return intel_conn_state->force_audio == HDMI_AUDIO_ON;
+}
+
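
force_audio is a tri-state property (off / auto / on) that the new helper resolves against the sink's reported capability. A compact model of that resolution; the enum values are illustrative, not the driver's:

#include <assert.h>
#include <stdbool.h>

enum force_audio { AUDIO_OFF = -1, AUDIO_AUTO = 0, AUDIO_ON = 1 };

static bool resolve_audio(bool hdmi_sink, bool sink_has_audio,
			  enum force_audio force)
{
	if (!hdmi_sink)
		return false;		/* DVI sinks carry no audio */
	if (force == AUDIO_AUTO)
		return sink_has_audio;	/* defer to what the sink reports */
	return force == AUDIO_ON;	/* explicit user override */
}

int main(void)
{
	assert(!resolve_audio(false, true, AUDIO_ON));	/* no HDMI, no audio */
	assert(resolve_audio(true, false, AUDIO_ON));	/* forced on */
	assert(!resolve_audio(true, true, AUDIO_OFF));	/* forced off */
	assert(resolve_audio(true, true, AUDIO_AUTO));	/* follows the sink */
	return 0;
}
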
static int intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- struct intel_sdvo_connector_state *intel_sdvo_state =
- to_intel_sdvo_connector_state(conn_state);
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(conn_state->connector);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -1362,13 +1377,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, conn_state);
- if (pipe_config->has_hdmi_sink) {
- if (intel_sdvo_state->base.force_audio == HDMI_AUDIO_AUTO)
- pipe_config->has_audio = intel_sdvo->has_hdmi_audio;
- else
- pipe_config->has_audio =
- intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON;
- }
+ pipe_config->has_audio = intel_sdvo_has_audio(encoder, pipe_config, conn_state);
pipe_config->limited_color_range =
intel_sdvo_limited_color_range(encoder, pipe_config,
@@ -2290,17 +2299,12 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
static int intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- int num_modes = 0;
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- num_modes += intel_panel_get_modes(to_intel_connector(connector));
- num_modes += intel_ddc_get_modes(connector, &intel_sdvo->ddc);
-
- return num_modes;
+ return intel_panel_get_modes(to_intel_connector(connector));
}
static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -2627,7 +2631,7 @@ intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
}
static bool
-intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
+intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo)
{
return intel_sdvo_check_supp_encode(intel_sdvo);
}
@@ -2689,9 +2693,8 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
drm_connector_helper_add(drm_connector,
&intel_sdvo_connector_helper_funcs);
- connector->base.base.interlace_allowed = 1;
- connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->base.base.interlace_allowed = true;
connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
intel_connector_attach_encoder(&connector->base, &encoder->base);
@@ -2733,7 +2736,7 @@ static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
}
static bool
-intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
+intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type)
{
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
@@ -2741,19 +2744,13 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- DRM_DEBUG_KMS("initialising DVI device %d\n", device);
+ DRM_DEBUG_KMS("initialising DVI type 0x%x\n", type);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;
- if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
- } else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
- }
+ intel_sdvo_connector->output_flag = type;
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
@@ -2773,7 +2770,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
- if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
+ if (intel_sdvo_is_hdmi_connector(intel_sdvo)) {
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
intel_sdvo_connector->is_hdmi = true;
}
@@ -2790,14 +2787,14 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
}
static bool
-intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
+intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, u16 type)
{
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- DRM_DEBUG_KMS("initialising TV type %d\n", type);
+ DRM_DEBUG_KMS("initialising TV type 0x%x\n", type);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
@@ -2808,7 +2805,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- intel_sdvo->controlled_output |= type;
intel_sdvo_connector->output_flag = type;
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
@@ -2830,14 +2826,14 @@ err:
}
static bool
-intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
+intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, u16 type)
{
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- DRM_DEBUG_KMS("initialising analog device %d\n", device);
+ DRM_DEBUG_KMS("initialising analog type 0x%x\n", type);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
@@ -2849,13 +2845,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
- } else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
- }
+ intel_sdvo_connector->output_flag = type;
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
kfree(intel_sdvo_connector);
@@ -2866,7 +2856,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
}
static bool
-intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type)
{
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_i915_private *i915 = to_i915(encoder->dev);
@@ -2874,7 +2864,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
- DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
+ DRM_DEBUG_KMS("initialising LVDS type 0x%x\n", type);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
@@ -2885,13 +2875,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
- if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
- } else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
- }
+ intel_sdvo_connector->output_flag = type;
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
kfree(intel_sdvo_connector);
@@ -2910,8 +2894,12 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
intel_panel_add_vbt_sdvo_fixed_mode(intel_connector);
if (!intel_panel_preferred_fixed_mode(intel_connector)) {
+ mutex_lock(&i915->drm.mode_config.mutex);
+
intel_ddc_get_modes(connector, &intel_sdvo->ddc);
- intel_panel_add_edid_fixed_modes(intel_connector, false, false);
+ intel_panel_add_edid_fixed_modes(intel_connector, false);
+
+ mutex_unlock(&i915->drm.mode_config.mutex);
}
intel_panel_init(intel_connector);
@@ -2926,58 +2914,78 @@ err:
return false;
}
-static bool
-intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
+static u16 intel_sdvo_filter_output_flags(u16 flags)
{
+ flags &= SDVO_OUTPUT_MASK;
+
/* Per the SDVO spec, an XXX1 function may not exist unless the XXX0 function does. */
+ if (!(flags & SDVO_OUTPUT_TMDS0))
+ flags &= ~SDVO_OUTPUT_TMDS1;
- if (flags & SDVO_OUTPUT_TMDS0)
- if (!intel_sdvo_dvi_init(intel_sdvo, 0))
- return false;
+ if (!(flags & SDVO_OUTPUT_RGB0))
+ flags &= ~SDVO_OUTPUT_RGB1;
- if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
- if (!intel_sdvo_dvi_init(intel_sdvo, 1))
- return false;
+ if (!(flags & SDVO_OUTPUT_LVDS0))
+ flags &= ~SDVO_OUTPUT_LVDS1;
- /* TV has no XXX1 function block */
- if (flags & SDVO_OUTPUT_SVID0)
- if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0))
- return false;
+ return flags;
+}
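
A worked example of the filtering rule (an XXX1 output only survives if its XXX0 sibling is present); the bit positions are assumptions standing in for the SDVO_OUTPUT_* definitions, and only the TMDS/LVDS pairs are modelled (RGB is handled identically):

#include <assert.h>
#include <stdint.h>

/* Assumed layout: XXX0 flags in the low byte, XXX1 flags a byte up. */
#define OUT_TMDS0 (1u << 0)
#define OUT_LVDS0 (1u << 6)
#define OUT_TMDS1 (1u << 8)
#define OUT_LVDS1 (1u << 14)
#define OUT_MASK  0x7fffu

static uint16_t filter_flags(uint16_t flags)
{
	flags &= OUT_MASK;
	if (!(flags & OUT_TMDS0))	/* XXX1 requires XXX0 */
		flags &= ~OUT_TMDS1;
	if (!(flags & OUT_LVDS0))
		flags &= ~OUT_LVDS1;
	return flags;
}

int main(void)
{
	/* TMDS1 advertised without TMDS0 is dropped; the LVDS pair survives. */
	assert(filter_flags(OUT_TMDS1 | OUT_LVDS0 | OUT_LVDS1) ==
	       (OUT_LVDS0 | OUT_LVDS1));
	return 0;
}
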
- if (flags & SDVO_OUTPUT_CVBS0)
- if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
- return false;
+static bool intel_sdvo_output_init(struct intel_sdvo *sdvo, u16 type)
+{
+ if (type & SDVO_TMDS_MASK)
+ return intel_sdvo_dvi_init(sdvo, type);
+ else if (type & SDVO_TV_MASK)
+ return intel_sdvo_tv_init(sdvo, type);
+ else if (type & SDVO_RGB_MASK)
+ return intel_sdvo_analog_init(sdvo, type);
+ else if (type & SDVO_LVDS_MASK)
+ return intel_sdvo_lvds_init(sdvo, type);
+ else
+ return false;
+}
- if (flags & SDVO_OUTPUT_YPRPB0)
- if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
- return false;
+static bool
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo)
+{
+ static const u16 probe_order[] = {
+ SDVO_OUTPUT_TMDS0,
+ SDVO_OUTPUT_TMDS1,
+ /* TV has no XXX1 function block */
+ SDVO_OUTPUT_SVID0,
+ SDVO_OUTPUT_CVBS0,
+ SDVO_OUTPUT_YPRPB0,
+ SDVO_OUTPUT_RGB0,
+ SDVO_OUTPUT_RGB1,
+ SDVO_OUTPUT_LVDS0,
+ SDVO_OUTPUT_LVDS1,
+ };
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+ u16 flags;
+ int i;
- if (flags & SDVO_OUTPUT_RGB0)
- if (!intel_sdvo_analog_init(intel_sdvo, 0))
- return false;
+ flags = intel_sdvo_filter_output_flags(intel_sdvo->caps.output_flags);
- if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
- if (!intel_sdvo_analog_init(intel_sdvo, 1))
- return false;
+ if (flags == 0) {
+ DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%04x)\n",
+ SDVO_NAME(intel_sdvo), intel_sdvo->caps.output_flags);
+ return false;
+ }
- if (flags & SDVO_OUTPUT_LVDS0)
- if (!intel_sdvo_lvds_init(intel_sdvo, 0))
- return false;
+ intel_sdvo->controlled_output = flags;
- if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
- if (!intel_sdvo_lvds_init(intel_sdvo, 1))
- return false;
+ intel_sdvo_select_ddc_bus(i915, intel_sdvo);
- if ((flags & SDVO_OUTPUT_MASK) == 0) {
- unsigned char bytes[2];
+ for (i = 0; i < ARRAY_SIZE(probe_order); i++) {
+ u16 type = flags & probe_order[i];
- intel_sdvo->controlled_output = 0;
- memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
- DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
- SDVO_NAME(intel_sdvo),
- bytes[0], bytes[1]);
- return false;
+ if (!type)
+ continue;
+
+ if (!intel_sdvo_output_init(intel_sdvo, type))
+ return false;
}
+
intel_sdvo->base.pipe_mask = ~0;
return true;
@@ -3353,8 +3361,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
intel_sdvo->colorimetry_cap =
intel_sdvo_get_colorimetry_cap(intel_sdvo);
- if (intel_sdvo_output_setup(intel_sdvo,
- intel_sdvo->caps.output_flags) != true) {
+ if (!intel_sdvo_output_setup(intel_sdvo)) {
drm_dbg_kms(&dev_priv->drm,
"SDVO output failed to setup on %s\n",
SDVO_NAME(intel_sdvo));
@@ -3383,8 +3390,6 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
*/
intel_sdvo->base.cloneable = 0;
- intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo);
-
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
goto err_output;
@@ -3407,9 +3412,12 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
(intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
/* check currently supported outputs */
intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0 |
+ SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_SVID0 |
+ SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB0) ? 'Y' : 'N',
intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1 |
+ SDVO_OUTPUT_LVDS1) ? 'Y' : 'N');
return true;
err_output:
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index e5af955b5600..70624b4b2d38 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -8,9 +8,10 @@
#include "intel_display.h"
#include "intel_display_power_map.h"
#include "intel_display_types.h"
+#include "intel_dkl_phy_regs.h"
#include "intel_dp_mst.h"
+#include "intel_mg_phy_regs.h"
#include "intel_tc.h"
-#include "intel_tc_phy_regs.h"
static const char *tc_port_mode_name(enum tc_port_mode mode)
{
@@ -408,14 +409,9 @@ static bool adl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_uncore *uncore = &i915->uncore;
enum port port = dig_port->base.port;
- u32 val;
- val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
- if (take)
- val |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
- else
- val &= ~DDI_BUF_CTL_TC_PHY_OWNERSHIP;
- intel_uncore_write(uncore, DDI_BUF_CTL(port), val);
+ intel_uncore_rmw(uncore, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
+ take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);
return true;
}
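
This conversion folds the explicit read/modify/write sequence into intel_uncore_rmw(uncore, reg, clear, set). A self-contained model of the helper's semantics; the register bit is an assumed value for illustration, not the real DDI_BUF_CTL layout:

#include <assert.h>
#include <stdint.h>

static uint32_t fake_reg;	/* stands in for the MMIO register */

static uint32_t rmw(uint32_t *r, uint32_t clear, uint32_t set)
{
	uint32_t old = *r;

	*r = (old & ~clear) | set;	/* clear first, then set */
	return old;
}

#define PHY_OWNERSHIP (1u << 6)		/* assumed bit position */

int main(void)
{
	fake_reg = 0x80000000u | PHY_OWNERSHIP;

	/* take == false: the bit is named in 'clear', 'set' is 0 */
	rmw(&fake_reg, PHY_OWNERSHIP, 0);
	assert(!(fake_reg & PHY_OWNERSHIP));

	/* take == true: the same bit appears in both masks */
	rmw(&fake_reg, PHY_OWNERSHIP, PHY_OWNERSHIP);
	assert(fake_reg & PHY_OWNERSHIP);
	return 0;
}
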
@@ -687,18 +683,58 @@ static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
int refcount)
{
+ dig_port->tc_link_refcount = refcount;
+}
+
+/**
+ * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
+ * @dig_port: digital port
+ *
+ * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
+ * will be locked until intel_tc_port_sanitize_mode() is called.
+ */
+void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
+{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ intel_wakeref_t tc_cold_wref;
+ enum intel_display_power_domain domain;
+
+ mutex_lock(&dig_port->tc_lock);
+ drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
+ drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);
- dig_port->tc_link_refcount = refcount;
+
+ tc_cold_wref = tc_cold_block(dig_port, &domain);
+
+ dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+ /* Prevent changing dig_port->tc_mode until intel_tc_port_sanitize_mode() is called. */
+ intel_tc_port_link_init_refcount(dig_port, 1);
+ dig_port->tc_lock_wakeref = tc_cold_block(dig_port, &dig_port->tc_lock_power_domain);
+
+ tc_cold_unblock(dig_port, domain, tc_cold_wref);
+
+ drm_dbg_kms(&i915->drm, "Port %s: init mode (%s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(dig_port->tc_mode));
+
+ mutex_unlock(&dig_port->tc_lock);
}
-void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
+/**
+ * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
+ * @dig_port: digital port
+ *
+ * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
+ * loading and system resume:
+ * If the encoder is enabled keep the TypeC mode/PHY connected state locked until
+ * the encoder is disabled.
+ * If the encoder is disabled make sure the PHY is disconnected.
+ */
+void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_encoder *encoder = &dig_port->base;
- intel_wakeref_t tc_cold_wref;
- enum intel_display_power_domain domain;
int active_links = 0;
mutex_lock(&dig_port->tc_lock);
@@ -708,21 +744,14 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
else if (encoder->base.crtc)
active_links = to_intel_crtc(encoder->base.crtc)->active;
- drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
- drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
-
- tc_cold_wref = tc_cold_block(dig_port, &domain);
+ drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount != 1);
+ intel_tc_port_link_init_refcount(dig_port, active_links);
- dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
if (active_links) {
if (!icl_tc_phy_is_connected(dig_port))
drm_dbg_kms(&i915->drm,
"Port %s: PHY disconnected with %d active link(s)\n",
dig_port->tc_port_name, active_links);
- intel_tc_port_link_init_refcount(dig_port, active_links);
-
- dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
- &dig_port->tc_lock_power_domain);
} else {
/*
* TBT-alt is the default mode in any case the PHY ownership is not
@@ -736,9 +765,10 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
dig_port->tc_port_name,
tc_port_mode_name(dig_port->tc_mode));
icl_tc_phy_disconnect(dig_port);
- }
- tc_cold_unblock(dig_port, domain, tc_cold_wref);
+ tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
+ fetch_and_zero(&dig_port->tc_lock_wakeref));
+ }
drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
dig_port->tc_port_name,
@@ -923,4 +953,6 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
dig_port->tc_mode = TC_PORT_DISCONNECTED;
dig_port->tc_link_refcount = 0;
tc_port_load_fia_params(i915, dig_port);
+
+ intel_tc_port_init_mode(dig_port);
}
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index 6b47b29f551c..d54082e2d5e8 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -24,7 +24,8 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port);
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes);
-void intel_tc_port_sanitize(struct intel_digital_port *dig_port);
+void intel_tc_port_init_mode(struct intel_digital_port *dig_port);
+void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port);
void intel_tc_port_lock(struct intel_digital_port *dig_port);
void intel_tc_port_unlock(struct intel_digital_port *dig_port);
void intel_tc_port_flush_work(struct intel_digital_port *dig_port);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index dcf89d701f0f..cf7d5c1ab406 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1880,18 +1880,56 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
.destroy = intel_encoder_destroy,
};
+static void intel_tv_add_properties(struct drm_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct drm_connector_state *conn_state = connector->state;
+ const char *tv_format_names[ARRAY_SIZE(tv_modes)];
+ int i;
+
+ /* BIOS margin values */
+ conn_state->tv.margins.left = 54;
+ conn_state->tv.margins.top = 36;
+ conn_state->tv.margins.right = 46;
+ conn_state->tv.margins.bottom = 37;
+
+ conn_state->tv.mode = 0;
+
+ /* Create TV properties then attach current values */
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+ /* 1080p50/1080p60 not supported on gen3 */
+ if (DISPLAY_VER(i915) == 3 && tv_modes[i].oversample == 1)
+ break;
+
+ tv_format_names[i] = tv_modes[i].name;
+ }
+ drm_mode_create_tv_properties(&i915->drm, i, tv_format_names);
+
+ drm_object_attach_property(&connector->base,
+ i915->drm.mode_config.tv_mode_property,
+ conn_state->tv.mode);
+ drm_object_attach_property(&connector->base,
+ i915->drm.mode_config.tv_left_margin_property,
+ conn_state->tv.margins.left);
+ drm_object_attach_property(&connector->base,
+ i915->drm.mode_config.tv_top_margin_property,
+ conn_state->tv.margins.top);
+ drm_object_attach_property(&connector->base,
+ i915->drm.mode_config.tv_right_margin_property,
+ conn_state->tv.margins.right);
+ drm_object_attach_property(&connector->base,
+ i915->drm.mode_config.tv_bottom_margin_property,
+ conn_state->tv.margins.bottom);
+}
+
void
intel_tv_init(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct intel_tv *intel_tv;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
- const char *tv_format_names[ARRAY_SIZE(tv_modes)];
- int i, initial_mode = 0;
- struct drm_connector_state *state;
if ((intel_de_read(dev_priv, TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
return;
@@ -1937,7 +1975,6 @@ intel_tv_init(struct drm_i915_private *dev_priv)
intel_encoder = &intel_tv->base;
connector = &intel_connector->base;
- state = connector->state;
/*
* The documentation, for the older chipsets at least, recommends
@@ -1951,10 +1988,10 @@ intel_tv_init(struct drm_i915_private *dev_priv)
*/
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- drm_connector_init(dev, connector, &intel_tv_connector_funcs,
+ drm_connector_init(&dev_priv->drm, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
- drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
+ drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC, "TV");
intel_encoder->compute_config = intel_tv_compute_config;
@@ -1974,41 +2011,7 @@ intel_tv_init(struct drm_i915_private *dev_priv)
intel_encoder->cloneable = 0;
intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
- /* BIOS margin values */
- state->tv.margins.left = 54;
- state->tv.margins.top = 36;
- state->tv.margins.right = 46;
- state->tv.margins.bottom = 37;
-
- state->tv.mode = initial_mode;
-
drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
- /* Create TV properties then attach current values */
- for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
- /* 1080p50/1080p60 not supported on gen3 */
- if (DISPLAY_VER(dev_priv) == 3 &&
- tv_modes[i].oversample == 1)
- break;
-
- tv_format_names[i] = tv_modes[i].name;
- }
- drm_mode_create_tv_properties(dev, i, tv_format_names);
-
- drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
- state->tv.mode);
- drm_object_attach_property(&connector->base,
- dev->mode_config.tv_left_margin_property,
- state->tv.margins.left);
- drm_object_attach_property(&connector->base,
- dev->mode_config.tv_top_margin_property,
- state->tv.margins.top);
- drm_object_attach_property(&connector->base,
- dev->mode_config.tv_right_margin_property,
- state->tv.margins.right);
- drm_object_attach_property(&connector->base,
- dev->mode_config.tv_bottom_margin_property,
- state->tv.margins.bottom);
+ intel_tv_add_properties(connector);
}
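The extracted helper keeps the gen3 early break, so only the first i format names reach drm_mode_create_tv_properties() and the 1080p entries stay unregistered. A standalone model of that truncation (table contents are invented; the real entries live in tv_modes[]):

#include <stdio.h>

struct tv_mode { const char *name; int oversample; };

/* Modes with oversample == 1 (the 1080p entries) end the scan on
 * display version 3, so the count handed to property creation is
 * the loop index, not the table size.
 */
int main(void)
{
	static const struct tv_mode tv_modes[] = {
		{ "NTSC-M", 8 }, { "PAL", 6 },
		{ "1080p@50Hz", 1 }, { "1080p@60Hz", 1 },
	};
	const char *names[4];
	int display_ver = 3, i;

	for (i = 0; i < 4; i++) {
		if (display_ver == 3 && tv_modes[i].oversample == 1)
			break;
		names[i] = tv_modes[i].name;
	}
	printf("registering %d TV formats, first: %s\n",
	       i, i ? names[0] : "none");	/* 2 on gen3 */
	return 0;
}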
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 01b0932757ed..d58e667016e4 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -1706,14 +1706,10 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
- modifier == I915_FORMAT_MOD_4_TILED ||
- modifier == I915_FORMAT_MOD_Yf_TILED ||
- modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
- wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ wp->y_tiled = modifier != I915_FORMAT_MOD_X_TILED &&
+ intel_fb_is_tiled_modifier(modifier);
+ wp->rc_surface = intel_fb_is_ccs_modifier(modifier);
wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
wp->width = width;
@@ -2486,7 +2482,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
/* TODO: Implement vblank synchronized MBUS joining changes */
- ret = intel_modeset_all_pipes(state);
+ ret = intel_modeset_all_pipes(state, "MBUS joining change");
if (ret)
return ret;
}
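The open-coded modifier lists collapse into the intel_fb_is_tiled_modifier()/intel_fb_is_ccs_modifier() helpers; y_tiled still needs the explicit X-tile exclusion because the tiled-modifier helper also covers X-tiling. A standalone sketch over a reduced modifier set, with local stand-ins for the helpers (assuming they accept exactly these families):

#include <assert.h>
#include <stdbool.h>

enum mod { LINEAR, X_TILED, Y_TILED, TILE_4, Yf_TILED,
	   Y_TILED_CCS, Yf_TILED_CCS };

static bool is_tiled(enum mod m) { return m != LINEAR; }	/* incl. X */
static bool is_ccs(enum mod m)   { return m == Y_TILED_CCS ||
					  m == Yf_TILED_CCS; }

int main(void)
{
	for (int m = LINEAR; m <= Yf_TILED_CCS; m++) {
		bool old_y = m == Y_TILED || m == TILE_4 || m == Yf_TILED ||
			     m == Y_TILED_CCS || m == Yf_TILED_CCS;
		bool new_y = m != X_TILED && is_tiled(m);
		bool old_rc = m == Y_TILED_CCS || m == Yf_TILED_CCS;

		assert(old_y == new_y);		/* behavior-preserving here */
		assert(old_rc == is_ccs(m));	/* rc_surface maps 1:1 */
	}
	return 0;
}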
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index b3f5ca280ef2..5a741ea4505f 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -1659,19 +1659,10 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
static void vlv_dsi_add_properties(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
const struct drm_display_mode *fixed_mode =
intel_panel_preferred_fixed_mode(connector);
- u32 allowed_scalers;
- allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
- if (!HAS_GMCH(dev_priv))
- allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
-
- drm_connector_attach_scaling_mode_property(&connector->base,
- allowed_scalers);
-
- connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
+ intel_attach_scaling_mode_property(&connector->base);
drm_connector_set_panel_orientation_with_quirk(&connector->base,
intel_dsi_get_panel_orientation(connector),
@@ -1854,7 +1845,6 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
void vlv_dsi_init(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_dsi *intel_dsi;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
@@ -1891,7 +1881,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
connector = &intel_connector->base;
- drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
+ drm_encoder_init(&dev_priv->drm, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
"DSI %c", port_name(port));
intel_encoder->compute_config = intel_dsi_compute_config;
@@ -1974,20 +1964,18 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_dsi_vbt_gpio_init(intel_dsi,
intel_dsi_get_hw_state(intel_encoder, &pipe));
- drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
+ drm_connector_init(&dev_priv->drm, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
intel_connector_attach_encoder(intel_connector, intel_encoder);
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev_priv->drm.mode_config.mutex);
intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev_priv->drm.mode_config.mutex);
if (!intel_panel_preferred_fixed_mode(intel_connector)) {
drm_dbg_kms(&dev_priv->drm, "no fixed mode\n");
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 0512afdd20d8..b3b398fe689c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -113,7 +113,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
clflush = clflush_work_create(obj);
if (clflush) {
i915_sw_fence_await_reservation(&clflush->base.chain,
- obj->base.resv, NULL, true,
+ obj->base.resv, true,
i915_fence_timeout(i915),
I915_FENCE_GFP);
dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 98b2d2950df6..ec6f7ae47783 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -73,7 +73,7 @@ static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf,
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
void *vaddr;
- vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 8f5796cf9c9c..1160723c9d2d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2424,7 +2424,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_engine < 0)
file_priv->bsd_engine =
- get_random_int() % num_vcs_engines(dev_priv);
+ prandom_u32_max(num_vcs_engines(dev_priv));
return file_priv->bsd_engine;
}
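get_random_int() % n becomes prandom_u32_max(n), which bounds a 32-bit sample with a widening multiply and shift rather than a division. A userspace model of that mapping (a sketch of the idiom, not the kernel source):

#include <assert.h>
#include <stdint.h>

/* Map a uniform 32-bit value into [0, n): the high 32 bits of the
 * 64-bit product are cheaper than '%' and at least as fair for the
 * non-cryptographic use here (BSD engine load balancing).
 */
static uint32_t bounded_u32(uint32_t rnd, uint32_t n)
{
	return (uint32_t)(((uint64_t)rnd * n) >> 32);
}

int main(void)
{
	assert(bounded_u32(0, 4) == 0);
	assert(bounded_u32(UINT32_MAX, 4) == 3);	/* always < n */
	return 0;
}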
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 62495d5d0038..733696057761 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -290,7 +290,21 @@ void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
__i915_gem_object_free_mmaps(obj);
atomic_set(&obj->mm.pages_pin_count, 0);
+
+ /*
+ * dma_buf_unmap_attachment() requires the reservation to be
+ * locked. An imported GEM object shouldn't share its reservation
+ * lock, and ttm_bo_cleanup_memtype_use() shouldn't be invoked for
+ * a dma-buf, so it's safe to take the lock here.
+ */
+ if (obj->base.import_attach)
+ i915_gem_object_lock(obj, NULL);
+
__i915_gem_object_put_pages(obj);
+
+ if (obj->base.import_attach)
+ i915_gem_object_unlock(obj);
+
GEM_BUG_ON(i915_gem_object_has_pages(obj));
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 9219a60a7f88..25129af70f70 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -640,7 +640,7 @@ bool i915_ttm_resource_mappable(struct ttm_resource *res)
if (!i915_ttm_cpu_maps_iomem(res))
return true;
- return bman_res->used_visible_size == bman_res->base.num_pages;
+ return bman_res->used_visible_size == PFN_UP(bman_res->base.size);
}
static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
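ttm_resource lost its num_pages field in favor of a byte-based size, so the comparison now derives the page count with PFN_UP, which rounds a byte count up to whole pages. A standalone check of the rounding (the PAGE_SHIFT of 12 is an assumption; it is arch-dependent):

#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
/* Matches the kernel definition: round a byte count up to pages. */
#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	assert(PFN_UP(0) == 0);
	assert(PFN_UP(1) == 1);
	assert(PFN_UP(PAGE_SIZE) == 1);
	assert(PFN_UP(PAGE_SIZE + 1) == 2);
	return 0;
}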
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 9b7d101cf47a..1b1a22716722 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -425,12 +425,11 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
static int
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
- const unsigned long end = addr + len;
+ VMA_ITERATOR(vmi, mm, addr);
struct vm_area_struct *vma;
- int ret = -EFAULT;
mmap_read_lock(mm);
- for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
+ for_each_vma_range(vmi, vma, addr + len) {
/* Check for holes, note that we also update the addr below */
if (vma->vm_start > addr)
break;
@@ -438,16 +437,13 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
break;
- if (vma->vm_end >= end) {
- ret = 0;
- break;
- }
-
addr = vma->vm_end;
}
mmap_read_unlock(mm);
- return ret;
+ if (vma)
+ return -EFAULT;
+ return 0;
}
/*
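The rework drops the linked-list walk (vma->vm_next is gone with the maple-tree conversion) in favor of VMA_ITERATOR/for_each_vma_range. A userspace model of the new control flow, with a sorted array standing in for the mm's VMA tree; vma is non-NULL after the loop only if it broke early:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct range { unsigned long start, end; bool pfnmap; };

/* Walk the "VMAs" starting below addr + len, failing on the first
 * hole or PFN/MIXED mapping, mirroring the reworked probe_range().
 */
static int probe_range_model(const struct range *vmas, size_t n,
			     unsigned long addr, unsigned long len)
{
	const unsigned long end = addr + len;
	const struct range *vma = NULL;
	size_t i;

	for (i = 0; i < n && vmas[i].start < end; i++) {
		vma = &vmas[i];
		if (vma->start > addr)		/* hole before this VMA */
			break;
		if (vma->pfnmap)		/* VM_PFNMAP | VM_MIXEDMAP */
			break;
		addr = vma->end;
		vma = NULL;			/* iterator exhausts to NULL */
	}
	return vma ? -EFAULT : 0;
}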
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index aaa3d6174bf6..e57f9390076c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -290,7 +290,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
goto out_import;
}
- st = dma_buf_map_attachment(import_attach, DMA_BIDIRECTIONAL);
+ st = dma_buf_map_attachment_unlocked(import_attach, DMA_BIDIRECTIONAL);
if (IS_ERR(st)) {
err = PTR_ERR(st);
goto out_detach;
@@ -303,7 +303,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
timeout = -ETIME;
}
err = timeout > 0 ? 0 : timeout;
- dma_buf_unmap_attachment(import_attach, st, DMA_BIDIRECTIONAL);
+ dma_buf_unmap_attachment_unlocked(import_attach, st, DMA_BIDIRECTIONAL);
out_detach:
dma_buf_detach(dmabuf, import_attach);
out_import:
@@ -373,7 +373,7 @@ static int igt_dmabuf_import(void *arg)
goto out_obj;
}
- err = dma_buf_vmap(dmabuf, &map);
+ err = dma_buf_vmap_unlocked(dmabuf, &map);
dma_map = err ? NULL : map.vaddr;
if (!dma_map) {
pr_err("dma_buf_vmap failed\n");
@@ -414,7 +414,7 @@ static int igt_dmabuf_import(void *arg)
err = 0;
out_dma_map:
- dma_buf_vunmap(dmabuf, &map);
+ dma_buf_vunmap_unlocked(dmabuf, &map);
out_obj:
i915_gem_object_put(obj);
out_dmabuf:
@@ -435,7 +435,7 @@ static int igt_dmabuf_import_ownership(void *arg)
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
- err = dma_buf_vmap(dmabuf, &map);
+ err = dma_buf_vmap_unlocked(dmabuf, &map);
ptr = err ? NULL : map.vaddr;
if (!ptr) {
pr_err("dma_buf_vmap failed\n");
@@ -444,7 +444,7 @@ static int igt_dmabuf_import_ownership(void *arg)
}
memset(ptr, 0xc5, PAGE_SIZE);
- dma_buf_vunmap(dmabuf, &map);
+ dma_buf_vunmap_unlocked(dmabuf, &map);
obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
if (IS_ERR(obj)) {
@@ -495,7 +495,7 @@ static int igt_dmabuf_export_vmap(void *arg)
}
i915_gem_object_put(obj);
- err = dma_buf_vmap(dmabuf, &map);
+ err = dma_buf_vmap_unlocked(dmabuf, &map);
ptr = err ? NULL : map.vaddr;
if (!ptr) {
pr_err("dma_buf_vmap failed\n");
@@ -512,7 +512,7 @@ static int igt_dmabuf_export_vmap(void *arg)
memset(ptr, 0xc5, dmabuf->size);
err = 0;
- dma_buf_vunmap(dmabuf, &map);
+ dma_buf_vunmap_unlocked(dmabuf, &map);
out:
dma_buf_put(dmabuf);
return err;
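The selftests move to the _unlocked dma-buf entry points, which take the object's reservation lock internally; the plain variants now assume the caller already holds it. A minimal model of that locked/_unlocked split (a pthread mutex stands in for the dma-resv lock):

#include <assert.h>
#include <pthread.h>

struct buf { pthread_mutex_t lock; int mapped; };

/* Core requires the lock held; the _unlocked wrapper takes it, so
 * callers like the converted selftests need no locking of their own.
 */
static void vmap_locked(struct buf *b)
{
	b->mapped = 1;		/* caller must hold b->lock */
}

static void vmap_unlocked(struct buf *b)
{
	pthread_mutex_lock(&b->lock);
	vmap_locked(b);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct buf b = { PTHREAD_MUTEX_INITIALIZER, 0 };

	vmap_unlocked(&b);
	assert(b.mapped);
	return 0;
}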
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index dfbf093ec562..70177d3f2e94 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -55,6 +55,9 @@
#define FORCEWAKE_ACK_GSC _MMIO(0xdf8)
#define FORCEWAKE_ACK_GT_MTL _MMIO(0xdfc)
+#define GMD_ID_GRAPHICS _MMIO(0xd8c)
+#define GMD_ID_MEDIA _MMIO(MTL_MEDIA_GSI_BASE + 0xd8c)
+
#define MCFG_MCR_SELECTOR _MMIO(0xfd0)
#define MTL_MCR_SELECTOR _MMIO(0xfd4)
#define SF_MCR_SELECTOR _MMIO(0xfd8)
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 8c98567f7a78..3cdf5c24dbc5 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -2429,11 +2429,11 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
}
if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
- IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
+ IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
/*
* Wa_1607030317:tgl
* Wa_1607186500:tgl
- * Wa_1607297627:tgl,rkl,dg1[a0]
+ * Wa_1607297627:tgl,rkl,dg1[a0],adlp
*
* On TGL and RKL there are multiple entries for this WA in the
* BSpec; some indicate this is an A0-only WA, others indicate
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 557f3314291a..076c779f776a 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -240,13 +240,13 @@ static void free_resource(struct intel_vgpu *vgpu)
}
static int alloc_resource(struct intel_vgpu *vgpu,
- struct intel_vgpu_creation_params *param)
+ const struct intel_vgpu_config *conf)
{
struct intel_gvt *gvt = vgpu->gvt;
unsigned long request, avail, max, taken;
const char *item;
- if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
+ if (!conf->low_mm || !conf->high_mm || !conf->fence) {
gvt_vgpu_err("Invalid vGPU creation params\n");
return -EINVAL;
}
@@ -255,7 +255,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
taken = gvt->gm.vgpu_allocated_low_gm_size;
avail = max - taken;
- request = MB_TO_BYTES(param->low_gm_sz);
+ request = conf->low_mm;
if (request > avail)
goto no_enough_resource;
@@ -266,7 +266,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
taken = gvt->gm.vgpu_allocated_high_gm_size;
avail = max - taken;
- request = MB_TO_BYTES(param->high_gm_sz);
+ request = conf->high_mm;
if (request > avail)
goto no_enough_resource;
@@ -277,16 +277,16 @@ static int alloc_resource(struct intel_vgpu *vgpu,
max = gvt_fence_sz(gvt) - HOST_FENCE;
taken = gvt->fence.vgpu_allocated_fence_num;
avail = max - taken;
- request = param->fence_sz;
+ request = conf->fence;
if (request > avail)
goto no_enough_resource;
vgpu_fence_sz(vgpu) = request;
- gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
- gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
- gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
+ gvt->gm.vgpu_allocated_low_gm_size += conf->low_mm;
+ gvt->gm.vgpu_allocated_high_gm_size += conf->high_mm;
+ gvt->fence.vgpu_allocated_fence_num += conf->fence;
return 0;
no_enough_resource:
@@ -298,7 +298,7 @@ no_enough_resource:
}
/**
- * inte_gvt_free_vgpu_resource - free HW resource owned by a vGPU
+ * intel_vgpu_free_resource() - free HW resource owned by a vGPU
* @vgpu: a vGPU
*
* This function is used to free the HW resource owned by a vGPU.
@@ -328,7 +328,7 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
}
/**
- * intel_alloc_vgpu_resource - allocate HW resource for a vGPU
+ * intel_vgpu_alloc_resource() - allocate HW resource for a vGPU
* @vgpu: vGPU
* @param: vGPU creation params
*
@@ -340,11 +340,11 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
*
*/
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
- struct intel_vgpu_creation_params *param)
+ const struct intel_vgpu_config *conf)
{
int ret;
- ret = alloc_resource(vgpu, param);
+ ret = alloc_resource(vgpu, conf);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index b4f69364f9a1..ce0eb03709c3 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2341,7 +2341,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
gvt_vgpu_err("fail to populate guest ggtt entry\n");
/* guest driver may read/write the entry when partial
* update the entry in this situation p2m will fail
- * settting the shadow entry to point to a scratch page
+ * setting the shadow entry to point to a scratch page
*/
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
} else
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 705689e64011..dbf8d7470b2c 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -36,6 +36,7 @@
#include <uapi/linux/pci_regs.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
+#include <linux/mdev.h>
#include "i915_drv.h"
#include "intel_gvt.h"
@@ -172,6 +173,7 @@ struct intel_vgpu_submission {
#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
struct intel_vgpu {
+ struct vfio_device vfio_device;
struct intel_gvt *gvt;
struct mutex vgpu_lock;
int id;
@@ -211,7 +213,6 @@ struct intel_vgpu {
u32 scan_nonprivbb;
- struct vfio_device vfio_device;
struct vfio_region *region;
int num_regions;
struct eventfd_ctx *intx_trigger;
@@ -294,15 +295,25 @@ struct intel_gvt_firmware {
bool firmware_loaded;
};
-#define NR_MAX_INTEL_VGPU_TYPES 20
-struct intel_vgpu_type {
- char name[16];
- unsigned int avail_instance;
- unsigned int low_gm_size;
- unsigned int high_gm_size;
+struct intel_vgpu_config {
+ unsigned int low_mm;
+ unsigned int high_mm;
unsigned int fence;
+
+ /*
+ * A vGPU with a weight of 8 will get twice as much GPU time as a vGPU
+ * with a weight of 4 on a contended host; different vGPU types have
+ * different weights. Legal weights range from 1 to 16.
+ */
unsigned int weight;
- enum intel_vgpu_edid resolution;
+ enum intel_vgpu_edid edid;
+ const char *name;
+};
+
+struct intel_vgpu_type {
+ struct mdev_type type;
+ char name[16];
+ const struct intel_vgpu_config *conf;
};
struct intel_gvt {
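For the weight semantics documented above: shares on a contended host are proportional to weight, so VGPU_WEIGHT(2) = 8 buys twice the GPU time of VGPU_WEIGHT(4) = 4. A standalone illustration:

#include <stdio.h>

#define VGPU_MAX_WEIGHT 16
#define VGPU_WEIGHT(vgpu_num) (VGPU_MAX_WEIGHT / (vgpu_num))

/* Two vGPUs of the "_4" type (weight 4) and one of the "_2" type
 * (weight 8) split the GPU 4:4:8, i.e. 25%/25%/50%.
 */
int main(void)
{
	unsigned int w[] = { VGPU_WEIGHT(4), VGPU_WEIGHT(4), VGPU_WEIGHT(2) };
	unsigned int i, sum = 0;

	for (i = 0; i < 3; i++)
		sum += w[i];
	for (i = 0; i < 3; i++)
		printf("vGPU%u: %u/%u of GPU time\n", i, w[i], sum);
	return 0;
}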
@@ -326,6 +337,8 @@ struct intel_gvt {
struct intel_gvt_workload_scheduler scheduler;
struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
+ struct mdev_parent parent;
+ struct mdev_type **mdev_types;
struct intel_vgpu_type *types;
unsigned int num_types;
struct intel_vgpu *idle_vgpu;
@@ -436,19 +449,8 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
/* ring context size i.e. the first 0x50 dwords*/
#define RING_CTX_SIZE 320
-struct intel_vgpu_creation_params {
- __u64 low_gm_sz; /* in MB */
- __u64 high_gm_sz; /* in MB */
- __u64 fence_sz;
- __u64 resolution;
- __s32 primary;
- __u64 vgpu_id;
-
- __u32 weight;
-};
-
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
- struct intel_vgpu_creation_params *param);
+ const struct intel_vgpu_config *conf);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
@@ -494,8 +496,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
-struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
- struct intel_vgpu_type *type);
+int intel_gvt_create_vgpu(struct intel_vgpu *vgpu,
+ const struct intel_vgpu_config *conf);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 41ab6c99e3a0..1cb388484bf0 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -905,7 +905,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
index = FDI_RX_IMR_TO_PIPE(offset);
else {
- gvt_vgpu_err("Unsupport registers %x\n", offset);
+ gvt_vgpu_err("Unsupported registers %x\n", offset);
return -EINVAL;
}
@@ -3052,7 +3052,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
}
/**
- * intel_t_default_mmio_write - default MMIO write handler
+ * intel_vgpu_default_mmio_write() - default MMIO write handler
* @vgpu: a vGPU
* @offset: access offset
* @p_data: write data buffer
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index e3cd58946477..7a45e5360caf 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -34,7 +34,6 @@
*/
#include <linux/init.h>
-#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
@@ -43,7 +42,6 @@
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
-#include <linux/uuid.h>
#include <linux/mdev.h>
#include <linux/debugfs.h>
@@ -115,117 +113,18 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
struct kvm_memory_slot *slot,
struct kvm_page_track_notifier_node *node);
-static ssize_t available_instances_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr,
- char *buf)
+static ssize_t intel_vgpu_show_description(struct mdev_type *mtype, char *buf)
{
- struct intel_vgpu_type *type;
- unsigned int num = 0;
- struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = &gvt->types[mtype_get_type_group_id(mtype)];
- if (!type)
- num = 0;
- else
- num = type->avail_instance;
-
- return sprintf(buf, "%u\n", num);
-}
-
-static ssize_t device_api_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
-}
-
-static ssize_t description_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- struct intel_vgpu_type *type;
- struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = &gvt->types[mtype_get_type_group_id(mtype)];
- if (!type)
- return 0;
+ struct intel_vgpu_type *type =
+ container_of(mtype, struct intel_vgpu_type, type);
return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
"fence: %d\nresolution: %s\n"
"weight: %d\n",
- BYTES_TO_MB(type->low_gm_size),
- BYTES_TO_MB(type->high_gm_size),
- type->fence, vgpu_edid_str(type->resolution),
- type->weight);
-}
-
-static ssize_t name_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- struct intel_vgpu_type *type;
- struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = &gvt->types[mtype_get_type_group_id(mtype)];
- if (!type)
- return 0;
-
- return sprintf(buf, "%s\n", type->name);
-}
-
-static MDEV_TYPE_ATTR_RO(available_instances);
-static MDEV_TYPE_ATTR_RO(device_api);
-static MDEV_TYPE_ATTR_RO(description);
-static MDEV_TYPE_ATTR_RO(name);
-
-static struct attribute *gvt_type_attrs[] = {
- &mdev_type_attr_available_instances.attr,
- &mdev_type_attr_device_api.attr,
- &mdev_type_attr_description.attr,
- &mdev_type_attr_name.attr,
- NULL,
-};
-
-static struct attribute_group *gvt_vgpu_type_groups[] = {
- [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
-};
-
-static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i, j;
- struct intel_vgpu_type *type;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- type = &gvt->types[i];
-
- group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
- if (!group)
- goto unwind;
-
- group->name = type->name;
- group->attrs = gvt_type_attrs;
- gvt_vgpu_type_groups[i] = group;
- }
-
- return 0;
-
-unwind:
- for (j = 0; j < i; j++) {
- group = gvt_vgpu_type_groups[j];
- kfree(group);
- }
-
- return -ENOMEM;
-}
-
-static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- group = gvt_vgpu_type_groups[i];
- gvt_vgpu_type_groups[i] = NULL;
- kfree(group);
- }
+ BYTES_TO_MB(type->conf->low_mm),
+ BYTES_TO_MB(type->conf->high_mm),
+ type->conf->fence, vgpu_edid_str(type->conf->edid),
+ type->conf->weight);
}
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
@@ -1546,7 +1445,28 @@ static const struct attribute_group *intel_vgpu_groups[] = {
NULL,
};
+static int intel_vgpu_init_dev(struct vfio_device *vfio_dev)
+{
+ struct mdev_device *mdev = to_mdev_device(vfio_dev->dev);
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
+ struct intel_vgpu_type *type =
+ container_of(mdev->type, struct intel_vgpu_type, type);
+
+ vgpu->gvt = kdev_to_i915(mdev->type->parent->dev)->gvt;
+ return intel_gvt_create_vgpu(vgpu, type->conf);
+}
+
+static void intel_vgpu_release_dev(struct vfio_device *vfio_dev)
+{
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
+
+ intel_gvt_destroy_vgpu(vgpu);
+ vfio_free_device(vfio_dev);
+}
+
static const struct vfio_device_ops intel_vgpu_dev_ops = {
+ .init = intel_vgpu_init_dev,
+ .release = intel_vgpu_release_dev,
.open_device = intel_vgpu_open_device,
.close_device = intel_vgpu_close_device,
.read = intel_vgpu_read,
@@ -1558,35 +1478,28 @@ static const struct vfio_device_ops intel_vgpu_dev_ops = {
static int intel_vgpu_probe(struct mdev_device *mdev)
{
- struct device *pdev = mdev_parent_dev(mdev);
- struct intel_gvt *gvt = kdev_to_i915(pdev)->gvt;
- struct intel_vgpu_type *type;
struct intel_vgpu *vgpu;
int ret;
- type = &gvt->types[mdev_get_type_group_id(mdev)];
- if (!type)
- return -EINVAL;
-
- vgpu = intel_gvt_create_vgpu(gvt, type);
+ vgpu = vfio_alloc_device(intel_vgpu, vfio_device, &mdev->dev,
+ &intel_vgpu_dev_ops);
if (IS_ERR(vgpu)) {
gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu));
return PTR_ERR(vgpu);
}
- vfio_init_group_dev(&vgpu->vfio_device, &mdev->dev,
- &intel_vgpu_dev_ops);
-
dev_set_drvdata(&mdev->dev, vgpu);
ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device);
- if (ret) {
- intel_gvt_destroy_vgpu(vgpu);
- return ret;
- }
+ if (ret)
+ goto out_put_vdev;
gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
dev_name(mdev_dev(mdev)));
return 0;
+
+out_put_vdev:
+ vfio_put_device(&vgpu->vfio_device);
+ return ret;
}
static void intel_vgpu_remove(struct mdev_device *mdev)
@@ -1595,18 +1508,43 @@ static void intel_vgpu_remove(struct mdev_device *mdev)
if (WARN_ON_ONCE(vgpu->attached))
return;
- intel_gvt_destroy_vgpu(vgpu);
+
+ vfio_unregister_group_dev(&vgpu->vfio_device);
+ vfio_put_device(&vgpu->vfio_device);
+}
+
+static unsigned int intel_vgpu_get_available(struct mdev_type *mtype)
+{
+ struct intel_vgpu_type *type =
+ container_of(mtype, struct intel_vgpu_type, type);
+ struct intel_gvt *gvt = kdev_to_i915(mtype->parent->dev)->gvt;
+ unsigned int low_gm_avail, high_gm_avail, fence_avail;
+
+ mutex_lock(&gvt->lock);
+ low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
+ gvt->gm.vgpu_allocated_low_gm_size;
+ high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
+ gvt->gm.vgpu_allocated_high_gm_size;
+ fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
+ gvt->fence.vgpu_allocated_fence_num;
+ mutex_unlock(&gvt->lock);
+
+ return min3(low_gm_avail / type->conf->low_mm,
+ high_gm_avail / type->conf->high_mm,
+ fence_avail / type->conf->fence);
}
static struct mdev_driver intel_vgpu_mdev_driver = {
+ .device_api = VFIO_DEVICE_API_PCI_STRING,
.driver = {
.name = "intel_vgpu_mdev",
.owner = THIS_MODULE,
.dev_groups = intel_vgpu_groups,
},
- .probe = intel_vgpu_probe,
- .remove = intel_vgpu_remove,
- .supported_type_groups = gvt_vgpu_type_groups,
+ .probe = intel_vgpu_probe,
+ .remove = intel_vgpu_remove,
+ .get_available = intel_vgpu_get_available,
+ .show_description = intel_vgpu_show_description,
};
int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
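intel_vgpu_get_available() replaces the old cached avail_instance bookkeeping: each remaining resource is divided by the type's per-instance demand and the bottleneck resource wins. A standalone model with invented numbers:

#include <stdio.h>

#define min3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) \
				 : ((b) < (c) ? (b) : (c)))

/* Availability is bounded by whichever of low GM, high GM, or fence
 * registers runs out first. All quantities here are made up.
 */
int main(void)
{
	unsigned int low_gm_avail = 256, high_gm_avail = 1024, fence_avail = 12;
	unsigned int low_mm = 64, high_mm = 384, fence = 4;

	printf("available instances: %u\n",
	       min3(low_gm_avail / low_mm,
		    high_gm_avail / high_mm,
		    fence_avail / fence));	/* min(4, 2, 3) = 2 */
	return 0;
}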
@@ -1904,8 +1842,7 @@ static void intel_gvt_clean_device(struct drm_i915_private *i915)
if (drm_WARN_ON(&i915->drm, !gvt))
return;
- mdev_unregister_device(i915->drm.dev);
- intel_gvt_cleanup_vgpu_type_groups(gvt);
+ mdev_unregister_parent(&gvt->parent);
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
intel_gvt_clean_vgpu_types(gvt);
@@ -2005,19 +1942,15 @@ static int intel_gvt_init_device(struct drm_i915_private *i915)
intel_gvt_debugfs_init(gvt);
- ret = intel_gvt_init_vgpu_type_groups(gvt);
+ ret = mdev_register_parent(&gvt->parent, i915->drm.dev,
+ &intel_vgpu_mdev_driver,
+ gvt->mdev_types, gvt->num_types);
if (ret)
goto out_destroy_idle_vgpu;
- ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_driver);
- if (ret)
- goto out_cleanup_vgpu_type_groups;
-
gvt_dbg_core("gvt device initialization is done\n");
return 0;
-out_cleanup_vgpu_type_groups:
- intel_gvt_cleanup_vgpu_type_groups(gvt);
out_destroy_idle_vgpu:
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
intel_gvt_debugfs_clean(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 5197e15cb78e..200c1162daa3 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -546,7 +546,7 @@ static void switch_mmio(struct intel_vgpu *pre,
}
/**
- * intel_gvt_switch_render_mmio - switch mmio context of specific engine
+ * intel_gvt_switch_mmio - switch mmio context of specific engine
* @pre: the last vGPU that own the engine
* @next: the vGPU to switch to
* @engine: the engine
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 46da19b3225d..56c71474008a 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -73,24 +73,21 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
+/*
+ * vGPU type name is defined as GVTg_Vx_y, which contains the physical GPU
+ * generation type (e.g. V4 for BDW server, V5 for SKL server).
+ *
+ * Depending on the physical SKU resources, we might see vGPU types like
+ * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create different types of
+ * vGPU on the same physical GPU depending on the available resources. Each
+ * vGPU type has a different avail_instance count to indicate how many
+ * vGPU instances can be created for this type.
+ */
#define VGPU_MAX_WEIGHT 16
#define VGPU_WEIGHT(vgpu_num) \
(VGPU_MAX_WEIGHT / (vgpu_num))
-static const struct {
- unsigned int low_mm;
- unsigned int high_mm;
- unsigned int fence;
-
- /* A vGPU with a weight of 8 will get twice as much GPU as a vGPU
- * with a weight of 4 on a contended host, different vGPU type has
- * different weight set. Legal weights range from 1 to 16.
- */
- unsigned int weight;
- enum intel_vgpu_edid edid;
- const char *name;
-} vgpu_types[] = {
-/* Fixed vGPU type table */
+static const struct intel_vgpu_config intel_vgpu_configs[] = {
{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
@@ -106,102 +103,58 @@ static const struct {
*/
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
- unsigned int num_types;
- unsigned int i, low_avail, high_avail;
- unsigned int min_low;
-
- /* vGPU type name is defined as GVTg_Vx_y which contains
- * physical GPU generation type (e.g V4 as BDW server, V5 as
- * SKL server).
- *
- * Depend on physical SKU resource, might see vGPU types like
- * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
- * different types of vGPU on same physical GPU depending on
- * available resource. Each vGPU type will have "avail_instance"
- * to indicate how many vGPU instance can be created for this
- * type.
- *
- */
- low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
- high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
- num_types = ARRAY_SIZE(vgpu_types);
+ unsigned int low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
+ unsigned int high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
+ unsigned int num_types = ARRAY_SIZE(intel_vgpu_configs);
+ unsigned int i;
gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
GFP_KERNEL);
if (!gvt->types)
return -ENOMEM;
- min_low = MB_TO_BYTES(32);
- for (i = 0; i < num_types; ++i) {
- if (low_avail / vgpu_types[i].low_mm == 0)
- break;
-
- gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
- gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
- gvt->types[i].fence = vgpu_types[i].fence;
+ gvt->mdev_types = kcalloc(num_types, sizeof(*gvt->mdev_types),
+ GFP_KERNEL);
+ if (!gvt->mdev_types)
+ goto out_free_types;
- if (vgpu_types[i].weight < 1 ||
- vgpu_types[i].weight > VGPU_MAX_WEIGHT)
- return -EINVAL;
+ for (i = 0; i < num_types; ++i) {
+ const struct intel_vgpu_config *conf = &intel_vgpu_configs[i];
- gvt->types[i].weight = vgpu_types[i].weight;
- gvt->types[i].resolution = vgpu_types[i].edid;
- gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
- high_avail / vgpu_types[i].high_mm);
+ if (low_avail / conf->low_mm == 0)
+ break;
+ if (conf->weight < 1 || conf->weight > VGPU_MAX_WEIGHT)
+ goto out_free_mdev_types;
- if (GRAPHICS_VER(gvt->gt->i915) == 8)
- sprintf(gvt->types[i].name, "GVTg_V4_%s",
- vgpu_types[i].name);
- else if (GRAPHICS_VER(gvt->gt->i915) == 9)
- sprintf(gvt->types[i].name, "GVTg_V5_%s",
- vgpu_types[i].name);
+ sprintf(gvt->types[i].name, "GVTg_V%u_%s",
+ GRAPHICS_VER(gvt->gt->i915) == 8 ? 4 : 5, conf->name);
+ gvt->types[i].conf = conf;
gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
i, gvt->types[i].name,
- gvt->types[i].avail_instance,
- gvt->types[i].low_gm_size,
- gvt->types[i].high_gm_size, gvt->types[i].fence,
- gvt->types[i].weight,
- vgpu_edid_str(gvt->types[i].resolution));
+ min(low_avail / conf->low_mm,
+ high_avail / conf->high_mm),
+ conf->low_mm, conf->high_mm, conf->fence,
+ conf->weight, vgpu_edid_str(conf->edid));
+
+ gvt->mdev_types[i] = &gvt->types[i].type;
+ gvt->mdev_types[i]->sysfs_name = gvt->types[i].name;
}
gvt->num_types = i;
return 0;
-}
-void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
-{
+out_free_mdev_types:
+ kfree(gvt->mdev_types);
+out_free_types:
kfree(gvt->types);
+ return -EINVAL;
}
-static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
+void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
- int i;
- unsigned int low_gm_avail, high_gm_avail, fence_avail;
- unsigned int low_gm_min, high_gm_min, fence_min;
-
- /* Need to depend on maxium hw resource size but keep on
- * static config for now.
- */
- low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
- gvt->gm.vgpu_allocated_low_gm_size;
- high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
- gvt->gm.vgpu_allocated_high_gm_size;
- fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
- gvt->fence.vgpu_allocated_fence_num;
-
- for (i = 0; i < gvt->num_types; i++) {
- low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
- high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
- fence_min = fence_avail / gvt->types[i].fence;
- gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
- fence_min);
-
- gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
- i, gvt->types[i].name,
- gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
- gvt->types[i].high_gm_size, gvt->types[i].fence);
- }
+ kfree(gvt->mdev_types);
+ kfree(gvt->types);
}
/**
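The two sprintf() branches collapse into one format string, mapping graphics version 8 to the V4 prefix and everything else handled here (version 9) to V5. A sketch with stand-ins for GRAPHICS_VER() and conf->name:

#include <stdio.h>

int main(void)
{
	char name[16];
	unsigned int graphics_ver = 9;		/* SKL-class */
	const char *suffix = "4";		/* conf->name stand-in */

	snprintf(name, sizeof(name), "GVTg_V%u_%s",
		 graphics_ver == 8 ? 4 : 5, suffix);
	printf("%s\n", name);			/* GVTg_V5_4 */
	return 0;
}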
@@ -298,12 +251,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_clean_mmio(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
mutex_unlock(&vgpu->vgpu_lock);
-
- mutex_lock(&gvt->lock);
- intel_gvt_update_vgpu_types(gvt);
- mutex_unlock(&gvt->lock);
-
- vfree(vgpu);
}
#define IDLE_VGPU_IDR 0
@@ -363,42 +310,38 @@ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
vfree(vgpu);
}
-static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
- struct intel_vgpu_creation_params *param)
+int intel_gvt_create_vgpu(struct intel_vgpu *vgpu,
+ const struct intel_vgpu_config *conf)
{
+ struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->gt->i915;
- struct intel_vgpu *vgpu;
int ret;
- gvt_dbg_core("low %llu MB high %llu MB fence %llu\n",
- param->low_gm_sz, param->high_gm_sz,
- param->fence_sz);
-
- vgpu = vzalloc(sizeof(*vgpu));
- if (!vgpu)
- return ERR_PTR(-ENOMEM);
+ gvt_dbg_core("low %u MB high %u MB fence %u\n",
+ BYTES_TO_MB(conf->low_mm), BYTES_TO_MB(conf->high_mm),
+ conf->fence);
+ mutex_lock(&gvt->lock);
ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
GFP_KERNEL);
if (ret < 0)
- goto out_free_vgpu;
+ goto out_unlock;
vgpu->id = ret;
- vgpu->gvt = gvt;
- vgpu->sched_ctl.weight = param->weight;
+ vgpu->sched_ctl.weight = conf->weight;
mutex_init(&vgpu->vgpu_lock);
mutex_init(&vgpu->dmabuf_lock);
INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
idr_init_base(&vgpu->object_idr, 1);
- intel_vgpu_init_cfg_space(vgpu, param->primary);
+ intel_vgpu_init_cfg_space(vgpu, 1);
vgpu->d3_entered = false;
ret = intel_vgpu_init_mmio(vgpu);
if (ret)
goto out_clean_idr;
- ret = intel_vgpu_alloc_resource(vgpu, param);
+ ret = intel_vgpu_alloc_resource(vgpu, conf);
if (ret)
goto out_clean_vgpu_mmio;
@@ -412,7 +355,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_gtt;
- ret = intel_vgpu_init_display(vgpu, param->resolution);
+ ret = intel_vgpu_init_display(vgpu, conf->edid);
if (ret)
goto out_clean_opregion;
@@ -437,7 +380,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- return vgpu;
+ intel_gvt_update_reg_whitelist(vgpu);
+ mutex_unlock(&gvt->lock);
+ return 0;
out_clean_sched_policy:
intel_vgpu_clean_sched_policy(vgpu);
@@ -455,48 +400,9 @@ out_clean_vgpu_mmio:
intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
idr_remove(&gvt->vgpu_idr, vgpu->id);
-out_free_vgpu:
- vfree(vgpu);
- return ERR_PTR(ret);
-}
-
-/**
- * intel_gvt_create_vgpu - create a virtual GPU
- * @gvt: GVT device
- * @type: type of the vGPU to create
- *
- * This function is called when user wants to create a virtual GPU.
- *
- * Returns:
- * pointer to intel_vgpu, error pointer if failed.
- */
-struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
- struct intel_vgpu_type *type)
-{
- struct intel_vgpu_creation_params param;
- struct intel_vgpu *vgpu;
-
- param.primary = 1;
- param.low_gm_sz = type->low_gm_size;
- param.high_gm_sz = type->high_gm_size;
- param.fence_sz = type->fence;
- param.weight = type->weight;
- param.resolution = type->resolution;
-
- /* XXX current param based on MB */
- param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
- param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
-
- mutex_lock(&gvt->lock);
- vgpu = __intel_gvt_create_vgpu(gvt, &param);
- if (!IS_ERR(vgpu)) {
- /* calculate left instance change for types */
- intel_gvt_update_vgpu_types(gvt);
- intel_gvt_update_reg_whitelist(vgpu);
- }
+out_unlock:
mutex_unlock(&gvt->lock);
-
- return vgpu;
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index ae0414037625..c3d43f9b1e45 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -338,7 +338,8 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
- intel_device_info_subplatform_init(dev_priv);
+ intel_device_info_runtime_init_early(dev_priv);
+
intel_step_init(dev_priv);
intel_uncore_mmio_debug_init_early(dev_priv);
@@ -354,6 +355,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->display.wm.wm_mutex);
mutex_init(&dev_priv->display.pps.mutex);
mutex_init(&dev_priv->display.hdcp.comp_mutex);
+ spin_lock_init(&dev_priv->display.dkl.phy_lock);
i915_memcpy_init_early(dev_priv);
intel_runtime_pm_init_early(&dev_priv->runtime_pm);
@@ -739,7 +741,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
*/
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_gt *gt;
unsigned int i;
@@ -749,7 +750,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
intel_vgpu_register(dev_priv);
/* Reveal our presence to userspace */
- if (drm_dev_register(dev, 0)) {
+ if (drm_dev_register(&dev_priv->drm, 0)) {
drm_err(&dev_priv->drm,
"Failed to register driver for userspace access!\n");
return;
@@ -898,10 +899,6 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ERR(i915))
return PTR_ERR(i915);
- /* Disable nuclear pageflip by default on pre-ILK */
- if (!i915->params.nuclear_pageflip && DISPLAY_VER(i915) < 5)
- i915->drm.driver_features &= ~DRIVER_ATOMIC;
-
ret = pci_enable_device(pdev);
if (ret)
goto out_fini;
@@ -1097,32 +1094,30 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
if (!HAS_DISPLAY(dev_priv))
return;
- drm_modeset_lock_all(dev);
- for_each_intel_encoder(dev, encoder)
+ drm_modeset_lock_all(&dev_priv->drm);
+ for_each_intel_encoder(&dev_priv->drm, encoder)
if (encoder->suspend)
encoder->suspend(encoder);
- drm_modeset_unlock_all(dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
}
static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
if (!HAS_DISPLAY(dev_priv))
return;
- drm_modeset_lock_all(dev);
- for_each_intel_encoder(dev, encoder)
+ drm_modeset_lock_all(&dev_priv->drm);
+ for_each_intel_encoder(&dev_priv->drm, encoder)
if (encoder->shutdown)
encoder->shutdown(encoder);
- drm_modeset_unlock_all(dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
}
void i915_driver_shutdown(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b1c4e924d883..05b3300cc4ed 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -74,9 +74,6 @@ struct intel_limit;
struct intel_overlay_error_state;
struct vlv_s0ix_state;
-/* Threshold == 5 for long IRQs, 50 for short */
-#define HPD_STORM_DEFAULT_THRESHOLD 50
-
#define I915_GEM_GPU_DOMAINS \
(I915_GEM_DOMAIN_RENDER | \
I915_GEM_DOMAIN_SAMPLER | \
@@ -872,6 +869,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_DOUBLE_BUFFERED_M_N(dev_priv) (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
#define HAS_CDCLK_CRAWL(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_crawl)
+#define HAS_CDCLK_SQUASH(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_squash)
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg)
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
@@ -937,6 +935,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
+#define HAS_GMD_ID(i915) (INTEL_INFO(i915)->has_gmd_id)
+
#define HAS_LSPCON(dev_priv) (IS_DISPLAY_VER(dev_priv, 9, 10))
#define HAS_L3_CCS_READ(i915) (INTEL_INFO(i915)->has_l3_ccs_read)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 329ff75b80b9..7bd1861ddbdf 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -137,12 +137,12 @@ static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
range = round_down(end - len, align) - round_up(start, align);
if (range) {
if (sizeof(unsigned long) == sizeof(u64)) {
- addr = get_random_long();
+ addr = get_random_u64();
} else {
- addr = get_random_int();
+ addr = get_random_u32();
if (range > U32_MAX) {
addr <<= 32;
- addr |= get_random_int();
+ addr |= get_random_u32();
}
}
div64_u64_rem(addr, range, &addr);
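random_offset() keeps its shape and only swaps in the typed helpers; on 32-bit builds it still widens two draws into one 64-bit sample when the range needs it. A userspace model (rand() is a weak stand-in for the kernel RNG; the kernel reduces with div64_u64_rem() rather than '%'):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t rand32(void) { return (uint32_t)rand(); }

/* One 32-bit draw suffices for small ranges; otherwise compose a
 * 64-bit sample from two independent draws before reducing.
 */
static uint64_t random_in_range(uint64_t range)
{
	uint64_t addr = rand32();

	if (range > UINT32_MAX) {
		addr <<= 32;
		addr |= rand32();
	}
	return addr % range;
}

int main(void)
{
	printf("%" PRIu64 "\n", random_in_range(1ULL << 40));
	return 0;
}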
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 86a42d9e8041..d68859866bf2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -325,15 +325,10 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
u32 mask,
u32 bits)
{
- u32 val;
-
lockdep_assert_held(&dev_priv->irq_lock);
drm_WARN_ON(&dev_priv->drm, bits & ~mask);
- val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
- val &= ~mask;
- val |= bits;
- intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
+ intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN, mask, bits);
}
/**
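Several hunks below convert explicit read/modify/write pairs to intel_uncore_rmw(uncore, reg, clear, set), which returns the old value; rmw(reg, 0, 0) therefore reads a register and writes the same value straight back, the idiom used to ack latched (write-1-to-clear) hotplug status registers. A standalone model of that contract (a sketch, not the i915 implementation):

#include <assert.h>
#include <stdint.h>

/* new = (old & ~clear) | set, returning the old value. */
static uint32_t rmw(uint32_t *reg, uint32_t clear, uint32_t set)
{
	uint32_t old = *reg;

	*reg = (old & ~clear) | set;
	return old;
}

int main(void)
{
	uint32_t reg = 0xf0;

	assert(rmw(&reg, 0x30, 0x01) == 0xf0);	/* clear bits 4-5, set bit 0 */
	assert(reg == 0xc1);
	assert(rmw(&reg, ~0u, 0) == 0xc1);	/* VLV_IER-style: read and zero */
	assert(reg == 0);
	return 0;
}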
@@ -1057,8 +1052,8 @@ static void ivb_parity_work(struct work_struct *work)
if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
goto out;
- misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
- intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
+ GEN7_DOP_CLOCK_GATE_ENABLE, 0);
intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);
while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
@@ -1689,8 +1684,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
* bits this time around.
*/
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
- ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
- intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
+ ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
if (gt_iir)
intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
@@ -1775,8 +1769,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
* bits this time around.
*/
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
- ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
- intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
+ ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
@@ -1981,8 +1974,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (ddi_hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
- intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
ddi_hotplug_trigger, dig_hotplug_reg,
@@ -1993,8 +1985,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (tc_hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
- intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
tc_hotplug_trigger, dig_hotplug_reg,
@@ -2019,8 +2010,7 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
- intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
@@ -2031,8 +2021,7 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (hotplug2_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
- intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug2_trigger, dig_hotplug_reg,
@@ -2052,8 +2041,7 @@ static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
- intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
@@ -2232,8 +2220,7 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
- intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
@@ -2252,8 +2239,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (trigger_tc) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
- intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tc, dig_hotplug_reg,
@@ -2264,8 +2250,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (trigger_tbt) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
- intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tbt, dig_hotplug_reg,
@@ -2355,8 +2340,7 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
else
iir_reg = EDP_PSR_IIR;
- psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
- intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
+ psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);
if (psr_iir)
found = true;
@@ -2426,8 +2410,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
/* clear TE in dsi IIR */
port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
- tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
- intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
+ tmp = intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
}
static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
@@ -2884,7 +2867,6 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
enum port port;
- u32 tmp;
if (!(intel_crtc->mode_flags &
(I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
@@ -2896,16 +2878,10 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
else
port = PORT_A;
- tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port));
- if (enable)
- tmp &= ~DSI_TE_EVENT;
- else
- tmp |= DSI_TE_EVENT;
-
- intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp);
+ intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
+ enable ? 0 : DSI_TE_EVENT);
- tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
- intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
+ intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
return true;
}
@@ -3020,7 +2996,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
- intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
+ intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
i9xx_pipestat_irq_reset(dev_priv);
@@ -3118,7 +3094,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- gen8_master_intr_disable(dev_priv->uncore.regs);
+ gen8_master_intr_disable(uncore->regs);
gen8_gt_irq_reset(to_gt(dev_priv));
gen8_display_irq_reset(dev_priv);
@@ -3250,7 +3226,7 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
+ intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
gen8_gt_irq_reset(to_gt(dev_priv));
@@ -3290,23 +3266,20 @@ static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug;
-
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec).
* The pulse duration bits are reserved on LPT+.
*/
- hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
- hotplug &= ~(PORTA_HOTPLUG_ENABLE |
- PORTB_HOTPLUG_ENABLE |
- PORTC_HOTPLUG_ENABLE |
- PORTD_HOTPLUG_ENABLE |
- PORTB_PULSE_DURATION_MASK |
- PORTC_PULSE_DURATION_MASK |
- PORTD_PULSE_DURATION_MASK);
- hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
- intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
+ intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
+ PORTA_HOTPLUG_ENABLE |
+ PORTB_HOTPLUG_ENABLE |
+ PORTC_HOTPLUG_ENABLE |
+ PORTD_HOTPLUG_ENABLE |
+ PORTB_PULSE_DURATION_MASK |
+ PORTC_PULSE_DURATION_MASK |
+ PORTD_PULSE_DURATION_MASK,
+ intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables));
}
static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3353,30 +3326,24 @@ static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug;
-
- hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
- hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
- SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
- SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
- SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D));
- hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
- intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug);
+ intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI,
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D),
+ intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables));
}
static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug;
-
- hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
- hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
- ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
- ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
- ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
- ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
- ICP_TC_HPD_ENABLE(HPD_PORT_TC6));
- hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
- intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug);
+ intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC,
+ ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
+ ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
+ ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
+ ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
+ ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
+ ICP_TC_HPD_ENABLE(HPD_PORT_TC6),
+ intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables));
}
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3411,62 +3378,54 @@ static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
}
}
-static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void dg1_hpd_invert(struct drm_i915_private *i915)
{
- u32 val;
-
- val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
- val |= (INVERT_DDIA_HPD |
- INVERT_DDIB_HPD |
- INVERT_DDIC_HPD |
- INVERT_DDID_HPD);
- intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
+ u32 val = (INVERT_DDIA_HPD |
+ INVERT_DDIB_HPD |
+ INVERT_DDIC_HPD |
+ INVERT_DDID_HPD);
+ intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val);
+}
+static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ dg1_hpd_invert(dev_priv);
icp_hpd_irq_setup(dev_priv);
}
static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug;
-
- hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
- hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
- hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
- intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug);
+ intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL,
+ GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
+ GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
+ GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
+ GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
+ GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
+ GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
+ intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
}
static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug;
-
- hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
- hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
- hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
- intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug);
+ intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL,
+ GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
+ GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
+ GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
+ GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
+ GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
+ GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
+ intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
}
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- u32 val;
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
- val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
- val &= ~hotplug_irqs;
- val |= ~enabled_irqs & hotplug_irqs;
- intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val);
+ intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs,
+ ~enabled_irqs & hotplug_irqs);
intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
gen11_tc_hpd_detection_setup(dev_priv);
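The IMR update above warrants a closer look: a set bit in GEN11_DE_HPD_IMR masks that interrupt, so the rmw first clears every hotplug bit and then re-masks only the disabled ones. An illustrative bit pattern:

    /* illustrative: hotplug_irqs = 0b1111, enabled_irqs = 0b0101       */
    /* clear = 0b1111            -> start with all hotplug bits unmasked */
    /* set   = ~0b0101 & 0b1111 == 0b1010 -> re-mask the disabled pair   */

leaving exactly the enabled irqs unmasked.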
@@ -3506,29 +3465,22 @@ static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
- u32 val, hotplug;
-
/* Display WA #1179 WaHardHangonHotPlug: cnp */
if (HAS_PCH_CNP(dev_priv)) {
- val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
- val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
- val |= CHASSIS_CLK_REQ_DURATION(0xf);
- intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
+ intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
+ CHASSIS_CLK_REQ_DURATION(0xf));
}
/* Enable digital hotplug on the PCH */
- hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
- hotplug &= ~(PORTA_HOTPLUG_ENABLE |
- PORTB_HOTPLUG_ENABLE |
- PORTC_HOTPLUG_ENABLE |
- PORTD_HOTPLUG_ENABLE);
- hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
- intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
+ intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
+ PORTA_HOTPLUG_ENABLE |
+ PORTB_HOTPLUG_ENABLE |
+ PORTC_HOTPLUG_ENABLE |
+ PORTD_HOTPLUG_ENABLE,
+ intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables));
- hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
- hotplug &= ~PORTE_HOTPLUG_ENABLE;
- hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
- intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug);
+ intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, PORTE_HOTPLUG_ENABLE,
+ intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables));
}
static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3560,18 +3512,14 @@ static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug;
-
/*
* Enable digital hotplug on the CPU, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec).
* The pulse duration bits are reserved on HSW+.
*/
- hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
- hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
- DIGITAL_PORTA_PULSE_DURATION_MASK);
- hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
- intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
+ intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
+ DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_MASK,
+ intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables));
}
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3619,17 +3567,12 @@ static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug;
-
- hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
- hotplug &= ~(PORTA_HOTPLUG_ENABLE |
- PORTB_HOTPLUG_ENABLE |
- PORTC_HOTPLUG_ENABLE |
- BXT_DDIA_HPD_INVERT |
- BXT_DDIB_HPD_INVERT |
- BXT_DDIC_HPD_INVERT);
- hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
- intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
+ intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
+ PORTA_HOTPLUG_ENABLE |
+ PORTB_HOTPLUG_ENABLE |
+ PORTC_HOTPLUG_ENABLE |
+ BXT_DDI_HPD_INVERT_MASK,
+ intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables));
}
static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -4009,9 +3952,7 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
{
u32 emr;
- *eir = intel_uncore_read(&dev_priv->uncore, EIR);
-
- intel_uncore_write(&dev_priv->uncore, EIR, *eir);
+ *eir = intel_uncore_rmw(&dev_priv->uncore, EIR, 0, 0);
*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
if (*eir_stuck == 0)
@@ -4027,8 +3968,7 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
* (or by a GPU reset) so we mask any bit that
* remains set.
*/
- emr = intel_uncore_read(&dev_priv->uncore, EMR);
- intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
+ emr = intel_uncore_rmw(&dev_priv->uncore, EMR, ~0, 0xffffffff);
intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}
@@ -4095,7 +4035,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv)
if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
+ intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_STAT, 0, 0);
}
i9xx_pipestat_irq_reset(dev_priv);
@@ -4109,8 +4049,8 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
- intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
- I915_ERROR_MEMORY_REFRESH));
+ intel_uncore_write(uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH));
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
@@ -4205,7 +4145,7 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
+ intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
i9xx_pipestat_irq_reset(dev_priv);
@@ -4232,7 +4172,7 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
error_mask = ~(I915_ERROR_PAGE_TABLE |
I915_ERROR_MEMORY_REFRESH);
}
- intel_uncore_write(&dev_priv->uncore, EMR, error_mask);
+ intel_uncore_write(uncore, EMR, error_mask);
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
@@ -4383,7 +4323,6 @@ void intel_hpd_irq_setup(struct drm_i915_private *i915)
*/
void intel_irq_init(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
int i;
INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
@@ -4399,9 +4338,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
intel_hpd_init_pins(dev_priv);
- intel_hpd_init_work(dev_priv);
+ intel_hpd_init_early(dev_priv);
- dev->vblank_disable_immediate = true;
+ dev_priv->drm.vblank_disable_immediate = true;
/* Most platforms treat the display irq block as an always-on
* power domain. vlv/chv can disable it at runtime and need
@@ -4413,15 +4352,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->display_irqs_enabled = false;
- dev_priv->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
- /* If we have MST support, we want to avoid doing short HPD IRQ storm
- * detection, as short HPD storms will occur as a natural part of
- * sideband messaging with MST.
- * On older platforms however, IRQ storms can occur with both long and
- * short pulses, as seen on some G4x systems.
- */
- dev_priv->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
-
if (HAS_GMCH(dev_priv)) {
if (I915_HAS_HOTPLUG(dev_priv))
dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 6fc475a5db61..d1e4d528cb17 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -29,6 +29,18 @@
#include "i915_params.h"
#include "i915_drv.h"
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
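DECLARE_DYNDBG_CLASSMAP ties the drm.debug bit names to dynamic-debug classes so they can be toggled per module at runtime. A hedged usage note, assuming the standard dynamic-debug control interface:

    /* e.g. enable KMS-class debug output at runtime (assumed interface):
     *   echo "class DRM_UT_KMS +p" > /sys/kernel/debug/dynamic_debug/control
     */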
#define i915_param_named(name, T, perm, desc) \
module_param_named(name, i915_modparams.name, T, perm); \
MODULE_PARM_DESC(name, desc)
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6b22fb506aa9..211913be40ce 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -1066,6 +1066,7 @@ static const struct intel_device_info xehpsdv_info = {
.has_guc_deprivilege = 1, \
.has_heci_pxp = 1, \
.has_media_ratio_mode = 1, \
+ .display.has_cdclk_squash = 1, \
.__runtime.platform_engine_mask = \
BIT(RCS0) | BIT(BCS0) | \
BIT(VECS0) | BIT(VECS1) | \
@@ -1144,6 +1145,7 @@ static const struct intel_device_info mtl_info = {
.display.has_modular_fia = 1,
.extra_gt_list = xelpmp_extra_gt,
.has_flat_ccs = 0,
+ .has_gmd_id = 1,
.has_mslice_steering = 0,
.has_snoop = 1,
.__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ab0823e9e6c7..1c0da50c0dc7 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2168,10 +2168,18 @@
#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A)
#define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? \
0 : ((trans) - TRANSCODER_A + 1) * 8)
-#define EDP_PSR_TRANS_MASK(trans) (0x7 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_ERROR(trans) (0x4 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_POST_EXIT(trans) (0x2 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_PRE_ENTRY(trans) (0x1 << _EDP_PSR_TRANS_SHIFT(trans))
+#define TGL_PSR_MASK REG_GENMASK(2, 0)
+#define TGL_PSR_ERROR REG_BIT(2)
+#define TGL_PSR_POST_EXIT REG_BIT(1)
+#define TGL_PSR_PRE_ENTRY REG_BIT(0)
+#define EDP_PSR_MASK(trans) (TGL_PSR_MASK << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_ERROR(trans) (TGL_PSR_ERROR << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_POST_EXIT(trans) (TGL_PSR_POST_EXIT << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_PRE_ENTRY(trans) (TGL_PSR_PRE_ENTRY << \
+ _EDP_PSR_TRANS_SHIFT(trans))
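A worked example of the per-transcoder shift, using only the definitions above: TRANSCODER_EDP gets shift 0, while TRANSCODER_B gets (TRANSCODER_B - TRANSCODER_A + 1) * 8 == 16, so:

    /* illustrative expansions */
    /* EDP_PSR_ERROR(TRANSCODER_EDP) == TGL_PSR_ERROR << 0  == BIT(2)  */
    /* EDP_PSR_ERROR(TRANSCODER_B)   == TGL_PSR_ERROR << 16 == BIT(18) */

so on platforms where the PSR registers are per-transcoder, the unshifted TGL_PSR_* values apply directly.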
#define _SRD_AUX_DATA_A 0x60814
#define _SRD_AUX_DATA_EDP 0x6f814
@@ -5850,6 +5858,11 @@
#define ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz (1 << 29)
#define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29)
+#define GMD_ID_DISPLAY _MMIO(0x510a0)
+#define GMD_ID_ARCH_MASK REG_GENMASK(31, 22)
+#define GMD_ID_RELEASE_MASK REG_GENMASK(21, 14)
+#define GMD_ID_STEP REG_GENMASK(5, 0)
+
/*GEN11 chicken */
#define _PIPEA_CHICKEN 0x70038
#define _PIPEB_CHICKEN 0x71038
@@ -7422,182 +7435,6 @@ enum skl_power_gate {
_ADLS_DPLL4_CFGCR1, \
_ADLS_DPLL3_CFGCR1)
-#define _DKL_PHY1_BASE 0x168000
-#define _DKL_PHY2_BASE 0x169000
-#define _DKL_PHY3_BASE 0x16A000
-#define _DKL_PHY4_BASE 0x16B000
-#define _DKL_PHY5_BASE 0x16C000
-#define _DKL_PHY6_BASE 0x16D000
-
-/* DEKEL PHY MMIO Address = Phy base + (internal address & ~index_mask) */
-#define _DKL_PCS_DW5 0x14
-#define DKL_PCS_DW5(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_PCS_DW5)
-#define DKL_PCS_DW5_CORE_SOFTRESET REG_BIT(11)
-
-#define _DKL_PLL_DIV0 0x200
-#define DKL_PLL_DIV0_AFC_STARTUP_MASK REG_GENMASK(27, 25)
-#define DKL_PLL_DIV0_AFC_STARTUP(val) REG_FIELD_PREP(DKL_PLL_DIV0_AFC_STARTUP_MASK, (val))
-#define DKL_PLL_DIV0_INTEG_COEFF(x) ((x) << 16)
-#define DKL_PLL_DIV0_INTEG_COEFF_MASK (0x1F << 16)
-#define DKL_PLL_DIV0_PROP_COEFF(x) ((x) << 12)
-#define DKL_PLL_DIV0_PROP_COEFF_MASK (0xF << 12)
-#define DKL_PLL_DIV0_FBPREDIV_SHIFT (8)
-#define DKL_PLL_DIV0_FBPREDIV(x) ((x) << DKL_PLL_DIV0_FBPREDIV_SHIFT)
-#define DKL_PLL_DIV0_FBPREDIV_MASK (0xF << DKL_PLL_DIV0_FBPREDIV_SHIFT)
-#define DKL_PLL_DIV0_FBDIV_INT(x) ((x) << 0)
-#define DKL_PLL_DIV0_FBDIV_INT_MASK (0xFF << 0)
-#define DKL_PLL_DIV0_MASK (DKL_PLL_DIV0_INTEG_COEFF_MASK | \
- DKL_PLL_DIV0_PROP_COEFF_MASK | \
- DKL_PLL_DIV0_FBPREDIV_MASK | \
- DKL_PLL_DIV0_FBDIV_INT_MASK)
-#define DKL_PLL_DIV0(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_PLL_DIV0)
-
-#define _DKL_PLL_DIV1 0x204
-#define DKL_PLL_DIV1_IREF_TRIM(x) ((x) << 16)
-#define DKL_PLL_DIV1_IREF_TRIM_MASK (0x1F << 16)
-#define DKL_PLL_DIV1_TDC_TARGET_CNT(x) ((x) << 0)
-#define DKL_PLL_DIV1_TDC_TARGET_CNT_MASK (0xFF << 0)
-#define DKL_PLL_DIV1(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_PLL_DIV1)
-
-#define _DKL_PLL_SSC 0x210
-#define DKL_PLL_SSC_IREF_NDIV_RATIO(x) ((x) << 29)
-#define DKL_PLL_SSC_IREF_NDIV_RATIO_MASK (0x7 << 29)
-#define DKL_PLL_SSC_STEP_LEN(x) ((x) << 16)
-#define DKL_PLL_SSC_STEP_LEN_MASK (0xFF << 16)
-#define DKL_PLL_SSC_STEP_NUM(x) ((x) << 11)
-#define DKL_PLL_SSC_STEP_NUM_MASK (0x7 << 11)
-#define DKL_PLL_SSC_EN (1 << 9)
-#define DKL_PLL_SSC(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_PLL_SSC)
-
-#define _DKL_PLL_BIAS 0x214
-#define DKL_PLL_BIAS_FRAC_EN_H (1 << 30)
-#define DKL_PLL_BIAS_FBDIV_SHIFT (8)
-#define DKL_PLL_BIAS_FBDIV_FRAC(x) ((x) << DKL_PLL_BIAS_FBDIV_SHIFT)
-#define DKL_PLL_BIAS_FBDIV_FRAC_MASK (0x3FFFFF << DKL_PLL_BIAS_FBDIV_SHIFT)
-#define DKL_PLL_BIAS(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_PLL_BIAS)
-
-#define _DKL_PLL_TDC_COLDST_BIAS 0x218
-#define DKL_PLL_TDC_SSC_STEP_SIZE(x) ((x) << 8)
-#define DKL_PLL_TDC_SSC_STEP_SIZE_MASK (0xFF << 8)
-#define DKL_PLL_TDC_FEED_FWD_GAIN(x) ((x) << 0)
-#define DKL_PLL_TDC_FEED_FWD_GAIN_MASK (0xFF << 0)
-#define DKL_PLL_TDC_COLDST_BIAS(tc_port) _MMIO(_PORT(tc_port, \
- _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_PLL_TDC_COLDST_BIAS)
-
-#define _DKL_REFCLKIN_CTL 0x12C
-/* Bits are the same as MG_REFCLKIN_CTL */
-#define DKL_REFCLKIN_CTL(tc_port) _MMIO(_PORT(tc_port, \
- _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_REFCLKIN_CTL)
-
-#define _DKL_CLKTOP2_HSCLKCTL 0xD4
-/* Bits are the same as MG_CLKTOP2_HSCLKCTL */
-#define DKL_CLKTOP2_HSCLKCTL(tc_port) _MMIO(_PORT(tc_port, \
- _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_CLKTOP2_HSCLKCTL)
-
-#define _DKL_CLKTOP2_CORECLKCTL1 0xD8
-/* Bits are the same as MG_CLKTOP2_CORECLKCTL1 */
-#define DKL_CLKTOP2_CORECLKCTL1(tc_port) _MMIO(_PORT(tc_port, \
- _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_CLKTOP2_CORECLKCTL1)
-
-#define _DKL_TX_DPCNTL0 0x2C0
-#define DKL_TX_PRESHOOT_COEFF(x) ((x) << 13)
-#define DKL_TX_PRESHOOT_COEFF_MASK (0x1f << 13)
-#define DKL_TX_DE_EMPHASIS_COEFF(x) ((x) << 8)
-#define DKL_TX_DE_EMPAHSIS_COEFF_MASK (0x1f << 8)
-#define DKL_TX_VSWING_CONTROL(x) ((x) << 0)
-#define DKL_TX_VSWING_CONTROL_MASK (0x7 << 0)
-#define DKL_TX_DPCNTL0(tc_port) _MMIO(_PORT(tc_port, \
- _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_TX_DPCNTL0)
-
-#define _DKL_TX_DPCNTL1 0x2C4
-/* Bits are the same as DKL_TX_DPCNTRL0 */
-#define DKL_TX_DPCNTL1(tc_port) _MMIO(_PORT(tc_port, \
- _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_TX_DPCNTL1)
-
-#define _DKL_TX_DPCNTL2 0x2C8
-#define DKL_TX_DP20BITMODE REG_BIT(2)
-#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK REG_GENMASK(4, 3)
-#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK, (val))
-#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK REG_GENMASK(6, 5)
-#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, (val))
-#define DKL_TX_DPCNTL2(tc_port) _MMIO(_PORT(tc_port, \
- _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_TX_DPCNTL2)
-
-#define _DKL_TX_FW_CALIB 0x2F8
-#define DKL_TX_CFG_DISABLE_WAIT_INIT (1 << 7)
-#define DKL_TX_FW_CALIB(tc_port) _MMIO(_PORT(tc_port, \
- _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_TX_FW_CALIB)
-
-#define _DKL_TX_PMD_LANE_SUS 0xD00
-#define DKL_TX_PMD_LANE_SUS(tc_port) _MMIO(_PORT(tc_port, \
- _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_TX_PMD_LANE_SUS)
-
-#define _DKL_TX_DW17 0xDC4
-#define DKL_TX_DW17(tc_port) _MMIO(_PORT(tc_port, \
- _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_TX_DW17)
-
-#define _DKL_TX_DW18 0xDC8
-#define DKL_TX_DW18(tc_port) _MMIO(_PORT(tc_port, \
- _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_TX_DW18)
-
-#define _DKL_DP_MODE 0xA0
-#define DKL_DP_MODE(tc_port) _MMIO(_PORT(tc_port, \
- _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_DP_MODE)
-
-#define _DKL_CMN_UC_DW27 0x36C
-#define DKL_CMN_UC_DW27_UC_HEALTH (0x1 << 15)
-#define DKL_CMN_UC_DW_27(tc_port) _MMIO(_PORT(tc_port, \
- _DKL_PHY1_BASE, \
- _DKL_PHY2_BASE) + \
- _DKL_CMN_UC_DW27)
-
-/*
- * Each Dekel PHY is addressed through a 4KB aperture. Each PHY has more than
- * 4KB of register space, so a separate index is programmed in HIP_INDEX_REG0
- * or HIP_INDEX_REG1, based on the port number, to set the upper 2 address
- * bits that point the 4KB window into the full PHY register space.
- */
-#define _HIP_INDEX_REG0 0x1010A0
-#define _HIP_INDEX_REG1 0x1010A4
-#define HIP_INDEX_REG(tc_port) _MMIO((tc_port) < 4 ? _HIP_INDEX_REG0 \
- : _HIP_INDEX_REG1)
-#define _HIP_INDEX_SHIFT(tc_port) (8 * ((tc_port) % 4))
-#define HIP_INDEX_VAL(tc_port, val) ((val) << _HIP_INDEX_SHIFT(tc_port))
-
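The removed comment above describes the windowed addressing scheme. A hedged sketch of what an indexed access looked like under it; dkl_phy_read() is a hypothetical helper, and intel_de_write()/intel_de_read() are assumed available to the caller:

    static u32 dkl_phy_read(struct drm_i915_private *i915,
                            enum tc_port tc_port, i915_reg_t reg)
    {
            /* point the 4KB window at the bank holding 'reg';
             * bank 0x2 here is purely illustrative */
            intel_de_write(i915, HIP_INDEX_REG(tc_port),
                           HIP_INDEX_VAL(tc_port, 0x2));
            return intel_de_read(i915, reg);
    }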
/* BXT display engine PLL */
#define BXT_DE_PLL_CTL _MMIO(0x6d000)
#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
@@ -8355,6 +8192,11 @@ enum skl_power_gate {
#define GEN12_CULLBIT2 _MMIO(0x7030)
#define GEN12_STATE_ACK_DEBUG _MMIO(0x20BC)
+#define _MTL_CLKGATE_DIS_TRANS_A 0x604E8
+#define _MTL_CLKGATE_DIS_TRANS_B 0x614E8
+#define MTL_CLKGATE_DIS_TRANS(trans) _MMIO_TRANS2(trans, _MTL_CLKGATE_DIS_TRANS_A)
+#define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7)
+
#define MTL_LATENCY_LP0_LP1 _MMIO(0x45780)
#define MTL_LATENCY_LP2_LP3 _MMIO(0x45784)
#define MTL_LATENCY_LP4_LP5 _MMIO(0x45788)
@@ -8378,4 +8220,6 @@ enum skl_power_gate {
#define MTL_TRAS_MASK REG_GENMASK(16, 8)
#define MTL_TRDPRE_MASK REG_GENMASK(7, 0)
+#define MTL_MEDIA_GSI_BASE 0x380000
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c
index dcc081874ec8..114e5e39aa72 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.c
+++ b/drivers/gpu/drm/i915/i915_scatterlist.c
@@ -158,7 +158,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
u32 page_alignment)
{
struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
- const u64 size = res->num_pages << PAGE_SHIFT;
+ const u64 size = res->size;
const u32 max_segment = round_down(UINT_MAX, page_alignment);
struct drm_buddy *mm = bman_res->mm;
struct list_head *blocks = &bman_res->blocks;
@@ -177,7 +177,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
i915_refct_sgt_init(rsgt, size);
st = &rsgt->table;
- if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) {
+ if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL)) {
i915_refct_sgt_put(rsgt);
return ERR_PTR(-ENOMEM);
}
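These conversions lean on PFN_UP() to recover a page count from the byte size. For reference, its definition in <linux/pfn.h> is a round-up division by the page size:

    #define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    /* e.g. res->size == 3 * PAGE_SIZE + 1  ->  PFN_UP(res->size) == 4,
     * matching what res->num_pages used to report */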
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 6fc0d1b89690..cc2a8821d22a 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -571,7 +571,6 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct dma_resv *resv,
- const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
gfp_t gfp)
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 619fc5a22f0c..f752bfc7c6e1 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -91,7 +91,6 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct dma_resv *resv,
- const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
gfp_t gfp);
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index e19452f0e100..7e611476c7a4 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -62,8 +62,8 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
if (place->fpfn || lpfn != man->size)
bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;
- GEM_BUG_ON(!bman_res->base.num_pages);
- size = bman_res->base.num_pages << PAGE_SHIFT;
+ GEM_BUG_ON(!bman_res->base.size);
+ size = bman_res->base.size;
min_page_size = bman->default_page_size;
if (bo->page_alignment)
@@ -72,7 +72,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
GEM_BUG_ON(min_page_size < mm->chunk_size);
GEM_BUG_ON(!IS_ALIGNED(size, min_page_size));
- if (place->fpfn + bman_res->base.num_pages != place->lpfn &&
+ if (place->fpfn + PFN_UP(bman_res->base.size) != place->lpfn &&
place->flags & TTM_PL_FLAG_CONTIGUOUS) {
unsigned long pages;
@@ -108,7 +108,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
goto err_free_blocks;
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
- u64 original_size = (u64)bman_res->base.num_pages << PAGE_SHIFT;
+ u64 original_size = (u64)bman_res->base.size;
drm_buddy_block_trim(mm,
original_size,
@@ -116,7 +116,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
}
if (lpfn <= bman->visible_size) {
- bman_res->used_visible_size = bman_res->base.num_pages;
+ bman_res->used_visible_size = PFN_UP(bman_res->base.size);
} else {
struct drm_buddy_block *block;
@@ -228,7 +228,7 @@ static bool i915_ttm_buddy_man_compatible(struct ttm_resource_manager *man,
if (!place->fpfn &&
place->lpfn == i915_ttm_buddy_man_visible_size(man))
- return bman_res->used_visible_size == res->num_pages;
+ return bman_res->used_visible_size == PFN_UP(res->size);
/* Check each drm buddy block individually */
list_for_each_entry(block, &bman_res->blocks, link) {
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 20575eb77ea7..1dc1fb29a776 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -29,6 +29,7 @@
#include "display/intel_cdclk.h"
#include "display/intel_de.h"
+#include "gt/intel_gt_regs.h"
#include "intel_device_info.h"
#include "i915_drv.h"
#include "i915_utils.h"
@@ -231,7 +232,7 @@ static bool find_devid(u16 id, const u16 *p, unsigned int num)
return false;
}
-void intel_device_info_subplatform_init(struct drm_i915_private *i915)
+static void intel_device_info_subplatform_init(struct drm_i915_private *i915)
{
const struct intel_device_info *info = INTEL_INFO(i915);
const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
@@ -288,6 +289,78 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915)
RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
}
+static void ip_ver_read(struct drm_i915_private *i915, u32 offset, struct intel_ip_version *ip)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ void __iomem *addr;
+ u32 val;
+ u8 expected_ver = ip->ver;
+ u8 expected_rel = ip->rel;
+
+ addr = pci_iomap_range(pdev, 0, offset, sizeof(u32));
+ if (drm_WARN_ON(&i915->drm, !addr))
+ return;
+
+ val = ioread32(addr);
+ pci_iounmap(pdev, addr);
+
+ ip->ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val);
+ ip->rel = REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
+ ip->step = REG_FIELD_GET(GMD_ID_STEP, val);
+
+ /* Sanity check against expected versions from device info */
+ if (IP_VER(ip->ver, ip->rel) < IP_VER(expected_ver, expected_rel))
+ drm_dbg(&i915->drm,
+ "Hardware reports GMD IP version %u.%u (REG[0x%x] = 0x%08x) but minimum expected is %u.%u\n",
+ ip->ver, ip->rel, offset, val, expected_ver, expected_rel);
+}
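A worked decode against the GMD_ID field layout added to i915_reg.h, using illustrative register contents rather than real hardware data:

    /* val = (14 << 22) | (0 << 14) | 1   (illustrative value only)   */
    /* REG_FIELD_GET(GMD_ID_ARCH_MASK, val)    == 14 -> ip->ver       */
    /* REG_FIELD_GET(GMD_ID_RELEASE_MASK, val) ==  0 -> ip->rel       */
    /* REG_FIELD_GET(GMD_ID_STEP, val)         ==  1 -> ip->step      */

i.e. an IP version of 14.0 with hardware stepping index 1.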
+
+/*
+ * Set up the graphics version for the current device. This must be done before
+ * any code that performs checks on GRAPHICS_VER or DISPLAY_VER, so this
+ * function should be called very early in the driver initialization sequence.
+ *
+ * Regular MMIO access is not yet set up at the point this function is called, so
+ * we peek at the appropriate MMIO offset directly. The GMD_ID register is
+ * part of an 'always on' power well by design, so we don't need to worry about
+ * forcewake while reading it.
+ */
+static void intel_ipver_early_init(struct drm_i915_private *i915)
+{
+ struct intel_runtime_info *runtime = RUNTIME_INFO(i915);
+
+ if (!HAS_GMD_ID(i915)) {
+ drm_WARN_ON(&i915->drm, RUNTIME_INFO(i915)->graphics.ip.ver > 12);
+ /*
+ * On older platforms, graphics and media share the same ip
+ * version and release.
+ */
+ RUNTIME_INFO(i915)->media.ip =
+ RUNTIME_INFO(i915)->graphics.ip;
+ return;
+ }
+
+ ip_ver_read(i915, i915_mmio_reg_offset(GMD_ID_GRAPHICS),
+ &runtime->graphics.ip);
+ ip_ver_read(i915, i915_mmio_reg_offset(GMD_ID_DISPLAY),
+ &runtime->display.ip);
+ ip_ver_read(i915, i915_mmio_reg_offset(GMD_ID_MEDIA),
+ &runtime->media.ip);
+}
+
+/**
+ * intel_device_info_runtime_init_early - initialize early runtime info
+ * @i915: the i915 device
+ *
+ * Determine early intel_device_info fields at runtime. This function needs
+ * to be called before MMIO has been set up.
+ */
+void intel_device_info_runtime_init_early(struct drm_i915_private *i915)
+{
+ intel_ipver_early_init(i915);
+ intel_device_info_subplatform_init(i915);
+}
+
/**
* intel_device_info_runtime_init - initialize runtime info
* @dev_priv: the i915 device
@@ -442,6 +515,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
runtime->has_dmc = false;
runtime->has_dsc = false;
}
+
+ /* Disable nuclear pageflip by default on pre-g4x */
+ if (!dev_priv->params.nuclear_pageflip &&
+ DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
}
void intel_driver_caps_print(const struct intel_driver_caps *caps,
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 4ee6074955ef..d588e5fd2eea 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -152,6 +152,7 @@ enum intel_ppgtt_type {
func(has_4tile); \
func(has_flat_ccs); \
func(has_global_mocs); \
+ func(has_gmd_id); \
func(has_gt_uc); \
func(has_heci_pxp); \
func(has_heci_gscfi); \
@@ -181,6 +182,7 @@ enum intel_ppgtt_type {
/* Keep in alphabetical order */ \
func(cursor_needs_physical); \
func(has_cdclk_crawl); \
+ func(has_cdclk_squash); \
func(has_ddi); \
func(has_dp_mst); \
func(has_dsb); \
@@ -196,20 +198,25 @@ enum intel_ppgtt_type {
func(overlay_needs_physical); \
func(supports_tv);
-struct ip_version {
+struct intel_ip_version {
u8 ver;
u8 rel;
+ u8 step;
};
struct intel_runtime_info {
+ /*
+ * Single "graphics" IP version that represents
+ * render, compute and copy behavior.
+ */
struct {
- struct ip_version ip;
+ struct intel_ip_version ip;
} graphics;
struct {
- struct ip_version ip;
+ struct intel_ip_version ip;
} media;
struct {
- struct ip_version ip;
+ struct intel_ip_version ip;
} display;
/*
@@ -309,7 +316,7 @@ struct intel_driver_caps {
const char *intel_platform_name(enum intel_platform platform);
-void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
+void intel_device_info_runtime_init_early(struct drm_i915_private *dev_priv);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_print(const struct intel_device_info *info,
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 51d5c3c804d5..638b77d64bf4 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -1077,7 +1077,8 @@ static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(GEN8_HDC_CHICKEN1);
MMIO_D(GEN9_WM_CHICKEN3);
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
MMIO_D(GAMT_CHKN_BIT_REG);
if (!IS_BROXTON(dev_priv))
MMIO_D(GEN9_CTX_PREEMPT_REG);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f3f15c2d5bb7..ee34e2785636 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -887,19 +887,14 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv)
wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm,
pnv_display_wm.fifo_size,
4, latency->cursor_sr);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
- reg &= ~DSPFW_CURSOR_SR_MASK;
- reg |= FW_WM(wm, CURSOR_SR);
- intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
+ intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK,
+ FW_WM(wm, CURSOR_SR));
/* Display HPLL off SR */
wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
cpp, latency->display_hpll_disable);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
- reg &= ~DSPFW_HPLL_SR_MASK;
- reg |= FW_WM(wm, HPLL_SR);
- intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
+ intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
/* cursor HPLL off SR */
wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm,
@@ -1329,34 +1324,14 @@ static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
return true;
}
-static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int _g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
const struct g4x_pipe_wm *raw;
- const struct intel_plane_state *old_plane_state;
- const struct intel_plane_state *new_plane_state;
- struct intel_plane *plane;
enum plane_id plane_id;
- int i, level;
- unsigned int dirty = 0;
-
- for_each_oldnew_intel_plane_in_state(state, plane,
- old_plane_state,
- new_plane_state, i) {
- if (new_plane_state->hw.crtc != &crtc->base &&
- old_plane_state->hw.crtc != &crtc->base)
- continue;
-
- if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
- dirty |= BIT(plane->id);
- }
-
- if (!dirty)
- return 0;
+ int level;
level = G4X_WM_LEVEL_NORMAL;
if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
@@ -1409,6 +1384,34 @@ static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
return 0;
}
+static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *old_plane_state;
+ const struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ unsigned int dirty = 0;
+ int i;
+
+ for_each_oldnew_intel_plane_in_state(state, plane,
+ old_plane_state,
+ new_plane_state, i) {
+ if (new_plane_state->hw.crtc != &crtc->base &&
+ old_plane_state->hw.crtc != &crtc->base)
+ continue;
+
+ if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
+ dirty |= BIT(plane->id);
+ }
+
+ if (!dirty)
+ return 0;
+
+ return _g4x_compute_pipe_wm(crtc_state);
+}
+
static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -1849,64 +1852,17 @@ static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
-static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
int num_active_planes = hweight8(active_planes);
- bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
- const struct intel_plane_state *old_plane_state;
- const struct intel_plane_state *new_plane_state;
- struct intel_plane *plane;
enum plane_id plane_id;
- int level, ret, i;
- unsigned int dirty = 0;
-
- for_each_oldnew_intel_plane_in_state(state, plane,
- old_plane_state,
- new_plane_state, i) {
- if (new_plane_state->hw.crtc != &crtc->base &&
- old_plane_state->hw.crtc != &crtc->base)
- continue;
-
- if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
- dirty |= BIT(plane->id);
- }
-
- /*
- * DSPARB registers may have been reset due to the
- * power well being turned off. Make sure we restore
- * them to a consistent state even if no primary/sprite
- * planes are initially active.
- */
- if (needs_modeset)
- crtc_state->fifo_changed = true;
-
- if (!dirty)
- return 0;
-
- /* cursor changes don't warrant a FIFO recompute */
- if (dirty & ~BIT(PLANE_CURSOR)) {
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- const struct vlv_fifo_state *old_fifo_state =
- &old_crtc_state->wm.vlv.fifo_state;
-
- ret = vlv_compute_fifo(crtc_state);
- if (ret)
- return ret;
-
- if (needs_modeset ||
- memcmp(old_fifo_state, fifo_state,
- sizeof(*fifo_state)) != 0)
- crtc_state->fifo_changed = true;
- }
+ int level;
/* initially allow all levels */
wm_state->num_levels = intel_wm_num_levels(dev_priv);
@@ -1953,6 +1909,67 @@ static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
return 0;
}
+static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
+ const struct intel_plane_state *old_plane_state;
+ const struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ unsigned int dirty = 0;
+ int i;
+
+ for_each_oldnew_intel_plane_in_state(state, plane,
+ old_plane_state,
+ new_plane_state, i) {
+ if (new_plane_state->hw.crtc != &crtc->base &&
+ old_plane_state->hw.crtc != &crtc->base)
+ continue;
+
+ if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
+ dirty |= BIT(plane->id);
+ }
+
+ /*
+ * DSPARB registers may have been reset due to the
+ * power well being turned off. Make sure we restore
+ * them to a consistent state even if no primary/sprite
+ * planes are initially active. We also force a FIFO
+ * recomputation so that we are sure to sanitize the
+ * FIFO setting we took over from the BIOS even if there
+ * are no active planes on the crtc.
+ */
+ if (needs_modeset)
+ dirty = ~0;
+
+ if (!dirty)
+ return 0;
+
+ /* cursor changes don't warrant a FIFO recompute */
+ if (dirty & ~BIT(PLANE_CURSOR)) {
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct vlv_fifo_state *old_fifo_state =
+ &old_crtc_state->wm.vlv.fifo_state;
+ const struct vlv_fifo_state *new_fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+ int ret;
+
+ ret = vlv_compute_fifo(crtc_state);
+ if (ret)
+ return ret;
+
+ if (needs_modeset ||
+ memcmp(old_fifo_state, new_fifo_state,
+ sizeof(*new_fifo_state)) != 0)
+ crtc_state->fifo_changed = true;
+ }
+
+ return _vlv_compute_pipe_wm(crtc_state);
+}
+
#define VLV_FIFO(plane, value) \
(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
@@ -3450,7 +3467,6 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
{
struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
unsigned int dirty;
- u32 val;
dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
if (!dirty)
@@ -3466,32 +3482,20 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
if (dirty & WM_DIRTY_DDB) {
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- val = intel_uncore_read(&dev_priv->uncore, WM_MISC);
- if (results->partitioning == INTEL_DDB_PART_1_2)
- val &= ~WM_MISC_DATA_PARTITION_5_6;
- else
- val |= WM_MISC_DATA_PARTITION_5_6;
- intel_uncore_write(&dev_priv->uncore, WM_MISC, val);
- } else {
- val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
- if (results->partitioning == INTEL_DDB_PART_1_2)
- val &= ~DISP_DATA_PARTITION_5_6;
- else
- val |= DISP_DATA_PARTITION_5_6;
- intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
- }
- }
-
- if (dirty & WM_DIRTY_FBC) {
- val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL);
- if (results->enable_fbc_wm)
- val &= ~DISP_FBC_WM_DIS;
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ WM_MISC_DATA_PARTITION_5_6);
else
- val |= DISP_FBC_WM_DIS;
- intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, val);
+ intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ DISP_DATA_PARTITION_5_6);
}
+ if (dirty & WM_DIRTY_FBC)
+ intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
+ results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
+
if (dirty & WM_DIRTY_LP(1) &&
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
@@ -3816,6 +3820,8 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
plane_id, USHRT_MAX);
g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
+ g4x_invalidate_wms(crtc, active, level);
+
crtc_state->wm.g4x.optimal = *active;
crtc_state->wm.g4x.intermediate = *active;
@@ -3852,37 +3858,30 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
enum plane_id plane_id = plane->id;
- int level;
+ int level, num_levels = intel_wm_num_levels(dev_priv);
if (plane_state->uapi.visible)
continue;
- for (level = 0; level < 3; level++) {
+ for (level = 0; level < num_levels; level++) {
struct g4x_pipe_wm *raw =
&crtc_state->wm.g4x.raw[level];
raw->plane[plane_id] = 0;
- wm_state->wm.plane[plane_id] = 0;
- }
- if (plane_id == PLANE_PRIMARY) {
- for (level = 0; level < 3; level++) {
- struct g4x_pipe_wm *raw =
- &crtc_state->wm.g4x.raw[level];
+ if (plane_id == PLANE_PRIMARY)
raw->fbc = 0;
- }
-
- wm_state->sr.fbc = 0;
- wm_state->hpll.fbc = 0;
- wm_state->fbc_en = false;
}
}
for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
+ int ret;
+
+ ret = _g4x_compute_pipe_wm(crtc_state);
+ drm_WARN_ON(&dev_priv->drm, ret);
crtc_state->wm.g4x.intermediate =
crtc_state->wm.g4x.optimal;
@@ -4008,30 +4007,27 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
- const struct vlv_fifo_state *fifo_state =
- &crtc_state->wm.vlv.fifo_state;
enum plane_id plane_id = plane->id;
- int level;
+ int level, num_levels = intel_wm_num_levels(dev_priv);
if (plane_state->uapi.visible)
continue;
- for (level = 0; level < wm_state->num_levels; level++) {
+ for (level = 0; level < num_levels; level++) {
struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[level];
raw->plane[plane_id] = 0;
-
- wm_state->wm[level].plane[plane_id] =
- vlv_invert_wm_value(raw->plane[plane_id],
- fifo_state->plane[plane_id]);
}
}
for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
+ int ret;
+
+ ret = _vlv_compute_pipe_wm(crtc_state);
+ drm_WARN_ON(&dev_priv->drm, ret);
crtc_state->wm.vlv.intermediate =
crtc_state->wm.vlv.optimal;
@@ -4107,7 +4103,7 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
for_each_pipe(dev_priv, pipe) {
intel_uncore_rmw(&dev_priv->uncore, DSPCNTR(pipe), 0, DISP_TRICKLE_FEED_DISABLE);
- intel_uncore_write(&dev_priv->uncore, DSPSURF(pipe), intel_uncore_read(&dev_priv->uncore, DSPSURF(pipe)));
+ intel_uncore_rmw(&dev_priv->uncore, DSPSURF(pipe), 0, 0);
intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe));
}
}
@@ -4560,8 +4556,6 @@ static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
- u32 snpcr;
-
intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
@@ -4596,10 +4590,8 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
g4x_disable_trickle_feed(dev_priv);
- snpcr = intel_uncore_read(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR);
- snpcr &= ~GEN6_MBC_SNPCR_MASK;
- snpcr |= GEN6_MBC_SNPCR_MED;
- intel_uncore_write(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR, snpcr);
+ intel_uncore_rmw(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR, GEN6_MBC_SNPCR_MASK,
+ GEN6_MBC_SNPCR_MED);
if (!HAS_PCH_NOP(dev_priv))
cpt_init_clock_gating(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
index 575d67bc6ffe..cf89d0c2a2d9 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -244,7 +244,7 @@ void intel_region_ttm_resource_free(struct intel_memory_region *mem,
struct ttm_resource_manager *man = mem->region_private;
struct ttm_buffer_object mock_bo = {};
- mock_bo.base.size = res->num_pages << PAGE_SHIFT;
+ mock_bo.base.size = res->size;
mock_bo.bdev = &mem->i915->bdev;
res->bo = &mock_bo;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8b668ee35bd9..129746713d07 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -591,8 +591,15 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
pm_runtime_use_autosuspend(kdev);
}
- /* Enable by default */
- pm_runtime_allow(kdev);
+ /*
+ * FIXME: Temporary hammer to keep autosuspend disabled on lmem-capable platforms.
+ * Per PCIe spec section 5.3.1.4.1, all iomem read/write requests to a PCIe
+ * function are unsupported while the PCIe endpoint function is in D3.
+ * Keep i915 autosuspend control 'on' until all known issues with lmem
+ * access in D3 are fixed.
+ */
+ if (!IS_DGFX(i915))
+ pm_runtime_allow(kdev);
/*
* The core calls the driver load handler with an RPM reference held.
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
index 42b3133d8387..75d7a86c60c0 100644
--- a/drivers/gpu/drm/i915/intel_step.c
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -135,6 +135,19 @@ static const struct intel_step_info adlp_n_revids[] = {
[0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_D0 },
};
+static u8 gmd_to_intel_step(struct drm_i915_private *i915,
+ struct intel_ip_version *gmd)
+{
+ u8 step = gmd->step + STEP_A0;
+
+ if (step >= STEP_FUTURE) {
+ drm_dbg(&i915->drm, "Using future steppings\n");
+ return STEP_FUTURE;
+ }
+
+ return step;
+}
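Because the STEP_* enum entries are consecutive, the mapping is plain offset arithmetic. An illustrative case, relying on the four-steps-per-letter layout documented in intel_step.h below:

    /* gmd->step == 6  ->  STEP_A0 + 6 == STEP_B2,
     * since A0..A3 and B0..B3 occupy consecutive enum slots */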
+
static void pvc_step_init(struct drm_i915_private *i915, int pci_revid);
void intel_step_init(struct drm_i915_private *i915)
@@ -144,6 +157,18 @@ void intel_step_init(struct drm_i915_private *i915)
int revid = INTEL_REVID(i915);
struct intel_step_info step = {};
+ if (HAS_GMD_ID(i915)) {
+ step.graphics_step = gmd_to_intel_step(i915,
+ &RUNTIME_INFO(i915)->graphics.ip);
+ step.media_step = gmd_to_intel_step(i915,
+ &RUNTIME_INFO(i915)->media.ip);
+ step.display_step = gmd_to_intel_step(i915,
+ &RUNTIME_INFO(i915)->display.ip);
+ RUNTIME_INFO(i915)->step = step;
+
+ return;
+ }
+
if (IS_PONTEVECCHIO(i915)) {
pvc_step_init(i915, revid);
return;
diff --git a/drivers/gpu/drm/i915/intel_step.h b/drivers/gpu/drm/i915/intel_step.h
index a6b12bfa9744..96dfca4cba73 100644
--- a/drivers/gpu/drm/i915/intel_step.h
+++ b/drivers/gpu/drm/i915/intel_step.h
@@ -11,6 +11,10 @@
struct drm_i915_private;
struct intel_step_info {
+ /*
+ * Each stepping letter is expected to have exactly four numbered steps
+ * (e.g. A0..A3). Deviating from that expectation breaks gmd_to_intel_step().
+ */
u8 graphics_step; /* Represents the compute tile on Xe_HPC */
u8 display_step;
u8 media_step;
@@ -23,21 +27,43 @@ struct intel_step_info {
func(A0) \
func(A1) \
func(A2) \
+ func(A3) \
func(B0) \
func(B1) \
func(B2) \
func(B3) \
func(C0) \
func(C1) \
+ func(C2) \
+ func(C3) \
func(D0) \
func(D1) \
+ func(D2) \
+ func(D3) \
func(E0) \
+ func(E1) \
+ func(E2) \
+ func(E3) \
func(F0) \
+ func(F1) \
+ func(F2) \
+ func(F3) \
func(G0) \
+ func(G1) \
+ func(G2) \
+ func(G3) \
func(H0) \
+ func(H1) \
+ func(H2) \
+ func(H3) \
func(I0) \
func(I1) \
- func(J0)
+ func(I2) \
+ func(I3) \
+ func(J0) \
+ func(J1) \
+ func(J2) \
+ func(J3)
/*
* Symbolic steppings that do not match the hardware. These are valid both as gt
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 7e1b3b89f689..5449146a0624 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -433,15 +433,15 @@ intel_uncore_read64_2x32(struct intel_uncore *uncore,
#define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__)
#define intel_uncore_posting_read_fw(...) ((void)intel_uncore_read_fw(__VA_ARGS__))
-static inline void intel_uncore_rmw(struct intel_uncore *uncore,
- i915_reg_t reg, u32 clear, u32 set)
+static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
+ i915_reg_t reg, u32 clear, u32 set)
{
u32 old, val;
old = intel_uncore_read(uncore, reg);
val = (old & ~clear) | set;
- if (val != old)
- intel_uncore_write(uncore, reg, val);
+ intel_uncore_write(uncore, reg, val);
+ return old;
}
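Two behavioral changes land here: the helper now writes unconditionally (the old 'if (val != old)' short-circuit is gone) and returns the previous register value. Together they enable the rmw(reg, 0, 0) idiom used earlier in this series, which reads a register, writes the same value back (acking any write-1-to-clear status bits), and hands the caller what was pending:

    /* usage sketch, mirroring the PORT_HOTPLUG_STAT conversions above */
    u32 pending = intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);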
static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index ecff0935adbf..85572360c71a 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -77,6 +77,7 @@ static int pxp_create_arb_session(struct intel_pxp *pxp)
drm_err(&gt->i915->drm, "arb session failed to go in play\n");
return ret;
}
+ drm_dbg(&gt->i915->drm, "PXP ARB session is alive\n");
if (!++pxp->key_instance)
++pxp->key_instance;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
index 58471517d1e6..052fd2f9a583 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -303,6 +303,9 @@ int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
if (ret)
drm_err(&i915->drm, "Failed to send tee msg ret=[%d]\n", ret);
+ else if (msg_out.header.status != 0x0)
+ drm_warn(&i915->drm, "PXP firmware failed arb session init request ret=[0x%08x]\n",
+ msg_out.header.status);
return ret;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index c4e932368b37..39da0fb0d6d2 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -135,7 +135,7 @@ static int __run_selftests(const char *name,
int err = 0;
while (!i915_selftest.random_seed)
- i915_selftest.random_seed = get_random_int();
+ i915_selftest.random_seed = get_random_u32();
i915_selftest.timeout_jiffies =
i915_selftest.timeout_ms ?
diff --git a/drivers/gpu/drm/i915/vlv_suspend.c b/drivers/gpu/drm/i915/vlv_suspend.c
index 664fde244f59..02e63ed77f60 100644
--- a/drivers/gpu/drm/i915/vlv_suspend.c
+++ b/drivers/gpu/drm/i915/vlv_suspend.c
@@ -194,7 +194,6 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *i915)
{
struct vlv_s0ix_state *s = i915->vlv_s0ix_state;
struct intel_uncore *uncore = &i915->uncore;
- u32 val;
int i;
if (!s)
@@ -262,15 +261,11 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *i915)
* be restored, as they are used to control the s0ix suspend/resume
* sequence by the caller.
*/
- val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
- val &= VLV_GTLC_ALLOWWAKEREQ;
- val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
- intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);
+ intel_uncore_rmw(uncore, VLV_GTLC_WAKE_CTRL, ~VLV_GTLC_ALLOWWAKEREQ,
+ s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ);
- val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
- val &= VLV_GFX_CLK_FORCE_ON_BIT;
- val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
- intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);
+ intel_uncore_rmw(uncore, VLV_GTLC_SURVIVABILITY_REG, ~VLV_GFX_CLK_FORCE_ON_BIT,
+ s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT);
intel_uncore_write(uncore, VLV_PMWGICZ, s->pmwgicz);
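Note the inverted masks in the two conversions above: clear == ~BIT keeps only BIT from the live register, while set == saved & ~BIT restores every other bit from the snapshot, the opposite of the usual rmw(reg, BIT, value) shape:

    /* expansion for the first call: keep the live ALLOWWAKEREQ bit,
     * restore everything else from the saved state */
    /* new = (old & VLV_GTLC_ALLOWWAKEREQ) |
     *       (s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ) */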
@@ -308,14 +303,10 @@ static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
static int vlv_force_gfx_clock(struct drm_i915_private *i915, bool force_on)
{
struct intel_uncore *uncore = &i915->uncore;
- u32 val;
int err;
- val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
- val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
- if (force_on)
- val |= VLV_GFX_CLK_FORCE_ON_BIT;
- intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);
+ intel_uncore_rmw(uncore, VLV_GTLC_SURVIVABILITY_REG, VLV_GFX_CLK_FORCE_ON_BIT,
+ force_on ? VLV_GFX_CLK_FORCE_ON_BIT : 0);
if (!force_on)
return 0;
@@ -340,11 +331,8 @@ static int vlv_allow_gt_wake(struct drm_i915_private *i915, bool allow)
u32 val;
int err;
- val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
- val &= ~VLV_GTLC_ALLOWWAKEREQ;
- if (allow)
- val |= VLV_GTLC_ALLOWWAKEREQ;
- intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);
+ intel_uncore_rmw(uncore, VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ,
+ allow ? VLV_GTLC_ALLOWWAKEREQ : 0);
intel_uncore_posting_read(uncore, VLV_GTLC_WAKE_CTRL);
mask = VLV_GTLC_ALLOWWAKEACK;
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index e82931712d8a..ff003403fbbc 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -371,7 +371,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
} else {
buffer_chunk->size = lima_bo_size(bo);
- ret = drm_gem_shmem_vmap(&bo->base, &map);
+ ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
if (ret) {
kvfree(et);
goto out;
@@ -379,7 +379,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);
- drm_gem_shmem_vunmap(&bo->base, &map);
+ drm_gem_vunmap_unlocked(&bo->base.base, &map);
}
buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 2d72cc5ddaba..6b6d5335c834 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -157,7 +157,7 @@ static void mtk_dither_config(struct device *dev, unsigned int w,
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs,
DISP_REG_DITHER_CFG);
mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG,
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 9cc406e1eee1..3b7d13028fb6 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -685,6 +685,16 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
if (--dsi->refcount != 0)
return;
+ /*
+ * mtk_dsi_stop() and mtk_dsi_start() are asymmetric: mtk_dsi_stop()
+ * must be called after mtk_drm_crtc_atomic_disable(), which needs the
+ * irq for vblank, and mtk_dsi_stop() disables that irq.
+ * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
+ * after the dsi is fully set up.
+ */
+ mtk_dsi_stop(dsi);
+
+ mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
mtk_dsi_reset_engine(dsi);
mtk_dsi_lane0_ulp_mode_enter(dsi);
mtk_dsi_clk_ulp_mode_enter(dsi);
@@ -735,17 +745,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
if (!dsi->enabled)
return;
- /*
- * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
- * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
- * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
- * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
- * after dsi is fully set.
- */
- mtk_dsi_stop(dsi);
-
- mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
-
dsi->enabled = false;
}
@@ -808,10 +807,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
.attach = mtk_dsi_bridge_attach,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_disable = mtk_dsi_bridge_atomic_disable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_enable = mtk_dsi_bridge_atomic_enable,
.atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
.atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.mode_set = mtk_dsi_bridge_mode_set,
};
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index fcf0d493782c..3b24a924b7b9 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -388,10 +388,14 @@ static void meson_drv_unbind(struct device *dev)
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
- component_unbind_all(dev, drm);
free_irq(priv->vsync_irq, drm);
drm_dev_put(drm);
+ meson_encoder_hdmi_remove(priv);
+ meson_encoder_cvbs_remove(priv);
+
+ component_unbind_all(dev, drm);
+
if (priv->afbcd.ops)
priv->afbcd.ops->exit(priv);
}
@@ -493,6 +497,13 @@ static int meson_drv_probe(struct platform_device *pdev)
return 0;
};
+static int meson_drv_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &meson_drv_master_ops);
+
+ return 0;
+}
+
static struct meson_drm_match_data meson_drm_gxbb_data = {
.compat = VPU_COMPATIBLE_GXBB,
};
@@ -530,6 +541,7 @@ static const struct dev_pm_ops meson_drv_pm_ops = {
static struct platform_driver meson_drm_platform_driver = {
.probe = meson_drv_probe,
+ .remove = meson_drv_remove,
.shutdown = meson_drv_shutdown,
.driver = {
.name = "meson-drm",
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 177dac3ca3be..c62ee358456f 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -25,6 +25,12 @@ enum vpu_compatible {
VPU_COMPATIBLE_G12A = 3,
};
+enum {
+ MESON_ENC_CVBS = 0,
+ MESON_ENC_HDMI,
+ MESON_ENC_LAST,
+};
+
struct meson_drm_match_data {
enum vpu_compatible compat;
struct meson_afbcd_ops *afbcd_ops;
@@ -51,6 +57,7 @@ struct meson_drm {
struct drm_crtc *crtc;
struct drm_plane *primary_plane;
struct drm_plane *overlay_plane;
+ void *encoders[MESON_ENC_LAST];
const struct meson_drm_soc_limits *limits;
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index 8110a6e39320..5675bc2a92cf 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -281,5 +281,18 @@ int meson_encoder_cvbs_init(struct meson_drm *priv)
}
drm_connector_attach_encoder(connector, &meson_encoder_cvbs->encoder);
+ priv->encoders[MESON_ENC_CVBS] = meson_encoder_cvbs;
+
return 0;
}
+
+void meson_encoder_cvbs_remove(struct meson_drm *priv)
+{
+ struct meson_encoder_cvbs *meson_encoder_cvbs;
+
+ if (priv->encoders[MESON_ENC_CVBS]) {
+ meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS];
+ drm_bridge_remove(&meson_encoder_cvbs->bridge);
+ drm_bridge_remove(meson_encoder_cvbs->next_bridge);
+ }
+}
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.h b/drivers/gpu/drm/meson/meson_encoder_cvbs.h
index 61d9d183ce7f..09710fec3c66 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.h
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.h
@@ -25,5 +25,6 @@ struct meson_cvbs_mode {
extern struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT];
int meson_encoder_cvbs_init(struct meson_drm *priv);
+void meson_encoder_cvbs_remove(struct meson_drm *priv);
#endif /* __MESON_VENC_CVBS_H */
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 2f616c55c271..53231bfdf7e2 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -452,6 +452,8 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
meson_encoder_hdmi->cec_notifier = notifier;
}
+ priv->encoders[MESON_ENC_HDMI] = meson_encoder_hdmi;
+
dev_dbg(priv->dev, "HDMI encoder initialized\n");
return 0;
@@ -460,3 +462,14 @@ err_put_node:
of_node_put(remote);
return ret;
}
+
+void meson_encoder_hdmi_remove(struct meson_drm *priv)
+{
+ struct meson_encoder_hdmi *meson_encoder_hdmi;
+
+ if (priv->encoders[MESON_ENC_HDMI]) {
+ meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI];
+ drm_bridge_remove(&meson_encoder_hdmi->bridge);
+ drm_bridge_remove(meson_encoder_hdmi->next_bridge);
+ }
+}
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.h b/drivers/gpu/drm/meson/meson_encoder_hdmi.h
index ed19494f0956..a6cd38eb5f71 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.h
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.h
@@ -8,5 +8,6 @@
#define __MESON_ENCODER_HDMI_H
int meson_encoder_hdmi_init(struct meson_drm *priv);
+void meson_encoder_hdmi_remove(struct meson_drm *priv);
#endif /* __MESON_ENCODER_HDMI_H */
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index dfd6a9f33dda..815dfe30492b 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -169,7 +169,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
/* Enable OSD and BLK0, set max global alpha */
priv->viu.osd1_ctrl_stat = OSD_ENABLE |
- (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
+ (0x100 << OSD_GLOBAL_ALPHA_SHIFT) |
OSD_BLK0_ENABLE;
priv->viu.osd1_ctrl_stat2 = readl(priv->io_base +
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index bb7e109534de..d4b907889a21 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -94,7 +94,7 @@ static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv,
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12));
writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21));
- writel((m[11] & 0x1fff) << 16,
+ writel((m[11] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22));
writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 4d38b8e18030..ece6cd102dbb 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -263,7 +263,11 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- drm_fbdev_generic_setup(dev, 0);
+ /*
+ * FIXME: A 24-bit color depth does not work with 24 bpp on
+ * G200ER. Force 32 bpp.
+ */
+ drm_fbdev_generic_setup(dev, 32);
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
index be389ed91cbd..bd6e573c9a1a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
@@ -284,7 +284,8 @@ static void mgag200_g200se_04_pixpllc_atomic_update(struct drm_crtc *crtc,
pixpllcp = pixpllc->p - 1;
pixpllcs = pixpllc->s;
- xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1);
+ // For G200SE A, BIT(7) should be set unconditionally.
+ xpixpllcm = BIT(7) | pixpllcm;
xpixpllcn = pixpllcn;
xpixpllcp = (pixpllcs << 3) | pixpllcp;
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index bbab2549243a..0a5aaf78172a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -579,13 +579,13 @@ int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_st
struct drm_property_blob *new_gamma_lut = new_crtc_state->gamma_lut;
int ret;
- ret = drm_atomic_helper_check_crtc_state(new_crtc_state, false);
- if (ret)
- return ret;
-
if (!new_crtc_state->enable)
return 0;
+ ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
+ if (ret)
+ return ret;
+
if (new_crtc_state->mode_changed) {
if (funcs->pixpllc_atomic_check) {
ret = funcs->pixpllc_atomic_check(crtc, new_state);
@@ -601,7 +601,7 @@ int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_st
}
}
- return drm_atomic_add_affected_planes(new_state, crtc);
+ return 0;
}
void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
@@ -824,7 +824,6 @@ int mgag200_mode_config_init(struct mga_device *mdev, resource_size_t vram_avail
dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
dev->mode_config.preferred_depth = 24;
- dev->mode_config.fb_base = mdev->vram_res->start;
dev->mode_config.funcs = &mgag200_mode_config_funcs;
dev->mode_config.helper_private = &mgag200_mode_config_helper_funcs;
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 46168eccfac4..b373e3000320 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -109,8 +109,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_info(fbi, helper, sizes);
- dev->mode_config.fb_base = paddr;
-
fbi->screen_base = msm_gem_get_vaddr(bo);
if (IS_ERR(fbi->screen_base)) {
ret = PTR_ERR(fbi->screen_base);
diff --git a/drivers/gpu/drm/mxsfb/lcdif_kms.c b/drivers/gpu/drm/mxsfb/lcdif_kms.c
index b1092aab1423..f0ad6e2a9352 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_kms.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_kms.c
@@ -15,6 +15,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_dma_helper.h>
@@ -31,13 +32,126 @@
/* -----------------------------------------------------------------------------
* CRTC
*/
+
+/*
+ * For conversion from YCbCr to RGB, the CSC operates as follows:
+ *
+ * |R| |A1 A2 A3| |Y + D1|
+ * |G| = |B1 B2 B3| * |Cb + D2|
+ * |B| |C1 C2 C3| |Cr + D3|
+ *
+ * The A, B and C coefficients are expressed as Q2.8 fixed point values, and
+ * the D coefficients as Q0.8. Despite the reference manual stating the
+ * opposite, the D1, D2 and D3 offset values are added to Y, Cb and Cr, not
+ * subtracted. They must thus be programmed with negative values.
+ */
+static const u32 lcdif_yuv2rgb_coeffs[3][2][6] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ /*
+ * BT.601 limited range:
+ *
+ * |R| |1.1644 0.0000 1.5960| |Y - 16 |
+ * |G| = |1.1644 -0.3917 -0.8129| * |Cb - 128|
+ * |B| |1.1644 2.0172 0.0000| |Cr - 128|
+ */
+ CSC0_COEF0_A1(0x12a) | CSC0_COEF0_A2(0x000),
+ CSC0_COEF1_A3(0x199) | CSC0_COEF1_B1(0x12a),
+ CSC0_COEF2_B2(0x79c) | CSC0_COEF2_B3(0x730),
+ CSC0_COEF3_C1(0x12a) | CSC0_COEF3_C2(0x204),
+ CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x1f0),
+ CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180),
+ },
+ [DRM_COLOR_YCBCR_FULL_RANGE] = {
+ /*
+ * BT.601 full range:
+ *
+ * |R| |1.0000 0.0000 1.4020| |Y - 0 |
+ * |G| = |1.0000 -0.3441 -0.7141| * |Cb - 128|
+ * |B| |1.0000 1.7720 0.0000| |Cr - 128|
+ */
+ CSC0_COEF0_A1(0x100) | CSC0_COEF0_A2(0x000),
+ CSC0_COEF1_A3(0x167) | CSC0_COEF1_B1(0x100),
+ CSC0_COEF2_B2(0x7a8) | CSC0_COEF2_B3(0x749),
+ CSC0_COEF3_C1(0x100) | CSC0_COEF3_C2(0x1c6),
+ CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x000),
+ CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180),
+ },
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ /*
+ * Rec.709 limited range:
+ *
+ * |R| |1.1644 0.0000 1.7927| |Y - 16 |
+ * |G| = |1.1644 -0.2132 -0.5329| * |Cb - 128|
+ * |B| |1.1644 2.1124 0.0000| |Cr - 128|
+ */
+ CSC0_COEF0_A1(0x12a) | CSC0_COEF0_A2(0x000),
+ CSC0_COEF1_A3(0x1cb) | CSC0_COEF1_B1(0x12a),
+ CSC0_COEF2_B2(0x7c9) | CSC0_COEF2_B3(0x778),
+ CSC0_COEF3_C1(0x12a) | CSC0_COEF3_C2(0x21d),
+ CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x1f0),
+ CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180),
+ },
+ [DRM_COLOR_YCBCR_FULL_RANGE] = {
+ /*
+ * Rec.709 full range:
+ *
+ * |R| |1.0000 0.0000 1.5748| |Y - 0 |
+ * |G| = |1.0000 -0.1873 -0.4681| * |Cb - 128|
+ * |B| |1.0000 1.8556 0.0000| |Cr - 128|
+ */
+ CSC0_COEF0_A1(0x100) | CSC0_COEF0_A2(0x000),
+ CSC0_COEF1_A3(0x193) | CSC0_COEF1_B1(0x100),
+ CSC0_COEF2_B2(0x7d0) | CSC0_COEF2_B3(0x788),
+ CSC0_COEF3_C1(0x100) | CSC0_COEF3_C2(0x1db),
+ CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x000),
+ CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180),
+ },
+ },
+ [DRM_COLOR_YCBCR_BT2020] = {
+ [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ /*
+ * BT.2020 limited range:
+ *
+ * |R| |1.1644 0.0000 1.6787| |Y - 16 |
+ * |G| = |1.1644 -0.1874 -0.6505| * |Cb - 128|
+ * |B| |1.1644 2.1418 0.0000| |Cr - 128|
+ */
+ CSC0_COEF0_A1(0x12a) | CSC0_COEF0_A2(0x000),
+ CSC0_COEF1_A3(0x1ae) | CSC0_COEF1_B1(0x12a),
+ CSC0_COEF2_B2(0x7d0) | CSC0_COEF2_B3(0x759),
+ CSC0_COEF3_C1(0x12a) | CSC0_COEF3_C2(0x224),
+ CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x1f0),
+ CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180),
+ },
+ [DRM_COLOR_YCBCR_FULL_RANGE] = {
+ /*
+ * BT.2020 full range:
+ *
+ * |R| |1.0000 0.0000 1.4746| |Y - 0 |
+ * |G| = |1.0000 -0.1646 -0.5714| * |Cb - 128|
+ * |B| |1.0000 1.8814 0.0000| |Cr - 128|
+ */
+ CSC0_COEF0_A1(0x100) | CSC0_COEF0_A2(0x000),
+ CSC0_COEF1_A3(0x179) | CSC0_COEF1_B1(0x100),
+ CSC0_COEF2_B2(0x7d6) | CSC0_COEF2_B3(0x76e),
+ CSC0_COEF3_C1(0x100) | CSC0_COEF3_C2(0x1e2),
+ CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x000),
+ CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180),
+ },
+ },
+};
+
static void lcdif_set_formats(struct lcdif_drm_private *lcdif,
+ struct drm_plane_state *plane_state,
const u32 bus_format)
{
struct drm_device *drm = lcdif->drm;
- const u32 format = lcdif->crtc.primary->state->fb->format->format;
-
- writel(CSC0_CTRL_BYPASS, lcdif->base + LCDC_V8_CSC0_CTRL);
+ const u32 format = plane_state->fb->format->format;
+ bool in_yuv = false;
+ bool out_yuv = false;
switch (bus_format) {
case MEDIA_BUS_FMT_RGB565_1X16:
@@ -51,24 +165,7 @@ static void lcdif_set_formats(struct lcdif_drm_private *lcdif,
case MEDIA_BUS_FMT_UYVY8_1X16:
writel(DISP_PARA_LINE_PATTERN_UYVY_H,
lcdif->base + LCDC_V8_DISP_PARA);
-
- /* CSC: BT.601 Full Range RGB to YCbCr coefficients. */
- writel(CSC0_COEF0_A2(0x096) | CSC0_COEF0_A1(0x04c),
- lcdif->base + LCDC_V8_CSC0_COEF0);
- writel(CSC0_COEF1_B1(0x7d5) | CSC0_COEF1_A3(0x01d),
- lcdif->base + LCDC_V8_CSC0_COEF1);
- writel(CSC0_COEF2_B3(0x080) | CSC0_COEF2_B2(0x7ac),
- lcdif->base + LCDC_V8_CSC0_COEF2);
- writel(CSC0_COEF3_C2(0x795) | CSC0_COEF3_C1(0x080),
- lcdif->base + LCDC_V8_CSC0_COEF3);
- writel(CSC0_COEF4_D1(0x000) | CSC0_COEF4_C3(0x7ec),
- lcdif->base + LCDC_V8_CSC0_COEF4);
- writel(CSC0_COEF5_D3(0x080) | CSC0_COEF5_D2(0x080),
- lcdif->base + LCDC_V8_CSC0_COEF5);
-
- writel(CSC0_CTRL_CSC_MODE_RGB2YCbCr,
- lcdif->base + LCDC_V8_CSC0_CTRL);
-
+ out_yuv = true;
break;
default:
dev_err(drm->dev, "Unknown media bus format 0x%x\n", bus_format);
@@ -76,6 +173,7 @@ static void lcdif_set_formats(struct lcdif_drm_private *lcdif,
}
switch (format) {
+ /* RGB Formats */
case DRM_FORMAT_RGB565:
writel(CTRLDESCL0_5_BPP_16_RGB565,
lcdif->base + LCDC_V8_CTRLDESCL0_5);
@@ -100,10 +198,84 @@ static void lcdif_set_formats(struct lcdif_drm_private *lcdif,
writel(CTRLDESCL0_5_BPP_32_ARGB8888,
lcdif->base + LCDC_V8_CTRLDESCL0_5);
break;
+
+ /* YUV Formats */
+ case DRM_FORMAT_YUYV:
+ writel(CTRLDESCL0_5_BPP_YCbCr422 | CTRLDESCL0_5_YUV_FORMAT_VY2UY1,
+ lcdif->base + LCDC_V8_CTRLDESCL0_5);
+ in_yuv = true;
+ break;
+ case DRM_FORMAT_YVYU:
+ writel(CTRLDESCL0_5_BPP_YCbCr422 | CTRLDESCL0_5_YUV_FORMAT_UY2VY1,
+ lcdif->base + LCDC_V8_CTRLDESCL0_5);
+ in_yuv = true;
+ break;
+ case DRM_FORMAT_UYVY:
+ writel(CTRLDESCL0_5_BPP_YCbCr422 | CTRLDESCL0_5_YUV_FORMAT_Y2VY1U,
+ lcdif->base + LCDC_V8_CTRLDESCL0_5);
+ in_yuv = true;
+ break;
+ case DRM_FORMAT_VYUY:
+ writel(CTRLDESCL0_5_BPP_YCbCr422 | CTRLDESCL0_5_YUV_FORMAT_Y2UY1V,
+ lcdif->base + LCDC_V8_CTRLDESCL0_5);
+ in_yuv = true;
+ break;
+
default:
dev_err(drm->dev, "Unknown pixel format 0x%x\n", format);
break;
}
+
+ /*
+ * The CSC differentiates between "YCbCr" and "YUV", but the reference
+ * manual doesn't detail how they differ. Experiments showed that the
+ * luminance value is unaffected, only the calculations involving chroma
+ * values differ. The YCbCr mode behaves as expected, with chroma values
+ * being offset by 128. The YUV mode isn't fully understood.
+ */
+ if (!in_yuv && out_yuv) {
+ /* RGB -> YCbCr */
+ writel(CSC0_CTRL_CSC_MODE_RGB2YCbCr,
+ lcdif->base + LCDC_V8_CSC0_CTRL);
+
+ /*
+ * CSC: BT.601 Limited Range RGB to YCbCr coefficients.
+ *
+ * |Y | | 0.2568 0.5041 0.0979| |R| |16 |
+ * |Cb| = |-0.1482 -0.2910 0.4392| * |G| + |128|
+ * |Cr| | 0.4392 0.4392 -0.3678| |B| |128|
+ */
+ writel(CSC0_COEF0_A2(0x081) | CSC0_COEF0_A1(0x041),
+ lcdif->base + LCDC_V8_CSC0_COEF0);
+ writel(CSC0_COEF1_B1(0x7db) | CSC0_COEF1_A3(0x019),
+ lcdif->base + LCDC_V8_CSC0_COEF1);
+ writel(CSC0_COEF2_B3(0x070) | CSC0_COEF2_B2(0x7b6),
+ lcdif->base + LCDC_V8_CSC0_COEF2);
+ writel(CSC0_COEF3_C2(0x7a2) | CSC0_COEF3_C1(0x070),
+ lcdif->base + LCDC_V8_CSC0_COEF3);
+ writel(CSC0_COEF4_D1(0x010) | CSC0_COEF4_C3(0x7ee),
+ lcdif->base + LCDC_V8_CSC0_COEF4);
+ writel(CSC0_COEF5_D3(0x080) | CSC0_COEF5_D2(0x080),
+ lcdif->base + LCDC_V8_CSC0_COEF5);
+ } else if (in_yuv && !out_yuv) {
+ /* YCbCr -> RGB */
+ const u32 *coeffs =
+ lcdif_yuv2rgb_coeffs[plane_state->color_encoding]
+ [plane_state->color_range];
+
+ writel(CSC0_CTRL_CSC_MODE_YCbCr2RGB,
+ lcdif->base + LCDC_V8_CSC0_CTRL);
+
+ writel(coeffs[0], lcdif->base + LCDC_V8_CSC0_COEF0);
+ writel(coeffs[1], lcdif->base + LCDC_V8_CSC0_COEF1);
+ writel(coeffs[2], lcdif->base + LCDC_V8_CSC0_COEF2);
+ writel(coeffs[3], lcdif->base + LCDC_V8_CSC0_COEF3);
+ writel(coeffs[4], lcdif->base + LCDC_V8_CSC0_COEF4);
+ writel(coeffs[5], lcdif->base + LCDC_V8_CSC0_COEF5);
+ } else {
+ /* RGB -> RGB, YCbCr -> YCbCr: bypass colorspace converter. */
+ writel(CSC0_CTRL_BYPASS, lcdif->base + LCDC_V8_CSC0_CTRL);
+ }
}
static void lcdif_set_mode(struct lcdif_drm_private *lcdif, u32 bus_flags)
@@ -142,8 +314,18 @@ static void lcdif_set_mode(struct lcdif_drm_private *lcdif, u32 bus_flags)
CTRLDESCL0_1_WIDTH(m->hdisplay),
lcdif->base + LCDC_V8_CTRLDESCL0_1);
- writel(CTRLDESCL0_3_PITCH(lcdif->crtc.primary->state->fb->pitches[0]),
- lcdif->base + LCDC_V8_CTRLDESCL0_3);
+ /*
+ * The P_SIZE and T_SIZE fields are undocumented, but according to the
+ * downstream kernel they control the AXI burst size. As of now there
+ * are two known values:
+ * 1 - 128 bytes
+ * 2 - 256 bytes
+ * The downstream kernel uses a 256-byte burst size to improve memory
+ * efficiency, so set it here too.
+ */
+ ctrl = CTRLDESCL0_3_P_SIZE(2) | CTRLDESCL0_3_T_SIZE(2) |
+ CTRLDESCL0_3_PITCH(lcdif->crtc.primary->state->fb->pitches[0]);
+ writel(ctrl, lcdif->base + LCDC_V8_CTRLDESCL0_3);
}
static void lcdif_enable_controller(struct lcdif_drm_private *lcdif)
@@ -188,6 +370,7 @@ static void lcdif_reset_block(struct lcdif_drm_private *lcdif)
}
static void lcdif_crtc_mode_set_nofb(struct lcdif_drm_private *lcdif,
+ struct drm_plane_state *plane_state,
struct drm_bridge_state *bridge_state,
const u32 bus_format)
{
@@ -210,7 +393,7 @@ static void lcdif_crtc_mode_set_nofb(struct lcdif_drm_private *lcdif,
/* Mandatory eLCDIF reset as per the Reference Manual */
lcdif_reset_block(lcdif);
- lcdif_set_formats(lcdif, bus_format);
+ lcdif_set_formats(lcdif, plane_state, bus_format);
lcdif_set_mode(lcdif, bus_flags);
}
@@ -293,7 +476,7 @@ static void lcdif_crtc_atomic_enable(struct drm_crtc *crtc,
pm_runtime_get_sync(drm->dev);
- lcdif_crtc_mode_set_nofb(lcdif, bridge_state, bus_format);
+ lcdif_crtc_mode_set_nofb(lcdif, new_pstate, bridge_state, bus_format);
/* Write cur_buf as well to avoid an initial corrupt frame */
paddr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
@@ -437,12 +620,19 @@ static const struct drm_plane_funcs lcdif_plane_funcs = {
};
static const u32 lcdif_primary_plane_formats[] = {
+ /* RGB */
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_XRGB8888,
+
+ /* Packed YCbCr */
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
};
static const u64 lcdif_modifiers[] = {
@@ -456,6 +646,11 @@ static const u64 lcdif_modifiers[] = {
int lcdif_kms_init(struct lcdif_drm_private *lcdif)
{
+ const u32 supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709) |
+ BIT(DRM_COLOR_YCBCR_BT2020);
+ const u32 supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE);
struct drm_encoder *encoder = &lcdif->encoder;
struct drm_crtc *crtc = &lcdif->crtc;
int ret;
@@ -471,6 +666,14 @@ int lcdif_kms_init(struct lcdif_drm_private *lcdif)
if (ret)
return ret;
+ ret = drm_plane_create_color_properties(&lcdif->planes.primary,
+ supported_encodings,
+ supported_ranges,
+ DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+ if (ret)
+ return ret;
+
drm_crtc_helper_add(crtc, &lcdif_crtc_helper_funcs);
ret = drm_crtc_init_with_planes(lcdif->drm, crtc,
&lcdif->planes.primary, NULL,
diff --git a/drivers/gpu/drm/mxsfb/lcdif_regs.h b/drivers/gpu/drm/mxsfb/lcdif_regs.h
index c70220651e3a..fb74eb5ccbf1 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_regs.h
+++ b/drivers/gpu/drm/mxsfb/lcdif_regs.h
@@ -130,7 +130,7 @@
#define CTRL_FETCH_START_OPTION_BPV BIT(9)
#define CTRL_FETCH_START_OPTION_RESV GENMASK(9, 8)
#define CTRL_FETCH_START_OPTION_MASK GENMASK(9, 8)
-#define CTRL_NEG BIT(4)
+#define CTRL_NEG BIT(4)
#define CTRL_INV_PXCK BIT(3)
#define CTRL_INV_DE BIT(2)
#define CTRL_INV_VS BIT(1)
@@ -138,9 +138,9 @@
#define DISP_PARA_DISP_ON BIT(31)
#define DISP_PARA_SWAP_EN BIT(30)
-#define DISP_PARA_LINE_PATTERN_UYVY_H (GENMASK(29, 28) | BIT(26))
-#define DISP_PARA_LINE_PATTERN_RGB565 GENMASK(28, 26)
-#define DISP_PARA_LINE_PATTERN_RGB888 0
+#define DISP_PARA_LINE_PATTERN_UYVY_H (0xd << 26)
+#define DISP_PARA_LINE_PATTERN_RGB565 (0x7 << 26)
+#define DISP_PARA_LINE_PATTERN_RGB888 (0x0 << 26)
#define DISP_PARA_LINE_PATTERN_MASK GENMASK(29, 26)
#define DISP_PARA_DISP_MODE_MASK GENMASK(25, 24)
#define DISP_PARA_BGND_R_MASK GENMASK(23, 16)
@@ -186,10 +186,14 @@
#define INT_ENABLE_D1_PLANE_PANIC_EN BIT(0)
#define CTRLDESCL0_1_HEIGHT(n) (((n) & 0xffff) << 16)
-#define CTRLDESCL0_1_HEIGHT_MASK GENMASK(31, 16)
+#define CTRLDESCL0_1_HEIGHT_MASK GENMASK(31, 16)
#define CTRLDESCL0_1_WIDTH(n) ((n) & 0xffff)
#define CTRLDESCL0_1_WIDTH_MASK GENMASK(15, 0)
+#define CTRLDESCL0_3_P_SIZE(n) (((n) << 20) & CTRLDESCL0_3_P_SIZE_MASK)
+#define CTRLDESCL0_3_P_SIZE_MASK GENMASK(22, 20)
+#define CTRLDESCL0_3_T_SIZE(n) (((n) << 16) & CTRLDESCL0_3_T_SIZE_MASK)
+#define CTRLDESCL0_3_T_SIZE_MASK GENMASK(17, 16)
#define CTRLDESCL0_3_PITCH(n) ((n) & 0xffff)
#define CTRLDESCL0_3_PITCH_MASK GENMASK(15, 0)
@@ -198,21 +202,24 @@
#define CTRLDESCL0_5_EN BIT(31)
#define CTRLDESCL0_5_SHADOW_LOAD_EN BIT(30)
-#define CTRLDESCL0_5_BPP_16_RGB565 BIT(26)
-#define CTRLDESCL0_5_BPP_16_ARGB1555 (BIT(26) | BIT(24))
-#define CTRLDESCL0_5_BPP_16_ARGB4444 (BIT(26) | BIT(25))
-#define CTRLDESCL0_5_BPP_YCbCr422 (BIT(26) | BIT(25) | BIT(24))
-#define CTRLDESCL0_5_BPP_24_RGB888 BIT(27)
-#define CTRLDESCL0_5_BPP_32_ARGB8888 (BIT(27) | BIT(24))
-#define CTRLDESCL0_5_BPP_32_ABGR8888 (BIT(27) | BIT(25))
+#define CTRLDESCL0_5_BPP_16_RGB565 (0x4 << 24)
+#define CTRLDESCL0_5_BPP_16_ARGB1555 (0x5 << 24)
+#define CTRLDESCL0_5_BPP_16_ARGB4444 (0x6 << 24)
+#define CTRLDESCL0_5_BPP_YCbCr422 (0x7 << 24)
+#define CTRLDESCL0_5_BPP_24_RGB888 (0x8 << 24)
+#define CTRLDESCL0_5_BPP_32_ARGB8888 (0x9 << 24)
+#define CTRLDESCL0_5_BPP_32_ABGR8888 (0xa << 24)
#define CTRLDESCL0_5_BPP_MASK GENMASK(27, 24)
-#define CTRLDESCL0_5_YUV_FORMAT_Y2VY1U 0
-#define CTRLDESCL0_5_YUV_FORMAT_Y2UY1V BIT(14)
-#define CTRLDESCL0_5_YUV_FORMAT_VY2UY1 BIT(15)
-#define CTRLDESCL0_5_YUV_FORMAT_UY2VY1 (BIT(15) | BIT(14))
+#define CTRLDESCL0_5_YUV_FORMAT_Y2VY1U (0x0 << 14)
+#define CTRLDESCL0_5_YUV_FORMAT_Y2UY1V (0x1 << 14)
+#define CTRLDESCL0_5_YUV_FORMAT_VY2UY1 (0x2 << 14)
+#define CTRLDESCL0_5_YUV_FORMAT_UY2VY1 (0x3 << 14)
#define CTRLDESCL0_5_YUV_FORMAT_MASK GENMASK(15, 14)
-#define CSC0_CTRL_CSC_MODE_RGB2YCbCr GENMASK(2, 1)
+#define CSC0_CTRL_CSC_MODE_YUV2RGB (0x0 << 1)
+#define CSC0_CTRL_CSC_MODE_YCbCr2RGB (0x1 << 1)
+#define CSC0_CTRL_CSC_MODE_RGB2YUV (0x2 << 1)
+#define CSC0_CTRL_CSC_MODE_RGB2YCbCr (0x3 << 1)
#define CSC0_CTRL_CSC_MODE_MASK GENMASK(2, 1)
#define CSC0_CTRL_BYPASS BIT(0)
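The Q2.8/Q0.8 encoding used by the lcdif_yuv2rgb_coeffs[] table above can be sanity-checked in userspace. Below is a minimal sketch, assuming the bit widths implied by the table values (11-bit two's complement for the A/B/C terms, 9-bit for the D offsets, which are programmed negated since the hardware adds them); it is an illustration, not a kernel helper:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* A/B/C terms: Q2.8, i.e. round(value * 256), two's complement, 11 bits. */
static uint32_t q2_8(double v)
{
	return (uint32_t)(int32_t)lround(v * 256.0) & 0x7ff;
}

/* D offsets: the CSC adds them, so program the negated offset, 9 bits. */
static uint32_t d_offset(int offset)
{
	return (uint32_t)(-offset) & 0x1ff;
}

int main(void)
{
	printf("A1 = 0x%03x\n", q2_8(1.1644));   /* 0x12a, BT.601 limited */
	printf("B2 = 0x%03x\n", q2_8(-0.3917));  /* 0x79c */
	printf("D1 = 0x%03x\n", d_offset(16));   /* 0x1f0, from Y - 16 */
	printf("D2 = 0x%03x\n", d_offset(128));  /* 0x180, from Cb - 128 */

	/*
	 * The lcdif_regs.h hunks also spell out field values that were
	 * previously composed from GENMASK()/BIT(), e.g. the UYVY line
	 * pattern: (GENMASK(29, 28) | BIT(26)) == 0xd << 26.
	 */
	printf("0x%08x == 0x%08x\n", (0x3u << 28) | (1u << 26), 0xdu << 26);
	return 0;
}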
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 33c97d510999..aa94f8e284dd 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -131,7 +131,7 @@ nv50_dmac_kick(struct nvif_push *push)
{
struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
- dmac->cur = push->cur - (u32 *)dmac->_push.mem.object.map.ptr;
+ dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
if (dmac->put != dmac->cur) {
/* Push buffer fetches are not coherent with BAR1, we need to ensure
* writes have been flushed right through to VRAM before writing PUT.
@@ -194,7 +194,7 @@ nv50_dmac_wait(struct nvif_push *push, u32 size)
if (WARN_ON(size > dmac->max))
return -EINVAL;
- dmac->cur = push->cur - (u32 *)dmac->_push.mem.object.map.ptr;
+ dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
if (dmac->cur + size >= dmac->max) {
int ret = nv50_dmac_wind(dmac);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 35bb0bb3fe61..813937ad1dc2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -532,7 +532,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
if (ret)
return ret;
- ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap);
+ ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size), &nvbo->kmap);
ttm_bo_unreserve(&nvbo->bo);
return ret;
@@ -822,6 +822,15 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
if (ret == 0) {
ret = nouveau_fence_new(chan, false, &fence);
if (ret == 0) {
+ /* TODO: figure out a better solution here
+ *
+ * Wait on the fence here explicitly, as going through
+ * ttm_bo_move_accel_cleanup() somehow doesn't seem to do it.
+ *
+ * Without this the operation can time out and we'll fall back
+ * to a software copy, which might take several minutes to finish.
+ */
+ nouveau_fence_wait(fence, false, false);
ret = ttm_bo_move_accel_cleanup(bo,
&fence->base,
evict, false,
@@ -1227,7 +1236,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
} else {
/* make sure bo is in mappable vram */
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
- bo->resource->start + bo->resource->num_pages < mappable)
+ bo->resource->start + PFN_UP(bo->resource->size) < mappable)
return 0;
for (i = 0; i < nvbo->placement.num_placement; ++i) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo0039.c b/drivers/gpu/drm/nouveau/nouveau_bo0039.c
index 7390132129fe..e2ce44adaa5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo0039.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo0039.c
@@ -52,7 +52,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
u32 src_offset = old_reg->start << PAGE_SHIFT;
u32 dst_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, new_reg);
u32 dst_offset = new_reg->start << PAGE_SHIFT;
- u32 page_count = new_reg->num_pages;
+ u32 page_count = PFN_UP(new_reg->size);
int ret;
ret = PUSH_WAIT(push, 3);
@@ -62,7 +62,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_BUFFER_IN, src_ctxdma,
SET_CONTEXT_DMA_BUFFER_OUT, dst_ctxdma);
- page_count = new_reg->num_pages;
+ page_count = PFN_UP(new_reg->size);
while (page_count) {
int line_count = (page_count > 2047) ? 2047 : page_count;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo5039.c b/drivers/gpu/drm/nouveau/nouveau_bo5039.c
index 4c75c7b3804c..c6cf3629a9f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo5039.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo5039.c
@@ -41,7 +41,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
struct nvif_push *push = chan->chan.push;
- u64 length = (new_reg->num_pages << PAGE_SHIFT);
+ u64 length = new_reg->size;
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
int src_tiled = !!mem->kind;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo74c1.c b/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
index ed6c09d67840..9b7ba31fae13 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
@@ -44,7 +44,7 @@ nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- PUSH_NVSQ(push, NV74C1, 0x0304, new_reg->num_pages << PAGE_SHIFT,
+ PUSH_NVSQ(push, NV74C1, 0x0304, new_reg->size,
0x0308, upper_32_bits(mem->vma[0].addr),
0x030c, lower_32_bits(mem->vma[0].addr),
0x0310, upper_32_bits(mem->vma[1].addr),
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo85b5.c b/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
index dec29b2d8bb2..a15a38a87a95 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
@@ -44,10 +44,10 @@ nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct nvif_push *push = chan->chan.push;
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
- u32 page_count = new_reg->num_pages;
+ u32 page_count = PFN_UP(new_reg->size);
int ret;
- page_count = new_reg->num_pages;
+ page_count = PFN_UP(new_reg->size);
while (page_count) {
int line_count = (page_count > 8191) ? 8191 : page_count;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo9039.c b/drivers/gpu/drm/nouveau/nouveau_bo9039.c
index 776b04976cdf..d2bb2687d401 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo9039.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo9039.c
@@ -42,10 +42,10 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct nouveau_mem *mem = nouveau_mem(old_reg);
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
- u32 page_count = new_reg->num_pages;
+ u32 page_count = PFN_UP(new_reg->size);
int ret;
- page_count = new_reg->num_pages;
+ page_count = PFN_UP(new_reg->size);
while (page_count) {
int line_count = (page_count > 2047) ? 2047 : page_count;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo90b5.c b/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
index 8499f58213e3..4618f4f5ab56 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
@@ -37,10 +37,10 @@ nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct nvif_push *push = chan->chan.push;
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
- u32 page_count = new_reg->num_pages;
+ u32 page_count = PFN_UP(new_reg->size);
int ret;
- page_count = new_reg->num_pages;
+ page_count = PFN_UP(new_reg->size);
while (page_count) {
int line_count = (page_count > 8191) ? 8191 : page_count;
diff --git a/drivers/gpu/drm/nouveau/nouveau_boa0b5.c b/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
index 575212472e7a..07a5c6302c98 100644
--- a/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
+++ b/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
@@ -58,7 +58,7 @@ nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
PITCH_IN, PAGE_SIZE,
PITCH_OUT, PAGE_SIZE,
LINE_LENGTH_IN, PAGE_SIZE,
- LINE_COUNT, new_reg->num_pages);
+ LINE_COUNT, PFN_UP(new_reg->size));
PUSH_IMMD(push, NVA0B5, LAUNCH_DMA,
NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index a2f5df568ca5..2e97186090c8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -659,7 +659,6 @@ int
nouveau_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
struct nouveau_display *disp;
int ret;
@@ -672,7 +671,6 @@ nouveau_display_create(struct drm_device *dev)
drm_mode_create_dvi_i_properties(dev);
dev->mode_config.funcs = &nouveau_mode_config_funcs;
- dev->mode_config.fb_base = device->func->resource_addr(device, 1);
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 16356611b5b9..789857faa048 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -33,7 +33,6 @@
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>
-#include <nvif/if000c.h>
#include <nvhw/class/cla0b5.h>
@@ -139,44 +138,24 @@ static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
}
}
-static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
- struct vm_fault *vmf, struct migrate_vma *args,
- dma_addr_t *dma_addr)
+static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage,
+ struct page *dpage, dma_addr_t *dma_addr)
{
struct device *dev = drm->dev->dev;
- struct page *dpage, *spage;
- struct nouveau_svmm *svmm;
- spage = migrate_pfn_to_page(args->src[0]);
- if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
- return 0;
-
- dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
- if (!dpage)
- return VM_FAULT_SIGBUS;
lock_page(dpage);
*dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, *dma_addr))
- goto error_free_page;
+ return -EIO;
- svmm = spage->zone_device_data;
- mutex_lock(&svmm->mutex);
- nouveau_svmm_invalidate(svmm, args->start, args->end);
if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
- NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
- goto error_dma_unmap;
- mutex_unlock(&svmm->mutex);
+ NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) {
+ dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ return -EIO;
+ }
- args->dst[0] = migrate_pfn(page_to_pfn(dpage));
return 0;
-
-error_dma_unmap:
- mutex_unlock(&svmm->mutex);
- dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
-error_free_page:
- __free_page(dpage);
- return VM_FAULT_SIGBUS;
}
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
@@ -184,9 +163,11 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
struct nouveau_drm *drm = page_to_drm(vmf->page);
struct nouveau_dmem *dmem = drm->dmem;
struct nouveau_fence *fence;
+ struct nouveau_svmm *svmm;
+ struct page *spage, *dpage;
unsigned long src = 0, dst = 0;
dma_addr_t dma_addr = 0;
- vm_fault_t ret;
+ vm_fault_t ret = 0;
struct migrate_vma args = {
.vma = vmf->vma,
.start = vmf->address,
@@ -194,6 +175,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
.src = &src,
.dst = &dst,
.pgmap_owner = drm->dev,
+ .fault_page = vmf->page,
.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
};
@@ -207,10 +189,26 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
if (!args.cpages)
return 0;
- ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
- if (ret || dst == 0)
+ spage = migrate_pfn_to_page(src);
+ if (!spage || !(src & MIGRATE_PFN_MIGRATE))
goto done;
+ dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
+ if (!dpage)
+ goto done;
+
+ dst = migrate_pfn(page_to_pfn(dpage));
+
+ svmm = spage->zone_device_data;
+ mutex_lock(&svmm->mutex);
+ nouveau_svmm_invalidate(svmm, args.start, args.end);
+ ret = nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr);
+ mutex_unlock(&svmm->mutex);
+ if (ret) {
+ ret = VM_FAULT_SIGBUS;
+ goto done;
+ }
+
nouveau_fence_new(dmem->migrate.chan, false, &fence);
migrate_vma_pages(&args);
nouveau_dmem_fence_done(&fence);
@@ -326,7 +324,7 @@ nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
return NULL;
}
- lock_page(page);
+ zone_device_page_init(page);
return page;
}
@@ -369,6 +367,52 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
mutex_unlock(&drm->dmem->mutex);
}
+/*
+ * Evict all pages mapping a chunk.
+ */
+static void
+nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
+{
+ unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
+ unsigned long *src_pfns, *dst_pfns;
+ dma_addr_t *dma_addrs;
+ struct nouveau_fence *fence;
+
+ src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
+ dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
+ dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
+
+ migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
+ npages);
+
+ for (i = 0; i < npages; i++) {
+ if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
+ struct page *dpage;
+
+ /*
+ * __GFP_NOFAIL because the GPU is going away and there
+ * is nothing sensible we can do if we can't copy the
+ * data back.
+ */
+ dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
+ dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
+ nouveau_dmem_copy_one(chunk->drm,
+ migrate_pfn_to_page(src_pfns[i]), dpage,
+ &dma_addrs[i]);
+ }
+ }
+
+ nouveau_fence_new(chunk->drm->dmem->migrate.chan, false, &fence);
+ migrate_device_pages(src_pfns, dst_pfns, npages);
+ nouveau_dmem_fence_done(&fence);
+ migrate_device_finalize(src_pfns, dst_pfns, npages);
+ kfree(src_pfns);
+ kfree(dst_pfns);
+ for (i = 0; i < npages; i++)
+ dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
+ kfree(dma_addrs);
+}
+
void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
@@ -380,8 +424,10 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
mutex_lock(&drm->dmem->mutex);
list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
+ nouveau_dmem_evict_chunk(chunk);
nouveau_bo_unpin(chunk->bo);
nouveau_bo_ref(NULL, &chunk->bo);
+ WARN_ON(chunk->callocated);
list_del(&chunk->list);
memunmap_pages(&chunk->pagemap);
release_mem_region(chunk->pagemap.range.start,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 561309d447e0..fd99ec0f4257 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -28,6 +28,7 @@
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <linux/mmu_notifier.h>
+#include <linux/dynamic_debug.h>
#include <drm/drm_aperture.h>
#include <drm/drm_crtc_helper.h>
@@ -70,6 +71,18 @@
#include "nouveau_svm.h"
#include "nouveau_dmem.h"
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
module_param_named(config, nouveau_config, charp, 0400);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index fab542a758ff..ac5793c96957 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -679,7 +679,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
}
if (!nvbo->kmap.virtual) {
- ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages,
+ ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
&nvbo->kmap);
if (ret) {
NV_PRINTK(err, cli, "failed kmap for reloc\n");
@@ -868,8 +868,7 @@ revalidate:
if (unlikely(cmd != req->suffix0)) {
if (!nvbo->kmap.virtual) {
ret = ttm_bo_kmap(&nvbo->bo, 0,
- nvbo->bo.resource->
- num_pages,
+ PFN_UP(nvbo->bo.base.size),
&nvbo->kmap);
if (ret) {
WIND_RING(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 76f8edefa637..1fde3a5d7c32 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -115,7 +115,7 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
mutex_lock(&drm->master.lock);
ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
- reg->num_pages << PAGE_SHIFT,
+ reg->size,
&args, sizeof(args), &mem->mem);
mutex_unlock(&drm->master.lock);
return ret;
@@ -128,7 +128,7 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
struct nouveau_cli *cli = mem->cli;
struct nouveau_drm *drm = cli->drm;
struct nvif_mmu *mmu = &cli->mmu;
- u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
+ u64 size = ALIGN(reg->size, 1 << page);
int ret;
mutex_lock(&drm->master.lock);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 9602c30928f2..1469a88910e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -139,7 +139,7 @@ nv04_gart_manager_new(struct ttm_resource_manager *man,
mem = nouveau_mem(*res);
ttm_resource_init(bo, place, *res);
ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
- (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
+ (long)(*res)->size, &mem->vma[0]);
if (ret) {
nouveau_mem_del(man, *res);
return ret;
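The recurring nouveau change above follows from TTM dropping the num_pages field of struct ttm_resource in favour of a byte-sized size, so callers now derive page counts with PFN_UP(). A standalone sketch of the arithmetic, with a hard-coded 4 KiB page size for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
/* Mirrors the kernel macro: round a byte count up to whole pages. */
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	uint64_t sizes[] = { 4096, 4097, 1 << 20 };

	for (unsigned int i = 0; i < 3; i++)
		printf("%8llu bytes -> %llu page(s)\n",
		       (unsigned long long)sizes[i],
		       (unsigned long long)PFN_UP(sizes[i]));
	return 0;
}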
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 92f3fb6765ab..c30b8dacd86b 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -137,6 +137,8 @@ nv04_fbcon_accel_init(struct fb_info *info)
struct nouveau_channel *chan = drm->channel;
struct nvif_device *device = &drm->client.device;
struct nvif_push *push = chan->chan.push;
+ struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device);
+ resource_size_t fb_base = nvkm_device->func->resource_addr(nvkm_device, 1);
int surface_fmt, pattern_fmt, rect_fmt;
int ret;
@@ -210,8 +212,8 @@ nv04_fbcon_accel_init(struct fb_info *info)
0x0188, chan->vram.handle);
PUSH_NVSQ(push, NV042, 0x0300, surface_fmt,
0x0304, info->fix.line_length | (info->fix.line_length << 16),
- 0x0308, info->fix.smem_start - dev->mode_config.fb_base,
- 0x030c, info->fix.smem_start - dev->mode_config.fb_base);
+ 0x0308, info->fix.smem_start - fb_base,
+ 0x030c, info->fix.smem_start - fb_base);
PUSH_NVSQ(push, NV043, 0x0000, nfbdev->rop.handle);
PUSH_NVSQ(push, NV043, 0x0300, 0x55);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 40706c5aad7b..ed67dd25794c 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -177,8 +177,6 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_info(fbi, helper, sizes);
- dev->mode_config.fb_base = dma_addr;
-
fbi->screen_buffer = omap_gem_vaddr(fbdev->bo);
fbi->screen_size = fbdev->bo->size;
fbi->fix.smem_start = dma_addr;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 393f82e26927..8e194dbc9506 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -125,7 +125,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf);
- sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
+ sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto fail_detach;
@@ -142,7 +142,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
return obj;
fail_unmap:
- dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
+ dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_TO_DEVICE);
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 060f4f98bc04..5cb8dc2ebe18 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -403,17 +403,10 @@ static int panel_edp_unprepare(struct drm_panel *panel)
static int panel_edp_get_hpd_gpio(struct device *dev, struct panel_edp *p)
{
- int err;
-
p->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
- if (IS_ERR(p->hpd_gpio)) {
- err = PTR_ERR(p->hpd_gpio);
-
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to get 'hpd' GPIO: %d\n", err);
-
- return err;
- }
+ if (IS_ERR(p->hpd_gpio))
+ return dev_err_probe(dev, PTR_ERR(p->hpd_gpio),
+ "failed to get 'hpd' GPIO\n");
return 0;
}
@@ -832,12 +825,9 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR(panel->enable_gpio)) {
- err = PTR_ERR(panel->enable_gpio);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to request GPIO: %d\n", err);
- return err;
- }
+ if (IS_ERR(panel->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(panel->enable_gpio),
+ "failed to request GPIO\n");
err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
if (err) {
@@ -1295,7 +1285,8 @@ static const struct panel_desc innolux_n116bca_ea1 = {
},
.delay = {
.hpd_absent = 200,
- .prepare_to_enable = 80,
+ .enable = 80,
+ .disable = 50,
.unprepare = 500,
},
};
@@ -1878,6 +1869,7 @@ static const struct panel_delay delay_200_500_e200 = {
static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1062, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
@@ -1885,10 +1877,17 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1139, &delay_200_500_e80_d50, "N116BGE-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1152, &delay_200_500_e80_d50, "N116BCN-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1153, &delay_200_500_e80_d50, "N116BGE-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1154, &delay_200_500_e80_d50, "N116BCA-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x057d, &delay_200_500_e200, "R140NWF5 RH"),
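The probe-path hunks in this panel driver (and the matching panel-simple change below) all adopt the same dev_err_probe() idiom, which folds the old "don't log on -EPROBE_DEFER" test into one call: real errors are logged at error level, -EPROBE_DEFER instead records a deferral reason, and the error is returned either way. A minimal sketch of the shape, where example_panel is a hypothetical driver struct:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

struct example_panel {
	struct gpio_desc *enable_gpio;
};

static int example_panel_get_gpio(struct device *dev,
				  struct example_panel *p)
{
	p->enable_gpio = devm_gpiod_get_optional(dev, "enable",
						 GPIOD_OUT_LOW);
	if (IS_ERR(p->enable_gpio))
		/* Logs, or records a deferral reason, then returns the error. */
		return dev_err_probe(dev, PTR_ERR(p->enable_gpio),
				     "failed to request GPIO\n");

	return 0;
}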
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index cb5cb27462df..36a46cb7fe1c 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -288,7 +288,7 @@ static int lcd_olinuxino_probe(struct i2c_client *client,
return 0;
}
-static int lcd_olinuxino_remove(struct i2c_client *client)
+static void lcd_olinuxino_remove(struct i2c_client *client)
{
struct lcd_olinuxino *panel = i2c_get_clientdata(client);
@@ -296,8 +296,6 @@ static int lcd_olinuxino_remove(struct i2c_client *client)
drm_panel_disable(&panel->panel);
drm_panel_unprepare(&panel->panel);
-
- return 0;
}
static const struct of_device_id lcd_olinuxino_of_ids[] = {
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index a6dc5ab182fa..79f852465a84 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -446,7 +446,7 @@ error:
return -ENODEV;
}
-static int rpi_touchscreen_remove(struct i2c_client *i2c)
+static void rpi_touchscreen_remove(struct i2c_client *i2c)
{
struct rpi_touchscreen *ts = i2c_get_clientdata(i2c);
@@ -455,8 +455,6 @@ static int rpi_touchscreen_remove(struct i2c_client *i2c)
drm_panel_remove(&ts->base);
mipi_dsi_device_unregister(ts->dsi);
-
- return 0;
}
static int rpi_touchscreen_dsi_probe(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-samsung-db7430.c b/drivers/gpu/drm/panel/panel-samsung-db7430.c
index 04640c5256a8..117b26845083 100644
--- a/drivers/gpu/drm/panel/panel-samsung-db7430.c
+++ b/drivers/gpu/drm/panel/panel-samsung-db7430.c
@@ -331,9 +331,16 @@ static const struct of_device_id db7430_match[] = {
};
MODULE_DEVICE_TABLE(of, db7430_match);
+static const struct spi_device_id db7430_ids[] = {
+ { "lms397kf04" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, db7430_ids);
+
static struct spi_driver db7430_driver = {
.probe = db7430_probe,
.remove = db7430_remove,
+ .id_table = db7430_ids,
.driver = {
.name = "db7430-panel",
.of_match_table = db7430_match,
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index edd5a0c35437..2944228a8e2c 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -575,12 +575,9 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR(panel->enable_gpio)) {
- err = PTR_ERR(panel->enable_gpio);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to request GPIO: %d\n", err);
- return err;
- }
+ if (IS_ERR(panel->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(panel->enable_gpio),
+ "failed to request GPIO\n");
err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
if (err) {
@@ -2255,7 +2252,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
.enable = 200,
.disable = 20,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
@@ -2701,6 +2698,36 @@ static const struct panel_desc multi_inno_mi0700s4t_6 = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct display_timing multi_inno_mi0800ft_9_timing = {
+ .pixelclock = { 32000000, 40000000, 50000000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 16, 210, 354 },
+ .hback_porch = { 6, 26, 45 },
+ .hsync_len = { 1, 20, 40 },
+ .vactive = { 600, 600, 600 },
+ .vfront_porch = { 1, 12, 77 },
+ .vback_porch = { 3, 13, 22 },
+ .vsync_len = { 1, 10, 20 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc multi_inno_mi0800ft_9 = {
+ .timings = &multi_inno_mi0800ft_9_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 162,
+ .height = 122,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct display_timing multi_inno_mi1010ait_1cp_timing = {
.pixelclock = { 68900000, 70000000, 73400000 },
.hactive = { 1280, 1280, 1280 },
@@ -4133,6 +4160,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "multi-inno,mi0700s4t-6",
.data = &multi_inno_mi0700s4t_6,
}, {
+ .compatible = "multi-inno,mi0800ft-9",
+ .data = &multi_inno_mi0800ft_9,
+ }, {
.compatible = "multi-inno,mi1010ait-1cp",
.data = &multi_inno_mi1010ait_1cp,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index c481daa4bbce..0b8cf65172ff 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -19,6 +19,9 @@
/* Command2 BKx selection command */
#define DSI_CMD2BKX_SEL 0xFF
+#define DSI_CMD1 0
+#define DSI_CMD2 BIT(4)
+#define DSI_CMD2BK_MASK GENMASK(3, 0)
/* Command2, BK0 commands */
#define DSI_CMD2_BK0_PVGAMCTRL 0xB0 /* Positive Voltage Gamma Control */
@@ -39,21 +42,6 @@
#define DSI_CMD2_BK1_SPD2 0xC2 /* Source EQ2 Setting */
#define DSI_CMD2_BK1_MIPISET1 0xD0 /* MIPI Setting 1 */
-/*
- * Command2 with BK function selection.
- *
- * BIT[4].....CN2
- * BIT[1:0]...BKXSEL
- * 1:00 = CMD2BK0, Command2 BK0
- * 1:01 = CMD2BK1, Command2 BK1
- * 1:11 = CMD2BK3, Command2 BK3
- * 0:00 = Command2 disable
- */
-#define DSI_CMD2BK0_SEL 0x10
-#define DSI_CMD2BK1_SEL 0x11
-#define DSI_CMD2BK3_SEL 0x13
-#define DSI_CMD2BKX_SEL_NONE 0x00
-
/* Command2, BK0 bytes */
#define DSI_CMD2_BK0_GAMCTRL_AJ_MASK GENMASK(7, 6)
#define DSI_CMD2_BK0_GAMCTRL_VC0_MASK GENMASK(3, 0)
@@ -191,6 +179,18 @@ static u8 st7701_vgls_map(struct st7701 *st7701)
return 0;
}
+static void st7701_switch_cmd_bkx(struct st7701 *st7701, bool cmd2, u8 bkx)
+{
+ u8 val;
+
+ if (cmd2)
+ val = DSI_CMD2 | FIELD_PREP(DSI_CMD2BK_MASK, bkx);
+ else
+ val = DSI_CMD1;
+
+ ST7701_DSI(st7701, DSI_CMD2BKX_SEL, 0x77, 0x01, 0x00, 0x00, val);
+}
+
static void st7701_init_sequence(struct st7701 *st7701)
{
const struct st7701_panel_desc *desc = st7701->desc;
@@ -208,8 +208,8 @@ static void st7701_init_sequence(struct st7701 *st7701)
msleep(st7701->sleep_delay);
/* Command2, BK0 */
- ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
- 0x77, 0x01, 0x00, 0x00, DSI_CMD2BK0_SEL);
+ st7701_switch_cmd_bkx(st7701, true, 0);
+
mipi_dsi_dcs_write(st7701->dsi, DSI_CMD2_BK0_PVGAMCTRL,
desc->pv_gamma, ARRAY_SIZE(desc->pv_gamma));
mipi_dsi_dcs_write(st7701->dsi, DSI_CMD2_BK0_NVGAMCTRL,
@@ -244,11 +244,10 @@ static void st7701_init_sequence(struct st7701 *st7701)
DSI_CMD2_BK0_INVSEL_ONES_MASK |
FIELD_PREP(DSI_CMD2_BK0_INVSEL_NLINV_MASK, desc->nlinv),
FIELD_PREP(DSI_CMD2_BK0_INVSEL_RTNI_MASK,
- DIV_ROUND_UP(mode->htotal, 16)));
+ (clamp((u32)mode->htotal, 512U, 1008U) - 512) / 16));
/* Command2, BK1 */
- ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
- 0x77, 0x01, 0x00, 0x00, DSI_CMD2BK1_SEL);
+ st7701_switch_cmd_bkx(st7701, true, 1);
/* Vop = 3.5375V + (VRHA[7:0] * 0.0125V) */
ST7701_DSI(st7701, DSI_CMD2_BK1_VRHS,
@@ -373,33 +372,27 @@ static void dmt028vghmcmi_1a_gip_sequence(struct st7701 *st7701)
0x08, 0x08, 0x08, 0x40,
0x3F, 0x64);
- ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
- 0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+ st7701_switch_cmd_bkx(st7701, false, 0);
- ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
- 0x77, 0x01, 0x00, 0x00, DSI_CMD2BK3_SEL);
+ st7701_switch_cmd_bkx(st7701, true, 3);
ST7701_DSI(st7701, 0xE6, 0x7C);
ST7701_DSI(st7701, 0xE8, 0x00, 0x0E);
- ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
- 0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+ st7701_switch_cmd_bkx(st7701, false, 0);
ST7701_DSI(st7701, 0x11);
msleep(120);
- ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
- 0x77, 0x01, 0x00, 0x00, DSI_CMD2BK3_SEL);
+ st7701_switch_cmd_bkx(st7701, true, 3);
ST7701_DSI(st7701, 0xE8, 0x00, 0x0C);
msleep(10);
ST7701_DSI(st7701, 0xE8, 0x00, 0x00);
- ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
- 0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+ st7701_switch_cmd_bkx(st7701, false, 0);
ST7701_DSI(st7701, 0x11);
msleep(120);
ST7701_DSI(st7701, 0xE8, 0x00, 0x00);
- ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
- 0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+ st7701_switch_cmd_bkx(st7701, false, 0);
ST7701_DSI(st7701, 0x3A, 0x70);
}
@@ -426,8 +419,7 @@ static int st7701_prepare(struct drm_panel *panel)
st7701->desc->gip_sequence(st7701);
/* Disable Command2 */
- ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
- 0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+ st7701_switch_cmd_bkx(st7701, false, 0);
return 0;
}
@@ -762,7 +754,15 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
st7701->dsi = dsi;
st7701->desc = desc;
- return mipi_dsi_attach(dsi);
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ goto err_attach;
+
+ return 0;
+
+err_attach:
+ drm_panel_remove(&st7701->panel);
+ return ret;
}
static void st7701_dsi_remove(struct mipi_dsi_device *dsi)
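The st7701 refactor above replaces the hand-rolled DSI_CMD2BK*_SEL constants with the DSI_CMD2 flag plus a FIELD_PREP'd bank number, and pins the RTNI field to its actual encoding, (htotal - 512) / 16 with htotal clamped to [512, 1008]. Both are easy to check in userspace with stand-ins for the kernel's GENMASK()/FIELD_PREP() macros (the stand-ins assume a contiguous mask, which holds here):

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)		(((1U << ((h) - (l) + 1)) - 1) << (l))
#define FIELD_PREP(mask, v)	(((uint32_t)(v) << __builtin_ctz(mask)) & (mask))

#define DSI_CMD2	(1U << 4)	/* BIT(4) */
#define DSI_CMD2BK_MASK	GENMASK(3, 0)

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned int bk;

	/* Prints 0x10..0x13; BK0/BK1/BK3 match the removed selectors. */
	for (bk = 0; bk <= 3; bk++)
		printf("BK%u select byte: 0x%02x\n", bk,
		       DSI_CMD2 | FIELD_PREP(DSI_CMD2BK_MASK, bk));

	/* RTNI for a 720-pixel htotal: (720 - 512) / 16 = 13. */
	printf("RTNI = %u\n", (clamp_u32(720, 512, 1008) - 512) / 16);
	return 0;
}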
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index 0b1f5a11a055..845304435e23 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -463,9 +463,16 @@ static const struct of_device_id tpg110_match[] = {
};
MODULE_DEVICE_TABLE(of, tpg110_match);
+static const struct spi_device_id tpg110_ids[] = {
+ { "tpg110" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, tpg110_ids);
+
static struct spi_driver tpg110_driver = {
.probe = tpg110_probe,
.remove = tpg110_remove,
+ .id_table = tpg110_ids,
.driver = {
.name = "tpo-tpg110-panel",
.of_match_table = tpg110_match,
diff --git a/drivers/gpu/drm/panel/panel-widechips-ws2401.c b/drivers/gpu/drm/panel/panel-widechips-ws2401.c
index 236f3cb2b594..2591ff8f0d4e 100644
--- a/drivers/gpu/drm/panel/panel-widechips-ws2401.c
+++ b/drivers/gpu/drm/panel/panel-widechips-ws2401.c
@@ -425,9 +425,16 @@ static const struct of_device_id ws2401_match[] = {
};
MODULE_DEVICE_TABLE(of, ws2401_match);
+static const struct spi_device_id ws2401_ids[] = {
+ { "lms380kf01" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, ws2401_ids);
+
static struct spi_driver ws2401_driver = {
.probe = ws2401_probe,
.remove = ws2401_remove,
+ .id_table = ws2401_ids,
.driver = {
.name = "ws2401-panel",
.of_match_table = ws2401_match,
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 5110cd9b2425..fe5f12f16a63 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -131,6 +131,17 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
return PTR_ERR(opp);
panfrost_devfreq_profile.initial_freq = cur_freq;
+
+ /*
+ * Set the recommended OPP. This enables and configures the regulator,
+ * if any, and avoids it being switched off by regulator_late_cleanup().
+ */
+ ret = dev_pm_opp_set_opp(dev, opp);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
+ dev_pm_opp_put(opp);
+ return ret;
+ }
+
dev_pm_opp_put(opp);
/*
diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c
index 89056a1aac7d..e7942ac449c6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_dump.c
+++ b/drivers/gpu/drm/panfrost/panfrost_dump.c
@@ -63,13 +63,13 @@ static void panfrost_core_dump_header(struct panfrost_dump_iterator *iter,
{
struct panfrost_dump_object_header *hdr = iter->hdr;
- hdr->magic = cpu_to_le32(PANFROSTDUMP_MAGIC);
- hdr->type = cpu_to_le32(type);
- hdr->file_offset = cpu_to_le32(iter->data - iter->start);
- hdr->file_size = cpu_to_le32(data_end - iter->data);
+ hdr->magic = PANFROSTDUMP_MAGIC;
+ hdr->type = type;
+ hdr->file_offset = iter->data - iter->start;
+ hdr->file_size = data_end - iter->data;
iter->hdr++;
- iter->data += le32_to_cpu(hdr->file_size);
+ iter->data += hdr->file_size;
}
static void
@@ -93,8 +93,8 @@ panfrost_core_dump_registers(struct panfrost_dump_iterator *iter,
reg = panfrost_dump_registers[i] + js_as_offset;
- dumpreg->reg = cpu_to_le32(reg);
- dumpreg->value = cpu_to_le32(gpu_read(pfdev, reg));
+ dumpreg->reg = reg;
+ dumpreg->value = gpu_read(pfdev, reg);
}
panfrost_core_dump_header(iter, PANFROSTDUMP_BUF_REG, dumpreg);
@@ -106,7 +106,7 @@ void panfrost_core_dump(struct panfrost_job *job)
struct panfrost_dump_iterator iter;
struct drm_gem_object *dbo;
unsigned int n_obj, n_bomap_pages;
- __le64 *bomap, *bomap_start;
+ u64 *bomap, *bomap_start;
size_t file_size;
u32 as_nr;
int slot;
@@ -177,11 +177,11 @@ void panfrost_core_dump(struct panfrost_job *job)
* For now, we write the job identifier in the register dump header,
* so that we can decode the entire dump later with pandecode
*/
- iter.hdr->reghdr.jc = cpu_to_le64(job->jc);
- iter.hdr->reghdr.major = cpu_to_le32(PANFROSTDUMP_MAJOR);
- iter.hdr->reghdr.minor = cpu_to_le32(PANFROSTDUMP_MINOR);
- iter.hdr->reghdr.gpu_id = cpu_to_le32(pfdev->features.id);
- iter.hdr->reghdr.nbos = cpu_to_le64(job->bo_count);
+ iter.hdr->reghdr.jc = job->jc;
+ iter.hdr->reghdr.major = PANFROSTDUMP_MAJOR;
+ iter.hdr->reghdr.minor = PANFROSTDUMP_MINOR;
+ iter.hdr->reghdr.gpu_id = pfdev->features.id;
+ iter.hdr->reghdr.nbos = job->bo_count;
panfrost_core_dump_registers(&iter, pfdev, as_nr, slot);
@@ -209,7 +209,7 @@ void panfrost_core_dump(struct panfrost_job *job)
goto dump_header;
}
- ret = drm_gem_shmem_vmap(&bo->base, &map);
+ ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
if (ret) {
dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n");
iter.hdr->bomap.valid = 0;
@@ -218,27 +218,27 @@ void panfrost_core_dump(struct panfrost_job *job)
WARN_ON(!mapping->active);
- iter.hdr->bomap.data[0] = cpu_to_le32((bomap - bomap_start));
+ iter.hdr->bomap.data[0] = bomap - bomap_start;
for_each_sgtable_page(bo->base.sgt, &page_iter, 0) {
struct page *page = sg_page_iter_page(&page_iter);
if (!IS_ERR(page)) {
- *bomap++ = cpu_to_le64(page_to_phys(page));
+ *bomap++ = page_to_phys(page);
} else {
dev_err(pfdev->dev, "Panfrost Dump: wrong page\n");
- *bomap++ = ~cpu_to_le64(0);
+ *bomap++ = 0;
}
}
- iter.hdr->bomap.iova = cpu_to_le64(mapping->mmnode.start << PAGE_SHIFT);
+ iter.hdr->bomap.iova = mapping->mmnode.start << PAGE_SHIFT;
vaddr = map.vaddr;
memcpy(iter.data, vaddr, bo->base.base.size);
- drm_gem_shmem_vunmap(&bo->base, &map);
+ drm_gem_vunmap_unlocked(&bo->base.base, &map);
- iter.hdr->bomap.valid = cpu_to_le32(1);
+ iter.hdr->bomap.valid = 1;
dump_header: panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BO, iter.data +
bo->base.base.size);
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index bc0df93f7f21..ba9b6e2b2636 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -106,7 +106,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
goto err_close_bo;
}
- ret = drm_gem_shmem_vmap(bo, &map);
+ ret = drm_gem_vmap_unlocked(&bo->base, &map);
if (ret)
goto err_put_mapping;
perfcnt->buf = map.vaddr;
@@ -165,7 +165,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
return 0;
err_vunmap:
- drm_gem_shmem_vunmap(bo, &map);
+ drm_gem_vunmap_unlocked(&bo->base, &map);
err_put_mapping:
panfrost_gem_mapping_put(perfcnt->mapping);
err_close_bo:
@@ -195,7 +195,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
perfcnt->user = NULL;
- drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base, &map);
+ drm_gem_vunmap_unlocked(&perfcnt->mapping->obj->base.base, &map);
perfcnt->buf = NULL;
panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
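Both panfrost files above move from the shmem-specific drm_gem_shmem_vmap() to the generic drm_gem_vmap_unlocked() helpers, which take the object's reservation lock internally. A minimal sketch of the resulting call pattern, assuming a plain struct drm_gem_object *obj (the copy destination is illustrative):

    struct iosys_map map;
    int ret;

    /* Takes obj's dma-resv lock internally; the caller must not hold it. */
    ret = drm_gem_vmap_unlocked(obj, &map);
    if (ret)
            return ret;

    memcpy(dst, map.vaddr, obj->size);      /* CPU access to the BO */

    drm_gem_vunmap_unlocked(obj, &map);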
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 7b00c955cd82..63aa96a69752 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -53,17 +53,11 @@ void qxl_ring_free(struct qxl_ring *ring)
kfree(ring);
}
-void qxl_ring_init_hdr(struct qxl_ring *ring)
-{
- ring->ring->header.notify_on_prod = ring->n_elements;
-}
-
struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
int element_size,
int n_elements,
int prod_notify,
- bool set_prod_notify,
wait_queue_head_t *push_event)
{
struct qxl_ring *ring;
@@ -77,8 +71,6 @@ qxl_ring_create(struct qxl_ring_header *header,
ring->n_elements = n_elements;
ring->prod_notify = prod_notify;
ring->push_event = push_event;
- if (set_prod_notify)
- qxl_ring_init_hdr(ring);
spin_lock_init(&ring->lock);
return ring;
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index a152a7c6db21..6492a70e3c39 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -1261,8 +1261,6 @@ int qxl_modeset_init(struct qxl_device *qdev)
qdev->ddev.mode_config.max_width = 8192;
qdev->ddev.mode_config.max_height = 8192;
- qdev->ddev.mode_config.fb_base = qdev->vram_base;
-
drm_mode_create_suggested_offset_properties(&qdev->ddev);
qxl_mode_create_hotplug_mode_update_property(qdev);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 47c169673088..432758ad39a3 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -277,10 +277,8 @@ struct qxl_ring *qxl_ring_create(struct qxl_ring_header *header,
int element_size,
int n_elements,
int prod_notify,
- bool set_prod_notify,
wait_queue_head_t *push_event);
void qxl_ring_free(struct qxl_ring *ring);
-void qxl_ring_init_hdr(struct qxl_ring *ring);
int qxl_check_idle(struct qxl_ring *ring);
static inline uint64_t
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 9bf6d4cc98d4..dc3828db1991 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -194,7 +194,6 @@ int qxl_device_init(struct qxl_device *qdev,
sizeof(struct qxl_command),
QXL_COMMAND_RING_SIZE,
qdev->io_base + QXL_IO_NOTIFY_CMD,
- false,
&qdev->display_event);
if (!qdev->command_ring) {
DRM_ERROR("Unable to create command ring\n");
@@ -207,7 +206,6 @@ int qxl_device_init(struct qxl_device *qdev,
sizeof(struct qxl_command),
QXL_CURSOR_RING_SIZE,
qdev->io_base + QXL_IO_NOTIFY_CURSOR,
- false,
&qdev->cursor_event);
if (!qdev->cursor_ring) {
@@ -219,7 +217,7 @@ int qxl_device_init(struct qxl_device *qdev,
qdev->release_ring = qxl_ring_create(
&(qdev->ram_header->release_ring_hdr),
sizeof(uint64_t),
- QXL_RELEASE_RING_SIZE, 0, true,
+ QXL_RELEASE_RING_SIZE, 0,
NULL);
if (!qdev->release_ring) {
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 695d9308d1f0..06a58dad5f5c 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -168,9 +168,16 @@ int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map)
bo->map_count++;
goto out;
}
- r = ttm_bo_vmap(&bo->tbo, &bo->map);
+
+ r = __qxl_bo_pin(bo);
if (r)
return r;
+
+ r = ttm_bo_vmap(&bo->tbo, &bo->map);
+ if (r) {
+ __qxl_bo_unpin(bo);
+ return r;
+ }
bo->map_count = 1;
/* TODO: Remove kptr in favor of map everywhere. */
@@ -192,12 +199,6 @@ int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map)
if (r)
return r;
- r = __qxl_bo_pin(bo);
- if (r) {
- qxl_bo_unreserve(bo);
- return r;
- }
-
r = qxl_bo_vmap_locked(bo, map);
qxl_bo_unreserve(bo);
return r;
@@ -247,6 +248,7 @@ void qxl_bo_vunmap_locked(struct qxl_bo *bo)
return;
bo->kptr = NULL;
ttm_bo_vunmap(&bo->tbo, &bo->map);
+ __qxl_bo_unpin(bo);
}
int qxl_bo_vunmap(struct qxl_bo *bo)
@@ -258,7 +260,6 @@ int qxl_bo_vunmap(struct qxl_bo *bo)
return r;
qxl_bo_vunmap_locked(bo);
- __qxl_bo_unpin(bo);
qxl_bo_unreserve(bo);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 142d01415acb..9169c26357d3 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -59,7 +59,7 @@ int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
struct qxl_bo *bo = gem_to_qxl_bo(obj);
int ret;
- ret = qxl_bo_vmap(bo, map);
+ ret = qxl_bo_vmap_locked(bo, map);
if (ret < 0)
return ret;
@@ -71,5 +71,5 @@ void qxl_gem_prime_vunmap(struct drm_gem_object *obj,
{
struct qxl_bo *bo = gem_to_qxl_bo(obj);
- qxl_bo_vunmap(bo);
+ qxl_bo_vunmap_locked(bo);
}
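The prime hooks switch to the _locked variants presumably because the dma-buf core now invokes an exporter's vmap/vunmap callbacks with the buffer's reservation lock already held; going through qxl_bo_vmap(), which reserves the BO itself, would deadlock. A sketch of that convention (qxl helper names as in the diff, the hook name is illustrative):

    /* Exporter vmap hook: resv is already held by the dma-buf core,
     * so only the _locked helper is safe here.
     */
    static int example_prime_vmap(struct drm_gem_object *obj,
                                  struct iosys_map *map)
    {
            return qxl_bo_vmap_locked(gem_to_qxl_bo(obj), map);
    }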
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 446f7bae54c4..46a27ebf4588 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -400,8 +400,11 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a,
struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
/* Sort A before B if A is smaller. */
- return (int)la->robj->tbo.resource->num_pages -
- (int)lb->robj->tbo.resource->num_pages;
+ if (la->robj->tbo.base.size > lb->robj->tbo.base.size)
+ return 1;
+ if (la->robj->tbo.base.size < lb->robj->tbo.base.size)
+ return -1;
+ return 0;
}
/**
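The comparator rewrite above matters for correctness: subtracting two sizes cast to int can overflow and report the wrong sign for very large buffer objects. A self-contained illustration of why explicit comparisons are used instead (values are hypothetical):

    /* Subtraction-based compares overflow; a three-way compare does not. */
    static int cmp_sizes(size_t a, size_t b)
    {
            return (a > b) - (a < b);       /* -1, 0 or 1, overflow-free */
    }

    /* e.g. a = 3 GiB, b = 1 KiB: (int)a - (int)b is meaningless with a
     * 32-bit int, while cmp_sizes(a, b) correctly returns 1.
     */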
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index ca5598ae8bfc..9bed1a6cb163 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1604,8 +1604,6 @@ int radeon_modeset_init(struct radeon_device *rdev)
rdev->ddev->mode_config.fb_modifiers_not_supported = true;
- rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
-
ret = radeon_modeset_create_props(rdev);
if (ret) {
return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 6ccea51d4072..cc6754d88b81 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -276,7 +276,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);
/* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
+ info->apertures->ranges[0].base = rdev->mc.aper_base;
info->apertures->ranges[0].size = rdev->mc.aper_size;
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 00c33b24d5d3..10c0fbd9d2b4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -232,7 +232,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
}
return 0;
}
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
if (r) {
return r;
}
@@ -737,7 +737,7 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
- size = bo->resource->num_pages << PAGE_SHIFT;
+ size = bo->resource->size;
offset = bo->resource->start << PAGE_SHIFT;
if ((offset + size) <= rdev->mc.visible_vram_size)
return 0;
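These radeon hunks track a TTM-wide change: struct ttm_resource now carries a byte size rather than a page count, so page counts are derived on demand. A one-line sketch of the conversion, assuming PAGE_SIZE granularity:

    /* num_pages is gone; derive it from the byte size, rounding up. */
    unsigned long npages = PFN_UP(bo->tbo.base.size);
    size_t bytes = bo->tbo.resource->size;  /* the authoritative size now */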
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index c9fed5f2b870..22676617e1a5 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create,
TP_fast_assign(
__entry->bo = bo;
- __entry->pages = bo->tbo.resource->num_pages;
+ __entry->pages = PFN_UP(bo->tbo.resource->size);
),
TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d33fec488713..fff48306c05f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -181,7 +181,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
- num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+ num_pages = PFN_UP(new_mem->size) * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
if (IS_ERR(fence))
return PTR_ERR(fence);
@@ -268,7 +268,7 @@ out:
static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
struct radeon_device *rdev = radeon_get_rdev(bdev);
- size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+ size_t bus_size = (size_t)mem->size;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index c204e9b95c1f..518ee13b1d6f 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -283,8 +283,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+cdn_dp_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct cdn_dp_device *dp = connector_to_dp(connector);
struct drm_display_info *display_info = &dp->connector.display_info;
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index bf6948125b84..92b599b089f9 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -364,12 +364,6 @@ static inline u32 dsi_read(struct dw_mipi_dsi_rockchip *dsi, u32 reg)
return readl(dsi->base + reg);
}
-static inline void dsi_update_bits(struct dw_mipi_dsi_rockchip *dsi, u32 reg,
- u32 mask, u32 val)
-{
- dsi_write(dsi, reg, (dsi_read(dsi, reg) & ~mask) | val);
-}
-
static void dw_mipi_dsi_phy_write(struct dw_mipi_dsi_rockchip *dsi,
u8 test_code,
u8 test_data)
@@ -1213,7 +1207,7 @@ static int dw_mipi_dsi_dphy_power_on(struct phy *phy)
return i;
}
- ret = pm_runtime_get_sync(dsi->dev);
+ ret = pm_runtime_resume_and_get(dsi->dev);
if (ret < 0) {
DRM_DEV_ERROR(dsi->dev, "failed to enable device: %d\n", ret);
return ret;
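This and the later rockchip hunks replace pm_runtime_get_sync() with pm_runtime_resume_and_get(). The difference is the error path: get_sync() raises the usage counter even when resume fails, forcing callers to remember pm_runtime_put_noidle(), whereas resume_and_get() drops the counter itself on failure. The intended usage, sketched:

    ret = pm_runtime_resume_and_get(dev);
    if (ret < 0)
            return ret;     /* counter already balanced on failure */

    /* ... device is resumed and usable here ... */

    pm_runtime_put(dev);    /* pairs with the successful get */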
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 092bf863110b..409eaa1bf092 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -25,35 +25,6 @@ static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
.dirty = drm_atomic_helper_dirtyfb,
};
-static struct drm_framebuffer *
-rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object **obj, unsigned int num_planes)
-{
- struct drm_framebuffer *fb;
- int ret;
- int i;
-
- fb = kzalloc(sizeof(*fb), GFP_KERNEL);
- if (!fb)
- return ERR_PTR(-ENOMEM);
-
- drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
-
- for (i = 0; i < num_planes; i++)
- fb->obj[i] = obj[i];
-
- ret = drm_framebuffer_init(dev, fb, &rockchip_drm_fb_funcs);
- if (ret) {
- DRM_DEV_ERROR(dev->dev,
- "Failed to initialize framebuffer: %d\n",
- ret);
- kfree(fb);
- return ERR_PTR(ret);
- }
-
- return fb;
-}
-
static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
@@ -106,20 +77,6 @@ static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-struct drm_framebuffer *
-rockchip_drm_framebuffer_init(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- struct drm_framebuffer *fb;
-
- fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
- if (IS_ERR(fb))
- return ERR_CAST(fb);
-
- return fb;
-}
-
void rockchip_drm_mode_config_init(struct drm_device *dev)
{
dev->mode_config.min_width = 0;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
index 1a696521096d..bae4e079dfb1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
@@ -7,11 +7,5 @@
#ifndef _ROCKCHIP_DRM_FB_H
#define _ROCKCHIP_DRM_FB_H
-struct drm_framebuffer *
-rockchip_drm_framebuffer_init(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb);
-
void rockchip_drm_mode_config_init(struct drm_device *dev);
#endif /* _ROCKCHIP_DRM_FB_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index c356de5dd220..fa1f4ee6d195 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -602,7 +602,7 @@ static int vop_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state)
struct vop *vop = to_vop(crtc);
int ret, i;
- ret = pm_runtime_get_sync(vop->dev);
+ ret = pm_runtime_resume_and_get(vop->dev);
if (ret < 0) {
DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
return ret;
@@ -1983,7 +1983,7 @@ static int vop_initial(struct vop *vop)
return PTR_ERR(vop->dclk);
}
- ret = pm_runtime_get_sync(vop->dev);
+ ret = pm_runtime_resume_and_get(vop->dev);
if (ret < 0) {
DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 552426d5d3a2..ad87db2fcaf6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -822,7 +822,7 @@ static void vop2_enable(struct vop2 *vop2)
{
int ret;
- ret = pm_runtime_get_sync(vop2->dev);
+ ret = pm_runtime_resume_and_get(vop2->dev);
if (ret < 0) {
drm_err(vop2->drm, "failed to get pm runtime: %d\n", ret);
return;
@@ -1438,11 +1438,15 @@ static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX;
die |= RK3568_SYS_DSP_INFACE_EN_HDMI |
FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags);
break;
case ROCKCHIP_VOP2_EP_EDP0:
die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX;
die |= RK3568_SYS_DSP_INFACE_EN_EDP |
FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags);
break;
case ROCKCHIP_VOP2_EP_MIPI0:
die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX;
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 5a284332ec49..68f6ebb33460 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -152,7 +152,7 @@ static int rk3288_lvds_poweron(struct rockchip_lvds *lvds)
DRM_DEV_ERROR(lvds->dev, "failed to enable lvds pclk %d\n", ret);
return ret;
}
- ret = pm_runtime_get_sync(lvds->dev);
+ ret = pm_runtime_resume_and_get(lvds->dev);
if (ret < 0) {
DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret);
clk_disable(lvds->pclk);
@@ -336,16 +336,20 @@ static int px30_lvds_poweron(struct rockchip_lvds *lvds)
{
int ret;
- ret = pm_runtime_get_sync(lvds->dev);
+ ret = pm_runtime_resume_and_get(lvds->dev);
if (ret < 0) {
DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret);
return ret;
}
/* Enable LVDS mode */
- return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+ ret = regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1),
PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1));
+ if (ret)
+ pm_runtime_put(lvds->dev);
+
+ return ret;
}
static void px30_lvds_poweroff(struct rockchip_lvds *lvds)
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 9bf0637bf8e2..9d30aa73b542 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -188,7 +188,7 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
.base = 0x1800,
.layer_sel_id = 2,
.supported_rotations = DRM_MODE_REFLECT_Y,
- .type = DRM_PLANE_TYPE_OVERLAY,
+ .type = DRM_PLANE_TYPE_PRIMARY,
.max_upscale_factor = 8,
.max_downscale_factor = 8,
.dly = { 20, 47, 41 },
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 6b25b2f4f5a3..c97bc1149663 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -73,6 +73,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
entity->priority = priority;
entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
entity->last_scheduled = NULL;
+ RB_CLEAR_NODE(&entity->rb_tree_node);
if(num_sched_list)
entity->rq = &sched_list[0]->sched_rq[entity->priority];
@@ -207,6 +208,7 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
finish_cb);
+ dma_fence_put(f);
INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
schedule_work(&job->work);
}
@@ -234,8 +236,10 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
struct drm_sched_fence *s_fence = job->s_fence;
/* Wait for all dependencies to avoid data corruptions */
- while ((f = drm_sched_job_dependency(job, entity)))
+ while ((f = drm_sched_job_dependency(job, entity))) {
dma_fence_wait(f, false);
+ dma_fence_put(f);
+ }
drm_sched_fence_scheduled(s_fence);
dma_fence_set_error(&s_fence->finished, -ESRCH);
@@ -250,6 +254,7 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
continue;
}
+ dma_fence_get(entity->last_scheduled);
r = dma_fence_add_callback(entity->last_scheduled,
&job->finish_cb,
drm_sched_entity_kill_jobs_cb);
@@ -385,7 +390,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
}
s_fence = to_drm_sched_fence(fence);
- if (s_fence && s_fence->sched == sched) {
+ if (s_fence && s_fence->sched == sched &&
+ !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {
/*
* Fence is from the same scheduler, only need to wait for
@@ -443,6 +449,19 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
smp_wmb();
spsc_queue_pop(&entity->job_queue);
+
+ /*
+ * Update the entity's location in the min heap according to
+ * the timestamp of the next job, if any.
+ */
+ if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
+ struct drm_sched_job *next;
+
+ next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+ if (next)
+ drm_sched_rq_update_fifo(entity, next->submit_ts);
+ }
+
return sched_job;
}
@@ -507,6 +526,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
atomic_inc(entity->rq->sched->score);
WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
+ sched_job->submit_ts = ktime_get();
/* first job wakes up scheduler */
if (first) {
@@ -518,8 +538,13 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
DRM_ERROR("Trying to push to a killed entity\n");
return;
}
+
drm_sched_rq_add_entity(entity->rq, entity);
spin_unlock(&entity->rq_lock);
+
+ if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
+ drm_sched_rq_update_fifo(entity, sched_job->submit_ts);
+
drm_sched_wakeup(entity->rq->sched);
}
}
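The kill-jobs changes pin last_scheduled across the callback's lifetime: a reference is taken before arming the callback and released inside it, and each dependency fence returned by drm_sched_job_dependency() is now put after the wait. The general fence-callback pattern looks roughly like this (my_cb is illustrative); note that dma_fence_add_callback() returns -ENOENT when the fence has already signaled, in which case the callback never runs and the caller must invoke it (or drop the reference) itself:

    static void my_cb(struct dma_fence *f, struct dma_fence_cb *cb)
    {
            dma_fence_put(f);               /* drop the reference we armed with */
            /* ... deferred work ... */
    }

    dma_fence_get(fence);                   /* keep fence alive for the cb */
    r = dma_fence_add_callback(fence, &cb, my_cb);
    if (r == -ENOENT)
            my_cb(fence, &cb);              /* already signaled: run it now */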
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 4f2395d1a791..d0ff9e11cb69 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -62,6 +62,55 @@
#define to_drm_sched_job(sched_job) \
container_of((sched_job), struct drm_sched_job, queue_node)
+int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
+
+/**
+ * DOC: sched_policy (int)
+ * Used to override the default scheduling policy for entities in a run queue.
+ */
+MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
+module_param_named(sched_policy, drm_sched_policy, int, 0444);
+
+static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
+ const struct rb_node *b)
+{
+ struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
+ struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);
+
+ return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
+}
+
+static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
+{
+ struct drm_sched_rq *rq = entity->rq;
+
+ if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
+ rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
+ RB_CLEAR_NODE(&entity->rb_tree_node);
+ }
+}
+
+void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
+{
+ /*
+ * Both locks need to be grabbed: one to protect against entity->rq
+ * changing underneath us in a concurrent drm_sched_entity_select_rq(),
+ * and the other to serialize updates to the rb tree structure.
+ */
+ spin_lock(&entity->rq_lock);
+ spin_lock(&entity->rq->lock);
+
+ drm_sched_rq_remove_fifo_locked(entity);
+
+ entity->oldest_job_waiting = ts;
+
+ rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
+ drm_sched_entity_compare_before);
+
+ spin_unlock(&entity->rq->lock);
+ spin_unlock(&entity->rq_lock);
+}
+
/**
* drm_sched_rq_init - initialize a given run queue struct
*
@@ -75,6 +124,7 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
{
spin_lock_init(&rq->lock);
INIT_LIST_HEAD(&rq->entities);
+ rq->rb_tree_root = RB_ROOT_CACHED;
rq->current_entity = NULL;
rq->sched = sched;
}
@@ -92,9 +142,12 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
{
if (!list_empty(&entity->list))
return;
+
spin_lock(&rq->lock);
+
atomic_inc(rq->sched->score);
list_add_tail(&entity->list, &rq->entities);
+
spin_unlock(&rq->lock);
}
@@ -111,23 +164,30 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
{
if (list_empty(&entity->list))
return;
+
spin_lock(&rq->lock);
+
atomic_dec(rq->sched->score);
list_del_init(&entity->list);
+
if (rq->current_entity == entity)
rq->current_entity = NULL;
+
+ if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
+ drm_sched_rq_remove_fifo_locked(entity);
+
spin_unlock(&rq->lock);
}
/**
- * drm_sched_rq_select_entity - Select an entity which could provide a job to run
+ * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
*
* @rq: scheduler run queue to check.
*
* Try to find a ready entity, returns NULL if none found.
*/
static struct drm_sched_entity *
-drm_sched_rq_select_entity(struct drm_sched_rq *rq)
+drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
{
struct drm_sched_entity *entity;
@@ -164,6 +224,34 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
}
/**
+ * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
+ *
+ * @rq: scheduler run queue to check.
+ *
+ * Find oldest waiting ready entity, returns NULL if none found.
+ */
+static struct drm_sched_entity *
+drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
+{
+ struct rb_node *rb;
+
+ spin_lock(&rq->lock);
+ for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
+ struct drm_sched_entity *entity;
+
+ entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
+ if (drm_sched_entity_is_ready(entity)) {
+ rq->current_entity = entity;
+ reinit_completion(&entity->entity_idle);
+ break;
+ }
+ }
+ spin_unlock(&rq->lock);
+
+ return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
+}
+
+/**
* drm_sched_job_done - complete a job
* @s_job: pointer to the job which is done
*
@@ -803,7 +891,9 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
/* Kernel run queue has higher priority than normal run queue*/
for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
- entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
+ entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
+ drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
+ drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
if (entity)
break;
}
@@ -829,7 +919,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
job = list_first_entry_or_null(&sched->pending_list,
struct drm_sched_job, list);
- if (job && dma_fence_is_signaled(job->s_fence->parent)) {
+ if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
/* remove job from pending_list */
list_del_init(&job->list);
@@ -841,7 +931,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
if (next) {
next->s_fence->scheduled.timestamp =
- job->s_fence->parent->timestamp;
+ job->s_fence->finished.timestamp;
/* start TO timer for next job */
drm_sched_start_timeout(sched);
}
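With DRM_SCHED_POLICY_FIFO, each run queue keeps its entities in a cached rbtree keyed by oldest_job_waiting, so selection reduces to walking from rb_first_cached() to the first ready entity. The ordering comes entirely from the comparator passed to rb_add_cached(); a minimal standalone sketch of that mechanism (struct item and its timestamp field are illustrative):

    struct item {
            struct rb_node node;
            ktime_t ts;
    };

    static bool item_before(struct rb_node *a, const struct rb_node *b)
    {
            return ktime_before(rb_entry(a, struct item, node)->ts,
                                rb_entry(b, struct item, node)->ts);
    }

    /* Keeps the tree sorted by timestamp; the oldest entry stays cached,
     * so rb_first_cached() is O(1).
     */
    rb_add_cached(&it->node, &root, item_before);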
diff --git a/drivers/gpu/drm/solomon/ssd130x-i2c.c b/drivers/gpu/drm/solomon/ssd130x-i2c.c
index 1e0fcec7be47..ddfa0bb5d9c9 100644
--- a/drivers/gpu/drm/solomon/ssd130x-i2c.c
+++ b/drivers/gpu/drm/solomon/ssd130x-i2c.c
@@ -39,13 +39,11 @@ static int ssd130x_i2c_probe(struct i2c_client *client)
return 0;
}
-static int ssd130x_i2c_remove(struct i2c_client *client)
+static void ssd130x_i2c_remove(struct i2c_client *client)
{
struct ssd130x_device *ssd130x = i2c_get_clientdata(client);
ssd130x_remove(ssd130x);
-
- return 0;
}
static void ssd130x_i2c_shutdown(struct i2c_client *client)
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 7fae9480aa11..f2795f90ea69 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -20,6 +20,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
@@ -555,43 +556,53 @@ static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb, const struct iosys_m
if (!buf)
return -ENOMEM;
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ goto out_free;
+
iosys_map_set_vaddr(&dst, buf);
drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect);
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+
ssd130x_update_rect(ssd130x, buf, rect);
+out_free:
kfree(buf);
return ret;
}
static void ssd130x_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *plane_state = plane->state;
- struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(old_state, plane);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_atomic_helper_damage_iter iter;
struct drm_device *drm = plane->dev;
- struct drm_rect src_clip, dst_clip;
+ struct drm_rect dst_clip;
+ struct drm_rect damage;
int idx;
- if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &src_clip))
+ if (!drm_dev_enter(drm, &idx))
return;
- dst_clip = plane_state->dst;
- if (!drm_rect_intersect(&dst_clip, &src_clip))
- return;
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ dst_clip = plane_state->dst;
- if (!drm_dev_enter(drm, &idx))
- return;
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
- ssd130x_fb_blit_rect(plane_state->fb, &shadow_plane_state->data[0], &dst_clip);
+ ssd130x_fb_blit_rect(plane_state->fb, &shadow_plane_state->data[0], &dst_clip);
+ }
drm_dev_exit(idx);
}
static void ssd130x_primary_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *drm = plane->dev;
struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
@@ -635,19 +646,6 @@ static enum drm_mode_status ssd130x_crtc_helper_mode_valid(struct drm_crtc *crtc
return MODE_OK;
}
-static int ssd130x_crtc_helper_atomic_check(struct drm_crtc *crtc,
- struct drm_atomic_state *new_state)
-{
- struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
- int ret;
-
- ret = drm_atomic_helper_check_crtc_state(new_crtc_state, false);
- if (ret)
- return ret;
-
- return drm_atomic_add_affected_planes(new_state, crtc);
-}
-
/*
* The CRTC is always enabled. Screen updates are performed by
* the primary plane's atomic_update function. Disabling clears
@@ -655,7 +653,7 @@ static int ssd130x_crtc_helper_atomic_check(struct drm_crtc *crtc,
*/
static const struct drm_crtc_helper_funcs ssd130x_crtc_helper_funcs = {
.mode_valid = ssd130x_crtc_helper_mode_valid,
- .atomic_check = ssd130x_crtc_helper_atomic_check,
+ .atomic_check = drm_crtc_helper_atomic_check,
};
static void ssd130x_crtc_reset(struct drm_crtc *crtc)
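The plane update above now iterates every damage clip instead of flushing one merged rectangle, which can be substantially less work for sparse updates on a slow bus. The helper pattern, assuming a hypothetical my_flush_rect() as the device-specific blit:

    struct drm_atomic_helper_damage_iter iter;
    struct drm_rect damage;

    drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
    drm_atomic_for_each_plane_damage(&iter, &damage) {
            struct drm_rect clip = plane_state->dst;

            if (!drm_rect_intersect(&clip, &damage))
                    continue;

            my_flush_rect(plane_state->fb, &clip);  /* hypothetical flush */
    }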
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 9291209154a7..bce71c0ccc9e 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -280,7 +280,6 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
}
}
- drm->mode_config.fb_base = (resource_size_t)bo->iova;
info->screen_base = (void __iomem *)bo->vaddr + offset;
info->screen_size = size;
info->fix.smem_start = (unsigned long)(bo->iova + offset);
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 81991090adcc..b09b8ab40ae4 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -84,7 +84,7 @@ static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_
goto free;
}
- map->sgt = dma_buf_map_attachment(map->attach, direction);
+ map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
if (IS_ERR(map->sgt)) {
dma_buf_detach(buf, map->attach);
err = PTR_ERR(map->sgt);
@@ -160,7 +160,8 @@ free:
static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
if (map->attach) {
- dma_buf_unmap_attachment(map->attach, map->sgt, map->direction);
+ dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
+ map->direction);
dma_buf_detach(map->attach->dmabuf, map->attach);
} else {
dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
@@ -181,7 +182,7 @@ static void *tegra_bo_mmap(struct host1x_bo *bo)
if (obj->vaddr) {
return obj->vaddr;
} else if (obj->gem.import_attach) {
- ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
+ ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map);
return ret ? NULL : map.vaddr;
} else {
return vmap(obj->pages, obj->num_pages, VM_MAP,
@@ -197,7 +198,7 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
if (obj->vaddr)
return;
else if (obj->gem.import_attach)
- dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
+ dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map);
else
vunmap(addr);
}
@@ -461,7 +462,7 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
get_dma_buf(buf);
- bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
+ bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
if (IS_ERR(bo->sgt)) {
err = PTR_ERR(bo->sgt);
goto detach;
@@ -479,7 +480,7 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
detach:
if (!IS_ERR_OR_NULL(bo->sgt))
- dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);
+ dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);
dma_buf_detach(buf, attach);
dma_buf_put(buf);
@@ -508,8 +509,8 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
tegra_bo_iommu_unmap(tegra, bo);
if (gem->import_attach) {
- dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
- DMA_TO_DEVICE);
+ dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
+ DMA_TO_DEVICE);
drm_prime_gem_destroy(gem, NULL);
} else {
tegra_bo_free(gem->dev, bo);
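The tegra conversions follow the dma-buf locking rework: importers now call the _unlocked entry points and must not hold the exporter's reservation lock, which the core takes around each call. The importer-side map/unmap pairing, sketched under that convention:

    attach = dma_buf_attach(buf, dev);
    if (IS_ERR(attach))
            return PTR_ERR(attach);

    sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
    if (IS_ERR(sgt)) {
            dma_buf_detach(buf, attach);
            return PTR_ERR(sgt);
    }

    /* ... DMA through sgt ... */

    dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
    dma_buf_detach(buf, attach);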
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index 91b70f7d2769..2d9f49b62ecb 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -1,5 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_KUNIT_TEST) += drm_format_helper_test.o drm_damage_helper_test.o \
- drm_cmdline_parser_test.o drm_rect_test.o drm_format_test.o drm_plane_helper_test.o \
- drm_dp_mst_helper_test.o drm_framebuffer_test.o drm_buddy_test.o drm_mm_test.o
+obj-$(CONFIG_DRM_KUNIT_TEST) += \
+ drm_buddy_test.o \
+ drm_cmdline_parser_test.o \
+ drm_damage_helper_test.o \
+ drm_dp_mst_helper_test.o \
+ drm_format_helper_test.o \
+ drm_format_test.o \
+ drm_framebuffer_test.o \
+ drm_mm_test.o \
+ drm_plane_helper_test.o \
+ drm_rect_test.o
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index 7a2b2d6bc3fe..f8ee714df396 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -726,10 +726,12 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
drm_buddy_fini(&mm);
}
-static int drm_buddy_init_test(struct kunit *test)
+static int drm_buddy_suite_init(struct kunit_suite *suite)
{
while (!random_seed)
- random_seed = get_random_int();
+ random_seed = get_random_u32();
+
+ kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n", random_seed);
return 0;
}
@@ -746,7 +748,7 @@ static struct kunit_case drm_buddy_tests[] = {
static struct kunit_suite drm_buddy_test_suite = {
.name = "drm_buddy",
- .init = drm_buddy_init_test,
+ .suite_init = drm_buddy_suite_init,
.test_cases = drm_buddy_tests,
};
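This suite, like drm_mm_test further down, moves its one-time seeding from the per-test .init hook to .suite_init, which KUnit runs once before any case in the suite. A self-contained sketch of the hook shape (names illustrative):

    static u32 seed;
    static struct kunit_case my_cases[] = { {} };

    static int my_suite_init(struct kunit_suite *suite)
    {
            seed = get_random_u32();        /* once per suite */
            kunit_info(suite, "random_seed=0x%x\n", seed);
            return 0;
    }

    static struct kunit_suite my_suite = {
            .name = "example",
            .suite_init = my_suite_init,    /* .init would run per test case */
            .test_cases = my_cases,
    };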
diff --git a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
index 65c9d225b558..545beea33e8c 100644
--- a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
@@ -5,44 +5,280 @@
* Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
*/
-#define PREFIX_STR "[drm_dp_mst_helper]"
-
#include <kunit/test.h>
-#include <linux/random.h>
-
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_print.h>
#include "../display/drm_dp_mst_topology_internal.h"
+struct drm_dp_mst_calc_pbn_mode_test {
+ const int clock;
+ const int bpp;
+ const bool dsc;
+ const int expected;
+};
+
+static const struct drm_dp_mst_calc_pbn_mode_test drm_dp_mst_calc_pbn_mode_cases[] = {
+ {
+ .clock = 154000,
+ .bpp = 30,
+ .dsc = false,
+ .expected = 689
+ },
+ {
+ .clock = 234000,
+ .bpp = 30,
+ .dsc = false,
+ .expected = 1047
+ },
+ {
+ .clock = 297000,
+ .bpp = 24,
+ .dsc = false,
+ .expected = 1063
+ },
+ {
+ .clock = 332880,
+ .bpp = 24,
+ .dsc = true,
+ .expected = 50
+ },
+ {
+ .clock = 324540,
+ .bpp = 24,
+ .dsc = true,
+ .expected = 49
+ },
+};
+
static void drm_test_dp_mst_calc_pbn_mode(struct kunit *test)
{
- int pbn, i;
- const struct {
- int rate;
- int bpp;
- int expected;
- bool dsc;
- } test_params[] = {
- { 154000, 30, 689, false },
- { 234000, 30, 1047, false },
- { 297000, 24, 1063, false },
- { 332880, 24, 50, true },
- { 324540, 24, 49, true },
- };
+ const struct drm_dp_mst_calc_pbn_mode_test *params = test->param_value;
- for (i = 0; i < ARRAY_SIZE(test_params); i++) {
- pbn = drm_dp_calc_pbn_mode(test_params[i].rate,
- test_params[i].bpp,
- test_params[i].dsc);
- KUNIT_EXPECT_EQ_MSG(test, pbn, test_params[i].expected,
- "Expected PBN %d for clock %d bpp %d, got %d\n",
- test_params[i].expected, test_params[i].rate,
- test_params[i].bpp, pbn);
- }
+ KUNIT_EXPECT_EQ(test, drm_dp_calc_pbn_mode(params->clock, params->bpp, params->dsc),
+ params->expected);
}
+static void dp_mst_calc_pbn_mode_desc(const struct drm_dp_mst_calc_pbn_mode_test *t, char *desc)
+{
+ sprintf(desc, "Clock %d BPP %d DSC %s", t->clock, t->bpp, t->dsc ? "enabled" : "disabled");
+}
+
+KUNIT_ARRAY_PARAM(drm_dp_mst_calc_pbn_mode, drm_dp_mst_calc_pbn_mode_cases,
+ dp_mst_calc_pbn_mode_desc);
+
+static u8 data[] = { 0xff, 0x00, 0xdd };
+
+struct drm_dp_mst_sideband_msg_req_test {
+ const char *desc;
+ const struct drm_dp_sideband_msg_req_body in;
+};
+
+static const struct drm_dp_mst_sideband_msg_req_test drm_dp_mst_sideband_msg_req_cases[] = {
+ {
+ .desc = "DP_ENUM_PATH_RESOURCES with port number",
+ .in = {
+ .req_type = DP_ENUM_PATH_RESOURCES,
+ .u.port_num.port_number = 5,
+ },
+ },
+ {
+ .desc = "DP_POWER_UP_PHY with port number",
+ .in = {
+ .req_type = DP_POWER_UP_PHY,
+ .u.port_num.port_number = 5,
+ },
+ },
+ {
+ .desc = "DP_POWER_DOWN_PHY with port number",
+ .in = {
+ .req_type = DP_POWER_DOWN_PHY,
+ .u.port_num.port_number = 5,
+ },
+ },
+ {
+ .desc = "DP_ALLOCATE_PAYLOAD with SDP stream sinks",
+ .in = {
+ .req_type = DP_ALLOCATE_PAYLOAD,
+ .u.allocate_payload.number_sdp_streams = 3,
+ .u.allocate_payload.sdp_stream_sink = { 1, 2, 3 },
+ },
+ },
+ {
+ .desc = "DP_ALLOCATE_PAYLOAD with port number",
+ .in = {
+ .req_type = DP_ALLOCATE_PAYLOAD,
+ .u.allocate_payload.port_number = 0xf,
+ },
+ },
+ {
+ .desc = "DP_ALLOCATE_PAYLOAD with VCPI",
+ .in = {
+ .req_type = DP_ALLOCATE_PAYLOAD,
+ .u.allocate_payload.vcpi = 0x7f,
+ },
+ },
+ {
+ .desc = "DP_ALLOCATE_PAYLOAD with PBN",
+ .in = {
+ .req_type = DP_ALLOCATE_PAYLOAD,
+ .u.allocate_payload.pbn = U16_MAX,
+ },
+ },
+ {
+ .desc = "DP_QUERY_PAYLOAD with port number",
+ .in = {
+ .req_type = DP_QUERY_PAYLOAD,
+ .u.query_payload.port_number = 0xf,
+ },
+ },
+ {
+ .desc = "DP_QUERY_PAYLOAD with VCPI",
+ .in = {
+ .req_type = DP_QUERY_PAYLOAD,
+ .u.query_payload.vcpi = 0x7f,
+ },
+ },
+ {
+ .desc = "DP_REMOTE_DPCD_READ with port number",
+ .in = {
+ .req_type = DP_REMOTE_DPCD_READ,
+ .u.dpcd_read.port_number = 0xf,
+ },
+ },
+ {
+ .desc = "DP_REMOTE_DPCD_READ with DPCD address",
+ .in = {
+ .req_type = DP_REMOTE_DPCD_READ,
+ .u.dpcd_read.dpcd_address = 0xfedcb,
+ },
+ },
+ {
+ .desc = "DP_REMOTE_DPCD_READ with max number of bytes",
+ .in = {
+ .req_type = DP_REMOTE_DPCD_READ,
+ .u.dpcd_read.num_bytes = U8_MAX,
+ },
+ },
+ {
+ .desc = "DP_REMOTE_DPCD_WRITE with port number",
+ .in = {
+ .req_type = DP_REMOTE_DPCD_WRITE,
+ .u.dpcd_write.port_number = 0xf,
+ },
+ },
+ {
+ .desc = "DP_REMOTE_DPCD_WRITE with DPCD address",
+ .in = {
+ .req_type = DP_REMOTE_DPCD_WRITE,
+ .u.dpcd_write.dpcd_address = 0xfedcb,
+ },
+ },
+ {
+ .desc = "DP_REMOTE_DPCD_WRITE with data array",
+ .in = {
+ .req_type = DP_REMOTE_DPCD_WRITE,
+ .u.dpcd_write.num_bytes = ARRAY_SIZE(data),
+ .u.dpcd_write.bytes = data,
+ },
+ },
+ {
+ .desc = "DP_REMOTE_I2C_READ with port number",
+ .in = {
+ .req_type = DP_REMOTE_I2C_READ,
+ .u.i2c_read.port_number = 0xf,
+ },
+ },
+ {
+ .desc = "DP_REMOTE_I2C_READ with I2C device ID",
+ .in = {
+ .req_type = DP_REMOTE_I2C_READ,
+ .u.i2c_read.read_i2c_device_id = 0x7f,
+ },
+ },
+ {
+ .desc = "DP_REMOTE_I2C_READ with transactions array",
+ .in = {
+ .req_type = DP_REMOTE_I2C_READ,
+ .u.i2c_read.num_transactions = 3,
+ .u.i2c_read.num_bytes_read = ARRAY_SIZE(data) * 3,
+ .u.i2c_read.transactions = {
+ { .bytes = data, .num_bytes = ARRAY_SIZE(data), .i2c_dev_id = 0x7f,
+ .i2c_transaction_delay = 0xf, },
+ { .bytes = data, .num_bytes = ARRAY_SIZE(data), .i2c_dev_id = 0x7e,
+ .i2c_transaction_delay = 0xe, },
+ { .bytes = data, .num_bytes = ARRAY_SIZE(data), .i2c_dev_id = 0x7d,
+ .i2c_transaction_delay = 0xd, },
+ },
+ },
+ },
+ {
+ .desc = "DP_REMOTE_I2C_WRITE with port number",
+ .in = {
+ .req_type = DP_REMOTE_I2C_WRITE,
+ .u.i2c_write.port_number = 0xf,
+ },
+ },
+ {
+ .desc = "DP_REMOTE_I2C_WRITE with I2C device ID",
+ .in = {
+ .req_type = DP_REMOTE_I2C_WRITE,
+ .u.i2c_write.write_i2c_device_id = 0x7f,
+ },
+ },
+ {
+ .desc = "DP_REMOTE_I2C_WRITE with data array",
+ .in = {
+ .req_type = DP_REMOTE_I2C_WRITE,
+ .u.i2c_write.num_bytes = ARRAY_SIZE(data),
+ .u.i2c_write.bytes = data,
+ },
+ },
+ {
+ .desc = "DP_QUERY_STREAM_ENC_STATUS with stream ID",
+ .in = {
+ .req_type = DP_QUERY_STREAM_ENC_STATUS,
+ .u.enc_status.stream_id = 1,
+ },
+ },
+ {
+ .desc = "DP_QUERY_STREAM_ENC_STATUS with client ID",
+ .in = {
+ .req_type = DP_QUERY_STREAM_ENC_STATUS,
+ .u.enc_status.client_id = { 0x4f, 0x7f, 0xb4, 0x00, 0x8c, 0x0d, 0x67 },
+ },
+ },
+ {
+ .desc = "DP_QUERY_STREAM_ENC_STATUS with stream event",
+ .in = {
+ .req_type = DP_QUERY_STREAM_ENC_STATUS,
+ .u.enc_status.stream_event = 3,
+ },
+ },
+ {
+ .desc = "DP_QUERY_STREAM_ENC_STATUS with valid stream event",
+ .in = {
+ .req_type = DP_QUERY_STREAM_ENC_STATUS,
+ .u.enc_status.valid_stream_event = 0,
+ },
+ },
+ {
+ .desc = "DP_QUERY_STREAM_ENC_STATUS with stream behavior",
+ .in = {
+ .req_type = DP_QUERY_STREAM_ENC_STATUS,
+ .u.enc_status.stream_behavior = 3,
+ },
+ },
+ {
+ .desc = "DP_QUERY_STREAM_ENC_STATUS with a valid stream behavior",
+ .in = {
+ .req_type = DP_QUERY_STREAM_ENC_STATUS,
+ .u.enc_status.valid_stream_behavior = 1,
+ }
+ },
+};
+
static bool
sideband_msg_req_equal(const struct drm_dp_sideband_msg_req_body *in,
const struct drm_dp_sideband_msg_req_body *out)
@@ -118,41 +354,41 @@ sideband_msg_req_equal(const struct drm_dp_sideband_msg_req_body *in,
return true;
}
-static bool
-sideband_msg_req_encode_decode(struct drm_dp_sideband_msg_req_body *in)
+static void drm_test_dp_mst_msg_printf(struct drm_printer *p, struct va_format *vaf)
{
+ struct kunit *test = p->arg;
+
+ kunit_err(test, "%pV", vaf);
+}
+
+static void drm_test_dp_mst_sideband_msg_req_decode(struct kunit *test)
+{
+ const struct drm_dp_mst_sideband_msg_req_test *params = test->param_value;
+ const struct drm_dp_sideband_msg_req_body *in = &params->in;
struct drm_dp_sideband_msg_req_body *out;
- struct drm_printer p = drm_err_printer(PREFIX_STR);
struct drm_dp_sideband_msg_tx *txmsg;
- int i, ret;
- bool result = true;
+ struct drm_printer p = {
+ .printfn = drm_test_dp_mst_msg_printf,
+ .arg = test
+ };
+ int i;
- out = kzalloc(sizeof(*out), GFP_KERNEL);
- if (!out)
- return false;
+ out = kunit_kzalloc(test, sizeof(*out), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, out);
- txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
- if (!txmsg) {
- kfree(out);
- return false;
- }
+ txmsg = kunit_kzalloc(test, sizeof(*txmsg), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, txmsg);
drm_dp_encode_sideband_req(in, txmsg);
- ret = drm_dp_decode_sideband_req(txmsg, out);
- if (ret < 0) {
- drm_printf(&p, "Failed to decode sideband request: %d\n",
- ret);
- result = false;
- goto out;
- }
+ KUNIT_EXPECT_GE_MSG(test, drm_dp_decode_sideband_req(txmsg, out), 0,
+ "Failed to decode sideband request");
if (!sideband_msg_req_equal(in, out)) {
- drm_printf(&p, "Encode/decode failed, expected:\n");
+ KUNIT_FAIL(test, "Encode/decode failed");
+ kunit_err(test, "Expected:");
drm_dp_dump_sideband_msg_req_body(in, 1, &p);
- drm_printf(&p, "Got:\n");
+ kunit_err(test, "Got:");
drm_dp_dump_sideband_msg_req_body(out, 1, &p);
- result = false;
- goto out;
}
switch (in->req_type) {
@@ -167,112 +403,21 @@ sideband_msg_req_encode_decode(struct drm_dp_sideband_msg_req_body *in)
kfree(out->u.i2c_write.bytes);
break;
}
-
- /* Clear everything but the req_type for the input */
- memset(&in->u, 0, sizeof(in->u));
-
-out:
- kfree(out);
- kfree(txmsg);
- return result;
}
-static void drm_test_dp_mst_sideband_msg_req_decode(struct kunit *test)
+static void
+drm_dp_mst_sideband_msg_req_desc(const struct drm_dp_mst_sideband_msg_req_test *t, char *desc)
{
- struct drm_dp_sideband_msg_req_body in = { 0 };
- u8 data[] = { 0xff, 0x0, 0xdd };
- int i;
-
- in.req_type = DP_ENUM_PATH_RESOURCES;
- in.u.port_num.port_number = 5;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
-
- in.req_type = DP_POWER_UP_PHY;
- in.u.port_num.port_number = 5;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
-
- in.req_type = DP_POWER_DOWN_PHY;
- in.u.port_num.port_number = 5;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
-
- in.req_type = DP_ALLOCATE_PAYLOAD;
- in.u.allocate_payload.number_sdp_streams = 3;
- for (i = 0; i < in.u.allocate_payload.number_sdp_streams; i++)
- in.u.allocate_payload.sdp_stream_sink[i] = i + 1;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
- in.u.allocate_payload.port_number = 0xf;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
- in.u.allocate_payload.vcpi = 0x7f;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
- in.u.allocate_payload.pbn = U16_MAX;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
-
- in.req_type = DP_QUERY_PAYLOAD;
- in.u.query_payload.port_number = 0xf;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
- in.u.query_payload.vcpi = 0x7f;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
-
- in.req_type = DP_REMOTE_DPCD_READ;
- in.u.dpcd_read.port_number = 0xf;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
- in.u.dpcd_read.dpcd_address = 0xfedcb;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
- in.u.dpcd_read.num_bytes = U8_MAX;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
-
- in.req_type = DP_REMOTE_DPCD_WRITE;
- in.u.dpcd_write.port_number = 0xf;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
- in.u.dpcd_write.dpcd_address = 0xfedcb;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
- in.u.dpcd_write.num_bytes = ARRAY_SIZE(data);
- in.u.dpcd_write.bytes = data;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
-
- in.req_type = DP_REMOTE_I2C_READ;
- in.u.i2c_read.port_number = 0xf;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
- in.u.i2c_read.read_i2c_device_id = 0x7f;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
- in.u.i2c_read.num_transactions = 3;
- in.u.i2c_read.num_bytes_read = ARRAY_SIZE(data) * 3;
- for (i = 0; i < in.u.i2c_read.num_transactions; i++) {
- in.u.i2c_read.transactions[i].bytes = data;
- in.u.i2c_read.transactions[i].num_bytes = ARRAY_SIZE(data);
- in.u.i2c_read.transactions[i].i2c_dev_id = 0x7f & ~i;
- in.u.i2c_read.transactions[i].i2c_transaction_delay = 0xf & ~i;
- }
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
-
- in.req_type = DP_REMOTE_I2C_WRITE;
- in.u.i2c_write.port_number = 0xf;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
- in.u.i2c_write.write_i2c_device_id = 0x7f;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
- in.u.i2c_write.num_bytes = ARRAY_SIZE(data);
- in.u.i2c_write.bytes = data;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
-
- in.req_type = DP_QUERY_STREAM_ENC_STATUS;
- in.u.enc_status.stream_id = 1;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
- get_random_bytes(in.u.enc_status.client_id,
- sizeof(in.u.enc_status.client_id));
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
- in.u.enc_status.stream_event = 3;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
- in.u.enc_status.valid_stream_event = 0;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
- in.u.enc_status.stream_behavior = 3;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
- in.u.enc_status.valid_stream_behavior = 1;
- KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ strcpy(desc, t->desc);
}
+KUNIT_ARRAY_PARAM(drm_dp_mst_sideband_msg_req, drm_dp_mst_sideband_msg_req_cases,
+ drm_dp_mst_sideband_msg_req_desc);
+
static struct kunit_case drm_dp_mst_helper_tests[] = {
- KUNIT_CASE(drm_test_dp_mst_calc_pbn_mode),
- KUNIT_CASE(drm_test_dp_mst_sideband_msg_req_decode),
+ KUNIT_CASE_PARAM(drm_test_dp_mst_calc_pbn_mode, drm_dp_mst_calc_pbn_mode_gen_params),
+ KUNIT_CASE_PARAM(drm_test_dp_mst_sideband_msg_req_decode,
+ drm_dp_mst_sideband_msg_req_gen_params),
{ }
};
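The rewrite converts both hand-rolled loops into parameterized KUnit cases: KUNIT_ARRAY_PARAM(prefix, array, desc_fn) generates a prefix_gen_params generator, and each array element reaches the case body via test->param_value. The skeleton, with illustrative names:

    struct my_case { int in; int expected; };

    static const struct my_case my_cases[] = {
            { .in = 1, .expected = 2 },
            { .in = 2, .expected = 4 },
    };

    static void my_case_desc(const struct my_case *c, char *desc)
    {
            sprintf(desc, "in=%d", c->in);
    }

    KUNIT_ARRAY_PARAM(my, my_cases, my_case_desc);  /* emits my_gen_params() */

    static void my_test(struct kunit *test)
    {
            const struct my_case *c = test->param_value;

            KUNIT_EXPECT_EQ(test, c->in * 2, c->expected);
    }

    static struct kunit_case my_kunit_cases[] = {
            KUNIT_CASE_PARAM(my_test, my_gen_params),
            {}
    };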
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index ab49f5e8796c..2191e57f2297 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -16,6 +16,11 @@
#define TEST_BUF_SIZE 50
+struct convert_to_gray8_result {
+ unsigned int dst_pitch;
+ const u8 expected[TEST_BUF_SIZE];
+};
+
struct convert_to_rgb332_result {
unsigned int dst_pitch;
const u8 expected[TEST_BUF_SIZE];
@@ -27,13 +32,26 @@ struct convert_to_rgb565_result {
const u16 expected_swab[TEST_BUF_SIZE];
};
+struct convert_to_rgb888_result {
+ unsigned int dst_pitch;
+ const u8 expected[TEST_BUF_SIZE];
+};
+
+struct convert_to_xrgb2101010_result {
+ unsigned int dst_pitch;
+ const u32 expected[TEST_BUF_SIZE];
+};
+
struct convert_xrgb8888_case {
const char *name;
unsigned int pitch;
struct drm_rect clip;
const u32 xrgb8888[TEST_BUF_SIZE];
+ struct convert_to_gray8_result gray8_result;
struct convert_to_rgb332_result rgb332_result;
struct convert_to_rgb565_result rgb565_result;
+ struct convert_to_rgb888_result rgb888_result;
+ struct convert_to_xrgb2101010_result xrgb2101010_result;
};
static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
@@ -42,6 +60,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.pitch = 1 * 4,
.clip = DRM_RECT_INIT(0, 0, 1, 1),
.xrgb8888 = { 0x01FF0000 },
+ .gray8_result = {
+ .dst_pitch = 0,
+ .expected = { 0x4C },
+ },
.rgb332_result = {
.dst_pitch = 0,
.expected = { 0xE0 },
@@ -51,6 +73,14 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.expected = { 0xF800 },
.expected_swab = { 0x00F8 },
},
+ .rgb888_result = {
+ .dst_pitch = 0,
+ .expected = { 0x00, 0x00, 0xFF },
+ },
+ .xrgb2101010_result = {
+ .dst_pitch = 0,
+ .expected = { 0x3FF00000 },
+ },
},
{
.name = "single_pixel_clip_rectangle",
@@ -60,6 +90,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0x00000000, 0x00000000,
0x00000000, 0x10FF0000,
},
+ .gray8_result = {
+ .dst_pitch = 0,
+ .expected = { 0x4C },
+ },
.rgb332_result = {
.dst_pitch = 0,
.expected = { 0xE0 },
@@ -69,6 +103,14 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.expected = { 0xF800 },
.expected_swab = { 0x00F8 },
},
+ .rgb888_result = {
+ .dst_pitch = 0,
+ .expected = { 0x00, 0x00, 0xFF },
+ },
+ .xrgb2101010_result = {
+ .dst_pitch = 0,
+ .expected = { 0x3FF00000 },
+ },
},
{
/* Well known colors: White, black, red, green, blue, magenta,
@@ -85,6 +127,15 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0x00000000, 0x550000FF, 0x66FF00FF, 0x00000000,
0x00000000, 0x77FFFF00, 0x8800FFFF, 0x00000000,
},
+ .gray8_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFF, 0x00,
+ 0x4C, 0x99,
+ 0x19, 0x66,
+ 0xE5, 0xB2,
+ },
+ },
.rgb332_result = {
.dst_pitch = 0,
.expected = {
@@ -109,6 +160,24 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0xE0FF, 0xFF07,
},
},
+ .rgb888_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xFF, 0x00, 0xFF, 0x00,
+ 0xFF, 0x00, 0x00, 0xFF, 0x00, 0xFF,
+ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00,
+ },
+ },
+ .xrgb2101010_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0x3FFFFFFF, 0x00000000,
+ 0x3FF00000, 0x000FFC00,
+ 0x000003FF, 0x3FF003FF,
+ 0x3FFFFC00, 0x000FFFFF,
+ },
+ },
},
{
/* Randomly picked colors. Full buffer within the clip area. */
@@ -120,6 +189,14 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0xD16C7073, 0xA20E449C, 0xB2114D05,
0xC2A80303, 0xD26C7073, 0xA30E449C,
},
+ .gray8_result = {
+ .dst_pitch = 5,
+ .expected = {
+ 0x3C, 0x33, 0x34, 0x00, 0x00,
+ 0x6F, 0x3C, 0x33, 0x00, 0x00,
+ 0x34, 0x6F, 0x3C, 0x00, 0x00,
+ },
+ },
.rgb332_result = {
.dst_pitch = 5,
.expected = {
@@ -141,6 +218,25 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0x00A8, 0x8E6B, 0x330A, 0x0000, 0x0000,
},
},
+ .rgb888_result = {
+ .dst_pitch = 15,
+ .expected = {
+ 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11, 0x03, 0x03, 0xA8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x73, 0x70, 0x6C, 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x03, 0xA8, 0x73, 0x70, 0x6C, 0x9C, 0x44, 0x0E,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ },
+ },
+ .xrgb2101010_result = {
+ .dst_pitch = 20,
+ .expected = {
+ 0x03844672, 0x0444D414, 0x2A20300C, 0x00000000, 0x00000000,
+ 0x1B1705CD, 0x03844672, 0x0444D414, 0x00000000, 0x00000000,
+ 0x2A20300C, 0x1B1705CD, 0x03844672, 0x00000000, 0x00000000,
+ },
+ },
},
};
@@ -192,6 +288,36 @@ static void convert_xrgb8888_case_desc(struct convert_xrgb8888_case *t,
KUNIT_ARRAY_PARAM(convert_xrgb8888, convert_xrgb8888_cases,
convert_xrgb8888_case_desc);
+static void drm_test_fb_xrgb8888_to_gray8(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_gray8_result *result = &params->gray8_result;
+ size_t dst_size;
+ __u8 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_R8, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_gray8(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
static void drm_test_fb_xrgb8888_to_rgb332(struct kunit *test)
{
const struct convert_xrgb8888_case *params = test->param_value;
@@ -255,9 +381,73 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected_swab, dst_size), 0);
}
+static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_rgb888_result *result = &params->rgb888_result;
+ size_t dst_size;
+ __u8 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_RGB888, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_rgb888(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
+static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_xrgb2101010_result *result = &params->xrgb2101010_result;
+ size_t dst_size;
+ __u32 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_XRGB2101010,
+ result->dst_pitch, &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_xrgb2101010(&dst, &result->dst_pitch, &src, &fb, &params->clip);
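+ /* The helper stores little-endian words; convert to CPU order so memcmp is endian-independent. */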
+ buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
static struct kunit_case drm_format_helper_test_cases[] = {
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_gray8, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb332, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb565, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb888, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb2101010, convert_xrgb8888_gen_params),
{}
};
diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
index 659d1af4dca7..89f12d3b4a21 100644
--- a/drivers/gpu/drm/tests/drm_mm_test.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -2209,10 +2209,14 @@ err_nodes:
vfree(nodes);
}
-static int drm_mm_init_test(struct kunit *test)
+static int drm_mm_suite_init(struct kunit_suite *suite)
{
while (!random_seed)
- random_seed = get_random_int();
+ random_seed = get_random_u32();
+
+ kunit_info(suite,
+ "Testing DRM range manager, with random_seed=0x%x max_iterations=%u max_prime=%u\n",
+ random_seed, max_iterations, max_prime);
return 0;
}
@@ -2246,7 +2250,7 @@ static struct kunit_case drm_mm_tests[] = {
static struct kunit_suite drm_mm_test_suite = {
.name = "drm_mm",
- .init = drm_mm_init_test,
+ .suite_init = drm_mm_suite_init,
.test_cases = drm_mm_tests,
};
diff --git a/drivers/gpu/drm/tests/drm_plane_helper_test.c b/drivers/gpu/drm/tests/drm_plane_helper_test.c
index ec71af791f1f..0f392146b233 100644
--- a/drivers/gpu/drm/tests/drm_plane_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_plane_helper_test.c
@@ -10,225 +10,306 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_modes.h>
+#include <drm/drm_rect.h>
-static void set_src(struct drm_plane_state *plane_state,
- unsigned int src_x, unsigned int src_y,
- unsigned int src_w, unsigned int src_h)
+static const struct drm_crtc_state crtc_state = {
+ .crtc = ZERO_SIZE_PTR,
+ .enable = true,
+ .active = true,
+ .mode = {
+ DRM_MODE("1024x768", 0, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
+ },
+};
+
+struct drm_check_plane_state_test {
+ const char *name;
+ const char *msg;
+ struct {
+ unsigned int x;
+ unsigned int y;
+ unsigned int w;
+ unsigned int h;
+ } src, src_expected;
+ struct {
+ int x;
+ int y;
+ unsigned int w;
+ unsigned int h;
+ } crtc, crtc_expected;
+ unsigned int rotation;
+ int min_scale;
+ int max_scale;
+ bool can_position;
+};
+
+static int drm_plane_helper_init(struct kunit *test)
{
- plane_state->src_x = src_x;
- plane_state->src_y = src_y;
- plane_state->src_w = src_w;
- plane_state->src_h = src_h;
+ const struct drm_check_plane_state_test *params = test->param_value;
+ struct drm_plane *plane;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *mock;
+
+ plane = kunit_kzalloc(test, sizeof(*plane), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, plane);
+
+ fb = kunit_kzalloc(test, sizeof(*fb), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, fb);
+ fb->width = 2048;
+ fb->height = 2048;
+
+ mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, mock);
+ mock->plane = plane;
+ mock->crtc = ZERO_SIZE_PTR;
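+ /* ZERO_SIZE_PTR is a non-null placeholder; the checked helpers compare this pointer but never dereference it. */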
+ mock->fb = fb;
+ mock->rotation = params->rotation;
+ mock->src_x = params->src.x;
+ mock->src_y = params->src.y;
+ mock->src_w = params->src.w;
+ mock->src_h = params->src.h;
+ mock->crtc_x = params->crtc.x;
+ mock->crtc_y = params->crtc.y;
+ mock->crtc_w = params->crtc.w;
+ mock->crtc_h = params->crtc.h;
+
+ test->priv = mock;
+
+ return 0;
}
-static bool check_src_eq(struct drm_plane_state *plane_state,
+static void check_src_eq(struct kunit *test, struct drm_plane_state *plane_state,
unsigned int src_x, unsigned int src_y,
unsigned int src_w, unsigned int src_h)
{
- if (plane_state->src.x1 < 0) {
- pr_err("src x coordinate %x should never be below 0.\n", plane_state->src.x1);
- drm_rect_debug_print("src: ", &plane_state->src, true);
- return false;
- }
- if (plane_state->src.y1 < 0) {
- pr_err("src y coordinate %x should never be below 0.\n", plane_state->src.y1);
- drm_rect_debug_print("src: ", &plane_state->src, true);
- return false;
- }
-
- if (plane_state->src.x1 != src_x ||
- plane_state->src.y1 != src_y ||
- drm_rect_width(&plane_state->src) != src_w ||
- drm_rect_height(&plane_state->src) != src_h) {
- drm_rect_debug_print("src: ", &plane_state->src, true);
- return false;
- }
-
- return true;
-}
+ struct drm_rect expected = DRM_RECT_INIT(src_x, src_y, src_w, src_h);
-static void set_crtc(struct drm_plane_state *plane_state,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h)
-{
- plane_state->crtc_x = crtc_x;
- plane_state->crtc_y = crtc_y;
- plane_state->crtc_w = crtc_w;
- plane_state->crtc_h = crtc_h;
+ KUNIT_ASSERT_GE_MSG(test, plane_state->src.x1, 0,
+ "src x coordinate %x should never be below 0, src: " DRM_RECT_FP_FMT,
+ plane_state->src.x1, DRM_RECT_FP_ARG(&plane_state->src));
+
+ KUNIT_ASSERT_GE_MSG(test, plane_state->src.y1, 0,
+ "src y coordinate %x should never be below 0, src: " DRM_RECT_FP_FMT,
+ plane_state->src.y1, DRM_RECT_FP_ARG(&plane_state->src));
+
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_equals(&plane_state->src, &expected),
+ "dst: " DRM_RECT_FP_FMT ", expected: " DRM_RECT_FP_FMT,
+ DRM_RECT_FP_ARG(&plane_state->src), DRM_RECT_FP_ARG(&expected));
}
-static bool check_crtc_eq(struct drm_plane_state *plane_state,
+static void check_crtc_eq(struct kunit *test, struct drm_plane_state *plane_state,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h)
{
- if (plane_state->dst.x1 != crtc_x ||
- plane_state->dst.y1 != crtc_y ||
- drm_rect_width(&plane_state->dst) != crtc_w ||
- drm_rect_height(&plane_state->dst) != crtc_h) {
- drm_rect_debug_print("dst: ", &plane_state->dst, false);
-
- return false;
- }
+ struct drm_rect expected = DRM_RECT_INIT(crtc_x, crtc_y, crtc_w, crtc_h);
- return true;
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_equals(&plane_state->dst, &expected),
+ "dst: " DRM_RECT_FMT ", expected: " DRM_RECT_FMT,
+ DRM_RECT_ARG(&plane_state->dst), DRM_RECT_ARG(&expected));
}
static void drm_test_check_plane_state(struct kunit *test)
{
- int ret;
-
- static const struct drm_crtc_state crtc_state = {
- .crtc = ZERO_SIZE_PTR,
- .enable = true,
- .active = true,
- .mode = {
- DRM_MODE("1024x768", 0, 65000, 1024, 1048, 1184, 1344, 0, 768, 771,
- 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
- },
- };
- static struct drm_plane plane = {
- .dev = NULL
- };
- static struct drm_framebuffer fb = {
- .width = 2048,
- .height = 2048
- };
- static struct drm_plane_state plane_state = {
- .plane = &plane,
- .crtc = ZERO_SIZE_PTR,
- .fb = &fb,
- .rotation = DRM_MODE_ROTATE_0
- };
-
- /* Simple clipping, no scaling. */
- set_src(&plane_state, 0, 0, fb.width << 16, fb.height << 16);
- set_crtc(&plane_state, 0, 0, fb.width, fb.height);
- ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- false, false);
- KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Simple clipping check should pass\n");
- KUNIT_EXPECT_TRUE(test, plane_state.visible);
- KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 1024 << 16, 768 << 16));
- KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
-
- /* Rotated clipping + reflection, no scaling. */
- plane_state.rotation = DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X;
- ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- false, false);
- KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Rotated clipping check should pass\n");
- KUNIT_EXPECT_TRUE(test, plane_state.visible);
- KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 768 << 16, 1024 << 16));
- KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
- plane_state.rotation = DRM_MODE_ROTATE_0;
-
- /* Check whether positioning works correctly. */
- set_src(&plane_state, 0, 0, 1023 << 16, 767 << 16);
- set_crtc(&plane_state, 0, 0, 1023, 767);
- ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- false, false);
- KUNIT_EXPECT_TRUE_MSG(test, ret,
- "Should not be able to position on the crtc with can_position=false\n");
-
- ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true, false);
- KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Simple positioning should work\n");
- KUNIT_EXPECT_TRUE(test, plane_state.visible);
- KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 1023 << 16, 767 << 16));
- KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1023, 767));
-
- /* Simple scaling tests. */
- set_src(&plane_state, 0, 0, 512 << 16, 384 << 16);
- set_crtc(&plane_state, 0, 0, 1024, 768);
- ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- 0x8001,
- DRM_PLANE_NO_SCALING,
- false, false);
- KUNIT_EXPECT_TRUE_MSG(test, ret, "Upscaling out of range should fail.\n");
- ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- 0x8000,
- DRM_PLANE_NO_SCALING,
- false, false);
- KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Upscaling exactly 2x should work\n");
- KUNIT_EXPECT_TRUE(test, plane_state.visible);
- KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 512 << 16, 384 << 16));
- KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
-
- set_src(&plane_state, 0, 0, 2048 << 16, 1536 << 16);
- ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_NO_SCALING,
- 0x1ffff, false, false);
- KUNIT_EXPECT_TRUE_MSG(test, ret, "Downscaling out of range should fail.\n");
- ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_NO_SCALING,
- 0x20000, false, false);
- KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed with exact scaling limit\n");
- KUNIT_EXPECT_TRUE(test, plane_state.visible);
- KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 2048 << 16, 1536 << 16));
- KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
-
- /* Testing rounding errors. */
- set_src(&plane_state, 0, 0, 0x40001, 0x40001);
- set_crtc(&plane_state, 1022, 766, 4, 4);
- ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_NO_SCALING,
- 0x10001,
- true, false);
- KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
- KUNIT_EXPECT_TRUE(test, plane_state.visible);
- KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
- KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 1022, 766, 2, 2));
-
- set_src(&plane_state, 0x20001, 0x20001, 0x4040001, 0x3040001);
- set_crtc(&plane_state, -2, -2, 1028, 772);
- ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_NO_SCALING,
- 0x10001,
- false, false);
- KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
- KUNIT_EXPECT_TRUE(test, plane_state.visible);
- KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0x40002, 0x40002,
- 1024 << 16, 768 << 16));
- KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
-
- set_src(&plane_state, 0, 0, 0x3ffff, 0x3ffff);
- set_crtc(&plane_state, 1022, 766, 4, 4);
- ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- 0xffff,
- DRM_PLANE_NO_SCALING,
- true, false);
- KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
- KUNIT_EXPECT_TRUE(test, plane_state.visible);
- /* Should not be rounded to 0x20001, which would be upscaling. */
- KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
- KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 1022, 766, 2, 2));
-
- set_src(&plane_state, 0x1ffff, 0x1ffff, 0x403ffff, 0x303ffff);
- set_crtc(&plane_state, -2, -2, 1028, 772);
- ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- 0xffff,
- DRM_PLANE_NO_SCALING,
- false, false);
- KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
- KUNIT_EXPECT_TRUE(test, plane_state.visible);
- KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0x3fffe, 0x3fffe,
- 1024 << 16, 768 << 16));
- KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+ const struct drm_check_plane_state_test *params = test->param_value;
+ struct drm_plane_state *plane_state = test->priv;
+
+ KUNIT_ASSERT_EQ_MSG(test,
+ drm_atomic_helper_check_plane_state(plane_state, &crtc_state,
+ params->min_scale,
+ params->max_scale,
+ params->can_position, false),
+ 0, params->msg);
+ KUNIT_EXPECT_TRUE(test, plane_state->visible);
+ check_src_eq(test, plane_state, params->src_expected.x, params->src_expected.y,
+ params->src_expected.w, params->src_expected.h);
+ check_crtc_eq(test, plane_state, params->crtc_expected.x, params->crtc_expected.y,
+ params->crtc_expected.w, params->crtc_expected.h);
+}
+
+static void drm_check_plane_state_desc(const struct drm_check_plane_state_test *t,
+ char *desc)
+{
+ sprintf(desc, "%s", t->name);
+}
+
+static const struct drm_check_plane_state_test drm_check_plane_state_tests[] = {
+ {
+ .name = "clipping_simple",
+ .msg = "Simple clipping check should pass",
+ .src = { 0, 0,
+ 2048 << 16,
+ 2048 << 16 },
+ .crtc = { 0, 0, 2048, 2048 },
+ .rotation = DRM_MODE_ROTATE_0,
+ .min_scale = DRM_PLANE_NO_SCALING,
+ .max_scale = DRM_PLANE_NO_SCALING,
+ .can_position = false,
+ .src_expected = { 0, 0, 1024 << 16, 768 << 16 },
+ .crtc_expected = { 0, 0, 1024, 768 },
+ },
+ {
+ .name = "clipping_rotate_reflect",
+ .msg = "Rotated clipping check should pass",
+ .src = { 0, 0,
+ 2048 << 16,
+ 2048 << 16 },
+ .crtc = { 0, 0, 2048, 2048 },
+ .rotation = DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X,
+ .min_scale = DRM_PLANE_NO_SCALING,
+ .max_scale = DRM_PLANE_NO_SCALING,
+ .can_position = false,
+ .src_expected = { 0, 0, 768 << 16, 1024 << 16 },
+ .crtc_expected = { 0, 0, 1024, 768 },
+ },
+ {
+ .name = "positioning_simple",
+ .msg = "Simple positioning should work",
+ .src = { 0, 0, 1023 << 16, 767 << 16 },
+ .crtc = { 0, 0, 1023, 767 },
+ .rotation = DRM_MODE_ROTATE_0,
+ .min_scale = DRM_PLANE_NO_SCALING,
+ .max_scale = DRM_PLANE_NO_SCALING,
+ .can_position = true,
+ .src_expected = { 0, 0, 1023 << 16, 767 << 16 },
+ .crtc_expected = { 0, 0, 1023, 767 },
+ },
+ {
+ .name = "upscaling",
+ .msg = "Upscaling exactly 2x should work",
+ .src = { 0, 0, 512 << 16, 384 << 16 },
+ .crtc = { 0, 0, 1024, 768 },
+ .rotation = DRM_MODE_ROTATE_0,
+ .min_scale = 0x8000,
+ .max_scale = DRM_PLANE_NO_SCALING,
+ .can_position = false,
+ .src_expected = { 0, 0, 512 << 16, 384 << 16 },
+ .crtc_expected = { 0, 0, 1024, 768 },
+ },
+ {
+ .name = "downscaling",
+ .msg = "Should succeed with exact scaling limit",
+ .src = { 0, 0, 2048 << 16, 1536 << 16 },
+ .crtc = { 0, 0, 1024, 768 },
+ .rotation = DRM_MODE_ROTATE_0,
+ .min_scale = DRM_PLANE_NO_SCALING,
+ .max_scale = 0x20000,
+ .can_position = false,
+ .src_expected = { 0, 0, 2048 << 16, 1536 << 16 },
+ .crtc_expected = { 0, 0, 1024, 768 },
+ },
+ {
+ .name = "rounding1",
+ .msg = "Should succeed by clipping to exact multiple",
+ .src = { 0, 0, 0x40001, 0x40001 },
+ .crtc = { 1022, 766, 4, 4 },
+ .rotation = DRM_MODE_ROTATE_0,
+ .min_scale = DRM_PLANE_NO_SCALING,
+ .max_scale = 0x10001,
+ .can_position = true,
+ .src_expected = { 0, 0, 2 << 16, 2 << 16 },
+ .crtc_expected = { 1022, 766, 2, 2 },
+ },
+ {
+ .name = "rounding2",
+ .msg = "Should succeed by clipping to exact multiple",
+ .src = { 0x20001, 0x20001, 0x4040001, 0x3040001 },
+ .crtc = { -2, -2, 1028, 772 },
+ .rotation = DRM_MODE_ROTATE_0,
+ .min_scale = DRM_PLANE_NO_SCALING,
+ .max_scale = 0x10001,
+ .can_position = false,
+ .src_expected = { 0x40002, 0x40002, 1024 << 16, 768 << 16 },
+ .crtc_expected = { 0, 0, 1024, 768 },
+ },
+ {
+ .name = "rounding3",
+ .msg = "Should succeed by clipping to exact multiple",
+ .src = { 0, 0, 0x3ffff, 0x3ffff },
+ .crtc = { 1022, 766, 4, 4 },
+ .rotation = DRM_MODE_ROTATE_0,
+ .min_scale = 0xffff,
+ .max_scale = DRM_PLANE_NO_SCALING,
+ .can_position = true,
+ /* Should not be rounded to 0x20001, which would be upscaling. */
+ .src_expected = { 0, 0, 2 << 16, 2 << 16 },
+ .crtc_expected = { 1022, 766, 2, 2 },
+ },
+ {
+ .name = "rounding4",
+ .msg = "Should succeed by clipping to exact multiple",
+ .src = { 0x1ffff, 0x1ffff, 0x403ffff, 0x303ffff },
+ .crtc = { -2, -2, 1028, 772 },
+ .rotation = DRM_MODE_ROTATE_0,
+ .min_scale = 0xffff,
+ .max_scale = DRM_PLANE_NO_SCALING,
+ .can_position = false,
+ .src_expected = { 0x3fffe, 0x3fffe, 1024 << 16, 768 << 16 },
+ .crtc_expected = { 0, 0, 1024, 768 },
+ },
+};
+
+KUNIT_ARRAY_PARAM(drm_check_plane_state, drm_check_plane_state_tests, drm_check_plane_state_desc);
+
+static void drm_test_check_invalid_plane_state(struct kunit *test)
+{
+ const struct drm_check_plane_state_test *params = test->param_value;
+ struct drm_plane_state *plane_state = test->priv;
+
+ KUNIT_ASSERT_LT_MSG(test,
+ drm_atomic_helper_check_plane_state(plane_state, &crtc_state,
+ params->min_scale,
+ params->max_scale,
+ params->can_position, false),
+ 0, params->msg);
}
+static const struct drm_check_plane_state_test drm_check_invalid_plane_state_tests[] = {
+ {
+ .name = "positioning_invalid",
+ .msg = "Should not be able to position on the crtc with can_position=false",
+ .src = { 0, 0, 1023 << 16, 767 << 16 },
+ .crtc = { 0, 0, 1023, 767 },
+ .rotation = DRM_MODE_ROTATE_0,
+ .min_scale = DRM_PLANE_NO_SCALING,
+ .max_scale = DRM_PLANE_NO_SCALING,
+ .can_position = false,
+ },
+ {
+ .name = "upscaling_invalid",
+ .msg = "Upscaling out of range should fail",
+ .src = { 0, 0, 512 << 16, 384 << 16 },
+ .crtc = { 0, 0, 1024, 768 },
+ .rotation = DRM_MODE_ROTATE_0,
+ .min_scale = 0x8001,
+ .max_scale = DRM_PLANE_NO_SCALING,
+ .can_position = false,
+ },
+ {
+ .name = "downscaling_invalid",
+ .msg = "Downscaling out of range should fail",
+ .src = { 0, 0, 2048 << 16, 1536 << 16 },
+ .crtc = { 0, 0, 1024, 768 },
+ .rotation = DRM_MODE_ROTATE_0,
+ .min_scale = DRM_PLANE_NO_SCALING,
+ .max_scale = 0x1ffff,
+ .can_position = false,
+ },
+};
+
+KUNIT_ARRAY_PARAM(drm_check_invalid_plane_state, drm_check_invalid_plane_state_tests,
+ drm_check_plane_state_desc);
+
static struct kunit_case drm_plane_helper_test[] = {
- KUNIT_CASE(drm_test_check_plane_state),
+ KUNIT_CASE_PARAM(drm_test_check_plane_state, drm_check_plane_state_gen_params),
+ KUNIT_CASE_PARAM(drm_test_check_invalid_plane_state,
+ drm_check_invalid_plane_state_gen_params),
{}
};
static struct kunit_suite drm_plane_helper_test_suite = {
.name = "drm_plane_helper",
+ .init = drm_plane_helper_init,
.test_cases = drm_plane_helper_test,
};
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 565957264875..a300b03a3c7a 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -51,6 +51,19 @@ config DRM_GM12U320
This is a KMS driver for projectors which use the GM12U320 chipset
for video transfer over USB2/3, such as the Acer C120 mini projector.
+config DRM_OFDRM
+ tristate "Open Firmware display driver"
+ depends on DRM && OF && (PPC || COMPILE_TEST)
+ select APERTURE_HELPERS
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ help
+ DRM driver for Open Firmware framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by Open Firmware before the kernel boots. The scanout buffer address,
+ size, and display format must be provided via the device tree.
+
config DRM_PANEL_MIPI_DBI
tristate "DRM support for MIPI DBI compatible panels"
depends on DRM && SPI
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 1d9d6227e7ab..76dde89a044b 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
obj-$(CONFIG_DRM_BOCHS) += bochs.o
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
+obj-$(CONFIG_DRM_OFDRM) += ofdrm.o
obj-$(CONFIG_DRM_PANEL_MIPI_DBI) += panel-mipi-dbi.o
obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index a51262289aef..04682f831544 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -543,7 +543,6 @@ static int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.max_width = 8192;
bochs->dev->mode_config.max_height = 8192;
- bochs->dev->mode_config.fb_base = bochs->fb_base;
bochs->dev->mode_config.preferred_depth = 24;
bochs->dev->mode_config.prefer_shadow = 0;
bochs->dev->mode_config.prefer_shadow_fbdev = 1;
diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c
new file mode 100644
index 000000000000..0e1cc2369afc
--- /dev/null
+++ b/drivers/gpu/drm/tiny/ofdrm.c
@@ -0,0 +1,1424 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_aperture.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#define DRIVER_NAME "ofdrm"
+#define DRIVER_DESC "DRM driver for OF platform devices"
+#define DRIVER_DATE "20220501"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+#define PCI_VENDOR_ID_ATI_R520 0x7100
+#define PCI_VENDOR_ID_ATI_R600 0x9400
+
+#define OFDRM_GAMMA_LUT_SIZE 256
+
+/* Definitions used by the Avivo palette */
+#define AVIVO_DC_LUT_RW_SELECT 0x6480
+#define AVIVO_DC_LUT_RW_MODE 0x6484
+#define AVIVO_DC_LUT_RW_INDEX 0x6488
+#define AVIVO_DC_LUT_SEQ_COLOR 0x648c
+#define AVIVO_DC_LUT_PWL_DATA 0x6490
+#define AVIVO_DC_LUT_30_COLOR 0x6494
+#define AVIVO_DC_LUT_READ_PIPE_SELECT 0x6498
+#define AVIVO_DC_LUT_WRITE_EN_MASK 0x649c
+#define AVIVO_DC_LUT_AUTOFILL 0x64a0
+#define AVIVO_DC_LUTA_CONTROL 0x64c0
+#define AVIVO_DC_LUTA_BLACK_OFFSET_BLUE 0x64c4
+#define AVIVO_DC_LUTA_BLACK_OFFSET_GREEN 0x64c8
+#define AVIVO_DC_LUTA_BLACK_OFFSET_RED 0x64cc
+#define AVIVO_DC_LUTA_WHITE_OFFSET_BLUE 0x64d0
+#define AVIVO_DC_LUTA_WHITE_OFFSET_GREEN 0x64d4
+#define AVIVO_DC_LUTA_WHITE_OFFSET_RED 0x64d8
+#define AVIVO_DC_LUTB_CONTROL 0x6cc0
+#define AVIVO_DC_LUTB_BLACK_OFFSET_BLUE 0x6cc4
+#define AVIVO_DC_LUTB_BLACK_OFFSET_GREEN 0x6cc8
+#define AVIVO_DC_LUTB_BLACK_OFFSET_RED 0x6ccc
+#define AVIVO_DC_LUTB_WHITE_OFFSET_BLUE 0x6cd0
+#define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4
+#define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8
+
+enum ofdrm_model {
+ OFDRM_MODEL_UNKNOWN,
+ OFDRM_MODEL_MACH64, /* ATI Mach64 */
+ OFDRM_MODEL_RAGE128, /* ATI Rage128 */
+ OFDRM_MODEL_RAGE_M3A, /* ATI Rage Mobility M3 Head A */
+ OFDRM_MODEL_RAGE_M3B, /* ATI Rage Mobility M3 Head B */
+ OFDRM_MODEL_RADEON, /* ATI Radeon */
+ OFDRM_MODEL_GXT2000, /* IBM GXT2000 */
+ OFDRM_MODEL_AVIVO, /* ATI R5xx */
+ OFDRM_MODEL_QEMU, /* QEMU VGA */
+};
+
+/*
+ * Helpers for display nodes
+ */
+
+static int display_get_validated_int(struct drm_device *dev, const char *name, uint32_t value)
+{
+ if (value > INT_MAX) {
+ drm_err(dev, "invalid framebuffer %s of %u\n", name, value);
+ return -EINVAL;
+ }
+ return (int)value;
+}
+
+static int display_get_validated_int0(struct drm_device *dev, const char *name, uint32_t value)
+{
+ if (!value) {
+ drm_err(dev, "invalid framebuffer %s of %u\n", name, value);
+ return -EINVAL;
+ }
+ return display_get_validated_int(dev, name, value);
+}
+
+static const struct drm_format_info *display_get_validated_format(struct drm_device *dev,
+ u32 depth, bool big_endian)
+{
+ const struct drm_format_info *info;
+ u32 format;
+
+ switch (depth) {
+ case 8:
+ format = drm_mode_legacy_fb_format(8, 8);
+ break;
+ case 15:
+ case 16:
+ format = drm_mode_legacy_fb_format(16, depth);
+ break;
+ case 32:
+ format = drm_mode_legacy_fb_format(32, 24);
+ break;
+ default:
+ drm_err(dev, "unsupported framebuffer depth %u\n", depth);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * DRM formats assume little-endian byte order. Update the format
+ * if the scanout buffer uses big-endian ordering.
+ */
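+ /*
+ * For example, little-endian XRGB8888 stores bytes B, G, R, X in memory;
+ * the same scanout layout on a big-endian buffer corresponds to
+ * little-endian BGRX8888.
+ */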
+ if (big_endian) {
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ format = DRM_FORMAT_BGRX8888;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ format = DRM_FORMAT_BGRA8888;
+ break;
+ case DRM_FORMAT_RGB565:
+ format = DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ format = DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN;
+ break;
+ default:
+ break;
+ }
+ }
+
+ info = drm_format_info(format);
+ if (!info) {
+ drm_err(dev, "cannot find framebuffer format for depth %u\n", depth);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return info;
+}
+
+static int display_read_u32_of(struct drm_device *dev, struct device_node *of_node,
+ const char *name, u32 *value)
+{
+ int ret = of_property_read_u32(of_node, name, value);
+
+ if (ret)
+ drm_err(dev, "cannot parse framebuffer %s: error %d\n", name, ret);
+ return ret;
+}
+
+static bool display_get_big_endian_of(struct drm_device *dev, struct device_node *of_node)
+{
+ bool big_endian;
+
+#ifdef __BIG_ENDIAN
+ big_endian = true;
+ if (of_get_property(of_node, "little-endian", NULL))
+ big_endian = false;
+#else
+ big_endian = false;
+ if (of_get_property(of_node, "big-endian", NULL))
+ big_endian = true;
+#endif
+
+ return big_endian;
+}
+
+static int display_get_width_of(struct drm_device *dev, struct device_node *of_node)
+{
+ u32 width;
+ int ret = display_read_u32_of(dev, of_node, "width", &width);
+
+ if (ret)
+ return ret;
+ return display_get_validated_int0(dev, "width", width);
+}
+
+static int display_get_height_of(struct drm_device *dev, struct device_node *of_node)
+{
+ u32 height;
+ int ret = display_read_u32_of(dev, of_node, "height", &height);
+
+ if (ret)
+ return ret;
+ return display_get_validated_int0(dev, "height", height);
+}
+
+static int display_get_depth_of(struct drm_device *dev, struct device_node *of_node)
+{
+ u32 depth;
+ int ret = display_read_u32_of(dev, of_node, "depth", &depth);
+
+ if (ret)
+ return ret;
+ return display_get_validated_int0(dev, "depth", depth);
+}
+
+static int display_get_linebytes_of(struct drm_device *dev, struct device_node *of_node)
+{
+ u32 linebytes;
+ int ret = display_read_u32_of(dev, of_node, "linebytes", &linebytes);
+
+ if (ret)
+ return ret;
+ return display_get_validated_int(dev, "linebytes", linebytes);
+}
+
+static u64 display_get_address_of(struct drm_device *dev, struct device_node *of_node)
+{
+ u32 address;
+ int ret;
+
+ /*
+ * Not all devices provide an address property; it's not
+ * a bug if this fails. The driver will try to find the
+ * framebuffer base address from the device's memory regions.
+ */
+ ret = of_property_read_u32(of_node, "address", &address);
+ if (ret)
+ return OF_BAD_ADDR;
+
+ return address;
+}
+
+static bool is_avivo(u32 vendor, u32 device)
+{
+ /* This will match most R5xx */
+ return (vendor == PCI_VENDOR_ID_ATI) &&
+ ((device >= PCI_VENDOR_ID_ATI_R520 && device < 0x7800) ||
+ (device >= PCI_VENDOR_ID_ATI_R600));
+}
+
+static enum ofdrm_model display_get_model_of(struct drm_device *dev, struct device_node *of_node)
+{
+ enum ofdrm_model model = OFDRM_MODEL_UNKNOWN;
+
+ if (of_node_name_prefix(of_node, "ATY,Rage128")) {
+ model = OFDRM_MODEL_RAGE128;
+ } else if (of_node_name_prefix(of_node, "ATY,RageM3pA") ||
+ of_node_name_prefix(of_node, "ATY,RageM3p12A")) {
+ model = OFDRM_MODEL_RAGE_M3A;
+ } else if (of_node_name_prefix(of_node, "ATY,RageM3pB")) {
+ model = OFDRM_MODEL_RAGE_M3B;
+ } else if (of_node_name_prefix(of_node, "ATY,Rage6")) {
+ model = OFDRM_MODEL_RADEON;
+ } else if (of_node_name_prefix(of_node, "ATY,")) {
+ return OFDRM_MODEL_MACH64;
+ } else if (of_device_is_compatible(of_node, "pci1014,b7") ||
+ of_device_is_compatible(of_node, "pci1014,21c")) {
+ model = OFDRM_MODEL_GXT2000;
+ } else if (of_node_name_prefix(of_node, "vga,Display-")) {
+ struct device_node *of_parent;
+ const __be32 *vendor_p, *device_p;
+
+ /* Look for AVIVO initialized by SLOF */
+ of_parent = of_get_parent(of_node);
+ vendor_p = of_get_property(of_parent, "vendor-id", NULL);
+ device_p = of_get_property(of_parent, "device-id", NULL);
+ if (vendor_p && device_p && is_avivo(be32_to_cpup(vendor_p), be32_to_cpup(device_p)))
+ model = OFDRM_MODEL_AVIVO;
+ of_node_put(of_parent);
+ } else if (of_device_is_compatible(of_node, "qemu,std-vga")) {
+ model = OFDRM_MODEL_QEMU;
+ }
+
+ return model;
+}
+
+/*
+ * Open Firmware display device
+ */
+
+struct ofdrm_device;
+
+struct ofdrm_device_funcs {
+ void __iomem *(*cmap_ioremap)(struct ofdrm_device *odev,
+ struct device_node *of_node,
+ u64 fb_base);
+ void (*cmap_write)(struct ofdrm_device *odev, unsigned char index,
+ unsigned char r, unsigned char g, unsigned char b);
+};
+
+struct ofdrm_device {
+ struct drm_device dev;
+ struct platform_device *pdev;
+
+ const struct ofdrm_device_funcs *funcs;
+
+ /* firmware-buffer settings */
+ struct iosys_map screen_base;
+ struct drm_display_mode mode;
+ const struct drm_format_info *format;
+ unsigned int pitch;
+
+ /* colormap */
+ void __iomem *cmap_base;
+
+ /* modesetting */
+ uint32_t formats[8];
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+static struct ofdrm_device *ofdrm_device_of_dev(struct drm_device *dev)
+{
+ return container_of(dev, struct ofdrm_device, dev);
+}
+
+/*
+ * Hardware
+ */
+
+#if defined(CONFIG_PCI)
+static struct pci_dev *display_get_pci_dev_of(struct drm_device *dev, struct device_node *of_node)
+{
+ const __be32 *vendor_p, *device_p;
+ u32 vendor, device;
+ struct pci_dev *pcidev;
+
+ vendor_p = of_get_property(of_node, "vendor-id", NULL);
+ if (!vendor_p)
+ return ERR_PTR(-ENODEV);
+ vendor = be32_to_cpup(vendor_p);
+
+ device_p = of_get_property(of_node, "device-id", NULL);
+ if (!device_p)
+ return ERR_PTR(-ENODEV);
+ device = be32_to_cpup(device_p);
+
+ pcidev = pci_get_device(vendor, device, NULL);
+ if (!pcidev)
+ return ERR_PTR(-ENODEV);
+
+ return pcidev;
+}
+
+static void ofdrm_pci_release(void *data)
+{
+ struct pci_dev *pcidev = data;
+
+ pci_disable_device(pcidev);
+}
+
+static int ofdrm_device_init_pci(struct ofdrm_device *odev)
+{
+ struct drm_device *dev = &odev->dev;
+ struct platform_device *pdev = to_platform_device(dev->dev);
+ struct device_node *of_node = pdev->dev.of_node;
+ struct pci_dev *pcidev;
+ int ret;
+
+ /*
+ * Never use pcim_ or other managed helpers on the returned PCI
+ * device. Otherwise, probing the native driver will fail due to
+ * resource conflicts. PCI-device management has to be tied to
+ * the lifetime of the platform device until the native driver
+ * takes over.
+ */
+ pcidev = display_get_pci_dev_of(dev, of_node);
+ if (IS_ERR(pcidev))
+ return 0; /* no PCI device found; ignore the error */
+
+ ret = pci_enable_device(pcidev);
+ if (ret) {
+ drm_err(dev, "pci_enable_device(%s) failed: %d\n",
+ dev_name(&pcidev->dev), ret);
+ return ret;
+ }
+ ret = devm_add_action_or_reset(&pdev->dev, ofdrm_pci_release, pcidev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+#else
+static int ofdrm_device_init_pci(struct ofdrm_device *odev)
+{
+ return 0;
+}
+#endif
+
+/*
+ * OF display settings
+ */
+
+static struct resource *ofdrm_find_fb_resource(struct ofdrm_device *odev,
+ struct resource *fb_res)
+{
+ struct platform_device *pdev = to_platform_device(odev->dev.dev);
+ struct resource *res, *max_res = NULL;
+ u32 i;
+
+ for (i = 0; i < pdev->num_resources; ++i) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res)
+ break; /* all resources processed */
+ if (resource_size(res) < resource_size(fb_res))
+ continue; /* resource too small */
+ if (fb_res->start && resource_contains(res, fb_res))
+ return res; /* resource contains framebuffer */
+ if (!max_res || resource_size(res) > resource_size(max_res))
+ max_res = res; /* store largest resource as fallback */
+ }
+
+ return max_res;
+}
+
+/*
+ * Colormap / Palette
+ */
+
+static void __iomem *get_cmap_address_of(struct ofdrm_device *odev, struct device_node *of_node,
+ int bar_no, unsigned long offset, unsigned long size)
+{
+ struct drm_device *dev = &odev->dev;
+ const __be32 *addr_p;
+ u64 max_size, address;
+ unsigned int flags;
+ void __iomem *mem;
+
+ addr_p = of_get_pci_address(of_node, bar_no, &max_size, &flags);
+ if (!addr_p)
+ addr_p = of_get_address(of_node, bar_no, &max_size, &flags);
+ if (!addr_p)
+ return ERR_PTR(-ENODEV);
+
+ if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
+ return ERR_PTR(-ENODEV);
+
+ if ((offset + size) >= max_size)
+ return ERR_PTR(-ENODEV);
+
+ address = of_translate_address(of_node, addr_p);
+ if (address == OF_BAD_ADDR)
+ return ERR_PTR(-ENODEV);
+
+ mem = devm_ioremap(dev->dev, address + offset, size);
+ if (!mem)
+ return ERR_PTR(-ENOMEM);
+
+ return mem;
+}
+
+static void __iomem *ofdrm_mach64_cmap_ioremap(struct ofdrm_device *odev,
+ struct device_node *of_node,
+ u64 fb_base)
+{
+ struct drm_device *dev = &odev->dev;
+ u64 address;
+ void __iomem *cmap_base;
+
+ address = fb_base & 0xff000000ul;
+ address += 0x7ff000;
+
+ cmap_base = devm_ioremap(dev->dev, address, 0x1000);
+ if (!cmap_base)
+ return ERR_PTR(-ENOMEM);
+
+ return cmap_base;
+}
+
+static void ofdrm_mach64_cmap_write(struct ofdrm_device *odev, unsigned char index,
+ unsigned char r, unsigned char g, unsigned char b)
+{
+ void __iomem *addr = odev->cmap_base + 0xcc0;
+ void __iomem *data = odev->cmap_base + 0xcc0 + 1;
+
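+ /*
+ * RAMDAC-style access: write the palette index once, then the three
+ * color bytes to the data port, which steps through R, G and B.
+ */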
+ writeb(index, addr);
+ writeb(r, data);
+ writeb(g, data);
+ writeb(b, data);
+}
+
+static void __iomem *ofdrm_rage128_cmap_ioremap(struct ofdrm_device *odev,
+ struct device_node *of_node,
+ u64 fb_base)
+{
+ return get_cmap_address_of(odev, of_node, 2, 0, 0x1fff);
+}
+
+static void ofdrm_rage128_cmap_write(struct ofdrm_device *odev, unsigned char index,
+ unsigned char r, unsigned char g, unsigned char b)
+{
+ void __iomem *addr = odev->cmap_base + 0xb0;
+ void __iomem *data = odev->cmap_base + 0xb4;
+ u32 color = (r << 16) | (g << 8) | b;
+
+ writeb(index, addr);
+ writel(color, data);
+}
+
+static void __iomem *ofdrm_rage_m3a_cmap_ioremap(struct ofdrm_device *odev,
+ struct device_node *of_node,
+ u64 fb_base)
+{
+ return get_cmap_address_of(odev, of_node, 2, 0, 0x1fff);
+}
+
+static void ofdrm_rage_m3a_cmap_write(struct ofdrm_device *odev, unsigned char index,
+ unsigned char r, unsigned char g, unsigned char b)
+{
+ void __iomem *dac_ctl = odev->cmap_base + 0x58;
+ void __iomem *addr = odev->cmap_base + 0xb0;
+ void __iomem *data = odev->cmap_base + 0xb4;
+ u32 color = (r << 16) | (g << 8) | b;
+ u32 val;
+
+ /* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */
+ val = readl(dac_ctl);
+ val &= ~0x20;
+ writel(val, dac_ctl);
+
+ /* Set color at palette index */
+ writeb(index, addr);
+ writel(color, data);
+}
+
+static void __iomem *ofdrm_rage_m3b_cmap_ioremap(struct ofdrm_device *odev,
+ struct device_node *of_node,
+ u64 fb_base)
+{
+ return get_cmap_address_of(odev, of_node, 2, 0, 0x1fff);
+}
+
+static void ofdrm_rage_m3b_cmap_write(struct ofdrm_device *odev, unsigned char index,
+ unsigned char r, unsigned char g, unsigned char b)
+{
+ void __iomem *dac_ctl = odev->cmap_base + 0x58;
+ void __iomem *addr = odev->cmap_base + 0xb0;
+ void __iomem *data = odev->cmap_base + 0xb4;
+ u32 color = (r << 16) | (g << 8) | b;
+ u32 val;
+
+ /* Set PALETTE_ACCESS_CNTL in DAC_CNTL */
+ val = readl(dac_ctl);
+ val |= 0x20;
+ writel(val, dac_ctl);
+
+ /* Set color at palette index */
+ writeb(index, addr);
+ writel(color, data);
+}
+
+static void __iomem *ofdrm_radeon_cmap_ioremap(struct ofdrm_device *odev,
+ struct device_node *of_node,
+ u64 fb_base)
+{
+ return get_cmap_address_of(odev, of_node, 1, 0, 0x1fff);
+}
+
+static void __iomem *ofdrm_gxt2000_cmap_ioremap(struct ofdrm_device *odev,
+ struct device_node *of_node,
+ u64 fb_base)
+{
+ return get_cmap_address_of(odev, of_node, 0, 0x6000, 0x1000);
+}
+
+static void ofdrm_gxt2000_cmap_write(struct ofdrm_device *odev, unsigned char index,
+ unsigned char r, unsigned char g, unsigned char b)
+{
+ void __iomem *data = ((unsigned int __iomem *)odev->cmap_base) + index;
+ u32 color = (r << 16) | (g << 8) | b;
+
+ writel(color, data);
+}
+
+static void __iomem *ofdrm_avivo_cmap_ioremap(struct ofdrm_device *odev,
+ struct device_node *of_node,
+ u64 fb_base)
+{
+ struct device_node *of_parent;
+ void __iomem *cmap_base;
+
+ of_parent = of_get_parent(of_node);
+ cmap_base = get_cmap_address_of(odev, of_parent, 0, 0, 0x10000);
+ of_node_put(of_parent);
+
+ return cmap_base;
+}
+
+static void ofdrm_avivo_cmap_write(struct ofdrm_device *odev, unsigned char index,
+ unsigned char r, unsigned char g, unsigned char b)
+{
+ void __iomem *lutsel = odev->cmap_base + AVIVO_DC_LUT_RW_SELECT;
+ void __iomem *addr = odev->cmap_base + AVIVO_DC_LUT_RW_INDEX;
+ void __iomem *data = odev->cmap_base + AVIVO_DC_LUT_30_COLOR;
+ u32 color = (r << 22) | (g << 12) | (b << 2);
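+ /* 30-bit LUT color: each 8-bit component sits in the top bits of its 10-bit field. */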
+
+ /* Write to both LUTs for now */
+
+ writel(1, lutsel);
+ writeb(index, addr);
+ writel(color, data);
+
+ writel(0, lutsel);
+ writeb(index, addr);
+ writel(color, data);
+}
+
+static void __iomem *ofdrm_qemu_cmap_ioremap(struct ofdrm_device *odev,
+ struct device_node *of_node,
+ u64 fb_base)
+{
+ static const __be32 io_of_addr[3] = {
+ cpu_to_be32(0x01000000),
+ cpu_to_be32(0x00),
+ cpu_to_be32(0x00),
+ };
+
+ struct drm_device *dev = &odev->dev;
+ u64 address;
+ void __iomem *cmap_base;
+
+ address = of_translate_address(of_node, io_of_addr);
+ if (address == OF_BAD_ADDR)
+ return ERR_PTR(-ENODEV);
+
+ cmap_base = devm_ioremap(dev->dev, address + 0x3c8, 2);
+ if (!cmap_base)
+ return ERR_PTR(-ENOMEM);
+
+ return cmap_base;
+}
+
+static void ofdrm_qemu_cmap_write(struct ofdrm_device *odev, unsigned char index,
+ unsigned char r, unsigned char g, unsigned char b)
+{
+ void __iomem *addr = odev->cmap_base;
+ void __iomem *data = odev->cmap_base + 1;
+
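+ /* Standard VGA DAC: port 0x3c8 selects the index, 0x3c9 takes R, G, B in sequence. */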
+ writeb(index, addr);
+ writeb(r, data);
+ writeb(g, data);
+ writeb(b, data);
+}
+
+static void ofdrm_device_set_gamma_linear(struct ofdrm_device *odev,
+ const struct drm_format_info *format)
+{
+ struct drm_device *dev = &odev->dev;
+ int i;
+
+ switch (format->format) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN:
+ /* Use better interpolation, to take 32 values from 0 to 255 */
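+ /*
+ * e.g. the last red/blue entry, i = 31, maps to 31 * 8 + 31 / 4 = 255,
+ * and the last green entry, i = 63, maps to 63 * 4 + 63 / 16 = 255.
+ */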
+ for (i = 0; i < OFDRM_GAMMA_LUT_SIZE / 8; i++) {
+ unsigned char r = i * 8 + i / 4;
+ unsigned char g = i * 4 + i / 16;
+ unsigned char b = i * 8 + i / 4;
+
+ odev->funcs->cmap_write(odev, i, r, g, b);
+ }
+ /* Green has one more bit, so add padding with 0 for red and blue. */
+ for (i = OFDRM_GAMMA_LUT_SIZE / 8; i < OFDRM_GAMMA_LUT_SIZE / 4; i++) {
+ unsigned char r = 0;
+ unsigned char g = i * 4 + i / 16;
+ unsigned char b = 0;
+
+ odev->funcs->cmap_write(odev, i, r, g, b);
+ }
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_BGRX8888:
+ for (i = 0; i < OFDRM_GAMMA_LUT_SIZE; i++)
+ odev->funcs->cmap_write(odev, i, i, i, i);
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
+}
+
+static void ofdrm_device_set_gamma(struct ofdrm_device *odev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
+{
+ struct drm_device *dev = &odev->dev;
+ int i;
+
+ switch (format->format) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN:
+ /* Use better interpolation, to take 32 values from lut[0] to lut[255] */
+ for (i = 0; i < OFDRM_GAMMA_LUT_SIZE / 8; i++) {
+ unsigned char r = lut[i * 8 + i / 4].red >> 8;
+ unsigned char g = lut[i * 4 + i / 16].green >> 8;
+ unsigned char b = lut[i * 8 + i / 4].blue >> 8;
+
+ odev->funcs->cmap_write(odev, i, r, g, b);
+ }
+ /* Green has one more bit, so add padding with 0 for red and blue. */
+ for (i = OFDRM_GAMMA_LUT_SIZE / 8; i < OFDRM_GAMMA_LUT_SIZE / 4; i++) {
+ unsigned char r = 0;
+ unsigned char g = lut[i * 4 + i / 16].green >> 8;
+ unsigned char b = 0;
+
+ odev->funcs->cmap_write(odev, i, r, g, b);
+ }
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_BGRX8888:
+ for (i = 0; i < OFDRM_GAMMA_LUT_SIZE; i++) {
+ unsigned char r = lut[i].red >> 8;
+ unsigned char g = lut[i].green >> 8;
+ unsigned char b = lut[i].blue >> 8;
+
+ odev->funcs->cmap_write(odev, i, r, g, b);
+ }
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
+}
+
+/*
+ * Modesetting
+ */
+
+struct ofdrm_crtc_state {
+ struct drm_crtc_state base;
+
+ /* Primary-plane format; required for color mgmt. */
+ const struct drm_format_info *format;
+};
+
+static struct ofdrm_crtc_state *to_ofdrm_crtc_state(struct drm_crtc_state *base)
+{
+ return container_of(base, struct ofdrm_crtc_state, base);
+}
+
+static void ofdrm_crtc_state_destroy(struct ofdrm_crtc_state *ofdrm_crtc_state)
+{
+ __drm_atomic_helper_crtc_destroy_state(&ofdrm_crtc_state->base);
+ kfree(ofdrm_crtc_state);
+}
+
+/*
+ * Support all formats of the OF display, and maybe more, in order
+ * of preference. The display's update function will do any
+ * necessary conversion.
+ *
+ * TODO: Add blit helpers for remaining formats and uncomment
+ * constants.
+ */
+static const uint32_t ofdrm_primary_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+ //DRM_FORMAT_XRGB1555,
+ //DRM_FORMAT_C8,
+ /* Big-endian formats below */
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN,
+};
+
+static const uint64_t ofdrm_primary_plane_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static int ofdrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ struct ofdrm_crtc_state *new_ofdrm_crtc_state;
+ int ret;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+
+ new_ofdrm_crtc_state = to_ofdrm_crtc_state(new_crtc_state);
+ new_ofdrm_crtc_state->format = new_fb->format;
+
+ return 0;
+}
+
+static void ofdrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ unsigned int dst_pitch = odev->pitch;
+ const struct drm_format_info *dst_format = odev->format;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+ int ret, idx;
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ return;
+
+ if (!drm_dev_enter(dev, &idx))
+ goto out_drm_gem_fb_end_cpu_access;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ struct iosys_map dst = odev->screen_base;
+ struct drm_rect dst_clip = plane_state->dst;
+
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
+
+ iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip));
+ drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb,
+ &damage);
+ }
+
+ drm_dev_exit(idx);
+out_drm_gem_fb_end_cpu_access:
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+}
+
+static void ofdrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
+ struct iosys_map dst = odev->screen_base;
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ void __iomem *dst_vmap = dst.vaddr_iomem; /* TODO: Use mapping abstraction */
+ unsigned int dst_pitch = odev->pitch;
+ const struct drm_format_info *dst_format = odev->format;
+ struct drm_rect dst_clip;
+ unsigned long lines, linepixels, i;
+ int idx;
+
+ drm_rect_init(&dst_clip,
+ plane_state->src_x >> 16, plane_state->src_y >> 16,
+ plane_state->src_w >> 16, plane_state->src_h >> 16);
+
+ lines = drm_rect_height(&dst_clip);
+ linepixels = drm_rect_width(&dst_clip);
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
+ /* Clear buffer to black if disabled */
+ dst_vmap += drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip);
+ for (i = 0; i < lines; ++i) {
+ memset_io(dst_vmap, 0, linepixels * dst_format->cpp[0]);
+ dst_vmap += dst_pitch;
+ }
+
+ drm_dev_exit(idx);
+}
+
+static const struct drm_plane_helper_funcs ofdrm_primary_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = ofdrm_primary_plane_helper_atomic_check,
+ .atomic_update = ofdrm_primary_plane_helper_atomic_update,
+ .atomic_disable = ofdrm_primary_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs ofdrm_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
+};
+
+static enum drm_mode_status ofdrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev);
+
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, &odev->mode);
+}
+
+static int ofdrm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ static const size_t gamma_lut_length = OFDRM_GAMMA_LUT_SIZE * sizeof(struct drm_color_lut);
+
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ int ret;
+
+ if (!new_crtc_state->enable)
+ return 0;
+
+ ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
+ if (ret)
+ return ret;
+
+ if (new_crtc_state->color_mgmt_changed) {
+ struct drm_property_blob *gamma_lut = new_crtc_state->gamma_lut;
+
+ if (gamma_lut && (gamma_lut->length != gamma_lut_length)) {
+ drm_dbg(dev, "Incorrect gamma_lut length %zu\n", gamma_lut->length);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void ofdrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct ofdrm_crtc_state *ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state);
+
+ if (crtc_state->enable && crtc_state->color_mgmt_changed) {
+ const struct drm_format_info *format = ofdrm_crtc_state->format;
+
+ if (crtc_state->gamma_lut)
+ ofdrm_device_set_gamma(odev, format, crtc_state->gamma_lut->data);
+ else
+ ofdrm_device_set_gamma_linear(odev, format);
+ }
+}
+
+/*
+ * The CRTC is always enabled. Screen updates are performed by
+ * the primary plane's atomic_update function. Disabling clears
+ * the screen in the primary plane's atomic_disable function.
+ */
+static const struct drm_crtc_helper_funcs ofdrm_crtc_helper_funcs = {
+ .mode_valid = ofdrm_crtc_helper_mode_valid,
+ .atomic_check = ofdrm_crtc_helper_atomic_check,
+ .atomic_flush = ofdrm_crtc_helper_atomic_flush,
+};
+
+static void ofdrm_crtc_reset(struct drm_crtc *crtc)
+{
+ struct ofdrm_crtc_state *ofdrm_crtc_state =
+ kzalloc(sizeof(*ofdrm_crtc_state), GFP_KERNEL);
+
+ if (crtc->state)
+ ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc->state));
+
+ if (ofdrm_crtc_state)
+ __drm_atomic_helper_crtc_reset(crtc, &ofdrm_crtc_state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
+}
+
+static struct drm_crtc_state *ofdrm_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct ofdrm_crtc_state *new_ofdrm_crtc_state;
+ struct ofdrm_crtc_state *ofdrm_crtc_state;
+
+ if (drm_WARN_ON(dev, !crtc_state))
+ return NULL;
+
+ new_ofdrm_crtc_state = kzalloc(sizeof(*new_ofdrm_crtc_state), GFP_KERNEL);
+ if (!new_ofdrm_crtc_state)
+ return NULL;
+
+ ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state);
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &new_ofdrm_crtc_state->base);
+ new_ofdrm_crtc_state->format = ofdrm_crtc_state->format;
+
+ return &new_ofdrm_crtc_state->base;
+}
+
+static void ofdrm_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc_state));
+}
+
+static const struct drm_crtc_funcs ofdrm_crtc_funcs = {
+ .reset = ofdrm_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = ofdrm_crtc_atomic_duplicate_state,
+ .atomic_destroy_state = ofdrm_crtc_atomic_destroy_state,
+};
+
+static int ofdrm_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct ofdrm_device *odev = ofdrm_device_of_dev(connector->dev);
+
+ return drm_connector_helper_get_modes_fixed(connector, &odev->mode);
+}
+
+static const struct drm_connector_helper_funcs ofdrm_connector_helper_funcs = {
+ .get_modes = ofdrm_connector_helper_get_modes,
+};
+
+static const struct drm_connector_funcs ofdrm_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_mode_config_funcs ofdrm_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+/*
+ * Init / Cleanup
+ */
+
+static const struct ofdrm_device_funcs ofdrm_unknown_device_funcs = {
+};
+
+static const struct ofdrm_device_funcs ofdrm_mach64_device_funcs = {
+ .cmap_ioremap = ofdrm_mach64_cmap_ioremap,
+ .cmap_write = ofdrm_mach64_cmap_write,
+};
+
+static const struct ofdrm_device_funcs ofdrm_rage128_device_funcs = {
+ .cmap_ioremap = ofdrm_rage128_cmap_ioremap,
+ .cmap_write = ofdrm_rage128_cmap_write,
+};
+
+static const struct ofdrm_device_funcs ofdrm_rage_m3a_device_funcs = {
+ .cmap_ioremap = ofdrm_rage_m3a_cmap_ioremap,
+ .cmap_write = ofdrm_rage_m3a_cmap_write,
+};
+
+static const struct ofdrm_device_funcs ofdrm_rage_m3b_device_funcs = {
+ .cmap_ioremap = ofdrm_rage_m3b_cmap_ioremap,
+ .cmap_write = ofdrm_rage_m3b_cmap_write,
+};
+
+static const struct ofdrm_device_funcs ofdrm_radeon_device_funcs = {
+ .cmap_ioremap = ofdrm_radeon_cmap_ioremap,
+ .cmap_write = ofdrm_rage128_cmap_write, /* same as Rage128 */
+};
+
+static const struct ofdrm_device_funcs ofdrm_gxt2000_device_funcs = {
+ .cmap_ioremap = ofdrm_gxt2000_cmap_ioremap,
+ .cmap_write = ofdrm_gxt2000_cmap_write,
+};
+
+static const struct ofdrm_device_funcs ofdrm_avivo_device_funcs = {
+ .cmap_ioremap = ofdrm_avivo_cmap_ioremap,
+ .cmap_write = ofdrm_avivo_cmap_write,
+};
+
+static const struct ofdrm_device_funcs ofdrm_qemu_device_funcs = {
+ .cmap_ioremap = ofdrm_qemu_cmap_ioremap,
+ .cmap_write = ofdrm_qemu_cmap_write,
+};
+
+static struct drm_display_mode ofdrm_mode(unsigned int width, unsigned int height)
+{
+ /*
+ * Assume a monitor resolution of 96 dpi to
+ * get a somewhat reasonable screen size.
+ */
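+ /* For example, DRM_MODE_RES_MM(1024, 96ul) yields roughly 270 mm. */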
+ const struct drm_display_mode mode = {
+ DRM_MODE_INIT(60, width, height,
+ DRM_MODE_RES_MM(width, 96ul),
+ DRM_MODE_RES_MM(height, 96ul))
+ };
+
+ return mode;
+}
+
+static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
+ struct platform_device *pdev)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ struct ofdrm_device *odev;
+ struct drm_device *dev;
+ enum ofdrm_model model;
+ bool big_endian;
+ int width, height, depth, linebytes;
+ const struct drm_format_info *format;
+ u64 address;
+ resource_size_t fb_size, fb_base, fb_pgbase, fb_pgsize;
+ struct resource *res, *mem;
+ void __iomem *screen_base;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ unsigned long max_width, max_height;
+ size_t nformats;
+ int ret;
+
+ odev = devm_drm_dev_alloc(&pdev->dev, drv, struct ofdrm_device, dev);
+ if (IS_ERR(odev))
+ return ERR_CAST(odev);
+ dev = &odev->dev;
+ platform_set_drvdata(pdev, dev);
+
+ ret = ofdrm_device_init_pci(odev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * OF display-node settings
+ */
+
+ model = display_get_model_of(dev, of_node);
+ drm_dbg(dev, "detected model %d\n", model);
+
+ switch (model) {
+ case OFDRM_MODEL_UNKNOWN:
+ odev->funcs = &ofdrm_unknown_device_funcs;
+ break;
+ case OFDRM_MODEL_MACH64:
+ odev->funcs = &ofdrm_mach64_device_funcs;
+ break;
+ case OFDRM_MODEL_RAGE128:
+ odev->funcs = &ofdrm_rage128_device_funcs;
+ break;
+ case OFDRM_MODEL_RAGE_M3A:
+ odev->funcs = &ofdrm_rage_m3a_device_funcs;
+ break;
+ case OFDRM_MODEL_RAGE_M3B:
+ odev->funcs = &ofdrm_rage_m3b_device_funcs;
+ break;
+ case OFDRM_MODEL_RADEON:
+ odev->funcs = &ofdrm_radeon_device_funcs;
+ break;
+ case OFDRM_MODEL_GXT2000:
+ odev->funcs = &ofdrm_gxt2000_device_funcs;
+ break;
+ case OFDRM_MODEL_AVIVO:
+ odev->funcs = &ofdrm_avivo_device_funcs;
+ break;
+ case OFDRM_MODEL_QEMU:
+ odev->funcs = &ofdrm_qemu_device_funcs;
+ break;
+ }
+
+ big_endian = display_get_big_endian_of(dev, of_node);
+
+ width = display_get_width_of(dev, of_node);
+ if (width < 0)
+ return ERR_PTR(width);
+ height = display_get_height_of(dev, of_node);
+ if (height < 0)
+ return ERR_PTR(height);
+ depth = display_get_depth_of(dev, of_node);
+ if (depth < 0)
+ return ERR_PTR(depth);
+ linebytes = display_get_linebytes_of(dev, of_node);
+ if (linebytes < 0)
+ return ERR_PTR(linebytes);
+
+ format = display_get_validated_format(dev, depth, big_endian);
+ if (IS_ERR(format))
+ return ERR_CAST(format);
+ if (!linebytes) {
+ linebytes = drm_format_info_min_pitch(format, 0, width);
+ if (drm_WARN_ON(dev, !linebytes))
+ return ERR_PTR(-EINVAL);
+ }
+
+ fb_size = linebytes * height;
+
+ /*
+ * Try to figure out the address of the framebuffer. Unfortunately, Open
+ * Firmware doesn't provide a standard way to do so. All we can do is a
+ * dodgy heuristic that happens to work in practice.
+ *
+ * On most machines, the "address" property contains what we need, though
+ * not on Matrox cards found in IBM machines. What appears to give good
+ * results is to go through the PCI ranges and pick one that encloses the
+ * "address" property. If none match, we pick the largest.
+ */
+ address = display_get_address_of(dev, of_node);
+ if (address != OF_BAD_ADDR) {
+ struct resource fb_res = DEFINE_RES_MEM(address, fb_size);
+
+ res = ofdrm_find_fb_resource(odev, &fb_res);
+ if (!res)
+ return ERR_PTR(-EINVAL);
+ if (resource_contains(res, &fb_res))
+ fb_base = address;
+ else
+ fb_base = res->start;
+ } else {
+ struct resource fb_res = DEFINE_RES_MEM(0u, fb_size);
+
+ res = ofdrm_find_fb_resource(odev, &fb_res);
+ if (!res)
+ return ERR_PTR(-EINVAL);
+ fb_base = res->start;
+ }
+
+ /*
+ * I/O resources
+ */
+
+ fb_pgbase = round_down(fb_base, PAGE_SIZE);
+ fb_pgsize = fb_base - fb_pgbase + round_up(fb_size, PAGE_SIZE);
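+ /* Round the base down to a page boundary and grow the size so fb_base + fb_size stays covered. */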
+
+ ret = devm_aperture_acquire_from_firmware(dev, fb_pgbase, fb_pgsize);
+ if (ret) {
+ drm_err(dev, "could not acquire memory range %pr: error %d\n", &res, ret);
+ return ERR_PTR(ret);
+ }
+
+ mem = devm_request_mem_region(&pdev->dev, fb_pgbase, fb_pgsize, drv->name);
+ if (!mem) {
+ drm_warn(dev, "could not acquire memory region %pr\n", &res);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ screen_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ if (!screen_base)
+ return ERR_PTR(-ENOMEM);
+
+ if (odev->funcs->cmap_ioremap) {
+ void __iomem *cmap_base = odev->funcs->cmap_ioremap(odev, of_node, fb_base);
+
+ if (IS_ERR(cmap_base)) {
+ /* Don't fail; continue without colormap */
+ drm_warn(dev, "could not find colormap: error %ld\n", PTR_ERR(cmap_base));
+ } else {
+ odev->cmap_base = cmap_base;
+ }
+ }
+
+ /*
+ * Firmware framebuffer
+ */
+
+ iosys_map_set_vaddr_iomem(&odev->screen_base, screen_base);
+ odev->mode = ofdrm_mode(width, height);
+ odev->format = format;
+ odev->pitch = linebytes;
+
+ drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&odev->mode));
+ drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, linebytes=%d byte\n",
+ &format->format, width, height, linebytes);
+
+ /*
+ * Mode-setting pipeline
+ */
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH);
+ max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT);
+
+ dev->mode_config.min_width = width;
+ dev->mode_config.max_width = max_width;
+ dev->mode_config.min_height = height;
+ dev->mode_config.max_height = max_height;
+ dev->mode_config.funcs = &ofdrm_mode_config_funcs;
+ switch (depth) {
+ case 32:
+ dev->mode_config.preferred_depth = 24;
+ break;
+ default:
+ dev->mode_config.preferred_depth = depth;
+ break;
+ }
+ dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
+
+ /* Primary plane */
+
+ nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
+ ofdrm_primary_plane_formats,
+ ARRAY_SIZE(ofdrm_primary_plane_formats),
+ odev->formats, ARRAY_SIZE(odev->formats));
+
+ primary_plane = &odev->primary_plane;
+ ret = drm_universal_plane_init(dev, primary_plane, 0, &ofdrm_primary_plane_funcs,
+ odev->formats, nformats,
+ ofdrm_primary_plane_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_plane_helper_add(primary_plane, &ofdrm_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ /* CRTC */
+
+ crtc = &odev->crtc;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &ofdrm_crtc_funcs, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_crtc_helper_add(crtc, &ofdrm_crtc_helper_funcs);
+
+ if (odev->cmap_base) {
+ drm_mode_crtc_set_gamma_size(crtc, OFDRM_GAMMA_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, OFDRM_GAMMA_LUT_SIZE);
+ }
+
+ /* Encoder */
+
+ encoder = &odev->encoder;
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_NONE);
+ if (ret)
+ return ERR_PTR(ret);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /* Connector */
+
+ connector = &odev->connector;
+ ret = drm_connector_init(dev, connector, &ofdrm_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_connector_helper_add(connector, &ofdrm_connector_helper_funcs);
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ width, height);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
+ return odev;
+}
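A minimal sketch (editor's addition, not part of the patch) of the decision that the address heuristic above boils down to, assuming the enclosing PCI resource has already been looked up via ofdrm_find_fb_resource(); the helper name is hypothetical:

#include <linux/ioport.h>
#include <linux/of_address.h>
#include <linux/types.h>

static u64 ofdrm_pick_fb_base(struct resource *res, u64 of_address, size_t fb_size)
{
	struct resource fb_res = DEFINE_RES_MEM(of_address, fb_size);

	/* Trust the OF "address" property only if the PCI range
	 * encloses the whole framebuffer; else use the range start. */
	if (of_address != OF_BAD_ADDR && resource_contains(res, &fb_res))
		return of_address;
	return res->start;
}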
+
+/*
+ * DRM driver
+ */
+
+DEFINE_DRM_GEM_FOPS(ofdrm_fops);
+
+static struct drm_driver ofdrm_driver = {
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
+ .fops = &ofdrm_fops,
+};
+
+/*
+ * Platform driver
+ */
+
+static int ofdrm_probe(struct platform_device *pdev)
+{
+ struct ofdrm_device *odev;
+ struct drm_device *dev;
+ int ret;
+
+ odev = ofdrm_device_create(&ofdrm_driver, pdev);
+ if (IS_ERR(odev))
+ return PTR_ERR(odev);
+ dev = &odev->dev;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * FIXME: 24-bit color depth does not work reliably with a 32-bpp
+ * value. Force the bpp value of the scanout buffer's format.
+ */
+ drm_fbdev_generic_setup(dev, drm_format_info_bpp(odev->format, 0));
+
+ return 0;
+}
+
+static int ofdrm_remove(struct platform_device *pdev)
+{
+ struct drm_device *dev = platform_get_drvdata(pdev);
+
+ drm_dev_unplug(dev);
+
+ return 0;
+}
+
+static const struct of_device_id ofdrm_of_match_display[] = {
+ { .compatible = "display", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ofdrm_of_match_display);
+
+static struct platform_driver ofdrm_platform_driver = {
+ .driver = {
+ .name = "of-display",
+ .of_match_table = ofdrm_of_match_display,
+ },
+ .probe = ofdrm_probe,
+ .remove = ofdrm_remove,
+};
+
+module_platform_driver(ofdrm_platform_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index ea5b3239a659..cbb100753154 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -11,6 +11,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@@ -470,40 +471,45 @@ static const uint64_t simpledrm_primary_plane_format_modifiers[] = {
};
static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *plane_state = plane->state;
- struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(old_state, plane);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_device *dev = plane->dev;
struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
- struct iosys_map dst = IOSYS_MAP_INIT_VADDR(sdev->screen_base);
- struct drm_rect src_clip, dst_clip;
- int idx;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+ int ret, idx;
- if (!fb)
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
return;
- if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &src_clip))
- return;
+ if (!drm_dev_enter(dev, &idx))
+ goto out_drm_gem_fb_end_cpu_access;
- dst_clip = plane_state->dst;
- if (!drm_rect_intersect(&dst_clip, &src_clip))
- return;
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR(sdev->screen_base);
+ struct drm_rect dst_clip = plane_state->dst;
- if (!drm_dev_enter(dev, &idx))
- return;
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
- iosys_map_incr(&dst, drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip));
- drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data, fb,
- &src_clip);
+ iosys_map_incr(&dst, drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip));
+ drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data, fb,
+ &damage);
+ }
drm_dev_exit(idx);
+out_drm_gem_fb_end_cpu_access:
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
}
static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = plane->dev;
struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
@@ -540,19 +546,6 @@ static enum drm_mode_status simpledrm_crtc_helper_mode_valid(struct drm_crtc *cr
return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sdev->mode);
}
-static int simpledrm_crtc_helper_atomic_check(struct drm_crtc *crtc,
- struct drm_atomic_state *new_state)
-{
- struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
- int ret;
-
- ret = drm_atomic_helper_check_crtc_state(new_crtc_state, false);
- if (ret)
- return ret;
-
- return drm_atomic_add_affected_planes(new_state, crtc);
-}
-
/*
* The CRTC is always enabled. Screen updates are performed by
* the primary plane's atomic_update function. Disabling clears
@@ -560,7 +553,7 @@ static int simpledrm_crtc_helper_atomic_check(struct drm_crtc *crtc,
*/
static const struct drm_crtc_helper_funcs simpledrm_crtc_helper_funcs = {
.mode_valid = simpledrm_crtc_helper_mode_valid,
- .atomic_check = simpledrm_crtc_helper_atomic_check,
+ .atomic_check = drm_crtc_helper_atomic_check,
};
static const struct drm_crtc_funcs simpledrm_crtc_funcs = {
@@ -687,8 +680,11 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
drm_err(dev, "no simplefb configuration found\n");
return ERR_PTR(-ENODEV);
}
- if (!stride)
- stride = DIV_ROUND_UP(drm_format_info_bpp(format, 0) * width, 8);
+ if (!stride) {
+ stride = drm_format_info_min_pitch(format, 0, width);
+ if (drm_WARN_ON(dev, !stride))
+ return ERR_PTR(-EINVAL);
+ }
sdev->mode = simpledrm_mode(width, height);
sdev->format = format;
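Both this stride fallback and the equivalent one added to ofdrm above compute the minimum pitch from the format description. A hedged example of what drm_format_info_min_pitch() returns for plane 0 of a simple linear format (the wrapper function is the editor's):

#include <drm/drm_fourcc.h>

static u64 example_min_pitch(void)
{
	/* XRGB8888 has 4 bytes per pixel, so an 800-pixel-wide scanline
	 * needs at least 4 * 800 = 3200 bytes. Unlike the open-coded
	 * DIV_ROUND_UP(bpp * width, 8), the helper also covers
	 * block-based formats and returns 0 for an invalid request,
	 * which is what the drm_WARN_ON() checks in the patch catch. */
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB8888);

	return drm_format_info_min_pitch(info, 0, 800); /* 3200 */
}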
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 7c8e8be774f1..c3f4b33136e5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -51,9 +51,6 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man;
int i, mem_type;
- drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
- bo, bo->resource->num_pages, bo->base.size >> 10,
- bo->base.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
mem_type = placement->placement[i].mem_type;
drm_printf(&p, " placement[%d]=0x%08X (%d)\n",
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1530982338e9..ba3aa0a0fc43 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -173,7 +173,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
- ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
+ ttm_move_memcpy(clear, ttm->num_pages, dst_iter, src_iter);
if (!src_iter->ops->maps_tt)
ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
@@ -239,16 +239,19 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
- if (fbo->base.resource) {
- ttm_resource_set_bo(fbo->base.resource, &fbo->base);
- bo->resource = NULL;
- }
-
dma_resv_init(&fbo->base.base._resv);
fbo->base.base.dev = NULL;
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
+ if (fbo->base.resource) {
+ ttm_resource_set_bo(fbo->base.resource, &fbo->base);
+ bo->resource = NULL;
+ ttm_bo_set_bulk_move(&fbo->base, NULL);
+ } else {
+ fbo->base.bulk_move = NULL;
+ }
+
ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
if (ret) {
kfree(fbo);
@@ -354,9 +357,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
map->virtual = NULL;
map->bo = bo;
- if (num_pages > bo->resource->num_pages)
+ if (num_pages > PFN_UP(bo->resource->size))
return -EINVAL;
- if ((start_page + num_pages) > bo->resource->num_pages)
+ if ((start_page + num_pages) > PFN_UP(bo->resource->size))
return -EINVAL;
ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
@@ -402,6 +405,8 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
struct ttm_resource *mem = bo->resource;
int ret;
+ dma_resv_assert_held(bo->base.resv);
+
ret = ttm_mem_io_reserve(bo->bdev, mem);
if (ret)
return ret;
@@ -460,6 +465,8 @@ void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
struct ttm_resource *mem = bo->resource;
+ dma_resv_assert_held(bo->base.resv);
+
if (iosys_map_is_null(map))
return;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 38119311284d..5a3e4b891377 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -217,7 +217,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
page_last = vma_pages(vma) + vma->vm_pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (unlikely(page_offset >= bo->resource->num_pages))
+ if (unlikely(page_offset >= PFN_UP(bo->base.size)))
return VM_FAULT_SIGBUS;
prot = ttm_io_prot(bo, bo->resource, prot);
@@ -412,7 +412,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
<< PAGE_SHIFT);
int ret;
- if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages)
+ if (len < 1 || (offset + len) > bo->base.size)
return -EIO;
ret = ttm_bo_reserve(bo, true, false, NULL);
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 4cfef2b3514d..0a8bc0b7f380 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -83,7 +83,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
spin_lock(&rman->lock);
ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
- node->base.num_pages,
+ PFN_UP(node->base.size),
bo->page_alignment, 0,
place->fpfn, lpfn, mode);
spin_unlock(&rman->lock);
@@ -229,7 +229,6 @@ int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
return ret;
spin_lock(&rman->lock);
- drm_mm_clean(mm);
drm_mm_takedown(mm);
spin_unlock(&rman->lock);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index a729c32a1e48..328391bb1d87 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -177,7 +177,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man;
res->start = 0;
- res->num_pages = PFN_UP(bo->base.size);
+ res->size = bo->base.size;
res->mem_type = place->mem_type;
res->placement = place->flags;
res->bus.addr = NULL;
@@ -192,7 +192,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
list_add_tail(&res->lru, &bo->bdev->pinned);
else
list_add_tail(&res->lru, &man->lru[bo->priority]);
- man->usage += res->num_pages << PAGE_SHIFT;
+ man->usage += res->size;
spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);
@@ -214,7 +214,7 @@ void ttm_resource_fini(struct ttm_resource_manager *man,
spin_lock(&bdev->lru_lock);
list_del_init(&res->lru);
- man->usage -= res->num_pages << PAGE_SHIFT;
+ man->usage -= res->size;
spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);
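The TTM hunks above and below replace the page-granular res->num_pages with a byte-granular res->size; call sites that still need a page count derive it with PFN_UP(). A minimal sketch of that conversion:

#include <linux/pfn.h>

/* PFN_UP() rounds a byte count up to whole pages:
 * PFN_UP(size) == DIV_ROUND_UP(size, PAGE_SIZE), e.g. with 4 KiB pages
 * PFN_UP(1) == 1 and PFN_UP(8192) == 2. Hence num_pages << PAGE_SHIFT
 * becomes just size, and num_pages becomes PFN_UP(size). */
static unsigned long resource_pages(size_t size)
{
	return PFN_UP(size);
}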
@@ -665,17 +665,15 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
iter_io->needs_unmap = false;
} else {
- size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
-
iter_io->needs_unmap = true;
memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
if (mem->bus.caching == ttm_write_combined)
iosys_map_set_vaddr_iomem(&iter_io->dmap,
ioremap_wc(mem->bus.offset,
- bus_size));
+ mem->size));
else if (mem->bus.caching == ttm_cached)
iosys_map_set_vaddr(&iter_io->dmap,
- memremap(mem->bus.offset, bus_size,
+ memremap(mem->bus.offset, mem->size,
MEMREMAP_WB |
MEMREMAP_WT |
MEMREMAP_WC));
@@ -684,7 +682,7 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
if (iosys_map_is_null(&iter_io->dmap))
iosys_map_set_vaddr_iomem(&iter_io->dmap,
ioremap(mem->bus.offset,
- bus_size));
+ mem->size));
if (iosys_map_is_null(&iter_io->dmap)) {
ret = -ENOMEM;
diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile
index 24d61f61d7db..3f6db179455d 100644
--- a/drivers/gpu/drm/udl/Makefile
+++ b/drivers/gpu/drm/udl/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_main.o udl_transfer.o
+udl-y := udl_drv.o udl_modeset.o udl_main.o udl_transfer.o
obj-$(CONFIG_DRM_UDL) := udl.o
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
deleted file mode 100644
index fade4c7adbf7..000000000000
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ /dev/null
@@ -1,139 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 Red Hat
- * based in parts on udlfb.c:
- * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
- * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
- * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
- */
-
-#include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_probe_helper.h>
-
-#include "udl_connector.h"
-#include "udl_drv.h"
-
-static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
- size_t len)
-{
- int ret, i;
- u8 *read_buff;
- struct udl_device *udl = data;
- struct usb_device *udev = udl_to_usb_device(udl);
-
- read_buff = kmalloc(2, GFP_KERNEL);
- if (!read_buff)
- return -1;
-
- for (i = 0; i < len; i++) {
- int bval = (i + block * EDID_LENGTH) << 8;
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- 0x02, (0x80 | (0x02 << 5)), bval,
- 0xA1, read_buff, 2, 1000);
- if (ret < 1) {
- DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
- kfree(read_buff);
- return -1;
- }
- buf[i] = read_buff[1];
- }
-
- kfree(read_buff);
- return 0;
-}
-
-static int udl_get_modes(struct drm_connector *connector)
-{
- struct udl_drm_connector *udl_connector =
- container_of(connector,
- struct udl_drm_connector,
- connector);
-
- drm_connector_update_edid_property(connector, udl_connector->edid);
- if (udl_connector->edid)
- return drm_add_edid_modes(connector, udl_connector->edid);
- return 0;
-}
-
-static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct udl_device *udl = to_udl(connector->dev);
- if (!udl->sku_pixel_limit)
- return 0;
-
- if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit)
- return MODE_VIRTUAL_Y;
-
- return 0;
-}
-
-static enum drm_connector_status
-udl_detect(struct drm_connector *connector, bool force)
-{
- struct udl_device *udl = to_udl(connector->dev);
- struct udl_drm_connector *udl_connector =
- container_of(connector,
- struct udl_drm_connector,
- connector);
-
- /* cleanup previous edid */
- if (udl_connector->edid != NULL) {
- kfree(udl_connector->edid);
- udl_connector->edid = NULL;
- }
-
- udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
- if (!udl_connector->edid)
- return connector_status_disconnected;
-
- return connector_status_connected;
-}
-
-static void udl_connector_destroy(struct drm_connector *connector)
-{
- struct udl_drm_connector *udl_connector =
- container_of(connector,
- struct udl_drm_connector,
- connector);
-
- drm_connector_cleanup(connector);
- kfree(udl_connector->edid);
- kfree(connector);
-}
-
-static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
- .get_modes = udl_get_modes,
- .mode_valid = udl_mode_valid,
-};
-
-static const struct drm_connector_funcs udl_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .detect = udl_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = udl_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-struct drm_connector *udl_connector_init(struct drm_device *dev)
-{
- struct udl_drm_connector *udl_connector;
- struct drm_connector *connector;
-
- udl_connector = kzalloc(sizeof(struct udl_drm_connector), GFP_KERNEL);
- if (!udl_connector)
- return ERR_PTR(-ENOMEM);
-
- connector = &udl_connector->connector;
- drm_connector_init(dev, connector, &udl_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
- drm_connector_helper_add(connector, &udl_connector_helper_funcs);
-
- connector->polled = DRM_CONNECTOR_POLL_HPD |
- DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
-
- return connector;
-}
diff --git a/drivers/gpu/drm/udl/udl_connector.h b/drivers/gpu/drm/udl/udl_connector.h
deleted file mode 100644
index 7f2d392df173..000000000000
--- a/drivers/gpu/drm/udl/udl_connector.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef __UDL_CONNECTOR_H__
-#define __UDL_CONNECTOR_H__
-
-#include <drm/drm_crtc.h>
-
-struct edid;
-
-struct udl_drm_connector {
- struct drm_connector connector;
- /* last udl_detect edid */
- struct edid *edid;
-};
-
-
-#endif //__UDL_CONNECTOR_H__
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index b4cc7cc568c7..282ebd6c02fd 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -14,10 +14,13 @@
#include <linux/mm_types.h>
#include <linux/usb.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
-#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_plane.h>
struct drm_mode_create_dumb;
@@ -46,21 +49,31 @@ struct urb_list {
size_t size;
};
+struct udl_connector {
+ struct drm_connector connector;
+ /* last udl_detect edid */
+ struct edid *edid;
+};
+
+static inline struct udl_connector *to_udl_connector(struct drm_connector *connector)
+{
+ return container_of(connector, struct udl_connector, connector);
+}
+
struct udl_device {
struct drm_device drm;
struct device *dev;
struct device *dmadev;
- struct drm_simple_display_pipe display_pipe;
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
struct mutex gem_lock;
int sku_pixel_limit;
struct urb_list urbs;
-
- char mode_buf[1024];
- uint32_t mode_buf_len;
};
#define to_udl(x) container_of(x, struct udl_device, drm)
@@ -89,23 +102,4 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
int udl_drop_usb(struct drm_device *dev);
int udl_select_std_channel(struct udl_device *udl);
-#define CMD_WRITE_RAW8 "\xAF\x60" /**< 8 bit raw write command. */
-#define CMD_WRITE_RL8 "\xAF\x61" /**< 8 bit run length command. */
-#define CMD_WRITE_COPY8 "\xAF\x62" /**< 8 bit copy command. */
-#define CMD_WRITE_RLX8 "\xAF\x63" /**< 8 bit extended run length command. */
-
-#define CMD_WRITE_RAW16 "\xAF\x68" /**< 16 bit raw write command. */
-#define CMD_WRITE_RL16 "\xAF\x69" /**< 16 bit run length command. */
-#define CMD_WRITE_COPY16 "\xAF\x6A" /**< 16 bit copy command. */
-#define CMD_WRITE_RLX16 "\xAF\x6B" /**< 16 bit extended run length command. */
-
-/* On/Off for driving the DisplayLink framebuffer to the display */
-#define UDL_REG_BLANK_MODE 0x1f
-
-#define UDL_BLANK_MODE_ON 0x00 /* hsync and vsync on, visible */
-#define UDL_BLANK_MODE_BLANKED 0x01 /* hsync and vsync on, blanked */
-#define UDL_BLANK_MODE_VSYNC_OFF 0x03 /* vsync off, blanked */
-#define UDL_BLANK_MODE_HSYNC_OFF 0x05 /* hsync off, blanked */
-#define UDL_BLANK_MODE_POWERDOWN 0x07 /* powered off; requires modeset */
-
#endif
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index ec6876f449f3..4b79d44752c9 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -8,70 +8,91 @@
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
+#include <linux/bitfield.h>
+
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "udl_drv.h"
-
-#define UDL_COLOR_DEPTH_16BPP 0
+#include "udl_proto.h"
/*
- * All DisplayLink bulk operations start with 0xAF, followed by specific code
- * All operations are written to buffers which then later get sent to device
+ * All DisplayLink bulk operations start with 0xaf (UDL_MSG_BULK), followed by
+ * a specific command code. All operations are written to a command buffer, which
+ * the driver sends to the device.
*/
static char *udl_set_register(char *buf, u8 reg, u8 val)
{
- *buf++ = 0xAF;
- *buf++ = 0x20;
+ *buf++ = UDL_MSG_BULK;
+ *buf++ = UDL_CMD_WRITEREG;
*buf++ = reg;
*buf++ = val;
+
return buf;
}
static char *udl_vidreg_lock(char *buf)
{
- return udl_set_register(buf, 0xFF, 0x00);
+ return udl_set_register(buf, UDL_REG_VIDREG, UDL_VIDREG_LOCK);
}
static char *udl_vidreg_unlock(char *buf)
{
- return udl_set_register(buf, 0xFF, 0xFF);
+ return udl_set_register(buf, UDL_REG_VIDREG, UDL_VIDREG_UNLOCK);
}
static char *udl_set_blank_mode(char *buf, u8 mode)
{
- return udl_set_register(buf, UDL_REG_BLANK_MODE, mode);
+ return udl_set_register(buf, UDL_REG_BLANKMODE, mode);
}
static char *udl_set_color_depth(char *buf, u8 selection)
{
- return udl_set_register(buf, 0x00, selection);
+ return udl_set_register(buf, UDL_REG_COLORDEPTH, selection);
}
-static char *udl_set_base16bpp(char *wrptr, u32 base)
+static char *udl_set_base16bpp(char *buf, u32 base)
{
- /* the base pointer is 16 bits wide, 0x20 is hi byte. */
- wrptr = udl_set_register(wrptr, 0x20, base >> 16);
- wrptr = udl_set_register(wrptr, 0x21, base >> 8);
- return udl_set_register(wrptr, 0x22, base);
+ /* the base pointer is 24 bits wide, 0x20 is hi byte. */
+ u8 reg20 = FIELD_GET(UDL_BASE_ADDR2_MASK, base);
+ u8 reg21 = FIELD_GET(UDL_BASE_ADDR1_MASK, base);
+ u8 reg22 = FIELD_GET(UDL_BASE_ADDR0_MASK, base);
+
+ buf = udl_set_register(buf, UDL_REG_BASE16BPP_ADDR2, reg20);
+ buf = udl_set_register(buf, UDL_REG_BASE16BPP_ADDR1, reg21);
+ buf = udl_set_register(buf, UDL_REG_BASE16BPP_ADDR0, reg22);
+
+ return buf;
}
/*
* DisplayLink HW has separate 16bpp and 8bpp framebuffers.
 * In 24bpp modes, the low 3:2:3 RGB bits go in the 8bpp framebuffer
*/
-static char *udl_set_base8bpp(char *wrptr, u32 base)
+static char *udl_set_base8bpp(char *buf, u32 base)
{
- wrptr = udl_set_register(wrptr, 0x26, base >> 16);
- wrptr = udl_set_register(wrptr, 0x27, base >> 8);
- return udl_set_register(wrptr, 0x28, base);
+ /* the base pointer is 24 bits wide, 0x26 is hi byte. */
+ u8 reg26 = FIELD_GET(UDL_BASE_ADDR2_MASK, base);
+ u8 reg27 = FIELD_GET(UDL_BASE_ADDR1_MASK, base);
+ u8 reg28 = FIELD_GET(UDL_BASE_ADDR0_MASK, base);
+
+ buf = udl_set_register(buf, UDL_REG_BASE8BPP_ADDR2, reg26);
+ buf = udl_set_register(buf, UDL_REG_BASE8BPP_ADDR1, reg27);
+ buf = udl_set_register(buf, UDL_REG_BASE8BPP_ADDR0, reg28);
+
+ return buf;
}
static char *udl_set_register_16(char *wrptr, u8 reg, u16 value)
@@ -122,84 +143,46 @@ static char *udl_set_register_lfsr16(char *wrptr, u8 reg, u16 value)
}
/*
- * This takes a standard fbdev screeninfo struct and all of its monitor mode
- * details and converts them into the DisplayLink equivalent register commands.
- ERR(vreg(dev, 0x00, (color_depth == 16) ? 0 : 1));
- ERR(vreg_lfsr16(dev, 0x01, xDisplayStart));
- ERR(vreg_lfsr16(dev, 0x03, xDisplayEnd));
- ERR(vreg_lfsr16(dev, 0x05, yDisplayStart));
- ERR(vreg_lfsr16(dev, 0x07, yDisplayEnd));
- ERR(vreg_lfsr16(dev, 0x09, xEndCount));
- ERR(vreg_lfsr16(dev, 0x0B, hSyncStart));
- ERR(vreg_lfsr16(dev, 0x0D, hSyncEnd));
- ERR(vreg_big_endian(dev, 0x0F, hPixels));
- ERR(vreg_lfsr16(dev, 0x11, yEndCount));
- ERR(vreg_lfsr16(dev, 0x13, vSyncStart));
- ERR(vreg_lfsr16(dev, 0x15, vSyncEnd));
- ERR(vreg_big_endian(dev, 0x17, vPixels));
- ERR(vreg_little_endian(dev, 0x1B, pixelClock5KHz));
-
- ERR(vreg(dev, 0x1F, 0));
-
- ERR(vbuf(dev, WRITE_VIDREG_UNLOCK, DSIZEOF(WRITE_VIDREG_UNLOCK)));
+ * Takes a DRM display mode and converts it into the DisplayLink
+ * equivalent register commands.
*/
-static char *udl_set_vid_cmds(char *wrptr, struct drm_display_mode *mode)
+static char *udl_set_display_mode(char *buf, struct drm_display_mode *mode)
{
- u16 xds, yds;
- u16 xde, yde;
- u16 yec;
-
- /* x display start */
- xds = mode->crtc_htotal - mode->crtc_hsync_start;
- wrptr = udl_set_register_lfsr16(wrptr, 0x01, xds);
- /* x display end */
- xde = xds + mode->crtc_hdisplay;
- wrptr = udl_set_register_lfsr16(wrptr, 0x03, xde);
-
- /* y display start */
- yds = mode->crtc_vtotal - mode->crtc_vsync_start;
- wrptr = udl_set_register_lfsr16(wrptr, 0x05, yds);
- /* y display end */
- yde = yds + mode->crtc_vdisplay;
- wrptr = udl_set_register_lfsr16(wrptr, 0x07, yde);
-
- /* x end count is active + blanking - 1 */
- wrptr = udl_set_register_lfsr16(wrptr, 0x09,
- mode->crtc_htotal - 1);
+ u16 reg01 = mode->crtc_htotal - mode->crtc_hsync_start;
+ u16 reg03 = reg01 + mode->crtc_hdisplay;
+ u16 reg05 = mode->crtc_vtotal - mode->crtc_vsync_start;
+ u16 reg07 = reg05 + mode->crtc_vdisplay;
+ u16 reg09 = mode->crtc_htotal - 1;
+ u16 reg0b = 1; /* libdlo hardcodes hsync start to 1 */
+ u16 reg0d = mode->crtc_hsync_end - mode->crtc_hsync_start + 1;
+ u16 reg0f = mode->hdisplay;
+ u16 reg11 = mode->crtc_vtotal;
+ u16 reg13 = 0; /* libdlo hardcodes vsync start to 0 */
+ u16 reg15 = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ u16 reg17 = mode->crtc_vdisplay;
+ u16 reg1b = mode->clock / 5;
+
+ buf = udl_set_register_lfsr16(buf, UDL_REG_XDISPLAYSTART, reg01);
+ buf = udl_set_register_lfsr16(buf, UDL_REG_XDISPLAYEND, reg03);
+ buf = udl_set_register_lfsr16(buf, UDL_REG_YDISPLAYSTART, reg05);
+ buf = udl_set_register_lfsr16(buf, UDL_REG_YDISPLAYEND, reg07);
+ buf = udl_set_register_lfsr16(buf, UDL_REG_XENDCOUNT, reg09);
+ buf = udl_set_register_lfsr16(buf, UDL_REG_HSYNCSTART, reg0b);
+ buf = udl_set_register_lfsr16(buf, UDL_REG_HSYNCEND, reg0d);
+ buf = udl_set_register_16(buf, UDL_REG_HPIXELS, reg0f);
+ buf = udl_set_register_lfsr16(buf, UDL_REG_YENDCOUNT, reg11);
+ buf = udl_set_register_lfsr16(buf, UDL_REG_VSYNCSTART, reg13);
+ buf = udl_set_register_lfsr16(buf, UDL_REG_VSYNCEND, reg15);
+ buf = udl_set_register_16(buf, UDL_REG_VPIXELS, reg17);
+ buf = udl_set_register_16be(buf, UDL_REG_PIXELCLOCK5KHZ, reg1b);
- /* libdlo hardcodes hsync start to 1 */
- wrptr = udl_set_register_lfsr16(wrptr, 0x0B, 1);
-
- /* hsync end is width of sync pulse + 1 */
- wrptr = udl_set_register_lfsr16(wrptr, 0x0D,
- mode->crtc_hsync_end - mode->crtc_hsync_start + 1);
-
- /* hpixels is active pixels */
- wrptr = udl_set_register_16(wrptr, 0x0F, mode->hdisplay);
-
- /* yendcount is vertical active + vertical blanking */
- yec = mode->crtc_vtotal;
- wrptr = udl_set_register_lfsr16(wrptr, 0x11, yec);
-
- /* libdlo hardcodes vsync start to 0 */
- wrptr = udl_set_register_lfsr16(wrptr, 0x13, 0);
-
- /* vsync end is width of vsync pulse */
- wrptr = udl_set_register_lfsr16(wrptr, 0x15, mode->crtc_vsync_end - mode->crtc_vsync_start);
-
- /* vpixels is active pixels */
- wrptr = udl_set_register_16(wrptr, 0x17, mode->crtc_vdisplay);
-
- wrptr = udl_set_register_16be(wrptr, 0x1B,
- mode->clock / 5);
-
- return wrptr;
+ return buf;
}
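To sanity-check the register derivations above, a worked example (editor's addition) using the standard VESA 640x480@60 timings — clock 25175 kHz, hsync 656-752, htotal 800, vsync 490-492, vtotal 525:

u16 reg01 = 800 - 656;		/* 144: x display start */
u16 reg03 = 144 + 640;		/* 784: x display end */
u16 reg05 = 525 - 490;		/*  35: y display start */
u16 reg07 = 35 + 480;		/* 515: y display end */
u16 reg09 = 800 - 1;		/* 799: x end count */
u16 reg0d = 752 - 656 + 1;	/*  97: hsync end */
u16 reg1b = 25175 / 5;		/* 5035: pixel clock in 5 kHz units */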
static char *udl_dummy_render(char *wrptr)
{
- *wrptr++ = 0xAF;
- *wrptr++ = 0x6A; /* copy */
+ *wrptr++ = UDL_MSG_BULK;
+ *wrptr++ = UDL_CMD_WRITECOPY16;
*wrptr++ = 0x00; /* from addr */
*wrptr++ = 0x00;
*wrptr++ = 0x00;
@@ -210,31 +193,6 @@ static char *udl_dummy_render(char *wrptr)
return wrptr;
}
-static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct udl_device *udl = to_udl(dev);
- struct urb *urb;
- char *buf;
- int retval;
-
- if (udl->mode_buf_len == 0) {
- DRM_ERROR("No mode set\n");
- return -EINVAL;
- }
-
- urb = udl_get_urb(dev);
- if (!urb)
- return -ENOMEM;
-
- buf = (char *)urb->transfer_buffer;
-
- memcpy(buf, udl->mode_buf, udl->mode_buf_len);
- retval = udl_submit_urb(dev, urb, udl->mode_buf_len);
- DRM_DEBUG("write mode info %d\n", udl->mode_buf_len);
- return retval;
-}
-
static long udl_log_cpp(unsigned int cpp)
{
if (WARN_ON(!is_power_of_2(cpp)))
@@ -258,15 +216,9 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
return ret;
log_bpp = ret;
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return ret;
-
urb = udl_get_urb(dev);
- if (!urb) {
- ret = -ENOMEM;
- goto out_drm_gem_fb_end_cpu_access;
- }
+ if (!urb)
+ return -ENOMEM;
cmd = urb->transfer_buffer;
for (i = clip->y1; i < clip->y2; i++) {
@@ -278,145 +230,339 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
&cmd, byte_offset, dev_byte_offset,
byte_width);
if (ret)
- goto out_drm_gem_fb_end_cpu_access;
+ return ret;
}
if (cmd > (char *)urb->transfer_buffer) {
/* Send partial buffer remaining before exiting */
int len;
if (cmd < (char *)urb->transfer_buffer + urb->transfer_buffer_length)
- *cmd++ = 0xAF;
+ *cmd++ = UDL_MSG_BULK;
len = cmd - (char *)urb->transfer_buffer;
ret = udl_submit_urb(dev, urb, len);
} else {
udl_urb_completion(urb);
}
- ret = 0;
-
-out_drm_gem_fb_end_cpu_access:
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
- return ret;
+ return 0;
}
/*
- * Simple display pipeline
+ * Primary plane
*/
-static const uint32_t udl_simple_display_pipe_formats[] = {
+static const uint32_t udl_primary_plane_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
};
-static enum drm_mode_status
-udl_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
- const struct drm_display_mode *mode)
+static const uint64_t udl_primary_plane_fmtmods[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static void udl_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- return MODE_OK;
+ struct drm_device *dev = plane->dev;
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+ int ret, idx;
+
+ if (!fb)
+ return; /* no framebuffer; plane is disabled */
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ return;
+
+ if (!drm_dev_enter(dev, &idx))
+ goto out_drm_gem_fb_end_cpu_access;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ udl_handle_damage(fb, &shadow_plane_state->data[0], &damage);
+ }
+
+ drm_dev_exit(idx);
+
+out_drm_gem_fb_end_cpu_access:
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+}
+
+static const struct drm_plane_helper_funcs udl_primary_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = drm_plane_helper_atomic_check,
+ .atomic_update = udl_primary_plane_helper_atomic_update,
+};
+
+static const struct drm_plane_funcs udl_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
+};
+
+/*
+ * CRTC
+ */
+
+static int udl_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ if (!new_crtc_state->enable)
+ return 0;
+
+ return drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
}
-static void
-udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
+static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *dev = crtc->dev;
- struct drm_framebuffer *fb = plane_state->fb;
- struct udl_device *udl = to_udl(dev);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct drm_display_mode *mode = &crtc_state->mode;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_rect clip = DRM_RECT_INIT(0, 0, fb->width, fb->height);
+ struct urb *urb;
char *buf;
- char *wrptr;
- int color_depth = UDL_COLOR_DEPTH_16BPP;
+ int idx;
- buf = (char *)udl->mode_buf;
+ if (!drm_dev_enter(dev, &idx))
+ return;
- /* This first section has to do with setting the base address on the
- * controller associated with the display. There are 2 base
- * pointers, currently, we only use the 16 bpp segment.
- */
- wrptr = udl_vidreg_lock(buf);
- wrptr = udl_set_color_depth(wrptr, color_depth);
+ urb = udl_get_urb(dev);
+ if (!urb)
+ goto out;
+
+ buf = (char *)urb->transfer_buffer;
+ buf = udl_vidreg_lock(buf);
+ buf = udl_set_color_depth(buf, UDL_COLORDEPTH_16BPP);
/* set base for 16bpp segment to 0 */
- wrptr = udl_set_base16bpp(wrptr, 0);
+ buf = udl_set_base16bpp(buf, 0);
/* set base for 8bpp segment to end of fb */
- wrptr = udl_set_base8bpp(wrptr, 2 * mode->vdisplay * mode->hdisplay);
-
- wrptr = udl_set_vid_cmds(wrptr, mode);
- wrptr = udl_set_blank_mode(wrptr, UDL_BLANK_MODE_ON);
- wrptr = udl_vidreg_unlock(wrptr);
-
- wrptr = udl_dummy_render(wrptr);
-
- udl->mode_buf_len = wrptr - buf;
+ buf = udl_set_base8bpp(buf, 2 * mode->vdisplay * mode->hdisplay);
+ buf = udl_set_display_mode(buf, mode);
+ buf = udl_set_blank_mode(buf, UDL_BLANKMODE_ON);
+ buf = udl_vidreg_unlock(buf);
+ buf = udl_dummy_render(buf);
- udl_handle_damage(fb, &shadow_plane_state->data[0], &clip);
+ udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
- /* enable display */
- udl_crtc_write_mode_to_hw(crtc);
+out:
+ drm_dev_exit(idx);
}
-static void
-udl_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
+static void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *dev = crtc->dev;
struct urb *urb;
char *buf;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
urb = udl_get_urb(dev);
if (!urb)
- return;
+ goto out;
buf = (char *)urb->transfer_buffer;
buf = udl_vidreg_lock(buf);
- buf = udl_set_blank_mode(buf, UDL_BLANK_MODE_POWERDOWN);
+ buf = udl_set_blank_mode(buf, UDL_BLANKMODE_POWERDOWN);
buf = udl_vidreg_unlock(buf);
buf = udl_dummy_render(buf);
udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
+
+out:
+ drm_dev_exit(idx);
+}
+
+static const struct drm_crtc_helper_funcs udl_crtc_helper_funcs = {
+ .atomic_check = udl_crtc_helper_atomic_check,
+ .atomic_enable = udl_crtc_helper_atomic_enable,
+ .atomic_disable = udl_crtc_helper_atomic_disable,
+};
+
+static const struct drm_crtc_funcs udl_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+/*
+ * Encoder
+ */
+
+static const struct drm_encoder_funcs udl_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+/*
+ * Connector
+ */
+
+static int udl_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct udl_connector *udl_connector = to_udl_connector(connector);
+
+ drm_connector_update_edid_property(connector, udl_connector->edid);
+ if (udl_connector->edid)
+ return drm_add_edid_modes(connector, udl_connector->edid);
+
+ return 0;
}
-static void
-udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_plane_state)
+static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
+ .get_modes = udl_connector_helper_get_modes,
+};
+
+static int udl_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
{
- struct drm_plane_state *state = pipe->plane.state;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
- struct drm_framebuffer *fb = state->fb;
- struct drm_rect rect;
+ struct udl_device *udl = data;
+ struct drm_device *dev = &udl->drm;
+ struct usb_device *udev = udl_to_usb_device(udl);
+ u8 *read_buff;
+ int ret;
+ size_t i;
- if (!fb)
- return;
+ read_buff = kmalloc(2, GFP_KERNEL);
+ if (!read_buff)
+ return -ENOMEM;
+
+ for (i = 0; i < len; i++) {
+ int bval = (i + block * EDID_LENGTH) << 8;
+
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ 0x02, (0x80 | (0x02 << 5)), bval,
+ 0xA1, read_buff, 2, USB_CTRL_GET_TIMEOUT);
+ if (ret < 0) {
+ drm_err(dev, "Read EDID byte %zu failed err %x\n", i, ret);
+ goto err_kfree;
+ } else if (ret < 1) {
+ ret = -EIO;
+ drm_err(dev, "Read EDID byte %zu failed\n", i);
+ goto err_kfree;
+ }
+
+ buf[i] = read_buff[1];
+ }
+
+ kfree(read_buff);
+
+ return 0;
+
+err_kfree:
+ kfree(read_buff);
+ return ret;
+}
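The function above is a callback for drm_do_get_edid(), which invokes it per 128-byte EDID block and handles block validation and extension blocks itself. A self-contained sketch of the same contract, reading from an in-memory blob (the edid_source type is a hypothetical stand-in for udl_device):

#include <drm/drm_edid.h>
#include <linux/errno.h>
#include <linux/string.h>

struct edid_source {		/* hypothetical context */
	const u8 *blob;
	size_t size;
};

/* Fill buf[0..len) with bytes of EDID block 'block'; 0 on success. */
static int blob_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
{
	struct edid_source *src = data;
	size_t off = (size_t)block * EDID_LENGTH;

	if (off + len > src->size)
		return -EINVAL;
	memcpy(buf, src->blob + off, len);
	return 0;
}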
+
+static enum drm_connector_status udl_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_device *dev = connector->dev;
+ struct udl_device *udl = to_udl(dev);
+ struct udl_connector *udl_connector = to_udl_connector(connector);
+ enum drm_connector_status status = connector_status_disconnected;
+ int idx;
+
+ /* cleanup previous EDID */
+ kfree(udl_connector->edid);
+ udl_connector->edid = NULL;
+
+ if (!drm_dev_enter(dev, &idx))
+ return connector_status_disconnected;
+
+ udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
+ if (udl_connector->edid)
+ status = connector_status_connected;
+
+ drm_dev_exit(idx);
+
+ return status;
+}
- if (drm_atomic_helper_damage_merged(old_plane_state, state, &rect))
- udl_handle_damage(fb, &shadow_plane_state->data[0], &rect);
+static void udl_connector_destroy(struct drm_connector *connector)
+{
+ struct udl_connector *udl_connector = to_udl_connector(connector);
+
+ drm_connector_cleanup(connector);
+ kfree(udl_connector->edid);
+ kfree(udl_connector);
}
-static const struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = {
- .mode_valid = udl_simple_display_pipe_mode_valid,
- .enable = udl_simple_display_pipe_enable,
- .disable = udl_simple_display_pipe_disable,
- .update = udl_simple_display_pipe_update,
- DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+static const struct drm_connector_funcs udl_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .detect = udl_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = udl_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
+struct drm_connector *udl_connector_init(struct drm_device *dev)
+{
+ struct udl_connector *udl_connector;
+ struct drm_connector *connector;
+ int ret;
+
+ udl_connector = kzalloc(sizeof(*udl_connector), GFP_KERNEL);
+ if (!udl_connector)
+ return ERR_PTR(-ENOMEM);
+
+ connector = &udl_connector->connector;
+ ret = drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+ if (ret)
+ goto err_kfree;
+
+ drm_connector_helper_add(connector, &udl_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD |
+ DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ return connector;
+
+err_kfree:
+ kfree(udl_connector);
+ return ERR_PTR(ret);
+}
+
/*
* Modesetting
*/
-static const struct drm_mode_config_funcs udl_mode_funcs = {
+static enum drm_mode_status udl_mode_config_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+{
+ struct udl_device *udl = to_udl(dev);
+
+ if (udl->sku_pixel_limit) {
+ if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit)
+ return MODE_MEM;
+ }
+
+ return MODE_OK;
+}
+
+static const struct drm_mode_config_funcs udl_mode_config_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
+ .mode_valid = udl_mode_config_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
int udl_modeset_init(struct drm_device *dev)
{
- size_t format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
struct udl_device *udl = to_udl(dev);
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
struct drm_connector *connector;
int ret;
@@ -426,28 +572,42 @@ int udl_modeset_init(struct drm_device *dev)
dev->mode_config.min_width = 640;
dev->mode_config.min_height = 480;
-
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
-
- dev->mode_config.prefer_shadow = 0;
dev->mode_config.preferred_depth = 16;
+ dev->mode_config.funcs = &udl_mode_config_funcs;
+
+ primary_plane = &udl->primary_plane;
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &udl_primary_plane_funcs,
+ udl_primary_plane_formats,
+ ARRAY_SIZE(udl_primary_plane_formats),
+ udl_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+ drm_plane_helper_add(primary_plane, &udl_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
- dev->mode_config.funcs = &udl_mode_funcs;
+ crtc = &udl->crtc;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &udl_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+ drm_crtc_helper_add(crtc, &udl_crtc_helper_funcs);
+
+ encoder = &udl->encoder;
+ ret = drm_encoder_init(dev, encoder, &udl_encoder_funcs, DRM_MODE_ENCODER_DAC, NULL);
+ if (ret)
+ return ret;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
connector = udl_connector_init(dev);
if (IS_ERR(connector))
return PTR_ERR(connector);
-
- format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
-
- ret = drm_simple_display_pipe_init(dev, &udl->display_pipe,
- &udl_simple_display_pipe_funcs,
- udl_simple_display_pipe_formats,
- format_count, NULL, connector);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
return ret;
- drm_plane_enable_fb_damage_clips(&udl->display_pipe.plane);
drm_mode_config_reset(dev);
diff --git a/drivers/gpu/drm/udl/udl_proto.h b/drivers/gpu/drm/udl/udl_proto.h
new file mode 100644
index 000000000000..c92d2109584c
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_proto.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef UDL_PROTO_H
+#define UDL_PROTO_H
+
+#include <linux/bits.h>
+
+#define UDL_MSG_BULK 0xaf
+
+/* Register access */
+#define UDL_CMD_WRITEREG 0x20 /* See register constants below */
+
+/* Framebuffer access */
+#define UDL_CMD_WRITERAW8 0x60 /* 8 bit raw write command. */
+#define UDL_CMD_WRITERL8 0x61 /* 8 bit run length command. */
+#define UDL_CMD_WRITECOPY8 0x62 /* 8 bit copy command. */
+#define UDL_CMD_WRITERLX8 0x63 /* 8 bit extended run length command. */
+#define UDL_CMD_WRITERAW16 0x68 /* 16 bit raw write command. */
+#define UDL_CMD_WRITERL16 0x69 /* 16 bit run length command. */
+#define UDL_CMD_WRITECOPY16 0x6a /* 16 bit copy command. */
+#define UDL_CMD_WRITERLX16 0x6b /* 16 bit extended run length command. */
+
+/* Color depth */
+#define UDL_REG_COLORDEPTH 0x00
+#define UDL_COLORDEPTH_16BPP 0
+#define UDL_COLORDEPTH_24BPP 1
+
+/* Display-mode settings */
+#define UDL_REG_XDISPLAYSTART 0x01
+#define UDL_REG_XDISPLAYEND 0x03
+#define UDL_REG_YDISPLAYSTART 0x05
+#define UDL_REG_YDISPLAYEND 0x07
+#define UDL_REG_XENDCOUNT 0x09
+#define UDL_REG_HSYNCSTART 0x0b
+#define UDL_REG_HSYNCEND 0x0d
+#define UDL_REG_HPIXELS 0x0f
+#define UDL_REG_YENDCOUNT 0x11
+#define UDL_REG_VSYNCSTART 0x13
+#define UDL_REG_VSYNCEND 0x15
+#define UDL_REG_VPIXELS 0x17
+#define UDL_REG_PIXELCLOCK5KHZ 0x1b
+
+/* On/Off for driving the DisplayLink framebuffer to the display */
+#define UDL_REG_BLANKMODE 0x1f
+#define UDL_BLANKMODE_ON 0x00 /* hsync and vsync on, visible */
+#define UDL_BLANKMODE_BLANKED 0x01 /* hsync and vsync on, blanked */
+#define UDL_BLANKMODE_VSYNC_OFF 0x03 /* vsync off, blanked */
+#define UDL_BLANKMODE_HSYNC_OFF 0x05 /* hsync off, blanked */
+#define UDL_BLANKMODE_POWERDOWN 0x07 /* powered off; requires modeset */
+
+/* Framebuffer address */
+#define UDL_REG_BASE16BPP_ADDR2 0x20
+#define UDL_REG_BASE16BPP_ADDR1 0x21
+#define UDL_REG_BASE16BPP_ADDR0 0x22
+#define UDL_REG_BASE8BPP_ADDR2 0x26
+#define UDL_REG_BASE8BPP_ADDR1 0x27
+#define UDL_REG_BASE8BPP_ADDR0 0x28
+
+#define UDL_BASE_ADDR0_MASK GENMASK(7, 0)
+#define UDL_BASE_ADDR1_MASK GENMASK(15, 8)
+#define UDL_BASE_ADDR2_MASK GENMASK(23, 16)
+
+/* Lock/unlock video registers */
+#define UDL_REG_VIDREG 0xff
+#define UDL_VIDREG_LOCK 0x00
+#define UDL_VIDREG_UNLOCK 0xff
+
+#endif
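As a quick check of the constants above: every register write is the four bytes { UDL_MSG_BULK, UDL_CMD_WRITEREG, reg, val }, and a 24-bit framebuffer base splits across three byte-wide registers via the UDL_BASE_ADDR*_MASK fields. A minimal sketch mirroring udl_set_register() and udl_set_base16bpp() from the patch:

#include <linux/bitfield.h>
#include <linux/types.h>
#include "udl_proto.h"

/* Writing 0x00 to UDL_REG_VIDREG (udl_vidreg_lock()) emits: af 20 ff 00 */
static char *write_reg(char *buf, u8 reg, u8 val)
{
	*buf++ = UDL_MSG_BULK;
	*buf++ = UDL_CMD_WRITEREG;
	*buf++ = reg;
	*buf++ = val;
	return buf;
}

/* base 0x123456 splits into ADDR2=0x12, ADDR1=0x34, ADDR0=0x56 */
static void split_base(u32 base, u8 *a2, u8 *a1, u8 *a0)
{
	*a2 = FIELD_GET(UDL_BASE_ADDR2_MASK, base);	/* bits 23:16 */
	*a1 = FIELD_GET(UDL_BASE_ADDR1_MASK, base);	/* bits 15:8 */
	*a0 = FIELD_GET(UDL_BASE_ADDR0_MASK, base);	/* bits 7:0 */
}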
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index b57844632dbd..5ff1037a3453 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -10,6 +10,7 @@
#include <asm/unaligned.h>
#include "udl_drv.h"
+#include "udl_proto.h"
#define MAX_CMD_PIXELS 255
@@ -89,8 +90,8 @@ static void udl_compress_hline16(
const u8 *cmd_pixel_start, *cmd_pixel_end = NULL;
uint16_t pixel_val16;
- *cmd++ = 0xaf;
- *cmd++ = 0x6b;
+ *cmd++ = UDL_MSG_BULK;
+ *cmd++ = UDL_CMD_WRITERLX16;
*cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF);
*cmd++ = (uint8_t) ((dev_addr >> 8) & 0xFF);
*cmd++ = (uint8_t) ((dev_addr) & 0xFF);
@@ -152,7 +153,7 @@ static void udl_compress_hline16(
if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
/* Fill leftover bytes with no-ops */
if (cmd_buffer_end > cmd)
- memset(cmd, 0xAF, cmd_buffer_end - cmd);
+ memset(cmd, UDL_MSG_BULK, cmd_buffer_end - cmd);
cmd = (uint8_t *) cmd_buffer_end;
}
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 231add8b8e12..43d9b3a6a352 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -736,12 +736,12 @@ static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
struct vc4_bo *bo = to_vc4_bo(obj);
if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
- DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
+ DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
return -EINVAL;
}
if (bo->madv != VC4_MADV_WILLNEED) {
- DRM_DEBUG("mmaping of %s BO not allowed\n",
+ DRM_DEBUG("mmapping of %s BO not allowed\n",
bo->madv == VC4_MADV_DONTNEED ?
"purgeable" : "purged");
return -EINVAL;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index ffbbb454c9e8..2027063fdc30 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -490,6 +490,7 @@ module_init(vc4_drm_register);
module_exit(vc4_drm_unregister);
MODULE_ALIAS("platform:vc4-drm");
+MODULE_SOFTDEP("pre: snd-soc-hdmi-codec");
MODULE_DESCRIPTION("Broadcom VC4 DRM Driver");
MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 418a8242691f..515228682e8e 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -326,6 +326,8 @@ struct vc4_hvs {
struct clk *core_clk;
+ unsigned long max_core_rate;
+
/* Memory manager for CRTCs to allocate space in the display
* list. Units are dwords.
*/
@@ -337,6 +339,20 @@ struct vc4_hvs {
struct drm_mm_node mitchell_netravali_filter;
struct debugfs_regset32 regset;
+
+ /*
+ * Even though HDMI0 on the RPi4 can output modes requiring a
+ * pixel rate higher than 297 MHz, it needs some adjustments in
+ * the config.txt file to be able to do so and thus won't always
+ * be available.
+ */
+ bool vc5_hdmi_enable_hdmi_20;
+
+ /*
+ * 4096x2160@60 requires a core overclock to work, so record
+ * whether the core clock can be raised high enough for it.
+ */
+ bool vc5_hdmi_enable_4096by2160;
};
struct vc4_plane {
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 64f9feabf43e..0d78c800ed51 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -459,6 +459,7 @@ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
{
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
+ struct vc4_dev *vc4 = to_vc4_dev(connector->dev);
int ret = 0;
struct edid *edid;
@@ -482,7 +483,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
- if (vc4_hdmi->disable_4kp60) {
+ if (!vc4->hvs->vc5_hdmi_enable_hdmi_20) {
struct drm_device *drm = connector->dev;
const struct drm_display_mode *mode;
@@ -542,7 +543,7 @@ static void vc4_hdmi_connector_reset(struct drm_connector *connector)
new_state->base.max_bpc = 8;
new_state->base.max_requested_bpc = 8;
new_state->output_format = VC4_HDMI_OUTPUT_RGB;
- drm_atomic_helper_connector_tv_reset(connector);
+ drm_atomic_helper_connector_tv_margins_reset(connector);
}
static struct drm_connector_state *
@@ -1752,15 +1753,23 @@ vc4_hdmi_sink_supports_format_bpc(const struct vc4_hdmi *vc4_hdmi,
static enum drm_mode_status
vc4_hdmi_encoder_clock_valid(const struct vc4_hdmi *vc4_hdmi,
+ const struct drm_display_mode *mode,
unsigned long long clock)
{
const struct drm_connector *connector = &vc4_hdmi->connector;
const struct drm_display_info *info = &connector->display_info;
+ struct vc4_dev *vc4 = to_vc4_dev(connector->dev);
if (clock > vc4_hdmi->variant->max_pixel_clock)
return MODE_CLOCK_HIGH;
- if (vc4_hdmi->disable_4kp60 && clock > HDMI_14_MAX_TMDS_CLK)
+ if (!vc4->hvs->vc5_hdmi_enable_hdmi_20 && clock > HDMI_14_MAX_TMDS_CLK)
+ return MODE_CLOCK_HIGH;
+
+ /* 4096x2160@60 is not reliable without overclocking core */
+ if (!vc4->hvs->vc5_hdmi_enable_4096by2160 &&
+ mode->hdisplay > 3840 && mode->vdisplay >= 2160 &&
+ drm_mode_vrefresh(mode) >= 50)
return MODE_CLOCK_HIGH;
if (info->max_tmds_clock && clock > (info->max_tmds_clock * 1000))
@@ -1797,7 +1806,7 @@ vc4_hdmi_encoder_compute_clock(const struct vc4_hdmi *vc4_hdmi,
unsigned long long clock;
clock = vc4_hdmi_encoder_compute_mode_clock(mode, bpc, fmt);
- if (vc4_hdmi_encoder_clock_valid(vc4_hdmi, clock) != MODE_OK)
+ if (vc4_hdmi_encoder_clock_valid(vc4_hdmi, mode, clock) != MODE_OK)
return -EINVAL;
vc4_state->tmds_char_rate = clock;
@@ -1960,7 +1969,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
(mode->hsync_end % 2) || (mode->htotal % 2)))
return MODE_H_ILLEGAL;
- return vc4_hdmi_encoder_clock_valid(vc4_hdmi, mode->clock * 1000);
+ return vc4_hdmi_encoder_clock_valid(vc4_hdmi, mode, mode->clock * 1000);
}
static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
@@ -3318,12 +3327,37 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
unsigned long __maybe_unused flags;
u32 __maybe_unused value;
+ unsigned long rate;
int ret;
+ /*
+ * The HSM clock is in the HDMI power domain, so we need to set
+ * its frequency while the power domain is active so that it
+ * keeps its rate.
+ */
+ ret = clk_set_min_rate(vc4_hdmi->hsm_clock, HSM_MIN_CLOCK_FREQ);
+ if (ret)
+ return ret;
+
ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
if (ret)
return ret;
+ /*
+ * Whenever the Raspberry Pi boots without an HDMI monitor
+ * plugged in, the firmware won't have initialized the HSM clock
+ * rate and it will be reported as 0.
+ *
+ * If we try to access a controller register in that state, it
+ * will lead to a silent CPU stall. Let's make sure we catch it
+ * here before touching the hardware.
+ */
+ rate = clk_get_rate(vc4_hdmi->hsm_clock);
+ if (!rate) {
+ ret = -EINVAL;
+ goto err_disable_clk;
+ }
+
if (vc4_hdmi->variant->reset)
vc4_hdmi->variant->reset(vc4_hdmi);
@@ -3345,6 +3379,10 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
#endif
return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+ return ret;
}
static void vc4_hdmi_put_ddc_device(void *ptr)
@@ -3427,14 +3465,6 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
vc4_hdmi->disable_wifi_frequencies =
of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence");
- if (variant->max_pixel_clock == 600000000) {
- struct vc4_dev *vc4 = to_vc4_dev(drm);
- long max_rate = clk_round_rate(vc4->hvs->core_clk, 550000000);
-
- if (max_rate < 550000000)
- vc4_hdmi->disable_4kp60 = true;
- }
-
ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index db823efb2563..e3619836ca17 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -156,14 +156,6 @@ struct vc4_hdmi {
*/
bool disable_wifi_frequencies;
- /*
- * Even if HDMI0 on the RPi4 can output modes requiring a pixel
- * rate higher than 297MHz, it needs some adjustments in the
- * config.txt file to be able to do so and thus won't always be
- * available.
- */
- bool disable_4kp60;
-
struct cec_adapter *cec_adap;
struct cec_msg cec_rx_msg;
bool cec_tx_ok;
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 4ac9f5a2d5f9..c4453a5ae163 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -28,6 +28,8 @@
#include <drm/drm_drv.h>
#include <drm/drm_vblank.h>
+#include <soc/bcm2835/raspberrypi-firmware.h>
+
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -791,12 +793,36 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
hvs->regset.nregs = ARRAY_SIZE(hvs_regs);
if (vc4->is_vc5) {
+ struct rpi_firmware *firmware;
+ struct device_node *node;
+ unsigned int max_rate;
+
+ node = rpi_firmware_find_node();
+ if (!node)
+ return -EINVAL;
+
+ firmware = rpi_firmware_get(node);
+ of_node_put(node);
+ if (!firmware)
+ return -EPROBE_DEFER;
+
hvs->core_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(hvs->core_clk)) {
dev_err(&pdev->dev, "Couldn't get core clock\n");
return PTR_ERR(hvs->core_clk);
}
+ max_rate = rpi_firmware_clk_get_max_rate(firmware,
+ RPI_FIRMWARE_CORE_CLK_ID);
+ rpi_firmware_put(firmware);
+ if (max_rate >= 550000000)
+ hvs->vc5_hdmi_enable_hdmi_20 = true;
+
+ if (max_rate >= 600000000)
+ hvs->vc5_hdmi_enable_4096by2160 = true;
+
+ hvs->max_core_rate = max_rate;
+
ret = clk_prepare_enable(hvs->core_clk);
if (ret) {
dev_err(&pdev->dev, "Couldn't enable the core clock\n");
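For reference, the firmware round-trip added above reduces to a small self-contained helper. All of the calls (rpi_firmware_find_node(), rpi_firmware_get(), rpi_firmware_clk_get_max_rate(), rpi_firmware_put()) are the ones used in the hunk; only the helper name is invented.

#include <linux/of.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

static int query_core_max_rate(unsigned int *max_rate)
{
	struct device_node *node;
	struct rpi_firmware *fw;

	node = rpi_firmware_find_node();
	if (!node)
		return -EINVAL;

	/* Takes a reference on the firmware interface... */
	fw = rpi_firmware_get(node);
	of_node_put(node);
	if (!fw)
		return -EPROBE_DEFER;

	*max_rate = rpi_firmware_clk_get_max_rate(fw, RPI_FIRMWARE_CORE_CLK_ID);
	/* ...dropped once the one-off query is done. */
	rpi_firmware_put(fw);
	return 0;
}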
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 4419e810103d..5c97642ed66a 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -396,8 +396,8 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
if (vc4->is_vc5) {
unsigned long state_rate = max(old_hvs_state->core_clock_rate,
new_hvs_state->core_clock_rate);
- unsigned long core_rate = max_t(unsigned long,
- 500000000, state_rate);
+ unsigned long core_rate = clamp_t(unsigned long, state_rate,
+ 500000000, hvs->max_core_rate);
drm_dbg(dev, "Raising the core clock at %lu Hz\n", core_rate);
@@ -431,14 +431,17 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_cleanup_planes(dev, state);
if (vc4->is_vc5) {
- drm_dbg(dev, "Running the core clock at %lu Hz\n",
- new_hvs_state->core_clock_rate);
+ unsigned long core_rate = min_t(unsigned long,
+ hvs->max_core_rate,
+ new_hvs_state->core_clock_rate);
+
+ drm_dbg(dev, "Running the core clock at %lu Hz\n", core_rate);
/*
* Request a clock rate based on the current HVS
* requirements.
*/
- WARN_ON(clk_set_min_rate(hvs->core_clk, new_hvs_state->core_clock_rate));
+ WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
drm_dbg(dev, "Core clock actual rate: %lu Hz\n",
clk_get_rate(hvs->core_clk));
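The switch from max_t() to clamp_t() adds a ceiling on top of the existing 500 MHz floor. clamp_t(type, val, lo, hi) from linux/minmax.h evaluates to lo when val < lo, hi when val > hi, and val otherwise; a few illustrative values, assuming a 600 MHz max_core_rate:

#include <linux/minmax.h>

/*
 * clamp_t(unsigned long, 300000000, 500000000, 600000000) == 500000000
 * clamp_t(unsigned long, 550000000, 500000000, 600000000) == 550000000
 * clamp_t(unsigned long, 700000000, 500000000, 600000000) == 600000000
 *
 * The old max_t() only enforced the floor, so a state rate above the
 * firmware limit would have been requested verbatim.
 */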
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 0b3333865702..92c07e31d632 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -69,6 +69,7 @@
#define VEC_CONFIG0_STD_MASK GENMASK(1, 0)
#define VEC_CONFIG0_NTSC_STD 0
#define VEC_CONFIG0_PAL_BDGHI_STD 1
+#define VEC_CONFIG0_PAL_M_STD 2
#define VEC_CONFIG0_PAL_N_STD 3
#define VEC_SCHPH 0x108
@@ -255,10 +256,9 @@ static const struct vc4_vec_tv_mode vc4_vec_tv_modes[] = {
.config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
[VC4_VEC_TV_MODE_PAL_M] = {
- .mode = &pal_mode,
- .config0 = VEC_CONFIG0_PAL_BDGHI_STD,
- .config1 = VEC_CONFIG1_C_CVBS_CVBS | VEC_CONFIG1_CUSTOM_FREQ,
- .custom_freq = 0x223b61d1,
+ .mode = &ntsc_mode,
+ .config0 = VEC_CONFIG0_PAL_M_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 3b1701607aae..5d05093014ac 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -47,7 +47,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
struct virtio_gpu_fence_event *e = NULL;
int ret;
- if (!(vfpriv->ring_idx_mask & (1 << ring_idx)))
+ if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
return 0;
e = kzalloc(sizeof(*e), GFP_KERNEL);
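The BIT_ULL() change is a width fix: ring_idx_mask is 64 bits wide, but 1 << ring_idx is an int expression, so any ring_idx of 31 or more either overflows (undefined behaviour) or tests the wrong bit. A minimal sketch, with a hypothetical helper name:

#include <linux/bits.h>

/* BIT_ULL(n) expands to (1ULL << (n)), valid for n up to 63. */
static bool ring_has_fence_event(u64 ring_idx_mask, unsigned int ring_idx)
{
	return ring_idx_mask & BIT_ULL(ring_idx);
}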
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index a4fabe208d9f..faddae3d6ac2 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -16,13 +16,6 @@ config DRM_VMWGFX
virtual hardware.
The compiled module will be called "vmwgfx.ko".
-config DRM_VMWGFX_FBCON
- depends on DRM_VMWGFX && DRM_FBDEV_EMULATION
- bool "Enable framebuffer console under vmwgfx by default"
- help
- Choose this option if you are shipping a new vmwgfx
- userspace driver that supports using the kernel driver.
-
config DRM_VMWGFX_MKSSTATS
bool "Enable mksGuestStats instrumentation of vmwgfx by default"
depends on DRM_VMWGFX
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index eee73b9aa404..2a644f035597 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_drv.o \
+vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \
@@ -12,6 +12,4 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_d
vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \
vmwgfx_gem.o
-vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o
-
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 26a55fef1ab5..932b125ebf3d 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2022 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -44,16 +44,20 @@
#define pr_fmt(fmt) "[TTM] " fmt
+#include "ttm_object.h"
+#include "vmwgfx_drv.h"
+
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/module.h>
-#include "ttm_object.h"
-#include "vmwgfx_drv.h"
+#include <linux/hashtable.h>
MODULE_IMPORT_NS(DMA_BUF);
+#define VMW_TTM_OBJECT_REF_HT_ORDER 10
+
/**
* struct ttm_object_file
*
@@ -74,16 +78,14 @@ struct ttm_object_file {
struct ttm_object_device *tdev;
spinlock_t lock;
struct list_head ref_list;
- struct vmwgfx_open_hash ref_hash;
+ DECLARE_HASHTABLE(ref_hash, VMW_TTM_OBJECT_REF_HT_ORDER);
struct kref refcount;
};
/*
* struct ttm_object_device
*
- * @object_lock: lock that protects the object_hash hash table.
- *
- * @object_hash: hash table for fast lookup of object global names.
+ * @object_lock: lock that protects idr.
*
* @object_count: Per device object count.
*
@@ -92,7 +94,6 @@ struct ttm_object_file {
struct ttm_object_device {
spinlock_t object_lock;
- struct vmwgfx_open_hash object_hash;
atomic_t object_count;
struct dma_buf_ops ops;
void (*dmabuf_release)(struct dma_buf *dma_buf);
@@ -138,6 +139,36 @@ ttm_object_file_ref(struct ttm_object_file *tfile)
return tfile;
}
+static int ttm_tfile_find_ref_rcu(struct ttm_object_file *tfile,
+ uint64_t key,
+ struct vmwgfx_hash_item **p_hash)
+{
+ struct vmwgfx_hash_item *hash;
+
+ hash_for_each_possible_rcu(tfile->ref_hash, hash, head, key) {
+ if (hash->key == key) {
+ *p_hash = hash;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int ttm_tfile_find_ref(struct ttm_object_file *tfile,
+ uint64_t key,
+ struct vmwgfx_hash_item **p_hash)
+{
+ struct vmwgfx_hash_item *hash;
+
+ hash_for_each_possible(tfile->ref_hash, hash, head, key) {
+ if (hash->key == key) {
+ *p_hash = hash;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
static void ttm_object_file_destroy(struct kref *kref)
{
struct ttm_object_file *tfile =
@@ -240,37 +271,35 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
* Return: A pointer to the object if successful or NULL otherwise.
*/
struct ttm_base_object *
-ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
+ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key)
{
struct vmwgfx_hash_item *hash;
- struct vmwgfx_open_hash *ht = &tfile->ref_hash;
int ret;
rcu_read_lock();
- ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);
+ ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);
if (ret) {
rcu_read_unlock();
return NULL;
}
__release(RCU);
- return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+ return hlist_entry(hash, struct ttm_ref_object, hash)->obj;
}
EXPORT_SYMBOL(ttm_base_object_noref_lookup);
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
- uint32_t key)
+ uint64_t key)
{
struct ttm_base_object *base = NULL;
struct vmwgfx_hash_item *hash;
- struct vmwgfx_open_hash *ht = &tfile->ref_hash;
int ret;
rcu_read_lock();
- ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);
+ ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);
if (likely(ret == 0)) {
- base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+ base = hlist_entry(hash, struct ttm_ref_object, hash)->obj;
if (!kref_get_unless_zero(&base->refcount))
base = NULL;
}
@@ -280,7 +309,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
}
struct ttm_base_object *
-ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint64_t key)
{
struct ttm_base_object *base;
@@ -299,7 +328,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
bool *existed,
bool require_existed)
{
- struct vmwgfx_open_hash *ht = &tfile->ref_hash;
struct ttm_ref_object *ref;
struct vmwgfx_hash_item *hash;
int ret = -EINVAL;
@@ -312,10 +340,10 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
while (ret == -EINVAL) {
rcu_read_lock();
- ret = vmwgfx_ht_find_item_rcu(ht, base->handle, &hash);
+ ret = ttm_tfile_find_ref_rcu(tfile, base->handle, &hash);
if (ret == 0) {
- ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+ ref = hlist_entry(hash, struct ttm_ref_object, hash);
if (kref_get_unless_zero(&ref->kref)) {
rcu_read_unlock();
break;
@@ -337,21 +365,14 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
kref_init(&ref->kref);
spin_lock(&tfile->lock);
- ret = vmwgfx_ht_insert_item_rcu(ht, &ref->hash);
-
- if (likely(ret == 0)) {
- list_add_tail(&ref->head, &tfile->ref_list);
- kref_get(&base->refcount);
- spin_unlock(&tfile->lock);
- if (existed != NULL)
- *existed = false;
- break;
- }
+ hash_add_rcu(tfile->ref_hash, &ref->hash.head, ref->hash.key);
+ ret = 0;
+ list_add_tail(&ref->head, &tfile->ref_list);
+ kref_get(&base->refcount);
spin_unlock(&tfile->lock);
- BUG_ON(ret != -EINVAL);
-
- kfree(ref);
+ if (existed != NULL)
+ *existed = false;
}
return ret;
@@ -363,10 +384,8 @@ ttm_ref_object_release(struct kref *kref)
struct ttm_ref_object *ref =
container_of(kref, struct ttm_ref_object, kref);
struct ttm_object_file *tfile = ref->tfile;
- struct vmwgfx_open_hash *ht;
- ht = &tfile->ref_hash;
- (void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash);
+ hash_del_rcu(&ref->hash.head);
list_del(&ref->head);
spin_unlock(&tfile->lock);
@@ -378,18 +397,17 @@ ttm_ref_object_release(struct kref *kref)
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
unsigned long key)
{
- struct vmwgfx_open_hash *ht = &tfile->ref_hash;
struct ttm_ref_object *ref;
struct vmwgfx_hash_item *hash;
int ret;
spin_lock(&tfile->lock);
- ret = vmwgfx_ht_find_item(ht, key, &hash);
+ ret = ttm_tfile_find_ref(tfile, key, &hash);
if (unlikely(ret != 0)) {
spin_unlock(&tfile->lock);
return -EINVAL;
}
- ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+ ref = hlist_entry(hash, struct ttm_ref_object, hash);
kref_put(&ref->kref, ttm_ref_object_release);
spin_unlock(&tfile->lock);
return 0;
@@ -416,16 +434,13 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
}
spin_unlock(&tfile->lock);
- vmwgfx_ht_remove(&tfile->ref_hash);
ttm_object_file_unref(&tfile);
}
-struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
- unsigned int hash_order)
+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev)
{
struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
- int ret;
if (unlikely(tfile == NULL))
return NULL;
@@ -435,34 +450,21 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
kref_init(&tfile->refcount);
INIT_LIST_HEAD(&tfile->ref_list);
- ret = vmwgfx_ht_create(&tfile->ref_hash, hash_order);
- if (ret)
- goto out_err;
+ hash_init(tfile->ref_hash);
return tfile;
-out_err:
- vmwgfx_ht_remove(&tfile->ref_hash);
-
- kfree(tfile);
-
- return NULL;
}
struct ttm_object_device *
-ttm_object_device_init(unsigned int hash_order,
- const struct dma_buf_ops *ops)
+ttm_object_device_init(const struct dma_buf_ops *ops)
{
struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
- int ret;
if (unlikely(tdev == NULL))
return NULL;
spin_lock_init(&tdev->object_lock);
atomic_set(&tdev->object_count, 0);
- ret = vmwgfx_ht_create(&tdev->object_hash, hash_order);
- if (ret != 0)
- goto out_no_object_hash;
/*
* Our base is at VMWGFX_NUM_MOB + 1 because we want to create
@@ -477,10 +479,6 @@ ttm_object_device_init(unsigned int hash_order,
tdev->dmabuf_release = tdev->ops.release;
tdev->ops.release = ttm_prime_dmabuf_release;
return tdev;
-
-out_no_object_hash:
- kfree(tdev);
- return NULL;
}
void ttm_object_device_release(struct ttm_object_device **p_tdev)
@@ -491,7 +489,6 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
idr_destroy(&tdev->idr);
- vmwgfx_ht_remove(&tdev->object_hash);
kfree(tdev);
}
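The conversion above replaces the driver-private vmwgfx_open_hash with the stock linux/hashtable.h API. A minimal sketch of that API's lifecycle, with a hypothetical item type and the RCU read-side locking elided for brevity:

#include <linux/hashtable.h>

struct item {
	struct hlist_node head;
	unsigned long key;
};

static DEFINE_HASHTABLE(table, 3);	/* 2^3 buckets, chained */

static void demo(struct item *it)
{
	struct item *cur, *found = NULL;

	hash_add_rcu(table, &it->head, it->key);

	/* Lookup walks one bucket; keys still need comparing. */
	hash_for_each_possible_rcu(table, cur, head, it->key) {
		if (cur->key == it->key) {
			found = cur;
			break;
		}
	}

	if (found)
		hash_del_rcu(&found->head);
}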
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h
index 1a2fa0f83f5f..f0ebbe340ad6 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2006-2022 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -42,8 +42,6 @@
#include <linux/list.h>
#include <linux/rcupdate.h>
-#include "vmwgfx_hashtab.h"
-
/**
* enum ttm_object_type
*
@@ -104,7 +102,7 @@ struct ttm_base_object {
struct ttm_object_file *tfile;
struct kref refcount;
void (*refcount_release) (struct ttm_base_object **base);
- u32 handle;
+ u64 handle;
enum ttm_object_type object_type;
u32 shareable;
};
@@ -164,7 +162,7 @@ extern int ttm_base_object_init(struct ttm_object_file *tfile,
*/
extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
- *tfile, uint32_t key);
+ *tfile, uint64_t key);
/**
* ttm_base_object_lookup_for_ref
@@ -178,7 +176,7 @@ extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
*/
extern struct ttm_base_object *
-ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key);
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint64_t key);
/**
* ttm_base_object_unref
@@ -237,14 +235,12 @@ extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
* ttm_object_file_init - initialize a struct ttm_object file
*
* @tdev: A struct ttm_object device this file is initialized on.
- * @hash_order: Order of the hash table used to hold the reference objects.
*
* This is typically called by the file_ops::open function.
*/
extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
- *tdev,
- unsigned int hash_order);
+ *tdev);
/**
* ttm_object_file_release - release data held by a ttm_object_file
@@ -262,7 +258,6 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
/**
* ttm_object device init - initialize a struct ttm_object_device
*
- * @hash_order: Order of hash table used to hash the base objects.
* @ops: DMA buf ops for prime objects of this device.
*
* This function is typically called on device initialization to prepare
@@ -270,8 +265,7 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
*/
extern struct ttm_object_device *
-ttm_object_device_init(unsigned int hash_order,
- const struct dma_buf_ops *ops);
+ttm_object_device_init(const struct dma_buf_ops *ops);
/**
* ttm_object_device_release - release data held by a ttm_object_device
@@ -314,7 +308,7 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
kfree_rcu(__obj, __prime.base.rhead)
struct ttm_base_object *
-ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key);
+ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key);
/**
* ttm_base_object_noref_release - release a base object pointer looked up
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 09fe20e918f9..c52c7bf1485b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -483,8 +483,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
d.src_addr = NULL;
d.dst_pages = dst->ttm->pages;
d.src_pages = src->ttm->pages;
- d.dst_num_pages = dst->resource->num_pages;
- d.src_num_pages = src->resource->num_pages;
+ d.dst_num_pages = PFN_UP(dst->resource->size);
+ d.src_num_pages = PFN_UP(src->resource->size);
d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL);
d.diff = diff;
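These hunks replace resource->num_pages with PFN_UP(resource->size), deriving the page count from the byte size. PFN_UP() from linux/pfn.h rounds a byte count up to whole pages:

#include <linux/pfn.h>

/*
 * PFN_UP(x) == (x + PAGE_SIZE - 1) >> PAGE_SHIFT: the number of whole
 * pages needed to hold x bytes. With 4 KiB pages:
 *
 *   PFN_UP(1)    == 1
 *   PFN_UP(4096) == 1
 *   PFN_UP(4097) == 2
 */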
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 822251aaab0a..321c551784a1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -194,7 +194,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
int ret = 0;
place = vmw_vram_placement.placement[0];
- place.lpfn = bo->resource->num_pages;
+ place.lpfn = PFN_UP(bo->resource->size);
placement.num_placement = 1;
placement.placement = &place;
placement.num_busy_placement = 1;
@@ -211,7 +211,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
* that situation.
*/
if (bo->resource->mem_type == TTM_PL_VRAM &&
- bo->resource->start < bo->resource->num_pages &&
+ bo->resource->start < PFN_UP(bo->resource->size) &&
bo->resource->start > 0 &&
buf->base.pin_count == 0) {
ctx.interruptible = false;
@@ -352,7 +352,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
if (virtual)
return virtual;
- ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map);
+ ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
if (ret)
DRM_ERROR("Buffer object map failed: %d.\n", ret);
@@ -807,9 +807,23 @@ int vmw_dumb_create(struct drm_file *file_priv,
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_buffer_object *vbo;
+ int cpp = DIV_ROUND_UP(args->bpp, 8);
int ret;
- args->pitch = args->width * ((args->bpp + 7) / 8);
+ switch (cpp) {
+ case 1: /* DRM_FORMAT_C8 */
+ case 2: /* DRM_FORMAT_RGB565 */
+ case 4: /* DRM_FORMAT_XRGB8888 */
+ break;
+ default:
+ /*
+ * Dumb buffers don't allow anything else.
+ * This is tested via IGT's dumb_buffers test.
+ */
+ return -EINVAL;
+ }
+
+ args->pitch = args->width * cpp;
args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
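With the whitelist in place, the pitch and size math is straightforward. A worked example (values are illustrative) for a 1280x800 XRGB8888 dumb buffer:

/*
 *   cpp   = DIV_ROUND_UP(32, 8)     = 4 bytes per pixel
 *   pitch = 1280 * 4                = 5120 bytes per line
 *   size  = ALIGN(5120 * 800, 4096) = 4096000 bytes (already page aligned)
 *
 * A bpp of, say, 24 would give cpp == 3 and be rejected with -EINVAL.
 */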
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 82ef58ccdd42..47bc0b411055 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2014-2022 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -28,6 +28,8 @@
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
+#include <linux/hashtable.h>
+
#define VMW_CMDBUF_RES_MAN_HT_ORDER 12
/**
@@ -59,7 +61,7 @@ struct vmw_cmdbuf_res {
* @resources and @list are protected by the cmdbuf mutex for now.
*/
struct vmw_cmdbuf_res_manager {
- struct vmwgfx_open_hash resources;
+ DECLARE_HASHTABLE(resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
struct list_head list;
struct vmw_private *dev_priv;
};
@@ -82,14 +84,13 @@ vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
u32 user_key)
{
struct vmwgfx_hash_item *hash;
- int ret;
unsigned long key = user_key | (res_type << 24);
- ret = vmwgfx_ht_find_item(&man->resources, key, &hash);
- if (unlikely(ret != 0))
- return ERR_PTR(ret);
-
- return drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res;
+ hash_for_each_possible_rcu(man->resources, hash, head, key) {
+ if (hash->key == key)
+ return hlist_entry(hash, struct vmw_cmdbuf_res, hash)->res;
+ }
+ return ERR_PTR(-EINVAL);
}
/**
@@ -105,7 +106,7 @@ static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
struct vmw_cmdbuf_res *entry)
{
list_del(&entry->head);
- WARN_ON(vmwgfx_ht_remove_item(&man->resources, &entry->hash));
+ hash_del_rcu(&entry->hash.head);
vmw_resource_unreference(&entry->res);
kfree(entry);
}
@@ -159,7 +160,6 @@ void vmw_cmdbuf_res_commit(struct list_head *list)
void vmw_cmdbuf_res_revert(struct list_head *list)
{
struct vmw_cmdbuf_res *entry, *next;
- int ret;
list_for_each_entry_safe(entry, next, list, head) {
switch (entry->state) {
@@ -167,8 +167,8 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
vmw_cmdbuf_res_free(entry->man, entry);
break;
case VMW_CMDBUF_RES_DEL:
- ret = vmwgfx_ht_insert_item(&entry->man->resources, &entry->hash);
- BUG_ON(ret);
+ hash_add_rcu(entry->man->resources, &entry->hash.head,
+ entry->hash.key);
list_move_tail(&entry->head, &entry->man->list);
entry->state = VMW_CMDBUF_RES_COMMITTED;
break;
@@ -199,26 +199,20 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
struct list_head *list)
{
struct vmw_cmdbuf_res *cres;
- int ret;
cres = kzalloc(sizeof(*cres), GFP_KERNEL);
if (unlikely(!cres))
return -ENOMEM;
cres->hash.key = user_key | (res_type << 24);
- ret = vmwgfx_ht_insert_item(&man->resources, &cres->hash);
- if (unlikely(ret != 0)) {
- kfree(cres);
- goto out_invalid_key;
- }
+ hash_add_rcu(man->resources, &cres->hash.head, cres->hash.key);
cres->state = VMW_CMDBUF_RES_ADD;
cres->res = vmw_resource_reference(res);
cres->man = man;
list_add_tail(&cres->head, list);
-out_invalid_key:
- return ret;
+ return 0;
}
/**
@@ -243,24 +237,26 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
struct list_head *list,
struct vmw_resource **res_p)
{
- struct vmw_cmdbuf_res *entry;
+ struct vmw_cmdbuf_res *entry = NULL;
struct vmwgfx_hash_item *hash;
- int ret;
+ unsigned long key = user_key | (res_type << 24);
- ret = vmwgfx_ht_find_item(&man->resources, user_key | (res_type << 24),
- &hash);
- if (likely(ret != 0))
+ hash_for_each_possible_rcu(man->resources, hash, head, key) {
+ if (hash->key == key) {
+ entry = hlist_entry(hash, struct vmw_cmdbuf_res, hash);
+ break;
+ }
+ }
+ if (unlikely(!entry))
return -EINVAL;
- entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash);
-
switch (entry->state) {
case VMW_CMDBUF_RES_ADD:
vmw_cmdbuf_res_free(man, entry);
*res_p = NULL;
break;
case VMW_CMDBUF_RES_COMMITTED:
- (void) vmwgfx_ht_remove_item(&man->resources, &entry->hash);
+ hash_del_rcu(&entry->hash.head);
list_del(&entry->head);
entry->state = VMW_CMDBUF_RES_DEL;
list_add_tail(&entry->head, list);
@@ -287,7 +283,6 @@ struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
{
struct vmw_cmdbuf_res_manager *man;
- int ret;
man = kzalloc(sizeof(*man), GFP_KERNEL);
if (!man)
@@ -295,12 +290,8 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
man->dev_priv = dev_priv;
INIT_LIST_HEAD(&man->list);
- ret = vmwgfx_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
- if (ret == 0)
- return man;
-
- kfree(man);
- return ERR_PTR(ret);
+ hash_init(man->resources);
+ return man;
}
/**
@@ -320,7 +311,6 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
list_for_each_entry_safe(entry, next, &man->list, head)
vmw_cmdbuf_res_free(man, entry);
- vmwgfx_ht_remove(&man->resources);
kfree(man);
}
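Throughout this file the lookup key packs the resource type into the bits above the 24-bit user key, so a single hashtable serves every resource type. With illustrative values:

/*
 *   user_key = 0x1234, res_type = 2:
 *
 *   key = user_key | (res_type << 24)
 *       = 0x00001234 | 0x02000000
 *       = 0x02001234
 *
 * Collisions between types are impossible as long as user keys stay
 * below 2^24.
 */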
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 79b30dc9d825..b78a10312fad 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -33,6 +33,7 @@
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_drv.h"
+#include "vmwgfx_mksstat.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
@@ -72,12 +73,24 @@ struct vmw_cotable_info {
bool);
};
+
+/*
+ * Getting the initial size right is difficult because it all depends
+ * on what userspace is doing. The sizes are aligned up to PAGE_SIZE,
+ * so we just want to make sure that for the majority of apps the
+ * initial number of entries doesn't require an immediate resize.
+ * For all cotables except SVGACOTableDXElementLayoutEntry and
+ * SVGACOTableDXBlendStateEntry the initial number of entries fits
+ * within a single page. For those two we reserve two pages, because
+ * that's what all apps will require initially.
+ */
static const struct vmw_cotable_info co_info[] = {
{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
- {1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
- {1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
+ {PAGE_SIZE/sizeof(SVGACOTableDXElementLayoutEntry) + 1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
+ {PAGE_SIZE/sizeof(SVGACOTableDXBlendStateEntry) + 1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
@@ -395,9 +408,12 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
int ret;
size_t i;
+ MKS_STAT_TIME_DECL(MKSSTAT_KERN_COTABLE_RESIZE);
+ MKS_STAT_TIME_PUSH(MKSSTAT_KERN_COTABLE_RESIZE);
+
ret = vmw_cotable_readback(res);
if (ret)
- return ret;
+ goto out_done;
cur_size_read_back = vcotbl->size_read_back;
vcotbl->size_read_back = old_size_read_back;
@@ -411,7 +427,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
true, true, vmw_bo_bo_free, &buf);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
- return ret;
+ goto out_done;
}
bo = &buf->base;
@@ -427,7 +443,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* Do a page by page copy of COTables. This eliminates slow vmap()s.
* This should really be a TTM utility.
*/
- for (i = 0; i < old_bo->resource->num_pages; ++i) {
+ for (i = 0; i < PFN_UP(old_bo->resource->size); ++i) {
bool dummy;
ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
@@ -485,6 +501,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
/* Release the pin acquired in vmw_bo_init */
ttm_bo_unpin(bo);
+ MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);
+
return 0;
out_map_new:
@@ -494,6 +512,9 @@ out_wait:
ttm_bo_unreserve(bo);
vmw_bo_unreference(&buf);
+out_done:
+ MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);
+
return ret;
}
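The new initial sizes follow directly from the comment above the table. Assuming a 4096-byte PAGE_SIZE and, purely for illustration, a 16-byte entry:

/*
 *   PAGE_SIZE / sizeof(entry) + 1 = 4096 / 16 + 1 = 257 entries
 *   257 * 16 = 4112 bytes -> rounds up to two pages
 *
 * so the two oversized cotables start out two pages big and typical
 * initial workloads never trigger an immediate resize.
 */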
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index d7bd5eb1d3ac..63496773f714 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -25,13 +25,17 @@
*
**************************************************************************/
-#include <linux/dma-mapping.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/cc_platform.h>
+
+#include "vmwgfx_drv.h"
+
+#include "vmwgfx_devcaps.h"
+#include "vmwgfx_mksstat.h"
+#include "vmwgfx_binding.h"
+#include "ttm_object.h"
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_module.h>
@@ -41,17 +45,14 @@
#include <drm/ttm/ttm_placement.h>
#include <generated/utsrelease.h>
-#include "ttm_object.h"
-#include "vmwgfx_binding.h"
-#include "vmwgfx_devcaps.h"
-#include "vmwgfx_drv.h"
-#include "vmwgfx_mksstat.h"
+#include <linux/cc_platform.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/version.h>
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
-#define VMW_MIN_INITIAL_WIDTH 800
-#define VMW_MIN_INITIAL_HEIGHT 600
-
/*
* Fully encoded drm commands. Might move to vmw_drm.h
*/
@@ -262,7 +263,6 @@ static const struct pci_device_id vmw_pci_id_list[] = {
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
-static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
@@ -272,8 +272,6 @@ static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr);
-MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
-module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
@@ -623,8 +621,8 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
width = vmw_read(dev_priv, SVGA_REG_WIDTH);
height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
- width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
- height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);
+ width = max_t(uint32_t, width, VMWGFX_MIN_INITIAL_WIDTH);
+ height = max_t(uint32_t, height, VMWGFX_MIN_INITIAL_HEIGHT);
if (width > dev_priv->fb_max_width ||
height > dev_priv->fb_max_height) {
@@ -633,8 +631,8 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
* This is a host error and shouldn't occur.
*/
- width = VMW_MIN_INITIAL_WIDTH;
- height = VMW_MIN_INITIAL_HEIGHT;
+ width = VMWGFX_MIN_INITIAL_WIDTH;
+ height = VMWGFX_MIN_INITIAL_HEIGHT;
}
dev_priv->initial_width = width;
@@ -806,6 +804,43 @@ static int vmw_detect_version(struct vmw_private *dev)
return 0;
}
+static void vmw_write_driver_id(struct vmw_private *dev)
+{
+ if ((dev->capabilities2 & SVGA_CAP2_DX2) != 0) {
+ vmw_write(dev, SVGA_REG_GUEST_DRIVER_ID,
+ SVGA_REG_GUEST_DRIVER_ID_LINUX);
+
+ vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION1,
+ LINUX_VERSION_MAJOR << 24 |
+ LINUX_VERSION_PATCHLEVEL << 16 |
+ LINUX_VERSION_SUBLEVEL);
+ vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION2,
+ VMWGFX_DRIVER_MAJOR << 24 |
+ VMWGFX_DRIVER_MINOR << 16 |
+ VMWGFX_DRIVER_PATCHLEVEL);
+ vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION3, 0);
+
+ vmw_write(dev, SVGA_REG_GUEST_DRIVER_ID,
+ SVGA_REG_GUEST_DRIVER_ID_SUBMIT);
+ }
+}
+
+static void vmw_sw_context_init(struct vmw_private *dev_priv)
+{
+ struct vmw_sw_context *sw_context = &dev_priv->ctx;
+
+ hash_init(sw_context->res_ht);
+}
+
+static void vmw_sw_context_fini(struct vmw_private *dev_priv)
+{
+ struct vmw_sw_context *sw_context = &dev_priv->ctx;
+
+ vfree(sw_context->cmd_bounce);
+ if (sw_context->staged_bindings)
+ vmw_binding_state_free(sw_context->staged_bindings);
+}
+
static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
{
int ret;
@@ -815,6 +850,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
dev_priv->drm.dev_private = dev_priv;
+ vmw_sw_context_init(dev_priv);
+
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->binding_mutex);
spin_lock_init(&dev_priv->resource_lock);
@@ -844,9 +881,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
dev_priv->assume_16bpp = !!vmw_assume_16bpp;
- dev_priv->enable_fb = enable_fbdev;
-
-
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
vmw_print_bitmap(&dev_priv->drm, "Capabilities",
dev_priv->capabilities,
@@ -970,7 +1004,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
goto out_err0;
}
- dev_priv->tdev = ttm_object_device_init(12, &vmw_prime_dmabuf_ops);
+ dev_priv->tdev = ttm_object_device_init(&vmw_prime_dmabuf_ops);
if (unlikely(dev_priv->tdev == NULL)) {
drm_err(&dev_priv->drm,
@@ -1091,12 +1125,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)",
VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE);
-
- if (dev_priv->enable_fb) {
- vmw_fifo_resource_inc(dev_priv);
- vmw_svga_enable(dev_priv);
- vmw_fb_init(dev_priv);
- }
+ vmw_write_driver_id(dev_priv);
dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
register_pm_notifier(&dev_priv->pm_nb);
@@ -1143,15 +1172,10 @@ static void vmw_driver_unload(struct drm_device *dev)
unregister_pm_notifier(&dev_priv->pm_nb);
- if (dev_priv->ctx.res_ht_initialized)
- vmwgfx_ht_remove(&dev_priv->ctx.res_ht);
- vfree(dev_priv->ctx.cmd_bounce);
- if (dev_priv->enable_fb) {
- vmw_fb_off(dev_priv);
- vmw_fb_close(dev_priv);
- vmw_fifo_resource_dec(dev_priv);
- vmw_svga_disable(dev_priv);
- }
+ vmw_sw_context_fini(dev_priv);
+ vmw_fifo_resource_dec(dev_priv);
+
+ vmw_svga_disable(dev_priv);
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
@@ -1173,8 +1197,6 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_irq_uninstall(&dev_priv->drm);
ttm_object_device_release(&dev_priv->tdev);
- if (dev_priv->ctx.staged_bindings)
- vmw_binding_state_free(dev_priv->ctx.staged_bindings);
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
@@ -1203,7 +1225,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
if (unlikely(!vmw_fp))
return ret;
- vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
+ vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev);
if (unlikely(vmw_fp->tfile == NULL))
goto out_no_tfile;
@@ -1291,8 +1313,6 @@ static void vmw_master_drop(struct drm_device *dev,
struct vmw_private *dev_priv = vmw_priv(dev);
vmw_kms_legacy_hotspot_clear(dev_priv);
- if (!dev_priv->enable_fb)
- vmw_svga_disable(dev_priv);
}
/**
@@ -1485,25 +1505,19 @@ static int vmw_pm_freeze(struct device *kdev)
DRM_ERROR("Failed to freeze modesetting.\n");
return ret;
}
- if (dev_priv->enable_fb)
- vmw_fb_off(dev_priv);
vmw_execbuf_release_pinned_bo(dev_priv);
vmw_resource_evict_all(dev_priv);
vmw_release_device_early(dev_priv);
while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0);
- if (dev_priv->enable_fb)
- vmw_fifo_resource_dec(dev_priv);
+ vmw_fifo_resource_dec(dev_priv);
if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
DRM_ERROR("Can't hibernate while 3D resources are active.\n");
- if (dev_priv->enable_fb)
- vmw_fifo_resource_inc(dev_priv);
+ vmw_fifo_resource_inc(dev_priv);
WARN_ON(vmw_request_device_late(dev_priv));
dev_priv->suspend_locked = false;
if (dev_priv->suspend_state)
vmw_kms_resume(dev);
- if (dev_priv->enable_fb)
- vmw_fb_on(dev_priv);
return -EBUSY;
}
@@ -1523,24 +1537,19 @@ static int vmw_pm_restore(struct device *kdev)
vmw_detect_version(dev_priv);
- if (dev_priv->enable_fb)
- vmw_fifo_resource_inc(dev_priv);
+ vmw_fifo_resource_inc(dev_priv);
ret = vmw_request_device(dev_priv);
if (ret)
return ret;
- if (dev_priv->enable_fb)
- __vmw_svga_enable(dev_priv);
+ __vmw_svga_enable(dev_priv);
vmw_fence_fifo_up(dev_priv->fman);
dev_priv->suspend_locked = false;
if (dev_priv->suspend_state)
vmw_kms_resume(&dev_priv->drm);
- if (dev_priv->enable_fb)
- vmw_fb_on(dev_priv);
-
return 0;
}
@@ -1631,6 +1640,10 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_unload;
+ vmw_fifo_resource_inc(vmw);
+ vmw_svga_enable(vmw);
+ drm_fbdev_generic_setup(&vmw->drm, 0);
+
vmw_debugfs_gem_init(vmw);
vmw_debugfs_resource_managers_init(vmw);
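vmw_write_driver_id() above packs three version components into a single 32-bit register value: major in bits 31:24, minor in 23:16, and the patch level in the low bits. For a hypothetical 6.1.0 kernel:

/*
 *   6 << 24 | 1 << 16 | 0 == 0x06010000
 *
 * The same layout is reused for VMWGFX_DRIVER_MAJOR/MINOR/PATCHLEVEL
 * in SVGA_REG_GUEST_DRIVER_VERSION2.
 */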
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 09e2d738aa87..b062b020b378 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -30,6 +30,7 @@
#include <linux/suspend.h>
#include <linux/sync_file.h>
+#include <linux/hashtable.h>
#include <drm/drm_auth.h>
#include <drm/drm_device.h>
@@ -42,7 +43,6 @@
#include "ttm_object.h"
#include "vmwgfx_fence.h"
-#include "vmwgfx_hashtab.h"
#include "vmwgfx_reg.h"
#include "vmwgfx_validation.h"
@@ -62,6 +62,9 @@
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
+#define VMWGFX_MIN_INITIAL_WIDTH 1280
+#define VMWGFX_MIN_INITIAL_HEIGHT 800
+
#define VMWGFX_PCI_ID_SVGA2 0x0405
#define VMWGFX_PCI_ID_SVGA3 0x0406
@@ -93,6 +96,11 @@
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4
+#define VMW_RES_HT_ORDER 12
+
+#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
+#define VMW_CURSOR_SNOOP_WIDTH 64
+#define VMW_CURSOR_SNOOP_HEIGHT 64
#define MKSSTAT_CAPACITY_LOG2 5U
#define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)
@@ -102,6 +110,11 @@ struct vmw_fpriv {
bool gb_aware; /* user-space is guest-backed aware */
};
+struct vmwgfx_hash_item {
+ struct hlist_node head;
+ unsigned long key;
+};
+
/**
* struct vmw_buffer_object - TTM buffer object with vmwgfx additions
* @base: The TTM buffer object
@@ -425,8 +438,7 @@ struct vmw_ctx_validation_info;
* @ctx: The validation context
*/
struct vmw_sw_context{
- struct vmwgfx_open_hash res_ht;
- bool res_ht_initialized;
+ DECLARE_HASHTABLE(res_ht, VMW_RES_HT_ORDER);
bool kernel;
struct vmw_fpriv *fp;
struct drm_file *filp;
@@ -546,7 +558,6 @@ struct vmw_private {
* Framebuffer info.
*/
- void *fb_info;
enum vmw_display_unit_type active_display_unit;
struct vmw_legacy_display *ldu_priv;
struct vmw_overlay *overlay_priv;
@@ -605,8 +616,6 @@ struct vmw_private {
struct mutex cmdbuf_mutex;
struct mutex binding_mutex;
- bool enable_fb;
-
/**
* PM management.
*/
@@ -1184,35 +1193,6 @@ extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count);
-
-/**
- * Kernel framebuffer - vmwgfx_fb.c
- */
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-int vmw_fb_init(struct vmw_private *vmw_priv);
-int vmw_fb_close(struct vmw_private *dev_priv);
-int vmw_fb_off(struct vmw_private *vmw_priv);
-int vmw_fb_on(struct vmw_private *vmw_priv);
-#else
-static inline int vmw_fb_init(struct vmw_private *vmw_priv)
-{
- return 0;
-}
-static inline int vmw_fb_close(struct vmw_private *dev_priv)
-{
- return 0;
-}
-static inline int vmw_fb_off(struct vmw_private *vmw_priv)
-{
- return 0;
-}
-static inline int vmw_fb_on(struct vmw_private *vmw_priv)
-{
- return 0;
-}
-#endif
-
/**
* Kernel modesetting - vmwgfx_kms.c
*/
@@ -1232,9 +1212,6 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height);
-u32 vmw_get_vblank_counter(struct drm_crtc *crtc);
-int vmw_enable_vblank(struct drm_crtc *crtc);
-void vmw_disable_vblank(struct drm_crtc *crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index f085dbd4736d..a5379f6fb5ab 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009 - 2022 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,6 +25,7 @@
*
**************************************************************************/
#include <linux/sync_file.h>
+#include <linux/hashtable.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
@@ -34,7 +35,6 @@
#include "vmwgfx_binding.h"
#include "vmwgfx_mksstat.h"
-#define VMW_RES_HT_ORDER 12
/*
* Helper macro to get dx_ctx_node if available otherwise print an error
@@ -1047,7 +1047,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
- if (unlikely(new_query_bo->base.resource->num_pages > 4)) {
+ if (unlikely(PFN_UP(new_query_bo->base.resource->size) > 4)) {
VMW_DEBUG_USER("Query buffer too large.\n");
return -EINVAL;
}
@@ -3869,7 +3869,6 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
* @fence: Pointer to the fence object.
* @fence_handle: User-space fence handle.
* @out_fence_fd: exported file descriptor for the fence. -1 if not used
- * @sync_file: Only used to clean up in case of an error in this function.
*
* This function copies fence information to user-space. If copying fails, the
* user-space struct drm_vmw_fence_rep::error member is hopefully left
@@ -4101,7 +4100,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
int ret;
int32_t out_fence_fd = -1;
struct sync_file *sync_file = NULL;
- DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
+ DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
@@ -4164,14 +4163,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (sw_context->staged_bindings)
vmw_binding_state_reset(sw_context->staged_bindings);
- if (!sw_context->res_ht_initialized) {
- ret = vmwgfx_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
- if (unlikely(ret != 0))
- goto out_unlock;
-
- sw_context->res_ht_initialized = true;
- }
-
INIT_LIST_HEAD(&sw_context->staged_cmd_res);
sw_context->ctx = &val_ctx;
ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
deleted file mode 100644
index 5b85b477e4c6..000000000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ /dev/null
@@ -1,831 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2007 David Airlie
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include <linux/fb.h>
-#include <linux/pci.h>
-
-#include <drm/drm_fourcc.h>
-#include <drm/ttm/ttm_placement.h>
-
-#include "vmwgfx_drv.h"
-#include "vmwgfx_kms.h"
-
-#define VMW_DIRTY_DELAY (HZ / 30)
-
-struct vmw_fb_par {
- struct vmw_private *vmw_priv;
-
- void *vmalloc;
-
- struct mutex bo_mutex;
- struct vmw_buffer_object *vmw_bo;
- unsigned bo_size;
- struct drm_framebuffer *set_fb;
- struct drm_display_mode *set_mode;
- u32 fb_x;
- u32 fb_y;
- bool bo_iowrite;
-
- u32 pseudo_palette[17];
-
- unsigned max_width;
- unsigned max_height;
-
- struct {
- spinlock_t lock;
- bool active;
- unsigned x1;
- unsigned y1;
- unsigned x2;
- unsigned y2;
- } dirty;
-
- struct drm_crtc *crtc;
- struct drm_connector *con;
- struct delayed_work local_work;
-};
-
-static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
- struct fb_info *info)
-{
- struct vmw_fb_par *par = info->par;
- u32 *pal = par->pseudo_palette;
-
- if (regno > 15) {
- DRM_ERROR("Bad regno %u.\n", regno);
- return 1;
- }
-
- switch (par->set_fb->format->depth) {
- case 24:
- case 32:
- pal[regno] = ((red & 0xff00) << 8) |
- (green & 0xff00) |
- ((blue & 0xff00) >> 8);
- break;
- default:
- DRM_ERROR("Bad depth %u, bpp %u.\n",
- par->set_fb->format->depth,
- par->set_fb->format->cpp[0] * 8);
- return 1;
- }
-
- return 0;
-}
-
-static int vmw_fb_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- int depth = var->bits_per_pixel;
- struct vmw_fb_par *par = info->par;
- struct vmw_private *vmw_priv = par->vmw_priv;
-
- switch (var->bits_per_pixel) {
- case 32:
- depth = (var->transp.length > 0) ? 32 : 24;
- break;
- default:
- DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
- return -EINVAL;
- }
-
- switch (depth) {
- case 24:
- var->red.offset = 16;
- var->green.offset = 8;
- var->blue.offset = 0;
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- var->transp.length = 0;
- var->transp.offset = 0;
- break;
- case 32:
- var->red.offset = 16;
- var->green.offset = 8;
- var->blue.offset = 0;
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- var->transp.length = 8;
- var->transp.offset = 24;
- break;
- default:
- DRM_ERROR("Bad depth %u.\n", depth);
- return -EINVAL;
- }
-
- if ((var->xoffset + var->xres) > par->max_width ||
- (var->yoffset + var->yres) > par->max_height) {
- DRM_ERROR("Requested geom can not fit in framebuffer\n");
- return -EINVAL;
- }
-
- if (!vmw_kms_validate_mode_vram(vmw_priv,
- var->xres * var->bits_per_pixel/8,
- var->yoffset + var->yres)) {
- DRM_ERROR("Requested geom can not fit in framebuffer\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int vmw_fb_blank(int blank, struct fb_info *info)
-{
- return 0;
-}
-
-/**
- * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
- *
- * @work: The struct work_struct associated with this task.
- *
- * This function flushes the dirty regions of the vmalloc framebuffer to the
- * kms framebuffer, and if the kms framebuffer is visible, also updates the
- * corresponding displays. Note that this function runs even if the kms
- * framebuffer is not bound to a crtc and thus not visible, but it's turned
- * off during hibernation using the par->dirty.active bool.
- */
-static void vmw_fb_dirty_flush(struct work_struct *work)
-{
- struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
- local_work.work);
- struct vmw_private *vmw_priv = par->vmw_priv;
- struct fb_info *info = vmw_priv->fb_info;
- unsigned long irq_flags;
- s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
- u32 cpp, max_x, max_y;
- struct drm_clip_rect clip;
- struct drm_framebuffer *cur_fb;
- u8 *src_ptr, *dst_ptr;
- struct vmw_buffer_object *vbo = par->vmw_bo;
- void *virtual;
-
- if (!READ_ONCE(par->dirty.active))
- return;
-
- mutex_lock(&par->bo_mutex);
- cur_fb = par->set_fb;
- if (!cur_fb)
- goto out_unlock;
-
- (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
- virtual = vmw_bo_map_and_cache(vbo);
- if (!virtual)
- goto out_unreserve;
-
- spin_lock_irqsave(&par->dirty.lock, irq_flags);
- if (!par->dirty.active) {
- spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
- goto out_unreserve;
- }
-
- /*
- * Handle panning when copying from vmalloc to framebuffer.
- * Clip dirty area to framebuffer.
- */
- cpp = cur_fb->format->cpp[0];
- max_x = par->fb_x + cur_fb->width;
- max_y = par->fb_y + cur_fb->height;
-
- dst_x1 = par->dirty.x1 - par->fb_x;
- dst_y1 = par->dirty.y1 - par->fb_y;
- dst_x1 = max_t(s32, dst_x1, 0);
- dst_y1 = max_t(s32, dst_y1, 0);
-
- dst_x2 = par->dirty.x2 - par->fb_x;
- dst_y2 = par->dirty.y2 - par->fb_y;
- dst_x2 = min_t(s32, dst_x2, max_x);
- dst_y2 = min_t(s32, dst_y2, max_y);
- w = dst_x2 - dst_x1;
- h = dst_y2 - dst_y1;
- w = max_t(s32, 0, w);
- h = max_t(s32, 0, h);
-
- par->dirty.x1 = par->dirty.x2 = 0;
- par->dirty.y1 = par->dirty.y2 = 0;
- spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
-
- if (w && h) {
- dst_ptr = (u8 *)virtual +
- (dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
- src_ptr = (u8 *)par->vmalloc +
- ((dst_y1 + par->fb_y) * info->fix.line_length +
- (dst_x1 + par->fb_x) * cpp);
-
- while (h-- > 0) {
- memcpy(dst_ptr, src_ptr, w*cpp);
- dst_ptr += par->set_fb->pitches[0];
- src_ptr += info->fix.line_length;
- }
-
- clip.x1 = dst_x1;
- clip.x2 = dst_x2;
- clip.y1 = dst_y1;
- clip.y2 = dst_y2;
- }
-
-out_unreserve:
- ttm_bo_unreserve(&vbo->base);
- if (w && h) {
- WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
- &clip, 1));
- vmw_cmd_flush(vmw_priv, false);
- }
-out_unlock:
- mutex_unlock(&par->bo_mutex);
-}
-
-static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
- unsigned x1, unsigned y1,
- unsigned width, unsigned height)
-{
- unsigned long flags;
- unsigned x2 = x1 + width;
- unsigned y2 = y1 + height;
-
- spin_lock_irqsave(&par->dirty.lock, flags);
- if (par->dirty.x1 == par->dirty.x2) {
- par->dirty.x1 = x1;
- par->dirty.y1 = y1;
- par->dirty.x2 = x2;
- par->dirty.y2 = y2;
- /* If we are active, start the dirty work;
- * we share the work with the defio system. */
- if (par->dirty.active)
- schedule_delayed_work(&par->local_work,
- VMW_DIRTY_DELAY);
- } else {
- if (x1 < par->dirty.x1)
- par->dirty.x1 = x1;
- if (y1 < par->dirty.y1)
- par->dirty.y1 = y1;
- if (x2 > par->dirty.x2)
- par->dirty.x2 = x2;
- if (y2 > par->dirty.y2)
- par->dirty.y2 = y2;
- }
- spin_unlock_irqrestore(&par->dirty.lock, flags);
-}
-
-static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- struct vmw_fb_par *par = info->par;
-
- if ((var->xoffset + var->xres) > var->xres_virtual ||
- (var->yoffset + var->yres) > var->yres_virtual) {
- DRM_ERROR("Requested panning can not fit in framebuffer\n");
- return -EINVAL;
- }
-
- mutex_lock(&par->bo_mutex);
- par->fb_x = var->xoffset;
- par->fb_y = var->yoffset;
- if (par->set_fb)
- vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
- par->set_fb->height);
- mutex_unlock(&par->bo_mutex);
-
- return 0;
-}
-
-static void vmw_deferred_io(struct fb_info *info, struct list_head *pagereflist)
-{
- struct vmw_fb_par *par = info->par;
- unsigned long start, end, min, max;
- unsigned long flags;
- struct fb_deferred_io_pageref *pageref;
- int y1, y2;
-
- min = ULONG_MAX;
- max = 0;
- list_for_each_entry(pageref, pagereflist, list) {
- start = pageref->offset;
- end = start + PAGE_SIZE - 1;
- min = min(min, start);
- max = max(max, end);
- }
-
- if (min < max) {
- y1 = min / info->fix.line_length;
- y2 = (max / info->fix.line_length) + 1;
-
- spin_lock_irqsave(&par->dirty.lock, flags);
- par->dirty.x1 = 0;
- par->dirty.y1 = y1;
- par->dirty.x2 = info->var.xres;
- par->dirty.y2 = y2;
- spin_unlock_irqrestore(&par->dirty.lock, flags);
-
- /*
- * Since we've already waited on this work once, try to
- * execute asap.
- */
- cancel_delayed_work(&par->local_work);
- schedule_delayed_work(&par->local_work, 0);
- }
-};
-
-static struct fb_deferred_io vmw_defio = {
- .delay = VMW_DIRTY_DELAY,
- .deferred_io = vmw_deferred_io,
-};
-
-/*
- * Draw code
- */
-
-static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
-{
- cfb_fillrect(info, rect);
- vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
- rect->width, rect->height);
-}
-
-static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
-{
- cfb_copyarea(info, region);
- vmw_fb_dirty_mark(info->par, region->dx, region->dy,
- region->width, region->height);
-}
-
-static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
- cfb_imageblit(info, image);
- vmw_fb_dirty_mark(info->par, image->dx, image->dy,
- image->width, image->height);
-}
-
-/*
- * Bring up code
- */
-
-static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
- size_t size, struct vmw_buffer_object **out)
-{
- struct vmw_buffer_object *vmw_bo;
- int ret;
-
- ret = vmw_bo_create(vmw_priv, size,
- &vmw_sys_placement,
- false, false,
- &vmw_bo_bo_free, &vmw_bo);
- if (unlikely(ret != 0))
- return ret;
-
- *out = vmw_bo;
-
- return ret;
-}
-
-static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
- int *depth)
-{
- switch (var->bits_per_pixel) {
- case 32:
- *depth = (var->transp.length > 0) ? 32 : 24;
- break;
- default:
- DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int vmwgfx_set_config_internal(struct drm_mode_set *set)
-{
- struct drm_crtc *crtc = set->crtc;
- struct drm_modeset_acquire_ctx ctx;
- int ret;
-
- drm_modeset_acquire_init(&ctx, 0);
-
-restart:
- ret = crtc->funcs->set_config(set, &ctx);
-
- if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto restart;
- }
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- return ret;
-}
-
-static int vmw_fb_kms_detach(struct vmw_fb_par *par,
- bool detach_bo,
- bool unref_bo)
-{
- struct drm_framebuffer *cur_fb = par->set_fb;
- int ret;
-
- /* Detach the KMS framebuffer from crtcs */
- if (par->set_mode) {
- struct drm_mode_set set;
-
- set.crtc = par->crtc;
- set.x = 0;
- set.y = 0;
- set.mode = NULL;
- set.fb = NULL;
- set.num_connectors = 0;
- set.connectors = &par->con;
- ret = vmwgfx_set_config_internal(&set);
- if (ret) {
- DRM_ERROR("Could not unset a mode.\n");
- return ret;
- }
- drm_mode_destroy(&par->vmw_priv->drm, par->set_mode);
- par->set_mode = NULL;
- }
-
- if (cur_fb) {
- drm_framebuffer_put(cur_fb);
- par->set_fb = NULL;
- }
-
- if (par->vmw_bo && detach_bo && unref_bo)
- vmw_bo_unreference(&par->vmw_bo);
-
- return 0;
-}
-
-static int vmw_fb_kms_framebuffer(struct fb_info *info)
-{
- struct drm_mode_fb_cmd2 mode_cmd = {0};
- struct vmw_fb_par *par = info->par;
- struct fb_var_screeninfo *var = &info->var;
- struct drm_framebuffer *cur_fb;
- struct vmw_framebuffer *vfb;
- int ret = 0, depth;
- size_t new_bo_size;
-
- ret = vmw_fb_compute_depth(var, &depth);
- if (ret)
- return ret;
-
- mode_cmd.width = var->xres;
- mode_cmd.height = var->yres;
- mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
- mode_cmd.pixel_format =
- drm_mode_legacy_fb_format(var->bits_per_pixel, depth);
-
- cur_fb = par->set_fb;
- if (cur_fb && cur_fb->width == mode_cmd.width &&
- cur_fb->height == mode_cmd.height &&
- cur_fb->format->format == mode_cmd.pixel_format &&
- cur_fb->pitches[0] == mode_cmd.pitches[0])
- return 0;
-
- /* Need new buffer object ? */
- new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
- ret = vmw_fb_kms_detach(par,
- par->bo_size < new_bo_size ||
- par->bo_size > 2*new_bo_size,
- true);
- if (ret)
- return ret;
-
- if (!par->vmw_bo) {
- ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
- &par->vmw_bo);
- if (ret) {
- DRM_ERROR("Failed creating a buffer object for "
- "fbdev.\n");
- return ret;
- }
- par->bo_size = new_bo_size;
- }
-
- vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
- true, &mode_cmd);
- if (IS_ERR(vfb))
- return PTR_ERR(vfb);
-
- par->set_fb = &vfb->base;
-
- return 0;
-}
-
-static int vmw_fb_set_par(struct fb_info *info)
-{
- struct vmw_fb_par *par = info->par;
- struct vmw_private *vmw_priv = par->vmw_priv;
- struct drm_mode_set set;
- struct fb_var_screeninfo *var = &info->var;
- struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
- DRM_MODE_TYPE_DRIVER,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
- };
- struct drm_display_mode *mode;
- int ret;
-
- mode = drm_mode_duplicate(&vmw_priv->drm, &new_mode);
- if (!mode) {
- DRM_ERROR("Could not create new fb mode.\n");
- return -ENOMEM;
- }
-
- mode->hdisplay = var->xres;
- mode->vdisplay = var->yres;
- vmw_guess_mode_timing(mode);
-
- if (!vmw_kms_validate_mode_vram(vmw_priv,
- mode->hdisplay *
- DIV_ROUND_UP(var->bits_per_pixel, 8),
- mode->vdisplay)) {
- drm_mode_destroy(&vmw_priv->drm, mode);
- return -EINVAL;
- }
-
- mutex_lock(&par->bo_mutex);
- ret = vmw_fb_kms_framebuffer(info);
- if (ret)
- goto out_unlock;
-
- par->fb_x = var->xoffset;
- par->fb_y = var->yoffset;
-
- set.crtc = par->crtc;
- set.x = 0;
- set.y = 0;
- set.mode = mode;
- set.fb = par->set_fb;
- set.num_connectors = 1;
- set.connectors = &par->con;
-
- ret = vmwgfx_set_config_internal(&set);
- if (ret)
- goto out_unlock;
-
- vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
- par->set_fb->width, par->set_fb->height);
-
- /* If something was already dirty we won't
- * schedule new work, so let's do it now. */
-
- schedule_delayed_work(&par->local_work, 0);
-
-out_unlock:
- if (par->set_mode)
- drm_mode_destroy(&vmw_priv->drm, par->set_mode);
- par->set_mode = mode;
-
- mutex_unlock(&par->bo_mutex);
-
- return ret;
-}
-
-
-static const struct fb_ops vmw_fb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = vmw_fb_check_var,
- .fb_set_par = vmw_fb_set_par,
- .fb_setcolreg = vmw_fb_setcolreg,
- .fb_fillrect = vmw_fb_fillrect,
- .fb_copyarea = vmw_fb_copyarea,
- .fb_imageblit = vmw_fb_imageblit,
- .fb_pan_display = vmw_fb_pan_display,
- .fb_blank = vmw_fb_blank,
- .fb_mmap = fb_deferred_io_mmap,
-};
-
-int vmw_fb_init(struct vmw_private *vmw_priv)
-{
- struct device *device = vmw_priv->drm.dev;
- struct vmw_fb_par *par;
- struct fb_info *info;
- unsigned fb_width, fb_height;
- unsigned int fb_bpp, fb_pitch, fb_size;
- struct drm_display_mode *init_mode;
- int ret;
-
- fb_bpp = 32;
-
-	/* XXX: Shouldn't these limits be queried as well? */
- fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
- fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
-
- fb_pitch = fb_width * fb_bpp / 8;
- fb_size = fb_pitch * fb_height;
-
- info = framebuffer_alloc(sizeof(*par), device);
- if (!info)
- return -ENOMEM;
-
- /*
- * Par
- */
- vmw_priv->fb_info = info;
- par = info->par;
- memset(par, 0, sizeof(*par));
- INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
- par->vmw_priv = vmw_priv;
- par->vmalloc = NULL;
- par->max_width = fb_width;
- par->max_height = fb_height;
-
- ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
- par->max_height, &par->con,
- &par->crtc, &init_mode);
- if (ret)
- goto err_kms;
-
- info->var.xres = init_mode->hdisplay;
- info->var.yres = init_mode->vdisplay;
-
- /*
- * Create buffers and alloc memory
- */
- par->vmalloc = vzalloc(fb_size);
- if (unlikely(par->vmalloc == NULL)) {
- ret = -ENOMEM;
- goto err_free;
- }
-
- /*
- * Fixed and var
- */
- strcpy(info->fix.id, "svgadrmfb");
- info->fix.type = FB_TYPE_PACKED_PIXELS;
- info->fix.visual = FB_VISUAL_TRUECOLOR;
- info->fix.type_aux = 0;
- info->fix.xpanstep = 1; /* doing it in hw */
- info->fix.ypanstep = 1; /* doing it in hw */
- info->fix.ywrapstep = 0;
- info->fix.accel = FB_ACCEL_NONE;
- info->fix.line_length = fb_pitch;
-
- info->fix.smem_start = 0;
- info->fix.smem_len = fb_size;
-
- info->pseudo_palette = par->pseudo_palette;
- info->screen_base = (char __iomem *)par->vmalloc;
- info->screen_size = fb_size;
-
- info->fbops = &vmw_fb_ops;
-
- /* 24 depth per default */
- info->var.red.offset = 16;
- info->var.green.offset = 8;
- info->var.blue.offset = 0;
- info->var.red.length = 8;
- info->var.green.length = 8;
- info->var.blue.length = 8;
- info->var.transp.offset = 0;
- info->var.transp.length = 0;
-
- info->var.xres_virtual = fb_width;
- info->var.yres_virtual = fb_height;
- info->var.bits_per_pixel = fb_bpp;
- info->var.xoffset = 0;
- info->var.yoffset = 0;
- info->var.activate = FB_ACTIVATE_NOW;
- info->var.height = -1;
- info->var.width = -1;
-
- /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto err_aper;
- }
- info->apertures->ranges[0].base = vmw_priv->vram_start;
- info->apertures->ranges[0].size = vmw_priv->vram_size;
-
- /*
- * Dirty & Deferred IO
- */
- par->dirty.x1 = par->dirty.x2 = 0;
- par->dirty.y1 = par->dirty.y2 = 0;
- par->dirty.active = true;
- spin_lock_init(&par->dirty.lock);
- mutex_init(&par->bo_mutex);
- info->fbdefio = &vmw_defio;
- fb_deferred_io_init(info);
-
- ret = register_framebuffer(info);
- if (unlikely(ret != 0))
- goto err_defio;
-
- vmw_fb_set_par(info);
-
- return 0;
-
-err_defio:
- fb_deferred_io_cleanup(info);
-err_aper:
-err_free:
- vfree(par->vmalloc);
-err_kms:
- framebuffer_release(info);
- vmw_priv->fb_info = NULL;
-
- return ret;
-}
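
The init path above wires the fbdev deferred-I/O machinery: writes land in a vmalloc'd shadow buffer, dirtied pages are coalesced, and a delayed worker flushes them to the device. A minimal sketch of that pattern follows; example_defio and example_dirty_flush are hypothetical names, not part of this driver, and the callback signature assumes a recent fbdev core:

#include <linux/fb.h>

/* Hypothetical sketch of the fb_deferred_io wiring done in vmw_fb_init().
 * The core tracks pages dirtied through mmap/write and invokes the
 * callback after .delay jiffies so updates are batched. */
static void example_dirty_flush(struct fb_info *info,
				struct list_head *pagereflist)
{
	/* copy the dirty region of the shadow buffer to the device */
}

static struct fb_deferred_io example_defio = {
	.delay		= HZ / 30,	/* coalesce updates for ~33 ms */
	.deferred_io	= example_dirty_flush,
};

/* as in vmw_fb_init():
 *	info->fbdefio = &example_defio;
 *	fb_deferred_io_init(info);
 */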
-
-int vmw_fb_close(struct vmw_private *vmw_priv)
-{
- struct fb_info *info;
- struct vmw_fb_par *par;
-
- if (!vmw_priv->fb_info)
- return 0;
-
- info = vmw_priv->fb_info;
- par = info->par;
-
-	/* Teardown order: flush deferred I/O and the dirty worker before unregistering */
- fb_deferred_io_cleanup(info);
- cancel_delayed_work_sync(&par->local_work);
- unregister_framebuffer(info);
-
- mutex_lock(&par->bo_mutex);
- (void) vmw_fb_kms_detach(par, true, true);
- mutex_unlock(&par->bo_mutex);
-
- vfree(par->vmalloc);
- framebuffer_release(info);
-
- return 0;
-}
-
-int vmw_fb_off(struct vmw_private *vmw_priv)
-{
- struct fb_info *info;
- struct vmw_fb_par *par;
- unsigned long flags;
-
- if (!vmw_priv->fb_info)
- return -EINVAL;
-
- info = vmw_priv->fb_info;
- par = info->par;
-
- spin_lock_irqsave(&par->dirty.lock, flags);
- par->dirty.active = false;
- spin_unlock_irqrestore(&par->dirty.lock, flags);
-
- flush_delayed_work(&info->deferred_work);
- flush_delayed_work(&par->local_work);
-
- return 0;
-}
-
-int vmw_fb_on(struct vmw_private *vmw_priv)
-{
- struct fb_info *info;
- struct vmw_fb_par *par;
- unsigned long flags;
-
- if (!vmw_priv->fb_info)
- return -EINVAL;
-
- info = vmw_priv->fb_info;
- par = info->par;
-
- spin_lock_irqsave(&par->dirty.lock, flags);
- par->dirty.active = true;
- spin_unlock_irqrestore(&par->dirty.lock, flags);
-
- /*
- * Need to reschedule a dirty update, because otherwise that's
- * only done in dirty_mark() if the previous coalesced
- * dirty region was empty.
- */
- schedule_delayed_work(&par->local_work, 0);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 60e3cc537f36..abd5e3323ebf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -71,7 +71,7 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
spin_lock(&gman->lock);
if (gman->max_gmr_pages > 0) {
- gman->used_gmr_pages += (*res)->num_pages;
+ gman->used_gmr_pages += PFN_UP((*res)->size);
/*
* Because the graphics memory is a soft limit we can try to
* expand it instead of letting the userspace apps crash.
@@ -114,7 +114,7 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
return 0;
nospace:
- gman->used_gmr_pages -= (*res)->num_pages;
+ gman->used_gmr_pages -= PFN_UP((*res)->size);
spin_unlock(&gman->lock);
ida_free(&gman->gmr_ida, id);
ttm_resource_fini(man, *res);
@@ -129,7 +129,7 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
ida_free(&gman->gmr_ida, res->start);
spin_lock(&gman->lock);
- gman->used_gmr_pages -= res->num_pages;
+ gman->used_gmr_pages -= PFN_UP(res->size);
spin_unlock(&gman->lock);
ttm_resource_fini(man, res);
kfree(res);
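
The accounting change in this hunk follows the removal of struct ttm_resource's num_pages field: the page count is now derived from the byte size. A small illustration of the PFN_UP() rounding this relies on; res_pages() is a hypothetical helper, not driver code:

#include <linux/mm.h>		/* PFN_UP() */
#include <drm/ttm/ttm_resource.h>

/* PFN_UP() rounds a byte count up to whole pages (with 4 KiB pages,
 * PFN_UP(1) == 1, PFN_UP(4096) == 1, PFN_UP(4097) == 2), so
 * PFN_UP(res->size) yields the count the old num_pages field carried. */
static inline unsigned long res_pages(const struct ttm_resource *res)
{
	return PFN_UP(res->size);
}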
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c
deleted file mode 100644
index 06aebc12774e..000000000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * Simple open hash table implementation.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <linux/export.h>
-#include <linux/hash.h>
-#include <linux/mm.h>
-#include <linux/rculist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm_print.h>
-
-#include "vmwgfx_hashtab.h"
-
-int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order)
-{
- unsigned int size = 1 << order;
-
- ht->order = order;
- ht->table = NULL;
- if (size <= PAGE_SIZE / sizeof(*ht->table))
- ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
- else
- ht->table = vzalloc(array_size(size, sizeof(*ht->table)));
- if (!ht->table) {
- DRM_ERROR("Out of memory for hash table\n");
- return -ENOMEM;
- }
- return 0;
-}
-
-void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key)
-{
- struct vmwgfx_hash_item *entry;
- struct hlist_head *h_list;
- unsigned int hashed_key;
- int count = 0;
-
- hashed_key = hash_long(key, ht->order);
- DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
- h_list = &ht->table[hashed_key];
- hlist_for_each_entry(entry, h_list, head)
- DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
-}
-
-static struct hlist_node *vmwgfx_ht_find_key(struct vmwgfx_open_hash *ht, unsigned long key)
-{
- struct vmwgfx_hash_item *entry;
- struct hlist_head *h_list;
- unsigned int hashed_key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- hlist_for_each_entry(entry, h_list, head) {
- if (entry->key == key)
- return &entry->head;
- if (entry->key > key)
- break;
- }
- return NULL;
-}
-
-static struct hlist_node *vmwgfx_ht_find_key_rcu(struct vmwgfx_open_hash *ht, unsigned long key)
-{
- struct vmwgfx_hash_item *entry;
- struct hlist_head *h_list;
- unsigned int hashed_key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- hlist_for_each_entry_rcu(entry, h_list, head) {
- if (entry->key == key)
- return &entry->head;
- if (entry->key > key)
- break;
- }
- return NULL;
-}
-
-int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item)
-{
- struct vmwgfx_hash_item *entry;
- struct hlist_head *h_list;
- struct hlist_node *parent;
- unsigned int hashed_key;
- unsigned long key = item->key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- parent = NULL;
- hlist_for_each_entry(entry, h_list, head) {
- if (entry->key == key)
- return -EINVAL;
- if (entry->key > key)
- break;
- parent = &entry->head;
- }
- if (parent)
- hlist_add_behind_rcu(&item->head, parent);
- else
- hlist_add_head_rcu(&item->head, h_list);
- return 0;
-}
-
-/*
- * Insert an item and return a "bits"-bit key that hasn't been
- * used before.
- */
-int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item,
- unsigned long seed, int bits, int shift,
- unsigned long add)
-{
- int ret;
- unsigned long mask = (1UL << bits) - 1;
- unsigned long first, unshifted_key;
-
- unshifted_key = hash_long(seed, bits);
- first = unshifted_key;
- do {
- item->key = (unshifted_key << shift) + add;
- ret = vmwgfx_ht_insert_item(ht, item);
- if (ret)
- unshifted_key = (unshifted_key + 1) & mask;
- } while (ret && (unshifted_key != first));
-
- if (ret) {
- DRM_ERROR("Available key bit space exhausted\n");
- return -EINVAL;
- }
- return 0;
-}
-
-int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key,
- struct vmwgfx_hash_item **item)
-{
- struct hlist_node *list;
-
- list = vmwgfx_ht_find_key_rcu(ht, key);
- if (!list)
- return -EINVAL;
-
- *item = hlist_entry(list, struct vmwgfx_hash_item, head);
- return 0;
-}
-
-int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key)
-{
- struct hlist_node *list;
-
- list = vmwgfx_ht_find_key(ht, key);
- if (list) {
- hlist_del_init_rcu(list);
- return 0;
- }
- return -EINVAL;
-}
-
-int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item)
-{
- hlist_del_init_rcu(&item->head);
- return 0;
-}
-
-void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht)
-{
- if (ht->table) {
- kvfree(ht->table);
- ht->table = NULL;
- }
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h
deleted file mode 100644
index a9ce12922e21..000000000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * Simple open hash table implementation.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-/*
- * TODO: Replace this hashtable with Linux' generic implementation
- * from <linux/hashtable.h>.
- */
-
-#ifndef VMWGFX_HASHTAB_H
-#define VMWGFX_HASHTAB_H
-
-#include <linux/list.h>
-
-#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-
-struct vmwgfx_hash_item {
- struct hlist_node head;
- unsigned long key;
-};
-
-struct vmwgfx_open_hash {
- struct hlist_head *table;
- u8 order;
-};
-
-int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order);
-int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item);
-int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item,
- unsigned long seed, int bits, int shift,
- unsigned long add);
-int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key,
- struct vmwgfx_hash_item **item);
-
-void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key);
-int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key);
-int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item);
-void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht);
-
-/*
- * RCU-safe interface
- *
- * The user of this API needs to make sure that two or more instances of the
- * hash table manipulation functions are never run simultaneously.
- * The lookup function vmwgfx_ht_find_item_rcu may, however, run simultaneously
- * with any of the manipulation functions as long as it's called from within
- * an RCU read-locked section.
- */
-#define vmwgfx_ht_insert_item_rcu vmwgfx_ht_insert_item
-#define vmwgfx_ht_just_insert_please_rcu vmwgfx_ht_just_insert_please
-#define vmwgfx_ht_remove_key_rcu vmwgfx_ht_remove_key
-#define vmwgfx_ht_remove_item_rcu vmwgfx_ht_remove_item
-#define vmwgfx_ht_find_item_rcu vmwgfx_ht_find_item
-
-#endif
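
The deleted wrapper above is superseded by the generic <linux/hashtable.h> API, as the TODO anticipated; the vmwgfx_validation.c hunks later in this patch show the conversion. A minimal sketch of the replacement idiom, with hypothetical names (example_node, example_ht):

#include <linux/hashtable.h>

struct example_node {
	unsigned long key;
	struct hlist_node head;
};

/* 2^6 = 64 buckets, statically allocated */
static DEFINE_HASHTABLE(example_ht, 6);

static void example_insert(struct example_node *n)
{
	hash_add_rcu(example_ht, &n->head, n->key);
}

/* caller must hold rcu_read_lock() for the RCU variant */
static struct example_node *example_lookup(unsigned long key)
{
	struct example_node *n;

	hash_for_each_possible_rcu(example_ht, n, head, key)
		if (n->key == key)
			return n;
	return NULL;
}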
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 214829c32ed8..257f090071f1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -25,15 +25,15 @@
*
**************************************************************************/
+#include "vmwgfx_kms.h"
+#include "vmw_surface_cache.h"
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
-#include <drm/drm_vblank.h>
-
-#include "vmwgfx_kms.h"
void vmw_du_cleanup(struct vmw_display_unit *du)
{
@@ -52,9 +52,9 @@ void vmw_du_cleanup(struct vmw_display_unit *du)
* Display Unit Cursor functions
*/
+static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo,
- struct ttm_bo_kmap_obj *map,
+ struct vmw_plane_state *vps,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY);
@@ -63,23 +63,23 @@ struct vmw_svga_fifo_cmd_define_cursor {
SVGAFifoCmdDefineAlphaCursor cursor;
};
-static void vmw_cursor_update_image(struct vmw_private *dev_priv,
- struct ttm_buffer_object *cm_bo,
- struct ttm_bo_kmap_obj *cm_map,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
+/**
+ * vmw_send_define_cursor_cmd - queue a define cursor command
+ * @dev_priv: the private driver struct
+ * @image: buffer which holds the cursor image
+ * @width: width of the mouse cursor image
+ * @height: height of the mouse cursor image
+ * @hotspotX: the horizontal position of the mouse hotspot
+ * @hotspotY: the vertical position of the mouse hotspot
+ */
+static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
+ u32 *image, u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
{
struct vmw_svga_fifo_cmd_define_cursor *cmd;
const u32 image_size = width * height * sizeof(*image);
const u32 cmd_size = sizeof(*cmd) + image_size;
- if (cm_bo != NULL) {
- vmw_cursor_update_mob(dev_priv, cm_bo, cm_map, image,
- width, height,
- hotspotX, hotspotY);
- return;
- }
-
/* Try to reserve fifocmd space and swallow any failures;
such reservations cannot be left unconsumed for long
under the risk of clogging other fifocmd users, so
@@ -87,7 +87,7 @@ static void vmw_cursor_update_image(struct vmw_private *dev_priv,
other fallible KMS-atomic resources at prepare_fb */
cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
- if (unlikely(cmd == NULL))
+ if (unlikely(!cmd))
return;
memset(cmd, 0, sizeof(*cmd));
@@ -105,11 +105,39 @@ static void vmw_cursor_update_image(struct vmw_private *dev_priv,
}
/**
+ * vmw_cursor_update_image - update the cursor image on the provided plane
+ * @dev_priv: the private driver struct
+ * @vps: the plane state of the cursor plane
+ * @image: buffer which holds the cursor image
+ * @width: width of the mouse cursor image
+ * @height: height of the mouse cursor image
+ * @hotspotX: the horizontal position of the mouse hotspot
+ * @hotspotY: the vertical position of the mouse hotspot
+ */
+static void vmw_cursor_update_image(struct vmw_private *dev_priv,
+ struct vmw_plane_state *vps,
+ u32 *image, u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
+{
+ if (vps->cursor.bo)
+ vmw_cursor_update_mob(dev_priv, vps, image,
+ vps->base.crtc_w, vps->base.crtc_h,
+ hotspotX, hotspotY);
+
+ else
+ vmw_send_define_cursor_cmd(dev_priv, image, width, height,
+ hotspotX, hotspotY);
+}
+
+
+/**
 * vmw_cursor_update_mob - Update cursor via the CursorMob mechanism
*
+ * Called from inside vmw_du_cursor_plane_atomic_update to actually
+ * make the cursor-image live.
+ *
* @dev_priv: device to work with
- * @bo: BO for the MOB
- * @map: kmap obj for the BO
+ * @vps: the plane state of the cursor plane
* @image: cursor source data to fill the MOB with
* @width: source data width
* @height: source data height
@@ -117,8 +145,7 @@ static void vmw_cursor_update_image(struct vmw_private *dev_priv,
* @hotspotY: cursor hotspot Y
*/
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo,
- struct ttm_bo_kmap_obj *map,
+ struct vmw_plane_state *vps,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY)
{
@@ -127,11 +154,11 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
const u32 image_size = width * height * sizeof(*image);
bool dummy;
- BUG_ON(!image);
-
- header = (SVGAGBCursorHeader *)ttm_kmap_obj_virtual(map, &dummy);
+ header = ttm_kmap_obj_virtual(&vps->cursor.map, &dummy);
alpha_header = &header->header.alphaHeader;
+ memset(header, 0, sizeof(*header));
+
header->type = SVGA_ALPHA_CURSOR;
header->sizeInBytes = image_size;
@@ -141,100 +168,159 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
alpha_header->height = height;
memcpy(header + 1, image, image_size);
-
- vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID, bo->resource->start);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
+ vps->cursor.bo->resource->start);
}
-void vmw_du_destroy_cursor_mob_array(struct vmw_cursor_plane *vcp)
-{
- size_t i;
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mob); i++) {
- if (vcp->cursor_mob[i] != NULL) {
- ttm_bo_unpin(vcp->cursor_mob[i]);
- ttm_bo_put(vcp->cursor_mob[i]);
- kfree(vcp->cursor_mob[i]);
- vcp->cursor_mob[i] = NULL;
- }
- }
+static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
+{
+ return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
}
-#define CURSOR_MOB_SIZE(dimension) \
- ((dimension) * (dimension) * sizeof(u32) + sizeof(SVGAGBCursorHeader))
+/**
+ * vmw_du_cursor_plane_acquire_image - Acquire the image data
+ * @vps: cursor plane state
+ */
+static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
+{
+ bool dummy;
+ if (vps->surf) {
+ if (vps->surf_mapped)
+ return vmw_bo_map_and_cache(vps->surf->res.backup);
+ return vps->surf->snooper.image;
+ } else if (vps->bo)
+ return ttm_kmap_obj_virtual(&vps->bo->map, &dummy);
+ return NULL;
+}
-int vmw_du_create_cursor_mob_array(struct vmw_cursor_plane *cursor)
+static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
+ struct vmw_plane_state *new_vps)
{
- struct vmw_private *dev_priv = cursor->base.dev->dev_private;
- uint32_t cursor_max_dim, mob_max_size;
- int ret = 0;
- size_t i;
+ void *old_image;
+ void *new_image;
+ u32 size;
+ bool changed;
- if (!dev_priv->has_mob || (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
- return -ENOSYS;
+ if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
+ old_vps->base.crtc_h != new_vps->base.crtc_h)
+ return true;
- mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
- cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
+ if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
+ old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
+ return true;
- if (CURSOR_MOB_SIZE(cursor_max_dim) > mob_max_size)
- cursor_max_dim = 64; /* Mandatorily-supported cursor dimension */
+ size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
- for (i = 0; i < ARRAY_SIZE(cursor->cursor_mob); i++) {
- struct ttm_buffer_object **const bo = &cursor->cursor_mob[i];
+ old_image = vmw_du_cursor_plane_acquire_image(old_vps);
+ new_image = vmw_du_cursor_plane_acquire_image(new_vps);
- ret = vmw_bo_create_kernel(dev_priv,
- CURSOR_MOB_SIZE(cursor_max_dim),
- &vmw_mob_placement, bo);
+ changed = false;
+ if (old_image && new_image)
+ changed = memcmp(old_image, new_image, size) != 0;
- if (ret != 0)
- goto teardown;
+ return changed;
+}
- if ((*bo)->resource->mem_type != VMW_PL_MOB) {
- DRM_ERROR("Obtained buffer object is not a MOB.\n");
- ret = -ENOSYS;
- goto teardown;
- }
+static void vmw_du_destroy_cursor_mob(struct ttm_buffer_object **bo)
+{
+ if (!(*bo))
+ return;
-		/* Fence the mob creation so we are guaranteed to have the mob */
- ret = ttm_bo_reserve(*bo, false, false, NULL);
+ ttm_bo_unpin(*bo);
+ ttm_bo_put(*bo);
+ kfree(*bo);
+ *bo = NULL;
+}
- if (ret != 0)
- goto teardown;
+static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
+ struct vmw_plane_state *vps)
+{
+ u32 i;
- vmw_bo_fence_single(*bo, NULL);
+ if (!vps->cursor.bo)
+ return;
- ttm_bo_unreserve(*bo);
+ vmw_du_cursor_plane_unmap_cm(vps);
- drm_info(&dev_priv->drm, "Using CursorMob mobid %lu, max dimension %u\n",
- (*bo)->resource->start, cursor_max_dim);
+ /* Look for a free slot to return this mob to the cache. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (!vcp->cursor_mobs[i]) {
+ vcp->cursor_mobs[i] = vps->cursor.bo;
+ vps->cursor.bo = NULL;
+ return;
+ }
}
- return 0;
-
-teardown:
- vmw_du_destroy_cursor_mob_array(cursor);
+ /* Cache is full: See if this mob is bigger than an existing mob. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (vcp->cursor_mobs[i]->base.size <
+ vps->cursor.bo->base.size) {
+ vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
+ vcp->cursor_mobs[i] = vps->cursor.bo;
+ vps->cursor.bo = NULL;
+ return;
+ }
+ }
- return ret;
+ /* Destroy it if it's not worth caching. */
+ vmw_du_destroy_cursor_mob(&vps->cursor.bo);
}
-#undef CURSOR_MOB_SIZE
-
-static void vmw_cursor_update_bo(struct vmw_private *dev_priv,
- struct ttm_buffer_object *cm_bo,
- struct ttm_bo_kmap_obj *cm_map,
- struct vmw_buffer_object *bo,
- u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
+static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
+ struct vmw_plane_state *vps)
{
- void *virtual;
- bool dummy;
+ struct vmw_private *dev_priv = vcp->base.dev->dev_private;
+ u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
+ u32 i;
+ u32 cursor_max_dim, mob_max_size;
+ int ret;
+
+ if (!dev_priv->has_mob ||
+ (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
+ return -EINVAL;
+
+ mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
+ cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
+
+ if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
+ vps->base.crtc_h > cursor_max_dim)
+ return -EINVAL;
- virtual = ttm_kmap_obj_virtual(&bo->map, &dummy);
- if (virtual) {
- vmw_cursor_update_image(dev_priv, cm_bo, cm_map, virtual,
- width, height,
- hotspotX, hotspotY);
- atomic_dec(&bo->base_mapped_count);
+ if (vps->cursor.bo) {
+ if (vps->cursor.bo->base.size >= size)
+ return 0;
+ vmw_du_put_cursor_mob(vcp, vps);
+ }
+
+ /* Look for an unused mob in the cache. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (vcp->cursor_mobs[i] &&
+ vcp->cursor_mobs[i]->base.size >= size) {
+ vps->cursor.bo = vcp->cursor_mobs[i];
+ vcp->cursor_mobs[i] = NULL;
+ return 0;
+ }
}
+ /* Create a new mob if we can't find an existing one. */
+ ret = vmw_bo_create_kernel(dev_priv, size, &vmw_mob_placement,
+ &vps->cursor.bo);
+
+ if (ret != 0)
+ return ret;
+
+	/* Fence the mob creation so we are guaranteed to have the mob */
+ ret = ttm_bo_reserve(vps->cursor.bo, false, false, NULL);
+ if (ret != 0)
+ goto teardown;
+
+ vmw_bo_fence_single(vps->cursor.bo, NULL);
+ ttm_bo_unreserve(vps->cursor.bo);
+ return 0;
+
+teardown:
+ vmw_du_destroy_cursor_mob(&vps->cursor.bo);
+ return ret;
}
@@ -266,7 +352,6 @@ static void vmw_cursor_update_position(struct vmw_private *dev_priv,
spin_unlock(&dev_priv->cursor_lock);
}
-
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
@@ -284,10 +369,13 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int i, ret;
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+ const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
cmd = container_of(header, struct vmw_dma_cmd, header);
- /* No snooper installed */
+ /* No snooper installed, nothing to copy */
if (!srf->snooper.image)
return;
@@ -308,7 +396,8 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
box->x != 0 || box->y != 0 || box->z != 0 ||
box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
- box->d != 1 || box_count != 1) {
+ box->d != 1 || box_count != 1 ||
+ box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
@@ -322,7 +411,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
}
kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
- kmap_num = (64*64*4) >> PAGE_SHIFT;
+ kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
ret = ttm_bo_reserve(bo, true, false, NULL);
if (unlikely(ret != 0)) {
@@ -336,14 +425,15 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
virtual = ttm_kmap_obj_virtual(&map, &dummy);
- if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
- memcpy(srf->snooper.image, virtual, 64*64*4);
+ if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
+ memcpy(srf->snooper.image, virtual,
+ VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
} else {
/* Image is unsigned pointer. */
for (i = 0; i < box->h; i++)
- memcpy(srf->snooper.image + i * 64,
+ memcpy(srf->snooper.image + i * image_pitch,
virtual + i * cmd->dma.guest.pitch,
- box->w * 4);
+ box->w * desc->pitchBytesPerBlock);
}
srf->snooper.age++;
@@ -387,15 +477,17 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
du = vmw_crtc_to_du(crtc);
if (!du->cursor_surface ||
- du->cursor_age == du->cursor_surface->snooper.age)
+ du->cursor_age == du->cursor_surface->snooper.age ||
+ !du->cursor_surface->snooper.image)
continue;
du->cursor_age = du->cursor_surface->snooper.age;
- vmw_cursor_update_image(dev_priv, NULL, NULL,
- du->cursor_surface->snooper.image,
- 64, 64,
- du->hotspot_x + du->core_hotspot_x,
- du->hotspot_y + du->core_hotspot_y);
+ vmw_send_define_cursor_cmd(dev_priv,
+ du->cursor_surface->snooper.image,
+ VMW_CURSOR_SNOOP_WIDTH,
+ VMW_CURSOR_SNOOP_HEIGHT,
+ du->hotspot_x + du->core_hotspot_x,
+ du->hotspot_y + du->core_hotspot_y);
}
mutex_unlock(&dev->mode_config.mutex);
@@ -404,8 +496,14 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ u32 i;
+
vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
- vmw_du_destroy_cursor_mob_array(vmw_plane_to_vcp(plane));
+
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
+ vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
+
drm_plane_cleanup(plane);
}
@@ -463,6 +561,87 @@ vmw_du_plane_cleanup_fb(struct drm_plane *plane,
/**
+ * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
+ *
+ * @vps: plane_state
+ *
+ * Returns 0 on success
+ */
+
+static int
+vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
+{
+ int ret;
+ u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
+ struct ttm_buffer_object *bo = vps->cursor.bo;
+
+ if (!bo)
+ return -EINVAL;
+
+ if (bo->base.size < size)
+ return -EINVAL;
+
+ if (vps->cursor.mapped)
+ return 0;
+
+ ret = ttm_bo_reserve(bo, false, false, NULL);
+
+ if (unlikely(ret != 0))
+ return -ENOMEM;
+
+ ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vps->cursor.map);
+
+ /*
+	 * We just want to try to get the MOB bind to finish
+	 * so that the first write to SVGA_REG_CURSOR_MOBID
+	 * is done with a buffer that the device has already
+	 * seen.
+	 */
+ (void) ttm_bo_wait(bo, false, false);
+
+ ttm_bo_unreserve(bo);
+
+ if (unlikely(ret != 0))
+ return -ENOMEM;
+
+ vps->cursor.mapped = true;
+
+ return 0;
+}
+
+
+/**
+ * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
+ *
+ * @vps: state of the cursor plane
+ *
+ * Returns 0 on success
+ */
+
+static int
+vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
+{
+ int ret = 0;
+ struct ttm_buffer_object *bo = vps->cursor.bo;
+
+ if (!vps->cursor.mapped)
+ return 0;
+
+ if (!bo)
+ return 0;
+
+ ret = ttm_bo_reserve(bo, true, false, NULL);
+ if (likely(ret == 0)) {
+ ttm_bo_kunmap(&vps->cursor.map);
+ ttm_bo_unreserve(bo);
+ vps->cursor.mapped = false;
+ }
+
+ return ret;
+}
+
+
+/**
* vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
*
* @plane: cursor plane
@@ -476,10 +655,16 @@ void
vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
bool dummy;
- if (vps->bo != NULL && ttm_kmap_obj_virtual(&vps->bo->map, &dummy) != NULL) {
+ if (vps->surf_mapped) {
+ vmw_bo_unmap(vps->surf->res.backup);
+ vps->surf_mapped = false;
+ }
+
+ if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &dummy)) {
const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
if (likely(ret == 0)) {
@@ -489,14 +674,8 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
}
}
- if (vps->cm_bo != NULL && ttm_kmap_obj_virtual(&vps->cm_map, &dummy) != NULL) {
- const int ret = ttm_bo_reserve(vps->cm_bo, true, false, NULL);
-
- if (likely(ret == 0)) {
- ttm_bo_kunmap(&vps->cm_map);
- ttm_bo_unreserve(vps->cm_bo);
- }
- }
+ vmw_du_cursor_plane_unmap_cm(vps);
+ vmw_du_put_cursor_mob(vcp, vps);
vmw_du_plane_unpin_surf(vps, false);
@@ -511,6 +690,7 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
}
}
+
/**
* vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
*
@@ -526,8 +706,6 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
struct drm_framebuffer *fb = new_state->fb;
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
- struct ttm_buffer_object *cm_bo = NULL;
- bool dummy;
int ret = 0;
if (vps->surf) {
@@ -550,13 +728,14 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
}
}
- vps->cm_bo = NULL;
-
- if (vps->surf == NULL && vps->bo != NULL) {
+ if (!vps->surf && vps->bo) {
const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
-		/* Not using vmw_bo_map_and_cache() helper here as we need to reserve
-		   the ttm_buffer_object first which vmw_bo_map_and_cache() omits. */
+ /*
+ * Not using vmw_bo_map_and_cache() helper here as we need to
+ * reserve the ttm_buffer_object first which
+ * vmw_bo_map_and_cache() omits.
+ */
ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
if (unlikely(ret != 0))
@@ -571,69 +750,24 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
if (unlikely(ret != 0))
return -ENOMEM;
+ } else if (vps->surf && !vps->bo && vps->surf->res.backup) {
+
+ WARN_ON(vps->surf->snooper.image);
+ ret = ttm_bo_reserve(&vps->surf->res.backup->base, true, false,
+ NULL);
+ if (unlikely(ret != 0))
+ return -ENOMEM;
+ vmw_bo_map_and_cache(vps->surf->res.backup);
+ ttm_bo_unreserve(&vps->surf->res.backup->base);
+ vps->surf_mapped = true;
}
if (vps->surf || vps->bo) {
- unsigned cursor_mob_idx = vps->cursor_mob_idx;
-
- /* Lazily set up cursor MOBs just once -- no reattempts. */
- if (cursor_mob_idx == 0 && vcp->cursor_mob[0] == NULL)
- if (vmw_du_create_cursor_mob_array(vcp) != 0)
- vps->cursor_mob_idx = cursor_mob_idx = -1U;
-
- if (cursor_mob_idx < ARRAY_SIZE(vcp->cursor_mob)) {
- const u32 size = sizeof(SVGAGBCursorHeader) +
- new_state->crtc_w * new_state->crtc_h * sizeof(u32);
-
- cm_bo = vcp->cursor_mob[cursor_mob_idx];
-
- if (cm_bo->resource->num_pages * PAGE_SIZE < size) {
- ret = -EINVAL;
- goto error_bo_unmap;
- }
-
- ret = ttm_bo_reserve(cm_bo, false, false, NULL);
-
- if (unlikely(ret != 0)) {
- ret = -ENOMEM;
- goto error_bo_unmap;
- }
-
- ret = ttm_bo_kmap(cm_bo, 0, PFN_UP(size), &vps->cm_map);
-
- /*
- * We just want to try to get mob bind to finish
- * so that the first write to SVGA_REG_CURSOR_MOBID
- * is done with a buffer that the device has already
- * seen
- */
- (void) ttm_bo_wait(cm_bo, false, false);
-
- ttm_bo_unreserve(cm_bo);
-
- if (unlikely(ret != 0)) {
- ret = -ENOMEM;
- goto error_bo_unmap;
- }
-
- vps->cursor_mob_idx = cursor_mob_idx ^ 1;
- vps->cm_bo = cm_bo;
- }
+ vmw_du_get_cursor_mob(vcp, vps);
+ vmw_du_cursor_plane_map_cm(vps);
}
return 0;
-
-error_bo_unmap:
- if (vps->bo != NULL && ttm_kmap_obj_virtual(&vps->bo->map, &dummy) != NULL) {
- const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
- if (likely(ret == 0)) {
- atomic_dec(&vps->bo->base_mapped_count);
- ttm_bo_kunmap(&vps->bo->map);
- ttm_bo_unreserve(&vps->bo->base);
- }
- }
-
- return ret;
}
@@ -649,7 +783,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
s32 hotspot_x, hotspot_y;
+ bool dummy;
hotspot_x = du->hotspot_x;
hotspot_y = du->hotspot_y;
@@ -662,23 +798,38 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
du->cursor_surface = vps->surf;
du->cursor_bo = vps->bo;
+ if (!vps->surf && !vps->bo) {
+ vmw_cursor_update_position(dev_priv, false, 0, 0);
+ return;
+ }
+
+ vps->cursor.hotspot_x = hotspot_x;
+ vps->cursor.hotspot_y = hotspot_y;
+
if (vps->surf) {
du->cursor_age = du->cursor_surface->snooper.age;
+ }
- vmw_cursor_update_image(dev_priv, vps->cm_bo, &vps->cm_map,
- vps->surf->snooper.image,
- new_state->crtc_w,
- new_state->crtc_h,
- hotspot_x, hotspot_y);
- } else if (vps->bo) {
- vmw_cursor_update_bo(dev_priv, vps->cm_bo, &vps->cm_map,
- vps->bo,
- new_state->crtc_w,
- new_state->crtc_h,
- hotspot_x, hotspot_y);
+ if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
+ /*
+ * If it hasn't changed, avoid making the device do extra
+ * work by keeping the old cursor active.
+ */
+ struct vmw_cursor_plane_state tmp = old_vps->cursor;
+ old_vps->cursor = vps->cursor;
+ vps->cursor = tmp;
} else {
- vmw_cursor_update_position(dev_priv, false, 0, 0);
- return;
+ void *image = vmw_du_cursor_plane_acquire_image(vps);
+ if (image)
+ vmw_cursor_update_image(dev_priv, vps, image,
+ new_state->crtc_w,
+ new_state->crtc_h,
+ hotspot_x, hotspot_y);
+ }
+
+ if (vps->bo) {
+ if (ttm_kmap_obj_virtual(&vps->bo->map, &dummy))
+ atomic_dec(&vps->bo->base_mapped_count);
}
du->cursor_x = new_state->crtc_x + du->set_gui_x;
@@ -778,12 +929,16 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- if (!vmw_framebuffer_to_vfb(fb)->bo)
+ if (!vmw_framebuffer_to_vfb(fb)->bo) {
surface = vmw_framebuffer_to_vfbs(fb)->surface;
- if (surface && !surface->snooper.image) {
- DRM_ERROR("surface not suitable for cursor\n");
- return -EINVAL;
+ WARN_ON(!surface);
+
+ if (!surface ||
+ (!surface->snooper.image && !surface->res.backup)) {
+ DRM_ERROR("surface not suitable for cursor\n");
+ return -EINVAL;
+ }
}
return 0;
@@ -831,15 +986,6 @@ void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_pending_vblank_event *event = crtc->state->event;
-
- if (event) {
- crtc->state->event = NULL;
-
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irq(&crtc->dev->event_lock);
- }
}
@@ -943,6 +1089,8 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
vps->pinned = 0;
vps->cpp = 0;
+ memset(&vps->cursor, 0, sizeof(vps->cursor));
+
/* Each ref counted resource needs to be acquired again */
if (vps->surf)
(void) vmw_surface_reference(vps->surf);
@@ -997,7 +1145,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
{
struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
-
/* Should have been freed by cleanup_fb */
if (vps->surf)
vmw_surface_unreference(&vps->surf);
@@ -1664,7 +1811,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
if (IS_ERR(vfb)) {
ret = PTR_ERR(vfb);
goto err_out;
- }
+ }
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
@@ -2052,6 +2199,8 @@ int vmw_kms_init(struct vmw_private *dev_priv)
dev->mode_config.min_height = 1;
dev->mode_config.max_width = dev_priv->texture_max_width;
dev->mode_config.max_height = dev_priv->texture_max_height;
+ dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
+ dev->mode_config.prefer_shadow_fbdev = !dev_priv->has_mob;
drm_mode_create_suggested_offset_properties(dev);
vmw_kms_create_hotplug_mode_update_property(dev_priv);
@@ -2093,7 +2242,6 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
struct drm_crtc *crtc;
int ret = 0;
-
mutex_lock(&dev->mode_config.mutex);
if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
@@ -2155,30 +2303,6 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
dev_priv->max_primary_mem : dev_priv->vram_size);
}
-
-/*
- * Function called by DRM code called with vbl_lock held.
- */
-u32 vmw_get_vblank_counter(struct drm_crtc *crtc)
-{
- return 0;
-}
-
-/*
- * Function called by DRM code called with vbl_lock held.
- */
-int vmw_enable_vblank(struct drm_crtc *crtc)
-{
- return -EINVAL;
-}
-
-/*
- * Function called by DRM code called with vbl_lock held.
- */
-void vmw_disable_vblank(struct drm_crtc *crtc)
-{
-}
-
/**
* vmw_du_update_layout - Update the display unit with topology from resolution
* plugin and generate DRM uevent
@@ -2207,7 +2331,7 @@ retry:
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry;
- }
+ }
goto out_fini;
}
}
@@ -2222,8 +2346,8 @@ retry:
du->gui_x = rects[du->unit].x1;
du->gui_y = rects[du->unit].y1;
} else {
- du->pref_width = 800;
- du->pref_height = 600;
+ du->pref_width = VMWGFX_MIN_INITIAL_WIDTH;
+ du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
du->pref_active = false;
du->gui_x = 0;
du->gui_y = 0;
@@ -2250,13 +2374,13 @@ retry:
}
con->status = vmw_du_connector_detect(con, true);
}
-
- drm_sysfs_hotplug_event(dev);
out_fini:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
mutex_unlock(&dev->mode_config.mutex);
+ drm_sysfs_hotplug_event(dev);
+
return 0;
}
@@ -2536,10 +2660,9 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
int ret, i;
if (!arg->num_outputs) {
- struct drm_rect def_rect = {0, 0, 800, 600};
- VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n",
- def_rect.x1, def_rect.y1,
- def_rect.x2, def_rect.y2);
+ struct drm_rect def_rect = {0, 0,
+ VMWGFX_MIN_INITIAL_WIDTH,
+ VMWGFX_MIN_INITIAL_HEIGHT};
vmw_du_update_layout(dev_priv, 1, &def_rect);
return 0;
}
@@ -2834,68 +2957,6 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
return 0;
}
-int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
- unsigned unit,
- u32 max_width,
- u32 max_height,
- struct drm_connector **p_con,
- struct drm_crtc **p_crtc,
- struct drm_display_mode **p_mode)
-{
- struct drm_connector *con;
- struct vmw_display_unit *du;
- struct drm_display_mode *mode;
- int i = 0;
- int ret = 0;
-
- mutex_lock(&dev_priv->drm.mode_config.mutex);
- list_for_each_entry(con, &dev_priv->drm.mode_config.connector_list,
- head) {
- if (i == unit)
- break;
-
- ++i;
- }
-
- if (&con->head == &dev_priv->drm.mode_config.connector_list) {
- DRM_ERROR("Could not find initial display unit.\n");
- ret = -EINVAL;
- goto out_unlock;
- }
-
- if (list_empty(&con->modes))
- (void) vmw_du_connector_fill_modes(con, max_width, max_height);
-
- if (list_empty(&con->modes)) {
- DRM_ERROR("Could not find initial display mode.\n");
- ret = -EINVAL;
- goto out_unlock;
- }
-
- du = vmw_connector_to_du(con);
- *p_con = con;
- *p_crtc = &du->crtc;
-
- list_for_each_entry(mode, &con->modes, head) {
- if (mode->type & DRM_MODE_TYPE_PREFERRED)
- break;
- }
-
- if (&mode->head == &con->modes) {
- WARN_ONCE(true, "Could not find initial preferred mode.\n");
- *p_mode = list_first_entry(&con->modes,
- struct drm_display_mode,
- head);
- } else {
- *p_mode = mode;
- }
-
- out_unlock:
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
-
- return ret;
-}
-
/**
* vmw_kms_create_implicit_placement_property - Set up the implicit placement
* property.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 85f86faa3243..4d6e7b555db7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -272,6 +272,14 @@ struct vmw_crtc_state {
struct drm_crtc_state base;
};
+struct vmw_cursor_plane_state {
+ struct ttm_buffer_object *bo;
+ struct ttm_bo_kmap_obj map;
+ bool mapped;
+ s32 hotspot_x;
+ s32 hotspot_y;
+};
+
/**
* Derived class for plane state object
*
@@ -295,13 +303,8 @@ struct vmw_plane_state {
/* For CPU Blit */
unsigned int cpp;
- /* CursorMob flipping index; -1 if cursor mobs not used */
- unsigned int cursor_mob_idx;
- /* Currently-active CursorMob */
- struct ttm_buffer_object *cm_bo;
- /* CursorMob kmap_obj; expected valid at cursor_plane_atomic_update
- IFF currently-active CursorMob above is valid */
- struct ttm_bo_kmap_obj cm_map;
+ bool surf_mapped;
+ struct vmw_cursor_plane_state cursor;
};
@@ -338,11 +341,12 @@ struct vmw_connector_state {
* Derived class for cursor plane object
*
* @base DRM plane object
- * @cursor_mob array of two MOBs for CursorMob flipping
+ * @cursor.cursor_mobs Cursor mobs available for re-use
*/
struct vmw_cursor_plane {
struct drm_plane base;
- struct ttm_buffer_object *cursor_mob[2];
+
+ struct ttm_buffer_object *cursor_mobs[3];
};
/**
@@ -458,13 +462,6 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_surface *surface,
bool only_2d,
const struct drm_mode_fb_cmd2 *mode_cmd);
-int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
- unsigned unit,
- u32 max_width,
- u32 max_height,
- struct drm_connector **p_con,
- struct drm_crtc **p_crtc,
- struct drm_display_mode **p_mode);
void vmw_guess_mode_timing(struct drm_display_mode *mode);
void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
@@ -472,8 +469,6 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
/* Universal Plane Helpers */
void vmw_du_primary_plane_destroy(struct drm_plane *plane);
void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
-int vmw_du_create_cursor_mob_array(struct vmw_cursor_plane *vcp);
-void vmw_du_destroy_cursor_mob_array(struct vmw_cursor_plane *vcp);
/* Atomic Helpers */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index b8761f16dd78..a56e5d0ca3c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -28,7 +28,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_vblank.h>
#include "vmwgfx_kms.h"
@@ -235,9 +234,6 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
- .get_vblank_counter = vmw_get_vblank_counter,
- .enable_vblank = vmw_enable_vblank,
- .disable_vblank = vmw_disable_vblank,
};
@@ -507,10 +503,6 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
dev_priv->ldu_priv->last_num_active = 0;
dev_priv->ldu_priv->fb = NULL;
- ret = drm_vblank_init(dev, num_display_units);
- if (ret != 0)
- goto err_free;
-
vmw_kms_create_implicit_placement_property(dev_priv);
for (i = 0; i < num_display_units; ++i) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mksstat.h b/drivers/gpu/drm/vmwgfx/vmwgfx_mksstat.h
index 0509f55f07b4..ede74c7fdbbf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mksstat.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mksstat.h
@@ -29,6 +29,7 @@
#define _VMWGFX_MKSSTAT_H_
#include <asm/page.h>
+#include <linux/kconfig.h>
/* Reservation marker for mksstat pid's */
#define MKSSTAT_PID_RESERVED -1
@@ -41,6 +42,7 @@
typedef enum {
MKSSTAT_KERN_EXECBUF, /* vmw_execbuf_ioctl */
+ MKSSTAT_KERN_COTABLE_RESIZE,
MKSSTAT_KERN_COUNT /* Reserved entry; always last */
} mksstat_kern_stats_t;
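
Adding a kernel-side counter touches three places that must stay in the same order as the enum; a hedged sketch (MKSSTAT_KERN_NEWSTAT and "vmw_newstat" are hypothetical):

/* 1) the enum above, before the reserved entry:
 *	MKSSTAT_KERN_NEWSTAT,
 *	MKSSTAT_KERN_COUNT,	// reserved; always last
 *
 * 2) mksstat_kern_name_desc[] in vmwgfx_msg.c, same position:
 *	{ "vmw_newstat", "vmw_newstat" },
 *
 * 3) the init chain in mksstat_init_kern_id():
 *	pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_NEWSTAT,
 *					     pstat, pinfo, pstrs_acc);
 */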
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 089046fa21be..06d9e106e3c5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -85,7 +85,14 @@ struct rpc_channel {
u32 cookie_low;
};
-
+#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
+/* Kernel mksGuestStats counter names and descriptions; same order as enum mksstat_kern_stats_t */
+static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
+{
+ { "vmw_execbuf_ioctl", "vmw_execbuf_ioctl" },
+ { "vmw_cotable_resize", "vmw_cotable_resize" },
+};
+#endif
/**
* vmw_open_channel
@@ -695,12 +702,6 @@ static inline void hypervisor_ppn_remove(PPN64 pfn)
/* Header to the text description of mksGuestStat instance descriptor */
#define MKSSTAT_KERNEL_DESCRIPTION "vmwgfx"
-/* Kernel mksGuestStats counter names and descriptions; same order as enum mksstat_kern_stats_t */
-static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
-{
- { "vmw_execbuf_ioctl", "vmw_execbuf_ioctl" },
-};
-
/**
* mksstat_init_record: Initializes an MKSGuestStatCounter-based record
* for the respective mksGuestStat index.
@@ -786,6 +787,7 @@ static int mksstat_init_kern_id(struct page **ppage)
/* Set up all kernel-internal counters and corresponding structures */
pstrs_acc = pstrs;
pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_EXECBUF, pstat, pinfo, pstrs_acc);
+ pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_COTABLE_RESIZE, pstat, pinfo, pstrs_acc);
/* Add new counters above, in their order of appearance in mksstat_kern_stats_t */
@@ -1014,8 +1016,6 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
struct vmw_private *const dev_priv = vmw_priv(dev);
- struct page *page;
- MKSGuestStatInstanceDescriptor *pdesc;
const size_t num_pages_stat = PFN_UP(arg->stat_len);
const size_t num_pages_info = PFN_UP(arg->info_len);
const size_t num_pages_strs = PFN_UP(arg->strs_len);
@@ -1023,10 +1023,13 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
long nr_pinned_stat;
long nr_pinned_info;
long nr_pinned_strs;
- struct page *pages_stat[ARRAY_SIZE(pdesc->statPPNs)];
- struct page *pages_info[ARRAY_SIZE(pdesc->infoPPNs)];
- struct page *pages_strs[ARRAY_SIZE(pdesc->strsPPNs)];
+ MKSGuestStatInstanceDescriptor *pdesc;
+ struct page *page = NULL;
+ struct page **pages_stat = NULL;
+ struct page **pages_info = NULL;
+ struct page **pages_strs = NULL;
size_t i, slot;
+ int ret_err = -ENOMEM;
arg->id = -1;
@@ -1054,13 +1057,23 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
BUG_ON(dev_priv->mksstat_user_pages[slot]);
+	/* Allocate statically-sized temp arrays for pages -- too large for the stack frame */
+ pages_stat = (struct page **)kmalloc_array(
+ ARRAY_SIZE(pdesc->statPPNs) +
+ ARRAY_SIZE(pdesc->infoPPNs) +
+ ARRAY_SIZE(pdesc->strsPPNs), sizeof(*pages_stat), GFP_KERNEL);
+
+ if (!pages_stat)
+ goto err_nomem;
+
+ pages_info = pages_stat + ARRAY_SIZE(pdesc->statPPNs);
+ pages_strs = pages_info + ARRAY_SIZE(pdesc->infoPPNs);
+
/* Allocate a page for the instance descriptor */
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page) {
- atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
- return -ENOMEM;
- }
+ if (!page)
+ goto err_nomem;
/* Set up the instance descriptor */
pdesc = page_address(page);
@@ -1075,9 +1088,8 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
ARRAY_SIZE(pdesc->description) - 1);
if (desc_len < 0) {
- atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
- __free_page(page);
- return -EFAULT;
+ ret_err = -EFAULT;
+ goto err_nomem;
}
reset_ppn_array(pdesc->statPPNs, ARRAY_SIZE(pdesc->statPPNs));
@@ -1118,6 +1130,7 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
DRM_DEV_INFO(dev->dev, "pid=%d arg.description='%.*s' id=%zu\n", current->pid, (int)desc_len, pdesc->description, slot);
+ kfree(pages_stat);
return 0;
err_pin_strs:
@@ -1132,9 +1145,13 @@ err_pin_stat:
if (nr_pinned_stat > 0)
unpin_user_pages(pages_stat, nr_pinned_stat);
+err_nomem:
atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
- __free_page(page);
- return -ENOMEM;
+ if (page)
+ __free_page(page);
+ kfree(pages_stat);
+
+ return ret_err;
}
/**
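
The ioctl rework above replaces three page-pointer arrays that previously lived in the stack frame with a single heap allocation carved into three views, so one kfree() on any exit path releases them all. A minimal sketch; n_stat, n_info and n_strs stand in for the ARRAY_SIZE(pdesc->...PPNs) bounds:

/* one allocation, three logical arrays */
struct page **pages_stat = kmalloc_array(n_stat + n_info + n_strs,
					 sizeof(*pages_stat), GFP_KERNEL);
struct page **pages_info, **pages_strs;

if (!pages_stat)
	return -ENOMEM;

pages_info = pages_stat + n_stat;	/* second view */
pages_strs = pages_info + n_info;	/* third view */

/* ... pin user pages into the three views ... */

kfree(pages_stat);	/* single free covers all three */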
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 7bc99b1279f7..f41f041559f4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -230,7 +230,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- pgoff_t num_pages = vbo->base.resource->num_pages;
+ pgoff_t num_pages = PFN_UP(vbo->base.resource->size);
size_t size;
int ret;
@@ -395,7 +395,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
return ret;
page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
- if (unlikely(page_offset >= bo->resource->num_pages)) {
+ if (unlikely(page_offset >= PFN_UP(bo->resource->size))) {
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
@@ -438,7 +438,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
page_offset = vmf->pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (page_offset >= bo->resource->num_pages ||
+ if (page_offset >= PFN_UP(bo->resource->size) ||
vmw_resources_clean(vbo, page_offset,
page_offset + PAGE_SIZE,
&allowed_prefault)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index ecd3c2fc978b..8db61c541a80 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -29,7 +29,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_vblank.h>
#include "vmwgfx_kms.h"
@@ -320,9 +319,6 @@ static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
- .get_vblank_counter = vmw_get_vblank_counter,
- .enable_vblank = vmw_enable_vblank,
- .disable_vblank = vmw_disable_vblank,
};
/*
@@ -730,7 +726,6 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_crtc *crtc = new_state->crtc;
- struct drm_pending_vblank_event *event = NULL;
struct vmw_fence_obj *fence = NULL;
int ret;
@@ -754,24 +749,6 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
return;
}
- /* For error case vblank event is send from vmw_du_crtc_atomic_flush */
- event = crtc->state->event;
- if (event && fence) {
- struct drm_file *file_priv = event->base.file_priv;
-
- ret = vmw_event_fence_action_queue(file_priv,
- fence,
- &event->base,
- &event->event.vbl.tv_sec,
- &event->event.vbl.tv_usec,
- true);
-
- if (unlikely(ret != 0))
- DRM_ERROR("Failed to queue event on fence.\n");
- else
- crtc->state->event = NULL;
- }
-
if (fence)
vmw_fence_obj_unreference(&fence);
}
@@ -947,18 +924,12 @@ err_free:
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
- int i, ret;
+ int i;
if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
return -ENOSYS;
}
- ret = -ENOMEM;
-
- ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
- if (unlikely(ret != 0))
- return ret;
-
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
vmw_sou_init(dev_priv, i);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 8650c3aea8f0..0090abe89254 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -29,7 +29,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_vblank.h>
#include "vmwgfx_kms.h"
#include "vmw_surface_cache.h"
@@ -925,9 +924,6 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
- .get_vblank_counter = vmw_get_vblank_counter,
- .enable_vblank = vmw_enable_vblank,
- .disable_vblank = vmw_disable_vblank,
};
@@ -1591,7 +1587,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
struct drm_crtc *crtc = new_state->crtc;
struct vmw_screen_target_display_unit *stdu;
- struct drm_pending_vblank_event *event;
struct vmw_fence_obj *fence = NULL;
struct vmw_private *dev_priv;
int ret;
@@ -1640,23 +1635,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
return;
}
- /* In case of error, vblank event is send in vmw_du_crtc_atomic_flush */
- event = crtc->state->event;
- if (event && fence) {
- struct drm_file *file_priv = event->base.file_priv;
-
- ret = vmw_event_fence_action_queue(file_priv,
- fence,
- &event->base,
- &event->event.vbl.tv_sec,
- &event->event.vbl.tv_usec,
- true);
- if (ret)
- DRM_ERROR("Failed to queue event on fence.\n");
- else
- crtc->state->event = NULL;
- }
-
if (fence)
vmw_fence_obj_unreference(&fence);
}
@@ -1883,10 +1861,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
return -ENOSYS;
- ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
- if (unlikely(ret != 0))
- return ret;
-
dev_priv->active_display_unit = vmw_du_screen_target;
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index ace7ca150b03..3bc63ae768f3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -815,11 +815,15 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
res->backup_size = cur_bo_offset;
if (metadata->scanout &&
metadata->num_sizes == 1 &&
- metadata->sizes[0].width == 64 &&
- metadata->sizes[0].height == 64 &&
- metadata->format == SVGA3D_A8R8G8B8) {
-
- srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
+ metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
+ metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
+ metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+ const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
+ VMW_CURSOR_SNOOP_HEIGHT *
+ desc->pitchBytesPerBlock;
+ srf->snooper.image = kzalloc(cursor_size_bytes, GFP_KERNEL);
if (!srf->snooper.image) {
DRM_ERROR("Failed to allocate cursor_image\n");
ret = -ENOMEM;
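Note: the snooper allocation is now derived from the snoop format descriptor instead of being hard-coded. Assuming the new macros keep the legacy values (the old code allocated 64 * 64 * 4 bytes for SVGA3D_A8R8G8B8), the computed size is unchanged:

    /* Assuming VMW_CURSOR_SNOOP_{WIDTH,HEIGHT} stay 64 and the format
     * stays A8R8G8B8 (4 bytes per pixel block): */
    cursor_size_bytes = 64 * 64 * 4;    /* = 16384 bytes (16 KiB) */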
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index f46891012be3..f5c4a40fb16d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2018 - 2022 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -180,11 +180,16 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
if (!ctx->merge_dups)
return NULL;
- if (ctx->ht) {
+ if (ctx->sw_context) {
struct vmwgfx_hash_item *hash;
+ unsigned long key = (unsigned long) vbo;
- if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
- bo_node = container_of(hash, typeof(*bo_node), hash);
+ hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) {
+ if (hash->key == key) {
+ bo_node = container_of(hash, typeof(*bo_node), hash);
+ break;
+ }
+ }
} else {
struct vmw_validation_bo_node *entry;
@@ -217,11 +222,16 @@ vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
if (!ctx->merge_dups)
return NULL;
- if (ctx->ht) {
+ if (ctx->sw_context) {
struct vmwgfx_hash_item *hash;
+ unsigned long key = (unsigned long) res;
- if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) res, &hash))
- res_node = container_of(hash, typeof(*res_node), hash);
+ hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) {
+ if (hash->key == key) {
+ res_node = container_of(hash, typeof(*res_node), hash);
+ break;
+ }
+ }
} else {
struct vmw_validation_res_node *entry;
@@ -269,20 +279,15 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
}
} else {
struct ttm_validate_buffer *val_buf;
- int ret;
bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
if (!bo_node)
return -ENOMEM;
- if (ctx->ht) {
+ if (ctx->sw_context) {
bo_node->hash.key = (unsigned long) vbo;
- ret = vmwgfx_ht_insert_item(ctx->ht, &bo_node->hash);
- if (ret) {
- DRM_ERROR("Failed to initialize a buffer "
- "validation entry.\n");
- return ret;
- }
+ hash_add_rcu(ctx->sw_context->res_ht, &bo_node->hash.head,
+ bo_node->hash.key);
}
val_buf = &bo_node->base;
val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
@@ -316,7 +321,6 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
bool *first_usage)
{
struct vmw_validation_res_node *node;
- int ret;
node = vmw_validation_find_res_dup(ctx, res);
if (node) {
@@ -330,14 +334,9 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
return -ENOMEM;
}
- if (ctx->ht) {
+ if (ctx->sw_context) {
node->hash.key = (unsigned long) res;
- ret = vmwgfx_ht_insert_item(ctx->ht, &node->hash);
- if (ret) {
- DRM_ERROR("Failed to initialize a resource validation "
- "entry.\n");
- return ret;
- }
+ hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key);
}
node->res = vmw_resource_reference_unless_doomed(res);
if (!node->res)
@@ -681,19 +680,19 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
struct vmw_validation_bo_node *entry;
struct vmw_validation_res_node *val;
- if (!ctx->ht)
+ if (!ctx->sw_context)
return;
list_for_each_entry(entry, &ctx->bo_list, base.head)
- (void) vmwgfx_ht_remove_item(ctx->ht, &entry->hash);
+ hash_del_rcu(&entry->hash.head);
list_for_each_entry(val, &ctx->resource_list, head)
- (void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
+ hash_del_rcu(&val->hash.head);
list_for_each_entry(val, &ctx->resource_ctx_list, head)
- (void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
+ hash_del_rcu(&val->hash.head);
- ctx->ht = NULL;
+ ctx->sw_context = NULL;
}
/**
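Note: the conversion above replaces the driver-private vmwgfx_hashtab with the kernel's generic <linux/hashtable.h>, which chains colliding keys in per-bucket hlists, so lookups must walk the bucket and compare keys explicitly. A minimal self-contained sketch of the pattern; struct node, table, and lookup are illustrative names, not vmwgfx's:

    #include <linux/hashtable.h>

    struct node {
        unsigned long key;
        struct hlist_node head;
    };

    DEFINE_HASHTABLE(table, 6);    /* 2^6 buckets */

    static struct node *lookup(unsigned long key)
    {
        struct node *n;

        /* Buckets can hold colliding keys, hence the explicit compare. */
        hash_for_each_possible_rcu(table, n, head, key)
            if (n->key == key)
                return n;
        return NULL;
    }

    /* insert: hash_add_rcu(table, &n->head, n->key);
     * remove: hash_del_rcu(&n->head); */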
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index f21df053882b..ab9ec226f433 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2018 - 2022 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,12 +29,11 @@
#define _VMWGFX_VALIDATION_H_
#include <linux/list.h>
+#include <linux/hashtable.h>
#include <linux/ww_mutex.h>
#include <drm/ttm/ttm_execbuf_util.h>
-#include "vmwgfx_hashtab.h"
-
#define VMW_RES_DIRTY_NONE 0
#define VMW_RES_DIRTY_SET BIT(0)
#define VMW_RES_DIRTY_CLEAR BIT(1)
@@ -59,7 +58,7 @@
* @total_mem: Amount of reserved memory.
*/
struct vmw_validation_context {
- struct vmwgfx_open_hash *ht;
+ struct vmw_sw_context *sw_context;
struct list_head resource_list;
struct list_head resource_ctx_list;
struct list_head bo_list;
@@ -82,16 +81,16 @@ struct vmw_fence_obj;
/**
* DECLARE_VAL_CONTEXT - Declare a validation context with initialization
* @_name: The name of the variable
- * @_ht: The hash table used to find dups or NULL if none
+ * @_sw_context: Contains the hash table used to find dups or NULL if none
* @_merge_dups: Whether to merge duplicate buffer object- or resource
* entries. If set to true, ideally a hash table pointer should be supplied
* as well unless the number of resources and buffer objects per validation
* is known to be very small
*/
#endif
-#define DECLARE_VAL_CONTEXT(_name, _ht, _merge_dups) \
+#define DECLARE_VAL_CONTEXT(_name, _sw_context, _merge_dups) \
struct vmw_validation_context _name = \
- { .ht = _ht, \
+ { .sw_context = _sw_context, \
.resource_list = LIST_HEAD_INIT((_name).resource_list), \
.resource_ctx_list = LIST_HEAD_INIT((_name).resource_ctx_list), \
.bo_list = LIST_HEAD_INIT((_name).bo_list), \
@@ -115,19 +114,6 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
}
/**
- * vmw_validation_set_ht - Register a hash table for duplicate finding
- * @ctx: The validation context
- * @ht: Pointer to a hash table to use for duplicate finding
- * This function is intended to be used if the hash table wasn't
- * available at validation context declaration time
- */
-static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx,
- struct vmwgfx_open_hash *ht)
-{
- ctx->ht = ht;
-}
-
-/**
* vmw_validation_bo_reserve - Reserve buffer objects registered with a
* validation context
* @ctx: The validation context
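Note: a validation context now carries the sw_context (whose res_ht supplies the hash buckets) instead of a raw hash table pointer. A hypothetical caller, sketched for illustration:

    /* Hypothetical caller; sw_context is the driver's command
     * submission state carrying the res_ht buckets. */
    DECLARE_VAL_CONTEXT(val_ctx, sw_context, true);

    /* ... add buffer objects and resources to val_ctx ... */

    vmw_validation_drop_ht(&val_ctx);    /* unhash all entries */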
diff --git a/drivers/gpu/drm/xlnx/Makefile b/drivers/gpu/drm/xlnx/Makefile
index 51c24b72217b..ea1422a39502 100644
--- a/drivers/gpu/drm/xlnx/Makefile
+++ b/drivers/gpu/drm/xlnx/Makefile
@@ -1,2 +1,2 @@
-zynqmp-dpsub-y := zynqmp_disp.o zynqmp_dpsub.o zynqmp_dp.o
+zynqmp-dpsub-y := zynqmp_disp.o zynqmp_dpsub.o zynqmp_dp.o zynqmp_kms.o
obj-$(CONFIG_DRM_ZYNQMP_DPSUB) += zynqmp-dpsub.o
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index bbb365f2d087..3b87eebddc97 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -9,29 +9,19 @@
* - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_atomic_uapi.h>
-#include <drm/drm_blend.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_device.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_managed.h>
#include <drm/drm_plane.h>
-#include <drm/drm_vblank.h>
#include <linux/clk.h>
-#include <linux/delay.h>
#include <linux/dma/xilinx_dpdma.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/spinlock.h>
+#include <linux/slab.h>
#include "zynqmp_disp.h"
#include "zynqmp_disp_regs.h"
@@ -72,46 +62,23 @@
#define ZYNQMP_DISP_AV_BUF_NUM_VID_GFX_BUFFERS 4
#define ZYNQMP_DISP_AV_BUF_NUM_BUFFERS 6
-#define ZYNQMP_DISP_NUM_LAYERS 2
#define ZYNQMP_DISP_MAX_NUM_SUB_PLANES 3
/**
* struct zynqmp_disp_format - Display subsystem format information
* @drm_fmt: DRM format (4CC)
* @buf_fmt: AV buffer format
- * @bus_fmt: Media bus formats (live formats)
* @swap: Flag to swap R & B for RGB formats, and U & V for YUV formats
* @sf: Scaling factors for color components
*/
struct zynqmp_disp_format {
u32 drm_fmt;
u32 buf_fmt;
- u32 bus_fmt;
bool swap;
const u32 *sf;
};
/**
- * enum zynqmp_disp_layer_id - Layer identifier
- * @ZYNQMP_DISP_LAYER_VID: Video layer
- * @ZYNQMP_DISP_LAYER_GFX: Graphics layer
- */
-enum zynqmp_disp_layer_id {
- ZYNQMP_DISP_LAYER_VID,
- ZYNQMP_DISP_LAYER_GFX
-};
-
-/**
- * enum zynqmp_disp_layer_mode - Layer mode
- * @ZYNQMP_DISP_LAYER_NONLIVE: non-live (memory) mode
- * @ZYNQMP_DISP_LAYER_LIVE: live (stream) mode
- */
-enum zynqmp_disp_layer_mode {
- ZYNQMP_DISP_LAYER_NONLIVE,
- ZYNQMP_DISP_LAYER_LIVE
-};
-
-/**
* struct zynqmp_disp_layer_dma - DMA channel for one data plane of a layer
* @chan: DMA channel
* @xt: Interleaved DMA descriptor template
@@ -136,8 +103,7 @@ struct zynqmp_disp_layer_info {
};
/**
- * struct zynqmp_disp_layer - Display layer (DRM plane)
- * @plane: DRM plane
+ * struct zynqmp_disp_layer - Display layer
* @id: Layer ID
* @disp: Back pointer to struct zynqmp_disp
* @info: Static layer information
@@ -147,8 +113,7 @@ struct zynqmp_disp_layer_info {
* @mode: Current operation mode
*/
struct zynqmp_disp_layer {
- struct drm_plane plane;
- enum zynqmp_disp_layer_id id;
+ enum zynqmp_dpsub_layer_id id;
struct zynqmp_disp *disp;
const struct zynqmp_disp_layer_info *info;
@@ -156,32 +121,22 @@ struct zynqmp_disp_layer {
const struct zynqmp_disp_format *disp_fmt;
const struct drm_format_info *drm_fmt;
- enum zynqmp_disp_layer_mode mode;
+ enum zynqmp_dpsub_layer_mode mode;
};
/**
* struct zynqmp_disp - Display controller
* @dev: Device structure
- * @drm: DRM core
* @dpsub: Display subsystem
- * @crtc: DRM CRTC
* @blend.base: Register I/O base address for the blender
* @avbuf.base: Register I/O base address for the audio/video buffer manager
* @audio.base: Registers I/O base address for the audio mixer
- * @audio.clk: Audio clock
- * @audio.clk_from_ps: True of the audio clock comes from PS, false from PL
* @layers: Layers (planes)
- * @event: Pending vblank event request
- * @pclk: Pixel clock
- * @pclk_from_ps: True of the video clock comes from PS, false from PL
*/
struct zynqmp_disp {
struct device *dev;
- struct drm_device *drm;
struct zynqmp_dpsub *dpsub;
- struct drm_crtc crtc;
-
struct {
void __iomem *base;
} blend;
@@ -190,16 +145,9 @@ struct zynqmp_disp {
} avbuf;
struct {
void __iomem *base;
- struct clk *clk;
- bool clk_from_ps;
} audio;
- struct zynqmp_disp_layer layers[ZYNQMP_DISP_NUM_LAYERS];
-
- struct drm_pending_vblank_event *event;
-
- struct clk *pclk;
- bool pclk_from_ps;
+ struct zynqmp_disp_layer layers[ZYNQMP_DPSUB_NUM_LAYERS];
};
/* -----------------------------------------------------------------------------
@@ -416,14 +364,9 @@ static void zynqmp_disp_avbuf_write(struct zynqmp_disp *disp, int reg, u32 val)
writel(val, disp->avbuf.base + reg);
}
-static bool zynqmp_disp_layer_is_gfx(const struct zynqmp_disp_layer *layer)
-{
- return layer->id == ZYNQMP_DISP_LAYER_GFX;
-}
-
static bool zynqmp_disp_layer_is_video(const struct zynqmp_disp_layer *layer)
{
- return layer->id == ZYNQMP_DISP_LAYER_VID;
+ return layer->id == ZYNQMP_DPSUB_LAYER_VID;
}
/**
@@ -566,27 +509,25 @@ static void zynqmp_disp_avbuf_disable_audio(struct zynqmp_disp *disp)
* zynqmp_disp_avbuf_enable_video - Enable a video layer
* @disp: Display controller
* @layer: The layer
- * @mode: Operating mode of layer
*
* Enable the video/graphics buffer for @layer.
*/
static void zynqmp_disp_avbuf_enable_video(struct zynqmp_disp *disp,
- struct zynqmp_disp_layer *layer,
- enum zynqmp_disp_layer_mode mode)
+ struct zynqmp_disp_layer *layer)
{
u32 val;
val = zynqmp_disp_avbuf_read(disp, ZYNQMP_DISP_AV_BUF_OUTPUT);
if (zynqmp_disp_layer_is_video(layer)) {
val &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK;
- if (mode == ZYNQMP_DISP_LAYER_NONLIVE)
+ if (layer->mode == ZYNQMP_DPSUB_LAYER_NONLIVE)
val |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MEM;
else
val |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_LIVE;
} else {
val &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MASK;
val |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MEM;
- if (mode == ZYNQMP_DISP_LAYER_NONLIVE)
+ if (layer->mode == ZYNQMP_DPSUB_LAYER_NONLIVE)
val |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MEM;
else
val |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_LIVE;
@@ -758,8 +699,8 @@ static void zynqmp_disp_blend_set_bg_color(struct zynqmp_disp *disp,
* @enable: True to enable global alpha blending
* @alpha: Global alpha value (ignored if @enabled is false)
*/
-static void zynqmp_disp_blend_set_global_alpha(struct zynqmp_disp *disp,
- bool enable, u32 alpha)
+void zynqmp_disp_blend_set_global_alpha(struct zynqmp_disp *disp,
+ bool enable, u32 alpha)
{
zynqmp_disp_blend_write(disp, ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA,
ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_VALUE(alpha) |
@@ -902,80 +843,6 @@ static void zynqmp_disp_audio_disable(struct zynqmp_disp *disp)
ZYNQMP_DISP_AUD_SOFT_RESET_AUD_SRST);
}
-static void zynqmp_disp_audio_init(struct zynqmp_disp *disp)
-{
- /* Try the live PL audio clock. */
- disp->audio.clk = devm_clk_get(disp->dev, "dp_live_audio_aclk");
- if (!IS_ERR(disp->audio.clk)) {
- disp->audio.clk_from_ps = false;
- return;
- }
-
- /* If the live PL audio clock is not valid, fall back to PS clock. */
- disp->audio.clk = devm_clk_get(disp->dev, "dp_aud_clk");
- if (!IS_ERR(disp->audio.clk)) {
- disp->audio.clk_from_ps = true;
- return;
- }
-
- dev_err(disp->dev, "audio disabled due to missing clock\n");
-}
-
-/* -----------------------------------------------------------------------------
- * ZynqMP Display external functions for zynqmp_dp
- */
-
-/**
- * zynqmp_disp_handle_vblank - Handle the vblank event
- * @disp: Display controller
- *
- * This function handles the vblank interrupt, and sends an event to
- * CRTC object. This will be called by the DP vblank interrupt handler.
- */
-void zynqmp_disp_handle_vblank(struct zynqmp_disp *disp)
-{
- struct drm_crtc *crtc = &disp->crtc;
-
- drm_crtc_handle_vblank(crtc);
-}
-
-/**
- * zynqmp_disp_audio_enabled - If the audio is enabled
- * @disp: Display controller
- *
- * Return if the audio is enabled depending on the audio clock.
- *
- * Return: true if audio is enabled, or false.
- */
-bool zynqmp_disp_audio_enabled(struct zynqmp_disp *disp)
-{
- return !!disp->audio.clk;
-}
-
-/**
- * zynqmp_disp_get_audio_clk_rate - Get the current audio clock rate
- * @disp: Display controller
- *
- * Return: the current audio clock rate.
- */
-unsigned int zynqmp_disp_get_audio_clk_rate(struct zynqmp_disp *disp)
-{
- if (zynqmp_disp_audio_enabled(disp))
- return 0;
- return clk_get_rate(disp->audio.clk);
-}
-
-/**
- * zynqmp_disp_get_crtc_mask - Return the CRTC bit mask
- * @disp: Display controller
- *
- * Return: the crtc mask of the zyqnmp_disp CRTC.
- */
-uint32_t zynqmp_disp_get_crtc_mask(struct zynqmp_disp *disp)
-{
- return drm_crtc_mask(&disp->crtc);
-}
-
/* -----------------------------------------------------------------------------
* ZynqMP Display Layer & DRM Plane
*/
@@ -1006,19 +873,46 @@ zynqmp_disp_layer_find_format(struct zynqmp_disp_layer *layer,
}
/**
+ * zynqmp_disp_layer_drm_formats - Return the DRM formats supported by the layer
+ * @layer: The layer
+ * @num_formats: Pointer to the returned number of formats
+ *
+ * Return: A newly allocated u32 array that stores all the DRM formats
+ * supported by the layer. The number of formats in the array is returned
+ * through the num_formats argument.
+ */
+u32 *zynqmp_disp_layer_drm_formats(struct zynqmp_disp_layer *layer,
+ unsigned int *num_formats)
+{
+ unsigned int i;
+ u32 *formats;
+
+ formats = kcalloc(layer->info->num_formats, sizeof(*formats),
+ GFP_KERNEL);
+ if (!formats)
+ return NULL;
+
+ for (i = 0; i < layer->info->num_formats; ++i)
+ formats[i] = layer->info->formats[i].drm_fmt;
+
+ *num_formats = layer->info->num_formats;
+ return formats;
+}
+
+/**
* zynqmp_disp_layer_enable - Enable a layer
* @layer: The layer
+ * @mode: Operating mode of layer
*
* Enable the @layer in the audio/video buffer manager and the blender. DMA
* channels are started separately by zynqmp_disp_layer_update().
*/
-static void zynqmp_disp_layer_enable(struct zynqmp_disp_layer *layer)
+void zynqmp_disp_layer_enable(struct zynqmp_disp_layer *layer,
+ enum zynqmp_dpsub_layer_mode mode)
{
- zynqmp_disp_avbuf_enable_video(layer->disp, layer,
- ZYNQMP_DISP_LAYER_NONLIVE);
+ layer->mode = mode;
+ zynqmp_disp_avbuf_enable_video(layer->disp, layer);
zynqmp_disp_blend_layer_enable(layer->disp, layer);
-
- layer->mode = ZYNQMP_DISP_LAYER_NONLIVE;
}
/**
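Note: zynqmp_disp_layer_drm_formats() hands the DRM plane code (now in zynqmp_kms.c, which this diff does not show) an allocated format list that the caller owns. A plausible caller, sketched under the assumption that plane initialization moved there largely unchanged:

    unsigned int num_formats;
    u32 *formats;

    formats = zynqmp_disp_layer_drm_formats(layer, &num_formats);
    if (!formats)
        return -ENOMEM;

    ret = drm_universal_plane_init(drm, plane, 0, &funcs, formats,
                                   num_formats, NULL, type, NULL);
    kfree(formats);    /* the layer keeps no reference to the array */
    if (ret)
        return ret;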
@@ -1028,12 +922,14 @@ static void zynqmp_disp_layer_enable(struct zynqmp_disp_layer *layer)
* Disable the layer by stopping its DMA channels and disabling it in the
* audio/video buffer manager and the blender.
*/
-static void zynqmp_disp_layer_disable(struct zynqmp_disp_layer *layer)
+void zynqmp_disp_layer_disable(struct zynqmp_disp_layer *layer)
{
unsigned int i;
- for (i = 0; i < layer->drm_fmt->num_planes; i++)
- dmaengine_terminate_sync(layer->dmas[i].chan);
+ if (layer->disp->dpsub->dma_enabled) {
+ for (i = 0; i < layer->drm_fmt->num_planes; i++)
+ dmaengine_terminate_sync(layer->dmas[i].chan);
+ }
zynqmp_disp_avbuf_disable_video(layer->disp, layer);
zynqmp_disp_blend_layer_disable(layer->disp, layer);
@@ -1042,15 +938,13 @@ static void zynqmp_disp_layer_disable(struct zynqmp_disp_layer *layer)
/**
* zynqmp_disp_layer_set_format - Set the layer format
* @layer: The layer
- * @state: The plane state
+ * @info: The format info
*
- * Set the format for @layer based on @state->fb->format. The layer must be
- * disabled.
+ * Set the format for @layer to @info. The layer must be disabled.
*/
-static void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
- struct drm_plane_state *state)
+void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
+ const struct drm_format_info *info)
{
- const struct drm_format_info *info = state->fb->format;
unsigned int i;
layer->disp_fmt = zynqmp_disp_layer_find_format(layer, info->format);
@@ -1058,6 +952,9 @@ static void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
zynqmp_disp_avbuf_set_format(layer->disp, layer, layer->disp_fmt);
+ if (!layer->disp->dpsub->dma_enabled)
+ return;
+
/*
* Set pconfig for each DMA channel to indicate they're part of a
* video group.
@@ -1087,13 +984,16 @@ static void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
*
* Return: 0 on success, or the DMA descriptor failure error otherwise
*/
-static int zynqmp_disp_layer_update(struct zynqmp_disp_layer *layer,
- struct drm_plane_state *state)
+int zynqmp_disp_layer_update(struct zynqmp_disp_layer *layer,
+ struct drm_plane_state *state)
{
const struct drm_format_info *info = layer->drm_fmt;
unsigned int i;
- for (i = 0; i < layer->drm_fmt->num_planes; i++) {
+ if (!layer->disp->dpsub->dma_enabled)
+ return 0;
+
+ for (i = 0; i < info->num_planes; i++) {
unsigned int width = state->crtc_w / (i ? info->hsub : 1);
unsigned int height = state->crtc_h / (i ? info->vsub : 1);
struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
@@ -1128,143 +1028,6 @@ static int zynqmp_disp_layer_update(struct zynqmp_disp_layer *layer,
return 0;
}
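Note: the per-plane width/height computation above divides by the format's subsampling factors for every plane after the first. A worked example, assuming a planar YUV 4:2:2 framebuffer:

    /* DRM_FORMAT_YUV422: 3 planes, hsub = 2, vsub = 1.
     * With crtc_w = 1920 and crtc_h = 1080:
     *   plane 0 (Y): 1920 x 1080
     *   plane 1 (U): 1920 / 2 x 1080 = 960 x 1080
     *   plane 2 (V): 960 x 1080
     */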
-static inline struct zynqmp_disp_layer *plane_to_layer(struct drm_plane *plane)
-{
- return container_of(plane, struct zynqmp_disp_layer, plane);
-}
-
-static int
-zynqmp_disp_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_crtc_state *crtc_state;
-
- if (!new_plane_state->crtc)
- return 0;
-
- crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- return drm_atomic_helper_check_plane_state(new_plane_state,
- crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- false, false);
-}
-
-static void
-zynqmp_disp_plane_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
- plane);
- struct zynqmp_disp_layer *layer = plane_to_layer(plane);
-
- if (!old_state->fb)
- return;
-
- zynqmp_disp_layer_disable(layer);
-
- if (zynqmp_disp_layer_is_gfx(layer))
- zynqmp_disp_blend_set_global_alpha(layer->disp, false,
- plane->state->alpha >> 8);
-}
-
-static void
-zynqmp_disp_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
- struct zynqmp_disp_layer *layer = plane_to_layer(plane);
- bool format_changed = false;
-
- if (!old_state->fb ||
- old_state->fb->format->format != new_state->fb->format->format)
- format_changed = true;
-
- /*
- * If the format has changed (including going from a previously
- * disabled state to any format), reconfigure the format. Disable the
- * plane first if needed.
- */
- if (format_changed) {
- if (old_state->fb)
- zynqmp_disp_layer_disable(layer);
-
- zynqmp_disp_layer_set_format(layer, new_state);
- }
-
- zynqmp_disp_layer_update(layer, new_state);
-
- if (zynqmp_disp_layer_is_gfx(layer))
- zynqmp_disp_blend_set_global_alpha(layer->disp, true,
- plane->state->alpha >> 8);
-
- /* Enable or re-enable the plane is the format has changed. */
- if (format_changed)
- zynqmp_disp_layer_enable(layer);
-}
-
-static const struct drm_plane_helper_funcs zynqmp_disp_plane_helper_funcs = {
- .atomic_check = zynqmp_disp_plane_atomic_check,
- .atomic_update = zynqmp_disp_plane_atomic_update,
- .atomic_disable = zynqmp_disp_plane_atomic_disable,
-};
-
-static const struct drm_plane_funcs zynqmp_disp_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
-
-static int zynqmp_disp_create_planes(struct zynqmp_disp *disp)
-{
- unsigned int i, j;
- int ret;
-
- for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
- struct zynqmp_disp_layer *layer = &disp->layers[i];
- enum drm_plane_type type;
- u32 *drm_formats;
-
- drm_formats = drmm_kcalloc(disp->drm, sizeof(*drm_formats),
- layer->info->num_formats,
- GFP_KERNEL);
- if (!drm_formats)
- return -ENOMEM;
-
- for (j = 0; j < layer->info->num_formats; ++j)
- drm_formats[j] = layer->info->formats[j].drm_fmt;
-
- /* Graphics layer is primary, and video layer is overlay. */
- type = zynqmp_disp_layer_is_video(layer)
- ? DRM_PLANE_TYPE_OVERLAY : DRM_PLANE_TYPE_PRIMARY;
- ret = drm_universal_plane_init(disp->drm, &layer->plane, 0,
- &zynqmp_disp_plane_funcs,
- drm_formats,
- layer->info->num_formats,
- NULL, type, NULL);
- if (ret)
- return ret;
-
- drm_plane_helper_add(&layer->plane,
- &zynqmp_disp_plane_helper_funcs);
-
- drm_plane_create_zpos_immutable_property(&layer->plane, i);
- if (zynqmp_disp_layer_is_gfx(layer))
- drm_plane_create_alpha_property(&layer->plane);
- }
-
- return 0;
-}
-
/**
* zynqmp_disp_layer_release_dma - Release DMA channels for a layer
* @disp: Display controller
@@ -1277,7 +1040,7 @@ static void zynqmp_disp_layer_release_dma(struct zynqmp_disp *disp,
{
unsigned int i;
- if (!layer->info)
+ if (!layer->info || !disp->dpsub->dma_enabled)
return;
for (i = 0; i < layer->info->num_channels; i++) {
@@ -1300,7 +1063,7 @@ static void zynqmp_disp_destroy_layers(struct zynqmp_disp *disp)
{
unsigned int i;
- for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++)
+ for (i = 0; i < ARRAY_SIZE(disp->layers); i++)
zynqmp_disp_layer_release_dma(disp, &disp->layers[i]);
}
@@ -1320,6 +1083,9 @@ static int zynqmp_disp_layer_request_dma(struct zynqmp_disp *disp,
unsigned int i;
int ret;
+ if (!disp->dpsub->dma_enabled)
+ return 0;
+
for (i = 0; i < layer->info->num_channels; i++) {
struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
char dma_channel_name[16];
@@ -1347,12 +1113,12 @@ static int zynqmp_disp_layer_request_dma(struct zynqmp_disp *disp,
static int zynqmp_disp_create_layers(struct zynqmp_disp *disp)
{
static const struct zynqmp_disp_layer_info layer_info[] = {
- [ZYNQMP_DISP_LAYER_VID] = {
+ [ZYNQMP_DPSUB_LAYER_VID] = {
.formats = avbuf_vid_fmts,
.num_formats = ARRAY_SIZE(avbuf_vid_fmts),
.num_channels = 3,
},
- [ZYNQMP_DISP_LAYER_GFX] = {
+ [ZYNQMP_DPSUB_LAYER_GFX] = {
.formats = avbuf_gfx_fmts,
.num_formats = ARRAY_SIZE(avbuf_gfx_fmts),
.num_channels = 1,
@@ -1362,7 +1128,7 @@ static int zynqmp_disp_create_layers(struct zynqmp_disp *disp)
unsigned int i;
int ret;
- for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+ for (i = 0; i < ARRAY_SIZE(disp->layers); i++) {
struct zynqmp_disp_layer *layer = &disp->layers[i];
layer->id = i;
@@ -1372,6 +1138,8 @@ static int zynqmp_disp_create_layers(struct zynqmp_disp *disp)
ret = zynqmp_disp_layer_request_dma(disp, layer);
if (ret)
goto err;
+
+ disp->dpsub->layers[i] = layer;
}
return 0;
@@ -1382,19 +1150,23 @@ err:
}
/* -----------------------------------------------------------------------------
- * ZynqMP Display & DRM CRTC
+ * ZynqMP Display
*/
/**
* zynqmp_disp_enable - Enable the display controller
* @disp: Display controller
*/
-static void zynqmp_disp_enable(struct zynqmp_disp *disp)
+void zynqmp_disp_enable(struct zynqmp_disp *disp)
{
+ zynqmp_disp_blend_set_output_format(disp, ZYNQMP_DPSUB_FORMAT_RGB);
+ zynqmp_disp_blend_set_bg_color(disp, 0, 0, 0);
+
zynqmp_disp_avbuf_enable(disp);
/* Choose clock source based on the DT clock handle. */
- zynqmp_disp_avbuf_set_clocks_sources(disp, disp->pclk_from_ps,
- disp->audio.clk_from_ps, true);
+ zynqmp_disp_avbuf_set_clocks_sources(disp, disp->dpsub->vid_clk_from_ps,
+ disp->dpsub->aud_clk_from_ps,
+ true);
zynqmp_disp_avbuf_enable_channels(disp);
zynqmp_disp_avbuf_enable_audio(disp);
@@ -1405,7 +1177,7 @@ static void zynqmp_disp_enable(struct zynqmp_disp *disp)
* zynqmp_disp_disable - Disable the display controller
* @disp: Display controller
*/
-static void zynqmp_disp_disable(struct zynqmp_disp *disp)
+void zynqmp_disp_disable(struct zynqmp_disp *disp)
{
zynqmp_disp_audio_disable(disp);
@@ -1414,27 +1186,27 @@ static void zynqmp_disp_disable(struct zynqmp_disp *disp)
zynqmp_disp_avbuf_disable(disp);
}
-static inline struct zynqmp_disp *crtc_to_disp(struct drm_crtc *crtc)
-{
- return container_of(crtc, struct zynqmp_disp, crtc);
-}
-
-static int zynqmp_disp_crtc_setup_clock(struct drm_crtc *crtc,
- struct drm_display_mode *adjusted_mode)
+/**
+ * zynqmp_disp_setup_clock - Configure the display controller pixel clock rate
+ * @disp: Display controller
+ * @mode_clock: The pixel clock rate, in Hz
+ *
+ * Return: 0 on success, or a negative error code otherwise
+ */
+int zynqmp_disp_setup_clock(struct zynqmp_disp *disp,
+ unsigned long mode_clock)
{
- struct zynqmp_disp *disp = crtc_to_disp(crtc);
- unsigned long mode_clock = adjusted_mode->clock * 1000;
unsigned long rate;
long diff;
int ret;
- ret = clk_set_rate(disp->pclk, mode_clock);
+ ret = clk_set_rate(disp->dpsub->vid_clk, mode_clock);
if (ret) {
- dev_err(disp->dev, "failed to set a pixel clock\n");
+ dev_err(disp->dev, "failed to set the video clock\n");
return ret;
}
- rate = clk_get_rate(disp->pclk);
+ rate = clk_get_rate(disp->dpsub->vid_clk);
diff = rate - mode_clock;
if (abs(diff) > mode_clock / 20)
dev_info(disp->dev,
@@ -1448,245 +1220,63 @@ static int zynqmp_disp_crtc_setup_clock(struct drm_crtc *crtc,
return 0;
}
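Note: the dev_info() above fires when the achieved rate misses the requested rate by more than mode_clock / 20, i.e. a 5% tolerance. A worked example with an illustrative pixel clock:

    /* Illustrative values; 148.5 MHz is the standard 1080p60 pixel clock. */
    unsigned long mode_clock = 148500000;         /* Hz */
    unsigned long tolerance = mode_clock / 20;    /* 7425000 Hz, i.e. 5% */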
-static void
-zynqmp_disp_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- struct zynqmp_disp *disp = crtc_to_disp(crtc);
- struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
- int ret, vrefresh;
-
- pm_runtime_get_sync(disp->dev);
-
- zynqmp_disp_crtc_setup_clock(crtc, adjusted_mode);
-
- ret = clk_prepare_enable(disp->pclk);
- if (ret) {
- dev_err(disp->dev, "failed to enable a pixel clock\n");
- pm_runtime_put_sync(disp->dev);
- return;
- }
-
- zynqmp_disp_blend_set_output_format(disp, ZYNQMP_DPSUB_FORMAT_RGB);
- zynqmp_disp_blend_set_bg_color(disp, 0, 0, 0);
-
- zynqmp_disp_enable(disp);
-
- /* Delay of 3 vblank intervals for timing gen to be stable */
- vrefresh = (adjusted_mode->clock * 1000) /
- (adjusted_mode->vtotal * adjusted_mode->htotal);
- msleep(3 * 1000 / vrefresh);
-}
-
-static void
-zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- struct zynqmp_disp *disp = crtc_to_disp(crtc);
- struct drm_plane_state *old_plane_state;
-
- /*
- * Disable the plane if active. The old plane state can be NULL in the
- * .shutdown() path if the plane is already disabled, skip
- * zynqmp_disp_plane_atomic_disable() in that case.
- */
- old_plane_state = drm_atomic_get_old_plane_state(state, crtc->primary);
- if (old_plane_state)
- zynqmp_disp_plane_atomic_disable(crtc->primary, state);
-
- zynqmp_disp_disable(disp);
-
- drm_crtc_vblank_off(&disp->crtc);
-
- spin_lock_irq(&crtc->dev->event_lock);
- if (crtc->state->event) {
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- }
- spin_unlock_irq(&crtc->dev->event_lock);
-
- clk_disable_unprepare(disp->pclk);
- pm_runtime_put_sync(disp->dev);
-}
-
-static int zynqmp_disp_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- return drm_atomic_add_affected_planes(state, crtc);
-}
-
-static void
-zynqmp_disp_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- drm_crtc_vblank_on(crtc);
-}
-
-static void
-zynqmp_disp_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- if (crtc->state->event) {
- struct drm_pending_vblank_event *event;
-
- /* Consume the flip_done event from atomic helper. */
- event = crtc->state->event;
- crtc->state->event = NULL;
-
- event->pipe = drm_crtc_index(crtc);
-
- WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_arm_vblank_event(crtc, event);
- spin_unlock_irq(&crtc->dev->event_lock);
- }
-}
-
-static const struct drm_crtc_helper_funcs zynqmp_disp_crtc_helper_funcs = {
- .atomic_enable = zynqmp_disp_crtc_atomic_enable,
- .atomic_disable = zynqmp_disp_crtc_atomic_disable,
- .atomic_check = zynqmp_disp_crtc_atomic_check,
- .atomic_begin = zynqmp_disp_crtc_atomic_begin,
- .atomic_flush = zynqmp_disp_crtc_atomic_flush,
-};
-
-static int zynqmp_disp_crtc_enable_vblank(struct drm_crtc *crtc)
-{
- struct zynqmp_disp *disp = crtc_to_disp(crtc);
-
- zynqmp_dp_enable_vblank(disp->dpsub->dp);
-
- return 0;
-}
-
-static void zynqmp_disp_crtc_disable_vblank(struct drm_crtc *crtc)
-{
- struct zynqmp_disp *disp = crtc_to_disp(crtc);
-
- zynqmp_dp_disable_vblank(disp->dpsub->dp);
-}
-
-static const struct drm_crtc_funcs zynqmp_disp_crtc_funcs = {
- .destroy = drm_crtc_cleanup,
- .set_config = drm_atomic_helper_set_config,
- .page_flip = drm_atomic_helper_page_flip,
- .reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
- .enable_vblank = zynqmp_disp_crtc_enable_vblank,
- .disable_vblank = zynqmp_disp_crtc_disable_vblank,
-};
-
-static int zynqmp_disp_create_crtc(struct zynqmp_disp *disp)
-{
- struct drm_plane *plane = &disp->layers[ZYNQMP_DISP_LAYER_GFX].plane;
- int ret;
-
- ret = drm_crtc_init_with_planes(disp->drm, &disp->crtc, plane,
- NULL, &zynqmp_disp_crtc_funcs, NULL);
- if (ret < 0)
- return ret;
-
- drm_crtc_helper_add(&disp->crtc, &zynqmp_disp_crtc_helper_funcs);
-
- /* Start with vertical blanking interrupt reporting disabled. */
- drm_crtc_vblank_off(&disp->crtc);
-
- return 0;
-}
-
-static void zynqmp_disp_map_crtc_to_plane(struct zynqmp_disp *disp)
-{
- u32 possible_crtcs = drm_crtc_mask(&disp->crtc);
- unsigned int i;
-
- for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++)
- disp->layers[i].plane.possible_crtcs = possible_crtcs;
-}
-
/* -----------------------------------------------------------------------------
* Initialization & Cleanup
*/
-int zynqmp_disp_drm_init(struct zynqmp_dpsub *dpsub)
-{
- struct zynqmp_disp *disp = dpsub->disp;
- int ret;
-
- ret = zynqmp_disp_create_planes(disp);
- if (ret)
- return ret;
-
- ret = zynqmp_disp_create_crtc(disp);
- if (ret < 0)
- return ret;
-
- zynqmp_disp_map_crtc_to_plane(disp);
-
- return 0;
-}
-
-int zynqmp_disp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm)
+int zynqmp_disp_probe(struct zynqmp_dpsub *dpsub)
{
struct platform_device *pdev = to_platform_device(dpsub->dev);
struct zynqmp_disp *disp;
- struct zynqmp_disp_layer *layer;
struct resource *res;
int ret;
- disp = drmm_kzalloc(drm, sizeof(*disp), GFP_KERNEL);
+ disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
return -ENOMEM;
disp->dev = &pdev->dev;
disp->dpsub = dpsub;
- disp->drm = drm;
-
- dpsub->disp = disp;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "blend");
disp->blend.base = devm_ioremap_resource(disp->dev, res);
- if (IS_ERR(disp->blend.base))
- return PTR_ERR(disp->blend.base);
+ if (IS_ERR(disp->blend.base)) {
+ ret = PTR_ERR(disp->blend.base);
+ goto error;
+ }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "av_buf");
disp->avbuf.base = devm_ioremap_resource(disp->dev, res);
- if (IS_ERR(disp->avbuf.base))
- return PTR_ERR(disp->avbuf.base);
+ if (IS_ERR(disp->avbuf.base)) {
+ ret = PTR_ERR(disp->avbuf.base);
+ goto error;
+ }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aud");
disp->audio.base = devm_ioremap_resource(disp->dev, res);
- if (IS_ERR(disp->audio.base))
- return PTR_ERR(disp->audio.base);
-
- /* Try the live PL video clock */
- disp->pclk = devm_clk_get(disp->dev, "dp_live_video_in_clk");
- if (!IS_ERR(disp->pclk))
- disp->pclk_from_ps = false;
- else if (PTR_ERR(disp->pclk) == -EPROBE_DEFER)
- return PTR_ERR(disp->pclk);
-
- /* If the live PL video clock is not valid, fall back to PS clock */
- if (IS_ERR_OR_NULL(disp->pclk)) {
- disp->pclk = devm_clk_get(disp->dev, "dp_vtc_pixel_clk_in");
- if (IS_ERR(disp->pclk)) {
- dev_err(disp->dev, "failed to init any video clock\n");
- return PTR_ERR(disp->pclk);
- }
- disp->pclk_from_ps = true;
+ if (IS_ERR(disp->audio.base)) {
+ ret = PTR_ERR(disp->audio.base);
+ goto error;
}
- zynqmp_disp_audio_init(disp);
-
ret = zynqmp_disp_create_layers(disp);
if (ret)
- return ret;
+ goto error;
+
+ if (disp->dpsub->dma_enabled) {
+ struct zynqmp_disp_layer *layer;
- layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
- dpsub->dma_align = 1 << layer->dmas[0].chan->device->copy_align;
+ layer = &disp->layers[ZYNQMP_DPSUB_LAYER_VID];
+ dpsub->dma_align = 1 << layer->dmas[0].chan->device->copy_align;
+ }
+
+ dpsub->disp = disp;
return 0;
+
+error:
+ kfree(disp);
+ return ret;
}
void zynqmp_disp_remove(struct zynqmp_dpsub *dpsub)
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.h b/drivers/gpu/drm/xlnx/zynqmp_disp.h
index f402901afb23..123cffac08be 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.h
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.h
@@ -25,18 +25,52 @@
#define ZYNQMP_DISP_MAX_DMA_BIT 44
struct device;
-struct drm_device;
+struct drm_format_info;
+struct drm_plane_state;
struct platform_device;
struct zynqmp_disp;
+struct zynqmp_disp_layer;
struct zynqmp_dpsub;
-void zynqmp_disp_handle_vblank(struct zynqmp_disp *disp);
-bool zynqmp_disp_audio_enabled(struct zynqmp_disp *disp);
-unsigned int zynqmp_disp_get_audio_clk_rate(struct zynqmp_disp *disp);
-uint32_t zynqmp_disp_get_crtc_mask(struct zynqmp_disp *disp);
+/**
+ * enum zynqmp_dpsub_layer_id - Layer identifier
+ * @ZYNQMP_DPSUB_LAYER_VID: Video layer
+ * @ZYNQMP_DPSUB_LAYER_GFX: Graphics layer
+ */
+enum zynqmp_dpsub_layer_id {
+ ZYNQMP_DPSUB_LAYER_VID,
+ ZYNQMP_DPSUB_LAYER_GFX,
+};
+
+/**
+ * enum zynqmp_dpsub_layer_mode - Layer mode
+ * @ZYNQMP_DPSUB_LAYER_NONLIVE: non-live (memory) mode
+ * @ZYNQMP_DPSUB_LAYER_LIVE: live (stream) mode
+ */
+enum zynqmp_dpsub_layer_mode {
+ ZYNQMP_DPSUB_LAYER_NONLIVE,
+ ZYNQMP_DPSUB_LAYER_LIVE,
+};
+
+void zynqmp_disp_enable(struct zynqmp_disp *disp);
+void zynqmp_disp_disable(struct zynqmp_disp *disp);
+int zynqmp_disp_setup_clock(struct zynqmp_disp *disp,
+ unsigned long mode_clock);
+
+void zynqmp_disp_blend_set_global_alpha(struct zynqmp_disp *disp,
+ bool enable, u32 alpha);
+
+u32 *zynqmp_disp_layer_drm_formats(struct zynqmp_disp_layer *layer,
+ unsigned int *num_formats);
+void zynqmp_disp_layer_enable(struct zynqmp_disp_layer *layer,
+ enum zynqmp_dpsub_layer_mode mode);
+void zynqmp_disp_layer_disable(struct zynqmp_disp_layer *layer);
+void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
+ const struct drm_format_info *info);
+int zynqmp_disp_layer_update(struct zynqmp_disp_layer *layer,
+ struct drm_plane_state *state);
-int zynqmp_disp_drm_init(struct zynqmp_dpsub *dpsub);
-int zynqmp_disp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm);
+int zynqmp_disp_probe(struct zynqmp_dpsub *dpsub);
void zynqmp_disp_remove(struct zynqmp_dpsub *dpsub);
#endif /* _ZYNQMP_DISP_H_ */
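Note: the layer API declared here is exercised by the live-video path in zynqmp_dp.c later in this diff; the essential sequence there is:

    /* Sequence used by the live-video path (see zynqmp_dp_disp_enable()
     * below); the YUV422 format is a hard-coded default for now. */
    zynqmp_disp_layer_set_format(layer, drm_format_info(DRM_FORMAT_YUV422));
    zynqmp_disp_layer_enable(layer, ZYNQMP_DPSUB_LAYER_LIVE);
    zynqmp_disp_enable(disp);

    /* Teardown mirrors it: */
    zynqmp_disp_disable(disp);
    zynqmp_disp_layer_disable(layer);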
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index d14612b34796..7c9ae167eac7 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -11,16 +11,12 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
-#include <drm/drm_encoder.h>
-#include <drm/drm_managed.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_modes.h>
#include <drm/drm_of.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -31,10 +27,12 @@
#include <linux/pm_runtime.h>
#include <linux/phy/phy.h>
#include <linux/reset.h>
+#include <linux/slab.h>
#include "zynqmp_disp.h"
#include "zynqmp_dp.h"
#include "zynqmp_dpsub.h"
+#include "zynqmp_kms.h"
static uint zynqmp_dp_aux_timeout_ms = 50;
module_param_named(aux_timeout_ms, zynqmp_dp_aux_timeout_ms, uint, 0444);
@@ -277,14 +275,13 @@ struct zynqmp_dp_config {
/**
* struct zynqmp_dp - Xilinx DisplayPort core
- * @encoder: the drm encoder structure
- * @connector: the drm connector structure
* @dev: device structure
* @dpsub: Display subsystem
- * @drm: DRM core
* @iomem: device I/O memory for register access
* @reset: reset controller
* @irq: irq
+ * @bridge: DRM bridge for the DP encoder
+ * @next_bridge: The downstream bridge
* @config: IP core configuration from DTS
* @aux: aux channel
* @phy: PHY handles for DP lanes
@@ -298,15 +295,15 @@ struct zynqmp_dp_config {
* @train_set: set of training data
*/
struct zynqmp_dp {
- struct drm_encoder encoder;
- struct drm_connector connector;
struct device *dev;
struct zynqmp_dpsub *dpsub;
- struct drm_device *drm;
void __iomem *iomem;
struct reset_control *reset;
int irq;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+
struct zynqmp_dp_config config;
struct drm_dp_aux aux;
struct phy *phy[ZYNQMP_DP_MAX_LANES];
@@ -321,14 +318,9 @@ struct zynqmp_dp {
u8 train_set[ZYNQMP_DP_MAX_LANES];
};
-static inline struct zynqmp_dp *encoder_to_dp(struct drm_encoder *encoder)
-{
- return container_of(encoder, struct zynqmp_dp, encoder);
-}
-
-static inline struct zynqmp_dp *connector_to_dp(struct drm_connector *connector)
+static inline struct zynqmp_dp *bridge_to_dp(struct drm_bridge *bridge)
{
- return container_of(connector, struct zynqmp_dp, connector);
+ return container_of(bridge, struct zynqmp_dp, bridge);
}
static void zynqmp_dp_write(struct zynqmp_dp *dp, int offset, u32 val)
@@ -1064,7 +1056,7 @@ static int zynqmp_dp_aux_init(struct zynqmp_dp *dp)
dp->aux.name = "ZynqMP DP AUX";
dp->aux.dev = dp->dev;
- dp->aux.drm_dev = dp->drm;
+ dp->aux.drm_dev = dp->bridge.dev;
dp->aux.transfer = zynqmp_dp_aux_transfer;
return drm_dp_aux_register(&dp->aux);
@@ -1101,6 +1093,7 @@ static void zynqmp_dp_update_misc(struct zynqmp_dp *dp)
/**
* zynqmp_dp_set_format - Set the input format
* @dp: DisplayPort IP core structure
+ * @info: Display info
* @format: input format
* @bpc: bits per component
*
@@ -1109,10 +1102,10 @@ static void zynqmp_dp_update_misc(struct zynqmp_dp *dp)
* Return: 0 on success, or -EINVAL.
*/
static int zynqmp_dp_set_format(struct zynqmp_dp *dp,
+ const struct drm_display_info *info,
enum zynqmp_dpsub_format format,
unsigned int bpc)
{
- static const struct drm_display_info *display;
struct zynqmp_dp_config *config = &dp->config;
unsigned int num_colors;
@@ -1145,12 +1138,11 @@ static int zynqmp_dp_set_format(struct zynqmp_dp *dp,
return -EINVAL;
}
- display = &dp->connector.display_info;
- if (display->bpc && bpc > display->bpc) {
+ if (info && info->bpc && bpc > info->bpc) {
dev_warn(dp->dev,
"downgrading requested %ubpc to display limit %ubpc\n",
- bpc, display->bpc);
- bpc = display->bpc;
+ bpc, info->bpc);
+ bpc = info->bpc;
}
config->misc0 &= ~ZYNQMP_DP_MAIN_STREAM_MISC0_BPC_MASK;
@@ -1195,7 +1187,7 @@ static int zynqmp_dp_set_format(struct zynqmp_dp *dp,
*/
static void
zynqmp_dp_encoder_mode_set_transfer_unit(struct zynqmp_dp *dp,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
u32 tu = ZYNQMP_DP_MSA_TRANSFER_UNIT_SIZE_TU_SIZE_DEF;
u32 bw, vid_kbytes, avg_bytes_per_tu, init_wait;
@@ -1255,12 +1247,12 @@ static void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_VSTART,
mode->vtotal - mode->vsync_start);
- /* In synchronous mode, set the diviers */
+ /* In synchronous mode, set the dividers */
if (dp->config.misc0 & ZYNQMP_DP_MAIN_STREAM_MISC0_SYNC_LOCK) {
reg = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_N_VID, reg);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_M_VID, mode->clock);
- rate = zynqmp_disp_get_audio_clk_rate(dp->dpsub->disp);
+ rate = zynqmp_dpsub_get_audio_clk_rate(dp->dpsub);
if (rate) {
dev_dbg(dp->dev, "Audio rate: %d\n", rate / 512);
zynqmp_dp_write(dp, ZYNQMP_DP_TX_N_AUD, reg);
@@ -1269,7 +1261,7 @@ static void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
}
/* Only 2 channel audio is supported now */
- if (zynqmp_disp_audio_enabled(dp->dpsub->disp))
+ if (zynqmp_dpsub_audio_enabled(dp->dpsub))
zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CHANNELS, 1);
zynqmp_dp_write(dp, ZYNQMP_DP_USER_PIX_WIDTH, 1);
@@ -1281,97 +1273,114 @@ static void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
}
/* -----------------------------------------------------------------------------
- * DRM Connector
+ * DISP Configuration
*/
-static enum drm_connector_status
-zynqmp_dp_connector_detect(struct drm_connector *connector, bool force)
+static void zynqmp_dp_disp_enable(struct zynqmp_dp *dp,
+ struct drm_bridge_state *old_bridge_state)
{
- struct zynqmp_dp *dp = connector_to_dp(connector);
- struct zynqmp_dp_link_config *link_config = &dp->link_config;
- u32 state, i;
- int ret;
+ enum zynqmp_dpsub_layer_id layer_id;
+ struct zynqmp_disp_layer *layer;
+ const struct drm_format_info *info;
+
+ if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_VIDEO))
+ layer_id = ZYNQMP_DPSUB_LAYER_VID;
+ else if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_GFX))
+ layer_id = ZYNQMP_DPSUB_LAYER_GFX;
+ else
+ return;
- /*
- * This is from heuristic. It takes some delay (ex, 100 ~ 500 msec) to
- * get the HPD signal with some monitors.
- */
- for (i = 0; i < 10; i++) {
- state = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE);
- if (state & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_HPD)
- break;
- msleep(100);
- }
+ layer = dp->dpsub->layers[layer_id];
- if (state & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_HPD) {
- ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd,
- sizeof(dp->dpcd));
- if (ret < 0) {
- dev_dbg(dp->dev, "DPCD read failed");
- goto disconnected;
- }
+ /* TODO: Make the format configurable. */
+ info = drm_format_info(DRM_FORMAT_YUV422);
+ zynqmp_disp_layer_set_format(layer, info);
+ zynqmp_disp_layer_enable(layer, ZYNQMP_DPSUB_LAYER_LIVE);
- link_config->max_rate = min_t(int,
- drm_dp_max_link_rate(dp->dpcd),
- DP_HIGH_BIT_RATE2);
- link_config->max_lanes = min_t(u8,
- drm_dp_max_lane_count(dp->dpcd),
- dp->num_lanes);
+ if (layer_id == ZYNQMP_DPSUB_LAYER_GFX)
+ zynqmp_disp_blend_set_global_alpha(dp->dpsub->disp, true, 255);
+ else
+ zynqmp_disp_blend_set_global_alpha(dp->dpsub->disp, false, 0);
- dp->status = connector_status_connected;
- return connector_status_connected;
- }
+ zynqmp_disp_enable(dp->dpsub->disp);
+}
-disconnected:
- dp->status = connector_status_disconnected;
- return connector_status_disconnected;
+static void zynqmp_dp_disp_disable(struct zynqmp_dp *dp,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct zynqmp_disp_layer *layer;
+
+ if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_VIDEO))
+ layer = dp->dpsub->layers[ZYNQMP_DPSUB_LAYER_VID];
+ else if (dp->dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_GFX))
+ layer = dp->dpsub->layers[ZYNQMP_DPSUB_LAYER_GFX];
+ else
+ return;
+
+ zynqmp_disp_disable(dp->dpsub->disp);
+ zynqmp_disp_layer_disable(layer);
}
-static int zynqmp_dp_connector_get_modes(struct drm_connector *connector)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge
+ */
+
+static int zynqmp_dp_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
- struct zynqmp_dp *dp = connector_to_dp(connector);
- struct edid *edid;
+ struct zynqmp_dp *dp = bridge_to_dp(bridge);
int ret;
- edid = drm_get_edid(connector, &dp->aux.ddc);
- if (!edid)
- return 0;
+ /* Initialize and register the AUX adapter. */
+ ret = zynqmp_dp_aux_init(dp);
+ if (ret) {
+ dev_err(dp->dev, "failed to initialize DP aux\n");
+ return ret;
+ }
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
+ if (dp->next_bridge) {
+ ret = drm_bridge_attach(bridge->encoder, dp->next_bridge,
+ bridge, flags);
+ if (ret < 0)
+ goto error;
+ }
+ /* Now that initialisation is complete, enable interrupts. */
+ zynqmp_dp_write(dp, ZYNQMP_DP_INT_EN, ZYNQMP_DP_INT_ALL);
+
+ return 0;
+
+error:
+ zynqmp_dp_aux_cleanup(dp);
return ret;
}
-static struct drm_encoder *
-zynqmp_dp_connector_best_encoder(struct drm_connector *connector)
+static void zynqmp_dp_bridge_detach(struct drm_bridge *bridge)
{
- struct zynqmp_dp *dp = connector_to_dp(connector);
+ struct zynqmp_dp *dp = bridge_to_dp(bridge);
- return &dp->encoder;
+ zynqmp_dp_aux_cleanup(dp);
}
-static int zynqmp_dp_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static int zynqmp_dp_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
{
- struct zynqmp_dp *dp = connector_to_dp(connector);
- u8 max_lanes = dp->link_config.max_lanes;
- u8 bpp = dp->config.bpp;
- int max_rate = dp->link_config.max_rate;
+ struct zynqmp_dp *dp = bridge_to_dp(bridge);
int rate;
if (mode->clock > ZYNQMP_MAX_FREQ) {
- dev_dbg(dp->dev, "filtered the mode, %s,for high pixel rate\n",
+ dev_dbg(dp->dev, "filtered mode %s for high pixel rate\n",
mode->name);
drm_mode_debug_printmodeline(mode);
return MODE_CLOCK_HIGH;
}
/* Check with link rate and lane count */
- rate = zynqmp_dp_max_rate(max_rate, max_lanes, bpp);
+ rate = zynqmp_dp_max_rate(dp->link_config.max_rate,
+ dp->link_config.max_lanes, dp->config.bpp);
if (mode->clock > rate) {
- dev_dbg(dp->dev, "filtered the mode, %s,for high pixel rate\n",
+ dev_dbg(dp->dev, "filtered mode %s for high pixel rate\n",
mode->name);
drm_mode_debug_printmodeline(mode);
return MODE_CLOCK_HIGH;
@@ -1380,36 +1389,62 @@ static int zynqmp_dp_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static const struct drm_connector_funcs zynqmp_dp_connector_funcs = {
- .detect = zynqmp_dp_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
- .reset = drm_atomic_helper_connector_reset,
-};
-
-static const struct drm_connector_helper_funcs
-zynqmp_dp_connector_helper_funcs = {
- .get_modes = zynqmp_dp_connector_get_modes,
- .best_encoder = zynqmp_dp_connector_best_encoder,
- .mode_valid = zynqmp_dp_connector_mode_valid,
-};
-
-/* -----------------------------------------------------------------------------
- * DRM Encoder
- */
-
-static void zynqmp_dp_encoder_enable(struct drm_encoder *encoder)
+static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
- struct zynqmp_dp *dp = encoder_to_dp(encoder);
+ struct zynqmp_dp *dp = bridge_to_dp(bridge);
+ struct drm_atomic_state *state = old_bridge_state->base.state;
+ const struct drm_crtc_state *crtc_state;
+ const struct drm_display_mode *adjusted_mode;
+ const struct drm_display_mode *mode;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
unsigned int i;
- int ret = 0;
+ int rate;
+ int ret;
pm_runtime_get_sync(dp->dev);
+
+ zynqmp_dp_disp_enable(dp, old_bridge_state);
+
+ /*
+ * Retrieve the CRTC mode and adjusted mode. This requires a little
+ * dance to go from the bridge to the encoder, to the connector and to
+ * the CRTC.
+ */
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ adjusted_mode = &crtc_state->adjusted_mode;
+ mode = &crtc_state->mode;
+
+ zynqmp_dp_set_format(dp, &connector->display_info,
+ ZYNQMP_DPSUB_FORMAT_RGB, 8);
+
+ /* Check again as bpp or format might have been changed */
+ rate = zynqmp_dp_max_rate(dp->link_config.max_rate,
+ dp->link_config.max_lanes, dp->config.bpp);
+ if (mode->clock > rate) {
+ dev_err(dp->dev, "mode %s has too high pixel rate\n",
+ mode->name);
+ drm_mode_debug_printmodeline(mode);
+ }
+
+ /* Configure the mode */
+ ret = zynqmp_dp_mode_configure(dp, adjusted_mode->clock, 0);
+ if (ret < 0) {
+ pm_runtime_put_sync(dp->dev);
+ return;
+ }
+
+ zynqmp_dp_encoder_mode_set_transfer_unit(dp, adjusted_mode);
+ zynqmp_dp_encoder_mode_set_stream(dp, adjusted_mode);
+
+ /* Enable the encoder */
dp->enabled = true;
zynqmp_dp_update_misc(dp);
- if (zynqmp_disp_audio_enabled(dp->dpsub->disp))
+ if (zynqmp_dpsub_audio_enabled(dp->dpsub))
zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 1);
zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN, 0);
if (dp->status == connector_status_connected) {
@@ -1432,9 +1467,10 @@ static void zynqmp_dp_encoder_enable(struct drm_encoder *encoder)
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_ENABLE, 1);
}
-static void zynqmp_dp_encoder_disable(struct drm_encoder *encoder)
+static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
- struct zynqmp_dp *dp = encoder_to_dp(encoder);
+ struct zynqmp_dp *dp = bridge_to_dp(bridge);
dp->enabled = false;
cancel_delayed_work(&dp->hpd_work);
@@ -1442,49 +1478,22 @@ static void zynqmp_dp_encoder_disable(struct drm_encoder *encoder)
drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN,
ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL);
- if (zynqmp_disp_audio_enabled(dp->dpsub->disp))
+ if (zynqmp_dpsub_audio_enabled(dp->dpsub))
zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 0);
- pm_runtime_put_sync(dp->dev);
-}
-
-static void
-zynqmp_dp_encoder_atomic_mode_set(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *connector_state)
-{
- struct zynqmp_dp *dp = encoder_to_dp(encoder);
- struct drm_display_mode *mode = &crtc_state->mode;
- struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
- u8 max_lanes = dp->link_config.max_lanes;
- u8 bpp = dp->config.bpp;
- int rate, max_rate = dp->link_config.max_rate;
- int ret;
- zynqmp_dp_set_format(dp, ZYNQMP_DPSUB_FORMAT_RGB, 8);
+ zynqmp_dp_disp_disable(dp, old_bridge_state);
- /* Check again as bpp or format might have been chagned */
- rate = zynqmp_dp_max_rate(max_rate, max_lanes, bpp);
- if (mode->clock > rate) {
- dev_err(dp->dev, "the mode, %s,has too high pixel rate\n",
- mode->name);
- drm_mode_debug_printmodeline(mode);
- }
-
- ret = zynqmp_dp_mode_configure(dp, adjusted_mode->clock, 0);
- if (ret < 0)
- return;
-
- zynqmp_dp_encoder_mode_set_transfer_unit(dp, adjusted_mode);
- zynqmp_dp_encoder_mode_set_stream(dp, adjusted_mode);
+ pm_runtime_put_sync(dp->dev);
}
#define ZYNQMP_DP_MIN_H_BACKPORCH 20
-static int
-zynqmp_dp_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static int zynqmp_dp_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
+ struct zynqmp_dp *dp = bridge_to_dp(bridge);
struct drm_display_mode *mode = &crtc_state->mode;
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
int diff = mode->htotal - mode->hsync_end;
@@ -1497,7 +1506,7 @@ zynqmp_dp_encoder_atomic_check(struct drm_encoder *encoder,
int vrefresh = (adjusted_mode->clock * 1000) /
(adjusted_mode->vtotal * adjusted_mode->htotal);
- dev_dbg(encoder->dev->dev, "hbackporch adjusted: %d to %d",
+ dev_dbg(dp->dev, "hbackporch adjusted: %d to %d",
diff, ZYNQMP_DP_MIN_H_BACKPORCH - diff);
diff = ZYNQMP_DP_MIN_H_BACKPORCH - diff;
adjusted_mode->htotal += diff;
@@ -1508,11 +1517,68 @@ zynqmp_dp_encoder_atomic_check(struct drm_encoder *encoder,
return 0;
}
-static const struct drm_encoder_helper_funcs zynqmp_dp_encoder_helper_funcs = {
- .enable = zynqmp_dp_encoder_enable,
- .disable = zynqmp_dp_encoder_disable,
- .atomic_mode_set = zynqmp_dp_encoder_atomic_mode_set,
- .atomic_check = zynqmp_dp_encoder_atomic_check,
+static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge)
+{
+ struct zynqmp_dp *dp = bridge_to_dp(bridge);
+ struct zynqmp_dp_link_config *link_config = &dp->link_config;
+ u32 state, i;
+ int ret;
+
+ /*
+ * This is a heuristic: with some monitors it takes a while (ex, 100
+ * ~ 500 msec) to get the HPD signal.
+ */
+ for (i = 0; i < 10; i++) {
+ state = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE);
+ if (state & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_HPD)
+ break;
+ msleep(100);
+ }
+
+ if (state & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_HPD) {
+ ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd,
+ sizeof(dp->dpcd));
+ if (ret < 0) {
+ dev_dbg(dp->dev, "DPCD read failed");
+ goto disconnected;
+ }
+
+ link_config->max_rate = min_t(int,
+ drm_dp_max_link_rate(dp->dpcd),
+ DP_HIGH_BIT_RATE2);
+ link_config->max_lanes = min_t(u8,
+ drm_dp_max_lane_count(dp->dpcd),
+ dp->num_lanes);
+
+ dp->status = connector_status_connected;
+ return connector_status_connected;
+ }
+
+disconnected:
+ dp->status = connector_status_disconnected;
+ return connector_status_disconnected;
+}
+
+static struct edid *zynqmp_dp_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct zynqmp_dp *dp = bridge_to_dp(bridge);
+
+ return drm_get_edid(connector, &dp->aux.ddc);
+}
+
+static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = {
+ .attach = zynqmp_dp_bridge_attach,
+ .detach = zynqmp_dp_bridge_detach,
+ .mode_valid = zynqmp_dp_bridge_mode_valid,
+ .atomic_enable = zynqmp_dp_bridge_atomic_enable,
+ .atomic_disable = zynqmp_dp_bridge_atomic_disable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_check = zynqmp_dp_bridge_atomic_check,
+ .detect = zynqmp_dp_bridge_detect,
+ .get_edid = zynqmp_dp_bridge_get_edid,
};
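Note: with the encoder/connector replaced by a drm_bridge, the DRM side (presumably the new zynqmp_kms.c, which this hunk does not show) attaches the DP bridge to an encoder. A hedged sketch using the standard helper; encoder, dpsub, and the NO_CONNECTOR choice are assumptions here:

    /* Hypothetical attach from the KMS side; names are assumptions. */
    ret = drm_bridge_attach(encoder, &dpsub->dp->bridge, NULL,
                            DRM_BRIDGE_ATTACH_NO_CONNECTOR);
    if (ret)
        return ret;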
/* -----------------------------------------------------------------------------
@@ -1543,12 +1609,12 @@ void zynqmp_dp_disable_vblank(struct zynqmp_dp *dp)
static void zynqmp_dp_hpd_work_func(struct work_struct *work)
{
- struct zynqmp_dp *dp;
-
- dp = container_of(work, struct zynqmp_dp, hpd_work.work);
+ struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp,
+ hpd_work.work);
+ enum drm_connector_status status;
- if (dp->drm)
- drm_helper_hpd_irq_event(dp->drm);
+ status = zynqmp_dp_bridge_detect(&dp->bridge);
+ drm_bridge_hpd_notify(&dp->bridge, status);
}
static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
@@ -1570,7 +1636,7 @@ static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
zynqmp_dp_write(dp, ZYNQMP_DP_INT_STATUS, status);
if (status & ZYNQMP_DP_INT_VBLANK_START)
- zynqmp_disp_handle_vblank(dp->dpsub->disp);
+ zynqmp_dpsub_drm_handle_vblank(dp->dpsub);
if (status & ZYNQMP_DP_INT_HPD_EVENT)
schedule_delayed_work(&dp->hpd_work, 0);
@@ -1599,94 +1665,76 @@ handled:
* Initialization & Cleanup
*/
-int zynqmp_dp_drm_init(struct zynqmp_dpsub *dpsub)
-{
- struct zynqmp_dp *dp = dpsub->dp;
- struct drm_encoder *encoder = &dp->encoder;
- struct drm_connector *connector = &dp->connector;
- int ret;
-
- dp->config.misc0 &= ~ZYNQMP_DP_MAIN_STREAM_MISC0_SYNC_LOCK;
- zynqmp_dp_set_format(dp, ZYNQMP_DPSUB_FORMAT_RGB, 8);
-
- /* Create the DRM encoder and connector. */
- encoder->possible_crtcs |= zynqmp_disp_get_crtc_mask(dpsub->disp);
- drm_simple_encoder_init(dp->drm, encoder, DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(encoder, &zynqmp_dp_encoder_helper_funcs);
-
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- ret = drm_connector_init(encoder->dev, connector,
- &zynqmp_dp_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
- if (ret) {
- dev_err(dp->dev, "failed to create the DRM connector\n");
- return ret;
- }
-
- drm_connector_helper_add(connector, &zynqmp_dp_connector_helper_funcs);
- drm_connector_register(connector);
- drm_connector_attach_encoder(connector, encoder);
-
- /* Initialize and register the AUX adapter. */
- ret = zynqmp_dp_aux_init(dp);
- if (ret) {
- dev_err(dp->dev, "failed to initialize DP aux\n");
- return ret;
- }
-
- /* Now that initialisation is complete, enable interrupts. */
- zynqmp_dp_write(dp, ZYNQMP_DP_INT_EN, ZYNQMP_DP_INT_ALL);
-
- return 0;
-}
-
-int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm)
+int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
{
struct platform_device *pdev = to_platform_device(dpsub->dev);
+ struct drm_bridge *bridge;
struct zynqmp_dp *dp;
struct resource *res;
int ret;
- dp = drmm_kzalloc(drm, sizeof(*dp), GFP_KERNEL);
+ dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (!dp)
return -ENOMEM;
dp->dev = &pdev->dev;
dp->dpsub = dpsub;
dp->status = connector_status_disconnected;
- dp->drm = drm;
INIT_DELAYED_WORK(&dp->hpd_work, zynqmp_dp_hpd_work_func);
- dpsub->dp = dp;
-
/* Acquire all resources (IOMEM, IRQ and PHYs). */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dp");
dp->iomem = devm_ioremap_resource(dp->dev, res);
- if (IS_ERR(dp->iomem))
- return PTR_ERR(dp->iomem);
+ if (IS_ERR(dp->iomem)) {
+ ret = PTR_ERR(dp->iomem);
+ goto err_free;
+ }
dp->irq = platform_get_irq(pdev, 0);
- if (dp->irq < 0)
- return dp->irq;
+ if (dp->irq < 0) {
+ ret = dp->irq;
+ goto err_free;
+ }
dp->reset = devm_reset_control_get(dp->dev, NULL);
if (IS_ERR(dp->reset)) {
if (PTR_ERR(dp->reset) != -EPROBE_DEFER)
dev_err(dp->dev, "failed to get reset: %ld\n",
PTR_ERR(dp->reset));
- return PTR_ERR(dp->reset);
+ ret = PTR_ERR(dp->reset);
+ goto err_free;
}
ret = zynqmp_dp_reset(dp, false);
if (ret < 0)
- return ret;
+ goto err_free;
ret = zynqmp_dp_phy_probe(dp);
if (ret)
goto err_reset;
+ /* Initialize the bridge. */
+ bridge = &dp->bridge;
+ bridge->funcs = &zynqmp_dp_bridge_funcs;
+ bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
+ | DRM_BRIDGE_OP_HPD;
+ bridge->type = DRM_MODE_CONNECTOR_DisplayPort;
+ dpsub->bridge = bridge;
+
+ /*
+ * Acquire the next bridge in the chain. Ignore errors caused by port@5
+ * not being connected for backward-compatibility with older DTs.
+ */
+ ret = drm_of_find_panel_or_bridge(dp->dev->of_node, 5, 0, NULL,
+ &dp->next_bridge);
+ if (ret < 0 && ret != -ENODEV)
+ goto err_reset;
+
/* Initialize the hardware. */
+ dp->config.misc0 &= ~ZYNQMP_DP_MAIN_STREAM_MISC0_SYNC_LOCK;
+ zynqmp_dp_set_format(dp, NULL, ZYNQMP_DPSUB_FORMAT_RGB, 8);
+
zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN,
ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL);
zynqmp_dp_set(dp, ZYNQMP_DP_PHY_RESET, ZYNQMP_DP_PHY_RESET_ALL_RESET);
@@ -1710,6 +1758,8 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm)
if (ret < 0)
goto err_phy_exit;
+ dpsub->dp = dp;
+
dev_dbg(dp->dev, "ZynqMP DisplayPort Tx probed with %u lanes\n",
dp->num_lanes);
@@ -1719,7 +1769,8 @@ err_phy_exit:
zynqmp_dp_phy_exit(dp);
err_reset:
zynqmp_dp_reset(dp, true);
-
+err_free:
+ kfree(dp);
return ret;
}
@@ -1731,7 +1782,6 @@ void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub)
disable_irq(dp->irq);
cancel_delayed_work_sync(&dp->hpd_work);
- zynqmp_dp_aux_cleanup(dp);
zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 0);
zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, 0xffffffff);
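
The zynqmp_dp.c changes above replace the encoder/connector pair with a
drm_bridge that advertises DRM_BRIDGE_OP_DETECT, DRM_BRIDGE_OP_EDID and
DRM_BRIDGE_OP_HPD. As a rough sketch of how another KMS driver could consume
a bridge registered this way (the example_ names and the port/endpoint
indices are illustrative assumptions, not part of the patch):

#include <linux/err.h>
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>

/*
 * Hypothetical consumer: look up the bridge from the OF graph and attach
 * it without a bridge-created connector, mirroring what
 * zynqmp_dpsub_kms_init() does internally later in this patch.
 */
static int example_attach_dp_bridge(struct drm_encoder *encoder,
				    struct device *dev)
{
	struct drm_bridge *bridge;

	/* Port and endpoint indices are illustrative. */
	bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	/* The caller is responsible for creating the connector. */
	return drm_bridge_attach(encoder, bridge, NULL,
				 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}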
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.h b/drivers/gpu/drm/xlnx/zynqmp_dp.h
index 4507740093f6..f077d7fbd0ad 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.h
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.h
@@ -12,7 +12,6 @@
#ifndef _ZYNQMP_DP_H_
#define _ZYNQMP_DP_H_
-struct drm_device;
struct platform_device;
struct zynqmp_dp;
struct zynqmp_dpsub;
@@ -20,8 +19,7 @@ struct zynqmp_dpsub;
void zynqmp_dp_enable_vblank(struct zynqmp_dp *dp);
void zynqmp_dp_disable_vblank(struct zynqmp_dp *dp);
-int zynqmp_dp_drm_init(struct zynqmp_dpsub *dpsub);
-int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm);
+int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub);
void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub);
#endif /* _ZYNQMP_DP_H_ */
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index 1de2d927c32b..bab862484d42 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -12,191 +12,217 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_dma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_managed.h>
-#include <drm/drm_mode_config.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "zynqmp_disp.h"
#include "zynqmp_dp.h"
#include "zynqmp_dpsub.h"
+#include "zynqmp_kms.h"
/* -----------------------------------------------------------------------------
- * Dumb Buffer & Framebuffer Allocation
+ * Power Management
*/
-static int zynqmp_dpsub_dumb_create(struct drm_file *file_priv,
- struct drm_device *drm,
- struct drm_mode_create_dumb *args)
+static int __maybe_unused zynqmp_dpsub_suspend(struct device *dev)
{
- struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(drm);
- unsigned int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
- /* Enforce the alignment constraints of the DMA engine. */
- args->pitch = ALIGN(pitch, dpsub->dma_align);
+ if (!dpsub->drm)
+ return 0;
- return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
+ return drm_mode_config_helper_suspend(&dpsub->drm->dev);
}
-static struct drm_framebuffer *
-zynqmp_dpsub_fb_create(struct drm_device *drm, struct drm_file *file_priv,
- const struct drm_mode_fb_cmd2 *mode_cmd)
+static int __maybe_unused zynqmp_dpsub_resume(struct device *dev)
{
- struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(drm);
- struct drm_mode_fb_cmd2 cmd = *mode_cmd;
- unsigned int i;
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
- /* Enforce the alignment constraints of the DMA engine. */
- for (i = 0; i < ARRAY_SIZE(cmd.pitches); ++i)
- cmd.pitches[i] = ALIGN(cmd.pitches[i], dpsub->dma_align);
+ if (!dpsub->drm)
+ return 0;
- return drm_gem_fb_create(drm, file_priv, &cmd);
+ return drm_mode_config_helper_resume(&dpsub->drm->dev);
}
-static const struct drm_mode_config_funcs zynqmp_dpsub_mode_config_funcs = {
- .fb_create = zynqmp_dpsub_fb_create,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
+static const struct dev_pm_ops zynqmp_dpsub_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dpsub_suspend, zynqmp_dpsub_resume)
};
/* -----------------------------------------------------------------------------
- * DRM/KMS Driver
+ * DPSUB Configuration
*/
-DEFINE_DRM_GEM_DMA_FOPS(zynqmp_dpsub_drm_fops);
-
-static const struct drm_driver zynqmp_dpsub_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM |
- DRIVER_ATOMIC,
-
- DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(zynqmp_dpsub_dumb_create),
+/**
+ * zynqmp_dpsub_audio_enabled - Check if audio is enabled
+ * @dpsub: DisplayPort subsystem
+ *
+ * Audio is enabled if and only if the audio clock is available.
+ *
+ * Return: true if audio is enabled, false otherwise.
+ */
+bool zynqmp_dpsub_audio_enabled(struct zynqmp_dpsub *dpsub)
+{
+ return !!dpsub->aud_clk;
+}
- .fops = &zynqmp_dpsub_drm_fops,
+/**
+ * zynqmp_dpsub_get_audio_clk_rate - Get the current audio clock rate
+ * @dpsub: DisplayPort subsystem
+ *
+ * Return: the current audio clock rate, or 0 when audio is disabled.
+ */
+unsigned int zynqmp_dpsub_get_audio_clk_rate(struct zynqmp_dpsub *dpsub)
+{
+	if (!zynqmp_dpsub_audio_enabled(dpsub))
+ return 0;
+ return clk_get_rate(dpsub->aud_clk);
+}
- .name = "zynqmp-dpsub",
- .desc = "Xilinx DisplayPort Subsystem Driver",
- .date = "20130509",
- .major = 1,
- .minor = 0,
-};
+/* -----------------------------------------------------------------------------
+ * Probe & Remove
+ */
-static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub)
+static int zynqmp_dpsub_init_clocks(struct zynqmp_dpsub *dpsub)
{
- struct drm_device *drm = &dpsub->drm;
int ret;
- /* Initialize mode config, vblank and the KMS poll helper. */
- ret = drmm_mode_config_init(drm);
- if (ret < 0)
- return ret;
-
- drm->mode_config.funcs = &zynqmp_dpsub_mode_config_funcs;
- drm->mode_config.min_width = 0;
- drm->mode_config.min_height = 0;
- drm->mode_config.max_width = ZYNQMP_DISP_MAX_WIDTH;
- drm->mode_config.max_height = ZYNQMP_DISP_MAX_HEIGHT;
+ dpsub->apb_clk = devm_clk_get(dpsub->dev, "dp_apb_clk");
+ if (IS_ERR(dpsub->apb_clk))
+ return PTR_ERR(dpsub->apb_clk);
- ret = drm_vblank_init(drm, 1);
- if (ret)
+ ret = clk_prepare_enable(dpsub->apb_clk);
+ if (ret) {
+ dev_err(dpsub->dev, "failed to enable the APB clock\n");
return ret;
-
- drm_kms_helper_poll_init(drm);
+ }
/*
- * Initialize the DISP and DP components. This will creates planes,
- * CRTC, encoder and connector. The DISP should be initialized first as
- * the DP encoder needs the CRTC.
+ * Try the live PL video clock, and fall back to the PS clock if the
+ * live PL video clock isn't valid.
*/
- ret = zynqmp_disp_drm_init(dpsub);
- if (ret)
- goto err_poll_fini;
-
- ret = zynqmp_dp_drm_init(dpsub);
- if (ret)
- goto err_poll_fini;
-
- /* Reset all components and register the DRM device. */
- drm_mode_config_reset(drm);
+ dpsub->vid_clk = devm_clk_get(dpsub->dev, "dp_live_video_in_clk");
+ if (!IS_ERR(dpsub->vid_clk))
+ dpsub->vid_clk_from_ps = false;
+ else if (PTR_ERR(dpsub->vid_clk) == -EPROBE_DEFER)
+ return PTR_ERR(dpsub->vid_clk);
+
+ if (IS_ERR_OR_NULL(dpsub->vid_clk)) {
+ dpsub->vid_clk = devm_clk_get(dpsub->dev, "dp_vtc_pixel_clk_in");
+ if (IS_ERR(dpsub->vid_clk)) {
+ dev_err(dpsub->dev, "failed to init any video clock\n");
+ return PTR_ERR(dpsub->vid_clk);
+ }
+ dpsub->vid_clk_from_ps = true;
+ }
- ret = drm_dev_register(drm, 0);
- if (ret < 0)
- goto err_poll_fini;
+ /*
+ * Try the live PL audio clock, and fall back to the PS clock if the
+ * live PL audio clock isn't valid. Missing audio clock disables audio
+ * but isn't an error.
+ */
+ dpsub->aud_clk = devm_clk_get(dpsub->dev, "dp_live_audio_aclk");
+ if (!IS_ERR(dpsub->aud_clk)) {
+ dpsub->aud_clk_from_ps = false;
+ return 0;
+ }
- /* Initialize fbdev generic emulation. */
- drm_fbdev_generic_setup(drm, 24);
+ dpsub->aud_clk = devm_clk_get(dpsub->dev, "dp_aud_clk");
+ if (!IS_ERR(dpsub->aud_clk)) {
+ dpsub->aud_clk_from_ps = true;
+ return 0;
+ }
+ dev_info(dpsub->dev, "audio disabled due to missing clock\n");
return 0;
-
-err_poll_fini:
- drm_kms_helper_poll_fini(drm);
- return ret;
}
-/* -----------------------------------------------------------------------------
- * Power Management
- */
-
-static int __maybe_unused zynqmp_dpsub_suspend(struct device *dev)
+static int zynqmp_dpsub_parse_dt(struct zynqmp_dpsub *dpsub)
{
- struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
+ struct device_node *np;
+ unsigned int i;
- return drm_mode_config_helper_suspend(&dpsub->drm);
-}
+ /*
+ * For backward compatibility with old device trees that don't contain
+ * ports, consider that only the DP output port is connected if no
+	 * ports child node exists.
+ */
+ np = of_get_child_by_name(dpsub->dev->of_node, "ports");
+ of_node_put(np);
+ if (!np) {
+ dev_warn(dpsub->dev, "missing ports, update DT bindings\n");
+ dpsub->connected_ports = BIT(ZYNQMP_DPSUB_PORT_OUT_DP);
+ dpsub->dma_enabled = true;
+ return 0;
+ }
-static int __maybe_unused zynqmp_dpsub_resume(struct device *dev)
-{
- struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
+ /* Check which ports are connected. */
+ for (i = 0; i < ZYNQMP_DPSUB_NUM_PORTS; ++i) {
+ struct device_node *np;
- return drm_mode_config_helper_resume(&dpsub->drm);
-}
+ np = of_graph_get_remote_node(dpsub->dev->of_node, i, -1);
+ if (np) {
+ dpsub->connected_ports |= BIT(i);
+ of_node_put(np);
+ }
+ }
-static const struct dev_pm_ops zynqmp_dpsub_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dpsub_suspend, zynqmp_dpsub_resume)
-};
+ /* Sanity checks. */
+ if ((dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_VIDEO)) &&
+ (dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_GFX))) {
+ dev_err(dpsub->dev, "only one live video input is supported\n");
+ return -EINVAL;
+ }
-/* -----------------------------------------------------------------------------
- * Probe & Remove
- */
+ if ((dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_VIDEO)) ||
+ (dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_GFX))) {
+ if (dpsub->vid_clk_from_ps) {
+ dev_err(dpsub->dev,
+ "live video input requires PL clock\n");
+ return -EINVAL;
+ }
+ } else {
+ dpsub->dma_enabled = true;
+ }
-static int zynqmp_dpsub_init_clocks(struct zynqmp_dpsub *dpsub)
-{
- int ret;
+ if (dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_LIVE_AUDIO))
+ dev_warn(dpsub->dev, "live audio unsupported, ignoring\n");
- dpsub->apb_clk = devm_clk_get(dpsub->dev, "dp_apb_clk");
- if (IS_ERR(dpsub->apb_clk))
- return PTR_ERR(dpsub->apb_clk);
+ if ((dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_OUT_VIDEO)) ||
+ (dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_OUT_AUDIO)))
+ dev_warn(dpsub->dev, "output to PL unsupported, ignoring\n");
- ret = clk_prepare_enable(dpsub->apb_clk);
- if (ret) {
- dev_err(dpsub->dev, "failed to enable the APB clock\n");
- return ret;
+ if (!(dpsub->connected_ports & BIT(ZYNQMP_DPSUB_PORT_OUT_DP))) {
+ dev_err(dpsub->dev, "DP output port not connected\n");
+ return -EINVAL;
}
return 0;
}
+void zynqmp_dpsub_release(struct zynqmp_dpsub *dpsub)
+{
+ kfree(dpsub->disp);
+ kfree(dpsub->dp);
+ kfree(dpsub);
+}
+
static int zynqmp_dpsub_probe(struct platform_device *pdev)
{
struct zynqmp_dpsub *dpsub;
int ret;
/* Allocate private data. */
- dpsub = devm_drm_dev_alloc(&pdev->dev, &zynqmp_dpsub_drm_driver,
- struct zynqmp_dpsub, drm);
- if (IS_ERR(dpsub))
- return PTR_ERR(dpsub);
+ dpsub = kzalloc(sizeof(*dpsub), GFP_KERNEL);
+ if (!dpsub)
+ return -ENOMEM;
dpsub->dev = &pdev->dev;
platform_set_drvdata(pdev, dpsub);
@@ -210,23 +236,31 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
if (ret < 0)
goto err_mem;
+ ret = zynqmp_dpsub_parse_dt(dpsub);
+ if (ret < 0)
+ goto err_mem;
+
pm_runtime_enable(&pdev->dev);
/*
* DP should be probed first so that the zynqmp_disp can set the output
* format accordingly.
*/
- ret = zynqmp_dp_probe(dpsub, &dpsub->drm);
+ ret = zynqmp_dp_probe(dpsub);
if (ret)
goto err_pm;
- ret = zynqmp_disp_probe(dpsub, &dpsub->drm);
+ ret = zynqmp_disp_probe(dpsub);
if (ret)
goto err_dp;
- ret = zynqmp_dpsub_drm_init(dpsub);
- if (ret)
- goto err_disp;
+ if (dpsub->dma_enabled) {
+ ret = zynqmp_dpsub_drm_init(dpsub);
+ if (ret)
+ goto err_disp;
+ } else {
+ drm_bridge_add(dpsub->bridge);
+ }
dev_info(&pdev->dev, "ZynqMP DisplayPort Subsystem driver probed");
@@ -241,17 +275,19 @@ err_pm:
clk_disable_unprepare(dpsub->apb_clk);
err_mem:
of_reserved_mem_device_release(&pdev->dev);
+ if (!dpsub->drm)
+ zynqmp_dpsub_release(dpsub);
return ret;
}
static int zynqmp_dpsub_remove(struct platform_device *pdev)
{
struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
- struct drm_device *drm = &dpsub->drm;
- drm_dev_unregister(drm);
- drm_atomic_helper_shutdown(drm);
- drm_kms_helper_poll_fini(drm);
+ if (dpsub->drm)
+ zynqmp_dpsub_drm_cleanup(dpsub);
+ else
+ drm_bridge_remove(dpsub->bridge);
zynqmp_disp_remove(dpsub);
zynqmp_dp_remove(dpsub);
@@ -260,6 +296,9 @@ static int zynqmp_dpsub_remove(struct platform_device *pdev)
clk_disable_unprepare(dpsub->apb_clk);
of_reserved_mem_device_release(&pdev->dev);
+ if (!dpsub->drm)
+ zynqmp_dpsub_release(dpsub);
+
return 0;
}
@@ -267,7 +306,10 @@ static void zynqmp_dpsub_shutdown(struct platform_device *pdev)
{
struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
- drm_atomic_helper_shutdown(&dpsub->drm);
+ if (!dpsub->drm)
+ return;
+
+ drm_atomic_helper_shutdown(&dpsub->drm->dev);
}
static const struct of_device_id zynqmp_dpsub_of_match[] = {
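
The clock setup in zynqmp_dpsub_init_clocks() above uses the same
try-then-fall-back pattern for both the video and audio clocks: prefer the
live PL clock, propagate -EPROBE_DEFER, and otherwise fall back to the PS
clock. A condensed sketch of that pattern, with placeholder clock names (the
real names are the dp_live_*/dp_* identifiers in the hunk):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *example_get_clk(struct device *dev, bool *from_ps)
{
	struct clk *clk;

	/* Prefer the (optional) PL-provided clock. */
	clk = devm_clk_get(dev, "example_pl_clk");
	if (!IS_ERR(clk)) {
		*from_ps = false;
		return clk;
	}

	/* Probe deferral must be propagated, not treated as a fallback. */
	if (PTR_ERR(clk) == -EPROBE_DEFER)
		return clk;

	*from_ps = true;
	return devm_clk_get(dev, "example_ps_clk");
}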
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.h b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
index c04026d82639..09ea01878f2a 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
@@ -14,9 +14,23 @@
struct clk;
struct device;
-struct drm_device;
+struct drm_bridge;
struct zynqmp_disp;
+struct zynqmp_disp_layer;
struct zynqmp_dp;
+struct zynqmp_dpsub_drm;
+
+#define ZYNQMP_DPSUB_NUM_LAYERS 2
+
+enum zynqmp_dpsub_port {
+ ZYNQMP_DPSUB_PORT_LIVE_VIDEO,
+ ZYNQMP_DPSUB_PORT_LIVE_GFX,
+ ZYNQMP_DPSUB_PORT_LIVE_AUDIO,
+ ZYNQMP_DPSUB_PORT_OUT_VIDEO,
+ ZYNQMP_DPSUB_PORT_OUT_AUDIO,
+ ZYNQMP_DPSUB_PORT_OUT_DP,
+ ZYNQMP_DPSUB_NUM_PORTS,
+};
enum zynqmp_dpsub_format {
ZYNQMP_DPSUB_FORMAT_RGB,
@@ -27,28 +41,46 @@ enum zynqmp_dpsub_format {
/**
* struct zynqmp_dpsub - ZynqMP DisplayPort Subsystem
- * @drm: The DRM/KMS device
* @dev: The physical device
* @apb_clk: The APB clock
+ * @vid_clk: Video clock
+ * @vid_clk_from_ps: True if the video clock comes from PS, false from PL
+ * @aud_clk: Audio clock
+ * @aud_clk_from_ps: True if the audio clock comes from PS, false from PL
+ * @connected_ports: Bitmask of connected ports in the device tree
+ * @dma_enabled: True if the DMA interface is enabled, false if the DPSUB is
+ * driven by the live input
+ * @drm: The DRM/KMS device data
+ * @bridge: The DP encoder bridge
* @disp: The display controller
* @dp: The DisplayPort controller
* @dma_align: DMA alignment constraint (must be a power of 2)
*/
struct zynqmp_dpsub {
- struct drm_device drm;
struct device *dev;
struct clk *apb_clk;
+ struct clk *vid_clk;
+ bool vid_clk_from_ps;
+ struct clk *aud_clk;
+ bool aud_clk_from_ps;
+
+ unsigned int connected_ports;
+ bool dma_enabled;
+
+ struct zynqmp_dpsub_drm *drm;
+ struct drm_bridge *bridge;
struct zynqmp_disp *disp;
+ struct zynqmp_disp_layer *layers[ZYNQMP_DPSUB_NUM_LAYERS];
struct zynqmp_dp *dp;
unsigned int dma_align;
};
-static inline struct zynqmp_dpsub *to_zynqmp_dpsub(struct drm_device *drm)
-{
- return container_of(drm, struct zynqmp_dpsub, drm);
-}
+bool zynqmp_dpsub_audio_enabled(struct zynqmp_dpsub *dpsub);
+unsigned int zynqmp_dpsub_get_audio_clk_rate(struct zynqmp_dpsub *dpsub);
+
+void zynqmp_dpsub_release(struct zynqmp_dpsub *dpsub);
#endif /* _ZYNQMP_DPSUB_H_ */
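
The new connected_ports field is a bitmask indexed by the zynqmp_dpsub_port
enumeration added above. The patch open-codes the BIT() tests in
zynqmp_dpsub_parse_dt(); a trivial accessor, shown only for illustration
(the example_ name is an assumption), would look like:

#include <linux/bits.h>

#include "zynqmp_dpsub.h"

static inline bool example_port_connected(const struct zynqmp_dpsub *dpsub,
					  enum zynqmp_dpsub_port port)
{
	return dpsub->connected_ports & BIT(port);
}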
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
new file mode 100644
index 000000000000..1847792cf13d
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -0,0 +1,534 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP DisplayPort Subsystem - KMS API
+ *
+ * Copyright (C) 2017 - 2021 Xilinx, Inc.
+ *
+ * Authors:
+ * - Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ * - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_mode_config.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+
+#include "zynqmp_disp.h"
+#include "zynqmp_dp.h"
+#include "zynqmp_dpsub.h"
+#include "zynqmp_kms.h"
+
+static inline struct zynqmp_dpsub *to_zynqmp_dpsub(struct drm_device *drm)
+{
+ return container_of(drm, struct zynqmp_dpsub_drm, dev)->dpsub;
+}
+
+/* -----------------------------------------------------------------------------
+ * DRM Planes
+ */
+
+static int zynqmp_dpsub_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_crtc_state *crtc_state;
+
+ if (!new_plane_state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+}
+
+static void zynqmp_dpsub_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(plane->dev);
+ struct zynqmp_disp_layer *layer = dpsub->layers[plane->index];
+
+ if (!old_state->fb)
+ return;
+
+ zynqmp_disp_layer_disable(layer);
+
+ if (plane->index == ZYNQMP_DPSUB_LAYER_GFX)
+ zynqmp_disp_blend_set_global_alpha(dpsub->disp, false,
+ plane->state->alpha >> 8);
+}
+
+static void zynqmp_dpsub_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(plane->dev);
+ struct zynqmp_disp_layer *layer = dpsub->layers[plane->index];
+ bool format_changed = false;
+
+ if (!old_state->fb ||
+ old_state->fb->format->format != new_state->fb->format->format)
+ format_changed = true;
+
+ /*
+ * If the format has changed (including going from a previously
+ * disabled state to any format), reconfigure the format. Disable the
+ * plane first if needed.
+ */
+ if (format_changed) {
+ if (old_state->fb)
+ zynqmp_disp_layer_disable(layer);
+
+ zynqmp_disp_layer_set_format(layer, new_state->fb->format);
+ }
+
+ zynqmp_disp_layer_update(layer, new_state);
+
+ if (plane->index == ZYNQMP_DPSUB_LAYER_GFX)
+ zynqmp_disp_blend_set_global_alpha(dpsub->disp, true,
+ plane->state->alpha >> 8);
+
+ /* Enable or re-enable the plane if the format has changed. */
+ if (format_changed)
+ zynqmp_disp_layer_enable(layer, ZYNQMP_DPSUB_LAYER_NONLIVE);
+}
+
+static const struct drm_plane_helper_funcs zynqmp_dpsub_plane_helper_funcs = {
+ .atomic_check = zynqmp_dpsub_plane_atomic_check,
+ .atomic_update = zynqmp_dpsub_plane_atomic_update,
+ .atomic_disable = zynqmp_dpsub_plane_atomic_disable,
+};
+
+static const struct drm_plane_funcs zynqmp_dpsub_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static int zynqmp_dpsub_create_planes(struct zynqmp_dpsub *dpsub)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(dpsub->drm->planes); i++) {
+ struct zynqmp_disp_layer *layer = dpsub->layers[i];
+ struct drm_plane *plane = &dpsub->drm->planes[i];
+ enum drm_plane_type type;
+ unsigned int num_formats;
+ u32 *formats;
+
+ formats = zynqmp_disp_layer_drm_formats(layer, &num_formats);
+ if (!formats)
+ return -ENOMEM;
+
+ /* Graphics layer is primary, and video layer is overlay. */
+ type = i == ZYNQMP_DPSUB_LAYER_VID
+ ? DRM_PLANE_TYPE_OVERLAY : DRM_PLANE_TYPE_PRIMARY;
+ ret = drm_universal_plane_init(&dpsub->drm->dev, plane, 0,
+ &zynqmp_dpsub_plane_funcs,
+ formats, num_formats,
+ NULL, type, NULL);
+ kfree(formats);
+ if (ret)
+ return ret;
+
+ drm_plane_helper_add(plane, &zynqmp_dpsub_plane_helper_funcs);
+
+ drm_plane_create_zpos_immutable_property(plane, i);
+ if (i == ZYNQMP_DPSUB_LAYER_GFX)
+ drm_plane_create_alpha_property(plane);
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * DRM CRTC
+ */
+
+static inline struct zynqmp_dpsub *crtc_to_dpsub(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct zynqmp_dpsub_drm, crtc)->dpsub;
+}
+
+static void zynqmp_dpsub_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct zynqmp_dpsub *dpsub = crtc_to_dpsub(crtc);
+ struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
+ int ret, vrefresh;
+
+ pm_runtime_get_sync(dpsub->dev);
+
+ zynqmp_disp_setup_clock(dpsub->disp, adjusted_mode->clock * 1000);
+
+ ret = clk_prepare_enable(dpsub->vid_clk);
+ if (ret) {
+ dev_err(dpsub->dev, "failed to enable a pixel clock\n");
+ pm_runtime_put_sync(dpsub->dev);
+ return;
+ }
+
+ zynqmp_disp_enable(dpsub->disp);
+
+	/* Delay of 3 vblank intervals for the timing generator to stabilize */
+ vrefresh = (adjusted_mode->clock * 1000) /
+ (adjusted_mode->vtotal * adjusted_mode->htotal);
+ msleep(3 * 1000 / vrefresh);
+}
+
+static void zynqmp_dpsub_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct zynqmp_dpsub *dpsub = crtc_to_dpsub(crtc);
+ struct drm_plane_state *old_plane_state;
+
+ /*
+	 * Disable the plane if active. The old plane state can be NULL in the
+	 * .shutdown() path if the plane is already disabled; skip
+	 * zynqmp_dpsub_plane_atomic_disable() in that case.
+ */
+ old_plane_state = drm_atomic_get_old_plane_state(state, crtc->primary);
+ if (old_plane_state)
+ zynqmp_dpsub_plane_atomic_disable(crtc->primary, state);
+
+ zynqmp_disp_disable(dpsub->disp);
+
+ drm_crtc_vblank_off(crtc);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ clk_disable_unprepare(dpsub->vid_clk);
+ pm_runtime_put_sync(dpsub->dev);
+}
+
+static int zynqmp_dpsub_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ return drm_atomic_add_affected_planes(state, crtc);
+}
+
+static void zynqmp_dpsub_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ drm_crtc_vblank_on(crtc);
+}
+
+static void zynqmp_dpsub_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ if (crtc->state->event) {
+ struct drm_pending_vblank_event *event;
+
+ /* Consume the flip_done event from atomic helper. */
+ event = crtc->state->event;
+ crtc->state->event = NULL;
+
+ event->pipe = drm_crtc_index(crtc);
+
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_arm_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+}
+
+static const struct drm_crtc_helper_funcs zynqmp_dpsub_crtc_helper_funcs = {
+ .atomic_enable = zynqmp_dpsub_crtc_atomic_enable,
+ .atomic_disable = zynqmp_dpsub_crtc_atomic_disable,
+ .atomic_check = zynqmp_dpsub_crtc_atomic_check,
+ .atomic_begin = zynqmp_dpsub_crtc_atomic_begin,
+ .atomic_flush = zynqmp_dpsub_crtc_atomic_flush,
+};
+
+static int zynqmp_dpsub_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct zynqmp_dpsub *dpsub = crtc_to_dpsub(crtc);
+
+ zynqmp_dp_enable_vblank(dpsub->dp);
+
+ return 0;
+}
+
+static void zynqmp_dpsub_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct zynqmp_dpsub *dpsub = crtc_to_dpsub(crtc);
+
+ zynqmp_dp_disable_vblank(dpsub->dp);
+}
+
+static const struct drm_crtc_funcs zynqmp_dpsub_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = zynqmp_dpsub_crtc_enable_vblank,
+ .disable_vblank = zynqmp_dpsub_crtc_disable_vblank,
+};
+
+static int zynqmp_dpsub_create_crtc(struct zynqmp_dpsub *dpsub)
+{
+ struct drm_plane *plane = &dpsub->drm->planes[ZYNQMP_DPSUB_LAYER_GFX];
+ struct drm_crtc *crtc = &dpsub->drm->crtc;
+ int ret;
+
+ ret = drm_crtc_init_with_planes(&dpsub->drm->dev, crtc, plane,
+ NULL, &zynqmp_dpsub_crtc_funcs, NULL);
+ if (ret < 0)
+ return ret;
+
+ drm_crtc_helper_add(crtc, &zynqmp_dpsub_crtc_helper_funcs);
+
+ /* Start with vertical blanking interrupt reporting disabled. */
+ drm_crtc_vblank_off(crtc);
+
+ return 0;
+}
+
+static void zynqmp_dpsub_map_crtc_to_plane(struct zynqmp_dpsub *dpsub)
+{
+ u32 possible_crtcs = drm_crtc_mask(&dpsub->drm->crtc);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpsub->drm->planes); i++)
+ dpsub->drm->planes[i].possible_crtcs = possible_crtcs;
+}
+
+/**
+ * zynqmp_dpsub_drm_handle_vblank - Handle the vblank event
+ * @dpsub: DisplayPort subsystem
+ *
+ * This function handles the vblank interrupt and sends an event to the
+ * CRTC object. It is called from the DP vblank interrupt handler.
+ */
+void zynqmp_dpsub_drm_handle_vblank(struct zynqmp_dpsub *dpsub)
+{
+ drm_crtc_handle_vblank(&dpsub->drm->crtc);
+}
+
+/* -----------------------------------------------------------------------------
+ * Dumb Buffer & Framebuffer Allocation
+ */
+
+static int zynqmp_dpsub_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(drm);
+ unsigned int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+ /* Enforce the alignment constraints of the DMA engine. */
+ args->pitch = ALIGN(pitch, dpsub->dma_align);
+
+ return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
+}
+
+static struct drm_framebuffer *
+zynqmp_dpsub_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(drm);
+ struct drm_mode_fb_cmd2 cmd = *mode_cmd;
+ unsigned int i;
+
+ /* Enforce the alignment constraints of the DMA engine. */
+ for (i = 0; i < ARRAY_SIZE(cmd.pitches); ++i)
+ cmd.pitches[i] = ALIGN(cmd.pitches[i], dpsub->dma_align);
+
+ return drm_gem_fb_create(drm, file_priv, &cmd);
+}
+
+static const struct drm_mode_config_funcs zynqmp_dpsub_mode_config_funcs = {
+ .fb_create = zynqmp_dpsub_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+/* -----------------------------------------------------------------------------
+ * DRM/KMS Driver
+ */
+
+DEFINE_DRM_GEM_DMA_FOPS(zynqmp_dpsub_drm_fops);
+
+static const struct drm_driver zynqmp_dpsub_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM |
+ DRIVER_ATOMIC,
+
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(zynqmp_dpsub_dumb_create),
+
+ .fops = &zynqmp_dpsub_drm_fops,
+
+ .name = "zynqmp-dpsub",
+ .desc = "Xilinx DisplayPort Subsystem Driver",
+ .date = "20130509",
+ .major = 1,
+ .minor = 0,
+};
+
+static int zynqmp_dpsub_kms_init(struct zynqmp_dpsub *dpsub)
+{
+ struct drm_encoder *encoder = &dpsub->drm->encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ /* Create the planes and the CRTC. */
+ ret = zynqmp_dpsub_create_planes(dpsub);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_dpsub_create_crtc(dpsub);
+ if (ret < 0)
+ return ret;
+
+ zynqmp_dpsub_map_crtc_to_plane(dpsub);
+
+ /* Create the encoder and attach the bridge. */
+ encoder->possible_crtcs |= drm_crtc_mask(&dpsub->drm->crtc);
+ drm_simple_encoder_init(&dpsub->drm->dev, encoder, DRM_MODE_ENCODER_NONE);
+
+ ret = drm_bridge_attach(encoder, dpsub->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ dev_err(dpsub->dev, "failed to attach bridge to encoder\n");
+ return ret;
+ }
+
+ /* Create the connector for the chain of bridges. */
+ connector = drm_bridge_connector_init(&dpsub->drm->dev, encoder);
+ if (IS_ERR(connector)) {
+ dev_err(dpsub->dev, "failed to created connector\n");
+ return PTR_ERR(connector);
+ }
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret < 0) {
+ dev_err(dpsub->dev, "failed to attach connector to encoder\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void zynqmp_dpsub_drm_release(struct drm_device *drm, void *res)
+{
+ struct zynqmp_dpsub_drm *dpdrm = res;
+
+ zynqmp_dpsub_release(dpdrm->dpsub);
+}
+
+int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub)
+{
+ struct zynqmp_dpsub_drm *dpdrm;
+ struct drm_device *drm;
+ int ret;
+
+ /*
+ * Allocate the drm_device and immediately add a cleanup action to
+ * release the zynqmp_dpsub instance. If any of those operations fail,
+ * dpsub->drm will remain NULL, which tells the caller that it must
+	 * clean up manually.
+ */
+ dpdrm = devm_drm_dev_alloc(dpsub->dev, &zynqmp_dpsub_drm_driver,
+ struct zynqmp_dpsub_drm, dev);
+ if (IS_ERR(dpdrm))
+ return PTR_ERR(dpdrm);
+
+ dpdrm->dpsub = dpsub;
+ drm = &dpdrm->dev;
+
+ ret = drmm_add_action(drm, zynqmp_dpsub_drm_release, dpdrm);
+ if (ret < 0)
+ return ret;
+
+ dpsub->drm = dpdrm;
+
+ /* Initialize mode config, vblank and the KMS poll helper. */
+ ret = drmm_mode_config_init(drm);
+ if (ret < 0)
+ return ret;
+
+ drm->mode_config.funcs = &zynqmp_dpsub_mode_config_funcs;
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+ drm->mode_config.max_width = ZYNQMP_DISP_MAX_WIDTH;
+ drm->mode_config.max_height = ZYNQMP_DISP_MAX_HEIGHT;
+
+ ret = drm_vblank_init(drm, 1);
+ if (ret)
+ return ret;
+
+ drm_kms_helper_poll_init(drm);
+
+ ret = zynqmp_dpsub_kms_init(dpsub);
+ if (ret < 0)
+ goto err_poll_fini;
+
+ /* Reset all components and register the DRM device. */
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret < 0)
+ goto err_poll_fini;
+
+ /* Initialize fbdev generic emulation. */
+ drm_fbdev_generic_setup(drm, 24);
+
+ return 0;
+
+err_poll_fini:
+ drm_kms_helper_poll_fini(drm);
+ return ret;
+}
+
+void zynqmp_dpsub_drm_cleanup(struct zynqmp_dpsub *dpsub)
+{
+ struct drm_device *drm = &dpsub->drm->dev;
+
+ drm_dev_unregister(drm);
+ drm_atomic_helper_shutdown(drm);
+ drm_kms_helper_poll_fini(drm);
+}
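
zynqmp_dpsub_drm_init() above ties the lifetime of the zynqmp_dpsub
structure to the DRM device: devm_drm_dev_alloc() embeds the drm_device in
a driver structure, and drmm_add_action() registers a release callback that
runs when the last DRM reference is dropped. A minimal, self-contained
sketch of that pattern, with illustrative example_ names:

#include <linux/err.h>
#include <linux/slab.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

struct example_drm {
	struct drm_device dev;
	void *driver_data;
};

/* Runs when the last reference to the DRM device is dropped. */
static void example_release(struct drm_device *drm, void *res)
{
	struct example_drm *edrm = res;

	kfree(edrm->driver_data);
}

static int example_drm_init(struct device *parent,
			    const struct drm_driver *driver, void *data)
{
	struct example_drm *edrm;

	edrm = devm_drm_dev_alloc(parent, driver, struct example_drm, dev);
	if (IS_ERR(edrm))
		return PTR_ERR(edrm);

	edrm->driver_data = data;

	/* Free the driver data together with the DRM device. */
	return drmm_add_action(&edrm->dev, example_release, edrm);
}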
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.h b/drivers/gpu/drm/xlnx/zynqmp_kms.h
new file mode 100644
index 000000000000..01be96b00e3f
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ZynqMP DisplayPort Subsystem - KMS API
+ *
+ * Copyright (C) 2017 - 2021 Xilinx, Inc.
+ *
+ * Authors:
+ * - Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ * - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#ifndef _ZYNQMP_KMS_H_
+#define _ZYNQMP_KMS_H_
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_plane.h>
+
+#include "zynqmp_dpsub.h"
+
+struct zynqmp_dpsub;
+
+/**
+ * struct zynqmp_dpsub_drm - ZynqMP DisplayPort Subsystem DRM/KMS data
+ * @dpsub: Backpointer to the DisplayPort subsystem
+ * @dev: The DRM/KMS device
+ * @planes: The DRM planes
+ * @crtc: The DRM CRTC
+ * @encoder: The dummy DRM encoder
+ */
+struct zynqmp_dpsub_drm {
+ struct zynqmp_dpsub *dpsub;
+
+ struct drm_device dev;
+ struct drm_plane planes[ZYNQMP_DPSUB_NUM_LAYERS];
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+};
+
+void zynqmp_dpsub_drm_handle_vblank(struct zynqmp_dpsub *dpsub);
+
+int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub);
+void zynqmp_dpsub_drm_cleanup(struct zynqmp_dpsub *dpsub);
+
+#endif /* _ZYNQMP_KMS_H_ */