Diffstat (limited to 'drivers/gpu/drm/amd/display/dc')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 196
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 164
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 139
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 335
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 189
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 97
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 52
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_ddc_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_helper.c | 76
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 146
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 78
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 82
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 72
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 202
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_helpers.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services.h | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c | 40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/clock_source.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_status.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/reg_helper.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c | 40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c | 3
74 files changed, 1691 insertions(+), 824 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index f28989860fd8..1e9a2d352068 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -449,6 +449,11 @@ static inline unsigned int clamp_ux_dy(
return min_clamp;
}
+unsigned int dc_fixpt_u4d19(struct fixed31_32 arg)
+{
+ return ux_dy(arg.value, 4, 19);
+}
+
unsigned int dc_fixpt_u3d19(struct fixed31_32 arg)
{
return ux_dy(arg.value, 3, 19);
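Note: dc_fixpt_u4d19() follows the same pattern as the existing dc_fixpt_u3d19() visible above, converting a signed 31.32 fixed-point value into an unsigned U4.19 representation (4 integer bits, 19 fractional bits). A minimal illustrative sketch of what that conversion amounts to, assuming a non-negative input and ignoring the rounding and saturation that ux_dy()/clamp_ux_dy() perform:

	/* Illustrative only, not the kernel implementation. */
	unsigned int approx_u4d19(long long value_31_32)
	{
		/* drop 32 - 19 = 13 fractional bits, keep the low 4 + 19 = 23 bits */
		return (unsigned int)(value_31_32 >> 13) & ((1u << 23) - 1);
	}
	/* e.g. 1.0 (raw value 1LL << 32) maps to 1 << 19 = 0x80000 */
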
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index eb62d10bb65c..8843361e842d 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -247,6 +247,53 @@ static enum dcn_bw_defs tl_pixel_format_to_bw_defs(enum surface_pixel_format for
}
}
+enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode)
+{
+ switch (sw_mode) {
+ /* for 4/8/16 high tiles */
+ case DC_SW_LINEAR:
+ return dm_4k_tile;
+ case DC_SW_4KB_S:
+ case DC_SW_4KB_S_X:
+ return dm_4k_tile;
+ case DC_SW_64KB_S:
+ case DC_SW_64KB_S_X:
+ case DC_SW_64KB_S_T:
+ return dm_64k_tile;
+ case DC_SW_VAR_S:
+ case DC_SW_VAR_S_X:
+ return dm_256k_tile;
+
+ /* For 64bpp 2 high tiles */
+ case DC_SW_4KB_D:
+ case DC_SW_4KB_D_X:
+ return dm_4k_tile;
+ case DC_SW_64KB_D:
+ case DC_SW_64KB_D_X:
+ case DC_SW_64KB_D_T:
+ return dm_64k_tile;
+ case DC_SW_VAR_D:
+ case DC_SW_VAR_D_X:
+ return dm_256k_tile;
+
+ case DC_SW_4KB_R:
+ case DC_SW_4KB_R_X:
+ return dm_4k_tile;
+ case DC_SW_64KB_R:
+ case DC_SW_64KB_R_X:
+ return dm_64k_tile;
+ case DC_SW_VAR_R:
+ case DC_SW_VAR_R_X:
+ return dm_256k_tile;
+
+ /* Unsupported swizzle modes for dcn */
+ case DC_SW_256B_S:
+ default:
+ ASSERT(0); /* Not supported */
+ return 0;
+ }
+}
+
static void pipe_ctx_to_e2e_pipe_params (
const struct pipe_ctx *pipe,
struct _vcs_dpi_display_pipe_params_st *input)
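Note: the swizzle-mode-to-macro-tile-size mapping is hoisted out of pipe_ctx_to_e2e_pipe_params() into the shared helper swizzle_mode_to_macro_tile_size(); for example, swizzle_mode_to_macro_tile_size(DC_SW_64KB_S) returns dm_64k_tile and DC_SW_4KB_D returns dm_4k_tile, exactly as the old inline switch did. The call site in pipe_ctx_to_e2e_pipe_params() is updated in the next hunk.
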
@@ -287,46 +334,7 @@ static void pipe_ctx_to_e2e_pipe_params (
input->src.cur0_src_width = 128; /* TODO: Cursor calcs, not curently stored */
input->src.cur0_bpp = 32;
- switch (pipe->plane_state->tiling_info.gfx9.swizzle) {
- /* for 4/8/16 high tiles */
- case DC_SW_LINEAR:
- input->src.macro_tile_size = dm_4k_tile;
- break;
- case DC_SW_4KB_S:
- case DC_SW_4KB_S_X:
- input->src.macro_tile_size = dm_4k_tile;
- break;
- case DC_SW_64KB_S:
- case DC_SW_64KB_S_X:
- case DC_SW_64KB_S_T:
- input->src.macro_tile_size = dm_64k_tile;
- break;
- case DC_SW_VAR_S:
- case DC_SW_VAR_S_X:
- input->src.macro_tile_size = dm_256k_tile;
- break;
-
- /* For 64bpp 2 high tiles */
- case DC_SW_4KB_D:
- case DC_SW_4KB_D_X:
- input->src.macro_tile_size = dm_4k_tile;
- break;
- case DC_SW_64KB_D:
- case DC_SW_64KB_D_X:
- case DC_SW_64KB_D_T:
- input->src.macro_tile_size = dm_64k_tile;
- break;
- case DC_SW_VAR_D:
- case DC_SW_VAR_D_X:
- input->src.macro_tile_size = dm_256k_tile;
- break;
-
- /* Unsupported swizzle modes for dcn */
- case DC_SW_256B_S:
- default:
- ASSERT(0); /* Not supported */
- break;
- }
+ input->src.macro_tile_size = swizzle_mode_to_macro_tile_size(pipe->plane_state->tiling_info.gfx9.swizzle);
switch (pipe->plane_state->rotation) {
case ROTATION_ANGLE_0:
@@ -466,7 +474,7 @@ static void dcn_bw_calc_rq_dlg_ttu(
input.clks_cfg.dcfclk_mhz = v->dcfclk;
input.clks_cfg.dispclk_mhz = v->dispclk;
input.clks_cfg.dppclk_mhz = v->dppclk;
- input.clks_cfg.refclk_mhz = dc->res_pool->ref_clock_inKhz / 1000.0;
+ input.clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
input.clks_cfg.socclk_mhz = v->socclk;
input.clks_cfg.voltage = v->voltage_level;
// dc->dml.logger = pool->base.logger;
@@ -536,28 +544,28 @@ static void calc_wm_sets_and_perf_params(
v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vnom0p8;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000;
v->dcfclk_per_state[1] = v->dcfclkv_nom0p8;
v->dcfclk_per_state[0] = v->dcfclkv_nom0p8;
v->dcfclk = v->dcfclkv_nom0p8;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000;
}
if (v->voltage_level < 3) {
@@ -571,14 +579,14 @@ static void calc_wm_sets_and_perf_params(
v->dcfclk = v->dcfclkv_max0p9;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000;
}
v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8;
@@ -591,20 +599,20 @@ static void calc_wm_sets_and_perf_params(
v->dcfclk = v->dcfclk_per_state[v->voltage_level];
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
if (v->voltage_level >= 2) {
- context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
- context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
}
if (v->voltage_level >= 3)
- context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
}
#endif
@@ -1000,8 +1008,8 @@ bool dcn_validate_bandwidth(
dc->debug.sr_enter_plus_exit_time_dpm0_ns / 1000.0f;
if (dc->debug.sr_exit_time_dpm0_ns)
v->sr_exit_time = dc->debug.sr_exit_time_dpm0_ns / 1000.0f;
- dc->dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
- dc->dml.soc.sr_exit_time_us = v->sr_exit_time;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
+ context->bw_ctx.dml.soc.sr_exit_time_us = v->sr_exit_time;
mode_support_and_system_configuration(v);
}
@@ -1027,54 +1035,54 @@ bool dcn_validate_bandwidth(
*/
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
- context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
- context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
- context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
- context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
+ context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
(ddr4_dram_factor_single_Channel * v->number_of_channels));
if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
- context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
+ context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
}
- context->bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
- context->bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
- context->bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
if (dc->debug.max_disp_clk == true)
- context->bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
- if (context->bw.dcn.clk.dispclk_khz <
+ if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
dc->debug.min_disp_clk_khz) {
- context->bw.dcn.clk.dispclk_khz =
+ context->bw_ctx.bw.dcn.clk.dispclk_khz =
dc->debug.min_disp_clk_khz;
}
- context->bw.dcn.clk.dppclk_khz = context->bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
- context->bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
+ context->bw_ctx.bw.dcn.clk.dppclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
+ context->bw_ctx.bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
switch (v->voltage_level) {
case 0:
- context->bw.dcn.clk.max_supported_dppclk_khz =
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
break;
case 1:
- context->bw.dcn.clk.max_supported_dppclk_khz =
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
break;
case 2:
- context->bw.dcn.clk.max_supported_dppclk_khz =
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
break;
default:
- context->bw.dcn.clk.max_supported_dppclk_khz =
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
break;
}
@@ -1141,7 +1149,7 @@ bool dcn_validate_bandwidth(
hsplit_pipe->pipe_dlg_param.vblank_end = pipe->pipe_dlg_param.vblank_end;
} else {
/* pipe not split previously needs split */
- hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool);
+ hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool, pipe);
ASSERT(hsplit_pipe);
split_stream_across_pipes(
&context->res_ctx, pool,
@@ -1173,9 +1181,9 @@ bool dcn_validate_bandwidth(
if (v->voltage_level == 0) {
- dc->dml.soc.sr_enter_plus_exit_time_us =
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us =
dc->dcn_soc->sr_enter_plus_exit_time;
- dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
}
/*
@@ -1395,12 +1403,14 @@ void dcn_bw_update_from_pplib(struct dc *dc)
void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
{
- struct pp_smu_funcs_rv *pp = dc->res_pool->pp_smu;
+ struct pp_smu_funcs_rv *pp = NULL;
struct pp_smu_wm_range_sets ranges = {0};
int min_fclk_khz, min_dcfclk_khz, socclk_khz;
const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
- if (!pp->set_wm_ranges)
+ if (dc->res_pool->pp_smu)
+ pp = &dc->res_pool->pp_smu->rv_funcs;
+ if (!pp || !pp->set_wm_ranges)
return;
kernel_fpu_begin();
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index c68fbd55db3c..4887d0611001 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -524,6 +524,14 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
struct dc_stream_state *link_stream;
struct dc_link_settings store_settings = *link_setting;
+ link->preferred_link_setting = store_settings;
+
+ /* Retrain with preferred link settings only relevant for
+ * DP signal type
+ */
+ if (!dc_is_dp_signal(link->connector_signal))
+ return;
+
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->stream->link) {
@@ -538,7 +546,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
- link->preferred_link_setting = store_settings;
+ /* Cannot retrain link if backend is off */
+ if (link_stream->dpms_off)
+ return;
+
if (link_stream)
decide_link_settings(link_stream, &store_settings);
@@ -573,6 +584,28 @@ void dc_link_set_test_pattern(struct dc_link *link,
cust_pattern_size);
}
+uint32_t dc_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_setting)
+{
+ uint32_t link_bw_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */
+
+ link_bw_kbps *= 8; /* 8 bits per byte*/
+ link_bw_kbps *= link_setting->lane_count;
+
+ return link_bw_kbps;
+
+}
+
+const struct dc_link_settings *dc_link_get_verified_link_cap(
+ const struct dc_link *link)
+{
+ if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
+ return &link->preferred_link_setting;
+ return &link->verified_link_cap;
+}
+
static void destruct(struct dc *dc)
{
dc_release_state(dc->current_state);
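Note: dc_link_bandwidth_kbps() reports the 8b/10b payload bandwidth rather than the raw line rate: link_rate is the DPCD multiplier in 0.27 Gbps units and LINK_RATE_REF_FREQ_IN_KHZ is 27000, so link_rate * 27000 kHz is the per-lane symbol clock, i.e. the per-lane payload rate in kBytes/s. Worked example for HBR2 (LINK_RATE_HIGH2 = 0x14 = 20) over four lanes:

	20 * 27000 kHz * 8 bits/byte * 4 lanes = 17,280,000 kbps ~= 17.28 Gbps

which is the expected 80% payload fraction of the 21.6 Gbps raw HBR2 x4 rate.
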
@@ -621,6 +654,10 @@ static bool construct(struct dc *dc,
#endif
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+ dc->config = init_params->flags;
+
+ memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
+
dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
if (!dc_dceip) {
dm_error("%s: failed to create dceip\n", __func__);
@@ -668,13 +705,6 @@ static bool construct(struct dc *dc,
dc_ctx->dc_stream_id_count = 0;
dc->ctx = dc_ctx;
- dc->current_state = dc_create_state();
-
- if (!dc->current_state) {
- dm_error("%s: failed to create validate ctx\n", __func__);
- goto fail;
- }
-
/* Create logger */
dc_ctx->dce_environment = init_params->dce_environment;
@@ -722,14 +752,22 @@ static bool construct(struct dc *dc,
goto fail;
}
- dc->res_pool = dc_create_resource_pool(
- dc,
- init_params->num_virtual_links,
- dc_version,
- init_params->asic_id);
+ dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version);
if (!dc->res_pool)
goto fail;
+ /* Creation of current_state must occur after dc->dml
+ * is initialized in dc_create_resource_pool because
+ * on creation it copies the contents of dc->dml
+ */
+
+ dc->current_state = dc_create_state(dc);
+
+ if (!dc->current_state) {
+ dm_error("%s: failed to create validate ctx\n", __func__);
+ goto fail;
+ }
+
dc_resource_state_construct(dc, dc->current_state);
if (!create_links(dc, init_params->num_virtual_links))
@@ -746,7 +784,7 @@ fail:
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
- struct dc_state *dangling_context = dc_create_state();
+ struct dc_state *dangling_context = dc_create_state(dc);
struct dc_state *current_ctx;
if (dangling_context == NULL)
@@ -811,8 +849,6 @@ struct dc *dc_create(const struct dc_init_data *init_params)
if (dc->res_pool->dmcu != NULL)
dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
- dc->config = init_params->flags;
-
dc->build_id = DC_BUILD_ID;
DC_LOG_DC("Display Core initialized\n");
@@ -969,7 +1005,7 @@ static bool context_changed(
return false;
}
-bool dc_validate_seamless_boot_timing(struct dc *dc,
+bool dc_validate_seamless_boot_timing(const struct dc *dc,
const struct dc_sink *sink,
struct dc_crtc_timing *crtc_timing)
{
@@ -1060,7 +1096,13 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (!dcb->funcs->is_accelerated_mode(dcb))
dc->hwss.enable_accelerated_mode(dc, context);
- dc->hwss.prepare_bandwidth(dc, context);
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->apply_seamless_boot_optimization)
+ dc->optimize_seamless_boot = true;
+ }
+
+ if (!dc->optimize_seamless_boot)
+ dc->hwss.prepare_bandwidth(dc, context);
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
@@ -1135,12 +1177,15 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- /* pplib is notified if disp_num changed */
- dc->hwss.optimize_bandwidth(dc, context);
+ if (!dc->optimize_seamless_boot)
+ /* pplib is notified if disp_num changed */
+ dc->hwss.optimize_bandwidth(dc, context);
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
+ memset(&context->commit_hints, 0, sizeof(context->commit_hints));
+
dc_release_state(dc->current_state);
dc->current_state = context;
@@ -1177,7 +1222,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
int i;
struct dc_state *context = dc->current_state;
- if (dc->optimized_required == false)
+ if (!dc->optimized_required || dc->optimize_seamless_boot)
return true;
post_surface_trace(dc);
@@ -1195,18 +1240,60 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
return true;
}
-struct dc_state *dc_create_state(void)
+struct dc_state *dc_create_state(struct dc *dc)
{
struct dc_state *context = kzalloc(sizeof(struct dc_state),
GFP_KERNEL);
if (!context)
return NULL;
+ /* Each context must have their own instance of VBA and in order to
+ * initialize and obtain IP and SOC the base DML instance from DC is
+ * initially copied into every context
+ */
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
+#endif
kref_init(&context->refcount);
+
return context;
}
+struct dc_state *dc_copy_state(struct dc_state *src_ctx)
+{
+ int i, j;
+ struct dc_state *new_ctx = kzalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
+
+ if (!new_ctx)
+ return NULL;
+
+ memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
+
+ if (cur_pipe->top_pipe)
+ cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
+
+ if (cur_pipe->bottom_pipe)
+ cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+
+ }
+
+ for (i = 0; i < new_ctx->stream_count; i++) {
+ dc_stream_retain(new_ctx->streams[i]);
+ for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
+ dc_plane_state_retain(
+ new_ctx->stream_status[i].plane_states[j]);
+ }
+
+ kref_init(&new_ctx->refcount);
+
+ return new_ctx;
+}
+
void dc_retain_state(struct dc_state *context)
{
kref_get(&context->refcount);
@@ -1661,6 +1748,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
continue;
if (stream_update->dpms_off) {
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
if (*stream_update->dpms_off) {
core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
dc->hwss.optimize_bandwidth(dc, dc->current_state);
@@ -1668,6 +1756,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc->hwss.prepare_bandwidth(dc, dc->current_state);
core_link_enable_stream(dc->current_state, pipe_ctx);
}
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
@@ -1695,7 +1784,16 @@ static void commit_planes_for_stream(struct dc *dc,
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
- if (update_type == UPDATE_TYPE_FULL) {
+ if (dc->optimize_seamless_boot && surface_count > 0) {
+ /* Optimize seamless boot flag keeps clocks and watermarks high until
+ * first flip. After first flip, optimization is required to lower
+ * bandwidth.
+ */
+ dc->optimize_seamless_boot = false;
+ dc->optimized_required = true;
+ }
+
+ if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
dc->hwss.prepare_bandwidth(dc, context);
context_clock_trace(dc, context);
}
@@ -1795,7 +1893,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (update_type >= UPDATE_TYPE_FULL) {
/* initialize scratch memory for building context */
- context = dc_create_state();
+ context = dc_create_state(dc);
if (context == NULL) {
DC_ERROR("Failed to allocate new validate context!\n");
return;
@@ -2080,13 +2178,13 @@ void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
- info->displayClock = (unsigned int)state->bw.dcn.clk.dispclk_khz;
- info->engineClock = (unsigned int)state->bw.dcn.clk.dcfclk_khz;
- info->memoryClock = (unsigned int)state->bw.dcn.clk.dramclk_khz;
- info->maxSupportedDppClock = (unsigned int)state->bw.dcn.clk.max_supported_dppclk_khz;
- info->dppClock = (unsigned int)state->bw.dcn.clk.dppclk_khz;
- info->socClock = (unsigned int)state->bw.dcn.clk.socclk_khz;
- info->dcfClockDeepSleep = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz;
- info->fClock = (unsigned int)state->bw.dcn.clk.fclk_khz;
- info->phyClock = (unsigned int)state->bw.dcn.clk.phyclk_khz;
+ info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
+ info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
+ info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
+ info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
+ info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
+ info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
+ info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
+ info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
+ info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 73d049506618..5903e7822f98 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -351,19 +351,19 @@ void context_clock_trace(
DC_LOGGER_INIT(dc->ctx->logger);
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
- context->bw.dcn.clk.dispclk_khz,
- context->bw.dcn.clk.dppclk_khz,
- context->bw.dcn.clk.dcfclk_khz,
- context->bw.dcn.clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.clk.fclk_khz,
- context->bw.dcn.clk.socclk_khz);
+ context->bw_ctx.bw.dcn.clk.dispclk_khz,
+ context->bw_ctx.bw.dcn.clk.dppclk_khz,
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz,
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
+ context->bw_ctx.bw.dcn.clk.fclk_khz,
+ context->bw_ctx.bw.dcn.clk.socclk_khz);
CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
- context->bw.dcn.clk.dispclk_khz,
- context->bw.dcn.clk.dppclk_khz,
- context->bw.dcn.clk.dcfclk_khz,
- context->bw.dcn.clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.clk.fclk_khz,
- context->bw.dcn.clk.socclk_khz);
+ context->bw_ctx.bw.dcn.clk.dispclk_khz,
+ context->bw_ctx.bw.dcn.clk.dppclk_khz,
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz,
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
+ context->bw_ctx.bw.dcn.clk.fclk_khz,
+ context->bw_ctx.bw.dcn.clk.socclk_khz);
#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index ea18e9c2d8ce..3ef68a249f4d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -58,7 +58,6 @@
******************************************************************************/
enum {
- LINK_RATE_REF_FREQ_IN_MHZ = 27,
PEAK_FACTOR_X1000 = 1006,
/*
* Some receivers fail to train on first try and are good
@@ -640,7 +639,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
bool same_dpcd = true;
enum dc_connection_type new_connection_type = dc_connection_none;
DC_LOGGER_INIT(link->ctx->logger);
- if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
+
+ if (dc_is_virtual_signal(link->connector_signal))
return false;
if (false == dc_link_detect_sink(link, &new_connection_type)) {
@@ -720,9 +720,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
same_dpcd = false;
}
/* Active dongle plug in without display or downstream unplug*/
- if (link->type == dc_connection_active_dongle
- && link->dpcd_caps.sink_count.
- bits.SINK_COUNT == 0) {
+ if (link->type == dc_connection_active_dongle &&
+ link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
if (prev_sink != NULL) {
/* Downstream unplug */
dc_sink_release(prev_sink);
@@ -1172,8 +1171,6 @@ static bool construct(
goto create_fail;
}
-
-
/* TODO: #DAL3 Implement id to str function.*/
LINK_INFO("Connector[%d] description:"
"signal %d\n",
@@ -1207,7 +1204,7 @@ static bool construct(
link->link_enc = link->dc->res_pool->funcs->link_enc_create(
&enc_init_data);
- if( link->link_enc == NULL) {
+ if (link->link_enc == NULL) {
DC_ERROR("Failed to create link encoder!\n");
goto link_enc_create_fail;
}
@@ -1399,9 +1396,18 @@ static enum dc_status enable_link_dp(
/* get link settings for video mode timing */
decide_link_settings(stream, &link_settings);
+ /* If link settings are different than current and link already enabled
+ * then need to disable before programming to new rate.
+ */
+ if (link->link_status.link_active &&
+ (link->cur_link_settings.lane_count != link_settings.lane_count ||
+ link->cur_link_settings.link_rate != link_settings.link_rate)) {
+ dp_disable_link_phy(link, pipe_ctx->stream->signal);
+ }
+
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- state->dccg->funcs->update_clocks(state->dccg, state, false);
+ state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
dp_enable_link_phy(
link,
@@ -1466,14 +1472,14 @@ static enum dc_status enable_link_dp_mst(
if (link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN)
return DC_OK;
+ /* clear payload table */
+ dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
+
/* to make sure the pending down rep can be processed
- * before clear payload table
+ * before enabling the link
*/
dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link);
- /* clear payload table */
- dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
-
/* set the sink to MST mode before enabling the link */
dp_enable_mst_on_sink(link, true);
@@ -1982,7 +1988,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream->signal,
stream->phy_pix_clk);
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
dal_ddc_service_read_scdc_data(link->ddc);
}
@@ -2074,11 +2080,28 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
}
}
+static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing)
+{
+
+ uint32_t pxl_clk = timing->pix_clk_100hz;
+
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ pxl_clk /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ pxl_clk = pxl_clk * 2 / 3;
+
+ if (timing->display_color_depth == COLOR_DEPTH_101010)
+ pxl_clk = pxl_clk * 10 / 8;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212)
+ pxl_clk = pxl_clk * 12 / 8;
+
+ return pxl_clk;
+}
+
static bool dp_active_dongle_validate_timing(
const struct dc_crtc_timing *timing,
const struct dpcd_caps *dpcd_caps)
{
- unsigned int required_pix_clk_100hz = timing->pix_clk_100hz;
const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps;
switch (dpcd_caps->dongle_type) {
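Note: get_timing_pixel_clock_100hz() centralizes the effective pixel clock adjustments that dp_active_dongle_validate_timing() previously did inline: YCbCr 4:2:0 halves the required clock, 4:2:2 scales it by 2/3, and 10/12 bpc deep color scales it by 10/8 and 12/8. As an illustrative worked example (numbers are assumptions, not from this patch): 3840x2160@60 RGB is 594 MHz, so at 10 bpc the effective requirement is 594 * 10 / 8 = 742.5 MHz, which a dongle advertising a 600 MHz dp_hdmi_max_pixel_clk_in_khz limit would reject.
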
@@ -2115,13 +2138,6 @@ static bool dp_active_dongle_validate_timing(
return false;
}
-
- /* Check Color Depth and Pixel Clock */
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
- required_pix_clk_100hz /= 2;
- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
- required_pix_clk_100hz = required_pix_clk_100hz * 2 / 3;
-
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
case COLOR_DEPTH_888:
@@ -2130,14 +2146,11 @@ static bool dp_active_dongle_validate_timing(
case COLOR_DEPTH_101010:
if (dongle_caps->dp_hdmi_max_bpc < 10)
return false;
- required_pix_clk_100hz = required_pix_clk_100hz * 10 / 8;
break;
case COLOR_DEPTH_121212:
if (dongle_caps->dp_hdmi_max_bpc < 12)
return false;
- required_pix_clk_100hz = required_pix_clk_100hz * 12 / 8;
break;
-
case COLOR_DEPTH_141414:
case COLOR_DEPTH_161616:
default:
@@ -2145,7 +2158,7 @@ static bool dp_active_dongle_validate_timing(
return false;
}
- if (required_pix_clk_100hz > (dongle_caps->dp_hdmi_max_pixel_clk * 10))
+ if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
return false;
return true;
@@ -2166,7 +2179,7 @@ enum dc_status dc_link_validate_mode_timing(
return DC_OK;
/* Passive Dongle */
- if (0 != max_pix_clk && timing->pix_clk_100hz > max_pix_clk)
+ if (max_pix_clk != 0 && get_timing_pixel_clock_100hz(timing) > max_pix_clk)
return DC_EXCEED_DONGLE_CAP;
/* Active Dongle*/
@@ -2284,14 +2297,13 @@ void core_link_resume(struct dc_link *link)
static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
{
- struct dc_link_settings *link_settings =
- &stream->link->cur_link_settings;
- uint32_t link_rate_in_mbps =
- link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ;
- struct fixed31_32 mbps = dc_fixpt_from_int(
- link_rate_in_mbps * link_settings->lane_count);
-
- return dc_fixpt_div_int(mbps, 54);
+ struct fixed31_32 mbytes_per_sec;
+ uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link, &stream->link->cur_link_settings);
+ link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */
+
+ mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec);
+
+ return dc_fixpt_div_int(mbytes_per_sec, 54);
}
static int get_color_depth(enum dc_color_depth color_depth)
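Note: rewriting get_pbn_per_slot() in terms of dc_link_bandwidth_kbps() keeps the MST arithmetic the same: payload bandwidth in kbps is divided by 8000 to get MBytes/s and then by 54, since one PBN corresponds to 54/64 MBytes/s spread across the 64 MTP slots. Worked example for HBR2 x4: 17,280,000 kbps / 8000 = 2160 MB/s, and 2160 / 54 = 40 PBN per slot (40 * 64 = 2560 PBN total, the familiar HBR2 x4 figure).
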
@@ -2316,7 +2328,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
uint32_t denominator;
bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
- kbps = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 * bpc * 3;
+ kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
/*
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
@@ -2551,12 +2563,12 @@ void core_link_enable_stream(
struct dc_state *state,
struct pipe_ctx *pipe_ctx)
{
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_status status;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
- if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL) {
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
stream->link->link_enc->funcs->setup(
stream->link->link_enc,
pipe_ctx->stream->signal);
@@ -2570,9 +2582,10 @@ void core_link_enable_stream(
pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
pipe_ctx->stream_res.stream_enc,
&stream->timing,
- stream->output_color_space);
+ stream->output_color_space,
+ stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute(
pipe_ctx->stream_res.stream_enc,
&stream->timing,
@@ -2736,3 +2749,49 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
}
}
+uint32_t dc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ bits_per_channel = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_channel = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_channel = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bits_per_channel = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bits_per_channel = 16;
+ break;
+ default:
+ break;
+ }
+
+ ASSERT(bits_per_channel != 0);
+
+ kbps = timing->pix_clk_100hz / 10;
+ kbps *= bits_per_channel;
+
+ if (timing->flags.Y_ONLY != 1) {
+ /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/
+ kbps *= 3;
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ kbps /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ kbps = kbps * 2 / 3;
+ }
+
+ return kbps;
+
+}
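Note: dc_bandwidth_in_kbps_from_timing() is the former static bandwidth_in_kbps_from_timing() from dc_link_dp.c promoted to a public helper (the static copy is removed further down in this commit). Worked example for 3840x2160@60, RGB 4:4:4, 8 bpc: pix_clk_100hz = 5,940,000, so kbps = 594,000 * 8 * 3 = 14,256,000 kbps ~= 14.26 Gbps, which fits within the 17.28 Gbps HBR2 x4 payload computed by dc_link_bandwidth_kbps().
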
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index b7ee63cd8dc7..f02092a0dc76 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -573,12 +573,28 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}
-int dc_link_aux_transfer(struct ddc_service *ddc,
- struct aux_payload *payload)
+/* dc_link_aux_transfer_raw() - Attempt to transfer
+ * the given aux payload. This function does not perform
+ * retries or handle error states. The reply is returned
+ * in the payload->reply and the result through
+ * *operation_result. Returns the number of bytes transferred,
+ * or -1 on a failure.
+ */
+int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_channel_operation_result *operation_result)
{
- return dce_aux_transfer(ddc, payload);
+ return dce_aux_transfer_raw(ddc, payload, operation_result);
}
+/* dc_link_aux_transfer_with_retries() - Attempt to submit an
+ * aux payload, retrying on timeouts, defers, and busy states
+ * as outlined in the DP spec. Returns true if the request
+ * was successful.
+ *
+ * Unless you want to implement your own retry semantics, this
+ * is probably the one you want.
+ */
bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload)
{
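Note: dc_link_aux_transfer_raw() is the single-attempt path for callers that want the raw aux_channel_operation_result, while dc_link_aux_transfer_with_retries() is the convenience wrapper most callers should use. A minimal usage sketch, assuming the aux_payload field names from dc_ddc_types.h; the address and length are placeholders only:

	/* Hypothetical caller: one native-AUX read, retries handled internally. */
	static bool example_native_aux_read(struct ddc_service *ddc, uint8_t *buf)
	{
		struct aux_payload payload = {
			.i2c_over_aux = false,	/* native AUX, not I2C-over-AUX */
			.write = false,		/* read transaction */
			.address = 0x00200,	/* placeholder address */
			.length = 16,
			.data = buf,
		};

		return dc_link_aux_transfer_with_retries(ddc, &payload);
	}
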
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 09d301216076..acb4f829e042 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -93,12 +93,10 @@ static void dpcd_set_link_settings(
struct dc_link *link,
const struct link_training_settings *lt_settings)
{
- uint8_t rate = (uint8_t)
- (lt_settings->link_settings.link_rate);
+ uint8_t rate;
union down_spread_ctrl downspread = { {0} };
union lane_count_set lane_count_set = { {0} };
- uint8_t link_set_buffer[2];
downspread.raw = (uint8_t)
(lt_settings->link_settings.link_spread);
@@ -111,29 +109,42 @@ static void dpcd_set_link_settings(
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
- link_set_buffer[0] = rate;
- link_set_buffer[1] = lane_count_set.raw;
-
- core_link_write_dpcd(link, DP_LINK_BW_SET,
- link_set_buffer, 2);
core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
&downspread.raw, sizeof(downspread));
+ core_link_write_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, 1);
+
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
- (link->dpcd_caps.link_rate_set >= 1 &&
- link->dpcd_caps.link_rate_set <= 8)) {
+ lt_settings->link_settings.use_link_rate_set == true) {
+ rate = 0;
+ core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
core_link_write_dpcd(link, DP_LINK_RATE_SET,
- &link->dpcd_caps.link_rate_set, 1);
+ &lt_settings->link_settings.link_rate_set, 1);
+ } else {
+ rate = (uint8_t) (lt_settings->link_settings.link_rate);
+ core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
}
- DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
- __func__,
- DP_LINK_BW_SET,
- lt_settings->link_settings.link_rate,
- DP_LANE_COUNT_SET,
- lt_settings->link_settings.lane_count,
- DP_DOWNSPREAD_CTRL,
- lt_settings->link_settings.link_spread);
+ if (rate) {
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_BW_SET,
+ lt_settings->link_settings.link_rate,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_RATE_SET,
+ lt_settings->link_settings.link_rate_set,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+ }
}
@@ -952,6 +963,8 @@ enum link_training_result dc_link_dp_perform_link_training(
lt_settings.link_settings.link_rate = link_setting->link_rate;
lt_settings.link_settings.lane_count = link_setting->lane_count;
+ lt_settings.link_settings.use_link_rate_set = link_setting->use_link_rate_set;
+ lt_settings.link_settings.link_rate_set = link_setting->link_rate_set;
/*@todo[vdevulap] move SS to LS, should not be handled by displaypath*/
@@ -1075,7 +1088,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
{
/* Set Default link settings */
struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
- LINK_SPREAD_05_DOWNSPREAD_30KHZ};
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
/* Higher link settings based on feature supported */
if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
@@ -1520,69 +1533,6 @@ static bool decide_fallback_link_setting(
return true;
}
-static uint32_t bandwidth_in_kbps_from_timing(
- const struct dc_crtc_timing *timing)
-{
- uint32_t bits_per_channel = 0;
- uint32_t kbps;
-
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_666:
- bits_per_channel = 6;
- break;
- case COLOR_DEPTH_888:
- bits_per_channel = 8;
- break;
- case COLOR_DEPTH_101010:
- bits_per_channel = 10;
- break;
- case COLOR_DEPTH_121212:
- bits_per_channel = 12;
- break;
- case COLOR_DEPTH_141414:
- bits_per_channel = 14;
- break;
- case COLOR_DEPTH_161616:
- bits_per_channel = 16;
- break;
- default:
- break;
- }
-
- ASSERT(bits_per_channel != 0);
-
- kbps = timing->pix_clk_100hz / 10;
- kbps *= bits_per_channel;
-
- if (timing->flags.Y_ONLY != 1) {
- /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/
- kbps *= 3;
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
- kbps /= 2;
- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
- kbps = kbps * 2 / 3;
- }
-
- return kbps;
-
-}
-
-static uint32_t bandwidth_in_kbps_from_link_settings(
- const struct dc_link_settings *link_setting)
-{
- uint32_t link_rate_in_kbps = link_setting->link_rate *
- LINK_RATE_REF_FREQ_IN_KHZ;
-
- uint32_t lane_count = link_setting->lane_count;
- uint32_t kbps = link_rate_in_kbps;
-
- kbps *= lane_count;
- kbps *= 8; /* 8 bits per byte*/
-
- return kbps;
-
-}
-
bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing)
@@ -1599,7 +1549,7 @@ bool dp_validate_mode_timing(
return true;
/* We always use verified link settings */
- link_setting = &link->verified_link_cap;
+ link_setting = dc_link_get_verified_link_cap(link);
/* TODO: DYNAMIC_VALIDATION needs to be implemented */
/*if (flags.DYNAMIC_VALIDATION == 1 &&
@@ -1607,8 +1557,8 @@ bool dp_validate_mode_timing(
link_setting = &link->verified_link_cap;
*/
- req_bw = bandwidth_in_kbps_from_timing(timing);
- max_bw = bandwidth_in_kbps_from_link_settings(link_setting);
+ req_bw = dc_bandwidth_in_kbps_from_timing(timing);
+ max_bw = dc_link_bandwidth_kbps(link, link_setting);
if (req_bw <= max_bw) {
/* remember the biggest mode here, during
@@ -1629,58 +1579,78 @@ bool dp_validate_mode_timing(
return false;
}
-void decide_link_settings(struct dc_stream_state *stream,
- struct dc_link_settings *link_setting)
+static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
{
-
struct dc_link_settings initial_link_setting = {
- LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED};
+ LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0};
struct dc_link_settings current_link_setting =
initial_link_setting;
- struct dc_link *link;
- uint32_t req_bw;
uint32_t link_bw;
- req_bw = bandwidth_in_kbps_from_timing(&stream->timing);
-
- link = stream->link;
-
- /* if preferred is specified through AMDDP, use it, if it's enough
- * to drive the mode
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
*/
- if (link->preferred_link_setting.lane_count !=
- LANE_COUNT_UNKNOWN &&
- link->preferred_link_setting.link_rate !=
- LINK_RATE_UNKNOWN) {
- *link_setting = link->preferred_link_setting;
- return;
- }
+ while (current_link_setting.link_rate <=
+ link->verified_link_cap.link_rate) {
+ link_bw = dc_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
- /* MST doesn't perform link training for now
- * TODO: add MST specific link training routine
- */
- if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- *link_setting = link->verified_link_cap;
- return;
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ current_link_setting.link_rate =
+ increase_link_rate(
+ current_link_setting.link_rate);
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ }
}
- /* EDP use the link cap setting */
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ return false;
+}
+
+static bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
+{
+ struct dc_link_settings initial_link_setting;
+ struct dc_link_settings current_link_setting;
+ uint32_t link_bw;
+
+ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14 ||
+ link->dpcd_caps.edp_supported_link_rates_count == 0 ||
+ link->dc->config.optimize_edp_link_rate == false) {
*link_setting = link->verified_link_cap;
- return;
+ return true;
}
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+ initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
+ initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+ initial_link_setting.use_link_rate_set = true;
+ initial_link_setting.link_rate_set = 0;
+ current_link_setting = initial_link_setting;
+
/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing
*/
while (current_link_setting.link_rate <=
link->verified_link_cap.link_rate) {
- link_bw = bandwidth_in_kbps_from_link_settings(
+ link_bw = dc_link_bandwidth_kbps(
+ link,
&current_link_setting);
if (req_bw <= link_bw) {
*link_setting = current_link_setting;
- return;
+ return true;
}
if (current_link_setting.lane_count <
@@ -1689,13 +1659,53 @@ void decide_link_settings(struct dc_stream_state *stream,
increase_lane_count(
current_link_setting.lane_count);
} else {
- current_link_setting.link_rate =
- increase_link_rate(
- current_link_setting.link_rate);
- current_link_setting.lane_count =
- initial_link_setting.lane_count;
+ if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ current_link_setting.link_rate_set++;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ } else
+ break;
}
}
+ return false;
+}
+
+void decide_link_settings(struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting)
+{
+ struct dc_link *link;
+ uint32_t req_bw;
+
+ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+
+ link = stream->link;
+
+ /* if preferred is specified through AMDDP, use it, if it's enough
+ * to drive the mode
+ */
+ if (link->preferred_link_setting.lane_count !=
+ LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate !=
+ LINK_RATE_UNKNOWN) {
+ *link_setting = link->preferred_link_setting;
+ return;
+ }
+
+ /* MST doesn't perform link training for now
+ * TODO: add MST specific link training routine
+ */
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ *link_setting = link->verified_link_cap;
+ return;
+ }
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ if (decide_edp_link_settings(link, link_setting, req_bw))
+ return;
+ } else if (decide_dp_link_settings(link, link_setting, req_bw))
+ return;
BREAK_TO_DEBUGGER();
ASSERT(link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN);
@@ -2155,11 +2165,7 @@ bool is_mst_supported(struct dc_link *link)
bool is_dp_active_dongle(const struct dc_link *link)
{
- enum display_dongle_type dongle_type = link->dpcd_caps.dongle_type;
-
- return (dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) ||
- (dongle_type == DISPLAY_DONGLE_DP_DVI_CONVERTER) ||
- (dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
+ return link->dpcd_caps.is_branch_dev;
}
static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
@@ -2193,6 +2199,9 @@ static void get_active_converter_info(
return;
}
+ /* DPCD 0x5 bit 0 = 1, it indicate it's branch device */
+ link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
+
switch (ds_port.fields.PORT_TYPE) {
case DOWNSTREAM_VGA:
link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
@@ -2234,8 +2243,8 @@ static void get_active_converter_info(
hdmi_caps = {.raw = det_caps[3] };
union dwnstream_port_caps_byte2
hdmi_color_caps = {.raw = det_caps[2] };
- link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk =
- det_caps[1] * 25000;
+ link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz =
+ det_caps[1] * 2500;
link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
@@ -2252,7 +2261,7 @@ static void get_active_converter_info(
translate_dpcd_max_bpc(
hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
- if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0)
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0)
link->dpcd_caps.dongle_caps.extendedCapValid = true;
}
@@ -2347,6 +2356,10 @@ static bool retrieve_link_cap(struct dc_link *link)
{
uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];
+ /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
+ */
+ uint8_t dpcd_dprx_data = '\0';
+
struct dp_device_vendor_id sink_id;
union down_stream_port_count down_strm_port_count;
union edp_configuration_cap edp_config_cap;
@@ -2383,7 +2396,10 @@ static bool retrieve_link_cap(struct dc_link *link)
aux_rd_interval.raw =
dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
- if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) {
+ link->dpcd_caps.ext_receiver_cap_field_present =
+ aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1 ? true:false;
+
+ if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) {
uint8_t ext_cap_data[16];
memset(ext_cap_data, '\0', sizeof(ext_cap_data));
@@ -2404,7 +2420,38 @@ static bool retrieve_link_cap(struct dc_link *link)
}
link->dpcd_caps.dpcd_rev.raw =
- dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+ dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+
+ if (link->dpcd_caps.dpcd_rev.raw >= 0x14) {
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DPRX_FEATURE_ENUMERATION_LIST,
+ &dpcd_dprx_data,
+ sizeof(dpcd_dprx_data));
+ if (status == DC_OK)
+ break;
+ }
+
+ link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data;
+
+ if (status != DC_OK)
+ dm_error("%s: Read DPRX caps data failed.\n", __func__);
+ }
+
+ else {
+ link->dpcd_caps.dprx_feature.raw = 0;
+ }
+
+
+ /* Error condition checking...
+ * It is impossible for Sink to report Max Lane Count = 0.
+ * It is possible for Sink to report Max Link Rate = 0, if it is
+ * an eDP device that is reporting specialized link rates in the
+ * SUPPORTED_LINK_RATE table.
+ */
+ if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0)
+ return false;
ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
DP_DPCD_REV];
@@ -2536,31 +2583,31 @@ enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
void detect_edp_sink_caps(struct dc_link *link)
{
- uint8_t supported_link_rates[16] = {0};
+ uint8_t supported_link_rates[16];
uint32_t entry;
uint32_t link_rate_in_khz;
enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
retrieve_link_cap(link);
+ link->dpcd_caps.edp_supported_link_rates_count = 0;
+ memset(supported_link_rates, 0, sizeof(supported_link_rates));
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) {
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
+ link->dc->config.optimize_edp_link_rate) {
// Read DPCD 00010h - 0001Fh 16 bytes at one shot
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
supported_link_rates, sizeof(supported_link_rates));
- link->dpcd_caps.link_rate_set = 0;
for (entry = 0; entry < 16; entry += 2) {
// DPCD register reports per-lane link rate = 16-bit link rate capability
- // value X 200 kHz. Need multipler to find link rate in kHz.
+ // value X 200 kHz. Need multiplier to find link rate in kHz.
link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 +
supported_link_rates[entry]) * 200;
if (link_rate_in_khz != 0) {
link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
- if (link->reported_link_cap.link_rate < link_rate) {
- link->reported_link_cap.link_rate = link_rate;
- link->dpcd_caps.link_rate_set = entry;
- }
+ link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate;
+ link->dpcd_caps.edp_supported_link_rates_count++;
}
}
}
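Note: detect_edp_sink_caps() now records every entry of the DPCD SUPPORTED_LINK_RATES table (00010h-0001Fh) instead of only the single highest rate, and only when optimize_edp_link_rate is set. Each 16-bit little-endian entry is the per-lane link rate in 200 kHz units; for example, bytes {0x78, 0x69} give 0x6978 = 27000, and 27000 * 200 kHz = 5.4 Gbps per lane, which linkRateInKHzToLinkRateMultiplier() maps back to LINK_RATE_HIGH2 (HBR2).
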
@@ -2601,6 +2648,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
enum dc_color_depth color_depth = pipe_ctx->
stream->timing.display_color_depth;
struct bit_depth_reduction_params params;
+ struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
memset(&params, 0, sizeof(params));
@@ -2640,8 +2688,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
{
/* disable bit depth reduction */
pipe_ctx->stream->bit_depth_params = params;
- pipe_ctx->stream_res.opp->funcs->
- opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
controller_test_pattern, color_depth);
@@ -2650,11 +2697,9 @@ static void set_crtc_test_pattern(struct dc_link *link,
case DP_TEST_PATTERN_VIDEO_MODE:
{
/* restore bitdepth reduction */
- resource_build_bit_depth_reduction_params(pipe_ctx->stream,
- &params);
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params);
pipe_ctx->stream->bit_depth_params = params;
- pipe_ctx->stream_res.opp->funcs->
- opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 349ab8017776..3830e6ce1355 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -31,6 +31,8 @@
#include "opp.h"
#include "timing_generator.h"
#include "transform.h"
+#include "dccg.h"
+#include "dchubbub.h"
#include "dpp.h"
#include "core_types.h"
#include "set_mode_types.h"
@@ -104,44 +106,43 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
return dc_version;
}
-struct resource_pool *dc_create_resource_pool(
- struct dc *dc,
- int num_virtual_links,
- enum dce_version dc_version,
- struct hw_asic_id asic_id)
+struct resource_pool *dc_create_resource_pool(struct dc *dc,
+ const struct dc_init_data *init_data,
+ enum dce_version dc_version)
{
struct resource_pool *res_pool = NULL;
switch (dc_version) {
case DCE_VERSION_8_0:
res_pool = dce80_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_8_1:
res_pool = dce81_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_8_3:
res_pool = dce83_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_10_0:
res_pool = dce100_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_11_0:
res_pool = dce110_create_resource_pool(
- num_virtual_links, dc, asic_id);
+ init_data->num_virtual_links, dc,
+ init_data->asic_id);
break;
case DCE_VERSION_11_2:
case DCE_VERSION_11_22:
res_pool = dce112_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_12_0:
case DCE_VERSION_12_1:
res_pool = dce120_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -149,8 +150,7 @@ struct resource_pool *dc_create_resource_pool(
#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
case DCN_VERSION_1_01:
#endif
- res_pool = dcn10_create_resource_pool(
- num_virtual_links, dc);
+ res_pool = dcn10_create_resource_pool(init_data, dc);
break;
#endif
@@ -163,7 +163,28 @@ struct resource_pool *dc_create_resource_pool(
if (dc->ctx->dc_bios->funcs->get_firmware_info(
dc->ctx->dc_bios, &fw_info) == BP_RESULT_OK) {
- res_pool->ref_clock_inKhz = fw_info.pll_info.crystal_frequency;
+ res_pool->ref_clocks.xtalin_clock_inKhz = fw_info.pll_info.crystal_frequency;
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ // On FPGA these dividers are currently not configured by GDB
+ res_pool->ref_clocks.dccg_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
+ } else if (res_pool->dccg && res_pool->hubbub) {
+ // If DCCG reference frequency cannot be determined (usually means not set to xtalin) then this is a critical error
+ // as this value must be known for DCHUB programming
+ (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
+ fw_info.pll_info.crystal_frequency,
+ &res_pool->ref_clocks.dccg_ref_clock_inKhz);
+
+ // Similarly, if DCHUB reference frequency cannot be determined, then it is also a critical error
+ (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
+ res_pool->ref_clocks.dccg_ref_clock_inKhz,
+ &res_pool->ref_clocks.dchub_ref_clock_inKhz);
+ } else {
+ // Not all ASICs have DCCG sw component
+ res_pool->ref_clocks.dccg_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
+ }
} else
ASSERT_CRITICAL(false);
}
@@ -260,6 +281,7 @@ bool resource_construct(
pool->stream_enc_count++;
}
}
+
dc->caps.dynamic_audio = false;
if (pool->audio_count < pool->stream_enc_count) {
dc->caps.dynamic_audio = true;
@@ -1014,24 +1036,60 @@ enum dc_status resource_build_scaling_params_for_context(
struct pipe_ctx *find_idle_secondary_pipe(
struct resource_context *res_ctx,
- const struct resource_pool *pool)
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe)
{
int i;
struct pipe_ctx *secondary_pipe = NULL;
/*
- * search backwards for the second pipe to keep pipe
- * assignment more consistent
+ * We add a preferred pipe mapping to avoid the chance that
+ * MPCCs already in use will need to be reassigned to other trees.
+ * For example, if we went with the strict, assign backwards logic:
+ *
+ * (State 1)
+ * Display A on, no surface, top pipe = 0
+ * Display B on, no surface, top pipe = 1
+ *
+ * (State 2)
+ * Display A on, no surface, top pipe = 0
+ * Display B on, surface enable, top pipe = 1, bottom pipe = 5
+ *
+ * (State 3)
+ * Display A on, surface enable, top pipe = 0, bottom pipe = 5
+ * Display B on, surface enable, top pipe = 1, bottom pipe = 4
+ *
+ * The state 2->3 transition requires remapping MPCC 5 from display B
+ * to display A.
+ *
+ * However, with the preferred pipe logic, state 2 would look like:
+ *
+ * (State 2)
+ * Display A on, no surface, top pipe = 0
+ * Display B on, surface enable, top pipe = 1, bottom pipe = 4
+ *
+ * This would then cause 2->3 to not require remapping any MPCCs.
*/
-
- for (i = pool->pipe_count - 1; i >= 0; i--) {
- if (res_ctx->pipe_ctx[i].stream == NULL) {
- secondary_pipe = &res_ctx->pipe_ctx[i];
- secondary_pipe->pipe_idx = i;
- break;
+ if (primary_pipe) {
+ int preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx;
+ if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
}
}
+ /*
+ * search backwards for the second pipe to keep pipe
+ * assignment more consistent
+ */
+ if (!secondary_pipe)
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
+ if (res_ctx->pipe_ctx[i].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[i];
+ secondary_pipe->pipe_idx = i;
+ break;
+ }
+ }
return secondary_pipe;
}
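
The preferred mapping above pairs a primary pipe with its mirror index counted from the end of the pipe array (top pipe 1 gets bottom pipe 4 in a 6-pipe pool, matching the example in the comment). A small standalone illustration of the index arithmetic, assuming a hypothetical 6-pipe pool:

    #include <stdio.h>

    int main(void)
    {
        int pipe_count = 6;  /* hypothetical pool size */
        int primary_pipe_idx;

        for (primary_pipe_idx = 0; primary_pipe_idx < pipe_count; primary_pipe_idx++) {
            int preferred_pipe_idx = (pipe_count - 1) - primary_pipe_idx;

            /* 0 -> 5, 1 -> 4, 2 -> 3, ... */
            printf("primary %d -> preferred secondary %d\n",
                   primary_pipe_idx, preferred_pipe_idx);
        }
        return 0;
    }
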
@@ -1214,6 +1272,9 @@ bool dc_add_plane_to_context(
free_pipe->clock_source = tail_pipe->clock_source;
free_pipe->top_pipe = tail_pipe;
tail_pipe->bottom_pipe = free_pipe;
+ } else if (free_pipe->bottom_pipe && free_pipe->bottom_pipe->plane_state == NULL) {
+ ASSERT(free_pipe->bottom_pipe->stream_res.opp != free_pipe->stream_res.opp);
+ free_pipe->bottom_pipe->plane_state = plane_state;
}
/* assign new surfaces*/
@@ -1224,6 +1285,35 @@ bool dc_add_plane_to_context(
return true;
}
+struct pipe_ctx *dc_res_get_odm_bottom_pipe(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *bottom_pipe = pipe_ctx->bottom_pipe;
+
+ /* ODM should only be updated once per otg */
+ if (pipe_ctx->top_pipe)
+ return NULL;
+
+ while (bottom_pipe) {
+ if (bottom_pipe->stream_res.opp != pipe_ctx->stream_res.opp)
+ break;
+ bottom_pipe = bottom_pipe->bottom_pipe;
+ }
+
+ return bottom_pipe;
+}
+
+bool dc_res_is_odm_head_pipe(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *top_pipe = pipe_ctx->top_pipe;
+
+ if (!top_pipe)
+ return false;
+ if (top_pipe && top_pipe->stream_res.opp == pipe_ctx->stream_res.opp)
+ return false;
+
+ return true;
+}
+
bool dc_remove_plane_from_context(
const struct dc *dc,
struct dc_stream_state *stream,
@@ -1247,10 +1337,14 @@ bool dc_remove_plane_from_context(
/* release pipe for plane*/
for (i = pool->pipe_count - 1; i >= 0; i--) {
- struct pipe_ctx *pipe_ctx;
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (context->res_ctx.pipe_ctx[i].plane_state == plane_state) {
- pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->plane_state == plane_state) {
+ if (dc_res_is_odm_head_pipe(pipe_ctx)) {
+ pipe_ctx->plane_state = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ continue;
+ }
if (pipe_ctx->top_pipe)
pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;
@@ -1268,8 +1362,9 @@ bool dc_remove_plane_from_context(
*/
if (!pipe_ctx->top_pipe) {
pipe_ctx->plane_state = NULL;
- pipe_ctx->bottom_pipe = NULL;
- } else {
+ if (!dc_res_get_odm_bottom_pipe(pipe_ctx))
+ pipe_ctx->bottom_pipe = NULL;
+ } else {
memset(pipe_ctx, 0, sizeof(*pipe_ctx));
}
}
@@ -1674,6 +1769,9 @@ enum dc_status dc_remove_stream_from_ctx(
for (i = 0; i < MAX_PIPES; i++) {
if (new_ctx->res_ctx.pipe_ctx[i].stream == stream &&
!new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
+ struct pipe_ctx *odm_pipe =
+ dc_res_get_odm_bottom_pipe(&new_ctx->res_ctx.pipe_ctx[i]);
+
del_pipe = &new_ctx->res_ctx.pipe_ctx[i];
ASSERT(del_pipe->stream_res.stream_enc);
@@ -1698,6 +1796,8 @@ enum dc_status dc_remove_stream_from_ctx(
dc->res_pool->funcs->remove_stream_from_ctx(dc, new_ctx, stream);
memset(del_pipe, 0, sizeof(*del_pipe));
+ if (odm_pipe)
+ memset(odm_pipe, 0, sizeof(*odm_pipe));
break;
}
@@ -1855,6 +1955,7 @@ enum dc_status resource_map_pool_resources(
struct dc_context *dc_ctx = dc->ctx;
struct pipe_ctx *pipe_ctx = NULL;
int pipe_idx = -1;
+ struct dc_bios *dcb = dc->ctx->dc_bios;
/* TODO Check if this is needed */
/*if (!resource_is_stream_unchanged(old_context, stream)) {
@@ -1869,6 +1970,13 @@ enum dc_status resource_map_pool_resources(
calculate_phy_pix_clks(stream);
+ /* TODO: Check Linux */
+ if (dc->config.allow_seamless_boot_optimization &&
+ !dcb->funcs->is_accelerated_mode(dcb)) {
+ if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing))
+ stream->apply_seamless_boot_optimization = true;
+ }
+
if (stream->apply_seamless_boot_optimization)
pipe_idx = acquire_resource_from_hw_enabled_state(
&context->res_ctx,
@@ -1951,7 +2059,7 @@ void dc_resource_state_construct(
const struct dc *dc,
struct dc_state *dst_ctx)
{
- dst_ctx->dccg = dc->res_pool->clk_mgr;
+ dst_ctx->clk_mgr = dc->res_pool->clk_mgr;
}
/**
@@ -2315,6 +2423,21 @@ static void set_spd_info_packet(
*info_packet = stream->vrr_infopacket;
}
+static void set_dp_sdp_info_packet(
+ struct dc_info_packet *info_packet,
+ struct dc_stream_state *stream)
+{
+ /* SPD info packet for custom sdp message */
+
+ /* Copy the custom SDP message only if the stream marked it valid;
+ * otherwise leave the encoder info packet untouched.
+ */
+ if (!stream->dpsdp_infopacket.valid)
+ return;
+
+ *info_packet = stream->dpsdp_infopacket;
+}
+
static void set_hdr_static_info_packet(
struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
@@ -2411,6 +2534,7 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
info->spd.valid = false;
info->hdrsmd.valid = false;
info->vsc.valid = false;
+ info->dpsdp.valid = false;
signal = pipe_ctx->stream->signal;
@@ -2430,6 +2554,8 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
set_spd_info_packet(&info->spd, pipe_ctx->stream);
set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream);
+
+ set_dp_sdp_info_packet(&info->dpsdp, pipe_ctx->stream);
}
patch_gamut_packet_checksum(&info->gamut);
@@ -2657,10 +2783,11 @@ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
if (!tg->funcs->validate_timing(tg, &stream->timing))
res = DC_FAIL_CONTROLLER_VALIDATE;
- if (res == DC_OK)
+ if (res == DC_OK) {
if (!link->link_enc->funcs->validate_output_with_stream(
link->link_enc, stream))
res = DC_FAIL_ENC_VALIDATE;
+ }
/* TODO: validate audio ASIC caps, encoder */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 996298c35f42..f7a293902234 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -29,6 +29,9 @@
#include "resource.h"
#include "ipp.h"
#include "timing_generator.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/dcn10_hw_sequencer.h"
+#endif
#define DC_LOGGER dc->ctx->logger
@@ -196,6 +199,34 @@ struct dc_stream_status *dc_stream_get_status(
return dc_stream_get_status_from_state(dc->current_state, stream);
}
+static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
+{
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ unsigned int vupdate_line;
+ unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ unsigned int us_per_line;
+
+ if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
+ ASIC_REV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
+
+ vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx);
+ dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos);
+
+ if (vpos >= vupdate_line)
+ return;
+
+ us_per_line = stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz;
+ lines_to_vupdate = vupdate_line - vpos;
+ us_to_vupdate = lines_to_vupdate * us_per_line;
+
+ /* 70 us is a conservative estimate of cursor update time*/
+ if (us_to_vupdate < 70)
+ udelay(us_to_vupdate);
+ }
+#endif
+}
+
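
Since pix_clk_100hz is in units of 100 Hz, h_total * 10000 / pix_clk_100hz gives microseconds per scanline. A worked sketch of the values computed above, with a hypothetical 1080p timing and hypothetical VUPDATE/scanline positions:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical 1920x1080@60 timing */
        unsigned int h_total = 2200;
        unsigned int pix_clk_100hz = 1485000;  /* 148.50 MHz */
        unsigned int vupdate_line = 1120;      /* hypothetical VUPDATE offset */
        unsigned int vpos = 1100;              /* hypothetical current scanline */

        unsigned int us_per_line = h_total * 10000 / pix_clk_100hz;  /* 14 us */
        unsigned int lines_to_vupdate = vupdate_line - vpos;
        unsigned int us_to_vupdate = lines_to_vupdate * us_per_line; /* 280 us */

        printf("us_per_line=%u us_to_vupdate=%u\n", us_per_line, us_to_vupdate);
        return 0;
    }

With these numbers the cursor update would not be delayed, since the wait is only applied when us_to_vupdate falls below the 70 us estimate.
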
/**
* dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
*/
@@ -234,6 +265,8 @@ bool dc_stream_set_cursor_attributes(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
+
+ delay_cursor_until_vupdate(pipe_ctx, core_dc);
core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
}
@@ -283,6 +316,8 @@ bool dc_stream_set_cursor_position(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
+
+ delay_cursor_until_vupdate(pipe_ctx, core_dc);
core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
}
@@ -314,6 +349,68 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
return 0;
}
+static void build_dp_sdp_info_frame(struct pipe_ctx *pipe_ctx,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size)
+{
+ uint8_t i;
+ struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+
+ /* set valid info */
+ info->dpsdp.valid = true;
+
+ /* set sdp message header */
+ info->dpsdp.hb0 = custom_sdp_message[0]; /* package id */
+ info->dpsdp.hb1 = custom_sdp_message[1]; /* package type */
+ info->dpsdp.hb2 = custom_sdp_message[2]; /* package specific byte 0, any data */
+ info->dpsdp.hb3 = custom_sdp_message[3]; /* package specific byte 1, any data */
+
+ /* set sdp message data */
+ for (i = 0; i < 32; i++)
+ info->dpsdp.sb[i] = (custom_sdp_message[i+4]);
+
+}
+
+static void invalid_dp_sdp_info_frame(struct pipe_ctx *pipe_ctx)
+{
+ struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+
+ /* mark the info packet invalid */
+ info->dpsdp.valid = false;
+}
+
+bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size)
+{
+ int i;
+ struct dc *core_dc;
+ struct resource_context *res_ctx;
+
+ if (stream == NULL) {
+ dm_error("DC: dc_stream is NULL!\n");
+ return false;
+ }
+
+ core_dc = stream->ctx->dc;
+ res_ctx = &core_dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+
+ build_dp_sdp_info_frame(pipe_ctx, custom_sdp_message, sdp_message_size);
+
+ core_dc->hwss.update_info_frame(pipe_ctx);
+
+ invalid_dp_sdp_info_frame(pipe_ctx);
+ }
+
+ return true;
+}
+
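
build_dp_sdp_info_frame() above consumes 4 header bytes plus 32 data bytes, so a caller is expected to hand dc_stream_send_dp_sdp() at least 36 bytes. A hedged sketch of assembling such a buffer; the header values are hypothetical and the final call is shown only as a comment:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* 4 header bytes followed by 32 data bytes */
        uint8_t custom_sdp_message[36];

        memset(custom_sdp_message, 0, sizeof(custom_sdp_message));
        custom_sdp_message[0] = 0x00;  /* HB0: packet id (hypothetical) */
        custom_sdp_message[1] = 0x07;  /* HB1: packet type (hypothetical) */
        custom_sdp_message[2] = 0x1f;  /* HB2: packet specific byte 0 (hypothetical) */
        custom_sdp_message[3] = 0x00;  /* HB3: packet specific byte 1 (hypothetical) */
        custom_sdp_message[4] = 0xaa;  /* first of the 32 data bytes -> dpsdp.sb[0] */

        /* in driver code the buffer would then be passed as:
         * dc_stream_send_dp_sdp(stream, custom_sdp_message,
         *                       sizeof(custom_sdp_message));
         */
        printf("sdp payload size %zu\n", sizeof(custom_sdp_message));
        return 0;
    }
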
bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
uint32_t *v_blank_start,
uint32_t *v_blank_end,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index ee6bd50f60b8..a5e86f9b148f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -119,6 +119,19 @@ const struct dc_plane_status *dc_plane_get_status(
if (core_dc->current_state == NULL)
return NULL;
+ /* Find the current plane state and set its pending bit to false */
+ for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx =
+ &core_dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+
+ pipe_ctx->plane_state->status.is_flip_pending = false;
+
+ break;
+ }
+
for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&core_dc->current_state->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 1a7fd6aa77eb..dc6a2c236ba7 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,9 +39,10 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.17"
+#define DC_VER "3.2.25"
#define MAX_SURFACES 3
+#define MAX_PLANES 6
#define MAX_STREAMS 6
#define MAX_SINKS_PER_LINK 4
@@ -53,6 +54,22 @@ struct dc_versions {
struct dmcu_version dmcu_version;
};
+enum dc_plane_type {
+ DC_PLANE_TYPE_INVALID,
+ DC_PLANE_TYPE_DCE_RGB,
+ DC_PLANE_TYPE_DCE_UNDERLAY,
+ DC_PLANE_TYPE_DCN_UNIVERSAL,
+};
+
+struct dc_plane_cap {
+ enum dc_plane_type type;
+ uint32_t blends_with_above : 1;
+ uint32_t blends_with_below : 1;
+ uint32_t per_pixel_alpha : 1;
+ uint32_t supports_argb8888 : 1;
+ uint32_t supports_nv12 : 1;
+};
+
struct dc_caps {
uint32_t max_streams;
uint32_t max_links;
@@ -73,6 +90,7 @@ struct dc_caps {
bool force_dp_tps4_for_cp2520;
bool disable_dp_clk_share;
bool psp_setup_panel_mode;
+ struct dc_plane_cap planes[MAX_PLANES];
};
struct dc_dcc_surface_param {
@@ -164,6 +182,8 @@ struct dc_config {
bool gpu_vm_support;
bool disable_disp_pll_sharing;
bool fbc_support;
+ bool optimize_edp_link_rate;
+ bool allow_seamless_boot_optimization;
};
enum visual_confirm {
@@ -203,6 +223,7 @@ struct dc_clocks {
int fclk_khz;
int phyclk_khz;
int dramclk_khz;
+ bool p_state_change_support;
};
struct dc_debug_options {
@@ -257,6 +278,7 @@ struct dc_debug_options {
bool skip_detection_link_training;
unsigned int force_odm_combine; //bit vector based on otg inst
unsigned int force_fclk_khz;
+ bool disable_tri_buf;
};
struct dc_debug_data {
@@ -265,6 +287,14 @@ struct dc_debug_data {
uint32_t auxErrorCount;
};
+struct dc_bounding_box_overrides {
+ int sr_exit_time_ns;
+ int sr_enter_plus_exit_time_ns;
+ int urgent_latency_ns;
+ int percent_of_ideal_drambw;
+ int dram_clock_change_latency_ns;
+};
+
struct dc_state;
struct resource_pool;
struct dce_hwseq;
@@ -274,6 +304,7 @@ struct dc {
struct dc_cap_funcs cap_funcs;
struct dc_config config;
struct dc_debug_options debug;
+ struct dc_bounding_box_overrides bb_overrides;
struct dc_context *ctx;
uint8_t link_count;
@@ -298,8 +329,12 @@ struct dc {
struct hw_sequencer_funcs hwss;
struct dce_hwseq *hwseq;
+ /* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
+ /* Require to maintain clocks and bandwidth for UEFI enabled HW */
+ bool optimize_seamless_boot;
+
/* FBC compressor */
struct compressor *fbc_compressor;
@@ -327,6 +362,7 @@ struct dc_init_data {
struct hw_asic_id asic_id;
void *driver; /* ctx */
struct cgs_device *cgs_device;
+ struct dc_bounding_box_overrides bb_overrides;
int num_virtual_links;
/*
@@ -594,7 +630,7 @@ struct dc_validation_set {
uint8_t plane_count;
};
-bool dc_validate_seamless_boot_timing(struct dc *dc,
+bool dc_validate_seamless_boot_timing(const struct dc *dc,
const struct dc_sink *sink,
struct dc_crtc_timing *crtc_timing);
@@ -633,7 +669,8 @@ void dc_resource_state_destruct(struct dc_state *context);
bool dc_commit_state(struct dc *dc, struct dc_state *context);
-struct dc_state *dc_create_state(void);
+struct dc_state *dc_create_state(struct dc *dc);
+struct dc_state *dc_copy_state(struct dc_state *src_ctx);
void dc_retain_state(struct dc_state *context);
void dc_release_state(struct dc_state *context);
@@ -645,9 +682,16 @@ struct dpcd_caps {
union dpcd_rev dpcd_rev;
union max_lane_count max_ln_count;
union max_down_spread max_down_spread;
+ union dprx_feature dprx_feature;
+
+ /* valid only for eDP v1.4 or higher*/
+ uint8_t edp_supported_link_rates_count;
+ enum dc_link_rate edp_supported_link_rates[8];
/* dongle type (DP converter, CV smart dongle) */
enum display_dongle_type dongle_type;
+ /* branch device or sink device */
+ bool is_branch_dev;
/* Dongle's downstream count. */
union sink_count sink_count;
/* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
@@ -663,11 +707,11 @@ struct dpcd_caps {
int8_t branch_dev_name[6];
int8_t branch_hw_revision;
int8_t branch_fw_revision[2];
- uint8_t link_rate_set;
bool allow_invalid_MSA_timing_param;
bool panel_mode_edp;
bool dpcd_display_control_capable;
+ bool ext_receiver_cap_field_present;
};
#include "dc_link.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
index 05c8c31d8b31..4ef97f65e55d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -68,6 +68,8 @@ enum aux_transaction_reply {
AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
+ AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK = 0x04,
+ AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER = 0x08,
AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index d4eab33c453b..11c68a399267 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -94,6 +94,8 @@ struct dc_link_settings {
enum dc_lane_count lane_count;
enum dc_link_rate link_rate;
enum dc_link_spread link_spread;
+ bool use_link_rate_set;
+ uint8_t link_rate_set;
};
struct dc_lane_settings {
@@ -420,10 +422,24 @@ union edp_configuration_cap {
uint8_t raw;
};
+union dprx_feature {
+ struct {
+ uint8_t GTC_CAP:1; // bit 0: DP 1.3+
+ uint8_t SST_SPLIT_SDP_CAP:1; // bit 1: DP 1.4
+ uint8_t AV_SYNC_CAP:1; // bit 2: DP 1.3+
+ uint8_t VSC_SDP_COLORIMETRY_SUPPORTED:1; // bit 3: DP 1.3+
+ uint8_t VSC_EXT_VESA_SDP_SUPPORTED:1; // bit 4: DP 1.4
+ uint8_t VSC_EXT_VESA_SDP_CHAINING_SUPPORTED:1; // bit 5: DP 1.4
+ uint8_t VSC_EXT_CEA_SDP_SUPPORTED:1; // bit 6: DP 1.4
+ uint8_t VSC_EXT_CEA_SDP_CHAINING_SUPPORTED:1; // bit 7: DP 1.4
+ } bits;
+ uint8_t raw;
+};
+
union training_aux_rd_interval {
struct {
uint8_t TRAINIG_AUX_RD_INTERVAL:7;
- uint8_t EXT_RECIEVER_CAP_FIELD_PRESENT:1;
+ uint8_t EXT_RECEIVER_CAP_FIELD_PRESENT:1;
} bits;
uint8_t raw;
};
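
The dprx_feature union added above lets a single DP_DPRX_FEATURE_ENUMERATION_LIST read be tested bit by bit. A small decode sketch, assuming the usual LSB-first bit-field layout and a hypothetical read-back value:

    #include <stdint.h>
    #include <stdio.h>

    union dprx_feature {
        struct {
            uint8_t GTC_CAP:1;                        /* bit 0 */
            uint8_t SST_SPLIT_SDP_CAP:1;              /* bit 1 */
            uint8_t AV_SYNC_CAP:1;                    /* bit 2 */
            uint8_t VSC_SDP_COLORIMETRY_SUPPORTED:1;  /* bit 3 */
            uint8_t VSC_EXT_VESA_SDP_SUPPORTED:1;
            uint8_t VSC_EXT_VESA_SDP_CHAINING_SUPPORTED:1;
            uint8_t VSC_EXT_CEA_SDP_SUPPORTED:1;
            uint8_t VSC_EXT_CEA_SDP_CHAINING_SUPPORTED:1;
        } bits;
        uint8_t raw;
    };

    int main(void)
    {
        union dprx_feature f;

        f.raw = 0x0a;  /* hypothetical read-back: bits 1 and 3 set */

        printf("split_sdp=%d colorimetry=%d\n",
               f.bits.SST_SPLIT_SDP_CAP,
               f.bits.VSC_SDP_COLORIMETRY_SUPPORTED);
        return 0;
    }
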
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 597d38393379..5e6c5eff49cf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -51,20 +51,16 @@ static inline void set_reg_field_value_masks(
field_value_mask->mask = field_value_mask->mask | mask;
}
-uint32_t generic_reg_update_ex(const struct dc_context *ctx,
- uint32_t addr, uint32_t reg_val, int n,
+static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
+ uint32_t addr, int n,
uint8_t shift1, uint32_t mask1, uint32_t field_value1,
- ...)
+ va_list ap)
{
- struct dc_reg_value_masks field_value_mask = {0};
uint32_t shift, mask, field_value;
int i = 1;
- va_list ap;
- va_start(ap, field_value1);
-
/* gather all bits value/mask getting updated in this register */
- set_reg_field_value_masks(&field_value_mask,
+ set_reg_field_value_masks(field_value_mask,
field_value1, mask1, shift1);
while (i < n) {
@@ -72,10 +68,48 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
mask = va_arg(ap, uint32_t);
field_value = va_arg(ap, uint32_t);
- set_reg_field_value_masks(&field_value_mask,
+ set_reg_field_value_masks(field_value_mask,
field_value, mask, shift);
i++;
}
+}
+
+uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+ ...)
+{
+ struct dc_reg_value_masks field_value_mask = {0};
+ uint32_t reg_val;
+ va_list ap;
+
+ va_start(ap, field_value1);
+
+ set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
+ field_value1, ap);
+
+ va_end(ap);
+
+ /* mmio write directly */
+ reg_val = dm_read_reg(ctx, addr);
+ reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
+ dm_write_reg(ctx, addr, reg_val);
+ return reg_val;
+}
+
+uint32_t generic_reg_set_ex(const struct dc_context *ctx,
+ uint32_t addr, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+ ...)
+{
+ struct dc_reg_value_masks field_value_mask = {0};
+ va_list ap;
+
+ va_start(ap, field_value1);
+
+ set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
+ field_value1, ap);
+
va_end(ap);
@@ -85,6 +119,24 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
return reg_val;
}
+uint32_t dm_read_reg_func(
+ const struct dc_context *ctx,
+ uint32_t address,
+ const char *func_name)
+{
+ uint32_t value;
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ DC_ERR("invalid register read; address = 0\n");
+ return 0;
+ }
+#endif
+ value = cgs_read_register(ctx->cgs_device, address);
+ trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
+
+ return value;
+}
+
uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
uint8_t shift, uint32_t mask, uint32_t *field_value)
{
@@ -235,7 +287,7 @@ uint32_t generic_reg_get(const struct dc_context *ctx,
}
*/
-uint32_t generic_reg_wait(const struct dc_context *ctx,
+void generic_reg_wait(const struct dc_context *ctx,
uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
const char *func_name, int line)
@@ -265,7 +317,7 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
delay_between_poll_us * i / 1000,
func_name, line);
- return reg_val;
+ return;
}
}
@@ -275,8 +327,6 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
BREAK_TO_DEBUGGER();
-
- return reg_val;
}
void generic_write_indirect_reg(const struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 8fc223defed4..4e26d6e93b31 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -246,10 +246,18 @@ void dc_link_set_test_pattern(struct dc_link *link,
const struct link_training_settings *p_link_settings,
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size);
+uint32_t dc_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_setting);
+
+const struct dc_link_settings *dc_link_get_verified_link_cap(
+ const struct dc_link *link);
bool dc_submit_i2c(
struct dc *dc,
uint32_t link_index,
struct i2c_command *cmd);
+uint32_t dc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing);
#endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 5657cb3a2ad3..17fa3bf6cf7b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -80,6 +80,7 @@ struct dc_stream_state {
struct dc_info_packet vrr_infopacket;
struct dc_info_packet vsc_infopacket;
struct dc_info_packet vsp_infopacket;
+ struct dc_info_packet dpsdp_infopacket;
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
@@ -221,6 +222,13 @@ struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
*/
uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream);
+/*
+ * Send dp sdp message.
+ */
+bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size);
+
/* TODO: Return parsed values rather than direct register read
* This has a dependency on the caller (amdgpu_display_get_crtc_scanoutpos)
* being refactored properly to be dce-specific
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index da2009a108cf..6c2a3d9a4c2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -103,7 +103,7 @@ struct dc_context {
};
-#define DC_MAX_EDID_BUFFER_SIZE 512
+#define DC_MAX_EDID_BUFFER_SIZE 1024
#define EDID_BLOCK_SIZE 128
#define MAX_SURFACE_NUM 4
#define NUM_PIXEL_FORMATS 10
@@ -395,7 +395,7 @@ struct dc_dongle_caps {
bool is_dp_hdmi_ycbcr422_converter;
bool is_dp_hdmi_ycbcr420_converter;
uint32_t dp_hdmi_max_bpc;
- uint32_t dp_hdmi_max_pixel_clk;
+ uint32_t dp_hdmi_max_pixel_clk_in_khz;
};
/* Scaling format */
enum scaling_transformation {
@@ -550,9 +550,9 @@ struct psr_config {
unsigned char psr_version;
unsigned int psr_rfb_setup_time;
bool psr_exit_link_training_required;
-
bool psr_frame_capture_indication_req;
unsigned int psr_sdp_transmit_line_num_deadline;
+ bool allow_smu_optimizations;
};
union dmcu_psr_level {
@@ -654,6 +654,7 @@ struct psr_context {
* continuing power down
*/
unsigned int frame_delay;
+ bool allow_smu_optimizations;
};
struct colorspace_transform {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 4febf4ef7240..937b5cffd7ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -171,24 +171,24 @@ static void submit_channel_request(
(request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
if (REG(AUXN_IMPCAL)) {
/* clear_aux_error */
- REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
- 1,
- 0);
+ REG_UPDATE_SEQ_2(AUXN_IMPCAL,
+ AUXN_CALOUT_ERROR_AK, 1,
+ AUXN_CALOUT_ERROR_AK, 0);
- REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
- 1,
- 0);
+ REG_UPDATE_SEQ_2(AUXP_IMPCAL,
+ AUXP_CALOUT_ERROR_AK, 1,
+ AUXP_CALOUT_ERROR_AK, 0);
/* force_default_calibrate */
- REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+ REG_UPDATE_SEQ_2(AUXN_IMPCAL,
AUXN_IMPCAL_ENABLE, 1,
AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
/* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
- REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
- 1,
- 0);
+ REG_UPDATE_SEQ_2(AUXP_IMPCAL,
+ AUXP_IMPCAL_OVERRIDE_ENABLE, 1,
+ AUXP_IMPCAL_OVERRIDE_ENABLE, 0);
}
/* set the delay and the number of bytes to write */
@@ -267,7 +267,7 @@ static int read_channel_reply(struct dce_aux *engine, uint32_t size,
if (!bytes_replied)
return -1;
- REG_UPDATE_1BY1_3(AUX_SW_DATA,
+ REG_UPDATE_SEQ_3(AUX_SW_DATA,
AUX_SW_INDEX, 0,
AUX_SW_AUTOINCREMENT_DISABLE, 1,
AUX_SW_DATA_RW, 1);
@@ -317,9 +317,10 @@ static enum aux_channel_operation_result get_channel_status(
*returned_bytes = 0;
/* poll to make sure that SW_DONE is asserted */
- value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
+ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
10, aux110->timeout_period/10);
+ value = REG_READ(AUX_SW_STATUS);
/* in case HPD is LOW, exit AUX transaction */
if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
@@ -374,7 +375,6 @@ static bool acquire(
struct dce_aux *engine,
struct ddc *ddc)
{
-
enum gpio_result result;
if (!is_engine_available(engine))
@@ -439,12 +439,12 @@ static enum i2caux_transaction_action i2caux_action_from_payload(struct aux_payl
return I2CAUX_TRANSACTION_ACTION_DP_READ;
}
-int dce_aux_transfer(struct ddc_service *ddc,
- struct aux_payload *payload)
+int dce_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_channel_operation_result *operation_result)
{
struct ddc *ddc_pin = ddc->ddc_pin;
struct dce_aux *aux_engine;
- enum aux_channel_operation_result operation_result;
struct aux_request_transaction_data aux_req;
struct aux_reply_transaction_data aux_rep;
uint8_t returned_bytes = 0;
@@ -455,7 +455,8 @@ int dce_aux_transfer(struct ddc_service *ddc,
memset(&aux_rep, 0, sizeof(aux_rep));
aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
- acquire(aux_engine, ddc_pin);
+ if (!acquire(aux_engine, ddc_pin))
+ return -1;
if (payload->i2c_over_aux)
aux_req.type = AUX_TRANSACTION_TYPE_I2C;
@@ -470,28 +471,26 @@ int dce_aux_transfer(struct ddc_service *ddc,
aux_req.data = payload->data;
submit_channel_request(aux_engine, &aux_req);
- operation_result = get_channel_status(aux_engine, &returned_bytes);
-
- switch (operation_result) {
- case AUX_CHANNEL_OPERATION_SUCCEEDED:
- res = read_channel_reply(aux_engine, payload->length,
- payload->data, payload->reply,
- &status);
- break;
- case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
- res = 0;
- break;
- case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
- case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
- case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ *operation_result = get_channel_status(aux_engine, &returned_bytes);
+
+ if (*operation_result == AUX_CHANNEL_OPERATION_SUCCEEDED) {
+ read_channel_reply(aux_engine, payload->length,
+ payload->data, payload->reply,
+ &status);
+ res = returned_bytes;
+ } else {
res = -1;
- break;
}
+
release_engine(aux_engine);
return res;
}
-#define AUX_RETRY_MAX 7
+#define AUX_MAX_RETRIES 7
+#define AUX_MAX_DEFER_RETRIES 7
+#define AUX_MAX_I2C_DEFER_RETRIES 7
+#define AUX_MAX_INVALID_REPLY_RETRIES 2
+#define AUX_MAX_TIMEOUT_RETRIES 3
bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload)
@@ -499,24 +498,85 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
int i, ret = 0;
uint8_t reply;
bool payload_reply = true;
+ enum aux_channel_operation_result operation_result;
+ int aux_ack_retries = 0,
+ aux_defer_retries = 0,
+ aux_i2c_defer_retries = 0,
+ aux_timeout_retries = 0,
+ aux_invalid_reply_retries = 0;
if (!payload->reply) {
payload_reply = false;
payload->reply = &reply;
}
- for (i = 0; i < AUX_RETRY_MAX; i++) {
- ret = dce_aux_transfer(ddc, payload);
-
- if (ret >= 0) {
- if (*payload->reply == 0) {
- if (!payload_reply)
- payload->reply = NULL;
- return true;
+ for (i = 0; i < AUX_MAX_RETRIES; i++) {
+ ret = dce_aux_transfer_raw(ddc, payload, &operation_result);
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ aux_timeout_retries = 0;
+ aux_invalid_reply_retries = 0;
+
+ switch (*payload->reply) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ if (!payload->write && payload->length != ret) {
+ if (++aux_ack_retries >= AUX_MAX_RETRIES)
+ goto fail;
+ else
+ udelay(300);
+ } else
+ return true;
+ break;
+
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
+ case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
+ if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
+ goto fail;
+ break;
+
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ aux_defer_retries = 0;
+ if (++aux_i2c_defer_retries >= AUX_MAX_I2C_DEFER_RETRIES)
+ goto fail;
+ break;
+
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ default:
+ goto fail;
}
- }
+ break;
+
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ if (++aux_invalid_reply_retries >= AUX_MAX_INVALID_REPLY_RETRIES)
+ goto fail;
+ else
+ udelay(400);
+ break;
+
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES)
+ goto fail;
+ else {
+ /*
+ * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts
+ * According to the DP spec there should be 3 retries total
+ * with a 400us wait in between each. Hardware already waits
+ * for 550us, therefore no wait is required here.
+ */
+ }
+ break;
- udelay(1000);
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
+ default:
+ goto fail;
+ }
}
+
+fail:
+ if (!payload_reply)
+ payload->reply = NULL;
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index d27f22c05e4b..aab5f0c34584 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -123,8 +123,9 @@ bool dce110_aux_engine_acquire(
struct dce_aux *aux_engine,
struct ddc *ddc);
-int dce_aux_transfer(struct ddc_service *ddc,
- struct aux_payload *cmd);
+int dce_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *cmd,
+ enum aux_channel_operation_result *operation_result);
bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *cmd);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index 6e142c2db986..963686380738 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -222,7 +222,7 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
* all required clocks
*/
for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
- if (context->bw.dce.dispclk_khz >
+ if (context->bw_ctx.bw.dce.dispclk_khz >
clk_mgr_dce->max_clks_by_state[i].display_clk_khz
|| max_pix_clk >
clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
@@ -232,7 +232,7 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
if (low_req_clk > clk_mgr_dce->max_clks_state) {
/* set max clock state for high phyclock, invalid on exceeding display clock */
if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
- < context->bw.dce.dispclk_khz)
+ < context->bw_ctx.bw.dce.dispclk_khz)
low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
else
low_req_clk = clk_mgr_dce->max_clks_state;
@@ -610,22 +610,22 @@ static void dce11_pplib_apply_display_requirements(
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
pp_display_cfg->all_displays_in_sync =
- context->bw.dce.all_displays_in_sync;
+ context->bw_ctx.bw.dce.all_displays_in_sync;
pp_display_cfg->nb_pstate_switch_disable =
- context->bw.dce.nbp_state_change_enable == false;
+ context->bw_ctx.bw.dce.nbp_state_change_enable == false;
pp_display_cfg->cpu_cc6_disable =
- context->bw.dce.cpuc_state_change_enable == false;
+ context->bw_ctx.bw.dce.cpuc_state_change_enable == false;
pp_display_cfg->cpu_pstate_disable =
- context->bw.dce.cpup_state_change_enable == false;
+ context->bw_ctx.bw.dce.cpup_state_change_enable == false;
pp_display_cfg->cpu_pstate_separation_time =
- context->bw.dce.blackout_recovery_time_us;
+ context->bw_ctx.bw.dce.blackout_recovery_time_us;
- pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+ pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
/ MEMORY_TYPE_MULTIPLIER_CZ;
pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
dc,
- context->bw.dce.sclk_khz);
+ context->bw_ctx.bw.dce.sclk_khz);
/*
* As workaround for >4x4K lightup set dcfclock to min_engine_clock value.
@@ -638,7 +638,7 @@ static void dce11_pplib_apply_display_requirements(
pp_display_cfg->min_engine_clock_khz : 0;
pp_display_cfg->min_engine_clock_deep_sleep_khz
- = context->bw.dce.sclk_deep_sleep_khz;
+ = context->bw_ctx.bw.dce.sclk_deep_sleep_khz;
pp_display_cfg->avail_mclk_switch_time_us =
dce110_get_min_vblank_time_us(context);
@@ -669,7 +669,7 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw.dce.dispclk_khz;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
@@ -696,7 +696,7 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw.dce.dispclk_khz;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
@@ -711,7 +711,7 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
}
if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
- context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
+ context->bw_ctx.bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
clk_mgr->clks.dispclk_khz = patched_disp_clk;
}
dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
@@ -723,7 +723,7 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw.dce.dispclk_khz;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
@@ -751,7 +751,7 @@ static void dce12_update_clocks(struct clk_mgr *clk_mgr,
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
- int patched_disp_clk = context->bw.dce.dispclk_khz;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 71d5777de961..f70437aae8e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -978,7 +978,7 @@ static bool dce110_clock_source_power_down(
}
static bool get_pixel_clk_frequency_100hz(
- struct clock_source *clock_source,
+ const struct clock_source *clock_source,
unsigned int inst,
unsigned int *pixel_clk_khz)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index c2926cf19dee..aa586672e8cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -213,9 +213,6 @@ static bool dce_dmcu_setup_psr(struct dmcu *dmcu,
link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
psr_context->sdpTransmitLineNumDeadline);
- if (psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION)
- REG_UPDATE(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, 1);
-
/* waitDMCUReadyForCmd */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
dmcu_wait_reg_ready_interval,
@@ -594,7 +591,7 @@ static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
psr_context->sdpTransmitLineNumDeadline);
- if (psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION)
+ if (psr_context->allow_smu_optimizations)
REG_UPDATE(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, 1);
/* waitDMCUReadyForCmd */
@@ -615,6 +612,7 @@ static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
psr_context->psrFrameCaptureIndicationReq;
masterCmdData1.bits.aux_chan = psr_context->channel;
masterCmdData1.bits.aux_repeat = psr_context->aux_repeats;
+ masterCmdData1.bits.allow_smu_optimizations = psr_context->allow_smu_optimizations;
dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1),
masterCmdData1.u32All);
@@ -635,6 +633,7 @@ static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG3),
masterCmdData3.u32All);
+
/* setDMCUParam_Cmd */
REG_UPDATE(MASTER_COMM_CMD_REG,
MASTER_COMM_CMD_REG_BYTE0, PSR_SET);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index c24c0e5ea44e..60ce56f60ae3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -199,16 +199,16 @@ struct dce_dmcu {
******************************************************************/
union dce_dmcu_psr_config_data_reg1 {
struct {
- unsigned int timehyst_frames:8; /*[7:0]*/
- unsigned int hyst_lines:7; /*[14:8]*/
- unsigned int rfb_update_auto_en:1; /*[15:15]*/
- unsigned int dp_port_num:3; /*[18:16]*/
- unsigned int dcp_sel:3; /*[21:19]*/
- unsigned int phy_type:1; /*[22:22]*/
- unsigned int frame_cap_ind:1; /*[23:23]*/
- unsigned int aux_chan:3; /*[26:24]*/
- unsigned int aux_repeat:4; /*[30:27]*/
- unsigned int reserved:1; /*[31:31]*/
+ unsigned int timehyst_frames:8; /*[7:0]*/
+ unsigned int hyst_lines:7; /*[14:8]*/
+ unsigned int rfb_update_auto_en:1; /*[15:15]*/
+ unsigned int dp_port_num:3; /*[18:16]*/
+ unsigned int dcp_sel:3; /*[21:19]*/
+ unsigned int phy_type:1; /*[22:22]*/
+ unsigned int frame_cap_ind:1; /*[23:23]*/
+ unsigned int aux_chan:3; /*[26:24]*/
+ unsigned int aux_repeat:4; /*[30:27]*/
+ unsigned int allow_smu_optimizations:1; /*[31:31]*/
} bits;
unsigned int u32All;
};
@@ -236,7 +236,7 @@ union dce_dmcu_psr_config_data_reg3 {
struct {
unsigned int psr_level:16; /*[15:0]*/
unsigned int link_rate:4; /*[19:16]*/
- unsigned int reserved:12; /*[31:20]*/
+ unsigned int reserved:12; /*[31:20]*/
} bits;
unsigned int u32All;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 40f2d6e0b122..cd26161bcc4d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -346,6 +346,16 @@ static void release_engine(
}
+static bool is_engine_available(struct dce_i2c_hw *dce_i2c_hw)
+{
+ unsigned int arbitrate;
+
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ if (arbitrate == DC_I2C_REG_RW_CNTL_STATUS_DMCU_ONLY)
+ return false;
+ return true;
+}
+
struct dce_i2c_hw *acquire_i2c_hw_engine(
struct resource_pool *pool,
struct ddc *ddc)
@@ -368,7 +378,7 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
if (!dce_i2c_hw)
return NULL;
- if (pool->i2c_hw_buffer_in_use)
+ if (pool->i2c_hw_buffer_in_use || !is_engine_available(dce_i2c_hw))
return NULL;
do {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
index 7f19bb439665..575500755b2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
@@ -29,7 +29,8 @@
enum dc_i2c_status {
DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW,
- DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW
+ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW,
+ DC_I2C_REG_RW_CNTL_STATUS_DMCU_ONLY = 2,
};
enum dc_i2c_arbitration {
@@ -129,7 +130,8 @@ enum {
I2C_SF(DC_I2C_DATA, DC_I2C_DATA, mask_sh),\
I2C_SF(DC_I2C_DATA, DC_I2C_INDEX, mask_sh),\
I2C_SF(DC_I2C_DATA, DC_I2C_INDEX_WRITE, mask_sh),\
- I2C_SF(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, mask_sh)
+ I2C_SF(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, mask_sh),\
+ I2C_SF(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, mask_sh)
#define I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh)\
I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
@@ -170,6 +172,7 @@ struct dce_i2c_shift {
uint8_t DC_I2C_INDEX;
uint8_t DC_I2C_INDEX_WRITE;
uint8_t XTAL_REF_DIV;
+ uint8_t DC_I2C_REG_RW_CNTL_STATUS;
};
struct dce_i2c_mask {
@@ -207,6 +210,7 @@ struct dce_i2c_mask {
uint32_t DC_I2C_INDEX;
uint32_t DC_I2C_INDEX_WRITE;
uint32_t XTAL_REF_DIV;
+ uint32_t DC_I2C_REG_RW_CNTL_STATUS;
};
struct dce_i2c_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 1fa2d4fd7a35..14309fe6f2e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -272,7 +272,8 @@ static void dce110_update_hdmi_info_packet(
static void dce110_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space)
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting)
{
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
uint32_t h_active_start;
@@ -977,7 +978,7 @@ static void dce110_stream_encoder_dp_unblank(
uint64_t m_vid_l = n_vid;
- m_vid_l *= param->pixel_clk_khz;
+ m_vid_l *= param->timing.pix_clk_100hz / 10;
m_vid_l = div_u64(m_vid_l,
param->link_settings.link_rate
* LINK_RATE_REF_FREQ_IN_KHZ);
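
The unblank path programs Mvid/Nvid so that Mvid/Nvid = pixel_clock / link_clock, with the link clock expressed as the link-rate multiplier times a 27 MHz reference. A worked sketch with hypothetical numbers; the Nvid value, the HBR2 multiplier, and the 27000 kHz reference are assumptions, not taken from this hunk:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t n_vid = 32768;                      /* 0x8000, assumed Nvid */
        uint32_t pix_clk_100hz = 1485000;            /* 148.50 MHz in 100 Hz units */
        uint32_t link_rate = 0x14;                   /* assumed HBR2 multiplier */
        uint32_t link_rate_ref_freq_in_khz = 27000;  /* assumed reference clock */

        uint64_t m_vid_l = n_vid;

        m_vid_l *= pix_clk_100hz / 10;               /* pixel clock in kHz */
        m_vid_l /= (uint64_t)link_rate * link_rate_ref_freq_in_khz;  /* 540000 kHz */

        /* 32768 * 148500 / 540000 = 9011 */
        printf("m_vid = %llu\n", (unsigned long long)m_vid_l);
        return 0;
    }
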
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 23044e6723e8..767d37c6d942 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -378,6 +378,11 @@ static const struct resource_caps res_cap = {
.num_ddc = 6,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+ .supports_argb8888 = true,
+};
+
#define CTX ctx
#define REG(reg) mm ## reg
@@ -768,11 +773,11 @@ bool dce100_validate_bandwidth(
if (at_least_one_pipe) {
/* TODO implement when needed but for now hardcode max value*/
- context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ context->bw_ctx.bw.dce.dispclk_khz = 681000;
+ context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
} else {
- context->bw.dce.dispclk_khz = 0;
- context->bw.dce.yclk_khz = 0;
+ context->bw_ctx.bw.dce.dispclk_khz = 0;
+ context->bw_ctx.bw.dce.yclk_khz = 0;
}
return true;
@@ -1023,6 +1028,9 @@ static bool construct(
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
goto res_create_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 5e4db3712eef..7ac50ab1b762 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -616,7 +616,7 @@ dce110_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
{
- bool is_hdmi;
+ bool is_hdmi_tmds;
bool is_dp;
ASSERT(pipe_ctx->stream);
@@ -624,13 +624,13 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.stream_enc == NULL)
return; /* this is not root pipe */
- is_hdmi = dc_is_hdmi_signal(pipe_ctx->stream->signal);
+ is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
- if (!is_hdmi && !is_dp)
+ if (!is_hdmi_tmds && !is_dp)
return;
- if (is_hdmi)
+ if (is_hdmi_tmds)
pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
@@ -935,13 +935,31 @@ void hwss_edp_backlight_control(
edp_receiver_ready_T9(link);
}
+// Static helper function which calls the correct function
+// based on pp_smu version
+static void set_pme_wa_enable_by_version(struct dc *dc)
+{
+ struct pp_smu_funcs *pp_smu = NULL;
+
+ if (dc->res_pool->pp_smu)
+ pp_smu = dc->res_pool->pp_smu;
+
+ if (pp_smu) {
+ if (pp_smu->ctx.ver == PP_SMU_VER_RV && pp_smu->rv_funcs.set_pme_wa_enable)
+ pp_smu->rv_funcs.set_pme_wa_enable(&(pp_smu->ctx));
+ }
+}
+
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
{
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
/* notify audio driver for audio modes of monitor */
- struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct pp_smu_funcs *pp_smu = NULL;
unsigned int i, num_audio = 1;
+ if (core_dc->res_pool->pp_smu)
+ pp_smu = core_dc->res_pool->pp_smu;
+
if (pipe_ctx->stream_res.audio) {
for (i = 0; i < MAX_PIPES; i++) {
/*current_state not updated yet*/
@@ -951,30 +969,31 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
- if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ if (num_audio >= 1 && pp_smu != NULL)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
- pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
+ set_pme_wa_enable_by_version(core_dc);
/* un-mute audio */
/* TODO: audio should be per stream rather than per link */
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
- pipe_ctx->stream_res.stream_enc, false);
+ pipe_ctx->stream_res.stream_enc, false);
}
}
void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct pp_smu_funcs *pp_smu = NULL;
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
if (pipe_ctx->stream_res.audio) {
- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+ if (dc->res_pool->pp_smu)
+ pp_smu = dc->res_pool->pp_smu;
if (option != KEEP_ACQUIRED_RESOURCE ||
- !dc->debug.az_endpoint_mute_only) {
+ !dc->debug.az_endpoint_mute_only)
/*only disable az_endpoint if power down or free*/
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
- }
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
@@ -989,9 +1008,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
pipe_ctx->stream_res.audio = NULL;
}
- if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ if (pp_smu != NULL)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
- pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
+ set_pme_wa_enable_by_version(dc);
/* TODO: notify audio driver for if audio modes list changed
* add audio mode list change flag */
@@ -1007,7 +1026,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
struct dc_link *link = stream->link;
struct dc *dc = pipe_ctx->stream->ctx->dc;
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc);
@@ -1032,7 +1051,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link *link = stream->link;
/* only 3 items below are used by unblank */
- params.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ params.timing = pipe_ctx->stream->timing;
params.link_settings.link_rate = link_settings->link_rate;
if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -1147,8 +1166,8 @@ static void build_audio_output(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
audio_output->pll_info.dp_dto_source_clock_in_khz =
- state->dccg->funcs->get_dp_ref_clk_frequency(
- state->dccg);
+ state->clk_mgr->funcs->get_dp_ref_clk_frequency(
+ state->clk_mgr);
}
audio_output->pll_info.feed_back_divider =
@@ -1349,7 +1368,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx->stream_res.tg, event_triggers);
- if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
@@ -1358,7 +1377,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.opp,
COLOR_SPACE_YCBCR601,
stream->timing.display_color_depth,
- pipe_ctx->stream->signal);
+ stream->signal);
pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
pipe_ctx->stream_res.opp,
@@ -1532,6 +1551,9 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
}
}
+ if (dc->hwss.init_pipes)
+ dc->hwss.init_pipes(dc, context);
+
if (edp_link) {
/* this seems to cause blank screens on DCE8 */
if ((dc->ctx->dce_version == DCE_VERSION_8_0) ||
@@ -1608,18 +1630,18 @@ static void dce110_set_displaymarks(
dc->bw_vbios->blackout_duration, pipe_ctx->stream);
pipe_ctx->plane_res.mi->funcs->mem_input_program_display_marks(
pipe_ctx->plane_res.mi,
- context->bw.dce.nbp_state_change_wm_ns[num_pipes],
- context->bw.dce.stutter_exit_wm_ns[num_pipes],
- context->bw.dce.stutter_entry_wm_ns[num_pipes],
- context->bw.dce.urgent_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.stutter_entry_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.urgent_wm_ns[num_pipes],
total_dest_line_time_ns);
if (i == underlay_idx) {
num_pipes++;
pipe_ctx->plane_res.mi->funcs->mem_input_program_chroma_display_marks(
pipe_ctx->plane_res.mi,
- context->bw.dce.nbp_state_change_wm_ns[num_pipes],
- context->bw.dce.stutter_exit_wm_ns[num_pipes],
- context->bw.dce.urgent_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.urgent_wm_ns[num_pipes],
total_dest_line_time_ns);
}
num_pipes++;
@@ -2612,7 +2634,7 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
struct mem_input *mi = pipe_ctx->plane_res.mi;
struct dc_cursor_mi_param param = {
.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
- .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+ .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.xtalin_clock_inKhz,
.viewport = pipe_ctx->plane_res.scl_data.viewport,
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 7549adaa1542..7c4914b2b524 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -392,6 +392,21 @@ static const struct resource_caps stoney_resource_cap = {
.num_ddc = 3,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+ .blends_with_below = true,
+ .blends_with_above = true,
+ .per_pixel_alpha = 1,
+ .supports_argb8888 = true,
+};
+
+static const struct dc_plane_cap underlay_plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_UNDERLAY,
+ .blends_with_above = true,
+ .per_pixel_alpha = 1,
+ .supports_nv12 = true
+};
+
#define CTX ctx
#define REG(reg) mm ## reg
@@ -868,7 +883,7 @@ static bool dce110_validate_bandwidth(
dc->bw_vbios,
context->res_ctx.pipe_ctx,
dc->res_pool->pipe_count,
- &context->bw.dce))
+ &context->bw_ctx.bw.dce))
result = true;
if (!result)
@@ -878,8 +893,8 @@ static bool dce110_validate_bandwidth(
context->streams[0]->timing.v_addressable,
context->streams[0]->timing.pix_clk_100hz / 10);
- if (memcmp(&dc->current_state->bw.dce,
- &context->bw.dce, sizeof(context->bw.dce))) {
+ if (memcmp(&dc->current_state->bw_ctx.bw.dce,
+ &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) {
DC_LOG_BANDWIDTH_CALCS(
"%s: finish,\n"
@@ -893,34 +908,34 @@ static bool dce110_validate_bandwidth(
"sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
,
__func__,
- context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
- context->bw.dce.urgent_wm_ns[0].b_mark,
- context->bw.dce.urgent_wm_ns[0].a_mark,
- context->bw.dce.stutter_exit_wm_ns[0].b_mark,
- context->bw.dce.stutter_exit_wm_ns[0].a_mark,
- context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
- context->bw.dce.urgent_wm_ns[1].b_mark,
- context->bw.dce.urgent_wm_ns[1].a_mark,
- context->bw.dce.stutter_exit_wm_ns[1].b_mark,
- context->bw.dce.stutter_exit_wm_ns[1].a_mark,
- context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
- context->bw.dce.urgent_wm_ns[2].b_mark,
- context->bw.dce.urgent_wm_ns[2].a_mark,
- context->bw.dce.stutter_exit_wm_ns[2].b_mark,
- context->bw.dce.stutter_exit_wm_ns[2].a_mark,
- context->bw.dce.stutter_mode_enable,
- context->bw.dce.cpuc_state_change_enable,
- context->bw.dce.cpup_state_change_enable,
- context->bw.dce.nbp_state_change_enable,
- context->bw.dce.all_displays_in_sync,
- context->bw.dce.dispclk_khz,
- context->bw.dce.sclk_khz,
- context->bw.dce.sclk_deep_sleep_khz,
- context->bw.dce.yclk_khz,
- context->bw.dce.blackout_recovery_time_us);
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.stutter_mode_enable,
+ context->bw_ctx.bw.dce.cpuc_state_change_enable,
+ context->bw_ctx.bw.dce.cpup_state_change_enable,
+ context->bw_ctx.bw.dce.nbp_state_change_enable,
+ context->bw_ctx.bw.dce.all_displays_in_sync,
+ context->bw_ctx.bw.dce.dispclk_khz,
+ context->bw_ctx.bw.dce.sclk_khz,
+ context->bw_ctx.bw.dce.sclk_deep_sleep_khz,
+ context->bw_ctx.bw.dce.yclk_khz,
+ context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
return result;
}
@@ -1371,6 +1386,11 @@ static bool construct(
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < pool->base.underlay_pipe_index; ++i)
+ dc->caps.planes[i] = plane_cap;
+
+ dc->caps.planes[pool->base.underlay_pipe_index] = underlay_plane_cap;
+
bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
bw_calcs_data_update_from_pplib(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index ea3065d63372..2f28a74383f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -397,6 +397,11 @@ static const struct resource_caps polaris_11_resource_cap = {
.num_ddc = 5,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+ .supports_argb8888 = true,
+};
+
#define CTX ctx
#define REG(reg) mm ## reg
@@ -818,7 +823,7 @@ bool dce112_validate_bandwidth(
dc->bw_vbios,
context->res_ctx.pipe_ctx,
dc->res_pool->pipe_count,
- &context->bw.dce))
+ &context->bw_ctx.bw.dce))
result = true;
if (!result)
@@ -826,8 +831,8 @@ bool dce112_validate_bandwidth(
"%s: Bandwidth validation failed!",
__func__);
- if (memcmp(&dc->current_state->bw.dce,
- &context->bw.dce, sizeof(context->bw.dce))) {
+ if (memcmp(&dc->current_state->bw_ctx.bw.dce,
+ &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) {
DC_LOG_BANDWIDTH_CALCS(
"%s: finish,\n"
@@ -841,34 +846,34 @@ bool dce112_validate_bandwidth(
"sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
,
__func__,
- context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
- context->bw.dce.urgent_wm_ns[0].b_mark,
- context->bw.dce.urgent_wm_ns[0].a_mark,
- context->bw.dce.stutter_exit_wm_ns[0].b_mark,
- context->bw.dce.stutter_exit_wm_ns[0].a_mark,
- context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
- context->bw.dce.urgent_wm_ns[1].b_mark,
- context->bw.dce.urgent_wm_ns[1].a_mark,
- context->bw.dce.stutter_exit_wm_ns[1].b_mark,
- context->bw.dce.stutter_exit_wm_ns[1].a_mark,
- context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
- context->bw.dce.urgent_wm_ns[2].b_mark,
- context->bw.dce.urgent_wm_ns[2].a_mark,
- context->bw.dce.stutter_exit_wm_ns[2].b_mark,
- context->bw.dce.stutter_exit_wm_ns[2].a_mark,
- context->bw.dce.stutter_mode_enable,
- context->bw.dce.cpuc_state_change_enable,
- context->bw.dce.cpup_state_change_enable,
- context->bw.dce.nbp_state_change_enable,
- context->bw.dce.all_displays_in_sync,
- context->bw.dce.dispclk_khz,
- context->bw.dce.sclk_khz,
- context->bw.dce.sclk_deep_sleep_khz,
- context->bw.dce.yclk_khz,
- context->bw.dce.blackout_recovery_time_us);
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.stutter_mode_enable,
+ context->bw_ctx.bw.dce.cpuc_state_change_enable,
+ context->bw_ctx.bw.dce.cpup_state_change_enable,
+ context->bw_ctx.bw.dce.nbp_state_change_enable,
+ context->bw_ctx.bw.dce.all_displays_in_sync,
+ context->bw_ctx.bw.dce.dispclk_khz,
+ context->bw_ctx.bw.dce.sclk_khz,
+ context->bw_ctx.bw.dce.sclk_deep_sleep_khz,
+ context->bw_ctx.bw.dce.yclk_khz,
+ context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
return result;
}
@@ -887,7 +892,7 @@ enum dc_status resource_map_phy_clock_resources(
return DC_ERROR_UNEXPECTED;
if (dc_is_dp_signal(pipe_ctx->stream->signal)
- || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ || dc_is_virtual_signal(pipe_ctx->stream->signal))
pipe_ctx->clock_source =
dc->res_pool->dp_clock_source;
else
@@ -1310,6 +1315,9 @@ static bool construct(
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
/* Create hardware sequencer */
dce112_hw_sequencer_construct(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 312a0aebf91f..01ea503faa12 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -454,6 +454,11 @@ static const struct resource_caps res_cap = {
.num_ddc = 6,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+ .supports_argb8888 = true,
+};
+
static const struct dc_debug_options debug_defaults = {
.disable_clock_gate = true,
};
@@ -1171,6 +1176,9 @@ static bool construct(
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
bw_calcs_data_update_from_pplib(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index c109ace96be9..c7899ec96287 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -387,6 +387,11 @@ static const struct resource_caps res_cap_83 = {
.num_ddc = 2,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+ .supports_argb8888 = true,
+};
+
static const struct dce_dmcu_registers dmcu_regs = {
DMCU_DCE80_REG_LIST()
};
@@ -802,11 +807,11 @@ bool dce80_validate_bandwidth(
if (at_least_one_pipe) {
/* TODO: implement when needed; for now hardcode max value */
- context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ context->bw_ctx.bw.dce.dispclk_khz = 681000;
+ context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
} else {
- context->bw.dce.dispclk_khz = 0;
- context->bw.dce.yclk_khz = 0;
+ context->bw_ctx.bw.dce.dispclk_khz = 0;
+ context->bw_ctx.bw.dce.yclk_khz = 0;
}
return true;
@@ -1032,6 +1037,10 @@ static bool dce80_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
@@ -1237,6 +1246,10 @@ static bool dce81_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
@@ -1438,6 +1451,10 @@ static bool dce83_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
index afe8c42211cd..0d9bee8d5ab9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
@@ -43,23 +43,6 @@
#define DC_LOGGER \
clk_mgr->ctx->logger
-void dcn1_pplib_apply_display_requirements(
- struct dc *dc,
- struct dc_state *context)
-{
- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
- pp_display_cfg->min_engine_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
- pp_display_cfg->min_memory_clock_khz = dc->res_pool->clk_mgr->clks.fclk_khz;
- pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfclock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
- pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
- dce110_fill_display_configs(context, pp_display_cfg);
-
- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-}
-
static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
{
bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
@@ -167,11 +150,11 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
{
struct dc *dc = clk_mgr->ctx->dc;
struct dc_debug_options *debug = &dc->debug;
- struct dc_clocks *new_clocks = &context->bw.dcn.clk;
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct pp_smu_display_requirement_rv *smu_req_cur =
&dc->res_pool->pp_smu_req;
struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+ struct pp_smu_funcs_rv *pp_smu = NULL;
bool send_request_to_increase = false;
bool send_request_to_lower = false;
int display_count;
@@ -179,7 +162,8 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
bool enter_display_off = false;
display_count = get_active_display_cnt(dc, context);
-
+ if (dc->res_pool->pp_smu)
+ pp_smu = &dc->res_pool->pp_smu->rv_funcs;
if (display_count == 0)
enter_display_off = true;
@@ -189,7 +173,7 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
* if function pointer not set up, this message is
* sent as part of pplib_apply_display_requirements.
*/
- if (pp_smu->set_display_count)
+ if (pp_smu && pp_smu->set_display_count)
pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
smu_req.display_count = display_count;
@@ -239,17 +223,13 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
*/
if (send_request_to_increase) {
/*use dcfclk to request voltage*/
- if (pp_smu->set_hard_min_fclk_by_freq &&
+ if (pp_smu && pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz);
pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz);
pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz);
- } else {
- if (pp_smu->set_display_requirement)
- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
- dcn1_pplib_apply_display_requirements(dc, context);
}
}
@@ -265,17 +245,13 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
if (!send_request_to_increase && send_request_to_lower) {
/*use dcfclk to request voltage*/
- if (pp_smu->set_hard_min_fclk_by_freq &&
+ if (pp_smu && pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz);
pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz);
pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz);
- } else {
- if (pp_smu->set_display_requirement)
- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
- dcn1_pplib_apply_display_requirements(dc, context);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
index a995eda443a3..97007cf33665 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
@@ -34,10 +34,6 @@ struct clk_bypass {
uint32_t dprefclk_bypass;
};
-void dcn1_pplib_apply_display_requirements(
- struct dc *dc,
- struct dc_state *context);
-
struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx);
#endif //__DCN10_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index cd1ebe57ed59..6f4b24756323 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -91,13 +91,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-enum gamut_remap_select {
- GAMUT_REMAP_BYPASS = 0,
- GAMUT_REMAP_COEFF,
- GAMUT_REMAP_COMA_COEFF,
- GAMUT_REMAP_COMB_COEFF
-};
-
void dpp_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s)
{
@@ -392,6 +385,10 @@ void dpp1_cnv_setup (
default:
break;
}
+
+ /* Set default color space based on format if none is given. */
+ color_space = input_color_space ? input_color_space : color_space;
+
REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
@@ -403,7 +400,7 @@ void dpp1_cnv_setup (
for (i = 0; i < 12; i++)
tbl_entry.regval[i] = input_csc_color_matrix.matrix[i];
- tbl_entry.color_space = input_color_space;
+ tbl_entry.color_space = color_space;
if (color_space >= COLOR_SPACE_YCBCR601)
select = INPUT_CSC_SELECT_ICSC;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 41f0f4c912e7..882bcc5a40f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -88,13 +88,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-enum gamut_remap_select {
- GAMUT_REMAP_BYPASS = 0,
- GAMUT_REMAP_COEFF,
- GAMUT_REMAP_COMA_COEFF,
- GAMUT_REMAP_COMB_COEFF
-};
-
static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = {
{COLOR_SPACE_SRGB,
{0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index c7642e748297..ce21a290bf3e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -406,15 +406,25 @@ void dpp1_dscl_calc_lb_num_partitions(
int *num_part_y,
int *num_part_c)
{
+ int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a,
+ lb_bpc, memory_line_size_y, memory_line_size_c, memory_line_size_a;
+
int line_size = scl_data->viewport.width < scl_data->recout.width ?
scl_data->viewport.width : scl_data->recout.width;
int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
scl_data->viewport_c.width : scl_data->recout.width;
- int lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
- int memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
- int memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
- int memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
- int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a;
+
+ if (line_size == 0)
+ line_size = 1;
+
+ if (line_size_c == 0)
+ line_size_c = 1;
+
+
+ lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
+ memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
+ memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
+ memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
if (lb_config == LB_MEMORY_CONFIG_1) {
lb_memory_size = 816;
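
The +71 and +5 additions above are the standard integer ceiling-division idiom, ceil(a/b) == (a + b - 1) / b, applied to the per-line storage requirement, and the new clamps keep a zero viewport/recout width from propagating into those divisions. A minimal standalone sketch, with example numbers that are not taken from the driver:

/* Illustrative only: the (n + d - 1) / d ceiling-division idiom used above. */
static inline int ceil_div(int n, int d)
{
	return (n + d - 1) / d;
}

/*
 * Example with assumed values: a 1366-pixel line at a 30-bit line-buffer
 * depth needs ceil(1366 * 30 / 72) = ceil(569.2) = 570 words;
 * (1366 * 30 + 71) / 72 likewise evaluates to 570.
 */
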
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index e161ad836812..295cbd5b843f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -258,8 +258,9 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
- REG_UPDATE_SEQ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0, 1);
+ REG_UPDATE_SEQ_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
}
void hubbub1_program_watermarks(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index a6d6dfe00617..3268ab089363 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -595,6 +595,9 @@
type AGP_BASE;\
type AGP_BOT;\
type AGP_TOP;\
+ type DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\
+ type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\
+ type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\
/* todo: get these from GVM instead of reading registers ourselves */\
type PAGE_DIRECTORY_ENTRY_HI32;\
type PAGE_DIRECTORY_ENTRY_LO32;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index d1a8f1c302a9..dab370676bfa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -65,7 +65,7 @@ void print_microsec(struct dc_context *dc_ctx,
struct dc_log_buffer_ctx *log_ctx,
uint32_t ref_cycle)
{
- const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
+ const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
@@ -345,13 +345,13 @@ void dcn10_log_hw_state(struct dc *dc,
DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
- dc->current_state->bw.dcn.clk.dcfclk_khz,
- dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
- dc->current_state->bw.dcn.clk.dispclk_khz,
- dc->current_state->bw.dcn.clk.dppclk_khz,
- dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
- dc->current_state->bw.dcn.clk.fclk_khz,
- dc->current_state->bw.dcn.clk.socclk_khz);
+ dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
log_mpc_crc(dc, log_ctx);
@@ -714,7 +714,7 @@ static enum dc_status dcn10_enable_stream_timing(
return DC_OK;
}
-static void reset_back_end_for_pipe(
+static void dcn10_reset_back_end_for_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
@@ -889,22 +889,23 @@ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
dcn10_verify_allow_pstate_change_high(dc);
}
-static void plane_atomic_power_down(struct dc *dc, struct pipe_ctx *pipe_ctx)
+static void plane_atomic_power_down(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp)
{
struct dce_hwseq *hws = dc->hwseq;
- struct dpp *dpp = pipe_ctx->plane_res.dpp;
DC_LOGGER_INIT(dc->ctx->logger);
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
dpp_pg_control(hws, dpp->inst, false);
- hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, false);
+ hubp_pg_control(hws, hubp->inst, false);
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
- "Power gated front end %d\n", pipe_ctx->pipe_idx);
+ "Power gated front end %d\n", hubp->inst);
}
}
@@ -931,7 +932,9 @@ static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->power_gated = true;
dc->optimized_required = false; /* We're powering off, no need to optimize */
- plane_atomic_power_down(dc, pipe_ctx);
+ plane_atomic_power_down(dc,
+ pipe_ctx->plane_res.dpp,
+ pipe_ctx->plane_res.hubp);
pipe_ctx->stream = NULL;
memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
@@ -976,16 +979,14 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
* to non-preferred front end. If pipe_ctx->stream is not NULL,
* we will use the pipe, so don't disable
*/
- if (pipe_ctx->stream != NULL)
+ if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
continue;
- if (tg->funcs->is_tg_enabled(tg))
- tg->funcs->lock(tg);
-
/* Blank controller using driver code instead of
* command table.
*/
if (tg->funcs->is_tg_enabled(tg)) {
+ tg->funcs->lock(tg);
tg->funcs->set_blank(tg, true);
hwss_wait_for_blank_complete(tg);
}
@@ -1001,16 +1002,19 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
struct dpp *dpp = dc->res_pool->dpps[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- // W/A for issue with dc_post_update_surfaces_to_stream
- hubp->power_gated = true;
-
/* There is an assumption that pipe_ctx is not mapping irregularly
* to non-preferred front end. If pipe_ctx->stream is not NULL,
* we will use the pipe, so don't disable
*/
- if (pipe_ctx->stream != NULL)
+ if (can_apply_seamless_boot &&
+ pipe_ctx->stream != NULL &&
+ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
+ pipe_ctx->stream_res.tg))
continue;
+ /* Disable on the current state so the new one isn't cleared. */
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream_res.tg = tg;
@@ -1108,6 +1112,22 @@ static void dcn10_init_hw(struct dc *dc)
link->link_status.link_active = true;
}
+ /* If taking over control from VBIOS, we may want to optimize our first
+ * mode set, so we need to skip powering down pipes until we know which
+ * pipes we want to use.
+ * Otherwise, if taking control is not possible, we need to power
+ * everything down.
+ */
+ if (dcb->funcs->is_accelerated_mode(dcb)) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct hubp *hubp = dc->res_pool->hubps[i];
+ struct dpp *dpp = dc->res_pool->dpps[i];
+
+ dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
+ plane_atomic_power_down(dc, dpp, hubp);
+ }
+ }
+
for (i = 0; i < dc->res_pool->audio_count; i++) {
struct audio *audio = dc->res_pool->audios[i];
@@ -1137,12 +1157,9 @@ static void dcn10_init_hw(struct dc *dc)
enable_power_gating_plane(dc->hwseq, true);
memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks));
-
- if (dc->hwss.init_pipes)
- dc->hwss.init_pipes(dc, dc->current_state);
}
-static void reset_hw_ctx_wrap(
+static void dcn10_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context)
{
@@ -1164,10 +1181,9 @@ static void reset_hw_ctx_wrap(
pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
struct clock_source *old_clk = pipe_ctx_old->clock_source;
- reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
- if (dc->hwss.enable_stream_gating) {
+ dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+ if (dc->hwss.enable_stream_gating)
dc->hwss.enable_stream_gating(dc, pipe_ctx);
- }
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
@@ -1836,7 +1852,7 @@ void dcn10_get_hdr_visual_confirm_color(
switch (top_pipe_ctx->plane_res.scl_data.format) {
case PIXEL_FORMAT_ARGB2101010:
- if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_UNITY) {
+ if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
/* HDR10, ARGB2101010 - set border color to red */
color->color_r_cr = color_value;
}
@@ -1931,7 +1947,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
plane_state->format,
EXPANSION_MODE_ZERO,
plane_state->input_csc_color_matrix,
- COLOR_SPACE_YCBCR601_LIMITED);
+ plane_state->color_space);
//set scale and bias registers
build_prescale_params(&bns_params, plane_state);
@@ -2051,7 +2067,7 @@ void update_dchubp_dpp(
* divided by 2
*/
if (plane_state->update_flags.bits.full_update) {
- bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
+ bool should_divided_by_2 = context->bw_ctx.bw.dcn.clk.dppclk_khz <=
dc->res_pool->clk_mgr->clks.dispclk_khz / 2;
dpp->funcs->dpp_dppclk_control(
@@ -2120,6 +2136,9 @@ void update_dchubp_dpp(
if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
dc->hwss.set_cursor_position(pipe_ctx);
dc->hwss.set_cursor_attribute(pipe_ctx);
+
+ if (dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
if (plane_state->update_flags.bits.full_update) {
@@ -2310,6 +2329,7 @@ static void dcn10_apply_ctx_for_surface(
int i;
struct timing_generator *tg;
bool removed_pipe[4] = { false };
+ bool interdependent_update = false;
struct pipe_ctx *top_pipe_to_program =
find_top_pipe_for_stream(dc, context, stream);
DC_LOGGER_INIT(dc->ctx->logger);
@@ -2319,7 +2339,13 @@ static void dcn10_apply_ctx_for_surface(
tg = top_pipe_to_program->stream_res.tg;
- dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
+ interdependent_update = top_pipe_to_program->plane_state &&
+ top_pipe_to_program->plane_state->update_flags.bits.full_update;
+
+ if (interdependent_update)
+ lock_all_pipes(dc, context, true);
+ else
+ dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
if (num_planes == 0) {
/* OTG blank before remove all front end */
@@ -2339,15 +2365,9 @@ static void dcn10_apply_ctx_for_surface(
*/
if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
if (old_pipe_ctx->stream_res.tg == tg &&
- old_pipe_ctx->plane_res.hubp &&
- old_pipe_ctx->plane_res.hubp->opp_id != 0xf) {
+ old_pipe_ctx->plane_res.hubp &&
+ old_pipe_ctx->plane_res.hubp->opp_id != 0xf)
dcn10_disable_plane(dc, old_pipe_ctx);
- /*
- * power down fe will unlock when calling reset, need
- * to lock it back here. Messy, need rework.
- */
- pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
- }
}
if ((!pipe_ctx->plane_state ||
@@ -2366,29 +2386,25 @@ static void dcn10_apply_ctx_for_surface(
if (num_planes > 0)
program_all_pipe_in_tree(dc, top_pipe_to_program, context);
- dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
-
- if (top_pipe_to_program->plane_state &&
- top_pipe_to_program->plane_state->update_flags.bits.full_update)
+ if (interdependent_update)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- tg = pipe_ctx->stream_res.tg;
/* Skip inactive pipes and ones already updated */
- if (!pipe_ctx->stream || pipe_ctx->stream == stream
- || !pipe_ctx->plane_state
- || !tg->funcs->is_tg_enabled(tg))
+ if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
+ !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
continue;
- tg->funcs->lock(tg);
-
pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
pipe_ctx->plane_res.hubp,
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs);
-
- tg->funcs->unlock(tg);
}
+ if (interdependent_update)
+ lock_all_pipes(dc, context, false);
+ else
+ dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
+
if (num_planes == 0)
false_optc_underflow_wa(dc, stream, tg);
@@ -2425,7 +2441,7 @@ static void dcn10_prepare_bandwidth(
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (context->stream_count == 0)
- context->bw.dcn.clk.phyclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
dc->res_pool->clk_mgr->funcs->update_clocks(
dc->res_pool->clk_mgr,
@@ -2434,8 +2450,8 @@ static void dcn10_prepare_bandwidth(
}
hubbub1_program_watermarks(dc->res_pool->hubbub,
- &context->bw.dcn.watermarks,
- dc->res_pool->ref_clock_inKhz / 1000,
+ &context->bw_ctx.bw.dcn.watermarks,
+ dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
dcn10_stereo_hw_frame_pack_wa(dc, context);
@@ -2455,7 +2471,7 @@ static void dcn10_optimize_bandwidth(
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (context->stream_count == 0)
- context->bw.dcn.clk.phyclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
dc->res_pool->clk_mgr->funcs->update_clocks(
dc->res_pool->clk_mgr,
@@ -2464,8 +2480,8 @@ static void dcn10_optimize_bandwidth(
}
hubbub1_program_watermarks(dc->res_pool->hubbub,
- &context->bw.dcn.watermarks,
- dc->res_pool->ref_clock_inKhz / 1000,
+ &context->bw_ctx.bw.dcn.watermarks,
+ dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
dcn10_stereo_hw_frame_pack_wa(dc, context);
@@ -2654,7 +2670,7 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
pipe_ctx->plane_res.hubp);
- plane_state->status.is_flip_pending = flip_pending;
+ plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
if (!flip_pending)
plane_state->status.current_address = plane_state->status.requested_address;
@@ -2685,16 +2701,22 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_cursor_mi_param param = {
.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
- .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+ .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
.viewport = pipe_ctx->plane_res.scl_data.viewport,
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
.rotation = pipe_ctx->plane_state->rotation,
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
+ uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x;
+ uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y;
+ uint32_t x_offset = min(x_plane, pos_cpy.x);
+ uint32_t y_offset = min(y_plane, pos_cpy.y);
- pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x;
- pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y;
+ pos_cpy.x -= x_offset;
+ pos_cpy.y -= y_offset;
+ pos_cpy.x_hotspot += (x_plane - x_offset);
+ pos_cpy.y_hotspot += (y_plane - y_offset);
if (pipe_ctx->plane_state->address.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
@@ -2789,6 +2811,33 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
return vertical_line_start;
}
+void lock_all_pipes(struct dc *dc,
+ struct dc_state *context,
+ bool lock)
+{
+ struct pipe_ctx *pipe_ctx;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ tg = pipe_ctx->stream_res.tg;
+ /*
+ * Only lock the top pipe's tg to prevent redundant
+ * (un)locking. Also skip if pipe is disabled.
+ */
+ if (pipe_ctx->top_pipe ||
+ !pipe_ctx->stream || !pipe_ctx->plane_state ||
+ !tg->funcs->is_tg_enabled(tg))
+ continue;
+
+ if (lock)
+ tg->funcs->lock(tg);
+ else
+ tg->funcs->unlock(tg);
+ }
+}
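
lock_all_pipes() above is consumed by the interdependent_update path added to dcn10_apply_ctx_for_surface() earlier in this patch; condensing those hunks, the intended call pattern is roughly:

	if (interdependent_update)
		lock_all_pipes(dc, context, true);	/* lock every enabled top pipe */
	else
		dcn10_pipe_control_lock(dc, top_pipe_to_program, true);

	/* ... program pipes, hubp_setup_interdependent() on the untouched ones ... */

	if (interdependent_update)
		lock_all_pipes(dc, context, false);	/* unlock the same set */
	else
		dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
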
+
static void calc_vupdate_position(
struct pipe_ctx *pipe_ctx,
uint32_t *start_line,
@@ -2882,6 +2931,29 @@ static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
tg->funcs->setup_vertical_interrupt2(tg, start_line);
}
+static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings)
+{
+ struct encoder_unblank_param params = { { 0 } };
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+
+ /* only 3 items below are used by unblank */
+ params.timing = pipe_ctx->stream->timing;
+
+ params.link_settings.link_rate = link_settings->link_rate;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ params.timing.pix_clk_100hz /= 2;
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+ }
+
+ if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ link->dc->hwss.edp_backlight_control(link, true);
+ }
+}
+
static const struct hw_sequencer_funcs dcn10_funcs = {
.program_gamut_remap = program_gamut_remap,
.init_hw = dcn10_init_hw,
@@ -2903,7 +2975,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.update_info_frame = dce110_update_info_frame,
.enable_stream = dce110_enable_stream,
.disable_stream = dce110_disable_stream,
- .unblank_stream = dce110_unblank_stream,
+ .unblank_stream = dcn10_unblank_stream,
.blank_stream = dce110_blank_stream,
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
@@ -2913,7 +2985,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.pipe_control_lock = dcn10_pipe_control_lock,
.prepare_bandwidth = dcn10_prepare_bandwidth,
.optimize_bandwidth = dcn10_optimize_bandwidth,
- .reset_hw_ctx_wrap = reset_hw_ctx_wrap,
+ .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
.enable_stream_timing = dcn10_enable_stream_timing,
.set_drr = set_drr,
.get_position = get_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 6d66084df55f..4b3b27a5d23b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -83,4 +83,8 @@ struct pipe_ctx *find_top_pipe_for_stream(
int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
+void lock_all_pipes(struct dc *dc,
+ struct dc_state *context,
+ bool lock);
+
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 98f41d250978..991622da9ed5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -77,7 +77,7 @@ static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned i
unsigned int chars_printed = 0;
unsigned int remaining_buffer = bufSize;
- const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
+ const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
@@ -115,7 +115,7 @@ static unsigned int dcn10_get_hubp_states(struct dc *dc, char *pBuf, unsigned in
unsigned int chars_printed = 0;
unsigned int remaining_buffer = bufSize;
- const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
+ const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
if (invarOnly)
@@ -472,12 +472,12 @@ static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned i
chars_printed = snprintf_count(pBuf, bufSize, "dcfclk,dcfclk_deep_sleep,dispclk,"
"dppclk,fclk,socclk\n"
"%d,%d,%d,%d,%d,%d\n",
- dc->current_state->bw.dcn.clk.dcfclk_khz,
- dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
- dc->current_state->bw.dcn.clk.dispclk_khz,
- dc->current_state->bw.dcn.clk.dppclk_khz,
- dc->current_state->bw.dcn.clk.fclk_khz,
- dc->current_state->bw.dcn.clk.socclk_khz);
+ dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
remaining_buffer -= chars_printed;
pBuf += chars_printed;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index a9db372688ff..0126a44ba012 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -1304,7 +1304,6 @@ void dcn10_link_encoder_connect_dig_be_to_fe(
#define HPD_REG_UPDATE_N(reg_name, n, ...) \
generic_reg_update_ex(CTX, \
HPD_REG(reg_name), \
- HPD_REG_READ(reg_name), \
n, __VA_ARGS__)
#define HPD_REG_UPDATE(reg_name, field, val) \
@@ -1337,7 +1336,6 @@ void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
#define AUX_REG_UPDATE_N(reg_name, n, ...) \
generic_reg_update_ex(CTX, \
AUX_REG(reg_name), \
- AUX_REG_READ(reg_name), \
n, __VA_ARGS__)
#define AUX_REG_UPDATE(reg_name, field, val) \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 09d74070a49b..79f4fbb8a145 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -516,6 +516,15 @@ static const struct resource_caps rv2_res_cap = {
};
#endif
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCN_UNIVERSAL,
+ .blends_with_above = true,
+ .blends_with_below = true,
+ .per_pixel_alpha = true,
+ .supports_argb8888 = true,
+ .supports_nv12 = true
+};
+
static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = true,
.disable_dmcu = true,
@@ -848,14 +857,14 @@ void dcn10_clock_source_destroy(struct clock_source **clk_src)
*clk_src = NULL;
}
-static struct pp_smu_funcs_rv *dcn10_pp_smu_create(struct dc_context *ctx)
+static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx)
{
- struct pp_smu_funcs_rv *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
+ struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
if (!pp_smu)
return pp_smu;
- dm_pp_get_funcs_rv(ctx, pp_smu);
+ dm_pp_get_funcs(ctx, pp_smu);
return pp_smu;
}
@@ -865,10 +874,7 @@ static void destruct(struct dcn10_resource_pool *pool)
for (i = 0; i < pool->base.stream_enc_count; i++) {
if (pool->base.stream_enc[i] != NULL) {
- /* TODO: free dcn version of stream encoder once implemented
- * rather than using virtual stream encoder
- */
- kfree(pool->base.stream_enc[i]);
+ kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool->base.stream_enc[i] = NULL;
}
}
@@ -921,9 +927,6 @@ static void destruct(struct dcn10_resource_pool *pool)
}
}
- for (i = 0; i < pool->base.stream_enc_count; i++)
- kfree(pool->base.stream_enc[i]);
-
for (i = 0; i < pool->base.audio_count; i++) {
if (pool->base.audios[i])
dce_aud_destroy(&pool->base.audios[i]);
@@ -1078,7 +1081,7 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
{
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
- struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);
+ struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
if (!head_pipe) {
ASSERT(0);
@@ -1143,7 +1146,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont
continue;
if (context->stream_status[i].plane_count > 2)
- return false;
+ return DC_FAIL_UNSUPPORTED_1;
for (j = 0; j < context->stream_status[i].plane_count; j++) {
struct dc_plane_state *plane =
@@ -1351,7 +1354,7 @@ static bool construct(
goto fail;
}
- dml_init_instance(&dc->dml, DML_PROJECT_RAVEN1);
+ dml_init_instance(&dc->dml, &dcn1_0_soc, &dcn1_0_ip, DML_PROJECT_RAVEN1);
memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
@@ -1510,6 +1513,9 @@ static bool construct(
dcn10_hw_sequencer_construct(dc);
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
dc->cap_funcs = cap_funcs;
return true;
@@ -1522,7 +1528,7 @@ fail:
}
struct resource_pool *dcn10_create_resource_pool(
- uint8_t num_virtual_links,
+ const struct dc_init_data *init_data,
struct dc *dc)
{
struct dcn10_resource_pool *pool =
@@ -1531,7 +1537,7 @@ struct resource_pool *dcn10_create_resource_pool(
if (!pool)
return NULL;
- if (construct(num_virtual_links, dc, pool))
+ if (construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
index 8f71225bc61b..999c684a0b36 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
@@ -39,7 +39,7 @@ struct dcn10_resource_pool {
struct resource_pool base;
};
struct resource_pool *dcn10_create_resource_pool(
- uint8_t num_virtual_links,
+ const struct dc_init_data *init_data,
struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index b08254121251..8ee9f6dc1d62 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -245,7 +245,8 @@ static void enc1_update_hdmi_info_packet(
void enc1_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space)
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting)
{
uint32_t h_active_start;
uint32_t v_active_start;
@@ -298,7 +299,6 @@ void enc1_stream_encoder_dp_set_stream_attribute(
break;
case PIXEL_ENCODING_YCBCR420:
dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR420;
- REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
break;
default:
dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_RGB444;
@@ -726,13 +726,19 @@ void enc1_stream_encoder_update_dp_info_packets(
3, /* packetIndex */
&info_frame->hdrsmd);
+ if (info_frame->dpsdp.valid)
+ enc1_update_generic_info_packet(
+ enc1,
+ 4,/* packetIndex */
+ &info_frame->dpsdp);
+
/* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame
*/
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
-
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, info_frame->dpsdp.valid);
/* This bit is the master enable bit.
* When enabling secondary stream engine,
@@ -797,10 +803,10 @@ void enc1_stream_encoder_dp_blank(
*/
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
/* Larger delay to wait until VBLANK - use max retry of
- * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
+ * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode +
* a little more because we may not trust delay accuracy.
*/
- max_retries = DP_BLANK_MAX_RETRY * 150;
+ max_retries = DP_BLANK_MAX_RETRY * 250;
/* disable DP stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
@@ -833,14 +839,19 @@ void enc1_stream_encoder_dp_unblank(
if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
uint32_t n_vid = 0x8000;
uint32_t m_vid;
+ uint32_t n_multiply = 0;
+ uint64_t m_vid_l = n_vid;
+ /* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
+ if (param->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ /* the pixel clock in param->timing is already half of the 4:4:4 rate for 4:2:0 */
+ n_multiply = 1;
+ }
/* M / N = Fstream / Flink
* m_vid / n_vid = pixel rate / link rate
*/
- uint64_t m_vid_l = n_vid;
-
- m_vid_l *= param->pixel_clk_khz;
+ m_vid_l *= param->timing.pix_clk_100hz / 10;
m_vid_l = div_u64(m_vid_l,
param->link_settings.link_rate
* LINK_RATE_REF_FREQ_IN_KHZ);
@@ -859,7 +870,9 @@ void enc1_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);
- REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 1);
+ REG_UPDATE_2(DP_VID_TIMING,
+ DP_VID_M_N_GEN_EN, 1,
+ DP_VID_N_MUL, n_multiply);
}
/* set DIG_START to 0x1 to resync FIFO */
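
In the M/N hunk above, n_vid stays fixed at 0x8000 and m_vid is scaled so that m_vid/n_vid equals the pixel rate over the link symbol rate, with DP_VID_N_MUL set for YCbCr 4:2:0, where the encoder is handed a pixel clock already halved relative to 4:4:4 (see dcn10_unblank_stream earlier in this patch). A self-contained sketch of the arithmetic, using example values rather than anything taken from the patch:

#include <stdint.h>

#define LINK_RATE_REF_FREQ_IN_KHZ 27000	/* enum link_rate value * 27 MHz = link symbol clock */

/* Illustrative re-derivation of DP_VID_M as programmed above; the driver
 * itself uses div_u64() for the 64-bit division.
 */
static uint32_t compute_vid_m(uint32_t pix_clk_100hz, uint32_t link_rate)
{
	const uint32_t n_vid = 0x8000;
	uint64_t m_vid = n_vid;

	m_vid *= pix_clk_100hz / 10;					/* pixel clock in kHz */
	m_vid /= (uint64_t)link_rate * LINK_RATE_REF_FREQ_IN_KHZ;	/* symbol clock in kHz */

	return (uint32_t)m_vid;
}

/* Example: a 594000 kHz pixel clock on HBR2 (link_rate = 0x14) gives
 * 0x8000 * 594000 / 540000 = 36044, i.e. an M/N ratio of roughly 1.1.
 */
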
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index b7c800e10a32..e654c2f55971 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -462,7 +462,8 @@ void enc1_update_generic_info_packet(
void enc1_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space);
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting);
void enc1_stream_encoder_hdmi_set_stream_attribute(
struct stream_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index e81b24374bcb..ccbfe9680d27 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -58,7 +58,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
bool enable);
/*
- * poll pending down reply before clear payload allocation table
+ * poll pending down reply
*/
void dm_helpers_dp_mst_poll_pending_down_reply(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index 14bed5b1fa97..cc6891b8ea69 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -30,6 +30,8 @@
* interface to PPLIB/SMU to setup clocks and pstate requirements on SoC
*/
+typedef bool BOOLEAN;
+
enum pp_smu_ver {
/*
* PP_SMU_INTERFACE_X should be interpreted as the interface defined
@@ -137,12 +139,6 @@ struct pp_smu_funcs_rv {
/* PME w/a */
void (*set_pme_wa_enable)(struct pp_smu *pp);
- /*
- * Legacy functions. Used for backwards comp. with existing
- * PPlib code.
- */
- void (*set_display_requirement)(struct pp_smu *pp,
- struct pp_smu_display_requirement_rv *req);
};
struct pp_smu_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 1961cc6d9143..b426ba02b793 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -52,30 +52,17 @@ irq_handler_idx dm_register_interrupt(
* GPU registers access
*
*/
-
+uint32_t dm_read_reg_func(
+ const struct dc_context *ctx,
+ uint32_t address,
+ const char *func_name);
/* enable for debugging new code, this adds 50k to the driver size. */
/* #define DM_CHECK_ADDR_0 */
#define dm_read_reg(ctx, address) \
dm_read_reg_func(ctx, address, __func__)
-static inline uint32_t dm_read_reg_func(
- const struct dc_context *ctx,
- uint32_t address,
- const char *func_name)
-{
- uint32_t value;
-#ifdef DM_CHECK_ADDR_0
- if (address == 0) {
- DC_ERR("invalid register read; address = 0\n");
- return 0;
- }
-#endif
- value = cgs_read_register(ctx->cgs_device, address);
- trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
- return value;
-}
#define dm_write_reg(ctx, address, value) \
dm_write_reg_func(ctx, address, value, __func__)
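
With the static inline removed above, dm_read_reg_func() is now only declared in this header; the diffstat shows dc_helper.c changing in the same patch, so the definition presumably moves there. A sketch of what that out-of-line definition would look like, re-homing the removed body unchanged:

uint32_t dm_read_reg_func(const struct dc_context *ctx,
			  uint32_t address,
			  const char *func_name)
{
	uint32_t value;

#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif
	value = cgs_read_register(ctx->cgs_device, address);
	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}
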
@@ -144,10 +131,14 @@ static inline uint32_t set_reg_field_value_ex(
reg_name ## __ ## reg_field ## _MASK,\
reg_name ## __ ## reg_field ## __SHIFT)
-uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+uint32_t generic_reg_set_ex(const struct dc_context *ctx,
uint32_t addr, uint32_t reg_val, int n,
uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
+uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
+
#define FD(reg_field) reg_field ## __SHIFT, \
reg_field ## _MASK
@@ -155,7 +146,7 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
* return number of poll before condition is met
* return 0 if condition is not meet after specified time out tries
*/
-unsigned int generic_reg_wait(const struct dc_context *ctx,
+void generic_reg_wait(const struct dc_context *ctx,
uint32_t addr, uint32_t mask, uint32_t shift, uint32_t condition_value,
unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
const char *func_name, int line);
@@ -172,11 +163,10 @@ unsigned int generic_reg_wait(const struct dc_context *ctx,
#define generic_reg_update_soc15(ctx, inst_offset, reg_name, n, ...)\
generic_reg_update_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, \
- dm_read_reg_func(ctx, mm##reg_name + DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + inst_offset, __func__), \
n, __VA_ARGS__)
#define generic_reg_set_soc15(ctx, inst_offset, reg_name, n, ...)\
- generic_reg_update_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, 0, \
+ generic_reg_set_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, 0, \
n, __VA_ARGS__)
#define get_reg_field_value_soc15(reg_value, block, reg_num, reg_name, reg_field)\
@@ -223,8 +213,8 @@ bool dm_pp_notify_wm_clock_changes(
const struct dc_context *ctx,
struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges);
-void dm_pp_get_funcs_rv(struct dc_context *ctx,
- struct pp_smu_funcs_rv *funcs);
+void dm_pp_get_funcs(struct dc_context *ctx,
+ struct pp_smu_funcs *funcs);
/* DAL calls this function to notify PP about completion of Mode Set.
* For PP it means that current DCE clocks are those which were returned
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index 77200711abbe..a3d1be20dd9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -29,7 +29,7 @@
#include "os_types.h"
#include "dc_types.h"
-struct pp_smu_funcs_rv;
+struct pp_smu_funcs;
struct dm_pp_clock_range {
int min_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index d303b789adfe..80ffd7d958b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -26,40 +26,14 @@
#include "display_mode_lib.h"
#include "dc_features.h"
-extern const struct _vcs_dpi_ip_params_st dcn1_0_ip;
-extern const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc;
-
-static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project)
-{
- switch (project) {
- case DML_PROJECT_RAVEN1:
- *soc = dcn1_0_soc;
- break;
- default:
- ASSERT(0);
- break;
- }
-}
-
-static void set_ip_params(struct _vcs_dpi_ip_params_st *ip, enum dml_project project)
+void dml_init_instance(struct display_mode_lib *lib,
+ const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
+ const struct _vcs_dpi_ip_params_st *ip_params,
+ enum dml_project project)
{
- switch (project) {
- case DML_PROJECT_RAVEN1:
- *ip = dcn1_0_ip;
- break;
- default:
- ASSERT(0);
- break;
- }
-}
-
-void dml_init_instance(struct display_mode_lib *lib, enum dml_project project)
-{
- if (lib->project != project) {
- set_soc_bounding_box(&lib->soc, project);
- set_ip_params(&lib->ip, project);
- lib->project = project;
- }
+ lib->soc = *soc_bb;
+ lib->ip = *ip_params;
+ lib->project = project;
}
const char *dml_get_status_message(enum dm_validation_status status)
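
With the project-based switch removed, callers supply the SoC bounding box and IP parameter tables directly; the dcn10_resource.c hunk earlier in this patch shows the Raven call site:

	dml_init_instance(&dc->dml, &dcn1_0_soc, &dcn1_0_ip, DML_PROJECT_RAVEN1);

which keeps the table selection with the resource code instead of inside DML.
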
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index a730e0209c05..1b546dba34bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -41,7 +41,10 @@ struct display_mode_lib {
struct dal_logger *logger;
};
-void dml_init_instance(struct display_mode_lib *lib, enum dml_project project);
+void dml_init_instance(struct display_mode_lib *lib,
+ const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
+ const struct _vcs_dpi_ip_params_st *ip_params,
+ enum dml_project project);
const char *dml_get_status_message(enum dm_validation_status status);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 391183e3428f..c5b791d158a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -25,6 +25,8 @@
#ifndef __DISPLAY_MODE_STRUCTS_H__
#define __DISPLAY_MODE_STRUCTS_H__
+#define MAX_CLOCK_LIMIT_STATES 8
+
typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
typedef struct _vcs_dpi_ip_params_st ip_params_st;
@@ -103,7 +105,7 @@ struct _vcs_dpi_soc_bounding_box_st {
double xfc_xbuf_latency_tolerance_us;
int use_urgent_burst_bw;
unsigned int num_states;
- struct _vcs_dpi_voltage_scaling_st clock_limits[8];
+ struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES];
};
struct _vcs_dpi_ip_params_st {
@@ -416,6 +418,7 @@ struct _vcs_dpi_display_dlg_regs_st {
unsigned int refcyc_per_vm_group_flip;
unsigned int refcyc_per_vm_req_vblank;
unsigned int refcyc_per_vm_req_flip;
+ unsigned int refcyc_per_vm_dmdata;
};
struct _vcs_dpi_display_ttu_regs_st {
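Naming the array bound lets consumers clamp their loops against the same constant that sizes clock_limits[]. A small sketch, where 'lib', 'soc' and consider_state() are placeholders:

        const struct _vcs_dpi_soc_bounding_box_st *soc = &lib->soc;
        unsigned int i;

        /* num_states is caller-populated; clamping to MAX_CLOCK_LIMIT_STATES
         * keeps a bad value from indexing past clock_limits[]. */
        for (i = 0; i < soc->num_states && i < MAX_CLOCK_LIMIT_STATES; i++)
                consider_state(&soc->clock_limits[i]);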
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index 48400d642610..e2d82aacd3bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -321,6 +321,9 @@ void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st d
dml_print(
"DML_RQ_DLG_CALC: xfc_reg_remote_surface_flip_latency = 0x%0x\n",
dlg_regs.xfc_reg_remote_surface_flip_latency);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_vm_dmdata = 0x%0x\n",
+ dlg_regs.refcyc_per_vm_dmdata);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
index fe6301cb8681..1b01a9a58d14 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
@@ -167,7 +167,7 @@ struct clock_source_funcs {
struct pixel_clk_params *,
struct pll_settings *);
bool (*get_pixel_clk_frequency_100hz)(
- struct clock_source *clock_source,
+ const struct clock_source *clock_source,
unsigned int inst,
unsigned int *pixel_clk_khz);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index 2e61a22ef4b2..8dca3b7700e5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -43,7 +43,7 @@ enum dc_status {
DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */
DC_FAIL_SCALING = 14,
DC_FAIL_DP_LINK_TRAINING = 15,
-
+ DC_FAIL_UNSUPPORTED_1 = 18,
DC_ERROR_UNEXPECTED = -1
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 986ed1728644..827541e34ee2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -95,7 +95,6 @@ struct resource_funcs {
void (*link_init)(struct dc_link *link);
struct link_encoder *(*link_enc_create)(
const struct encoder_init_data *init);
-
bool (*validate_bandwidth)(
struct dc *dc,
struct dc_state *context);
@@ -144,7 +143,7 @@ struct resource_pool {
struct stream_encoder *stream_enc[MAX_PIPES * 2];
struct hubbub *hubbub;
struct mpc *mpc;
- struct pp_smu_funcs_rv *pp_smu;
+ struct pp_smu_funcs *pp_smu;
struct pp_smu_display_requirement_rv pp_smu_req;
struct dce_aux *engines[MAX_PIPES];
struct dce_i2c_hw *hw_i2cs[MAX_PIPES];
@@ -154,7 +153,12 @@ struct resource_pool {
unsigned int pipe_count;
unsigned int underlay_pipe_index;
unsigned int stream_enc_count;
- unsigned int ref_clock_inKhz;
+
+ struct {
+ unsigned int xtalin_clock_inKhz;
+ unsigned int dccg_ref_clock_inKhz;
+ unsigned int dchub_ref_clock_inKhz;
+ } ref_clocks;
unsigned int timing_generator_count;
/*
@@ -262,18 +266,22 @@ struct dcn_bw_output {
struct dcn_watermark_set watermarks;
};
-union bw_context {
+union bw_output {
struct dcn_bw_output dcn;
struct dce_bw_output dce;
};
+struct bw_context {
+ union bw_output bw;
+ struct display_mode_lib dml;
+};
/**
* struct dc_state - The full description of a state requested by a user
*
* @streams: Stream properties
* @stream_status: The planes on a given stream
* @res_ctx: Persistent state of resources
- * @bw: The output from bandwidth and watermark calculations
+ * @bw_ctx: The output from bandwidth and watermark calculations and the DML
* @pp_display_cfg: PowerPlay clocks and settings
* @dcn_bw_vars: non-stack memory to support bandwidth calculations
*
@@ -285,7 +293,7 @@ struct dc_state {
struct resource_context res_ctx;
- union bw_context bw;
+ struct bw_context bw_ctx;
/* Note: these are big structures, do *not* put on stack! */
struct dm_pp_display_configuration pp_display_cfg;
@@ -293,7 +301,11 @@ struct dc_state {
struct dcn_bw_internal_vars dcn_bw_vars;
#endif
- struct clk_mgr *dccg;
+ struct clk_mgr *clk_mgr;
+
+ struct {
+ bool full_update_needed : 1;
+ } commit_hints;
struct kref refcount;
};
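The old union bw_context bw member becomes struct bw_context bw_ctx: the same DCE/DCN output union now sits one level down as bw, next to a per-state copy of the DML. Following the field names in the diff, call sites would read roughly:

        /* 'context' is a struct dc_state *. */
        struct dcn_watermark_set *wm = &context->bw_ctx.bw.dcn.watermarks;
        struct display_mode_lib *dml = &context->bw_ctx.dml;

whereas the watermarks used to be reached as context->bw.dcn.watermarks.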
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 16fd4dc6c4dd..b1fab251c09b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -95,8 +95,9 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size);
-int dc_link_aux_transfer(struct ddc_service *ddc,
- struct aux_payload *payload);
+int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_channel_operation_result *operation_result);
bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload);
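dc_link_aux_transfer_raw() keeps the byte-count/negative-error style return of the old helper, but also reports the low-level AUX channel outcome through the new out-parameter. A hedged usage sketch ('ddc' and 'payload' are prepared elsewhere; AUX_CHANNEL_OPERATION_SUCCEEDED is the success value of the existing enum):

        enum aux_channel_operation_result op_result;
        int ret;

        ret = dc_link_aux_transfer_raw(ddc, payload, &op_result);
        if (ret < 0 || op_result != AUX_CHANNEL_OPERATION_SUCCEEDED) {
                /* op_result says why the transaction failed (defer vs. hard
                 * failure), so a retry policy can be layered on top. */
        }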
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index ece954a40a8e..86ec3f69c765 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -631,5 +631,7 @@ void dcn_bw_update_from_pplib(struct dc *dc);
void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
void dcn_bw_sync_calcs_and_dml(struct dc *dc);
+enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode);
+
#endif /* __DCN_CALCS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 23a4b18e5fee..31bd6d5183ab 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -42,6 +42,8 @@ struct clk_mgr_funcs {
bool safe_to_lower);
int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr);
+
+ void (*init_clocks)(struct clk_mgr *clk_mgr);
};
#endif /* __DAL_CLK_MGR_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 95a56d012626..05ee5295d2c1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -39,6 +39,10 @@ struct dccg_funcs {
void (*update_dpp_dto)(struct dccg *dccg,
int dpp_inst,
int req_dppclk);
+ void (*get_dccg_ref_freq)(struct dccg *dccg,
+ unsigned int xtalin_freq_inKhz,
+ unsigned int *dccg_ref_freq_inKhz);
+ void (*dccg_init)(struct dccg *dccg);
};
#endif //__DAL_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 9d2d8e51306c..5e8fead3c09a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -73,6 +73,10 @@ struct hubbub_funcs {
void (*wm_read_state)(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm);
+
+ void (*get_dchub_ref_freq)(struct hubbub *hubbub,
+ unsigned int dccg_ref_freq_inKhz,
+ unsigned int *dchub_ref_freq_inKhz);
};
struct hubbub {
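Taken together with the new pool->ref_clocks fields, these hooks describe a derivation chain: the board's XTALIN frequency is fed to the DCCG to report the actual DCCG reference clock, which in turn is handed to HUBBUB to report the reference DCHUB runs at. A sketch of how an init path might populate the new fields, assuming the pool's dccg and hubbub objects and their optional hooks are present ('dc' is a struct dc *, xtalin_khz comes from firmware info):

        struct resource_pool *pool = dc->res_pool;

        pool->ref_clocks.xtalin_clock_inKhz = xtalin_khz;

        if (pool->dccg && pool->dccg->funcs->get_dccg_ref_freq)
                pool->dccg->funcs->get_dccg_ref_freq(pool->dccg,
                                pool->ref_clocks.xtalin_clock_inKhz,
                                &pool->ref_clocks.dccg_ref_clock_inKhz);
        else
                pool->ref_clocks.dccg_ref_clock_inKhz =
                                pool->ref_clocks.xtalin_clock_inKhz;

        if (pool->hubbub && pool->hubbub->funcs->get_dchub_ref_freq)
                pool->hubbub->funcs->get_dchub_ref_freq(pool->hubbub,
                                pool->ref_clocks.dccg_ref_clock_inKhz,
                                &pool->ref_clocks.dchub_ref_clock_inKhz);
        else
                pool->ref_clocks.dchub_ref_clock_inKhz =
                                pool->ref_clocks.dccg_ref_clock_inKhz;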
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index da85537a4488..4c8e2c6fb6db 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -146,6 +146,12 @@ struct out_csc_color_matrix {
uint16_t regval[12];
};
+enum gamut_remap_select {
+ GAMUT_REMAP_BYPASS = 0,
+ GAMUT_REMAP_COEFF,
+ GAMUT_REMAP_COMA_COEFF,
+ GAMUT_REMAP_COMB_COEFF
+};
enum opp_regamma {
OPP_REGAMMA_BYPASS = 0,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 4051493557bc..49854eb73d1d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -63,11 +63,13 @@ struct encoder_info_frame {
struct dc_info_packet vsc;
/* HDR Static MetaData */
struct dc_info_packet hdrsmd;
+ /* custom sdp message */
+ struct dc_info_packet dpsdp;
};
struct encoder_unblank_param {
struct dc_link_settings link_settings;
- unsigned int pixel_clk_khz;
+ struct dc_crtc_timing timing;
};
struct encoder_set_dp_phy_pattern_param {
@@ -88,7 +90,8 @@ struct stream_encoder_funcs {
void (*dp_set_stream_attribute)(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space);
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting);
void (*hdmi_set_stream_attribute)(
struct stream_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index c25f7df7b5e3..067d53caf28a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -187,8 +187,10 @@ struct timing_generator_funcs {
bool (*did_triggered_reset_occur)(struct timing_generator *tg);
void (*setup_global_swap_lock)(struct timing_generator *tg,
const struct dcp_gsl_params *gsl_params);
+ void (*setup_global_lock)(struct timing_generator *tg);
void (*unlock)(struct timing_generator *tg);
void (*lock)(struct timing_generator *tg);
+ void (*lock_global)(struct timing_generator *tg);
void (*enable_reset_trigger)(struct timing_generator *tg,
int source_tg_inst);
void (*enable_crtc_reset)(struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 7676f25216b1..33905468e2b9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -176,6 +176,10 @@ struct hw_sequencer_funcs {
struct dc *dc,
struct pipe_ctx *pipe,
bool lock);
+ void (*pipe_control_lock_global)(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
void (*blank_pixel_data)(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index cf5a84b9e27c..8503d9cc4763 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -52,7 +52,7 @@
/* macro to set register fields. */
#define REG_SET_N(reg_name, n, initial_val, ...) \
- generic_reg_update_ex(CTX, \
+ generic_reg_set_ex(CTX, \
REG(reg_name), \
initial_val, \
n, __VA_ARGS__)
@@ -225,7 +225,6 @@
#define REG_UPDATE_N(reg_name, n, ...) \
generic_reg_update_ex(CTX, \
REG(reg_name), \
- REG_READ(reg_name), \
n, __VA_ARGS__)
#define REG_UPDATE(reg_name, field, val) \
@@ -380,16 +379,11 @@
/* macro to update a register field to specified values in given sequences.
* useful when toggling bits
*/
-#define REG_UPDATE_SEQ(reg, field, value1, value2) \
-{ uint32_t val = REG_UPDATE(reg, field, value1); \
- REG_SET(reg, val, field, value2); }
-
-/* macro to update fields in register 1 field at a time in given order */
-#define REG_UPDATE_1BY1_2(reg, f1, v1, f2, v2) \
+#define REG_UPDATE_SEQ_2(reg, f1, v1, f2, v2) \
{ uint32_t val = REG_UPDATE(reg, f1, v1); \
REG_SET(reg, val, f2, v2); }
-#define REG_UPDATE_1BY1_3(reg, f1, v1, f2, v2, f3, v3) \
+#define REG_UPDATE_SEQ_3(reg, f1, v1, f2, v2, f3, v3) \
{ uint32_t val = REG_UPDATE(reg, f1, v1); \
val = REG_SET(reg, val, f2, v2); \
REG_SET(reg, val, f3, v3); }
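After this hunk, REG_SET_N writes from a caller-supplied initial value without reading the register (generic_reg_set_ex), REG_UPDATE_N leaves the read to generic_reg_update_ex itself, and the old REG_UPDATE_SEQ and REG_UPDATE_1BY1_2/_3 variants collapse into REG_UPDATE_SEQ_2 and REG_UPDATE_SEQ_3, which issue one register write per step. A hypothetical toggle, with made-up register and field names, would look like:

        /* Two ordered writes: assert the (hypothetical) soft reset, then
         * release it, instead of folding both into one read-modify-write. */
        REG_UPDATE_SEQ_2(MYBLOCK_CONTROL,
                        MYBLOCK_SOFT_RESET, 1,
                        MYBLOCK_SOFT_RESET, 0);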
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 0086a2f1d21a..3ce0a4fc5822 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -70,11 +70,9 @@ bool resource_construct(
struct resource_pool *pool,
const struct resource_create_funcs *create_funcs);
-struct resource_pool *dc_create_resource_pool(
- struct dc *dc,
- int num_virtual_links,
- enum dce_version dc_version,
- struct hw_asic_id asic_id);
+struct resource_pool *dc_create_resource_pool(struct dc *dc,
+ const struct dc_init_data *init_data,
+ enum dce_version dc_version);
void dc_destroy_resource_pool(struct dc *dc);
@@ -131,7 +129,8 @@ bool resource_attach_surfaces_to_context(
struct pipe_ctx *find_idle_secondary_pipe(
struct resource_context *res_ctx,
- const struct resource_pool *pool);
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe);
bool resource_is_stream_unchanged(
struct dc_state *old_context, struct dc_stream_state *stream);
@@ -172,4 +171,7 @@ void update_audio_usage(
unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
+struct pipe_ctx *dc_res_get_odm_bottom_pipe(struct pipe_ctx *pipe_ctx);
+bool dc_res_is_odm_head_pipe(struct pipe_ctx *pipe_ctx);
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index afe0876fe6f8..86987f5e8bd5 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -81,6 +81,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
.enable_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
@@ -137,7 +142,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.ack_value =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
- .funcs = &vblank_irq_info_funcs\
+ .funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
index 1ea7256ec89b..750ba0ab4106 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -84,6 +84,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
@@ -140,7 +145,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
IRQ_REG_ENTRY(CRTC, reg_num,\
CRTC_INTERRUPT_CONTROL, CRTC_V_UPDATE_INT_MSK,\
CRTC_V_UPDATE_INT_STATUS, CRTC_V_UPDATE_INT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
+ .funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
index 8a2066c313fe..de218fe84a43 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -84,6 +84,10 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_INVALID + reg_num] = {\
@@ -142,7 +146,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.ack_value =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
- .funcs = &vblank_irq_info_funcs\
+ .funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index e04ae49243f6..10ac6deff5ff 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -56,6 +56,18 @@ enum dc_irq_source to_dal_irq_source_dcn10(
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
+ case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE1;
+ case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE2;
+ case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE3;
+ case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE4;
+ case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE5;
+ case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE6;
case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP1;
case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
@@ -153,6 +165,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
@@ -203,12 +220,15 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.funcs = &pflip_irq_info_funcs\
}
-#define vupdate_int_entry(reg_num)\
+/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
+ * of DCE's DC_IRQ_SOURCE_VUPDATEx.
+ */
+#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
- OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
- OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
+ .funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
@@ -315,12 +335,12 @@ irq_source_info_dcn10[DAL_IRQ_SOURCES_NUMBER] = {
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
- vupdate_int_entry(0),
- vupdate_int_entry(1),
- vupdate_int_entry(2),
- vupdate_int_entry(3),
- vupdate_int_entry(4),
- vupdate_int_entry(5),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
+ vupdate_no_lock_int_entry(4),
+ vupdate_no_lock_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
index 3dc1733eea20..fdcf9e66d852 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -29,7 +29,8 @@
static void virtual_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space) {}
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting) {}
static void virtual_stream_encoder_hdmi_set_stream_attribute(
struct stream_encoder *enc,