Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/core/dc.c')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c  511
1 file changed, 365 insertions(+), 146 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index d08e60dff46d..74c21d98b4de 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -24,6 +24,8 @@
#include "dm_services.h"
+#include "amdgpu.h"
+
#include "dc.h"
#include "core_status.h"
@@ -75,6 +77,8 @@
#include "hw_sequencer_private.h"
+#include "dml2/dml2_internal_types.h"
+
#include "dce/dmub_outbox.h"
#define CTX \
@@ -828,6 +832,7 @@ static void dc_destruct(struct dc *dc)
if (dc->ctx->created_bios)
dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ kfree(dc->ctx->logger);
dc_perf_trace_destroy(&dc->ctx->perf_trace);
kfree(dc->ctx);
@@ -868,8 +873,18 @@ static bool dc_construct_ctx(struct dc *dc,
dc_ctx->dce_environment = init_params->dce_environment;
dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
+ dc_ctx->clk_reg_offsets = init_params->clk_reg_offsets;
/* Create logger */
+ dc_ctx->logger = kmalloc(sizeof(*dc_ctx->logger), GFP_KERNEL);
+
+ if (!dc_ctx->logger) {
+ kfree(dc_ctx);
+ return false;
+ }
+
+ dc_ctx->logger->dev = adev_to_drm(init_params->driver);
+ dc->dml.logger = dc_ctx->logger;
dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);
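
The logger is now heap-allocated during context construction; if the allocation fails, the half-built dc_ctx is freed before returning false, and the matching kfree(dc->ctx->logger) lands in dc_destruct above. A minimal standalone sketch of this allocate-then-unwind construction pattern, using hypothetical ctx/logger types rather than DC's real structures:

#include <stdio.h>
#include <stdlib.h>

struct logger { const char *name; };

struct ctx { struct logger *logger; };

static struct ctx *ctx_create(const char *name)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;

	c->logger = malloc(sizeof(*c->logger));
	if (!c->logger) {
		free(c);         /* unwind the partially built context */
		return NULL;
	}
	c->logger->name = name;
	return c;
}

static void ctx_destroy(struct ctx *c)
{
	if (!c)
		return;
	free(c->logger);         /* mirrors the kfree(dc->ctx->logger) above */
	free(c);
}

int main(void)
{
	struct ctx *c = ctx_create("dc");

	if (c) {
		puts(c->logger->name);
		ctx_destroy(c);
	}
	return 0;
}
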
@@ -990,7 +1005,8 @@ static bool dc_construct(struct dc *dc,
/* set i2c speed if not done by the respective dcnxxx__resource.c */
if (dc->caps.i2c_speed_in_khz_hdcp == 0)
dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
-
+ if (dc->caps.max_optimizable_video_width == 0)
+ dc->caps.max_optimizable_video_width = 5120;
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
if (!dc->clk_mgr)
goto fail;
@@ -1069,53 +1085,6 @@ static void apply_ctx_interdependent_lock(struct dc *dc,
}
}
-static void phantom_pipe_blank(
- struct dc *dc,
- struct timing_generator *tg,
- int width,
- int height)
-{
- struct dce_hwseq *hws = dc->hwseq;
- enum dc_color_space color_space;
- struct tg_color black_color = {0};
- struct output_pixel_processor *opp = NULL;
- uint32_t num_opps, opp_id_src0, opp_id_src1;
- uint32_t otg_active_width, otg_active_height;
- uint32_t i;
-
- /* program opp dpg blank color */
- color_space = COLOR_SPACE_SRGB;
- color_space_to_black_color(dc, color_space, &black_color);
-
- otg_active_width = width;
- otg_active_height = height;
-
- /* get the OPTC source */
- tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
- ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
-
- for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
- if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
- opp = dc->res_pool->opps[i];
- break;
- }
- }
-
- if (opp && opp->funcs->opp_set_disp_pattern_generator)
- opp->funcs->opp_set_disp_pattern_generator(
- opp,
- CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
- CONTROLLER_DP_COLOR_SPACE_UDEFINED,
- COLOR_DEPTH_UNDEFINED,
- &black_color,
- otg_active_width,
- otg_active_height,
- 0);
-
- if (tg->funcs->is_tg_enabled(tg))
- hws->funcs.wait_for_blank_complete(opp);
-}
-
static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
@@ -1206,7 +1175,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
- phantom_pipe_blank(dc, tg, main_pipe_width, main_pipe_height);
+ if (dc->hwss.blank_phantom)
+ dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
tg->funcs->enable_crtc(tg);
}
}
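
The open-coded phantom_pipe_blank() helper removed above is replaced by an optional blank_phantom hw-sequencer hook, invoked only on ASICs whose sequencer provides it. A standalone sketch of the guarded function-pointer hook pattern, with hypothetical types:

#include <stdio.h>

struct hw_sequencer_funcs {
	/* optional: left NULL on hardware without phantom-pipe support */
	void (*blank_phantom)(int width, int height);
};

static void dcn_blank_phantom(int width, int height)
{
	printf("blanking phantom pipe %dx%d\n", width, height);
}

static void disable_dangling(const struct hw_sequencer_funcs *hwss,
			     int width, int height)
{
	/* NULL-guard mirrors "if (dc->hwss.blank_phantom)" in the hunk */
	if (hwss->blank_phantom)
		hwss->blank_phantom(width, height);
}

int main(void)
{
	struct hw_sequencer_funcs dcn = { .blank_phantom = dcn_blank_phantom };
	struct hw_sequencer_funcs legacy = { 0 };

	disable_dangling(&dcn, 1920, 1080);    /* hook present: blanks */
	disable_dangling(&legacy, 1920, 1080); /* hook absent: no-op */
	return 0;
}
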
@@ -1370,6 +1340,7 @@ struct dc *dc_create(const struct dc_init_data *init_params)
dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
+ dc->clk_reg_offsets = init_params->clk_reg_offsets;
/* Populate versioning information */
dc->versions.dc_ver = DC_VER;
@@ -1888,7 +1859,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
- if (dc->debug.enable_double_buffered_dsc_pg_support)
+ if (dc->hwss.update_dsc_pg)
dc->hwss.update_dsc_pg(dc, context, false);
disable_dangling_plane(dc, context);
@@ -1995,7 +1966,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.optimize_bandwidth(dc, context);
}
- if (dc->debug.enable_double_buffered_dsc_pg_support)
+ if (dc->hwss.update_dsc_pg)
dc->hwss.update_dsc_pg(dc, context, true);
if (dc->ctx->dce_version >= DCE_VERSION_MAX)
@@ -2242,7 +2213,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.optimize_bandwidth(dc, context);
- if (dc->debug.enable_double_buffered_dsc_pg_support)
+ if (dc->hwss.update_dsc_pg)
dc->hwss.update_dsc_pg(dc, context, true);
}
@@ -2269,6 +2240,11 @@ struct dc_state *dc_create_state(struct dc *dc)
init_state(dc, context);
+#ifdef CONFIG_DRM_AMD_DC_FP
+ if (dc->debug.using_dml2) {
+ dml2_create(dc, &dc->dml2_options, &context->bw_ctx.dml2);
+ }
+#endif
kref_init(&context->refcount);
return context;
@@ -2278,11 +2254,25 @@ struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
int i, j;
struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
+#ifdef CONFIG_DRM_AMD_DC_FP
+ struct dml2_context *dml2 = NULL;
+#endif
if (!new_ctx)
return NULL;
memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
+#ifdef CONFIG_DRM_AMD_DC_FP
+ if (new_ctx->bw_ctx.dml2) {
+ dml2 = kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
+ if (!dml2)
+ return NULL;
+
+ memcpy(dml2, src_ctx->bw_ctx.dml2, sizeof(struct dml2_context));
+ new_ctx->bw_ctx.dml2 = dml2;
+ }
+#endif
+
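
The whole-struct memcpy in dc_copy_state() leaves bw_ctx.dml2 aliased between the source and the copy, so the added block re-allocates the dml2 context and copies its contents, giving each state its own instance. The shallow-copy-then-deep-copy pattern in a standalone sketch with hypothetical types; note the sketch also frees the outer allocation when the inner one fails:

#include <stdlib.h>
#include <string.h>

struct dml_ctx { int config[64]; };

struct state {
	int id;
	struct dml_ctx *dml;     /* owned pointer: must not be shared */
};

static struct state *state_copy(const struct state *src)
{
	struct state *dst = malloc(sizeof(*dst));

	if (!dst)
		return NULL;
	*dst = *src;             /* shallow copy: dst->dml aliases src->dml */

	if (src->dml) {
		dst->dml = malloc(sizeof(*dst->dml));
		if (!dst->dml) {
			free(dst);  /* don't leak the half-built copy */
			return NULL;
		}
		memcpy(dst->dml, src->dml, sizeof(*dst->dml));
	}
	return dst;
}

int main(void)
{
	struct dml_ctx dml = { .config = { 1 } };
	struct state src = { .id = 1, .dml = &dml };
	struct state *dst = state_copy(&src);

	if (dst) {
		dst->dml->config[0] = 2;   /* does not touch src's dml */
		free(dst->dml);
		free(dst);
	}
	return 0;
}
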
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
@@ -2321,6 +2311,12 @@ static void dc_state_free(struct kref *kref)
{
struct dc_state *context = container_of(kref, struct dc_state, refcount);
dc_resource_state_destruct(context);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ dml2_destroy(context->bw_ctx.dml2);
+ context->bw_ctx.dml2 = 0;
+#endif
+
kvfree(context);
}
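
dc_state lifetime is kref-managed: dc_create_state() kref_init()s the count, callers pair dc_retain_state()/dc_release_state(), and dc_state_free() is the release callback that recovers the outer object with container_of and tears down the DML2 context before freeing. A kernel-style sketch of the same embedded-kref pattern, with hypothetical names (compile-oriented, not DC's actual types):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_state {
	struct kref refcount;
	void *priv;              /* stands in for bw_ctx.dml2 */
};

static void my_state_free(struct kref *kref)
{
	/* recover the enclosing object from its embedded kref */
	struct my_state *s = container_of(kref, struct my_state, refcount);

	kfree(s->priv);          /* tear down owned sub-objects first */
	kvfree(s);
}

static struct my_state *my_state_create(void)
{
	struct my_state *s = kvzalloc(sizeof(*s), GFP_KERNEL);

	if (s)
		kref_init(&s->refcount);    /* refcount starts at 1 */
	return s;
}

static void my_state_retain(struct my_state *s)
{
	kref_get(&s->refcount);
}

static void my_state_release(struct my_state *s)
{
	kref_put(&s->refcount, my_state_free);  /* frees on last put */
}
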
@@ -2488,6 +2484,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
}
static enum surface_update_type get_scaling_info_update_type(
+ const struct dc *dc,
const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
@@ -2520,6 +2517,12 @@ static enum surface_update_type get_scaling_info_update_type(
update_flags->bits.clock_change = 1;
}
+ if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
+ (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
+ u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
+ /* Changing clip size of a large surface may result in MPC slice count change */
+ update_flags->bits.bandwidth_change = 1;
+
if (u->scaling_info->src_rect.x != u->surface->src_rect.x
|| u->scaling_info->src_rect.y != u->surface->src_rect.y
|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
@@ -2557,7 +2560,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
type = get_plane_info_update_type(u);
elevate_update_type(&overall_type, type);
- type = get_scaling_info_update_type(u);
+ type = get_scaling_info_update_type(dc, u);
elevate_update_type(&overall_type, type);
if (u->flip_addr) {
@@ -3012,6 +3015,34 @@ static void copy_stream_update_to_stream(struct dc *dc,
}
}
+static void backup_plane_states_for_stream(
+ struct dc_plane_state plane_states[MAX_SURFACE_NUM],
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct dc_stream_status *status = dc_stream_get_status(stream);
+
+ if (!status)
+ return;
+
+ for (i = 0; i < status->plane_count; i++)
+ plane_states[i] = *status->plane_states[i];
+}
+
+static void restore_plane_states_for_stream(
+ struct dc_plane_state plane_states[MAX_SURFACE_NUM],
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct dc_stream_status *status = dc_stream_get_status(stream);
+
+ if (!status)
+ return;
+
+ for (i = 0; i < status->plane_count; i++)
+ *status->plane_states[i] = plane_states[i];
+}
+
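
backup_plane_states_for_stream() snapshots the pointed-to plane states by value into the scratch area, so a later restore can roll the live planes back when a minimal-transition commit needs the pre-update surface state. The copy-by-value snapshot/rollback pattern in a standalone sketch, with hypothetical types:

#include <stdio.h>

#define MAX_PLANES 6

struct plane_state { int src_w, src_h; };

struct stream_status {
	int plane_count;
	struct plane_state *planes[MAX_PLANES]; /* pointers into live state */
};

/* snapshot by value: copy the pointees into scratch, not the pointers */
static void backup_planes(struct plane_state scratch[MAX_PLANES],
			  const struct stream_status *status)
{
	for (int i = 0; i < status->plane_count; i++)
		scratch[i] = *status->planes[i];
}

static void restore_planes(const struct plane_state scratch[MAX_PLANES],
			   struct stream_status *status)
{
	for (int i = 0; i < status->plane_count; i++)
		*status->planes[i] = scratch[i];
}

int main(void)
{
	struct plane_state p = { .src_w = 1920, .src_h = 1080 };
	struct stream_status status = { .plane_count = 1, .planes = { &p } };
	struct plane_state scratch[MAX_PLANES];

	backup_planes(scratch, &status);
	p.src_w = 960;                    /* a failed update mutates the plane */
	restore_planes(scratch, &status);
	printf("restored width: %d\n", p.src_w); /* 1920 */
	return 0;
}
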
static bool update_planes_and_stream_state(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -3035,7 +3066,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
context = dc->current_state;
-
+ backup_plane_states_for_stream(dc->current_state->scratch.plane_states, stream);
update_type = dc_check_update_surfaces_for_stream(
dc, srf_updates, surface_count, stream_update, stream_status);
@@ -3065,6 +3096,9 @@ static bool update_planes_and_stream_state(struct dc *dc,
if (update_type >= update_surface_trace_level)
update_surface_trace(dc, srf_updates, surface_count);
+ for (i = 0; i < surface_count; i++)
+ copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);
+
if (update_type >= UPDATE_TYPE_FULL) {
struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
@@ -3106,8 +3140,6 @@ static bool update_planes_and_stream_state(struct dc *dc,
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *surface = srf_updates[i].surface;
- copy_surface_update_to_plane(surface, &srf_updates[i]);
-
if (update_type >= UPDATE_TYPE_MED) {
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
@@ -3138,10 +3170,19 @@ static bool update_planes_and_stream_state(struct dc *dc,
BREAK_TO_DEBUGGER();
goto fail;
}
+
+ for (i = 0; i < context->stream_count; i++) {
+ struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx,
+ context->streams[i]);
+
+ if (otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
+ resource_build_test_pattern_params(&context->res_ctx, otg_master);
+ }
}
*new_context = context;
*new_update_type = update_type;
+ backup_plane_states_for_stream(context->scratch.plane_states, stream);
return true;
@@ -3513,7 +3554,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state
*/
int pipe_idx;
int opp_inst;
- int opp_count = dc->res_pool->pipe_count;
+ int opp_count = dc->res_pool->res_cap->num_opp;
struct hubp *hubp;
int mpcc_inst;
const struct pipe_ctx *pipe_ctx;
@@ -3534,7 +3575,8 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state
mpcc_inst = hubp->inst;
// MPCC inst is equal to pipe index in practice
for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
- if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
+ if ((dc->res_pool->opps[opp_inst] != NULL) &&
+ (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) {
dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
break;
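
The loop bound changes from pipe_count to the OPP array's own capacity (res_cap->num_opp), and NULL entries are skipped, since opps[] can be sparsely populated. The same sparse-array walk in isolation:

#include <stddef.h>
#include <stdio.h>

#define NUM_OPP 4

struct opp { int inst; };

int main(void)
{
	/* sparse array: some slots are unpopulated on harvested parts */
	struct opp opp2 = { .inst = 2 };
	struct opp *opps[NUM_OPP] = { NULL, NULL, &opp2, NULL };

	/* bound the walk by the array's own capacity and skip NULL slots,
	 * mirroring the num_opp + NULL-check fix in the hunk */
	for (size_t i = 0; i < NUM_OPP; i++) {
		if (opps[i] == NULL)
			continue;
		printf("opp inst %d present at slot %zu\n", opps[i]->inst, i);
	}
	return 0;
}
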
@@ -3556,6 +3598,7 @@ static void commit_planes_for_stream(struct dc *dc,
bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
bool subvp_prev_use = false;
bool subvp_curr_use = false;
+ uint8_t current_stream_mask = 0;
// Once we apply the new subvp context to hardware it won't be in the
// dc->current_state anymore, so we have to cache it before we apply
@@ -3571,7 +3614,7 @@ static void commit_planes_for_stream(struct dc *dc,
if (get_seamless_boot_stream_count(context) == 0)
dc->hwss.prepare_bandwidth(dc, context);
- if (dc->debug.enable_double_buffered_dsc_pg_support)
+ if (dc->hwss.update_dsc_pg)
dc->hwss.update_dsc_pg(dc, context, false);
context_clock_trace(dc, context);
@@ -3905,6 +3948,12 @@ static void commit_planes_for_stream(struct dc *dc,
if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
}
+
+ current_stream_mask = get_stream_mask(dc, context);
+ if (current_stream_mask != context->stream_mask) {
+ context->stream_mask = current_stream_mask;
+ dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, current_stream_mask);
+ }
}
/**
@@ -3912,6 +3961,7 @@ static void commit_planes_for_stream(struct dc *dc,
*
* @dc: Used to get the current state status
* @stream: Target stream, which we want to remove the attached planes
+ * @srf_updates: Array of surface updates
* @surface_count: Number of surface update
* @is_plane_addition: [in] Fill out with true if it is a plane addition case
*
@@ -3928,6 +3978,7 @@ static void commit_planes_for_stream(struct dc *dc,
*/
static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
int surface_count,
bool *is_plane_addition)
{
@@ -3998,6 +4049,107 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
return force_minimal_pipe_splitting;
}
+struct pipe_split_policy_backup {
+ bool dynamic_odm_policy;
+ bool subvp_policy;
+ enum pipe_split_policy mpc_policy;
+};
+
+static void release_minimal_transition_state(struct dc *dc,
+ struct dc_state *context, struct pipe_split_policy_backup *policy)
+{
+ dc_release_state(context);
+ /* restore previous pipe split and odm policy */
+ if (!dc->config.is_vmin_only_asic)
+ dc->debug.pipe_split_policy = policy->mpc_policy;
+ dc->debug.enable_single_display_2to1_odm_policy = policy->dynamic_odm_policy;
+ dc->debug.force_disable_subvp = policy->subvp_policy;
+}
+
+static struct dc_state *create_minimal_transition_state(struct dc *dc,
+ struct dc_state *base_context, struct pipe_split_policy_backup *policy)
+{
+ struct dc_state *minimal_transition_context = dc_create_state(dc);
+ unsigned int i, j;
+
+ if (!dc->config.is_vmin_only_asic) {
+ policy->mpc_policy = dc->debug.pipe_split_policy;
+ dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ }
+ policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
+ dc->debug.enable_single_display_2to1_odm_policy = false;
+ policy->subvp_policy = dc->debug.force_disable_subvp;
+ dc->debug.force_disable_subvp = true;
+
+ dc_resource_state_copy_construct(base_context, minimal_transition_context);
+
+ /* commit minimal state */
+ if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
+ for (i = 0; i < minimal_transition_context->stream_count; i++) {
+ struct dc_stream_status *stream_status = &minimal_transition_context->stream_status[i];
+
+ for (j = 0; j < stream_status->plane_count; j++) {
+ struct dc_plane_state *plane_state = stream_status->plane_states[j];
+
+ /* force vsync flip when reconfiguring pipes to prevent underflow
+ * and corruption
+ */
+ plane_state->flip_immediate = false;
+ }
+ }
+ } else {
+ /* this should never happen */
+ release_minimal_transition_state(dc, minimal_transition_context, policy);
+ BREAK_TO_DEBUGGER();
+ minimal_transition_context = NULL;
+ }
+ return minimal_transition_context;
+}
+
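
create_minimal_transition_state() and release_minimal_transition_state() bracket a temporary override of global debug policy: the current pipe-split, dynamic-ODM and SubVP knobs are stashed in a pipe_split_policy_backup, forced to their most conservative values while the transition state is built and committed, then restored on release. The save/override/restore idea in a standalone sketch with hypothetical knobs:

#include <stdbool.h>
#include <stdio.h>

struct debug_knobs {
	bool dynamic_odm;
	bool force_disable_subvp;
	int  pipe_split_policy;   /* 0 = dynamic, 1 = avoid */
};

struct knobs_backup {
	bool dynamic_odm;
	bool force_disable_subvp;
	int  pipe_split_policy;
};

static void apply_minimal_policy(struct debug_knobs *k, struct knobs_backup *b)
{
	/* save the current values so release can restore them */
	b->pipe_split_policy = k->pipe_split_policy;
	b->dynamic_odm = k->dynamic_odm;
	b->force_disable_subvp = k->force_disable_subvp;

	/* force the most conservative configuration for the transition */
	k->pipe_split_policy = 1;
	k->dynamic_odm = false;
	k->force_disable_subvp = true;
}

static void restore_policy(struct debug_knobs *k, const struct knobs_backup *b)
{
	k->pipe_split_policy = b->pipe_split_policy;
	k->dynamic_odm = b->dynamic_odm;
	k->force_disable_subvp = b->force_disable_subvp;
}

int main(void)
{
	struct debug_knobs k = { .dynamic_odm = true, .pipe_split_policy = 0 };
	struct knobs_backup b;

	apply_minimal_policy(&k, &b);
	/* ... build and commit the minimal transition state here ... */
	restore_policy(&k, &b);
	printf("dynamic_odm restored: %d\n", k.dynamic_odm);
	return 0;
}
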
+static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ bool success = false;
+ struct dc_state *minimal_transition_context;
+ struct pipe_split_policy_backup policy;
+
+ /* commit based on new context */
+ minimal_transition_context = create_minimal_transition_state(dc,
+ context, &policy);
+ if (minimal_transition_context) {
+ if (dc->hwss.is_pipe_topology_transition_seamless(
+ dc, dc->current_state, minimal_transition_context) &&
+ dc->hwss.is_pipe_topology_transition_seamless(
+ dc, minimal_transition_context, context)) {
+ DC_LOG_DC("%s base = new state\n", __func__);
+ success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
+ }
+ release_minimal_transition_state(dc, minimal_transition_context, &policy);
+ }
+
+ if (!success) {
+ /* commit based on current context */
+ restore_plane_states_for_stream(dc->current_state->scratch.plane_states, stream);
+ minimal_transition_context = create_minimal_transition_state(dc,
+ dc->current_state, &policy);
+ if (minimal_transition_context) {
+ if (dc->hwss.is_pipe_topology_transition_seamless(
+ dc, dc->current_state, minimal_transition_context) &&
+ dc->hwss.is_pipe_topology_transition_seamless(
+ dc, minimal_transition_context, context)) {
+ DC_LOG_DC("%s base = current state\n", __func__);
+ success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
+ }
+ release_minimal_transition_state(dc, minimal_transition_context, &policy);
+ }
+ restore_plane_states_for_stream(context->scratch.plane_states, stream);
+ }
+
+ ASSERT(success);
+ return success;
+}
+
/**
* commit_minimal_transition_state - Create a transition pipe split state
*
@@ -4019,23 +4171,14 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context)
{
- struct dc_state *transition_context = dc_create_state(dc);
- enum pipe_split_policy tmp_mpc_policy = 0;
- bool temp_dynamic_odm_policy = 0;
- bool temp_subvp_policy = 0;
+ struct dc_state *transition_context;
+ struct pipe_split_policy_backup policy;
enum dc_status ret = DC_ERROR_UNEXPECTED;
unsigned int i, j;
unsigned int pipe_in_use = 0;
bool subvp_in_use = false;
bool odm_in_use = false;
- if (!transition_context)
- return false;
- /* Setup:
- * Store the current ODM and MPC config in some temp variables to be
- * restored after we commit the transition state.
- */
-
/* check current pipes in use*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
@@ -4060,10 +4203,10 @@ static bool commit_minimal_transition_state(struct dc *dc,
* pipe, we must use the minimal transition.
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->next_odm_pipe) {
- odm_in_use = true;
+ if (resource_is_pipe_type(pipe, OTG_MASTER)) {
+ odm_in_use = resource_get_odm_slice_count(pipe) > 1;
break;
}
}
@@ -4076,54 +4219,23 @@ static bool commit_minimal_transition_state(struct dc *dc,
* Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially
* enter/exit MPO when DCN still have enough resources.
*/
- if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) {
- dc_release_state(transition_context);
+ if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use)
return true;
- }
-
- if (!dc->config.is_vmin_only_asic) {
- tmp_mpc_policy = dc->debug.pipe_split_policy;
- dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
- }
-
- temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
- dc->debug.enable_single_display_2to1_odm_policy = false;
- temp_subvp_policy = dc->debug.force_disable_subvp;
- dc->debug.force_disable_subvp = true;
-
- dc_resource_state_copy_construct(transition_base_context, transition_context);
-
- /* commit minimal state */
- if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {
- for (i = 0; i < transition_context->stream_count; i++) {
- struct dc_stream_status *stream_status = &transition_context->stream_status[i];
-
- for (j = 0; j < stream_status->plane_count; j++) {
- struct dc_plane_state *plane_state = stream_status->plane_states[j];
-
- /* force vsync flip when reconfiguring pipes to prevent underflow
- * and corruption
- */
- plane_state->flip_immediate = false;
- }
- }
+ DC_LOG_DC("%s base = %s state, reason = %s\n", __func__,
+ dc->current_state == transition_base_context ? "current" : "new",
+ subvp_in_use ? "Subvp In Use" :
+ odm_in_use ? "ODM in Use" :
+ dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" :
+ "Unknown");
+ transition_context = create_minimal_transition_state(dc,
+ transition_base_context, &policy);
+ if (transition_context) {
ret = dc_commit_state_no_check(dc, transition_context);
+ release_minimal_transition_state(dc, transition_context, &policy);
}
- /* always release as dc_commit_state_no_check retains in good case */
- dc_release_state(transition_context);
-
- /* TearDown:
- * Restore original configuration for ODM and MPO.
- */
- if (!dc->config.is_vmin_only_asic)
- dc->debug.pipe_split_policy = tmp_mpc_policy;
-
- dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
- dc->debug.force_disable_subvp = temp_subvp_policy;
-
if (ret != DC_OK) {
/* this should never happen */
BREAK_TO_DEBUGGER();
@@ -4295,6 +4407,51 @@ static bool fast_update_only(struct dc *dc,
&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}
+static bool should_commit_minimal_transition_for_windowed_mpo_odm(struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context)
+{
+ struct pipe_ctx *cur_pipe, *new_pipe;
+ bool cur_is_odm_in_use, new_is_odm_in_use;
+ struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
+ struct dc_stream_status *new_stream_status = stream_get_status(context, stream);
+
+ if (!dc->debug.enable_single_display_2to1_odm_policy ||
+ !dc->config.enable_windowed_mpo_odm)
+ /* skip the check if windowed MPO ODM or dynamic ODM is turned
+ * off.
+ */
+ return false;
+
+ if (context == dc->current_state)
+ /* skip the check for fast update */
+ return false;
+
+ if (new_stream_status->plane_count != cur_stream_status->plane_count)
+ /* plane count changed, not a plane scaling update so not the
+ * case we are looking for
+ */
+ return false;
+
+ cur_pipe = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, stream);
+ new_pipe = resource_get_otg_master_for_stream(&context->res_ctx, stream);
+ cur_is_odm_in_use = resource_get_odm_slice_count(cur_pipe) > 1;
+ new_is_odm_in_use = resource_get_odm_slice_count(new_pipe) > 1;
+ if (cur_is_odm_in_use == new_is_odm_in_use)
+ /* ODM state isn't changed, not the case we are looking for */
+ return false;
+
+ if (dc->hwss.is_pipe_topology_transition_seamless &&
+ dc->hwss.is_pipe_topology_transition_seamless(
+ dc, dc->current_state, context))
+ /* transition can be achieved without the need for committing
+ * minimal transition state first
+ */
+ return false;
+
+ return true;
+}
+
bool dc_update_planes_and_stream(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -4312,11 +4469,15 @@ bool dc_update_planes_and_stream(struct dc *dc,
*/
bool force_minimal_pipe_splitting = 0;
bool is_plane_addition = 0;
+ bool is_fast_update_only;
populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
+ is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
+ surface_count, stream_update, stream);
force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
dc,
stream,
+ srf_updates,
surface_count,
&is_plane_addition);
@@ -4363,9 +4524,21 @@ bool dc_update_planes_and_stream(struct dc *dc,
update_type = UPDATE_TYPE_FULL;
}
+ /* when windowed MPO ODM is supported, we need to handle a special case
+ * where we can transition between ODM combine and MPC combine due to
+ * plane scaling update. This transition will require us to commit
+ * minimal transition state. The condition to trigger this update can't
+ * be predicted by could_mpcc_tree_change_for_active_pipes because we
+ * can only determine it after DML validation. Therefore we can't rely
+ * on the existing commit minimal transition state sequence. Instead
+ * we have to add additional handling here to handle this transition
+ * with its own special sequence.
+ */
+ if (should_commit_minimal_transition_for_windowed_mpo_odm(dc, stream, context))
+ commit_minimal_transition_state_for_windowed_mpo_odm(dc,
+ context, stream);
update_seamless_boot_flags(dc, context, surface_count, stream);
- if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
- !dc->debug.enable_legacy_fast_update) {
+ if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
@@ -4374,6 +4547,13 @@ bool dc_update_planes_and_stream(struct dc *dc,
update_type,
context);
} else {
+ if (!stream_update &&
+ dc->hwss.is_pipe_topology_transition_seamless &&
+ !dc->hwss.is_pipe_topology_transition_seamless(
+ dc, dc->current_state, context)) {
+ DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
+ BREAK_TO_DEBUGGER();
+ }
commit_planes_for_stream(
dc,
srf_updates,
@@ -4603,9 +4783,6 @@ void dc_set_power_state(
struct dc *dc,
enum dc_acpi_cm_power_state power_state)
{
- struct kref refcount;
- struct display_mode_lib *dml;
-
if (!dc->current_state)
return;
@@ -4625,30 +4802,8 @@ void dc_set_power_state(
break;
default:
ASSERT(dc->current_state->stream_count == 0);
- /* Zero out the current context so that on resume we start with
- * clean state, and dc hw programming optimizations will not
- * cause any trouble.
- */
- dml = kzalloc(sizeof(struct display_mode_lib),
- GFP_KERNEL);
-
- ASSERT(dml);
- if (!dml)
- return;
-
- /* Preserve refcount */
- refcount = dc->current_state->refcount;
- /* Preserve display mode lib */
- memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
dc_resource_state_destruct(dc->current_state);
- memset(dc->current_state, 0,
- sizeof(*dc->current_state));
-
- dc->current_state->refcount = refcount;
- dc->current_state->bw_ctx.dml = *dml;
-
- kfree(dml);
break;
}
@@ -4730,6 +4885,9 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
if (dc->debug.disable_idle_power_optimizations)
return;
+ if (dc->caps.ips_support && dc->config.disable_ips)
+ return;
+
if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
return;
@@ -4741,6 +4899,26 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
dc->idle_optimizations_allowed = allow;
}
+bool dc_dmub_is_ips_idle_state(struct dc *dc)
+{
+ uint32_t idle_state = 0;
+
+ if (dc->debug.disable_idle_power_optimizations)
+ return false;
+
+ if (!dc->caps.ips_support || dc->config.disable_ips)
+ return false;
+
+ if (dc->hwss.get_idle_state)
+ idle_state = dc->hwss.get_idle_state(dc);
+
+ if ((idle_state & DMUB_IPS1_ALLOW_MASK) ||
+ (idle_state & DMUB_IPS2_ALLOW_MASK))
+ return true;
+
+ return false;
+}
+
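
dc_dmub_is_ips_idle_state() reads a raw idle-state word through the optional get_idle_state hook and reports idle when either the IPS1 or the IPS2 allow bit is set. The bit-test in isolation, with placeholder mask values (the real DMUB_IPS1_ALLOW_MASK/DMUB_IPS2_ALLOW_MASK definitions live in the DMUB headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* placeholder values; the real masks come from the DMUB firmware headers */
#define IPS1_ALLOW_MASK 0x00000001u
#define IPS2_ALLOW_MASK 0x00000002u

static bool is_ips_idle(uint32_t idle_state)
{
	/* idle if either IPS1 or IPS2 entry is currently allowed */
	return (idle_state & (IPS1_ALLOW_MASK | IPS2_ALLOW_MASK)) != 0;
}

int main(void)
{
	printf("%d\n", is_ips_idle(0x2)); /* 1: IPS2 allowed */
	printf("%d\n", is_ips_idle(0x0)); /* 0: neither allowed */
	return 0;
}
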
/* set min and max memory clock to lowest and highest DPM level, respectively */
void dc_unlock_memory_clock_frequency(struct dc *dc)
{
@@ -5273,14 +5451,55 @@ bool dc_abm_save_restore(
void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
{
unsigned int i;
- bool subvp_in_use = false;
+ bool subvp_sw_cursor_req = false;
for (i = 0; i < dc->current_state->stream_count; i++) {
- if (dc->current_state->streams[i]->mall_stream_config.type != SUBVP_NONE) {
- subvp_in_use = true;
+ if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i])) {
+ subvp_sw_cursor_req = true;
break;
}
}
- properties->cursor_size_limit = subvp_in_use ? 64 : dc->caps.max_cursor_size;
+ properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
+}
+
+/**
+ * dc_set_edp_power() - DM controls eDP power to be ON/OFF
+ *
+ * Called when DM wants to power on/off eDP.
+ * Only works on links that have the skip_implict_edp_power_control flag set.
+ *
+ * @dc: Current DC state
+ * @edp_link: a link with eDP connector signal type
+ * @powerOn: power on/off eDP
+ *
+ * Return: void
+ */
+void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
+ bool powerOn)
+{
+ if (edp_link->connector_signal != SIGNAL_TYPE_EDP)
+ return;
+
+ if (edp_link->skip_implict_edp_power_control == false)
+ return;
+
+ edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
+}
+
+/**
+ * dc_get_power_profile_for_dc_state() - extracts power profile from dc state
+ *
+ * Called when DM wants to make power policy decisions based on dc_state
+ */
+struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
+{
+ struct dc_power_profile profile = { 0 };
+
+ profile.power_level += !context->bw_ctx.bw.dcn.clk.p_state_change_support;
+
+ return profile;
}
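
The power level is derived arithmetically from a capability flag: in C, !x evaluates to 0 or 1, so a state without P-state change support bumps power_level by one. In isolation:

#include <stdbool.h>
#include <stdio.h>

struct power_profile { int power_level; };

static struct power_profile profile_for(bool p_state_change_support)
{
	struct power_profile p = { 0 };

	/* !support is 0 or 1: lacking P-state switching raises the level */
	p.power_level += !p_state_change_support;
	return p;
}

int main(void)
{
	printf("%d\n", profile_for(false).power_level); /* 1 */
	printf("%d\n", profile_for(true).power_level);  /* 0 */
	return 0;
}
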