Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c')
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  478
1 file changed, 319 insertions, 159 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 50c783e19f5a..4300ce98ce8d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -210,7 +210,7 @@ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *amdgpu_dm_connector,
- uint32_t link_index,
+ u32 link_index,
struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
struct amdgpu_encoder *aencoder,
@@ -262,7 +262,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
u32 *vbl, u32 *position)
{
- uint32_t v_blank_start, v_blank_end, h_position, v_position;
+ u32 v_blank_start, v_blank_end, h_position, v_position;
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
@@ -361,7 +361,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
struct amdgpu_device *adev = irq_params->adev;
unsigned long flags;
struct drm_pending_vblank_event *e;
- uint32_t vpos, hpos, v_blank_start, v_blank_end;
+ u32 vpos, hpos, v_blank_start, v_blank_end;
bool vrr_active;
amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
@@ -648,7 +648,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct dc_link *link;
- uint8_t link_index = 0;
+ u8 link_index = 0;
struct drm_device *dev;
if (adev == NULL)
@@ -749,7 +749,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_display_manager *dm = &adev->dm;
struct dmcub_trace_buf_entry entry = { 0 };
- uint32_t count = 0;
+ u32 count = 0;
struct dmub_hpd_work *dmub_hpd_wrk;
struct dc_link *plink = NULL;
@@ -1015,7 +1015,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
struct dmub_srv_hw_params hw_params;
enum dmub_status status;
const unsigned char *fw_inst_const, *fw_bss_data;
- uint32_t i, fw_inst_const_size, fw_bss_data_size;
+ u32 i, fw_inst_const_size, fw_bss_data_size;
bool has_hw_support;
if (!dmub_srv)
@@ -1176,10 +1176,10 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
- uint64_t pt_base;
- uint32_t logical_addr_low;
- uint32_t logical_addr_high;
- uint32_t agp_base, agp_bot, agp_top;
+ u64 pt_base;
+ u32 logical_addr_low;
+ u32 logical_addr_high;
+ u32 agp_base, agp_bot, agp_top;
PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
memset(pa_config, 0, sizeof(*pa_config));
@@ -1642,7 +1642,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
+ adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
+ if (!adev->dm.secure_display_ctxs) {
+ DRM_ERROR("amdgpu: failed to initialize secure_display_ctxs.\n");
+ }
#endif
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
init_completion(&adev->dm.dmub_aux_transfer_done);
@@ -1737,10 +1740,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
amdgpu_dm_destroy_drm_device(&adev->dm);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- if (adev->dm.crc_rd_wrk) {
- flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
- kfree(adev->dm.crc_rd_wrk);
- adev->dm.crc_rd_wrk = NULL;
+ if (adev->dm.secure_display_ctxs) {
+ for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+ if (adev->dm.secure_display_ctxs[i].crtc) {
+ flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
+ flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+ }
+ }
+ kfree(adev->dm.secure_display_ctxs);
+ adev->dm.secure_display_ctxs = NULL;
}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
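The teardown loop above implies the shape of the new per-link secure-display state: one context per DC link, each carrying a CRTC pointer and the two work items that amdgpu_dm_fini() flushes. The allocation sketch below is inferred from that loop and is hypothetical; the real amdgpu_dm_crtc_secure_display_create_contexts() and its context struct may differ.

/* Hypothetical sketch only: struct, field and function names are inferred
 * from the teardown loop in amdgpu_dm_fini() above, not taken from the patch.
 */
struct sketch_secure_display_ctx {
	struct work_struct forward_roi_work;
	struct work_struct notify_ta_work;
	struct drm_crtc *crtc;
};

static void sketch_forward_roi_work(struct work_struct *work) { /* forward ROI to DMUB */ }
static void sketch_notify_ta_work(struct work_struct *work) { /* notify the PSP TA */ }

static struct sketch_secure_display_ctx *
sketch_create_secure_display_contexts(struct amdgpu_device *adev)
{
	struct sketch_secure_display_ctx *ctxs;
	int i;

	ctxs = kcalloc(adev->dm.dc->caps.max_links, sizeof(*ctxs), GFP_KERNEL);
	if (!ctxs)
		return NULL;

	for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
		INIT_WORK(&ctxs[i].forward_roi_work, sketch_forward_roi_work);
		INIT_WORK(&ctxs[i].notify_ta_work, sketch_notify_ta_work);
		/* ->crtc stays NULL until a CRC window is activated on that link. */
	}

	return ctxs;
}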
@@ -1875,25 +1883,17 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
- r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
- if (r == -ENOENT) {
+ r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
+ if (r == -ENODEV) {
/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
adev->dm.fw_dmcu = NULL;
return 0;
}
if (r) {
- dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
- fw_name_dmcu);
- return r;
- }
-
- r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
- if (r) {
dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
fw_name_dmcu);
- release_firmware(adev->dm.fw_dmcu);
- adev->dm.fw_dmcu = NULL;
+ amdgpu_ucode_release(&adev->dm.fw_dmcu);
return r;
}
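For reference, a minimal sketch of the consolidated firmware-handling pattern this hunk moves to: amdgpu_ucode_request() folds the old request_firmware_direct() + amdgpu_ucode_validate() pair into one call, and amdgpu_ucode_release() is its teardown counterpart. The helper and its parameter names below are illustrative only, not code from this patch.

/* Illustrative sketch of the request/validate/release pattern used by this
 * patch; sketch_load_optional_fw() is a hypothetical helper name.
 */
static int sketch_load_optional_fw(struct amdgpu_device *adev,
				   const struct firmware **fw,
				   const char *fw_name)
{
	int r;

	/* Request + validate in one step (signature as used in this patch). */
	r = amdgpu_ucode_request(adev, fw, fw_name);
	if (r == -ENODEV) {
		/* Optional firmware: absence is not a fatal error. */
		*fw = NULL;
		return 0;
	}
	if (r) {
		/* Load or validation failure: drop any half-taken reference. */
		amdgpu_ucode_release(fw);
		return r;
	}

	return 0;
}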
@@ -1939,7 +1939,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
struct dmub_srv_fb_info *fb_info;
struct dmub_srv *dmub_srv;
const struct dmcub_firmware_header_v1_0 *hdr;
- const char *fw_name_dmub;
enum dmub_asic dmub_asic;
enum dmub_status status;
int r;
@@ -1947,73 +1946,46 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(2, 1, 0):
dmub_asic = DMUB_ASIC_DCN21;
- fw_name_dmub = FIRMWARE_RENOIR_DMUB;
- if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
- fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
break;
case IP_VERSION(3, 0, 0):
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
dmub_asic = DMUB_ASIC_DCN30;
- fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
- } else {
+ else
dmub_asic = DMUB_ASIC_DCN30;
- fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
- }
break;
case IP_VERSION(3, 0, 1):
dmub_asic = DMUB_ASIC_DCN301;
- fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
break;
case IP_VERSION(3, 0, 2):
dmub_asic = DMUB_ASIC_DCN302;
- fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
break;
case IP_VERSION(3, 0, 3):
dmub_asic = DMUB_ASIC_DCN303;
- fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
break;
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
- fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
break;
case IP_VERSION(3, 1, 4):
dmub_asic = DMUB_ASIC_DCN314;
- fw_name_dmub = FIRMWARE_DCN_314_DMUB;
break;
case IP_VERSION(3, 1, 5):
dmub_asic = DMUB_ASIC_DCN315;
- fw_name_dmub = FIRMWARE_DCN_315_DMUB;
break;
case IP_VERSION(3, 1, 6):
dmub_asic = DMUB_ASIC_DCN316;
- fw_name_dmub = FIRMWARE_DCN316_DMUB;
break;
case IP_VERSION(3, 2, 0):
dmub_asic = DMUB_ASIC_DCN32;
- fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
break;
case IP_VERSION(3, 2, 1):
dmub_asic = DMUB_ASIC_DCN321;
- fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
break;
default:
/* ASIC doesn't support DMUB. */
return 0;
}
- r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
- if (r) {
- DRM_ERROR("DMUB firmware loading failed: %d\n", r);
- return 0;
- }
-
- r = amdgpu_ucode_validate(adev->dm.dmub_fw);
- if (r) {
- DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
- return 0;
- }
-
hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
@@ -2080,7 +2052,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
* TODO: Move this into GART.
*/
r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->dm.dmub_bo,
&adev->dm.dmub_bo_gpu_addr,
&adev->dm.dmub_bo_cpu_addr);
if (r)
@@ -2135,11 +2109,8 @@ static int dm_sw_fini(void *handle)
adev->dm.dmub_srv = NULL;
}
- release_firmware(adev->dm.dmub_fw);
- adev->dm.dmub_fw = NULL;
-
- release_firmware(adev->dm.fw_dmcu);
- adev->dm.fw_dmcu = NULL;
+ amdgpu_ucode_release(&adev->dm.dmub_fw);
+ amdgpu_ucode_release(&adev->dm.fw_dmcu);
return 0;
}
@@ -2165,6 +2136,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
DRM_ERROR("DM_MST: Failed to start MST\n");
aconnector->dc_link->type =
dc_connection_single;
+ ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link);
break;
}
}
@@ -2486,7 +2459,7 @@ struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
- uint32_t i;
+ u32 i;
struct drm_connector_state *new_con_state;
struct drm_connector *connector;
struct drm_crtc *crtc_from_state;
@@ -2734,12 +2707,14 @@ static int dm_resume(void *handle)
drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
+ if (!aconnector->dc_link)
+ continue;
+
/*
* this is the case when traversing through already created
* MST connectors, should be skipped
*/
- if (aconnector->dc_link &&
- aconnector->dc_link->type == dc_connection_mst_branch)
+ if (aconnector->dc_link->type == dc_connection_mst_branch)
continue;
mutex_lock(&aconnector->hpd_lock);
@@ -3117,8 +3092,8 @@ static void handle_hpd_irq(void *param)
static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
- uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
- uint8_t dret;
+ u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+ u8 dret;
bool new_irq_handled = false;
int dpcd_addr;
int dpcd_bytes_to_read;
@@ -3146,7 +3121,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
while (dret == dpcd_bytes_to_read &&
process_count < max_process_count) {
- uint8_t retry;
+ u8 retry;
dret = 0;
process_count++;
@@ -3165,7 +3140,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
dpcd_bytes_to_read - 1;
for (retry = 0; retry < 3; retry++) {
- uint8_t wret;
+ u8 wret;
wret = drm_dp_dpcd_write(
&aconnector->dm_dp_aux.aux,
@@ -4179,12 +4154,12 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector);
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
struct amdgpu_display_manager *dm = &adev->dm;
- int32_t i;
+ s32 i;
struct amdgpu_dm_connector *aconnector = NULL;
struct amdgpu_encoder *aencoder = NULL;
struct amdgpu_mode_info *mode_info = &adev->mode_info;
- uint32_t link_cnt;
- int32_t primary_planes;
+ u32 link_cnt;
+ s32 primary_planes;
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
bool psr_feature_enabled = false;
@@ -4361,6 +4336,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
amdgpu_set_panel_orientation(&aconnector->base);
}
+ /* If we didn't find a panel, notify the acpi video detection */
+ if (dm->adev->flags & AMD_IS_APU && dm->num_of_edps == 0)
+ acpi_video_report_nolcd();
+
/* Software is initialized. Now we can register interrupt handlers. */
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -4500,6 +4479,61 @@ DEVICE_ATTR_WO(s3_debug);
#endif
+static int dm_init_microcode(struct amdgpu_device *adev)
+{
+ char *fw_name_dmub;
+ int r;
+
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+ if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+ fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
+ break;
+ case IP_VERSION(3, 0, 0):
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
+ fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
+ else
+ fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
+ break;
+ case IP_VERSION(3, 0, 1):
+ fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
+ break;
+ case IP_VERSION(3, 0, 2):
+ fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
+ break;
+ case IP_VERSION(3, 0, 3):
+ fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
+ break;
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
+ break;
+ case IP_VERSION(3, 1, 4):
+ fw_name_dmub = FIRMWARE_DCN_314_DMUB;
+ break;
+ case IP_VERSION(3, 1, 5):
+ fw_name_dmub = FIRMWARE_DCN_315_DMUB;
+ break;
+ case IP_VERSION(3, 1, 6):
+ fw_name_dmub = FIRMWARE_DCN316_DMUB;
+ break;
+ case IP_VERSION(3, 2, 0):
+ fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
+ break;
+ case IP_VERSION(3, 2, 1):
+ fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
+ break;
+ default:
+ /* ASIC doesn't support DMUB. */
+ return 0;
+ }
+ r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
+ if (r)
+ DRM_ERROR("DMUB firmware loading failed: %d\n", r);
+ return r;
+}
+
static int dm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -4632,7 +4666,7 @@ static int dm_early_init(void *handle)
#endif
adev->dc_enabled = true;
- return 0;
+ return dm_init_microcode(adev);
}
static bool modereset_required(struct drm_crtc_state *crtc_state)
@@ -4697,7 +4731,7 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const struct drm_plane_state *plane_state,
- const uint64_t tiling_flags,
+ const u64 tiling_flags,
struct dc_plane_info *plane_info,
struct dc_plane_address *address,
bool tmz_surface,
@@ -4872,7 +4906,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
static inline void fill_dc_dirty_rect(struct drm_plane *plane,
struct rect *dirty_rect, int32_t x,
- int32_t y, int32_t width, int32_t height,
+ s32 y, s32 width, s32 height,
int *i, bool ffu)
{
if (*i > DC_MAX_DIRTY_RECTS)
@@ -4928,11 +4962,11 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
{
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
struct rect *dirty_rects = flip_addrs->dirty_rects;
- uint32_t num_clips;
+ u32 num_clips;
struct drm_mode_rect *clips;
bool bb_changed;
bool fb_changed;
- uint32_t i = 0;
+ u32 i = 0;
/*
* Cursor plane has it's own dirty rect update interface. See
@@ -5078,7 +5112,7 @@ static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
bool is_y420, int requested_bpc)
{
- uint8_t bpc;
+ u8 bpc;
if (is_y420) {
bpc = 8;
@@ -5622,8 +5656,8 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
uint32_t max_dsc_target_bpp_limit_override)
{
const struct dc_link_settings *verified_link_cap = NULL;
- uint32_t link_bw_in_kbps;
- uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
+ u32 link_bw_in_kbps;
+ u32 edp_min_bpp_x16, edp_max_bpp_x16;
struct dc *dc = sink->ctx->dc;
struct dc_dsc_bw_range bw_range = {0};
struct dc_dsc_config dsc_cfg = {0};
@@ -5680,11 +5714,11 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
struct dsc_dec_dpcd_caps *dsc_caps)
{
struct drm_connector *drm_connector = &aconnector->base;
- uint32_t link_bandwidth_kbps;
+ u32 link_bandwidth_kbps;
struct dc *dc = sink->ctx->dc;
- uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
- uint32_t dsc_max_supported_bw_in_kbps;
- uint32_t max_dsc_target_bpp_limit_override =
+ u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
+ u32 dsc_max_supported_bw_in_kbps;
+ u32 max_dsc_target_bpp_limit_override =
drm_connector->display_info.max_dsc_bpp;
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
@@ -5831,7 +5865,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
} else {
- recalculate_timing = is_freesync_video_mode(&mode, aconnector);
+ recalculate_timing = amdgpu_freesync_vid_mode &&
+ is_freesync_video_mode(&mode, aconnector);
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
drm_mode_copy(&saved_mode, &mode);
@@ -6905,7 +6940,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
const struct drm_display_mode *m;
struct drm_display_mode *new_mode;
uint i;
- uint32_t new_modes_count = 0;
+ u32 new_modes_count = 0;
/* Standard FPS values
*
@@ -6919,7 +6954,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
* 60 - Commonly used
* 48,72,96,120 - Multiples of 24
*/
- static const uint32_t common_rates[] = {
+ static const u32 common_rates[] = {
23976, 24000, 25000, 29970, 30000,
48000, 50000, 60000, 72000, 96000, 120000
};
@@ -6935,8 +6970,8 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
return 0;
for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
- uint64_t target_vtotal, target_vtotal_diff;
- uint64_t num, den;
+ u64 target_vtotal, target_vtotal_diff;
+ u64 num, den;
if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
continue;
@@ -6982,7 +7017,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- if (!edid)
+ if (!(amdgpu_freesync_vid_mode && edid))
return;
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@ -7178,7 +7213,7 @@ create_i2c(struct ddc_service *ddc_service,
*/
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector,
- uint32_t link_index,
+ u32 link_index,
struct amdgpu_encoder *aencoder)
{
int res = 0;
@@ -7363,27 +7398,55 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,
}
#ifdef CONFIG_DRM_AMD_DC_HDCP
-static bool is_content_protection_different(struct drm_connector_state *state,
- const struct drm_connector_state *old_state,
- const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
+static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
+ struct drm_crtc_state *old_crtc_state,
+ struct drm_connector_state *new_conn_state,
+ struct drm_connector_state *old_conn_state,
+ const struct drm_connector *connector,
+ struct hdcp_workqueue *hdcp_w)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
- /* Handle: Type0/1 change */
- if (old_state->hdcp_content_type != state->hdcp_content_type &&
- state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ connector->index, connector->status, connector->dpms);
+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ old_conn_state->content_protection, new_conn_state->content_protection);
+
+ if (old_crtc_state)
+ pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ old_crtc_state->enable,
+ old_crtc_state->active,
+ old_crtc_state->mode_changed,
+ old_crtc_state->active_changed,
+ old_crtc_state->connectors_changed);
+
+ if (new_crtc_state)
+ pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* hdcp content type change */
+ if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
+ new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
return true;
}
- /* CP is being re enabled, ignore this
- *
- * Handles: ENABLED -> DESIRED
- */
- if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
- state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
- state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ /* CP is being re-enabled, ignore this */
+ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (new_crtc_state && new_crtc_state->mode_changed) {
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
+ return true;
+ }
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
return false;
}
@@ -7391,9 +7454,9 @@ static bool is_content_protection_different(struct drm_connector_state *state,
*
* Handles: UNDESIRED -> ENABLED
*/
- if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
- state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
- state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
/* Stream removed and re-enabled
*
@@ -7403,10 +7466,12 @@ static bool is_content_protection_different(struct drm_connector_state *state,
*
* Handles: DESIRED -> DESIRED (Special case)
*/
- if (!(old_state->crtc && old_state->crtc->enabled) &&
- state->crtc && state->crtc->enabled &&
+ if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
+ new_conn_state->crtc && new_conn_state->crtc->enabled &&
connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
dm_con_state->update_hdcp = false;
+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
+ __func__);
return true;
}
@@ -7418,35 +7483,42 @@ static bool is_content_protection_different(struct drm_connector_state *state,
*
* Handles: DESIRED -> DESIRED (Special case)
*/
- if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
- connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
+ if (dm_con_state->update_hdcp &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+ connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
dm_con_state->update_hdcp = false;
+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
+ __func__);
return true;
}
- /*
- * Handles: UNDESIRED -> UNDESIRED
- * DESIRED -> DESIRED
- * ENABLED -> ENABLED
- */
- if (old_state->content_protection == state->content_protection)
+ if (old_conn_state->content_protection == new_conn_state->content_protection) {
+ if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (new_crtc_state && new_crtc_state->mode_changed) {
+ pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
+ __func__);
+ return true;
+ }
+ pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
+ __func__);
+ return false;
+ }
+
+ pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
return false;
+ }
- /*
- * Handles: UNDESIRED -> DESIRED
- * DESIRED -> UNDESIRED
- * ENABLED -> UNDESIRED
- */
- if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
+ __func__);
return true;
+ }
- /*
- * Handles: DESIRED -> ENABLED
- */
+ pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
return false;
}
-
#endif
+
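Condensed, the reworked transitions above amount to: re-authenticate on a Type0/1 change, on ENABLED -> DESIRED only when the CRTC mode changed, on a same-state DESIRED/ENABLED when the mode changed, and on any other transition that does not end in ENABLED. The standalone helper below is only an illustration of that decision table (the DESIRED -> DESIRED hot-plug and stream-re-enable special cases are left out); it is not code from the patch.

/* Illustrative condensation of is_content_protection_different(); the
 * update_hdcp/stream-re-enable special cases are omitted. cp values are
 * DRM_MODE_CONTENT_PROTECTION_{UNDESIRED,DESIRED,ENABLED}.
 */
static bool sketch_hdcp_update_needed(unsigned int old_cp, unsigned int new_cp,
				      bool type_changed, bool mode_changed)
{
	/* Type0/1 change while protection is still requested. */
	if (type_changed && new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		return true;

	/* ENABLED -> DESIRED: CP being re-enabled; only re-authenticate if
	 * the mode changed underneath it.
	 */
	if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    new_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED)
		return mode_changed;

	/* UNDESIRED -> ENABLED is normalized to DESIRED and (re)starts
	 * authentication.
	 */
	if (old_cp == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		return true;

	/* Unchanged state: only a mode change on a protected connector
	 * forces an update.
	 */
	if (old_cp == new_cp)
		return new_cp >= DRM_MODE_CONTENT_PROTECTION_DESIRED && mode_changed;

	/* Remaining transitions (e.g. UNDESIRED -> DESIRED, ENABLED ->
	 * UNDESIRED) need an update; DESIRED -> ENABLED does not.
	 */
	return new_cp != DRM_MODE_CONTENT_PROTECTION_ENABLED;
}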
static void remove_stream(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dc_stream_state *stream)
@@ -7662,8 +7734,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_crtc *pcrtc,
bool wait_for_vblank)
{
- uint32_t i;
- uint64_t timestamp_ns;
+ u32 i;
+ u64 timestamp_ns;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
@@ -7674,7 +7746,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
int planes_count = 0, vpos, hpos;
unsigned long flags;
- uint32_t target_vblank, last_flip_vblank;
+ u32 target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool cursor_update = false;
bool pflip_present = false;
@@ -8112,7 +8184,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_display_manager *dm = &adev->dm;
struct dm_atomic_state *dm_state;
struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
- uint32_t i, j;
+ u32 i, j;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
unsigned long flags;
@@ -8286,10 +8358,61 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
+
+ if (!connector)
+ continue;
+
+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ connector->index, connector->status, connector->dpms);
+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ old_con_state->content_protection, new_con_state->content_protection);
+
+ if (aconnector->dc_sink) {
+ if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
+ aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
+ pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+ aconnector->dc_sink->edid_caps.display_name);
+ }
+ }
+
+ new_crtc_state = NULL;
+ old_crtc_state = NULL;
+
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
+
+ if (old_crtc_state)
+ pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ old_crtc_state->enable,
+ old_crtc_state->active,
+ old_crtc_state->mode_changed,
+ old_crtc_state->active_changed,
+ old_crtc_state->connectors_changed);
+
+ if (new_crtc_state)
+ pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+ }
+
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
new_crtc_state = NULL;
+ old_crtc_state = NULL;
- if (acrtc)
+ if (acrtc) {
new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -8301,11 +8424,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
continue;
}
- if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
+ if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
+ old_con_state, connector, adev->dm.hdcp_workqueue)) {
+ /* when display is unplugged from mst hub, connector will
+ * be destroyed within dm_dp_mst_connector_destroy. connector
+ * hdcp properties, like type, undesired, desired, enabled,
+ * will be lost. So, save hdcp properties into hdcp_work within
+ * amdgpu_dm_atomic_commit_tail. If the same display is
+ * plugged back with same display index, its hdcp properties
+ * will be retrieved from hdcp_work within dm_dp_mst_get_modes
+ */
+
+ bool enable_encryption = false;
+
+ if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ if (aconnector->dc_link && aconnector->dc_sink &&
+ aconnector->dc_link->type == dc_connection_mst_branch) {
+ struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+ struct hdcp_workqueue *hdcp_w =
+ &hdcp_work[aconnector->dc_link->link_index];
+
+ hdcp_w->hdcp_content_type[connector->index] =
+ new_con_state->hdcp_content_type;
+ hdcp_w->content_protection[connector->index] =
+ new_con_state->content_protection;
+ }
+
+ if (new_crtc_state && new_crtc_state->mode_changed &&
+ new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+
hdcp_update_display(
adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
- new_con_state->hdcp_content_type,
- new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
+ new_con_state->hdcp_content_type, enable_encryption);
+ }
}
#endif
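The hdcp_w->hdcp_content_type[]/content_protection[] writes above cache the connector's HDCP state by connector index so it survives the MST connector being destroyed on unplug. A hedged sketch of the matching restore step, which the comment in the hunk says happens in dm_dp_mst_get_modes() (in amdgpu_dm_mst_types.c, not part of this file), might look like the following; the helper name is hypothetical and the real code may differ.

/* Hypothetical restore helper, sketching the retrieval path the comment in
 * the hunk above describes; not code from this patch.
 */
static void sketch_restore_hdcp_props(struct drm_connector *connector,
				      struct hdcp_workqueue *hdcp_work,
				      unsigned int link_index)
{
	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];

	if (!connector->state)
		return;

	/* Put back what amdgpu_dm_atomic_commit_tail() stashed before the
	 * MST connector was destroyed on unplug.
	 */
	connector->state->hdcp_content_type =
		hdcp_w->hdcp_content_type[connector->index];
	connector->state->content_protection =
		hdcp_w->content_protection[connector->index];
}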
@@ -8403,9 +8559,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
enum amdgpu_dm_pipe_crc_source cur_crc_src;
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- struct crc_rd_work *crc_rd_wrk;
-#endif
#endif
/* Count number of newly disabled CRTCs for dropping PM refs later. */
if (old_crtc_state->active && !new_crtc_state->active)
@@ -8418,9 +8571,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
update_stream_irq_parameters(dm, dm_new_crtc_state);
#ifdef CONFIG_DEBUG_FS
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- crc_rd_wrk = dm->crc_rd_wrk;
-#endif
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
cur_crc_src = acrtc->dm_irq_params.crc_src;
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
@@ -8449,10 +8599,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
acrtc->dm_irq_params.window_param.update_win = true;
+
+ /**
+ * It takes 2 frames for HW to stably generate CRC when
+ * resuming from suspend, so we set skip_frame_cnt 2.
+ */
acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
- spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
- crc_rd_wrk->crtc = crtc;
- spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#endif
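The skip_frame_cnt = 2 written above is consumed elsewhere, in the secure-display CRC handling rather than in this hunk. Purely as an illustration of the intent described by the new comment, a per-frame consumer might look like this; the surrounding handler and exact types are assumptions, not code from the patch.

/* Illustrative only: a frame-by-frame consumer of window_param.skip_frame_cnt,
 * skipping the first frames after resume until the HW CRC output is stable.
 */
static bool sketch_crc_frame_ready(struct dm_irq_params *irq_params)
{
	if (irq_params->window_param.skip_frame_cnt) {
		irq_params->window_param.skip_frame_cnt--;
		return false;	/* CRC not trustworthy yet */
	}

	return true;		/* safe to read and forward the CRC */
}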
@@ -8675,15 +8827,22 @@ static void get_freesync_config_for_crtc(
struct drm_display_mode *mode = &new_crtc_state->base.mode;
int vrefresh = drm_mode_vrefresh(mode);
bool fs_vid_mode = false;
+ bool drr_active = false;
new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
vrefresh >= aconnector->min_vfreq &&
vrefresh <= aconnector->max_vfreq;
- if (new_crtc_state->vrr_supported) {
+ drr_active = new_crtc_state->vrr_supported &&
+ new_crtc_state->freesync_config.state != VRR_STATE_DISABLED &&
+ new_crtc_state->freesync_config.state != VRR_STATE_INACTIVE &&
+ new_crtc_state->freesync_config.state != VRR_STATE_UNSUPPORTED;
+
+ if (drr_active)
new_crtc_state->stream->ignore_msa_timing_param = true;
- fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
+ if (new_crtc_state->vrr_supported) {
+ fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
config.vsif_supported = true;
@@ -8743,7 +8902,7 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
}
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
- uint64_t num, den, res;
+ u64 num, den, res;
struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
@@ -8846,7 +9005,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
* TODO: Refactor this function to allow this check to work
* in all conditions.
*/
- if (dm_new_crtc_state->stream &&
+ if (amdgpu_freesync_vid_mode &&
+ dm_new_crtc_state->stream &&
is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
goto skip_modeset;
@@ -8881,7 +9041,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (!dm_old_crtc_state->stream)
goto skip_modeset;
- if (dm_new_crtc_state->stream &&
+ if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
is_timing_unchanged_for_freesync(new_crtc_state,
old_crtc_state)) {
new_crtc_state->mode_changed = false;
@@ -8893,7 +9053,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
set_freesync_fixed_config(dm_new_crtc_state);
goto skip_modeset;
- } else if (aconnector &&
+ } else if (amdgpu_freesync_vid_mode && aconnector &&
is_freesync_video_mode(&new_crtc_state->mode,
aconnector)) {
struct drm_display_mode *high_mode;
@@ -9879,7 +10039,7 @@ fail:
static bool is_dp_capable_without_timing_msa(struct dc *dc,
struct amdgpu_dm_connector *amdgpu_dm_connector)
{
- uint8_t dpcd_data;
+ u8 dpcd_data;
bool capable = false;
if (amdgpu_dm_connector->dc_link &&
@@ -9898,7 +10058,7 @@ static bool is_dp_capable_without_timing_msa(struct dc *dc,
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
unsigned int offset,
unsigned int total_length,
- uint8_t *data,
+ u8 *data,
unsigned int length,
struct amdgpu_hdmi_vsdb_info *vsdb)
{
@@ -9953,7 +10113,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
}
static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
- uint8_t *edid_ext, int len,
+ u8 *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
int i;
@@ -9994,7 +10154,7 @@ static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
}
static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
- uint8_t *edid_ext, int len,
+ u8 *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
int i;
@@ -10010,7 +10170,7 @@ static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
}
static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
- uint8_t *edid_ext, int len,
+ u8 *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
@@ -10024,7 +10184,7 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
- uint8_t *edid_ext = NULL;
+ u8 *edid_ext = NULL;
int i;
bool valid_vsdb_found = false;
@@ -10200,7 +10360,7 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
}
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
- uint32_t value, const char *func_name)
+ u32 value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
if (address == 0) {
@@ -10215,7 +10375,7 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
const char *func_name)
{
- uint32_t value;
+ u32 value;
#ifdef DM_CHECK_ADDR_0
if (address == 0) {
DC_ERR("invalid register read; address = 0\n");