Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/Makefile                |    4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c             | 2494
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h             |  117
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c       |  858
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c     |  209
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h     |   36
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c         |  565
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h         |   56
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c        |  100
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h        |    2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c     |  183
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h     |    1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c        |  136
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h        |    1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c     |   65
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c         |   84
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h         |    7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h  |    5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c   |  234
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h   |    3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c       |  142
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h       |    7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c      |    2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c         |   54
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h         |    4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c      |  178
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c      |    5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h      |    1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c    |    5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h       |   27
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c          |    4
31 files changed, 4455 insertions(+), 1134 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index ab2a97e354da..8e949fe77312 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -38,7 +38,9 @@ AMDGPUDM = \
amdgpu_dm_pp_smu.o \
amdgpu_dm_psr.o \
amdgpu_dm_replay.o \
- amdgpu_dm_wb.o
+ amdgpu_dm_quirks.o \
+ amdgpu_dm_wb.o \
+ amdgpu_dm_colorop.o
ifdef CONFIG_DRM_AMD_DC_FP
AMDGPUDM += dc_fpu.o
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f0a6816709ca..740711ac1037 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -39,13 +40,11 @@
#include "dc/dc_stat.h"
#include "dc/dc_state.h"
#include "amdgpu_dm_trace.h"
-#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"
-#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
@@ -56,7 +55,6 @@
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_dm_wb.h"
-#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
#include "amd_shared.h"
@@ -80,9 +78,9 @@
#include <linux/power_supply.h>
#include <linux/firmware.h>
#include <linux/component.h>
-#include <linux/dmi.h>
#include <linux/sort.h>
+#include <drm/drm_privacy_screen_consumer.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
@@ -93,26 +91,21 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
+#include <drm/drm_utils.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <media/cec-notifier.h>
#include <acpi/video.h>
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-#include "dcn/dcn_1_0_offset.h"
-#include "dcn/dcn_1_0_sh_mask.h"
-#include "soc15_hw_ip.h"
-#include "soc15_common.h"
-#include "vega10_ip_offset.h"
-
-#include "gc/gc_11_0_0_offset.h"
-#include "gc/gc_11_0_0_sh_mask.h"
-
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
+static_assert(AMDGPU_DMUB_NOTIFICATION_MAX == DMUB_NOTIFICATION_MAX, "AMDGPU_DMUB_NOTIFICATION_MAX mismatch");
+
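The static_assert() above pins two independently maintained constants together: if the DMUB firmware header grows DMUB_NOTIFICATION_MAX without a matching bump of AMDGPU_DMUB_NOTIFICATION_MAX, the build fails instead of the per-type callback tables being indexed out of bounds at runtime. The same idiom guards any table that must track an enum; a minimal illustrative sketch (names not from this driver):

	/* Build fails if the name table falls behind the enum. */
	enum fw_evt { FW_EVT_A, FW_EVT_B, FW_EVT_MAX };
	static const char * const fw_evt_name[] = { "A", "B" };
	static_assert(ARRAY_SIZE(fw_evt_name) == FW_EVT_MAX,
		      "fw_evt_name out of sync with enum fw_evt");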
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
@@ -153,6 +146,9 @@ MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
+#define FIRMWARE_DCN_36_DMUB "amdgpu/dcn_3_6_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_36_DMUB);
+
#define FIRMWARE_DCN_401_DMUB "amdgpu/dcn_4_0_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB);
@@ -177,6 +173,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state);
+static struct amdgpu_i2c_adapter *
+create_i2c(struct ddc_service *ddc_service, bool oem);
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
@@ -235,6 +233,7 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state);
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
static int amdgpu_dm_atomic_check(struct drm_device *dev,
@@ -243,6 +242,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+ int bl_idx,
+ u32 user_brightness);
+
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
struct drm_crtc_state *new_crtc_state);
@@ -269,7 +272,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
acrtc = adev->mode_info.crtcs[crtc];
if (!acrtc->dm_irq_params.stream) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
crtc);
return 0;
}
@@ -290,7 +293,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
acrtc = adev->mode_info.crtcs[crtc];
if (!acrtc->dm_irq_params.stream) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
crtc);
return 0;
}
@@ -314,7 +317,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
return 0;
}
-static bool dm_is_idle(void *handle)
+static bool dm_is_idle(struct amdgpu_ip_block *ip_block)
{
/* XXX todo */
return true;
@@ -361,6 +364,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
struct dm_crtc_state *new_state)
{
+ if (new_state->stream->adjust.timing_adjust_pending)
+ return true;
if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
return true;
else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
@@ -413,8 +418,7 @@ static inline bool update_planes_and_stream_adapter(struct dc *dc,
/*
* Previous frame finished and HW is ready for optimization.
*/
- if (update_type == UPDATE_TYPE_FAST)
- dc_post_update_surfaces_to_stream(dc);
+ dc_post_update_surfaces_to_stream(dc);
return dc_update_planes_and_stream(dc,
array_of_surface_update,
@@ -527,6 +531,50 @@ static void dm_pflip_high_irq(void *interrupt_params)
amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
}
+static void dm_handle_vmin_vmax_update(struct work_struct *offload_work)
+{
+ struct vupdate_offload_work *work = container_of(offload_work, struct vupdate_offload_work, work);
+ struct amdgpu_device *adev = work->adev;
+ struct dc_stream_state *stream = work->stream;
+ struct dc_crtc_timing_adjust *adjust = work->adjust;
+
+ mutex_lock(&adev->dm.dc_lock);
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, stream, adjust);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ dc_stream_release(stream);
+ kfree(work->adjust);
+ kfree(work);
+}
+
+static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
+ struct dc_stream_state *stream,
+ struct dc_crtc_timing_adjust *adjust)
+{
+ struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT);
+ if (!offload_work) {
+ drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
+ return;
+ }
+
+ struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT);
+ if (!adjust_copy) {
+ drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
+ kfree(offload_work);
+ return;
+ }
+
+ dc_stream_retain(stream);
+ memcpy(adjust_copy, adjust, sizeof(*adjust_copy));
+
+ INIT_WORK(&offload_work->work, dm_handle_vmin_vmax_update);
+ offload_work->adev = adev;
+ offload_work->stream = stream;
+ offload_work->adjust = adjust_copy;
+
+ queue_work(system_wq, &offload_work->work);
+}
+
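The pair of helpers above is the standard IRQ-to-workqueue deferral: dc_stream_adjust_vmin_vmax() takes dm.dc_lock, a sleeping mutex, so it cannot run in the high-IRQ handlers that previously called it under event_lock. Reduced to a sketch with hypothetical names (my_payload, my_slow_update), the pattern looks like:

	struct my_payload { int vmin, vmax; };
	static DEFINE_MUTEX(slow_lock);

	static void my_slow_update(const struct my_payload *p) { /* e.g. reprogram timing */ }

	struct my_offload_work {
		struct work_struct work;
		struct my_payload payload;	/* snapshot, not a reference */
	};

	static void my_work_handler(struct work_struct *w)
	{
		struct my_offload_work *mw =
			container_of(w, struct my_offload_work, work);

		mutex_lock(&slow_lock);		/* sleeping is fine here */
		my_slow_update(&mw->payload);
		mutex_unlock(&slow_lock);
		kfree(mw);			/* work item owns itself */
	}

	static void my_schedule_from_irq(const struct my_payload *p)
	{
		/* GFP_NOWAIT: caller may hold a spinlock in IRQ context */
		struct my_offload_work *mw = kzalloc(sizeof(*mw), GFP_NOWAIT);

		if (!mw)
			return;			/* best effort: drop the update */
		mw->payload = *p;
		INIT_WORK(&mw->work, my_work_handler);
		queue_work(system_wq, &mw->work);
	}

One wrinkle the sketch omits: schedule_dc_vmin_vmax() also takes a reference on the stream (dc_stream_retain()) before queuing and drops it in the handler, because the work item outlives the interrupt and must keep the refcounted stream alive.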
static void dm_vupdate_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
@@ -564,22 +612,27 @@ static void dm_vupdate_high_irq(void *interrupt_params)
* page-flip completion events that have been queued to us
* if a pageflip happened inside front-porch.
*/
- if (vrr_active) {
+ if (vrr_active && acrtc->dm_irq_params.stream) {
+ bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+ bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+ bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state
+ == VRR_STATE_ACTIVE_VARIABLE;
+
amdgpu_dm_crtc_handle_vblank(acrtc);
/* BTR processing for pre-DCE12 ASICs */
- if (acrtc->dm_irq_params.stream &&
- adev->family < AMDGPU_FAMILY_AI) {
+ if (adev->family < AMDGPU_FAMILY_AI) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
mod_freesync_handle_v_update(
adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(
- adev->dm.dc,
- acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+ if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+ schedule_dc_vmin_vmax(adev,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
}
@@ -662,15 +715,20 @@ static void dm_crtc_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (acrtc->dm_irq_params.stream &&
- acrtc->dm_irq_params.vrr_params.supported &&
- acrtc->dm_irq_params.freesync_config.state ==
- VRR_STATE_ACTIVE_VARIABLE) {
+ acrtc->dm_irq_params.vrr_params.supported) {
+ bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+ bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+ bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
+
mod_freesync_handle_v_update(adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+ /* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */
+ if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+ schedule_dc_vmin_vmax(adev, acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
}
/*
@@ -738,6 +796,29 @@ static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
complete(&adev->dm.dmub_aux_transfer_done);
}
+static void dmub_aux_fused_io_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
+{
+ if (!adev || !notify) {
+ ASSERT(false);
+ return;
+ }
+
+ const struct dmub_cmd_fused_request *req = &notify->fused_request;
+ const uint8_t ddc_line = req->u.aux.ddc_line;
+
+ if (ddc_line >= ARRAY_SIZE(adev->dm.fused_io)) {
+ ASSERT(false);
+ return;
+ }
+
+ struct fused_io_sync *sync = &adev->dm.fused_io[ddc_line];
+
+ static_assert(sizeof(*req) <= sizeof(sync->reply_data), "Size mismatch");
+ memcpy(sync->reply_data, req, sizeof(*req));
+ complete(&sync->replied);
+}
+
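This callback is the producer half of a request/reply handshake keyed by DDC line: it copies the reply into per-line storage and signals a completion. The consumer half is not in this hunk; a sketch of what it presumably looks like, with a hypothetical transport helper (my_send_fused_request) and an arbitrary 50 ms timeout:

	static int my_fused_io_request(struct amdgpu_device *adev, u8 ddc_line,
				       const struct dmub_cmd_fused_request *req,
				       struct dmub_cmd_fused_request *reply)
	{
		struct fused_io_sync *sync = &adev->dm.fused_io[ddc_line];

		reinit_completion(&sync->replied);	/* arm before sending */

		if (!my_send_fused_request(adev, req))	/* assumed helper */
			return -EIO;

		/* dmub_aux_fused_io_callback() calls complete() on reply */
		if (!wait_for_completion_timeout(&sync->replied,
						 msecs_to_jiffies(50)))
			return -ETIMEDOUT;

		memcpy(reply, sync->reply_data, sizeof(*reply));
		return 0;
	}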
/**
* dmub_hpd_callback - DMUB HPD interrupt processing callback.
* @adev: amdgpu_device pointer
@@ -761,18 +842,18 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
return;
if (notify == NULL) {
- DRM_ERROR("DMUB HPD callback notification was NULL");
+ drm_err(adev_to_drm(adev), "DMUB HPD callback notification was NULL");
return;
}
if (notify->link_index > adev->dm.dc->link_count) {
- DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
+ drm_err(adev_to_drm(adev), "DMUB HPD index (%u)is abnormal", notify->link_index);
return;
}
/* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
- DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n");
+ drm_info(adev_to_drm(adev), "Skip DMUB HPD IRQ callback in suspend/resume\n");
return;
}
@@ -789,11 +870,11 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
aconnector = to_amdgpu_dm_connector(connector);
if (link && aconnector->dc_link == link) {
if (notify->type == DMUB_NOTIFICATION_HPD)
- DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+ drm_info(adev_to_drm(adev), "DMUB HPD IRQ callback: link_index=%u\n", link_index);
else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
- DRM_INFO("DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
+ drm_info(adev_to_drm(adev), "DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
else
- DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
+ drm_warn(adev_to_drm(adev), "DMUB Unknown HPD callback type %d, link_index=%u\n",
notify->type, link_index);
hpd_aconnector = aconnector;
@@ -805,7 +886,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
if (hpd_aconnector) {
if (notify->type == DMUB_NOTIFICATION_HPD) {
if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
- DRM_WARN("DMUB reported hpd status unchanged. link_index=%u\n", link_index);
+ drm_warn(adev_to_drm(adev), "DMUB reported hpd status unchanged. link_index=%u\n", link_index);
handle_hpd_irq_helper(hpd_aconnector);
} else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
handle_hpd_rx_irq(hpd_aconnector);
@@ -824,7 +905,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
struct dmub_notification *notify)
{
- DRM_DEBUG_DRIVER("DMUB HPD SENSE callback.\n");
+ drm_dbg_driver(adev_to_drm(adev), "DMUB HPD SENSE callback.\n");
}
/**
@@ -860,7 +941,7 @@ static void dm_handle_hpd_work(struct work_struct *work)
dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
if (!dmub_hpd_wrk->dmub_notify) {
- DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
+ drm_err(adev_to_drm(dmub_hpd_wrk->adev), "dmub_hpd_wrk dmub_notify is NULL");
return;
}
@@ -874,6 +955,30 @@ static void dm_handle_hpd_work(struct work_struct *work)
}
+static const char *dmub_notification_type_str(enum dmub_notification_type e)
+{
+ switch (e) {
+ case DMUB_NOTIFICATION_NO_DATA:
+ return "NO_DATA";
+ case DMUB_NOTIFICATION_AUX_REPLY:
+ return "AUX_REPLY";
+ case DMUB_NOTIFICATION_HPD:
+ return "HPD";
+ case DMUB_NOTIFICATION_HPD_IRQ:
+ return "HPD_IRQ";
+ case DMUB_NOTIFICATION_SET_CONFIG_REPLY:
+ return "SET_CONFIG_REPLY";
+ case DMUB_NOTIFICATION_DPIA_NOTIFICATION:
+ return "DPIA_NOTIFICATION";
+ case DMUB_NOTIFICATION_HPD_SENSE_NOTIFY:
+ return "HPD_SENSE_NOTIFY";
+ case DMUB_NOTIFICATION_FUSED_IO:
+ return "FUSED_IO";
+ default:
+ return "<unknown>";
+ }
+}
+
#define DMUB_TRACE_MAX_READ 64
/**
* dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
@@ -891,22 +996,13 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct dmcub_trace_buf_entry entry = { 0 };
u32 count = 0;
struct dmub_hpd_work *dmub_hpd_wrk;
- static const char *const event_type[] = {
- "NO_DATA",
- "AUX_REPLY",
- "HPD",
- "HPD_IRQ",
- "SET_CONFIGC_REPLY",
- "DPIA_NOTIFICATION",
- "HPD_SENSE_NOTIFY",
- };
do {
if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
entry.param0, entry.param1);
- DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+ drm_dbg_driver(adev_to_drm(adev), "trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
entry.trace_code, entry.tick_count, entry.param0, entry.param1);
} else
break;
@@ -916,7 +1012,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
} while (count <= DMUB_TRACE_MAX_READ);
if (count > DMUB_TRACE_MAX_READ)
- DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
+ drm_dbg_driver(adev_to_drm(adev), "Warning : count > DMUB_TRACE_MAX_READ");
if (dc_enable_dmub_notifications(adev->dm.dc) &&
irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
@@ -924,25 +1020,25 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
do {
dc_stat_get_dmub_notification(adev->dm.dc, &notify);
if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
- DRM_ERROR("DM: notify type %d invalid!", notify.type);
+ drm_err(adev_to_drm(adev), "DM: notify type %d invalid!", notify.type);
continue;
}
if (!dm->dmub_callback[notify.type]) {
- DRM_WARN("DMUB notification skipped due to no handler: type=%s\n",
- event_type[notify.type]);
+ drm_warn(adev_to_drm(adev), "DMUB notification skipped due to no handler: type=%s\n",
+ dmub_notification_type_str(notify.type));
continue;
}
if (dm->dmub_thread_offload[notify.type] == true) {
dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
if (!dmub_hpd_wrk) {
- DRM_ERROR("Failed to allocate dmub_hpd_wrk");
+ drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk");
return;
}
dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
GFP_ATOMIC);
if (!dmub_hpd_wrk->dmub_notify) {
kfree(dmub_hpd_wrk);
- DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
+ drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk->dmub_notify");
return;
}
INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
@@ -955,13 +1051,13 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
}
}
-static int dm_set_clockgating_state(void *handle,
+static int dm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dm_set_powergating_state(void *handle,
+static int dm_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1000,10 +1096,10 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
&compressor->gpu_addr, &compressor->cpu_addr);
if (r)
- DRM_ERROR("DM: Failed to initialize FBC\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize FBC\n");
else {
adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
- DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
+ drm_info(adev_to_drm(adev), "DM: FBC alloc %lu\n", max_size*4);
}
}
@@ -1036,8 +1132,10 @@ static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
continue;
*enabled = true;
+ mutex_lock(&connector->eld_mutex);
ret = drm_eld_size(connector->eld);
memcpy(buf, connector->eld, min(max_bytes, ret));
+ mutex_unlock(&connector->eld_mutex);
break;
}
@@ -1166,13 +1264,13 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
return 0;
if (!fb_info) {
- DRM_ERROR("No framebuffer info for DMUB service.\n");
+ drm_err(adev_to_drm(adev), "No framebuffer info for DMUB service.\n");
return -EINVAL;
}
if (!dmub_fw) {
/* Firmware required for DMUB support. */
- DRM_ERROR("No firmware provided for DMUB.\n");
+ drm_err(adev_to_drm(adev), "No firmware provided for DMUB.\n");
return -EINVAL;
}
@@ -1182,19 +1280,19 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error checking HW support for DMUB: %d\n", status);
return -EINVAL;
}
if (!has_hw_support) {
- DRM_INFO("DMUB unsupported on ASIC\n");
+ drm_info(adev_to_drm(adev), "DMUB unsupported on ASIC\n");
return 0;
}
/* Reset DMCUB if it was previously running - before we overwrite its memory. */
status = dmub_srv_hw_reset(dmub_srv);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Error resetting DMUB HW: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Error resetting DMUB HW: %d\n", status);
hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
@@ -1263,6 +1361,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
hw_params.dpia_supported = true;
hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
@@ -1274,7 +1373,9 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
+ hw_params.lower_hbr3_phy_ssc = true;
break;
default:
break;
@@ -1282,14 +1383,14 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
status = dmub_srv_hw_init(dmub_srv, &hw_params);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error initializing DMUB HW: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error initializing DMUB HW: %d\n", status);
return -EINVAL;
}
/* Wait for firmware load to finish. */
status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
/* Init DMCU and ABM if available. */
if (dmcu && abm) {
@@ -1300,11 +1401,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
if (!adev->dm.dc->ctx->dmub_srv)
adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
if (!adev->dm.dc->ctx->dmub_srv) {
- DRM_ERROR("Couldn't allocate DC DMUB server!\n");
+ drm_err(adev_to_drm(adev), "Couldn't allocate DC DMUB server!\n");
return -ENOMEM;
}
- DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
+ drm_info(adev_to_drm(adev), "DMUB hardware initialized: version=0x%08X\n",
adev->dm.dmcub_fw_version);
/* Keeping sanity checks off if
@@ -1347,18 +1448,18 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
status = dmub_srv_is_hw_init(dmub_srv, &init);
if (status != DMUB_STATUS_OK)
- DRM_WARN("DMUB hardware init check failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "DMUB hardware init check failed: %d\n", status);
if (status == DMUB_STATUS_OK && init) {
/* Wait for firmware load to finish. */
status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
} else {
/* Perform the full hardware initialization. */
r = dm_dmub_hw_init(adev);
if (r)
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
}
}
@@ -1468,18 +1569,18 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
aconnector = offload_work->offload_wq->aconnector;
+ adev = offload_work->adev;
if (!aconnector) {
- DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
+ drm_err(adev_to_drm(adev), "Can't retrieve aconnector in hpd_rx_irq_offload_work");
goto skip;
}
- adev = drm_to_adev(aconnector->base.dev);
dc_link = aconnector->dc_link;
mutex_lock(&aconnector->hpd_lock);
if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
mutex_unlock(&aconnector->hpd_lock);
if (new_connection_type == dc_connection_none)
@@ -1548,8 +1649,9 @@ skip:
}
-static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct amdgpu_device *adev)
{
+ struct dc *dc = adev->dm.dc;
int max_caps = dc->caps.max_links;
int i = 0;
struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
@@ -1565,7 +1667,7 @@ static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct
create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
if (hpd_rx_offload_wq[i].wq == NULL) {
- DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
+ drm_err(adev_to_drm(adev), "create amdgpu_dm_hpd_rx_offload_wq fail!");
goto out_err;
}
@@ -1614,77 +1716,6 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
return false;
}
-static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
- },
- },
- {}
- /* TODO: refactor this from a fixed table to a dynamic option */
-};
-
-static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
-{
- const struct dmi_system_id *dmi_id;
-
- dm->aux_hpd_discon_quirk = false;
-
- dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
- if (dmi_id) {
- dm->aux_hpd_discon_quirk = true;
- DRM_INFO("aux_hpd_discon_quirk attached\n");
- }
-}
void*
dm_allocate_gpu_mem(
@@ -1771,10 +1802,11 @@ dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
return DMUB_STATUS_TIMEOUT;
}
-static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
+static void *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
{
- struct dml2_soc_bb *bb;
+ void *bb;
long long addr;
+ unsigned int bb_size;
int i = 0;
uint16_t chunk;
enum dmub_gpint_command send_addrs[] = {
@@ -1787,6 +1819,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(4, 0, 1):
+ bb_size = sizeof(struct dml2_soc_bb);
break;
default:
return NULL;
@@ -1794,7 +1827,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
bb = dm_allocate_gpu_mem(adev,
DC_MEM_ALLOC_TYPE_GART,
- sizeof(struct dml2_soc_bb),
+ bb_size,
&addr);
if (!bb)
return NULL;
@@ -1828,26 +1861,7 @@ static enum dmub_ips_disable_type dm_get_default_ips_mode(
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(3, 5, 0):
- /*
- * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
- * cause a hard hang. A fix exists for newer PMFW.
- *
- * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest
- * IPS state in all cases, except for s0ix and all displays off (DPMS),
- * where IPS2 is allowed.
- *
- * When checking pmfw version, use the major and minor only.
- */
- if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300)
- ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
- else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0))
- /*
- * Other ASICs with DCN35 that have residency issues with
- * IPS2 in idle.
- * We want them to use IPS2 only in display off cases.
- */
- ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
- break;
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(3, 5, 1):
ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
break;
@@ -1879,7 +1893,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
mutex_init(&adev->dm.audio_lock);
if (amdgpu_dm_irq_init(adev)) {
- DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize DM IRQ support.\n");
goto error;
}
@@ -1964,7 +1978,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_device_seamless_boot_supported(adev)) {
init_data.flags.seamless_boot_edp_requested = true;
init_data.flags.allow_seamless_boot_optimization = true;
- DRM_INFO("Seamless boot condition check passed\n");
+ drm_dbg(adev->dm.ddev, "Seamless boot requested\n");
}
init_data.flags.enable_mipi_converter_optimization = true;
@@ -1986,11 +2000,17 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.flags.disable_ips_in_vpb = 0;
+ /* DCN35 and above supports dynamic DTBCLK switch */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0))
+ init_data.flags.allow_0_dtb_clk = true;
+
/* Enable DWB for tested platforms only */
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;
retrieve_dmi_info(&adev->dm);
+ if (adev->dm.edp0_on_dp1_quirk)
+ init_data.flags.support_edp0_on_dp1 = true;
if (adev->dm.bb_from_dmub)
init_data.bb_from_dmub = adev->dm.bb_from_dmub;
@@ -2001,10 +2021,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.dc = dc_create(&init_data);
if (adev->dm.dc) {
- DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
+ drm_info(adev_to_drm(adev), "Display Core v%s initialized on %s\n", DC_VER,
dce_version_to_string(adev->dm.dc->ctx->dce_version));
} else {
- DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
+ drm_info(adev_to_drm(adev), "Display Core failed to initialize with v%s!\n", DC_VER);
goto error;
}
@@ -2030,30 +2050,44 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
adev->dm.dc->debug.force_subvp_mclk_switch = true;
+ if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP_FAMS) {
+ adev->dm.dc->debug.force_disable_subvp = true;
+ adev->dm.dc->debug.fams2_config.bits.enable = false;
+ }
+
if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) {
adev->dm.dc->debug.using_dml2 = true;
adev->dm.dc->debug.using_dml21 = true;
}
+ if (amdgpu_dc_debug_mask & DC_HDCP_LC_FORCE_FW_ENABLE)
+ adev->dm.dc->debug.hdcp_lc_force_fw_enable = true;
+
+ if (amdgpu_dc_debug_mask & DC_HDCP_LC_ENABLE_SW_FALLBACK)
+ adev->dm.dc->debug.hdcp_lc_enable_sw_fallback = true;
+
+ if (amdgpu_dc_debug_mask & DC_SKIP_DETECTION_LT)
+ adev->dm.dc->debug.skip_detection_link_training = true;
+
adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
adev->dm.dc->debug.ignore_cable_id = true;
if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
- DRM_INFO("DP-HDMI FRL PCON supported\n");
+ drm_info(adev_to_drm(adev), "DP-HDMI FRL PCON supported\n");
r = dm_dmub_hw_init(adev);
if (r) {
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
goto error;
}
dc_hardware_init(adev->dm.dc);
- adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+ adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
if (!adev->dm.hpd_rx_offload_wq) {
- DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n");
goto error;
}
@@ -2068,10 +2102,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
- DRM_ERROR(
- "amdgpu: failed to initialize freesync_module.\n");
+ drm_err(adev_to_drm(adev),
+ "failed to initialize freesync_module.\n");
} else
- DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
+ drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n",
adev->dm.freesync_module);
amdgpu_dm_init_color_mod();
@@ -2080,7 +2114,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.vblank_control_workqueue =
create_singlethread_workqueue("dm_vblank_control_workqueue");
if (!adev->dm.vblank_control_workqueue)
- DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize vblank_workqueue.\n");
}
if (adev->dm.dc->caps.ips_support &&
@@ -2091,9 +2125,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue)
- DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize hdcp_workqueue.\n");
else
- DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
+ drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
dc_init_callbacks(adev->dm.dc, &init_params);
}
@@ -2101,20 +2135,29 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_completion(&adev->dm.dmub_aux_transfer_done);
adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
if (!adev->dm.dmub_notify) {
- DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
+ drm_info(adev_to_drm(adev), "fail to allocate adev->dm.dmub_notify");
goto error;
}
adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
if (!adev->dm.delayed_hpd_wq) {
- DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to create hpd offload workqueue.\n");
goto error;
}
amdgpu_dm_outbox_init(adev);
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
dmub_aux_setconfig_callback, false)) {
- DRM_ERROR("amdgpu: fail to register dmub aux callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub aux callback");
+ goto error;
+ }
+
+ for (size_t i = 0; i < ARRAY_SIZE(adev->dm.fused_io); i++)
+ init_completion(&adev->dm.fused_io[i].replied);
+
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO,
+ dmub_aux_fused_io_callback, false)) {
+ drm_err(adev_to_drm(adev), "fail to register dmub fused io callback");
goto error;
}
/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
@@ -2131,8 +2174,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
if (amdgpu_dm_initialize_drm_device(adev)) {
- DRM_ERROR(
- "amdgpu: failed to initialize sw for display support.\n");
+ drm_err(adev_to_drm(adev),
+ "failed to initialize sw for display support.\n");
goto error;
}
@@ -2146,18 +2189,22 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
- DRM_ERROR(
- "amdgpu: failed to initialize sw for display support.\n");
+ drm_err(adev_to_drm(adev),
+ "failed to initialize vblank for display support.\n");
goto error;
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
- if (!adev->dm.secure_display_ctxs)
- DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+ amdgpu_dm_crtc_secure_display_create_contexts(adev);
+ if (!adev->dm.secure_display_ctx.crtc_ctx)
+ drm_err(adev_to_drm(adev), "failed to initialize secure display contexts.\n");
+
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1))
+ adev->dm.secure_display_ctx.support_mul_roi = true;
+
#endif
- DRM_DEBUG_DRIVER("KMS initialized.\n");
+ drm_dbg_driver(adev_to_drm(adev), "KMS initialized.\n");
return 0;
error:
@@ -2197,15 +2244,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
amdgpu_dm_destroy_drm_device(&adev->dm);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- if (adev->dm.secure_display_ctxs) {
+ if (adev->dm.secure_display_ctx.crtc_ctx) {
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->dm.secure_display_ctxs[i].crtc) {
- flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
- flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+ if (adev->dm.secure_display_ctx.crtc_ctx[i].crtc) {
+ flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].notify_ta_work);
+ flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].forward_roi_work);
}
}
- kfree(adev->dm.secure_display_ctxs);
- adev->dm.secure_display_ctxs = NULL;
+ kfree(adev->dm.secure_display_ctx.crtc_ctx);
+ adev->dm.secure_display_ctx.crtc_ctx = NULL;
}
#endif
if (adev->dm.hdcp_workqueue) {
@@ -2324,12 +2371,13 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
return 0;
default:
break;
}
- DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
+ drm_err(adev_to_drm(adev), "Unsupported ASIC type: 0x%X\n", adev->asic_type);
return -EINVAL;
}
@@ -2338,7 +2386,8 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
- r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, "%s", fw_name_dmcu);
+ r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name_dmcu);
if (r == -ENODEV) {
/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
@@ -2346,7 +2395,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
if (r) {
- dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
+ drm_err(adev_to_drm(adev), "amdgpu_dm: Can't validate firmware \"%s\"\n",
fw_name_dmcu);
amdgpu_ucode_release(&adev->dm.fw_dmcu);
return r;
@@ -2405,6 +2454,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_IB_MEM
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
};
int r;
@@ -2448,6 +2498,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
case IP_VERSION(3, 5, 1):
dmub_asic = DMUB_ASIC_DCN35;
break;
+ case IP_VERSION(3, 6, 0):
+ dmub_asic = DMUB_ASIC_DCN36;
+ break;
case IP_VERSION(4, 0, 1):
dmub_asic = DMUB_ASIC_DCN401;
break;
@@ -2468,7 +2521,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
- DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+ drm_info(adev_to_drm(adev), "Loading DMUB firmware via PSP: version=0x%08X\n",
adev->dm.dmcub_fw_version);
}
@@ -2477,7 +2530,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
dmub_srv = adev->dm.dmub_srv;
if (!dmub_srv) {
- DRM_ERROR("Failed to allocate DMUB service!\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate DMUB service!\n");
return -ENOMEM;
}
@@ -2490,7 +2543,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
/* Create the DMUB service. */
status = dmub_srv_create(dmub_srv, &create_params);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error creating DMUB service: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error creating DMUB service: %d\n", status);
return -EINVAL;
}
@@ -2515,7 +2568,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
&region_info);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error calculating DMUB region info: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error calculating DMUB region info: %d\n", status);
return -EINVAL;
}
@@ -2544,14 +2597,14 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
fb_info = adev->dm.dmub_fb_info;
if (!fb_info) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"Failed to allocate framebuffer info for DMUB service!\n");
return -ENOMEM;
}
status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error calculating DMUB FB info: %d\n", status);
return -EINVAL;
}
@@ -2568,7 +2621,7 @@ static int dm_sw_init(struct amdgpu_ip_block *ip_block)
adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
if (!adev->dm.cgs_device) {
- DRM_ERROR("amdgpu: failed to create cgs device.\n");
+ drm_err(adev_to_drm(adev), "failed to create cgs device.\n");
return -EINVAL;
}
@@ -2746,6 +2799,48 @@ out_fail:
mutex_unlock(&mgr->lock);
}
+void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector)
+{
+ struct cec_notifier *n = aconnector->notifier;
+
+ if (!n)
+ return;
+
+ cec_notifier_phys_addr_invalidate(n);
+}
+
+void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct cec_notifier *n = aconnector->notifier;
+
+ if (!n)
+ return;
+
+ cec_notifier_set_phys_addr(n,
+ connector->display_info.source_physical_address);
+}
+
+static void s3_handle_hdmi_cec(struct drm_device *ddev, bool suspend)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(ddev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (suspend)
+ hdmi_cec_unset_edid(aconnector);
+ else
+ hdmi_cec_set_edid(aconnector);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
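hdmi_cec_set_edid()/hdmi_cec_unset_edid() only forward the EDID-derived physical address; they assume aconnector->notifier was registered during connector init, which is outside this hunk. For reference, a sketch of a typical registration with the <media/cec-notifier.h> / <media/cec.h> API (the port-name choice here is illustrative):

	static int my_register_cec_notifier(struct amdgpu_dm_connector *aconnector)
	{
		struct drm_connector *connector = &aconnector->base;
		struct cec_connector_info conn_info;

		cec_fill_conn_info_from_drm(&conn_info, connector);
		aconnector->notifier =
			cec_notifier_conn_register(connector->dev->dev,
						   connector->name, &conn_info);
		return aconnector->notifier ? 0 : -ENOMEM;
	}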
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
@@ -2832,13 +2927,40 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
ret = amdgpu_dpm_write_watermarks_table(adev);
if (ret) {
- DRM_ERROR("Failed to update WMTABLE!\n");
+ drm_err(adev_to_drm(adev), "Failed to update WMTABLE!\n");
return ret;
}
return 0;
}
+static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct amdgpu_i2c_adapter *oem_i2c;
+ struct ddc_service *oem_ddc_service;
+ int r;
+
+ oem_ddc_service = dc_get_oem_i2c_device(adev->dm.dc);
+ if (oem_ddc_service) {
+ oem_i2c = create_i2c(oem_ddc_service, true);
+ if (!oem_i2c) {
+ drm_info(adev_to_drm(adev), "Failed to create oem i2c adapter data\n");
+ return -ENOMEM;
+ }
+
+ r = devm_i2c_add_adapter(adev->dev, &oem_i2c->base);
+ if (r) {
+ drm_info(adev_to_drm(adev), "Failed to register oem i2c\n");
+ kfree(oem_i2c);
+ return r;
+ }
+ dm->oem_i2c = oem_i2c;
+ }
+
+ return 0;
+}
+
/**
* dm_hw_init() - Initialize DC device
* @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
@@ -2870,6 +2992,10 @@ static int dm_hw_init(struct amdgpu_ip_block *ip_block)
return r;
amdgpu_dm_hpd_init(adev);
+ r = dm_oem_i2c_hw_init(adev);
+ if (r)
+ drm_info(adev_to_drm(adev), "Failed to add OEM i2c bus\n");
+
return 0;
}
@@ -2909,33 +3035,40 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
if (rc)
- DRM_WARN("Failed to %s pflip interrupts\n",
+ drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n",
enable ? "enable" : "disable");
- if (enable) {
- if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
- rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
- } else
- rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
-
- if (rc)
- DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+ if (dc_supports_vrr(adev->dm.dc->ctx->dce_version)) {
+ if (enable) {
+ if (amdgpu_dm_crtc_vrr_active(
+ to_dm_crtc_state(acrtc->base.state)))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(
+ &acrtc->base, true);
+ } else
+ rc = amdgpu_dm_crtc_set_vupdate_irq(
+ &acrtc->base, false);
+
+ if (rc)
+ drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n",
+ enable ? "en" : "dis");
+ }
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
/* During gpu-reset we disable and then enable vblank irq, so
* don't use amdgpu_irq_get/put() to avoid refcount change.
*/
if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
- DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
+ drm_warn(adev_to_drm(adev), "Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
}
}
}
+DEFINE_FREE(state_release, struct dc_state *, if (_T) dc_state_release(_T))
+
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
- struct dc_state *context = NULL;
- enum dc_status res = DC_ERROR_UNEXPECTED;
+ struct dc_state *context __free(state_release) = NULL;
int i;
struct dc_stream_state *del_streams[MAX_PIPES];
int del_streams_count = 0;
@@ -2945,7 +3078,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
context = dc_state_create_current_copy(dc);
if (context == NULL)
- goto context_alloc_fail;
+ return DC_ERROR_UNEXPECTED;
/* First remove from context all streams */
for (i = 0; i < context->stream_count; i++) {
@@ -2956,25 +3089,20 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
/* Remove all planes for removed streams and then remove the streams */
for (i = 0; i < del_streams_count; i++) {
- if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
- res = DC_FAIL_DETACH_SURFACES;
- goto fail;
- }
+ enum dc_status res;
+
+ if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context))
+ return DC_FAIL_DETACH_SURFACES;
res = dc_state_remove_stream(dc, context, del_streams[i]);
if (res != DC_OK)
- goto fail;
+ return res;
}
params.streams = context->streams;
params.stream_count = context->stream_count;
- res = dc_commit_streams(dc, &params);
-fail:
- dc_state_release(context);
-
-context_alloc_fail:
- return res;
+ return dc_commit_streams(dc, &params);
}
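DEFINE_FREE() and __free() come from <linux/cleanup.h>: the annotated variable gets a destructor that runs on every scope exit, which is why the rewritten function can return at each failure point with no unwind labels. The general shape, using a hypothetical type:

	struct my_ctx { int busy; };

	/* Teach cleanup.h how to dispose of a struct my_ctx pointer. */
	DEFINE_FREE(my_ctx_free, struct my_ctx *, if (_T) kfree(_T))

	static int my_use_ctx(void)
	{
		struct my_ctx *ctx __free(my_ctx_free) =
			kzalloc(sizeof(*ctx), GFP_KERNEL);

		if (!ctx)
			return -ENOMEM;
		if (ctx->busy)
			return -EBUSY;	/* ctx freed automatically */
		return 0;		/* and here as well */
	}

When ownership has to leave the scope instead, no_free_ptr() disarms the automatic cleanup before the pointer is handed off.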
static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
@@ -2987,13 +3115,78 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
}
}
+static int dm_cache_state(struct amdgpu_device *adev)
+{
+ int r;
+
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+ if (IS_ERR(adev->dm.cached_state)) {
+ r = PTR_ERR(adev->dm.cached_state);
+ adev->dm.cached_state = NULL;
+ }
+
+ return adev->dm.cached_state ? 0 : r;
+}
+
+static void dm_destroy_cached_state(struct amdgpu_device *adev)
+{
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct dm_plane_state *dm_new_plane_state;
+ struct drm_plane_state *new_plane_state;
+ struct dm_crtc_state *dm_new_crtc_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ int i;
+
+ if (!dm->cached_state)
+ return;
+
+ /* Force mode set in atomic commit */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+ new_crtc_state->active_changed = true;
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
+ }
+
+ /*
+ * atomic_check is expected to create the dc states. We need to release
+ * them here, since they were duplicated as part of the suspend
+ * procedure.
+ */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->stream) {
+ WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
+ dc_stream_release(dm_new_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+ }
+ dm_new_crtc_state->base.color_mgmt_changed = true;
+ }
+
+ for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ if (dm_new_plane_state->dc_state) {
+ WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
+ dc_plane_state_release(dm_new_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+ }
+ }
+
+ drm_atomic_helper_resume(ddev, dm->cached_state);
+
+ dm->cached_state = NULL;
+}
+
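dm_cache_state() and dm_destroy_cached_state() factor out state handling that was previously open-coded in dm_resume() (removed further down). Stripped of the driver-specific stream/plane release fixups shown above, the underlying helper pairing is just this sketch:

	static struct drm_atomic_state *saved;

	static int my_display_suspend(struct drm_device *dev)
	{
		saved = drm_atomic_helper_suspend(dev); /* disables CRTCs, duplicates state */
		return PTR_ERR_OR_ZERO(saved);
	}

	static int my_display_resume(struct drm_device *dev)
	{
		int ret = drm_atomic_helper_resume(dev, saved); /* re-commits and frees */

		saved = NULL;
		return ret;
	}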
static int dm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_display_manager *dm = &adev->dm;
- int ret = 0;
if (amdgpu_in_reset(adev)) {
+ enum dc_status res;
+
mutex_lock(&dm->dc_lock);
dc_allow_idle_optimizations(adev->dm.dc, false);
@@ -3003,19 +3196,27 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block)
if (dm->cached_dc_state)
dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
- amdgpu_dm_commit_zero_streams(dm->dc);
+ res = amdgpu_dm_commit_zero_streams(dm->dc);
+ if (res != DC_OK) {
+ drm_err(adev_to_drm(adev), "Failed to commit zero streams: %d\n", res);
+ return -EINVAL;
+ }
amdgpu_dm_irq_suspend(adev);
hpd_rx_irq_work_suspend(dm);
- return ret;
+ return 0;
}
- WARN_ON(adev->dm.cached_state);
- adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
- if (IS_ERR(adev->dm.cached_state))
- return PTR_ERR(adev->dm.cached_state);
+ if (!adev->dm.cached_state) {
+ int r = dm_cache_state(adev);
+
+ if (r)
+ return r;
+ }
+
+ s3_handle_hdmi_cec(adev_to_drm(adev), true);
s3_handle_mst(adev_to_drm(adev), true);
@@ -3144,36 +3345,112 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
struct dc_scaling_info scaling_infos[MAX_SURFACES];
struct dc_flip_addrs flip_addrs[MAX_SURFACES];
struct dc_stream_update stream_update;
- } *bundle;
+ } *bundle __free(kfree);
int k, m;
bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
if (!bundle) {
drm_err(dm->ddev, "Failed to allocate update bundle\n");
- goto cleanup;
+ return;
}
for (k = 0; k < dc_state->stream_count; k++) {
bundle->stream_update.stream = dc_state->streams[k];
- for (m = 0; m < dc_state->stream_status->plane_count; m++) {
+ for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
bundle->surface_updates[m].surface =
- dc_state->stream_status->plane_states[m];
+ dc_state->stream_status[k].plane_states[m];
bundle->surface_updates[m].surface->force_full_update =
true;
}
update_planes_and_stream_adapter(dm->dc,
UPDATE_TYPE_FULL,
- dc_state->stream_status->plane_count,
+ dc_state->stream_status[k].plane_count,
dc_state->streams[k],
&bundle->stream_update,
bundle->surface_updates);
}
+}
-cleanup:
- kfree(bundle);
+static void apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev,
+ struct dc_sink *sink)
+{
+ struct dc_panel_patch *ppatch = NULL;
+
+ if (!sink)
+ return;
+
+ ppatch = &sink->edid_caps.panel_patch;
+ if (ppatch->wait_after_dpcd_poweroff_ms) {
+ msleep(ppatch->wait_after_dpcd_poweroff_ms);
+ drm_dbg_driver(adev_to_drm(adev),
+ "%s: adding a %ds delay as w/a for panel\n",
+ __func__,
+ ppatch->wait_after_dpcd_poweroff_ms / 1000);
+ }
+}
+
+/**
+ * amdgpu_dm_dump_links_and_sinks - Debug dump of all DC links and their sinks
+ * @adev: amdgpu device pointer
+ *
+ * Iterates through all DC links and dumps information about local and remote
+ * (MST) sinks. Should be called after connector detection is complete to see
+ * the final state of all links.
+ */
+static void amdgpu_dm_dump_links_and_sinks(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct drm_device *dev = adev_to_drm(adev);
+ int li;
+
+ if (!dc)
+ return;
+
+ for (li = 0; li < dc->link_count; li++) {
+ struct dc_link *l = dc->links[li];
+ const char *name = NULL;
+ int rs;
+
+ if (!l)
+ continue;
+ if (l->local_sink && l->local_sink->edid_caps.display_name[0])
+ name = l->local_sink->edid_caps.display_name;
+ else
+ name = "n/a";
+
+ drm_dbg_kms(dev,
+ "LINK_DUMP[%d]: local_sink=%p type=%d sink_signal=%d sink_count=%u edid_name=%s mst_capable=%d mst_alloc_streams=%d\n",
+ li,
+ l->local_sink,
+ l->type,
+ l->local_sink ? l->local_sink->sink_signal : SIGNAL_TYPE_NONE,
+ l->sink_count,
+ name,
+ l->dpcd_caps.is_mst_capable,
+ l->mst_stream_alloc_table.stream_count);
+
+ /* Dump remote (MST) sinks if any */
+ for (rs = 0; rs < l->sink_count; rs++) {
+ struct dc_sink *rsink = l->remote_sinks[rs];
+ const char *rname = NULL;
+
+ if (!rsink)
+ continue;
+ if (rsink->edid_caps.display_name[0])
+ rname = rsink->edid_caps.display_name;
+ else
+ rname = "n/a";
+ drm_dbg_kms(dev,
+ " REMOTE_SINK[%d:%d]: sink=%p signal=%d edid_name=%s\n",
+ li, rs,
+ rsink,
+ rsink->sink_signal,
+ rname);
+ }
+ }
}
static int dm_resume(struct amdgpu_ip_block *ip_block)
@@ -3184,12 +3461,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
- struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
- struct dm_crtc_state *dm_new_crtc_state;
- struct drm_plane *plane;
- struct drm_plane_state *new_plane_state;
- struct dm_plane_state *dm_new_plane_state;
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
struct dc_state *dc_state;
@@ -3221,8 +3492,10 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
r = dm_dmub_hw_init(adev);
- if (r)
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ if (r) {
+ drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
+ return r;
+ }
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -3260,6 +3533,12 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
mutex_unlock(&dm->dc_lock);
+ /* set the backlight after a reset */
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (dm->backlight_dev[i])
+ amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
+ }
+
return 0;
}
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
@@ -3289,12 +3568,15 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
*/
amdgpu_dm_irq_resume_early(adev);
+ s3_handle_hdmi_cec(ddev, false);
+
/* On resume we need to rewrite the MSTM control bits to enable MST*/
s3_handle_mst(ddev, false);
/* Do detection*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+ bool ret;
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
@@ -3311,17 +3593,20 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
if (aconnector->mst_root)
continue;
- mutex_lock(&aconnector->hpd_lock);
+ guard(mutex)(&aconnector->hpd_lock);
if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(aconnector->dc_link);
} else {
- mutex_lock(&dm->dc_lock);
+ guard(mutex)(&dm->dc_lock);
dc_exit_ips_for_hw_access(dm->dc);
- dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4);
- mutex_unlock(&dm->dc_lock);
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4);
+ if (ret) {
+ /* w/a delay for certain panels */
+ apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
+ }
}
if (aconnector->fake_enable && aconnector->dc_link->local_sink)
@@ -3331,48 +3616,15 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
amdgpu_dm_update_connector_after_detect(aconnector);
- mutex_unlock(&aconnector->hpd_lock);
}
drm_connector_list_iter_end(&iter);
- /* Force mode set in atomic commit */
- for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
- new_crtc_state->active_changed = true;
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- reset_freesync_config_for_crtc(dm_new_crtc_state);
- }
-
- /*
- * atomic_check is expected to create the dc states. We need to release
- * them here, since they were duplicated as part of the suspend
- * procedure.
- */
- for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- if (dm_new_crtc_state->stream) {
- WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
- dc_stream_release(dm_new_crtc_state->stream);
- dm_new_crtc_state->stream = NULL;
- }
- dm_new_crtc_state->base.color_mgmt_changed = true;
- }
-
- for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
- dm_new_plane_state = to_dm_plane_state(new_plane_state);
- if (dm_new_plane_state->dc_state) {
- WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
- dc_plane_state_release(dm_new_plane_state->dc_state);
- dm_new_plane_state->dc_state = NULL;
- }
- }
-
- drm_atomic_helper_resume(ddev, dm->cached_state);
-
- dm->cached_state = NULL;
+ dm_destroy_cached_state(adev);
/* Do mst topology probing after resuming cached state*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+ bool init = false;
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
@@ -3382,10 +3634,23 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
aconnector->mst_root)
continue;
- drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
+ scoped_guard(mutex, &aconnector->mst_mgr.lock) {
+ init = !aconnector->mst_mgr.mst_primary;
+ }
+ if (init)
+ dm_helpers_dp_mst_start_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link, false);
+ else
+ drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
}
drm_connector_list_iter_end(&iter);
+ /* Debug dump: list all DC links and their associated sinks after detection
+ * is complete for all connectors. This provides a comprehensive view of the
+ * final state without repeating the dump for each connector.
+ */
+ amdgpu_dm_dump_links_and_sinks(adev);
+
amdgpu_dm_irq_resume_late(adev);
amdgpu_dm_smu_write_watermarks_table(adev);
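The resume path above converts manual mutex_lock()/mutex_unlock() pairs to the scope-based helpers from <linux/cleanup.h>. A minimal sketch of the pattern, with hypothetical demo names rather than driver code:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);
	static DEFINE_MUTEX(demo_other);

	static int demo_guarded(bool fail)
	{
		guard(mutex)(&demo_lock);	/* released on every return path */

		if (fail)
			return -EINVAL;		/* no explicit mutex_unlock() needed */

		scoped_guard(mutex, &demo_other) {
			/* demo_other is held only inside this block */
		}

		return 0;			/* demo_lock released here */
	}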
@@ -3448,22 +3713,25 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
.atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
- .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+ .atomic_commit_setup = amdgpu_dm_atomic_setup_commit,
};
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
+ const struct drm_panel_backlight_quirk *panel_backlight_quirk;
struct amdgpu_dm_backlight_caps *caps;
struct drm_connector *conn_base;
struct amdgpu_device *adev;
struct drm_luminance_range_info *luminance_range;
+ struct drm_device *drm;
if (aconnector->bl_idx == -1 ||
aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
return;
conn_base = &aconnector->base;
- adev = drm_to_adev(conn_base->dev);
+ drm = conn_base->dev;
+ adev = drm_to_adev(drm);
caps = &adev->dm.backlight_caps[aconnector->bl_idx];
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
@@ -3481,24 +3749,49 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->aux_support = false;
else if (amdgpu_backlight == 1)
caps->aux_support = true;
+ if (caps->aux_support)
+ aconnector->dc_link->backlight_control_type = BACKLIGHT_CONTROL_AMD_AUX;
luminance_range = &conn_base->display_info.luminance_range;
- if (luminance_range->max_luminance) {
- caps->aux_min_input_signal = luminance_range->min_luminance;
+ if (luminance_range->max_luminance)
caps->aux_max_input_signal = luminance_range->max_luminance;
- } else {
- caps->aux_min_input_signal = 0;
+ else
caps->aux_max_input_signal = 512;
+
+ if (luminance_range->min_luminance)
+ caps->aux_min_input_signal = luminance_range->min_luminance;
+ else
+ caps->aux_min_input_signal = 1;
+
+ panel_backlight_quirk =
+ drm_get_panel_backlight_quirk(aconnector->drm_edid);
+ if (!IS_ERR_OR_NULL(panel_backlight_quirk)) {
+ if (panel_backlight_quirk->min_brightness) {
+ caps->min_input_signal =
+ panel_backlight_quirk->min_brightness - 1;
+ drm_info(drm,
+ "Applying panel backlight quirk, min_brightness: %d\n",
+ caps->min_input_signal);
+ }
+ if (panel_backlight_quirk->brightness_mask) {
+ drm_info(drm,
+ "Applying panel backlight quirk, brightness_mask: 0x%X\n",
+ panel_backlight_quirk->brightness_mask);
+ caps->brightness_mask =
+ panel_backlight_quirk->brightness_mask;
+ }
}
}
+DEFINE_FREE(sink_release, struct dc_sink *, if (_T) dc_sink_release(_T))
+
void amdgpu_dm_update_connector_after_detect(
struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
+ struct dc_sink *sink __free(sink_release) = NULL;
struct drm_device *dev = connector->dev;
- struct dc_sink *sink;
/* MST handled by drm_mst framework */
if (aconnector->mst_mgr.mst_state == true)
@@ -3520,7 +3813,7 @@ void amdgpu_dm_update_connector_after_detect(
* For S3 resume with headless use eml_sink to fake stream
* because on resume connector->sink is set to NULL
*/
- mutex_lock(&dev->mode_config.mutex);
+ guard(mutex)(&dev->mode_config.mutex);
if (sink) {
if (aconnector->dc_sink) {
@@ -3545,10 +3838,6 @@ void amdgpu_dm_update_connector_after_detect(
}
}
- mutex_unlock(&dev->mode_config.mutex);
-
- if (sink)
- dc_sink_release(sink);
return;
}
@@ -3556,10 +3845,8 @@ void amdgpu_dm_update_connector_after_detect(
* TODO: temporary guard to look for proper fix
* if this sink is MST sink, we should not do anything
*/
- if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- dc_sink_release(sink);
+ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
return;
- }
if (aconnector->dc_sink == sink) {
/*
@@ -3568,15 +3855,15 @@ void amdgpu_dm_update_connector_after_detect(
*/
drm_dbg_kms(dev, "DCHPD: connector_id=%d: dc_sink didn't change.\n",
aconnector->connector_id);
- if (sink)
- dc_sink_release(sink);
return;
}
drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
aconnector->connector_id, aconnector->dc_sink, sink);
- mutex_lock(&dev->mode_config.mutex);
+ /* When polling, DRM has already locked the mutex for us. */
+ if (!drm_kms_helper_is_poll_worker())
+ mutex_lock(&dev->mode_config.mutex);
/*
* 1. Update status of the drm connector
@@ -3596,6 +3883,7 @@ void amdgpu_dm_update_connector_after_detect(
dc_sink_retain(aconnector->dc_sink);
if (sink->dc_edid.length == 0) {
aconnector->drm_edid = NULL;
+ hdmi_cec_unset_edid(aconnector);
if (aconnector->dc_link->aux_mode) {
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
}
@@ -3605,6 +3893,7 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length);
drm_edid_connector_update(connector, aconnector->drm_edid);
+ hdmi_cec_set_edid(aconnector);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_attach(&aconnector->dm_dp_aux.aux,
connector->display_info.source_physical_address);
@@ -3621,6 +3910,7 @@ void amdgpu_dm_update_connector_after_detect(
amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid);
update_connector_ext_caps(aconnector);
} else {
+ hdmi_cec_unset_edid(aconnector);
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
amdgpu_dm_update_freesync_caps(connector, NULL);
aconnector->num_modes = 0;
@@ -3635,12 +3925,102 @@ void amdgpu_dm_update_connector_after_detect(
connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
}
- mutex_unlock(&dev->mode_config.mutex);
-
update_subconnector_property(aconnector);
- if (sink)
- dc_sink_release(sink);
+ /* When polling, the mutex will be unlocked for us by DRM. */
+ if (!drm_kms_helper_is_poll_worker())
+ mutex_unlock(&dev->mode_config.mutex);
+}
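The __free(sink_release) annotation above relies on the DEFINE_FREE() cleanup machinery: the release hook runs whenever the variable leaves scope, which is what removes the per-return dc_sink_release() calls. A minimal sketch with a hypothetical kfree()-based hook:

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	DEFINE_FREE(demo_free, void *, if (_T) kfree(_T))

	static int demo_alloc_use(size_t len)
	{
		void *buf __free(demo_free) = kzalloc(len, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		/* use buf; kfree() runs automatically on every return */
		return 0;
	}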
+
+static bool are_sinks_equal(const struct dc_sink *sink1, const struct dc_sink *sink2)
+{
+ if (!sink1 || !sink2)
+ return false;
+ if (sink1->sink_signal != sink2->sink_signal)
+ return false;
+
+ if (sink1->dc_edid.length != sink2->dc_edid.length)
+ return false;
+
+ if (memcmp(sink1->dc_edid.raw_edid, sink2->dc_edid.raw_edid,
+ sink1->dc_edid.length) != 0)
+ return false;
+ return true;
+}
+
+
+/**
+ * DOC: hdmi_hpd_debounce_work
+ *
+ * HDMI HPD debounce delay in milliseconds. When an HDMI display toggles HPD
+ * (such as during power save transitions), this delay determines how long to
+ * wait before processing the HPD event. This allows distinguishing between a
+ * physical unplug (>hdmi_hpd_debounce_delay)
+ * and a spontaneous RX HPD toggle (<hdmi_hpd_debounce_delay).
+ *
+ * If the toggle is less than this delay, the driver compares sink capabilities
+ * and permits a hotplug event if they changed.
+ *
+ * The default value of 1500ms was chosen based on experimental testing with
+ * various monitors that exhibit spontaneous HPD toggling behavior.
+ */
+static void hdmi_hpd_debounce_work(struct work_struct *work)
+{
+ struct amdgpu_dm_connector *aconnector =
+ container_of(to_delayed_work(work), struct amdgpu_dm_connector,
+ hdmi_hpd_debounce_work);
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = aconnector->dc_link->ctx->dc;
+ bool fake_reconnect = false;
+ bool reallow_idle = false;
+ bool ret = false;
+ guard(mutex)(&aconnector->hpd_lock);
+
+ /* Re-detect the display */
+ scoped_guard(mutex, &adev->dm.dc_lock) {
+ if (dc->caps.ips_support && dc->ctx->dmub_srv->idle_allowed) {
+ dc_allow_idle_optimizations(dc, false);
+ reallow_idle = true;
+ }
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ }
+
+ if (ret) {
+ /* Apply workaround delay for certain panels */
+ apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
+ /* Compare sinks to determine if this was a spontaneous HPD toggle */
+ if (are_sinks_equal(aconnector->dc_link->local_sink, aconnector->hdmi_prev_sink)) {
+ /*
+ * Sinks match - this was a spontaneous HDMI HPD toggle.
+ */
+ drm_dbg_kms(dev, "HDMI HPD: Sink unchanged after debounce, internal re-enable\n");
+ fake_reconnect = true;
+ }
+
+ /* Update connector state */
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ /* Only notify OS if sink actually changed */
+ if (!fake_reconnect && aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_hotplug_event(dev);
+ }
+
+ /* Release the cached sink reference */
+ if (aconnector->hdmi_prev_sink) {
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = NULL;
+ }
+
+ scoped_guard(mutex, &adev->dm.dc_lock) {
+ if (reallow_idle && dc->caps.ips_support)
+ dc_allow_idle_optimizations(dc, true);
+ }
}
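The scheduling side of this debounce lives in handle_hpd_irq_helper() below: the current sink is cached and the delayed work is re-armed on each toggle. A stripped-down sketch of that lifecycle; the struct and names are hypothetical, not the driver's actual wiring:

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	struct hpd_debounce_demo {
		struct delayed_work work;	/* fires after the quiet period */
		void *prev_sink;		/* cached for comparison on expiry */
		unsigned int delay_ms;
	};

	static void hpd_debounce_demo_schedule(struct hpd_debounce_demo *d,
					       void *current_sink)
	{
		d->prev_sink = current_sink;	/* real code retains a reference */
		/* mod_delayed_work() re-arms the timer, so repeated toggles
		 * inside delay_ms coalesce into one detection pass.
		 */
		mod_delayed_work(system_wq, &d->work,
				 msecs_to_jiffies(d->delay_ms));
	}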
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
@@ -3652,6 +4032,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
struct dc *dc = aconnector->dc_link->ctx->dc;
bool ret = false;
+ bool debounce_required = false;
if (adev->dm.disable_hpd_irq)
return;
@@ -3660,7 +4041,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
* In case of failure or MST no need to update connector status or notify the OS
* since (for MST case) MST does this in its own context.
*/
- mutex_lock(&aconnector->hpd_lock);
+ guard(mutex)(&aconnector->hpd_lock);
if (adev->dm.hdcp_workqueue) {
hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
@@ -3672,7 +4053,15 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
aconnector->timing_changed = false;
if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
+
+ /*
+ * Check for HDMI disconnect with debounce enabled.
+ */
+ debounce_required = (aconnector->hdmi_hpd_debounce_delay_ms > 0 &&
+ dc_is_hdmi_signal(aconnector->dc_link->connector_signal) &&
+ new_connection_type == dc_connection_none &&
+ aconnector->dc_link->local_sink != NULL);
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(aconnector->dc_link);
@@ -3683,12 +4072,41 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
drm_kms_helper_connector_hotplug_event(connector);
+ } else if (debounce_required) {
+ /*
+ * HDMI disconnect detected - schedule delayed work instead of
+ * processing immediately. This lets us coalesce spurious HPD
+ * toggles and distinguish them from genuine physical unplugs.
+ */
+ drm_dbg_kms(dev, "HDMI HPD: Disconnect detected, scheduling debounce work (%u ms)\n",
+ aconnector->hdmi_hpd_debounce_delay_ms);
+
+ /* Cache the current sink for later comparison */
+ if (aconnector->hdmi_prev_sink)
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = aconnector->dc_link->local_sink;
+ if (aconnector->hdmi_prev_sink)
+ dc_sink_retain(aconnector->hdmi_prev_sink);
+
+ /* Schedule delayed detection. */
+ if (mod_delayed_work(system_wq,
+ &aconnector->hdmi_hpd_debounce_work,
+ msecs_to_jiffies(aconnector->hdmi_hpd_debounce_delay_ms)))
+ drm_dbg_kms(dev, "HDMI HPD: Re-scheduled debounce work\n");
+
} else {
- mutex_lock(&adev->dm.dc_lock);
- dc_exit_ips_for_hw_access(dc);
- ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
- mutex_unlock(&adev->dm.dc_lock);
+
+ /* If the aconnector->hdmi_hpd_debounce_work is scheduled, exit early */
+ if (delayed_work_pending(&aconnector->hdmi_hpd_debounce_work))
+ return;
+
+ scoped_guard(mutex, &adev->dm.dc_lock) {
+ dc_exit_ips_for_hw_access(dc);
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ }
if (ret) {
+ /* w/a delay for certain panels */
+ apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
amdgpu_dm_update_connector_after_detect(aconnector);
drm_modeset_lock_all(dev);
@@ -3699,8 +4117,6 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
drm_kms_helper_connector_hotplug_event(connector);
}
}
- mutex_unlock(&aconnector->hpd_lock);
-
}
static void handle_hpd_irq(void *param)
@@ -3711,20 +4127,21 @@ static void handle_hpd_irq(void *param)
}
-static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+static void schedule_hpd_rx_offload_work(struct amdgpu_device *adev, struct hpd_rx_irq_offload_work_queue *offload_wq,
union hpd_irq_data hpd_irq_data)
{
struct hpd_rx_irq_offload_work *offload_work =
kzalloc(sizeof(*offload_work), GFP_KERNEL);
if (!offload_work) {
- DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate hpd_rx_irq_offload_work.\n");
return;
}
INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
offload_work->data = hpd_irq_data;
offload_work->offload_wq = offload_wq;
+ offload_work->adev = adev;
queue_work(offload_wq->wq, &offload_work->work);
DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
@@ -3766,7 +4183,7 @@ static void handle_hpd_rx_irq(void *param)
goto out;
if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -3788,7 +4205,7 @@ static void handle_hpd_rx_irq(void *param)
spin_unlock(&offload_wq->offload_lock);
if (!skip)
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -3805,7 +4222,7 @@ static void handle_hpd_rx_irq(void *param)
spin_unlock(&offload_wq->offload_lock);
if (!skip)
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -3815,7 +4232,7 @@ out:
if (result && !is_mst_root_connector) {
/* Downstream Port status changed. */
if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(dc_link);
@@ -3878,19 +4295,19 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ,
dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
dmub_hpd_sense_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd sense callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd sense callback");
return -EINVAL;
}
}
@@ -3911,7 +4328,7 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_HPD1 ||
int_params.irq_source > DC_IRQ_SOURCE_HPD6) {
- DRM_ERROR("Failed to register hpd irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register hpd irq!\n");
return -EINVAL;
}
@@ -3929,7 +4346,7 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_HPD1RX ||
int_params.irq_source > DC_IRQ_SOURCE_HPD6RX) {
- DRM_ERROR("Failed to register hpd rx irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register hpd rx irq!\n");
return -EINVAL;
}
@@ -3971,7 +4388,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -3982,7 +4399,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4001,7 +4418,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4012,7 +4429,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4030,7 +4447,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, client_id,
VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4072,7 +4489,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -4083,7 +4500,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4101,7 +4518,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
if (r) {
- DRM_ERROR("Failed to add vupdate irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n");
return r;
}
@@ -4112,7 +4529,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
- DRM_ERROR("Failed to register vupdate irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n");
return -EINVAL;
}
@@ -4131,7 +4548,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4142,7 +4559,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4160,7 +4577,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, client_id,
VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4210,7 +4627,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -4221,7 +4638,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4242,7 +4659,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
vrtl_int_srcid[i], &adev->vline0_irq);
if (r) {
- DRM_ERROR("Failed to add vline0 irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vline0 irq id!\n");
return r;
}
@@ -4253,7 +4670,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_DC1_VLINE0 ||
int_params.irq_source > DC_IRQ_SOURCE_DC6_VLINE0) {
- DRM_ERROR("Failed to register vline0 irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vline0 irq!\n");
return -EINVAL;
}
@@ -4281,7 +4698,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
if (r) {
- DRM_ERROR("Failed to add vupdate irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n");
return r;
}
@@ -4292,7 +4709,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
- DRM_ERROR("Failed to register vupdate irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n");
return -EINVAL;
}
@@ -4312,7 +4729,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
i++) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4323,7 +4740,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4341,7 +4758,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
&adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4363,7 +4780,7 @@ static int register_outbox_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
&adev->dmub_outbox_irq);
if (r) {
- DRM_ERROR("Failed to add outbox irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add outbox irq id!\n");
return r;
}
@@ -4540,48 +4957,40 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
int bl_idx)
{
-#if defined(CONFIG_ACPI)
- struct amdgpu_dm_backlight_caps caps;
-
- memset(&caps, 0, sizeof(caps));
+ struct amdgpu_dm_backlight_caps *caps = &dm->backlight_caps[bl_idx];
- if (dm->backlight_caps[bl_idx].caps_valid)
+ if (caps->caps_valid)
return;
- amdgpu_acpi_get_backlight_caps(&caps);
+#if defined(CONFIG_ACPI)
+ amdgpu_acpi_get_backlight_caps(caps);
/* validate the firmware value is sane */
- if (caps.caps_valid) {
- int spread = caps.max_input_signal - caps.min_input_signal;
+ if (caps->caps_valid) {
+ int spread = caps->max_input_signal - caps->min_input_signal;
- if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
- caps.min_input_signal < 0 ||
+ if (caps->max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
+ caps->min_input_signal < 0 ||
spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
spread < AMDGPU_DM_MIN_SPREAD) {
DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n",
- caps.min_input_signal, caps.max_input_signal);
- caps.caps_valid = false;
+ caps->min_input_signal, caps->max_input_signal);
+ caps->caps_valid = false;
}
}
- if (caps.caps_valid) {
- dm->backlight_caps[bl_idx].caps_valid = true;
- if (caps.aux_support)
- return;
- dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
- dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
- } else {
- dm->backlight_caps[bl_idx].min_input_signal =
- AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
- dm->backlight_caps[bl_idx].max_input_signal =
- AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ if (!caps->caps_valid) {
+ caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ caps->caps_valid = true;
}
#else
- if (dm->backlight_caps[bl_idx].aux_support)
+ if (caps->aux_support)
return;
- dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
- dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ caps->caps_valid = true;
#endif
}
@@ -4603,6 +5012,82 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
return 1;
}
+/* Rescale from [min..max] to [0..AMDGPU_MAX_BL_LEVEL] */
+static inline u32 scale_input_to_fw(int min, int max, u64 input)
+{
+ return DIV_ROUND_CLOSEST_ULL(input * AMDGPU_MAX_BL_LEVEL, max - min);
+}
+
+/* Rescale from [0..AMDGPU_MAX_BL_LEVEL] to [min..max] */
+static inline u32 scale_fw_to_input(int min, int max, u64 input)
+{
+ return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), AMDGPU_MAX_BL_LEVEL);
+}
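A worked example of the two helpers above, assuming AMDGPU_MAX_BL_LEVEL == 255 (0xFF) and a panel range of min = 12, max = 255:

	/*
	 * scale_input_to_fw(12, 255, 243)
	 *   = DIV_ROUND_CLOSEST_ULL(243 * 255, 255 - 12)
	 *   = DIV_ROUND_CLOSEST_ULL(61965, 243) = 255
	 *
	 * scale_fw_to_input(12, 255, 255)
	 *   = 12 + DIV_ROUND_CLOSEST_ULL(255 * (255 - 12), 255)
	 *   = 12 + 243 = 255
	 */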
+
+static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps,
+ unsigned int min, unsigned int max,
+ uint32_t *user_brightness)
+{
+ u32 brightness = scale_input_to_fw(min, max, *user_brightness);
+ u8 lower_signal, upper_signal, upper_lum, lower_lum, lum;
+ int left, right;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
+ return;
+
+ if (!caps->data_points)
+ return;
+
+ /*
+ * Handle the case where brightness is below the first data point:
+ * interpolate between (0,0) and (first_signal, first_lum).
+ */
+ if (brightness < caps->luminance_data[0].input_signal) {
+ lum = DIV_ROUND_CLOSEST(caps->luminance_data[0].luminance * brightness,
+ caps->luminance_data[0].input_signal);
+ goto scale;
+ }
+
+ left = 0;
+ right = caps->data_points - 1;
+ while (left <= right) {
+ int mid = left + (right - left) / 2;
+ u8 signal = caps->luminance_data[mid].input_signal;
+
+ /* Exact match found */
+ if (signal == brightness) {
+ lum = caps->luminance_data[mid].luminance;
+ goto scale;
+ }
+
+ if (signal < brightness)
+ left = mid + 1;
+ else
+ right = mid - 1;
+ }
+
+ /* clamp the index so it stays within the data points array */
+ if (left >= caps->data_points)
+ left = caps->data_points - 1;
+
+ /* At this point, left > right */
+ lower_signal = caps->luminance_data[right].input_signal;
+ upper_signal = caps->luminance_data[left].input_signal;
+ lower_lum = caps->luminance_data[right].luminance;
+ upper_lum = caps->luminance_data[left].luminance;
+
+ /* interpolate */
+ if (right == left || !lower_lum)
+ lum = upper_lum;
+ else
+ lum = lower_lum + DIV_ROUND_CLOSEST((upper_lum - lower_lum) *
+ (brightness - lower_signal),
+ upper_signal - lower_signal);
+scale:
+ *user_brightness = scale_fw_to_input(min, max,
+ DIV_ROUND_CLOSEST(lum * brightness, 101));
+}
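A worked example of the interpolation above, with hypothetical calibration points (input_signal, luminance) = (50, 30) and (100, 80) and brightness = 75 landing between them:

	/*
	 * lum = lower_lum + DIV_ROUND_CLOSEST((upper_lum - lower_lum) *
	 *                                     (brightness - lower_signal),
	 *                                     upper_signal - lower_signal)
	 *     = 30 + DIV_ROUND_CLOSEST((80 - 30) * (75 - 50), 100 - 50)
	 *     = 30 + 25 = 55
	 *
	 * The result is then folded back through scale_fw_to_input(); the
	 * divisor 101 folds the 0..100 luminance percentage into the scale.
	 */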
+
static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
uint32_t brightness)
{
@@ -4611,9 +5096,10 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
if (!get_brightness_range(caps, &min, &max))
return brightness;
- // Rescale 0..255 to min..max
- return min + DIV_ROUND_CLOSEST((max - min) * brightness,
- AMDGPU_MAX_BL_LEVEL);
+ convert_custom_brightness(caps, min, max, &brightness);
+
+ // Rescale 0..max to min..max
+ return min + DIV_ROUND_CLOSEST_ULL((u64)(max - min) * brightness, max);
}
static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
@@ -4626,8 +5112,8 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
if (brightness < min)
return 0;
- // Rescale min..max to 0..255
- return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
+ // Rescale min..max to 0..max
+ return DIV_ROUND_CLOSEST_ULL((u64)max * (brightness - min),
max - min);
}
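Round-trip sketch of the two conversions after this change, assuming min = 12 and max = 255 from get_brightness_range() and no custom curve; note the user-space scale now runs 0..max rather than the fixed 0..AMDGPU_MAX_BL_LEVEL:

	/*
	 * convert_brightness_from_user(caps, 255)
	 *   = 12 + DIV_ROUND_CLOSEST_ULL((255 - 12) * 255, 255) = 255
	 *
	 * convert_brightness_to_user(caps, 255)
	 *   = DIV_ROUND_CLOSEST_ULL(255 * (255 - 12), 255 - 12) = 255
	 */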
@@ -4635,21 +5121,40 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
int bl_idx,
u32 user_brightness)
{
- struct amdgpu_dm_backlight_caps caps;
+ struct amdgpu_dm_backlight_caps *caps;
struct dc_link *link;
u32 brightness;
bool rc, reallow_idle = false;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dm->ddev->mode_config.connector_list, head) {
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if (aconnector->bl_idx != bl_idx)
+ continue;
+
+ /* if connector is off, save the brightness for next time it's on */
+ if (!aconnector->base.encoder) {
+ dm->brightness[bl_idx] = user_brightness;
+ dm->actual_brightness[bl_idx] = 0;
+ return;
+ }
+ }
amdgpu_dm_update_backlight_caps(dm, bl_idx);
- caps = dm->backlight_caps[bl_idx];
+ caps = &dm->backlight_caps[bl_idx];
dm->brightness[bl_idx] = user_brightness;
/* update scratch register */
if (bl_idx == 0)
amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
- brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
+ brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]);
link = (struct dc_link *)dm->backlight_link[bl_idx];
+ /* Apply brightness quirk */
+ if (caps->brightness_mask)
+ brightness |= caps->brightness_mask;
+
/* Change brightness based on AUX property */
mutex_lock(&dm->dc_lock);
if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) {
@@ -4657,7 +5162,15 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
reallow_idle = true;
}
- if (caps.aux_support) {
+ if (trace_amdgpu_dm_brightness_enabled()) {
+ trace_amdgpu_dm_brightness(__builtin_return_address(0),
+ user_brightness,
+ brightness,
+ caps->aux_support,
+ power_supply_is_system_supplied() > 0);
+ }
+
+ if (caps->aux_support) {
rc = dc_link_set_backlight_level_nits(link, true, brightness,
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
if (!rc)
@@ -4710,10 +5223,8 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
if (caps.aux_support) {
u32 avg, peak;
- bool rc;
- rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
- if (!rc)
+ if (!dc_link_get_backlight_level_nits(link, &avg, &peak))
return dm->brightness[bl_idx];
return convert_brightness_to_user(&caps, avg);
}
@@ -4752,8 +5263,9 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
struct drm_device *drm = aconnector->base.dev;
struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
struct backlight_properties props = { 0 };
- struct amdgpu_dm_backlight_caps caps = { 0 };
+ struct amdgpu_dm_backlight_caps *caps;
char bl_name[16];
+ int min, max;
if (aconnector->bl_idx == -1)
return;
@@ -4765,16 +5277,24 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
return;
}
- amdgpu_acpi_get_backlight_caps(&caps);
- if (caps.caps_valid) {
+ caps = &dm->backlight_caps[aconnector->bl_idx];
+ if (get_brightness_range(caps, &min, &max)) {
if (power_supply_is_system_supplied() > 0)
- props.brightness = caps.ac_level;
+ props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->ac_level, 100);
else
- props.brightness = caps.dc_level;
+ props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->dc_level, 100);
+ /* min is zero, so max needs to be adjusted */
+ props.max_brightness = max - min;
+ drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max,
+ caps->ac_level, caps->dc_level);
} else
- props.brightness = AMDGPU_MAX_BL_LEVEL;
+ props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL;
- props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+ if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) {
+ drm_info(drm, "Using custom brightness curve\n");
+ props.scale = BACKLIGHT_SCALE_NON_LINEAR;
+ } else
+ props.scale = BACKLIGHT_SCALE_LINEAR;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
@@ -4783,12 +5303,13 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
dm->backlight_dev[aconnector->bl_idx] =
backlight_device_register(bl_name, aconnector->base.kdev, dm,
&amdgpu_dm_backlight_ops, &props);
+ dm->brightness[aconnector->bl_idx] = props.brightness;
if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
- DRM_ERROR("DM: Backlight registration failed!\n");
+ drm_err(drm, "DM: Backlight registration failed!\n");
dm->backlight_dev[aconnector->bl_idx] = NULL;
} else
- DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
+ drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name);
}
static int initialize_plane(struct amdgpu_display_manager *dm,
@@ -4802,7 +5323,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
if (!plane) {
- DRM_ERROR("KMS: Failed to allocate plane\n");
+ drm_err(adev_to_drm(dm->adev), "KMS: Failed to allocate plane\n");
return -ENOMEM;
}
plane->type = plane_type;
@@ -4820,7 +5341,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
if (ret) {
- DRM_ERROR("KMS: Failed to initialize plane\n");
+ drm_err(adev_to_drm(dm->adev), "KMS: Failed to initialize plane\n");
kfree(plane);
return ret;
}
@@ -4835,6 +5356,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
static void setup_backlight_device(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector)
{
+ struct amdgpu_dm_backlight_caps *caps;
struct dc_link *link = aconnector->dc_link;
int bl_idx = dm->num_of_edps;
@@ -4850,11 +5372,17 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm,
aconnector->bl_idx = bl_idx;
amdgpu_dm_update_backlight_caps(dm, bl_idx);
- dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
dm->backlight_link[bl_idx] = link;
dm->num_of_edps++;
update_connector_ext_caps(aconnector);
+ caps = &dm->backlight_caps[aconnector->bl_idx];
+
+ /* Only offer the ABM property on non-OLED panels, and only if the user didn't turn ABM off via module parameter */
+ if (!caps->ext_caps->bits.oled && amdgpu_dm_abm_level < 0)
+ drm_object_attach_property(&aconnector->base.base,
+ dm->adev->mode_info.abm_level_property,
+ ABM_SYSFS_CONTROL);
}
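For context, a range KMS property like the "abm level" attached above is typically created once at init time and then attached per connector. A hypothetical sketch; the value range and name are illustrative, not the driver's actual setup code:

	#include <drm/drm_property.h>

	static struct drm_property *
	demo_create_abm_property(struct drm_device *dev)
	{
		/* 0..4: 0 = driver default, 1-4 = increasing power savings */
		return drm_property_create_range(dev, 0, "abm level", 0, 4);
	}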
static void amdgpu_set_panel_orientation(struct drm_connector *connector);
@@ -4890,14 +5418,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize mode config\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize mode config\n");
return -EINVAL;
}
/* There is one primary plane per CRTC */
primary_planes = dm->dc->caps.max_streams;
if (primary_planes > AMDGPU_MAX_PLANES) {
- DRM_ERROR("DM: Plane nums out of 6 planes\n");
+ drm_err(adev_to_drm(adev), "DM: Plane nums out of 6 planes\n");
return -EINVAL;
}
@@ -4910,7 +5438,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (initialize_plane(dm, mode_info, i,
DRM_PLANE_TYPE_PRIMARY, plane)) {
- DRM_ERROR("KMS: Failed to initialize primary plane\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize primary plane\n");
goto fail;
}
}
@@ -4942,14 +5470,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (initialize_plane(dm, NULL, primary_planes + i,
DRM_PLANE_TYPE_OVERLAY, plane)) {
- DRM_ERROR("KMS: Failed to initialize overlay plane\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize overlay plane\n");
goto fail;
}
}
for (i = 0; i < dm->dc->caps.max_streams; i++)
if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
- DRM_ERROR("KMS: Failed to initialize crtc\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize crtc\n");
goto fail;
}
@@ -4966,9 +5494,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(2, 1, 0):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
if (register_outbox_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -4989,6 +5518,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
psr_feature_enabled = true;
break;
@@ -5006,6 +5536,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
replay_feature_enabled = true;
break;
@@ -5016,7 +5547,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
if (link_cnt > MAX_LINKS) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"KMS: Cannot support more than %d display indexes\n",
MAX_LINKS);
goto fail;
@@ -5032,12 +5563,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
if (!wbcon) {
- DRM_ERROR("KMS: Failed to allocate writeback connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to allocate writeback connector\n");
continue;
}
if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
- DRM_ERROR("KMS: Failed to initialize writeback connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize writeback connector\n");
kfree(wbcon);
continue;
}
@@ -5057,12 +5588,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
- DRM_ERROR("KMS: Failed to initialize encoder\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize encoder\n");
goto fail;
}
if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
- DRM_ERROR("KMS: Failed to initialize connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize connector\n");
goto fail;
}
@@ -5071,7 +5602,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
aconnector;
if (!dc_link_detect_connection_type(link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(link);
@@ -5093,13 +5624,26 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (amdgpu_dm_set_replay_caps(link, aconnector))
psr_feature_enabled = false;
- if (psr_feature_enabled)
+ if (psr_feature_enabled) {
amdgpu_dm_set_psr_caps(link);
+ drm_info(adev_to_drm(adev), "PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
+ link->psr_settings.psr_feature_enabled,
+ link->psr_settings.psr_version,
+ link->dpcd_caps.psr_info.psr_version,
+ link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
+ link->dpcd_caps.psr_info.psr2_su_y_granularity_cap);
+ }
}
}
amdgpu_set_panel_orientation(&aconnector->base);
}
+ /* Debug dump: list all DC links and their associated sinks after detection
+ * is complete for all connectors. This provides a comprehensive view of the
+ * final state without repeating the dump for each connector.
+ */
+ amdgpu_dm_dump_links_and_sinks(adev);
+
/* Software is initialized. Now we can register interrupt handlers. */
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -5108,7 +5652,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_VERDE:
case CHIP_OLAND:
if (dce60_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -5130,7 +5674,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_VEGA12:
case CHIP_VEGA20:
if (dce110_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -5155,14 +5699,15 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
if (dcn10_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
default:
- DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
+ drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%X\n",
amdgpu_ip_version(adev, DCE_HWIP, 0));
goto fail;
}
@@ -5179,7 +5724,8 @@ fail:
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
- drm_atomic_private_obj_fini(&dm->atomic_obj);
+ if (dm->atomic_obj.state)
+ drm_atomic_private_obj_fini(&dm->atomic_obj);
}
/******************************************************************************
@@ -5297,6 +5843,9 @@ static int dm_init_microcode(struct amdgpu_device *adev)
case IP_VERSION(3, 5, 1):
fw_name_dmub = FIRMWARE_DCN_351_DMUB;
break;
+ case IP_VERSION(3, 6, 0):
+ fw_name_dmub = FIRMWARE_DCN_36_DMUB;
+ break;
case IP_VERSION(4, 0, 1):
fw_name_dmub = FIRMWARE_DCN_401_DMUB;
break;
@@ -5304,7 +5853,8 @@ static int dm_init_microcode(struct amdgpu_device *adev)
/* ASIC doesn't support DMUB. */
return 0;
}
- r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, "%s", fw_name_dmub);
+ r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name_dmub);
return r;
}
@@ -5319,7 +5869,7 @@ static int dm_early_init(struct amdgpu_ip_block *ip_block)
/* if there is no object header, skip DM */
if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
- dev_info(adev->dev, "No object header, skipping DM\n");
+ drm_info(adev_to_drm(adev), "No object header, skipping DM\n");
return -ENOENT;
}
@@ -5424,13 +5974,14 @@ static int dm_early_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
break;
default:
- DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
+ drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%x\n",
amdgpu_ip_version(adev, DCE_HWIP, 0));
return -EINVAL;
}
@@ -5479,6 +6030,10 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
*color_space = COLOR_SPACE_SRGB;
+ /* Ignore properties when DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE is set */
+ if (plane_state->state && plane_state->state->plane_color_pipeline)
+ return 0;
+
/* DRM color properties only affect non-RGB formats. */
if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
return 0;
@@ -5502,9 +6057,9 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
case DRM_COLOR_YCBCR_BT2020:
if (full_range)
- *color_space = COLOR_SPACE_2020_YCBCR;
+ *color_space = COLOR_SPACE_2020_YCBCR_FULL;
else
- return -EINVAL;
+ *color_space = COLOR_SPACE_2020_YCBCR_LIMITED;
break;
default:
@@ -5520,8 +6075,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const u64 tiling_flags,
struct dc_plane_info *plane_info,
struct dc_plane_address *address,
- bool tmz_surface,
- bool force_disable_dcc)
+ bool tmz_surface)
{
const struct drm_framebuffer *fb = plane_state->fb;
const struct amdgpu_framebuffer *afb =
@@ -5580,7 +6134,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
break;
default:
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"Unsupported screen format %p4cc\n",
&fb->format->format);
return -EINVAL;
@@ -5620,7 +6174,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
&plane_info->tiling_info,
&plane_info->plane_size,
&plane_info->dcc, address,
- tmz_surface, force_disable_dcc);
+ tmz_surface);
if (ret)
return ret;
@@ -5641,7 +6195,6 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
struct dc_scaling_info scaling_info;
struct dc_plane_info plane_info;
int ret;
- bool force_disable_dcc = false;
ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
if (ret)
@@ -5652,13 +6205,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->clip_rect = scaling_info.clip_rect;
dc_plane_state->scaling_quality = scaling_info.scaling_quality;
- force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
ret = fill_dc_plane_info_and_addr(adev, plane_state,
afb->tiling_flags,
&plane_info,
&dc_plane_state->address,
- afb->tmz_surface,
- force_disable_dcc);
+ afb->tmz_surface);
if (ret)
return ret;
@@ -6004,12 +6555,14 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
else
- color_space = COLOR_SPACE_2020_YCBCR;
+ color_space = COLOR_SPACE_2020_YCBCR_LIMITED;
break;
case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
default:
if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
color_space = COLOR_SPACE_SRGB;
+ if (connector_state->hdmi.broadcast_rgb == DRM_HDMI_BROADCAST_RGB_LIMITED)
+ color_space = COLOR_SPACE_SRGB_LIMITED;
/*
* 27030khz is the separation point between HDTV and SDTV
* according to HDMI spec, we use YCbCr709 and YCbCr601
@@ -6102,6 +6655,7 @@ static void fill_stream_properties_from_drm_display_mode(
struct amdgpu_dm_connector *aconnector = NULL;
struct hdmi_vendor_infoframe hv_frame;
struct hdmi_avi_infoframe avi_frame;
+ ssize_t err;
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
aconnector = to_amdgpu_dm_connector(connector);
@@ -6121,6 +6675,10 @@ static void fill_stream_properties_from_drm_display_mode(
&& aconnector
&& aconnector->force_yuv420_output)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR422)
+ && aconnector
+ && aconnector->force_yuv422_output)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR422;
else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
@@ -6148,9 +6706,19 @@ static void fill_stream_properties_from_drm_display_mode(
}
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
- drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame,
+ (struct drm_connector *)connector,
+ mode_in);
+ if (err < 0)
+ drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd\n",
+ connector->name, err);
timing_out->vic = avi_frame.video_code;
- drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
+ err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame,
+ (struct drm_connector *)connector,
+ mode_in);
+ if (err < 0)
+ drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd\n",
+ connector->name, err);
timing_out->hdmi_vic = hv_frame.vic;
}
@@ -6263,19 +6831,19 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
const struct drm_display_mode *native_mode,
bool scale_enabled)
{
- if (scale_enabled) {
- copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
- } else if (native_mode->clock == drm_mode->clock &&
- native_mode->htotal == drm_mode->htotal &&
- native_mode->vtotal == drm_mode->vtotal) {
- copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ if (scale_enabled || (
+ native_mode->clock == drm_mode->clock &&
+ native_mode->htotal == drm_mode->htotal &&
+ native_mode->vtotal == drm_mode->vtotal)) {
+ if (native_mode->crtc_clock)
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
} else {
/* no scaling nor amdgpu inserted, no need to patch */
}
}
static struct dc_sink *
-create_fake_sink(struct dc_link *link)
+create_fake_sink(struct drm_device *dev, struct dc_link *link)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct dc_sink *sink = NULL;
@@ -6285,7 +6853,7 @@ create_fake_sink(struct dc_link *link)
sink = dc_sink_create(&sink_init_data);
if (!sink) {
- DRM_ERROR("Failed to create sink!\n");
+ drm_err(dev, "Failed to create sink!\n");
return NULL;
}
sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
@@ -6418,7 +6986,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
m_pref = list_first_entry_or_null(
&aconnector->base.modes, struct drm_display_mode, head);
if (!m_pref) {
- DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
+ drm_dbg_driver(aconnector->base.dev, "No preferred mode found in EDID\n");
return NULL;
}
}
@@ -6593,7 +7161,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST RX\n",
+ drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from SST RX\n",
__func__, drm_connector->name);
}
} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
@@ -6613,7 +7181,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
+ drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
__func__, drm_connector->name);
}
}
@@ -6641,6 +7209,7 @@ create_stream_for_sink(struct drm_connector *connector,
const struct dc_stream_state *old_stream,
int requested_bpc)
{
+ struct drm_device *dev = connector->dev;
struct amdgpu_dm_connector *aconnector = NULL;
struct drm_display_mode *preferred_mode = NULL;
const struct drm_connector_state *con_state = &dm_state->base;
@@ -6663,11 +7232,6 @@ create_stream_for_sink(struct drm_connector *connector,
drm_mode_init(&mode, drm_mode);
memset(&saved_mode, 0, sizeof(saved_mode));
- if (connector == NULL) {
- DRM_ERROR("connector is NULL!\n");
- return stream;
- }
-
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
aconnector = NULL;
aconnector = to_amdgpu_dm_connector(connector);
@@ -6682,7 +7246,7 @@ create_stream_for_sink(struct drm_connector *connector,
}
if (!aconnector || !aconnector->dc_sink) {
- sink = create_fake_sink(link);
+ sink = create_fake_sink(dev, link);
if (!sink)
return stream;
@@ -6694,7 +7258,7 @@ create_stream_for_sink(struct drm_connector *connector,
stream = dc_create_stream_for_sink(sink);
if (stream == NULL) {
- DRM_ERROR("Failed to create stream for sink!\n");
+ drm_err(dev, "Failed to create stream for sink!\n");
goto finish;
}
@@ -6726,7 +7290,7 @@ create_stream_for_sink(struct drm_connector *connector,
* case, we call set mode ourselves to restore the previous mode
* and the modelist may not be filled in time.
*/
- DRM_DEBUG_DRIVER("No preferred mode found\n");
+ drm_dbg_driver(dev, "No preferred mode found\n");
} else if (aconnector) {
recalculate_timing = amdgpu_freesync_vid_mode &&
is_freesync_video_mode(&mode, aconnector);
@@ -6822,29 +7386,117 @@ finish:
return stream;
}
+/**
+ * amdgpu_dm_connector_poll - Poll a connector to see if it's connected to a display
+ * @aconnector: DM connector to poll (owns @base drm_connector and @dc_link)
+ * @force: if true, force polling even when DAC load detection was used
+ *
+ * Used for connectors that don't support HPD (hotplug detection) to
+ * periodically check whether the connector is connected to a display.
+ *
+ * When connection was determined via DAC load detection, we avoid
+ * re-running it on normal polls to prevent visible glitches, unless
+ * @force is set.
+ *
+ * Return: The probed connector status (connected/disconnected/unknown).
+ */
static enum drm_connector_status
-amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+amdgpu_dm_connector_poll(struct amdgpu_dm_connector *aconnector, bool force)
{
- bool connected;
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc_link *link = aconnector->dc_link;
+ enum dc_connection_type conn_type = dc_connection_none;
+ enum drm_connector_status status = connector_status_disconnected;
- /*
- * Notes:
- * 1. This interface is NOT called in context of HPD irq.
- * 2. This interface *is called* in context of user-mode ioctl. Which
- * makes it a bad place for *any* MST-related activity.
+ /* When we determined the connection using DAC load detection,
+ * do NOT poll the connector to detect disconnect, because
+ * that would run DAC load detection again, which can cause
+ * visible glitches.
+ *
+ * Only allow polling such a connector again when forcing.
*/
+ if (!force && link->local_sink && link->type == dc_connection_dac_load)
+ return connector->status;
- if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
- !aconnector->fake_enable)
- connected = (aconnector->dc_sink != NULL);
- else
- connected = (aconnector->base.force == DRM_FORCE_ON ||
- aconnector->base.force == DRM_FORCE_ON_DIGITAL);
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (dc_link_detect_connection_type(aconnector->dc_link, &conn_type) &&
+ conn_type != dc_connection_none) {
+ mutex_lock(&adev->dm.dc_lock);
+
+ /* Only call full link detection when a sink isn't created yet,
+ * i.e. just when the display is plugged in; otherwise we risk flickering.
+ */
+ if (link->local_sink ||
+ dc_link_detect(link, DETECT_REASON_HPD))
+ status = connector_status_connected;
+
+ mutex_unlock(&adev->dm.dc_lock);
+ }
+
+ if (connector->status != status) {
+ if (status == connector_status_disconnected) {
+ if (link->local_sink)
+ dc_sink_release(link->local_sink);
+
+ link->local_sink = NULL;
+ link->dpcd_sink_count = 0;
+ link->type = dc_connection_none;
+ }
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ }
+
+ mutex_unlock(&aconnector->hpd_lock);
+ return status;
+}
+
+/**
+ * amdgpu_dm_connector_detect() - Detect whether a DRM connector is connected to a display
+ *
+ * A connector is considered connected when it has a sink that is not NULL.
+ * For connectors that support HPD (hotplug detection), the connection is
+ * handled in the HPD interrupt.
+ * For connectors that may not support HPD, such as analog connectors,
+ * DRM will call this function repeatedly to poll them.
+ *
+ * Notes:
+ * 1. This interface is NOT called in context of HPD irq.
+ * 2. This interface *is called* in the context of a user-mode ioctl,
+ * which makes it a bad place for *any* MST-related activity.
+ *
+ * @connector: The DRM connector we are checking. We convert it to
+ * amdgpu_dm_connector so we can read the DC link and state.
+ * @force: If true, run a full detect even where a lighter check would
+ * normally be used to avoid flicker.
+ *
+ * Return: The connector status (connected, disconnected, or unknown).
+ *
+ */
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
update_subconnector_property(aconnector);
- return (connected ? connector_status_connected :
+ if (aconnector->base.force == DRM_FORCE_ON ||
+ aconnector->base.force == DRM_FORCE_ON_DIGITAL)
+ return connector_status_connected;
+ else if (aconnector->base.force == DRM_FORCE_OFF)
+ return connector_status_disconnected;
+
+ /* Only poll connectors that support analog signals, and only while
+ * they are either disconnected or connected to an analog display.
+ */
+ if (drm_kms_helper_is_poll_worker() &&
+ dc_connector_supports_analog(aconnector->dc_link->link_id.id) &&
+ (!aconnector->dc_sink || aconnector->dc_sink->edid_caps.analog))
+ return amdgpu_dm_connector_poll(aconnector, force);
+
+ return (aconnector->dc_sink ? connector_status_connected :
connector_status_disconnected);
}
@@ -6895,6 +7547,20 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
dm_new_state->underscan_enable = val;
ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ switch (val) {
+ case ABM_SYSFS_CONTROL:
+ dm_new_state->abm_sysfs_forbidden = false;
+ break;
+ case ABM_LEVEL_OFF:
+ dm_new_state->abm_sysfs_forbidden = true;
+ dm_new_state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
+ break;
+ default:
+ dm_new_state->abm_sysfs_forbidden = true;
+ dm_new_state->abm_level = val;
+ }
+ ret = 0;
}
return ret;
@@ -6937,6 +7603,13 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
*val = dm_state->underscan_enable;
ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ if (!dm_state->abm_sysfs_forbidden)
+ *val = ABM_SYSFS_CONTROL;
+ else
+ *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
+ dm_state->abm_level : 0;
+ ret = 0;
}
return ret;
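
Both ABM property callbacks above are reached through the standard DRM connector-property ioctls. A minimal userspace sketch, assuming libdrm and an already-discovered property id for this ABM property (the numeric level values and their mapping to ABM_SYSFS_CONTROL/ABM_LEVEL_OFF are defined by the driver and assumed here):

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Set an ABM level on a connector; the value lands in
 * amdgpu_dm_connector_atomic_set_property() above. */
static int set_abm_level(int fd, uint32_t connector_id, uint32_t prop_id,
			 uint64_t level)
{
	return drmModeObjectSetProperty(fd, connector_id,
					DRM_MODE_OBJECT_CONNECTOR,
					prop_id, level);
}
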
@@ -6989,10 +7662,16 @@ static ssize_t panel_power_savings_store(struct device *device,
return -EINVAL;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- to_dm_connector_state(connector->state)->abm_level = val ?:
- ABM_LEVEL_IMMEDIATE_DISABLE;
+ if (to_dm_connector_state(connector->state)->abm_sysfs_forbidden)
+ ret = -EBUSY;
+ else
+ to_dm_connector_state(connector->state)->abm_level = val ?:
+ ABM_LEVEL_IMMEDIATE_DISABLE;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ if (ret)
+ return ret;
+
drm_kms_helper_hotplug_event(dev);
return count;
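
The -EBUSY path added above guards against mixing the sysfs and property interfaces. A hedged sketch of the sysfs side (the card index and connector name in the path vary per system and are assumptions):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Write an ABM level string to panel_power_savings (the store above
 * bounds-checks the value); write() fails with errno EBUSY once the
 * connector property has taken over (abm_sysfs_forbidden). */
static int set_panel_power_savings(const char *level)
{
	int fd = open("/sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings",
		      O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, level, strlen(level));
	close(fd);
	return n < 0 ? -1 : 0;
}
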
@@ -7040,6 +7719,7 @@ static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector))
sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
+ cec_notifier_conn_unregister(amdgpu_dm_connector->notifier);
drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
@@ -7056,6 +7736,13 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
if (aconnector->mst_mgr.dev)
drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
+ /* Cancel and flush any pending HDMI HPD debounce work */
+ cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work);
+ if (aconnector->hdmi_prev_sink) {
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = NULL;
+ }
+
if (aconnector->bl_idx != -1) {
backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
dm->backlight_dev[aconnector->bl_idx] = NULL;
@@ -7071,10 +7758,6 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
- if (aconnector->i2c) {
- i2c_del_adapter(&aconnector->i2c->base);
- kfree(aconnector->i2c);
- }
kfree(aconnector->dm_dp_aux.aux.name);
kfree(connector);
@@ -7174,11 +7857,18 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
const struct drm_edid *drm_edid;
+ struct i2c_adapter *ddc;
+ struct drm_device *dev = connector->dev;
+
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
- drm_edid = drm_edid_read(connector);
+ drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
- DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ drm_err(dev, "No EDID found on connector: %s.\n", connector->name);
return;
}
@@ -7220,17 +7910,24 @@ static int get_modes(struct drm_connector *connector)
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
+ struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_VIRTUAL
};
const struct drm_edid *drm_edid;
const struct edid *edid;
+ struct i2c_adapter *ddc;
- drm_edid = drm_edid_read(connector);
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
+
+ drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
- DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ drm_err(connector->dev, "No EDID found on connector: %s.\n", connector->name);
return;
}
@@ -7322,7 +8019,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
dc_result = DC_FAIL_ATTACH_SURFACES;
if (dc_result == DC_OK)
- dc_result = dc_validate_global_state(dc, dc_state, true);
+ dc_result = dc_validate_global_state(dc, dc_state, DC_VALIDATE_MODE_ONLY);
cleanup:
if (dc_state)
@@ -7335,12 +8032,12 @@ cleanup:
}
struct dc_stream_state *
-create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+create_validate_stream_for_sink(struct drm_connector *connector,
const struct drm_display_mode *drm_mode,
const struct dm_connector_state *dm_state,
const struct dc_stream_state *old_stream)
{
- struct drm_connector *connector = &aconnector->base;
+ struct amdgpu_dm_connector *aconnector = NULL;
struct amdgpu_device *adev = drm_to_adev(connector->dev);
struct dc_stream_state *stream;
const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
@@ -7351,23 +8048,29 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (!dm_state)
return NULL;
- if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
- aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (aconnector &&
+ (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER))
bpc_limit = 8;
do {
+ drm_dbg_kms(connector->dev, "Trying with %d bpc\n", requested_bpc);
stream = create_stream_for_sink(connector, drm_mode,
dm_state, old_stream,
requested_bpc);
if (stream == NULL) {
- DRM_ERROR("Failed to create stream for sink!\n");
+ drm_err(adev_to_drm(adev), "Failed to create stream for sink!\n");
break;
}
- if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ dc_result = dc_validate_stream(adev->dm.dc, stream);
+
+ if (!aconnector) /* writeback connector */
return stream;
- dc_result = dc_validate_stream(adev->dm.dc, stream);
if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
@@ -7375,7 +8078,7 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
if (dc_result != DC_OK) {
- DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n",
+ DRM_DEBUG_KMS("Pruned mode %d x %d (clk %d) %s %s -- %s\n",
drm_mode->hdisplay,
drm_mode->vdisplay,
drm_mode->clock,
@@ -7390,28 +8093,57 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
} while (stream == NULL && requested_bpc >= bpc_limit);
- if ((dc_result == DC_FAIL_ENC_VALIDATE ||
- dc_result == DC_EXCEED_DONGLE_CAP) &&
- !aconnector->force_yuv420_output) {
- DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n",
- __func__, __LINE__);
-
- aconnector->force_yuv420_output = true;
- stream = create_validate_stream_for_sink(aconnector, drm_mode,
- dm_state, old_stream);
+ switch (dc_result) {
+ /*
+ * If we failed to validate the stream (e.g. DP bandwidth) with the
+ * requested RGB color depth, we try to fall back, configuring in order:
+ * YUV422 (8bpc, 6bpc)
+ * YUV420 (8bpc, 6bpc)
+ */
+ case DC_FAIL_ENC_VALIDATE:
+ case DC_EXCEED_DONGLE_CAP:
+ case DC_NO_DP_LINK_BANDWIDTH:
+ /* recursively entered twice and already tried both YUV422 and YUV420 */
+ if (aconnector->force_yuv422_output && aconnector->force_yuv420_output)
+ break;
+ /* first failure; try YUV422 */
+ if (!aconnector->force_yuv422_output) {
+ drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV422\n",
+ __func__, __LINE__, dc_result);
+ aconnector->force_yuv422_output = true;
+ /* recursively entered and YUV422 failed, try YUV420 */
+ } else if (!aconnector->force_yuv420_output) {
+ drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV420\n",
+ __func__, __LINE__, dc_result);
+ aconnector->force_yuv420_output = true;
+ }
+ stream = create_validate_stream_for_sink(connector, drm_mode,
+ dm_state, old_stream);
+ aconnector->force_yuv422_output = false;
aconnector->force_yuv420_output = false;
+ break;
+ case DC_OK:
+ break;
+ default:
+ drm_dbg_kms(connector->dev, "%s:%d Unhandled validation failure %d\n",
+ __func__, __LINE__, dc_result);
+ break;
}
return stream;
}
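
Because the function re-enters itself at most twice (once per forced encoding), the effective search order can be flattened for clarity. A minimal sketch of the equivalent ladder; the validate callback and SK_* names are hypothetical stand-ins for create_stream_for_sink() plus the dc validation calls above:

#include <stdbool.h>
#include <stddef.h>

enum sketch_encoding { SK_RGB, SK_YUV422, SK_YUV420 };

/* Try encodings in fallback order, stepping color depth down as the
 * do/while above does, and stop at the first configuration that passes. */
static bool sketch_pick_config(bool (*validate)(enum sketch_encoding, int),
			       int max_bpc, int bpc_limit)
{
	static const enum sketch_encoding order[] = {
		SK_RGB, SK_YUV422, SK_YUV420
	};
	size_t e;
	int bpc;

	for (e = 0; e < sizeof(order) / sizeof(order[0]); e++)
		for (bpc = max_bpc; bpc >= bpc_limit; bpc -= 2)
			if (validate(order[e], bpc))
				return true;
	return false;
}
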
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
int result = MODE_ERROR;
struct dc_sink *dc_sink;
+ struct drm_display_mode *test_mode;
/* TODO: Unhardcode stream count */
struct dc_stream_state *stream;
+ /* we always have an amdgpu_dm_connector here since we got
+ * here via the amdgpu_dm_connector_helper_funcs
+ */
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
@@ -7430,15 +8162,20 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
aconnector->base.force != DRM_FORCE_ON) {
- DRM_ERROR("dc_sink is NULL!\n");
+ drm_err(connector->dev, "dc_sink is NULL!\n");
goto fail;
}
- drm_mode_set_crtcinfo(mode, 0);
+ test_mode = drm_mode_duplicate(connector->dev, mode);
+ if (!test_mode)
+ goto fail;
+
+ drm_mode_set_crtcinfo(test_mode, 0);
- stream = create_validate_stream_for_sink(aconnector, mode,
+ stream = create_validate_stream_for_sink(connector, test_mode,
to_dm_connector_state(connector->state),
NULL);
+ drm_mode_destroy(connector->dev, test_mode);
if (stream) {
dc_stream_release(stream);
result = MODE_OK;
@@ -7521,6 +8258,9 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
int ret;
+ if (WARN_ON(unlikely(!old_con_state || !new_con_state)))
+ return -EINVAL;
+
trace_amdgpu_dm_connector_atomic_check(new_con_state);
if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
@@ -7532,6 +8272,14 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
if (!crtc)
return 0;
+ if (new_con_state->privacy_screen_sw_state != old_con_state->privacy_screen_sw_state) {
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ new_crtc_state->mode_changed = true;
+ }
+
if (new_con_state->colorspace != old_con_state->colorspace) {
new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(new_crtc_state))
@@ -7633,6 +8381,23 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
int clock, bpp = 0;
bool is_y420 = false;
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+ enum drm_mode_status result;
+
+ result = drm_crtc_helper_mode_valid_fixed(encoder->crtc, adjusted_mode, native_mode);
+ if (result != MODE_OK && dm_new_connector_state->scaling == RMX_OFF) {
+ drm_dbg_driver(encoder->dev,
+ "mode %dx%d@%dHz is not native, enabling scaling\n",
+ adjusted_mode->hdisplay, adjusted_mode->vdisplay,
+ drm_mode_vrefresh(adjusted_mode));
+ dm_new_connector_state->scaling = RMX_ASPECT;
+ }
+ return 0;
+ }
+
if (!aconnector->mst_output_port)
return 0;
@@ -7646,7 +8411,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
- mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
+ mst_state->pbn_div.full = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
@@ -7753,7 +8518,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
return 0;
}
-static int to_drm_connector_type(enum signal_type st)
+static int to_drm_connector_type(enum signal_type st, uint32_t connector_id)
{
switch (st) {
case SIGNAL_TYPE_HDMI_TYPE_A:
@@ -7769,6 +8534,10 @@ static int to_drm_connector_type(enum signal_type st)
return DRM_MODE_CONNECTOR_DisplayPort;
case SIGNAL_TYPE_DVI_DUAL_LINK:
case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ if (connector_id == CONNECTOR_ID_SINGLE_LINK_DVII ||
+ connector_id == CONNECTOR_ID_DUAL_LINK_DVII)
+ return DRM_MODE_CONNECTOR_DVII;
+
return DRM_MODE_CONNECTOR_DVID;
case SIGNAL_TYPE_VIRTUAL:
return DRM_MODE_CONNECTOR_VIRTUAL;
@@ -7820,7 +8589,7 @@ static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
- char *name,
+ const char *name,
int hdisplay, int vdisplay)
{
struct drm_device *dev = encoder->dev;
@@ -7842,6 +8611,24 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
}
+static const struct amdgpu_dm_mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
+ int w;
+ int h;
+} common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
+};
+
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
@@ -7852,23 +8639,10 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
to_amdgpu_dm_connector(connector);
int i;
int n;
- struct mode_size {
- char name[DRM_DISPLAY_MODE_LEN];
- int w;
- int h;
- } common_modes[] = {
- { "640x480", 640, 480},
- { "800x600", 800, 600},
- { "1024x768", 1024, 768},
- { "1280x720", 1280, 720},
- { "1280x800", 1280, 800},
- {"1280x1024", 1280, 1024},
- { "1440x900", 1440, 900},
- {"1680x1050", 1680, 1050},
- {"1600x1200", 1600, 1200},
- {"1920x1080", 1920, 1080},
- {"1920x1200", 1920, 1200}
- };
+
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_eDP) &&
+ (connector->connector_type != DRM_MODE_CONNECTOR_LVDS))
+ return;
n = ARRAY_SIZE(common_modes);
@@ -8065,6 +8839,16 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
if (!(amdgpu_freesync_vid_mode && drm_edid))
return;
+ if (!amdgpu_dm_connector->dc_sink || !amdgpu_dm_connector->dc_link)
+ return;
+
+ if (!dc_supports_vrr(amdgpu_dm_connector->dc_sink->ctx->dce_version))
+ return;
+
+ if (dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id) &&
+ amdgpu_dm_connector->dc_sink->edid_caps.analog)
+ return;
+
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
amdgpu_dm_connector->num_modes +=
add_fs_modes(amdgpu_dm_connector);
@@ -8074,11 +8858,11 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
+ struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
struct drm_encoder *encoder;
const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid;
- struct dc_link_settings *verified_link_cap =
- &amdgpu_dm_connector->dc_link->verified_link_cap;
- const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
+ struct dc_link_settings *verified_link_cap = &dc_link->verified_link_cap;
+ const struct dc *dc = dc_link->dc;
encoder = amdgpu_dm_connector_to_encoder(connector);
@@ -8088,6 +8872,17 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
amdgpu_dm_connector->num_modes +=
drm_add_modes_noedid(connector, 1920, 1080);
+
+ if (amdgpu_dm_connector->dc_sink &&
+ amdgpu_dm_connector->dc_sink->edid_caps.analog &&
+ dc_connector_supports_analog(dc_link->link_id.id)) {
+ /* Analog monitor connected by DAC load detection.
+ * Add common modes. It will be up to the user to select one that works.
+ */
+ for (int i = 0; i < ARRAY_SIZE(common_modes); i++)
+ amdgpu_dm_connector->num_modes += drm_add_modes_noedid(
+ connector, common_modes[i].w, common_modes[i].h);
+ }
} else {
amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
if (encoder)
@@ -8135,6 +8930,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
mutex_init(&aconnector->hpd_lock);
mutex_init(&aconnector->handle_mst_msg_ready);
+ aconnector->hdmi_hpd_debounce_delay_ms = AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS;
+ INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
+ aconnector->hdmi_prev_sink = NULL;
+
/*
* Configure HPD hot-plug support. connector->polled defaults to 0,
* which means HPD hot plug is not supported.
@@ -8156,6 +8955,11 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
case DRM_MODE_CONNECTOR_DVID:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_VGA:
+ aconnector->base.polled =
+ DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ break;
default:
break;
}
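
For reference, the core semantics of the poll modes chosen in this switch:

/*
 * DRM_CONNECTOR_POLL_HPD:        hotplug interrupts are reliable; the
 *                                output-poll worker skips this connector.
 * DRM_CONNECTOR_POLL_CONNECT:    periodically probe while disconnected,
 *                                looking for a plug event.
 * DRM_CONNECTOR_POLL_DISCONNECT: periodically probe while connected,
 *                                looking for an unplug event.
 *
 * DVI-I and VGA get both poll flags because their analog side has no
 * usable HPD line; this is what routes them into
 * amdgpu_dm_connector_poll() above.
 */
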
@@ -8164,6 +8968,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
dm->ddev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA
+ || (connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root))
+ drm_connector_attach_broadcast_rgb_property(&aconnector->base);
+
drm_object_attach_property(&aconnector->base.base,
adev->mode_info.underscan_property,
UNDERSCAN_OFF);
@@ -8205,6 +9013,18 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true);
}
+
+ if (connector_type == DRM_MODE_CONNECTOR_eDP) {
+ struct drm_privacy_screen *privacy_screen;
+
+ privacy_screen = drm_privacy_screen_get(adev_to_drm(adev)->dev, NULL);
+ if (!IS_ERR(privacy_screen)) {
+ drm_connector_attach_privacy_screen_provider(&aconnector->base,
+ privacy_screen);
+ } else if (PTR_ERR(privacy_screen) != -ENODEV) {
+ drm_warn(adev_to_drm(adev), "Error getting privacy-screen\n");
+ }
+ }
}
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
@@ -8216,7 +9036,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
int i;
int result = -EIO;
- if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
+ if (!ddc_service->ddc_pin)
return result;
cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
@@ -8235,11 +9055,18 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
cmd.payloads[i].data = msgs[i].buf;
}
- if (dc_submit_i2c(
- ddc_service->ctx->dc,
- ddc_service->link->link_index,
- &cmd))
- result = num;
+ if (i2c->oem) {
+ if (dc_submit_i2c_oem(
+ ddc_service->ctx->dc,
+ &cmd))
+ result = num;
+ } else {
+ if (dc_submit_i2c(
+ ddc_service->ctx->dc,
+ ddc_service->link->link_index,
+ &cmd))
+ result = num;
+ }
kfree(cmd.payloads);
return result;
@@ -8256,9 +9083,7 @@ static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
};
static struct amdgpu_i2c_adapter *
-create_i2c(struct ddc_service *ddc_service,
- int link_index,
- int *res)
+create_i2c(struct ddc_service *ddc_service, bool oem)
{
struct amdgpu_device *adev = ddc_service->ctx->driver_context;
struct amdgpu_i2c_adapter *i2c;
@@ -8269,13 +9094,39 @@ create_i2c(struct ddc_service *ddc_service,
i2c->base.owner = THIS_MODULE;
i2c->base.dev.parent = &adev->pdev->dev;
i2c->base.algo = &amdgpu_dm_i2c_algo;
- snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
+ if (oem)
+ snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c OEM bus");
+ else
+ snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d",
+ ddc_service->link->link_index);
i2c_set_adapdata(&i2c->base, i2c);
i2c->ddc_service = ddc_service;
+ i2c->oem = oem;
return i2c;
}
+int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector)
+{
+ struct cec_connector_info conn_info;
+ struct drm_device *ddev = aconnector->base.dev;
+ struct device *hdmi_dev = ddev->dev;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_HDMI_CEC) {
+ drm_info(ddev, "HDMI-CEC feature masked\n");
+ return -EINVAL;
+ }
+
+ cec_fill_conn_info_from_drm(&conn_info, &aconnector->base);
+ aconnector->notifier =
+ cec_notifier_conn_register(hdmi_dev, NULL, &conn_info);
+ if (!aconnector->notifier) {
+ drm_err(ddev, "Failed to create cec notifier\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
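
The notifier registered here is only half of the CEC plumbing; it becomes useful once it learns the display's physical address from the EDID. A hedged sketch of that follow-up step, using the standard helpers from media/cec-notifier.h (where the driver actually performs this is outside this hunk):

#include <media/cec-notifier.h>

/* Push the physical address when an EDID appears; invalidate it on unplug. */
static void sketch_update_cec(struct cec_notifier *notifier,
			      const struct edid *edid)
{
	if (edid)
		cec_notifier_set_phys_addr_from_edid(notifier, edid);
	else
		cec_notifier_phys_addr_invalidate(notifier);
}
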
/*
* Note: this function assumes that dc_link_detect() was called for the
@@ -8296,21 +9147,21 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
link->priv = aconnector;
- i2c = create_i2c(link->ddc, link->link_index, &res);
+ i2c = create_i2c(link->ddc, false);
if (!i2c) {
- DRM_ERROR("Failed to create i2c adapter data\n");
+ drm_err(adev_to_drm(dm->adev), "Failed to create i2c adapter data\n");
return -ENOMEM;
}
aconnector->i2c = i2c;
- res = i2c_add_adapter(&i2c->base);
+ res = devm_i2c_add_adapter(dm->adev->dev, &i2c->base);
if (res) {
- DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+ drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index);
goto out_free;
}
- connector_type = to_drm_connector_type(link->connector_signal);
+ connector_type = to_drm_connector_type(link->connector_signal, link->link_id.id);
res = drm_connector_init_with_ddc(
dm->ddev,
@@ -8320,7 +9171,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
&i2c->base);
if (res) {
- DRM_ERROR("connector_init failed\n");
+ drm_err(adev_to_drm(dm->adev), "connector_init failed\n");
aconnector->connector_id = -1;
goto out_free;
}
@@ -8339,6 +9190,10 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
drm_connector_attach_encoder(
&aconnector->base, &aencoder->base);
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ amdgpu_dm_initialize_hdmi_connector(aconnector);
+
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP)
amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
@@ -8397,30 +9252,54 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static void manage_dm_interrupts(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dm_crtc_state *acrtc_state)
-{
- /*
- * We have no guarantee that the frontend index maps to the same
+{
+ /*
+ * We cannot be sure that the frontend index maps to the same
* backend index - some even map to more than one.
- *
- * TODO: Use a different interrupt or check DC itself for the mapping.
+ * So we have to go through the CRTC to find the right IRQ.
*/
- int irq_type =
- amdgpu_display_crtc_idx_to_irq_type(
+ int irq_type = amdgpu_display_crtc_idx_to_irq_type(
adev,
acrtc->crtc_id);
+ struct drm_device *dev = adev_to_drm(adev);
+
struct drm_vblank_crtc_config config = {0};
struct dc_crtc_timing *timing;
int offdelay;
if (acrtc_state) {
- if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
- IP_VERSION(3, 5, 0) ||
- acrtc_state->stream->link->psr_settings.psr_version <
- DC_PSR_VERSION_UNSUPPORTED ||
- !(adev->flags & AMD_IS_APU)) {
- timing = &acrtc_state->stream->timing;
-
- /* at least 2 frames */
+ timing = &acrtc_state->stream->timing;
+
+ /*
+ * Depending on when the HW latching event of double-buffered
+ * registers happens relative to the PSR SDP deadline, and how
+ * badly the panel clock has drifted since the last ALPM off
+ * event, there can be up to 3 frames of delay between sending
+ * the PSR exit cmd to DMUB fw, and when the panel starts
+ * displaying live frames.
+ *
+ * We can set:
+ *
+ * 20/100 * offdelay_ms = 3_frames_ms
+ * => offdelay_ms = 5 * 3_frames_ms
+ *
+ * This ensures that `3_frames_ms` will only be experienced as a
+ * 20% delay on top of how long the display has been static, and
+ * thus makes the delay less perceivable.
+ */
+ if (acrtc_state->stream->link->psr_settings.psr_version <
+ DC_PSR_VERSION_UNSUPPORTED) {
+ offdelay = DIV64_U64_ROUND_UP((u64)5 * 3 * 10 *
+ timing->v_total *
+ timing->h_total,
+ timing->pix_clk_100hz);
+ config.offdelay_ms = offdelay ?: 30;
+ } else if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
+ IP_VERSION(3, 5, 0) ||
+ !(adev->flags & AMD_IS_APU)) {
+ /*
+ * Older HW and DGPU have issues with instant off;
+ * use a 2 frame offdelay.
+ */
offdelay = DIV64_U64_ROUND_UP((u64)20 *
timing->v_total *
timing->h_total,
@@ -8428,33 +9307,42 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
config.offdelay_ms = offdelay ?: 30;
} else {
+ /* offdelay_ms = 0 will never disable vblank */
+ config.offdelay_ms = 1;
config.disable_immediate = true;
}
drm_crtc_vblank_on_config(&acrtc->base,
&config);
-
- amdgpu_irq_get(
- adev,
- &adev->pageflip_irq,
- irq_type);
+ /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get. */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 2, 0):
+ if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n");
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- amdgpu_irq_get(
- adev,
- &adev->vline0_irq,
- irq_type);
+ if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n");
#endif
+ }
+
} else {
+ /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put. */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 2, 0):
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- amdgpu_irq_put(
- adev,
- &adev->vline0_irq,
- irq_type);
+ if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n");
#endif
- amdgpu_irq_put(
- adev,
- &adev->pageflip_irq,
- irq_type);
+ if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n");
+ }
+
drm_crtc_vblank_off(&acrtc->base);
}
}
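
Plugging a standard timing into the PSR branch above makes the 5 * 3-frames rule concrete. A worked example, using the common CEA-861 1080p60 timing purely for illustration:

/*
 * h_total = 2200, v_total = 1125, pix_clk_100hz = 1485000 (148.5 MHz):
 *
 *   frame_ms    = 10 * v_total * h_total / pix_clk_100hz
 *               = 10 * 1125 * 2200 / 1485000  ~= 16.7 ms
 *   offdelay_ms = 5 * 3 * frame_ms            ~= 250 ms
 *
 * Vblank interrupts therefore stay armed for ~250 ms after the last
 * update, so a worst-case 3-frame (~50 ms) PSR exit adds at most 20% on
 * top of the time the screen has already been static.
 */
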
@@ -8810,7 +9698,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
*/
WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
- DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
+ drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR off->on: Get vblank ref\n",
__func__, new_state->base.crtc->base.id);
} else if (old_vrr_active && !new_vrr_active) {
/* Transition VRR active -> inactive:
@@ -8818,7 +9706,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
*/
WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
drm_crtc_vblank_put(new_state->base.crtc);
- DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
+ drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR on->off: Drop vblank ref\n",
__func__, new_state->base.crtc->base.id);
}
}
@@ -8905,13 +9793,13 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane,
if (crtc_state->stream) {
if (!dc_stream_set_cursor_attributes(crtc_state->stream,
&attributes))
- DRM_ERROR("DC failed to set cursor attributes\n");
+ drm_err(adev_to_drm(adev), "DC failed to set cursor attributes\n");
update->cursor_attributes = &crtc_state->stream->cursor_attributes;
if (!dc_stream_set_cursor_position(crtc_state->stream,
&position))
- DRM_ERROR("DC failed to set cursor position\n");
+ drm_err(adev_to_drm(adev), "DC failed to set cursor position\n");
update->cursor_position = &crtc_state->stream->cursor_position;
}
@@ -8925,6 +9813,7 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
struct amdgpu_dm_connector *aconn =
(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
if (pr->config.replay_supported && !pr->replay_feature_enabled)
@@ -8951,14 +9840,15 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
* adequate number of fast atomic commits to notify KMD
* of update events. See `vblank_control_worker()`.
*/
- if (acrtc_attach->dm_irq_params.allow_sr_entry &&
+ if (!vrr_active &&
+ acrtc_attach->dm_irq_params.allow_sr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
(current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
if (pr->replay_feature_enabled && !pr->replay_allow_active)
amdgpu_dm_replay_enable(acrtc_state->stream, true);
- if (psr->psr_version >= DC_PSR_VERSION_SU_1 &&
+ if (psr->psr_version == DC_PSR_VERSION_SU_1 &&
!psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
amdgpu_dm_psr_enable(acrtc_state->stream);
}
@@ -9095,7 +9985,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
afb->tiling_flags,
&bundle->plane_infos[planes_count],
&bundle->flip_addrs[planes_count].address,
- afb->tmz_surface, false);
+ afb->tmz_surface);
drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
new_plane_state->plane->index,
@@ -9129,7 +10019,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
timestamp_ns;
if (acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
+ amdgpu_dm_psr_disable(acrtc_state->stream, true);
mutex_unlock(&dm->dc_lock);
}
}
@@ -9160,7 +10050,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].surface = dc_plane;
if (!bundle->surface_updates[planes_count].surface) {
- DRM_ERROR("No surface for CRTC: id=%d\n",
+ drm_err(dev, "No surface for CRTC: id=%d\n",
acrtc_attach->crtc_id);
continue;
}
@@ -9295,11 +10185,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->stream_update.abm_level = &acrtc_state->abm_level;
mutex_lock(&dm->dc_lock);
- if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) || vrr_active) {
if (acrtc_state->stream->link->replay_settings.replay_allow_active)
amdgpu_dm_replay_disable(acrtc_state->stream);
if (acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
+ amdgpu_dm_psr_disable(acrtc_state->stream, true);
}
mutex_unlock(&dm->dc_lock);
@@ -9676,20 +10566,20 @@ static void dm_set_writeback(struct amdgpu_display_manager *dm,
wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
if (!wb_info) {
- DRM_ERROR("Failed to allocate wb_info\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate wb_info\n");
return;
}
acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
if (!acrtc) {
- DRM_ERROR("no amdgpu_crtc found\n");
+ drm_err(adev_to_drm(adev), "no amdgpu_crtc found\n");
kfree(wb_info);
return;
}
afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
if (!afb) {
- DRM_ERROR("No amdgpu_framebuffer found\n");
+ drm_err(adev_to_drm(adev), "No amdgpu_framebuffer found\n");
kfree(wb_info);
return;
}
@@ -9760,69 +10650,40 @@ static void dm_set_writeback(struct amdgpu_display_manager *dm,
drm_writeback_queue_job(wb_conn, new_con_state);
}
-/**
- * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
- * @state: The atomic state to commit
- *
- * This will tell DC to commit the constructed DC state from atomic_check,
- * programming the hardware. Any failures here implies a hardware failure, since
- * atomic check should have filtered anything non-kosher.
- */
-static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+static void amdgpu_dm_update_hdcp(struct drm_atomic_state *state)
{
+ struct drm_connector_state *old_con_state, *new_con_state;
struct drm_device *dev = state->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_display_manager *dm = &adev->dm;
- struct dm_atomic_state *dm_state;
- struct dc_state *dc_state = NULL;
- u32 i, j;
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- unsigned long flags;
- bool wait_for_vblank = true;
struct drm_connector *connector;
- struct drm_connector_state *old_con_state, *new_con_state;
- struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
- int crtc_disable_count = 0;
-
- trace_amdgpu_dm_atomic_commit_tail_begin(state);
-
- drm_atomic_helper_update_legacy_modeset_state(dev, state);
- drm_dp_mst_atomic_wait_for_dependencies(state);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
- dm_state = dm_atomic_get_new_state(state);
- if (dm_state && dm_state->context) {
- dc_state = dm_state->context;
- amdgpu_dm_commit_streams(state, dc_state);
- }
+ if (!adev->dm.hdcp_workqueue)
+ return;
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state;
struct amdgpu_dm_connector *aconnector;
- if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ if (!connector || connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
aconnector = to_amdgpu_dm_connector(connector);
- if (!adev->dm.hdcp_workqueue)
- continue;
-
- pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
-
- if (!connector)
- continue;
+ drm_dbg(dev, "[HDCP_DM] -------------- i : %x ----------\n", i);
- pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ drm_dbg(dev, "[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
connector->index, connector->status, connector->dpms);
- pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ drm_dbg(dev, "[HDCP_DM] state protection old: %x new: %x\n",
old_con_state->content_protection, new_con_state->content_protection);
if (aconnector->dc_sink) {
if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
- pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+ drm_dbg(dev, "[HDCP_DM] pipe_ctx dispname=%s\n",
aconnector->dc_sink->edid_caps.display_name);
}
}
@@ -9836,7 +10697,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
if (old_crtc_state)
- pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ drm_dbg(dev, "old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
old_crtc_state->enable,
old_crtc_state->active,
old_crtc_state->mode_changed,
@@ -9844,29 +10705,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
old_crtc_state->connectors_changed);
if (new_crtc_state)
- pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ drm_dbg(dev, "NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
new_crtc_state->enable,
new_crtc_state->active,
new_crtc_state->mode_changed,
new_crtc_state->active_changed,
new_crtc_state->connectors_changed);
- }
- for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
- struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-
- if (!adev->dm.hdcp_workqueue)
- continue;
-
- new_crtc_state = NULL;
- old_crtc_state = NULL;
-
- if (acrtc) {
- new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
- old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
- }
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -9910,7 +10755,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
enable_encryption = true;
- DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+ drm_info(dev, "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
if (aconnector->dc_link)
hdcp_update_display(
@@ -9918,6 +10763,78 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->hdcp_content_type, enable_encryption);
}
}
+}
+
+static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int i, ret;
+
+ ret = drm_dp_mst_atomic_setup_commit(state);
+ if (ret)
+ return ret;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ /*
+ * Color management settings. We also update color properties
+ * when a modeset is needed, to ensure they get reprogrammed.
+ */
+ if (dm_new_crtc_state->base.active && dm_new_crtc_state->stream &&
+ (dm_new_crtc_state->base.color_mgmt_changed ||
+ dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+ ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
+ if (ret) {
+ drm_dbg_atomic(state->dev, "Failed to update color state\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
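
The new callback plugs into the atomic helper's setup phase, which runs before commit_tail and may still fail the commit. A minimal wiring sketch; the struct and field names are the real DRM ones, the table name is hypothetical:

#include <drm/drm_modeset_helper_vtables.h>

static const struct drm_mode_config_helper_funcs sketch_mode_config_helpers = {
	/* Called from drm_atomic_helper_setup_commit(); errors returned
	 * here (e.g. from the color-management check) abort the commit. */
	.atomic_commit_setup = amdgpu_dm_atomic_setup_commit,
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
};
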
+
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL;
+ u32 i, j;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state = NULL, *new_con_state = NULL;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
+
+ trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
+
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ amdgpu_dm_commit_streams(state, dc_state);
+ }
+
+ amdgpu_dm_update_hdcp(state);
/* Handle connector state changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -9928,7 +10845,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct dc_stream_update stream_update;
struct dc_info_packet hdr_packet;
struct dc_stream_status *status = NULL;
- bool abm_changed, hdr_changed, scaling_changed;
+ bool abm_changed, hdr_changed, scaling_changed, output_color_space_changed = false;
memset(&stream_update, 0, sizeof(stream_update));
@@ -9947,13 +10864,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
scaling_changed = is_scaling_state_different(dm_new_con_state,
dm_old_con_state);
+ if ((new_con_state->hdmi.broadcast_rgb != old_con_state->hdmi.broadcast_rgb) &&
+ (dm_old_crtc_state->stream->output_color_space !=
+ get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state)))
+ output_color_space_changed = true;
+
abm_changed = dm_new_crtc_state->abm_level !=
dm_old_crtc_state->abm_level;
hdr_changed =
!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
- if (!scaling_changed && !abm_changed && !hdr_changed)
+ if (!scaling_changed && !abm_changed && !hdr_changed && !output_color_space_changed)
continue;
stream_update.stream = dm_new_crtc_state->stream;
@@ -9965,6 +10887,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
stream_update.dst = dm_new_crtc_state->stream->dst;
}
+ if (output_color_space_changed) {
+ dm_new_crtc_state->stream->output_color_space
+ = get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state);
+
+ stream_update.output_color_space = &dm_new_crtc_state->stream->output_color_space;
+ }
+
if (abm_changed) {
dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
@@ -9988,9 +10917,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* Here we create an empty update on each plane.
* To fix this, DC should permit updating only stream properties.
*/
- dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
+ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_KERNEL);
if (!dummy_updates) {
- DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n");
continue;
}
for (j = 0; j < status->plane_count; j++)
@@ -10008,6 +10937,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
&stream_update);
mutex_unlock(&dm->dc_lock);
kfree(dummy_updates);
+
+ drm_connector_update_privacy_screen(new_con_state);
}
/**
@@ -10058,14 +10989,20 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
- spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
- acrtc->dm_irq_params.window_param.update_win = true;
+ uint8_t cnt;
- /**
- * It takes 2 frames for HW to stably generate CRC when
- * resuming from suspend, so we set skip_frame_cnt 2.
- */
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ for (cnt = 0; cnt < MAX_CRC_WINDOW_NUM; cnt++) {
+ if (acrtc->dm_irq_params.window_param[cnt].enable) {
+ acrtc->dm_irq_params.window_param[cnt].update_win = true;
+
+ /*
+ * It takes 2 frames for HW to stably generate CRC when
+ * resuming from suspend, so we set skip_frame_cnt to 2.
+ */
+ acrtc->dm_irq_params.window_param[cnt].skip_frame_cnt = 2;
+ }
+ }
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#endif
@@ -10193,16 +11130,20 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
*/
conn_state = drm_atomic_get_connector_state(state, connector);
- ret = PTR_ERR_OR_ZERO(conn_state);
- if (ret)
+ /* Check for error in getting connector state */
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
goto out;
+ }
/* Attach crtc to drm_atomic_state*/
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
- ret = PTR_ERR_OR_ZERO(crtc_state);
- if (ret)
+ /* Check for error in getting crtc state */
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
goto out;
+ }
/* force a restore */
crtc_state->mode_changed = true;
@@ -10210,9 +11151,11 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
/* Attach plane to drm_atomic_state */
plane_state = drm_atomic_get_plane_state(state, plane);
- ret = PTR_ERR_OR_ZERO(plane_state);
- if (ret)
+ /* Check for error in getting plane state */
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
goto out;
+ }
/* Call commit internally with the state we just constructed */
ret = drm_atomic_commit(state);
@@ -10220,7 +11163,7 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
out:
drm_atomic_state_put(state);
if (ret)
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_err(ddev, "Restoring old state failed with %i\n", ret);
return ret;
}
@@ -10304,7 +11247,7 @@ static int do_aquire_global_lock(struct drm_device *dev,
&commit->flip_done, 10*HZ);
if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+ drm_err(dev, "[CRTC:%d:%s] hw_done or flip_done timed out\n",
crtc->base.id, crtc->name);
drm_crtc_commit_put(commit);
@@ -10350,6 +11293,8 @@ static void get_freesync_config_for_crtc(
} else {
config.state = VRR_STATE_INACTIVE;
}
+ } else {
+ config.state = VRR_STATE_UNSUPPORTED;
}
out:
new_crtc_state->freesync_config = config;
@@ -10420,6 +11365,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
struct dm_atomic_state *dm_state = NULL;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
struct dc_stream_state *new_stream;
+ struct amdgpu_device *adev = dm->adev;
int ret = 0;
/*
@@ -10449,8 +11395,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
drm_old_conn_state = drm_atomic_get_old_connector_state(state,
connector);
- if (IS_ERR(drm_new_conn_state)) {
- ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
+ if (WARN_ON(!drm_new_conn_state)) {
+ ret = -EINVAL;
goto fail;
}
@@ -10460,7 +11406,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto skip_modeset;
- new_stream = create_validate_stream_for_sink(aconnector,
+ new_stream = create_validate_stream_for_sink(connector,
&new_crtc_state->mode,
dm_new_conn_state,
dm_old_crtc_state->stream);
@@ -10473,7 +11419,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
*/
if (!new_stream) {
- DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ drm_dbg_driver(adev_to_drm(adev), "%s: Failed to create new stream for crtc %d\n",
__func__, acrtc->base.base.id);
ret = -ENOMEM;
goto fail;
@@ -10511,7 +11457,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
new_crtc_state->mode_changed = false;
- DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
+ drm_dbg_driver(adev_to_drm(adev), "Mode change not required, setting mode_changed to %d",
new_crtc_state->mode_changed);
}
}
@@ -10549,7 +11495,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
is_timing_unchanged_for_freesync(new_crtc_state,
old_crtc_state)) {
new_crtc_state->mode_changed = false;
- DRM_DEBUG_DRIVER(
+ drm_dbg_driver(adev_to_drm(adev),
"Mode change not required for front porch change, setting mode_changed to %d",
new_crtc_state->mode_changed);
@@ -10570,7 +11516,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (ret)
goto fail;
- DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
+ drm_dbg_driver(adev_to_drm(adev), "Disabling DRM crtc: %d\n",
crtc->base.id);
/* i.e. reset mode */
@@ -10666,7 +11612,7 @@ skip_modeset:
if (dm_new_crtc_state->base.color_mgmt_changed ||
dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
drm_atomic_crtc_needs_modeset(new_crtc_state)) {
- ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
+ ret = amdgpu_dm_check_crtc_color_mgmt(dm_new_crtc_state, true);
if (ret)
goto fail;
}
@@ -10703,6 +11649,9 @@ static bool should_reset_plane(struct drm_atomic_state *state,
state->allow_modeset)
return true;
+ if (amdgpu_in_reset(adev) && state->allow_modeset)
+ return true;
+
/* Exit early if we know that we're adding or removing the plane. */
if (old_plane_state->crtc != new_plane_state->crtc)
return true;
@@ -11153,8 +12102,8 @@ dm_get_plane_scale(struct drm_plane_state *plane_state,
int plane_src_w, plane_src_h;
dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
- *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
- *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
+ *out_plane_scale_w = plane_src_w ? plane_state->crtc_w * 1000 / plane_src_w : 0;
+ *out_plane_scale_h = plane_src_h ? plane_state->crtc_h * 1000 / plane_src_h : 0;
}
/*
@@ -11408,6 +12357,30 @@ static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
return 0;
}
+static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state, *old_plane_state;
+
+ drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
+ new_plane_state = drm_atomic_get_plane_state(state, plane);
+ old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+
+ if (IS_ERR(new_plane_state) || !old_plane_state) {
+ drm_err(dev, "Failed to get plane state for plane %s\n", plane->name);
+ return false;
+ }
+
+ if (old_plane_state->fb && new_plane_state->fb &&
+ get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb))
+ return true;
+ }
+
+ return false;
+}
+
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
*
@@ -11605,10 +12578,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
- if (old_plane_state->fb && new_plane_state->fb &&
- get_mem_type(old_plane_state->fb) !=
- get_mem_type(new_plane_state->fb))
- lock_and_validation_needed = true;
ret = dm_update_plane_state(dc, state, plane,
old_plane_state,
@@ -11846,7 +12815,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n");
goto fail;
}
- status = dc_validate_global_state(dc, dm_state->context, true);
+ status = dc_validate_global_state(dc, dm_state->context, DC_VALIDATE_MODE_ONLY);
if (status != DC_OK) {
drm_dbg_atomic(dev, "DC global validation failure: %s (%d)",
dc_status_to_str(status), status);
@@ -11875,7 +12844,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
int j = state->num_private_objs-1;
dm_atomic_destroy_state(obj,
- state->private_objs[i].state);
+ state->private_objs[i].state_to_destroy);
/* If i is not at the end of the array then the
* last element needs to be moved to where i was
@@ -11886,7 +12855,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
state->private_objs[j];
state->private_objs[j].ptr = NULL;
- state->private_objs[j].state = NULL;
+ state->private_objs[j].state_to_destroy = NULL;
state->private_objs[j].old_state = NULL;
state->private_objs[j].new_state = NULL;
@@ -11903,9 +12872,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/*
* Only allow async flips for fast updates that don't change
- * the FB pitch, the DCC state, rotation, etc.
+ * the FB pitch, the DCC state, rotation, mem_type, etc.
*/
- if (new_crtc_state->async_flip && lock_and_validation_needed) {
+ if (new_crtc_state->async_flip &&
+ (lock_and_validation_needed ||
+ amdgpu_dm_crtc_mem_type_changed(dev, state, new_crtc_state))) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] async flips are only supported for fast updates\n",
crtc->base.id, crtc->name);
@@ -11967,7 +12938,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
if (!res) {
- DRM_ERROR("EDID CEA parser failed\n");
+ drm_err(adev_to_drm(dm->adev), "EDID CEA parser failed\n");
return false;
}
@@ -11975,7 +12946,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
if (output->type == DMUB_CMD__EDID_CEA_ACK) {
if (!output->ack.success) {
- DRM_ERROR("EDID CEA ack failed at offset %d\n",
+ drm_err(adev_to_drm(dm->adev), "EDID CEA ack failed at offset %d\n",
output->ack.offset);
}
} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
@@ -11987,7 +12958,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
} else {
- DRM_WARN("Unknown EDID CEA parser results\n");
+ drm_warn(adev_to_drm(dm->adev), "Unknown EDID CEA parser results\n");
return false;
}
@@ -12203,7 +13174,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
if (!connector->state) {
- DRM_ERROR("%s - Connector has no state", __func__);
+ drm_err(adev_to_drm(adev), "%s - Connector has no state", __func__);
goto update;
}
@@ -12225,7 +13196,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
dm_con_state = to_dm_connector_state(connector->state);
- if (!adev->dm.freesync_module)
+ if (!adev->dm.freesync_module || !dc_supports_vrr(sink->ctx->dce_version))
goto update;
edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
@@ -12237,10 +13208,14 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
sink->sink_signal == SIGNAL_TYPE_EDP)) {
- amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
- amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
- if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
- freesync_capable = true;
+ if (amdgpu_dm_connector->dc_link &&
+ amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) {
+ amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
+ }
+
parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
if (vsdb_info.replay_mode) {
@@ -12384,7 +13359,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
}
if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
- DRM_ERROR("wait_for_completion_timeout timeout!");
+ drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!");
*operation_result = AUX_RET_ERROR_TIMEOUT;
goto out;
}
@@ -12394,31 +13369,24 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
* Transient states before tunneling is enabled could
* lead to this error. We can ignore this for now.
*/
- if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
- DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
+ if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) {
+ drm_warn(adev_to_drm(adev), "DPIA AUX failed on 0x%x(%d), error %d\n",
payload->address, payload->length,
p_notify->result);
}
- *operation_result = AUX_RET_ERROR_INVALID_REPLY;
+ *operation_result = p_notify->result;
goto out;
}
+ payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF;
+ if (adev->dm.dmub_notify->aux_reply.command & 0xF0)
+ /* The reply is stored in the top nibble of the command. */
+ payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;
- payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
- if (!payload->write && p_notify->aux_reply.length &&
- (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
-
- if (payload->length != p_notify->aux_reply.length) {
- DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
- p_notify->aux_reply.length,
- payload->address, payload->length);
- *operation_result = AUX_RET_ERROR_INVALID_REPLY;
- goto out;
- }
-
+	/* A write request may also receive a byte indicating the number of bytes partially written. */
+ if (p_notify->aux_reply.length)
memcpy(payload->data, p_notify->aux_reply.data,
p_notify->aux_reply.length);
- }
/* success */
ret = p_notify->aux_reply.length;
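A minimal sketch (not part of the patch) of the nibble handling above: the DMUB reply code may arrive in either nibble of the command byte, and a populated top nibble wins. The helper name is hypothetical.

	/* Hypothetical helper mirroring the reply decoding above. */
	static inline u8 example_aux_reply_code(u8 command)
	{
		return (command & 0xF0) ? (command >> 4) & 0xF : command & 0xF;
	}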
@@ -12429,6 +13397,79 @@ out:
return ret;
}
+static void abort_fused_io(
+ struct dc_context *ctx,
+ const struct dmub_cmd_fused_request *request
+)
+{
+ union dmub_rb_cmd command = { 0 };
+ struct dmub_rb_cmd_fused_io *io = &command.fused_io;
+
+ io->header.type = DMUB_CMD__FUSED_IO;
+ io->header.sub_type = DMUB_CMD__FUSED_IO_ABORT;
+ io->header.payload_bytes = sizeof(*io) - sizeof(io->header);
+ io->request = *request;
+ dm_execute_dmub_cmd(ctx, &command, DM_DMUB_WAIT_TYPE_NO_WAIT);
+}
+
+static bool execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_context *ctx,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+)
+{
+ const uint8_t ddc_line = commands[0].fused_io.request.u.aux.ddc_line;
+
+ if (ddc_line >= ARRAY_SIZE(dev->dm.fused_io))
+ return false;
+
+ struct fused_io_sync *sync = &dev->dm.fused_io[ddc_line];
+ struct dmub_rb_cmd_fused_io *first = &commands[0].fused_io;
+ const bool result = dm_execute_dmub_cmd_list(ctx, count, commands, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
+ && first->header.ret_status
+ && first->request.status == FUSED_REQUEST_STATUS_SUCCESS;
+
+ if (!result)
+ return false;
+
+ while (wait_for_completion_timeout(&sync->replied, usecs_to_jiffies(timeout_us))) {
+ reinit_completion(&sync->replied);
+
+ struct dmub_cmd_fused_request *reply = (struct dmub_cmd_fused_request *) sync->reply_data;
+
+ static_assert(sizeof(*reply) <= sizeof(sync->reply_data), "Size mismatch");
+
+ if (reply->identifier == first->request.identifier) {
+ first->request = *reply;
+ return true;
+ }
+ }
+
+ reinit_completion(&sync->replied);
+ first->request.status = FUSED_REQUEST_STATUS_TIMEOUT;
+ abort_fused_io(ctx, &first->request);
+ return false;
+}
+
+bool amdgpu_dm_execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us)
+{
+ struct amdgpu_display_manager *dm = &dev->dm;
+
+ mutex_lock(&dm->dpia_aux_lock);
+
+ const bool result = execute_fused_io(dev, link->ctx, commands, count, timeout_us);
+
+ mutex_unlock(&dm->dpia_aux_lock);
+ return result;
+}
+
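A hypothetical caller sketch for the helper above: build a single fused-IO request, stamp an identifier so the reply can be matched, and execute it with a 10 ms timeout. Only fields used elsewhere in this patch are referenced; everything else is an assumption.

	static bool example_fused_aux_read(struct amdgpu_device *adev,
					   struct dc_link *link)
	{
		union dmub_rb_cmd cmd = { 0 };

		cmd.fused_io.header.type = DMUB_CMD__FUSED_IO;
		cmd.fused_io.request.identifier = 1;	/* matched against the reply */

		return amdgpu_dm_execute_fused_io(adev, link, &cmd, 1, 10000);
	}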
int amdgpu_dm_process_dmub_set_config_sync(
struct dc_context *ctx,
unsigned int link_index,
@@ -12447,7 +13488,7 @@ int amdgpu_dm_process_dmub_set_config_sync(
ret = 0;
*operation_result = adev->dm.dmub_notify->sc_status;
} else {
- DRM_ERROR("wait_for_completion_timeout timeout!");
+ drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!");
ret = -1;
*operation_result = SET_CONFIG_UNKNOWN_ERROR;
}
@@ -12467,3 +13508,10 @@ bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
{
return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
}
+
+void dm_acpi_process_phy_transition_interlock(
+ const struct dc_context *ctx,
+ struct dm_process_phy_transition_init_params process_phy_transition_init_params)
+{
+ // Not yet implemented
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 6464a8378387..ef97cede9926 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
*
@@ -50,7 +51,7 @@
#define AMDGPU_DM_MAX_NUM_EDP 2
-#define AMDGPU_DMUB_NOTIFICATION_MAX 7
+#define AMDGPU_DMUB_NOTIFICATION_MAX 8
#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40
@@ -58,6 +59,7 @@
#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)
+#define AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS 1500
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
@@ -81,6 +83,7 @@ struct amdgpu_bo;
struct dmub_srv;
struct dc_plane_state;
struct dmub_notification;
+struct dmub_cmd_fused_request;
struct amd_vsdb_block {
unsigned char ieee_id[3];
@@ -152,6 +155,32 @@ struct idle_workqueue {
};
/**
+ * struct vupdate_offload_work - Work data for offloading a task from the vupdate handler
+ * @work: Kernel work data for the work event
+ * @adev: amdgpu_device back pointer
+ * @stream: DC stream associated with the crtc
+ * @adjust: DC CRTC timing adjust to be applied to the crtc
+ */
+struct vupdate_offload_work {
+ struct work_struct work;
+ struct amdgpu_device *adev;
+ struct dc_stream_state *stream;
+ struct dc_crtc_timing_adjust *adjust;
+};
+
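A hypothetical queueing sketch for the struct above, assuming a handler of type work_func_t: the vupdate interrupt packages the adjustment and defers it to process context. Error handling and the handler body are elided.

	static bool example_queue_vupdate_offload(struct amdgpu_device *adev,
						  struct dc_stream_state *stream,
						  struct dc_crtc_timing_adjust *adjust,
						  work_func_t handler)
	{
		struct vupdate_offload_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);

		if (!w)
			return false;

		INIT_WORK(&w->work, handler);
		w->adev = adev;
		w->stream = stream;
		w->adjust = adjust;
		return queue_work(system_highpri_wq, &w->work);
	}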
+#define MAX_LUMINANCE_DATA_POINTS 99
+
+/**
+ * struct amdgpu_dm_luminance_data - Custom luminance data
+ * @luminance: Luminance in percent
+ * @input_signal: Input signal in range 0-255
+ */
+struct amdgpu_dm_luminance_data {
+ u8 luminance;
+ u8 input_signal;
+} __packed;
+
+/**
* struct amdgpu_dm_backlight_caps - Information about backlight
*
* Describe the backlight support for ACPI or eDP AUX.
@@ -188,6 +217,11 @@ struct amdgpu_dm_backlight_caps {
*/
bool aux_support;
/**
+ * @brightness_mask: After deriving brightness, OR it with this mask.
+	 * Workaround for panels that have issues with certain brightness values.
+ */
+ u32 brightness_mask;
+ /**
* @ac_level: the default brightness if booted on AC
*/
u8 ac_level;
@@ -195,6 +229,14 @@ struct amdgpu_dm_backlight_caps {
* @dc_level: the default brightness if booted on DC
*/
u8 dc_level;
+ /**
+ * @data_points: the number of custom luminance data points
+ */
+ u8 data_points;
+ /**
+ * @luminance_data: custom luminance data
+ */
+ struct amdgpu_dm_luminance_data luminance_data[MAX_LUMINANCE_DATA_POINTS];
};
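A hypothetical lookup sketch for the structs above, assuming luminance_data[] is sorted by input_signal: piecewise-linear interpolation of the luminance percentage for a given 0-255 input signal. Not part of the patch.

	static int example_luminance_for_signal(const struct amdgpu_dm_backlight_caps *caps,
						u8 signal)
	{
		int i;

		for (i = 1; i < caps->data_points; i++) {
			const struct amdgpu_dm_luminance_data *lo = &caps->luminance_data[i - 1];
			const struct amdgpu_dm_luminance_data *hi = &caps->luminance_data[i];

			if (signal > hi->input_signal)
				continue;

			if (hi->input_signal == lo->input_signal)
				return lo->luminance;

			return lo->luminance +
			       (hi->luminance - lo->luminance) *
			       (signal - lo->input_signal) /
			       (hi->input_signal - lo->input_signal);
		}

		/* Past the last sample: clamp to the final data point. */
		return caps->data_points ?
		       caps->luminance_data[caps->data_points - 1].luminance : 0;
	}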
/**
@@ -256,6 +298,10 @@ struct hpd_rx_irq_offload_work {
* @offload_wq: offload work queue that this work is queued to
*/
struct hpd_rx_irq_offload_work_queue *offload_wq;
+ /**
+ * @adev: amdgpu_device pointer
+ */
+ struct amdgpu_device *adev;
};
/**
@@ -541,12 +587,12 @@ struct amdgpu_display_manager {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
- * @secure_display_ctxs:
+ * @secure_display_ctx:
*
- * Store the ROI information and the work_struct to command dmub and psp for
- * all crtcs.
+	 * Store secure display relevant info, e.g. the ROI information,
+	 * the work_struct to command dmub, etc.
*/
- struct secure_display_context *secure_display_ctxs;
+ struct secure_display_context secure_display_ctx;
#endif
/**
* @hpd_rx_offload_wq:
@@ -594,6 +640,13 @@ struct amdgpu_display_manager {
bool aux_hpd_discon_quirk;
/**
+ * @edp0_on_dp1_quirk:
+ *
+	 * Quirk for platforms that put eDP0 on DP1.
+ */
+ bool edp0_on_dp1_quirk;
+
+ /**
* @dpia_aux_lock:
*
* Guards access to DPIA AUX
@@ -604,8 +657,26 @@ struct amdgpu_display_manager {
* @bb_from_dmub:
*
* Bounding box data read from dmub during early initialization for DCN4+
+	 * Data is stored as a byte array that should be cast to the appropriate bb struct.
*/
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
+
+ /**
+ * @oem_i2c:
+ *
+ * OEM i2c bus
+ */
+ struct amdgpu_i2c_adapter *oem_i2c;
+
+ /**
+ * @fused_io:
+ *
+ * dmub fused io interface
+ */
+ struct fused_io_sync {
+ struct completion replied;
+ char reply_data[0x40]; // Cannot include dmub_cmd here
+ } fused_io[8];
};
enum dsc_clock_force_state {
@@ -671,6 +742,8 @@ struct amdgpu_dm_connector {
uint32_t connector_id;
int bl_idx;
+ struct cec_notifier *notifier;
+
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
const struct drm_edid *drm_edid;
@@ -697,8 +770,13 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *mst_output_port;
struct amdgpu_dm_connector *mst_root;
struct drm_dp_aux *dsc_aux;
+ uint32_t mst_local_bw;
+ uint16_t vc_full_pbn;
struct mutex handle_mst_msg_ready;
+ /* branch device specific data */
+ uint32_t branch_ieee_oui;
+
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
@@ -722,6 +800,7 @@ struct amdgpu_dm_connector {
bool fake_enable;
bool force_yuv420_output;
+ bool force_yuv422_output;
struct dsc_preferred_settings dsc_settings;
union dp_downstream_port_present mst_downstream_port_present;
/* Cached display modes */
@@ -741,6 +820,11 @@ struct amdgpu_dm_connector {
bool pack_sdp_v1_3;
enum adaptive_sync_type as_type;
struct amdgpu_hdmi_vsdb_info vsdb_info;
+
+ /* HDMI HPD debounce support */
+ unsigned int hdmi_hpd_debounce_delay_ms;
+ struct delayed_work hdmi_hpd_debounce_work;
+ struct dc_sink *hdmi_prev_sink;
};
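A hypothetical debounce sketch for the fields above (not the driver's actual handler): every HPD pulse re-arms the delayed work, so the heavy detection path only runs once the line has been quiet for the configured delay.

	static void example_hpd_debounce(struct amdgpu_dm_connector *aconnector)
	{
		mod_delayed_work(system_wq, &aconnector->hdmi_hpd_debounce_work,
				 msecs_to_jiffies(aconnector->hdmi_hpd_debounce_delay_ms));
	}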
static inline void amdgpu_dm_set_mst_status(uint8_t *status,
@@ -915,6 +999,7 @@ struct dm_connector_state {
bool underscan_enable;
bool freesync_capable;
bool update_hdcp;
+ bool abm_sysfs_forbidden;
uint8_t abm_level;
int vcpi_slots;
uint64_t pbn;
@@ -945,7 +1030,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
int link_index);
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
void dm_restore_drm_connector_state(struct drm_device *dev,
struct drm_connector *connector);
@@ -969,6 +1054,8 @@ void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_create_color_properties(struct amdgpu_device *adev);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
+int amdgpu_dm_check_crtc_color_mgmt(struct dm_crtc_state *crtc,
+ bool check_only);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state);
@@ -981,11 +1068,19 @@ extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index,
struct aux_payload *payload, enum aux_return_code_type *operation_result);
+bool amdgpu_dm_execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+);
+
int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index,
struct set_config_cmd_payload *payload, enum set_config_status *operation_result);
struct dc_stream_state *
- create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ create_validate_stream_for_sink(struct drm_connector *connector,
const struct drm_display_mode *drm_mode,
const struct dm_connector_state *dm_state,
const struct dc_stream_state *old_stream);
@@ -1010,4 +1105,10 @@ void dm_free_gpu_mem(struct amdgpu_device *adev,
bool amdgpu_dm_is_headless(struct amdgpu_device *adev);
+void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector);
+void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector);
+int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector);
+
+void retrieve_dmi_info(struct amdgpu_display_manager *dm);
+
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index ebabfe3a512f..1dcc79b35225 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -25,13 +26,39 @@
#include "amdgpu.h"
#include "amdgpu_mode.h"
#include "amdgpu_dm.h"
+#include "amdgpu_dm_colorop.h"
#include "dc.h"
#include "modules/color/color_gamma.h"
-#include "basics/conversion.h"
/**
* DOC: overview
*
+ * We have three types of color management in the AMD display driver.
+ * 1. the legacy &drm_crtc DEGAMMA, CTM, and GAMMA properties
+ * 2. AMD driver private color management on &drm_plane and &drm_crtc
+ * 3. AMD plane color pipeline
+ *
+ * The CRTC properties are the original color management. When they were
+ * implemented per-plane color management was not a thing yet. Because
+ * of that we could get away with plumbing the DEGAMMA and CTM
+ * properties to pre-blending HW functions. This is incompatible with
+ * per-plane color management, such as via the AMD private properties or
+ * the new drm_plane color pipeline. The only compatible CRTC property
+ * with per-plane color management is the GAMMA property as it is
+ * applied post-blending.
+ *
+ * The AMD driver private color management properties are only exposed
+ * when the kernel is built explicitly with -DAMD_PRIVATE_COLOR. They
+ * are temporary building blocks on the path to full-fledged &drm_plane
+ * and &drm_crtc color pipelines and lay the driver's groundwork for the
+ * color pipelines.
+ *
+ * The AMD plane color pipeline describes AMD's &drm_colorops via the
+ * &drm_plane's COLOR_PIPELINE property.
+ *
+ * drm_crtc Properties
+ * -------------------
+ *
* The DC interface to HW gives us the following color management blocks
* per pipe (surface):
*
@@ -42,36 +69,93 @@
* - Surface regamma LUT (normalized)
* - Output CSC (normalized)
*
- * But these aren't a direct mapping to DRM color properties. The current DRM
- * interface exposes CRTC degamma, CRTC CTM and CRTC regamma while our hardware
- * is essentially giving:
+ * But these aren't a direct mapping to DRM color properties. The
+ * current DRM interface exposes CRTC degamma, CRTC CTM and CRTC regamma
+ * while our hardware is essentially giving:
*
* Plane CTM -> Plane degamma -> Plane CTM -> Plane regamma -> Plane CTM
*
- * The input gamma LUT block isn't really applicable here since it operates
- * on the actual input data itself rather than the HW fp representation. The
- * input and output CSC blocks are technically available to use as part of
- * the DC interface but are typically used internally by DC for conversions
- * between color spaces. These could be blended together with user
- * adjustments in the future but for now these should remain untouched.
+ * The input gamma LUT block isn't really applicable here since it
+ * operates on the actual input data itself rather than the HW fp
+ * representation. The input and output CSC blocks are technically
+ * available to use as part of the DC interface but are typically used
+ * internally by DC for conversions between color spaces. These could be
+ * blended together with user adjustments in the future but for now
+ * these should remain untouched.
+ *
+ * The pipe blending also happens after these blocks so we don't
+ * actually support any CRTC props with correct blending with multiple
+ * planes - but we can still support CRTC color management properties in
+ * DM in most single plane cases correctly with clever management of the
+ * DC interface in DM.
+ *
+ * As per DRM documentation, blocks should be in hardware bypass when
+ * their respective property is set to NULL. A linear DGM/RGM LUT should
+ * also be considered as putting the respective block into bypass mode.
+ *
+ * This means that the following configuration is assumed to be the
+ * default:
+ *
+ * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ... CRTC
+ * DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass
+ *
+ * AMD Private Color Management on drm_plane
+ * -----------------------------------------
+ *
+ * The AMD private color management properties on a &drm_plane are:
+ *
+ * - AMD_PLANE_DEGAMMA_LUT
+ * - AMD_PLANE_DEGAMMA_LUT_SIZE
+ * - AMD_PLANE_DEGAMMA_TF
+ * - AMD_PLANE_HDR_MULT
+ * - AMD_PLANE_CTM
+ * - AMD_PLANE_SHAPER_LUT
+ * - AMD_PLANE_SHAPER_LUT_SIZE
+ * - AMD_PLANE_SHAPER_TF
+ * - AMD_PLANE_LUT3D
+ * - AMD_PLANE_LUT3D_SIZE
+ * - AMD_PLANE_BLEND_LUT
+ * - AMD_PLANE_BLEND_LUT_SIZE
+ * - AMD_PLANE_BLEND_TF
+ *
+ * The AMD private color management property on a &drm_crtc is:
*
- * The pipe blending also happens after these blocks so we don't actually
- * support any CRTC props with correct blending with multiple planes - but we
- * can still support CRTC color management properties in DM in most single
- * plane cases correctly with clever management of the DC interface in DM.
+ * - AMD_CRTC_REGAMMA_TF
*
- * As per DRM documentation, blocks should be in hardware bypass when their
- * respective property is set to NULL. A linear DGM/RGM LUT should also
- * considered as putting the respective block into bypass mode.
+ * Use of these properties is discouraged.
*
- * This means that the following
- * configuration is assumed to be the default:
+ * AMD plane color pipeline
+ * ------------------------
+ *
+ * The AMD &drm_plane color pipeline is advertised for DCN generations
+ * 3.0 and newer. It exposes these elements in this order:
+ *
+ * 1. 1D curve colorop
+ * 2. Multiplier
+ * 3. 3x4 CTM
+ * 4. 1D curve colorop
+ * 5. 1D LUT
+ * 6. 3D LUT
+ * 7. 1D curve colorop
+ * 8. 1D LUT
+ *
+ * The multiplier (#2) is a simple multiplier that is applied to all
+ * channels.
+ *
+ * The 3x4 CTM (#3) is a simple 3x4 matrix.
+ *
+ * #1 and #7 are non-linear to linear curves. #4 is a linear to
+ * non-linear curve. They support sRGB, PQ, and BT.709/BT.2020 EOTFs or
+ * their inverse.
+ *
+ * The 1D LUTs (#5 and #8) are plain 4096 entry LUTs.
+ *
+ * The 3D LUT (#6) is a tetrahedrally interpolated 17x17x17 LUT.
*
- * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ...
- * CRTC DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass
*/
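As an illustration of the pipeline described above (not part of the patch), the advertised colorops form a singly linked list that can be walked through the next pointers, exactly as amdgpu_dm_plane_set_colorop_properties() does further below:

	/* Count the colorops a plane advertises: 8 with a HW 3D LUT, 5 without. */
	static int example_count_colorops(const struct drm_plane_state *state)
	{
		const struct drm_colorop *op;
		int n = 0;

		for (op = state->color_pipeline; op; op = op->next)
			n++;

		return n;
	}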
#define MAX_DRM_LUT_VALUE 0xFFFF
+#define MAX_DRM_LUT32_VALUE 0xFFFFFFFF
#define SDR_WHITE_LEVEL_INIT_VALUE 80
/**
@@ -342,6 +426,21 @@ __extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size)
}
/**
+ * __extract_blob_lut32 - Extracts the DRM lut and lut size from a blob.
+ * @blob: DRM color mgmt property blob
+ * @size: lut size
+ *
+ * Returns:
+ * DRM LUT or NULL
+ */
+static const struct drm_color_lut32 *
+__extract_blob_lut32(const struct drm_property_blob *blob, uint32_t *size)
+{
+ *size = blob ? drm_color_lut32_size(blob) : 0;
+ return blob ? (struct drm_color_lut32 *)blob->data : NULL;
+}
+
+/**
* __is_lut_linear - check if the given lut is a linear mapping of values
* @lut: given lut to check values
* @size: lut size
@@ -415,6 +514,24 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut,
}
/**
+ * __drm_lut32_to_dc_gamma - convert a drm_color_lut32 to dc_gamma.
+ * @lut: DRM 32-bit lookup table for color conversion
+ * @gamma: DC gamma to set entries
+ *
+ * Each 32-bit channel value is normalized against MAX_DRM_LUT32_VALUE.
+ */
+static void __drm_lut32_to_dc_gamma(const struct drm_color_lut32 *lut, struct dc_gamma *gamma)
+{
+ int i;
+
+ for (i = 0; i < MAX_COLOR_LUT_ENTRIES; i++) {
+ gamma->entries.red[i] = dc_fixpt_from_fraction(lut[i].red, MAX_DRM_LUT32_VALUE);
+ gamma->entries.green[i] = dc_fixpt_from_fraction(lut[i].green, MAX_DRM_LUT32_VALUE);
+ gamma->entries.blue[i] = dc_fixpt_from_fraction(lut[i].blue, MAX_DRM_LUT32_VALUE);
+ }
+}
+
+/**
* __drm_ctm_to_dc_matrix - converts a DRM CTM to a DC CSC float matrix
* @ctm: DRM color transformation matrix
* @matrix: DC CSC float matrix
@@ -566,12 +683,68 @@ static int __set_output_tf(struct dc_transfer_func *func,
return res ? 0 : -ENOMEM;
}
-static int amdgpu_dm_set_atomic_regamma(struct dc_stream_state *stream,
+/**
+ * __set_output_tf_32 - calculates the output transfer function based on expected input space.
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut
+ * @has_rom: if ROM can be used for hardcoded curve
+ *
+ * Returns:
+ * 0 in case of success. -ENOMEM if fails.
+ */
+static int __set_output_tf_32(struct dc_transfer_func *func,
+ const struct drm_color_lut32 *lut, uint32_t lut_size,
+ bool has_rom)
+{
+ struct dc_gamma *gamma = NULL;
+ struct calculate_buffer cal_buffer = {0};
+ bool res;
+
+ cal_buffer.buffer_index = -1;
+
+ if (lut_size) {
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ gamma->num_entries = lut_size;
+ __drm_lut32_to_dc_gamma(lut, gamma);
+ }
+
+ if (func->tf == TRANSFER_FUNCTION_LINEAR) {
+ /*
+ * Color module doesn't like calculating regamma params
+ * on top of a linear input. But degamma params can be used
+ * instead to simulate this.
+ */
+ if (gamma)
+ gamma->type = GAMMA_CUSTOM;
+ res = mod_color_calculate_degamma_params(NULL, func,
+ gamma, gamma != NULL);
+ } else {
+ /*
+ * Assume sRGB. The actual mapping will depend on whether the
+ * input was legacy or not.
+ */
+ if (gamma)
+ gamma->type = GAMMA_CS_TFM_1D;
+ res = mod_color_calculate_regamma_params(func, gamma, gamma != NULL,
+ has_rom, NULL, &cal_buffer);
+ }
+
+ if (gamma)
+ dc_gamma_release(&gamma);
+
+ return res ? 0 : -ENOMEM;
+}
+
+
+static int amdgpu_dm_set_atomic_regamma(struct dc_transfer_func *out_tf,
const struct drm_color_lut *regamma_lut,
uint32_t regamma_size, bool has_rom,
enum dc_transfer_func_predefined tf)
{
- struct dc_transfer_func *out_tf = &stream->out_transfer_func;
int ret = 0;
if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) {
@@ -639,6 +812,42 @@ static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *f
return res ? 0 : -ENOMEM;
}
+/**
+ * __set_input_tf_32 - calculates the input transfer function based on expected
+ * input space.
+ * @caps: dc color capabilities
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut.
+ *
+ * Returns:
+ * 0 in case of success. -ENOMEM if fails.
+ */
+static int __set_input_tf_32(struct dc_color_caps *caps, struct dc_transfer_func *func,
+ const struct drm_color_lut32 *lut, uint32_t lut_size)
+{
+ struct dc_gamma *gamma = NULL;
+ bool res;
+
+ if (lut_size) {
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ gamma->type = GAMMA_CUSTOM;
+ gamma->num_entries = lut_size;
+
+ __drm_lut32_to_dc_gamma(lut, gamma);
+ }
+
+ res = mod_color_calculate_degamma_params(caps, func, gamma, gamma != NULL);
+
+ if (gamma)
+ dc_gamma_release(&gamma);
+
+ return res ? 0 : -ENOMEM;
+}
+
static enum dc_transfer_func_predefined
amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf)
{
@@ -668,6 +877,27 @@ amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf)
}
}
+static enum dc_transfer_func_predefined
+amdgpu_colorop_tf_to_dc_tf(enum drm_colorop_curve_1d_type tf)
+{
+ switch (tf) {
+ case DRM_COLOROP_1D_CURVE_SRGB_EOTF:
+ case DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF:
+ return TRANSFER_FUNCTION_SRGB;
+ case DRM_COLOROP_1D_CURVE_PQ_125_EOTF:
+ case DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF:
+ return TRANSFER_FUNCTION_PQ;
+ case DRM_COLOROP_1D_CURVE_BT2020_INV_OETF:
+ case DRM_COLOROP_1D_CURVE_BT2020_OETF:
+ return TRANSFER_FUNCTION_BT709;
+ case DRM_COLOROP_1D_CURVE_GAMMA22:
+ case DRM_COLOROP_1D_CURVE_GAMMA22_INV:
+ return TRANSFER_FUNCTION_GAMMA22;
+ default:
+ return TRANSFER_FUNCTION_LINEAR;
+ }
+}
+
static void __to_dc_lut3d_color(struct dc_rgb *rgb,
const struct drm_color_lut lut,
int bit_precision)
@@ -721,6 +951,59 @@ static void __drm_3dlut_to_dc_3dlut(const struct drm_color_lut *lut,
__to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth);
}
+static void __to_dc_lut3d_32_color(struct dc_rgb *rgb,
+ const struct drm_color_lut32 lut,
+ int bit_precision)
+{
+ rgb->red = drm_color_lut32_extract(lut.red, bit_precision);
+ rgb->green = drm_color_lut32_extract(lut.green, bit_precision);
+ rgb->blue = drm_color_lut32_extract(lut.blue, bit_precision);
+}
+
+static void __drm_3dlut32_to_dc_3dlut(const struct drm_color_lut32 *lut,
+ uint32_t lut3d_size,
+ struct tetrahedral_params *params,
+ bool use_tetrahedral_9,
+ int bit_depth)
+{
+ struct dc_rgb *lut0;
+ struct dc_rgb *lut1;
+ struct dc_rgb *lut2;
+ struct dc_rgb *lut3;
+ int lut_i, i;
+
+
+ if (use_tetrahedral_9) {
+ lut0 = params->tetrahedral_9.lut0;
+ lut1 = params->tetrahedral_9.lut1;
+ lut2 = params->tetrahedral_9.lut2;
+ lut3 = params->tetrahedral_9.lut3;
+ } else {
+ lut0 = params->tetrahedral_17.lut0;
+ lut1 = params->tetrahedral_17.lut1;
+ lut2 = params->tetrahedral_17.lut2;
+ lut3 = params->tetrahedral_17.lut3;
+ }
+
+ for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) {
+ /*
+		 * The 3D LUT RGB values are distributed along four arrays
+		 * lut0-3, where the first holds 1229 entries and the other
+		 * three hold 1228 each. The bit depth supported per 3D LUT
+		 * channel is 12-bit, but DC also supports 10-bit.
+ *
+ * TODO: improve color pipeline API to enable the userspace set
+ * bit depth and 3D LUT size/stride, as specified by VA-API.
+ */
+ __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth);
+ __to_dc_lut3d_32_color(&lut1[lut_i], lut[i + 1], bit_depth);
+ __to_dc_lut3d_32_color(&lut2[lut_i], lut[i + 2], bit_depth);
+ __to_dc_lut3d_32_color(&lut3[lut_i], lut[i + 3], bit_depth);
+ }
+ /* lut0 has 1229 points (lut_size/4 + 1) */
+ __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth);
+}
+
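A quick arithmetic check of the distribution above (illustrative only): a 17x17x17 LUT holds 4913 entries, and 4913 = 1229 + 3 * 1228, so the one extra entry lands in lut0.

	static_assert(17 * 17 * 17 == 1229 + 3 * 1228, "lut0 holds the extra entry");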
/* amdgpu_dm_atomic_lut3d - set DRM 3D LUT to DC stream
* @drm_lut3d: user 3D LUT
* @drm_lut3d_size: size of 3D LUT
@@ -821,7 +1104,7 @@ int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev,
struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
const struct drm_color_lut *shaper = NULL, *lut3d = NULL;
uint32_t exp_size, size, dim_size = MAX_COLOR_3DLUT_SIZE;
- bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut;
+ bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut || adev->dm.dc->caps.color.mpc.preblend;
/* shaper LUT is only available if 3D LUT color caps */
exp_size = has_3dlut ? MAX_COLOR_LUT_ENTRIES : 0;
@@ -885,33 +1168,33 @@ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
}
/**
- * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
+ * amdgpu_dm_check_crtc_color_mgmt: Check if DRM color props are programmable by DC.
* @crtc: amdgpu_dm crtc state
+ * @check_only: only check the color state without updating the dc stream
*
- * With no plane level color management properties we're free to use any
- * of the HW blocks as long as the CRTC CTM always comes before the
- * CRTC RGM and after the CRTC DGM.
- *
- * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
- * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
- * - The CRTC CTM will be placed in the gamut remap block if it is non-linear.
+ * This function verifies the CRTC LUT sizes, checks that there is enough
+ * space for the output transfer function, and checks that its parameters can
+ * be calculated by the AMD color module. It also adjusts some settings for
+ * programming CRTC degamma at the plane stage, using the plane DGM block.
*
* The RGM block is typically more fully featured and accurate across
* all ASICs - DCE can't support a custom non-linear CRTC DGM.
*
* For supporting both plane level color management and CRTC level color
- * management at once we have to either restrict the usage of CRTC properties
- * or blend adjustments together.
+ * management at once we have to either restrict the usage of some CRTC
+ * properties or blend adjustments together.
*
* Returns:
- * 0 on success. Error code if setup fails.
+ * 0 on success. Error code if validation fails.
*/
-int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
+int amdgpu_dm_check_crtc_color_mgmt(struct dm_crtc_state *crtc,
+ bool check_only)
{
struct dc_stream_state *stream = crtc->stream;
struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev);
bool has_rom = adev->asic_type <= CHIP_RAVEN;
- struct drm_color_ctm *ctm = NULL;
+ struct dc_transfer_func *out_tf;
const struct drm_color_lut *degamma_lut, *regamma_lut;
uint32_t degamma_size, regamma_size;
bool has_regamma, has_degamma;
@@ -940,6 +1223,14 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
crtc->cm_has_degamma = false;
crtc->cm_is_degamma_srgb = false;
+ if (check_only) {
+ out_tf = kvzalloc(sizeof(*out_tf), GFP_KERNEL);
+ if (!out_tf)
+ return -ENOMEM;
+ } else {
+ out_tf = &stream->out_transfer_func;
+ }
+
/* Setup regamma and degamma. */
if (is_legacy) {
/*
@@ -954,8 +1245,8 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
* inverse color ramp in legacy userspace.
*/
crtc->cm_is_degamma_srgb = true;
- stream->out_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS;
- stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
+ out_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ out_tf->tf = TRANSFER_FUNCTION_SRGB;
/*
* Note: although we pass has_rom as parameter here, we never
* actually use ROM because the color module only takes the ROM
@@ -963,16 +1254,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
*
* See more in mod_color_calculate_regamma_params()
*/
- r = __set_legacy_tf(&stream->out_transfer_func, regamma_lut,
+ r = __set_legacy_tf(out_tf, regamma_lut,
regamma_size, has_rom);
- if (r)
- return r;
} else {
regamma_size = has_regamma ? regamma_size : 0;
- r = amdgpu_dm_set_atomic_regamma(stream, regamma_lut,
+ r = amdgpu_dm_set_atomic_regamma(out_tf, regamma_lut,
regamma_size, has_rom, tf);
- if (r)
- return r;
}
/*
@@ -981,6 +1268,43 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
* have to place the CTM in the OCSC in that case.
*/
crtc->cm_has_degamma = has_degamma;
+ if (check_only)
+ kvfree(out_tf);
+
+ return r;
+}
+
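A hypothetical usage sketch: atomic-check paths can validate the CRTC color state into scratch storage without touching the stream, while commit reuses the helper with check_only=false, as amdgpu_dm_update_crtc_color_mgmt() does below.

	static int example_validate_crtc_color(struct dm_crtc_state *dm_crtc_state)
	{
		return amdgpu_dm_check_crtc_color_mgmt(dm_crtc_state, true);
	}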
+/**
+ * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
+ * @crtc: amdgpu_dm crtc state
+ *
+ * With no plane level color management properties we're free to use any
+ * of the HW blocks as long as the CRTC CTM always comes before the
+ * CRTC RGM and after the CRTC DGM.
+ *
+ * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
+ * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
+ * - The CRTC CTM will be placed in the gamut remap block if it is non-linear.
+ *
+ * The RGM block is typically more fully featured and accurate across
+ * all ASICs - DCE can't support a custom non-linear CRTC DGM.
+ *
+ * For supporting both plane level color management and CRTC level color
+ * management at once we have to either restrict the usage of CRTC properties
+ * or blend adjustments together.
+ *
+ * Returns:
+ * 0 on success. Error code if setup fails.
+ */
+int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
+{
+ struct dc_stream_state *stream = crtc->stream;
+ struct drm_color_ctm *ctm = NULL;
+ int ret;
+
+ ret = amdgpu_dm_check_crtc_color_mgmt(crtc, false);
+ if (ret)
+ return ret;
/* Setup CRTC CTM. */
if (crtc->base.ctm) {
@@ -1138,6 +1462,360 @@ __set_dm_plane_degamma(struct drm_plane_state *plane_state,
}
static int
+__set_colorop_in_tf_1d_curve(struct dc_plane_state *dc_plane_state,
+ struct drm_colorop_state *colorop_state)
+{
+ struct dc_transfer_func *tf = &dc_plane_state->in_transfer_func;
+ struct drm_colorop *colorop = colorop_state->colorop;
+ struct drm_device *drm = colorop->dev;
+
+ if (colorop->type != DRM_COLOROP_1D_CURVE)
+ return -EINVAL;
+
+ if (!(BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs))
+ return -EINVAL;
+
+ if (colorop_state->bypass) {
+ tf->type = TF_TYPE_BYPASS;
+ tf->tf = TRANSFER_FUNCTION_LINEAR;
+ return 0;
+ }
+
+ drm_dbg(drm, "Degamma colorop with ID: %d\n", colorop->base.id);
+
+ tf->type = TF_TYPE_PREDEFINED;
+ tf->tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
+
+ return 0;
+}
+
+static int
+__set_dm_plane_colorop_degamma(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ int i = 0;
+
+ old_colorop = colorop;
+
+ /* 1st op: 1d curve - degamma */
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs)) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (!colorop_state)
+ return -EINVAL;
+
+ return __set_colorop_in_tf_1d_curve(dc_plane_state, colorop_state);
+}
+
+static int
+__set_dm_plane_colorop_3x4_matrix(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ const struct drm_device *dev = colorop->dev;
+ const struct drm_property_blob *blob;
+ struct drm_color_ctm_3x4 *ctm = NULL;
+ int i = 0;
+
+ /* 3x4 matrix */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_CTM_3X4) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_CTM_3X4) {
+ drm_dbg(dev, "3x4 matrix colorop with ID: %d\n", colorop->base.id);
+ blob = colorop_state->data;
+ if (blob->length == sizeof(struct drm_color_ctm_3x4)) {
+ ctm = (struct drm_color_ctm_3x4 *) blob->data;
+ __drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix);
+ dc_plane_state->gamut_remap_matrix.enable_remap = true;
+ dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
+ } else {
+ drm_warn(dev, "blob->length (%zu) isn't equal to drm_color_ctm_3x4 (%zu)\n",
+ blob->length, sizeof(struct drm_color_ctm_3x4));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
+__set_dm_plane_colorop_multiplier(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ const struct drm_device *dev = colorop->dev;
+ int i = 0;
+
+ /* Multiplier */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_MULTIPLIER) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_MULTIPLIER) {
+ drm_dbg(dev, "Multiplier colorop with ID: %d\n", colorop->base.id);
+ dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(colorop_state->multiplier);
+ }
+
+ return 0;
+}
+
+static int
+__set_dm_plane_colorop_shaper(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR;
+ struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func;
+ const struct drm_color_lut32 *shaper_lut;
+ struct drm_device *dev = colorop->dev;
+ bool enabled = false;
+ u32 shaper_size;
+ int i = 0, ret = 0;
+
+ /* 1D Curve - SHAPER TF */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_shaper_tfs)) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE) {
+ drm_dbg(dev, "Shaper TF colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ ret = __set_output_tf(tf, 0, 0, false);
+ if (ret)
+ return ret;
+ enabled = true;
+ }
+
+ /* 1D LUT - SHAPER LUT */
+ colorop = old_colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Shaper LUT colorop found\n");
+ return -EINVAL;
+ }
+
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT) {
+ drm_dbg(dev, "Shaper LUT colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf;
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ shaper_lut = __extract_blob_lut32(colorop_state->data, &shaper_size);
+ shaper_size = shaper_lut != NULL ? shaper_size : 0;
+
+ /* Custom LUT size must be the same as supported size */
+ if (shaper_size == colorop->size) {
+ ret = __set_output_tf_32(tf, shaper_lut, shaper_size, false);
+ if (ret)
+ return ret;
+ enabled = true;
+ }
+ }
+
+ if (!enabled)
+ tf->type = TF_TYPE_BYPASS;
+
+ return 0;
+}
+
+/* __set_colorop_3dlut - set DRM 3D LUT to DC stream
+ * @drm_lut3d: user 3D LUT
+ * @drm_lut3d_size: size of 3D LUT
+ * @lut3d: DC 3D LUT
+ *
+ * Map user 3D LUT data to DC 3D LUT and all necessary bits to program it
+ * on DCN accordingly.
+ *
+ * Returns:
+ * 0 on success. -EINVAL if drm_lut3d_size is zero.
+ */
+static int __set_colorop_3dlut(const struct drm_color_lut32 *drm_lut3d,
+ uint32_t drm_lut3d_size,
+ struct dc_3dlut *lut)
+{
+ if (!drm_lut3d_size) {
+ lut->state.bits.initialized = 0;
+ return -EINVAL;
+ }
+
+	/* Only a 17x17x17 3D LUT (12-bit) is supported for now */
+ lut->lut_3d.use_12bits = true;
+ lut->lut_3d.use_tetrahedral_9 = false;
+
+ lut->state.bits.initialized = 1;
+ __drm_3dlut32_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d,
+ lut->lut_3d.use_tetrahedral_9, 12);
+
+ return 0;
+}
+
+static int
+__set_dm_plane_colorop_3dlut(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func;
+ struct drm_atomic_state *state = plane_state->state;
+ const struct amdgpu_device *adev = drm_to_adev(colorop->dev);
+ const struct drm_device *dev = colorop->dev;
+ const struct drm_color_lut32 *lut3d;
+ uint32_t lut3d_size;
+ int i = 0, ret = 0;
+
+ /* 3D LUT */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_3D_LUT) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_3D_LUT) {
+ if (!adev->dm.dc->caps.color.dpp.hw_3d_lut) {
+ drm_dbg(dev, "3D LUT is not supported by hardware\n");
+ return -EINVAL;
+ }
+
+ drm_dbg(dev, "3D LUT colorop with ID: %d\n", colorop->base.id);
+ lut3d = __extract_blob_lut32(colorop_state->data, &lut3d_size);
+ lut3d_size = lut3d != NULL ? lut3d_size : 0;
+ ret = __set_colorop_3dlut(lut3d, lut3d_size, &dc_plane_state->lut3d_func);
+ if (ret) {
+ drm_dbg(dev, "3D LUT colorop with ID: %d has LUT size = %d\n",
+ colorop->base.id, lut3d_size);
+ return ret;
+ }
+
+ /* 3D LUT requires shaper. If shaper colorop is bypassed, enable shaper curve
+ * with TRANSFER_FUNCTION_LINEAR
+ */
+ if (tf->type == TF_TYPE_BYPASS) {
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = TRANSFER_FUNCTION_LINEAR;
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ ret = __set_output_tf_32(tf, NULL, 0, false);
+ }
+ }
+
+ return ret;
+}
+
+static int
+__set_dm_plane_colorop_blend(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR;
+ struct dc_transfer_func *tf = &dc_plane_state->blend_tf;
+ const struct drm_color_lut32 *blend_lut = NULL;
+ struct drm_device *dev = colorop->dev;
+ uint32_t blend_size = 0;
+ int i = 0;
+
+ /* 1D Curve - BLND TF */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE &&
+ (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
+ drm_dbg(dev, "Blend TF colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ __set_input_tf_32(NULL, tf, blend_lut, blend_size);
+ }
+
+ /* 1D Curve - BLND LUT */
+ colorop = old_colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Blend LUT colorop found\n");
+ return -EINVAL;
+ }
+
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT &&
+ (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
+ drm_dbg(dev, "Blend LUT colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf;
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ blend_lut = __extract_blob_lut32(colorop_state->data, &blend_size);
+ blend_size = blend_lut != NULL ? blend_size : 0;
+
+ /* Custom LUT size must be the same as supported size */
+ if (blend_size == colorop->size)
+ __set_input_tf_32(NULL, tf, blend_lut, blend_size);
+ }
+
+ return 0;
+}
+
+static int
amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state)
{
@@ -1187,6 +1865,93 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
return 0;
}
+static int
+amdgpu_dm_plane_set_colorop_properties(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state)
+{
+ struct drm_colorop *colorop = plane_state->color_pipeline;
+ struct drm_device *dev = plane_state->plane->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int ret;
+
+ /* 1D Curve - DEGAM TF */
+ if (!colorop)
+ return -EINVAL;
+
+ ret = __set_dm_plane_colorop_degamma(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* Multiplier */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no multiplier colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_multiplier(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* 3x4 matrix */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no 3x4 matrix colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_3x4_matrix(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ if (adev->dm.dc->caps.color.dpp.hw_3d_lut) {
+ /* 1D Curve & LUT - SHAPER TF & LUT */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Shaper TF colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_shaper(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* Shaper LUT colorop is already handled, just skip here */
+ colorop = colorop->next;
+ if (!colorop)
+ return -EINVAL;
+
+ /* 3D LUT */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no 3D LUT colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_3dlut(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+ }
+
+ /* 1D Curve & LUT - BLND TF & LUT */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Blend TF colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_blend(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* BLND LUT colorop is already handled, just skip here */
+ colorop = colorop->next;
+ if (!colorop)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
* amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane.
* @crtc: amdgpu_dm crtc state
@@ -1283,5 +2048,8 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
}
+ if (!amdgpu_dm_plane_set_colorop_properties(plane_state, dc_plane_state))
+ return 0;
+
return amdgpu_dm_plane_set_color_properties(plane_state, dc_plane_state);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
new file mode 100644
index 000000000000..d585618b8064
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <drm/drm_print.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_property.h>
+#include <drm/drm_colorop.h>
+
+#include "amdgpu.h"
+#include "amdgpu_dm_colorop.h"
+#include "dc.h"
+
+const u64 amdgpu_dm_supported_degam_tfs =
+ BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) |
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV);
+
+const u64 amdgpu_dm_supported_shaper_tfs =
+ BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_BT2020_OETF) |
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22);
+
+const u64 amdgpu_dm_supported_blnd_tfs =
+ BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) |
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV);
+
+#define MAX_COLOR_PIPELINE_OPS 10
+
+#define LUT3D_SIZE 17
+
+int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list)
+{
+ struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS];
+ struct drm_device *dev = plane->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int ret;
+ int i = 0;
+
+ memset(ops, 0, sizeof(ops));
+
+ /* 1D curve - DEGAM TF */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
+ amdgpu_dm_supported_degam_tfs,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ list->type = ops[i]->base.id;
+ list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id);
+
+ i++;
+
+ /* Multiplier */
+ ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_mult_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+
+ /* 3x4 matrix */
+ ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+
+ if (adev->dm.dc->caps.color.dpp.hw_3d_lut) {
+ /* 1D curve - SHAPER TF */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
+ amdgpu_dm_supported_shaper_tfs,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+
+ /* 1D LUT - SHAPER LUT */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES,
+ DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+
+ /* 3D LUT */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_3dlut_init(dev, ops[i], plane, LUT3D_SIZE,
+ DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+ }
+
+ /* 1D curve - BLND TF */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
+ amdgpu_dm_supported_blnd_tfs,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i - 1], ops[i]);
+
+ i++;
+
+ /* 1D LUT - BLND LUT */
+ ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES,
+ DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+ return 0;
+
+cleanup:
+ if (ret == -ENOMEM)
+ drm_err(plane->dev, "KMS: Failed to allocate colorop\n");
+
+ drm_colorop_pipeline_destroy(dev);
+
+ return ret;
+}
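A minimal chaining sketch using only the helpers seen above: allocate two ops, initialize them against the plane, and link them so userspace can walk the pipeline. Cleanup on failure is elided for brevity.

	static int example_two_op_pipeline(struct drm_plane *plane)
	{
		struct drm_device *dev = plane->dev;
		struct drm_colorop *a, *b;
		int ret;

		a = kzalloc(sizeof(*a), GFP_KERNEL);
		b = kzalloc(sizeof(*b), GFP_KERNEL);
		if (!a || !b)
			return -ENOMEM;

		ret = drm_plane_colorop_mult_init(dev, a, plane,
						  DRM_COLOROP_FLAG_ALLOW_BYPASS);
		if (ret)
			return ret;

		ret = drm_plane_colorop_ctm_3x4_init(dev, b, plane,
						     DRM_COLOROP_FLAG_ALLOW_BYPASS);
		if (ret)
			return ret;

		drm_colorop_set_next_property(a, b);
		return 0;
	}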
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h
new file mode 100644
index 000000000000..2e1617ffc8ee
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_COLOROP_H__
+#define __AMDGPU_DM_COLOROP_H__
+
+extern const u64 amdgpu_dm_supported_degam_tfs;
+extern const u64 amdgpu_dm_supported_shaper_tfs;
+extern const u64 amdgpu_dm_supported_blnd_tfs;
+
+int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list);
+
+#endif /* __AMDGPU_DM_COLOROP_H__*/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index f936a35fa9eb..e20aa7438066 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -30,6 +31,7 @@
#include "amdgpu_dm.h"
#include "dc.h"
#include "amdgpu_securedisplay.h"
+#include "amdgpu_dm_psr.h"
static const char *const pipe_crc_sources[] = {
"none",
@@ -83,45 +85,274 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
}
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+static void update_phy_id_mapping(struct amdgpu_device *adev)
+{
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_connector *connector;
+ struct amdgpu_dm_connector *aconnector;
+ struct amdgpu_dm_connector *sort_connector[AMDGPU_DM_MAX_CRTC] = {NULL};
+ struct drm_connector_list_iter iter;
+ uint8_t idx = 0, idx_2 = 0, connector_cnt = 0;
+
+ dm->secure_display_ctx.phy_mapping_updated = false;
+
+ mutex_lock(&ddev->mode_config.mutex);
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->status != connector_status_connected)
+ continue;
+
+ if (idx >= AMDGPU_DM_MAX_CRTC) {
+ DRM_WARN("%s connected connectors exceed max crtc\n", __func__);
+ mutex_unlock(&ddev->mode_config.mutex);
+ return;
+ }
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ sort_connector[idx] = aconnector;
+ idx++;
+ connector_cnt++;
+ }
+ drm_connector_list_iter_end(&iter);
+
+ /* sort connectors by link_enc_hw_instance first */
+ for (idx = connector_cnt; idx > 1 ; idx--) {
+ for (idx_2 = 0; idx_2 < (idx - 1); idx_2++) {
+ if (sort_connector[idx_2]->dc_link->link_enc_hw_inst >
+ sort_connector[idx_2 + 1]->dc_link->link_enc_hw_inst)
+ swap(sort_connector[idx_2], sort_connector[idx_2 + 1]);
+ }
+ }
+
+ /*
+ * Sort mst connectors by RAD. mst connectors with the same enc_hw_instance are already
+ * sorted together above.
+ */
+ for (idx = 0; idx < connector_cnt; /*Do nothing*/) {
+ if (sort_connector[idx]->mst_root) {
+ uint8_t i, j, k;
+ uint8_t mst_con_cnt = 1;
+
+ for (idx_2 = (idx + 1); idx_2 < connector_cnt; idx_2++) {
+ if (sort_connector[idx_2]->mst_root == sort_connector[idx]->mst_root)
+ mst_con_cnt++;
+ else
+ break;
+ }
+
+ for (i = mst_con_cnt; i > 1; i--) {
+ for (j = idx; j < (idx + i - 2); j++) {
+ int mstb_lct = sort_connector[j]->mst_output_port->parent->lct;
+ int next_mstb_lct = sort_connector[j + 1]->mst_output_port->parent->lct;
+ u8 *rad;
+ u8 *next_rad;
+ bool swap = false;
+
+					/* Sort by MST tree depth first, then compare RAD if depth is the same. */
+ if (mstb_lct > next_mstb_lct) {
+ swap = true;
+ } else if (mstb_lct == next_mstb_lct) {
+ if (mstb_lct == 1) {
+ if (sort_connector[j]->mst_output_port->port_num > sort_connector[j + 1]->mst_output_port->port_num)
+ swap = true;
+ } else if (mstb_lct > 1) {
+ rad = sort_connector[j]->mst_output_port->parent->rad;
+ next_rad = sort_connector[j + 1]->mst_output_port->parent->rad;
+
+ for (k = 0; k < mstb_lct - 1; k++) {
+ int shift = (k % 2) ? 0 : 4;
+ int port_num = (rad[k / 2] >> shift) & 0xf;
+ int next_port_num = (next_rad[k / 2] >> shift) & 0xf;
+
+ if (port_num > next_port_num) {
+ swap = true;
+ break;
+ }
+ }
+ } else {
+						DRM_ERROR("MST LCT should never be < 1");
+ mutex_unlock(&ddev->mode_config.mutex);
+ return;
+ }
+ }
+
+ if (swap)
+ swap(sort_connector[j], sort_connector[j + 1]);
+ }
+ }
+
+ idx += mst_con_cnt;
+ } else {
+ idx++;
+ }
+ }
+
+	/* Sorting complete. Assign the relevant results to dm->secure_display_ctx.phy_id_mapping[] */
+ memset(dm->secure_display_ctx.phy_id_mapping, 0, sizeof(dm->secure_display_ctx.phy_id_mapping));
+ for (idx = 0; idx < connector_cnt; idx++) {
+ aconnector = sort_connector[idx];
+
+ dm->secure_display_ctx.phy_id_mapping[idx].assigned = true;
+ dm->secure_display_ctx.phy_id_mapping[idx].is_mst = false;
+ dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst = aconnector->dc_link->link_enc_hw_inst;
+
+ if (sort_connector[idx]->mst_root) {
+ dm->secure_display_ctx.phy_id_mapping[idx].is_mst = true;
+ dm->secure_display_ctx.phy_id_mapping[idx].lct = aconnector->mst_output_port->parent->lct;
+ dm->secure_display_ctx.phy_id_mapping[idx].port_num = aconnector->mst_output_port->port_num;
+ memcpy(dm->secure_display_ctx.phy_id_mapping[idx].rad,
+ aconnector->mst_output_port->parent->rad, sizeof(aconnector->mst_output_port->parent->rad));
+ }
+ }
+ mutex_unlock(&ddev->mode_config.mutex);
+
+ dm->secure_display_ctx.phy_id_mapping_cnt = connector_cnt;
+ dm->secure_display_ctx.phy_mapping_updated = true;
+}
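
The RAD comparison above unpacks two 4-bit hop port numbers per byte, high nibble first; a branch device at link count total (LCT) n contributes n - 1 hops. A minimal standalone sketch of that decode, illustrative only and outside the kernel:

#include <stdint.h>
#include <stdio.h>

/* Decode the per-hop port numbers packed into an MST Relative Address
 * (RAD): two 4-bit entries per byte, high nibble first. A device at
 * LCT lct has lct - 1 hops in its RAD.
 */
static void decode_rad(const uint8_t *rad, int lct)
{
	int k;

	for (k = 0; k < lct - 1; k++) {
		int shift = (k % 2) ? 0 : 4;
		int port_num = (rad[k / 2] >> shift) & 0xf;

		printf("hop %d -> port %d\n", k, port_num);
	}
}

int main(void)
{
	uint8_t rad[8] = { 0x21 }; /* hop 0 = port 2, hop 1 = port 1 */

	decode_rad(rad, 3); /* lct == 3 -> two hops */
	return 0;
}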
+
+static bool get_phy_id(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector, uint8_t *phy_id)
+{
+ int idx, idx_2;
+ bool found = false;
+
+ /*
+ * Assume secure display starts after all connectors are probed. The
+ * connection config is static as well.
+ */
+ if (!dm->secure_display_ctx.phy_mapping_updated) {
+ DRM_WARN("%s Should update the phy id table before get it's value", __func__);
+ return false;
+ }
+
+ for (idx = 0; idx < dm->secure_display_ctx.phy_id_mapping_cnt; idx++) {
+ if (!dm->secure_display_ctx.phy_id_mapping[idx].assigned) {
+ DRM_ERROR("phy_id_mapping[%d] should be assigned", idx);
+ return false;
+ }
+
+ if (aconnector->dc_link->link_enc_hw_inst ==
+ dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst) {
+ if (!dm->secure_display_ctx.phy_id_mapping[idx].is_mst) {
+ found = true;
+ goto out;
+ } else {
+ /* Could be caused by wrongly passing the mst root connector */
+ if (!aconnector->mst_output_port) {
+ DRM_ERROR("%s Check mst case but connector without a port assigned", __func__);
+ return false;
+ }
+
+ if (aconnector->mst_root &&
+ aconnector->mst_root->mst_mgr.mst_primary == NULL) {
+ DRM_WARN("%s pass in a stale mst connector", __func__);
+ }
+
+ if (aconnector->mst_output_port->parent->lct == dm->secure_display_ctx.phy_id_mapping[idx].lct &&
+ aconnector->mst_output_port->port_num == dm->secure_display_ctx.phy_id_mapping[idx].port_num) {
+ if (aconnector->mst_output_port->parent->lct == 1) {
+ found = true;
+ goto out;
+ } else if (aconnector->mst_output_port->parent->lct > 1) {
+ /* Check RAD */
+ for (idx_2 = 0; idx_2 < aconnector->mst_output_port->parent->lct - 1; idx_2++) {
+ int shift = (idx_2 % 2) ? 0 : 4;
+ int port_num = (aconnector->mst_output_port->parent->rad[idx_2 / 2] >> shift) & 0xf;
+ int port_num2 = (dm->secure_display_ctx.phy_id_mapping[idx].rad[idx_2 / 2] >> shift) & 0xf;
+
+ if (port_num != port_num2)
+ break;
+ }
+
+ if (idx_2 == aconnector->mst_output_port->parent->lct - 1) {
+ found = true;
+ goto out;
+ }
+ } else {
+ DRM_ERROR("lCT should be >= 1");
+ return false;
+ }
+ }
+ }
+ }
+ }
+
+out:
+ if (found) {
+ DRM_DEBUG_DRIVER("Associated secure display PHY ID as %d", idx);
+ *phy_id = idx;
+ } else {
+ DRM_WARN("Can't find associated phy ID");
+ return false;
+ }
+
+ return true;
+}
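
A hypothetical caller of get_phy_id() (not part of the patch), mirroring how the workers below take mode_config.mutex around the lookup before using the id:

/* Sketch only: resolve a connector to its secure-display PHY id.
 * get_phy_id() is the static helper above; the locking follows the
 * notify_ta/forward_roi workers.
 */
static int example_lookup_phy_id(struct amdgpu_display_manager *dm,
				 struct amdgpu_dm_connector *aconnector)
{
	uint8_t phy_id;
	bool found;

	mutex_lock(&dm->ddev->mode_config.mutex);
	found = get_phy_id(dm, aconnector, &phy_id);
	mutex_unlock(&dm->ddev->mode_config.mutex);

	if (!found)
		return -ENODEV;

	/* phy_id is the connector's index into phy_id_mapping[] */
	return phy_id;
}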
+
static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream)
{
struct drm_device *drm_dev = crtc->dev;
struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_dm_connector *aconnector;
bool was_activated;
+ uint8_t phy_id;
+ unsigned long flags;
+ int i;
- spin_lock_irq(&drm_dev->event_lock);
- was_activated = acrtc->dm_irq_params.window_param.activated;
- acrtc->dm_irq_params.window_param.x_start = 0;
- acrtc->dm_irq_params.window_param.y_start = 0;
- acrtc->dm_irq_params.window_param.x_end = 0;
- acrtc->dm_irq_params.window_param.y_end = 0;
- acrtc->dm_irq_params.window_param.activated = false;
- acrtc->dm_irq_params.window_param.update_win = false;
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
- spin_unlock_irq(&drm_dev->event_lock);
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ was_activated = acrtc->dm_irq_params.crc_window_activated;
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ acrtc->dm_irq_params.window_param[i].x_start = 0;
+ acrtc->dm_irq_params.window_param[i].y_start = 0;
+ acrtc->dm_irq_params.window_param[i].x_end = 0;
+ acrtc->dm_irq_params.window_param[i].y_end = 0;
+ acrtc->dm_irq_params.window_param[i].enable = false;
+ acrtc->dm_irq_params.window_param[i].update_win = false;
+ acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 0;
+ }
+ acrtc->dm_irq_params.crc_window_activated = false;
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
/* Disable secure_display if it was enabled */
- if (was_activated) {
+ if (was_activated && dm->secure_display_ctx.op_mode == LEGACY_MODE) {
/* stop ROI update on this crtc */
- flush_work(&dm->secure_display_ctxs[crtc->index].notify_ta_work);
- flush_work(&dm->secure_display_ctxs[crtc->index].forward_roi_work);
- dc_stream_forward_crc_window(stream, NULL, true);
+ flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].notify_ta_work);
+ flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].forward_roi_work);
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+ if (aconnector && get_phy_id(dm, aconnector, &phy_id)) {
+ if (dm->secure_display_ctx.support_mul_roi)
+ dc_stream_forward_multiple_crc_window(stream, NULL, phy_id, true);
+ else
+ dc_stream_forward_crc_window(stream, NULL, phy_id, true);
+ } else {
+ DRM_DEBUG_DRIVER("%s Can't find matching phy id", __func__);
+ }
}
}
static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
{
- struct secure_display_context *secure_display_ctx;
+ struct secure_display_crtc_context *crtc_ctx;
struct psp_context *psp;
struct ta_securedisplay_cmd *securedisplay_cmd;
struct drm_crtc *crtc;
struct dc_stream_state *stream;
+ struct amdgpu_dm_connector *aconnector;
uint8_t phy_inst;
+ struct amdgpu_display_manager *dm;
+ struct crc_data crc_cpy[MAX_CRC_WINDOW_NUM];
+ unsigned long flags;
+ uint8_t roi_idx = 0;
int ret;
+ int i;
- secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work);
- crtc = secure_display_ctx->crtc;
+ crtc_ctx = container_of(work, struct secure_display_crtc_context, notify_ta_work);
+ crtc = crtc_ctx->crtc;
if (!crtc)
return;
@@ -133,21 +364,50 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
return;
}
+ dm = &drm_to_adev(crtc->dev)->dm;
stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
- phy_inst = stream->link->link_enc_hw_inst;
-
- /* need lock for multiple crtcs to use the command buffer */
- mutex_lock(&psp->securedisplay_context.mutex);
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ if (!aconnector)
+ return;
- psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
- TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ mutex_lock(&crtc->dev->mode_config.mutex);
+ if (!get_phy_id(dm, aconnector, &phy_inst)) {
+ DRM_WARN("%s Can't find mapping phy id!", __func__);
+ mutex_unlock(&crtc->dev->mode_config.mutex);
+ return;
+ }
+ mutex_unlock(&crtc->dev->mode_config.mutex);
- securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ memcpy(crc_cpy, crtc_ctx->crc_info.crc, sizeof(struct crc_data) * MAX_CRC_WINDOW_NUM);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ /* need lock for multiple crtcs to use the command buffer */
+ mutex_lock(&psp->securedisplay_context.mutex);
/* PSP TA is expected to finish data transmission over I2C within current frame,
* even if there are up to 4 crtcs requesting to send in this frame.
*/
- ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ if (dm->secure_display_ctx.support_mul_roi) {
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);
+
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.phy_id = phy_inst;
+
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ if (crc_cpy[i].crc_ready)
+ roi_idx |= 1 << i;
+ }
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.roi_idx = roi_idx;
+
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);
+ } else {
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;
+
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ }
if (!ret) {
if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
@@ -160,22 +420,47 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
static void
amdgpu_dm_forward_crc_window(struct work_struct *work)
{
- struct secure_display_context *secure_display_ctx;
+ struct secure_display_crtc_context *crtc_ctx;
struct amdgpu_display_manager *dm;
struct drm_crtc *crtc;
struct dc_stream_state *stream;
+ struct amdgpu_dm_connector *aconnector;
+ struct crc_window roi_cpy[MAX_CRC_WINDOW_NUM];
+ unsigned long flags;
+ uint8_t phy_id;
- secure_display_ctx = container_of(work, struct secure_display_context, forward_roi_work);
- crtc = secure_display_ctx->crtc;
+ crtc_ctx = container_of(work, struct secure_display_crtc_context, forward_roi_work);
+ crtc = crtc_ctx->crtc;
if (!crtc)
return;
dm = &drm_to_adev(crtc->dev)->dm;
stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+ if (!aconnector)
+ return;
+
+ mutex_lock(&crtc->dev->mode_config.mutex);
+ if (!get_phy_id(dm, aconnector, &phy_id)) {
+ DRM_WARN("%s Can't find mapping phy id!", __func__);
+ mutex_unlock(&crtc->dev->mode_config.mutex);
+ return;
+ }
+ mutex_unlock(&crtc->dev->mode_config.mutex);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ memcpy(roi_cpy, crtc_ctx->roi, sizeof(struct crc_window) * MAX_CRC_WINDOW_NUM);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
mutex_lock(&dm->dc_lock);
- dc_stream_forward_crc_window(stream, &secure_display_ctx->rect, false);
+ if (dm->secure_display_ctx.support_mul_roi)
+ dc_stream_forward_multiple_crc_window(stream, roi_cpy,
+ phy_id, false);
+ else
+ dc_stream_forward_crc_window(stream, &roi_cpy[0].rect,
+ phy_id, false);
mutex_unlock(&dm->dc_lock);
}
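
Both workers snapshot the shared per-CRTC state into a local copy under event_lock and only then do the slow TA/dmub work. A generic userspace sketch of that copy-then-release pattern:

#include <pthread.h>
#include <string.h>

struct window { int x, y, w, h; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct window shared[4];

/* Copy the shared state while holding the lock, then operate on the
 * private snapshot so the long-running path never holds the lock.
 */
static void worker(void)
{
	struct window snap[4];

	pthread_mutex_lock(&lock);
	memcpy(snap, shared, sizeof(snap));
	pthread_mutex_unlock(&lock);

	/* ... long-running work on snap[] ... */
}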
@@ -186,7 +471,7 @@ bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
bool ret = false;
spin_lock_irq(&drm_dev->event_lock);
- ret = acrtc->dm_irq_params.window_param.activated;
+ ret = acrtc->dm_irq_params.crc_window_activated;
spin_unlock_irq(&drm_dev->event_lock);
return ret;
@@ -224,10 +509,14 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
mutex_lock(&adev->dm.dc_lock);
+ /* For PSR1, check that the panel has exited PSR */
+ if (stream_state->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+ amdgpu_dm_psr_wait_disable(stream_state);
+
/* Enable or disable CRTC CRC generation */
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
if (!dc_stream_configure_crc(stream_state->ctx->dc,
- stream_state, NULL, enable, enable)) {
+ stream_state, NULL, enable, enable, 0, true)) {
ret = -EINVAL;
goto unlock;
}
@@ -258,6 +547,10 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
struct drm_crtc_commit *commit;
struct dm_crtc_state *crtc_state;
struct drm_device *drm_dev = crtc->dev;
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+#endif
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct drm_dp_aux *aux = NULL;
bool enable = false;
@@ -357,6 +650,17 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
}
+ /*
+ * Reading the CRC requires the vblank interrupt handler to be
+ * enabled. Keep a reference until CRC capture stops.
+ */
+ enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
+ if (!enabled && enable) {
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ goto cleanup;
+ }
+
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* Reset secure_display when we change crc source from debugfs */
amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
@@ -367,16 +671,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
goto cleanup;
}
- /*
- * Reading the CRC requires the vblank interrupt handler to be
- * enabled. Keep a reference until CRC capture stops.
- */
- enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
if (!enabled && enable) {
- ret = drm_crtc_vblank_get(crtc);
- if (ret)
- goto cleanup;
-
if (dm_is_crc_source_dprx(source)) {
if (drm_dp_start_crc(aux, crtc)) {
DRM_DEBUG_DRIVER("dp start crc failed\n");
@@ -402,6 +697,13 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
/* Reset crc_skipped on dm state */
crtc_state->crc_skip_count = 0;
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ /* Initialize phy id mapping table for secure display */
+ if (dm->secure_display_ctx.op_mode == LEGACY_MODE &&
+ !dm->secure_display_ctx.phy_mapping_updated)
+ update_phy_id_mapping(adev);
+#endif
+
cleanup:
if (commit)
drm_crtc_commit_put(commit);
@@ -456,7 +758,7 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
}
if (dm_is_crc_source_crtc(cur_crc_src)) {
- if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state,
+ if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, 0,
&crcs[0], &crcs[1], &crcs[2]))
return;
@@ -472,8 +774,17 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
enum amdgpu_dm_pipe_crc_source cur_crc_src;
struct amdgpu_crtc *acrtc = NULL;
struct amdgpu_device *adev = NULL;
- struct secure_display_context *secure_display_ctx = NULL;
+ struct secure_display_crtc_context *crtc_ctx = NULL;
+ bool reset_crc_frame_count[MAX_CRC_WINDOW_NUM] = {false};
+ uint32_t crc_r[MAX_CRC_WINDOW_NUM] = {0};
+ uint32_t crc_g[MAX_CRC_WINDOW_NUM] = {0};
+ uint32_t crc_b[MAX_CRC_WINDOW_NUM] = {0};
unsigned long flags1;
+ bool forward_roi_change = false;
+ bool notify_ta = false;
+ bool all_crc_ready = true;
+ struct dc_stream_state *stream_state;
+ int i;
if (crtc == NULL)
return;
@@ -481,78 +792,160 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
acrtc = to_amdgpu_crtc(crtc);
adev = drm_to_adev(crtc->dev);
drm_dev = crtc->dev;
+ stream_state = to_dm_crtc_state(crtc->state)->stream;
spin_lock_irqsave(&drm_dev->event_lock, flags1);
cur_crc_src = acrtc->dm_irq_params.crc_src;
/* Early return if CRC capture is not enabled. */
if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) ||
- !dm_is_crc_source_crtc(cur_crc_src))
- goto cleanup;
-
- if (!acrtc->dm_irq_params.window_param.activated)
- goto cleanup;
+ !dm_is_crc_source_crtc(cur_crc_src)) {
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
+ return;
+ }
- if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
- acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
- goto cleanup;
+ if (!acrtc->dm_irq_params.crc_window_activated) {
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
+ return;
}
- secure_display_ctx = &adev->dm.secure_display_ctxs[acrtc->crtc_id];
- if (WARN_ON(secure_display_ctx->crtc != crtc)) {
- /* We have set the crtc when creating secure_display_context,
+ crtc_ctx = &adev->dm.secure_display_ctx.crtc_ctx[acrtc->crtc_id];
+ if (WARN_ON(crtc_ctx->crtc != crtc)) {
+ /* We have set the crtc when creating secure_display_crtc_context,
* don't expect it to be changed here.
*/
- secure_display_ctx->crtc = crtc;
+ crtc_ctx->crtc = crtc;
}
- if (acrtc->dm_irq_params.window_param.update_win) {
- /* prepare work for dmub to update ROI */
- secure_display_ctx->rect.x = acrtc->dm_irq_params.window_param.x_start;
- secure_display_ctx->rect.y = acrtc->dm_irq_params.window_param.y_start;
- secure_display_ctx->rect.width = acrtc->dm_irq_params.window_param.x_end -
- acrtc->dm_irq_params.window_param.x_start;
- secure_display_ctx->rect.height = acrtc->dm_irq_params.window_param.y_end -
- acrtc->dm_irq_params.window_param.y_start;
- schedule_work(&secure_display_ctx->forward_roi_work);
-
- acrtc->dm_irq_params.window_param.update_win = false;
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ struct crc_params crc_window = {
+ .windowa_x_start = acrtc->dm_irq_params.window_param[i].x_start,
+ .windowa_y_start = acrtc->dm_irq_params.window_param[i].y_start,
+ .windowa_x_end = acrtc->dm_irq_params.window_param[i].x_end,
+ .windowa_y_end = acrtc->dm_irq_params.window_param[i].y_end,
+ .windowb_x_start = acrtc->dm_irq_params.window_param[i].x_start,
+ .windowb_y_start = acrtc->dm_irq_params.window_param[i].y_start,
+ .windowb_x_end = acrtc->dm_irq_params.window_param[i].x_end,
+ .windowb_y_end = acrtc->dm_irq_params.window_param[i].y_end,
+ };
+
+ crtc_ctx->roi[i].enable = acrtc->dm_irq_params.window_param[i].enable;
+
+ if (!acrtc->dm_irq_params.window_param[i].enable) {
+ crtc_ctx->crc_info.crc[i].crc_ready = false;
+ continue;
+ }
- /* Statically skip 1 frame, because we may need to wait below things
- * before sending ROI to dmub:
- * 1. We defer the work by using system workqueue.
- * 2. We may need to wait for dc_lock before accessing dmub.
- */
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 1;
+ if (acrtc->dm_irq_params.window_param[i].skip_frame_cnt) {
+ acrtc->dm_irq_params.window_param[i].skip_frame_cnt -= 1;
+ crtc_ctx->crc_info.crc[i].crc_ready = false;
+ continue;
+ }
- } else {
- /* prepare work for psp to read ROI/CRC and send to I2C */
- schedule_work(&secure_display_ctx->notify_ta_work);
+ if (acrtc->dm_irq_params.window_param[i].update_win) {
+ crtc_ctx->roi[i].rect.x = crc_window.windowa_x_start;
+ crtc_ctx->roi[i].rect.y = crc_window.windowa_y_start;
+ crtc_ctx->roi[i].rect.width = crc_window.windowa_x_end -
+ crc_window.windowa_x_start;
+ crtc_ctx->roi[i].rect.height = crc_window.windowa_y_end -
+ crc_window.windowa_y_start;
+
+ if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
+ /* forward task to dmub to update ROI */
+ forward_roi_change = true;
+ else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
+ /* update ROI via dm */
+ dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
+ &crc_window, true, true, i, false);
+
+ reset_crc_frame_count[i] = true;
+
+ acrtc->dm_irq_params.window_param[i].update_win = false;
+
+ /* Statically skip 1 frame, because we may need to wait for the
+ * things below before sending the ROI to dmub:
+ * 1. We defer the work by using the system workqueue.
+ * 2. We may need to wait for dc_lock before accessing dmub.
+ */
+ acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 1;
+ crtc_ctx->crc_info.crc[i].crc_ready = false;
+ } else {
+ if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, i,
+ &crc_r[i], &crc_g[i], &crc_b[i]))
+ DRM_ERROR("Secure Display: fail to get crc from engine %d\n", i);
+
+ if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
+ /* forward task to psp to read ROI/CRC and output via I2C */
+ notify_ta = true;
+ else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
+ /* Avoid the ROI window being changed; keep overwriting. */
+ dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
+ &crc_window, true, true, i, false);
+
+ /* crc ready for psp to read out */
+ crtc_ctx->crc_info.crc[i].crc_ready = true;
+ }
}
-cleanup:
spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
+
+ if (forward_roi_change)
+ schedule_work(&crtc_ctx->forward_roi_work);
+
+ if (notify_ta)
+ schedule_work(&crtc_ctx->notify_ta_work);
+
+ spin_lock_irqsave(&crtc_ctx->crc_info.lock, flags1);
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ crtc_ctx->crc_info.crc[i].crc_R = crc_r[i];
+ crtc_ctx->crc_info.crc[i].crc_G = crc_g[i];
+ crtc_ctx->crc_info.crc[i].crc_B = crc_b[i];
+
+ if (!crtc_ctx->roi[i].enable) {
+ crtc_ctx->crc_info.crc[i].frame_count = 0;
+ continue;
+ }
+
+ if (!crtc_ctx->crc_info.crc[i].crc_ready)
+ all_crc_ready = false;
+
+ if (reset_crc_frame_count[i] || crtc_ctx->crc_info.crc[i].frame_count == UINT_MAX)
+ /* Reset the reference frame count after the user updates the ROI
+ * or it reaches the maximum value.
+ */
+ crtc_ctx->crc_info.crc[i].frame_count = 0;
+ else
+ crtc_ctx->crc_info.crc[i].frame_count += 1;
+ }
+ spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags1);
+
+ if (all_crc_ready)
+ complete_all(&crtc_ctx->crc_info.completion);
}
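
A hypothetical consumer of these results (not part of the patch): wait on the completion that fires once every enabled window is ready, then copy crc_info under its spinlock, mirroring the writer above.

/* Sketch only: read back a consistent set of window CRCs. */
static void example_read_crcs(struct secure_display_crtc_context *crtc_ctx)
{
	struct crc_data crc[MAX_CRC_WINDOW_NUM];
	unsigned long flags;

	wait_for_completion(&crtc_ctx->crc_info.completion);

	spin_lock_irqsave(&crtc_ctx->crc_info.lock, flags);
	memcpy(crc, crtc_ctx->crc_info.crc, sizeof(crc));
	spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags);

	/* crc[i].crc_R/G/B and frame_count are now a consistent snapshot */
}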
-struct secure_display_context *
-amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
+void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
{
- struct secure_display_context *secure_display_ctxs = NULL;
+ struct secure_display_crtc_context *crtc_ctx = NULL;
int i;
- secure_display_ctxs = kcalloc(adev->mode_info.num_crtc,
- sizeof(struct secure_display_context),
+ crtc_ctx = kcalloc(adev->mode_info.num_crtc,
+ sizeof(struct secure_display_crtc_context),
GFP_KERNEL);
- if (!secure_display_ctxs)
- return NULL;
+ if (!crtc_ctx) {
+ adev->dm.secure_display_ctx.crtc_ctx = NULL;
+ return;
+ }
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- INIT_WORK(&secure_display_ctxs[i].forward_roi_work, amdgpu_dm_forward_crc_window);
- INIT_WORK(&secure_display_ctxs[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
- secure_display_ctxs[i].crtc = &adev->mode_info.crtcs[i]->base;
+ INIT_WORK(&crtc_ctx[i].forward_roi_work, amdgpu_dm_forward_crc_window);
+ INIT_WORK(&crtc_ctx[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
+ crtc_ctx[i].crtc = &adev->mode_info.crtcs[i]->base;
+ spin_lock_init(&crtc_ctx[i].crc_info.lock);
}
- return secure_display_ctxs;
+ adev->dm.secure_display_ctx.crtc_ctx = crtc_ctx;
+
+ adev->dm.secure_display_ctx.op_mode = DISPLAY_CRC_MODE;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 748e80ef40d0..95bdb8699d7f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
@@ -40,20 +41,53 @@ enum amdgpu_dm_pipe_crc_source {
};
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+#define MAX_CRTC 6
+
+enum secure_display_mode {
+ /* via dmub + psp */
+ LEGACY_MODE = 0,
+ /* driver directly */
+ DISPLAY_CRC_MODE,
+ SECURE_DISPLAY_MODE_MAX,
+};
+
+struct phy_id_mapping {
+ bool assigned;
+ bool is_mst;
+ uint8_t enc_hw_inst;
+ u8 lct;
+ u8 port_num;
+ u8 rad[8];
+};
+
+struct crc_data {
+ uint32_t crc_R;
+ uint32_t crc_G;
+ uint32_t crc_B;
+ uint32_t frame_count;
+ bool crc_ready;
+};
+
+struct crc_info {
+ struct crc_data crc[MAX_CRC_WINDOW_NUM];
+ struct completion completion;
+ spinlock_t lock;
+};
+
struct crc_window_param {
uint16_t x_start;
uint16_t y_start;
uint16_t x_end;
uint16_t y_end;
/* CRC window is activated or not */
- bool activated;
+ bool enable;
/* Update crc window during vertical blank or not */
bool update_win;
/* skip reading/writing for a few frames */
int skip_frame_cnt;
};
-struct secure_display_context {
+struct secure_display_crtc_context {
/* work to notify PSP TA*/
struct work_struct notify_ta_work;
@@ -63,7 +97,20 @@ struct secure_display_context {
struct drm_crtc *crtc;
/* Region of Interest (ROI) */
- struct rect rect;
+ struct crc_window roi[MAX_CRC_WINDOW_NUM];
+
+ struct crc_info crc_info;
+};
+
+struct secure_display_context {
+
+ struct secure_display_crtc_context *crtc_ctx;
+ /* Whether dmub supports multiple ROI settings */
+ bool support_mul_roi;
+ enum secure_display_mode op_mode;
+ bool phy_mapping_updated;
+ int phy_id_mapping_cnt;
+ struct phy_id_mapping phy_id_mapping[MAX_CRTC];
};
#endif
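
crc_data.crc_ready is what the notify_ta worker above folds into the roi_idx bitmask for the SEND_ROI_CRC_V2 command: bit i set means window i carries a valid CRC this frame. A standalone sketch of that encoding (the MAX_CRC_WINDOW_NUM value is assumed here; the real define comes from the dc headers):

#include <stdbool.h>
#include <stdint.h>

#define MAX_CRC_WINDOW_NUM 4 /* assumed for illustration */

static uint8_t build_roi_idx(const bool crc_ready[MAX_CRC_WINDOW_NUM])
{
	uint8_t roi_idx = 0;
	int i;

	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++)
		if (crc_ready[i])
			roi_idx |= 1u << i;	/* bit i: window i ready */

	return roi_idx;
}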
@@ -95,8 +142,7 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc);
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc);
-struct secure_display_context *amdgpu_dm_crtc_secure_display_create_contexts(
- struct amdgpu_device *adev);
+void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev);
#else
#define amdgpu_dm_crc_window_is_activated(x)
#define amdgpu_dm_crtc_handle_crc_window_irq(x)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 64a041c2af05..697e232acebf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -93,7 +93,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
return rc;
}
-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
+bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
{
return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
@@ -113,6 +113,7 @@ bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
*
* Panel Replay and PSR SU
* - Enable when:
+ * - VRR is disabled
* - vblank counter is disabled
* - entry is allowed: usermode demonstrates an adequate number of fast
* commits)
@@ -131,19 +132,20 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
bool is_sr_active = (link->replay_settings.replay_allow_active ||
link->psr_settings.psr_allow_active);
bool is_crc_window_active = false;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(vblank_work->acrtc);
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
is_crc_window_active =
amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base);
#endif
- if (link->replay_settings.replay_feature_enabled &&
+ if (link->replay_settings.replay_feature_enabled && !vrr_active &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
amdgpu_dm_replay_enable(vblank_work->stream, true);
} else if (vblank_enabled) {
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
- amdgpu_dm_psr_disable(vblank_work->stream);
- } else if (link->psr_settings.psr_feature_enabled &&
+ amdgpu_dm_psr_disable(vblank_work->stream, false);
+ } else if (link->psr_settings.psr_feature_enabled && !vrr_active &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
struct amdgpu_dm_connector *aconn =
@@ -216,8 +218,10 @@ static void amdgpu_dm_idle_worker(struct work_struct *work)
break;
}
- if (idle_work->enable)
+ if (idle_work->enable) {
+ dc_post_update_surfaces_to_stream(idle_work->dm->dc);
dc_allow_idle_optimizations(idle_work->dm->dc, true);
+ }
mutex_unlock(&idle_work->dm->dc_lock);
}
idle_work->dm->idle_workqueue->running = false;
@@ -244,6 +248,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+ int r;
mutex_lock(&dm->dc_lock);
@@ -271,9 +277,20 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
vblank_work->acrtc->dm_irq_params.allow_sr_entry);
}
- if (dm->active_vblank_irq_count == 0)
+ if (dm->active_vblank_irq_count == 0) {
+ dc_post_update_surfaces_to_stream(dm->dc);
+
+ r = amdgpu_dpm_pause_power_profile(adev, true);
+ if (r)
+ dev_warn(adev->dev, "failed to set default power profile mode\n");
+
dc_allow_idle_optimizations(dm->dc, true);
+ r = amdgpu_dpm_pause_power_profile(adev, false);
+ if (r)
+ dev_warn(adev->dev, "failed to restore the power profile mode\n");
+ }
+
mutex_unlock(&dm->dc_lock);
dc_stream_release(vblank_work->stream);
@@ -291,18 +308,45 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
int irq_type;
int rc = 0;
- if (acrtc->otg_inst == -1)
- goto skip;
+ if (enable && !acrtc->base.enabled) {
+ drm_dbg_vbl(crtc->dev,
+ "Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n",
+ acrtc->crtc_id, acrtc->base.enabled);
+ return -EINVAL;
+ }
irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
if (enable) {
- /* vblank irq on -> Only need vupdate irq in vrr mode */
- if (amdgpu_dm_crtc_vrr_active(acrtc_state))
- rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
- } else {
- /* vblank irq off -> vupdate irq off */
- rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
+ struct dc *dc = adev->dm.dc;
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+ struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
+ struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
+ bool sr_supported = (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED) ||
+ pr->config.replay_supported;
+
+ /*
+ * IPS & self-refresh feature can cause vblank counter resets between
+ * vblank disable and enable.
+ * It may cause the system to get stuck waiting for the vblank counter.
+ * Call this function to estimate missed vblanks by using timestamps and
+ * update the vblank counter in DRM.
+ */
+ if (dc->caps.ips_support &&
+ dc->config.disable_ips != DMUB_IPS_DISABLE_ALL &&
+ sr_supported && vblank->config.disable_immediate)
+ drm_crtc_vblank_restore(crtc);
+ }
+
+ if (dc_supports_vrr(dm->dc->ctx->dce_version)) {
+ if (enable) {
+ /* vblank irq on -> Only need vupdate irq in vrr mode */
+ if (amdgpu_dm_crtc_vrr_active(acrtc_state))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
+ } else {
+ /* vblank irq off -> vupdate irq off */
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
+ }
}
if (rc)
@@ -354,7 +398,7 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
return rc;
}
#endif
-skip:
+
if (amdgpu_in_reset(adev))
return 0;
@@ -659,6 +703,15 @@ static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ if (!state->legacy_cursor_update && amdgpu_dm_crtc_vrr_active(dm_crtc_state)) {
+ struct drm_plane_state *primary_state;
+
+ /* Pull in primary plane for correct VRR handling */
+ primary_state = drm_atomic_get_plane_state(state, crtc->primary);
+ if (IS_ERR(primary_state))
+ return PTR_ERR(primary_state);
+ }
+
/* In some use cases, like reset, no stream is attached */
if (!dm_crtc_state->stream)
return 0;
@@ -683,7 +736,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
{
struct amdgpu_crtc *acrtc = NULL;
struct drm_plane *cursor_plane;
- bool is_dcn;
+ bool has_degamma;
int res = -ENOMEM;
cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
@@ -722,11 +775,18 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
dm->adev->mode_info.crtcs[crtc_index] = acrtc;
- /* Don't enable DRM CRTC degamma property for DCE since it doesn't
- * support programmable degamma anywhere.
+ /* Don't enable the DRM CRTC degamma property when:
+ * 1. degamma is replaced by the color pipeline,
+ * 2. on DCE, since it doesn't support programmable degamma anywhere,
+ * 3. on DCN401, since the pre-blending degamma LUT doesn't apply to the cursor.
*/
- is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
- drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0,
+ if (plane->color_pipeline_property)
+ has_degamma = false;
+ else
+ has_degamma = dm->adev->dm.dc->caps.color.dpp.dcn_arch &&
+ dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01;
+
+ drm_crtc_enable_color_mgmt(&acrtc->base, has_degamma ? MAX_COLOR_LUT_ENTRIES : 0,
true, MAX_COLOR_LUT_ENTRIES);
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
index 17e948753f59..c1212947a77b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
@@ -37,7 +37,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable);
bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc);
-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state);
+bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state);
int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 6a97bb2d9160..a9839485f2a2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -25,6 +26,7 @@
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
+#include <media/cec-notifier.h>
#include "dc.h"
#include "amdgpu.h"
@@ -258,7 +260,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
struct dc_link *link = connector->dc_link;
struct amdgpu_device *adev = drm_to_adev(connector->base.dev);
struct dc *dc = (struct dc *)link->dc;
- struct dc_link_settings prefer_link_settings;
+ struct dc_link_settings prefer_link_settings = {0};
char *wr_buf = NULL;
const uint32_t wr_buf_size = 40;
/* 0: lane_count; 1: link_rate */
@@ -389,7 +391,7 @@ static ssize_t dp_mst_link_setting(struct file *f, const char __user *buf,
struct dc_link *link = aconnector->dc_link;
struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
struct dc *dc = (struct dc *)link->dc;
- struct dc_link_settings prefer_link_settings;
+ struct dc_link_settings prefer_link_settings = {0};
char *wr_buf = NULL;
const uint32_t wr_buf_size = 40;
/* 0: lane_count; 1: link_rate */
@@ -613,7 +615,7 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
uint32_t wr_buf_size = 40;
long param[3];
bool use_prefer_link_setting;
- struct link_training_settings link_lane_settings;
+ struct link_training_settings link_lane_settings = {0};
int max_param_num = 3;
uint8_t param_nums = 0;
int r = 0;
@@ -757,6 +759,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
int max_param_num = 11;
enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
bool disable_hpd = false;
+ bool supports_hpd = link->irq_source_hpd != DC_IRQ_SOURCE_INVALID;
bool valid_test_pattern = false;
uint8_t param_nums = 0;
/* init with default 80bit custom pattern */
@@ -768,7 +771,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
- struct link_training_settings link_training_settings;
+ struct link_training_settings link_training_settings = {0};
int i;
if (size == 0)
@@ -848,7 +851,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
* because it might have been disabled after a test pattern was set.
* AUX depends on HPD * sequence dependent, do not move!
*/
- if (!disable_hpd)
+ if (supports_hpd && !disable_hpd)
dc_link_enable_hpd(link);
prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
@@ -886,7 +889,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
* Need disable interrupt to avoid SW driver disable DP output. This is
* done after the test pattern is set.
*/
- if (valid_test_pattern && disable_hpd)
+ if (valid_test_pattern && supports_hpd && disable_hpd)
dc_link_disable_hpd(link);
kfree(wr_buf);
@@ -902,9 +905,10 @@ static int dmub_tracebuffer_show(struct seq_file *m, void *data)
{
struct amdgpu_device *adev = m->private;
struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ struct dmub_fw_meta_info *fw_meta_info = NULL;
struct dmub_debugfs_trace_entry *entries;
uint8_t *tbuf_base;
- uint32_t tbuf_size, max_entries, num_entries, i;
+ uint32_t tbuf_size, max_entries, num_entries, first_entry, i;
if (!fb_info)
return 0;
@@ -913,20 +917,42 @@ static int dmub_tracebuffer_show(struct seq_file *m, void *data)
if (!tbuf_base)
return 0;
- tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size;
+ if (adev->dm.dmub_srv)
+ fw_meta_info = &adev->dm.dmub_srv->meta_info;
+
+ tbuf_size = fw_meta_info ? fw_meta_info->trace_buffer_size :
+ DMUB_TRACE_BUFFER_SIZE;
max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) /
sizeof(struct dmub_debugfs_trace_entry);
num_entries =
((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count;
+ /* DMCUB tracebuffer is a ring. If it rolled over, print a hint that
+ * entries are being overwritten.
+ */
+ if (num_entries > max_entries)
+ seq_printf(m, "...\n");
+
+ first_entry = num_entries % max_entries;
num_entries = min(num_entries, max_entries);
entries = (struct dmub_debugfs_trace_entry
*)(tbuf_base +
sizeof(struct dmub_debugfs_trace_header));
- for (i = 0; i < num_entries; ++i) {
+ /* To print entries chronologically, start from the first entry to the
+ * top of the buffer, then from the base of the buffer to the first entry.
+ */
+ for (i = first_entry; i < num_entries; ++i) {
+ struct dmub_debugfs_trace_entry *entry = &entries[i];
+
+ seq_printf(m,
+ "trace_code=%u tick_count=%u param0=%u param1=%u\n",
+ entry->trace_code, entry->tick_count, entry->param0,
+ entry->param1);
+ }
+ for (i = 0; i < first_entry; ++i) {
struct dmub_debugfs_trace_entry *entry = &entries[i];
seq_printf(m,
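
entry_count grows monotonically while only max_entries slots exist, so the oldest surviving entry sits at entry_count % max_entries. A standalone sketch of the same two-pass chronological walk over a ring:

#include <stdint.h>
#include <stdio.h>

/* Print a ring buffer oldest-first. total is the monotonically
 * increasing write count, cap is the number of slots.
 */
static void dump_ring(const uint32_t *buf, uint32_t total, uint32_t cap)
{
	uint32_t first = total % cap;
	uint32_t n = total < cap ? total : cap;
	uint32_t i;

	for (i = first; i < n; i++)	/* oldest entry up to the top */
		printf("%u\n", buf[i]);
	for (i = 0; i < first; i++)	/* wrap back to the newest */
		printf("%u\n", buf[i]);
}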
@@ -1145,7 +1171,7 @@ static int amdgpu_current_colorspace_show(struct seq_file *m, void *data)
case COLOR_SPACE_2020_RGB_FULLRANGE:
seq_puts(m, "BT2020_RGB");
break;
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
seq_puts(m, "BT2020_YCC");
break;
default:
@@ -1277,7 +1303,8 @@ static int odm_combine_segments_show(struct seq_file *m, void *unused)
if (connector->status != connector_status_connected)
return -ENODEV;
- if (pipe_ctx != NULL && pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments)
+ if (pipe_ctx && pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments)
pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments(pipe_ctx->stream_res.tg, &segments);
seq_printf(m, "%d\n", segments);
@@ -2825,6 +2852,67 @@ static int is_dpia_link_show(struct seq_file *m, void *data)
return 0;
}
+/**
+ * hdmi_cec_state_show - Read out the HDMI-CEC feature status
+ * @m: sequence file.
+ * @data: unused.
+ *
+ * Return: 0 on success
+ */
+static int hdmi_cec_state_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ seq_printf(m, "%s:%d\n", connector->name, connector->base.id);
+ seq_printf(m, "HDMI-CEC status: %d\n", aconnector->notifier ? 1 : 0);
+
+ return 0;
+}
+
+/**
+ * hdmi_cec_state_write - Enable/Disable HDMI-CEC feature from driver side
+ * @f: file structure.
+ * @buf: userspace buffer. Set to '1' to enable, '0' to disable the CEC feature.
+ * @size: size of buffer from userspace.
+ * @pos: unused.
+ *
+ * Return size on success, error code on failure
+ */
+static ssize_t hdmi_cec_state_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int ret;
+ bool enable;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct drm_device *ddev = aconnector->base.dev;
+
+ if (size == 0)
+ return -EINVAL;
+
+ ret = kstrtobool_from_user(buf, size, &enable);
+ if (ret) {
+ drm_dbg_driver(ddev, "invalid user data!\n");
+ return ret;
+ }
+
+ if (enable) {
+ if (aconnector->notifier)
+ return -EINVAL;
+ ret = amdgpu_dm_initialize_hdmi_connector(aconnector);
+ if (ret)
+ return ret;
+ hdmi_cec_set_edid(aconnector);
+ } else {
+ if (!aconnector->notifier)
+ return -EINVAL;
+ cec_notifier_conn_unregister(aconnector->notifier);
+ aconnector->notifier = NULL;
+ }
+
+ return size;
+}
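
With this in place, CEC can be toggled at runtime by writing 1 or 0 to the connector's hdmi_cec_state debugfs file (typically under /sys/kernel/debug/dri/<N>/<connector-name>/; the exact path depends on the DRM device index). Writing 1 registers the CEC notifier and pushes the current EDID via hdmi_cec_set_edid(); writing 0 unregisters the notifier.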
+
DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
@@ -2837,6 +2925,7 @@ DEFINE_SHOW_ATTRIBUTE(psr_capability);
DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector);
DEFINE_SHOW_ATTRIBUTE(dp_mst_progress_status);
DEFINE_SHOW_ATTRIBUTE(is_dpia_link);
+DEFINE_SHOW_STORE_ATTRIBUTE(hdmi_cec_state);
static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
.owner = THIS_MODULE,
@@ -2972,7 +3061,8 @@ static const struct {
char *name;
const struct file_operations *fops;
} hdmi_debugfs_entries[] = {
- {"hdcp_sink_capability", &hdcp_sink_capability_fops}
+ {"hdcp_sink_capability", &hdcp_sink_capability_fops},
+ {"hdmi_cec_state", &hdmi_cec_state_fops}
};
/*
@@ -3019,6 +3109,35 @@ static int replay_get_state(void *data, u64 *val)
}
/*
+ * Start / stop Replay residency capture
+ */
+static int replay_set_residency(void *data, u64 val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ bool is_start = (val != 0);
+ u32 residency = 0;
+
+ link->dc->link_srv->edp_replay_residency(link, &residency, is_start, PR_RESIDENCY_MODE_PHY);
+ return 0;
+}
+
+/*
+ * Read Replay residency
+ */
+static int replay_get_residency(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ u32 residency = 0;
+
+ link->dc->link_srv->edp_replay_residency(link, &residency, false, PR_RESIDENCY_MODE_PHY);
+ *val = (u64)residency;
+
+ return 0;
+}
+
+/*
* Read PSR state
*/
static int psr_get(void *data, u64 *val)
@@ -3237,7 +3356,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_g
dmcub_trace_event_state_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(replay_state_fops, replay_get_state, NULL, "%llu\n");
-
+DEFINE_DEBUGFS_ATTRIBUTE(replay_residency_fops, replay_get_residency, replay_set_residency,
+ "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL,
"%llu\n");
@@ -3415,6 +3535,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file("replay_capability", 0444, dir, connector,
&replay_capability_fops);
debugfs_create_file("replay_state", 0444, dir, connector, &replay_state_fops);
+ debugfs_create_file_unsafe("replay_residency", 0444, dir,
+ connector, &replay_residency_fops);
debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops);
debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
debugfs_create_file_unsafe("psr_residency", 0444, dir,
@@ -3457,8 +3579,8 @@ static int crc_win_x_start_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.x_start = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].x_start = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3474,7 +3596,7 @@ static int crc_win_x_start_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.x_start;
+ *val = acrtc->dm_irq_params.window_param[0].x_start;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3494,8 +3616,8 @@ static int crc_win_y_start_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.y_start = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].y_start = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3511,7 +3633,7 @@ static int crc_win_y_start_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.y_start;
+ *val = acrtc->dm_irq_params.window_param[0].y_start;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3530,8 +3652,8 @@ static int crc_win_x_end_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.x_end = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].x_end = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3547,7 +3669,7 @@ static int crc_win_x_end_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.x_end;
+ *val = acrtc->dm_irq_params.window_param[0].x_end;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3566,8 +3688,8 @@ static int crc_win_y_end_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.y_end = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].y_end = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3583,7 +3705,7 @@ static int crc_win_y_end_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.y_end;
+ *val = acrtc->dm_irq_params.window_param[0].y_end;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3606,13 +3728,14 @@ static int crc_win_update_set(void *data, u64 val)
/* PSR may write to OTG CRC window control register,
* so close it before starting secure_display.
*/
- amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream);
+ amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream, true);
spin_lock_irq(&adev_to_drm(adev)->event_lock);
- acrtc->dm_irq_params.window_param.activated = true;
- acrtc->dm_irq_params.window_param.update_win = true;
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
+ acrtc->dm_irq_params.window_param[0].enable = true;
+ acrtc->dm_irq_params.window_param[0].update_win = true;
+ acrtc->dm_irq_params.window_param[0].skip_frame_cnt = 0;
+ acrtc->dm_irq_params.crc_window_activated = true;
spin_unlock_irq(&adev_to_drm(adev)->event_lock);
mutex_unlock(&adev->dm.dc_lock);
@@ -3900,7 +4023,7 @@ static int capabilities_show(struct seq_file *m, void *unused)
struct hubbub *hubbub = dc->res_pool->hubbub;
- if (hubbub->funcs->get_mall_en)
+ if (hubbub && hubbub->funcs->get_mall_en)
hubbub->funcs->get_mall_en(hubbub, &mall_in_use);
if (dc->cap_funcs.get_subvp_en)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
index 071200473c27..122cdc124b3b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index e339c7a8d541..85ce558cefc5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
@@ -26,6 +27,7 @@
#include "amdgpu_dm_hdcp.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
+#include "dc_fused_io.h"
#include "dm_helpers.h"
#include <drm/display/drm_hdcp_helper.h>
#include "hdcp_psp.h"
@@ -76,6 +78,34 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
}
+static bool lp_atomic_write_poll_read_i2c(
+ void *handle,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ struct dc_link *link = handle;
+
+ return dm_atomic_write_poll_read_i2c(link, write, poll, read, poll_timeout_us, poll_mask_msb);
+}
+
+static bool lp_atomic_write_poll_read_aux(
+ void *handle,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ struct dc_link *link = handle;
+
+ return dm_atomic_write_poll_read_aux(link, write, poll, read, poll_timeout_us, poll_mask_msb);
+}
+
static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
{
struct ta_hdcp_shared_memory *hdcp_cmd;
@@ -171,8 +201,12 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
struct mod_hdcp_link_adjustment link_adjust;
struct mod_hdcp_display_adjustment display_adjust;
unsigned int conn_index = aconnector->base.index;
+ const struct dc *dc = aconnector->dc_link->dc;
- mutex_lock(&hdcp_w->mutex);
+ guard(mutex)(&hdcp_w->mutex);
+ drm_connector_get(&aconnector->base);
+ if (hdcp_w->aconnector[conn_index])
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
hdcp_w->aconnector[conn_index] = aconnector;
memset(&link_adjust, 0, sizeof(link_adjust));
@@ -190,6 +224,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
display_adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE;
link_adjust.auth_delay = 2;
+ link_adjust.retry_limit = MAX_NUM_OF_ATTEMPTS;
if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
@@ -197,6 +232,9 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
link_adjust.hdcp1.disable = 1;
link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1;
}
+ link_adjust.hdcp2.use_fw_locality_check =
+ (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable);
+ link_adjust.hdcp2.use_sw_locality_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
schedule_delayed_work(&hdcp_w->property_validate_dwork,
msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
@@ -209,7 +247,6 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output);
process_output(hdcp_w);
- mutex_unlock(&hdcp_w->mutex);
}
static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
@@ -220,8 +257,7 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
struct drm_connector_state *conn_state = aconnector->base.state;
unsigned int conn_index = aconnector->base.index;
- mutex_lock(&hdcp_w->mutex);
- hdcp_w->aconnector[conn_index] = aconnector;
+ guard(mutex)(&hdcp_w->mutex);
/* the removal of display will invoke auth reset -> hdcp destroy and
* we'd expect the Content Protection (CP) property changed back to
@@ -237,9 +273,11 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
}
mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
-
+ if (hdcp_w->aconnector[conn_index]) {
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+ hdcp_w->aconnector[conn_index] = NULL;
+ }
process_output(hdcp_w);
- mutex_unlock(&hdcp_w->mutex);
}
void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
@@ -247,7 +285,7 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
unsigned int conn_index;
- mutex_lock(&hdcp_w->mutex);
+ guard(mutex)(&hdcp_w->mutex);
mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);
@@ -256,11 +294,13 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
hdcp_w->encryption_status[conn_index] =
MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ if (hdcp_w->aconnector[conn_index]) {
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+ hdcp_w->aconnector[conn_index] = NULL;
+ }
}
process_output(hdcp_w);
-
- mutex_unlock(&hdcp_w->mutex);
}
void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
@@ -277,7 +317,7 @@ static void event_callback(struct work_struct *work)
hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
callback_dwork);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
cancel_delayed_work(&hdcp_work->callback_dwork);
@@ -285,8 +325,6 @@ static void event_callback(struct work_struct *work)
&hdcp_work->output);
process_output(hdcp_work);
-
- mutex_unlock(&hdcp_work->mutex);
}
static void event_property_update(struct work_struct *work)
@@ -323,7 +361,7 @@ static void event_property_update(struct work_struct *work)
continue;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
if (conn_state->commit) {
ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done,
@@ -355,7 +393,6 @@ static void event_property_update(struct work_struct *work)
drm_hdcp_update_content_protection(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED);
}
- mutex_unlock(&hdcp_work->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
}
@@ -368,7 +405,7 @@ static void event_property_validate(struct work_struct *work)
struct amdgpu_dm_connector *aconnector;
unsigned int conn_index;
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX;
conn_index++) {
@@ -408,8 +445,6 @@ static void event_property_validate(struct work_struct *work)
schedule_work(&hdcp_work->property_update_work);
}
}
-
- mutex_unlock(&hdcp_work->mutex);
}
static void event_watchdog_timer(struct work_struct *work)
@@ -420,7 +455,7 @@ static void event_watchdog_timer(struct work_struct *work)
struct hdcp_workqueue,
watchdog_timer_dwork);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
@@ -429,8 +464,6 @@ static void event_watchdog_timer(struct work_struct *work)
&hdcp_work->output);
process_output(hdcp_work);
-
- mutex_unlock(&hdcp_work->mutex);
}
static void event_cpirq(struct work_struct *work)
@@ -439,13 +472,11 @@ static void event_cpirq(struct work_struct *work)
hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);
process_output(hdcp_work);
-
- mutex_unlock(&hdcp_work->mutex);
}
void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
@@ -455,6 +486,7 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
for (i = 0; i < hdcp_work->max_link; i++) {
cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
+ cancel_delayed_work_sync(&hdcp_work[i].property_validate_dwork);
}
sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);
@@ -469,7 +501,6 @@ static bool enable_assr(void *handle, struct dc_link *link)
struct mod_hdcp hdcp = hdcp_work->hdcp;
struct psp_context *psp = hdcp.config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
- bool res = true;
if (!psp->dtm_context.context.initialized) {
DRM_INFO("Failed to enable ASSR, DTM TA is not initialized.");
@@ -478,7 +509,7 @@ static bool enable_assr(void *handle, struct dc_link *link)
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
- mutex_lock(&psp->dtm_context.mutex);
+ guard(mutex)(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE;
@@ -490,12 +521,10 @@ static bool enable_assr(void *handle, struct dc_link *link)
if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
DRM_INFO("Failed to enable ASSR");
- res = false;
+ return false;
}
- mutex_unlock(&psp->dtm_context.mutex);
-
- return res;
+ return true;
}
static void update_config(void *handle, struct cp_psp_stream_config *config)
@@ -503,11 +532,13 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct hdcp_workqueue *hdcp_work = handle;
struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx;
int link_index = aconnector->dc_link->link_index;
+ unsigned int conn_index = aconnector->base.index;
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
struct dc_sink *sink = NULL;
bool link_is_hdcp14 = false;
+ const struct dc *dc = aconnector->dc_link->dc;
if (config->dpms_off) {
hdcp_remove_display(hdcp_work, link_index, aconnector);
@@ -547,7 +578,10 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->dp.usb4_enabled = config->usb4_enabled;
display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
link->adjust.auth_delay = 2;
+ link->adjust.retry_limit = MAX_NUM_OF_ATTEMPTS;
link->adjust.hdcp1.disable = 0;
+ link->adjust.hdcp2.use_fw_locality_check = (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable);
+ link->adjust.hdcp2.use_sw_locality_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index,
@@ -556,13 +590,14 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
(!!aconnector->base.state) ?
aconnector->base.state->hdcp_content_type : -1);
- mutex_lock(&hdcp_w->mutex);
+ guard(mutex)(&hdcp_w->mutex);
mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
-
+ drm_connector_get(&aconnector->base);
+ if (hdcp_w->aconnector[conn_index])
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+ hdcp_w->aconnector[conn_index] = aconnector;
process_output(hdcp_w);
- mutex_unlock(&hdcp_w->mutex);
-
}
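
The hunks above replace manual mutex_lock()/mutex_unlock() pairs with the scope-based guard() helper from <linux/cleanup.h>, which drops the lock automatically on every return path and lets early returns carry a value directly. A minimal sketch of the pattern, using a hypothetical demo structure rather than the driver's types:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct demo_state {
	struct mutex lock;
	int value;
};

/* Hypothetical example: the guard is released at end of scope, so
 * error paths no longer need an unlock label or a result variable. */
static int demo_update(struct demo_state *s, int v)
{
	guard(mutex)(&s->lock);

	if (v < 0)
		return -EINVAL;	/* lock released automatically */

	s->value = v;
	return 0;		/* lock released automatically */
}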
/**
@@ -614,7 +649,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
* incorrect/corrupted and we should correct our SRM by getting it from PSP
*/
static ssize_t srm_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buffer,
+ const struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct hdcp_workqueue *work;
@@ -638,7 +673,7 @@ static ssize_t srm_data_write(struct file *filp, struct kobject *kobj,
}
static ssize_t srm_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buffer,
+ const struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct hdcp_workqueue *work;
@@ -736,19 +771,30 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
- hdcp_work[i].hdcp.config.psp.handle = &adev->psp;
- if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
+ struct mod_hdcp_config *config = &hdcp_work[i].hdcp.config;
+ struct mod_hdcp_ddc_funcs *ddc_funcs = &config->ddc.funcs;
+
+ config->psp.handle = &adev->psp;
+ if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
dc->ctx->dce_version == DCN_VERSION_3_14 ||
dc->ctx->dce_version == DCN_VERSION_3_15 ||
- dc->ctx->dce_version == DCN_VERSION_3_5 ||
+ dc->ctx->dce_version == DCN_VERSION_3_16 ||
+ dc->ctx->dce_version == DCN_VERSION_3_2 ||
+ dc->ctx->dce_version == DCN_VERSION_3_21 ||
+ dc->ctx->dce_version == DCN_VERSION_3_5 ||
dc->ctx->dce_version == DCN_VERSION_3_51 ||
- dc->ctx->dce_version == DCN_VERSION_3_16)
- hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1;
- hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
- hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
- hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
- hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd;
- hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd;
+ dc->ctx->dce_version == DCN_VERSION_3_6 ||
+ dc->ctx->dce_version == DCN_VERSION_4_01)
+ config->psp.caps.dtm_v3_supported = 1;
+
+ config->ddc.handle = dc_get_link_at_index(dc, i);
+
+ ddc_funcs->write_i2c = lp_write_i2c;
+ ddc_funcs->read_i2c = lp_read_i2c;
+ ddc_funcs->write_dpcd = lp_write_dpcd;
+ ddc_funcs->read_dpcd = lp_read_dpcd;
+ ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c;
+ ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux;
memset(hdcp_work[i].aconnector, 0,
sizeof(struct amdgpu_dm_connector *) *
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 69b445b011c8..4faa344f196e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index b0fea0856866..ac98c746c3de 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -55,16 +56,21 @@ static u32 edid_extract_panel_id(struct edid *edid)
(u32)EDID_PRODUCT_ID(edid);
}
-static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
+static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct dc_edid_caps *edid_caps)
{
uint32_t panel_id = edid_extract_panel_id(edid);
switch (panel_id) {
+ /* Workaround for monitors that need a delay after detecting the link */
+ case drm_edid_encode_panel_id('G', 'B', 'T', 0x3215):
+ drm_dbg_driver(dev, "Add 10s delay for link detection for panel id %X\n", panel_id);
+ edid_caps->panel_patch.wait_after_dpcd_poweroff_ms = 10000;
+ break;
/* Workaround for some monitors that do not work well with FAMS */
case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E):
case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053):
case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC):
- DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
+ drm_dbg_driver(dev, "Disabling FAMS on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_fams = true;
break;
/* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
@@ -73,11 +79,12 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
case drm_edid_encode_panel_id('M', 'S', 'F', 0x1003):
- DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
+ drm_dbg_driver(dev, "Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
- DRM_DEBUG_DRIVER("Disabling VSC on monitor with panel id %X\n", panel_id);
+ case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):
+ drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_colorimetry = true;
break;
default:
@@ -101,6 +108,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
{
struct amdgpu_dm_connector *aconnector = link->priv;
struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL;
struct cea_sad *sads;
int sad_count = -1;
@@ -123,6 +131,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->serial_number = edid_buf->serial;
edid_caps->manufacture_week = edid_buf->mfg_week;
edid_caps->manufacture_year = edid_buf->mfg_year;
+ edid_caps->analog = !(edid_buf->input & DRM_EDID_INPUT_DIGITAL);
drm_edid_get_monitor_name(edid_buf,
edid_caps->display_name,
@@ -130,7 +139,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->edid_hdmi = connector->display_info.is_hdmi;
- apply_edid_quirks(edid_buf, edid_caps);
+ apply_edid_quirks(dev, edid_buf, edid_caps);
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
if (sad_count <= 0)
@@ -624,6 +633,19 @@ bool dm_helpers_submit_i2c(
return result;
}
+bool dm_helpers_execute_fused_io(
+ struct dc_context *ctx,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+)
+{
+ struct amdgpu_device *dev = ctx->driver_context;
+
+ return amdgpu_dm_execute_fused_io(dev, link, commands, count, timeout_us);
+}
+
static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
bool is_write_cmd,
unsigned char cmd,
@@ -885,6 +907,12 @@ bool dm_helpers_dp_write_dsc_enable(
return ret;
}
+bool dm_helpers_dp_write_hblank_reduction(struct dc_context *ctx, const struct dc_stream_state *stream)
+{
+ // TODO
+ return false;
+}
+
bool dm_helpers_is_dp_sink_present(struct dc_link *link)
{
bool dp_sink_present;
@@ -906,15 +934,15 @@ dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len)
{
struct drm_connector *connector = data;
struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev);
- unsigned char start = block * EDID_LENGTH;
- void *edid;
+ unsigned short start = block * EDID_LENGTH;
+ struct edid *edid;
int r;
if (!acpidev)
return -ENODEV;
/* fetch the entire edid from BIOS */
- r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, &edid);
+ r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, (void *)&edid);
if (r < 0) {
drm_dbg(connector->dev, "Failed to get EDID from ACPI: %d\n", r);
return r;
@@ -924,7 +952,14 @@ dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len)
goto cleanup;
}
- memcpy(buf, edid + start, len);
+ /* sanity check */
+ if (edid->revision < 4 || !(edid->input & DRM_EDID_INPUT_DIGITAL) ||
+ (edid->input & DRM_EDID_DIGITAL_TYPE_MASK) == DRM_EDID_DIGITAL_TYPE_UNDEF) {
+ r = -EINVAL;
+ goto cleanup;
+ }
+
+ memcpy(buf, (void *)edid + start, len);
r = 0;
cleanup:
@@ -963,8 +998,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct amdgpu_dm_connector *aconnector = link->priv;
struct drm_connector *connector = &aconnector->base;
struct i2c_adapter *ddc;
- int retry = 3;
- enum dc_edid_status edid_status;
+ int retry = 25;
+ enum dc_edid_status edid_status = EDID_NO_RESPONSE;
const struct drm_edid *drm_edid;
const struct edid *edid;
@@ -994,9 +1029,13 @@ enum dc_edid_status dm_helpers_read_local_edid(
}
if (!drm_edid)
- return EDID_NO_RESPONSE;
+ continue;
edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
+ if (!edid ||
+ edid->extensions >= sizeof(sink->dc_edid.raw_edid) / EDID_LENGTH)
+ return EDID_BAD_INPUT;
+
sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);
@@ -1008,7 +1047,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
&sink->dc_edid,
&sink->edid_caps);
- } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);
+ } while ((edid_status == EDID_BAD_CHECKSUM || edid_status == EDID_NO_RESPONSE) && --retry > 0);
if (edid_status != EDID_OK)
DRM_ERROR("EDID err: %d, on connector: %s",
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 3390f0d8420a..0a2a3f233a0e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -473,8 +474,9 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
unregister_all_irq_handlers(adev);
}
-int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
+void amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
+ struct drm_device *dev = adev_to_drm(adev);
int src;
struct list_head *hnd_list_h;
struct list_head *hnd_list_l;
@@ -511,10 +513,12 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
- return 0;
+
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_disable(dev);
}
-int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
+void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
int src;
struct list_head *hnd_list_h, *hnd_list_l;
@@ -522,7 +526,7 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
- DRM_DEBUG_KMS("DM_IRQ: early resume\n");
+ drm_dbg(adev_to_drm(adev), "DM_IRQ: early resume\n");
/* re-enable short pulse interrupts HW interrupt */
for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
@@ -533,19 +537,18 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-
- return 0;
}
-int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
+void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
+ struct drm_device *dev = adev_to_drm(adev);
int src;
struct list_head *hnd_list_h, *hnd_list_l;
unsigned long irq_table_flags;
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
- DRM_DEBUG_KMS("DM_IRQ: resume\n");
+ drm_dbg(adev_to_drm(adev), "DM_IRQ: resume\n");
/**
* Re-enable HW interrupt for HPD and only since FLIP and VBLANK
@@ -559,7 +562,9 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
- return 0;
+
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(dev);
}
/*
@@ -894,12 +899,24 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int irq_type;
+ int i;
+ bool use_polling = false;
+
+ /* First, clear all hpd and hpdrx interrupts */
+ for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) {
+ if (!dc_interrupt_set(adev->dm.dc, i, false))
+ drm_err(dev, "Failed to clear hpd(rx) source=%d on init\n",
+ i);
+ }
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector;
const struct dc_link *dc_link;
+ use_polling |= connector->polled != DRM_CONNECTOR_POLL_HPD;
+
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
@@ -907,10 +924,31 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
dc_link = amdgpu_dm_connector->dc_link;
+ /*
+ * Get a base driver irq reference for hpd ints for the lifetime
+ * of dm. Note that only hpd interrupt types are registered with
+ * base driver; hpd_rx types aren't. IOW, amdgpu_irq_get/put on
+ * hpd_rx isn't available. DM currently controls hpd_rx
+ * explicitly with dc_interrupt_set()
+ */
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
- dc_interrupt_set(adev->dm.dc,
- dc_link->irq_source_hpd,
- true);
+ irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
+ /*
+ * TODO: There's a mismatch between mode_info.num_hpd
+ * and what bios reports as the # of connectors with hpd
+ * sources. Since the # of hpd source types registered
+ * with base driver == mode_info.num_hpd, we have to
+ * fall back to dc_interrupt_set for the remaining types.
+ */
+ if (irq_type < adev->mode_info.num_hpd) {
+ if (amdgpu_irq_get(adev, &adev->hpd_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n",
+ dc_link->irq_source_hpd);
+ } else {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ true);
+ }
}
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
@@ -920,6 +958,9 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ if (use_polling)
+ drm_kms_helper_poll_init(dev);
}
/**
@@ -935,6 +976,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int irq_type;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -948,9 +990,18 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
dc_link = amdgpu_dm_connector->dc_link;
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
- dc_interrupt_set(adev->dm.dc,
- dc_link->irq_source_hpd,
- false);
+ irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
+
+ /* TODO: See same TODO in amdgpu_dm_hpd_init() */
+ if (irq_type < adev->mode_info.num_hpd) {
+ if (amdgpu_irq_put(adev, &adev->hpd_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n",
+ dc_link->irq_source_hpd);
+ } else {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ false);
+ }
}
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
@@ -960,4 +1011,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_fini(dev);
}
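
The HPD changes above pair drm_kms_helper_poll_init() at init time with drm_kms_helper_poll_fini() at teardown, and gate drm_kms_helper_poll_disable()/drm_kms_helper_poll_enable() on mode_config.poll_enabled across suspend/resume. A condensed sketch of that lifecycle (an illustration of the helper pairing, not the driver's exact code):

#include <drm/drm_device.h>
#include <drm/drm_probe_helper.h>

static void demo_hpd_init(struct drm_device *dev, bool use_polling)
{
	if (use_polling)
		drm_kms_helper_poll_init(dev);	/* start the probe worker */
}

static void demo_suspend(struct drm_device *dev)
{
	if (dev->mode_config.poll_enabled)
		drm_kms_helper_poll_disable(dev); /* no probing while asleep */
}

static void demo_resume(struct drm_device *dev)
{
	if (dev->mode_config.poll_enabled)
		drm_kms_helper_poll_enable(dev);
}

static void demo_hpd_fini(struct drm_device *dev)
{
	if (dev->mode_config.poll_enabled)
		drm_kms_helper_poll_fini(dev);	/* flush and stop the worker */
}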
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
index 2349238a626b..4f6b58f4f90d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -90,14 +91,14 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev);
* amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend.
*
*/
-int amdgpu_dm_irq_suspend(struct amdgpu_device *adev);
+void amdgpu_dm_irq_suspend(struct amdgpu_device *adev);
/**
* amdgpu_dm_irq_resume_early - enable HPDRX ASIC interrupts during resume.
* amdgpu_dm_irq_resume - enable ASIC interrupt during resume.
*
*/
-int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev);
-int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev);
+void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev);
+void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev);
#endif /* __AMDGPU_DM_IRQ_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
index 6a7ecc1e4602..3c9995275cbd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
@@ -39,7 +40,9 @@ struct dm_irq_params {
#ifdef CONFIG_DEBUG_FS
enum amdgpu_dm_pipe_crc_source crc_src;
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
- struct crc_window_param window_param;
+ struct crc_window_param window_param[MAX_CRC_WINDOW_NUM];
+ /* Whether at least one CRC window is activated */
+ bool crc_window_activated;
#endif
#endif
};
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 6e4359490613..dbd1da4d85d3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
@@ -51,6 +52,9 @@
#define PEAK_FACTOR_X1000 1006
+/*
+ * This function handles both native AUX and I2C-Over-AUX transactions.
+ */
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
@@ -59,6 +63,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
enum aux_return_code_type operation_result;
struct amdgpu_device *adev;
struct ddc_service *ddc;
+ uint8_t copy[16];
if (WARN_ON(msg->size > 16))
return -E2BIG;
@@ -74,6 +79,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
(msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
payload.defer_delay = 0;
+ if (payload.write) {
+ memcpy(copy, msg->buffer, msg->size);
+ payload.data = copy;
+ }
+
result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
&operation_result);
@@ -87,15 +97,25 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
if (adev->dm.aux_hpd_discon_quirk) {
if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
operation_result == AUX_RET_ERROR_HPD_DISCON) {
- result = 0;
+ result = msg->size;
operation_result = AUX_RET_SUCCESS;
}
}
- if (payload.write && result >= 0)
- result = msg->size;
+ /*
+ * A result of 0 also covers the AUX_DEFER/I2C_DEFER cases
+ */
+ if (payload.write && result >= 0) {
+ if (result) {
+ /* the first returned byte reports the number of bytes partially written */
+ drm_dbg_dp(adev_to_drm(adev), "AUX partially written\n");
+ result = payload.data[0];
+ } else if (!payload.reply[0])
+ /* I2C_ACK | AUX_ACK */
+ result = msg->size;
+ }
- if (result < 0)
+ if (result < 0) {
switch (operation_result) {
case AUX_RET_SUCCESS:
break;
@@ -114,6 +134,13 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
break;
}
+ drm_dbg_dp(adev_to_drm(adev), "DP AUX transfer fail:%d\n", operation_result);
+ }
+
+ if (payload.reply[0])
+ drm_dbg_dp(adev_to_drm(adev), "AUX reply command not ACK: 0x%02x.",
+ payload.reply[0]);
+
return result;
}
@@ -155,6 +182,17 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
return 0;
}
+
+static inline void
+amdgpu_dm_mst_reset_mst_connector_setting(struct amdgpu_dm_connector *aconnector)
+{
+ aconnector->drm_edid = NULL;
+ aconnector->dsc_aux = NULL;
+ aconnector->mst_output_port->passthrough_aux = NULL;
+ aconnector->mst_local_bw = 0;
+ aconnector->vc_full_pbn = 0;
+}
+
static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
@@ -182,9 +220,7 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
dc_sink_release(dc_sink);
aconnector->dc_sink = NULL;
- aconnector->drm_edid = NULL;
- aconnector->dsc_aux = NULL;
- port->passthrough_aux = NULL;
+ amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
}
aconnector->mst_status = MST_STATUS_DEFAULT;
@@ -294,6 +330,34 @@ static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnect
return true;
}
+static bool retrieve_branch_specific_data(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_dp_mst_port *port = aconnector->mst_output_port;
+ struct drm_dp_mst_port *port_parent;
+ struct drm_dp_aux *immediate_upstream_aux;
+ struct drm_dp_desc branch_desc;
+
+ if (!port->parent)
+ return false;
+
+ port_parent = port->parent->port_parent;
+
+ immediate_upstream_aux = port_parent ? &port_parent->aux : port->mgr->aux;
+
+ if (drm_dp_read_desc(immediate_upstream_aux, &branch_desc, true))
+ return false;
+
+ aconnector->branch_ieee_oui = (branch_desc.ident.oui[0] << 16) +
+ (branch_desc.ident.oui[1] << 8) +
+ (branch_desc.ident.oui[2]);
+
+ drm_dbg_dp(port->aux.drm_dev, "MST branch oui 0x%x detected at %s\n",
+ aconnector->branch_ieee_oui, connector->name);
+
+ return true;
+}
+
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -504,9 +568,7 @@ dm_dp_mst_detect(struct drm_connector *connector,
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
- aconnector->drm_edid = NULL;
- aconnector->dsc_aux = NULL;
- port->passthrough_aux = NULL;
+ amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
@@ -590,11 +652,12 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_PROBE, true);
- if (drm_connector_init(
+ if (drm_connector_dynamic_init(
dev,
connector,
&dm_dp_mst_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort)) {
+ DRM_MODE_CONNECTOR_DisplayPort,
+ NULL)) {
kfree(aconnector);
return NULL;
}
@@ -634,6 +697,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_connector_set_path_property(connector, pathprop);
+ if (!retrieve_branch_specific_data(aconnector))
+ aconnector->branch_ieee_oui = 0;
+
/*
* Initialize connector state before adding the connector to drm and
* framebuffer lists
@@ -775,6 +841,7 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
&aconnector->base);
+ drm_dp_dpcd_set_probe(&aconnector->dm_dp_aux.aux, false);
if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
return;
@@ -787,13 +854,20 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
drm_connector_attach_dp_subconnector_property(&aconnector->base);
}
-int dm_mst_get_pbn_divider(struct dc_link *link)
+uint32_t dm_mst_get_pbn_divider(struct dc_link *link)
{
+ uint32_t pbn_div_x100;
+ uint64_t dividend, divisor;
+
if (!link)
return 0;
- return dc_link_bandwidth_kbps(link,
- dc_link_get_link_cap(link)) / (8 * 1000 * 54);
+ dividend = (uint64_t)dc_link_bandwidth_kbps(link, dc_link_get_link_cap(link)) * 100;
+ divisor = 8 * 1000 * 54;
+
+ pbn_div_x100 = div64_u64(dividend, divisor);
+
+ return dfixed_const(pbn_div_x100) / 100;
}
struct dsc_mst_fairness_params {
@@ -810,26 +884,28 @@ struct dsc_mst_fairness_params {
};
#if defined(CONFIG_DRM_AMD_DC_FP)
-static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
+static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn)
{
- u8 link_coding_cap;
- uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
+ uint64_t effective_kbps = (uint64_t)kbps;
- link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
- if (link_coding_cap == DP_128b_132b_ENCODING)
- fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
+ if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps
+ effective_kbps *= 1006;
+ effective_kbps = div_u64(effective_kbps, 1000);
+ }
- return fec_overhead_multiplier_x1000;
+ return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000));
}
-static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
+static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin)
{
- u64 peak_kbps = kbps;
+ uint64_t pbn_effective = (uint64_t)pbn;
+
+ if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn
+ pbn_effective *= (1000000 / PEAK_FACTOR_X1000);
+ else
+ pbn_effective *= 1000;
- peak_kbps *= 1006;
- peak_kbps *= fec_overhead_multiplier_x1000;
- peak_kbps = div_u64(peak_kbps, 1000 * 1000);
- return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
+ return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64);
}
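
Both helpers above reduce to the MST unit of 1 PBN = 54/64 MBps = 6750 kbps of payload bandwidth; the 0.6% factor (1006/1000 going in, 994/1000 coming out) is the downspread margin. A self-contained sketch of the same arithmetic, with a worked number (names here are illustrative, not the driver's):

#include <linux/math64.h>
#include <linux/types.h>

/* Sketch of the PBN arithmetic behind kbps_to_pbn()/pbn_to_kbps():
 * 1 PBN = 54/64 MBps = 6750 kbps. */
static u64 demo_kbps_to_pbn(u64 kbps, bool add_margin)
{
	if (add_margin)			/* +0.6% downspread margin */
		kbps = div_u64(kbps * 1006, 1000);

	return DIV64_U64_ROUND_UP(kbps * 64, 54 * 8 * 1000);
}

/* Example: a stream of 12,800,000 kbps needs
 * ceil(12800000 / 6750) = 1897 PBN, or 1908 with the 0.6% margin. */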
static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
@@ -900,7 +976,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
- kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
+ kbps = pbn_to_kbps(pbn, false);
dc_dsc_compute_config(
param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps,
@@ -929,12 +1005,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
- kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
+ kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {
@@ -1030,7 +1105,6 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index;
int remaining_to_try = 0;
int ret;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
int var_pbn;
for (i = 0; i < count; i++) {
@@ -1063,7 +1137,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
var_pbn = vars[next_index].pbn;
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1123,7 +1197,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0;
int i, k, ret;
bool debugfs_overwrite = false;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
struct drm_connector_state *new_conn_state;
memset(params, 0, sizeof(params));
@@ -1204,7 +1277,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC Try no compression\n");
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1226,7 +1299,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC Try max compression\n");
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1234,7 +1307,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0)
return ret;
} else {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1617,7 +1690,6 @@ int pre_validate_dsc(struct drm_atomic_state *state,
if (ind >= 0) {
struct drm_connector *connector;
- struct amdgpu_dm_connector *aconnector;
struct drm_connector_state *drm_new_conn_state;
struct dm_connector_state *dm_new_conn_state;
struct dm_crtc_state *dm_old_crtc_state;
@@ -1625,15 +1697,17 @@ int pre_validate_dsc(struct drm_atomic_state *state,
connector =
amdgpu_dm_find_first_crtc_matching_connector(state,
state->crtcs[ind].ptr);
- aconnector = to_amdgpu_dm_connector(connector);
+ if (!connector)
+ continue;
+
drm_new_conn_state =
drm_atomic_get_new_connector_state(state,
- &aconnector->base);
+ connector);
dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state);
local_dc_state->streams[i] =
- create_validate_stream_for_sink(aconnector,
+ create_validate_stream_for_sink(connector,
&state->crtcs[ind].new_state->mode,
dm_new_conn_state,
dm_old_crtc_state->stream);
@@ -1688,32 +1762,21 @@ clean_exit:
return ret;
}
-static unsigned int kbps_from_pbn(unsigned int pbn)
-{
- unsigned int kbps = pbn;
-
- kbps *= (1000000 / PEAK_FACTOR_X1000);
- kbps *= 8;
- kbps *= 54;
- kbps /= 64;
-
- return kbps;
-}
-
static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
struct dc_dsc_bw_range *bw_range)
{
struct dc_dsc_policy dsc_policy = {0};
+ bool is_dsc_possible;
dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
- dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
- stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
- dsc_policy.min_target_bpp * 16,
- dsc_policy.max_target_bpp * 16,
- &stream->sink->dsc_caps.dsc_dec_caps,
- &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
-
- return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
+ is_dsc_possible = dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
+ stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
+ dsc_policy.min_target_bpp * 16,
+ dsc_policy.max_target_bpp * 16,
+ &stream->sink->dsc_caps.dsc_dec_caps,
+ &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
+
+ return is_dsc_possible;
}
#endif
@@ -1726,14 +1789,20 @@ static bool dp_get_link_current_set_bw(struct drm_dp_aux *aux, uint32_t *cur_lin
union lane_count_set lane_count;
u8 dp_link_encoding;
u8 link_bw_set = 0;
+ u8 data[16] = {0};
*cur_link_bw = 0;
- if (drm_dp_dpcd_read(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, &dp_link_encoding, 1) != 1 ||
- drm_dp_dpcd_read(aux, DP_LANE_COUNT_SET, &lane_count.raw, 1) != 1 ||
- drm_dp_dpcd_read(aux, DP_LINK_BW_SET, &link_bw_set, 1) != 1)
+ if (drm_dp_dpcd_read(aux, DP_LINK_BW_SET, data, 16) != 16)
return false;
+ dp_link_encoding = data[DP_MAIN_LINK_CHANNEL_CODING_SET - DP_LINK_BW_SET];
+ link_bw_set = data[DP_LINK_BW_SET - DP_LINK_BW_SET];
+ lane_count.raw = data[DP_LANE_COUNT_SET - DP_LINK_BW_SET];
+
+ drm_dbg_dp(aux->drm_dev, "MST_DSC downlink setting: %d, 0x%x x %d\n",
+ dp_link_encoding, link_bw_set, lane_count.bits.LANE_COUNT_SET);
+
switch (dp_link_encoding) {
case DP_8b_10b_ENCODING:
link_rate = link_bw_set;
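
For context on the values read above: for 8b/10b links, DP_LINK_BW_SET holds the per-lane rate in units of 0.27 Gbps, and 8b/10b channel coding leaves 8/10 of the raw rate for data. A hedged sketch of that math (an illustration, not the driver's helper):

#include <linux/types.h>

/* Hypothetical illustration of 8b/10b DP link bandwidth math. */
static u32 demo_dp_8b10b_link_kbps(u8 link_bw_set, u8 lane_count)
{
	/* DP_LINK_BW_SET is in units of 0.27 Gbps = 270000 kbps per lane;
	 * 8b/10b coding leaves 80% of the raw rate for data. */
	u32 raw_kbps_per_lane = link_bw_set * 270000;

	return raw_kbps_per_lane * lane_count / 10 * 8;
}

/* e.g. HBR2 (0x14) x 4 lanes: 20 * 270000 * 4 * 0.8 = 17,280,000 kbps */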
@@ -1791,7 +1860,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
dc_link_get_highest_encoding_format(stream->link));
cur_link_settings = stream->link->verified_link_cap;
root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
- virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
+ virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true);
/* pick the end to end bw bottleneck */
end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
@@ -1819,10 +1888,21 @@ enum dc_status dm_dp_mst_is_port_support_mode(
struct drm_dp_mst_port *immediate_upstream_port = NULL;
uint32_t end_link_bw = 0;
- /*Get last DP link BW capability*/
- if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) {
- if (stream_kbps > end_link_bw) {
- DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ /* Get last DP link BW capability. The mode shall be supported by a legacy peer */
+ if (aconnector->mst_output_port->pdt != DP_PEER_DEVICE_DP_LEGACY_CONV &&
+ aconnector->mst_output_port->pdt != DP_PEER_DEVICE_NONE) {
+ if (aconnector->vc_full_pbn != aconnector->mst_output_port->full_pbn) {
+ dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw);
+ aconnector->vc_full_pbn = aconnector->mst_output_port->full_pbn;
+ aconnector->mst_local_bw = end_link_bw;
+ } else {
+ end_link_bw = aconnector->mst_local_bw;
+ }
+
+ if (end_link_bw > 0 &&
+ stream_kbps > end_link_bw &&
+ aconnector->branch_ieee_oui != DP_BRANCH_DEVICE_ID_90CC24) {
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
"Mode required bw can't fit into last link\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
@@ -1833,13 +1913,17 @@ enum dc_status dm_dp_mst_is_port_support_mode(
immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;
if (immediate_upstream_port) {
- virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
+ virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true);
virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
- if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
- DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
- "Max dsc compression can't fit into MST available bw\n");
- return DC_FAIL_BANDWIDTH_VALIDATE;
- }
+ } else {
+ /* For topology LCT 1 case - only one mstb */
+ virtual_channel_bw_in_kbps = root_link_bw_in_kbps;
+ }
+
+ if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ "Max dsc compression can't fit into MST available bw\n");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
}
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 600d6e221011..6f7ea684b555 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
@@ -59,7 +60,7 @@ enum mst_msg_ready_type {
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
-int dm_mst_get_pbn_divider(struct dc_link *link);
+uint32_t dm_mst_get_pbn_divider(struct dc_link *link);
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 495e3cd70426..2e3ee78999d9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
+#include "drm/drm_framebuffer.h"
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -36,6 +37,7 @@
#include "amdgpu_display.h"
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_plane.h"
+#include "amdgpu_dm_colorop.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
@@ -91,9 +93,9 @@ enum dm_micro_swizzle {
MICRO_SWIZZLE_R = 3
};
-const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier)
{
- return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
+ return amdgpu_lookup_format_info(pixel_format, modifier);
}
void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
@@ -145,7 +147,7 @@ static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64
if (*cap - *size < 1) {
uint64_t new_cap = *cap * 2;
- uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
+ uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
if (!new_mods) {
kfree(*mods);
@@ -176,7 +178,7 @@ static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier
return AMD_FMT_MOD_GET(TILE, modifier);
}
-static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
+static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(struct dc_tiling_info *tiling_info,
uint64_t tiling_flags)
{
/* Fill GFX8 params */
@@ -189,6 +191,7 @@ static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_inf
tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+ tiling_info->gfxversion = DcGfxVersion8;
/* XXX fix me for VI */
tiling_info->gfx8.num_banks = num_banks;
tiling_info->gfx8.array_mode =
@@ -209,7 +212,7 @@ static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_inf
}
static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
- union dc_tiling_info *tiling_info)
+ struct dc_tiling_info *tiling_info)
{
/* Fill GFX9 params */
tiling_info->gfx9.num_pipes =
@@ -230,7 +233,7 @@ static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgp
}
static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
uint64_t modifier)
{
unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
@@ -260,7 +263,7 @@ static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amd
static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
- const union dc_tiling_info *tiling_info,
+ const struct dc_tiling_info *tiling_info,
const struct dc_plane_dcc_param *dcc,
const struct dc_plane_address *address,
const struct plane_size *plane_size)
@@ -275,8 +278,11 @@ static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
if (!dcc->enable)
return 0;
- if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
- !dc->cap_funcs.get_dcc_compression_cap)
+ if (adev->family < AMDGPU_FAMILY_GC_12_0_0 &&
+ format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return -EINVAL;
+
+ if (!dc->cap_funcs.get_dcc_compression_cap)
return -EINVAL;
input.format = format;
@@ -307,18 +313,18 @@ static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdg
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const struct plane_size *plane_size,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct dc_plane_dcc_param *dcc,
- struct dc_plane_address *address,
- const bool force_disable_dcc)
+ struct dc_plane_address *address)
{
const uint64_t modifier = afb->base.modifier;
int ret = 0;
amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
+ tiling_info->gfxversion = DcGfxVersion9;
- if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) {
+ if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
uint64_t dcc_address = afb->address + afb->base.offsets[1];
bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
@@ -358,10 +364,9 @@ static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amd
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const struct plane_size *plane_size,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct dc_plane_dcc_param *dcc,
- struct dc_plane_address *address,
- const bool force_disable_dcc)
+ struct dc_plane_address *address)
{
const uint64_t modifier = afb->base.modifier;
int ret = 0;
@@ -370,8 +375,9 @@ static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amd
amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);
tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
+ tiling_info->gfxversion = DcGfxAddr3;
- if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) {
+ if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
int max_compressed_block = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);
dcc->enable = 1;
@@ -695,7 +701,7 @@ static void amdgpu_dm_plane_add_gfx12_modifiers(struct amdgpu_device *adev,
uint64_t mod_4k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D);
uint64_t mod_256b = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D);
uint64_t dcc = ver | AMD_FMT_MOD_SET(DCC, 1);
- uint8_t max_comp_block[] = {1, 0};
+ uint8_t max_comp_block[] = {2, 1, 0};
uint64_t max_comp_block_mod[ARRAY_SIZE(max_comp_block)] = {0};
uint8_t i = 0, j = 0;
uint64_t gfx12_modifiers[] = {mod_256k, mod_64k, mod_4k, mod_256b, DRM_FORMAT_MOD_LINEAR};
@@ -727,7 +733,7 @@ static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsig
if (adev->family < AMDGPU_FAMILY_AI)
return 0;
- *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
+ *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
if (plane_type == DRM_PLANE_TYPE_CURSOR) {
amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
@@ -835,12 +841,11 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const uint64_t tiling_flags,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address,
- bool tmz_surface,
- bool force_disable_dcc)
+ bool tmz_surface)
{
const struct drm_framebuffer *fb = &afb->base;
int ret;
@@ -900,16 +905,14 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
ret = amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(adev, afb, format,
rotation, plane_size,
tiling_info, dcc,
- address,
- force_disable_dcc);
+ address);
if (ret)
return ret;
} else if (adev->family >= AMDGPU_FAMILY_AI) {
ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
rotation, plane_size,
tiling_info, dcc,
- address,
- force_disable_dcc);
+ address);
if (ret)
return ret;
} else {
@@ -946,13 +949,13 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
r = amdgpu_bo_reserve(rbo, true);
if (r) {
- dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+ drm_err(adev_to_drm(adev), "fail to reserve bo (%d)\n", r);
return r;
}
r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
if (r) {
- dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
+ drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r);
goto error_unlock;
}
@@ -1000,14 +1003,13 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
struct dc_plane_state *plane_state =
dm_plane_state_new->dc_state;
- bool force_disable_dcc = !plane_state->dcc.enable;
amdgpu_dm_plane_fill_plane_buffer_attributes(
adev, afb, plane_state->format, plane_state->rotation,
afb->tiling_flags,
&plane_state->tiling_info, &plane_state->plane_size,
&plane_state->dcc, &plane_state->address,
- afb->tmz_surface, force_disable_dcc);
+ afb->tmz_surface);
}
return 0;
@@ -1260,21 +1262,24 @@ static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane,
}
static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state, bool flip)
{
struct drm_crtc_state *new_crtc_state;
struct drm_plane_state *new_plane_state;
struct dm_crtc_state *dm_new_crtc_state;
- /* Only support async updates on cursor planes. */
- if (plane->type != DRM_PLANE_TYPE_CURSOR)
+ if (flip) {
+ if (plane->type != DRM_PLANE_TYPE_OVERLAY)
+ return -EINVAL;
+ } else if (plane->type != DRM_PLANE_TYPE_CURSOR) {
return -EINVAL;
+ }
new_plane_state = drm_atomic_get_new_plane_state(state, plane);
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
/* Reject overlay cursors for now */
- if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
+ if (!flip && dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
return -EINVAL;
return 0;
@@ -1421,6 +1426,20 @@ static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane,
amdgpu_dm_plane_handle_cursor_update(plane, old_state);
}
+static void amdgpu_dm_plane_panic_flush(struct drm_plane *plane)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane->state);
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct dc_plane_state *dc_plane_state;
+
+ if (!dm_plane_state || !dm_plane_state->dc_state)
+ return;
+
+ dc_plane_state = dm_plane_state->dc_state;
+
+ dc_plane_force_dcc_and_tiling_disable(dc_plane_state, fb->modifier ? true : false);
+}
+
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
.prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
.cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
@@ -1429,6 +1448,16 @@ static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
.atomic_async_update = amdgpu_dm_plane_atomic_async_update
};
+static const struct drm_plane_helper_funcs dm_primary_plane_helper_funcs = {
+ .prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
+ .cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
+ .atomic_check = amdgpu_dm_plane_atomic_check,
+ .atomic_async_check = amdgpu_dm_plane_atomic_async_check,
+ .atomic_async_update = amdgpu_dm_plane_atomic_async_update,
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = amdgpu_dm_plane_panic_flush,
+};
+
static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane)
{
struct dm_plane_state *amdgpu_state = NULL;
@@ -1605,7 +1634,7 @@ dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm,
drm_object_attach_property(&plane->base,
dm->adev->mode_info.plane_ctm_property, 0);
- if (dpp_color_caps.hw_3d_lut) {
+ if (dpp_color_caps.hw_3d_lut || dm->dc->caps.color.mpc.preblend) {
drm_object_attach_property(&plane->base,
mode_info.plane_shaper_lut_property, 0);
drm_object_attach_property(&plane->base,
@@ -1754,6 +1783,39 @@ dm_atomic_plane_get_property(struct drm_plane *plane,
return 0;
}
+#else
+
+#define MAX_COLOR_PIPELINES 5
+
+static int
+dm_plane_init_colorops(struct drm_plane *plane)
+{
+ struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES];
+ struct drm_device *dev = plane->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = adev->dm.dc;
+ int len = 0;
+ int ret;
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return 0;
+
+ /* initialize pipeline */
+ if (dc->ctx->dce_version >= DCN_VERSION_3_0) {
+ ret = amdgpu_dm_initialize_default_pipeline(plane, &pipelines[len]);
+ if (ret) {
+ drm_err(plane->dev, "Failed to create color pipeline for plane %d: %d\n",
+ plane->base.id, ret);
+ return ret;
+ }
+ len++;
+
+ /* Create COLOR_PIPELINE property and attach */
+ drm_plane_create_color_pipeline_property(plane, pipelines, len);
+ }
+
+ return 0;
+}
#endif
static const struct drm_plane_funcs dm_plane_funcs = {
@@ -1855,11 +1917,19 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
plane->type != DRM_PLANE_TYPE_CURSOR)
drm_plane_enable_fb_damage_clips(plane);
- drm_plane_helper_add(plane, &dm_plane_helper_funcs);
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_helper_add(plane, &dm_primary_plane_helper_funcs);
+ else
+ drm_plane_helper_add(plane, &dm_plane_helper_funcs);
#ifdef AMD_PRIVATE_COLOR
dm_atomic_plane_attach_color_mgmt_properties(dm, plane);
+#else
+ res = dm_plane_init_colorops(plane);
+ if (res)
+ return res;
#endif
+
/* Create (reset) the plane state */
if (plane->funcs->reset)
plane->funcs->reset(plane);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index 6498359bff6f..ea2619b507db 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -47,19 +47,18 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const uint64_t tiling_flags,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address,
- bool tmz_surface,
- bool force_disable_dcc);
+ bool tmz_surface);
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
unsigned long possible_crtcs,
const struct dc_plane_cap *plane_cap);
-const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier);
void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
bool *per_pixel_alpha, bool *pre_multiplied_alpha,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 848c5b4bb301..11b2ea6edf95 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -97,6 +98,7 @@ bool dm_pp_apply_display_requirements(
const struct dm_pp_single_disp_config *dc_cfg =
&pp_display_cfg->disp_configs[i];
adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
+ adev->pm.pm_display_cfg.displays[i].pixel_clock = dc_cfg->pixel_clock;
}
amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index f40240aafe98..fd491b7a3cd7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
@@ -26,7 +27,6 @@
#include "amdgpu_dm_psr.h"
#include "dc_dmub_srv.h"
#include "dc.h"
-#include "dm_helpers.h"
#include "amdgpu_dm.h"
#include "modules/power/power_helpers.h"
@@ -54,7 +54,8 @@ static bool link_supports_psrsu(struct dc_link *link)
if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU)
return false;
- return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
+ /* Temporarily disable PSR-SU to avoid glitches */
+ return false;
}
/*
@@ -86,14 +87,6 @@ void amdgpu_dm_set_psr_caps(struct dc_link *link)
link->psr_settings.psr_feature_enabled = true;
}
-
- DRM_INFO("PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
- link->psr_settings.psr_feature_enabled,
- link->psr_settings.psr_version,
- link->dpcd_caps.psr_info.psr_version,
- link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
- link->dpcd_caps.psr_info.psr2_su_y_granularity_cap);
-
}
/*
@@ -126,8 +119,10 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
psr_config.allow_multi_disp_optimizations =
(amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT);
- if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config))
- return false;
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
+ if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config))
+ return false;
+ }
ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
@@ -201,14 +196,13 @@ void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
*
* Return: true if success
*/
-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait)
{
- unsigned int power_opt = 0;
bool psr_enable = false;
DRM_DEBUG_DRIVER("Disabling psr...\n");
- return dc_link_set_psr_allow_active(stream->link, &psr_enable, true, false, &power_opt);
+ return dc_link_set_psr_allow_active(stream->link, &psr_enable, wait, false, NULL);
}
/*
@@ -251,3 +245,33 @@ bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm)
return allow_active;
}
+
+/**
+ * amdgpu_dm_psr_wait_disable() - Wait for eDP panel to exit PSR
+ * @stream: stream state attached to the eDP link
+ *
+ * Waits for a max of 500ms for the eDP panel to exit PSR.
+ *
+ * Return: true if panel exited PSR, false otherwise.
+ */
+bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream)
+{
+ enum dc_psr_state psr_state = PSR_STATE0;
+ struct dc_link *link = stream->link;
+ int retry_count;
+
+ if (!link)
+ return false;
+
+ for (retry_count = 0; retry_count < 1000; retry_count++) {
+ dc_link_get_psr_state(link, &psr_state);
+ if (psr_state == PSR_STATE0)
+ break;
+ udelay(500);
+ }
+
+ if (retry_count == 1000)
+ return false;
+
+ return true;
+}
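
With the new wait parameter and the polling helper above, a caller that must guarantee the panel has left PSR before touching the stream can split the operation: request the disable without blocking in the set call, then poll for PSR_STATE0. A hedged caller-side sketch of that pairing (assumes amdgpu_dm_psr.h is in scope; not an existing helper):

/* Hypothetical pairing of the two helpers declared above. */
static bool demo_psr_disable_sync(struct dc_stream_state *stream)
{
	/* Ask DMCUB to drop PSR without blocking in the set call ... */
	if (!amdgpu_dm_psr_disable(stream, false))
		return false;

	/* ... then poll up to ~500ms until the panel reports PSR_STATE0. */
	return amdgpu_dm_psr_wait_disable(stream);
}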
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index cd2d45c2b5ef..4fb8626913cf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
@@ -34,8 +35,9 @@
void amdgpu_dm_set_psr_caps(struct dc_link *link);
void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait);
bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm);
+bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream);
#endif /* AMDGPU_DM_AMDGPU_DM_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c
new file mode 100644
index 000000000000..1da07ebf9217
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/dmi.h>
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+
+struct amdgpu_dm_quirks {
+ bool aux_hpd_discon;
+ bool support_edp0_on_dp1;
+};
+
+static struct amdgpu_dm_quirks quirk_entries = {
+ .aux_hpd_discon = false,
+ .support_edp0_on_dp1 = false
+};
+
+static int edp0_on_dp1_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.support_edp0_on_dp1 = true;
+ return 0;
+}
+
+static int aux_hpd_discon_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.aux_hpd_discon = true;
+ return 0;
+}
+
+static const struct dmi_system_id dmi_quirk_table[] = {
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"),
+ },
+ },
+	/* TODO: refactor this from a fixed table to a dynamic option */
+	{}
+};
+
+void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+{
+ struct drm_device *dev = dm->ddev;
+ int dmi_id;
+
+ dm->aux_hpd_discon_quirk = false;
+ dm->edp0_on_dp1_quirk = false;
+
+ dmi_id = dmi_check_system(dmi_quirk_table);
+
+ if (!dmi_id)
+ return;
+
+ if (quirk_entries.aux_hpd_discon) {
+ dm->aux_hpd_discon_quirk = true;
+ drm_info(dev, "aux_hpd_discon_quirk attached\n");
+ }
+ if (quirk_entries.support_edp0_on_dp1) {
+ dm->edp0_on_dp1_quirk = true;
+ drm_info(dev, "support_edp0_on_dp1 attached\n");
+ }
+}
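
For context, a sketch of a plausible init-time consumer of the new helper. The function name and log strings are assumptions; only retrieve_dmi_info() and the two dm flags come from this patch.

/*
 * Hypothetical init-time wiring (illustration only): run the DMI scan
 * once during DM setup, then branch on the per-device flags it fills in.
 */
static void example_dm_apply_quirks(struct amdgpu_display_manager *dm)
{
	/* Runs dmi_check_system(), which fires the matching callbacks. */
	retrieve_dmi_info(dm);

	if (dm->aux_hpd_discon_quirk)
		drm_dbg(dm->ddev, "working around AUX/HPD disconnect\n");
	if (dm->edp0_on_dp1_quirk)
		drm_dbg(dm->ddev, "allowing eDP0 routing on the DP1 link\n");
}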
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
index 41f07f13a7b5..da94e3544b65 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
@@ -30,7 +31,7 @@
#include "amdgpu_dm.h"
#include "modules/power/power_helpers.h"
#include "dmub/inc/dmub_cmd.h"
-#include "dc/inc/link.h"
+#include "dc/inc/link_service.h"
/*
* amdgpu_dm_link_supports_replay() - check if the link supports replay
@@ -161,7 +162,7 @@ bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait)
if (link) {
link->dc->link_srv->edp_setup_replay(link, stream);
- link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total);
+ link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total, 0);
DRM_DEBUG_DRIVER("Enabling replay...\n");
link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, wait, false, NULL);
return true;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
index 8126bdb1eb6b..73b6c67ae5e7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 0005f5f8f34f..8550d5e8b753 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -52,11 +53,11 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
func_name, line);
}
-void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx)
+void dm_trace_smu_enter(uint32_t msg_id, uint32_t param_in, unsigned int delay, struct dc_context *ctx)
{
}
-void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx)
+void dm_trace_smu_exit(bool success, uint32_t response, struct dc_context *ctx)
{
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
index 4686d4b0cbad..aa56fd6d56c3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -726,6 +727,32 @@ TRACE_EVENT(dcn_optc_lock_unlock_state,
)
);
+TRACE_EVENT(amdgpu_dm_brightness,
+ TP_PROTO(void *function, u32 user_brightness, u32 converted_brightness, bool aux, bool ac),
+ TP_ARGS(function, user_brightness, converted_brightness, aux, ac),
+ TP_STRUCT__entry(
+ __field(void *, function)
+ __field(u32, user_brightness)
+ __field(u32, converted_brightness)
+ __field(bool, aux)
+ __field(bool, ac)
+ ),
+ TP_fast_assign(
+ __entry->function = function;
+ __entry->user_brightness = user_brightness;
+ __entry->converted_brightness = converted_brightness;
+ __entry->aux = aux;
+ __entry->ac = ac;
+ ),
+ TP_printk("%ps: brightness requested=%u converted=%u aux=%s power=%s",
+ (void *)__entry->function,
+ (u32)__entry->user_brightness,
+ (u32)__entry->converted_brightness,
+ (__entry->aux) ? "true" : "false",
+ (__entry->ac) ? "AC" : "DC"
+ )
+);
+
#endif /* _AMDGPU_DM_TRACE_H_ */
#undef TRACE_INCLUDE_PATH
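
A hedged sketch of how a backlight update path might emit the new event. The helper, its arguments, and the power-supply check are assumptions; only the tracepoint name and signature come from this patch.

/*
 * Hypothetical emitter (illustration only). Assumes this trace header is
 * included and <linux/power_supply.h> is available for the AC/DC check.
 */
static inline void example_trace_brightness_update(u32 user_level,
						   u32 converted_level,
						   bool aux_backlight)
{
	trace_amdgpu_dm_brightness(__builtin_return_address(0),
				   user_level, converted_level, aux_backlight,
				   power_supply_is_system_supplied() > 0);
}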
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
index 0d5fefb0f591..d9527c05fc87 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -102,13 +102,13 @@ static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector
r = amdgpu_bo_reserve(rbo, true);
if (r) {
- dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+		drm_err(adev_to_drm(adev), "failed to reserve bo (%d)\n", r);
return r;
}
r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
if (r) {
- dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
+ drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r);
goto error_unlock;
}