Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c')
-rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3337 |
1 file changed, 2321 insertions, 1016 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2851719d7121..0b8ac9edc070 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -77,9 +77,10 @@
 #include <linux/types.h>
 #include <linux/pm_runtime.h>
 #include <linux/pci.h>
+#include <linux/power_supply.h>
 #include <linux/firmware.h>
 #include <linux/component.h>
-#include <linux/dmi.h>
+#include <linux/sort.h>
 
 #include <drm/display/drm_dp_mst_helper.h>
 #include <drm/display/drm_hdmi_helper.h>
@@ -91,10 +92,12 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_eld.h>
+#include <drm/drm_utils.h>
 #include <drm/drm_vblank.h>
 #include <drm/drm_audio_component.h>
 #include <drm/drm_gem_atomic_helper.h>
 
+#include <media/cec-notifier.h>
 #include <acpi/video.h>
 
 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
@@ -111,6 +114,8 @@
 #include "modules/inc/mod_freesync.h"
 #include "modules/power/power_helpers.h"
 
+static_assert(AMDGPU_DMUB_NOTIFICATION_MAX == DMUB_NOTIFICATION_MAX, "AMDGPU_DMUB_NOTIFICATION_MAX mismatch");
+
 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
@@ -148,6 +153,15 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
 #define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
 MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
 
+#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
+
+#define FIRMWARE_DCN_36_DMUB "amdgpu/dcn_3_6_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_36_DMUB);
+
+#define FIRMWARE_DCN_401_DMUB "amdgpu/dcn_4_0_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB);
+
 /* Number of bytes in PSP header for firmware. */
 #define PSP_HEADER_BYTES 0x100
@@ -168,6 +182,9 @@ MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
 static int amdgpu_dm_init(struct amdgpu_device *adev);
 static void amdgpu_dm_fini(struct amdgpu_device *adev);
 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
+static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state);
+static struct amdgpu_i2c_adapter *
+create_i2c(struct ddc_service *ddc_service, bool oem);
 
 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
 {
@@ -234,6 +251,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
 static void handle_hpd_rx_irq(void *param);
 
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+					  int bl_idx,
+					  u32 user_brightness);
+
 static bool
 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
@@ -260,7 +281,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
	acrtc = adev->mode_info.crtcs[crtc];
 
	if (!acrtc->dm_irq_params.stream) {
-		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+		drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
			  crtc);
		return 0;
	}
@@ -271,7 +292,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
 {
-	u32 v_blank_start, v_blank_end, h_position, v_position;
+	u32 v_blank_start = 0, v_blank_end = 0, h_position = 0, v_position = 0;
	struct amdgpu_crtc *acrtc = NULL;
	struct dc *dc = adev->dm.dc;
@@ -281,7 +302,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
	acrtc = adev->mode_info.crtcs[crtc];
 
	if (!acrtc->dm_irq_params.stream) {
-		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+		drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
			  crtc);
		return 0;
	}
@@ -305,24 +326,24 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
	return 0;
 }
 
-static bool dm_is_idle(void *handle)
+static bool dm_is_idle(struct amdgpu_ip_block *ip_block)
 {
	/* XXX todo */
	return true;
 }
 
-static int dm_wait_for_idle(void *handle)
+static int dm_wait_for_idle(struct amdgpu_ip_block *ip_block)
 {
	/* XXX todo */
	return 0;
 }
 
-static bool dm_check_soft_reset(void *handle)
+static bool dm_check_soft_reset(struct amdgpu_ip_block *ip_block)
 {
	return false;
 }
 
-static int dm_soft_reset(void *handle)
+static int dm_soft_reset(struct amdgpu_ip_block *ip_block)
 {
	/* XXX todo */
	return 0;
@@ -352,6 +373,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
 {
+	if (new_state->stream->adjust.timing_adjust_pending)
+		return true;
	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
@@ -360,13 +383,18 @@ static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
	return false;
 }
 
-static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
-					int planes_count)
+/*
+ * DC will program planes with their z-order determined by their ordering
+ * in the dc_surface_updates array. This comparator is used to sort them
+ * by descending zpos.
+ */
+static int dm_plane_layer_index_cmp(const void *a, const void *b)
 {
-	int i, j;
+	const struct dc_surface_update *sa = (struct dc_surface_update *)a;
+	const struct dc_surface_update *sb = (struct dc_surface_update *)b;
 
-	for (i = 0, j = planes_count - 1; i < j; i++, j--)
-		swap(array_of_surface_update[i], array_of_surface_update[j]);
+	/* Sort by descending dc_plane layer_index (i.e. normalized_zpos) */
+	return sb->surface->layer_index - sa->surface->layer_index;
 }
 
 /**
@@ -393,7 +421,8 @@ static inline bool update_planes_and_stream_adapter(struct dc *dc,
						    struct dc_stream_update *stream_update,
						    struct dc_surface_update *array_of_surface_update)
 {
-	reverse_planes_order(array_of_surface_update, planes_count);
+	sort(array_of_surface_update, planes_count,
+	     sizeof(*array_of_surface_update), dm_plane_layer_index_cmp, NULL);
 
	/*
	 * Previous frame finished and HW is ready for optimization.
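The new comparator follows the same contract as qsort(3)'s, which is also what the kernel's sort() from <linux/sort.h> expects, so the descending z-order logic is easy to check in isolation. A minimal userspace sketch with a hypothetical struct and values standing in for dc_surface_update and layer_index:

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a plane update; layer_index mirrors the normalized zpos. */
struct update { int layer_index; };

/* Descending order: an update with a higher layer_index sorts first. */
static int layer_index_cmp(const void *a, const void *b)
{
	const struct update *ua = a;
	const struct update *ub = b;

	return ub->layer_index - ua->layer_index;
}

int main(void)
{
	struct update updates[] = { {1}, {3}, {0}, {2} };
	size_t n = sizeof(updates) / sizeof(updates[0]);

	qsort(updates, n, sizeof(updates[0]), layer_index_cmp);

	for (size_t i = 0; i < n; i++)
		printf("%d ", updates[i].layer_index); /* prints: 3 2 1 0 */
	printf("\n");
	return 0;
}
```

Returning the plain subtraction is safe here only because layer indices are small non-negative values; for arbitrary integers a branching comparison would be needed to avoid overflow.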
@@ -528,7 +557,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
	if (acrtc) {
		vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
-		vblank = &drm_dev->vblank[acrtc->base.index];
+		vblank = drm_crtc_vblank_crtc(&acrtc->base);
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;
 
@@ -591,12 +620,14 @@ static void dm_crtc_high_irq(void *interrupt_params)
	if (!acrtc)
		return;
 
-	if (acrtc->wb_pending) {
-		if (acrtc->wb_conn) {
-			spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);
+	if (acrtc->wb_conn) {
+		spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);
+
+		if (acrtc->wb_pending) {
			job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
						       struct drm_writeback_job,
						       list_entry);
+			acrtc->wb_pending = false;
			spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
 
			if (job) {
@@ -614,8 +645,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
					acrtc->dm_irq_params.stream, 0);
			}
		} else
-			DRM_ERROR("%s: no amdgpu_crtc wb_conn\n", __func__);
-		acrtc->wb_pending = false;
+			spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
	}
 
	vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
@@ -722,6 +752,29 @@ static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
	complete(&adev->dm.dmub_aux_transfer_done);
 }
 
+static void dmub_aux_fused_io_callback(struct amdgpu_device *adev,
+				       struct dmub_notification *notify)
+{
+	if (!adev || !notify) {
+		ASSERT(false);
+		return;
+	}
+
+	const struct dmub_cmd_fused_request *req = &notify->fused_request;
+	const uint8_t ddc_line = req->u.aux.ddc_line;
+
+	if (ddc_line >= ARRAY_SIZE(adev->dm.fused_io)) {
+		ASSERT(false);
+		return;
+	}
+
+	struct fused_io_sync *sync = &adev->dm.fused_io[ddc_line];
+
+	static_assert(sizeof(*req) <= sizeof(sync->reply_data), "Size mismatch");
+	memcpy(sync->reply_data, req, sizeof(*req));
+	complete(&sync->replied);
+}
+
 /**
  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
  * @adev: amdgpu_device pointer
@@ -745,12 +798,18 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
		return;
 
	if (notify == NULL) {
-		DRM_ERROR("DMUB HPD callback notification was NULL");
+		drm_err(adev_to_drm(adev), "DMUB HPD callback notification was NULL");
		return;
	}
 
	if (notify->link_index > adev->dm.dc->link_count) {
-		DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
+		drm_err(adev_to_drm(adev), "DMUB HPD index (%u)is abnormal", notify->link_index);
+		return;
+	}
+
+	/* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
+	if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
+		drm_info(adev_to_drm(adev), "Skip DMUB HPD IRQ callback in suspend/resume\n");
		return;
	}
 
@@ -767,11 +826,11 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			if (notify->type == DMUB_NOTIFICATION_HPD)
-				DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
+				drm_info(adev_to_drm(adev), "DMUB HPD IRQ callback: link_index=%u\n", link_index);
			else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
-				DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+				drm_info(adev_to_drm(adev), "DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
			else
-				DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
+				drm_warn(adev_to_drm(adev), "DMUB Unknown HPD callback type %d, link_index=%u\n",
						notify->type, link_index);
 
			hpd_aconnector = aconnector;
@@ -781,14 +840,31 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
	drm_connector_list_iter_end(&iter);
 
	if (hpd_aconnector) {
-		if (notify->type == DMUB_NOTIFICATION_HPD)
+		if (notify->type == DMUB_NOTIFICATION_HPD) {
+			if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
+				drm_warn(adev_to_drm(adev), "DMUB reported hpd status unchanged. link_index=%u\n", link_index);
			handle_hpd_irq_helper(hpd_aconnector);
-		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+		} else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
			handle_hpd_rx_irq(hpd_aconnector);
+		}
	}
 }
 
 /**
+ * dmub_hpd_sense_callback - DMUB HPD sense processing callback.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * HPD sense changes can occur during low power states and need to be
+ * notified from firmware to driver.
+ */
+static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
+				    struct dmub_notification *notify)
+{
+	drm_dbg_driver(adev_to_drm(adev), "DMUB HPD SENSE callback.\n");
+}
+
+/**
  * register_dmub_notify_callback - Sets callback for DMUB notify
  * @adev: amdgpu_device pointer
  * @type: Type of dmub notification
@@ -821,7 +897,7 @@ static void dm_handle_hpd_work(struct work_struct *work)
	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
 
	if (!dmub_hpd_wrk->dmub_notify) {
-		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
+		drm_err(adev_to_drm(dmub_hpd_wrk->adev), "dmub_hpd_wrk dmub_notify is NULL");
		return;
	}
 
@@ -835,6 +911,30 @@ static void dm_handle_hpd_work(struct work_struct *work)
 
 }
 
+static const char *dmub_notification_type_str(enum dmub_notification_type e)
+{
+	switch (e) {
+	case DMUB_NOTIFICATION_NO_DATA:
+		return "NO_DATA";
+	case DMUB_NOTIFICATION_AUX_REPLY:
+		return "AUX_REPLY";
+	case DMUB_NOTIFICATION_HPD:
+		return "HPD";
+	case DMUB_NOTIFICATION_HPD_IRQ:
+		return "HPD_IRQ";
+	case DMUB_NOTIFICATION_SET_CONFIG_REPLY:
+		return "SET_CONFIG_REPLY";
+	case DMUB_NOTIFICATION_DPIA_NOTIFICATION:
+		return "DPIA_NOTIFICATION";
+	case DMUB_NOTIFICATION_HPD_SENSE_NOTIFY:
+		return "HPD_SENSE_NOTIFY";
+	case DMUB_NOTIFICATION_FUSED_IO:
+		return "FUSED_IO";
+	default:
+		return "<unknown>";
+	}
+}
+
 #define DMUB_TRACE_MAX_READ 64
 /**
  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
  * @interrupt_params: used for determining the Outbox instance
@@ -845,14 +945,30 @@ static void dm_handle_hpd_work(struct work_struct *work)
  */
 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
 {
-	struct dmub_notification notify;
+	struct dmub_notification notify = {0};
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	u32 count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
-	struct dc_link *plink = NULL;
+
+	do {
+		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
+			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
+							 entry.param0, entry.param1);
+
+			drm_dbg_driver(adev_to_drm(adev), "trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+				       entry.trace_code, entry.tick_count, entry.param0, entry.param1);
+		} else
+			break;
+
+		count++;
+
+	} while (count <= DMUB_TRACE_MAX_READ);
+
+	if (count > DMUB_TRACE_MAX_READ)
+		drm_dbg_driver(adev_to_drm(adev), "Warning : count > DMUB_TRACE_MAX_READ");
 
	if (dc_enable_dmub_notifications(adev->dm.dc) &&
		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
 
		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
-				DRM_ERROR("DM: notify type %d invalid!", notify.type);
+				drm_err(adev_to_drm(adev), "DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
-				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
+				drm_warn(adev_to_drm(adev), "DMUB notification skipped due to no handler: type=%s\n",
+					 dmub_notification_type_str(notify.type));
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
-					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
+					drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
								    GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
-					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
+					drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				dmub_hpd_wrk->adev = adev;
-				if (notify.type == DMUB_NOTIFICATION_HPD) {
-					plink = adev->dm.dc->links[notify.link_index];
-					if (plink) {
-						plink->hpd_status =
-							notify.hpd_status == DP_HPD_PLUG;
-					}
-				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}
-
-
-	do {
-		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
-			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
-							 entry.param0, entry.param1);
-
-			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
-					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
-		} else
-			break;
-
-		count++;
-
-	} while (count <= DMUB_TRACE_MAX_READ);
-
-	if (count > DMUB_TRACE_MAX_READ)
-		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
 }
 
-static int dm_set_clockgating_state(void *handle,
+static int dm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
			  enum amd_clockgating_state state)
 {
	return 0;
 }
 
-static int dm_set_powergating_state(void *handle,
+static int dm_set_powergating_state(struct amdgpu_ip_block *ip_block,
			  enum amd_powergating_state state)
 {
	return 0;
 }
 
 /* Prototypes of private functions */
-static int dm_early_init(void *handle);
+static int dm_early_init(struct amdgpu_ip_block *ip_block);
 
 /* Allocate memory for FBC compressed data  */
 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
@@ -950,8 +1042,8 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
 
	list_for_each_entry(mode, &connector->modes, head) {
-		if (max_size < mode->htotal * mode->vtotal)
-			max_size = mode->htotal * mode->vtotal;
+		if (max_size < (unsigned long) mode->htotal * mode->vtotal)
+			max_size = (unsigned long) mode->htotal * mode->vtotal;
	}
 
	if (max_size) {
@@ -960,10 +1052,10 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
			    &compressor->gpu_addr, &compressor->cpu_addr);
 
		if (r)
-			DRM_ERROR("DM: Failed to initialize FBC\n");
+			drm_err(adev_to_drm(adev), "DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
-			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
+			drm_info(adev_to_drm(adev), "DM: FBC alloc %lu\n", max_size*4);
		}
 
	}
@@ -996,8 +1088,10 @@ static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
			continue;
 
		*enabled = true;
+		mutex_lock(&connector->eld_mutex);
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));
+		mutex_unlock(&connector->eld_mutex);
 
		break;
	}
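The amdgpu_dm_fbc_init() hunk above widens one operand to unsigned long before multiplying, because htotal * vtotal is otherwise evaluated in plain int and can overflow for very large modes. A small standalone sketch of the difference, assuming a 64-bit unsigned long as on LP64 kernels:

```c
#include <stdio.h>

int main(void)
{
	int htotal = 65536, vtotal = 65536;

	/* int * int is evaluated in int and can overflow (undefined behavior):
	 *   long bad = (long)(htotal * vtotal);
	 * Widening one operand first makes the whole multiplication 64-bit. */
	unsigned long ok = (unsigned long)htotal * vtotal;

	printf("%lu\n", ok); /* 4294967296 */
	return 0;
}
```

Casting the result after the multiplication would be too late; the cast has to apply to an operand so the arithmetic itself is performed in the wider type.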
@@ -1126,13 +1220,13 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
		return 0;
 
	if (!fb_info) {
-		DRM_ERROR("No framebuffer info for DMUB service.\n");
+		drm_err(adev_to_drm(adev), "No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}
 
	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
-		DRM_ERROR("No firmware provided for DMUB.\n");
+		drm_err(adev_to_drm(adev), "No firmware provided for DMUB.\n");
		return -EINVAL;
	}
 
@@ -1142,19 +1236,19 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
-		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
+		drm_err(adev_to_drm(adev), "Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}
 
	if (!has_hw_support) {
-		DRM_INFO("DMUB unsupported on ASIC\n");
+		drm_info(adev_to_drm(adev), "DMUB unsupported on ASIC\n");
		return 0;
	}
 
	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
-		DRM_WARN("Error resetting DMUB HW: %d\n", status);
+		drm_warn(adev_to_drm(adev), "Error resetting DMUB HW: %d\n", status);
 
	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
 
@@ -1200,6 +1294,9 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
 
+	memset(fb_info->fb[DMUB_WINDOW_SHARED_STATE].cpu_addr, 0,
+	       fb_info->fb[DMUB_WINDOW_SHARED_STATE].size);
+
	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
@@ -1220,6 +1317,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
	case IP_VERSION(3, 1, 4):
	case IP_VERSION(3, 5, 0):
	case IP_VERSION(3, 5, 1):
+	case IP_VERSION(3, 6, 0):
+	case IP_VERSION(4, 0, 1):
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
@@ -1227,16 +1326,27 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
		break;
	}
 
+	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+	case IP_VERSION(3, 5, 0):
+	case IP_VERSION(3, 5, 1):
+	case IP_VERSION(3, 6, 0):
+		hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
+		hw_params.lower_hbr3_phy_ssc = true;
+		break;
+	default:
+		break;
+	}
+
	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
-		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
+		drm_err(adev_to_drm(adev), "Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}
 
	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
-		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+		drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
 
	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
@@ -1247,13 +1357,36 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
-		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
+		drm_err(adev_to_drm(adev), "Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}
 
-	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
+	drm_info(adev_to_drm(adev), "DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);
 
+	/* Keeping sanity checks off if
+	 * DCN31 >= 4.0.59.0
+	 * DCN314 >= 8.0.16.0
+	 * Otherwise, turn on sanity checks
+	 */
+	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+	case IP_VERSION(3, 1, 2):
+	case IP_VERSION(3, 1, 3):
+		if (adev->dm.dmcub_fw_version &&
+		    adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
+		    adev->dm.dmcub_fw_version < DMUB_FW_VERSION(4, 0, 59))
+			adev->dm.dc->debug.sanity_checks = true;
+		break;
+	case IP_VERSION(3, 1, 4):
+		if (adev->dm.dmcub_fw_version &&
+		    adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
+		    adev->dm.dmcub_fw_version < DMUB_FW_VERSION(8, 0, 16))
+			adev->dm.dc->debug.sanity_checks = true;
+		break;
+	default:
+		break;
+	}
+
	return 0;
 }
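The version gates above work because DMUB_FW_VERSION() packs the version fields into a single integer that compares monotonically. A sketch of the idea with a hypothetical packing macro; the real macro lives in the DMUB headers and its exact bit layout is not shown in this diff:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical pack, mirroring the idea behind DMUB_FW_VERSION(major, minor, rev).
 * The kernel's actual field widths/positions may differ. */
#define FW_VERSION(major, minor, rev) \
	(((uint32_t)(major) << 24) | ((uint32_t)(minor) << 16) | ((uint32_t)(rev) << 8))

int main(void)
{
	uint32_t fw = FW_VERSION(4, 0, 42);

	/* Packed versions compare correctly with plain integer operators,
	 * matching the "in [4.0.0, 4.0.59)" range check in the hunk above. */
	if (fw >= FW_VERSION(4, 0, 0) && fw < FW_VERSION(4, 0, 59))
		printf("enable sanity checks\n");
	return 0;
}
```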
@@ -1262,6 +1395,7 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;
+	int r;
 
	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
@@ -1270,16 +1404,18 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
 
	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
-		DRM_WARN("DMUB hardware init check failed: %d\n", status);
+		drm_warn(adev_to_drm(adev), "DMUB hardware init check failed: %d\n", status);
 
	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
-			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+			drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
-		dm_dmub_hw_init(adev);
+		r = dm_dmub_hw_init(adev);
+		if (r)
+			drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
	}
 }
 
@@ -1389,18 +1525,18 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
 
	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;
+	adev = offload_work->adev;
 
	if (!aconnector) {
-		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
+		drm_err(adev_to_drm(adev), "Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}
 
-	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;
 
	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
-		DRM_ERROR("KMS: Failed to detect connector\n");
+		drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);
 
	if (new_connection_type == dc_connection_none)
@@ -1469,8 +1605,9 @@ skip:
 
 }
 
-static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct amdgpu_device *adev)
 {
+	struct dc *dc = adev->dm.dc;
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
@@ -1486,7 +1623,7 @@ static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
 
		if (hpd_rx_offload_wq[i].wq == NULL) {
-			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
+			drm_err(adev_to_drm(adev), "create amdgpu_dm_hpd_rx_offload_wq fail!");
			goto out_err;
		}
 
@@ -1535,76 +1672,161 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
	return false;
 }
 
-static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
		},
-	},
-	{}
-	/* TODO: refactor this from a fixed table to a dynamic option */
-};
-static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+void*
+dm_allocate_gpu_mem(
+		struct amdgpu_device *adev,
+		enum dc_gpu_mem_alloc_type type,
+		size_t size,
+		long long *addr)
+{
+	struct dal_allocation *da;
+	u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
+		AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
+	int ret;
+
+	da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
+	if (!da)
+		return NULL;
+
+	ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+				      domain, &da->bo,
+				      &da->gpu_addr, &da->cpu_ptr);
+
+	*addr = da->gpu_addr;
+
+	if (ret) {
+		kfree(da);
+		return NULL;
+	}
+
+	/* add da to list in dm */
+	list_add(&da->list, &adev->dm.da_list);
+
+	return da->cpu_ptr;
+}
+
+void
+dm_free_gpu_mem(
+		struct amdgpu_device *adev,
+		enum dc_gpu_mem_alloc_type type,
+		void *pvMem)
+{
+	struct dal_allocation *da;
+
+	/* walk the da list in DM */
+	list_for_each_entry(da, &adev->dm.da_list, list) {
+		if (pvMem == da->cpu_ptr) {
+			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
+			list_del(&da->list);
+			kfree(da);
+			break;
+		}
+	}
+
+}
+
+static enum dmub_status
+dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
+				 enum dmub_gpint_command command_code,
+				 uint16_t param,
+				 uint32_t timeout_us)
+{
+	union dmub_gpint_data_register reg, test;
+	uint32_t i;
+
+	/* Assume that VBIOS DMUB is ready to take commands */
+
+	reg.bits.status = 1;
+	reg.bits.command_code = command_code;
+	reg.bits.param = param;
+
+	cgs_write_register(adev->dm.cgs_device, 0x34c0 + 0x01f8, reg.all);
+
+	for (i = 0; i < timeout_us; ++i) {
+		udelay(1);
+
+		/* Check if our GPINT got acked */
+		reg.bits.status = 0;
+		test = (union dmub_gpint_data_register)
+			cgs_read_register(adev->dm.cgs_device, 0x34c0 + 0x01f8);
+
+		if (test.all == reg.all)
+			return DMUB_STATUS_OK;
+	}
+
+	return DMUB_STATUS_TIMEOUT;
+}
+
+static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
 {
-	const struct dmi_system_id *dmi_id;
+	struct dml2_soc_bb *bb;
+	long long addr;
+	int i = 0;
+	uint16_t chunk;
+	enum dmub_gpint_command send_addrs[] = {
+		DMUB_GPINT__SET_BB_ADDR_WORD0,
+		DMUB_GPINT__SET_BB_ADDR_WORD1,
+		DMUB_GPINT__SET_BB_ADDR_WORD2,
+		DMUB_GPINT__SET_BB_ADDR_WORD3,
+	};
+	enum dmub_status ret;
+
+	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+	case IP_VERSION(4, 0, 1):
+		break;
+	default:
+		return NULL;
+	}
 
-	dm->aux_hpd_discon_quirk = false;
+	bb = dm_allocate_gpu_mem(adev,
+				 DC_MEM_ALLOC_TYPE_GART,
+				 sizeof(struct dml2_soc_bb),
+				 &addr);
+	if (!bb)
+		return NULL;
 
-	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
-	if (dmi_id) {
-		dm->aux_hpd_discon_quirk = true;
-		DRM_INFO("aux_hpd_discon_quirk attached\n");
+	for (i = 0; i < 4; i++) {
+		/* Extract 16-bit chunk */
+		chunk = ((uint64_t) addr >> (i * 16)) & 0xFFFF;
+		/* Send the chunk */
+		ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000);
+		if (ret != DMUB_STATUS_OK)
+			goto free_bb;
	}
+
+	/* Now ask DMUB to copy the bb */
+	ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000);
+	if (ret != DMUB_STATUS_OK)
+		goto free_bb;
+
+	return bb;
+
+free_bb:
+	dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, (void *) bb);
+	return NULL;
+
+}
+
+static enum dmub_ips_disable_type dm_get_default_ips_mode(
+	struct amdgpu_device *adev)
+{
+	enum dmub_ips_disable_type ret = DMUB_IPS_ENABLE;
+
+	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+	case IP_VERSION(3, 5, 0):
+	case IP_VERSION(3, 6, 0):
+	case IP_VERSION(3, 5, 1):
+		ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+		break;
+	default:
+		/* ASICs older than DCN35 do not have IPSs */
+		if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0))
+			ret = DMUB_IPS_DISABLE_ALL;
+		break;
+	}
+
+	return ret;
 }
 
 static int amdgpu_dm_init(struct amdgpu_device *adev)
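dm_dmub_get_vbios_bounding_box() above hands DMUB a GPU address four 16-bit words at a time through the SET_BB_ADDR_WORD0..3 GPINT commands. The split-and-reassemble arithmetic is easy to sanity-check in isolation:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x0000123456789abcULL; /* example GPU address */
	uint16_t chunk[4];
	uint64_t rebuilt = 0;
	int i;

	/* Split into four 16-bit words, least-significant word first,
	 * the same way the hunk feeds SET_BB_ADDR_WORD0..3. */
	for (i = 0; i < 4; i++)
		chunk[i] = (addr >> (i * 16)) & 0xFFFF;

	/* The receiver reassembles the words in the same order. */
	for (i = 0; i < 4; i++)
		rebuilt |= (uint64_t)chunk[i] << (i * 16);

	printf("%d\n", rebuilt == addr); /* 1 */
	return 0;
}
```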
@@ -1625,7 +1847,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
	mutex_init(&adev->dm.audio_lock);
 
	if (amdgpu_dm_irq_init(adev)) {
-		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+		drm_err(adev_to_drm(adev), "amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}
 
@@ -1642,13 +1864,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
	init_data.driver = adev;
 
-	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
-
-	if (!adev->dm.cgs_device) {
-		DRM_ERROR("amdgpu: failed to create cgs device.\n");
-		goto error;
-	}
-
+	/* cgs_device was created in dm_sw_init() */
	init_data.cgs_device = adev->dm.cgs_device;
 
	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
@@ -1685,7 +1901,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
		else
			init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
	} else {
-		init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
+		if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3))
+			init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1);
+		else
+			init_data.flags.gpu_vm_support =
+				(amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
	}
 
	adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;
@@ -1712,7 +1932,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
	if (amdgpu_device_seamless_boot_supported(adev)) {
		init_data.flags.seamless_boot_edp_requested = true;
		init_data.flags.allow_seamless_boot_optimization = true;
-		DRM_INFO("Seamless boot condition check passed\n");
+		drm_dbg(adev->dm.ddev, "Seamless boot requested\n");
	}
 
	init_data.flags.enable_mipi_converter_optimization = true;
@@ -1723,25 +1943,38 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
	if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
		init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+	else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS_DYNAMIC)
+		init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC;
+	else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS2_DYNAMIC)
+		init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+	else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE)
+		init_data.flags.disable_ips = DMUB_IPS_ENABLE;
+	else
+		init_data.flags.disable_ips = dm_get_default_ips_mode(adev);
 
-	init_data.flags.disable_ips_in_vpb = 1;
+	init_data.flags.disable_ips_in_vpb = 0;
 
	/* Enable DWB for tested platforms only */
	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
		init_data.num_virtual_links = 1;
 
-	INIT_LIST_HEAD(&adev->dm.da_list);
 
-	retrieve_dmi_info(&adev->dm);
+	if (adev->dm.edp0_on_dp1_quirk)
+		init_data.flags.support_edp0_on_dp1 = true;
+
+	if (adev->dm.bb_from_dmub)
+		init_data.bb_from_dmub = adev->dm.bb_from_dmub;
+	else
+		init_data.bb_from_dmub = NULL;
 
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);
 
	if (adev->dm.dc) {
-		DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
+		drm_info(adev_to_drm(adev), "Display Core v%s initialized on %s\n", DC_VER,
			 dce_version_to_string(adev->dm.dc->ctx->dce_version));
	} else {
-		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
+		drm_info(adev_to_drm(adev), "Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}
 
@@ -1767,8 +2000,24 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
		adev->dm.dc->debug.force_subvp_mclk_switch = true;
 
-	if (amdgpu_dc_debug_mask & DC_ENABLE_DML2)
+	if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP_FAMS) {
+		adev->dm.dc->debug.force_disable_subvp = true;
+		adev->dm.dc->debug.fams2_config.bits.enable = false;
+	}
+
+	if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) {
		adev->dm.dc->debug.using_dml2 = true;
+		adev->dm.dc->debug.using_dml21 = true;
+	}
+
+	if (amdgpu_dc_debug_mask & DC_HDCP_LC_FORCE_FW_ENABLE)
+		adev->dm.dc->debug.hdcp_lc_force_fw_enable = true;
+
+	if (amdgpu_dc_debug_mask & DC_HDCP_LC_ENABLE_SW_FALLBACK)
+		adev->dm.dc->debug.hdcp_lc_enable_sw_fallback = true;
+
+	if (amdgpu_dc_debug_mask & DC_SKIP_DETECTION_LT)
+		adev->dm.dc->debug.skip_detection_link_training = true;
 
	adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
 
@@ -1776,19 +2025,19 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
		adev->dm.dc->debug.ignore_cable_id = true;
 
	if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
-		DRM_INFO("DP-HDMI FRL PCON supported\n");
+		drm_info(adev_to_drm(adev), "DP-HDMI FRL PCON supported\n");
 
	r = dm_dmub_hw_init(adev);
	if (r) {
-		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+		drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}
 
	dc_hardware_init(adev->dm.dc);
 
-	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
	if (!adev->dm.hpd_rx_offload_wq) {
-		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+		drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd rx offload workqueue.\n");
		goto error;
	}
 
@@ -1803,10 +2052,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
-		DRM_ERROR(
+		drm_err(adev_to_drm(adev),
		"amdgpu: failed to initialize freesync_module.\n");
	} else
-		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
+		drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);
 
	amdgpu_dm_init_color_mod();
@@ -1815,16 +2064,20 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
		adev->dm.vblank_control_workqueue =
			create_singlethread_workqueue("dm_vblank_control_workqueue");
		if (!adev->dm.vblank_control_workqueue)
-			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
+			drm_err(adev_to_drm(adev), "amdgpu: failed to initialize vblank_workqueue.\n");
	}
 
+	if (adev->dm.dc->caps.ips_support &&
+	    adev->dm.dc->config.disable_ips != DMUB_IPS_DISABLE_ALL)
+		adev->dm.idle_workqueue = idle_create_workqueue(adev);
+
	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
 
		if (!adev->dm.hdcp_workqueue)
-			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
+			drm_err(adev_to_drm(adev), "amdgpu: failed to initialize hdcp_workqueue.\n");
		else
-			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
+			drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
 
		dc_init_callbacks(adev->dm.dc, &init_params);
	}
@@ -1832,20 +2085,29 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
-			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
+			drm_info(adev_to_drm(adev), "amdgpu: fail to allocate adev->dm.dmub_notify");
			goto error;
		}
 
		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
		if (!adev->dm.delayed_hpd_wq) {
-			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
+			drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd offload workqueue.\n");
			goto error;
		}
 
		amdgpu_dm_outbox_init(adev);
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
			dmub_aux_setconfig_callback, false)) {
-			DRM_ERROR("amdgpu: fail to register dmub aux callback");
+			drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub aux callback");
+			goto error;
+		}
+
+		for (size_t i = 0; i < ARRAY_SIZE(adev->dm.fused_io); i++)
+			init_completion(&adev->dm.fused_io[i].replied);
+
+		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO,
+			dmub_aux_fused_io_callback, false)) {
+			drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub fused io callback");
			goto error;
		}
		/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
@@ -1862,7 +2124,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
	}
 
	if (amdgpu_dm_initialize_drm_device(adev)) {
-		DRM_ERROR(
+		drm_err(adev_to_drm(adev),
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}
@@ -1877,18 +2139,22 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
 
	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
-		DRM_ERROR(
+		drm_err(adev_to_drm(adev),
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-	adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
-	if (!adev->dm.secure_display_ctxs)
-		DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+	amdgpu_dm_crtc_secure_display_create_contexts(adev);
+	if (!adev->dm.secure_display_ctx.crtc_ctx)
+		drm_err(adev_to_drm(adev), "amdgpu: failed to initialize secure display contexts.\n");
+
+	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1))
+		adev->dm.secure_display_ctx.support_mul_roi = true;
+
 #endif
 
-	DRM_DEBUG_DRIVER("KMS initialized.\n");
+	drm_dbg_driver(adev_to_drm(adev), "KMS initialized.\n");
 
	return 0;
 
error:
@@ -1897,9 +2163,9 @@ error:
	return -EINVAL;
 }
 
-static int amdgpu_dm_early_fini(void *handle)
+static int amdgpu_dm_early_fini(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
	amdgpu_dm_audio_fini(adev);
 
@@ -1915,18 +2181,28 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
		adev->dm.vblank_control_workqueue = NULL;
	}
 
+	if (adev->dm.idle_workqueue) {
+		if (adev->dm.idle_workqueue->running) {
+			adev->dm.idle_workqueue->enable = false;
+			flush_work(&adev->dm.idle_workqueue->work);
+		}
+
+		kfree(adev->dm.idle_workqueue);
+		adev->dm.idle_workqueue = NULL;
+	}
+
	amdgpu_dm_destroy_drm_device(&adev->dm);
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-	if (adev->dm.secure_display_ctxs) {
+	if (adev->dm.secure_display_ctx.crtc_ctx) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
-			if (adev->dm.secure_display_ctxs[i].crtc) {
-				flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
-				flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+			if (adev->dm.secure_display_ctx.crtc_ctx[i].crtc) {
+				flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].notify_ta_work);
+				flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].forward_roi_work);
			}
		}
-		kfree(adev->dm.secure_display_ctxs);
-		adev->dm.secure_display_ctxs = NULL;
+		kfree(adev->dm.secure_display_ctx.crtc_ctx);
+		adev->dm.secure_display_ctx.crtc_ctx = NULL;
	}
 #endif
	if (adev->dm.hdcp_workqueue) {
@@ -2045,11 +2321,13 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
	case IP_VERSION(3, 2, 1):
	case IP_VERSION(3, 5, 0):
	case IP_VERSION(3, 5, 1):
+	case IP_VERSION(3, 6, 0):
+	case IP_VERSION(4, 0, 1):
		return 0;
	default:
		break;
	}
-	DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
+	drm_err(adev_to_drm(adev), "Unsupported ASIC type: 0x%X\n", adev->asic_type);
	return -EINVAL;
 }
 
@@ -2058,7 +2336,8 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
		return 0;
	}
 
-	r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
+	r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, AMDGPU_UCODE_REQUIRED,
+				 "%s", fw_name_dmcu);
	if (r == -ENODEV) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
@@ -2066,7 +2345,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
		return 0;
	}
	if (r) {
-		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
+		drm_err(adev_to_drm(adev), "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		amdgpu_ucode_release(&adev->dm.fw_dmcu);
		return r;
@@ -2168,6 +2447,13 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
	case IP_VERSION(3, 5, 1):
		dmub_asic = DMUB_ASIC_DCN35;
		break;
+	case IP_VERSION(3, 6, 0):
+		dmub_asic = DMUB_ASIC_DCN36;
+		break;
+	case IP_VERSION(4, 0, 1):
+		dmub_asic = DMUB_ASIC_DCN401;
+		break;
+
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
@@ -2184,7 +2470,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
 
-		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+		drm_info(adev_to_drm(adev), "Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}
 
@@ -2193,7 +2479,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
 
	dmub_srv = adev->dm.dmub_srv;
 
	if (!dmub_srv) {
-		DRM_ERROR("Failed to allocate DMUB service!\n");
+		drm_err(adev_to_drm(adev), "Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}
 
@@ -2206,7 +2492,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
-		DRM_ERROR("Error creating DMUB service: %d\n", status);
+		drm_err(adev_to_drm(adev), "Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}
 
@@ -2231,7 +2517,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
			&region_info);
 
	if (status != DMUB_STATUS_OK) {
-		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
+		drm_err(adev_to_drm(adev), "Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}
 
@@ -2260,25 +2546,37 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
	fb_info = adev->dm.dmub_fb_info;
 
	if (!fb_info) {
-		DRM_ERROR(
+		drm_err(adev_to_drm(adev),
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}
 
	status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
	if (status != DMUB_STATUS_OK) {
-		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+		drm_err(adev_to_drm(adev), "Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}
 
+	adev->dm.bb_from_dmub = dm_dmub_get_vbios_bounding_box(adev);
+
	return 0;
 }
 
-static int dm_sw_init(void *handle)
+static int dm_sw_init(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
	int r;
 
+	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
+
+	if (!adev->dm.cgs_device) {
+		drm_err(adev_to_drm(adev), "amdgpu: failed to create cgs device.\n");
+		return -EINVAL;
+	}
+
+	/* Moved from dm init since we need to use allocations for storing bounding box data */
+	INIT_LIST_HEAD(&adev->dm.da_list);
+
	r = dm_dmub_sw_init(adev);
	if (r)
		return r;
@@ -2286,9 +2584,21 @@ static int dm_sw_init(struct amdgpu_ip_block *ip_block)
	return load_dmcu_fw(adev);
 }
 
-static int dm_sw_fini(void *handle)
+static int dm_sw_fini(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
+	struct dal_allocation *da;
+
+	list_for_each_entry(da, &adev->dm.da_list, list) {
+		if (adev->dm.bb_from_dmub == (void *) da->cpu_ptr) {
+			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
+			list_del(&da->list);
+			kfree(da);
+			adev->dm.bb_from_dmub = NULL;
+			break;
+		}
+	}
+
	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;
 
@@ -2321,13 +2631,13 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
-			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+			drm_dbg_kms(dev, "DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);
 
			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
-				DRM_ERROR("DM_MST: Failed to start MST\n");
+				drm_err(dev, "DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
@@ -2341,9 +2651,9 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
	return ret;
 }
 
-static int dm_late_init(void *handle)
+static int dm_late_init(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
@@ -2389,9 +2699,9 @@ static int dm_late_init(struct amdgpu_ip_block *ip_block)
 
 static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
 {
+	u8 buf[UUID_SIZE];
+	guid_t guid;
	int ret;
-	u8 guid[16];
-	u64 tmp64;
 
	mutex_lock(&mgr->lock);
	if (!mgr->mst_primary)
@@ -2412,31 +2722,74 @@ static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
	}
 
	/* Some hubs forget their guids after they resume */
-	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
-	if (ret != 16) {
+	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf));
+	if (ret != sizeof(buf)) {
		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
		goto out_fail;
	}
 
-	if (memchr_inv(guid, 0, 16) == NULL) {
-		tmp64 = get_jiffies_64();
-		memcpy(&guid[0], &tmp64, sizeof(u64));
-		memcpy(&guid[8], &tmp64, sizeof(u64));
+	import_guid(&guid, buf);
+
+	if (guid_is_null(&guid)) {
+		guid_gen(&guid);
+		export_guid(buf, &guid);
 
-		ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
+		ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, buf, sizeof(buf));
 
-		if (ret != 16) {
+		if (ret != sizeof(buf)) {
			drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
			goto out_fail;
		}
	}
 
-	memcpy(mgr->mst_primary->guid, guid, 16);
+	guid_copy(&mgr->mst_primary->guid, &guid);
 
out_fail:
	mutex_unlock(&mgr->lock);
 }
 
+void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector)
+{
+	struct cec_notifier *n = aconnector->notifier;
+
+	if (!n)
+		return;
+
+	cec_notifier_phys_addr_invalidate(n);
+}
+
+void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector)
+{
+	struct drm_connector *connector = &aconnector->base;
+	struct cec_notifier *n = aconnector->notifier;
+
+	if (!n)
+		return;
+
+	cec_notifier_set_phys_addr(n,
+				   connector->display_info.source_physical_address);
+}
+
+static void s3_handle_hdmi_cec(struct drm_device *ddev, bool suspend)
+{
+	struct amdgpu_dm_connector *aconnector;
+	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+
+	drm_connector_list_iter_begin(ddev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
+		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+			continue;
+
+		aconnector = to_amdgpu_dm_connector(connector);
+		if (suspend)
+			hdmi_cec_unset_edid(aconnector);
+		else
+			hdmi_cec_set_edid(aconnector);
+	}
+	drm_connector_list_iter_end(&conn_iter);
+}
+
 static void s3_handle_mst(struct drm_device *dev, bool suspend)
 {
	struct amdgpu_dm_connector *aconnector;
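resume_mst_branch_status() now goes through the kernel's guid_t helpers: read the DPCD GUID into a buffer, and if the hub forgot it over suspend (all zero bytes), mint a fresh one and write it back. A userspace sketch of the same null-check-then-regenerate flow, with memcmp() and rand() standing in for guid_is_null() and guid_gen():

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define GUID_SIZE 16

/* Stand-in for guid_is_null(): a GUID of all zero bytes counts as null. */
static int is_null_guid(const unsigned char *g)
{
	static const unsigned char zero[GUID_SIZE];

	return memcmp(g, zero, GUID_SIZE) == 0;
}

int main(void)
{
	unsigned char guid[GUID_SIZE] = {0}; /* as if read back from DPCD */

	/* Mirrors the resume path: if the hub forgot its GUID, generate a
	 * new one and write it back (the driver then calls
	 * drm_dp_dpcd_write(DP_GUID, ...)). */
	if (is_null_guid(guid)) {
		for (int i = 0; i < GUID_SIZE; i++)
			guid[i] = rand() & 0xFF;
	}

	printf("null? %d\n", is_null_guid(guid));
	return 0;
}
```

Compared with the old code, which stamped the jiffies counter into both halves of the buffer, guid_gen() produces a properly random GUID.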
@@ -2523,16 +2876,43 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
 
	ret = amdgpu_dpm_write_watermarks_table(adev);
	if (ret) {
-		DRM_ERROR("Failed to update WMTABLE!\n");
+		drm_err(adev_to_drm(adev), "Failed to update WMTABLE!\n");
		return ret;
	}
 
	return 0;
 }
 
+static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
+{
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct amdgpu_i2c_adapter *oem_i2c;
+	struct ddc_service *oem_ddc_service;
+	int r;
+
+	oem_ddc_service = dc_get_oem_i2c_device(adev->dm.dc);
+	if (oem_ddc_service) {
+		oem_i2c = create_i2c(oem_ddc_service, true);
+		if (!oem_i2c) {
+			drm_info(adev_to_drm(adev), "Failed to create oem i2c adapter data\n");
+			return -ENOMEM;
+		}
+
+		r = i2c_add_adapter(&oem_i2c->base);
+		if (r) {
+			drm_info(adev_to_drm(adev), "Failed to register oem i2c\n");
+			kfree(oem_i2c);
+			return r;
+		}
+		dm->oem_i2c = oem_i2c;
+	}
+
+	return 0;
+}
+
 /**
  * dm_hw_init() - Initialize DC device
- * @handle: The base driver device containing the amdgpu_dm device.
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * Initialize the &struct amdgpu_display_manager device. This involves calling
  * the initializers of each DM component, then populating the struct with them.
@@ -2550,27 +2930,37 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
  * - Vblank support
  * - Debug FS entries, if enabled
  */
-static int dm_hw_init(void *handle)
+static int dm_hw_init(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
+	int r;
+
	/* Create DAL display manager */
-	amdgpu_dm_init(adev);
+	r = amdgpu_dm_init(adev);
+	if (r)
+		return r;
	amdgpu_dm_hpd_init(adev);
 
+	r = dm_oem_i2c_hw_init(adev);
+	if (r)
+		drm_info(adev_to_drm(adev), "Failed to add OEM i2c bus\n");
+
	return 0;
 }
 
 /**
  * dm_hw_fini() - Teardown DC device
- * @handle: The base driver device containing the amdgpu_dm device.
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
  *
  * Teardown components within &struct amdgpu_display_manager that require
  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
  * were loaded. Also flush IRQ workqueues and disable them.
  */
-static int dm_hw_fini(void *handle)
+static int dm_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
+
+	kfree(adev->dm.oem_i2c);
 
	amdgpu_dm_hpd_fini(adev);
 
@@ -2596,7 +2986,7 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			if (rc)
-				DRM_WARN("Failed to %s pflip interrupts\n",
+				drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");
 
			if (enable) {
@@ -2606,32 +2996,34 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
 
				rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
				if (rc)
-					DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+					drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
 
			irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
			/* During gpu-reset we disable and then enable vblank irq, so
			 * don't use amdgpu_irq_get/put() to avoid refcount change.
			 */
			if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
-				DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
+				drm_warn(adev_to_drm(adev), "Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
		}
	}
 }
 
+DEFINE_FREE(state_release, struct dc_state *, if (_T) dc_state_release(_T))
+
 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
 {
-	struct dc_state *context = NULL;
-	enum dc_status res = DC_ERROR_UNEXPECTED;
+	struct dc_state *context __free(state_release) = NULL;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;
+	struct dc_commit_streams_params params = {};
 
	memset(del_streams, 0, sizeof(del_streams));
 
	context = dc_state_create_current_copy(dc);
	if (context == NULL)
-		goto context_alloc_fail;
+		return DC_ERROR_UNEXPECTED;
 
	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
@@ -2642,23 +3034,20 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
 
	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
-		if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
-			res = DC_FAIL_DETACH_SURFACES;
-			goto fail;
-		}
+		enum dc_status res;
+
+		if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context))
+			return DC_FAIL_DETACH_SURFACES;
 
		res = dc_state_remove_stream(dc, context, del_streams[i]);
		if (res != DC_OK)
-			goto fail;
+			return res;
	}
 
-	res = dc_commit_streams(dc, context->streams, context->stream_count);
+	params.streams = context->streams;
+	params.stream_count = context->stream_count;
 
-fail:
-	dc_state_release(context);
-
-context_alloc_fail:
-	return res;
+	return dc_commit_streams(dc, &params);
 }
 
 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
@@ -2671,34 +3060,58 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
	}
 }
 
-static int dm_suspend(void *handle)
+static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block)
+{
+	struct amdgpu_device *adev = ip_block->adev;
+
+	if (amdgpu_in_reset(adev))
+		return 0;
+
+	WARN_ON(adev->dm.cached_state);
+	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+	if (IS_ERR(adev->dm.cached_state))
+		return PTR_ERR(adev->dm.cached_state);
+
+	return 0;
+}
+
+static int dm_suspend(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = handle;
+	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
-	int ret = 0;
 
	if (amdgpu_in_reset(adev)) {
+		enum dc_status res;
+
		mutex_lock(&dm->dc_lock);
 
		dc_allow_idle_optimizations(adev->dm.dc, false);
 
		dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
 
-		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
+		if (dm->cached_dc_state)
+			dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
 
-		amdgpu_dm_commit_zero_streams(dm->dc);
+		res = amdgpu_dm_commit_zero_streams(dm->dc);
+		if (res != DC_OK) {
+			drm_err(adev_to_drm(adev), "Failed to commit zero streams: %d\n", res);
+			return -EINVAL;
+		}
 
		amdgpu_dm_irq_suspend(adev);
 
		hpd_rx_irq_work_suspend(dm);
 
-		return ret;
+		return 0;
	}
 
-	WARN_ON(adev->dm.cached_state);
-	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
-	if (IS_ERR(adev->dm.cached_state))
-		return PTR_ERR(adev->dm.cached_state);
+	if (!adev->dm.cached_state) {
+		adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+		if (IS_ERR(adev->dm.cached_state))
+			return PTR_ERR(adev->dm.cached_state);
+	}
+
+	s3_handle_hdmi_cec(adev_to_drm(adev), true);
 
	s3_handle_mst(adev_to_drm(adev), true);
 
@@ -2707,6 +3120,10 @@
	hpd_rx_irq_work_suspend(dm);
 
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
+
+	if (dm->dc->caps.ips_support && adev->in_s0ix)
+		dc_allow_idle_optimizations(dm->dc, true);
+
	dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
 
	return 0;
@@ -2823,41 +3240,56 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
-	} *bundle;
+	} *bundle __free(kfree);
	int k, m;
 
	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
	if (!bundle) {
		drm_err(dm->ddev, "Failed to allocate update bundle\n");
-		goto cleanup;
+		return;
	}
 
	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];
 
-		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
+		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
-				dc_state->stream_status->plane_states[m];
+				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
 
		update_planes_and_stream_adapter(dm->dc,
					 UPDATE_TYPE_FULL,
-					 dc_state->stream_status->plane_count,
+					 dc_state->stream_status[k].plane_count,
					 dc_state->streams[k],
					 &bundle->stream_update,
					 bundle->surface_updates);
	}
+}
 
-cleanup:
-	kfree(bundle);
+static void apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev,
+					    struct dc_sink *sink)
+{
+	struct dc_panel_patch *ppatch = NULL;
+
+	if (!sink)
+		return;
+
+	ppatch = &sink->edid_caps.panel_patch;
+	if (ppatch->wait_after_dpcd_poweroff_ms) {
+		msleep(ppatch->wait_after_dpcd_poweroff_ms);
+		drm_dbg_driver(adev_to_drm(adev),
+			       "%s: adding a %ds delay as w/a for panel\n",
+			       __func__,
+			       ppatch->wait_after_dpcd_poweroff_ms / 1000);
+	}
 }
 
-static int dm_resume(void *handle)
+static int dm_resume(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = handle;
+	struct amdgpu_device *adev = ip_block->adev;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
@@ -2872,8 +3304,8 @@
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
-	int i, r, j, ret;
-	bool need_hotplug = false;
+	int i, r, j;
+	struct dc_commit_streams_params commit_params = {};
 
	if (dm->dc->caps.ips_support) {
		dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
@@ -2901,7 +3333,7 @@
 
		r = dm_dmub_hw_init(adev);
		if (r)
-			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+			drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
 
		dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -2923,7 +3355,10 @@
			dc_enable_dmub_outbox(adev->dm.dc);
		}
 
-		WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
+		commit_params.streams = dc_state->streams;
+		commit_params.stream_count = dc_state->stream_count;
+		dc_exit_ips_for_hw_access(dm->dc);
+		WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
 
		dm_gpureset_commit_state(dm->cached_dc_state, dm);
 
@@ -2936,11 +3371,17 @@
 
		mutex_unlock(&dm->dc_lock);
 
+		/* set the backlight after a reset */
+		for (i = 0; i < dm->num_of_edps; i++) {
+			if (dm->backlight_dev[i])
+				amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
+		}
+
		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_state_release(dm_state->context);
-	dm_state->context = dc_state_create(dm->dc);
+	dm_state->context = dc_state_create(dm->dc, NULL);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
 
	/* Before powering on DC we need to re-initialize DMUB. */
@@ -2965,12 +3406,15 @@
	 */
	amdgpu_dm_irq_resume_early(adev);
 
+	s3_handle_hdmi_cec(ddev, false);
+
	/* On resume we need to rewrite the MSTM control bits to enable MST*/
	s3_handle_mst(ddev, false);
 
	/* Do detection*/
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
+		bool ret;
 
		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;
@@ -2984,19 +3428,23 @@
		 * this is the case when traversing through already created end sink
		 * MST connectors, should be skipped
		 */
-		if (aconnector && aconnector->mst_root)
+		if (aconnector->mst_root)
			continue;
 
-		mutex_lock(&aconnector->hpd_lock);
+		guard(mutex)(&aconnector->hpd_lock);
		if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
-			DRM_ERROR("KMS: Failed to detect connector\n");
+			drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
 
		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(aconnector->dc_link);
		} else {
-			mutex_lock(&dm->dc_lock);
-			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
-			mutex_unlock(&dm->dc_lock);
+			guard(mutex)(&dm->dc_lock);
+			dc_exit_ips_for_hw_access(dm->dc);
+			ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4);
+			if (ret) {
+				/* w/a delay for certain panels */
+				apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
+			}
		}
 
		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
@@ -3006,13 +3454,15 @@
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
-		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);
 
	/* Force mode set in atomic commit */
-	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
+	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		new_crtc_state->active_changed = true;
+		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+		reset_freesync_config_for_crtc(dm_new_crtc_state);
+	}
 
	/*
	 * atomic_check is expected to create the dc states. We need to release
@@ -3026,6 +3476,7 @@
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
+		dm_new_crtc_state->base.color_mgmt_changed = true;
	}
 
	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
@@ -3044,28 +3495,25 @@
	/* Do mst topology probing after resuming cached state*/
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+			continue;
+
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_root)
			continue;
 
-		ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
-
-		if (ret < 0) {
-			dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
-					aconnector->dc_link);
-			need_hotplug = true;
-		}
+		drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
	}
	drm_connector_list_iter_end(&iter);
 
-	if (need_hotplug)
-		drm_kms_helper_hotplug_event(ddev);
-
	amdgpu_dm_irq_resume_late(adev);
 
	amdgpu_dm_smu_write_watermarks_table(adev);
 
+	drm_kms_helper_hotplug_event(ddev);
+
	return 0;
 }
 
@@ -3088,6 +3536,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.early_fini = amdgpu_dm_early_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
+	.prepare_suspend = dm_prepare_suspend,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
@@ -3131,6 +3580,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct drm_luminance_range_info *luminance_range;
+	int min_input_signal_override;
 
	if (aconnector->bl_idx == -1 ||
	    aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
@@ -3155,6 +3605,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
		caps->aux_support = false;
	else if (amdgpu_backlight == 1)
		caps->aux_support = true;
+	if (caps->aux_support)
+		aconnector->dc_link->backlight_control_type = BACKLIGHT_CONTROL_AMD_AUX;
 
	luminance_range = &conn_base->display_info.luminance_range;
 
@@ -3165,14 +3617,20 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
		caps->aux_min_input_signal = 0;
		caps->aux_max_input_signal = 512;
	}
+
+	min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid);
+	if (min_input_signal_override >= 0)
+		caps->min_input_signal = min_input_signal_override;
 }
 
+DEFINE_FREE(sink_release, struct dc_sink *, if (_T) dc_sink_release(_T))
+
 void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
 {
	struct drm_connector *connector = &aconnector->base;
+	struct dc_sink *sink __free(sink_release) = NULL;
	struct drm_device *dev = connector->dev;
-	struct dc_sink *sink;
 
	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
@@ -3194,7 +3652,7 @@ void amdgpu_dm_update_connector_after_detect(
	 * For S3 resume with headless use eml_sink to fake stream
	 * because on resume connector->sink is set to NULL
	 */
-	mutex_lock(&dev->mode_config.mutex);
+	guard(mutex)(&dev->mode_config.mutex);
 
	if (sink) {
		if (aconnector->dc_sink) {
@@ -3210,7 +3668,7 @@ void amdgpu_dm_update_connector_after_detect(
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
-					aconnector->edid);
+					aconnector->drm_edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
@@ -3219,10 +3677,6 @@ void
amdgpu_dm_update_connector_after_detect( } } - mutex_unlock(&dev->mode_config.mutex); - - if (sink) - dc_sink_release(sink); return; } @@ -3230,27 +3684,23 @@ void amdgpu_dm_update_connector_after_detect( * TODO: temporary guard to look for proper fix * if this sink is MST sink, we should not do anything */ - if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - dc_sink_release(sink); + if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) return; - } if (aconnector->dc_sink == sink) { /* * We got a DP short pulse (Link Loss, DP CTS, etc...). * Do nothing!! */ - DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n", - aconnector->connector_id); - if (sink) - dc_sink_release(sink); + drm_dbg_kms(dev, "DCHPD: connector_id=%d: dc_sink didn't change.\n", + aconnector->connector_id); return; } - DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", - aconnector->connector_id, aconnector->dc_sink, sink); + drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", + aconnector->connector_id, aconnector->dc_sink, sink); - mutex_lock(&dev->mode_config.mutex); + guard(mutex)(&dev->mode_config.mutex); /* * 1. Update status of the drm connector @@ -3269,18 +3719,21 @@ void amdgpu_dm_update_connector_after_detect( aconnector->dc_sink = sink; dc_sink_retain(aconnector->dc_sink); if (sink->dc_edid.length == 0) { - aconnector->edid = NULL; + aconnector->drm_edid = NULL; + hdmi_cec_unset_edid(aconnector); if (aconnector->dc_link->aux_mode) { - drm_dp_cec_unset_edid( - &aconnector->dm_dp_aux.aux); + drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); } } else { - aconnector->edid = - (struct edid *)sink->dc_edid.raw_edid; + const struct edid *edid = (const struct edid *)sink->dc_edid.raw_edid; + + aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length); + drm_edid_connector_update(connector, aconnector->drm_edid); + hdmi_cec_set_edid(aconnector); if (aconnector->dc_link->aux_mode) - drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, - aconnector->edid); + drm_dp_cec_attach(&aconnector->dm_dp_aux.aux, + connector->display_info.source_physical_address); } if (!aconnector->timing_requested) { @@ -3291,17 +3744,17 @@ void amdgpu_dm_update_connector_after_detect( "failed to create aconnector->requested_timing\n"); } - drm_connector_update_edid_property(connector, aconnector->edid); - amdgpu_dm_update_freesync_caps(connector, aconnector->edid); + amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid); update_connector_ext_caps(aconnector); } else { + hdmi_cec_unset_edid(aconnector); drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); amdgpu_dm_update_freesync_caps(connector, NULL); - drm_connector_update_edid_property(connector, NULL); aconnector->num_modes = 0; dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; - aconnector->edid = NULL; + drm_edid_free(aconnector->drm_edid); + aconnector->drm_edid = NULL; kfree(aconnector->timing_requested); aconnector->timing_requested = NULL; /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ @@ -3309,12 +3762,7 @@ void amdgpu_dm_update_connector_after_detect( connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; } - mutex_unlock(&dev->mode_config.mutex); - update_subconnector_property(aconnector); - - if (sink) - dc_sink_release(sink); } static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) @@ -3324,6 +3772,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) enum dc_connection_type 
new_connection_type = dc_connection_none; struct amdgpu_device *adev = drm_to_adev(dev); struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); + struct dc *dc = aconnector->dc_link->ctx->dc; bool ret = false; if (adev->dm.disable_hpd_irq) @@ -3333,7 +3782,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) * In case of failure or MST no need to update connector status or notify the OS * since (for MST case) MST does this in its own context. */ - mutex_lock(&aconnector->hpd_lock); + guard(mutex)(&aconnector->hpd_lock); if (adev->dm.hdcp_workqueue) { hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); @@ -3345,7 +3794,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) aconnector->timing_changed = false; if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) - DRM_ERROR("KMS: Failed to detect connector\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(aconnector->dc_link); @@ -3357,10 +3806,13 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) drm_kms_helper_connector_hotplug_event(connector); } else { - mutex_lock(&adev->dm.dc_lock); - ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); - mutex_unlock(&adev->dm.dc_lock); + scoped_guard(mutex, &adev->dm.dc_lock) { + dc_exit_ips_for_hw_access(dc); + ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); + } if (ret) { + /* w/a delay for certain panels */ + apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink); amdgpu_dm_update_connector_after_detect(aconnector); drm_modeset_lock_all(dev); @@ -3371,8 +3823,6 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) drm_kms_helper_connector_hotplug_event(connector); } } - mutex_unlock(&aconnector->hpd_lock); - } static void handle_hpd_irq(void *param) @@ -3383,20 +3833,21 @@ static void handle_hpd_irq(void *param) } -static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq, +static void schedule_hpd_rx_offload_work(struct amdgpu_device *adev, struct hpd_rx_irq_offload_work_queue *offload_wq, union hpd_irq_data hpd_irq_data) { struct hpd_rx_irq_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL); if (!offload_work) { - DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n"); + drm_err(adev_to_drm(adev), "Failed to allocate hpd_rx_irq_offload_work.\n"); return; } INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work); offload_work->data = hpd_irq_data; offload_work->offload_wq = offload_wq; + offload_work->adev = adev; queue_work(offload_wq->wq, &offload_work->work); DRM_DEBUG_KMS("queue work to handle hpd_rx offload work"); @@ -3417,6 +3868,7 @@ static void handle_hpd_rx_irq(void *param) bool has_left_work = false; int idx = dc_link->link_index; struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; + struct dc *dc = aconnector->dc_link->ctx->dc; memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); @@ -3437,7 +3889,7 @@ static void handle_hpd_rx_irq(void *param) goto out; if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { - schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); + schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data); goto out; } @@ -3459,7 +3911,7 @@ static void handle_hpd_rx_irq(void *param) 
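/*
 * Illustrative sketch of the guard()/scoped_guard() conversions used in
 * the hpd handlers above, also from <linux/cleanup.h>. The function and
 * lock parameters here are placeholders, not part of this patch; the
 * point is the two lock lifetimes the helpers express.
 */
#include <linux/cleanup.h>
#include <linux/mutex.h>

static void example_hpd_locking(struct mutex *hpd_lock, struct mutex *dc_lock)
{
	guard(mutex)(hpd_lock);		/* held until the function returns */

	scoped_guard(mutex, dc_lock) {
		/* dc_lock is held only within this block */
	}
	/* dc_lock released here; hpd_lock still held on all later paths */
}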
spin_unlock(&offload_wq->offload_lock); if (!skip) - schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); + schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data); goto out; } @@ -3476,7 +3928,7 @@ static void handle_hpd_rx_irq(void *param) spin_unlock(&offload_wq->offload_lock); if (!skip) - schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); + schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data); goto out; } @@ -3486,7 +3938,7 @@ out: if (result && !is_mst_root_connector) { /* Downstream Port status changed. */ if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) - DRM_ERROR("KMS: Failed to detect connector\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(dc_link); @@ -3506,6 +3958,7 @@ out: bool ret = false; mutex_lock(&adev->dm.dc_lock); + dc_exit_ips_for_hw_access(dc); ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX); mutex_unlock(&adev->dm.dc_lock); @@ -3534,7 +3987,7 @@ out: mutex_unlock(&aconnector->hpd_lock); } -static void register_hpd_handlers(struct amdgpu_device *adev) +static int register_hpd_handlers(struct amdgpu_device *adev) { struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; @@ -3546,11 +3999,23 @@ static void register_hpd_handlers(struct amdgpu_device *adev) int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; if (dc_is_dmub_outbox_supported(adev->dm.dc)) { - if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) - DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, + dmub_hpd_callback, true)) { + drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback"); + return -EINVAL; + } + + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, + dmub_hpd_callback, true)) { + drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback"); + return -EINVAL; + } - if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) - DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY, + dmub_hpd_sense_callback, true)) { + drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd sense callback"); + return -EINVAL; + } } list_for_each_entry(connector, @@ -3566,9 +4031,16 @@ static void register_hpd_handlers(struct amdgpu_device *adev) int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; int_params.irq_source = dc_link->irq_source_hpd; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - handle_hpd_irq, - (void *) aconnector); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_HPD1 || + int_params.irq_source > DC_IRQ_SOURCE_HPD6) { + drm_err(adev_to_drm(adev), "Failed to register hpd irq!\n"); + return -EINVAL; + } + + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + handle_hpd_irq, (void *) aconnector)) + return -ENOMEM; } if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { @@ -3577,11 +4049,19 @@ static void register_hpd_handlers(struct amdgpu_device *adev) int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; int_params.irq_source = dc_link->irq_source_hpd_rx; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - handle_hpd_rx_irq, - (void *) aconnector); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_HPD1RX || + 
int_params.irq_source > DC_IRQ_SOURCE_HPD6RX) { + drm_err(adev_to_drm(adev), "Failed to register hpd rx irq!\n"); + return -EINVAL; + } + + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + handle_hpd_rx_irq, (void *) aconnector)) + return -ENOMEM; } } + return 0; } #if defined(CONFIG_DRM_AMD_DC_SI) @@ -3614,7 +4094,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev) for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq); if (r) { - DRM_ERROR("Failed to add crtc irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n"); return r; } @@ -3622,13 +4102,21 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev) int_params.irq_source = dc_interrupt_to_irq_source(dc, i + 1, 0); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || + int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { + drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n"); + return -EINVAL; + } + c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_crtc_high_irq, c_irq_params); + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_crtc_high_irq, c_irq_params)) + return -ENOMEM; } /* Use GRPH_PFLIP interrupt */ @@ -3636,7 +4124,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev) i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); if (r) { - DRM_ERROR("Failed to add page flip irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n"); return r; } @@ -3644,27 +4132,34 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev) int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || + int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { + drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n"); + return -EINVAL; + } + c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_pflip_high_irq, c_irq_params); - + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_pflip_high_irq, c_irq_params)) + return -ENOMEM; } /* HPD */ r = amdgpu_irq_add_id(adev, client_id, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); if (r) { - DRM_ERROR("Failed to add hpd irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n"); return r; } - register_hpd_handlers(adev); + r = register_hpd_handlers(adev); - return 0; + return r; } #endif @@ -3700,7 +4195,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq); if (r) { - DRM_ERROR("Failed to add crtc irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n"); return r; } @@ -3708,20 +4203,28 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || + 
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { + drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n"); + return -EINVAL; + } + c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_crtc_high_irq, c_irq_params); + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_crtc_high_irq, c_irq_params)) + return -ENOMEM; } /* Use VUPDATE interrupt */ for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq); if (r) { - DRM_ERROR("Failed to add vupdate irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n"); return r; } @@ -3729,13 +4232,21 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 || + int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) { + drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n"); + return -EINVAL; + } + c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_vupdate_high_irq, c_irq_params); + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_vupdate_high_irq, c_irq_params)) + return -ENOMEM; } /* Use GRPH_PFLIP interrupt */ @@ -3743,7 +4254,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); if (r) { - DRM_ERROR("Failed to add page flip irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n"); return r; } @@ -3751,27 +4262,34 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || + int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { + drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n"); + return -EINVAL; + } + c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_pflip_high_irq, c_irq_params); - + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_pflip_high_irq, c_irq_params)) + return -ENOMEM; } /* HPD */ r = amdgpu_irq_add_id(adev, client_id, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); if (r) { - DRM_ERROR("Failed to add hpd irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n"); return r; } - register_hpd_handlers(adev); + r = register_hpd_handlers(adev); - return 0; + return r; } /* Register IRQ sources and initialize IRQ callbacks */ @@ -3815,7 +4333,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq); if (r) { - DRM_ERROR("Failed to add crtc irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n"); return r; } @@ -3823,13 +4341,21 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) int_params.irq_source = 
dc_interrupt_to_irq_source(dc, i, 0); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || + int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { + drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n"); + return -EINVAL; + } + c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt( - adev, &int_params, dm_crtc_high_irq, c_irq_params); + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_crtc_high_irq, c_irq_params)) + return -ENOMEM; } /* Use otg vertical line interrupt */ @@ -3839,7 +4365,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) vrtl_int_srcid[i], &adev->vline0_irq); if (r) { - DRM_ERROR("Failed to add vline0 irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add vline0 irq id!\n"); return r; } @@ -3847,9 +4373,11 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) int_params.irq_source = dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0); - if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) { - DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]); - break; + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_DC1_VLINE0 || + int_params.irq_source > DC_IRQ_SOURCE_DC6_VLINE0) { + drm_err(adev_to_drm(adev), "Failed to register vline0 irq!\n"); + return -EINVAL; } c_irq_params = &adev->dm.vline0_params[int_params.irq_source @@ -3858,8 +4386,10 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_dcn_vertical_interrupt0_high_irq, c_irq_params); + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_dcn_vertical_interrupt0_high_irq, + c_irq_params)) + return -ENOMEM; } #endif @@ -3874,7 +4404,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); if (r) { - DRM_ERROR("Failed to add vupdate irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n"); return r; } @@ -3882,13 +4412,21 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 || + int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) { + drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n"); + return -EINVAL; + } + c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_vupdate_high_irq, c_irq_params); + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_vupdate_high_irq, c_irq_params)) + return -ENOMEM; } /* Use GRPH_PFLIP interrupt */ @@ -3897,7 +4435,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) i++) { r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); if (r) { - DRM_ERROR("Failed to add page flip irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n"); return r; } @@ -3905,27 +4443,34 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); + if 
(int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || + int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { + drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n"); + return -EINVAL; + } + c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_pflip_high_irq, c_irq_params); - + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_pflip_high_irq, c_irq_params)) + return -ENOMEM; } /* HPD */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, &adev->hpd_irq); if (r) { - DRM_ERROR("Failed to add hpd irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n"); return r; } - register_hpd_handlers(adev); + r = register_hpd_handlers(adev); - return 0; + return r; } /* Register Outbox IRQ sources and initialize IRQ callbacks */ static int register_outbox_irq_handlers(struct amdgpu_device *adev) @@ -3941,7 +4486,7 @@ static int register_outbox_irq_handlers(struct amdgpu_device *adev) r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT, &adev->dmub_outbox_irq); if (r) { - DRM_ERROR("Failed to add outbox irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add outbox irq id!\n"); return r; } @@ -3956,8 +4501,9 @@ static int register_outbox_irq_handlers(struct amdgpu_device *adev) c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_dmub_outbox1_low_irq, c_irq_params); + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_dmub_outbox1_low_irq, c_irq_params)) + return -ENOMEM; } return 0; @@ -4092,8 +4638,11 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) } #ifdef AMD_PRIVATE_COLOR - if (amdgpu_dm_create_color_properties(adev)) + if (amdgpu_dm_create_color_properties(adev)) { + dc_state_release(state->context); + kfree(state); return -ENOMEM; + } #endif r = amdgpu_dm_audio_init(adev); @@ -4108,38 +4657,46 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 +#define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2) #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, int bl_idx) { + struct amdgpu_dm_backlight_caps *caps = &dm->backlight_caps[bl_idx]; + + if (caps->caps_valid) + return; + #if defined(CONFIG_ACPI) - struct amdgpu_dm_backlight_caps caps; + amdgpu_acpi_get_backlight_caps(caps); - memset(&caps, 0, sizeof(caps)); + /* validate the firmware value is sane */ + if (caps->caps_valid) { + int spread = caps->max_input_signal - caps->min_input_signal; - if (dm->backlight_caps[bl_idx].caps_valid) - return; + if (caps->max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || + caps->min_input_signal < 0 || + spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || + spread < AMDGPU_DM_MIN_SPREAD) { + DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n", + caps->min_input_signal, caps->max_input_signal); + caps->caps_valid = false; + } + } - amdgpu_acpi_get_backlight_caps(&caps); - if (caps.caps_valid) { - dm->backlight_caps[bl_idx].caps_valid = true; - if (caps.aux_support) - return; - dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal; - 
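/*
 * Condensed restatement of the ACPI backlight caps sanity check added
 * above; the helper name is hypothetical. With this file's defaults,
 * AMDGPU_DM_MIN_SPREAD = (255 - 12) / 2 = 121, so firmware caps such as
 * min = 10, max = 60 (spread 50) are rejected and the 12..255 defaults
 * are used instead.
 */
static bool example_caps_sane(const struct amdgpu_dm_backlight_caps *caps)
{
	int spread = caps->max_input_signal - caps->min_input_signal;

	return caps->max_input_signal <= AMDGPU_DM_DEFAULT_MAX_BACKLIGHT &&
	       caps->min_input_signal >= 0 &&
	       spread <= AMDGPU_DM_DEFAULT_MAX_BACKLIGHT &&
	       spread >= AMDGPU_DM_MIN_SPREAD;
}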
dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal; - } else { - dm->backlight_caps[bl_idx].min_input_signal = - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; - dm->backlight_caps[bl_idx].max_input_signal = - AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; + if (!caps->caps_valid) { + caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; + caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; + caps->caps_valid = true; } #else - if (dm->backlight_caps[bl_idx].aux_support) + if (caps->aux_support) return; - dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; - dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; + caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; + caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; + caps->caps_valid = true; #endif } @@ -4161,6 +4718,60 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, return 1; } +/* Rescale from [min..max] to [0..MAX_BACKLIGHT_LEVEL] */ +static inline u32 scale_input_to_fw(int min, int max, u64 input) +{ + return DIV_ROUND_CLOSEST_ULL(input * MAX_BACKLIGHT_LEVEL, max - min); +} + +/* Rescale from [0..MAX_BACKLIGHT_LEVEL] to [min..max] */ +static inline u32 scale_fw_to_input(int min, int max, u64 input) +{ + return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), MAX_BACKLIGHT_LEVEL); +} + +static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps, + unsigned int min, unsigned int max, + uint32_t *user_brightness) +{ + u32 brightness = scale_input_to_fw(min, max, *user_brightness); + u8 prev_signal = 0, prev_lum = 0; + int i = 0; + + if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE) + return; + + if (!caps->data_points) + return; + + /* choose start to run less interpolation steps */ + if (caps->luminance_data[caps->data_points/2].input_signal > brightness) + i = caps->data_points/2; + do { + u8 signal = caps->luminance_data[i].input_signal; + u8 lum = caps->luminance_data[i].luminance; + + /* + * brightness == signal: luminance is percent numerator + * brightness < signal: interpolate between previous and current luminance numerator + * brightness > signal: find next data point + */ + if (brightness > signal) { + prev_signal = signal; + prev_lum = lum; + i++; + continue; + } + if (brightness < signal) + lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) * + (brightness - prev_signal), + signal - prev_signal); + *user_brightness = scale_fw_to_input(min, max, + DIV_ROUND_CLOSEST(lum * brightness, 101)); + return; + } while (i < caps->data_points); +} + static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, uint32_t brightness) { @@ -4169,9 +4780,10 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c if (!get_brightness_range(caps, &min, &max)) return brightness; - // Rescale 0..255 to min..max - return min + DIV_ROUND_CLOSEST((max - min) * brightness, - AMDGPU_MAX_BL_LEVEL); + convert_custom_brightness(caps, min, max, &brightness); + + // Rescale 0..max to min..max + return min + DIV_ROUND_CLOSEST_ULL((u64)(max - min) * brightness, max); } static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, @@ -4184,8 +4796,8 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap if (brightness < min) return 0; - // Rescale min..max to 0..255 - return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), + // Rescale min..max to 0..max + return DIV_ROUND_CLOSEST_ULL((u64)max * (brightness - min), max 
- min); } @@ -4193,33 +4805,49 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, int bl_idx, u32 user_brightness) { - struct amdgpu_dm_backlight_caps caps; + struct amdgpu_dm_backlight_caps *caps; struct dc_link *link; u32 brightness; - bool rc; + bool rc, reallow_idle = false; amdgpu_dm_update_backlight_caps(dm, bl_idx); - caps = dm->backlight_caps[bl_idx]; + caps = &dm->backlight_caps[bl_idx]; dm->brightness[bl_idx] = user_brightness; /* update scratch register */ if (bl_idx == 0) amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); - brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); + brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]); link = (struct dc_link *)dm->backlight_link[bl_idx]; /* Change brightness based on AUX property */ - if (caps.aux_support) { + mutex_lock(&dm->dc_lock); + if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) { + dc_allow_idle_optimizations(dm->dc, false); + reallow_idle = true; + } + + if (caps->aux_support) { rc = dc_link_set_backlight_level_nits(link, true, brightness, AUX_BL_DEFAULT_TRANSITION_TIME_MS); if (!rc) DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); } else { - rc = dc_link_set_backlight_level(link, brightness, 0); + struct set_backlight_level_params backlight_level_params = { 0 }; + + backlight_level_params.backlight_pwm_u16_16 = brightness; + backlight_level_params.transition_time_in_ms = 0; + + rc = dc_link_set_backlight_level(link, &backlight_level_params); if (!rc) DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); } + if (dm->dc->caps.ips_support && reallow_idle) + dc_allow_idle_optimizations(dm->dc, true); + + mutex_unlock(&dm->dc_lock); + if (rc) dm->actual_brightness[bl_idx] = user_brightness; } @@ -4294,7 +4922,9 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) struct drm_device *drm = aconnector->base.dev; struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; struct backlight_properties props = { 0 }; + struct amdgpu_dm_backlight_caps *caps; char bl_name[16]; + int min, max; if (aconnector->bl_idx == -1) return; @@ -4306,8 +4936,21 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) return; } - props.max_brightness = AMDGPU_MAX_BL_LEVEL; - props.brightness = AMDGPU_MAX_BL_LEVEL; + caps = &dm->backlight_caps[aconnector->bl_idx]; + if (get_brightness_range(caps, &min, &max)) { + if (power_supply_is_system_supplied() > 0) + props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps->ac_level, 100); + else + props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps->dc_level, 100); + /* min is zero, so max needs to be adjusted */ + props.max_brightness = max - min; + drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max, + caps->ac_level, caps->dc_level); + } else + props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL; + + if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) + drm_info(drm, "Using custom brightness curve\n"); props.type = BACKLIGHT_RAW; snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", @@ -4316,12 +4959,13 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) dm->backlight_dev[aconnector->bl_idx] = backlight_device_register(bl_name, aconnector->base.kdev, dm, &amdgpu_dm_backlight_ops, &props); + dm->brightness[aconnector->bl_idx] = props.brightness; if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) { - DRM_ERROR("DM: 
Backlight registration failed!\n"); + drm_err(drm, "DM: Backlight registration failed!\n"); dm->backlight_dev[aconnector->bl_idx] = NULL; } else - DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); + drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name); } static int initialize_plane(struct amdgpu_display_manager *dm, @@ -4335,7 +4979,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm, plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); if (!plane) { - DRM_ERROR("KMS: Failed to allocate plane\n"); + drm_err(adev_to_drm(dm->adev), "KMS: Failed to allocate plane\n"); return -ENOMEM; } plane->type = plane_type; @@ -4353,7 +4997,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm, ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap); if (ret) { - DRM_ERROR("KMS: Failed to initialize plane\n"); + drm_err(adev_to_drm(dm->adev), "KMS: Failed to initialize plane\n"); kfree(plane); return ret; } @@ -4383,7 +5027,6 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm, aconnector->bl_idx = bl_idx; amdgpu_dm_update_backlight_caps(dm, bl_idx); - dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL; dm->backlight_link[bl_idx] = link; dm->num_of_edps++; @@ -4423,13 +5066,16 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) link_cnt = dm->dc->caps.max_links; if (amdgpu_dm_mode_config_init(dm->adev)) { - DRM_ERROR("DM: Failed to initialize mode config\n"); + drm_err(adev_to_drm(adev), "DM: Failed to initialize mode config\n"); return -EINVAL; } /* There is one primary plane per CRTC */ primary_planes = dm->dc->caps.max_streams; - ASSERT(primary_planes <= AMDGPU_MAX_PLANES); + if (primary_planes > AMDGPU_MAX_PLANES) { + drm_err(adev_to_drm(adev), "DM: Plane nums out of 6 planes\n"); + return -EINVAL; + } /* * Initialize primary planes, implicit planes for legacy IOCTLS. 
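/*
 * Worked example for the backlight registration above, with
 * illustrative numbers: min/max as returned by get_brightness_range()
 * and ac_level as an ACPI percentage. Note this sketch multiplies
 * before dividing; DIV_ROUND_CLOSEST(caps->ac_level, 100), as written
 * in the hunk, rounds the percentage to 0 or 1 before scaling the
 * range, so the ordering of the two operations matters.
 */
static int example_initial_brightness(int min, int max, int ac_level)
{
	/* e.g. min = 12, max = 255, ac_level = 80 */
	int span = max - min;	/* sysfs exposes 0..span, here 0..243 */

	return DIV_ROUND_CLOSEST(span * ac_level, 100);	/* 194 of 243 */
}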
@@ -4440,7 +5086,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) if (initialize_plane(dm, mode_info, i, DRM_PLANE_TYPE_PRIMARY, plane)) { - DRM_ERROR("KMS: Failed to initialize primary plane\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to initialize primary plane\n"); goto fail; } } @@ -4472,14 +5118,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) if (initialize_plane(dm, NULL, primary_planes + i, DRM_PLANE_TYPE_OVERLAY, plane)) { - DRM_ERROR("KMS: Failed to initialize overlay plane\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to initialize overlay plane\n"); goto fail; } } for (i = 0; i < dm->dc->caps.max_streams; i++) if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { - DRM_ERROR("KMS: Failed to initialize crtc\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to initialize crtc\n"); goto fail; } @@ -4496,8 +5142,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(2, 1, 0): case IP_VERSION(3, 5, 0): case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): + case IP_VERSION(4, 0, 1): if (register_outbox_irq_handlers(dm->adev)) { - DRM_ERROR("DM: Failed to initialize IRQ\n"); + drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); goto fail; } break; @@ -4518,6 +5166,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 2, 1): case IP_VERSION(3, 5, 0): case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): + case IP_VERSION(4, 0, 1): psr_feature_enabled = true; break; default: @@ -4530,43 +5180,43 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) { switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(3, 1, 4): - case IP_VERSION(3, 1, 5): - case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): case IP_VERSION(3, 5, 0): case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): replay_feature_enabled = true; break; + default: replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK; break; } } + if (link_cnt > MAX_LINKS) { + drm_err(adev_to_drm(adev), + "KMS: Cannot support more than %d display indexes\n", + MAX_LINKS); + goto fail; + } + /* loops over all connectors on the board */ for (i = 0; i < link_cnt; i++) { struct dc_link *link = NULL; - if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { - DRM_ERROR( - "KMS: Cannot support more than %d display indexes\n", - AMDGPU_DM_MAX_DISPLAY_INDEX); - continue; - } - link = dc_get_link_at_index(dm->dc, i); if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) { struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL); if (!wbcon) { - DRM_ERROR("KMS: Failed to allocate writeback connector\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to allocate writeback connector\n"); continue; } if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) { - DRM_ERROR("KMS: Failed to initialize writeback connector\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to initialize writeback connector\n"); kfree(wbcon); continue; } @@ -4586,12 +5236,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) goto fail; if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { - DRM_ERROR("KMS: Failed to initialize encoder\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to initialize encoder\n"); goto fail; } if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { - DRM_ERROR("KMS: Failed to initialize connector\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to initialize connector\n"); goto fail; } @@ 
-4600,7 +5250,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) aconnector; if (!dc_link_detect_connection_type(link, &new_connection_type)) - DRM_ERROR("KMS: Failed to detect connector\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(link); @@ -4609,6 +5259,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) bool ret = false; mutex_lock(&dm->dc_lock); + dc_exit_ips_for_hw_access(dm->dc); ret = dc_link_detect(link, DETECT_REASON_BOOT); mutex_unlock(&dm->dc_lock); @@ -4621,14 +5272,15 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) if (amdgpu_dm_set_replay_caps(link, aconnector)) psr_feature_enabled = false; - if (psr_feature_enabled) + if (psr_feature_enabled) { amdgpu_dm_set_psr_caps(link); - - /* TODO: Fix vblank control helpers to delay PSR entry to allow this when - * PSR is also supported. - */ - if (link->psr_settings.psr_feature_enabled) - adev_to_drm(adev)->vblank_disable_immediate = false; + drm_info(adev_to_drm(adev), "PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n", + link->psr_settings.psr_feature_enabled, + link->psr_settings.psr_version, + link->dpcd_caps.psr_info.psr_version, + link->dpcd_caps.psr_info.psr_dpcd_caps.raw, + link->dpcd_caps.psr_info.psr2_su_y_granularity_cap); + } } } amdgpu_set_panel_orientation(&aconnector->base); @@ -4642,7 +5294,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case CHIP_VERDE: case CHIP_OLAND: if (dce60_register_irq_handlers(dm->adev)) { - DRM_ERROR("DM: Failed to initialize IRQ\n"); + drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); goto fail; } break; @@ -4664,7 +5316,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case CHIP_VEGA12: case CHIP_VEGA20: if (dce110_register_irq_handlers(dm->adev)) { - DRM_ERROR("DM: Failed to initialize IRQ\n"); + drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); goto fail; } break; @@ -4689,13 +5341,15 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 2, 1): case IP_VERSION(3, 5, 0): case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): + case IP_VERSION(4, 0, 1): if (dcn10_register_irq_handlers(dm->adev)) { - DRM_ERROR("DM: Failed to initialize IRQ\n"); + drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); goto fail; } break; default: - DRM_ERROR("Unsupported DCE IP versions: 0x%X\n", + drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%X\n", amdgpu_ip_version(adev, DCE_HWIP, 0)); goto fail; } @@ -4756,15 +5410,20 @@ static ssize_t s3_debug_store(struct device *device, int s3_state; struct drm_device *drm_dev = dev_get_drvdata(device); struct amdgpu_device *adev = drm_to_adev(drm_dev); + struct amdgpu_ip_block *ip_block; + + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE); + if (!ip_block) + return -EINVAL; ret = kstrtoint(buf, 0, &s3_state); if (ret == 0) { if (s3_state) { - dm_resume(adev); + dm_resume(ip_block); drm_kms_helper_hotplug_event(adev_to_drm(adev)); } else - dm_suspend(adev); + dm_suspend(ip_block); } return ret == 0 ? 
count : 0; @@ -4820,20 +5479,29 @@ static int dm_init_microcode(struct amdgpu_device *adev) fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; break; case IP_VERSION(3, 5, 0): - case IP_VERSION(3, 5, 1): fw_name_dmub = FIRMWARE_DCN_35_DMUB; break; + case IP_VERSION(3, 5, 1): + fw_name_dmub = FIRMWARE_DCN_351_DMUB; + break; + case IP_VERSION(3, 6, 0): + fw_name_dmub = FIRMWARE_DCN_36_DMUB; + break; + case IP_VERSION(4, 0, 1): + fw_name_dmub = FIRMWARE_DCN_401_DMUB; + break; default: /* ASIC doesn't support DMUB. */ return 0; } - r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub); + r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, AMDGPU_UCODE_REQUIRED, + "%s", fw_name_dmub); return r; } -static int dm_early_init(void *handle) +static int dm_early_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct amdgpu_mode_info *mode_info = &adev->mode_info; struct atom_context *ctx = mode_info->atom_context; int index = GetIndexIntoMasterTable(DATA, Object_Header); @@ -4842,7 +5510,7 @@ static int dm_early_init(void *handle) /* if there is no object header, skip DM */ if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; - dev_info(adev->dev, "No object header, skipping DM\n"); + drm_info(adev_to_drm(adev), "No object header, skipping DM\n"); return -ENOENT; } @@ -4947,12 +5615,14 @@ static int dm_early_init(void *handle) case IP_VERSION(3, 2, 1): case IP_VERSION(3, 5, 0): case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): + case IP_VERSION(4, 0, 1): adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 4; adev->mode_info.num_dig = 4; break; default: - DRM_ERROR("Unsupported DCE IP versions: 0x%x\n", + drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%x\n", amdgpu_ip_version(adev, DCE_HWIP, 0)); return -EINVAL; } @@ -5024,9 +5694,9 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state, case DRM_COLOR_YCBCR_BT2020: if (full_range) - *color_space = COLOR_SPACE_2020_YCBCR; + *color_space = COLOR_SPACE_2020_YCBCR_FULL; else - return -EINVAL; + *color_space = COLOR_SPACE_2020_YCBCR_LIMITED; break; default: @@ -5042,8 +5712,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, const u64 tiling_flags, struct dc_plane_info *plane_info, struct dc_plane_address *address, - bool tmz_surface, - bool force_disable_dcc) + bool tmz_surface) { const struct drm_framebuffer *fb = plane_state->fb; const struct amdgpu_framebuffer *afb = @@ -5102,7 +5771,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616; break; default: - DRM_ERROR( + drm_err(adev_to_drm(adev), "Unsupported screen format %p4cc\n", &fb->format->format); return -EINVAL; @@ -5142,7 +5811,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, &plane_info->tiling_info, &plane_info->plane_size, &plane_info->dcc, address, - tmz_surface, force_disable_dcc); + tmz_surface); if (ret) return ret; @@ -5163,7 +5832,6 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, struct dc_scaling_info scaling_info; struct dc_plane_info plane_info; int ret; - bool force_disable_dcc = false; ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info); if (ret) @@ -5174,13 +5842,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, dc_plane_state->clip_rect = scaling_info.clip_rect; dc_plane_state->scaling_quality = 
scaling_info.scaling_quality; - force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; ret = fill_dc_plane_info_and_addr(adev, plane_state, afb->tiling_flags, &plane_info, &dc_plane_state->address, - afb->tmz_surface, - force_disable_dcc); + afb->tmz_surface); if (ret) return ret; @@ -5526,12 +6192,14 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing, if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) color_space = COLOR_SPACE_2020_RGB_FULLRANGE; else - color_space = COLOR_SPACE_2020_YCBCR; + color_space = COLOR_SPACE_2020_YCBCR_LIMITED; break; case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601 default: if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) { color_space = COLOR_SPACE_SRGB; + if (connector_state->hdmi.broadcast_rgb == DRM_HDMI_BROADCAST_RGB_LIMITED) + color_space = COLOR_SPACE_SRGB_LIMITED; /* * 27030khz is the separation point between HDTV and SDTV * according to HDMI spec, we use YCbCr709 and YCbCr601 @@ -5624,6 +6292,7 @@ static void fill_stream_properties_from_drm_display_mode( struct amdgpu_dm_connector *aconnector = NULL; struct hdmi_vendor_infoframe hv_frame; struct hdmi_avi_infoframe avi_frame; + ssize_t err; if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) aconnector = to_amdgpu_dm_connector(connector); @@ -5670,9 +6339,17 @@ static void fill_stream_properties_from_drm_display_mode( } if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { - drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); + err = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, + (struct drm_connector *)connector, + mode_in); + if (err < 0) + drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd \n", connector->name, err); timing_out->vic = avi_frame.video_code; - drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); + err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, + (struct drm_connector *)connector, + mode_in); + if (err < 0) + drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd \n", connector->name, err); timing_out->hdmi_vic = hv_frame.vic; } @@ -5700,8 +6377,8 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->aspect_ratio = get_aspect_ratio(mode_in); - stream->out_transfer_func->type = TF_TYPE_PREDEFINED; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; + stream->out_transfer_func.type = TF_TYPE_PREDEFINED; + stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB; if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { if (!adjust_colour_depth_from_display_info(timing_out, info) && drm_mode_is_420_also(info, mode_in) && @@ -5785,19 +6462,19 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, const struct drm_display_mode *native_mode, bool scale_enabled) { - if (scale_enabled) { - copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); - } else if (native_mode->clock == drm_mode->clock && - native_mode->htotal == drm_mode->htotal && - native_mode->vtotal == drm_mode->vtotal) { - copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); + if (scale_enabled || ( + native_mode->clock == drm_mode->clock && + native_mode->htotal == drm_mode->htotal && + native_mode->vtotal == drm_mode->vtotal)) { + if (native_mode->crtc_clock) + copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); } else { /* no scaling nor amdgpu inserted, no need to patch */ } } static struct dc_sink * -create_fake_sink(struct dc_link *link) 
+create_fake_sink(struct drm_device *dev, struct dc_link *link) { struct dc_sink_init_data sink_init_data = { 0 }; struct dc_sink *sink = NULL; @@ -5807,7 +6484,7 @@ create_fake_sink(struct dc_link *link) sink = dc_sink_create(&sink_init_data); if (!sink) { - DRM_ERROR("Failed to create sink!\n"); + drm_err(dev, "Failed to create sink!\n"); return NULL; } sink->sink_signal = SIGNAL_TYPE_VIRTUAL; @@ -5921,6 +6598,9 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, &aconnector->base.probed_modes : &aconnector->base.modes; + if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + return NULL; + if (aconnector->freesync_vid_base.clock != 0) return &aconnector->freesync_vid_base; @@ -5937,7 +6617,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, m_pref = list_first_entry_or_null( &aconnector->base.modes, struct drm_display_mode, head); if (!m_pref) { - DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); + drm_dbg_driver(aconnector->base.dev, "No preferred mode found in EDID\n"); return NULL; } } @@ -5991,6 +6671,7 @@ static bool is_freesync_video_mode(const struct drm_display_mode *mode, return true; } +#if defined(CONFIG_DRM_AMD_DC_FP) static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, struct dc_sink *sink, struct dc_stream_state *stream, struct dsc_dec_dpcd_caps *dsc_caps) @@ -6009,7 +6690,6 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, } } - static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, struct dc_sink *sink, struct dc_stream_state *stream, struct dsc_dec_dpcd_caps *dsc_caps, @@ -6073,7 +6753,6 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, } } - static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, struct dc_sink *sink, struct dc_stream_state *stream, struct dsc_dec_dpcd_caps *dsc_caps) @@ -6097,13 +6776,13 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, dc_dsc_policy_set_enable_dsc_when_not_needed( aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); - if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && + if (sink->sink_signal == SIGNAL_TYPE_EDP && !aconnector->dc_link->panel_config.dsc.disable_dsc_edp && dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); - } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { + } else if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], dsc_caps, @@ -6113,7 +6792,8 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, dc_link_get_highest_encoding_format(aconnector->dc_link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name); + drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from SST RX\n", + __func__, drm_connector->name); } } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, @@ -6132,7 +6812,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, dc_link_get_highest_encoding_format(aconnector->dc_link), 
&stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n", + drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n", __func__, drm_connector->name); } } @@ -6151,6 +6831,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; } +#endif static struct dc_stream_state * create_stream_for_sink(struct drm_connector *connector, @@ -6159,6 +6840,7 @@ create_stream_for_sink(struct drm_connector *connector, const struct dc_stream_state *old_stream, int requested_bpc) { + struct drm_device *dev = connector->dev; struct amdgpu_dm_connector *aconnector = NULL; struct drm_display_mode *preferred_mode = NULL; const struct drm_connector_state *con_state = &dm_state->base; @@ -6172,19 +6854,15 @@ create_stream_for_sink(struct drm_connector *connector, int mode_refresh; int preferred_refresh = 0; enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN; +#if defined(CONFIG_DRM_AMD_DC_FP) struct dsc_dec_dpcd_caps dsc_caps; - +#endif struct dc_link *link = NULL; struct dc_sink *sink = NULL; drm_mode_init(&mode, drm_mode); memset(&saved_mode, 0, sizeof(saved_mode)); - if (connector == NULL) { - DRM_ERROR("connector is NULL!\n"); - return stream; - } - if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) { aconnector = NULL; aconnector = to_amdgpu_dm_connector(connector); @@ -6199,7 +6877,7 @@ create_stream_for_sink(struct drm_connector *connector, } if (!aconnector || !aconnector->dc_sink) { - sink = create_fake_sink(link); + sink = create_fake_sink(dev, link); if (!sink) return stream; @@ -6211,7 +6889,7 @@ create_stream_for_sink(struct drm_connector *connector, stream = dc_create_stream_for_sink(sink); if (stream == NULL) { - DRM_ERROR("Failed to create stream for sink!\n"); + drm_err(dev, "Failed to create stream for sink!\n"); goto finish; } @@ -6243,7 +6921,7 @@ create_stream_for_sink(struct drm_connector *connector, * case, we call set mode ourselves to restore the previous mode * and the modelist may not be filled in time. 
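/*
 * Editor's note (illustrative, not from the patch): apply_dsc_policy_for_stream()
 * above selects a DSC path in priority order -- eDP panel policy first, then
 * SST DP sinks with no dongle, then DP-to-HDMI PCON converters. A compilable
 * standalone model of that ordering, with stub types in place of the DC ones:
 */
#include <stdbool.h>
#include <stdio.h>

enum signal { SIG_EDP, SIG_DP };
enum dongle { DONGLE_NONE, DONGLE_DP_HDMI_CONVERTER };

static const char *dsc_path(enum signal sig, enum dongle dongle,
                            bool edp_dsc_ok, bool force_disable)
{
    if (force_disable)
        return "dsc-off";
    if (sig == SIG_EDP && edp_dsc_ok)
        return "edp-policy";
    if (sig == SIG_DP && dongle == DONGLE_NONE)
        return "sst-rx";
    if (sig == SIG_DP && dongle == DONGLE_DP_HDMI_CONVERTER)
        return "dp-hdmi-pcon";
    return "dsc-off";
}

int main(void)
{
    printf("%s\n", dsc_path(SIG_DP, DONGLE_DP_HDMI_CONVERTER, false, false));
    return 0;
}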
*/ - DRM_DEBUG_DRIVER("No preferred mode found\n"); + drm_dbg_driver(dev, "No preferred mode found\n"); } else if (aconnector) { recalculate_timing = amdgpu_freesync_vid_mode && is_freesync_video_mode(&mode, aconnector); @@ -6289,10 +6967,12 @@ create_stream_for_sink(struct drm_connector *connector, stream->timing = *aconnector->timing_requested; } +#if defined(CONFIG_DRM_AMD_DC_FP) /* SST DSC determination policy */ update_dsc_caps(aconnector, sink, stream, &dsc_caps); if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps); +#endif update_stream_scaling_settings(&mode, dm_state, stream); @@ -6305,27 +6985,31 @@ create_stream_for_sink(struct drm_connector *connector, if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); - else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || - stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || - stream->signal == SIGNAL_TYPE_EDP) { + + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || + stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || + stream->signal == SIGNAL_TYPE_EDP) { + const struct dc_edid_caps *edid_caps; + unsigned int disable_colorimetry = 0; + + if (aconnector->dc_sink) { + edid_caps = &aconnector->dc_sink->edid_caps; + disable_colorimetry = edid_caps->panel_patch.disable_colorimetry; + } + // // should decide stream support vsc sdp colorimetry capability // before building vsc info packet // - stream->use_vsc_sdp_for_colorimetry = false; - if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - stream->use_vsc_sdp_for_colorimetry = - aconnector->dc_sink->is_vsc_sdp_colorimetry_supported; - } else { - if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) - stream->use_vsc_sdp_for_colorimetry = true; - } - if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) + stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && + stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED && + !disable_colorimetry; + + if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) tf = TRANSFER_FUNC_GAMMA_22; mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); + aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; - if (stream->link->psr_settings.psr_feature_enabled) - aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; } finish: dc_sink_release(sink); @@ -6521,14 +7205,37 @@ static const struct attribute_group amdgpu_group = { .attrs = amdgpu_attrs }; +static bool +amdgpu_dm_should_create_sysfs(struct amdgpu_dm_connector *amdgpu_dm_connector) +{ + if (amdgpu_dm_abm_level >= 0) + return false; + + if (amdgpu_dm_connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) + return false; + + /* check for OLED panels */ + if (amdgpu_dm_connector->bl_idx >= 0) { + struct drm_device *drm = amdgpu_dm_connector->base.dev; + struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; + struct amdgpu_dm_backlight_caps *caps; + + caps = &dm->backlight_caps[amdgpu_dm_connector->bl_idx]; + if (caps->aux_support) + return false; + } + + return true; +} + static void amdgpu_dm_connector_unregister(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && - amdgpu_dm_abm_level < 0) + if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector)) 
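/*
 * Editor's sketch for the colorimetry hunk above: VSC SDP colorimetry is now
 * used only when the sink is DP 1.4+ (DPCD rev >= 0x14), advertises the DPRX
 * feature bit, and no panel quirk vetoes it. Standalone model with plain
 * parameters standing in for the dpcd_caps fields:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool use_vsc_sdp_for_colorimetry(uint8_t dpcd_rev,
                                        bool vsc_sdp_colorimetry_supported,
                                        bool quirk_disable_colorimetry)
{
    return dpcd_rev >= 0x14 &&
           vsc_sdp_colorimetry_supported &&
           !quirk_disable_colorimetry;
}

int main(void)
{
    printf("%d\n", use_vsc_sdp_for_colorimetry(0x14, true, false)); /* -> 1 */
    return 0;
}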
sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group); + cec_notifier_conn_unregister(amdgpu_dm_connector->notifier); drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); } @@ -6633,8 +7340,7 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector) to_amdgpu_dm_connector(connector); int r; - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && - amdgpu_dm_abm_level < 0) { + if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector)) { r = sysfs_create_group(&connector->kdev->kobj, &amdgpu_group); if (r) @@ -6663,32 +7369,31 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct dc_link *dc_link = aconnector->dc_link; struct dc_sink *dc_em_sink = aconnector->dc_em_sink; - struct edid *edid; + const struct drm_edid *drm_edid; struct i2c_adapter *ddc; + struct drm_device *dev = connector->dev; if (dc_link && dc_link->aux_mode) ddc = &aconnector->dm_dp_aux.aux.ddc; else ddc = &aconnector->i2c->base; - /* - * Note: drm_get_edid gets edid in the following order: - * 1) override EDID if set via edid_override debugfs, - * 2) firmware EDID if set via edid_firmware module parameter - * 3) regular DDC read. - */ - edid = drm_get_edid(connector, ddc); - if (!edid) { - DRM_ERROR("No EDID found on connector: %s.\n", connector->name); + drm_edid = drm_edid_read_ddc(connector, ddc); + drm_edid_connector_update(connector, drm_edid); + if (!drm_edid) { + drm_err(dev, "No EDID found on connector: %s.\n", connector->name); return; } - aconnector->edid = edid; - + aconnector->drm_edid = drm_edid; /* Update emulated (virtual) sink's EDID */ if (dc_em_sink && dc_link) { + // FIXME: Get rid of drm_edid_raw() + const struct edid *edid = drm_edid_raw(drm_edid); + memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps)); - memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH); + memmove(dc_em_sink->dc_edid.raw_edid, edid, + (edid->extensions + 1) * EDID_LENGTH); dm_helpers_parse_edid_caps( dc_link, &dc_em_sink->dc_edid, @@ -6723,31 +7428,28 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector) .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_VIRTUAL }; - struct edid *edid; + const struct drm_edid *drm_edid; + const struct edid *edid; struct i2c_adapter *ddc; - if (dc_link->aux_mode) + if (dc_link && dc_link->aux_mode) ddc = &aconnector->dm_dp_aux.aux.ddc; else ddc = &aconnector->i2c->base; - /* - * Note: drm_get_edid gets edid in the following order: - * 1) override EDID if set via edid_override debugfs, - * 2) firmware EDID if set via edid_firmware module parameter - * 3) regular DDC read. - */ - edid = drm_get_edid(connector, ddc); - if (!edid) { - DRM_ERROR("No EDID found on connector: %s.\n", connector->name); + drm_edid = drm_edid_read_ddc(connector, ddc); + drm_edid_connector_update(connector, drm_edid); + if (!drm_edid) { + drm_err(connector->dev, "No EDID found on connector: %s.\n", connector->name); return; } - if (drm_detect_hdmi_monitor(edid)) + if (connector->display_info.is_hdmi) init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; - aconnector->edid = edid; + aconnector->drm_edid = drm_edid; + edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() aconnector->dc_em_sink = dc_link_add_remote_sink( aconnector->dc_link, (uint8_t *)edid, @@ -6758,7 +7460,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector) aconnector->dc_sink = aconnector->dc_link->local_sink ? 
aconnector->dc_link->local_sink : aconnector->dc_em_sink; - dc_sink_retain(aconnector->dc_sink); + if (aconnector->dc_sink) + dc_sink_retain(aconnector->dc_sink); } } @@ -6792,7 +7495,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc, if (!dc_plane_state) goto cleanup; - dc_state = dc_state_create(dc); + dc_state = dc_state_create(dc, NULL); if (!dc_state) goto cleanup; @@ -6842,31 +7545,44 @@ cleanup: } struct dc_stream_state * -create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, +create_validate_stream_for_sink(struct drm_connector *connector, const struct drm_display_mode *drm_mode, const struct dm_connector_state *dm_state, const struct dc_stream_state *old_stream) { - struct drm_connector *connector = &aconnector->base; + struct amdgpu_dm_connector *aconnector = NULL; struct amdgpu_device *adev = drm_to_adev(connector->dev); struct dc_stream_state *stream; const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8; enum dc_status dc_result = DC_OK; + uint8_t bpc_limit = 6; + + if (!dm_state) + return NULL; + + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + aconnector = to_amdgpu_dm_connector(connector); + + if (aconnector && + (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A || + aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)) + bpc_limit = 8; do { stream = create_stream_for_sink(connector, drm_mode, dm_state, old_stream, requested_bpc); if (stream == NULL) { - DRM_ERROR("Failed to create stream for sink!\n"); + drm_err(adev_to_drm(adev), "Failed to create stream for sink!\n"); break; } - if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + dc_result = dc_validate_stream(adev->dm.dc, stream); + + if (!aconnector) /* writeback connector */ return stream; - dc_result = dc_validate_stream(adev->dm.dc, stream); if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream); @@ -6874,11 +7590,12 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, dc_result = dm_validate_stream_and_context(adev->dm.dc, stream); if (dc_result != DC_OK) { - DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n", + DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n", drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->clock, - dc_result, + dc_pixel_encoding_to_str(stream->timing.pixel_encoding), + dc_color_depth_to_str(stream->timing.display_color_depth), dc_status_to_str(dc_result)); dc_stream_release(stream); @@ -6886,13 +7603,16 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, requested_bpc -= 2; /* lower bpc to retry validation */ } - } while (stream == NULL && requested_bpc >= 6); + } while (stream == NULL && requested_bpc >= bpc_limit); - if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) { - DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n"); + if ((dc_result == DC_FAIL_ENC_VALIDATE || + dc_result == DC_EXCEED_DONGLE_CAP) && + !aconnector->force_yuv420_output) { + DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n", + __func__, __LINE__); aconnector->force_yuv420_output = true; - stream = create_validate_stream_for_sink(aconnector, drm_mode, + stream = create_validate_stream_for_sink(connector, drm_mode, dm_state, old_stream); aconnector->force_yuv420_output = false; } @@ -6901,12 
+7621,16 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, } enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { int result = MODE_ERROR; struct dc_sink *dc_sink; + struct drm_display_mode *test_mode; /* TODO: Unhardcode stream count */ struct dc_stream_state *stream; + /* we always have an amdgpu_dm_connector here since we got + * here via the amdgpu_dm_connector_helper_funcs + */ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || @@ -6925,15 +7649,20 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL && aconnector->base.force != DRM_FORCE_ON) { - DRM_ERROR("dc_sink is NULL!\n"); + drm_err(connector->dev, "dc_sink is NULL!\n"); goto fail; } - drm_mode_set_crtcinfo(mode, 0); + test_mode = drm_mode_duplicate(connector->dev, mode); + if (!test_mode) + goto fail; + + drm_mode_set_crtcinfo(test_mode, 0); - stream = create_validate_stream_for_sink(aconnector, mode, + stream = create_validate_stream_for_sink(connector, test_mode, to_dm_connector_state(connector->state), NULL); + drm_mode_destroy(connector->dev, test_mode); if (stream) { dc_stream_release(stream); result = MODE_OK; @@ -7181,7 +7910,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, struct amdgpu_dm_connector *aconnector; struct dm_connector_state *dm_conn_state; int i, j, ret; - int vcpi, pbn_div, pbn, slot_num = 0; + int vcpi, pbn_div, pbn = 0, slot_num = 0; for_each_new_connector_in_state(state, connector, new_con_state, i) { @@ -7221,7 +7950,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, } } - if (j == dc_state->stream_count) + if (j == dc_state->stream_count || pbn_div == 0) continue; slot_num = DIV_ROUND_UP(pbn, pbn_div); @@ -7430,16 +8159,16 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector) } static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (edid) { + if (drm_edid) { /* empty probed_modes */ INIT_LIST_HEAD(&connector->probed_modes); amdgpu_dm_connector->num_modes = - drm_add_edid_modes(connector, edid); + drm_edid_connector_add_modes(connector); /* sorting the probed modes before calling function * amdgpu_dm_get_native_mode() since EDID can have @@ -7453,10 +8182,10 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, amdgpu_dm_get_native_mode(connector); /* Freesync capabilities are reset by calling - * drm_add_edid_modes() and need to be + * drm_edid_connector_add_modes() and need to be * restored here. 
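/*
 * Editor's sketch of the validation retry loop in
 * create_validate_stream_for_sink() above: start from the requested bpc and
 * step down by 2 until validation passes or the floor is reached -- 8 for
 * HDMI signals and DP-to-HDMI converters, 6 otherwise. Standalone model with
 * a stub validate(); the real code may additionally retry with forced
 * YCbCr 4:2:0 on DC_FAIL_ENC_VALIDATE / DC_EXCEED_DONGLE_CAP:
 */
#include <stdbool.h>
#include <stdio.h>

static bool validate(int bpc)
{
    return bpc <= 8;        /* pretend 10 bpc exceeds the link budget */
}

static int pick_bpc(int requested_bpc, bool hdmi_like)
{
    int floor = hdmi_like ? 8 : 6;
    int bpc;

    for (bpc = requested_bpc; bpc >= floor; bpc -= 2)
        if (validate(bpc))
            return bpc;
    return -1;              /* caller may fall back to YCbCr 4:2:0 */
}

int main(void)
{
    printf("%d\n", pick_bpc(10, false));    /* -> 8 */
    return 0;
}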
*/ - amdgpu_dm_update_freesync_caps(connector, edid); + amdgpu_dm_update_freesync_caps(connector, drm_edid); } else { amdgpu_dm_connector->num_modes = 0; } @@ -7552,12 +8281,12 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) } static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (!(amdgpu_freesync_vid_mode && edid)) + if (!(amdgpu_freesync_vid_mode && drm_edid)) return; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) @@ -7570,23 +8299,24 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct drm_encoder *encoder; - struct edid *edid = amdgpu_dm_connector->edid; + const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid; struct dc_link_settings *verified_link_cap = &amdgpu_dm_connector->dc_link->verified_link_cap; const struct dc *dc = amdgpu_dm_connector->dc_link->dc; encoder = amdgpu_dm_connector_to_encoder(connector); - if (!drm_edid_is_valid(edid)) { + if (!drm_edid) { amdgpu_dm_connector->num_modes = drm_add_modes_noedid(connector, 640, 480); if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) amdgpu_dm_connector->num_modes += drm_add_modes_noedid(connector, 1920, 1080); } else { - amdgpu_dm_connector_ddc_get_modes(connector, edid); - amdgpu_dm_connector_add_common_modes(encoder, connector); - amdgpu_dm_connector_add_freesync_modes(connector, edid); + amdgpu_dm_connector_ddc_get_modes(connector, drm_edid); + if (encoder) + amdgpu_dm_connector_add_common_modes(encoder, connector); + amdgpu_dm_connector_add_freesync_modes(connector, drm_edid); } amdgpu_dm_fbc_init(connector); @@ -7658,6 +8388,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, dm->ddev->mode_config.scaling_mode_property, DRM_MODE_SCALE_NONE); + if (connector_type == DRM_MODE_CONNECTOR_HDMIA + || (connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root)) + drm_connector_attach_broadcast_rgb_property(&aconnector->base); + drm_object_attach_property(&aconnector->base.base, adev->mode_info.underscan_property, UNDERSCAN_OFF); @@ -7710,7 +8444,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, int i; int result = -EIO; - if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported) + if (!ddc_service->ddc_pin) return result; cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); @@ -7729,11 +8463,18 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, cmd.payloads[i].data = msgs[i].buf; } - if (dc_submit_i2c( - ddc_service->ctx->dc, - ddc_service->link->link_index, - &cmd)) - result = num; + if (i2c->oem) { + if (dc_submit_i2c_oem( + ddc_service->ctx->dc, + &cmd)) + result = num; + } else { + if (dc_submit_i2c( + ddc_service->ctx->dc, + ddc_service->link->link_index, + &cmd)) + result = num; + } kfree(cmd.payloads); return result; @@ -7750,9 +8491,7 @@ static const struct i2c_algorithm amdgpu_dm_i2c_algo = { }; static struct amdgpu_i2c_adapter * -create_i2c(struct ddc_service *ddc_service, - int link_index, - int *res) +create_i2c(struct ddc_service *ddc_service, bool oem) { struct amdgpu_device *adev = ddc_service->ctx->driver_context; struct amdgpu_i2c_adapter *i2c; @@ -7763,13 +8502,39 @@ create_i2c(struct ddc_service *ddc_service, i2c->base.owner = 
THIS_MODULE; i2c->base.dev.parent = &adev->pdev->dev; i2c->base.algo = &amdgpu_dm_i2c_algo; - snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); + if (oem) + snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c OEM bus"); + else + snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", + ddc_service->link->link_index); i2c_set_adapdata(&i2c->base, i2c); i2c->ddc_service = ddc_service; + i2c->oem = oem; return i2c; } +int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector) +{ + struct cec_connector_info conn_info; + struct drm_device *ddev = aconnector->base.dev; + struct device *hdmi_dev = ddev->dev; + + if (amdgpu_dc_debug_mask & DC_DISABLE_HDMI_CEC) { + drm_info(ddev, "HDMI-CEC feature masked\n"); + return -EINVAL; + } + + cec_fill_conn_info_from_drm(&conn_info, &aconnector->base); + aconnector->notifier = + cec_notifier_conn_register(hdmi_dev, NULL, &conn_info); + if (!aconnector->notifier) { + drm_err(ddev, "Failed to create cec notifier\n"); + return -ENOMEM; + } + + return 0; +} /* * Note: this function assumes that dc_link_detect() was called for the @@ -7790,9 +8555,9 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, link->priv = aconnector; - i2c = create_i2c(link->ddc, link->link_index, &res); + i2c = create_i2c(link->ddc, false); if (!i2c) { - DRM_ERROR("Failed to create i2c adapter data\n"); + drm_err(adev_to_drm(dm->adev), "Failed to create i2c adapter data\n"); return -ENOMEM; } @@ -7800,7 +8565,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, res = i2c_add_adapter(&i2c->base); if (res) { - DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); + drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index); goto out_free; } @@ -7814,7 +8579,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, &i2c->base); if (res) { - DRM_ERROR("connector_init failed\n"); + drm_err(adev_to_drm(dm->adev), "connector_init failed\n"); aconnector->connector_id = -1; goto out_free; } @@ -7833,6 +8598,10 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, drm_connector_attach_encoder( &aconnector->base, &aencoder->base); + if (connector_type == DRM_MODE_CONNECTOR_HDMIA || + connector_type == DRM_MODE_CONNECTOR_HDMIB) + amdgpu_dm_initialize_hdmi_connector(aconnector); + if (connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector_type == DRM_MODE_CONNECTOR_eDP) amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); @@ -7890,42 +8659,61 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev, static void manage_dm_interrupts(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc, - bool enable) + struct dm_crtc_state *acrtc_state) { - /* - * We have no guarantee that the frontend index maps to the same - * backend index - some even map to more than one. - * - * TODO: Use a different interrupt or check DC itself for the mapping. 
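/*
 * Editor's sketch of the single-constructor/dual-bus pattern from the
 * create_i2c()/amdgpu_dm_i2c_xfer() hunks above: one boolean selects the
 * adapter name at creation time and the submission path at transfer time.
 * Stub types, not the kernel's i2c structures:
 */
#include <stdbool.h>
#include <stdio.h>

struct adapter {
    char name[48];
    bool oem;
};

static void adapter_init(struct adapter *a, bool oem, int link_index)
{
    a->oem = oem;
    if (oem)
        snprintf(a->name, sizeof(a->name), "i2c OEM bus");
    else
        snprintf(a->name, sizeof(a->name), "i2c hw bus %d", link_index);
}

static int adapter_xfer(const struct adapter *a)
{
    /* Route to the OEM submit path only when flagged at creation. */
    return a->oem ? 0 /* submit_oem() */ : 1 /* submit(link_index) */;
}

int main(void)
{
    struct adapter a;

    adapter_init(&a, false, 3);
    printf("%s xfer=%d\n", a.name, adapter_xfer(&a));
    return 0;
}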
- */ - int irq_type = - amdgpu_display_crtc_idx_to_irq_type( - adev, - acrtc->crtc_id); - - if (enable) { - drm_crtc_vblank_on(&acrtc->base); - amdgpu_irq_get( - adev, - &adev->pageflip_irq, - irq_type); -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - amdgpu_irq_get( - adev, - &adev->vline0_irq, - irq_type); -#endif + struct drm_vblank_crtc_config config = {0}; + struct dc_crtc_timing *timing; + int offdelay; + + if (acrtc_state) { + timing = &acrtc_state->stream->timing; + + /* + * Depending on when the HW latching event of double-buffered + * registers happen relative to the PSR SDP deadline, and how + * bad the Panel clock has drifted since the last ALPM off + * event, there can be up to 3 frames of delay between sending + * the PSR exit cmd to DMUB fw, and when the panel starts + * displaying live frames. + * + * We can set: + * + * 20/100 * offdelay_ms = 3_frames_ms + * => offdelay_ms = 5 * 3_frames_ms + * + * This ensures that `3_frames_ms` will only be experienced as a + * 20% delay on top how long the display has been static, and + * thus make the delay less perceivable. + */ + if (acrtc_state->stream->link->psr_settings.psr_version < + DC_PSR_VERSION_UNSUPPORTED) { + offdelay = DIV64_U64_ROUND_UP((u64)5 * 3 * 10 * + timing->v_total * + timing->h_total, + timing->pix_clk_100hz); + config.offdelay_ms = offdelay ?: 30; + } else if (amdgpu_ip_version(adev, DCE_HWIP, 0) < + IP_VERSION(3, 5, 0) || + !(adev->flags & AMD_IS_APU)) { + /* + * Older HW and DGPU have issues with instant off; + * use a 2 frame offdelay. + */ + offdelay = DIV64_U64_ROUND_UP((u64)20 * + timing->v_total * + timing->h_total, + timing->pix_clk_100hz); + + config.offdelay_ms = offdelay ?: 30; + } else { + /* offdelay_ms = 0 will never disable vblank */ + config.offdelay_ms = 1; + config.disable_immediate = true; + } + + drm_crtc_vblank_on_config(&acrtc->base, + &config); } else { -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - amdgpu_irq_put( - adev, - &adev->vline0_irq, - irq_type); -#endif - amdgpu_irq_put( - adev, - &adev->pageflip_irq, - irq_type); drm_crtc_vblank_off(&acrtc->base); } } @@ -8281,7 +9069,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, */ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0); WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0); - DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", + drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR off->on: Get vblank ref\n", __func__, new_state->base.crtc->base.id); } else if (old_vrr_active && !new_vrr_active) { /* Transition VRR active -> inactive: @@ -8289,7 +9077,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, */ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0); drm_crtc_vblank_put(new_state->base.crtc); - DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", + drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR on->off: Drop vblank ref\n", __func__, new_state->base.crtc->base.id); } } @@ -8316,6 +9104,130 @@ static inline uint32_t get_mem_type(struct drm_framebuffer *fb) return abo->tbo.resource ? abo->tbo.resource->mem_type : 0; } +static void amdgpu_dm_update_cursor(struct drm_plane *plane, + struct drm_plane_state *old_plane_state, + struct dc_stream_update *update) +{ + struct amdgpu_device *adev = drm_to_adev(plane->dev); + struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); + struct drm_crtc *crtc = afb ? 
plane->state->crtc : old_plane_state->crtc; + struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + uint64_t address = afb ? afb->address : 0; + struct dc_cursor_position position = {0}; + struct dc_cursor_attributes attributes; + int ret; + + if (!plane->state->fb && !old_plane_state->fb) + return; + + drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n", + amdgpu_crtc->crtc_id, plane->state->crtc_w, + plane->state->crtc_h); + + ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position); + if (ret) + return; + + if (!position.enable) { + /* turn off cursor */ + if (crtc_state && crtc_state->stream) { + dc_stream_set_cursor_position(crtc_state->stream, + &position); + update->cursor_position = &crtc_state->stream->cursor_position; + } + return; + } + + amdgpu_crtc->cursor_width = plane->state->crtc_w; + amdgpu_crtc->cursor_height = plane->state->crtc_h; + + memset(&attributes, 0, sizeof(attributes)); + attributes.address.high_part = upper_32_bits(address); + attributes.address.low_part = lower_32_bits(address); + attributes.width = plane->state->crtc_w; + attributes.height = plane->state->crtc_h; + attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; + attributes.rotation_angle = 0; + attributes.attribute_flags.value = 0; + + /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM + * legacy gamma setup. + */ + if (crtc_state->cm_is_degamma_srgb && + adev->dm.dc->caps.color.dpp.gamma_corr) + attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; + + if (afb) + attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; + + if (crtc_state->stream) { + if (!dc_stream_set_cursor_attributes(crtc_state->stream, + &attributes)) + drm_err(adev_to_drm(adev), "DC failed to set cursor attributes\n"); + + update->cursor_attributes = &crtc_state->stream->cursor_attributes; + + if (!dc_stream_set_cursor_position(crtc_state->stream, + &position)) + drm_err(adev_to_drm(adev), "DC failed to set cursor position\n"); + + update->cursor_position = &crtc_state->stream->cursor_position; + } +} + +static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach, + const struct dm_crtc_state *acrtc_state, + const u64 current_ts) +{ + struct psr_settings *psr = &acrtc_state->stream->link->psr_settings; + struct replay_settings *pr = &acrtc_state->stream->link->replay_settings; + struct amdgpu_dm_connector *aconn = + (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; + bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state); + + if (acrtc_state->update_type > UPDATE_TYPE_FAST) { + if (pr->config.replay_supported && !pr->replay_feature_enabled) + amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn); + else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED && + !psr->psr_feature_enabled) + if (!aconn->disallow_edp_enter_psr) + amdgpu_dm_link_setup_psr(acrtc_state->stream); + } + + /* Decrement skip count when SR is enabled and we're doing fast updates. */ + if (acrtc_state->update_type == UPDATE_TYPE_FAST && + (psr->psr_feature_enabled || pr->config.replay_supported)) { + if (aconn->sr_skip_count > 0) + aconn->sr_skip_count--; + + /* Allow SR when skip count is 0. */ + acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count; + + /* + * If sink supports PSR SU/Panel Replay, there is no need to rely on + * a vblank event disable request to enable PSR/RP. 
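/*
 * Editor's worked example for the vblank offdelay derivation in
 * manage_dm_interrupts() earlier. One frame takes v_total * h_total pixel
 * periods; with the pixel clock in 100 Hz units, frame_ms =
 * v_total * h_total * 10 / pix_clk_100hz, so offdelay_ms = 5 * 3 * frame_ms
 * becomes the 5*3*10 expression in the hunk. For 1080p60 (v_total 1125,
 * h_total 2200, 148.5 MHz => 1485000 in 100 Hz units) this gives 250 ms:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t v_total = 1125, h_total = 2200, pix_clk_100hz = 1485000;
    /* 3 frames of PSR-exit latency amortized to a 20% overhead => x5,
     * rounded up like DIV64_U64_ROUND_UP(). */
    uint64_t offdelay_ms =
        (5 * 3 * 10 * v_total * h_total + pix_clk_100hz - 1) / pix_clk_100hz;

    printf("offdelay_ms = %llu\n", (unsigned long long)offdelay_ms); /* 250 */
    return 0;
}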
PSR SU/RP + * can be enabled immediately once OS demonstrates an + * adequate number of fast atomic commits to notify KMD + * of update events. See `vblank_control_worker()`. + */ + if (!vrr_active && + acrtc_attach->dm_irq_params.allow_sr_entry && +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY + !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && +#endif + (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) { + if (pr->replay_feature_enabled && !pr->replay_allow_active) + amdgpu_dm_replay_enable(acrtc_state->stream, true); + if (psr->psr_version == DC_PSR_VERSION_SU_1 && + !psr->psr_allow_active && !aconn->disallow_edp_enter_psr) + amdgpu_dm_psr_enable(acrtc_state->stream); + } + } else { + acrtc_attach->dm_irq_params.allow_sr_entry = false; + } +} + static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct drm_device *dev, struct amdgpu_display_manager *dm, @@ -8339,6 +9251,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bool cursor_update = false; bool pflip_present = false; bool dirty_rects_changed = false; + bool updated_planes_and_streams = false; struct { struct dc_surface_update surface_updates[MAX_SURFACES]; struct dc_plane_info plane_infos[MAX_SURFACES]; @@ -8358,8 +9271,24 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * Disable the cursor first if we're disabling all the planes. * It'll remain on the screen after the planes are re-enabled * if we don't. + * + * If the cursor is transitioning from native to overlay mode, the + * native cursor needs to be disabled first. */ - if (acrtc_state->active_planes == 0) + if (acrtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE && + dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) { + struct dc_cursor_position cursor_position = {0}; + + if (!dc_stream_set_cursor_position(acrtc_state->stream, + &cursor_position)) + drm_err(dev, "DC failed to disable native cursor\n"); + + bundle->stream_update.cursor_position = + &acrtc_state->stream->cursor_position; + } + + if (acrtc_state->active_planes == 0 && + dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) amdgpu_dm_commit_cursors(state); /* update planes when needed */ @@ -8373,10 +9302,14 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); /* Cursor plane is handled after stream updates */ - if (plane->type == DRM_PLANE_TYPE_CURSOR) { + if (plane->type == DRM_PLANE_TYPE_CURSOR && + acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) { if ((fb && crtc == pcrtc) || - (old_plane_state->fb && old_plane_state->crtc == pcrtc)) + (old_plane_state->fb && old_plane_state->crtc == pcrtc)) { cursor_update = true; + if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) != 0) + amdgpu_dm_update_cursor(plane, old_plane_state, &bundle->stream_update); + } continue; } @@ -8394,13 +9327,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].surface = dc_plane; if (new_pcrtc_state->color_mgmt_changed) { - bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; - bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; + bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction; + bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func; bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; bundle->surface_updates[planes_count].hdr_mult = 
dc_plane->hdr_mult; - bundle->surface_updates[planes_count].func_shaper = dc_plane->in_shaper_func; - bundle->surface_updates[planes_count].lut3d_func = dc_plane->lut3d_func; - bundle->surface_updates[planes_count].blend_tf = dc_plane->blend_tf; + bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func; + bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func; + bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf; } amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state, @@ -8423,7 +9356,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, afb->tiling_flags, &bundle->plane_infos[planes_count], &bundle->flip_addrs[planes_count].address, - afb->tmz_surface, false); + afb->tmz_surface); drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n", new_plane_state->plane->index, @@ -8448,7 +9381,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * during the PSR-SU was disabled. */ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && - acrtc_attach->dm_irq_params.allow_psr_entry && + acrtc_attach->dm_irq_params.allow_sr_entry && #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && #endif @@ -8457,7 +9390,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns = timestamp_ns; if (acrtc_state->stream->link->psr_settings.psr_allow_active) - amdgpu_dm_psr_disable(acrtc_state->stream); + amdgpu_dm_psr_disable(acrtc_state->stream, true); mutex_unlock(&dm->dc_lock); } } @@ -8488,7 +9421,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].surface = dc_plane; if (!bundle->surface_updates[planes_count].surface) { - DRM_ERROR("No surface for CRTC: id=%d\n", + drm_err(dev, "No surface for CRTC: id=%d\n", acrtc_attach->crtc_id); continue; } @@ -8575,15 +9508,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->stream_update.vrr_infopacket = &acrtc_state->stream->vrr_infopacket; } - } else if (cursor_update && acrtc_state->active_planes > 0 && - acrtc_attach->base.state->event) { - drm_crtc_vblank_get(pcrtc); - + } else if (cursor_update && acrtc_state->active_planes > 0) { spin_lock_irqsave(&pcrtc->dev->event_lock, flags); - - acrtc_attach->event = acrtc_attach->base.state->event; - acrtc_attach->base.state->event = NULL; - + if (acrtc_attach->base.state->event) { + drm_crtc_vblank_get(pcrtc); + acrtc_attach->event = acrtc_attach->base.state->event; + acrtc_attach->base.state->event = NULL; + } spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } @@ -8613,7 +9544,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->stream_update.output_csc_transform = &acrtc_state->stream->csc_color_matrix; bundle->stream_update.out_transfer_func = - acrtc_state->stream->out_transfer_func; + &acrtc_state->stream->out_transfer_func; bundle->stream_update.lut3d_func = (struct dc_3dlut *) acrtc_state->stream->lut3d_func; bundle->stream_update.func_shaper = @@ -8625,9 +9556,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->stream_update.abm_level = &acrtc_state->abm_level; mutex_lock(&dm->dc_lock); - if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && - acrtc_state->stream->link->psr_settings.psr_allow_active) - amdgpu_dm_psr_disable(acrtc_state->stream); + if ((acrtc_state->update_type > UPDATE_TYPE_FAST) || 
vrr_active) { + if (acrtc_state->stream->link->replay_settings.replay_allow_active) + amdgpu_dm_replay_disable(acrtc_state->stream); + if (acrtc_state->stream->link->psr_settings.psr_allow_active) + amdgpu_dm_psr_disable(acrtc_state->stream, true); + } mutex_unlock(&dm->dc_lock); /* @@ -8649,6 +9583,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, acrtc_state->stream, &bundle->stream_update, bundle->surface_updates); + updated_planes_and_streams = true; /** * Enable or disable the interrupts on the backend. @@ -8667,57 +9602,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, dm_update_pflip_irq_state(drm_to_adev(dev), acrtc_attach); - if (acrtc_state->update_type > UPDATE_TYPE_FAST) { - if (acrtc_state->stream->link->replay_settings.config.replay_supported && - !acrtc_state->stream->link->replay_settings.replay_feature_enabled) { - struct amdgpu_dm_connector *aconn = - (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; - amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn); - } else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && - !acrtc_state->stream->link->psr_settings.psr_feature_enabled) { - - struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *) - acrtc_state->stream->dm_stream_context; - - if (!aconn->disallow_edp_enter_psr) - amdgpu_dm_link_setup_psr(acrtc_state->stream); - } - } - - /* Decrement skip count when PSR is enabled and we're doing fast updates. */ - if (acrtc_state->update_type == UPDATE_TYPE_FAST && - acrtc_state->stream->link->psr_settings.psr_feature_enabled) { - struct amdgpu_dm_connector *aconn = - (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; - - if (aconn->psr_skip_count > 0) - aconn->psr_skip_count--; - - /* Allow PSR when skip count is 0. */ - acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count; - - /* - * If sink supports PSR SU, there is no need to rely on - * a vblank event disable request to enable PSR. PSR SU - * can be enabled immediately once OS demonstrates an - * adequate number of fast atomic commits to notify KMD - * of update events. See `vblank_control_worker()`. - */ - if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && - acrtc_attach->dm_irq_params.allow_psr_entry && -#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY - !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && -#endif - !acrtc_state->stream->link->psr_settings.psr_allow_active && - !aconn->disallow_edp_enter_psr && - (timestamp_ns - - acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) > - 500000000) - amdgpu_dm_psr_enable(acrtc_state->stream); - } else { - acrtc_attach->dm_irq_params.allow_psr_entry = false; - } - + amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns); mutex_unlock(&dm->dc_lock); } @@ -8726,7 +9611,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * This avoids redundant programming in the case where we're going * to be disabling a single plane - those pipes are being disabled. 
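/*
 * Editor's sketch of the self-refresh entry gate that
 * amdgpu_dm_enable_self_refresh() (above) factors out of the removed inline
 * block: fast updates decrement a per-connector skip count and PSR/Replay
 * entry is allowed only once it reaches zero; any heavier update resets
 * eligibility. Standalone model with stub fields:
 */
#include <stdbool.h>
#include <stdio.h>

struct sr_gate {
    int skip_count;         /* seeded with AMDGPU_DM_PSR_ENTRY_DELAY */
    bool allow_sr_entry;
};

static void sr_gate_update(struct sr_gate *g, bool fast_update, bool sr_supported)
{
    if (fast_update && sr_supported) {
        if (g->skip_count > 0)
            g->skip_count--;
        g->allow_sr_entry = !g->skip_count;
    } else {
        g->allow_sr_entry = false;
    }
}

int main(void)
{
    struct sr_gate g = { 3, false };

    for (int i = 0; i < 3; i++)
        sr_gate_update(&g, true, true);
    printf("allow=%d skip=%d\n", g.allow_sr_entry, g.skip_count); /* 1 0 */
    return 0;
}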
*/ - if (acrtc_state->active_planes) + if (acrtc_state->active_planes && + (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, DCE_HWIP, 0) == 0) && + acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) amdgpu_dm_commit_cursors(state); cleanup: @@ -8764,10 +9651,10 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev, if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) continue; +notify: if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) continue; -notify: aconnector = to_amdgpu_dm_connector(connector); mutex_lock(&adev->dm.audio_lock); @@ -8847,6 +9734,8 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, struct drm_connector *connector; bool mode_set_reset_required = false; u32 i; + struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; + bool set_backlight_level = false; /* Disable writeback */ for_each_old_connector_in_state(state, connector, old_con_state, i) { @@ -8866,7 +9755,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, if (acrtc) old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); - if (!acrtc->wb_enabled) + if (!acrtc || !acrtc->wb_enabled) continue; dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); @@ -8884,7 +9773,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, if (old_crtc_state->active && (!new_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))) { - manage_dm_interrupts(adev, acrtc, false); + manage_dm_interrupts(adev, acrtc, NULL); dc_stream_release(dm_old_crtc_state->stream); } } @@ -8914,7 +9803,8 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, memset(&position, 0, sizeof(position)); mutex_lock(&dm->dc_lock); - dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position); + dc_exit_ips_for_hw_access(dm->dc); + dc_stream_program_cursor_position(dm_old_crtc_state->stream, &position); mutex_unlock(&dm->dc_lock); } @@ -8930,7 +9820,9 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) { - DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc); + drm_dbg_atomic(dev, + "Atomic commit: SET crtc id %d: [%p]\n", + acrtc->crtc_id, acrtc); if (!dm_new_crtc_state->stream) { /* @@ -8948,8 +9840,9 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, * have a sink to keep the pipe running so that * hw state is consistent with the sw state */ - DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", - __func__, acrtc->base.base.id); + drm_dbg_atomic(dev, + "Failed to create new stream for crtc %d\n", + acrtc->base.base.id); continue; } @@ -8962,8 +9855,11 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, acrtc->hw_mode = new_crtc_state->mode; crtc->hwmode = new_crtc_state->mode; mode_set_reset_required = true; + set_backlight_level = true; } else if (modereset_required(new_crtc_state)) { - DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); + drm_dbg_atomic(dev, + "Atomic commit: RESET. crtc id %d:[%p]\n", + acrtc->crtc_id, acrtc); /* i.e. 
reset mode */ if (dm_old_crtc_state->stream) remove_stream(adev, acrtc, dm_old_crtc_state->stream); @@ -8983,10 +9879,11 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, dm_enable_per_frame_crtc_master_sync(dc_state); mutex_lock(&dm->dc_lock); - WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); + dc_exit_ips_for_hw_access(dm->dc); + WARN_ON(!dc_commit_streams(dm->dc, ¶ms)); /* Allow idle optimization when vblank count is 0 for display off */ - if (dm->active_vblank_irq_count == 0) + if ((dm->active_vblank_irq_count == 0) && amdgpu_dm_is_headless(dm->adev)) dc_allow_idle_optimizations(dm->dc, true); mutex_unlock(&dm->dc_lock); @@ -9010,6 +9907,19 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, acrtc->otg_inst = status->primary_otg_inst; } } + + /* During boot up and resume the DC layer will reset the panel brightness + * to fix a flicker issue. + * It will cause the dm->actual_brightness is not the current panel brightness + * level. (the dm->brightness is the correct panel level) + * So we set the backlight level with dm->brightness value after set mode + */ + if (set_backlight_level) { + for (i = 0; i < dm->num_of_edps; i++) { + if (dm->backlight_dev[i]) + amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); + } + } } static void dm_set_writeback(struct amdgpu_display_manager *dm, @@ -9027,20 +9937,20 @@ static void dm_set_writeback(struct amdgpu_display_manager *dm, wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL); if (!wb_info) { - DRM_ERROR("Failed to allocate wb_info\n"); + drm_err(adev_to_drm(adev), "Failed to allocate wb_info\n"); return; } acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc); if (!acrtc) { - DRM_ERROR("no amdgpu_crtc found\n"); + drm_err(adev_to_drm(adev), "no amdgpu_crtc found\n"); kfree(wb_info); return; } afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb); if (!afb) { - DRM_ERROR("No amdgpu_framebuffer found\n"); + drm_err(adev_to_drm(adev), "No amdgpu_framebuffer found\n"); kfree(wb_info); return; } @@ -9138,9 +10048,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) trace_amdgpu_dm_atomic_commit_tail_begin(state); - if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed) - dc_allow_idle_optimizations(dm->dc, false); - drm_atomic_helper_update_legacy_modeset_state(dev, state); drm_dp_mst_atomic_wait_for_dependencies(state); @@ -9264,11 +10171,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) enable_encryption = true; - DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); + drm_info(adev_to_drm(adev), "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); - hdcp_update_display( - adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, - new_con_state->hdcp_content_type, enable_encryption); + if (aconnector->dc_link) + hdcp_update_display( + adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, + new_con_state->hdcp_content_type, enable_encryption); } } @@ -9281,7 +10189,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct dc_stream_update stream_update; struct dc_info_packet hdr_packet; struct dc_stream_status *status = NULL; - bool abm_changed, hdr_changed, scaling_changed; + bool abm_changed, hdr_changed, scaling_changed, output_color_space_changed = false; memset(&stream_update, 0, sizeof(stream_update)); @@ -9300,13 
+10208,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) scaling_changed = is_scaling_state_different(dm_new_con_state, dm_old_con_state); + if ((new_con_state->hdmi.broadcast_rgb != old_con_state->hdmi.broadcast_rgb) && + (dm_old_crtc_state->stream->output_color_space != + get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state))) + output_color_space_changed = true; + abm_changed = dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level; hdr_changed = !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state); - if (!scaling_changed && !abm_changed && !hdr_changed) + if (!scaling_changed && !abm_changed && !hdr_changed && !output_color_space_changed) continue; stream_update.stream = dm_new_crtc_state->stream; @@ -9318,6 +10231,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) stream_update.dst = dm_new_crtc_state->stream->dst; } + if (output_color_space_changed) { + dm_new_crtc_state->stream->output_color_space + = get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state); + + stream_update.output_color_space = &dm_new_crtc_state->stream->output_color_space; + } + if (abm_changed) { dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; @@ -9343,14 +10263,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) */ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); if (!dummy_updates) { - DRM_ERROR("Failed to allocate memory for dummy_updates.\n"); + drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n"); continue; } for (j = 0; j < status->plane_count; j++) dummy_updates[j].surface = status->plane_states[0]; + sort(dummy_updates, status->plane_count, + sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL); mutex_lock(&dm->dc_lock); + dc_exit_ips_for_hw_access(dm->dc); dc_update_planes_and_stream(dm->dc, dummy_updates, status->plane_count, @@ -9392,7 +10315,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_crtc_needs_modeset(new_crtc_state))) { dc_stream_retain(dm_new_crtc_state->stream); acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; - manage_dm_interrupts(adev, acrtc, true); + manage_dm_interrupts(adev, acrtc, dm_new_crtc_state); } /* Handle vrr on->off / off->on transitions */ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state); @@ -9408,20 +10331,25 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) if (amdgpu_dm_crc_window_is_activated(crtc)) { + uint8_t cnt; spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); - acrtc->dm_irq_params.window_param.update_win = true; - - /** - * It takes 2 frames for HW to stably generate CRC when - * resuming from suspend, so we set skip_frame_cnt 2. - */ - acrtc->dm_irq_params.window_param.skip_frame_cnt = 2; + for (cnt = 0; cnt < MAX_CRC_WINDOW_NUM; cnt++) { + if (acrtc->dm_irq_params.window_param[cnt].enable) { + acrtc->dm_irq_params.window_param[cnt].update_win = true; + + /** + * It takes 2 frames for HW to stably generate CRC when + * resuming from suspend, so we set skip_frame_cnt 2. 
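/*
 * Editor's sketch for the sort() call above: the dummy surface updates are
 * ordered with a layer-index comparator before being handed to DC, since DC
 * derives plane z-order from array position. Standalone model using qsort()
 * and a stand-in layer_index field; the ordering direction here is
 * illustrative only -- the driver's dm_plane_layer_index_cmp() defines the
 * real order:
 */
#include <stdio.h>
#include <stdlib.h>

struct surface_update { int layer_index; };

static int layer_index_cmp(const void *a, const void *b)
{
    const struct surface_update *ua = a, *ub = b;

    /* Small non-negative indices, so subtraction cannot overflow. */
    return ub->layer_index - ua->layer_index;
}

int main(void)
{
    struct surface_update u[] = { {0}, {2}, {1} };

    qsort(u, 3, sizeof(u[0]), layer_index_cmp);
    printf("%d %d %d\n", u[0].layer_index, u[1].layer_index, u[2].layer_index);
    return 0;
}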
+ */ + acrtc->dm_irq_params.window_param[cnt].skip_frame_cnt = 2; + } + } spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } #endif if (amdgpu_dm_crtc_configure_crc_source( crtc, dm_new_crtc_state, cur_crc_src)) - DRM_DEBUG_DRIVER("Failed to configure crc source"); + drm_dbg_atomic(dev, "Failed to configure crc source"); } } #endif @@ -9516,6 +10444,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) for (i = 0; i < crtc_disable_count; i++) pm_runtime_put_autosuspend(dev->dev); pm_runtime_mark_last_busy(dev->dev); + + trace_amdgpu_dm_atomic_commit_tail_finish(state); } static int dm_force_atomic_commit(struct drm_connector *connector) @@ -9541,16 +10471,20 @@ static int dm_force_atomic_commit(struct drm_connector *connector) */ conn_state = drm_atomic_get_connector_state(state, connector); - ret = PTR_ERR_OR_ZERO(conn_state); - if (ret) + /* Check for error in getting connector state */ + if (IS_ERR(conn_state)) { + ret = PTR_ERR(conn_state); goto out; + } /* Attach crtc to drm_atomic_state*/ crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); - ret = PTR_ERR_OR_ZERO(crtc_state); - if (ret) + /* Check for error in getting crtc state */ + if (IS_ERR(crtc_state)) { + ret = PTR_ERR(crtc_state); goto out; + } /* force a restore */ crtc_state->mode_changed = true; @@ -9558,9 +10492,11 @@ static int dm_force_atomic_commit(struct drm_connector *connector) /* Attach plane to drm_atomic_state */ plane_state = drm_atomic_get_plane_state(state, plane); - ret = PTR_ERR_OR_ZERO(plane_state); - if (ret) + /* Check for error in getting plane state */ + if (IS_ERR(plane_state)) { + ret = PTR_ERR(plane_state); goto out; + } /* Call commit internally with the state we just constructed */ ret = drm_atomic_commit(state); @@ -9568,7 +10504,7 @@ static int dm_force_atomic_commit(struct drm_connector *connector) out: drm_atomic_state_put(state); if (ret) - DRM_ERROR("Restoring old state failed with %i\n", ret); + drm_err(ddev, "Restoring old state failed with %i\n", ret); return ret; } @@ -9652,7 +10588,7 @@ static int do_aquire_global_lock(struct drm_device *dev, &commit->flip_done, 10*HZ); if (ret == 0) - DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n", + drm_err(dev, "[CRTC:%d:%s] hw_done or flip_done timed out\n", crtc->base.id, crtc->name); drm_crtc_commit_put(commit); @@ -9768,6 +10704,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, struct dm_atomic_state *dm_state = NULL; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; struct dc_stream_state *new_stream; + struct amdgpu_device *adev = dm->adev; int ret = 0; /* @@ -9797,8 +10734,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, drm_old_conn_state = drm_atomic_get_old_connector_state(state, connector); - if (IS_ERR(drm_new_conn_state)) { - ret = PTR_ERR_OR_ZERO(drm_new_conn_state); + if (WARN_ON(!drm_new_conn_state)) { + ret = -EINVAL; goto fail; } @@ -9808,7 +10745,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) goto skip_modeset; - new_stream = create_validate_stream_for_sink(aconnector, + new_stream = create_validate_stream_for_sink(connector, &new_crtc_state->mode, dm_new_conn_state, dm_old_crtc_state->stream); @@ -9821,7 +10758,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, */ if (!new_stream) { - DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", + drm_dbg_driver(adev_to_drm(adev), "%s: Failed to create new 
stream for crtc %d\n", __func__, acrtc->base.base.id); ret = -ENOMEM; goto fail; @@ -9859,7 +10796,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { new_crtc_state->mode_changed = false; - DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", + drm_dbg_driver(adev_to_drm(adev), "Mode change not required, setting mode_changed to %d", new_crtc_state->mode_changed); } } @@ -9897,7 +10834,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) { new_crtc_state->mode_changed = false; - DRM_DEBUG_DRIVER( + drm_dbg_driver(adev_to_drm(adev), "Mode change not required for front porch change, setting mode_changed to %d", new_crtc_state->mode_changed); @@ -9918,7 +10855,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, if (ret) goto fail; - DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", + drm_dbg_driver(adev_to_drm(adev), "Disabling DRM crtc: %d\n", crtc->base.id); /* i.e. reset mode */ @@ -10038,7 +10975,8 @@ static bool should_reset_plane(struct drm_atomic_state *state, { struct drm_plane *other; struct drm_plane_state *old_other_state, *new_other_state; - struct drm_crtc_state *new_crtc_state; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct dm_crtc_state *old_dm_crtc_state, *new_dm_crtc_state; struct amdgpu_device *adev = drm_to_adev(plane->dev); int i; @@ -10050,6 +10988,9 @@ static bool should_reset_plane(struct drm_atomic_state *state, state->allow_modeset) return true; + if (amdgpu_in_reset(adev) && state->allow_modeset) + return true; + /* Exit early if we know that we're adding or removing the plane. */ if (old_plane_state->crtc != new_plane_state->crtc) return true; @@ -10060,14 +11001,38 @@ static bool should_reset_plane(struct drm_atomic_state *state, new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); + old_crtc_state = + drm_atomic_get_old_crtc_state(state, old_plane_state->crtc); if (!new_crtc_state) return true; + /* + * A change in cursor mode means a new dc pipe needs to be acquired or + * released from the state + */ + old_dm_crtc_state = to_dm_crtc_state(old_crtc_state); + new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); + if (plane->type == DRM_PLANE_TYPE_CURSOR && + old_dm_crtc_state != NULL && + old_dm_crtc_state->cursor_mode != new_dm_crtc_state->cursor_mode) { + return true; + } + /* CRTC Degamma changes currently require us to recreate planes. */ if (new_crtc_state->color_mgmt_changed) return true; + /* + * On zpos change, planes need to be reordered by removing and re-adding + * them one by one to the dc state, in order of descending zpos. + * + * TODO: We can likely skip bandwidth validation if the only thing that + * changed about the plane was it'z z-ordering. + */ + if (old_plane_state->normalized_zpos != new_plane_state->normalized_zpos) + return true; + if (drm_atomic_crtc_needs_modeset(new_crtc_state)) return true; @@ -10199,12 +11164,14 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, * check tiling flags when the FB doesn't have a modifier. 
*/ if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) { - if (adev->family < AMDGPU_FAMILY_AI) { + if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) { + linear = AMDGPU_TILING_GET(afb->tiling_flags, GFX12_SWIZZLE_MODE) == 0; + } else if (adev->family >= AMDGPU_FAMILY_AI) { + linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; + } else { linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0; - } else { - linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; } if (!linear) { DRM_DEBUG_ATOMIC("Cursor FB not linear"); @@ -10215,6 +11182,68 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, return 0; } +/* + * Helper function for checking the cursor in native mode + */ +static int dm_check_native_cursor_state(struct drm_crtc *new_plane_crtc, + struct drm_plane *plane, + struct drm_plane_state *new_plane_state, + bool enable) +{ + + struct amdgpu_crtc *new_acrtc; + int ret; + + if (!enable || !new_plane_crtc || + drm_atomic_plane_disabling(plane->state, new_plane_state)) + return 0; + + new_acrtc = to_amdgpu_crtc(new_plane_crtc); + + if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) { + DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); + return -EINVAL; + } + + if (new_plane_state->fb) { + ret = dm_check_cursor_fb(new_acrtc, new_plane_state, + new_plane_state->fb); + if (ret) + return ret; + } + + return 0; +} + +static bool dm_should_update_native_cursor(struct drm_atomic_state *state, + struct drm_crtc *old_plane_crtc, + struct drm_crtc *new_plane_crtc, + bool enable) +{ + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; + + if (!enable) { + if (old_plane_crtc == NULL) + return true; + + old_crtc_state = drm_atomic_get_old_crtc_state( + state, old_plane_crtc); + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + + return dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE; + } else { + if (new_plane_crtc == NULL) + return true; + + new_crtc_state = drm_atomic_get_new_crtc_state( + state, new_plane_crtc); + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + + return dm_new_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE; + } +} + static int dm_update_plane_state(struct dc *dc, struct drm_atomic_state *state, struct drm_plane *plane, @@ -10230,8 +11259,7 @@ static int dm_update_plane_state(struct dc *dc, struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; - struct amdgpu_crtc *new_acrtc; - bool needs_reset; + bool needs_reset, update_native_cursor; int ret = 0; @@ -10240,24 +11268,16 @@ static int dm_update_plane_state(struct dc *dc, dm_new_plane_state = to_dm_plane_state(new_plane_state); dm_old_plane_state = to_dm_plane_state(old_plane_state); - if (plane->type == DRM_PLANE_TYPE_CURSOR) { - if (!enable || !new_plane_crtc || - drm_atomic_plane_disabling(plane->state, new_plane_state)) - return 0; - - new_acrtc = to_amdgpu_crtc(new_plane_crtc); + update_native_cursor = dm_should_update_native_cursor(state, + old_plane_crtc, + new_plane_crtc, + enable); - if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) { - DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); - return -EINVAL; - } - - if (new_plane_state->fb) { - ret = 
dm_check_cursor_fb(new_acrtc, new_plane_state, - new_plane_state->fb); - if (ret) - return ret; - } + if (plane->type == DRM_PLANE_TYPE_CURSOR && update_native_cursor) { + ret = dm_check_native_cursor_state(new_plane_crtc, plane, + new_plane_state, enable); + if (ret) + return ret; return 0; } @@ -10323,20 +11343,14 @@ static int dm_update_plane_state(struct dc *dc, ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state); if (ret) - return ret; + goto out; WARN_ON(dm_new_plane_state->dc_state); dc_new_plane_state = dc_create_plane_state(dc); - if (!dc_new_plane_state) - return -ENOMEM; - - /* Block top most plane from being a video plane */ - if (plane->type == DRM_PLANE_TYPE_OVERLAY) { - if (amdgpu_dm_plane_is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) - return -EINVAL; - - *is_top_most_overlay = false; + if (!dc_new_plane_state) { + ret = -ENOMEM; + goto out; } DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n", @@ -10349,13 +11363,13 @@ static int dm_update_plane_state(struct dc *dc, new_crtc_state); if (ret) { dc_plane_state_release(dc_new_plane_state); - return ret; + goto out; } ret = dm_atomic_get_state(state, &dm_state); if (ret) { dc_plane_state_release(dc_new_plane_state); - return ret; + goto out; } /* @@ -10372,7 +11386,8 @@ static int dm_update_plane_state(struct dc *dc, dm_state->context)) { dc_plane_state_release(dc_new_plane_state); - return -EINVAL; + ret = -EINVAL; + goto out; } dm_new_plane_state->dc_state = dc_new_plane_state; @@ -10387,6 +11402,16 @@ static int dm_update_plane_state(struct dc *dc, *lock_and_validation_needed = true; } +out: + /* If enabling cursor overlay failed, attempt fallback to native mode */ + if (enable && ret == -EINVAL && plane->type == DRM_PLANE_TYPE_CURSOR) { + ret = dm_check_native_cursor_state(new_plane_crtc, plane, + new_plane_state, enable); + if (ret) + return ret; + + dm_new_crtc_state->cursor_mode = DM_CURSOR_NATIVE_MODE; + } return ret; } @@ -10416,131 +11441,283 @@ dm_get_plane_scale(struct drm_plane_state *plane_state, int plane_src_w, plane_src_h; dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h); - *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w; - *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h; + *out_plane_scale_w = plane_src_w ? plane_state->crtc_w * 1000 / plane_src_w : 0; + *out_plane_scale_h = plane_src_h ? plane_state->crtc_h * 1000 / plane_src_h : 0; } -static int dm_check_crtc_cursor(struct drm_atomic_state *state, - struct drm_crtc *crtc, - struct drm_crtc_state *new_crtc_state) +/* + * The normalized_zpos value cannot be used by this iterator directly. It's only + * calculated for enabled planes, potentially causing normalized_zpos collisions + * between enabled/disabled planes in the atomic state. We need a unique value + * so that the iterator will not generate the same object twice, or loop + * indefinitely. 
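+ * Instead, key on the (zpos, plane object ID) pair, which is unique per plane in the state: each lookup strictly descends, so every plane is visited exactly once.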
+ */ +static inline struct __drm_planes_state *__get_next_zpos( + struct drm_atomic_state *state, + struct __drm_planes_state *prev) { - struct drm_plane *cursor = crtc->cursor, *plane, *underlying; - struct drm_plane_state *old_plane_state, *new_plane_state; - struct drm_plane_state *new_cursor_state, *new_underlying_state; + unsigned int highest_zpos = 0, prev_zpos = 256; + uint32_t highest_id = 0, prev_id = UINT_MAX; + struct drm_plane_state *new_plane_state; + struct drm_plane *plane; + int i, highest_i = -1; + + if (prev != NULL) { + prev_zpos = prev->new_state->zpos; + prev_id = prev->ptr->base.id; + } + + for_each_new_plane_in_state(state, plane, new_plane_state, i) { + /* Skip planes with higher zpos than the previously returned */ + if (new_plane_state->zpos > prev_zpos || + (new_plane_state->zpos == prev_zpos && + plane->base.id >= prev_id)) + continue; + + /* Save the index of the plane with highest zpos */ + if (new_plane_state->zpos > highest_zpos || + (new_plane_state->zpos == highest_zpos && + plane->base.id > highest_id)) { + highest_zpos = new_plane_state->zpos; + highest_id = plane->base.id; + highest_i = i; + } + } + + if (highest_i < 0) + return NULL; + + return &state->planes[highest_i]; +} + +/* + * Use the uniqueness of the plane's (zpos, drm obj ID) combination to iterate + * by descending zpos, as read from the new plane state. This is the same + * ordering as defined by drm_atomic_normalize_zpos(). + */ +#define for_each_oldnew_plane_in_descending_zpos(__state, plane, old_plane_state, new_plane_state) \ + for (struct __drm_planes_state *__i = __get_next_zpos((__state), NULL); \ + __i != NULL; __i = __get_next_zpos((__state), __i)) \ + for_each_if(((plane) = __i->ptr, \ + (void)(plane) /* Only to avoid unused-but-set-variable warning */, \ + (old_plane_state) = __i->old_state, \ + (new_plane_state) = __i->new_state, 1)) + +static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) +{ + struct drm_connector *connector; + struct drm_connector_state *conn_state, *old_conn_state; + struct amdgpu_dm_connector *aconnector = NULL; int i; - int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h; - bool any_relevant_change = false; - /* On DCE and DCN there is no dedicated hardware cursor plane. We get a - * cursor per pipe but it's going to inherit the scaling and - * positioning from the underlying pipe. Check the cursor plane's - * blending properties match the underlying planes'. + for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { + if (!conn_state->crtc) + conn_state = old_conn_state; + + if (conn_state->crtc != crtc) + continue; + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + aconnector = to_amdgpu_dm_connector(connector); + if (!aconnector->mst_output_port || !aconnector->mst_root) + aconnector = NULL; + else + break; + } + + if (!aconnector) + return 0; + + return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr); +} + +/** + * DOC: Cursor Modes - Native vs Overlay + * + * In native mode, the cursor uses an integrated cursor pipe within each DCN hw + * plane. It does not require a dedicated hw plane to enable, but it is + * subject to the same z-order and scaling as the hw plane. It also has format + * restrictions: an RGB cursor in native mode cannot be enabled within a non-RGB + * hw plane. + * + * In overlay mode, the cursor uses a separate DCN hw plane, and thus has its + * own scaling and z-pos. 
It also has no blending restrictions. This yields + * cursor behavior more akin to a DRM client's expectations. However, it does + * occupy an extra DCN plane, and therefore will only be used if a DCN plane is + * available. + */ + +/** + * dm_crtc_get_cursor_mode() - Determine the required cursor mode on crtc + * @adev: amdgpu device + * @state: DRM atomic state + * @dm_crtc_state: amdgpu state for the CRTC containing the cursor + * @cursor_mode: Returns the required cursor mode on dm_crtc_state + * + * Get whether the cursor should be enabled in native or overlay mode on + * the dm_crtc_state. + * + * The cursor should be enabled in overlay mode if there exists an underlying + * plane - on which the cursor may be blended - that is either YUV formatted or + * scaled differently from the cursor. + * + * Since zpos info is required, drm_atomic_normalize_zpos must be called before + * calling this function. + * + * Return: 0 on success, or an error code if getting the cursor plane state + * failed. + */ +static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev, + struct drm_atomic_state *state, + struct dm_crtc_state *dm_crtc_state, + enum amdgpu_dm_cursor_mode *cursor_mode) +{ + struct drm_plane_state *old_plane_state, *plane_state, *cursor_state; + struct drm_crtc_state *crtc_state = &dm_crtc_state->base; + struct drm_plane *plane; + bool consider_mode_change = false; + bool entire_crtc_covered = false; + bool cursor_changed = false; + int underlying_scale_w, underlying_scale_h; + int cursor_scale_w, cursor_scale_h; + int i; + + /* Overlay cursor not supported on HW before DCN. + * DCN401 does not have the cursor-on-scaled-plane or cursor-on-yuv-plane restrictions + * of previous DCN generations, so enable native mode on DCN401 in addition to DCE. */ + if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0 || + amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) { + *cursor_mode = DM_CURSOR_NATIVE_MODE; + return 0; + } - /* If no plane was enabled or changed scaling, no need to check again */ - for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { + /* Init cursor_mode to be the same as current */ + *cursor_mode = dm_crtc_state->cursor_mode; + + /* + * Cursor mode can change if a plane's format changes, scale changes, is + * enabled/disabled, or z-order changes. 
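+ * (A pure z-order change with no other plane change is caught via crtc_state->zpos_changed below.)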
+ */ + for_each_oldnew_plane_in_state(state, plane, old_plane_state, plane_state, i) { int new_scale_w, new_scale_h, old_scale_w, old_scale_h; - if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc) + /* Only care about planes on this CRTC */ + if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0) continue; - if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) { - any_relevant_change = true; + if (plane->type == DRM_PLANE_TYPE_CURSOR) + cursor_changed = true; + + if (drm_atomic_plane_enabling(old_plane_state, plane_state) || + drm_atomic_plane_disabling(old_plane_state, plane_state) || + old_plane_state->fb->format != plane_state->fb->format) { + consider_mode_change = true; break; } - if (new_plane_state->fb == old_plane_state->fb && - new_plane_state->crtc_w == old_plane_state->crtc_w && - new_plane_state->crtc_h == old_plane_state->crtc_h) - continue; - - dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h); + dm_get_plane_scale(plane_state, &new_scale_w, &new_scale_h); dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h); - if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) { - any_relevant_change = true; + consider_mode_change = true; break; } } - if (!any_relevant_change) + if (!consider_mode_change && !crtc_state->zpos_changed) return 0; - new_cursor_state = drm_atomic_get_plane_state(state, cursor); - if (IS_ERR(new_cursor_state)) - return PTR_ERR(new_cursor_state); - - if (!new_cursor_state->fb) + /* + * If no cursor change on this CRTC, and not enabled on this CRTC, then + * no need to set cursor mode. This avoids needlessly locking the cursor + * state. + */ + if (!cursor_changed && + !(drm_plane_mask(crtc_state->crtc->cursor) & crtc_state->plane_mask)) { return 0; + } + + cursor_state = drm_atomic_get_plane_state(state, + crtc_state->crtc->cursor); + if (IS_ERR(cursor_state)) + return PTR_ERR(cursor_state); - dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h); + /* Cursor is disabled */ + if (!cursor_state->fb) + return 0; - /* Need to check all enabled planes, even if this commit doesn't change - * their state + /* For all planes in descending z-order (all of which are below cursor + * as per zpos definitions), check their scaling and format */ - i = drm_atomic_add_affected_planes(state, crtc); - if (i) - return i; + for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, plane_state) { - for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) { - /* Narrow down to non-cursor planes on the same CRTC as the cursor */ - if (new_underlying_state->crtc != crtc || underlying == crtc->cursor) + /* Only care about non-cursor planes on this CRTC */ + if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0 || + plane->type == DRM_PLANE_TYPE_CURSOR) continue; - /* Ignore disabled planes */ - if (!new_underlying_state->fb) - continue; + /* Underlying plane is YUV format - use overlay cursor */ + if (amdgpu_dm_plane_is_video_format(plane_state->fb->format->format)) { + *cursor_mode = DM_CURSOR_OVERLAY_MODE; + return 0; + } - dm_get_plane_scale(new_underlying_state, + dm_get_plane_scale(plane_state, &underlying_scale_w, &underlying_scale_h); + dm_get_plane_scale(cursor_state, + &cursor_scale_w, &cursor_scale_h); - if (cursor_scale_w != underlying_scale_w || + /* Underlying plane has different scale - use overlay cursor */ + if (cursor_scale_w != underlying_scale_w && cursor_scale_h != underlying_scale_h) { - drm_dbg_atomic(crtc->dev, - 
"Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n", - cursor->base.id, cursor->name, underlying->base.id, underlying->name); - return -EINVAL; + *cursor_mode = DM_CURSOR_OVERLAY_MODE; + return 0; } /* If this plane covers the whole CRTC, no need to check planes underneath */ - if (new_underlying_state->crtc_x <= 0 && - new_underlying_state->crtc_y <= 0 && - new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay && - new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay) + if (plane_state->crtc_x <= 0 && plane_state->crtc_y <= 0 && + plane_state->crtc_x + plane_state->crtc_w >= crtc_state->mode.hdisplay && + plane_state->crtc_y + plane_state->crtc_h >= crtc_state->mode.vdisplay) { + entire_crtc_covered = true; break; + } } + /* If planes do not cover the entire CRTC, use overlay mode to enable + * cursor over holes + */ + if (entire_crtc_covered) + *cursor_mode = DM_CURSOR_NATIVE_MODE; + else + *cursor_mode = DM_CURSOR_OVERLAY_MODE; + return 0; } -static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) +static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev, + struct drm_atomic_state *state, + struct drm_crtc_state *crtc_state) { - struct drm_connector *connector; - struct drm_connector_state *conn_state, *old_conn_state; - struct amdgpu_dm_connector *aconnector = NULL; - int i; - - for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { - if (!conn_state->crtc) - conn_state = old_conn_state; + struct drm_plane *plane; + struct drm_plane_state *new_plane_state, *old_plane_state; - if (conn_state->crtc != crtc) - continue; + drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) { + new_plane_state = drm_atomic_get_plane_state(state, plane); + old_plane_state = drm_atomic_get_plane_state(state, plane); - if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) - continue; + if (IS_ERR(new_plane_state) || IS_ERR(old_plane_state)) { + drm_err(dev, "Failed to get plane state for plane %s\n", plane->name); + return false; + } - aconnector = to_amdgpu_dm_connector(connector); - if (!aconnector->mst_output_port || !aconnector->mst_root) - aconnector = NULL; - else - break; + if (old_plane_state->fb && new_plane_state->fb && + get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb)) + return true; } - if (!aconnector) - return 0; - - return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr); + return false; } /** @@ -10579,7 +11756,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct drm_plane *plane; - struct drm_plane_state *old_plane_state, *new_plane_state; + struct drm_plane_state *old_plane_state, *new_plane_state, *new_cursor_state; enum dc_status status; int ret, i; bool lock_and_validation_needed = false; @@ -10587,13 +11764,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; struct drm_dp_mst_topology_mgr *mgr; struct drm_dp_mst_topology_state *mst_state; - struct dsc_mst_fairness_vars vars[MAX_PIPES]; + struct dsc_mst_fairness_vars vars[MAX_PIPES] = {0}; trace_amdgpu_dm_atomic_check_begin(state); ret = drm_atomic_helper_check_modeset(dev, state); if (ret) { - DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n"); + drm_dbg_atomic(dev, "drm_atomic_helper_check_modeset() failed\n"); goto fail; 
} @@ -10608,7 +11785,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); if (IS_ERR(new_crtc_state)) { - DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n"); + drm_dbg_atomic(dev, "drm_atomic_get_crtc_state() failed\n"); ret = PTR_ERR(new_crtc_state); goto fail; } @@ -10623,7 +11800,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { ret = add_affected_mst_dsc_crtcs(state, crtc); if (ret) { - DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n"); + drm_dbg_atomic(dev, "add_affected_mst_dsc_crtcs() failed\n"); goto fail; } } @@ -10640,7 +11817,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); if (ret) { - DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n"); + drm_dbg_atomic(dev, "amdgpu_dm_verify_lut_sizes() failed\n"); goto fail; } @@ -10649,13 +11826,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, ret = drm_atomic_add_affected_connectors(state, crtc); if (ret) { - DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n"); + drm_dbg_atomic(dev, "drm_atomic_add_affected_connectors() failed\n"); goto fail; } ret = drm_atomic_add_affected_planes(state, crtc); if (ret) { - DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n"); + drm_dbg_atomic(dev, "drm_atomic_add_affected_planes() failed\n"); goto fail; } @@ -10694,7 +11871,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (IS_ERR(new_plane_state)) { ret = PTR_ERR(new_plane_state); - DRM_DEBUG_DRIVER("new_plane_state is BAD\n"); + drm_dbg_atomic(dev, "new_plane_state is BAD\n"); goto fail; } } @@ -10712,12 +11889,34 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; } + /* + * Determine whether cursors on each CRTC should be enabled in native or + * overlay mode. + */ + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + + ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state, + &dm_new_crtc_state->cursor_mode); + if (ret) { + drm_dbg(dev, "Failed to determine cursor mode\n"); + goto fail; + } + + /* + * If overlay cursor is needed, DC cannot go through the + * native cursor update path. 
All enabled planes on the CRTC + * need to be added for DC to not disable a plane by mistake. + */ + if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) { + ret = drm_atomic_add_affected_planes(state, crtc); + if (ret) + goto fail; + } + } + /* Remove existing planes if they are modified */ - for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { - if (old_plane_state->fb && new_plane_state->fb && - get_mem_type(old_plane_state->fb) != - get_mem_type(new_plane_state->fb)) - lock_and_validation_needed = true; + for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) { ret = dm_update_plane_state(dc, state, plane, old_plane_state, @@ -10726,7 +11925,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, &lock_and_validation_needed, &is_top_most_overlay); if (ret) { - DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); + drm_dbg_atomic(dev, "dm_update_plane_state() failed\n"); goto fail; } } @@ -10739,7 +11938,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, false, &lock_and_validation_needed); if (ret) { - DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n"); + drm_dbg_atomic(dev, "DISABLE: dm_update_crtc_state() failed\n"); goto fail; } } @@ -10752,13 +11951,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, true, &lock_and_validation_needed); if (ret) { - DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n"); + drm_dbg_atomic(dev, "ENABLE: dm_update_crtc_state() failed\n"); goto fail; } } /* Add new/modified planes */ - for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { + for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) { ret = dm_update_plane_state(dc, state, plane, old_plane_state, new_plane_state, @@ -10766,35 +11965,75 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, &lock_and_validation_needed, &is_top_most_overlay); if (ret) { - DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); + drm_dbg_atomic(dev, "dm_update_plane_state() failed\n"); goto fail; } } +#if defined(CONFIG_DRM_AMD_DC_FP) if (dc_resource_is_dsc_encoding_supported(dc)) { ret = pre_validate_dsc(state, &dm_state, vars); if (ret != 0) goto fail; } +#endif /* Run this here since we want to validate the streams we created */ ret = drm_atomic_helper_check_planes(dev, state); if (ret) { - DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n"); + drm_dbg_atomic(dev, "drm_atomic_helper_check_planes() failed\n"); goto fail; } for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (dm_new_crtc_state->mpo_requested) - DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc); + drm_dbg_atomic(dev, "MPO enablement requested on crtc:[%p]\n", crtc); } - /* Check cursor planes scaling */ + /* Check cursor restrictions */ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { - ret = dm_check_crtc_cursor(state, crtc, new_crtc_state); + enum amdgpu_dm_cursor_mode required_cursor_mode; + int is_rotated, is_scaled; + + /* Overlay cursor not subject to native cursor restrictions */ + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) + continue; + + /* Check if rotation or scaling is enabled on DCN401 */ + if ((drm_plane_mask(crtc->cursor) & new_crtc_state->plane_mask) && + amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) { + new_cursor_state = 
drm_atomic_get_new_plane_state(state, crtc->cursor); + + is_rotated = new_cursor_state && + ((new_cursor_state->rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0); + is_scaled = new_cursor_state && ((new_cursor_state->src_w >> 16 != new_cursor_state->crtc_w) || + (new_cursor_state->src_h >> 16 != new_cursor_state->crtc_h)); + + if (is_rotated || is_scaled) { + drm_dbg_driver( + crtc->dev, + "[CRTC:%d:%s] cannot enable hardware cursor due to rotation/scaling\n", + crtc->base.id, crtc->name); + ret = -EINVAL; + goto fail; + } + } + + /* If HW can only do native cursor, check restrictions again */ + ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state, + &required_cursor_mode); if (ret) { - DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n"); + drm_dbg_driver(crtc->dev, + "[CRTC:%d:%s] Checking cursor mode failed\n", + crtc->base.id, crtc->name); + goto fail; + } else if (required_cursor_mode == DM_CURSOR_OVERLAY_MODE) { + drm_dbg_driver(crtc->dev, + "[CRTC:%d:%s] Cannot enable native cursor due to scaling or YUV restrictions\n", + crtc->base.id, crtc->name); + ret = -EINVAL; goto fail; } } @@ -10877,28 +12116,30 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (lock_and_validation_needed) { ret = dm_atomic_get_state(state, &dm_state); if (ret) { - DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n"); + drm_dbg_atomic(dev, "dm_atomic_get_state() failed\n"); goto fail; } ret = do_aquire_global_lock(dev, state); if (ret) { - DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n"); + drm_dbg_atomic(dev, "do_aquire_global_lock() failed\n"); goto fail; } +#if defined(CONFIG_DRM_AMD_DC_FP) if (dc_resource_is_dsc_encoding_supported(dc)) { ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); if (ret) { - DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); + drm_dbg_atomic(dev, "MST_DSC compute_mst_dsc_configs_for_state() failed\n"); ret = -EINVAL; goto fail; } } +#endif ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); if (ret) { - DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n"); + drm_dbg_atomic(dev, "dm_update_mst_vcpi_slots_for_dsc() failed\n"); goto fail; } @@ -10910,12 +12151,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, */ ret = drm_dp_mst_atomic_check(state); if (ret) { - DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n"); + drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n"); goto fail; } status = dc_validate_global_state(dc, dm_state->context, true); if (status != DC_OK) { - DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)", + drm_dbg_atomic(dev, "DC global validation failure: %s (%d)", dc_status_to_str(status), status); ret = -EINVAL; goto fail; @@ -10970,9 +12211,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, /* * Only allow async flips for fast updates that don't change - * the FB pitch, the DCC state, rotation, etc. + * the FB pitch, the DCC state, rotation, mem_type, etc. 
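+ * (A framebuffer move between VRAM and GTT is reported by amdgpu_dm_crtc_mem_type_changed() and likewise rejects the async flip.)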
*/ - if (new_crtc_state->async_flip && lock_and_validation_needed) { + if (new_crtc_state->async_flip && + (lock_and_validation_needed || + amdgpu_dm_crtc_mem_type_changed(dev, state, new_crtc_state))) { drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] async flips are only supported for fast updates\n", crtc->base.id, crtc->name); @@ -10993,36 +12236,17 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, fail: if (ret == -EDEADLK) - DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n"); + drm_dbg_atomic(dev, "Atomic check stopped to avoid deadlock.\n"); else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS) - DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n"); + drm_dbg_atomic(dev, "Atomic check stopped due to signal.\n"); else - DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret); + drm_dbg_atomic(dev, "Atomic check failed with err: %d\n", ret); trace_amdgpu_dm_atomic_check_finish(state, ret); return ret; } -static bool is_dp_capable_without_timing_msa(struct dc *dc, - struct amdgpu_dm_connector *amdgpu_dm_connector) -{ - u8 dpcd_data; - bool capable = false; - - if (amdgpu_dm_connector->dc_link && - dm_helpers_dp_read_dpcd( - NULL, - amdgpu_dm_connector->dc_link, - DP_DOWN_STREAM_PORT_COUNT, - &dpcd_data, - sizeof(dpcd_data))) { - capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false; - } - - return capable; -} - static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, unsigned int offset, unsigned int total_length, @@ -11053,7 +12277,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); if (!res) { - DRM_ERROR("EDID CEA parser failed\n"); + drm_err(adev_to_drm(dm->adev), "EDID CEA parser failed\n"); return false; } @@ -11061,7 +12285,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, if (output->type == DMUB_CMD__EDID_CEA_ACK) { if (!output->ack.success) { - DRM_ERROR("EDID CEA ack failed at offset %d\n", + drm_err(adev_to_drm(dm->adev), "EDID CEA ack failed at offset %d\n", output->ack.offset); } } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) { @@ -11073,7 +12297,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate; vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate; } else { - DRM_WARN("Unknown EDID CEA parser results\n"); + drm_warn(adev_to_drm(dm->adev), "Unknown EDID CEA parser results\n"); return false; } @@ -11153,8 +12377,51 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, return ret; } +static void parse_edid_displayid_vrr(struct drm_connector *connector, + const struct edid *edid) +{ + u8 *edid_ext = NULL; + int i; + int j = 0; + u16 min_vfreq; + u16 max_vfreq; + + if (edid == NULL || edid->extensions == 0) + return; + + /* Find DisplayID extension */ + for (i = 0; i < edid->extensions; i++) { + edid_ext = (void *)(edid + (i + 1)); + if (edid_ext[0] == DISPLAYID_EXT) + break; + } + + if (edid_ext == NULL) + return; + + while (j < EDID_LENGTH) { + /* Get dynamic video timing range from DisplayID if available */ + if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25 && + (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) { + min_vfreq = edid_ext[j+9]; + if (edid_ext[j+1] & 7) + max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8); + else + max_vfreq = edid_ext[j+10]; + + if (max_vfreq && min_vfreq) { + connector->display_info.monitor_range.max_vfreq = max_vfreq; + 
connector->display_info.monitor_range.min_vfreq = min_vfreq; + + return; + } + } + j++; + } +} + static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, - struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) + const struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) { u8 *edid_ext = NULL; int i; @@ -11170,7 +12437,7 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, break; } - while (j < EDID_LENGTH) { + while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) { struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j]; unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]); @@ -11189,7 +12456,8 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, } static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, - struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) + const struct edid *edid, + struct amdgpu_hdmi_vsdb_info *vsdb_info) { u8 *edid_ext = NULL; int i; @@ -11223,7 +12491,7 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, * amdgpu_dm_update_freesync_caps - Update Freesync capabilities * * @connector: Connector to query. - * @edid: EDID from monitor + * @drm_edid: DRM EDID from monitor * * Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep * track of some of the display information in the internal data struct used by @@ -11231,24 +12499,21 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, * FreeSync parameters. */ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { int i = 0; - struct detailed_timing *timing; - struct detailed_non_pixel *data; - struct detailed_data_monitor_range *range; struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct dm_connector_state *dm_con_state = NULL; struct dc_sink *sink; - struct amdgpu_device *adev = drm_to_adev(connector->dev); struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; + const struct edid *edid; bool freesync_capable = false; enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; if (!connector->state) { - DRM_ERROR("%s - Connector has no state", __func__); + drm_err(adev_to_drm(adev), "%s - Connector has no state", __func__); goto update; } @@ -11256,14 +12521,13 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, amdgpu_dm_connector->dc_sink : amdgpu_dm_connector->dc_em_sink; - if (!edid || !sink) { + drm_edid_connector_update(connector, drm_edid); + + if (!drm_edid || !sink) { dm_con_state = to_dm_connector_state(connector->state); amdgpu_dm_connector->min_vfreq = 0; amdgpu_dm_connector->max_vfreq = 0; - amdgpu_dm_connector->pixel_clock_mhz = 0; - connector->display_info.monitor_range.min_vfreq = 0; - connector->display_info.monitor_range.max_vfreq = 0; freesync_capable = false; goto update; @@ -11274,69 +12538,23 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (!adev->dm.freesync_module) goto update; + edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() + + /* Some eDP panels only have the refresh rate range info in DisplayID */ + if ((connector->display_info.monitor_range.min_vfreq == 0 || + connector->display_info.monitor_range.max_vfreq == 0)) + parse_edid_displayid_vrr(connector, edid); + if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || sink->sink_signal == SIGNAL_TYPE_EDP)) { - bool edid_check_required = false; - - if 
(is_dp_capable_without_timing_msa(adev->dm.dc, - amdgpu_dm_connector)) { - if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) { + if (amdgpu_dm_connector->dc_link && + amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) { + amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; + amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) freesync_capable = true; - amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; - amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; - } else { - edid_check_required = edid->version > 1 || - (edid->version == 1 && - edid->revision > 1); - } } - if (edid_check_required) { - for (i = 0; i < 4; i++) { - - timing = &edid->detailed_timings[i]; - data = &timing->data.other_data; - range = &data->data.range; - /* - * Check if monitor has continuous frequency mode - */ - if (data->type != EDID_DETAIL_MONITOR_RANGE) - continue; - /* - * Check for flag range limits only. If flag == 1 then - * no additional timing information provided. - * Default GTF, GTF Secondary curve and CVT are not - * supported - */ - if (range->flags != 1) - continue; - - connector->display_info.monitor_range.min_vfreq = range->min_vfreq; - connector->display_info.monitor_range.max_vfreq = range->max_vfreq; - - if (edid->revision >= 4) { - if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ) - connector->display_info.monitor_range.min_vfreq += 255; - if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ) - connector->display_info.monitor_range.max_vfreq += 255; - } - - amdgpu_dm_connector->min_vfreq = - connector->display_info.monitor_range.min_vfreq; - amdgpu_dm_connector->max_vfreq = - connector->display_info.monitor_range.max_vfreq; - amdgpu_dm_connector->pixel_clock_mhz = - range->pixel_clock_mhz * 10; - - break; - } - - if (amdgpu_dm_connector->max_vfreq - - amdgpu_dm_connector->min_vfreq > 10) { - - freesync_capable = true; - } - } parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); if (vsdb_info.replay_mode) { @@ -11345,12 +12563,9 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP; } - } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { + } else if (drm_edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); if (i >= 0 && vsdb_info.freesync_supported) { - timing = &edid->detailed_timings[i]; - data = &timing->data.other_data; - amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) @@ -11361,7 +12576,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, } } - as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link); + if (amdgpu_dm_connector->dc_link) + as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link); if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) { i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); @@ -11385,6 +12601,12 @@ update: if (dm_con_state) dm_con_state->freesync_capable = freesync_capable; + if (connector->state && amdgpu_dm_connector->dc_link && !freesync_capable && + amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported) { + 
amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported = false; + amdgpu_dm_connector->dc_link->replay_settings.replay_feature_enabled = false; + } + if (connector->vrr_capable_property) drm_connector_set_vrr_capable_property(connector, freesync_capable); @@ -11409,6 +12631,12 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev) mutex_unlock(&adev->dm.dc_lock); } +static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc) +{ + if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter) + dc_exit_ips_for_hw_access(dc); +} + void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, u32 value, const char *func_name) { @@ -11419,6 +12647,8 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, return; } #endif + + amdgpu_dm_exit_ips_for_hw_access(ctx->dc); cgs_write_register(ctx->cgs_device, address, value); trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value); } @@ -11442,6 +12672,8 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, return 0; } + amdgpu_dm_exit_ips_for_hw_access(ctx->dc); + value = cgs_read_register(ctx->cgs_device, address); trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); @@ -11466,7 +12698,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync( } if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) { - DRM_ERROR("wait_for_completion_timeout timeout!"); + drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!"); *operation_result = AUX_RET_ERROR_TIMEOUT; goto out; } @@ -11476,31 +12708,24 @@ int amdgpu_dm_process_dmub_aux_transfer_sync( * Transient states before tunneling is enabled could * lead to this error. We can ignore this for now. */ - if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) { - DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n", + if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) { + drm_warn(adev_to_drm(adev), "DPIA AUX failed on 0x%x(%d), error %d\n", payload->address, payload->length, p_notify->result); } - *operation_result = AUX_RET_ERROR_INVALID_REPLY; + *operation_result = p_notify->result; goto out; } + payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF; + if (adev->dm.dmub_notify->aux_reply.command & 0xF0) + /* The reply is stored in the top nibble of the command. 
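+ * A zero top nibble means the reply already sits in the low nibble.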
*/ + payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF; - payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; - if (!payload->write && p_notify->aux_reply.length && - (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) { - - if (payload->length != p_notify->aux_reply.length) { - DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n", - p_notify->aux_reply.length, - payload->address, payload->length); - *operation_result = AUX_RET_ERROR_INVALID_REPLY; - goto out; - } - + /* A write request may also receive a byte indicating the number of bytes partially written */ + if (p_notify->aux_reply.length) memcpy(payload->data, p_notify->aux_reply.data, p_notify->aux_reply.length); - } /* success */ ret = p_notify->aux_reply.length; @@ -11511,6 +12736,79 @@ out: return ret; } +static void abort_fused_io( + struct dc_context *ctx, + const struct dmub_cmd_fused_request *request +) +{ + union dmub_rb_cmd command = { 0 }; + struct dmub_rb_cmd_fused_io *io = &command.fused_io; + + io->header.type = DMUB_CMD__FUSED_IO; + io->header.sub_type = DMUB_CMD__FUSED_IO_ABORT; + io->header.payload_bytes = sizeof(*io) - sizeof(io->header); + io->request = *request; + dm_execute_dmub_cmd(ctx, &command, DM_DMUB_WAIT_TYPE_NO_WAIT); +} + +static bool execute_fused_io( + struct amdgpu_device *dev, + struct dc_context *ctx, + union dmub_rb_cmd *commands, + uint8_t count, + uint32_t timeout_us +) +{ + const uint8_t ddc_line = commands[0].fused_io.request.u.aux.ddc_line; + + if (ddc_line >= ARRAY_SIZE(dev->dm.fused_io)) + return false; + + struct fused_io_sync *sync = &dev->dm.fused_io[ddc_line]; + struct dmub_rb_cmd_fused_io *first = &commands[0].fused_io; + const bool result = dm_execute_dmub_cmd_list(ctx, count, commands, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) + && first->header.ret_status + && first->request.status == FUSED_REQUEST_STATUS_SUCCESS; + + if (!result) + return false; + + while (wait_for_completion_timeout(&sync->replied, usecs_to_jiffies(timeout_us))) { + reinit_completion(&sync->replied); + + struct dmub_cmd_fused_request *reply = (struct dmub_cmd_fused_request *) sync->reply_data; + + static_assert(sizeof(*reply) <= sizeof(sync->reply_data), "Size mismatch"); + + if (reply->identifier == first->request.identifier) { + first->request = *reply; + return true; + } + } + + reinit_completion(&sync->replied); + first->request.status = FUSED_REQUEST_STATUS_TIMEOUT; + abort_fused_io(ctx, &first->request); + return false; +} + +bool amdgpu_dm_execute_fused_io( + struct amdgpu_device *dev, + struct dc_link *link, + union dmub_rb_cmd *commands, + uint8_t count, + uint32_t timeout_us) +{ + struct amdgpu_display_manager *dm = &dev->dm; + + mutex_lock(&dm->dpia_aux_lock); + + const bool result = execute_fused_io(dev, link->ctx, commands, count, timeout_us); + + mutex_unlock(&dm->dpia_aux_lock); + return result; +} + int amdgpu_dm_process_dmub_set_config_sync( struct dc_context *ctx, unsigned int link_index, @@ -11529,7 +12827,7 @@ int amdgpu_dm_process_dmub_set_config_sync( ret = 0; *operation_result = adev->dm.dmub_notify->sc_status; } else { - DRM_ERROR("wait_for_completion_timeout timeout!"); + drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!"); ret = -1; *operation_result = SET_CONFIG_UNKNOWN_ERROR; } @@ -11549,3 +12847,10 @@ bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, { return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type); } + +void dm_acpi_process_phy_transition_interlock( + const struct dc_context *ctx, + struct 
dm_process_phy_transition_init_params process_phy_transition_init_params) +{ + // Not yet implemented +}