Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
33 files changed, 12797 insertions(+), 3823 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile index 90fb0f3cdb6f..8e949fe77312 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -25,25 +25,28 @@ +ifneq ($(CONFIG_DRM_AMD_DC),) AMDGPUDM = \ amdgpu_dm.o \ amdgpu_dm_plane.o \ amdgpu_dm_crtc.o \ amdgpu_dm_irq.o \ amdgpu_dm_mst_types.o \ - amdgpu_dm_color.o - -ifdef CONFIG_DRM_AMD_DC_DCN + amdgpu_dm_color.o \ + amdgpu_dm_services.o \ + amdgpu_dm_helpers.o \ + amdgpu_dm_pp_smu.o \ + amdgpu_dm_psr.o \ + amdgpu_dm_replay.o \ + amdgpu_dm_quirks.o \ + amdgpu_dm_wb.o \ + amdgpu_dm_colorop.o + +ifdef CONFIG_DRM_AMD_DC_FP AMDGPUDM += dc_fpu.o endif -ifneq ($(CONFIG_DRM_AMD_DC),) -AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o -endif - -ifdef CONFIG_DRM_AMD_DC_HDCP AMDGPUDM += amdgpu_dm_hdcp.o -endif ifneq ($(CONFIG_DEBUG_FS),) AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o @@ -54,3 +57,4 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM)) AMD_DISPLAY_FILES += $(AMDGPU_DM) +endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 589bee9acf16..740711ac1037 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2015 Advanced Micro Devices, Inc. * @@ -28,7 +29,6 @@ #include "dm_services_types.h" #include "dc.h" -#include "dc_link_dp.h" #include "link_enc_cfg.h" #include "dc/inc/core_types.h" #include "dal_asic_id.h" @@ -38,9 +38,13 @@ #include "dc/dc_dmub_srv.h" #include "dc/dc_edid_parser.h" #include "dc/dc_stat.h" +#include "dc/dc_state.h" #include "amdgpu_dm_trace.h" +#include "link/protocols/link_dpcd.h" +#include "link_service_types.h" +#include "link/protocols/link_dp_capability.h" +#include "link/protocols/link_ddc.h" -#include "vid.h" #include "amdgpu.h" #include "amdgpu_display.h" #include "amdgpu_ucode.h" @@ -48,11 +52,9 @@ #include "amdgpu_dm.h" #include "amdgpu_dm_plane.h" #include "amdgpu_dm_crtc.h" -#ifdef CONFIG_DRM_AMD_DC_HDCP #include "amdgpu_dm_hdcp.h" #include <drm/display/drm_hdcp_helper.h> -#endif -#include "amdgpu_pm.h" +#include "amdgpu_dm_wb.h" #include "amdgpu_atombios.h" #include "amd_shared.h" @@ -63,49 +65,46 @@ #include "amdgpu_dm_debugfs.h" #endif #include "amdgpu_dm_psr.h" +#include "amdgpu_dm_replay.h" #include "ivsrcid/ivsrcid_vislands30.h" -#include "i2caux_interface.h" +#include <linux/backlight.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/pm_runtime.h> #include <linux/pci.h> +#include <linux/power_supply.h> #include <linux/firmware.h> #include <linux/component.h> -#include <linux/dmi.h> +#include <linux/sort.h> +#include <drm/drm_privacy_screen_consumer.h> #include <drm/display/drm_dp_mst_helper.h> #include <drm/display/drm_hdmi_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_uapi.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fixed.h> #include <drm/drm_fourcc.h> #include <drm/drm_edid.h> +#include <drm/drm_eld.h> +#include <drm/drm_utils.h> #include <drm/drm_vblank.h> #include <drm/drm_audio_component.h> #include <drm/drm_gem_atomic_helper.h> -#include <drm/drm_plane_helper.h> +#include <media/cec-notifier.h> #include <acpi/video.h> #include 
"ivsrcid/dcn/irqsrcs_dcn_1_0.h" -#include "dcn/dcn_1_0_offset.h" -#include "dcn/dcn_1_0_sh_mask.h" -#include "soc15_hw_ip.h" -#include "soc15_common.h" -#include "vega10_ip_offset.h" - -#include "gc/gc_11_0_0_offset.h" -#include "gc/gc_11_0_0_sh_mask.h" - #include "modules/inc/mod_freesync.h" #include "modules/power/power_helpers.h" -#include "modules/inc/mod_info_packet.h" + +static_assert(AMDGPU_DMUB_NOTIFICATION_MAX == DMUB_NOTIFICATION_MAX, "AMDGPU_DMUB_NOTIFICATION_MAX mismatch"); #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); @@ -141,6 +140,18 @@ MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin" MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); +#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB); + +#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB); + +#define FIRMWARE_DCN_36_DMUB "amdgpu/dcn_3_6_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_36_DMUB); + +#define FIRMWARE_DCN_401_DMUB "amdgpu/dcn_4_0_1_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB); + /* Number of bytes in PSP header for firmware. */ #define PSP_HEADER_BYTES 0x100 @@ -161,6 +172,9 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); static int amdgpu_dm_init(struct amdgpu_device *adev); static void amdgpu_dm_fini(struct amdgpu_device *adev); static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector); +static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state); +static struct amdgpu_i2c_adapter * +create_i2c(struct ddc_service *ddc_service, bool oem); static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link) { @@ -211,7 +225,7 @@ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm); static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *amdgpu_dm_connector, - uint32_t link_index, + u32 link_index, struct amdgpu_encoder *amdgpu_encoder); static int amdgpu_dm_encoder_init(struct drm_device *dev, struct amdgpu_encoder *aencoder, @@ -219,6 +233,7 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev, static int amdgpu_dm_connector_get_modes(struct drm_connector *connector); +static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state); static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state); static int amdgpu_dm_atomic_check(struct drm_device *dev, @@ -227,6 +242,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector); static void handle_hpd_rx_irq(void *param); +static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, + int bl_idx, + u32 user_brightness); + static bool is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, struct drm_crtc_state *new_crtc_state); @@ -245,72 +264,77 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, */ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc) { + struct amdgpu_crtc *acrtc = NULL; + if (crtc >= adev->mode_info.num_crtc) return 0; - else { - struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; - if (acrtc->dm_irq_params.stream == NULL) { - DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", - crtc); - return 0; - } + acrtc = adev->mode_info.crtcs[crtc]; - return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream); + if (!acrtc->dm_irq_params.stream) { + 
drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n", + crtc); + return 0; } + + return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream); } static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, u32 *vbl, u32 *position) { - uint32_t v_blank_start, v_blank_end, h_position, v_position; + u32 v_blank_start = 0, v_blank_end = 0, h_position = 0, v_position = 0; + struct amdgpu_crtc *acrtc = NULL; + struct dc *dc = adev->dm.dc; if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) return -EINVAL; - else { - struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; - if (acrtc->dm_irq_params.stream == NULL) { - DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", - crtc); - return 0; - } + acrtc = adev->mode_info.crtcs[crtc]; - /* - * TODO rework base driver to use values directly. - * for now parse it back into reg-format - */ - dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream, - &v_blank_start, - &v_blank_end, - &h_position, - &v_position); - - *position = v_position | (h_position << 16); - *vbl = v_blank_start | (v_blank_end << 16); + if (!acrtc->dm_irq_params.stream) { + drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n", + crtc); + return 0; } + if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed) + dc_allow_idle_optimizations(dc, false); + + /* + * TODO rework base driver to use values directly. + * for now parse it back into reg-format + */ + dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream, + &v_blank_start, + &v_blank_end, + &h_position, + &v_position); + + *position = v_position | (h_position << 16); + *vbl = v_blank_start | (v_blank_end << 16); + return 0; } -static bool dm_is_idle(void *handle) +static bool dm_is_idle(struct amdgpu_ip_block *ip_block) { /* XXX todo */ return true; } -static int dm_wait_for_idle(void *handle) +static int dm_wait_for_idle(struct amdgpu_ip_block *ip_block) { /* XXX todo */ return 0; } -static bool dm_check_soft_reset(void *handle) +static bool dm_check_soft_reset(struct amdgpu_ip_block *ip_block) { return false; } -static int dm_soft_reset(void *handle) +static int dm_soft_reset(struct amdgpu_ip_block *ip_block) { /* XXX todo */ return 0; @@ -340,14 +364,69 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev, static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state, struct dm_crtc_state *new_state) { + if (new_state->stream->adjust.timing_adjust_pending) + return true; if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) return true; - else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state)) + else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state)) return true; else return false; } +/* + * DC will program planes with their z-order determined by their ordering + * in the dc_surface_updates array. This comparator is used to sort them + * by descending zpos. + */ +static int dm_plane_layer_index_cmp(const void *a, const void *b) +{ + const struct dc_surface_update *sa = (struct dc_surface_update *)a; + const struct dc_surface_update *sb = (struct dc_surface_update *)b; + + /* Sort by descending dc_plane layer_index (i.e. normalized_zpos) */ + return sb->surface->layer_index - sa->surface->layer_index; +} + +/** + * update_planes_and_stream_adapter() - Send planes to be updated in DC + * + * DC has a generic way to update planes and stream via + * dc_update_planes_and_stream function; however, DM might need some + * adjustments and preparation before calling it. 
This function is a wrapper + * for the dc_update_planes_and_stream that does any required configuration + * before passing control to DC. + * + * @dc: Display Core control structure + * @update_type: specify whether it is FULL/MEDIUM/FAST update + * @planes_count: planes count to update + * @stream: stream state + * @stream_update: stream update + * @array_of_surface_update: dc surface update pointer + * + */ +static inline bool update_planes_and_stream_adapter(struct dc *dc, + int update_type, + int planes_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update, + struct dc_surface_update *array_of_surface_update) +{ + sort(array_of_surface_update, planes_count, + sizeof(*array_of_surface_update), dm_plane_layer_index_cmp, NULL); + + /* + * Previous frame finished and HW is ready for optimization. + */ + dc_post_update_surfaces_to_stream(dc); + + return dc_update_planes_and_stream(dc, + array_of_surface_update, + planes_count, + stream, + stream_update); +} + /** * dm_pflip_high_irq() - Handle pageflip interrupt * @interrupt_params: ignored @@ -360,9 +439,10 @@ static void dm_pflip_high_irq(void *interrupt_params) struct amdgpu_crtc *amdgpu_crtc; struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; + struct drm_device *dev = adev_to_drm(adev); unsigned long flags; struct drm_pending_vblank_event *e; - uint32_t vpos, hpos, v_blank_start, v_blank_end; + u32 vpos, hpos, v_blank_start, v_blank_end; bool vrr_active; amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP); @@ -370,18 +450,17 @@ static void dm_pflip_high_irq(void *interrupt_params) /* IRQ could occur when in initial stage */ /* TODO work and BO cleanup */ if (amdgpu_crtc == NULL) { - DC_LOG_PFLIP("CRTC is null, returning.\n"); + drm_dbg_state(dev, "CRTC is null, returning.\n"); return; } spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); - if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ - DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n", - amdgpu_crtc->pflip_status, - AMDGPU_FLIP_SUBMITTED, - amdgpu_crtc->crtc_id, - amdgpu_crtc); + if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { + drm_dbg_state(dev, + "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n", + amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED, + amdgpu_crtc->crtc_id, amdgpu_crtc); spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); return; } @@ -392,7 +471,7 @@ static void dm_pflip_high_irq(void *interrupt_params) WARN_ON(!e); - vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc); + vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc); /* Fixed refresh rate, or VRR scanout position outside front-porch? 
*/ if (!vrr_active || @@ -447,9 +526,53 @@ static void dm_pflip_high_irq(void *interrupt_params) amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); - DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n", - amdgpu_crtc->crtc_id, amdgpu_crtc, - vrr_active, (int) !e); + drm_dbg_state(dev, + "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n", + amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e); +} + +static void dm_handle_vmin_vmax_update(struct work_struct *offload_work) +{ + struct vupdate_offload_work *work = container_of(offload_work, struct vupdate_offload_work, work); + struct amdgpu_device *adev = work->adev; + struct dc_stream_state *stream = work->stream; + struct dc_crtc_timing_adjust *adjust = work->adjust; + + mutex_lock(&adev->dm.dc_lock); + dc_stream_adjust_vmin_vmax(adev->dm.dc, stream, adjust); + mutex_unlock(&adev->dm.dc_lock); + + dc_stream_release(stream); + kfree(work->adjust); + kfree(work); +} + +static void schedule_dc_vmin_vmax(struct amdgpu_device *adev, + struct dc_stream_state *stream, + struct dc_crtc_timing_adjust *adjust) +{ + struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT); + if (!offload_work) { + drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n"); + return; + } + + struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT); + if (!adjust_copy) { + drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n"); + kfree(offload_work); + return; + } + + dc_stream_retain(stream); + memcpy(adjust_copy, adjust, sizeof(*adjust_copy)); + + INIT_WORK(&offload_work->work, dm_handle_vmin_vmax_update); + offload_work->adev = adev; + offload_work->stream = stream; + offload_work->adjust = adjust_copy; + + queue_work(system_wq, &offload_work->work); } static void dm_vupdate_high_irq(void *interrupt_params) @@ -466,9 +589,9 @@ static void dm_vupdate_high_irq(void *interrupt_params) acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE); if (acrtc) { - vrr_active = amdgpu_dm_vrr_active_irq(acrtc); + vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc); drm_dev = acrtc->base.dev; - vblank = &drm_dev->vblank[acrtc->base.index]; + vblank = drm_crtc_vblank_crtc(&acrtc->base); previous_timestamp = atomic64_read(&irq_params->previous_timestamp); frame_duration_ns = vblank->time - previous_timestamp; @@ -479,9 +602,9 @@ static void dm_vupdate_high_irq(void *interrupt_params) atomic64_set(&irq_params->previous_timestamp, vblank->time); } - DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n", - acrtc->crtc_id, - vrr_active); + drm_dbg_vbl(drm_dev, + "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, + vrr_active); /* Core vblank handling is done here after end of front-porch in * vrr mode, as vblank timestamping will give valid results @@ -489,22 +612,27 @@ static void dm_vupdate_high_irq(void *interrupt_params) * page-flip completion events that have been queued to us * if a pageflip happened inside front-porch. 
*/ - if (vrr_active) { - dm_crtc_handle_vblank(acrtc); + if (vrr_active && acrtc->dm_irq_params.stream) { + bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled; + bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled; + bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state + == VRR_STATE_ACTIVE_VARIABLE; + + amdgpu_dm_crtc_handle_vblank(acrtc); /* BTR processing for pre-DCE12 ASICs */ - if (acrtc->dm_irq_params.stream && - adev->family < AMDGPU_FAMILY_AI) { + if (adev->family < AMDGPU_FAMILY_AI) { spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); mod_freesync_handle_v_update( adev->dm.freesync_module, acrtc->dm_irq_params.stream, &acrtc->dm_irq_params.vrr_params); - dc_stream_adjust_vmin_vmax( - adev->dm.dc, - acrtc->dm_irq_params.stream, - &acrtc->dm_irq_params.vrr_params.adjust); + if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) { + schedule_dc_vmin_vmax(adev, + acrtc->dm_irq_params.stream, + &acrtc->dm_irq_params.vrr_params.adjust); + } spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } } @@ -522,6 +650,7 @@ static void dm_crtc_high_irq(void *interrupt_params) { struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; + struct drm_writeback_job *job; struct amdgpu_crtc *acrtc; unsigned long flags; int vrr_active; @@ -530,10 +659,39 @@ static void dm_crtc_high_irq(void *interrupt_params) if (!acrtc) return; - vrr_active = amdgpu_dm_vrr_active_irq(acrtc); + if (acrtc->wb_conn) { + spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags); + + if (acrtc->wb_pending) { + job = list_first_entry_or_null(&acrtc->wb_conn->job_queue, + struct drm_writeback_job, + list_entry); + acrtc->wb_pending = false; + spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags); + + if (job) { + unsigned int v_total, refresh_hz; + struct dc_stream_state *stream = acrtc->dm_irq_params.stream; + + v_total = stream->adjust.v_total_max ? + stream->adjust.v_total_max : stream->timing.v_total; + refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz * + 100LL, (v_total * stream->timing.h_total)); + mdelay(1000 / refresh_hz); + + drm_writeback_signal_completion(acrtc->wb_conn, 0); + dc_stream_fc_disable_writeback(adev->dm.dc, + acrtc->dm_irq_params.stream, 0); + } + } else + spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags); + } + + vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc); - DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id, - vrr_active, acrtc->dm_irq_params.active_planes); + drm_dbg_vbl(adev_to_drm(adev), + "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id, + vrr_active, acrtc->dm_irq_params.active_planes); /** * Core vblank handling at start of front-porch is only possible @@ -542,7 +700,7 @@ static void dm_crtc_high_irq(void *interrupt_params) * to dm_vupdate_high_irq after end of front-porch. 
*/ if (!vrr_active) - dm_crtc_handle_vblank(acrtc); + amdgpu_dm_crtc_handle_vblank(acrtc); /** * Following stuff must happen at start of vblank, for crc @@ -557,15 +715,20 @@ static void dm_crtc_high_irq(void *interrupt_params) spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); if (acrtc->dm_irq_params.stream && - acrtc->dm_irq_params.vrr_params.supported && - acrtc->dm_irq_params.freesync_config.state == - VRR_STATE_ACTIVE_VARIABLE) { + acrtc->dm_irq_params.vrr_params.supported) { + bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled; + bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled; + bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE; + mod_freesync_handle_v_update(adev->dm.freesync_module, acrtc->dm_irq_params.stream, &acrtc->dm_irq_params.vrr_params); - dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream, - &acrtc->dm_irq_params.vrr_params.adjust); + /* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */ + if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) { + schedule_dc_vmin_vmax(adev, acrtc->dm_irq_params.stream, + &acrtc->dm_irq_params.vrr_params.adjust); + } } /* @@ -633,6 +796,29 @@ static void dmub_aux_setconfig_callback(struct amdgpu_device *adev, complete(&adev->dm.dmub_aux_transfer_done); } +static void dmub_aux_fused_io_callback(struct amdgpu_device *adev, + struct dmub_notification *notify) +{ + if (!adev || !notify) { + ASSERT(false); + return; + } + + const struct dmub_cmd_fused_request *req = ¬ify->fused_request; + const uint8_t ddc_line = req->u.aux.ddc_line; + + if (ddc_line >= ARRAY_SIZE(adev->dm.fused_io)) { + ASSERT(false); + return; + } + + struct fused_io_sync *sync = &adev->dm.fused_io[ddc_line]; + + static_assert(sizeof(*req) <= sizeof(sync->reply_data), "Size mismatch"); + memcpy(sync->reply_data, req, sizeof(*req)); + complete(&sync->replied); +} + /** * dmub_hpd_callback - DMUB HPD interrupt processing callback. * @adev: amdgpu_device pointer @@ -649,19 +835,25 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, struct drm_connector *connector; struct drm_connector_list_iter iter; struct dc_link *link; - uint8_t link_index = 0; + u8 link_index = 0; struct drm_device *dev; if (adev == NULL) return; if (notify == NULL) { - DRM_ERROR("DMUB HPD callback notification was NULL"); + drm_err(adev_to_drm(adev), "DMUB HPD callback notification was NULL"); return; } if (notify->link_index > adev->dm.dc->link_count) { - DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index); + drm_err(adev_to_drm(adev), "DMUB HPD index (%u)is abnormal", notify->link_index); + return; + } + + /* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. 
*/ + if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) { + drm_info(adev_to_drm(adev), "Skip DMUB HPD IRQ callback in suspend/resume\n"); return; } @@ -671,9 +863,20 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (link && aconnector->dc_link == link) { - DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index); + if (notify->type == DMUB_NOTIFICATION_HPD) + drm_info(adev_to_drm(adev), "DMUB HPD IRQ callback: link_index=%u\n", link_index); + else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) + drm_info(adev_to_drm(adev), "DMUB HPD RX IRQ callback: link_index=%u\n", link_index); + else + drm_warn(adev_to_drm(adev), "DMUB Unknown HPD callback type %d, link_index=%u\n", + notify->type, link_index); + hpd_aconnector = aconnector; break; } @@ -681,14 +884,31 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, drm_connector_list_iter_end(&iter); if (hpd_aconnector) { - if (notify->type == DMUB_NOTIFICATION_HPD) + if (notify->type == DMUB_NOTIFICATION_HPD) { + if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG)) + drm_warn(adev_to_drm(adev), "DMUB reported hpd status unchanged. link_index=%u\n", link_index); handle_hpd_irq_helper(hpd_aconnector); - else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) + } else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) { handle_hpd_rx_irq(hpd_aconnector); + } } } /** + * dmub_hpd_sense_callback - DMUB HPD sense processing callback. + * @adev: amdgpu_device pointer + * @notify: dmub notification structure + * + * HPD sense changes can occur during low power states and need to be + * notified from firmware to driver. 
+ */ +static void dmub_hpd_sense_callback(struct amdgpu_device *adev, + struct dmub_notification *notify) +{ + drm_dbg_driver(adev_to_drm(adev), "DMUB HPD SENSE callback.\n"); +} + +/** * register_dmub_notify_callback - Sets callback for DMUB notify * @adev: amdgpu_device pointer * @type: Type of dmub notification @@ -721,7 +941,7 @@ static void dm_handle_hpd_work(struct work_struct *work) dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work); if (!dmub_hpd_wrk->dmub_notify) { - DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL"); + drm_err(adev_to_drm(dmub_hpd_wrk->adev), "dmub_hpd_wrk dmub_notify is NULL"); return; } @@ -735,6 +955,30 @@ static void dm_handle_hpd_work(struct work_struct *work) } +static const char *dmub_notification_type_str(enum dmub_notification_type e) +{ + switch (e) { + case DMUB_NOTIFICATION_NO_DATA: + return "NO_DATA"; + case DMUB_NOTIFICATION_AUX_REPLY: + return "AUX_REPLY"; + case DMUB_NOTIFICATION_HPD: + return "HPD"; + case DMUB_NOTIFICATION_HPD_IRQ: + return "HPD_IRQ"; + case DMUB_NOTIFICATION_SET_CONFIG_REPLY: + return "SET_CONFIG_REPLY"; + case DMUB_NOTIFICATION_DPIA_NOTIFICATION: + return "DPIA_NOTIFICATION"; + case DMUB_NOTIFICATION_HPD_SENSE_NOTIFY: + return "HPD_SENSE_NOTIFY"; + case DMUB_NOTIFICATION_FUSED_IO: + return "FUSED_IO"; + default: + return "<unknown>"; + } +} + #define DMUB_TRACE_MAX_READ 64 /** * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt @@ -745,14 +989,30 @@ static void dm_handle_hpd_work(struct work_struct *work) */ static void dm_dmub_outbox1_low_irq(void *interrupt_params) { - struct dmub_notification notify; + struct dmub_notification notify = {0}; struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; struct amdgpu_display_manager *dm = &adev->dm; struct dmcub_trace_buf_entry entry = { 0 }; - uint32_t count = 0; + u32 count = 0; struct dmub_hpd_work *dmub_hpd_wrk; - struct dc_link *plink = NULL; + + do { + if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) { + trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count, + entry.param0, entry.param1); + + drm_dbg_driver(adev_to_drm(adev), "trace_code:%u, tick_count:%u, param0:%u, param1:%u\n", + entry.trace_code, entry.tick_count, entry.param0, entry.param1); + } else + break; + + count++; + + } while (count <= DMUB_TRACE_MAX_READ); + + if (count > DMUB_TRACE_MAX_READ) + drm_dbg_driver(adev_to_drm(adev), "Warning : count > DMUB_TRACE_MAX_READ"); if (dc_enable_dmub_notifications(adev->dm.dc) && irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { @@ -760,82 +1020,56 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) do { dc_stat_get_dmub_notification(adev->dm.dc, ¬ify); if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) { - DRM_ERROR("DM: notify type %d invalid!", notify.type); + drm_err(adev_to_drm(adev), "DM: notify type %d invalid!", notify.type); continue; } if (!dm->dmub_callback[notify.type]) { - DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type); + drm_warn(adev_to_drm(adev), "DMUB notification skipped due to no handler: type=%s\n", + dmub_notification_type_str(notify.type)); continue; } if (dm->dmub_thread_offload[notify.type] == true) { dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC); if (!dmub_hpd_wrk) { - DRM_ERROR("Failed to allocate dmub_hpd_wrk"); + drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk"); return; } - dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC); + dmub_hpd_wrk->dmub_notify = 
kmemdup(¬ify, sizeof(struct dmub_notification), + GFP_ATOMIC); if (!dmub_hpd_wrk->dmub_notify) { kfree(dmub_hpd_wrk); - DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify"); + drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk->dmub_notify"); return; } INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work); - if (dmub_hpd_wrk->dmub_notify) - memcpy(dmub_hpd_wrk->dmub_notify, ¬ify, sizeof(struct dmub_notification)); dmub_hpd_wrk->adev = adev; - if (notify.type == DMUB_NOTIFICATION_HPD) { - plink = adev->dm.dc->links[notify.link_index]; - if (plink) { - plink->hpd_status = - notify.hpd_status == DP_HPD_PLUG; - } - } queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work); } else { dm->dmub_callback[notify.type](adev, ¬ify); } } while (notify.pending_notification); } - - - do { - if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) { - trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count, - entry.param0, entry.param1); - - DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n", - entry.trace_code, entry.tick_count, entry.param0, entry.param1); - } else - break; - - count++; - - } while (count <= DMUB_TRACE_MAX_READ); - - if (count > DMUB_TRACE_MAX_READ) - DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ"); } -static int dm_set_clockgating_state(void *handle, +static int dm_set_clockgating_state(struct amdgpu_ip_block *ip_block, enum amd_clockgating_state state) { return 0; } -static int dm_set_powergating_state(void *handle, +static int dm_set_powergating_state(struct amdgpu_ip_block *ip_block, enum amd_powergating_state state) { return 0; } /* Prototypes of private functions */ -static int dm_early_init(void* handle); +static int dm_early_init(struct amdgpu_ip_block *ip_block); /* Allocate memory for FBC compressed data */ static void amdgpu_dm_fbc_init(struct drm_connector *connector) { - struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = drm_to_adev(connector->dev); struct dm_compressor_info *compressor = &adev->dm.compressor; struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector); struct drm_display_mode *mode; @@ -852,8 +1086,8 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector) list_for_each_entry(mode, &connector->modes, head) { - if (max_size < mode->htotal * mode->vtotal) - max_size = mode->htotal * mode->vtotal; + if (max_size < (unsigned long) mode->htotal * mode->vtotal) + max_size = (unsigned long) mode->htotal * mode->vtotal; } if (max_size) { @@ -862,10 +1096,10 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector) &compressor->gpu_addr, &compressor->cpu_addr); if (r) - DRM_ERROR("DM: Failed to initialize FBC\n"); + drm_err(adev_to_drm(adev), "DM: Failed to initialize FBC\n"); else { adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr; - DRM_INFO("DM: FBC alloc %lu\n", max_size*4); + drm_info(adev_to_drm(adev), "DM: FBC alloc %lu\n", max_size*4); } } @@ -889,13 +1123,19 @@ static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port, drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (aconnector->audio_inst != port) continue; *enabled = true; + mutex_lock(&connector->eld_mutex); ret = drm_eld_size(connector->eld); memcpy(buf, connector->eld, min(max_bytes, ret)); + mutex_unlock(&connector->eld_mutex); 
break; } @@ -929,8 +1169,7 @@ static int amdgpu_dm_audio_component_bind(struct device *kdev, static void amdgpu_dm_audio_component_unbind(struct device *kdev, struct device *hda_kdev, void *data) { - struct drm_device *dev = dev_get_drvdata(kdev); - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev)); struct drm_audio_component *acomp = data; acomp->ops = NULL; @@ -1013,10 +1252,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) const struct firmware *dmub_fw = adev->dm.dmub_fw; struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; struct abm *abm = adev->dm.dc->res_pool->abm; + struct dc_context *ctx = adev->dm.dc->ctx; struct dmub_srv_hw_params hw_params; enum dmub_status status; const unsigned char *fw_inst_const, *fw_bss_data; - uint32_t i, fw_inst_const_size, fw_bss_data_size; + u32 i, fw_inst_const_size, fw_bss_data_size; bool has_hw_support; if (!dmub_srv) @@ -1024,31 +1264,35 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) return 0; if (!fb_info) { - DRM_ERROR("No framebuffer info for DMUB service.\n"); + drm_err(adev_to_drm(adev), "No framebuffer info for DMUB service.\n"); return -EINVAL; } if (!dmub_fw) { /* Firmware required for DMUB support. */ - DRM_ERROR("No firmware provided for DMUB.\n"); + drm_err(adev_to_drm(adev), "No firmware provided for DMUB.\n"); return -EINVAL; } + /* initialize register offsets for ASICs with runtime initialization available */ + if (dmub_srv->hw_funcs.init_reg_offsets) + dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx); + status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support); if (status != DMUB_STATUS_OK) { - DRM_ERROR("Error checking HW support for DMUB: %d\n", status); + drm_err(adev_to_drm(adev), "Error checking HW support for DMUB: %d\n", status); return -EINVAL; } if (!has_hw_support) { - DRM_INFO("DMUB unsupported on ASIC\n"); + drm_info(adev_to_drm(adev), "DMUB unsupported on ASIC\n"); return 0; } /* Reset DMCUB if it was previously running - before we overwrite its memory. */ status = dmub_srv_hw_reset(dmub_srv); if (status != DMUB_STATUS_OK) - DRM_WARN("Error resetting DMUB HW: %d\n", status); + drm_warn(adev_to_drm(adev), "Error resetting DMUB HW: %d\n", status); hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; @@ -1094,10 +1338,13 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0, fb_info->fb[DMUB_WINDOW_6_FW_STATE].size); + memset(fb_info->fb[DMUB_WINDOW_SHARED_STATE].cpu_addr, 0, + fb_info->fb[DMUB_WINDOW_SHARED_STATE].size); + /* Initialize hardware. 
*/ memset(&hw_params, 0, sizeof(hw_params)); hw_params.fb_base = adev->gmc.fb_start; - hw_params.fb_offset = adev->gmc.aper_base; + hw_params.fb_offset = adev->vm_manager.vram_base_offset; /* backdoor load firmware and trigger dmub running */ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) @@ -1109,9 +1356,13 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) for (i = 0; i < fb_info->num_fb; ++i) hw_params.fb[i] = &fb_info->fb[i]; - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): + case IP_VERSION(4, 0, 1): hw_params.dpia_supported = true; hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia; break; @@ -1119,16 +1370,27 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) break; } + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): + hw_params.ips_sequential_ono = adev->external_rev_id > 0x10; + hw_params.lower_hbr3_phy_ssc = true; + break; + default: + break; + } + status = dmub_srv_hw_init(dmub_srv, &hw_params); if (status != DMUB_STATUS_OK) { - DRM_ERROR("Error initializing DMUB HW: %d\n", status); + drm_err(adev_to_drm(adev), "Error initializing DMUB HW: %d\n", status); return -EINVAL; } /* Wait for firmware load to finish. */ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); if (status != DMUB_STATUS_OK) - DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); + drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status); /* Init DMCU and ABM if available. */ if (dmcu && abm) { @@ -1139,13 +1401,36 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) if (!adev->dm.dc->ctx->dmub_srv) adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv); if (!adev->dm.dc->ctx->dmub_srv) { - DRM_ERROR("Couldn't allocate DC DMUB server!\n"); + drm_err(adev_to_drm(adev), "Couldn't allocate DC DMUB server!\n"); return -ENOMEM; } - DRM_INFO("DMUB hardware initialized: version=0x%08X\n", + drm_info(adev_to_drm(adev), "DMUB hardware initialized: version=0x%08X\n", adev->dm.dmcub_fw_version); + /* Keeping sanity checks off if + * DCN31 >= 4.0.59.0 + * DCN314 >= 8.0.16.0 + * Otherwise, turn on sanity checks + */ + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + if (adev->dm.dmcub_fw_version && + adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) && + adev->dm.dmcub_fw_version < DMUB_FW_VERSION(4, 0, 59)) + adev->dm.dc->debug.sanity_checks = true; + break; + case IP_VERSION(3, 1, 4): + if (adev->dm.dmcub_fw_version && + adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) && + adev->dm.dmcub_fw_version < DMUB_FW_VERSION(8, 0, 16)) + adev->dm.dc->debug.sanity_checks = true; + break; + default: + break; + } + return 0; } @@ -1154,6 +1439,7 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev) struct dmub_srv *dmub_srv = adev->dm.dmub_srv; enum dmub_status status; bool init; + int r; if (!dmub_srv) { /* DMUB isn't supported on the ASIC. 
*/ @@ -1162,74 +1448,113 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev) status = dmub_srv_is_hw_init(dmub_srv, &init); if (status != DMUB_STATUS_OK) - DRM_WARN("DMUB hardware init check failed: %d\n", status); + drm_warn(adev_to_drm(adev), "DMUB hardware init check failed: %d\n", status); if (status == DMUB_STATUS_OK && init) { /* Wait for firmware load to finish. */ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); if (status != DMUB_STATUS_OK) - DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); + drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status); } else { /* Perform the full hardware initialization. */ - dm_dmub_hw_init(adev); + r = dm_dmub_hw_init(adev); + if (r) + drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r); } } static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config) { - uint64_t pt_base; - uint32_t logical_addr_low; - uint32_t logical_addr_high; - uint32_t agp_base, agp_bot, agp_top; + u64 pt_base; + u32 logical_addr_low; + u32 logical_addr_high; + u32 agp_base, agp_bot, agp_top; PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base; memset(pa_config, 0, sizeof(*pa_config)); - logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; - pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); - - if (adev->apu_flags & AMD_APU_IS_RAVEN2) - /* - * Raven2 has a HW issue that it is unable to use the vram which - * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the - * workaround that increase system aperture high address (add 1) - * to get rid of the VM fault and hardware hang. - */ - logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18); - else - logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18; - agp_base = 0; agp_bot = adev->gmc.agp_start >> 24; agp_top = adev->gmc.agp_end >> 24; + /* AGP aperture is disabled */ + if (agp_bot > agp_top) { + logical_addr_low = adev->gmc.fb_start >> 18; + if (adev->apu_flags & (AMD_APU_IS_RAVEN2 | + AMD_APU_IS_RENOIR | + AMD_APU_IS_GREEN_SARDINE)) + /* + * Raven2 has a HW issue that it is unable to use the vram which + * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the + * workaround that increase system aperture high address (add 1) + * to get rid of the VM fault and hardware hang. + */ + logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1; + else + logical_addr_high = adev->gmc.fb_end >> 18; + } else { + logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; + if (adev->apu_flags & (AMD_APU_IS_RAVEN2 | + AMD_APU_IS_RENOIR | + AMD_APU_IS_GREEN_SARDINE)) + /* + * Raven2 has a HW issue that it is unable to use the vram which + * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the + * workaround that increase system aperture high address (add 1) + * to get rid of the VM fault and hardware hang. 
+ */ + logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18); + else + logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18; + } + + pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); - page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF; - page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12); - page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF; - page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12); - page_table_base.high_part = upper_32_bits(pt_base) & 0xF; + page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >> + AMDGPU_GPU_PAGE_SHIFT); + page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >> + AMDGPU_GPU_PAGE_SHIFT); + page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >> + AMDGPU_GPU_PAGE_SHIFT); + page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >> + AMDGPU_GPU_PAGE_SHIFT); + page_table_base.high_part = upper_32_bits(pt_base); page_table_base.low_part = lower_32_bits(pt_base); pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18; pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18; - pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ; + pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24; pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24; pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24; pa_config->system_aperture.fb_base = adev->gmc.fb_start; - pa_config->system_aperture.fb_offset = adev->gmc.aper_base; + pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset; pa_config->system_aperture.fb_top = adev->gmc.fb_end; pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12; pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12; pa_config->gart_config.page_table_base_addr = page_table_base.quad_part; - pa_config->is_hvm_enabled = 0; + pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support; } +static void force_connector_state( + struct amdgpu_dm_connector *aconnector, + enum drm_connector_force force_state) +{ + struct drm_connector *connector = &aconnector->base; + + mutex_lock(&connector->dev->mode_config.mutex); + aconnector->base.force = force_state; + mutex_unlock(&connector->dev->mode_config.mutex); + + mutex_lock(&aconnector->hpd_lock); + drm_kms_helper_connector_hotplug_event(connector); + mutex_unlock(&aconnector->hpd_lock); +} + static void dm_handle_hpd_rx_offload_work(struct work_struct *work) { struct hpd_rx_irq_offload_work *offload_work; @@ -1238,21 +1563,24 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work) struct amdgpu_device *adev; enum dc_connection_type new_connection_type = dc_connection_none; unsigned long flags; + union test_response test_response; + + memset(&test_response, 0, sizeof(test_response)); offload_work = container_of(work, struct hpd_rx_irq_offload_work, work); aconnector = offload_work->offload_wq->aconnector; + adev = offload_work->adev; if (!aconnector) { - DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work"); + drm_err(adev_to_drm(adev), "Can't retrieve aconnector in hpd_rx_irq_offload_work"); goto skip; } - adev = drm_to_adev(aconnector->base.dev); dc_link = aconnector->dc_link; mutex_lock(&aconnector->hpd_lock); - if (!dc_link_detect_sink(dc_link, &new_connection_type)) - DRM_ERROR("KMS: Failed to detect connector\n"); + if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) + drm_err(adev_to_drm(adev), "KMS: Failed to detect 
connector\n"); mutex_unlock(&aconnector->hpd_lock); if (new_connection_type == dc_connection_none) @@ -1261,16 +1589,58 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work) if (amdgpu_in_reset(adev)) goto skip; + if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || + offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { + dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT); + spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); + offload_work->offload_wq->is_handling_mst_msg_rdy_event = false; + spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); + goto skip; + } + mutex_lock(&adev->dm.dc_lock); - if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) + if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) { dc_link_dp_handle_automated_test(dc_link); - else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && - hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) && + + if (aconnector->timing_changed) { + /* force connector disconnect and reconnect */ + force_connector_state(aconnector, DRM_FORCE_OFF); + msleep(100); + force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED); + } + + test_response.bits.ACK = 1; + + core_link_write_dpcd( + dc_link, + DP_TEST_RESPONSE, + &test_response.raw, + sizeof(test_response)); + } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && + dc_link_check_link_loss_status(dc_link, &offload_work->data) && dc_link_dp_allow_hpd_rx_irq(dc_link)) { - dc_link_dp_handle_link_loss(dc_link); + /* offload_work->data is from handle_hpd_rx_irq-> + * schedule_hpd_rx_offload_work.this is defer handle + * for hpd short pulse. upon here, link status may be + * changed, need get latest link status from dpcd + * registers. if link status is good, skip run link + * training again. 
+ */ + union hpd_irq_data irq_data; + + memset(&irq_data, 0, sizeof(irq_data)); + + /* before dc_link_dp_handle_link_loss, allow new link lost handle + * request be added to work queue if link lost at end of dc_link_ + * dp_handle_link_loss + */ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); offload_work->offload_wq->is_handling_link_loss = false; spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); + + if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) && + dc_link_check_link_loss_status(dc_link, &irq_data)) + dc_link_dp_handle_link_loss(dc_link); } mutex_unlock(&adev->dm.dc_lock); @@ -1279,8 +1649,9 @@ skip: } -static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc) +static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct amdgpu_device *adev) { + struct dc *dc = adev->dm.dc; int max_caps = dc->caps.max_links; int i = 0; struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL; @@ -1296,7 +1667,7 @@ static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq"); if (hpd_rx_offload_wq[i].wq == NULL) { - DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!"); + drm_err(adev_to_drm(adev), "create amdgpu_dm_hpd_rx_offload_wq fail!"); goto out_err; } @@ -1345,47 +1716,169 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev) return false; } -static const struct dmi_system_id hpd_disconnect_quirk_table[] = { - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"), - }, - }, - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"), - }, - }, - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), - }, - }, - {} -}; -static void retrieve_dmi_info(struct amdgpu_display_manager *dm) +void* +dm_allocate_gpu_mem( + struct amdgpu_device *adev, + enum dc_gpu_mem_alloc_type type, + size_t size, + long long *addr) +{ + struct dal_allocation *da; + u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ? 
+ AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM; + int ret; + + da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL); + if (!da) + return NULL; + + ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, + domain, &da->bo, + &da->gpu_addr, &da->cpu_ptr); + + *addr = da->gpu_addr; + + if (ret) { + kfree(da); + return NULL; + } + + /* add da to list in dm */ + list_add(&da->list, &adev->dm.da_list); + + return da->cpu_ptr; +} + +void +dm_free_gpu_mem( + struct amdgpu_device *adev, + enum dc_gpu_mem_alloc_type type, + void *pvMem) +{ + struct dal_allocation *da; + + /* walk the da list in DM */ + list_for_each_entry(da, &adev->dm.da_list, list) { + if (pvMem == da->cpu_ptr) { + amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); + list_del(&da->list); + kfree(da); + break; + } + } + +} + +static enum dmub_status +dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev, + enum dmub_gpint_command command_code, + uint16_t param, + uint32_t timeout_us) { - const struct dmi_system_id *dmi_id; + union dmub_gpint_data_register reg, test; + uint32_t i; + + /* Assume that VBIOS DMUB is ready to take commands */ + + reg.bits.status = 1; + reg.bits.command_code = command_code; + reg.bits.param = param; + + cgs_write_register(adev->dm.cgs_device, 0x34c0 + 0x01f8, reg.all); + + for (i = 0; i < timeout_us; ++i) { + udelay(1); + + /* Check if our GPINT got acked */ + reg.bits.status = 0; + test = (union dmub_gpint_data_register) + cgs_read_register(adev->dm.cgs_device, 0x34c0 + 0x01f8); + + if (test.all == reg.all) + return DMUB_STATUS_OK; + } + + return DMUB_STATUS_TIMEOUT; +} + +static void *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev) +{ + void *bb; + long long addr; + unsigned int bb_size; + int i = 0; + uint16_t chunk; + enum dmub_gpint_command send_addrs[] = { + DMUB_GPINT__SET_BB_ADDR_WORD0, + DMUB_GPINT__SET_BB_ADDR_WORD1, + DMUB_GPINT__SET_BB_ADDR_WORD2, + DMUB_GPINT__SET_BB_ADDR_WORD3, + }; + enum dmub_status ret; + + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(4, 0, 1): + bb_size = sizeof(struct dml2_soc_bb); + break; + default: + return NULL; + } + + bb = dm_allocate_gpu_mem(adev, + DC_MEM_ALLOC_TYPE_GART, + bb_size, + &addr); + if (!bb) + return NULL; + + for (i = 0; i < 4; i++) { + /* Extract 16-bit chunk */ + chunk = ((uint64_t) addr >> (i * 16)) & 0xFFFF; + /* Send the chunk */ + ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000); + if (ret != DMUB_STATUS_OK) + goto free_bb; + } + + /* Now ask DMUB to copy the bb */ + ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000); + if (ret != DMUB_STATUS_OK) + goto free_bb; - dm->aux_hpd_discon_quirk = false; + return bb; - dmi_id = dmi_first_match(hpd_disconnect_quirk_table); - if (dmi_id) { - dm->aux_hpd_discon_quirk = true; - DRM_INFO("aux_hpd_discon_quirk attached\n"); +free_bb: + dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, (void *) bb); + return NULL; + +} + +static enum dmub_ips_disable_type dm_get_default_ips_mode( + struct amdgpu_device *adev) +{ + enum dmub_ips_disable_type ret = DMUB_IPS_ENABLE; + + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 6, 0): + case IP_VERSION(3, 5, 1): + ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; + break; + default: + /* ASICs older than DCN35 do not have IPSs */ + if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0)) + ret = DMUB_IPS_DISABLE_ALL; + break; } + + return ret; } static int amdgpu_dm_init(struct amdgpu_device *adev) { struct dc_init_data 
init_data; -#ifdef CONFIG_DRM_AMD_DC_HDCP struct dc_callback_init init_params; -#endif int r; adev->dm.ddev = adev_to_drm(adev); @@ -1393,16 +1886,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) /* Zero all the fields */ memset(&init_data, 0, sizeof(init_data)); -#ifdef CONFIG_DRM_AMD_DC_HDCP memset(&init_params, 0, sizeof(init_params)); -#endif + mutex_init(&adev->dm.dpia_aux_lock); mutex_init(&adev->dm.dc_lock); mutex_init(&adev->dm.audio_lock); - spin_lock_init(&adev->dm.vblank_lock); - if(amdgpu_dm_irq_init(adev)) { - DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); + if (amdgpu_dm_irq_init(adev)) { + drm_err(adev_to_drm(adev), "failed to initialize DM IRQ support.\n"); goto error; } @@ -1419,18 +1910,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.driver = adev; - adev->dm.cgs_device = amdgpu_cgs_create_device(adev); - - if (!adev->dm.cgs_device) { - DRM_ERROR("amdgpu: failed to create cgs device.\n"); - goto error; - } - + /* cgs_device was created in dm_sw_init() */ init_data.cgs_device = adev->dm.cgs_device; init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(2, 1, 0): switch (adev->dm.dmcub_fw_version) { case 0: /* development */ @@ -1449,36 +1934,27 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) break; } - switch (adev->asic_type) { - case CHIP_CARRIZO: - case CHIP_STONEY: - init_data.flags.gpu_vm_support = true; - break; - default: - switch (adev->ip_versions[DCE_HWIP][0]) { - case IP_VERSION(1, 0, 0): - case IP_VERSION(1, 0, 1): - /* enable S/G on PCO and RV2 */ - if ((adev->apu_flags & AMD_APU_IS_RAVEN2) || - (adev->apu_flags & AMD_APU_IS_PICASSO)) - init_data.flags.gpu_vm_support = true; - break; - case IP_VERSION(2, 1, 0): - case IP_VERSION(3, 0, 1): - case IP_VERSION(3, 1, 2): - case IP_VERSION(3, 1, 3): - case IP_VERSION(3, 1, 5): - case IP_VERSION(3, 1, 6): - init_data.flags.gpu_vm_support = true; - break; - default: - break; - } - break; + /* APU support S/G display by default except: + * ASICs before Carrizo, + * RAVEN1 (Users reported stability issue) + */ + + if (adev->asic_type < CHIP_CARRIZO) { + init_data.flags.gpu_vm_support = false; + } else if (adev->asic_type == CHIP_RAVEN) { + if (adev->apu_flags & AMD_APU_IS_RAVEN) + init_data.flags.gpu_vm_support = false; + else + init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0); + } else { + if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3)) + init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1); + else + init_data.flags.gpu_vm_support = + (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU); } - if (init_data.flags.gpu_vm_support) - adev->mode_info.gpu_vm_support = true; + adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support; if (amdgpu_dc_feature_mask & DC_FBC_MASK) init_data.flags.fbc_support = true; @@ -1499,28 +1975,56 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.flags.seamless_boot_edp_requested = false; - if (check_seamless_boot_capability(adev)) { + if (amdgpu_device_seamless_boot_supported(adev)) { init_data.flags.seamless_boot_edp_requested = true; init_data.flags.allow_seamless_boot_optimization = true; - DRM_INFO("Seamless boot condition check passed\n"); + drm_dbg(adev->dm.ddev, "Seamless boot requested\n"); } init_data.flags.enable_mipi_converter_optimization = true; init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0]; init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0]; 
+ init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0]; + + if (amdgpu_dc_debug_mask & DC_DISABLE_IPS) + init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL; + else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS_DYNAMIC) + init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC; + else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS2_DYNAMIC) + init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; + else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE) + init_data.flags.disable_ips = DMUB_IPS_ENABLE; + else + init_data.flags.disable_ips = dm_get_default_ips_mode(adev); - INIT_LIST_HEAD(&adev->dm.da_list); + init_data.flags.disable_ips_in_vpb = 0; + + /* DCN35 and above supports dynamic DTBCLK switch */ + if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0)) + init_data.flags.allow_0_dtb_clk = true; + + /* Enable DWB for tested platforms only */ + if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) + init_data.num_virtual_links = 1; retrieve_dmi_info(&adev->dm); + if (adev->dm.edp0_on_dp1_quirk) + init_data.flags.support_edp0_on_dp1 = true; + + if (adev->dm.bb_from_dmub) + init_data.bb_from_dmub = adev->dm.bb_from_dmub; + else + init_data.bb_from_dmub = NULL; /* Display Core create. */ adev->dm.dc = dc_create(&init_data); if (adev->dm.dc) { - DRM_INFO("Display Core initialized with v%s!\n", DC_VER); + drm_info(adev_to_drm(adev), "Display Core v%s initialized on %s\n", DC_VER, + dce_version_to_string(adev->dm.dc->ctx->dce_version)); } else { - DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER); + drm_info(adev_to_drm(adev), "Display Core failed to initialize with v%s!\n", DC_VER); goto error; } @@ -1537,9 +2041,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) adev->dm.dc->debug.disable_stutter = true; - if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) { + if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) adev->dm.dc->debug.disable_dsc = true; - } if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING) adev->dm.dc->debug.disable_clock_gate = true; @@ -1547,22 +2050,44 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH) adev->dm.dc->debug.force_subvp_mclk_switch = true; + if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP_FAMS) { + adev->dm.dc->debug.force_disable_subvp = true; + adev->dm.dc->debug.fams2_config.bits.enable = false; + } + + if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) { + adev->dm.dc->debug.using_dml2 = true; + adev->dm.dc->debug.using_dml21 = true; + } + + if (amdgpu_dc_debug_mask & DC_HDCP_LC_FORCE_FW_ENABLE) + adev->dm.dc->debug.hdcp_lc_force_fw_enable = true; + + if (amdgpu_dc_debug_mask & DC_HDCP_LC_ENABLE_SW_FALLBACK) + adev->dm.dc->debug.hdcp_lc_enable_sw_fallback = true; + + if (amdgpu_dc_debug_mask & DC_SKIP_DETECTION_LT) + adev->dm.dc->debug.skip_detection_link_training = true; + adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm; /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ adev->dm.dc->debug.ignore_cable_id = true; + if (adev->dm.dc->caps.dp_hdmi21_pcon_support) + drm_info(adev_to_drm(adev), "DP-HDMI FRL PCON supported\n"); + r = dm_dmub_hw_init(adev); if (r) { - DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); + drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r); goto error; } dc_hardware_init(adev->dm.dc); - adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc); + adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev); 
if (!adev->dm.hpd_rx_offload_wq) { - DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n"); + drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n"); goto error; } @@ -1577,10 +2102,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); if (!adev->dm.freesync_module) { - DRM_ERROR( - "amdgpu: failed to initialize freesync_module.\n"); + drm_err(adev_to_drm(adev), + "failed to initialize freesync_module.\n"); } else - DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n", + drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n", adev->dm.freesync_module); amdgpu_dm_init_color_mod(); @@ -1589,67 +2114,71 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->dm.vblank_control_workqueue = create_singlethread_workqueue("dm_vblank_control_workqueue"); if (!adev->dm.vblank_control_workqueue) - DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); + drm_err(adev_to_drm(adev), "failed to initialize vblank_workqueue.\n"); } -#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->dm.dc->caps.ips_support && + adev->dm.dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) + adev->dm.idle_workqueue = idle_create_workqueue(adev); + if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) { adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); if (!adev->dm.hdcp_workqueue) - DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); + drm_err(adev_to_drm(adev), "failed to initialize hdcp_workqueue.\n"); else - DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); + drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); dc_init_callbacks(adev->dm.dc, &init_params); } -#endif -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work(); -#endif if (dc_is_dmub_outbox_supported(adev->dm.dc)) { init_completion(&adev->dm.dmub_aux_transfer_done); adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); if (!adev->dm.dmub_notify) { - DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify"); + drm_info(adev_to_drm(adev), "fail to allocate adev->dm.dmub_notify"); goto error; } adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); if (!adev->dm.delayed_hpd_wq) { - DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n"); + drm_err(adev_to_drm(adev), "failed to create hpd offload workqueue.\n"); goto error; } amdgpu_dm_outbox_init(adev); if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, dmub_aux_setconfig_callback, false)) { - DRM_ERROR("amdgpu: fail to register dmub aux callback"); + drm_err(adev_to_drm(adev), "fail to register dmub aux callback"); goto error; } - if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { - DRM_ERROR("amdgpu: fail to register dmub hpd callback"); - goto error; - } - if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) { - DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + + for (size_t i = 0; i < ARRAY_SIZE(adev->dm.fused_io); i++) + init_completion(&adev->dm.fused_io[i].replied); + + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO, + dmub_aux_fused_io_callback, false)) { + drm_err(adev_to_drm(adev), "fail to register dmub fused io callback"); goto error; } + /* Enable outbox notification only after IRQ handlers 
are registered and DMUB is alive. + * It is expected that DMUB will resend any pending notifications at this point. Note + * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to + * align with the legacy interface initialization sequence. Connection status will be proactively + * detected once in the amdgpu_dm_initialize_drm_device. + */ + dc_enable_dmub_outbox(adev->dm.dc); + + /* DPIA trace goes to dmesg logs only if outbox is enabled */ + if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE) + dc_dmub_srv_enable_dpia_trace(adev->dm.dc); } if (amdgpu_dm_initialize_drm_device(adev)) { - DRM_ERROR( - "amdgpu: failed to initialize sw for display support.\n"); + drm_err(adev_to_drm(adev), + "failed to initialize sw for display support.\n"); goto error; } - /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. - * It is expected that DMUB will resend any pending notifications at this point, for - * example HPD from DPIA. - */ - if (dc_is_dmub_outbox_supported(adev->dm.dc)) - dc_enable_dmub_outbox(adev->dm.dc); - /* create fake encoders for MST */ dm_dp_create_fake_mst_encoders(adev); @@ -1660,13 +2189,22 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { - DRM_ERROR( - "amdgpu: failed to initialize sw for display support.\n"); + drm_err(adev_to_drm(adev), + "failed to initialize vblank for display support.\n"); goto error; } +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + amdgpu_dm_crtc_secure_display_create_contexts(adev); + if (!adev->dm.secure_display_ctx.crtc_ctx) + drm_err(adev_to_drm(adev), "failed to initialize secure display contexts.\n"); + + if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1)) + adev->dm.secure_display_ctx.support_mul_roi = true; + +#endif - DRM_DEBUG_DRIVER("KMS initialized.\n"); + drm_dbg_driver(adev_to_drm(adev), "KMS initialized.\n"); return 0; error: @@ -1675,9 +2213,9 @@ error: return -EINVAL; } -static int amdgpu_dm_early_fini(void *handle) +static int amdgpu_dm_early_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_dm_audio_fini(adev); @@ -1693,36 +2231,44 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) adev->dm.vblank_control_workqueue = NULL; } - for (i = 0; i < adev->dm.display_indexes_num; i++) { - drm_encoder_cleanup(&adev->dm.mst_encoders[i].base); + if (adev->dm.idle_workqueue) { + if (adev->dm.idle_workqueue->running) { + adev->dm.idle_workqueue->enable = false; + flush_work(&adev->dm.idle_workqueue->work); + } + + kfree(adev->dm.idle_workqueue); + adev->dm.idle_workqueue = NULL; } amdgpu_dm_destroy_drm_device(&adev->dm); #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - if (adev->dm.crc_rd_wrk) { - flush_work(&adev->dm.crc_rd_wrk->notify_ta_work); - kfree(adev->dm.crc_rd_wrk); - adev->dm.crc_rd_wrk = NULL; + if (adev->dm.secure_display_ctx.crtc_ctx) { + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (adev->dm.secure_display_ctx.crtc_ctx[i].crtc) { + flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].notify_ta_work); + flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].forward_roi_work); + } + } + kfree(adev->dm.secure_display_ctx.crtc_ctx); + adev->dm.secure_display_ctx.crtc_ctx = NULL; } #endif -#ifdef CONFIG_DRM_AMD_DC_HDCP if (adev->dm.hdcp_workqueue) { hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
adev->dm.hdcp_workqueue = NULL; } - if (adev->dm.dc) + if (adev->dm.dc) { dc_deinit_callbacks(adev->dm.dc); -#endif - - dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); - - if (dc_enable_dmub_notifications(adev->dm.dc)) { - kfree(adev->dm.dmub_notify); - adev->dm.dmub_notify = NULL; - destroy_workqueue(adev->dm.delayed_hpd_wq); - adev->dm.delayed_hpd_wq = NULL; + dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); + if (dc_enable_dmub_notifications(adev->dm.dc)) { + kfree(adev->dm.dmub_notify); + adev->dm.dmub_notify = NULL; + destroy_workqueue(adev->dm.delayed_hpd_wq); + adev->dm.delayed_hpd_wq = NULL; + } } if (adev->dm.dmub_bo) @@ -1730,7 +2276,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) &adev->dm.dmub_bo_gpu_addr, &adev->dm.dmub_bo_cpu_addr); - if (adev->dm.hpd_rx_offload_wq) { + if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) { for (i = 0; i < adev->dm.dc->caps.max_links; i++) { if (adev->dm.hpd_rx_offload_wq[i].wq) { destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); @@ -1762,8 +2308,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) mutex_destroy(&adev->dm.audio_lock); mutex_destroy(&adev->dm.dc_lock); - - return; + mutex_destroy(&adev->dm.dpia_aux_lock); } static int load_dmcu_fw(struct amdgpu_device *adev) @@ -1772,7 +2317,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev) int r; const struct dmcu_firmware_header_v1_0 *hdr; - switch(adev->asic_type) { + switch (adev->asic_type) { #if defined(CONFIG_DRM_AMD_DC_SI) case CHIP_TAHITI: case CHIP_PITCAIRN: @@ -1808,7 +2353,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev) return 0; break; default: - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(2, 0, 2): case IP_VERSION(2, 0, 3): case IP_VERSION(2, 0, 0): @@ -1824,11 +2369,15 @@ static int load_dmcu_fw(struct amdgpu_device *adev) case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): + case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): + case IP_VERSION(4, 0, 1): return 0; default: break; } - DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); + drm_err(adev_to_drm(adev), "Unsupported ASIC type: 0x%X\n", adev->asic_type); return -EINVAL; } @@ -1837,25 +2386,18 @@ static int load_dmcu_fw(struct amdgpu_device *adev) return 0; } - r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev); - if (r == -ENOENT) { + r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, AMDGPU_UCODE_REQUIRED, + "%s", fw_name_dmcu); + if (r == -ENODEV) { /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */ DRM_DEBUG_KMS("dm: DMCU firmware not found\n"); adev->dm.fw_dmcu = NULL; return 0; } if (r) { - dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n", - fw_name_dmcu); - return r; - } - - r = amdgpu_ucode_validate(adev->dm.fw_dmcu); - if (r) { - dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n", + drm_err(adev_to_drm(adev), "amdgpu_dm: Can't validate firmware \"%s\"\n", fw_name_dmcu); - release_firmware(adev->dm.fw_dmcu); - adev->dm.fw_dmcu = NULL; + amdgpu_ucode_release(&adev->dm.fw_dmcu); return r; } @@ -1897,85 +2439,77 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) struct dmub_srv_create_params create_params; struct dmub_srv_region_params region_params; struct dmub_srv_region_info region_info; - struct dmub_srv_fb_params fb_params; + struct dmub_srv_memory_params memory_params; struct dmub_srv_fb_info *fb_info; struct dmub_srv *dmub_srv; const struct dmcub_firmware_header_v1_0 
*hdr; - const char *fw_name_dmub; enum dmub_asic dmub_asic; enum dmub_status status; + static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = { + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_0_INST_CONST + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_1_STACK + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_2_BSS_DATA + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_3_VBIOS + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_4_MAILBOX + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_IB_MEM + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE + }; int r; - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(2, 1, 0): dmub_asic = DMUB_ASIC_DCN21; - fw_name_dmub = FIRMWARE_RENOIR_DMUB; - if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) - fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; break; case IP_VERSION(3, 0, 0): - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) { - dmub_asic = DMUB_ASIC_DCN30; - fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; - } else { - dmub_asic = DMUB_ASIC_DCN30; - fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; - } + dmub_asic = DMUB_ASIC_DCN30; break; case IP_VERSION(3, 0, 1): dmub_asic = DMUB_ASIC_DCN301; - fw_name_dmub = FIRMWARE_VANGOGH_DMUB; break; case IP_VERSION(3, 0, 2): dmub_asic = DMUB_ASIC_DCN302; - fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; break; case IP_VERSION(3, 0, 3): dmub_asic = DMUB_ASIC_DCN303; - fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; break; case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; - fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; break; case IP_VERSION(3, 1, 4): dmub_asic = DMUB_ASIC_DCN314; - fw_name_dmub = FIRMWARE_DCN_314_DMUB; break; case IP_VERSION(3, 1, 5): dmub_asic = DMUB_ASIC_DCN315; - fw_name_dmub = FIRMWARE_DCN_315_DMUB; break; case IP_VERSION(3, 1, 6): dmub_asic = DMUB_ASIC_DCN316; - fw_name_dmub = FIRMWARE_DCN316_DMUB; break; case IP_VERSION(3, 2, 0): dmub_asic = DMUB_ASIC_DCN32; - fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; break; case IP_VERSION(3, 2, 1): dmub_asic = DMUB_ASIC_DCN321; - fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; break; + case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): + dmub_asic = DMUB_ASIC_DCN35; + break; + case IP_VERSION(3, 6, 0): + dmub_asic = DMUB_ASIC_DCN36; + break; + case IP_VERSION(4, 0, 1): + dmub_asic = DMUB_ASIC_DCN401; + break; + default: /* ASIC doesn't support DMUB. 
*/ return 0; } - r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev); - if (r) { - DRM_ERROR("DMUB firmware loading failed: %d\n", r); - return 0; - } - - r = amdgpu_ucode_validate(adev->dm.dmub_fw); - if (r) { - DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r); - return 0; - } - hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); @@ -1987,7 +2521,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) adev->firmware.fw_size += ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); - DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", + drm_info(adev_to_drm(adev), "Loading DMUB firmware via PSP: version=0x%08X\n", adev->dm.dmcub_fw_version); } @@ -1996,7 +2530,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) dmub_srv = adev->dm.dmub_srv; if (!dmub_srv) { - DRM_ERROR("Failed to allocate DMUB service!\n"); + drm_err(adev_to_drm(adev), "Failed to allocate DMUB service!\n"); return -ENOMEM; } @@ -2009,7 +2543,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) /* Create the DMUB service. */ status = dmub_srv_create(dmub_srv, &create_params); if (status != DMUB_STATUS_OK) { - DRM_ERROR("Error creating DMUB service: %d\n", status); + drm_err(adev_to_drm(adev), "Error creating DMUB service: %d\n", status); return -EINVAL; } @@ -2028,12 +2562,13 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) adev->dm.dmub_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + PSP_HEADER_BYTES; + region_params.window_memory_type = window_memory_type; status = dmub_srv_calc_region_info(dmub_srv, ®ion_params, ®ion_info); if (status != DMUB_STATUS_OK) { - DRM_ERROR("Error calculating DMUB region info: %d\n", status); + drm_err(adev_to_drm(adev), "Error calculating DMUB region info: %d\n", status); return -EINVAL; } @@ -2042,42 +2577,57 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) * TODO: Move this into GART. */ r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, + &adev->dm.dmub_bo, &adev->dm.dmub_bo_gpu_addr, &adev->dm.dmub_bo_cpu_addr); if (r) return r; /* Rebase the regions on the framebuffer address. 
*/ - memset(&fb_params, 0, sizeof(fb_params)); - fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr; - fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr; - fb_params.region_info = ®ion_info; + memset(&memory_params, 0, sizeof(memory_params)); + memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr; + memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr; + memory_params.region_info = ®ion_info; + memory_params.window_memory_type = window_memory_type; adev->dm.dmub_fb_info = kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL); fb_info = adev->dm.dmub_fb_info; if (!fb_info) { - DRM_ERROR( + drm_err(adev_to_drm(adev), "Failed to allocate framebuffer info for DMUB service!\n"); return -ENOMEM; } - status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info); + status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info); if (status != DMUB_STATUS_OK) { - DRM_ERROR("Error calculating DMUB FB info: %d\n", status); + drm_err(adev_to_drm(adev), "Error calculating DMUB FB info: %d\n", status); return -EINVAL; } + adev->dm.bb_from_dmub = dm_dmub_get_vbios_bounding_box(adev); + return 0; } -static int dm_sw_init(void *handle) +static int dm_sw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; int r; + adev->dm.cgs_device = amdgpu_cgs_create_device(adev); + + if (!adev->dm.cgs_device) { + drm_err(adev_to_drm(adev), "failed to create cgs device.\n"); + return -EINVAL; + } + + /* Moved from dm init since we need to use allocations for storing bounding box data */ + INIT_LIST_HEAD(&adev->dm.da_list); + r = dm_dmub_sw_init(adev); if (r) return r; @@ -2085,23 +2635,33 @@ static int dm_sw_init(void *handle) return load_dmcu_fw(adev); } -static int dm_sw_fini(void *handle) +static int dm_sw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; + struct dal_allocation *da; + + list_for_each_entry(da, &adev->dm.da_list, list) { + if (adev->dm.bb_from_dmub == (void *) da->cpu_ptr) { + amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); + list_del(&da->list); + kfree(da); + adev->dm.bb_from_dmub = NULL; + break; + } + } + kfree(adev->dm.dmub_fb_info); adev->dm.dmub_fb_info = NULL; if (adev->dm.dmub_srv) { dmub_srv_destroy(adev->dm.dmub_srv); + kfree(adev->dm.dmub_srv); adev->dm.dmub_srv = NULL; } - release_firmware(adev->dm.dmub_fw); - adev->dm.dmub_fw = NULL; - - release_firmware(adev->dm.fw_dmcu); - adev->dm.fw_dmcu = NULL; + amdgpu_ucode_release(&adev->dm.dmub_fw); + amdgpu_ucode_release(&adev->dm.fw_dmcu); return 0; } @@ -2115,18 +2675,24 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev) drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type == dc_connection_mst_branch && aconnector->mst_mgr.aux) { - DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n", + drm_dbg_kms(dev, "DM_MST: starting TM on aconnector: %p [id: %d]\n", aconnector, aconnector->base.base.id); ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); if (ret < 0) { - DRM_ERROR("DM_MST: Failed to start MST\n"); + drm_err(dev, "DM_MST: Failed to start MST\n"); aconnector->dc_link->type = dc_connection_single; + ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, + aconnector->dc_link); 
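Backing up to the DMUB memory setup earlier in this hunk: dmub_srv_calc_region_info() lays the DMUB windows out as offsets within a single allocation, and dmub_srv_calc_mem_info() then rebases every window onto the CPU and GPU addresses of the buffer object created above. The sketch below models that rebasing step with simplified stand-in structures; it is illustrative only and does not use the real dmub_srv types.

#include <stdint.h>
#include <stddef.h>

#define SKETCH_NUM_WINDOWS 8    /* inst const, stack, bss, vbios, mailbox, ... */

struct sketch_region  { uint32_t offset; uint32_t size; };
struct sketch_fb_slot { void *cpu_addr; uint64_t gpu_addr; uint32_t size; };

static void sketch_rebase_windows(const struct sketch_region regions[SKETCH_NUM_WINDOWS],
                                  void *bo_cpu_base, uint64_t bo_gpu_base,
                                  struct sketch_fb_slot out[SKETCH_NUM_WINDOWS])
{
        for (size_t i = 0; i < SKETCH_NUM_WINDOWS; i++) {
                /* Each window keeps its size; its addresses become base + offset. */
                out[i].cpu_addr = (uint8_t *)bo_cpu_base + regions[i].offset;
                out[i].gpu_addr = bo_gpu_base + regions[i].offset;
                out[i].size     = regions[i].size;
        }
}

Splitting "compute layout" from "bind to a buffer" is what lets the driver size the BO first (VRAM or GTT, per the window_memory_type table) and only then hand concrete addresses to the DMUB service.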
break; } } @@ -2136,9 +2702,9 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev) return ret; } -static int dm_late_init(void *handle) +static int dm_late_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; struct dmcu_iram_parameters params; unsigned int linear_lut[16]; @@ -2162,9 +2728,9 @@ static int dm_late_init(void *handle) */ params.min_abm_backlight = 0x28F; /* In the case where abm is implemented on dmcub, - * dmcu object will be null. - * ABM 2.4 and up are implemented on dmcub. - */ + * dmcu object will be null. + * ABM 2.4 and up are implemented on dmcub. + */ if (dmcu) { if (!dmcu_load_iram(dmcu, params)) return -EINVAL; @@ -2172,7 +2738,7 @@ static int dm_late_init(void *handle) struct dc_link *edp_links[MAX_NUM_EDP]; int edp_num; - get_edp_links(adev->dm.dc, edp_links, &edp_num); + dc_get_edp_links(adev->dm.dc, edp_links, &edp_num); for (i = 0; i < edp_num; i++) { if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i)) return -EINVAL; @@ -2182,20 +2748,115 @@ static int dm_late_init(void *handle) return detect_mst_link_for_all_connectors(adev_to_drm(adev)); } +static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr) +{ + u8 buf[UUID_SIZE]; + guid_t guid; + int ret; + + mutex_lock(&mgr->lock); + if (!mgr->mst_primary) + goto out_fail; + + if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) { + drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); + goto out_fail; + } + + ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | + DP_UP_REQ_EN | + DP_UPSTREAM_IS_SRC); + if (ret < 0) { + drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); + goto out_fail; + } + + /* Some hubs forget their guids after they resume */ + ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf)); + if (ret != sizeof(buf)) { + drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); + goto out_fail; + } + + import_guid(&guid, buf); + + if (guid_is_null(&guid)) { + guid_gen(&guid); + export_guid(buf, &guid); + + ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, buf, sizeof(buf)); + + if (ret != sizeof(buf)) { + drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n"); + goto out_fail; + } + } + + guid_copy(&mgr->mst_primary->guid, &guid); + +out_fail: + mutex_unlock(&mgr->lock); +} + +void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector) +{ + struct cec_notifier *n = aconnector->notifier; + + if (!n) + return; + + cec_notifier_phys_addr_invalidate(n); +} + +void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector) +{ + struct drm_connector *connector = &aconnector->base; + struct cec_notifier *n = aconnector->notifier; + + if (!n) + return; + + cec_notifier_set_phys_addr(n, + connector->display_info.source_physical_address); +} + +static void s3_handle_hdmi_cec(struct drm_device *ddev, bool suspend) +{ + struct amdgpu_dm_connector *aconnector; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + + drm_connector_list_iter_begin(ddev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + aconnector = to_amdgpu_dm_connector(connector); + if (suspend) + hdmi_cec_unset_edid(aconnector); + else + hdmi_cec_set_edid(aconnector); + } + drm_connector_list_iter_end(&conn_iter); +} + static void s3_handle_mst(struct drm_device *dev, bool suspend) { struct 
amdgpu_dm_connector *aconnector; struct drm_connector *connector; struct drm_connector_list_iter iter; struct drm_dp_mst_topology_mgr *mgr; - int ret; - bool need_hotplug = false; drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type != dc_connection_mst_branch || - aconnector->mst_port) + aconnector->mst_root) continue; mgr = &aconnector->mst_mgr; @@ -2203,18 +2864,23 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) if (suspend) { drm_dp_mst_topology_mgr_suspend(mgr); } else { - ret = drm_dp_mst_topology_mgr_resume(mgr, true); - if (ret < 0) { - dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, - aconnector->dc_link); - need_hotplug = true; - } + /* if extended timeout is supported in hardware, + * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer + * CTS 4.2.1.1 regression introduced by CTS specs requirement update. + */ + try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); + if (!dp_is_lttpr_present(aconnector->dc_link)) + try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); + + /* TODO: move resume_mst_branch_status() into drm mst resume again + * once topology probing work is pulled out from mst resume into mst + * resume 2nd step. mst resume 2nd step should be called after old + * state getting restored (i.e. drm_atomic_helper_resume()). + */ + resume_mst_branch_status(mgr); } } drm_connector_list_iter_end(&iter); - - if (need_hotplug) - drm_kms_helper_hotplug_event(dev); } static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) @@ -2251,7 +2917,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) * therefore, this function apply to navi10/12/14 but not Renoir * * */ - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(2, 0, 2): case IP_VERSION(2, 0, 0): break; @@ -2261,16 +2927,43 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) ret = amdgpu_dpm_write_watermarks_table(adev); if (ret) { - DRM_ERROR("Failed to update WMTABLE!\n"); + drm_err(adev_to_drm(adev), "Failed to update WMTABLE!\n"); return ret; } return 0; } +static int dm_oem_i2c_hw_init(struct amdgpu_device *adev) +{ + struct amdgpu_display_manager *dm = &adev->dm; + struct amdgpu_i2c_adapter *oem_i2c; + struct ddc_service *oem_ddc_service; + int r; + + oem_ddc_service = dc_get_oem_i2c_device(adev->dm.dc); + if (oem_ddc_service) { + oem_i2c = create_i2c(oem_ddc_service, true); + if (!oem_i2c) { + drm_info(adev_to_drm(adev), "Failed to create oem i2c adapter data\n"); + return -ENOMEM; + } + + r = devm_i2c_add_adapter(adev->dev, &oem_i2c->base); + if (r) { + drm_info(adev_to_drm(adev), "Failed to register oem i2c\n"); + kfree(oem_i2c); + return r; + } + dm->oem_i2c = oem_i2c; + } + + return 0; +} + /** * dm_hw_init() - Initialize DC device - * @handle: The base driver device containing the amdgpu_dm device. + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the &struct amdgpu_display_manager device. This involves calling * the initializers of each DM component, then populating the struct with them. 
@@ -2288,27 +2981,35 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) * - Vblank support * - Debug FS entries, if enabled */ -static int dm_hw_init(void *handle) +static int dm_hw_init(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; + int r; + /* Create DAL display manager */ - amdgpu_dm_init(adev); + r = amdgpu_dm_init(adev); + if (r) + return r; amdgpu_dm_hpd_init(adev); + r = dm_oem_i2c_hw_init(adev); + if (r) + drm_info(adev_to_drm(adev), "Failed to add OEM i2c bus\n"); + return 0; } /** * dm_hw_fini() - Teardown DC device - * @handle: The base driver device containing the amdgpu_dm device. + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Teardown components within &struct amdgpu_display_manager that require * cleanup. This involves cleaning up the DRM device, DC, and any modules that * were loaded. Also flush IRQ workqueues and disable them. */ -static int dm_hw_fini(void *handle) +static int dm_hw_fini(struct amdgpu_ip_block *ip_block) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = ip_block->adev; amdgpu_dm_hpd_fini(adev); @@ -2333,40 +3034,51 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, if (acrtc && state->stream_status[i].plane_count != 0) { irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; - DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n", - acrtc->crtc_id, enable ? "en" : "dis", rc); if (rc) - DRM_WARN("Failed to %s pflip interrupts\n", + drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n", enable ? "enable" : "disable"); - if (enable) { - rc = dm_enable_vblank(&acrtc->base); + if (dc_supports_vrr(adev->dm.dc->ctx->dce_version)) { + if (enable) { + if (amdgpu_dm_crtc_vrr_active( + to_dm_crtc_state(acrtc->base.state))) + rc = amdgpu_dm_crtc_set_vupdate_irq( + &acrtc->base, true); + } else + rc = amdgpu_dm_crtc_set_vupdate_irq( + &acrtc->base, false); + if (rc) - DRM_WARN("Failed to enable vblank interrupts\n"); - } else { - dm_disable_vblank(&acrtc->base); + drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", + enable ? "en" : "dis"); } + irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; + /* During gpu-reset we disable and then enable vblank irq, so + * don't use amdgpu_irq_get/put() to avoid refcount change. + */ + if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) + drm_warn(adev_to_drm(adev), "Failed to %sable vblank interrupt\n", enable ? 
"en" : "dis"); } } } +DEFINE_FREE(state_release, struct dc_state *, if (_T) dc_state_release(_T)) + static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) { - struct dc_state *context = NULL; - enum dc_status res = DC_ERROR_UNEXPECTED; + struct dc_state *context __free(state_release) = NULL; int i; struct dc_stream_state *del_streams[MAX_PIPES]; int del_streams_count = 0; + struct dc_commit_streams_params params = {}; memset(del_streams, 0, sizeof(del_streams)); - context = dc_create_state(dc); + context = dc_state_create_current_copy(dc); if (context == NULL) - goto context_alloc_fail; - - dc_resource_state_copy_construct_current(dc, context); + return DC_ERROR_UNEXPECTED; /* First remove from context all streams */ for (i = 0; i < context->stream_count; i++) { @@ -2377,23 +3089,20 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) /* Remove all planes for removed streams and then remove the streams */ for (i = 0; i < del_streams_count; i++) { - if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { - res = DC_FAIL_DETACH_SURFACES; - goto fail; - } + enum dc_status res; + + if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) + return DC_FAIL_DETACH_SURFACES; - res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); + res = dc_state_remove_stream(dc, context, del_streams[i]); if (res != DC_OK) - goto fail; + return res; } - res = dc_commit_state(dc, context); - -fail: - dc_release_state(context); + params.streams = context->streams; + params.stream_count = context->stream_count; -context_alloc_fail: - return res; + return dc_commit_streams(dc, ¶ms); } static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) @@ -2406,32 +3115,108 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) } } -static int dm_suspend(void *handle) +static int dm_cache_state(struct amdgpu_device *adev) +{ + int r; + + adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); + if (IS_ERR(adev->dm.cached_state)) { + r = PTR_ERR(adev->dm.cached_state); + adev->dm.cached_state = NULL; + } + + return adev->dm.cached_state ? 0 : r; +} + +static void dm_destroy_cached_state(struct amdgpu_device *adev) { - struct amdgpu_device *adev = handle; struct amdgpu_display_manager *dm = &adev->dm; - int ret = 0; + struct drm_device *ddev = adev_to_drm(adev); + struct dm_plane_state *dm_new_plane_state; + struct drm_plane_state *new_plane_state; + struct dm_crtc_state *dm_new_crtc_state; + struct drm_crtc_state *new_crtc_state; + struct drm_plane *plane; + struct drm_crtc *crtc; + int i; + + if (!dm->cached_state) + return; + + /* Force mode set in atomic commit */ + for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { + new_crtc_state->active_changed = true; + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + reset_freesync_config_for_crtc(dm_new_crtc_state); + } + + /* + * atomic_check is expected to create the dc states. We need to release + * them here, since they were duplicated as part of the suspend + * procedure. 
+ */ + for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + if (dm_new_crtc_state->stream) { + WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); + dc_stream_release(dm_new_crtc_state->stream); + dm_new_crtc_state->stream = NULL; + } + dm_new_crtc_state->base.color_mgmt_changed = true; + } + + for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { + dm_new_plane_state = to_dm_plane_state(new_plane_state); + if (dm_new_plane_state->dc_state) { + WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); + dc_plane_state_release(dm_new_plane_state->dc_state); + dm_new_plane_state->dc_state = NULL; + } + } + + drm_atomic_helper_resume(ddev, dm->cached_state); + + dm->cached_state = NULL; +} + +static int dm_suspend(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_display_manager *dm = &adev->dm; if (amdgpu_in_reset(adev)) { + enum dc_status res; + mutex_lock(&dm->dc_lock); dc_allow_idle_optimizations(adev->dm.dc, false); - dm->cached_dc_state = dc_copy_state(dm->dc->current_state); + dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state); - dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); + if (dm->cached_dc_state) + dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); - amdgpu_dm_commit_zero_streams(dm->dc); + res = amdgpu_dm_commit_zero_streams(dm->dc); + if (res != DC_OK) { + drm_err(adev_to_drm(adev), "Failed to commit zero streams: %d\n", res); + return -EINVAL; + } amdgpu_dm_irq_suspend(adev); hpd_rx_irq_work_suspend(dm); - return ret; + return 0; } - WARN_ON(adev->dm.cached_state); - adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); + if (!adev->dm.cached_state) { + int r = dm_cache_state(adev); + + if (r) + return r; + } + + s3_handle_hdmi_cec(adev_to_drm(adev), true); s3_handle_mst(adev_to_drm(adev), true); @@ -2441,14 +3226,19 @@ static int dm_suspend(void *handle) dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); + if (dm->dc->caps.ips_support && adev->in_s0ix) + dc_allow_idle_optimizations(dm->dc, true); + + dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3); + return 0; } -struct amdgpu_dm_connector * +struct drm_connector * amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, struct drm_crtc *crtc) { - uint32_t i; + u32 i; struct drm_connector_state *new_con_state; struct drm_connector *connector; struct drm_crtc *crtc_from_state; @@ -2457,7 +3247,7 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, crtc_from_state = new_con_state->crtc; if (crtc_from_state == crtc) - return to_amdgpu_dm_connector(connector); + return connector; } return NULL; @@ -2469,6 +3259,7 @@ static void emulated_link_detect(struct dc_link *link) struct display_sink_capability sink_caps = { 0 }; enum dc_edid_status edid_status; struct dc_context *dc_ctx = link->ctx; + struct drm_device *dev = adev_to_drm(dc_ctx->driver_context); struct dc_sink *sink = NULL; struct dc_sink *prev_sink = NULL; @@ -2518,7 +3309,7 @@ static void emulated_link_detect(struct dc_link *link) } default: - DC_ERROR("Invalid connector type! signal:%d\n", + drm_err(dev, "Invalid connector type! 
signal:%d\n", link->connector_signal); return; } @@ -2528,7 +3319,7 @@ static void emulated_link_detect(struct dc_link *link) sink = dc_sink_create(&sink_init_data); if (!sink) { - DC_ERROR("Failed to create sink!\n"); + drm_err(dev, "Failed to create sink!\n"); return; } @@ -2541,7 +3332,7 @@ static void emulated_link_detect(struct dc_link *link) sink); if (edid_status != EDID_OK) - DC_ERROR("Failed to read EDID"); + drm_err(dev, "Failed to read EDID\n"); } @@ -2554,55 +3345,131 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state, struct dc_scaling_info scaling_infos[MAX_SURFACES]; struct dc_flip_addrs flip_addrs[MAX_SURFACES]; struct dc_stream_update stream_update; - } * bundle; + } *bundle __free(kfree); int k, m; bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); if (!bundle) { - dm_error("Failed to allocate update bundle\n"); - goto cleanup; + drm_err(dm->ddev, "Failed to allocate update bundle\n"); + return; } for (k = 0; k < dc_state->stream_count; k++) { bundle->stream_update.stream = dc_state->streams[k]; - for (m = 0; m < dc_state->stream_status->plane_count; m++) { + for (m = 0; m < dc_state->stream_status[k].plane_count; m++) { bundle->surface_updates[m].surface = - dc_state->stream_status->plane_states[m]; + dc_state->stream_status[k].plane_states[m]; bundle->surface_updates[m].surface->force_full_update = true; } - dc_commit_updates_for_stream( - dm->dc, bundle->surface_updates, - dc_state->stream_status->plane_count, - dc_state->streams[k], &bundle->stream_update, dc_state); + + update_planes_and_stream_adapter(dm->dc, + UPDATE_TYPE_FULL, + dc_state->stream_status[k].plane_count, + dc_state->streams[k], + &bundle->stream_update, + bundle->surface_updates); } +} -cleanup: - kfree(bundle); +static void apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev, + struct dc_sink *sink) +{ + struct dc_panel_patch *ppatch = NULL; - return; + if (!sink) + return; + + ppatch = &sink->edid_caps.panel_patch; + if (ppatch->wait_after_dpcd_poweroff_ms) { + msleep(ppatch->wait_after_dpcd_poweroff_ms); + drm_dbg_driver(adev_to_drm(adev), + "%s: adding a %ds delay as w/a for panel\n", + __func__, + ppatch->wait_after_dpcd_poweroff_ms / 1000); + } } -static int dm_resume(void *handle) +/** + * amdgpu_dm_dump_links_and_sinks - Debug dump of all DC links and their sinks + * @adev: amdgpu device pointer + * + * Iterates through all DC links and dumps information about local and remote + * (MST) sinks. Should be called after connector detection is complete to see + * the final state of all links. + */ +static void amdgpu_dm_dump_links_and_sinks(struct amdgpu_device *adev) { - struct amdgpu_device *adev = handle; + struct dc *dc = adev->dm.dc; + struct drm_device *dev = adev_to_drm(adev); + int li; + + if (!dc) + return; + + for (li = 0; li < dc->link_count; li++) { + struct dc_link *l = dc->links[li]; + const char *name = NULL; + int rs; + + if (!l) + continue; + if (l->local_sink && l->local_sink->edid_caps.display_name[0]) + name = l->local_sink->edid_caps.display_name; + else + name = "n/a"; + + drm_dbg_kms(dev, + "LINK_DUMP[%d]: local_sink=%p type=%d sink_signal=%d sink_count=%u edid_name=%s mst_capable=%d mst_alloc_streams=%d\n", + li, + l->local_sink, + l->type, + l->local_sink ? 
l->local_sink->sink_signal : SIGNAL_TYPE_NONE, + l->sink_count, + name, + l->dpcd_caps.is_mst_capable, + l->mst_stream_alloc_table.stream_count); + + /* Dump remote (MST) sinks if any */ + for (rs = 0; rs < l->sink_count; rs++) { + struct dc_sink *rsink = l->remote_sinks[rs]; + const char *rname = NULL; + + if (!rsink) + continue; + if (rsink->edid_caps.display_name[0]) + rname = rsink->edid_caps.display_name; + else + rname = "n/a"; + drm_dbg_kms(dev, + " REMOTE_SINK[%d:%d]: sink=%p signal=%d edid_name=%s\n", + li, rs, + rsink, + rsink->sink_signal, + rname); + } + } +} + +static int dm_resume(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; struct drm_device *ddev = adev_to_drm(adev); struct amdgpu_display_manager *dm = &adev->dm; struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; struct drm_connector_list_iter iter; - struct drm_crtc *crtc; - struct drm_crtc_state *new_crtc_state; - struct dm_crtc_state *dm_new_crtc_state; - struct drm_plane *plane; - struct drm_plane_state *new_plane_state; - struct dm_plane_state *dm_new_plane_state; struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); enum dc_connection_type new_connection_type = dc_connection_none; struct dc_state *dc_state; int i, r, j; + struct dc_commit_streams_params commit_params = {}; + + if (dm->dc->caps.ips_support) { + dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false); + } if (amdgpu_in_reset(adev)) { dc_state = dm->cached_dc_state; @@ -2625,10 +3492,14 @@ static int dm_resume(void *handle) link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); r = dm_dmub_hw_init(adev); - if (r) - DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); + if (r) { + drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r); + return r; + } + dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); + dc_resume(dm->dc); amdgpu_dm_irq_resume_early(adev); @@ -2646,26 +3517,34 @@ static int dm_resume(void *handle) dc_enable_dmub_outbox(adev->dm.dc); } - WARN_ON(!dc_commit_state(dm->dc, dc_state)); + commit_params.streams = dc_state->streams; + commit_params.stream_count = dc_state->stream_count; + dc_exit_ips_for_hw_access(dm->dc); + WARN_ON(!dc_commit_streams(dm->dc, &commit_params)); dm_gpureset_commit_state(dm->cached_dc_state, dm); dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); - dc_release_state(dm->cached_dc_state); + dc_state_release(dm->cached_dc_state); dm->cached_dc_state = NULL; amdgpu_dm_irq_resume_late(adev); mutex_unlock(&dm->dc_lock); + /* set the backlight after a reset */ + for (i = 0; i < dm->num_of_edps; i++) { + if (dm->backlight_dev[i]) + amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); + } + return 0; } /* Recreate dc_state - DC invalidates it when setting power state to S3. */ - dc_release_state(dm_state->context); - dm_state->context = dc_create_state(dm->dc); + dc_state_release(dm_state->context); + dm_state->context = dc_state_create(dm->dc, NULL); /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ - dc_resource_state_construct(dm->dc, dm_state->context); /* Before powering on DC we need to re-initialize DMUB. 
*/ dm_dmub_hw_resume(adev); @@ -2677,6 +3556,7 @@ static int dm_resume(void *handle) } /* power on hardware */ + dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); /* program HPD filter */ @@ -2688,32 +3568,45 @@ static int dm_resume(void *handle) */ amdgpu_dm_irq_resume_early(adev); + s3_handle_hdmi_cec(ddev, false); + /* On resume we need to rewrite the MSTM control bits to enable MST*/ s3_handle_mst(ddev, false); /* Do detection*/ drm_connector_list_iter_begin(ddev, &iter); drm_for_each_connector_iter(connector, &iter) { + bool ret; + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); + if (!aconnector->dc_link) + continue; + /* - * this is the case when traversing through already created + * this is the case when traversing through already created end sink * MST connectors, should be skipped */ - if (aconnector->dc_link && - aconnector->dc_link->type == dc_connection_mst_branch) + if (aconnector->mst_root) continue; - mutex_lock(&aconnector->hpd_lock); - if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) - DRM_ERROR("KMS: Failed to detect connector\n"); + guard(mutex)(&aconnector->hpd_lock); + if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) + drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(aconnector->dc_link); } else { - mutex_lock(&dm->dc_lock); - dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); - mutex_unlock(&dm->dc_lock); + guard(mutex)(&dm->dc_lock); + dc_exit_ips_for_hw_access(dm->dc); + ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4); + if (ret) { + /* w/a delay for certain panels */ + apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink); + } } if (aconnector->fake_enable && aconnector->dc_link->local_sink) @@ -2723,45 +3616,47 @@ static int dm_resume(void *handle) dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; amdgpu_dm_update_connector_after_detect(aconnector); - mutex_unlock(&aconnector->hpd_lock); } drm_connector_list_iter_end(&iter); - /* Force mode set in atomic commit */ - for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) - new_crtc_state->active_changed = true; + dm_destroy_cached_state(adev); - /* - * atomic_check is expected to create the dc states. We need to release - * them here, since they were duplicated as part of the suspend - * procedure. 
- */ - for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - if (dm_new_crtc_state->stream) { - WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); - dc_stream_release(dm_new_crtc_state->stream); - dm_new_crtc_state->stream = NULL; - } - } + /* Do mst topology probing after resuming cached state*/ + drm_connector_list_iter_begin(ddev, &iter); + drm_for_each_connector_iter(connector, &iter) { + bool init = false; - for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { - dm_new_plane_state = to_dm_plane_state(new_plane_state); - if (dm_new_plane_state->dc_state) { - WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); - dc_plane_state_release(dm_new_plane_state->dc_state); - dm_new_plane_state->dc_state = NULL; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + aconnector = to_amdgpu_dm_connector(connector); + if (aconnector->dc_link->type != dc_connection_mst_branch || + aconnector->mst_root) + continue; + + scoped_guard(mutex, &aconnector->mst_mgr.lock) { + init = !aconnector->mst_mgr.mst_primary; } + if (init) + dm_helpers_dp_mst_start_top_mgr(aconnector->dc_link->ctx, + aconnector->dc_link, false); + else + drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr); } + drm_connector_list_iter_end(&iter); - drm_atomic_helper_resume(ddev, dm->cached_state); - - dm->cached_state = NULL; + /* Debug dump: list all DC links and their associated sinks after detection + * is complete for all connectors. This provides a comprehensive view of the + * final state without repeating the dump for each connector. + */ + amdgpu_dm_dump_links_and_sinks(adev); amdgpu_dm_irq_resume_late(adev); amdgpu_dm_smu_write_watermarks_table(adev); + drm_kms_helper_hotplug_event(ddev); + return 0; } @@ -2794,8 +3689,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = { .set_powergating_state = dm_set_powergating_state, }; -const struct amdgpu_ip_block_version dm_ip_block = -{ +const struct amdgpu_ip_block_version dm_ip_block = { .type = AMD_IP_BLOCK_TYPE_DCE, .major = 1, .minor = 0, @@ -2812,68 +3706,92 @@ const struct amdgpu_ip_block_version dm_ip_block = static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { .fb_create = amdgpu_display_user_framebuffer_create, - .get_format_info = amd_get_format_info, - .output_poll_changed = drm_fb_helper_output_poll_changed, + .get_format_info = amdgpu_dm_plane_get_format_info, .atomic_check = amdgpu_dm_atomic_check, .atomic_commit = drm_atomic_helper_commit, }; static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { .atomic_commit_tail = amdgpu_dm_atomic_commit_tail, - .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, + .atomic_commit_setup = amdgpu_dm_atomic_setup_commit, }; static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) { + const struct drm_panel_backlight_quirk *panel_backlight_quirk; struct amdgpu_dm_backlight_caps *caps; - struct amdgpu_display_manager *dm; struct drm_connector *conn_base; struct amdgpu_device *adev; - struct dc_link *link = NULL; struct drm_luminance_range_info *luminance_range; - int i; - - if (!aconnector || !aconnector->dc_link) - return; + struct drm_device *drm; - link = aconnector->dc_link; - if (link->connector_signal != SIGNAL_TYPE_EDP) + if (aconnector->bl_idx == -1 || + aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP) return; conn_base = &aconnector->base; - adev = drm_to_adev(conn_base->dev); - dm = &adev->dm; - for (i 
= 0; i < dm->num_of_edps; i++) { - if (link == dm->backlight_link[i]) - break; - } - if (i >= dm->num_of_edps) - return; - caps = &dm->backlight_caps[i]; + drm = conn_base->dev; + adev = drm_to_adev(drm); + + caps = &adev->dm.backlight_caps[aconnector->bl_idx]; caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; caps->aux_support = false; - if (caps->ext_caps->bits.oled == 1 /*|| - caps->ext_caps->bits.sdr_aux_backlight_control == 1 || - caps->ext_caps->bits.hdr_aux_backlight_control == 1*/) + if (caps->ext_caps->bits.oled == 1 + /* + * || + * caps->ext_caps->bits.sdr_aux_backlight_control == 1 || + * caps->ext_caps->bits.hdr_aux_backlight_control == 1 + */) caps->aux_support = true; if (amdgpu_backlight == 0) caps->aux_support = false; else if (amdgpu_backlight == 1) caps->aux_support = true; + if (caps->aux_support) + aconnector->dc_link->backlight_control_type = BACKLIGHT_CONTROL_AMD_AUX; luminance_range = &conn_base->display_info.luminance_range; - caps->aux_min_input_signal = luminance_range->min_luminance; - caps->aux_max_input_signal = luminance_range->max_luminance; + + if (luminance_range->max_luminance) + caps->aux_max_input_signal = luminance_range->max_luminance; + else + caps->aux_max_input_signal = 512; + + if (luminance_range->min_luminance) + caps->aux_min_input_signal = luminance_range->min_luminance; + else + caps->aux_min_input_signal = 1; + + panel_backlight_quirk = + drm_get_panel_backlight_quirk(aconnector->drm_edid); + if (!IS_ERR_OR_NULL(panel_backlight_quirk)) { + if (panel_backlight_quirk->min_brightness) { + caps->min_input_signal = + panel_backlight_quirk->min_brightness - 1; + drm_info(drm, + "Applying panel backlight quirk, min_brightness: %d\n", + caps->min_input_signal); + } + if (panel_backlight_quirk->brightness_mask) { + drm_info(drm, + "Applying panel backlight quirk, brightness_mask: 0x%X\n", + panel_backlight_quirk->brightness_mask); + caps->brightness_mask = + panel_backlight_quirk->brightness_mask; + } + } } +DEFINE_FREE(sink_release, struct dc_sink *, if (_T) dc_sink_release(_T)) + void amdgpu_dm_update_connector_after_detect( struct amdgpu_dm_connector *aconnector) { struct drm_connector *connector = &aconnector->base; + struct dc_sink *sink __free(sink_release) = NULL; struct drm_device *dev = connector->dev; - struct dc_sink *sink; /* MST handled by drm_mst framework */ if (aconnector->mst_mgr.mst_state == true) @@ -2895,7 +3813,7 @@ void amdgpu_dm_update_connector_after_detect( * For S3 resume with headless use eml_sink to fake stream * because on resume connector->sink is set to NULL */ - mutex_lock(&dev->mode_config.mutex); + guard(mutex)(&dev->mode_config.mutex); if (sink) { if (aconnector->dc_sink) { @@ -2911,7 +3829,7 @@ void amdgpu_dm_update_connector_after_detect( aconnector->dc_sink = sink; dc_sink_retain(aconnector->dc_sink); amdgpu_dm_update_freesync_caps(connector, - aconnector->edid); + aconnector->drm_edid); } else { amdgpu_dm_update_freesync_caps(connector, NULL); if (!aconnector->dc_sink) { @@ -2920,10 +3838,6 @@ void amdgpu_dm_update_connector_after_detect( } } - mutex_unlock(&dev->mode_config.mutex); - - if (sink) - dc_sink_release(sink); return; } @@ -2931,27 +3845,25 @@ void amdgpu_dm_update_connector_after_detect( * TODO: temporary guard to look for proper fix * if this sink is MST sink, we should not do anything */ - if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - dc_sink_release(sink); + if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) return; - } if (aconnector->dc_sink == sink) { /* 
* We got a DP short pulse (Link Loss, DP CTS, etc...). * Do nothing!! */ - DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n", - aconnector->connector_id); - if (sink) - dc_sink_release(sink); + drm_dbg_kms(dev, "DCHPD: connector_id=%d: dc_sink didn't change.\n", + aconnector->connector_id); return; } - DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", - aconnector->connector_id, aconnector->dc_sink, sink); + drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", + aconnector->connector_id, aconnector->dc_sink, sink); - mutex_lock(&dev->mode_config.mutex); + /* When polling, DRM has already locked the mutex for us. */ + if (!drm_kms_helper_is_poll_worker()) + mutex_lock(&dev->mode_config.mutex); /* * 1. Update status of the drm connector @@ -2970,44 +3882,145 @@ void amdgpu_dm_update_connector_after_detect( aconnector->dc_sink = sink; dc_sink_retain(aconnector->dc_sink); if (sink->dc_edid.length == 0) { - aconnector->edid = NULL; + aconnector->drm_edid = NULL; + hdmi_cec_unset_edid(aconnector); if (aconnector->dc_link->aux_mode) { - drm_dp_cec_unset_edid( - &aconnector->dm_dp_aux.aux); + drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); } } else { - aconnector->edid = - (struct edid *)sink->dc_edid.raw_edid; + const struct edid *edid = (const struct edid *)sink->dc_edid.raw_edid; + aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length); + drm_edid_connector_update(connector, aconnector->drm_edid); + + hdmi_cec_set_edid(aconnector); if (aconnector->dc_link->aux_mode) - drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, - aconnector->edid); + drm_dp_cec_attach(&aconnector->dm_dp_aux.aux, + connector->display_info.source_physical_address); + } + + if (!aconnector->timing_requested) { + aconnector->timing_requested = + kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL); + if (!aconnector->timing_requested) + drm_err(dev, + "failed to create aconnector->requested_timing\n"); } - drm_connector_update_edid_property(connector, aconnector->edid); - amdgpu_dm_update_freesync_caps(connector, aconnector->edid); + amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid); update_connector_ext_caps(aconnector); } else { + hdmi_cec_unset_edid(aconnector); drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); amdgpu_dm_update_freesync_caps(connector, NULL); - drm_connector_update_edid_property(connector, NULL); aconnector->num_modes = 0; dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; - aconnector->edid = NULL; -#ifdef CONFIG_DRM_AMD_DC_HDCP + drm_edid_free(aconnector->drm_edid); + aconnector->drm_edid = NULL; + kfree(aconnector->timing_requested); + aconnector->timing_requested = NULL; /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; -#endif } - mutex_unlock(&dev->mode_config.mutex); - update_subconnector_property(aconnector); - if (sink) - dc_sink_release(sink); + /* When polling, the mutex will be unlocked for us by DRM. 
*/ + if (!drm_kms_helper_is_poll_worker()) + mutex_unlock(&dev->mode_config.mutex); +} + +static bool are_sinks_equal(const struct dc_sink *sink1, const struct dc_sink *sink2) +{ + if (!sink1 || !sink2) + return false; + if (sink1->sink_signal != sink2->sink_signal) + return false; + + if (sink1->dc_edid.length != sink2->dc_edid.length) + return false; + + if (memcmp(sink1->dc_edid.raw_edid, sink2->dc_edid.raw_edid, + sink1->dc_edid.length) != 0) + return false; + return true; +} + + +/** + * DOC: hdmi_hpd_debounce_work + * + * HDMI HPD debounce delay in milliseconds. When an HDMI display toggles HPD + * (such as during power save transitions), this delay determines how long to + * wait before processing the HPD event. This allows distinguishing between a + * physical unplug (>hdmi_hpd_debounce_delay) + * and a spontaneous RX HPD toggle (<hdmi_hpd_debounce_delay). + * + * If the toggle is less than this delay, the driver compares sink capabilities + * and permits a hotplug event if they changed. + * + * The default value of 1500ms was chosen based on experimental testing with + * various monitors that exhibit spontaneous HPD toggling behavior. + */ +static void hdmi_hpd_debounce_work(struct work_struct *work) +{ + struct amdgpu_dm_connector *aconnector = + container_of(to_delayed_work(work), struct amdgpu_dm_connector, + hdmi_hpd_debounce_work); + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct dc *dc = aconnector->dc_link->ctx->dc; + bool fake_reconnect = false; + bool reallow_idle = false; + bool ret = false; + guard(mutex)(&aconnector->hpd_lock); + + /* Re-detect the display */ + scoped_guard(mutex, &adev->dm.dc_lock) { + if (dc->caps.ips_support && dc->ctx->dmub_srv->idle_allowed) { + dc_allow_idle_optimizations(dc, false); + reallow_idle = true; + } + ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); + } + + if (ret) { + /* Apply workaround delay for certain panels */ + apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink); + /* Compare sinks to determine if this was a spontaneous HPD toggle */ + if (are_sinks_equal(aconnector->dc_link->local_sink, aconnector->hdmi_prev_sink)) { + /* + * Sinks match - this was a spontaneous HDMI HPD toggle. 
+ */ + drm_dbg_kms(dev, "HDMI HPD: Sink unchanged after debounce, internal re-enable\n"); + fake_reconnect = true; + } + + /* Update connector state */ + amdgpu_dm_update_connector_after_detect(aconnector); + + drm_modeset_lock_all(dev); + dm_restore_drm_connector_state(dev, connector); + drm_modeset_unlock_all(dev); + + /* Only notify OS if sink actually changed */ + if (!fake_reconnect && aconnector->base.force == DRM_FORCE_UNSPECIFIED) + drm_kms_helper_hotplug_event(dev); + } + + /* Release the cached sink reference */ + if (aconnector->hdmi_prev_sink) { + dc_sink_release(aconnector->hdmi_prev_sink); + aconnector->hdmi_prev_sink = NULL; + } + + scoped_guard(mutex, &adev->dm.dc_lock) { + if (reallow_idle && dc->caps.ips_support) + dc_allow_idle_optimizations(dc, true); + } } static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) @@ -3016,10 +4029,10 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) struct drm_device *dev = connector->dev; enum dc_connection_type new_connection_type = dc_connection_none; struct amdgpu_device *adev = drm_to_adev(dev); -#ifdef CONFIG_DRM_AMD_DC_HDCP struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); -#endif + struct dc *dc = aconnector->dc_link->ctx->dc; bool ret = false; + bool debounce_required = false; if (adev->dm.disable_hpd_irq) return; @@ -3028,19 +4041,27 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) * In case of failure or MST no need to update connector status or notify the OS * since (for MST case) MST does this in its own context. */ - mutex_lock(&aconnector->hpd_lock); + guard(mutex)(&aconnector->hpd_lock); -#ifdef CONFIG_DRM_AMD_DC_HDCP if (adev->dm.hdcp_workqueue) { hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); dm_con_state->update_hdcp = true; } -#endif if (aconnector->fake_enable) aconnector->fake_enable = false; - if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) - DRM_ERROR("KMS: Failed to detect connector\n"); + aconnector->timing_changed = false; + + if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) + drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); + + /* + * Check for HDMI disconnect with debounce enabled. + */ + debounce_required = (aconnector->hdmi_hpd_debounce_delay_ms > 0 && + dc_is_hdmi_signal(aconnector->dc_link->connector_signal) && + new_connection_type == dc_connection_none && + aconnector->dc_link->local_sink != NULL); if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(aconnector->dc_link); @@ -3051,11 +4072,41 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) drm_kms_helper_connector_hotplug_event(connector); + } else if (debounce_required) { + /* + * HDMI disconnect detected - schedule delayed work instead of + * processing immediately. This allows us to coalesce spurious + * HDMI signals from physical unplugs. + */ + drm_dbg_kms(dev, "HDMI HPD: Disconnect detected, scheduling debounce work (%u ms)\n", + aconnector->hdmi_hpd_debounce_delay_ms); + + /* Cache the current sink for later comparison */ + if (aconnector->hdmi_prev_sink) + dc_sink_release(aconnector->hdmi_prev_sink); + aconnector->hdmi_prev_sink = aconnector->dc_link->local_sink; + if (aconnector->hdmi_prev_sink) + dc_sink_retain(aconnector->hdmi_prev_sink); + + /* Schedule delayed detection. 
*/ + if (mod_delayed_work(system_wq, + &aconnector->hdmi_hpd_debounce_work, + msecs_to_jiffies(aconnector->hdmi_hpd_debounce_delay_ms))) + drm_dbg_kms(dev, "HDMI HPD: Re-scheduled debounce work\n"); + } else { - mutex_lock(&adev->dm.dc_lock); - ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); - mutex_unlock(&adev->dm.dc_lock); + + /* If the aconnector->hdmi_hpd_debounce_work is scheduled, exit early */ + if (delayed_work_pending(&aconnector->hdmi_hpd_debounce_work)) + return; + + scoped_guard(mutex, &adev->dm.dc_lock) { + dc_exit_ips_for_hw_access(dc); + ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); + } if (ret) { + /* w/a delay for certain panels */ + apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink); amdgpu_dm_update_connector_after_detect(aconnector); drm_modeset_lock_all(dev); @@ -3066,8 +4117,6 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) drm_kms_helper_connector_hotplug_event(connector); } } - mutex_unlock(&aconnector->hpd_lock); - } static void handle_hpd_irq(void *param) @@ -3078,98 +4127,21 @@ static void handle_hpd_irq(void *param) } -static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector) -{ - uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; - uint8_t dret; - bool new_irq_handled = false; - int dpcd_addr; - int dpcd_bytes_to_read; - - const int max_process_count = 30; - int process_count = 0; - - const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link); - - if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) { - dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT; - /* DPCD 0x200 - 0x201 for downstream IRQ */ - dpcd_addr = DP_SINK_COUNT; - } else { - dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI; - /* DPCD 0x2002 - 0x2005 for downstream IRQ */ - dpcd_addr = DP_SINK_COUNT_ESI; - } - - dret = drm_dp_dpcd_read( - &aconnector->dm_dp_aux.aux, - dpcd_addr, - esi, - dpcd_bytes_to_read); - - while (dret == dpcd_bytes_to_read && - process_count < max_process_count) { - uint8_t retry; - dret = 0; - - process_count++; - - DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]); - /* handle HPD short pulse irq */ - if (aconnector->mst_mgr.mst_state) - drm_dp_mst_hpd_irq( - &aconnector->mst_mgr, - esi, - &new_irq_handled); - - if (new_irq_handled) { - /* ACK at DPCD to notify down stream */ - const int ack_dpcd_bytes_to_write = - dpcd_bytes_to_read - 1; - - for (retry = 0; retry < 3; retry++) { - uint8_t wret; - - wret = drm_dp_dpcd_write( - &aconnector->dm_dp_aux.aux, - dpcd_addr + 1, - &esi[1], - ack_dpcd_bytes_to_write); - if (wret == ack_dpcd_bytes_to_write) - break; - } - - /* check if there is new irq to be handled */ - dret = drm_dp_dpcd_read( - &aconnector->dm_dp_aux.aux, - dpcd_addr, - esi, - dpcd_bytes_to_read); - - new_irq_handled = false; - } else { - break; - } - } - - if (process_count == max_process_count) - DRM_DEBUG_DRIVER("Loop exceeded max iterations\n"); -} - -static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq, +static void schedule_hpd_rx_offload_work(struct amdgpu_device *adev, struct hpd_rx_irq_offload_work_queue *offload_wq, union hpd_irq_data hpd_irq_data) { struct hpd_rx_irq_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL); if (!offload_work) { - DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n"); + drm_err(adev_to_drm(adev), "Failed to allocate hpd_rx_irq_offload_work.\n"); return; } INIT_WORK(&offload_work->work, 
dm_handle_hpd_rx_offload_work); offload_work->data = hpd_irq_data; offload_work->offload_wq = offload_wq; + offload_work->adev = adev; queue_work(offload_wq->wq, &offload_work->work); DRM_DEBUG_KMS("queue work to handle hpd_rx offload work"); @@ -3188,8 +4160,9 @@ static void handle_hpd_rx_irq(void *param) union hpd_irq_data hpd_irq_data; bool link_loss = false; bool has_left_work = false; - int idx = aconnector->base.index; + int idx = dc_link->link_index; struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; + struct dc *dc = aconnector->dc_link->ctx->dc; memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); @@ -3210,14 +4183,30 @@ static void handle_hpd_rx_irq(void *param) goto out; if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { - schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); + schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data); goto out; } if (dc_link_dp_allow_hpd_rx_irq(dc_link)) { if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { - dm_handle_mst_sideband_msg(aconnector); + bool skip = false; + + /* + * DOWN_REP_MSG_RDY is also handled by polling method + * mgr->cbs->poll_hpd_irq() + */ + spin_lock(&offload_wq->offload_lock); + skip = offload_wq->is_handling_mst_msg_rdy_event; + + if (!skip) + offload_wq->is_handling_mst_msg_rdy_event = true; + + spin_unlock(&offload_wq->offload_lock); + + if (!skip) + schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data); + goto out; } @@ -3233,7 +4222,7 @@ static void handle_hpd_rx_irq(void *param) spin_unlock(&offload_wq->offload_lock); if (!skip) - schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); + schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data); goto out; } @@ -3242,8 +4231,8 @@ static void handle_hpd_rx_irq(void *param) out: if (result && !is_mst_root_connector) { /* Downstream Port status changed. 
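 * Re-detect the sink below so the connector state and the userspace
 * view reflect the new downstream device.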
*/ - if (!dc_link_detect_sink(dc_link, &new_connection_type)) - DRM_ERROR("KMS: Failed to detect connector\n"); + if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) + drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(dc_link); @@ -3263,6 +4252,7 @@ out: bool ret = false; mutex_lock(&adev->dm.dc_lock); + dc_exit_ips_for_hw_access(dc); ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX); mutex_unlock(&adev->dm.dc_lock); @@ -3280,12 +4270,10 @@ out: } } } -#ifdef CONFIG_DRM_AMD_DC_HDCP if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { if (adev->dm.hdcp_workqueue) hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); } -#endif if (dc_link->type != dc_connection_mst_branch) drm_dp_cec_irq(&aconnector->dm_dp_aux.aux); @@ -3293,7 +4281,7 @@ out: mutex_unlock(&aconnector->hpd_lock); } -static void register_hpd_handlers(struct amdgpu_device *adev) +static int register_hpd_handlers(struct amdgpu_device *adev) { struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; @@ -3304,36 +4292,70 @@ static void register_hpd_handlers(struct amdgpu_device *adev) int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; + if (dc_is_dmub_outbox_supported(adev->dm.dc)) { + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, + dmub_hpd_callback, true)) { + drm_err(adev_to_drm(adev), "fail to register dmub hpd callback"); + return -EINVAL; + } + + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, + dmub_hpd_callback, true)) { + drm_err(adev_to_drm(adev), "fail to register dmub hpd callback"); + return -EINVAL; + } + + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY, + dmub_hpd_sense_callback, true)) { + drm_err(adev_to_drm(adev), "fail to register dmub hpd sense callback"); + return -EINVAL; + } + } + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); dc_link = aconnector->dc_link; - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { + if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; int_params.irq_source = dc_link->irq_source_hpd; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - handle_hpd_irq, - (void *) aconnector); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_HPD1 || + int_params.irq_source > DC_IRQ_SOURCE_HPD6) { + drm_err(adev_to_drm(adev), "Failed to register hpd irq!\n"); + return -EINVAL; + } + + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + handle_hpd_irq, (void *) aconnector)) + return -ENOMEM; } - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { + if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { /* Also register for DP short pulse (hpd_rx). 
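 * Short pulses signal link-status changes and MST sideband messages
 * and are dispatched to handle_hpd_rx_irq().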
*/ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; int_params.irq_source = dc_link->irq_source_hpd_rx; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - handle_hpd_rx_irq, - (void *) aconnector); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_HPD1RX || + int_params.irq_source > DC_IRQ_SOURCE_HPD6RX) { + drm_err(adev_to_drm(adev), "Failed to register hpd rx irq!\n"); + return -EINVAL; + } - if (adev->dm.hpd_rx_offload_wq) - adev->dm.hpd_rx_offload_wq[connector->index].aconnector = - aconnector; + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + handle_hpd_rx_irq, (void *) aconnector)) + return -ENOMEM; } } + return 0; } #if defined(CONFIG_DRM_AMD_DC_SI) @@ -3345,7 +4367,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev) struct dc_interrupt_params int_params = {0}; int r; int i; - unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY; + unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; @@ -3359,27 +4381,36 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev) * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts * coming from DC hardware. * amdgpu_dm_irq_handler() will re-direct the interrupt to DC - * for acknowledging and handling. */ + * for acknowledging and handling. + */ /* Use VBLANK interrupt */ for (i = 0; i < adev->mode_info.num_crtc; i++) { - r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq); + r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq); if (r) { - DRM_ERROR("Failed to add crtc irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n"); return r; } int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; int_params.irq_source = - dc_interrupt_to_irq_source(dc, i+1 , 0); + dc_interrupt_to_irq_source(dc, i + 1, 0); + + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || + int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { + drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n"); + return -EINVAL; + } c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_crtc_high_irq, c_irq_params); + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_crtc_high_irq, c_irq_params)) + return -ENOMEM; } /* Use GRPH_PFLIP interrupt */ @@ -3387,7 +4418,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev) i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); if (r) { - DRM_ERROR("Failed to add page flip irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n"); return r; } @@ -3395,27 +4426,34 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev) int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || + int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { + drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n"); + return -EINVAL; + } + c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - 
dm_pflip_high_irq, c_irq_params); - + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_pflip_high_irq, c_irq_params)) + return -ENOMEM; } /* HPD */ r = amdgpu_irq_add_id(adev, client_id, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); if (r) { - DRM_ERROR("Failed to add hpd irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n"); return r; } - register_hpd_handlers(adev); + r = register_hpd_handlers(adev); - return 0; + return r; } #endif @@ -3427,7 +4465,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) struct dc_interrupt_params int_params = {0}; int r; int i; - unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY; + unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; if (adev->family >= AMDGPU_FAMILY_AI) client_id = SOC15_IH_CLIENTID_DCE; @@ -3444,13 +4482,14 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts * coming from DC hardware. * amdgpu_dm_irq_handler() will re-direct the interrupt to DC - * for acknowledging and handling. */ + * for acknowledging and handling. + */ /* Use VBLANK interrupt */ for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq); if (r) { - DRM_ERROR("Failed to add crtc irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n"); return r; } @@ -3458,20 +4497,28 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || + int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { + drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n"); + return -EINVAL; + } + c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_crtc_high_irq, c_irq_params); + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_crtc_high_irq, c_irq_params)) + return -ENOMEM; } /* Use VUPDATE interrupt */ for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq); if (r) { - DRM_ERROR("Failed to add vupdate irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n"); return r; } @@ -3479,13 +4526,21 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 || + int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) { + drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n"); + return -EINVAL; + } + c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_vupdate_high_irq, c_irq_params); + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_vupdate_high_irq, c_irq_params)) + return -ENOMEM; } /* Use GRPH_PFLIP interrupt */ @@ -3493,7 +4548,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { r = amdgpu_irq_add_id(adev, client_id, i, 
&adev->pageflip_irq); if (r) { - DRM_ERROR("Failed to add page flip irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n"); return r; } @@ -3501,27 +4556,34 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || + int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { + drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n"); + return -EINVAL; + } + c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_pflip_high_irq, c_irq_params); - + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_pflip_high_irq, c_irq_params)) + return -ENOMEM; } /* HPD */ r = amdgpu_irq_add_id(adev, client_id, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); if (r) { - DRM_ERROR("Failed to add hpd irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n"); return r; } - register_hpd_handlers(adev); + r = register_hpd_handlers(adev); - return 0; + return r; } /* Register IRQ sources and initialize IRQ callbacks */ @@ -3565,7 +4627,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq); if (r) { - DRM_ERROR("Failed to add crtc irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n"); return r; } @@ -3573,13 +4635,21 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || + int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { + drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n"); + return -EINVAL; + } + c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt( - adev, &int_params, dm_crtc_high_irq, c_irq_params); + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_crtc_high_irq, c_irq_params)) + return -ENOMEM; } /* Use otg vertical line interrupt */ @@ -3589,7 +4659,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) vrtl_int_srcid[i], &adev->vline0_irq); if (r) { - DRM_ERROR("Failed to add vline0 irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add vline0 irq id!\n"); return r; } @@ -3597,9 +4667,11 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) int_params.irq_source = dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0); - if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) { - DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]); - break; + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_DC1_VLINE0 || + int_params.irq_source > DC_IRQ_SOURCE_DC6_VLINE0) { + drm_err(adev_to_drm(adev), "Failed to register vline0 irq!\n"); + return -EINVAL; } c_irq_params = &adev->dm.vline0_params[int_params.irq_source @@ -3608,8 +4680,10 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_dcn_vertical_interrupt0_high_irq, 
c_irq_params); + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_dcn_vertical_interrupt0_high_irq, + c_irq_params)) + return -ENOMEM; } #endif @@ -3624,7 +4698,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); if (r) { - DRM_ERROR("Failed to add vupdate irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n"); return r; } @@ -3632,13 +4706,21 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 || + int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) { + drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n"); + return -EINVAL; + } + c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_vupdate_high_irq, c_irq_params); + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_vupdate_high_irq, c_irq_params)) + return -ENOMEM; } /* Use GRPH_PFLIP interrupt */ @@ -3647,7 +4729,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) i++) { r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); if (r) { - DRM_ERROR("Failed to add page flip irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n"); return r; } @@ -3655,27 +4737,34 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) int_params.irq_source = dc_interrupt_to_irq_source(dc, i, 0); + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || + int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || + int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { + drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n"); + return -EINVAL; + } + c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_pflip_high_irq, c_irq_params); - + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_pflip_high_irq, c_irq_params)) + return -ENOMEM; } /* HPD */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, &adev->hpd_irq); if (r) { - DRM_ERROR("Failed to add hpd irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n"); return r; } - register_hpd_handlers(adev); + r = register_hpd_handlers(adev); - return 0; + return r; } /* Register Outbox IRQ sources and initialize IRQ callbacks */ static int register_outbox_irq_handlers(struct amdgpu_device *adev) @@ -3691,7 +4780,7 @@ static int register_outbox_irq_handlers(struct amdgpu_device *adev) r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT, &adev->dmub_outbox_irq); if (r) { - DRM_ERROR("Failed to add outbox irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add outbox irq id!\n"); return r; } @@ -3706,8 +4795,9 @@ static int register_outbox_irq_handlers(struct amdgpu_device *adev) c_irq_params->adev = adev; c_irq_params->irq_src = int_params.irq_source; - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_dmub_outbox1_low_irq, c_irq_params); + if (!amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_dmub_outbox1_low_irq, c_irq_params)) + return -ENOMEM; } return 0; @@ 
-3771,7 +4861,7 @@ dm_atomic_duplicate_state(struct drm_private_obj *obj) old_state = to_dm_atomic_state(obj->state); if (old_state && old_state->context) - new_state->context = dc_copy_state(old_state->context); + new_state->context = dc_state_create_copy(old_state->context); if (!new_state->context) { kfree(new_state); @@ -3787,7 +4877,7 @@ static void dm_atomic_destroy_state(struct drm_private_obj *obj, struct dm_atomic_state *dm_state = to_dm_atomic_state(state); if (dm_state && dm_state->context) - dc_release_state(dm_state->context); + dc_state_release(dm_state->context); kfree(dm_state); } @@ -3819,20 +4909,16 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) /* indicates support for immediate flip */ adev_to_drm(adev)->mode_config.async_page_flip = true; - adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; - state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return -ENOMEM; - state->context = dc_create_state(adev->dm.dc); + state->context = dc_state_create_current_copy(adev->dm.dc); if (!state->context) { kfree(state); return -ENOMEM; } - dc_resource_state_copy_construct_current(adev->dm.dc, state->context); - drm_atomic_private_obj_init(adev_to_drm(adev), &adev->dm.atomic_obj, &state->base, @@ -3840,14 +4926,22 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) r = amdgpu_display_modeset_create_props(adev); if (r) { - dc_release_state(state->context); + dc_state_release(state->context); kfree(state); return r; } +#ifdef AMD_PRIVATE_COLOR + if (amdgpu_dm_create_color_properties(adev)) { + dc_state_release(state->context); + kfree(state); + return -ENOMEM; + } +#endif + r = amdgpu_dm_audio_init(adev); if (r) { - dc_release_state(state->context); + dc_state_release(state->context); kfree(state); return r; } @@ -3857,43 +4951,51 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 +#define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2) #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, int bl_idx) { + struct amdgpu_dm_backlight_caps *caps = &dm->backlight_caps[bl_idx]; + + if (caps->caps_valid) + return; + #if defined(CONFIG_ACPI) - struct amdgpu_dm_backlight_caps caps; + amdgpu_acpi_get_backlight_caps(caps); - memset(&caps, 0, sizeof(caps)); + /* validate the firmware value is sane */ + if (caps->caps_valid) { + int spread = caps->max_input_signal - caps->min_input_signal; - if (dm->backlight_caps[bl_idx].caps_valid) - return; + if (caps->max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || + caps->min_input_signal < 0 || + spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || + spread < AMDGPU_DM_MIN_SPREAD) { + DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n", + caps->min_input_signal, caps->max_input_signal); + caps->caps_valid = false; + } + } - amdgpu_acpi_get_backlight_caps(&caps); - if (caps.caps_valid) { - dm->backlight_caps[bl_idx].caps_valid = true; - if (caps.aux_support) - return; - dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal; - dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal; - } else { - dm->backlight_caps[bl_idx].min_input_signal = - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; - dm->backlight_caps[bl_idx].max_input_signal = - AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; + if (!caps->caps_valid) { + caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; + 
caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; + caps->caps_valid = true; } #else - if (dm->backlight_caps[bl_idx].aux_support) + if (caps->aux_support) return; - dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; - dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; + caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; + caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; + caps->caps_valid = true; #endif } static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, - unsigned *min, unsigned *max) + unsigned int *min, unsigned int *max) { if (!caps) return 0; @@ -3910,31 +5012,108 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, return 1; } +/* Rescale from [min..max] to [0..AMDGPU_MAX_BL_LEVEL] */ +static inline u32 scale_input_to_fw(int min, int max, u64 input) +{ + return DIV_ROUND_CLOSEST_ULL(input * AMDGPU_MAX_BL_LEVEL, max - min); +} + +/* Rescale from [0..AMDGPU_MAX_BL_LEVEL] to [min..max] */ +static inline u32 scale_fw_to_input(int min, int max, u64 input) +{ + return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), AMDGPU_MAX_BL_LEVEL); +} + +static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps, + unsigned int min, unsigned int max, + uint32_t *user_brightness) +{ + u32 brightness = scale_input_to_fw(min, max, *user_brightness); + u8 lower_signal, upper_signal, upper_lum, lower_lum, lum; + int left, right; + + if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE) + return; + + if (!caps->data_points) + return; + + /* + * Handle the case where brightness is below the first data point + * Interpolate between (0,0) and (first_signal, first_lum) + */ + if (brightness < caps->luminance_data[0].input_signal) { + lum = DIV_ROUND_CLOSEST(caps->luminance_data[0].luminance * brightness, + caps->luminance_data[0].input_signal); + goto scale; + } + + left = 0; + right = caps->data_points - 1; + while (left <= right) { + int mid = left + (right - left) / 2; + u8 signal = caps->luminance_data[mid].input_signal; + + /* Exact match found */ + if (signal == brightness) { + lum = caps->luminance_data[mid].luminance; + goto scale; + } + + if (signal < brightness) + left = mid + 1; + else + right = mid - 1; + } + + /* verify bound */ + if (left >= caps->data_points) + left = caps->data_points - 1; + + /* At this point, left > right */ + lower_signal = caps->luminance_data[right].input_signal; + upper_signal = caps->luminance_data[left].input_signal; + lower_lum = caps->luminance_data[right].luminance; + upper_lum = caps->luminance_data[left].luminance; + + /* interpolate */ + if (right == left || !lower_lum) + lum = upper_lum; + else + lum = lower_lum + DIV_ROUND_CLOSEST((upper_lum - lower_lum) * + (brightness - lower_signal), + upper_signal - lower_signal); +scale: + *user_brightness = scale_fw_to_input(min, max, + DIV_ROUND_CLOSEST(lum * brightness, 101)); +} + static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, uint32_t brightness) { - unsigned min, max; + unsigned int min, max; if (!get_brightness_range(caps, &min, &max)) return brightness; - // Rescale 0..255 to min..max - return min + DIV_ROUND_CLOSEST((max - min) * brightness, - AMDGPU_MAX_BL_LEVEL); + convert_custom_brightness(caps, min, max, &brightness); + + // Rescale 0..max to min..max + return min + DIV_ROUND_CLOSEST_ULL((u64)(max - min) * brightness, max); } static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, 
uint32_t brightness) { - unsigned min, max; + unsigned int min, max; if (!get_brightness_range(caps, &min, &max)) return brightness; if (brightness < min) return 0; - // Rescale min..max to 0..255 - return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), + // Rescale min..max to 0..max + return DIV_ROUND_CLOSEST_ULL((u64)max * (brightness - min), max - min); } @@ -3942,33 +5121,76 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, int bl_idx, u32 user_brightness) { - struct amdgpu_dm_backlight_caps caps; + struct amdgpu_dm_backlight_caps *caps; struct dc_link *link; u32 brightness; - bool rc; + bool rc, reallow_idle = false; + struct drm_connector *connector; + + list_for_each_entry(connector, &dm->ddev->mode_config.connector_list, head) { + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + if (aconnector->bl_idx != bl_idx) + continue; + + /* if connector is off, save the brightness for next time it's on */ + if (!aconnector->base.encoder) { + dm->brightness[bl_idx] = user_brightness; + dm->actual_brightness[bl_idx] = 0; + return; + } + } amdgpu_dm_update_backlight_caps(dm, bl_idx); - caps = dm->backlight_caps[bl_idx]; + caps = &dm->backlight_caps[bl_idx]; dm->brightness[bl_idx] = user_brightness; /* update scratch register */ if (bl_idx == 0) amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); - brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); + brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]); link = (struct dc_link *)dm->backlight_link[bl_idx]; + /* Apply brightness quirk */ + if (caps->brightness_mask) + brightness |= caps->brightness_mask; + /* Change brightness based on AUX property */ - if (caps.aux_support) { + mutex_lock(&dm->dc_lock); + if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) { + dc_allow_idle_optimizations(dm->dc, false); + reallow_idle = true; + } + + if (trace_amdgpu_dm_brightness_enabled()) { + trace_amdgpu_dm_brightness(__builtin_return_address(0), + user_brightness, + brightness, + caps->aux_support, + power_supply_is_system_supplied() > 0); + } + + if (caps->aux_support) { rc = dc_link_set_backlight_level_nits(link, true, brightness, AUX_BL_DEFAULT_TRANSITION_TIME_MS); if (!rc) DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); } else { - rc = dc_link_set_backlight_level(link, brightness, 0); + struct set_backlight_level_params backlight_level_params = { 0 }; + + backlight_level_params.backlight_pwm_u16_16 = brightness; + backlight_level_params.transition_time_in_ms = 0; + + rc = dc_link_set_backlight_level(link, &backlight_level_params); if (!rc) DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); } + if (dm->dc->caps.ips_support && reallow_idle) + dc_allow_idle_optimizations(dm->dc, true); + + mutex_unlock(&dm->dc_lock); + if (rc) dm->actual_brightness[bl_idx] = user_brightness; } @@ -3992,6 +5214,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, int bl_idx) { + int ret; struct amdgpu_dm_backlight_caps caps; struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx]; @@ -4000,19 +5223,18 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, if (caps.aux_support) { u32 avg, peak; - bool rc; - rc = dc_link_get_backlight_level_nits(link, &avg, &peak); - if (!rc) + if (!dc_link_get_backlight_level_nits(link, &avg, &peak)) 
return dm->brightness[bl_idx]; return convert_brightness_to_user(&caps, avg); - } else { - int ret = dc_link_get_backlight_level(link); - - if (ret == DC_ERROR_UNEXPECTED) - return dm->brightness[bl_idx]; - return convert_brightness_to_user(&caps, ret); } + + ret = dc_link_get_backlight_level(link); + + if (ret == DC_ERROR_UNEXPECTED) + return dm->brightness[bl_idx]; + + return convert_brightness_to_user(&caps, ret); } static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) @@ -4036,38 +5258,58 @@ static const struct backlight_ops amdgpu_dm_backlight_ops = { }; static void -amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) +amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) { - char bl_name[16]; + struct drm_device *drm = aconnector->base.dev; + struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; struct backlight_properties props = { 0 }; + struct amdgpu_dm_backlight_caps *caps; + char bl_name[16]; + int min, max; - amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps); - dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL; + if (aconnector->bl_idx == -1) + return; if (!acpi_video_backlight_use_native()) { - drm_info(adev_to_drm(dm->adev), "Skipping amdgpu DM backlight registration\n"); + drm_info(drm, "Skipping amdgpu DM backlight registration\n"); /* Try registering an ACPI video backlight device instead. */ acpi_video_register_backlight(); return; } - props.max_brightness = AMDGPU_MAX_BL_LEVEL; - props.brightness = AMDGPU_MAX_BL_LEVEL; + caps = &dm->backlight_caps[aconnector->bl_idx]; + if (get_brightness_range(caps, &min, &max)) { + if (power_supply_is_system_supplied() > 0) + props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->ac_level, 100); + else + props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->dc_level, 100); + /* min is zero, so max needs to be adjusted */ + props.max_brightness = max - min; + drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max, + caps->ac_level, caps->dc_level); + } else + props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL; + + if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) { + drm_info(drm, "Using custom brightness curve\n"); + props.scale = BACKLIGHT_SCALE_NON_LINEAR; + } else + props.scale = BACKLIGHT_SCALE_LINEAR; props.type = BACKLIGHT_RAW; snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", - adev_to_drm(dm->adev)->primary->index + dm->num_of_edps); + drm->primary->index + aconnector->bl_idx); - dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name, - adev_to_drm(dm->adev)->dev, - dm, - &amdgpu_dm_backlight_ops, - &props); + dm->backlight_dev[aconnector->bl_idx] = + backlight_device_register(bl_name, aconnector->base.kdev, dm, + &amdgpu_dm_backlight_ops, &props); + dm->brightness[aconnector->bl_idx] = props.brightness; - if (IS_ERR(dm->backlight_dev[dm->num_of_edps])) - DRM_ERROR("DM: Backlight registration failed!\n"); - else - DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); + if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) { + drm_err(drm, "DM: Backlight registration failed!\n"); + dm->backlight_dev[aconnector->bl_idx] = NULL; + } else + drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name); } static int initialize_plane(struct amdgpu_display_manager *dm, @@ -4081,7 +5323,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm, plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); if (!plane) { - DRM_ERROR("KMS: Failed to allocate 
plane\n"); + drm_err(adev_to_drm(dm->adev), "KMS: Failed to allocate plane\n"); return -ENOMEM; } plane->type = plane_type; @@ -4099,7 +5341,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm, ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap); if (ret) { - DRM_ERROR("KMS: Failed to initialize plane\n"); + drm_err(adev_to_drm(dm->adev), "KMS: Failed to initialize plane\n"); kfree(plane); return ret; } @@ -4111,24 +5353,36 @@ static int initialize_plane(struct amdgpu_display_manager *dm, } -static void register_backlight_device(struct amdgpu_display_manager *dm, - struct dc_link *link) +static void setup_backlight_device(struct amdgpu_display_manager *dm, + struct amdgpu_dm_connector *aconnector) { - if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) && - link->type != dc_connection_none) { - /* - * Event if registration failed, we should continue with - * DM initialization because not having a backlight control - * is better then a black screen. - */ - if (!dm->backlight_dev[dm->num_of_edps]) - amdgpu_dm_register_backlight_device(dm); + struct amdgpu_dm_backlight_caps *caps; + struct dc_link *link = aconnector->dc_link; + int bl_idx = dm->num_of_edps; - if (dm->backlight_dev[dm->num_of_edps]) { - dm->backlight_link[dm->num_of_edps] = link; - dm->num_of_edps++; - } + if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) || + link->type == dc_connection_none) + return; + + if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) { + drm_warn(adev_to_drm(dm->adev), "Too much eDP connections, skipping backlight setup for additional eDPs\n"); + return; } + + aconnector->bl_idx = bl_idx; + + amdgpu_dm_update_backlight_caps(dm, bl_idx); + dm->backlight_link[bl_idx] = link; + dm->num_of_edps++; + + update_connector_ext_caps(aconnector); + caps = &dm->backlight_caps[aconnector->bl_idx]; + + /* Only offer ABM property when non-OLED and user didn't turn off by module parameter */ + if (!caps->ext_caps->bits.oled && amdgpu_dm_abm_level < 0) + drm_object_attach_property(&aconnector->base.base, + dm->adev->mode_info.abm_level_property, + ABM_SYSFS_CONTROL); } static void amdgpu_set_panel_orientation(struct drm_connector *connector); @@ -4144,29 +5398,36 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector); static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) { struct amdgpu_display_manager *dm = &adev->dm; - int32_t i; + s32 i; struct amdgpu_dm_connector *aconnector = NULL; struct amdgpu_encoder *aencoder = NULL; struct amdgpu_mode_info *mode_info = &adev->mode_info; - uint32_t link_cnt; - int32_t primary_planes; + u32 link_cnt; + s32 primary_planes; enum dc_connection_type new_connection_type = dc_connection_none; const struct dc_plane_cap *plane; bool psr_feature_enabled = false; + bool replay_feature_enabled = false; + int max_overlay = dm->dc->caps.max_slave_planes; dm->display_indexes_num = dm->dc->caps.max_streams; /* Update the actual used number of crtc */ adev->mode_info.num_crtc = adev->dm.display_indexes_num; + amdgpu_dm_set_irq_funcs(adev); + link_cnt = dm->dc->caps.max_links; if (amdgpu_dm_mode_config_init(dm->adev)) { - DRM_ERROR("DM: Failed to initialize mode config\n"); + drm_err(adev_to_drm(adev), "DM: Failed to initialize mode config\n"); return -EINVAL; } /* There is one primary plane per CRTC */ primary_planes = dm->dc->caps.max_streams; - ASSERT(primary_planes <= AMDGPU_MAX_PLANES); + if (primary_planes > AMDGPU_MAX_PLANES) { + drm_err(adev_to_drm(adev), "DM: Plane nums out of 6 planes\n"); 
+ return -EINVAL; + } /* * Initialize primary planes, implicit planes for legacy IOCTLS. @@ -4177,7 +5438,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) if (initialize_plane(dm, mode_info, i, DRM_PLANE_TYPE_PRIMARY, plane)) { - DRM_ERROR("KMS: Failed to initialize primary plane\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to initialize primary plane\n"); goto fail; } } @@ -4201,30 +5462,27 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) continue; - if (!plane->blends_with_above || !plane->blends_with_below) - continue; - if (!plane->pixel_format_support.argb8888) continue; + if (max_overlay-- == 0) + break; + if (initialize_plane(dm, NULL, primary_planes + i, DRM_PLANE_TYPE_OVERLAY, plane)) { - DRM_ERROR("KMS: Failed to initialize overlay plane\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to initialize overlay plane\n"); goto fail; } - - /* Only create one overlay plane. */ - break; } for (i = 0; i < dm->dc->caps.max_streams; i++) if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { - DRM_ERROR("KMS: Failed to initialize crtc\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to initialize crtc\n"); goto fail; } /* Use Outbox interrupt */ - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(3, 0, 0): case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): @@ -4234,19 +5492,23 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): case IP_VERSION(2, 1, 0): + case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): + case IP_VERSION(4, 0, 1): if (register_outbox_irq_handlers(dm->adev)) { - DRM_ERROR("DM: Failed to initialize IRQ\n"); + drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); goto fail; } break; default: DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n", - adev->ip_versions[DCE_HWIP][0]); + amdgpu_ip_version(adev, DCE_HWIP, 0)); } /* Determine whether to enable PSR support by default. */ if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) { - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 4): @@ -4254,6 +5516,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): + case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): + case IP_VERSION(4, 0, 1): psr_feature_enabled = true; break; default: @@ -4262,14 +5528,54 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } } + /* Determine whether to enable Replay support by default. 
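 * Replay takes precedence: if Replay caps are set for a link during
 * detection below, PSR is disabled for that link.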
*/ + if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): + case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): + replay_feature_enabled = true; + break; + + default: + replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK; + break; + } + } + + if (link_cnt > MAX_LINKS) { + drm_err(adev_to_drm(adev), + "KMS: Cannot support more than %d display indexes\n", + MAX_LINKS); + goto fail; + } + /* loops over all connectors on the board */ for (i = 0; i < link_cnt; i++) { struct dc_link *link = NULL; - if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { - DRM_ERROR( - "KMS: Cannot support more than %d display indexes\n", - AMDGPU_DM_MAX_DISPLAY_INDEX); + link = dc_get_link_at_index(dm->dc, i); + + if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) { + struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL); + + if (!wbcon) { + drm_err(adev_to_drm(adev), "KMS: Failed to allocate writeback connector\n"); + continue; + } + + if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) { + drm_err(adev_to_drm(adev), "KMS: Failed to initialize writeback connector\n"); + kfree(wbcon); + continue; + } + + link->psr_settings.psr_feature_enabled = false; + link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; + continue; } @@ -4282,19 +5588,21 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) goto fail; if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { - DRM_ERROR("KMS: Failed to initialize encoder\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to initialize encoder\n"); goto fail; } if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { - DRM_ERROR("KMS: Failed to initialize connector\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to initialize connector\n"); goto fail; } - link = dc_get_link_at_index(dm->dc, i); + if (dm->hpd_rx_offload_wq) + dm->hpd_rx_offload_wq[aconnector->base.index].aconnector = + aconnector; - if (!dc_link_detect_sink(link, &new_connection_type)) - DRM_ERROR("KMS: Failed to detect connector\n"); + if (!dc_link_detect_connection_type(link, &new_connection_type)) + drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(link); @@ -4303,29 +5611,39 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) bool ret = false; mutex_lock(&dm->dc_lock); + dc_exit_ips_for_hw_access(dm->dc); ret = dc_link_detect(link, DETECT_REASON_BOOT); mutex_unlock(&dm->dc_lock); if (ret) { amdgpu_dm_update_connector_after_detect(aconnector); - register_backlight_device(dm, link); + setup_backlight_device(dm, aconnector); - if (dm->num_of_edps) - update_connector_ext_caps(aconnector); + /* Disable PSR if Replay can be enabled */ + if (replay_feature_enabled) + if (amdgpu_dm_set_replay_caps(link, aconnector)) + psr_feature_enabled = false; - if (psr_feature_enabled) + if (psr_feature_enabled) { amdgpu_dm_set_psr_caps(link); - - /* TODO: Fix vblank control helpers to delay PSR entry to allow this when - * PSR is also supported. 
- */ - if (link->psr_settings.psr_feature_enabled) - adev_to_drm(adev)->vblank_disable_immediate = false; + drm_info(adev_to_drm(adev), "PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n", + link->psr_settings.psr_feature_enabled, + link->psr_settings.psr_version, + link->dpcd_caps.psr_info.psr_version, + link->dpcd_caps.psr_info.psr_dpcd_caps.raw, + link->dpcd_caps.psr_info.psr2_su_y_granularity_cap); + } } } amdgpu_set_panel_orientation(&aconnector->base); } + /* Debug dump: list all DC links and their associated sinks after detection + * is complete for all connectors. This provides a comprehensive view of the + * final state without repeating the dump for each connector. + */ + amdgpu_dm_dump_links_and_sinks(adev); + /* Software is initialized. Now we can register interrupt handlers. */ switch (adev->asic_type) { #if defined(CONFIG_DRM_AMD_DC_SI) @@ -4334,7 +5652,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case CHIP_VERDE: case CHIP_OLAND: if (dce60_register_irq_handlers(dm->adev)) { - DRM_ERROR("DM: Failed to initialize IRQ\n"); + drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); goto fail; } break; @@ -4356,12 +5674,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case CHIP_VEGA12: case CHIP_VEGA20: if (dce110_register_irq_handlers(dm->adev)) { - DRM_ERROR("DM: Failed to initialize IRQ\n"); + drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); goto fail; } break; default: - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): case IP_VERSION(2, 0, 2): @@ -4379,14 +5697,18 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): + case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): + case IP_VERSION(4, 0, 1): if (dcn10_register_irq_handlers(dm->adev)) { - DRM_ERROR("DM: Failed to initialize IRQ\n"); + drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); goto fail; } break; default: - DRM_ERROR("Unsupported DCE IP versions: 0x%X\n", - adev->ip_versions[DCE_HWIP][0]); + drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%X\n", + amdgpu_ip_version(adev, DCE_HWIP, 0)); goto fail; } break; @@ -4402,8 +5724,8 @@ fail: static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) { - drm_atomic_private_obj_fini(&dm->atomic_obj); - return; + if (dm->atomic_obj.state) + drm_atomic_private_obj_fini(&dm->atomic_obj); } /****************************************************************************** @@ -4447,15 +5769,20 @@ static ssize_t s3_debug_store(struct device *device, int s3_state; struct drm_device *drm_dev = dev_get_drvdata(device); struct amdgpu_device *adev = drm_to_adev(drm_dev); + struct amdgpu_ip_block *ip_block; + + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE); + if (!ip_block) + return -EINVAL; ret = kstrtoint(buf, 0, &s3_state); if (ret == 0) { if (s3_state) { - dm_resume(adev); + dm_resume(ip_block); drm_kms_helper_hotplug_event(adev_to_drm(adev)); } else - dm_suspend(adev); + dm_suspend(ip_block); } return ret == 0 ? 
count : 0; @@ -4465,9 +5792,86 @@ DEVICE_ATTR_WO(s3_debug); #endif -static int dm_early_init(void *handle) +static int dm_init_microcode(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + char *fw_name_dmub; + int r; + + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(2, 1, 0): + fw_name_dmub = FIRMWARE_RENOIR_DMUB; + if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) + fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; + break; + case IP_VERSION(3, 0, 0): + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0)) + fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; + else + fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; + break; + case IP_VERSION(3, 0, 1): + fw_name_dmub = FIRMWARE_VANGOGH_DMUB; + break; + case IP_VERSION(3, 0, 2): + fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; + break; + case IP_VERSION(3, 0, 3): + fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; + break; + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; + break; + case IP_VERSION(3, 1, 4): + fw_name_dmub = FIRMWARE_DCN_314_DMUB; + break; + case IP_VERSION(3, 1, 5): + fw_name_dmub = FIRMWARE_DCN_315_DMUB; + break; + case IP_VERSION(3, 1, 6): + fw_name_dmub = FIRMWARE_DCN316_DMUB; + break; + case IP_VERSION(3, 2, 0): + fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; + break; + case IP_VERSION(3, 2, 1): + fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; + break; + case IP_VERSION(3, 5, 0): + fw_name_dmub = FIRMWARE_DCN_35_DMUB; + break; + case IP_VERSION(3, 5, 1): + fw_name_dmub = FIRMWARE_DCN_351_DMUB; + break; + case IP_VERSION(3, 6, 0): + fw_name_dmub = FIRMWARE_DCN_36_DMUB; + break; + case IP_VERSION(4, 0, 1): + fw_name_dmub = FIRMWARE_DCN_401_DMUB; + break; + default: + /* ASIC doesn't support DMUB. 
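 * Returning 0 is deliberate: DM simply skips DMUB firmware loading
 * on ASICs without DMUB support.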
*/ + return 0; + } + r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, AMDGPU_UCODE_REQUIRED, + "%s", fw_name_dmub); + return r; +} + +static int dm_early_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_mode_info *mode_info = &adev->mode_info; + struct atom_context *ctx = mode_info->atom_context; + int index = GetIndexIntoMasterTable(DATA, Object_Header); + u16 data_offset; + + /* if there is no object header, skip DM */ + if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { + adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; + drm_info(adev_to_drm(adev), "No object header, skipping DM\n"); + return -ENOENT; + } switch (adev->asic_type) { #if defined(CONFIG_DRM_AMD_DC_SI) @@ -4538,7 +5942,7 @@ static int dm_early_init(void *handle) break; default: - switch (adev->ip_versions[DCE_HWIP][0]) { + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(2, 0, 2): case IP_VERSION(3, 0, 0): adev->mode_info.num_crtc = 6; @@ -4568,20 +5972,22 @@ static int dm_early_init(void *handle) case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): + case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): + case IP_VERSION(4, 0, 1): adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 4; adev->mode_info.num_dig = 4; break; default: - DRM_ERROR("Unsupported DCE IP versions: 0x%x\n", - adev->ip_versions[DCE_HWIP][0]); + drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%x\n", + amdgpu_ip_version(adev, DCE_HWIP, 0)); return -EINVAL; } break; } - amdgpu_dm_set_irq_funcs(adev); - if (adev->mode_info.funcs == NULL) adev->mode_info.funcs = &dm_display_funcs; @@ -4595,8 +6001,9 @@ static int dm_early_init(void *handle) adev_to_drm(adev)->dev, &dev_attr_s3_debug); #endif + adev->dc_enabled = true; - return 0; + return dm_init_microcode(adev); } static bool modereset_required(struct drm_crtc_state *crtc_state) @@ -4623,6 +6030,10 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state, *color_space = COLOR_SPACE_SRGB; + /* Ignore properties when DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE is set */ + if (plane_state->state && plane_state->state->plane_color_pipeline) + return 0; + /* DRM color properties only affect non-RGB formats. 
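 * RGB surfaces keep the sRGB default set above, hence the early return.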
*/ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return 0; @@ -4646,9 +6057,9 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state, case DRM_COLOR_YCBCR_BT2020: if (full_range) - *color_space = COLOR_SPACE_2020_YCBCR; + *color_space = COLOR_SPACE_2020_YCBCR_FULL; else - return -EINVAL; + *color_space = COLOR_SPACE_2020_YCBCR_LIMITED; break; default: @@ -4661,11 +6072,10 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state, static int fill_dc_plane_info_and_addr(struct amdgpu_device *adev, const struct drm_plane_state *plane_state, - const uint64_t tiling_flags, + const u64 tiling_flags, struct dc_plane_info *plane_info, struct dc_plane_address *address, - bool tmz_surface, - bool force_disable_dcc) + bool tmz_surface) { const struct drm_framebuffer *fb = plane_state->fb; const struct amdgpu_framebuffer *afb = @@ -4724,7 +6134,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616; break; default: - DRM_ERROR( + drm_err(adev_to_drm(adev), "Unsupported screen format %p4cc\n", &fb->format->format); return -EINVAL; @@ -4759,16 +6169,16 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, if (ret) return ret; - ret = fill_plane_buffer_attributes(adev, afb, plane_info->format, + ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format, plane_info->rotation, tiling_flags, &plane_info->tiling_info, &plane_info->plane_size, &plane_info->dcc, address, - tmz_surface, force_disable_dcc); + tmz_surface); if (ret) return ret; - fill_blending_from_plane_state( + amdgpu_dm_plane_fill_blending_from_plane_state( plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha, &plane_info->global_alpha, &plane_info->global_alpha_value); @@ -4785,9 +6195,8 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, struct dc_scaling_info scaling_info; struct dc_plane_info plane_info; int ret; - bool force_disable_dcc = false; - ret = fill_dc_scaling_info(adev, plane_state, &scaling_info); + ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info); if (ret) return ret; @@ -4796,13 +6205,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, dc_plane_state->clip_rect = scaling_info.clip_rect; dc_plane_state->scaling_quality = scaling_info.scaling_quality; - force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; ret = fill_dc_plane_info_and_addr(adev, plane_state, afb->tiling_flags, &plane_info, &dc_plane_state->address, - afb->tmz_surface, - force_disable_dcc); + afb->tmz_surface); if (ret) return ret; @@ -4827,13 +6234,39 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, * Always set input transfer function, since plane state is refreshed * every time. 
*/ - ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state); + ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, + plane_state, + dc_plane_state); if (ret) return ret; return 0; } +static inline void fill_dc_dirty_rect(struct drm_plane *plane, + struct rect *dirty_rect, int32_t x, + s32 y, s32 width, s32 height, + int *i, bool ffu) +{ + WARN_ON(*i >= DC_MAX_DIRTY_RECTS); + + dirty_rect->x = x; + dirty_rect->y = y; + dirty_rect->width = width; + dirty_rect->height = height; + + if (ffu) + drm_dbg(plane->dev, + "[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n", + plane->base.id, width, height); + else + drm_dbg(plane->dev, + "[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)", + plane->base.id, x, y, width, height); + + (*i)++; +} + /** * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates * @@ -4843,6 +6276,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, * @new_plane_state: New state of @plane * @crtc_state: New state of CRTC connected to the @plane * @flip_addrs: DC flip tracking struct, which also tracts dirty rects + * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update (PSR SU) is enabled. + * If PSR SU is enabled and damage clips are available, only the regions of the screen + * that have changed will be updated. If PSR SU is not enabled, + * or if damage clips are not available, the entire screen will be updated. + * @dirty_regions_changed: dirty regions changed * * For PSR SU, DC informs the DMUB uController of dirty rectangle regions * (referred to as "damage clips" in DRM nomenclature) that require updating on @@ -4854,25 +6292,23 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, * addition, certain use cases - such as cursor and multi-plane overlay (MPO) - * implicitly provide damage clips without any client support via the plane * bounds. - * - * Today, amdgpu_dm only supports the MPO and cursor usecase. - * - * TODO: Also enable for FB_DAMAGE_CLIPS */ static void fill_dc_dirty_rects(struct drm_plane *plane, struct drm_plane_state *old_plane_state, struct drm_plane_state *new_plane_state, struct drm_crtc_state *crtc_state, - struct dc_flip_addrs *flip_addrs) + struct dc_flip_addrs *flip_addrs, + bool is_psr_su, + bool *dirty_regions_changed) { struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); struct rect *dirty_rects = flip_addrs->dirty_rects; - uint32_t num_clips; + u32 num_clips; + struct drm_mode_rect *clips; bool bb_changed; bool fb_changed; - uint32_t i = 0; - - flip_addrs->dirty_rect_count = 0; + u32 i = 0; + *dirty_regions_changed = false; /* * Cursor plane has it's own dirty rect update interface. See @@ -4881,20 +6317,27 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, if (plane->type == DRM_PLANE_TYPE_CURSOR) return; - /* - * Today, we only consider MPO use-case for PSR SU. If MPO not - * requested, and there is a plane update, do FFU. 
- */ + if (new_plane_state->rotation != DRM_MODE_ROTATE_0) + goto ffu; + + num_clips = drm_plane_get_damage_clips_count(new_plane_state); + clips = drm_plane_get_damage_clips(new_plane_state); + + if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 && + is_psr_su))) + goto ffu; + if (!dm_crtc_state->mpo_requested) { - dirty_rects[0].x = 0; - dirty_rects[0].y = 0; - dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay; - dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay; - flip_addrs->dirty_rect_count = 1; - DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n", - new_plane_state->plane->base.id, - dm_crtc_state->base.mode.crtc_hdisplay, - dm_crtc_state->base.mode.crtc_vdisplay); + if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS) + goto ffu; + + for (; flip_addrs->dirty_rect_count < num_clips; clips++) + fill_dc_dirty_rect(new_plane_state->plane, + &dirty_rects[flip_addrs->dirty_rect_count], + clips->x1, clips->y1, + clips->x2 - clips->x1, clips->y2 - clips->y1, + &flip_addrs->dirty_rect_count, + false); return; } @@ -4905,7 +6348,6 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, * If plane is moved or resized, also add old bounding box to dirty * rects. */ - num_clips = drm_plane_get_damage_clips_count(new_plane_state); fb_changed = old_plane_state->fb->base.id != new_plane_state->fb->base.id; bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x || @@ -4913,36 +6355,53 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, old_plane_state->crtc_w != new_plane_state->crtc_w || old_plane_state->crtc_h != new_plane_state->crtc_h); - DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n", - new_plane_state->plane->base.id, - bb_changed, fb_changed, num_clips); + drm_dbg(plane->dev, + "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n", + new_plane_state->plane->base.id, + bb_changed, fb_changed, num_clips); - if (num_clips || fb_changed || bb_changed) { - dirty_rects[i].x = new_plane_state->crtc_x; - dirty_rects[i].y = new_plane_state->crtc_y; - dirty_rects[i].width = new_plane_state->crtc_w; - dirty_rects[i].height = new_plane_state->crtc_h; - DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n", - new_plane_state->plane->base.id, - dirty_rects[i].x, dirty_rects[i].y, - dirty_rects[i].width, dirty_rects[i].height); - i += 1; - } + *dirty_regions_changed = bb_changed; + + if ((num_clips + (bb_changed ? 
2 : 0)) > DC_MAX_DIRTY_RECTS) + goto ffu; - /* Add old plane bounding-box if plane is moved or resized */ if (bb_changed) { - dirty_rects[i].x = old_plane_state->crtc_x; - dirty_rects[i].y = old_plane_state->crtc_y; - dirty_rects[i].width = old_plane_state->crtc_w; - dirty_rects[i].height = old_plane_state->crtc_h; - DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n", - old_plane_state->plane->base.id, - dirty_rects[i].x, dirty_rects[i].y, - dirty_rects[i].width, dirty_rects[i].height); - i += 1; + fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], + new_plane_state->crtc_x, + new_plane_state->crtc_y, + new_plane_state->crtc_w, + new_plane_state->crtc_h, &i, false); + + /* Add old plane bounding-box if plane is moved or resized */ + fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], + old_plane_state->crtc_x, + old_plane_state->crtc_y, + old_plane_state->crtc_w, + old_plane_state->crtc_h, &i, false); + } + + if (num_clips) { + for (; i < num_clips; clips++) + fill_dc_dirty_rect(new_plane_state->plane, + &dirty_rects[i], clips->x1, + clips->y1, clips->x2 - clips->x1, + clips->y2 - clips->y1, &i, false); + } else if (fb_changed && !bb_changed) { + fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], + new_plane_state->crtc_x, + new_plane_state->crtc_y, + new_plane_state->crtc_w, + new_plane_state->crtc_h, &i, false); } flip_addrs->dirty_rect_count = i; + return; + +ffu: + fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0, + dm_crtc_state->base.mode.crtc_hdisplay, + dm_crtc_state->base.mode.crtc_vdisplay, + &flip_addrs->dirty_rect_count, true); } static void update_stream_scaling_settings(const struct drm_display_mode *mode, @@ -5004,7 +6463,7 @@ static enum dc_color_depth convert_color_depth_from_display_info(const struct drm_connector *connector, bool is_y420, int requested_bpc) { - uint8_t bpc; + u8 bpc; if (is_y420) { bpc = 8; @@ -5070,21 +6529,46 @@ get_aspect_ratio(const struct drm_display_mode *mode_in) } static enum dc_color_space -get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) +get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing, + const struct drm_connector_state *connector_state) { enum dc_color_space color_space = COLOR_SPACE_SRGB; - switch (dc_crtc_timing->pixel_encoding) { - case PIXEL_ENCODING_YCBCR422: - case PIXEL_ENCODING_YCBCR444: - case PIXEL_ENCODING_YCBCR420: - { + switch (connector_state->colorspace) { + case DRM_MODE_COLORIMETRY_BT601_YCC: + if (dc_crtc_timing->flags.Y_ONLY) + color_space = COLOR_SPACE_YCBCR601_LIMITED; + else + color_space = COLOR_SPACE_YCBCR601; + break; + case DRM_MODE_COLORIMETRY_BT709_YCC: + if (dc_crtc_timing->flags.Y_ONLY) + color_space = COLOR_SPACE_YCBCR709_LIMITED; + else + color_space = COLOR_SPACE_YCBCR709; + break; + case DRM_MODE_COLORIMETRY_OPRGB: + color_space = COLOR_SPACE_ADOBERGB; + break; + case DRM_MODE_COLORIMETRY_BT2020_RGB: + case DRM_MODE_COLORIMETRY_BT2020_YCC: + if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) + color_space = COLOR_SPACE_2020_RGB_FULLRANGE; + else + color_space = COLOR_SPACE_2020_YCBCR_LIMITED; + break; + case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601 + default: + if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) { + color_space = COLOR_SPACE_SRGB; + if (connector_state->hdmi.broadcast_rgb == DRM_HDMI_BROADCAST_RGB_LIMITED) + color_space = COLOR_SPACE_SRGB_LIMITED; /* * 27030khz is the separation point between HDTV and SDTV * according to HDMI spec, we use YCbCr709 and YCbCr601 * respectively */ 
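The 270300 value checked just below is expressed in units of 100 Hz, i.e. 27.03 MHz (the "27030khz" mentioned in the comment above). As a minimal sketch of the YCbCr selection that follows, with a hypothetical helper name used only to make the split explicit:

static enum dc_color_space
pick_ycbcr_space(u32 pix_clk_100hz, bool y_only)
{
	/* HDTV-class pixel clocks (> 27.03 MHz) map to BT.709, SDTV to BT.601 */
	if (pix_clk_100hz > 270300)
		return y_only ? COLOR_SPACE_YCBCR709_LIMITED : COLOR_SPACE_YCBCR709;

	return y_only ? COLOR_SPACE_YCBCR601_LIMITED : COLOR_SPACE_YCBCR601;
}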
- if (dc_crtc_timing->pix_clk_100hz > 270300) { + } else if (dc_crtc_timing->pix_clk_100hz > 270300) { if (dc_crtc_timing->flags.Y_ONLY) color_space = COLOR_SPACE_YCBCR709_LIMITED; @@ -5097,27 +6581,37 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) else color_space = COLOR_SPACE_YCBCR601; } - - } - break; - case PIXEL_ENCODING_RGB: - color_space = COLOR_SPACE_SRGB; - break; - - default: - WARN_ON(1); break; } return color_space; } +static enum display_content_type +get_output_content_type(const struct drm_connector_state *connector_state) +{ + switch (connector_state->content_type) { + default: + case DRM_MODE_CONTENT_TYPE_NO_DATA: + return DISPLAY_CONTENT_TYPE_NO_DATA; + case DRM_MODE_CONTENT_TYPE_GRAPHICS: + return DISPLAY_CONTENT_TYPE_GRAPHICS; + case DRM_MODE_CONTENT_TYPE_PHOTO: + return DISPLAY_CONTENT_TYPE_PHOTO; + case DRM_MODE_CONTENT_TYPE_CINEMA: + return DISPLAY_CONTENT_TYPE_CINEMA; + case DRM_MODE_CONTENT_TYPE_GAME: + return DISPLAY_CONTENT_TYPE_GAME; + } +} + static bool adjust_colour_depth_from_display_info( struct dc_crtc_timing *timing_out, const struct drm_display_info *info) { enum dc_color_depth depth = timing_out->display_color_depth; int normalized_clk; + do { normalized_clk = timing_out->pix_clk_100hz / 10; /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ @@ -5158,9 +6652,13 @@ static void fill_stream_properties_from_drm_display_mode( { struct dc_crtc_timing *timing_out = &stream->timing; const struct drm_display_info *info = &connector->display_info; - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *aconnector = NULL; struct hdmi_vendor_infoframe hv_frame; struct hdmi_avi_infoframe avi_frame; + ssize_t err; + + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + aconnector = to_amdgpu_dm_connector(connector); memset(&hv_frame, 0, sizeof(hv_frame)); memset(&avi_frame, 0, sizeof(avi_frame)); @@ -5174,8 +6672,13 @@ static void fill_stream_properties_from_drm_display_mode( && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; else if (drm_mode_is_420_also(info, mode_in) + && aconnector && aconnector->force_yuv420_output) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR422) + && aconnector + && aconnector->force_yuv422_output) + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR422; else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444) && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; @@ -5203,13 +6706,23 @@ static void fill_stream_properties_from_drm_display_mode( } if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { - drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); + err = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, + (struct drm_connector *)connector, + mode_in); + if (err < 0) + drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd\n", + connector->name, err); timing_out->vic = avi_frame.video_code; - drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); + err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, + (struct drm_connector *)connector, + mode_in); + if (err < 0) + drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd\n", + connector->name, err); timing_out->hdmi_vic = hv_frame.vic; } - if 
(is_freesync_video_mode(mode_in, aconnector)) { + if (aconnector && is_freesync_video_mode(mode_in, aconnector)) { timing_out->h_addressable = mode_in->hdisplay; timing_out->h_total = mode_in->htotal; timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start; @@ -5233,10 +6746,8 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->aspect_ratio = get_aspect_ratio(mode_in); - stream->output_color_space = get_output_color_space(timing_out); - - stream->out_transfer_func->type = TF_TYPE_PREDEFINED; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; + stream->out_transfer_func.type = TF_TYPE_PREDEFINED; + stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB; if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { if (!adjust_colour_depth_from_display_info(timing_out, info) && drm_mode_is_420_also(info, mode_in) && @@ -5245,6 +6756,9 @@ static void fill_stream_properties_from_drm_display_mode( adjust_colour_depth_from_display_info(timing_out, info); } } + + stream->output_color_space = get_output_color_space(timing_out, connector_state); + stream->content_type = get_output_content_type(connector_state); } static void fill_audio_info(struct audio_info *audio_info, @@ -5317,28 +6831,29 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, const struct drm_display_mode *native_mode, bool scale_enabled) { - if (scale_enabled) { - copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); - } else if (native_mode->clock == drm_mode->clock && - native_mode->htotal == drm_mode->htotal && - native_mode->vtotal == drm_mode->vtotal) { - copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); + if (scale_enabled || ( + native_mode->clock == drm_mode->clock && + native_mode->htotal == drm_mode->htotal && + native_mode->vtotal == drm_mode->vtotal)) { + if (native_mode->crtc_clock) + copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); } else { /* no scaling nor amdgpu inserted, no need to patch */ } } static struct dc_sink * -create_fake_sink(struct amdgpu_dm_connector *aconnector) +create_fake_sink(struct drm_device *dev, struct dc_link *link) { struct dc_sink_init_data sink_init_data = { 0 }; struct dc_sink *sink = NULL; - sink_init_data.link = aconnector->dc_link; - sink_init_data.sink_signal = aconnector->dc_link->connector_signal; + + sink_init_data.link = link; + sink_init_data.sink_signal = link->connector_signal; sink = dc_sink_create(&sink_init_data); if (!sink) { - DRM_ERROR("Failed to create sink!\n"); + drm_err(dev, "Failed to create sink!\n"); return NULL; } sink->sink_signal = SIGNAL_TYPE_VIRTUAL; @@ -5452,11 +6967,14 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, &aconnector->base.probed_modes : &aconnector->base.modes; + if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + return NULL; + if (aconnector->freesync_vid_base.clock != 0) return &aconnector->freesync_vid_base; /* Find the preferred mode */ - list_for_each_entry (m, list_head, head) { + list_for_each_entry(m, list_head, head) { if (m->type & DRM_MODE_TYPE_PREFERRED) { m_pref = m; break; @@ -5468,7 +6986,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, m_pref = list_first_entry_or_null( &aconnector->base.modes, struct drm_display_mode, head); if (!m_pref) { - DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); + drm_dbg_driver(aconnector->base.dev, "No preferred mode found in EDID\n"); return NULL; } } @@ -5480,7 +6998,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector 
*aconnector, * For some monitors, preferred mode is not the mode with highest * supported refresh rate. */ - list_for_each_entry (m, list_head, head) { + list_for_each_entry(m, list_head, head) { current_refresh = drm_mode_vrefresh(m); if (m->hdisplay == m_pref->hdisplay && @@ -5522,7 +7040,7 @@ static bool is_freesync_video_mode(const struct drm_display_mode *mode, return true; } -#if defined(CONFIG_DRM_AMD_DC_DCN) +#if defined(CONFIG_DRM_AMD_DC_FP) static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, struct dc_sink *sink, struct dc_stream_state *stream, struct dsc_dec_dpcd_caps *dsc_caps) @@ -5541,18 +7059,21 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, } } - static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, struct dc_sink *sink, struct dc_stream_state *stream, struct dsc_dec_dpcd_caps *dsc_caps, uint32_t max_dsc_target_bpp_limit_override) { const struct dc_link_settings *verified_link_cap = NULL; - uint32_t link_bw_in_kbps; - uint32_t edp_min_bpp_x16, edp_max_bpp_x16; + u32 link_bw_in_kbps; + u32 edp_min_bpp_x16, edp_max_bpp_x16; struct dc *dc = sink->ctx->dc; struct dc_dsc_bw_range bw_range = {0}; struct dc_dsc_config dsc_cfg = {0}; + struct dc_dsc_config_options dsc_options = {0}; + + dc_dsc_get_default_config_option(dc, &dsc_options); + dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; verified_link_cap = dc_link_get_link_cap(stream->link); link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap); @@ -5570,15 +7091,16 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, edp_min_bpp_x16, edp_max_bpp_x16, dsc_caps, &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), &bw_range)) { if (bw_range.max_kbps < link_bw_in_kbps) { if (dc_dsc_compute_config(dc->res_pool->dscs[0], dsc_caps, - dc->debug.dsc_min_slice_height_override, - max_dsc_target_bpp_limit_override, + &dsc_options, 0, &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), &dsc_cfg)) { stream->timing.dsc_cfg = dsc_cfg; stream->timing.flags.DSC = 1; @@ -5590,58 +7112,61 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, if (dc_dsc_compute_config(dc->res_pool->dscs[0], dsc_caps, - dc->debug.dsc_min_slice_height_override, - max_dsc_target_bpp_limit_override, + &dsc_options, link_bw_in_kbps, &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), &dsc_cfg)) { stream->timing.dsc_cfg = dsc_cfg; stream->timing.flags.DSC = 1; } } - static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, struct dc_sink *sink, struct dc_stream_state *stream, struct dsc_dec_dpcd_caps *dsc_caps) { struct drm_connector *drm_connector = &aconnector->base; - uint32_t link_bandwidth_kbps; - uint32_t max_dsc_target_bpp_limit_override = 0; + u32 link_bandwidth_kbps; struct dc *dc = sink->ctx->dc; - uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps; - uint32_t dsc_max_supported_bw_in_kbps; + u32 max_supported_bw_in_kbps, timing_bw_in_kbps; + u32 dsc_max_supported_bw_in_kbps; + u32 max_dsc_target_bpp_limit_override = + drm_connector->display_info.max_dsc_bpp; + struct dc_dsc_config_options dsc_options = {0}; + + dc_dsc_get_default_config_option(dc, &dsc_options); + dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, dc_link_get_link_cap(aconnector->dc_link)); - if (stream->link && 
stream->link->local_sink) - max_dsc_target_bpp_limit_override = - stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit; /* Set DSC policy according to dsc_clock_en */ dc_dsc_policy_set_enable_dsc_when_not_needed( aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); - if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && + if (sink->sink_signal == SIGNAL_TYPE_EDP && !aconnector->dc_link->panel_config.dsc.disable_dsc_edp && dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); - } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { + } else if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], dsc_caps, - aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, - max_dsc_target_bpp_limit_override, + &dsc_options, link_bandwidth_kbps, &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name); + drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from SST RX\n", + __func__, drm_connector->name); } } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { - timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); + timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link)); max_supported_bw_in_kbps = link_bandwidth_kbps; dsc_max_supported_bw_in_kbps = link_bandwidth_kbps; @@ -5650,13 +7175,13 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, dsc_max_supported_bw_in_kbps > 0) if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], dsc_caps, - aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, - max_dsc_target_bpp_limit_override, + &dsc_options, dsc_max_supported_bw_in_kbps, &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n", + drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n", __func__, drm_connector->name); } } @@ -5675,47 +7200,56 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; } -#endif /* CONFIG_DRM_AMD_DC_DCN */ +#endif static struct dc_stream_state * -create_stream_for_sink(struct amdgpu_dm_connector *aconnector, +create_stream_for_sink(struct drm_connector *connector, const struct drm_display_mode *drm_mode, const struct dm_connector_state *dm_state, const struct dc_stream_state *old_stream, int requested_bpc) { + struct drm_device *dev = connector->dev; + struct amdgpu_dm_connector *aconnector = NULL; struct drm_display_mode *preferred_mode = NULL; - struct drm_connector *drm_connector; - const struct drm_connector_state *con_state = - dm_state ? 
&dm_state->base : NULL; + const struct drm_connector_state *con_state = &dm_state->base; struct dc_stream_state *stream = NULL; - struct drm_display_mode mode = *drm_mode; + struct drm_display_mode mode; struct drm_display_mode saved_mode; struct drm_display_mode *freesync_mode = NULL; bool native_mode_found = false; bool recalculate_timing = false; - bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false; + bool scale = dm_state->scaling != RMX_OFF; int mode_refresh; int preferred_refresh = 0; -#if defined(CONFIG_DRM_AMD_DC_DCN) + enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN; +#if defined(CONFIG_DRM_AMD_DC_FP) struct dsc_dec_dpcd_caps dsc_caps; #endif - + struct dc_link *link = NULL; struct dc_sink *sink = NULL; + drm_mode_init(&mode, drm_mode); memset(&saved_mode, 0, sizeof(saved_mode)); - if (aconnector == NULL) { - DRM_ERROR("aconnector is NULL!\n"); - return stream; - } + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) { + aconnector = NULL; + aconnector = to_amdgpu_dm_connector(connector); + link = aconnector->dc_link; + } else { + struct drm_writeback_connector *wbcon = NULL; + struct amdgpu_dm_wb_connector *dm_wbcon = NULL; - drm_connector = &aconnector->base; + wbcon = drm_connector_to_writeback(connector); + dm_wbcon = to_amdgpu_dm_wb_connector(wbcon); + link = dm_wbcon->link; + } - if (!aconnector->dc_sink) { - sink = create_fake_sink(aconnector); + if (!aconnector || !aconnector->dc_sink) { + sink = create_fake_sink(dev, link); if (!sink) return stream; + } else { sink = aconnector->dc_sink; dc_sink_retain(sink); @@ -5724,16 +7258,17 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, stream = dc_create_stream_for_sink(sink); if (stream == NULL) { - DRM_ERROR("Failed to create stream for sink!\n"); + drm_err(dev, "Failed to create stream for sink!\n"); goto finish; } + /* We leave this NULL for writeback connectors */ stream->dm_stream_context = aconnector; stream->timing.flags.LTE_340MCSC_SCRAMBLE = - drm_connector->display_info.hdmi.scdc.scrambling.low_rates; + connector->display_info.hdmi.scdc.scrambling.low_rates; - list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { + list_for_each_entry(preferred_mode, &connector->modes, head) { /* Search for preferred mode */ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { native_mode_found = true; @@ -5742,7 +7277,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, } if (!native_mode_found) preferred_mode = list_first_entry_or_null( - &aconnector->base.modes, + &connector->modes, struct drm_display_mode, head); @@ -5753,15 +7288,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, * This may not be an error, the use case is when we have no * usermode calls to reset and set mode upon hotplug. In this * case, we call set mode ourselves to restore the previous mode - * and the modelist may not be filled in in time. + * and the modelist may not be filled in time. 
*/ - DRM_DEBUG_DRIVER("No preferred mode found\n"); - } else { - recalculate_timing = is_freesync_video_mode(&mode, aconnector); + drm_dbg_driver(dev, "No preferred mode found\n"); + } else if (aconnector) { + recalculate_timing = amdgpu_freesync_vid_mode && + is_freesync_video_mode(&mode, aconnector); if (recalculate_timing) { freesync_mode = get_highest_refresh_rate_mode(aconnector, false); drm_mode_copy(&saved_mode, &mode); + saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio; drm_mode_copy(&mode, freesync_mode); + mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio; } else { decide_crtc_timing_for_drm_display_mode( &mode, preferred_mode, scale); @@ -5772,23 +7310,33 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, if (recalculate_timing) drm_mode_set_crtcinfo(&saved_mode, 0); - else if (!dm_state) - drm_mode_set_crtcinfo(&mode, 0); /* - * If scaling is enabled and refresh rate didn't change - * we copy the vic and polarities of the old timings - */ + * If scaling is enabled and refresh rate didn't change + * we copy the vic and polarities of the old timings + */ if (!scale || mode_refresh != preferred_refresh) fill_stream_properties_from_drm_display_mode( - stream, &mode, &aconnector->base, con_state, NULL, + stream, &mode, connector, con_state, NULL, requested_bpc); else fill_stream_properties_from_drm_display_mode( - stream, &mode, &aconnector->base, con_state, old_stream, + stream, &mode, connector, con_state, old_stream, requested_bpc); -#if defined(CONFIG_DRM_AMD_DC_DCN) + /* The rest isn't needed for writeback connectors */ + if (!aconnector) + goto finish; + + if (aconnector->timing_changed) { + drm_dbg(aconnector->base.dev, + "overriding timing for automated test, bpc %d, changing to %d\n", + stream->timing.display_color_depth, + aconnector->timing_requested->display_color_depth); + stream->timing = *aconnector->timing_requested; + } + +#if defined(CONFIG_DRM_AMD_DC_FP) /* SST DSC determination policy */ update_dsc_caps(aconnector, sink, stream, &dsc_caps); if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) @@ -5799,7 +7347,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, fill_audio_info( &stream->audio_info, - drm_connector, + connector, sink); update_stream_signal(stream, sink); @@ -5807,21 +7355,29 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); - if (stream->link->psr_settings.psr_feature_enabled) { + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || + stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || + stream->signal == SIGNAL_TYPE_EDP) { + const struct dc_edid_caps *edid_caps; + unsigned int disable_colorimetry = 0; + + if (aconnector->dc_sink) { + edid_caps = &aconnector->dc_sink->edid_caps; + disable_colorimetry = edid_caps->panel_patch.disable_colorimetry; + } + // // should decide stream support vsc sdp colorimetry capability // before building vsc info packet // - stream->use_vsc_sdp_for_colorimetry = false; - if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - stream->use_vsc_sdp_for_colorimetry = - aconnector->dc_sink->is_vsc_sdp_colorimetry_supported; - } else { - if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) - stream->use_vsc_sdp_for_colorimetry = true; - } - mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space); - aconnector->psr_skip_count = 
AMDGPU_DM_PSR_ENTRY_DELAY; + stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && + stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED && + !disable_colorimetry; + + if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) + tf = TRANSFER_FUNC_GAMMA_22; + mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); + aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; } finish: @@ -5830,29 +7386,117 @@ finish: return stream; } +/** + * amdgpu_dm_connector_poll - Poll a connector to see if it's connected to a display + * @aconnector: DM connector to poll (owns @base drm_connector and @dc_link) + * @force: if true, force polling even when DAC load detection was used + * + * Used for connectors that don't support HPD (hotplug detection) to + * periodically check whether the connector is connected to a display. + * + * When connection was determined via DAC load detection, we avoid + * re-running it on normal polls to prevent visible glitches, unless + * @force is set. + * + * Return: The probed connector status (connected/disconnected/unknown). + */ static enum drm_connector_status -amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) +amdgpu_dm_connector_poll(struct amdgpu_dm_connector *aconnector, bool force) { - bool connected; - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct dc_link *link = aconnector->dc_link; + enum dc_connection_type conn_type = dc_connection_none; + enum drm_connector_status status = connector_status_disconnected; - /* - * Notes: - * 1. This interface is NOT called in context of HPD irq. - * 2. This interface *is called* in context of user-mode ioctl. Which - * makes it a bad place for *any* MST-related activity. + /* When we determined the connection using DAC load detection, + * do NOT poll the connector do detect disconnect because + * that would run DAC load detection again which can cause + * visible visual glitches. + * + * Only allow to poll such a connector again when forcing. */ + if (!force && link->local_sink && link->type == dc_connection_dac_load) + return connector->status; - if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && - !aconnector->fake_enable) - connected = (aconnector->dc_sink != NULL); - else - connected = (aconnector->base.force == DRM_FORCE_ON || - aconnector->base.force == DRM_FORCE_ON_DIGITAL); + mutex_lock(&aconnector->hpd_lock); + + if (dc_link_detect_connection_type(aconnector->dc_link, &conn_type) && + conn_type != dc_connection_none) { + mutex_lock(&adev->dm.dc_lock); + + /* Only call full link detection when a sink isn't created yet, + * ie. just when the display is plugged in, otherwise we risk flickering. 
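+ * (The link->local_sink check in the condition below implements this:
+ * an existing sink is reported as connected without re-running full
+ * link detection.)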
+ */ + if (link->local_sink || + dc_link_detect(link, DETECT_REASON_HPD)) + status = connector_status_connected; + + mutex_unlock(&adev->dm.dc_lock); + } + + if (connector->status != status) { + if (status == connector_status_disconnected) { + if (link->local_sink) + dc_sink_release(link->local_sink); + + link->local_sink = NULL; + link->dpcd_sink_count = 0; + link->type = dc_connection_none; + } + + amdgpu_dm_update_connector_after_detect(aconnector); + } + + mutex_unlock(&aconnector->hpd_lock); + return status; +} + +/** + * amdgpu_dm_connector_detect() - Detect whether a DRM connector is connected to a display + * + * A connector is considered connected when it has a sink that is not NULL. + * For connectors that support HPD (hotplug detection), the connection is + * handled in the HPD interrupt. + * For connectors that may not support HPD, such as analog connectors, + * DRM will call this function repeatedly to poll them. + * + * Notes: + * 1. This interface is NOT called in context of HPD irq. + * 2. This interface *is called* in context of user-mode ioctl. Which + * makes it a bad place for *any* MST-related activity. + * + * @connector: The DRM connector we are checking. We convert it to + * amdgpu_dm_connector so we can read the DC link and state. + * @force: If true, do a full detect again. This is used even when + * a lighter check would normally be used to avoid flicker. + * + * Return: The connector status (connected, disconnected, or unknown). + * + */ +static enum drm_connector_status +amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); update_subconnector_property(aconnector); - return (connected ? connector_status_connected : + if (aconnector->base.force == DRM_FORCE_ON || + aconnector->base.force == DRM_FORCE_ON_DIGITAL) + return connector_status_connected; + else if (aconnector->base.force == DRM_FORCE_OFF) + return connector_status_disconnected; + + /* Poll analog connectors and only when either + * disconnected or connected to an analog display. + */ + if (drm_kms_helper_is_poll_worker() && + dc_connector_supports_analog(aconnector->dc_link->link_id.id) && + (!aconnector->dc_sink || aconnector->dc_sink->edid_caps.analog)) + return amdgpu_dm_connector_poll(aconnector, force); + + return (aconnector->dc_sink ? connector_status_connected : connector_status_disconnected); } @@ -5904,7 +7548,18 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, dm_new_state->underscan_enable = val; ret = 0; } else if (property == adev->mode_info.abm_level_property) { - dm_new_state->abm_level = val; + switch (val) { + case ABM_SYSFS_CONTROL: + dm_new_state->abm_sysfs_forbidden = false; + break; + case ABM_LEVEL_OFF: + dm_new_state->abm_sysfs_forbidden = true; + dm_new_state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE; + break; + default: + dm_new_state->abm_sysfs_forbidden = true; + dm_new_state->abm_level = val; + } ret = 0; } @@ -5949,27 +7604,130 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, *val = dm_state->underscan_enable; ret = 0; } else if (property == adev->mode_info.abm_level_property) { - *val = dm_state->abm_level; + if (!dm_state->abm_sysfs_forbidden) + *val = ABM_SYSFS_CONTROL; + else + *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ? 
+ dm_state->abm_level : 0; ret = 0; } return ret; } +/** + * DOC: panel power savings + * + * The display manager allows you to set your desired **panel power savings** + * level (between 0-4, with 0 representing off), e.g. using the following:: + * + * # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings + * + * Modifying this value can have implications on color accuracy, so tread + * carefully. + */ + +static ssize_t panel_power_savings_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct drm_connector *connector = dev_get_drvdata(device); + struct drm_device *dev = connector->dev; + u8 val; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + val = to_dm_connector_state(connector->state)->abm_level == + ABM_LEVEL_IMMEDIATE_DISABLE ? 0 : + to_dm_connector_state(connector->state)->abm_level; + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + return sysfs_emit(buf, "%u\n", val); +} + +static ssize_t panel_power_savings_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct drm_connector *connector = dev_get_drvdata(device); + struct drm_device *dev = connector->dev; + long val; + int ret; + + ret = kstrtol(buf, 0, &val); + + if (ret) + return ret; + + if (val < 0 || val > 4) + return -EINVAL; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + if (to_dm_connector_state(connector->state)->abm_sysfs_forbidden) + ret = -EBUSY; + else + to_dm_connector_state(connector->state)->abm_level = val ?: + ABM_LEVEL_IMMEDIATE_DISABLE; + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + if (ret) + return ret; + + drm_kms_helper_hotplug_event(dev); + + return count; +} + +static DEVICE_ATTR_RW(panel_power_savings); + +static struct attribute *amdgpu_attrs[] = { + &dev_attr_panel_power_savings.attr, + NULL +}; + +static const struct attribute_group amdgpu_group = { + .name = "amdgpu", + .attrs = amdgpu_attrs +}; + +static bool +amdgpu_dm_should_create_sysfs(struct amdgpu_dm_connector *amdgpu_dm_connector) +{ + if (amdgpu_dm_abm_level >= 0) + return false; + + if (amdgpu_dm_connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) + return false; + + /* check for OLED panels */ + if (amdgpu_dm_connector->bl_idx >= 0) { + struct drm_device *drm = amdgpu_dm_connector->base.dev; + struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; + struct amdgpu_dm_backlight_caps *caps; + + caps = &dm->backlight_caps[amdgpu_dm_connector->bl_idx]; + if (caps->aux_support) + return false; + } + + return true; +} + static void amdgpu_dm_connector_unregister(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector)) + sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group); + + cec_notifier_conn_unregister(amdgpu_dm_connector->notifier); drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); } static void amdgpu_dm_connector_destroy(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - const struct dc_link *link = aconnector->dc_link; struct amdgpu_device *adev = drm_to_adev(connector->dev); struct amdgpu_display_manager *dm = &adev->dm; - int i; /* * Call only if mst_mgr was initialized before since it's not done @@ -5978,15 +7736,17 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) if (aconnector->mst_mgr.dev) drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); -#if 
defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ - defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) - for (i = 0; i < dm->num_of_edps; i++) { - if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) { - backlight_device_unregister(dm->backlight_dev[i]); - dm->backlight_dev[i] = NULL; - } + /* Cancel and flush any pending HDMI HPD debounce work */ + cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work); + if (aconnector->hdmi_prev_sink) { + dc_sink_release(aconnector->hdmi_prev_sink); + aconnector->hdmi_prev_sink = NULL; + } + + if (aconnector->bl_idx != -1) { + backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]); + dm->backlight_dev[aconnector->bl_idx] = NULL; } -#endif if (aconnector->dc_em_sink) dc_sink_release(aconnector->dc_em_sink); @@ -5998,10 +7758,6 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); drm_connector_unregister(connector); drm_connector_cleanup(connector); - if (aconnector->i2c) { - i2c_del_adapter(&aconnector->i2c->base); - kfree(aconnector->i2c); - } kfree(aconnector->dm_dp_aux.aux.name); kfree(connector); @@ -6028,8 +7784,12 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) state->vcpi_slots = 0; state->pbn = 0; - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) - state->abm_level = amdgpu_dm_abm_level; + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + if (amdgpu_dm_abm_level <= 0) + state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE; + else + state->abm_level = amdgpu_dm_abm_level; + } __drm_atomic_helper_connector_reset(connector, &state->base); } @@ -6067,6 +7827,15 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector) to_amdgpu_dm_connector(connector); int r; + if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector)) { + r = sysfs_create_group(&connector->kdev->kobj, + &amdgpu_group); + if (r) + return r; + } + + amdgpu_dm_register_backlight_device(amdgpu_dm_connector); + if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev; @@ -6082,6 +7851,43 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector) return 0; } +static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct dc_link *dc_link = aconnector->dc_link; + struct dc_sink *dc_em_sink = aconnector->dc_em_sink; + const struct drm_edid *drm_edid; + struct i2c_adapter *ddc; + struct drm_device *dev = connector->dev; + + if (dc_link && dc_link->aux_mode) + ddc = &aconnector->dm_dp_aux.aux.ddc; + else + ddc = &aconnector->i2c->base; + + drm_edid = drm_edid_read_ddc(connector, ddc); + drm_edid_connector_update(connector, drm_edid); + if (!drm_edid) { + drm_err(dev, "No EDID found on connector: %s.\n", connector->name); + return; + } + + aconnector->drm_edid = drm_edid; + /* Update emulated (virtual) sink's EDID */ + if (dc_em_sink && dc_link) { + // FIXME: Get rid of drm_edid_raw() + const struct edid *edid = drm_edid_raw(drm_edid); + + memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps)); + memmove(dc_em_sink->dc_edid.raw_edid, edid, + (edid->extensions + 1) * EDID_LENGTH); + dm_helpers_parse_edid_caps( + dc_link, + &dc_em_sink->dc_edid, + &dc_em_sink->edid_caps); + } +} + static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { .reset = amdgpu_dm_connector_funcs_reset, .detect = 
amdgpu_dm_connector_detect, @@ -6092,7 +7898,8 @@ static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { .atomic_set_property = amdgpu_dm_connector_atomic_set_property, .atomic_get_property = amdgpu_dm_connector_atomic_get_property, .late_register = amdgpu_dm_connector_late_register, - .early_unregister = amdgpu_dm_connector_unregister + .early_unregister = amdgpu_dm_connector_unregister, + .force = amdgpu_dm_connector_funcs_force }; static int get_modes(struct drm_connector *connector) @@ -6102,25 +7909,34 @@ static int get_modes(struct drm_connector *connector) static void create_eml_sink(struct amdgpu_dm_connector *aconnector) { + struct drm_connector *connector = &aconnector->base; + struct dc_link *dc_link = aconnector->dc_link; struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_VIRTUAL }; - struct edid *edid; + const struct drm_edid *drm_edid; + const struct edid *edid; + struct i2c_adapter *ddc; - if (!aconnector->base.edid_blob_ptr) { - DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n", - aconnector->base.name); + if (dc_link && dc_link->aux_mode) + ddc = &aconnector->dm_dp_aux.aux.ddc; + else + ddc = &aconnector->i2c->base; - aconnector->base.force = DRM_FORCE_OFF; - aconnector->base.override_edid = false; + drm_edid = drm_edid_read_ddc(connector, ddc); + drm_edid_connector_update(connector, drm_edid); + if (!drm_edid) { + drm_err(connector->dev, "No EDID found on connector: %s.\n", connector->name); return; } - edid = (struct edid *) aconnector->base.edid_blob_ptr->data; + if (connector->display_info.is_hdmi) + init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; - aconnector->edid = edid; + aconnector->drm_edid = drm_edid; + edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() aconnector->dc_em_sink = dc_link_add_remote_sink( aconnector->dc_link, (uint8_t *)edid, @@ -6131,7 +7947,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector) aconnector->dc_sink = aconnector->dc_link->local_sink ? 
aconnector->dc_link->local_sink : aconnector->dc_em_sink; - dc_sink_retain(aconnector->dc_sink); + if (aconnector->dc_sink) + dc_sink_retain(aconnector->dc_sink); } } @@ -6148,43 +7965,125 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) link->verified_link_cap.link_rate = LINK_RATE_HIGH2; } - - aconnector->base.override_edid = true; create_eml_sink(aconnector); } +static enum dc_status dm_validate_stream_and_context(struct dc *dc, + struct dc_stream_state *stream) +{ + enum dc_status dc_result = DC_ERROR_UNEXPECTED; + struct dc_plane_state *dc_plane_state = NULL; + struct dc_state *dc_state = NULL; + + if (!stream) + goto cleanup; + + dc_plane_state = dc_create_plane_state(dc); + if (!dc_plane_state) + goto cleanup; + + dc_state = dc_state_create(dc, NULL); + if (!dc_state) + goto cleanup; + + /* populate stream to plane */ + dc_plane_state->src_rect.height = stream->src.height; + dc_plane_state->src_rect.width = stream->src.width; + dc_plane_state->dst_rect.height = stream->src.height; + dc_plane_state->dst_rect.width = stream->src.width; + dc_plane_state->clip_rect.height = stream->src.height; + dc_plane_state->clip_rect.width = stream->src.width; + dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256; + dc_plane_state->plane_size.surface_size.height = stream->src.height; + dc_plane_state->plane_size.surface_size.width = stream->src.width; + dc_plane_state->plane_size.chroma_size.height = stream->src.height; + dc_plane_state->plane_size.chroma_size.width = stream->src.width; + dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; + dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN; + dc_plane_state->rotation = ROTATION_ANGLE_0; + dc_plane_state->is_tiling_rotated = false; + dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL; + + dc_result = dc_validate_stream(dc, stream); + if (dc_result == DC_OK) + dc_result = dc_validate_plane(dc, dc_plane_state); + + if (dc_result == DC_OK) + dc_result = dc_state_add_stream(dc, dc_state, stream); + + if (dc_result == DC_OK && !dc_state_add_plane( + dc, + stream, + dc_plane_state, + dc_state)) + dc_result = DC_FAIL_ATTACH_SURFACES; + + if (dc_result == DC_OK) + dc_result = dc_validate_global_state(dc, dc_state, DC_VALIDATE_MODE_ONLY); + +cleanup: + if (dc_state) + dc_state_release(dc_state); + + if (dc_plane_state) + dc_plane_state_release(dc_plane_state); + + return dc_result; +} + struct dc_stream_state * -create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, +create_validate_stream_for_sink(struct drm_connector *connector, const struct drm_display_mode *drm_mode, const struct dm_connector_state *dm_state, const struct dc_stream_state *old_stream) { - struct drm_connector *connector = &aconnector->base; + struct amdgpu_dm_connector *aconnector = NULL; struct amdgpu_device *adev = drm_to_adev(connector->dev); struct dc_stream_state *stream; const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; int requested_bpc = drm_state ? 
drm_state->max_requested_bpc : 8; enum dc_status dc_result = DC_OK; + uint8_t bpc_limit = 6; + + if (!dm_state) + return NULL; + + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + aconnector = to_amdgpu_dm_connector(connector); + + if (aconnector && + (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A || + aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)) + bpc_limit = 8; do { - stream = create_stream_for_sink(aconnector, drm_mode, + drm_dbg_kms(connector->dev, "Trying with %d bpc\n", requested_bpc); + stream = create_stream_for_sink(connector, drm_mode, dm_state, old_stream, requested_bpc); if (stream == NULL) { - DRM_ERROR("Failed to create stream for sink!\n"); + drm_err(adev_to_drm(adev), "Failed to create stream for sink!\n"); break; } dc_result = dc_validate_stream(adev->dm.dc, stream); + + if (!aconnector) /* writeback connector */ + return stream; + if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream); + if (dc_result == DC_OK) + dc_result = dm_validate_stream_and_context(adev->dm.dc, stream); + if (dc_result != DC_OK) { - DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n", + DRM_DEBUG_KMS("Pruned mode %d x %d (clk %d) %s %s -- %s\n", drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->clock, - dc_result, + dc_pixel_encoding_to_str(stream->timing.pixel_encoding), + dc_color_depth_to_str(stream->timing.display_color_depth), dc_status_to_str(dc_result)); dc_stream_release(stream); @@ -6192,27 +8091,59 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, requested_bpc -= 2; /* lower bpc to retry validation */ } - } while (stream == NULL && requested_bpc >= 6); + } while (stream == NULL && requested_bpc >= bpc_limit); - if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) { - DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n"); - - aconnector->force_yuv420_output = true; - stream = create_validate_stream_for_sink(aconnector, drm_mode, - dm_state, old_stream); + switch (dc_result) { + /* + * If we failed to validate DP bandwidth stream with the requested RGB color depth, + * we try to fallback and configure in order: + * YUV422 (8bpc, 6bpc) + * YUV420 (8bpc, 6bpc) + */ + case DC_FAIL_ENC_VALIDATE: + case DC_EXCEED_DONGLE_CAP: + case DC_NO_DP_LINK_BANDWIDTH: + /* recursively entered twice and already tried both YUV422 and YUV420 */ + if (aconnector->force_yuv422_output && aconnector->force_yuv420_output) + break; + /* first failure; try YUV422 */ + if (!aconnector->force_yuv422_output) { + drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV422\n", + __func__, __LINE__, dc_result); + aconnector->force_yuv422_output = true; + /* recursively entered and YUV422 failed, try YUV420 */ + } else if (!aconnector->force_yuv420_output) { + drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV420\n", + __func__, __LINE__, dc_result); + aconnector->force_yuv420_output = true; + } + stream = create_validate_stream_for_sink(connector, drm_mode, + dm_state, old_stream); + aconnector->force_yuv422_output = false; aconnector->force_yuv420_output = false; + break; + case DC_OK: + break; + default: + drm_dbg_kms(connector->dev, "%s:%d Unhandled validation failure %d\n", + __func__, __LINE__, dc_result); + break; } return stream; } enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) + 
const struct drm_display_mode *mode) { int result = MODE_ERROR; struct dc_sink *dc_sink; + struct drm_display_mode *test_mode; /* TODO: Unhardcode stream count */ struct dc_stream_state *stream; + /* we always have an amdgpu_dm_connector here since we got + * here via the amdgpu_dm_connector_helper_funcs + */ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || @@ -6231,11 +8162,20 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL && aconnector->base.force != DRM_FORCE_ON) { - DRM_ERROR("dc_sink is NULL!\n"); + drm_err(connector->dev, "dc_sink is NULL!\n"); goto fail; } - stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL); + test_mode = drm_mode_duplicate(connector->dev, mode); + if (!test_mode) + goto fail; + + drm_mode_set_crtcinfo(test_mode, 0); + + stream = create_validate_stream_for_sink(connector, test_mode, + to_dm_connector_state(connector->state), + NULL); + drm_mode_destroy(connector->dev, test_mode); if (stream) { dc_stream_release(stream); result = MODE_OK; @@ -6318,6 +8258,9 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn, struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); int ret; + if (WARN_ON(unlikely(!old_con_state || !new_con_state))) + return -EINVAL; + trace_amdgpu_dm_connector_atomic_check(new_con_state); if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { @@ -6329,6 +8272,30 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn, if (!crtc) return 0; + if (new_con_state->privacy_screen_sw_state != old_con_state->privacy_screen_sw_state) { + new_crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(new_crtc_state)) + return PTR_ERR(new_crtc_state); + + new_crtc_state->mode_changed = true; + } + + if (new_con_state->colorspace != old_con_state->colorspace) { + new_crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(new_crtc_state)) + return PTR_ERR(new_crtc_state); + + new_crtc_state->mode_changed = true; + } + + if (new_con_state->content_type != old_con_state->content_type) { + new_crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(new_crtc_state)) + return PTR_ERR(new_crtc_state); + + new_crtc_state->mode_changed = true; + } + if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) { struct dc_info_packet hdr_infopacket; @@ -6351,7 +8318,7 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn, * set is permissible, however. So only force a * modeset if we're entering or exiting HDR. 
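 * Entering or exiting HDR is detected below by the hdr_output_metadata
 * blob appearing on, or disappearing from, the connector state.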
*/ - new_crtc_state->mode_changed = + new_crtc_state->mode_changed = new_crtc_state->mode_changed || !old_con_state->hdr_output_metadata || !new_con_state->hdr_output_metadata; } @@ -6414,11 +8381,28 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, int clock, bpp = 0; bool is_y420 = false; - if (!aconnector->port || !aconnector->dc_sink) + if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || + (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; + enum drm_mode_status result; + + result = drm_crtc_helper_mode_valid_fixed(encoder->crtc, adjusted_mode, native_mode); + if (result != MODE_OK && dm_new_connector_state->scaling == RMX_OFF) { + drm_dbg_driver(encoder->dev, + "mode %dx%d@%dHz is not native, enabling scaling\n", + adjusted_mode->hdisplay, adjusted_mode->vdisplay, + drm_mode_vrefresh(adjusted_mode)); + dm_new_connector_state->scaling = RMX_ASPECT; + } return 0; + } - mst_port = aconnector->port; - mst_mgr = &aconnector->mst_port->mst_mgr; + if (!aconnector->mst_output_port) + return 0; + + mst_port = aconnector->mst_output_port; + mst_mgr = &aconnector->mst_root->mst_mgr; if (!crtc_state->connectors_changed && !crtc_state->mode_changed) return 0; @@ -6427,11 +8411,11 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, if (IS_ERR(mst_state)) return PTR_ERR(mst_state); - if (!mst_state->pbn_div) - mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_port->dc_link); + mst_state->pbn_div.full = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link); if (!state->duplicated) { int max_bpc = conn_state->max_requested_bpc; + is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && aconnector->force_yuv420_output; color_depth = convert_color_depth_from_display_info(connector, @@ -6439,7 +8423,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, max_bpc); bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; clock = adjusted_mode->clock; - dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false); + dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4); } dm_new_connector_state->vcpi_slots = @@ -6457,7 +8441,6 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { .atomic_check = dm_encoder_helper_atomic_check }; -#if defined(CONFIG_DRM_AMD_DC_DCN) static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, struct dc_state *dc_state, struct dsc_mst_fairness_vars *vars) @@ -6467,14 +8450,17 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, struct drm_connector_state *new_con_state; struct amdgpu_dm_connector *aconnector; struct dm_connector_state *dm_conn_state; - int i, j; - int vcpi, pbn_div, pbn, slot_num = 0; + int i, j, ret; + int vcpi, pbn_div, pbn = 0, slot_num = 0; for_each_new_connector_in_state(state, connector, new_con_state, i) { + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); - if (!aconnector->port) + if (!aconnector->mst_output_port) continue; if (!new_con_state || !new_con_state->crtc) @@ -6505,7 +8491,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, } } - if (j == dc_state->stream_count) + if (j == dc_state->stream_count || pbn_div == 0) continue; slot_num = DIV_ROUND_UP(pbn, pbn_div); @@ -6514,12 +8500,15 @@ static int 
dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, dm_conn_state->pbn = pbn; dm_conn_state->vcpi_slots = slot_num; - drm_dp_mst_atomic_enable_dsc(state, aconnector->port, dm_conn_state->pbn, - false); + ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, + dm_conn_state->pbn, false); + if (ret < 0) + return ret; + continue; } - vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, true); + vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true); if (vcpi < 0) return vcpi; @@ -6528,9 +8517,8 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, } return 0; } -#endif -static int to_drm_connector_type(enum signal_type st) +static int to_drm_connector_type(enum signal_type st, uint32_t connector_id) { switch (st) { case SIGNAL_TYPE_HDMI_TYPE_A: @@ -6546,6 +8534,10 @@ static int to_drm_connector_type(enum signal_type st) return DRM_MODE_CONNECTOR_DisplayPort; case SIGNAL_TYPE_DVI_DUAL_LINK: case SIGNAL_TYPE_DVI_SINGLE_LINK: + if (connector_id == CONNECTOR_ID_SINGLE_LINK_DVII || + connector_id == CONNECTOR_ID_DUAL_LINK_DVII) + return DRM_MODE_CONNECTOR_DVII; + return DRM_MODE_CONNECTOR_DVID; case SIGNAL_TYPE_VIRTUAL: return DRM_MODE_CONNECTOR_VIRTUAL; @@ -6597,7 +8589,7 @@ static void amdgpu_dm_get_native_mode(struct drm_connector *connector) static struct drm_display_mode * amdgpu_dm_create_common_mode(struct drm_encoder *encoder, - char *name, + const char *name, int hdisplay, int vdisplay) { struct drm_device *dev = encoder->dev; @@ -6619,6 +8611,24 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder, } +static const struct amdgpu_dm_mode_size { + char name[DRM_DISPLAY_MODE_LEN]; + int w; + int h; +} common_modes[] = { + { "640x480", 640, 480}, + { "800x600", 800, 600}, + { "1024x768", 1024, 768}, + { "1280x720", 1280, 720}, + { "1280x800", 1280, 800}, + {"1280x1024", 1280, 1024}, + { "1440x900", 1440, 900}, + {"1680x1050", 1680, 1050}, + {"1600x1200", 1600, 1200}, + {"1920x1080", 1920, 1080}, + {"1920x1200", 1920, 1200} +}; + static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, struct drm_connector *connector) { @@ -6629,23 +8639,10 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, to_amdgpu_dm_connector(connector); int i; int n; - struct mode_size { - char name[DRM_DISPLAY_MODE_LEN]; - int w; - int h; - } common_modes[] = { - { "640x480", 640, 480}, - { "800x600", 800, 600}, - { "1024x768", 1024, 768}, - { "1280x720", 1280, 720}, - { "1280x800", 1280, 800}, - {"1280x1024", 1280, 1024}, - { "1440x900", 1440, 900}, - {"1680x1050", 1680, 1050}, - {"1600x1200", 1600, 1200}, - {"1920x1080", 1920, 1080}, - {"1920x1200", 1920, 1200} - }; + + if ((connector->connector_type != DRM_MODE_CONNECTOR_eDP) && + (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)) + return; n = ARRAY_SIZE(common_modes); @@ -6712,16 +8709,16 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector) } static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (edid) { + if (drm_edid) { /* empty probed_modes */ INIT_LIST_HEAD(&connector->probed_modes); amdgpu_dm_connector->num_modes = - drm_add_edid_modes(connector, edid); + drm_edid_connector_add_modes(connector); /* sorting the probed modes before calling function * amdgpu_dm_get_native_mode() since EDID can have @@ -6735,10 
+8732,10 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, amdgpu_dm_get_native_mode(connector); /* Freesync capabilities are reset by calling - * drm_add_edid_modes() and need to be + * drm_edid_connector_add_modes() and need to be * restored here. */ - amdgpu_dm_update_freesync_caps(connector, edid); + amdgpu_dm_update_freesync_caps(connector, drm_edid); } else { amdgpu_dm_connector->num_modes = 0; } @@ -6749,7 +8746,7 @@ static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector, { struct drm_display_mode *m; - list_for_each_entry (m, &aconnector->base.probed_modes, head) { + list_for_each_entry(m, &aconnector->base.probed_modes, head) { if (drm_mode_equal(m, mode)) return true; } @@ -6762,21 +8759,21 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) const struct drm_display_mode *m; struct drm_display_mode *new_mode; uint i; - uint32_t new_modes_count = 0; + u32 new_modes_count = 0; /* Standard FPS values * * 23.976 - TV/NTSC - * 24 - Cinema - * 25 - TV/PAL + * 24 - Cinema + * 25 - TV/PAL * 29.97 - TV/NTSC - * 30 - TV/NTSC - * 48 - Cinema HFR - * 50 - TV/PAL - * 60 - Commonly used + * 30 - TV/NTSC + * 48 - Cinema HFR + * 50 - TV/PAL + * 60 - Commonly used * 48,72,96,120 - Multiples of 24 */ - static const uint32_t common_rates[] = { + static const u32 common_rates[] = { 23976, 24000, 25000, 29970, 30000, 48000, 50000, 60000, 72000, 96000, 120000 }; @@ -6792,8 +8789,8 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) return 0; for (i = 0; i < ARRAY_SIZE(common_rates); i++) { - uint64_t target_vtotal, target_vtotal_diff; - uint64_t num, den; + u64 target_vtotal, target_vtotal_diff; + u64 num, den; if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) continue; @@ -6834,12 +8831,22 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) } static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (!edid) + if (!(amdgpu_freesync_vid_mode && drm_edid)) + return; + + if (!amdgpu_dm_connector->dc_sink || !amdgpu_dm_connector->dc_link) + return; + + if (!dc_supports_vrr(amdgpu_dm_connector->dc_sink->ctx->dce_version)) + return; + + if (dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id) && + amdgpu_dm_connector->dc_sink->edid_caps.analog) return; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) @@ -6851,24 +8858,48 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + struct dc_link *dc_link = amdgpu_dm_connector->dc_link; struct drm_encoder *encoder; - struct edid *edid = amdgpu_dm_connector->edid; + const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid; + struct dc_link_settings *verified_link_cap = &dc_link->verified_link_cap; + const struct dc *dc = dc_link->dc; encoder = amdgpu_dm_connector_to_encoder(connector); - if (!drm_edid_is_valid(edid)) { + if (!drm_edid) { amdgpu_dm_connector->num_modes = drm_add_modes_noedid(connector, 640, 480); + if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) + amdgpu_dm_connector->num_modes += + drm_add_modes_noedid(connector, 1920, 1080); + + if (amdgpu_dm_connector->dc_sink && + amdgpu_dm_connector->dc_sink->edid_caps.analog && + dc_connector_supports_analog(dc_link->link_id.id)) { + /* Analog monitor 
connected by DAC load detection. + * Add common modes. It will be up to the user to select one that works. + */ + for (int i = 0; i < ARRAY_SIZE(common_modes); i++) + amdgpu_dm_connector->num_modes += drm_add_modes_noedid( + connector, common_modes[i].w, common_modes[i].h); + } } else { - amdgpu_dm_connector_ddc_get_modes(connector, edid); - amdgpu_dm_connector_add_common_modes(encoder, connector); - amdgpu_dm_connector_add_freesync_modes(connector, edid); + amdgpu_dm_connector_ddc_get_modes(connector, drm_edid); + if (encoder) + amdgpu_dm_connector_add_common_modes(encoder, connector); + amdgpu_dm_connector_add_freesync_modes(connector, drm_edid); } amdgpu_dm_fbc_init(connector); return amdgpu_dm_connector->num_modes; } +static const u32 supported_colorspaces = + BIT(DRM_MODE_COLORIMETRY_BT709_YCC) | + BIT(DRM_MODE_COLORIMETRY_OPRGB) | + BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) | + BIT(DRM_MODE_COLORIMETRY_BT2020_YCC); + void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector, int connector_type, @@ -6885,6 +8916,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, aconnector->base.funcs->reset(&aconnector->base); aconnector->connector_id = link_index; + aconnector->bl_idx = -1; aconnector->dc_link = link; aconnector->base.interlace_allowed = false; aconnector->base.doublescan_allowed = false; @@ -6892,7 +8924,15 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, aconnector->base.dpms = DRM_MODE_DPMS_OFF; aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */ aconnector->audio_inst = -1; + aconnector->pack_sdp_v1_3 = false; + aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE; + memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info)); mutex_init(&aconnector->hpd_lock); + mutex_init(&aconnector->handle_mst_msg_ready); + + aconnector->hdmi_hpd_debounce_delay_ms = AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS; + INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work); + aconnector->hdmi_prev_sink = NULL; /* * configure support HPD hot plug connector_>polled default value is 0 @@ -6915,6 +8955,11 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, case DRM_MODE_CONNECTOR_DVID: aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; break; + case DRM_MODE_CONNECTOR_DVII: + case DRM_MODE_CONNECTOR_VGA: + aconnector->base.polled = + DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + break; default: break; } @@ -6923,6 +8968,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, dm->ddev->mode_config.scaling_mode_property, DRM_MODE_SCALE_NONE); + if (connector_type == DRM_MODE_CONNECTOR_HDMIA + || (connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root)) + drm_connector_attach_broadcast_rgb_property(&aconnector->base); + drm_object_attach_property(&aconnector->base.base, adev->mode_info.underscan_property, UNDERSCAN_OFF); @@ -6933,17 +8982,24 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, adev->mode_info.underscan_vborder_property, 0); - if (!aconnector->mst_port) + if (!aconnector->mst_root) drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16); - /* This defaults to the max in the range, but we want 8bpc for non-edp. */ - aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 
16 : 8; + aconnector->base.state->max_bpc = 16; aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc; - if (connector_type == DRM_MODE_CONNECTOR_eDP && - (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) { - drm_object_attach_property(&aconnector->base.base, - adev->mode_info.abm_level_property, 0); + if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { + /* Content Type is currently only implemented for HDMI. */ + drm_connector_attach_content_type_property(&aconnector->base); + } + + if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { + if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces)) + drm_connector_attach_colorspace_property(&aconnector->base); + } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) || + connector_type == DRM_MODE_CONNECTOR_eDP) { + if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces)) + drm_connector_attach_colorspace_property(&aconnector->base); } if (connector_type == DRM_MODE_CONNECTOR_HDMIA || @@ -6951,13 +9007,23 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, connector_type == DRM_MODE_CONNECTOR_eDP) { drm_connector_attach_hdr_output_metadata_property(&aconnector->base); - if (!aconnector->mst_port) + if (!aconnector->mst_root) drm_connector_attach_vrr_capable_property(&aconnector->base); -#ifdef CONFIG_DRM_AMD_DC_HDCP if (adev->dm.hdcp_workqueue) drm_connector_attach_content_protection_property(&aconnector->base, true); -#endif + } + + if (connector_type == DRM_MODE_CONNECTOR_eDP) { + struct drm_privacy_screen *privacy_screen; + + privacy_screen = drm_privacy_screen_get(adev_to_drm(adev)->dev, NULL); + if (!IS_ERR(privacy_screen)) { + drm_connector_attach_privacy_screen_provider(&aconnector->base, + privacy_screen); + } else if (PTR_ERR(privacy_screen) != -ENODEV) { + drm_warn(adev_to_drm(adev), "Error getting privacy-screen\n"); + } } } @@ -6970,6 +9036,9 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, int i; int result = -EIO; + if (!ddc_service->ddc_pin) + return result; + cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); if (!cmd.payloads) @@ -6986,11 +9055,18 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, cmd.payloads[i].data = msgs[i].buf; } - if (dc_submit_i2c( - ddc_service->ctx->dc, - ddc_service->link->link_index, - &cmd)) - result = num; + if (i2c->oem) { + if (dc_submit_i2c_oem( + ddc_service->ctx->dc, + &cmd)) + result = num; + } else { + if (dc_submit_i2c( + ddc_service->ctx->dc, + ddc_service->link->link_index, + &cmd)) + result = num; + } kfree(cmd.payloads); return result; @@ -7007,9 +9083,7 @@ static const struct i2c_algorithm amdgpu_dm_i2c_algo = { }; static struct amdgpu_i2c_adapter * -create_i2c(struct ddc_service *ddc_service, - int link_index, - int *res) +create_i2c(struct ddc_service *ddc_service, bool oem) { struct amdgpu_device *adev = ddc_service->ctx->driver_context; struct amdgpu_i2c_adapter *i2c; @@ -7018,16 +9092,41 @@ create_i2c(struct ddc_service *ddc_service, if (!i2c) return NULL; i2c->base.owner = THIS_MODULE; - i2c->base.class = I2C_CLASS_DDC; i2c->base.dev.parent = &adev->pdev->dev; i2c->base.algo = &amdgpu_dm_i2c_algo; - snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); + if (oem) + snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c OEM bus"); + else + snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", + 
ddc_service->link->link_index); i2c_set_adapdata(&i2c->base, i2c); i2c->ddc_service = ddc_service; + i2c->oem = oem; return i2c; } +int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector) +{ + struct cec_connector_info conn_info; + struct drm_device *ddev = aconnector->base.dev; + struct device *hdmi_dev = ddev->dev; + + if (amdgpu_dc_debug_mask & DC_DISABLE_HDMI_CEC) { + drm_info(ddev, "HDMI-CEC feature masked\n"); + return -EINVAL; + } + + cec_fill_conn_info_from_drm(&conn_info, &aconnector->base); + aconnector->notifier = + cec_notifier_conn_register(hdmi_dev, NULL, &conn_info); + if (!aconnector->notifier) { + drm_err(ddev, "Failed to create cec notifier\n"); + return -ENOMEM; + } + + return 0; +} /* * Note: this function assumes that dc_link_detect() was called for the @@ -7035,7 +9134,7 @@ create_i2c(struct ddc_service *ddc_service, */ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector, - uint32_t link_index, + u32 link_index, struct amdgpu_encoder *aencoder) { int res = 0; @@ -7044,25 +9143,25 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, struct dc_link *link = dc_get_link_at_index(dc, link_index); struct amdgpu_i2c_adapter *i2c; + /* Not needed for writeback connector */ link->priv = aconnector; - DRM_DEBUG_DRIVER("%s()\n", __func__); - i2c = create_i2c(link->ddc, link->link_index, &res); + i2c = create_i2c(link->ddc, false); if (!i2c) { - DRM_ERROR("Failed to create i2c adapter data\n"); + drm_err(adev_to_drm(dm->adev), "Failed to create i2c adapter data\n"); return -ENOMEM; } aconnector->i2c = i2c; - res = i2c_add_adapter(&i2c->base); + res = devm_i2c_add_adapter(dm->adev->dev, &i2c->base); if (res) { - DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); + drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index); goto out_free; } - connector_type = to_drm_connector_type(link->connector_signal); + connector_type = to_drm_connector_type(link->connector_signal, link->link_id.id); res = drm_connector_init_with_ddc( dm->ddev, @@ -7072,7 +9171,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, &i2c->base); if (res) { - DRM_ERROR("connector_init failed\n"); + drm_err(adev_to_drm(dm->adev), "connector_init failed\n"); aconnector->connector_id = -1; goto out_free; } @@ -7091,6 +9190,10 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, drm_connector_attach_encoder( &aconnector->base, &aencoder->base); + if (connector_type == DRM_MODE_CONNECTOR_HDMIA || + connector_type == DRM_MODE_CONNECTOR_HDMIB) + amdgpu_dm_initialize_hdmi_connector(aconnector); + if (connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector_type == DRM_MODE_CONNECTOR_eDP) amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); @@ -7148,42 +9251,98 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev, static void manage_dm_interrupts(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc, - bool enable) -{ - /* - * We have no guarantee that the frontend index maps to the same + struct dm_crtc_state *acrtc_state) +{ /* + * We cannot be sure that the frontend index maps to the same * backend index - some even map to more than one. - * - * TODO: Use a different interrupt or check DC itself for the mapping. + * So we have to go through the CRTC to find the right IRQ. 
*/ - int irq_type = - amdgpu_display_crtc_idx_to_irq_type( + int irq_type = amdgpu_display_crtc_idx_to_irq_type( adev, acrtc->crtc_id); + struct drm_device *dev = adev_to_drm(adev); - if (enable) { - drm_crtc_vblank_on(&acrtc->base); - amdgpu_irq_get( - adev, - &adev->pageflip_irq, - irq_type); + struct drm_vblank_crtc_config config = {0}; + struct dc_crtc_timing *timing; + int offdelay; + + if (acrtc_state) { + timing = &acrtc_state->stream->timing; + + /* + * Depending on when the HW latching event of double-buffered + * registers happen relative to the PSR SDP deadline, and how + * bad the Panel clock has drifted since the last ALPM off + * event, there can be up to 3 frames of delay between sending + * the PSR exit cmd to DMUB fw, and when the panel starts + * displaying live frames. + * + * We can set: + * + * 20/100 * offdelay_ms = 3_frames_ms + * => offdelay_ms = 5 * 3_frames_ms + * + * This ensures that `3_frames_ms` will only be experienced as a + * 20% delay on top how long the display has been static, and + * thus make the delay less perceivable. + */ + if (acrtc_state->stream->link->psr_settings.psr_version < + DC_PSR_VERSION_UNSUPPORTED) { + offdelay = DIV64_U64_ROUND_UP((u64)5 * 3 * 10 * + timing->v_total * + timing->h_total, + timing->pix_clk_100hz); + config.offdelay_ms = offdelay ?: 30; + } else if (amdgpu_ip_version(adev, DCE_HWIP, 0) < + IP_VERSION(3, 5, 0) || + !(adev->flags & AMD_IS_APU)) { + /* + * Older HW and DGPU have issues with instant off; + * use a 2 frame offdelay. + */ + offdelay = DIV64_U64_ROUND_UP((u64)20 * + timing->v_total * + timing->h_total, + timing->pix_clk_100hz); + + config.offdelay_ms = offdelay ?: 30; + } else { + /* offdelay_ms = 0 will never disable vblank */ + config.offdelay_ms = 1; + config.disable_immediate = true; + } + + drm_crtc_vblank_on_config(&acrtc->base, + &config); + /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get.*/ + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 0, 3): + case IP_VERSION(3, 2, 0): + if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type)) + drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n"); #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - amdgpu_irq_get( - adev, - &adev->vline0_irq, - irq_type); + if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type)) + drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n"); #endif + } + } else { + /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put.*/ + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 0, 3): + case IP_VERSION(3, 2, 0): #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - amdgpu_irq_put( - adev, - &adev->vline0_irq, - irq_type); + if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type)) + drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n"); #endif - amdgpu_irq_put( - adev, - &adev->pageflip_irq, - irq_type); + if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type)) + drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n"); + } + drm_crtc_vblank_off(&acrtc->base); } } @@ -7219,28 +9378,55 @@ is_scaling_state_different(const struct dm_connector_state *dm_state, return false; } -#ifdef CONFIG_DRM_AMD_DC_HDCP -static bool is_content_protection_different(struct drm_connector_state *state, - const struct drm_connector_state *old_state, - const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) +static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state, + struct drm_crtc_state 
*old_crtc_state, + struct drm_connector_state *new_conn_state, + struct drm_connector_state *old_conn_state, + const struct drm_connector *connector, + struct hdcp_workqueue *hdcp_w) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); - /* Handle: Type0/1 change */ - if (old_state->hdcp_content_type != state->hdcp_content_type && - state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { - state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", + connector->index, connector->status, connector->dpms); + pr_debug("[HDCP_DM] state protection old: %x new: %x\n", + old_conn_state->content_protection, new_conn_state->content_protection); + + if (old_crtc_state) + pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + old_crtc_state->enable, + old_crtc_state->active, + old_crtc_state->mode_changed, + old_crtc_state->active_changed, + old_crtc_state->connectors_changed); + + if (new_crtc_state) + pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + new_crtc_state->enable, + new_crtc_state->active, + new_crtc_state->mode_changed, + new_crtc_state->active_changed, + new_crtc_state->connectors_changed); + + /* hdcp content type change */ + if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type && + new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__); return true; } - /* CP is being re enabled, ignore this - * - * Handles: ENABLED -> DESIRED - */ - if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && - state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { - state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; + /* CP is being re enabled, ignore this */ + if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { + if (new_crtc_state && new_crtc_state->mode_changed) { + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__); + return true; + } + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; + pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__); return false; } @@ -7248,9 +9434,9 @@ static bool is_content_protection_different(struct drm_connector_state *state, * * Handles: UNDESIRED -> ENABLED */ - if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && - state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) - state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; /* Stream removed and re-enabled * @@ -7260,10 +9446,12 @@ static bool is_content_protection_different(struct drm_connector_state *state, * * Handles: DESIRED -> DESIRED (Special case) */ - if (!(old_state->crtc && old_state->crtc->enabled) && - state->crtc && state->crtc->enabled && + if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && + 
new_conn_state->crtc && new_conn_state->crtc->enabled && connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { dm_con_state->update_hdcp = false; + pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", + __func__); return true; } @@ -7275,35 +9463,41 @@ static bool is_content_protection_different(struct drm_connector_state *state, * * Handles: DESIRED -> DESIRED (Special case) */ - if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && - connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { + if (dm_con_state->update_hdcp && + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && + connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { dm_con_state->update_hdcp = false; + pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", + __func__); return true; } - /* - * Handles: UNDESIRED -> UNDESIRED - * DESIRED -> DESIRED - * ENABLED -> ENABLED - */ - if (old_state->content_protection == state->content_protection) + if (old_conn_state->content_protection == new_conn_state->content_protection) { + if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { + if (new_crtc_state && new_crtc_state->mode_changed) { + pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", + __func__); + return true; + } + pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", + __func__); + return false; + } + + pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__); return false; + } - /* - * Handles: UNDESIRED -> DESIRED - * DESIRED -> UNDESIRED - * ENABLED -> UNDESIRED - */ - if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) + if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { + pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", + __func__); return true; + } - /* - * Handles: DESIRED -> ENABLED - */ + pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__); return false; } -#endif static void remove_stream(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc, struct dc_stream_state *stream) @@ -7328,8 +9522,9 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc) /* Mark this event as consumed */ acrtc->base.state->event = NULL; - DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", - acrtc->crtc_id); + drm_dbg_state(acrtc->base.dev, + "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", + acrtc->crtc_id); } static void update_freesync_state_on_stream( @@ -7345,6 +9540,8 @@ static void update_freesync_state_on_stream( struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); unsigned long flags; bool pack_sdp_v1_3 = false; + struct amdgpu_dm_connector *aconn; + enum vrr_packet_type packet_type = PACKET_TYPE_VRR; if (!new_stream) return; @@ -7358,7 +9555,7 @@ static void update_freesync_state_on_stream( return; spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); - vrr_params = acrtc->dm_irq_params.vrr_params; + vrr_params = acrtc->dm_irq_params.vrr_params; if (surface) { mod_freesync_handle_preflip( @@ -7369,7 +9566,7 @@ static void update_freesync_state_on_stream( &vrr_params); if (adev->family < AMDGPU_FAMILY_AI && - amdgpu_dm_vrr_active(new_crtc_state)) { + amdgpu_dm_crtc_vrr_active(new_crtc_state)) { mod_freesync_handle_v_update(dm->freesync_module, new_stream, &vrr_params); @@ -7380,11 +9577,27 @@ static void update_freesync_state_on_stream( } } 
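For reference, a minimal standalone sketch (not part of the patch) of the vblank off-delay formula introduced in manage_dm_interrupts() above. The timing values below are hypothetical 1920x1080@60 numbers (v_total = 1125, h_total = 2200, pixel clock 148.5 MHz expressed in 100 Hz units), and div64_round_up() is a userspace stand-in for the kernel's DIV64_U64_ROUND_UP().

/* Sketch only: evaluates the PSR off-delay from the patch for one assumed timing. */
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's DIV64_U64_ROUND_UP(). */
static uint64_t div64_round_up(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	/* Hypothetical 1920x1080@60 timing. */
	uint64_t v_total = 1125, h_total = 2200, pix_clk_100hz = 1485000;

	/*
	 * offdelay_ms = 5 * (3 frames in ms); one frame in ms is
	 * v_total * h_total * 10 / pix_clk_100hz because the clock is in 100 Hz units.
	 */
	uint64_t offdelay_ms = div64_round_up(5ULL * 3 * 10 * v_total * h_total,
					      pix_clk_100hz);

	printf("offdelay_ms = %llu\n", (unsigned long long)offdelay_ms);
	return 0;
}

Compiled and run, this prints offdelay_ms = 250: three ~16.7 ms frames scaled by five, so the worst-case PSR exit latency stays roughly a 20% share of the preceding idle period, as the comment in the patch explains.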
+ aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context; + + if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) { + pack_sdp_v1_3 = aconn->pack_sdp_v1_3; + + if (aconn->vsdb_info.amd_vsdb_version == 1) + packet_type = PACKET_TYPE_FS_V1; + else if (aconn->vsdb_info.amd_vsdb_version == 2) + packet_type = PACKET_TYPE_FS_V2; + else if (aconn->vsdb_info.amd_vsdb_version == 3) + packet_type = PACKET_TYPE_FS_V3; + + mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL, + &new_stream->adaptive_sync_infopacket); + } + mod_freesync_build_vrr_infopacket( dm->freesync_module, new_stream, &vrr_params, - PACKET_TYPE_VRR, + packet_type, TRANSFER_FUNC_UNKNOWN, &vrr_infopacket, pack_sdp_v1_3); @@ -7398,6 +9611,7 @@ static void update_freesync_state_on_stream( new_crtc_state->vrr_infopacket = vrr_infopacket; new_stream->vrr_infopacket = vrr_infopacket; + new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params); if (new_crtc_state->freesync_vrr_info_changed) DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", @@ -7470,8 +9684,8 @@ static void update_stream_irq_parameters( static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, struct dm_crtc_state *new_state) { - bool old_vrr_active = amdgpu_dm_vrr_active(old_state); - bool new_vrr_active = amdgpu_dm_vrr_active(new_state); + bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state); + bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state); if (!old_vrr_active && new_vrr_active) { /* Transition VRR inactive -> active: @@ -7482,17 +9696,17 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, * We also need vupdate irq for the actual core vblank handling * at end of vblank. */ - WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0); + WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0); WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0); - DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", + drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR off->on: Get vblank ref\n", __func__, new_state->base.crtc->base.id); } else if (old_vrr_active && !new_vrr_active) { /* Transition VRR active -> inactive: * Allow vblank irq disable again for fixed refresh rate. */ - WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0); + WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0); drm_crtc_vblank_put(new_state->base.crtc); - DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", + drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR on->off: Drop vblank ref\n", __func__, new_state->base.crtc->base.id); } } @@ -7509,18 +9723,148 @@ static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) */ for_each_old_plane_in_state(state, plane, old_plane_state, i) if (plane->type == DRM_PLANE_TYPE_CURSOR) - handle_cursor_update(plane, old_plane_state); + amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state); +} + +static inline uint32_t get_mem_type(struct drm_framebuffer *fb) +{ + struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]); + + return abo->tbo.resource ? abo->tbo.resource->mem_type : 0; +} + +static void amdgpu_dm_update_cursor(struct drm_plane *plane, + struct drm_plane_state *old_plane_state, + struct dc_stream_update *update) +{ + struct amdgpu_device *adev = drm_to_adev(plane->dev); + struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); + struct drm_crtc *crtc = afb ? 
plane->state->crtc : old_plane_state->crtc; + struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + uint64_t address = afb ? afb->address : 0; + struct dc_cursor_position position = {0}; + struct dc_cursor_attributes attributes; + int ret; + + if (!plane->state->fb && !old_plane_state->fb) + return; + + drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n", + amdgpu_crtc->crtc_id, plane->state->crtc_w, + plane->state->crtc_h); + + ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position); + if (ret) + return; + + if (!position.enable) { + /* turn off cursor */ + if (crtc_state && crtc_state->stream) { + dc_stream_set_cursor_position(crtc_state->stream, + &position); + update->cursor_position = &crtc_state->stream->cursor_position; + } + return; + } + + amdgpu_crtc->cursor_width = plane->state->crtc_w; + amdgpu_crtc->cursor_height = plane->state->crtc_h; + + memset(&attributes, 0, sizeof(attributes)); + attributes.address.high_part = upper_32_bits(address); + attributes.address.low_part = lower_32_bits(address); + attributes.width = plane->state->crtc_w; + attributes.height = plane->state->crtc_h; + attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; + attributes.rotation_angle = 0; + attributes.attribute_flags.value = 0; + + /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM + * legacy gamma setup. + */ + if (crtc_state->cm_is_degamma_srgb && + adev->dm.dc->caps.color.dpp.gamma_corr) + attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; + + if (afb) + attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; + + if (crtc_state->stream) { + if (!dc_stream_set_cursor_attributes(crtc_state->stream, + &attributes)) + drm_err(adev_to_drm(adev), "DC failed to set cursor attributes\n"); + + update->cursor_attributes = &crtc_state->stream->cursor_attributes; + + if (!dc_stream_set_cursor_position(crtc_state->stream, + &position)) + drm_err(adev_to_drm(adev), "DC failed to set cursor position\n"); + + update->cursor_position = &crtc_state->stream->cursor_position; + } +} + +static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach, + const struct dm_crtc_state *acrtc_state, + const u64 current_ts) +{ + struct psr_settings *psr = &acrtc_state->stream->link->psr_settings; + struct replay_settings *pr = &acrtc_state->stream->link->replay_settings; + struct amdgpu_dm_connector *aconn = + (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; + bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state); + + if (acrtc_state->update_type > UPDATE_TYPE_FAST) { + if (pr->config.replay_supported && !pr->replay_feature_enabled) + amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn); + else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED && + !psr->psr_feature_enabled) + if (!aconn->disallow_edp_enter_psr) + amdgpu_dm_link_setup_psr(acrtc_state->stream); + } + + /* Decrement skip count when SR is enabled and we're doing fast updates. */ + if (acrtc_state->update_type == UPDATE_TYPE_FAST && + (psr->psr_feature_enabled || pr->config.replay_supported)) { + if (aconn->sr_skip_count > 0) + aconn->sr_skip_count--; + + /* Allow SR when skip count is 0. */ + acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count; + + /* + * If sink supports PSR SU/Panel Replay, there is no need to rely on + * a vblank event disable request to enable PSR/RP. 
PSR SU/RP + * can be enabled immediately once OS demonstrates an + * adequate number of fast atomic commits to notify KMD + * of update events. See `vblank_control_worker()`. + */ + if (!vrr_active && + acrtc_attach->dm_irq_params.allow_sr_entry && +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY + !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && +#endif + (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) { + if (pr->replay_feature_enabled && !pr->replay_allow_active) + amdgpu_dm_replay_enable(acrtc_state->stream, true); + if (psr->psr_version == DC_PSR_VERSION_SU_1 && + !psr->psr_allow_active && !aconn->disallow_edp_enter_psr) + amdgpu_dm_psr_enable(acrtc_state->stream); + } + } else { + acrtc_attach->dm_irq_params.allow_sr_entry = false; + } } static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, - struct dc_state *dc_state, struct drm_device *dev, struct amdgpu_display_manager *dm, struct drm_crtc *pcrtc, bool wait_for_vblank) { - uint32_t i; - uint64_t timestamp_ns; + u32 i; + u64 timestamp_ns = ktime_get_ns(); struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); @@ -7531,10 +9875,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); int planes_count = 0, vpos, hpos; unsigned long flags; - uint32_t target_vblank, last_flip_vblank; - bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); + u32 target_vblank, last_flip_vblank; + bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state); bool cursor_update = false; bool pflip_present = false; + bool dirty_rects_changed = false; + bool updated_planes_and_streams = false; struct { struct dc_surface_update surface_updates[MAX_SURFACES]; struct dc_plane_info plane_infos[MAX_SURFACES]; @@ -7546,7 +9892,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); if (!bundle) { - dm_error("Failed to allocate update bundle\n"); + drm_err(dev, "Failed to allocate update bundle\n"); goto cleanup; } @@ -7554,8 +9900,24 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * Disable the cursor first if we're disabling all the planes. * It'll remain on the screen after the planes are re-enabled * if we don't. + * + * If the cursor is transitioning from native to overlay mode, the + * native cursor needs to be disabled first. 
*/ - if (acrtc_state->active_planes == 0) + if (acrtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE && + dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) { + struct dc_cursor_position cursor_position = {0}; + + if (!dc_stream_set_cursor_position(acrtc_state->stream, + &cursor_position)) + drm_err(dev, "DC failed to disable native cursor\n"); + + bundle->stream_update.cursor_position = + &acrtc_state->stream->cursor_position; + } + + if (acrtc_state->active_planes == 0 && + dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) amdgpu_dm_commit_cursors(state); /* update planes when needed */ @@ -7569,10 +9931,14 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); /* Cursor plane is handled after stream updates */ - if (plane->type == DRM_PLANE_TYPE_CURSOR) { + if (plane->type == DRM_PLANE_TYPE_CURSOR && + acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) { if ((fb && crtc == pcrtc) || - (old_plane_state->fb && old_plane_state->crtc == pcrtc)) + (old_plane_state->fb && old_plane_state->crtc == pcrtc)) { cursor_update = true; + if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) != 0) + amdgpu_dm_update_cursor(plane, old_plane_state, &bundle->stream_update); + } continue; } @@ -7585,15 +9951,21 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, continue; dc_plane = dm_new_plane_state->dc_state; + if (!dc_plane) + continue; bundle->surface_updates[planes_count].surface = dc_plane; if (new_pcrtc_state->color_mgmt_changed) { - bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; - bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; + bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction; + bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func; bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; + bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult; + bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func; + bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func; + bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf; } - fill_dc_scaling_info(dm->adev, new_plane_state, + amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state, &bundle->scaling_infos[planes_count]); bundle->surface_updates[planes_count].scaling_info = @@ -7613,7 +9985,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, afb->tiling_flags, &bundle->plane_infos[planes_count], &bundle->flip_addrs[planes_count].address, - afb->tmz_surface, false); + afb->tmz_surface); drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n", new_plane_state->plane->index, @@ -7622,17 +9994,55 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count]; - fill_dc_dirty_rects(plane, old_plane_state, new_plane_state, - new_crtc_state, - &bundle->flip_addrs[planes_count]); + if (acrtc_state->stream->link->psr_settings.psr_feature_enabled || + acrtc_state->stream->link->replay_settings.replay_feature_enabled) { + fill_dc_dirty_rects(plane, old_plane_state, + new_plane_state, new_crtc_state, + &bundle->flip_addrs[planes_count], + acrtc_state->stream->link->psr_settings.psr_version == + DC_PSR_VERSION_SU_1, + &dirty_rects_changed); + + /* + * If the dirty regions changed, PSR-SU need to be disabled 
temporarily + * and enabled it again after dirty regions are stable to avoid video glitch. + * PSR-SU will be enabled in vblank_control_worker() if user pause the video + * during the PSR-SU was disabled. + */ + if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && + acrtc_attach->dm_irq_params.allow_sr_entry && +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY + !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && +#endif + dirty_rects_changed) { + mutex_lock(&dm->dc_lock); + acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns = + timestamp_ns; + if (acrtc_state->stream->link->psr_settings.psr_allow_active) + amdgpu_dm_psr_disable(acrtc_state->stream, true); + mutex_unlock(&dm->dc_lock); + } + } /* * Only allow immediate flips for fast updates that don't - * change FB pitch, DCC state, rotation or mirroing. + * change memory domain, FB pitch, DCC state, rotation or + * mirroring. + * + * dm_crtc_helper_atomic_check() only accepts async flips with + * fast updates. */ + if (crtc->state->async_flip && + (acrtc_state->update_type != UPDATE_TYPE_FAST || + get_mem_type(old_plane_state->fb) != get_mem_type(fb))) + drm_warn_once(state->dev, + "[PLANE:%d:%s] async flip with non-fast update\n", + plane->base.id, plane->name); + bundle->flip_addrs[planes_count].flip_immediate = crtc->state->async_flip && - acrtc_state->update_type == UPDATE_TYPE_FAST; + acrtc_state->update_type == UPDATE_TYPE_FAST && + get_mem_type(old_plane_state->fb) == get_mem_type(fb); timestamp_ns = ktime_get_ns(); bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); @@ -7640,7 +10050,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].surface = dc_plane; if (!bundle->surface_updates[planes_count].surface) { - DRM_ERROR("No surface for CRTC: id=%d\n", + drm_err(dev, "No surface for CRTC: id=%d\n", acrtc_attach->crtc_id); continue; } @@ -7671,8 +10081,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * DRI3/Present extension with defined target_msc. 
*/ last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc); - } - else { + } else { /* For variable refresh rate mode only: * Get vblank of last completed flip to avoid > 1 vrr * flips per video frame by use of throttling, but allow @@ -7728,15 +10137,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->stream_update.vrr_infopacket = &acrtc_state->stream->vrr_infopacket; } - } else if (cursor_update && acrtc_state->active_planes > 0 && - acrtc_attach->base.state->event) { - drm_crtc_vblank_get(pcrtc); - + } else if (cursor_update && acrtc_state->active_planes > 0) { spin_lock_irqsave(&pcrtc->dev->event_lock, flags); - - acrtc_attach->event = acrtc_attach->base.state->event; - acrtc_attach->base.state->event = NULL; - + if (acrtc_attach->base.state->event) { + drm_crtc_vblank_get(pcrtc); + acrtc_attach->event = acrtc_attach->base.state->event; + acrtc_attach->base.state->event = NULL; + } spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } @@ -7766,13 +10173,26 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->stream_update.output_csc_transform = &acrtc_state->stream->csc_color_matrix; bundle->stream_update.out_transfer_func = - acrtc_state->stream->out_transfer_func; + &acrtc_state->stream->out_transfer_func; + bundle->stream_update.lut3d_func = + (struct dc_3dlut *) acrtc_state->stream->lut3d_func; + bundle->stream_update.func_shaper = + (struct dc_transfer_func *) acrtc_state->stream->func_shaper; } acrtc_state->stream->abm_level = acrtc_state->abm_level; if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) bundle->stream_update.abm_level = &acrtc_state->abm_level; + mutex_lock(&dm->dc_lock); + if ((acrtc_state->update_type > UPDATE_TYPE_FAST) || vrr_active) { + if (acrtc_state->stream->link->replay_settings.replay_allow_active) + amdgpu_dm_replay_disable(acrtc_state->stream); + if (acrtc_state->stream->link->psr_settings.psr_allow_active) + amdgpu_dm_psr_disable(acrtc_state->stream, true); + } + mutex_unlock(&dm->dc_lock); + /* * If FreeSync state on the stream has changed then we need to * re-adjust the min/max bounds now that DC doesn't handle this @@ -7786,16 +10206,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } mutex_lock(&dm->dc_lock); - if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && - acrtc_state->stream->link->psr_settings.psr_allow_active) - amdgpu_dm_psr_disable(acrtc_state->stream); - - dc_commit_updates_for_stream(dm->dc, - bundle->surface_updates, - planes_count, - acrtc_state->stream, - &bundle->stream_update, - dc_state); + update_planes_and_stream_adapter(dm->dc, + acrtc_state->update_type, + planes_count, + acrtc_state->stream, + &bundle->stream_update, + bundle->surface_updates); + updated_planes_and_streams = true; /** * Enable or disable the interrupts on the backend. @@ -7814,38 +10231,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, dm_update_pflip_irq_state(drm_to_adev(dev), acrtc_attach); - if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && - acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && - !acrtc_state->stream->link->psr_settings.psr_feature_enabled) - amdgpu_dm_link_setup_psr(acrtc_state->stream); - - /* Decrement skip count when PSR is enabled and we're doing fast updates. 
*/ - if (acrtc_state->update_type == UPDATE_TYPE_FAST && - acrtc_state->stream->link->psr_settings.psr_feature_enabled) { - struct amdgpu_dm_connector *aconn = - (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; - - if (aconn->psr_skip_count > 0) - aconn->psr_skip_count--; - - /* Allow PSR when skip count is 0. */ - acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count; - - /* - * If sink supports PSR SU, there is no need to rely on - * a vblank event disable request to enable PSR. PSR SU - * can be enabled immediately once OS demonstrates an - * adequate number of fast atomic commits to notify KMD - * of update events. See `vblank_control_worker()`. - */ - if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && - acrtc_attach->dm_irq_params.allow_psr_entry && - !acrtc_state->stream->link->psr_settings.psr_allow_active) - amdgpu_dm_psr_enable(acrtc_state->stream); - } else { - acrtc_attach->dm_irq_params.allow_psr_entry = false; - } - + amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns); mutex_unlock(&dm->dc_lock); } @@ -7854,7 +10240,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * This avoids redundant programming in the case where we're going * to be disabling a single plane - those pipes are being disabled. */ - if (acrtc_state->active_planes) + if (acrtc_state->active_planes && + (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, DCE_HWIP, 0) == 0) && + acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) amdgpu_dm_commit_cursors(state); cleanup: @@ -7892,7 +10280,10 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev, if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) continue; - notify: +notify: + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); mutex_lock(&adev->dm.audio_lock); @@ -7925,6 +10316,9 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev, if (!status) continue; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); mutex_lock(&adev->dm.audio_lock); @@ -7950,55 +10344,57 @@ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_stat stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); } -/** - * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. - * @state: The atomic state to commit - * - * This will tell DC to commit the constructed DC state from atomic_check, - * programming the hardware. Any failures here implies a hardware failure, since - * atomic check should have filtered anything non-kosher. 
- */ -static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) +static void dm_clear_writeback(struct amdgpu_display_manager *dm, + struct dm_crtc_state *crtc_state) +{ + dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0); +} + +static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, + struct dc_state *dc_state) { struct drm_device *dev = state->dev; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_display_manager *dm = &adev->dm; - struct dm_atomic_state *dm_state; - struct dc_state *dc_state = NULL, *dc_state_temp = NULL; - uint32_t i, j; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; - unsigned long flags; - bool wait_for_vblank = true; - struct drm_connector *connector; - struct drm_connector_state *old_con_state, *new_con_state; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; - int crtc_disable_count = 0; + struct drm_connector_state *old_con_state; + struct drm_connector *connector; bool mode_set_reset_required = false; - int r; + u32 i; + struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; + bool set_backlight_level = false; - trace_amdgpu_dm_atomic_commit_tail_begin(state); + /* Disable writeback */ + for_each_old_connector_in_state(state, connector, old_con_state, i) { + struct dm_connector_state *dm_old_con_state; + struct amdgpu_crtc *acrtc; - r = drm_atomic_helper_wait_for_fences(dev, state, false); - if (unlikely(r)) - DRM_ERROR("Waiting for fences timed out!"); + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + continue; - drm_atomic_helper_update_legacy_modeset_state(dev, state); - drm_dp_mst_atomic_wait_for_dependencies(state); + old_crtc_state = NULL; - dm_state = dm_atomic_get_new_state(state); - if (dm_state && dm_state->context) { - dc_state = dm_state->context; - } else { - /* No state changes, retain current state. 
*/ - dc_state_temp = dc_create_state(dm->dc); - ASSERT(dc_state_temp); - dc_state = dc_state_temp; - dc_resource_state_copy_construct_current(dm->dc, dc_state); + dm_old_con_state = to_dm_connector_state(old_con_state); + if (!dm_old_con_state->base.crtc) + continue; + + acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc); + if (acrtc) + old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); + + if (!acrtc || !acrtc->wb_enabled) + continue; + + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + + dm_clear_writeback(dm, dm_old_crtc_state); + acrtc->wb_enabled = false; } - for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state, - new_crtc_state, i) { + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); @@ -8006,7 +10402,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) if (old_crtc_state->active && (!new_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))) { - manage_dm_interrupts(adev, acrtc, false); + manage_dm_interrupts(adev, acrtc, NULL); dc_stream_release(dm_old_crtc_state->stream); } } @@ -8021,9 +10417,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); drm_dbg_state(state->dev, - "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " - "planes_changed:%d, mode_changed:%d,active_changed:%d," - "connectors_changed:%d\n", + "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", acrtc->crtc_id, new_crtc_state->enable, new_crtc_state->active, @@ -8038,7 +10432,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) memset(&position, 0, sizeof(position)); mutex_lock(&dm->dc_lock); - dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position); + dc_exit_ips_for_hw_access(dm->dc); + dc_stream_program_cursor_position(dm_old_crtc_state->stream, &position); mutex_unlock(&dm->dc_lock); } @@ -8052,9 +10447,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * aconnector as needed */ - if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) { + if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) { - DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc); + drm_dbg_atomic(dev, + "Atomic commit: SET crtc id %d: [%p]\n", + acrtc->crtc_id, acrtc); if (!dm_new_crtc_state->stream) { /* @@ -8072,8 +10469,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * have a sink to keep the pipe running so that * hw state is consistent with the sw state */ - DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", - __func__, acrtc->base.base.id); + drm_dbg_atomic(dev, + "Failed to create new stream for crtc %d\n", + acrtc->base.base.id); continue; } @@ -8086,8 +10484,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) acrtc->hw_mode = new_crtc_state->mode; crtc->hwmode = new_crtc_state->mode; mode_set_reset_required = true; + set_backlight_level = true; } else if (modereset_required(new_crtc_state)) { - DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); + drm_dbg_atomic(dev, + "Atomic commit: RESET. crtc id %d:[%p]\n", + acrtc->crtc_id, acrtc); /* i.e. 
reset mode */ if (dm_old_crtc_state->stream) remove_stream(adev, acrtc, dm_old_crtc_state->stream); @@ -8096,24 +10497,24 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) } } /* for_each_crtc_in_state() */ - if (dc_state) { - /* if there mode set or reset, disable eDP PSR */ - if (mode_set_reset_required) { - if (dm->vblank_control_workqueue) - flush_workqueue(dm->vblank_control_workqueue); + /* if there mode set or reset, disable eDP PSR, Replay */ + if (mode_set_reset_required) { + if (dm->vblank_control_workqueue) + flush_workqueue(dm->vblank_control_workqueue); - amdgpu_dm_psr_disable_all(dm); - } + amdgpu_dm_replay_disable_all(dm); + amdgpu_dm_psr_disable_all(dm); + } - dm_enable_per_frame_crtc_master_sync(dc_state); - mutex_lock(&dm->dc_lock); - WARN_ON(!dc_commit_state(dm->dc, dc_state)); + dm_enable_per_frame_crtc_master_sync(dc_state); + mutex_lock(&dm->dc_lock); + dc_exit_ips_for_hw_access(dm->dc); + WARN_ON(!dc_commit_streams(dm->dc, ¶ms)); - /* Allow idle optimization when vblank count is 0 for display off */ - if (dm->active_vblank_irq_count == 0) - dc_allow_idle_optimizations(dm->dc, true); - mutex_unlock(&dm->dc_lock); - } + /* Allow idle optimization when vblank count is 0 for display off */ + if ((dm->active_vblank_irq_count == 0) && amdgpu_dm_is_headless(dm->adev)) + dc_allow_idle_optimizations(dm->dc, true); + mutex_unlock(&dm->dc_lock); for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); @@ -8125,24 +10526,192 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dc_stream_get_status(dm_new_crtc_state->stream); if (!status) - status = dc_stream_get_status_from_state(dc_state, + status = dc_state_get_stream_status(dc_state, dm_new_crtc_state->stream); if (!status) - DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc); + drm_err(dev, + "got no status for stream %p on acrtc%p\n", + dm_new_crtc_state->stream, acrtc); else acrtc->otg_inst = status->primary_otg_inst; } } -#ifdef CONFIG_DRM_AMD_DC_HDCP + + /* During boot up and resume the DC layer will reset the panel brightness + * to fix a flicker issue. + * It will cause the dm->actual_brightness is not the current panel brightness + * level. 
(the dm->brightness is the correct panel level) + * So we set the backlight level with dm->brightness value after set mode + */ + if (set_backlight_level) { + for (i = 0; i < dm->num_of_edps; i++) { + if (dm->backlight_dev[i]) + amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); + } + } +} + +static void dm_set_writeback(struct amdgpu_display_manager *dm, + struct dm_crtc_state *crtc_state, + struct drm_connector *connector, + struct drm_connector_state *new_con_state) +{ + struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector); + struct amdgpu_device *adev = dm->adev; + struct amdgpu_crtc *acrtc; + struct dc_writeback_info *wb_info; + struct pipe_ctx *pipe = NULL; + struct amdgpu_framebuffer *afb; + int i = 0; + + wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL); + if (!wb_info) { + drm_err(adev_to_drm(adev), "Failed to allocate wb_info\n"); + return; + } + + acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc); + if (!acrtc) { + drm_err(adev_to_drm(adev), "no amdgpu_crtc found\n"); + kfree(wb_info); + return; + } + + afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb); + if (!afb) { + drm_err(adev_to_drm(adev), "No amdgpu_framebuffer found\n"); + kfree(wb_info); + return; + } + + for (i = 0; i < MAX_PIPES; i++) { + if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) { + pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i]; + break; + } + } + + /* fill in wb_info */ + wb_info->wb_enabled = true; + + wb_info->dwb_pipe_inst = 0; + wb_info->dwb_params.dwbscl_black_color = 0; + wb_info->dwb_params.hdr_mult = 0x1F000; + wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS; + wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13; + wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC; + wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC; + + /* width & height from crtc */ + wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay; + wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay; + wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay; + wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay; + + wb_info->dwb_params.cnv_params.crop_en = false; + wb_info->dwb_params.stereo_params.stereo_enabled = false; + + wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; // 10 bits + wb_info->dwb_params.cnv_params.out_min_pix_val = 0; + wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB; + wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS; + + wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444; + + wb_info->dwb_params.capture_rate = dwb_capture_rate_0; + + wb_info->dwb_params.scaler_taps.h_taps = 4; + wb_info->dwb_params.scaler_taps.v_taps = 4; + wb_info->dwb_params.scaler_taps.h_taps_c = 2; + wb_info->dwb_params.scaler_taps.v_taps_c = 2; + wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING; + + wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0]; + wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1]; + + for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) { + wb_info->mcif_buf_params.luma_address[i] = afb->address; + wb_info->mcif_buf_params.chroma_address[i] = 0; + } + + wb_info->mcif_buf_params.p_vmid = 1; + if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) { + wb_info->mcif_warmup_params.start_address.quad_part = afb->address; + wb_info->mcif_warmup_params.region_size = + wb_info->mcif_buf_params.luma_pitch * 
wb_info->dwb_params.dest_height; + } + wb_info->mcif_warmup_params.p_vmid = 1; + wb_info->writeback_source_plane = pipe->plane_state; + + dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info); + + acrtc->wb_pending = true; + acrtc->wb_conn = wb_conn; + drm_writeback_queue_job(wb_conn, new_con_state); +} + +static void amdgpu_dm_update_hdcp(struct drm_atomic_state *state) +{ + struct drm_connector_state *old_con_state, *new_con_state; + struct drm_device *dev = state->dev; + struct drm_connector *connector; + struct amdgpu_device *adev = drm_to_adev(dev); + int i; + + if (!adev->dm.hdcp_workqueue) + return; + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct dm_crtc_state *dm_new_crtc_state; + struct amdgpu_dm_connector *aconnector; + + if (!connector || connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + aconnector = to_amdgpu_dm_connector(connector); + + drm_dbg(dev, "[HDCP_DM] -------------- i : %x ----------\n", i); + + drm_dbg(dev, "[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", + connector->index, connector->status, connector->dpms); + drm_dbg(dev, "[HDCP_DM] state protection old: %x new: %x\n", + old_con_state->content_protection, new_con_state->content_protection); + + if (aconnector->dc_sink) { + if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL && + aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) { + drm_dbg(dev, "[HDCP_DM] pipe_ctx dispname=%s\n", + aconnector->dc_sink->edid_caps.display_name); + } + } new_crtc_state = NULL; + old_crtc_state = NULL; - if (acrtc) + if (acrtc) { new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); + old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); + } + + if (old_crtc_state) + drm_dbg(dev, "old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + old_crtc_state->enable, + old_crtc_state->active, + old_crtc_state->mode_changed, + old_crtc_state->active_changed, + old_crtc_state->connectors_changed); + + if (new_crtc_state) + drm_dbg(dev, "NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + new_crtc_state->enable, + new_crtc_state->active, + new_crtc_state->mode_changed, + new_crtc_state->active_changed, + new_crtc_state->connectors_changed); + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); @@ -8154,26 +10723,130 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) continue; } - if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) - hdcp_update_display( - adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, - new_con_state->hdcp_content_type, - new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED); + if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state, + old_con_state, connector, adev->dm.hdcp_workqueue)) { + /* when display is unplugged from mst hub, connctor will + * be destroyed within dm_dp_mst_connector_destroy. connector + * hdcp perperties, like type, undesired, desired, enabled, + * will be lost. So, save hdcp properties into hdcp_work within + * amdgpu_dm_atomic_commit_tail. 
if the same display is + * plugged back with same display index, its hdcp properties + * will be retrieved from hdcp_work within dm_dp_mst_get_modes + */ + + bool enable_encryption = false; + + if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) + enable_encryption = true; + + if (aconnector->dc_link && aconnector->dc_sink && + aconnector->dc_link->type == dc_connection_mst_branch) { + struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; + struct hdcp_workqueue *hdcp_w = + &hdcp_work[aconnector->dc_link->link_index]; + + hdcp_w->hdcp_content_type[connector->index] = + new_con_state->hdcp_content_type; + hdcp_w->content_protection[connector->index] = + new_con_state->content_protection; + } + + if (new_crtc_state && new_crtc_state->mode_changed && + new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) + enable_encryption = true; + + drm_info(dev, "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); + + if (aconnector->dc_link) + hdcp_update_display( + adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, + new_con_state->hdcp_content_type, enable_encryption); + } } -#endif +} + +static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; + int i, ret; + + ret = drm_dp_mst_atomic_setup_commit(state); + if (ret) + return ret; + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + /* + * Color management settings. We also update color properties + * when a modeset is needed, to ensure it gets reprogrammed. + */ + if (dm_new_crtc_state->base.active && dm_new_crtc_state->stream && + (dm_new_crtc_state->base.color_mgmt_changed || + dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf || + drm_atomic_crtc_needs_modeset(new_crtc_state))) { + ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state); + if (ret) { + drm_dbg_atomic(state->dev, "Failed to update color state\n"); + return ret; + } + } + } + + return 0; +} + +/** + * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. + * @state: The atomic state to commit + * + * This will tell DC to commit the constructed DC state from atomic_check, + * programming the hardware. Any failures here implies a hardware failure, since + * atomic check should have filtered anything non-kosher. 
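The MST handling earlier in this hunk caches each connector's HDCP content type and protection state in the per-link hdcp_work array, keyed by connector->index, so the values survive connector destruction on unplug and can be restored from dm_dp_mst_get_modes() when the same display index reappears. A minimal userspace model of that save/restore pattern follows; struct hdcp_props, hdcp_save() and hdcp_restore() are illustrative names, not the driver's API.

#include <stdio.h>

#define MAX_CONNECTORS 32

/* Illustrative stand-ins for the connector's HDCP properties. */
struct hdcp_props {
	int content_type;       /* e.g. HDCP content type 0 vs 1 */
	int content_protection; /* UNDESIRED / DESIRED / ENABLED */
};

/* Per-link cache that outlives the connector object itself. */
static struct hdcp_props hdcp_cache[MAX_CONNECTORS];

/* Called at commit time, before the MST connector can be destroyed. */
static void hdcp_save(unsigned int conn_index, const struct hdcp_props *p)
{
	if (conn_index < MAX_CONNECTORS)
		hdcp_cache[conn_index] = *p;
}

/* Called when the same display index shows up again after replug. */
static void hdcp_restore(unsigned int conn_index, struct hdcp_props *p)
{
	if (conn_index < MAX_CONNECTORS)
		*p = hdcp_cache[conn_index];
}

int main(void)
{
	struct hdcp_props live = { .content_type = 0, .content_protection = 2 };
	struct hdcp_props replug = { 0 };

	hdcp_save(5, &live);      /* commit tail: stash props by connector index */
	hdcp_restore(5, &replug); /* get_modes after replug: recover them */
	printf("restored protection=%d\n", replug.content_protection);
	return 0;
}

The essential design point is that the cache is indexed by a stable connector index rather than by the connector object, which may already have been freed by the time the display returns.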
+ */ +static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_display_manager *dm = &adev->dm; + struct dm_atomic_state *dm_state; + struct dc_state *dc_state = NULL; + u32 i, j; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + unsigned long flags; + bool wait_for_vblank = true; + struct drm_connector *connector; + struct drm_connector_state *old_con_state = NULL, *new_con_state = NULL; + struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; + int crtc_disable_count = 0; + + trace_amdgpu_dm_atomic_commit_tail_begin(state); + + drm_atomic_helper_update_legacy_modeset_state(dev, state); + drm_dp_mst_atomic_wait_for_dependencies(state); + + dm_state = dm_atomic_get_new_state(state); + if (dm_state && dm_state->context) { + dc_state = dm_state->context; + amdgpu_dm_commit_streams(state, dc_state); + } + + amdgpu_dm_update_hdcp(state); /* Handle connector state changes */ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); - struct dc_surface_update dummy_updates[MAX_SURFACES]; + struct dc_surface_update *dummy_updates; struct dc_stream_update stream_update; struct dc_info_packet hdr_packet; struct dc_stream_status *status = NULL; - bool abm_changed, hdr_changed, scaling_changed; + bool abm_changed, hdr_changed, scaling_changed, output_color_space_changed = false; - memset(&dummy_updates, 0, sizeof(dummy_updates)); memset(&stream_update, 0, sizeof(stream_update)); if (acrtc) { @@ -8191,13 +10864,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) scaling_changed = is_scaling_state_different(dm_new_con_state, dm_old_con_state); + if ((new_con_state->hdmi.broadcast_rgb != old_con_state->hdmi.broadcast_rgb) && + (dm_old_crtc_state->stream->output_color_space != + get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state))) + output_color_space_changed = true; + abm_changed = dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level; hdr_changed = !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state); - if (!scaling_changed && !abm_changed && !hdr_changed) + if (!scaling_changed && !abm_changed && !hdr_changed && !output_color_space_changed) continue; stream_update.stream = dm_new_crtc_state->stream; @@ -8209,6 +10887,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) stream_update.dst = dm_new_crtc_state->stream->dst; } + if (output_color_space_changed) { + dm_new_crtc_state->stream->output_color_space + = get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state); + + stream_update.output_color_space = &dm_new_crtc_state->stream->output_color_space; + } + if (abm_changed) { dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; @@ -8232,18 +10917,28 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * Here we create an empty update on each plane. * To fix this, DC should permit updating only stream properties. 
*/ + dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_KERNEL); + if (!dummy_updates) { + drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n"); + continue; + } for (j = 0; j < status->plane_count; j++) dummy_updates[j].surface = status->plane_states[0]; + sort(dummy_updates, status->plane_count, + sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL); mutex_lock(&dm->dc_lock); - dc_commit_updates_for_stream(dm->dc, - dummy_updates, - status->plane_count, - dm_new_crtc_state->stream, - &stream_update, - dc_state); + dc_exit_ips_for_hw_access(dm->dc); + dc_update_planes_and_stream(dm->dc, + dummy_updates, + status->plane_count, + dm_new_crtc_state->stream, + &stream_update); mutex_unlock(&dm->dc_lock); + kfree(dummy_updates); + + drm_connector_update_privacy_screen(new_con_state); } /** @@ -8256,9 +10951,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); #ifdef CONFIG_DEBUG_FS enum amdgpu_dm_pipe_crc_source cur_crc_src; -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - struct crc_rd_work *crc_rd_wrk; -#endif #endif /* Count number of newly disabled CRTCs for dropping PM refs later. */ if (old_crtc_state->active && !new_crtc_state->active) @@ -8271,9 +10963,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) update_stream_irq_parameters(dm, dm_new_crtc_state); #ifdef CONFIG_DEBUG_FS -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - crc_rd_wrk = dm->crc_rd_wrk; -#endif spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); cur_crc_src = acrtc->dm_irq_params.crc_src; spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); @@ -8284,7 +10973,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_crtc_needs_modeset(new_crtc_state))) { dc_stream_retain(dm_new_crtc_state->stream); acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; - manage_dm_interrupts(adev, acrtc, true); + manage_dm_interrupts(adev, acrtc, dm_new_crtc_state); } /* Handle vrr on->off / off->on transitions */ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state); @@ -8300,18 +10989,26 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) if (amdgpu_dm_crc_window_is_activated(crtc)) { + uint8_t cnt; + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); - acrtc->dm_irq_params.crc_window.update_win = true; - acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2; - spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); - crc_rd_wrk->crtc = crtc; - spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); + for (cnt = 0; cnt < MAX_CRC_WINDOW_NUM; cnt++) { + if (acrtc->dm_irq_params.window_param[cnt].enable) { + acrtc->dm_irq_params.window_param[cnt].update_win = true; + + /** + * It takes 2 frames for HW to stably generate CRC when + * resuming from suspend, so we set skip_frame_cnt 2. 
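The comment above notes that the CRC engine needs two frames after resume before its output is stable, which is why every enabled CRC window is armed with skip_frame_cnt = 2. A self-contained sketch of how such a per-window skip counter is typically consumed on each frame is below; struct crc_window and crc_window_frame_ready() are illustrative names, only the fields mirror the hunk.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct crc_window {
	bool enable;
	bool update_win;
	uint8_t skip_frame_cnt; /* frames to ignore before trusting the CRC */
};

/* Returns true when the CRC read for this frame should be kept. */
static bool crc_window_frame_ready(struct crc_window *w)
{
	if (!w->enable)
		return false;
	if (w->skip_frame_cnt > 0) {
		w->skip_frame_cnt--; /* hardware still settling after resume */
		return false;
	}
	return true;
}

int main(void)
{
	struct crc_window w = { .enable = true, .update_win = true, .skip_frame_cnt = 2 };

	for (int frame = 0; frame < 4; frame++)
		printf("frame %d: %s\n", frame,
		       crc_window_frame_ready(&w) ? "read CRC" : "skip");
	return 0;
}

With skip_frame_cnt = 2, the first two frames are dropped and the third is the first one whose CRC is reported.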
+ */ + acrtc->dm_irq_params.window_param[cnt].skip_frame_cnt = 2; + } + } spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } #endif if (amdgpu_dm_crtc_configure_crc_source( crtc, dm_new_crtc_state, cur_crc_src)) - DRM_DEBUG_DRIVER("Failed to configure crc source"); + drm_dbg_atomic(dev, "Failed to configure crc source"); } } #endif @@ -8326,8 +11023,32 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (dm_new_crtc_state->stream) - amdgpu_dm_commit_planes(state, dc_state, dev, - dm, crtc, wait_for_vblank); + amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank); + } + + /* Enable writeback */ + for_each_new_connector_in_state(state, connector, new_con_state, i) { + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); + + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + if (!new_con_state->writeback_job) + continue; + + new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); + + if (!new_crtc_state) + continue; + + if (acrtc->wb_enabled) + continue; + + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + + dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state); + acrtc->wb_enabled = true; } /* Update audio instances for each connector. */ @@ -8362,10 +11083,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_cleanup_planes(dev, state); - /* return the stolen vga memory back to VRAM */ - if (!adev->mman.keep_stolen_vga_memory) - amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); - amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); + /* Don't free the memory if we are hitting this as part of suspend. + * This way we don't free any memory during suspend; see + * amdgpu_bo_free_kernel(). The memory will be freed in the first + * non-suspend modeset or when the driver is torn down. 
+ */ + if (!adev->in_suspend) { + /* return the stolen vga memory back to VRAM */ + if (!adev->mman.keep_stolen_vga_memory) + amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); + amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); + } /* * Finally, drop a runtime PM reference for each newly disabled CRTC, @@ -8376,8 +11104,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) pm_runtime_put_autosuspend(dev->dev); pm_runtime_mark_last_busy(dev->dev); - if (dc_state_temp) - dc_release_state(dc_state_temp); + trace_amdgpu_dm_atomic_commit_tail_finish(state); } static int dm_force_atomic_commit(struct drm_connector *connector) @@ -8403,16 +11130,20 @@ static int dm_force_atomic_commit(struct drm_connector *connector) */ conn_state = drm_atomic_get_connector_state(state, connector); - ret = PTR_ERR_OR_ZERO(conn_state); - if (ret) + /* Check for error in getting connector state */ + if (IS_ERR(conn_state)) { + ret = PTR_ERR(conn_state); goto out; + } /* Attach crtc to drm_atomic_state*/ crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); - ret = PTR_ERR_OR_ZERO(crtc_state); - if (ret) + /* Check for error in getting crtc state */ + if (IS_ERR(crtc_state)) { + ret = PTR_ERR(crtc_state); goto out; + } /* force a restore */ crtc_state->mode_changed = true; @@ -8420,9 +11151,11 @@ static int dm_force_atomic_commit(struct drm_connector *connector) /* Attach plane to drm_atomic_state */ plane_state = drm_atomic_get_plane_state(state, plane); - ret = PTR_ERR_OR_ZERO(plane_state); - if (ret) + /* Check for error in getting plane state */ + if (IS_ERR(plane_state)) { + ret = PTR_ERR(plane_state); goto out; + } /* Call commit internally with the state we just constructed */ ret = drm_atomic_commit(state); @@ -8430,7 +11163,7 @@ static int dm_force_atomic_commit(struct drm_connector *connector) out: drm_atomic_state_put(state); if (ret) - DRM_ERROR("Restoring old state failed with %i\n", ret); + drm_err(ddev, "Restoring old state failed with %i\n", ret); return ret; } @@ -8443,10 +11176,15 @@ out: void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector) { - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *aconnector; struct amdgpu_crtc *disconnected_acrtc; struct dm_crtc_state *acrtc_state; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + return; + + aconnector = to_amdgpu_dm_connector(connector); + if (!aconnector->dc_sink || !connector->state || !connector->encoder) return; @@ -8509,8 +11247,8 @@ static int do_aquire_global_lock(struct drm_device *dev, &commit->flip_done, 10*HZ); if (ret == 0) - DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done " - "timed out\n", crtc->base.id, crtc->name); + drm_err(dev, "[CRTC:%d:%s] hw_done or flip_done timed out\n", + crtc->base.id, crtc->name); drm_crtc_commit_put(commit); } @@ -8523,12 +11261,16 @@ static void get_freesync_config_for_crtc( struct dm_connector_state *new_con_state) { struct mod_freesync_config config = {0}; - struct amdgpu_dm_connector *aconnector = - to_amdgpu_dm_connector(new_con_state->base.connector); + struct amdgpu_dm_connector *aconnector; struct drm_display_mode *mode = &new_crtc_state->base.mode; int vrefresh = drm_mode_vrefresh(mode); bool fs_vid_mode = false; + if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + return; + + aconnector = to_amdgpu_dm_connector(new_con_state->base.connector); + new_crtc_state->vrr_supported = 
new_con_state->freesync_capable && vrefresh >= aconnector->min_vfreq && vrefresh <= aconnector->max_vfreq; @@ -8551,6 +11293,8 @@ static void get_freesync_config_for_crtc( } else { config.state = VRR_STATE_INACTIVE; } + } else { + config.state = VRR_STATE_UNSUPPORTED; } out: new_crtc_state->freesync_config = config; @@ -8595,8 +11339,9 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, return false; } -static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) { - uint64_t num, den, res; +static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) +{ + u64 num, den, res; struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base; dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED; @@ -8620,6 +11365,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, struct dm_atomic_state *dm_state = NULL; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; struct dc_stream_state *new_stream; + struct amdgpu_device *adev = dm->adev; int ret = 0; /* @@ -8627,6 +11373,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * update changed items */ struct amdgpu_crtc *acrtc = NULL; + struct drm_connector *connector = NULL; struct amdgpu_dm_connector *aconnector = NULL; struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; @@ -8636,18 +11383,20 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); acrtc = to_amdgpu_crtc(crtc); - aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); + connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); + if (connector) + aconnector = to_amdgpu_dm_connector(connector); /* TODO This hack should go away */ - if (aconnector && enable) { + if (connector && enable) { /* Make sure fake sink is created in plug-in scenario */ drm_new_conn_state = drm_atomic_get_new_connector_state(state, - &aconnector->base); + connector); drm_old_conn_state = drm_atomic_get_old_connector_state(state, - &aconnector->base); + connector); - if (IS_ERR(drm_new_conn_state)) { - ret = PTR_ERR_OR_ZERO(drm_new_conn_state); + if (WARN_ON(!drm_new_conn_state)) { + ret = -EINVAL; goto fail; } @@ -8657,7 +11406,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) goto skip_modeset; - new_stream = create_validate_stream_for_sink(aconnector, + new_stream = create_validate_stream_for_sink(connector, &new_crtc_state->mode, dm_new_conn_state, dm_old_crtc_state->stream); @@ -8670,7 +11419,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, */ if (!new_stream) { - DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", + drm_dbg_driver(adev_to_drm(adev), "%s: Failed to create new stream for crtc %d\n", __func__, acrtc->base.base.id); ret = -ENOMEM; goto fail; @@ -8699,7 +11448,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * TODO: Refactor this function to allow this check to work * in all conditions. 
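set_freesync_fixed_config(), reworked earlier in this hunk, switches the CRTC into VRR_STATE_ACTIVE_FIXED and, per its num/den/res locals, derives a fixed refresh value from the mode timings. As a worked example of the underlying arithmetic — refresh in micro-Hertz from pixel clock, htotal and vtotal, which is the standard DRM relation and an assumption about what this helper computes — mode_refresh_uhz() below is illustrative only:

#include <stdint.h>
#include <stdio.h>

/*
 * Refresh rate in micro-Hertz for a mode: the pixel clock is in kHz, so
 * refresh_hz = clock_khz * 1000 / (htotal * vtotal); scale by 10^6 for uHz.
 * 64-bit intermediates keep the multiplication from overflowing.
 */
static uint64_t mode_refresh_uhz(uint32_t clock_khz, uint32_t htotal, uint32_t vtotal)
{
	uint64_t num = (uint64_t)clock_khz * 1000ull * 1000000ull;
	uint64_t den = (uint64_t)htotal * vtotal;

	return den ? num / den : 0;
}

int main(void)
{
	/* 1920x1080@60: 148.5 MHz pixel clock, 2200x1125 total -> 60 Hz. */
	printf("%llu uHz\n",
	       (unsigned long long)mode_refresh_uhz(148500, 2200, 1125));
	return 0;
}

The example prints 60000000 uHz, i.e. exactly 60 Hz for the CEA 1080p timing.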
*/ - if (dm_new_crtc_state->stream && + if (amdgpu_freesync_vid_mode && + dm_new_crtc_state->stream && is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) goto skip_modeset; @@ -8707,7 +11457,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { new_crtc_state->mode_changed = false; - DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", + drm_dbg_driver(adev_to_drm(adev), "Mode change not required, setting mode_changed to %d", new_crtc_state->mode_changed); } } @@ -8717,9 +11467,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, goto skip_modeset; drm_dbg_state(state->dev, - "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " - "planes_changed:%d, mode_changed:%d,active_changed:%d," - "connectors_changed:%d\n", + "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", acrtc->crtc_id, new_crtc_state->enable, new_crtc_state->active, @@ -8734,38 +11482,45 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, if (!dm_old_crtc_state->stream) goto skip_modeset; - if (dm_new_crtc_state->stream && + /* Unset freesync video if it was active before */ + if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) { + dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE; + dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0; + } + + /* Now check if we should set freesync video mode */ + if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && + dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && + dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) && is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) { new_crtc_state->mode_changed = false; - DRM_DEBUG_DRIVER( - "Mode change not required for front porch change, " - "setting mode_changed to %d", + drm_dbg_driver(adev_to_drm(adev), + "Mode change not required for front porch change, setting mode_changed to %d", new_crtc_state->mode_changed); set_freesync_fixed_config(dm_new_crtc_state); goto skip_modeset; - } else if (aconnector && + } else if (amdgpu_freesync_vid_mode && aconnector && is_freesync_video_mode(&new_crtc_state->mode, aconnector)) { struct drm_display_mode *high_mode; high_mode = get_highest_refresh_rate_mode(aconnector, false); - if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) { + if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) set_freesync_fixed_config(dm_new_crtc_state); - } } ret = dm_atomic_get_state(state, &dm_state); if (ret) goto fail; - DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", + drm_dbg_driver(adev_to_drm(adev), "Disabling DRM crtc: %d\n", crtc->base.id); /* i.e. 
reset mode */ - if (dc_remove_stream_from_ctx( + if (dc_state_remove_stream( dm->dc, dm_state->context, dm_old_crtc_state->stream) != DC_OK) { @@ -8786,13 +11541,13 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * added MST connectors not found in existing crtc_state in the chained mode * TODO: need to dig out the root cause of that */ - if (!aconnector) + if (!connector) goto skip_modeset; if (modereset_required(new_crtc_state)) goto skip_modeset; - if (modeset_required(new_crtc_state, new_stream, + if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream, dm_old_crtc_state->stream)) { WARN_ON(dm_new_crtc_state->stream); @@ -8808,7 +11563,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n", crtc->base.id); - if (dc_add_stream_to_ctx( + if (dc_state_add_stream( dm->dc, dm_state->context, dm_new_crtc_state->stream) != DC_OK) { @@ -8823,13 +11578,13 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, skip_modeset: /* Release extra reference */ if (new_stream) - dc_stream_release(new_stream); + dc_stream_release(new_stream); /* * We want to do dc stream updates that do not require a * full modeset below. */ - if (!(enable && aconnector && new_crtc_state->active)) + if (!(enable && connector && new_crtc_state->active)) return 0; /* * Given above conditions, the dc state cannot be NULL because: @@ -8855,8 +11610,9 @@ skip_modeset: * when a modeset is needed, to ensure it gets reprogrammed. */ if (dm_new_crtc_state->base.color_mgmt_changed || + dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf || drm_atomic_crtc_needs_modeset(new_crtc_state)) { - ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state); + ret = amdgpu_dm_check_crtc_color_mgmt(dm_new_crtc_state, true); if (ret) goto fail; } @@ -8880,15 +11636,20 @@ static bool should_reset_plane(struct drm_atomic_state *state, { struct drm_plane *other; struct drm_plane_state *old_other_state, *new_other_state; - struct drm_crtc_state *new_crtc_state; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct dm_crtc_state *old_dm_crtc_state, *new_dm_crtc_state; + struct amdgpu_device *adev = drm_to_adev(plane->dev); int i; /* - * TODO: Remove this hack once the checks below are sufficient - * enough to determine when we need to reset all the planes on - * the stream. + * TODO: Remove this hack for all asics once it proves that the + * fast updates works fine on DCN3.2+. */ - if (state->allow_modeset) + if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) && + state->allow_modeset) + return true; + + if (amdgpu_in_reset(adev) && state->allow_modeset) return true; /* Exit early if we know that we're adding or removing the plane. */ @@ -8901,14 +11662,38 @@ static bool should_reset_plane(struct drm_atomic_state *state, new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); + old_crtc_state = + drm_atomic_get_old_crtc_state(state, old_plane_state->crtc); if (!new_crtc_state) return true; + /* + * A change in cursor mode means a new dc pipe needs to be acquired or + * released from the state + */ + old_dm_crtc_state = to_dm_crtc_state(old_crtc_state); + new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); + if (plane->type == DRM_PLANE_TYPE_CURSOR && + old_dm_crtc_state != NULL && + old_dm_crtc_state->cursor_mode != new_dm_crtc_state->cursor_mode) { + return true; + } + /* CRTC Degamma changes currently require us to recreate planes. 
*/ if (new_crtc_state->color_mgmt_changed) return true; + /* + * On zpos change, planes need to be reordered by removing and re-adding + * them one by one to the dc state, in order of descending zpos. + * + * TODO: We can likely skip bandwidth validation if the only thing that + * changed about the plane was it'z z-ordering. + */ + if (old_plane_state->normalized_zpos != new_plane_state->normalized_zpos) + return true; + if (drm_atomic_crtc_needs_modeset(new_crtc_state)) return true; @@ -8922,6 +11707,11 @@ static bool should_reset_plane(struct drm_atomic_state *state, */ for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { struct amdgpu_framebuffer *old_afb, *new_afb; + struct dm_plane_state *dm_new_other_state, *dm_old_other_state; + + dm_new_other_state = to_dm_plane_state(new_other_state); + dm_old_other_state = to_dm_plane_state(old_other_state); + if (other->type == DRM_PLANE_TYPE_CURSOR) continue; @@ -8957,6 +11747,18 @@ static bool should_reset_plane(struct drm_atomic_state *state, old_other_state->color_encoding != new_other_state->color_encoding) return true; + /* HDR/Transfer Function changes. */ + if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf || + dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut || + dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult || + dm_old_other_state->ctm != dm_new_other_state->ctm || + dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut || + dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf || + dm_old_other_state->lut3d != dm_new_other_state->lut3d || + dm_old_other_state->blend_lut != dm_new_other_state->blend_lut || + dm_old_other_state->blend_tf != dm_new_other_state->blend_tf) + return true; + /* Framebuffer checks fall at the end. */ if (!old_other_state->fb || !new_other_state->fb) continue; @@ -9020,14 +11822,17 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, } /* Core DRM takes care of checking FB modifiers, so we only need to - * check tiling flags when the FB doesn't have a modifier. */ + * check tiling flags when the FB doesn't have a modifier. 
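The cursor FB check above only needs to classify tiling when no format modifier is attached: a swizzle (or array) mode of 0 in the packed tiling flags marks the buffer as linear. A generic bitfield-extraction sketch of that classification follows; the field offset and mask are made up for illustration and are not the real AMDGPU_TILING_* layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout; the real driver uses AMDGPU_TILING_GET(). */
#define FIELD_SHIFT 0
#define FIELD_MASK  0x1f

static uint32_t tiling_get(uint64_t flags, unsigned int shift, uint64_t mask)
{
	return (uint32_t)((flags >> shift) & mask);
}

/* Swizzle/array mode 0 is the linear encoding on the families checked above. */
static bool fb_is_linear(uint64_t tiling_flags)
{
	return tiling_get(tiling_flags, FIELD_SHIFT, FIELD_MASK) == 0;
}

int main(void)
{
	printf("flags 0x0 -> %s\n", fb_is_linear(0x0) ? "linear" : "tiled");
	printf("flags 0x9 -> %s\n", fb_is_linear(0x9) ? "linear" : "tiled");
	return 0;
}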
+ */ if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) { - if (adev->family < AMDGPU_FAMILY_AI) { + if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) { + linear = AMDGPU_TILING_GET(afb->tiling_flags, GFX12_SWIZZLE_MODE) == 0; + } else if (adev->family >= AMDGPU_FAMILY_AI) { + linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; + } else { linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && - AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && + AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0; - } else { - linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; } if (!linear) { DRM_DEBUG_ATOMIC("Cursor FB not linear"); @@ -9038,13 +11843,76 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, return 0; } +/* + * Helper function for checking the cursor in native mode + */ +static int dm_check_native_cursor_state(struct drm_crtc *new_plane_crtc, + struct drm_plane *plane, + struct drm_plane_state *new_plane_state, + bool enable) +{ + + struct amdgpu_crtc *new_acrtc; + int ret; + + if (!enable || !new_plane_crtc || + drm_atomic_plane_disabling(plane->state, new_plane_state)) + return 0; + + new_acrtc = to_amdgpu_crtc(new_plane_crtc); + + if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) { + DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); + return -EINVAL; + } + + if (new_plane_state->fb) { + ret = dm_check_cursor_fb(new_acrtc, new_plane_state, + new_plane_state->fb); + if (ret) + return ret; + } + + return 0; +} + +static bool dm_should_update_native_cursor(struct drm_atomic_state *state, + struct drm_crtc *old_plane_crtc, + struct drm_crtc *new_plane_crtc, + bool enable) +{ + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; + + if (!enable) { + if (old_plane_crtc == NULL) + return true; + + old_crtc_state = drm_atomic_get_old_crtc_state( + state, old_plane_crtc); + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + + return dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE; + } else { + if (new_plane_crtc == NULL) + return true; + + new_crtc_state = drm_atomic_get_new_crtc_state( + state, new_plane_crtc); + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + + return dm_new_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE; + } +} + static int dm_update_plane_state(struct dc *dc, struct drm_atomic_state *state, struct drm_plane *plane, struct drm_plane_state *old_plane_state, struct drm_plane_state *new_plane_state, bool enable, - bool *lock_and_validation_needed) + bool *lock_and_validation_needed, + bool *is_top_most_overlay) { struct dm_atomic_state *dm_state = NULL; @@ -9052,8 +11920,7 @@ static int dm_update_plane_state(struct dc *dc, struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; - struct amdgpu_crtc *new_acrtc; - bool needs_reset; + bool needs_reset, update_native_cursor; int ret = 0; @@ -9062,24 +11929,16 @@ static int dm_update_plane_state(struct dc *dc, dm_new_plane_state = to_dm_plane_state(new_plane_state); dm_old_plane_state = to_dm_plane_state(old_plane_state); - if (plane->type == DRM_PLANE_TYPE_CURSOR) { - if (!enable || !new_plane_crtc || - drm_atomic_plane_disabling(plane->state, new_plane_state)) - return 0; - - new_acrtc = to_amdgpu_crtc(new_plane_crtc); + 
update_native_cursor = dm_should_update_native_cursor(state, + old_plane_crtc, + new_plane_crtc, + enable); - if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) { - DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); - return -EINVAL; - } - - if (new_plane_state->fb) { - ret = dm_check_cursor_fb(new_acrtc, new_plane_state, - new_plane_state->fb); - if (ret) - return ret; - } + if (plane->type == DRM_PLANE_TYPE_CURSOR && update_native_cursor) { + ret = dm_check_native_cursor_state(new_plane_crtc, plane, + new_plane_state, enable); + if (ret) + return ret; return 0; } @@ -9109,7 +11968,7 @@ static int dm_update_plane_state(struct dc *dc, if (ret) return ret; - if (!dc_remove_plane_from_context( + if (!dc_state_remove_plane( dc, dm_old_crtc_state->stream, dm_old_plane_state->dc_state, @@ -9118,8 +11977,9 @@ static int dm_update_plane_state(struct dc *dc, return -EINVAL; } + if (dm_old_plane_state->dc_state) + dc_plane_state_release(dm_old_plane_state->dc_state); - dc_plane_state_release(dm_old_plane_state->dc_state); dm_new_plane_state->dc_state = NULL; *lock_and_validation_needed = true; @@ -9142,15 +12002,17 @@ static int dm_update_plane_state(struct dc *dc, if (!needs_reset) return 0; - ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state); + ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state); if (ret) - return ret; + goto out; WARN_ON(dm_new_plane_state->dc_state); dc_new_plane_state = dc_create_plane_state(dc); - if (!dc_new_plane_state) - return -ENOMEM; + if (!dc_new_plane_state) { + ret = -ENOMEM; + goto out; + } DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n", plane->base.id, new_plane_crtc->base.id); @@ -9162,13 +12024,13 @@ static int dm_update_plane_state(struct dc *dc, new_crtc_state); if (ret) { dc_plane_state_release(dc_new_plane_state); - return ret; + goto out; } ret = dm_atomic_get_state(state, &dm_state); if (ret) { dc_plane_state_release(dc_new_plane_state); - return ret; + goto out; } /* @@ -9178,14 +12040,15 @@ static int dm_update_plane_state(struct dc *dc, * state. It'll be released when the atomic state is * cleaned. 
*/ - if (!dc_add_plane_to_context( + if (!dc_state_add_plane( dc, dm_new_crtc_state->stream, dc_new_plane_state, dm_state->context)) { dc_plane_state_release(dc_new_plane_state); - return -EINVAL; + ret = -EINVAL; + goto out; } dm_new_plane_state->dc_state = dc_new_plane_state; @@ -9200,6 +12063,16 @@ static int dm_update_plane_state(struct dc *dc, *lock_and_validation_needed = true; } +out: + /* If enabling cursor overlay failed, attempt fallback to native mode */ + if (enable && ret == -EINVAL && plane->type == DRM_PLANE_TYPE_CURSOR) { + ret = dm_check_native_cursor_state(new_plane_crtc, plane, + new_plane_state, enable); + if (ret) + return ret; + + dm_new_crtc_state->cursor_mode = DM_CURSOR_NATIVE_MODE; + } return ret; } @@ -9222,71 +12095,82 @@ static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state, } } -static int dm_check_crtc_cursor(struct drm_atomic_state *state, - struct drm_crtc *crtc, - struct drm_crtc_state *new_crtc_state) +static void +dm_get_plane_scale(struct drm_plane_state *plane_state, + int *out_plane_scale_w, int *out_plane_scale_h) { - struct drm_plane *cursor = crtc->cursor, *underlying; - struct drm_plane_state *new_cursor_state, *new_underlying_state; - int i; - int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h; - int cursor_src_w, cursor_src_h; - int underlying_src_w, underlying_src_h; - - /* On DCE and DCN there is no dedicated hardware cursor plane. We get a - * cursor per pipe but it's going to inherit the scaling and - * positioning from the underlying pipe. Check the cursor plane's - * blending properties match the underlying planes'. */ + int plane_src_w, plane_src_h; - new_cursor_state = drm_atomic_get_new_plane_state(state, cursor); - if (!new_cursor_state || !new_cursor_state->fb) { - return 0; - } + dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h); + *out_plane_scale_w = plane_src_w ? plane_state->crtc_w * 1000 / plane_src_w : 0; + *out_plane_scale_h = plane_src_h ? plane_state->crtc_h * 1000 / plane_src_h : 0; +} - dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h); - cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w; - cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h; +/* + * The normalized_zpos value cannot be used by this iterator directly. It's only + * calculated for enabled planes, potentially causing normalized_zpos collisions + * between enabled/disabled planes in the atomic state. We need a unique value + * so that the iterator will not generate the same object twice, or loop + * indefinitely. 
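dm_get_plane_scale(), factored out just above, reduces a plane's scaling to per-mille integers: destination size * 1000 / source size, with 0 standing in when the source dimension is zero. A standalone model of that computation, and of comparing a cursor's scale against an underlying plane's, is below; plane_scale() is an illustrative name.

#include <stdbool.h>
#include <stdio.h>

/* Scale factor in per-mille: 1000 means 1:1, 2000 means a 2x upscale. */
static void plane_scale(int src_w, int src_h, int crtc_w, int crtc_h,
			int *scale_w, int *scale_h)
{
	*scale_w = src_w ? crtc_w * 1000 / src_w : 0;
	*scale_h = src_h ? crtc_h * 1000 / src_h : 0;
}

int main(void)
{
	int cur_w, cur_h, und_w, und_h;

	/* 64x64 cursor shown 1:1 vs a 1920x1080 plane upscaled to 3840x2160. */
	plane_scale(64, 64, 64, 64, &cur_w, &cur_h);
	plane_scale(1920, 1080, 3840, 2160, &und_w, &und_h);

	bool differs = (cur_w != und_w) || (cur_h != und_h);
	printf("cursor %d/%d, underlying %d/%d -> %s\n",
	       cur_w, cur_h, und_w, und_h,
	       differs ? "scales differ" : "scales match");
	return 0;
}

Integer per-mille values keep the comparison cheap and avoid floating point in the atomic check path; the exact way the driver combines the width and height comparisons is shown further down in this patch.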
+ */ +static inline struct __drm_planes_state *__get_next_zpos( + struct drm_atomic_state *state, + struct __drm_planes_state *prev) +{ + unsigned int highest_zpos = 0, prev_zpos = 256; + uint32_t highest_id = 0, prev_id = UINT_MAX; + struct drm_plane_state *new_plane_state; + struct drm_plane *plane; + int i, highest_i = -1; - for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) { - /* Narrow down to non-cursor planes on the same CRTC as the cursor */ - if (new_underlying_state->crtc != crtc || underlying == crtc->cursor) - continue; + if (prev != NULL) { + prev_zpos = prev->new_state->zpos; + prev_id = prev->ptr->base.id; + } - /* Ignore disabled planes */ - if (!new_underlying_state->fb) + for_each_new_plane_in_state(state, plane, new_plane_state, i) { + /* Skip planes with higher zpos than the previously returned */ + if (new_plane_state->zpos > prev_zpos || + (new_plane_state->zpos == prev_zpos && + plane->base.id >= prev_id)) continue; - dm_get_oriented_plane_size(new_underlying_state, - &underlying_src_w, &underlying_src_h); - underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w; - underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h; - - if (cursor_scale_w != underlying_scale_w || - cursor_scale_h != underlying_scale_h) { - drm_dbg_atomic(crtc->dev, - "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n", - cursor->base.id, cursor->name, underlying->base.id, underlying->name); - return -EINVAL; + /* Save the index of the plane with highest zpos */ + if (new_plane_state->zpos > highest_zpos || + (new_plane_state->zpos == highest_zpos && + plane->base.id > highest_id)) { + highest_zpos = new_plane_state->zpos; + highest_id = plane->base.id; + highest_i = i; } - - /* If this plane covers the whole CRTC, no need to check planes underneath */ - if (new_underlying_state->crtc_x <= 0 && - new_underlying_state->crtc_y <= 0 && - new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay && - new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay) - break; } - return 0; + if (highest_i < 0) + return NULL; + + return &state->planes[highest_i]; } -#if defined(CONFIG_DRM_AMD_DC_DCN) +/* + * Use the uniqueness of the plane's (zpos, drm obj ID) combination to iterate + * by descending zpos, as read from the new plane state. This is the same + * ordering as defined by drm_atomic_normalize_zpos(). 
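The helper above walks planes in descending (zpos, object id) order without sorting: each call returns the largest (zpos, id) pair strictly below the one returned last time, so duplicate zpos values — possible here because disabled planes keep a stale normalized_zpos — cannot make the iterator return the same plane twice or loop. A small standalone model of that selection, with illustrative names (struct item, next_desc):

#include <stdio.h>

struct item { unsigned int zpos; unsigned int id; };

/* Lexicographic (zpos, id) comparison: is a < b? */
static int less(const struct item *a, const struct item *b)
{
	return a->zpos < b->zpos || (a->zpos == b->zpos && a->id < b->id);
}

/*
 * Return the index of the largest (zpos, id) strictly below *prev,
 * or -1 when the iteration is exhausted. prev == NULL starts it.
 */
static int next_desc(const struct item *v, int n, const struct item *prev)
{
	int best = -1;

	for (int i = 0; i < n; i++) {
		if (prev && !less(&v[i], prev))
			continue;                /* already returned, or above it */
		if (best < 0 || less(&v[best], &v[i]))
			best = i;                /* new maximum so far */
	}
	return best;
}

int main(void)
{
	/* Duplicate zpos values on purpose: the object id breaks the tie. */
	struct item planes[] = { {1, 31}, {0, 35}, {1, 38}, {2, 41} };
	int n = 4;

	for (int i = next_desc(planes, n, NULL); i >= 0;
	     i = next_desc(planes, n, &planes[i]))
		printf("zpos=%u id=%u\n", planes[i].zpos, planes[i].id);
	return 0;
}

The output is (2,41), (1,38), (1,31), (0,35): strictly descending, with ties resolved by id, which is the ordering the macro that follows exposes to its callers.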
+ */ +#define for_each_oldnew_plane_in_descending_zpos(__state, plane, old_plane_state, new_plane_state) \ + for (struct __drm_planes_state *__i = __get_next_zpos((__state), NULL); \ + __i != NULL; __i = __get_next_zpos((__state), __i)) \ + for_each_if(((plane) = __i->ptr, \ + (void)(plane) /* Only to avoid unused-but-set-variable warning */, \ + (old_plane_state) = __i->old_state, \ + (new_plane_state) = __i->new_state, 1)) + static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) { struct drm_connector *connector; struct drm_connector_state *conn_state, *old_conn_state; struct amdgpu_dm_connector *aconnector = NULL; int i; + for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { if (!conn_state->crtc) conn_state = old_conn_state; @@ -9294,8 +12178,11 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm if (conn_state->crtc != crtc) continue; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); - if (!aconnector->port || !aconnector->mst_port) + if (!aconnector->mst_output_port || !aconnector->mst_root) aconnector = NULL; else break; @@ -9304,9 +12191,195 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm if (!aconnector) return 0; - return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr); + return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr); +} + +/** + * DOC: Cursor Modes - Native vs Overlay + * + * In native mode, the cursor uses a integrated cursor pipe within each DCN hw + * plane. It does not require a dedicated hw plane to enable, but it is + * subjected to the same z-order and scaling as the hw plane. It also has format + * restrictions, a RGB cursor in native mode cannot be enabled within a non-RGB + * hw plane. + * + * In overlay mode, the cursor uses a separate DCN hw plane, and thus has its + * own scaling and z-pos. It also has no blending restrictions. It lends to a + * cursor behavior more akin to a DRM client's expectations. However, it does + * occupy an extra DCN plane, and therefore will only be used if a DCN plane is + * available. + */ + +/** + * dm_crtc_get_cursor_mode() - Determine the required cursor mode on crtc + * @adev: amdgpu device + * @state: DRM atomic state + * @dm_crtc_state: amdgpu state for the CRTC containing the cursor + * @cursor_mode: Returns the required cursor mode on dm_crtc_state + * + * Get whether the cursor should be enabled in native mode, or overlay mode, on + * the dm_crtc_state. + * + * The cursor should be enabled in overlay mode if there exists an underlying + * plane - on which the cursor may be blended - that is either YUV formatted, or + * scaled differently from the cursor. + * + * Since zpos info is required, drm_atomic_normalize_zpos must be called before + * calling this function. + * + * Return: 0 on success, or an error code if getting the cursor plane state + * failed. 
+ */ +static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev, + struct drm_atomic_state *state, + struct dm_crtc_state *dm_crtc_state, + enum amdgpu_dm_cursor_mode *cursor_mode) +{ + struct drm_plane_state *old_plane_state, *plane_state, *cursor_state; + struct drm_crtc_state *crtc_state = &dm_crtc_state->base; + struct drm_plane *plane; + bool consider_mode_change = false; + bool entire_crtc_covered = false; + bool cursor_changed = false; + int underlying_scale_w, underlying_scale_h; + int cursor_scale_w, cursor_scale_h; + int i; + + /* Overlay cursor not supported on HW before DCN + * DCN401 does not have the cursor-on-scaled-plane or cursor-on-yuv-plane restrictions + * as previous DCN generations, so enable native mode on DCN401 in addition to DCE + */ + if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0 || + amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) { + *cursor_mode = DM_CURSOR_NATIVE_MODE; + return 0; + } + + /* Init cursor_mode to be the same as current */ + *cursor_mode = dm_crtc_state->cursor_mode; + + /* + * Cursor mode can change if a plane's format changes, scale changes, is + * enabled/disabled, or z-order changes. + */ + for_each_oldnew_plane_in_state(state, plane, old_plane_state, plane_state, i) { + int new_scale_w, new_scale_h, old_scale_w, old_scale_h; + + /* Only care about planes on this CRTC */ + if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0) + continue; + + if (plane->type == DRM_PLANE_TYPE_CURSOR) + cursor_changed = true; + + if (drm_atomic_plane_enabling(old_plane_state, plane_state) || + drm_atomic_plane_disabling(old_plane_state, plane_state) || + old_plane_state->fb->format != plane_state->fb->format) { + consider_mode_change = true; + break; + } + + dm_get_plane_scale(plane_state, &new_scale_w, &new_scale_h); + dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h); + if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) { + consider_mode_change = true; + break; + } + } + + if (!consider_mode_change && !crtc_state->zpos_changed) + return 0; + + /* + * If no cursor change on this CRTC, and not enabled on this CRTC, then + * no need to set cursor mode. This avoids needlessly locking the cursor + * state. 
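dm_crtc_get_cursor_mode(), documented above, resolves the native-vs-overlay question by looking at the planes the cursor would blend with: a YUV-formatted underlying plane or one scaled differently from the cursor pushes the cursor onto its own overlay pipe, while native mode is kept when an underlying plane covers the whole CRTC. A hedged, self-contained model of that decision, simplified to a single underlying plane — the enum, struct plane_info and pick_cursor_mode() are illustrative, not the driver's exact logic:

#include <stdbool.h>
#include <stdio.h>

enum cursor_mode { CURSOR_NATIVE, CURSOR_OVERLAY };

struct plane_info {
	bool yuv;          /* video (YUV) pixel format? */
	int scale_w;       /* per-mille, dest/src * 1000 */
	int scale_h;
	bool covers_crtc;  /* plane spans the whole CRTC area */
};

/* Decide the cursor mode against the topmost underlying plane. */
static enum cursor_mode pick_cursor_mode(const struct plane_info *under,
					 int cursor_scale_w, int cursor_scale_h)
{
	if (under->yuv)
		return CURSOR_OVERLAY;   /* RGB cursor can't ride a YUV pipe */
	if (under->scale_w != cursor_scale_w || under->scale_h != cursor_scale_h)
		return CURSOR_OVERLAY;   /* cursor would inherit the wrong scale */
	if (!under->covers_crtc)
		return CURSOR_OVERLAY;   /* keep the cursor visible over gaps */
	return CURSOR_NATIVE;
}

int main(void)
{
	struct plane_info video = { .yuv = true,  .scale_w = 1000, .scale_h = 1000, .covers_crtc = true };
	struct plane_info desk  = { .yuv = false, .scale_w = 1000, .scale_h = 1000, .covers_crtc = true };

	printf("video plane   -> %s\n",
	       pick_cursor_mode(&video, 1000, 1000) == CURSOR_OVERLAY ? "overlay" : "native");
	printf("desktop plane -> %s\n",
	       pick_cursor_mode(&desk, 1000, 1000) == CURSOR_OVERLAY ? "overlay" : "native");
	return 0;
}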
+ */ + if (!cursor_changed && + !(drm_plane_mask(crtc_state->crtc->cursor) & crtc_state->plane_mask)) { + return 0; + } + + cursor_state = drm_atomic_get_plane_state(state, + crtc_state->crtc->cursor); + if (IS_ERR(cursor_state)) + return PTR_ERR(cursor_state); + + /* Cursor is disabled */ + if (!cursor_state->fb) + return 0; + + /* For all planes in descending z-order (all of which are below cursor + * as per zpos definitions), check their scaling and format + */ + for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, plane_state) { + + /* Only care about non-cursor planes on this CRTC */ + if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0 || + plane->type == DRM_PLANE_TYPE_CURSOR) + continue; + + /* Underlying plane is YUV format - use overlay cursor */ + if (amdgpu_dm_plane_is_video_format(plane_state->fb->format->format)) { + *cursor_mode = DM_CURSOR_OVERLAY_MODE; + return 0; + } + + dm_get_plane_scale(plane_state, + &underlying_scale_w, &underlying_scale_h); + dm_get_plane_scale(cursor_state, + &cursor_scale_w, &cursor_scale_h); + + /* Underlying plane has different scale - use overlay cursor */ + if (cursor_scale_w != underlying_scale_w && + cursor_scale_h != underlying_scale_h) { + *cursor_mode = DM_CURSOR_OVERLAY_MODE; + return 0; + } + + /* If this plane covers the whole CRTC, no need to check planes underneath */ + if (plane_state->crtc_x <= 0 && plane_state->crtc_y <= 0 && + plane_state->crtc_x + plane_state->crtc_w >= crtc_state->mode.hdisplay && + plane_state->crtc_y + plane_state->crtc_h >= crtc_state->mode.vdisplay) { + entire_crtc_covered = true; + break; + } + } + + /* If planes do not cover the entire CRTC, use overlay mode to enable + * cursor over holes + */ + if (entire_crtc_covered) + *cursor_mode = DM_CURSOR_NATIVE_MODE; + else + *cursor_mode = DM_CURSOR_OVERLAY_MODE; + + return 0; +} + +static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev, + struct drm_atomic_state *state, + struct drm_crtc_state *crtc_state) +{ + struct drm_plane *plane; + struct drm_plane_state *new_plane_state, *old_plane_state; + + drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) { + new_plane_state = drm_atomic_get_plane_state(state, plane); + old_plane_state = drm_atomic_get_plane_state(state, plane); + + if (IS_ERR(new_plane_state) || IS_ERR(old_plane_state)) { + drm_err(dev, "Failed to get plane state for plane %s\n", plane->name); + return false; + } + + if (old_plane_state->fb && new_plane_state->fb && + get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb)) + return true; + } + + return false; } -#endif /** * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. 
@@ -9344,20 +12417,21 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct drm_plane *plane; - struct drm_plane_state *old_plane_state, *new_plane_state; + struct drm_plane_state *old_plane_state, *new_plane_state, *new_cursor_state; enum dc_status status; int ret, i; bool lock_and_validation_needed = false; + bool is_top_most_overlay = true; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; -#if defined(CONFIG_DRM_AMD_DC_DCN) - struct dsc_mst_fairness_vars vars[MAX_PIPES]; -#endif + struct drm_dp_mst_topology_mgr *mgr; + struct drm_dp_mst_topology_state *mst_state; + struct dsc_mst_fairness_vars vars[MAX_PIPES] = {0}; trace_amdgpu_dm_atomic_check_begin(state); ret = drm_atomic_helper_check_modeset(dev, state); if (ret) { - DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n"); + drm_dbg_atomic(dev, "drm_atomic_helper_check_modeset() failed\n"); goto fail; } @@ -9372,29 +12446,27 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); if (IS_ERR(new_crtc_state)) { - DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n"); + drm_dbg_atomic(dev, "drm_atomic_get_crtc_state() failed\n"); ret = PTR_ERR(new_crtc_state); goto fail; } - if (dm_old_con_state->abm_level != - dm_new_con_state->abm_level) + if (dm_old_con_state->abm_level != dm_new_con_state->abm_level || + dm_old_con_state->scaling != dm_new_con_state->scaling) new_crtc_state->connectors_changed = true; } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dc_resource_is_dsc_encoding_supported(dc)) { for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { ret = add_affected_mst_dsc_crtcs(state, crtc); if (ret) { - DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n"); + drm_dbg_atomic(dev, "add_affected_mst_dsc_crtcs() failed\n"); goto fail; } } } } -#endif for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); @@ -9406,7 +12478,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); if (ret) { - DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n"); + drm_dbg_atomic(dev, "amdgpu_dm_verify_lut_sizes() failed\n"); goto fail; } @@ -9415,13 +12487,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, ret = drm_atomic_add_affected_connectors(state, crtc); if (ret) { - DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n"); + drm_dbg_atomic(dev, "drm_atomic_add_affected_connectors() failed\n"); goto fail; } ret = drm_atomic_add_affected_planes(state, crtc); if (ret) { - DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n"); + drm_dbg_atomic(dev, "drm_atomic_add_affected_planes() failed\n"); goto fail; } @@ -9460,7 +12532,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (IS_ERR(new_plane_state)) { ret = PTR_ERR(new_plane_state); - DRM_DEBUG_DRIVER("new_plane_state is BAD\n"); + drm_dbg_atomic(dev, "new_plane_state is BAD\n"); goto fail; } } @@ -9472,17 +12544,49 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in * atomic state, so call drm helper to normalize zpos. 
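The zpos normalization called for above (drm_atomic_normalize_zpos()) collapses whatever zpos values userspace assigned to the planes on a CRTC into a dense 0..n-1 normalized_zpos ordering, with ties broken by plane object id; that is the ordering the descending-zpos iteration and the cursor checks in this patch rely on. A compact model of that normalization is sketched below — struct plane and normalize_zpos() are illustrative, and the real helper works per CRTC on the planes in the atomic state.

#include <stdio.h>
#include <stdlib.h>

struct plane { unsigned int id; unsigned int zpos; unsigned int normalized_zpos; };

/* Sort by (zpos, id) ascending, then hand out dense 0..n-1 values. */
static int cmp_zpos(const void *a, const void *b)
{
	const struct plane *pa = a, *pb = b;

	if (pa->zpos != pb->zpos)
		return pa->zpos < pb->zpos ? -1 : 1;
	return pa->id < pb->id ? -1 : 1; /* ids are unique, so this is a total order */
}

static void normalize_zpos(struct plane *planes, int n)
{
	qsort(planes, n, sizeof(*planes), cmp_zpos);
	for (int i = 0; i < n; i++)
		planes[i].normalized_zpos = i;
}

int main(void)
{
	struct plane planes[] = { { .id = 41, .zpos = 7 },
				  { .id = 35, .zpos = 0 },
				  { .id = 38, .zpos = 7 } };

	normalize_zpos(planes, 3);
	for (int i = 0; i < 3; i++)
		printf("plane %u: zpos=%u -> normalized=%u\n",
		       planes[i].id, planes[i].zpos, planes[i].normalized_zpos);
	return 0;
}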
*/ - drm_atomic_normalize_zpos(dev, state); + ret = drm_atomic_normalize_zpos(dev, state); + if (ret) { + drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n"); + goto fail; + } + + /* + * Determine whether cursors on each CRTC should be enabled in native or + * overlay mode. + */ + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + + ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state, + &dm_new_crtc_state->cursor_mode); + if (ret) { + drm_dbg(dev, "Failed to determine cursor mode\n"); + goto fail; + } + + /* + * If overlay cursor is needed, DC cannot go through the + * native cursor update path. All enabled planes on the CRTC + * need to be added for DC to not disable a plane by mistake + */ + if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) { + ret = drm_atomic_add_affected_planes(state, crtc); + if (ret) + goto fail; + } + } /* Remove exiting planes if they are modified */ - for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { + for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) { + ret = dm_update_plane_state(dc, state, plane, old_plane_state, new_plane_state, false, - &lock_and_validation_needed); + &lock_and_validation_needed, + &is_top_most_overlay); if (ret) { - DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); + drm_dbg_atomic(dev, "dm_update_plane_state() failed\n"); goto fail; } } @@ -9495,7 +12599,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, false, &lock_and_validation_needed); if (ret) { - DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n"); + drm_dbg_atomic(dev, "DISABLE: dm_update_crtc_state() failed\n"); goto fail; } } @@ -9508,51 +12612,89 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, true, &lock_and_validation_needed); if (ret) { - DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n"); + drm_dbg_atomic(dev, "ENABLE: dm_update_crtc_state() failed\n"); goto fail; } } /* Add new/modified planes */ - for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { + for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) { ret = dm_update_plane_state(dc, state, plane, old_plane_state, new_plane_state, true, - &lock_and_validation_needed); + &lock_and_validation_needed, + &is_top_most_overlay); if (ret) { - DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); + drm_dbg_atomic(dev, "dm_update_plane_state() failed\n"); goto fail; } } -#if defined(CONFIG_DRM_AMD_DC_DCN) +#if defined(CONFIG_DRM_AMD_DC_FP) if (dc_resource_is_dsc_encoding_supported(dc)) { - if (!pre_validate_dsc(state, &dm_state, vars)) { - ret = -EINVAL; + ret = pre_validate_dsc(state, &dm_state, vars); + if (ret != 0) goto fail; - } } #endif /* Run this here since we want to validate the streams we created */ ret = drm_atomic_helper_check_planes(dev, state); if (ret) { - DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n"); + drm_dbg_atomic(dev, "drm_atomic_helper_check_planes() failed\n"); goto fail; } for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (dm_new_crtc_state->mpo_requested) - DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc); + drm_dbg_atomic(dev, "MPO enablement requested on crtc:[%p]\n", crtc); } - /* Check cursor planes scaling */ + /* Check cursor restrictions */ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { - ret = 
dm_check_crtc_cursor(state, crtc, new_crtc_state); + enum amdgpu_dm_cursor_mode required_cursor_mode; + int is_rotated, is_scaled; + + /* Overlay cusor not subject to native cursor restrictions */ + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) + continue; + + /* Check if rotation or scaling is enabled on DCN401 */ + if ((drm_plane_mask(crtc->cursor) & new_crtc_state->plane_mask) && + amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) { + new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor); + + is_rotated = new_cursor_state && + ((new_cursor_state->rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0); + is_scaled = new_cursor_state && ((new_cursor_state->src_w >> 16 != new_cursor_state->crtc_w) || + (new_cursor_state->src_h >> 16 != new_cursor_state->crtc_h)); + + if (is_rotated || is_scaled) { + drm_dbg_driver( + crtc->dev, + "[CRTC:%d:%s] cannot enable hardware cursor due to rotation/scaling\n", + crtc->base.id, crtc->name); + ret = -EINVAL; + goto fail; + } + } + + /* If HW can only do native cursor, check restrictions again */ + ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state, + &required_cursor_mode); if (ret) { - DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n"); + drm_dbg_driver(crtc->dev, + "[CRTC:%d:%s] Checking cursor mode failed\n", + crtc->base.id, crtc->name); + goto fail; + } else if (required_cursor_mode == DM_CURSOR_OVERLAY_MODE) { + drm_dbg_driver(crtc->dev, + "[CRTC:%d:%s] Cannot enable native cursor due to scaling or YUV restrictions\n", + crtc->base.id, crtc->name); + ret = -EINVAL; goto fail; } } @@ -9599,6 +12741,26 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, lock_and_validation_needed = true; } + /* set the slot info for each mst_state based on the link encoding format */ + for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { + struct amdgpu_dm_connector *aconnector; + struct drm_connector *connector; + struct drm_connector_list_iter iter; + u8 link_coding_cap; + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { + if (connector->index == mst_state->mgr->conn_base_id) { + aconnector = to_amdgpu_dm_connector(connector); + link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); + drm_dp_mst_update_slots(mst_state, link_coding_cap); + + break; + } + } + drm_connector_list_iter_end(&iter); + } + /** * Streams and planes are reset when there are changes that affect * bandwidth. 
Anything that affects bandwidth needs to go through @@ -9615,29 +12777,32 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (lock_and_validation_needed) { ret = dm_atomic_get_state(state, &dm_state); if (ret) { - DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n"); + drm_dbg_atomic(dev, "dm_atomic_get_state() failed\n"); goto fail; } ret = do_aquire_global_lock(dev, state); if (ret) { - DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n"); + drm_dbg_atomic(dev, "do_aquire_global_lock() failed\n"); goto fail; } -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) { - DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); - ret = -EINVAL; - goto fail; +#if defined(CONFIG_DRM_AMD_DC_FP) + if (dc_resource_is_dsc_encoding_supported(dc)) { + ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); + if (ret) { + drm_dbg_atomic(dev, "MST_DSC compute_mst_dsc_configs_for_state() failed\n"); + ret = -EINVAL; + goto fail; + } } +#endif ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); if (ret) { - DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n"); + drm_dbg_atomic(dev, "dm_update_mst_vcpi_slots_for_dsc() failed\n"); goto fail; } -#endif /* * Perform validation of MST topology in the state: @@ -9647,12 +12812,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, */ ret = drm_dp_mst_atomic_check(state); if (ret) { - DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n"); + drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n"); goto fail; } - status = dc_validate_global_state(dc, dm_state->context, true); + status = dc_validate_global_state(dc, dm_state->context, DC_VALIDATE_MODE_ONLY); if (status != DC_OK) { - DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)", + drm_dbg_atomic(dev, "DC global validation failure: %s (%d)", dc_status_to_str(status), status); ret = -EINVAL; goto fail; @@ -9679,7 +12844,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, int j = state->num_private_objs-1; dm_atomic_destroy_state(obj, - state->private_objs[i].state); + state->private_objs[i].state_to_destroy); /* If i is not at the end of the array then the * last element needs to be moved to where i was @@ -9690,7 +12855,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, state->private_objs[j]; state->private_objs[j].ptr = NULL; - state->private_objs[j].state = NULL; + state->private_objs[j].state_to_destroy = NULL; state->private_objs[j].old_state = NULL; state->private_objs[j].new_state = NULL; @@ -9701,13 +12866,26 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, } /* Store the overall update type for use later in atomic check. */ - for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) { + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + /* + * Only allow async flips for fast updates that don't change + * the FB pitch, the DCC state, rotation, mem_type, etc. + */ + if (new_crtc_state->async_flip && + (lock_and_validation_needed || + amdgpu_dm_crtc_mem_type_changed(dev, state, new_crtc_state))) { + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] async flips are only supported for fast updates\n", + crtc->base.id, crtc->name); + ret = -EINVAL; + goto fail; + } + dm_new_crtc_state->update_type = lock_and_validation_needed ? 
- UPDATE_TYPE_FULL : - UPDATE_TYPE_FAST; + UPDATE_TYPE_FULL : UPDATE_TYPE_FAST; } /* Must be success */ @@ -9719,40 +12897,21 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, fail: if (ret == -EDEADLK) - DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n"); + drm_dbg_atomic(dev, "Atomic check stopped to avoid deadlock.\n"); else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS) - DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n"); + drm_dbg_atomic(dev, "Atomic check stopped due to signal.\n"); else - DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret); + drm_dbg_atomic(dev, "Atomic check failed with err: %d\n", ret); trace_amdgpu_dm_atomic_check_finish(state, ret); return ret; } -static bool is_dp_capable_without_timing_msa(struct dc *dc, - struct amdgpu_dm_connector *amdgpu_dm_connector) -{ - uint8_t dpcd_data; - bool capable = false; - - if (amdgpu_dm_connector->dc_link && - dm_helpers_dp_read_dpcd( - NULL, - amdgpu_dm_connector->dc_link, - DP_DOWN_STREAM_PORT_COUNT, - &dpcd_data, - sizeof(dpcd_data))) { - capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false; - } - - return capable; -} - static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, unsigned int offset, unsigned int total_length, - uint8_t *data, + u8 *data, unsigned int length, struct amdgpu_hdmi_vsdb_info *vsdb) { @@ -9777,9 +12936,9 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, input->cea_total_length = total_length; memcpy(input->payload, data, length); - res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd); + res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); if (!res) { - DRM_ERROR("EDID CEA parser failed\n"); + drm_err(adev_to_drm(dm->adev), "EDID CEA parser failed\n"); return false; } @@ -9787,7 +12946,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, if (output->type == DMUB_CMD__EDID_CEA_ACK) { if (!output->ack.success) { - DRM_ERROR("EDID CEA ack failed at offset %d\n", + drm_err(adev_to_drm(dm->adev), "EDID CEA ack failed at offset %d\n", output->ack.offset); } } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) { @@ -9799,7 +12958,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate; vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate; } else { - DRM_WARN("Unknown EDID CEA parser results\n"); + drm_warn(adev_to_drm(dm->adev), "Unknown EDID CEA parser results\n"); return false; } @@ -9807,7 +12966,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, } static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm, - uint8_t *edid_ext, int len, + u8 *edid_ext, int len, struct amdgpu_hdmi_vsdb_info *vsdb_info) { int i; @@ -9848,7 +13007,7 @@ static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm, } static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm, - uint8_t *edid_ext, int len, + u8 *edid_ext, int len, struct amdgpu_hdmi_vsdb_info *vsdb_info) { int i; @@ -9864,21 +13023,104 @@ static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm, } static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, - uint8_t *edid_ext, int len, + u8 *edid_ext, int len, struct amdgpu_hdmi_vsdb_info *vsdb_info) { struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); + bool ret; + mutex_lock(&adev->dm.dc_lock); if (adev->dm.dmub_srv) - return parse_edid_cea_dmub(&adev->dm, edid_ext, len, 
vsdb_info); + ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info); else - return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info); + ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info); + mutex_unlock(&adev->dm.dc_lock); + return ret; +} + +static void parse_edid_displayid_vrr(struct drm_connector *connector, + const struct edid *edid) +{ + u8 *edid_ext = NULL; + int i; + int j = 0; + u16 min_vfreq; + u16 max_vfreq; + + if (edid == NULL || edid->extensions == 0) + return; + + /* Find DisplayID extension */ + for (i = 0; i < edid->extensions; i++) { + edid_ext = (void *)(edid + (i + 1)); + if (edid_ext[0] == DISPLAYID_EXT) + break; + } + + if (edid_ext == NULL) + return; + + while (j < EDID_LENGTH) { + /* Get dynamic video timing range from DisplayID if available */ + if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25 && + (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) { + min_vfreq = edid_ext[j+9]; + if (edid_ext[j+1] & 7) + max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8); + else + max_vfreq = edid_ext[j+10]; + + if (max_vfreq && min_vfreq) { + connector->display_info.monitor_range.max_vfreq = max_vfreq; + connector->display_info.monitor_range.min_vfreq = min_vfreq; + + return; + } + } + j++; + } +} + +static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, + const struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) +{ + u8 *edid_ext = NULL; + int i; + int j = 0; + + if (edid == NULL || edid->extensions == 0) + return -ENODEV; + + /* Find DisplayID extension */ + for (i = 0; i < edid->extensions; i++) { + edid_ext = (void *)(edid + (i + 1)); + if (edid_ext[0] == DISPLAYID_EXT) + break; + } + + while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) { + struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j]; + unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]); + + if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID && + amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) { + vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? true : false; + vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3; + DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode); + + return true; + } + j++; + } + + return false; } static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, - struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) + const struct edid *edid, + struct amdgpu_hdmi_vsdb_info *vsdb_info) { - uint8_t *edid_ext = NULL; + u8 *edid_ext = NULL; int i; bool valid_vsdb_found = false; @@ -9910,7 +13152,7 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, * amdgpu_dm_update_freesync_caps - Update Freesync capabilities * * @connector: Connector to query. - * @edid: EDID from monitor + * @drm_edid: DRM EDID from monitor * * Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep * track of some of the display information in the internal data struct used by @@ -9918,24 +13160,21 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, * FreeSync parameters. 
*/ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, - struct edid *edid) + const struct drm_edid *drm_edid) { int i = 0; - struct detailed_timing *timing; - struct detailed_non_pixel *data; - struct detailed_data_monitor_range *range; struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct dm_connector_state *dm_con_state = NULL; struct dc_sink *sink; - - struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = drm_to_adev(connector->dev); struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; + const struct edid *edid; bool freesync_capable = false; + enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; if (!connector->state) { - DRM_ERROR("%s - Connector has no state", __func__); + drm_err(adev_to_drm(adev), "%s - Connector has no state", __func__); goto update; } @@ -9943,14 +13182,13 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, amdgpu_dm_connector->dc_sink : amdgpu_dm_connector->dc_em_sink; - if (!edid || !sink) { + drm_edid_connector_update(connector, drm_edid); + + if (!drm_edid || !sink) { dm_con_state = to_dm_connector_state(connector->state); amdgpu_dm_connector->min_vfreq = 0; amdgpu_dm_connector->max_vfreq = 0; - amdgpu_dm_connector->pixel_clock_mhz = 0; - connector->display_info.monitor_range.min_vfreq = 0; - connector->display_info.monitor_range.max_vfreq = 0; freesync_capable = false; goto update; @@ -9958,62 +13196,57 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, dm_con_state = to_dm_connector_state(connector->state); - if (!adev->dm.freesync_module) + if (!adev->dm.freesync_module || !dc_supports_vrr(sink->ctx->dce_version)) goto update; - if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT - || sink->sink_signal == SIGNAL_TYPE_EDP) { - bool edid_check_required = false; + edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() + + /* Some eDP panels only have the refresh rate range info in DisplayID */ + if ((connector->display_info.monitor_range.min_vfreq == 0 || + connector->display_info.monitor_range.max_vfreq == 0)) + parse_edid_displayid_vrr(connector, edid); - if (edid) { - edid_check_required = is_dp_capable_without_timing_msa( - adev->dm.dc, - amdgpu_dm_connector); + if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || + sink->sink_signal == SIGNAL_TYPE_EDP)) { + if (amdgpu_dm_connector->dc_link && + amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) { + amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; + amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) + freesync_capable = true; } - if (edid_check_required == true && (edid->version > 1 || - (edid->version == 1 && edid->revision > 1))) { - for (i = 0; i < 4; i++) { + parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); - timing = &edid->detailed_timings[i]; - data = &timing->data.other_data; - range = &data->data.range; - /* - * Check if monitor has continuous frequency mode - */ - if (data->type != EDID_DETAIL_MONITOR_RANGE) - continue; - /* - * Check for flag range limits only. If flag == 1 then - * no additional timing information provided. 
- * Default GTF, GTF Secondary curve and CVT are not - * supported - */ - if (range->flags != 1) - continue; - - amdgpu_dm_connector->min_vfreq = range->min_vfreq; - amdgpu_dm_connector->max_vfreq = range->max_vfreq; - amdgpu_dm_connector->pixel_clock_mhz = - range->pixel_clock_mhz * 10; + if (vsdb_info.replay_mode) { + amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode; + amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version; + amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP; + } - connector->display_info.monitor_range.min_vfreq = range->min_vfreq; - connector->display_info.monitor_range.max_vfreq = range->max_vfreq; + } else if (drm_edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { + i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); + if (i >= 0 && vsdb_info.freesync_supported) { + amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; + amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) + freesync_capable = true; - break; - } + connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; + connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; + } + } - if (amdgpu_dm_connector->max_vfreq - - amdgpu_dm_connector->min_vfreq > 10) { + if (amdgpu_dm_connector->dc_link) + as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link); - freesync_capable = true; - } - } - } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { + if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) { i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); - if (i >= 0 && vsdb_info.freesync_supported) { - timing = &edid->detailed_timings[i]; - data = &timing->data.other_data; + if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) { + + amdgpu_dm_connector->pack_sdp_v1_3 = true; + amdgpu_dm_connector->as_type = as_type; + amdgpu_dm_connector->vsdb_info = vsdb_info; amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; @@ -10029,6 +13262,12 @@ update: if (dm_con_state) dm_con_state->freesync_capable = freesync_capable; + if (connector->state && amdgpu_dm_connector->dc_link && !freesync_capable && + amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported) { + amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported = false; + amdgpu_dm_connector->dc_link->replay_settings.replay_feature_enabled = false; + } + if (connector->vrr_capable_property) drm_connector_set_vrr_capable_property(connector, freesync_capable); @@ -10053,15 +13292,24 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev) mutex_unlock(&adev->dm.dc_lock); } +static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc) +{ + if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter) + dc_exit_ips_for_hw_access(dc); +} + void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, - uint32_t value, const char *func_name) + u32 value, const char *func_name) { #ifdef DM_CHECK_ADDR_0 if (address == 0) { - DC_ERR("invalid register write. address = 0"); + drm_err(adev_to_drm(ctx->driver_context), + "invalid register write. 
address = 0"); return; } #endif + + amdgpu_dm_exit_ips_for_hw_access(ctx->dc); cgs_write_register(ctx->cgs_device, address, value); trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value); } @@ -10069,10 +13317,11 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, const char *func_name) { - uint32_t value; + u32 value; #ifdef DM_CHECK_ADDR_0 if (address == 0) { - DC_ERR("invalid register read; address = 0\n"); + drm_err(adev_to_drm(ctx->driver_context), + "invalid register read; address = 0\n"); return 0; } #endif @@ -10084,6 +13333,8 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, return 0; } + amdgpu_dm_exit_ips_for_hw_access(ctx->dc); + value = cgs_read_register(ctx->cgs_device, address); trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); @@ -10091,98 +13342,176 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, return value; } -static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, - struct dc_context *ctx, - uint8_t status_type, - uint32_t *operation_result) +int amdgpu_dm_process_dmub_aux_transfer_sync( + struct dc_context *ctx, + unsigned int link_index, + struct aux_payload *payload, + enum aux_return_code_type *operation_result) { struct amdgpu_device *adev = ctx->driver_context; - int return_status = -1; struct dmub_notification *p_notify = adev->dm.dmub_notify; + int ret = -1; - if (is_cmd_aux) { - if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) { - return_status = p_notify->aux_reply.length; - *operation_result = p_notify->result; - } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) { - *operation_result = AUX_RET_ERROR_TIMEOUT; - } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) { - *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; - } else { - *operation_result = AUX_RET_ERROR_UNKNOWN; - } - } else { - if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) { - return_status = 0; - *operation_result = p_notify->sc_status; - } else { - *operation_result = SET_CONFIG_UNKNOWN_ERROR; + mutex_lock(&adev->dm.dpia_aux_lock); + if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) { + *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; + goto out; + } + + if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) { + drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!"); + *operation_result = AUX_RET_ERROR_TIMEOUT; + goto out; + } + + if (p_notify->result != AUX_RET_SUCCESS) { + /* + * Transient states before tunneling is enabled could + * lead to this error. We can ignore this for now. + */ + if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) { + drm_warn(adev_to_drm(adev), "DPIA AUX failed on 0x%x(%d), error %d\n", + payload->address, payload->length, + p_notify->result); } + *operation_result = p_notify->result; + goto out; } - return return_status; + payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF; + if (adev->dm.dmub_notify->aux_reply.command & 0xF0) + /* The reply is stored in the top nibble of the command. 
*/ + payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF; + + /*write req may receive a byte indicating partially written number as well*/ + if (p_notify->aux_reply.length) + memcpy(payload->data, p_notify->aux_reply.data, + p_notify->aux_reply.length); + + /* success */ + ret = p_notify->aux_reply.length; + *operation_result = p_notify->result; +out: + reinit_completion(&adev->dm.dmub_aux_transfer_done); + mutex_unlock(&adev->dm.dpia_aux_lock); + return ret; +} + +static void abort_fused_io( + struct dc_context *ctx, + const struct dmub_cmd_fused_request *request +) +{ + union dmub_rb_cmd command = { 0 }; + struct dmub_rb_cmd_fused_io *io = &command.fused_io; + + io->header.type = DMUB_CMD__FUSED_IO; + io->header.sub_type = DMUB_CMD__FUSED_IO_ABORT; + io->header.payload_bytes = sizeof(*io) - sizeof(io->header); + io->request = *request; + dm_execute_dmub_cmd(ctx, &command, DM_DMUB_WAIT_TYPE_NO_WAIT); } -int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx, - unsigned int link_index, void *cmd_payload, void *operation_result) +static bool execute_fused_io( + struct amdgpu_device *dev, + struct dc_context *ctx, + union dmub_rb_cmd *commands, + uint8_t count, + uint32_t timeout_us +) { - struct amdgpu_device *adev = ctx->driver_context; - int ret = 0; + const uint8_t ddc_line = commands[0].fused_io.request.u.aux.ddc_line; - if (is_cmd_aux) { - dc_process_dmub_aux_transfer_async(ctx->dc, - link_index, (struct aux_payload *)cmd_payload); - } else if (dc_process_dmub_set_config_async(ctx->dc, link_index, - (struct set_config_cmd_payload *)cmd_payload, - adev->dm.dmub_notify)) { - return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, - ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS, - (uint32_t *)operation_result); - } + if (ddc_line >= ARRAY_SIZE(dev->dm.fused_io)) + return false; - ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ); - if (ret == 0) { - DRM_ERROR("wait_for_completion_timeout timeout!"); - return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, - ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT, - (uint32_t *)operation_result); - } + struct fused_io_sync *sync = &dev->dm.fused_io[ddc_line]; + struct dmub_rb_cmd_fused_io *first = &commands[0].fused_io; + const bool result = dm_execute_dmub_cmd_list(ctx, count, commands, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) + && first->header.ret_status + && first->request.status == FUSED_REQUEST_STATUS_SUCCESS; - if (is_cmd_aux) { - if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) { - struct aux_payload *payload = (struct aux_payload *)cmd_payload; + if (!result) + return false; - payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; - if (!payload->write && adev->dm.dmub_notify->aux_reply.length && - payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) { - memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data, - adev->dm.dmub_notify->aux_reply.length); - } + while (wait_for_completion_timeout(&sync->replied, usecs_to_jiffies(timeout_us))) { + reinit_completion(&sync->replied); + + struct dmub_cmd_fused_request *reply = (struct dmub_cmd_fused_request *) sync->reply_data; + + static_assert(sizeof(*reply) <= sizeof(sync->reply_data), "Size mismatch"); + + if (reply->identifier == first->request.identifier) { + first->request = *reply; + return true; } } - return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, - ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS, - (uint32_t *)operation_result); + reinit_completion(&sync->replied); + first->request.status = 
FUSED_REQUEST_STATUS_TIMEOUT; + abort_fused_io(ctx, &first->request); + return false; } -/* - * Check whether seamless boot is supported. - * - * So far we only support seamless boot on CHIP_VANGOGH. - * If everything goes well, we may consider expanding - * seamless boot to other ASICs. - */ -bool check_seamless_boot_capability(struct amdgpu_device *adev) +bool amdgpu_dm_execute_fused_io( + struct amdgpu_device *dev, + struct dc_link *link, + union dmub_rb_cmd *commands, + uint8_t count, + uint32_t timeout_us) { - switch (adev->asic_type) { - case CHIP_VANGOGH: - if (!adev->mman.keep_stolen_vga_memory) - return true; - break; - default: - break; + struct amdgpu_display_manager *dm = &dev->dm; + + mutex_lock(&dm->dpia_aux_lock); + + const bool result = execute_fused_io(dev, link->ctx, commands, count, timeout_us); + + mutex_unlock(&dm->dpia_aux_lock); + return result; +} + +int amdgpu_dm_process_dmub_set_config_sync( + struct dc_context *ctx, + unsigned int link_index, + struct set_config_cmd_payload *payload, + enum set_config_status *operation_result) +{ + struct amdgpu_device *adev = ctx->driver_context; + bool is_cmd_complete; + int ret; + + mutex_lock(&adev->dm.dpia_aux_lock); + is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc, + link_index, payload, adev->dm.dmub_notify); + + if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) { + ret = 0; + *operation_result = adev->dm.dmub_notify->sc_status; + } else { + drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!"); + ret = -1; + *operation_result = SET_CONFIG_UNKNOWN_ERROR; } - return false; + if (!is_cmd_complete) + reinit_completion(&adev->dm.dmub_aux_transfer_done); + mutex_unlock(&adev->dm.dpia_aux_lock); + return ret; +} + +bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) +{ + return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type); +} + +bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) +{ + return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type); +} + +void dm_acpi_process_phy_transition_interlock( + const struct dc_context *ctx, + struct dm_process_phy_transition_init_params process_phy_transition_init_params) +{ + // Not yet implemented } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index b5ce15c43bcc..ef97cede9926 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved. 
* @@ -31,6 +32,8 @@ #include <drm/drm_connector.h> #include <drm/drm_crtc.h> #include <drm/drm_plane.h> +#include "link_service_types.h" +#include <drm/drm_writeback.h> /* * This file contains the definition for amdgpu_display_manager @@ -48,14 +51,15 @@ #define AMDGPU_DM_MAX_NUM_EDP 2 -#define AMDGPU_DMUB_NOTIFICATION_MAX 5 +#define AMDGPU_DMUB_NOTIFICATION_MAX 8 -/* - * DMUB Async to Sync Mechanism Status - */ -#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1 -#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2 -#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3 +#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A +#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40 +#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3 0x3 + +#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL) + +#define AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS 1500 /* #include "include/amdgpu_dal_power_if.h" #include "amdgpu_dm_irq.h" @@ -64,8 +68,11 @@ #include "irq_types.h" #include "signal_types.h" #include "amdgpu_dm_crc.h" +#include "mod_info_packet.h" struct aux_payload; +struct set_config_cmd_payload; enum aux_return_code_type; +enum set_config_status; /* Forward declarations */ struct amdgpu_device; @@ -76,6 +83,13 @@ struct amdgpu_bo; struct dmub_srv; struct dc_plane_state; struct dmub_notification; +struct dmub_cmd_fused_request; + +struct amd_vsdb_block { + unsigned char ieee_id[3]; + unsigned char version; + unsigned char feature_caps; +}; struct common_irq_params { struct amdgpu_device *adev; @@ -127,6 +141,46 @@ struct vblank_control_work { }; /** + * struct idle_workqueue - Work data for periodic action in idle + * @work: Kernel work data for the work event + * @dm: amdgpu display manager device + * @enable: true if idle worker is enabled + * @running: true if idle worker is running + */ +struct idle_workqueue { + struct work_struct work; + struct amdgpu_display_manager *dm; + bool enable; + bool running; +}; + +/** + * struct vupdate_offload_work - Work data for offloading task from vupdate handler + * @work: Kernel work data for the work event + * @adev: amdgpu_device back pointer + * @stream: DC stream associated with the crtc + * @adjust: DC CRTC timing adjust to be applied to the crtc + */ +struct vupdate_offload_work { + struct work_struct work; + struct amdgpu_device *adev; + struct dc_stream_state *stream; + struct dc_crtc_timing_adjust *adjust; +}; + +#define MAX_LUMINANCE_DATA_POINTS 99 + +/** + * struct amdgpu_dm_luminance_data - Custom luminance data + * @luminance: Luminance in percent + * @input_signal: Input signal in range 0-255 + */ +struct amdgpu_dm_luminance_data { + u8 luminance; + u8 input_signal; +} __packed; + +/** * struct amdgpu_dm_backlight_caps - Information about backlight * * Describe the backlight support for ACPI or eDP AUX. @@ -162,6 +216,27 @@ struct amdgpu_dm_backlight_caps { * @aux_support: Describes if the display supports AUX backlight. */ bool aux_support; + /** + * @brightness_mask: After deriving brightness, OR it with this mask. + * Workaround for panels with issues with certain brightness values. 
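	 * (Editor's illustrative example, not part of the patch: a hypothetical
	 * panel that misbehaves when the derived brightness is 0 could set
	 * brightness_mask to 0x1, so that brightness |= brightness_mask always
	 * yields a non-zero value.)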
+ */ + u32 brightness_mask; + /** + * @ac_level: the default brightness if booted on AC + */ + u8 ac_level; + /** + * @dc_level: the default brightness if booted on DC + */ + u8 dc_level; + /** + * @data_points: the number of custom luminance data points + */ + u8 data_points; + /** + * @luminance_data: custom luminance data + */ + struct amdgpu_dm_luminance_data luminance_data[MAX_LUMINANCE_DATA_POINTS]; }; /** @@ -197,6 +272,11 @@ struct hpd_rx_irq_offload_work_queue { */ bool is_handling_link_loss; /** + * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message + * ready event when we're already handling mst message ready event + */ + bool is_handling_mst_msg_rdy_event; + /** * @aconnector: The aconnector that this work queue is attached to */ struct amdgpu_dm_connector *aconnector; @@ -218,6 +298,10 @@ struct hpd_rx_irq_offload_work { * @offload_wq: offload work queue that this work is queued to */ struct hpd_rx_irq_offload_work_queue *offload_wq; + /** + * @adev: amdgpu_device pointer + */ + struct amdgpu_device *adev; }; /** @@ -366,13 +450,6 @@ struct amdgpu_display_manager { struct mutex audio_lock; /** - * @vblank_lock: - * - * Guards access to deferred vblank work state. - */ - spinlock_t vblank_lock; - - /** * @audio_component: * * Used to notify ELD changes to sound driver. @@ -470,9 +547,7 @@ struct amdgpu_display_manager { struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP]; struct mod_freesync *freesync_module; -#ifdef CONFIG_DRM_AMD_DC_HDCP struct hdcp_workqueue *hdcp_workqueue; -#endif /** * @vblank_control_workqueue: @@ -481,6 +556,13 @@ struct amdgpu_display_manager { */ struct workqueue_struct *vblank_control_workqueue; + /** + * @idle_workqueue: + * + * Periodic work for idle events. + */ + struct idle_workqueue *idle_workqueue; + struct drm_atomic_state *cached_state; struct dc_state *cached_dc_state; @@ -505,11 +587,12 @@ struct amdgpu_display_manager { #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) /** - * @crc_rd_wrk: + * @secure_display_ctx: * - * Work to be executed in a separate thread to communicate with PSP. + * Store secure display relevant info. e.g. the ROI information + * , the work_struct to command dmub, etc. */ - struct crc_rd_work *crc_rd_wrk; + struct secure_display_context secure_display_ctx; #endif /** * @hpd_rx_offload_wq: @@ -555,6 +638,45 @@ struct amdgpu_display_manager { * occurred on certain intel platform */ bool aux_hpd_discon_quirk; + + /** + * @edp0_on_dp1_quirk: + * + * quirk for platforms that put edp0 on DP1. + */ + bool edp0_on_dp1_quirk; + + /** + * @dpia_aux_lock: + * + * Guards access to DPIA AUX + */ + struct mutex dpia_aux_lock; + + /** + * @bb_from_dmub: + * + * Bounding box data read from dmub during early initialization for DCN4+ + * Data is stored as a byte array that should be casted to the appropriate bb struct + */ + void *bb_from_dmub; + + /** + * @oem_i2c: + * + * OEM i2c bus + */ + struct amdgpu_i2c_adapter *oem_i2c; + + /** + * @fused_io: + * + * dmub fused io interface + */ + struct fused_io_sync { + struct completion replied; + char reply_data[0x40]; // Cannot include dmub_cmd here + } fused_io[8]; }; enum dsc_clock_force_state { @@ -579,14 +701,52 @@ enum mst_progress_status { MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3), }; +/** + * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info + * + * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this + * struct is useful to keep track of the display-specific information about + * FreeSync. 
+ */ +struct amdgpu_hdmi_vsdb_info { + /** + * @amd_vsdb_version: Vendor Specific Data Block Version, should be + * used to determine which Vendor Specific InfoFrame (VSIF) to send. + */ + unsigned int amd_vsdb_version; + + /** + * @freesync_supported: FreeSync Supported. + */ + bool freesync_supported; + + /** + * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz. + */ + unsigned int min_refresh_rate_hz; + + /** + * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz + */ + unsigned int max_refresh_rate_hz; + + /** + * @replay_mode: Replay supported + */ + bool replay_mode; +}; + struct amdgpu_dm_connector { struct drm_connector base; uint32_t connector_id; + int bl_idx; + + struct cec_notifier *notifier; /* we need to mind the EDID between detect and get modes due to analog/digital/tvencoder */ - struct edid *edid; + const struct drm_edid *drm_edid; /* shared with amdgpu */ struct amdgpu_hpd hpd; @@ -607,9 +767,16 @@ struct amdgpu_dm_connector { /* DM only */ struct drm_dp_mst_topology_mgr mst_mgr; struct amdgpu_dm_dp_aux dm_dp_aux; - struct drm_dp_mst_port *port; - struct amdgpu_dm_connector *mst_port; + struct drm_dp_mst_port *mst_output_port; + struct amdgpu_dm_connector *mst_root; struct drm_dp_aux *dsc_aux; + uint32_t mst_local_bw; + uint16_t vc_full_pbn; + struct mutex handle_mst_msg_ready; + + /* branch device specific data */ + uint32_t branch_ieee_oui; + /* TODO see if we can merge with ddc_bus or make a dm_connector */ struct amdgpu_i2c_adapter *i2c; @@ -625,7 +792,6 @@ struct amdgpu_dm_connector { * value is set to zero when there is no FreeSync support. */ int max_vfreq ; - int pixel_clock_mhz; /* Audio instance - protected by audio_lock. */ int audio_inst; @@ -633,20 +799,32 @@ struct amdgpu_dm_connector { struct mutex hpd_lock; bool fake_enable; -#ifdef CONFIG_DEBUG_FS - uint32_t debugfs_dpcd_address; - uint32_t debugfs_dpcd_size; -#endif bool force_yuv420_output; + bool force_yuv422_output; struct dsc_preferred_settings dsc_settings; union dp_downstream_port_present mst_downstream_port_present; /* Cached display modes */ struct drm_display_mode freesync_vid_base; - int psr_skip_count; + int sr_skip_count; + bool disallow_edp_enter_psr; /* Record progress status of mst*/ uint8_t mst_status; + + /* Automated testing */ + bool timing_changed; + struct dc_crtc_timing *timing_requested; + + /* Adaptive Sync */ + bool pack_sdp_v1_3; + enum adaptive_sync_type as_type; + struct amdgpu_hdmi_vsdb_info vsdb_info; + + /* HDMI HPD debounce support */ + unsigned int hdmi_hpd_debounce_delay_ms; + struct delayed_work hdmi_hpd_debounce_work; + struct dc_sink *hdmi_prev_sink; }; static inline void amdgpu_dm_set_mst_status(uint8_t *status, @@ -660,11 +838,112 @@ static inline void amdgpu_dm_set_mst_status(uint8_t *status, #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) +struct amdgpu_dm_wb_connector { + struct drm_writeback_connector base; + struct dc_link *link; +}; + +#define to_amdgpu_dm_wb_connector(x) container_of(x, struct amdgpu_dm_wb_connector, base) + extern const struct amdgpu_ip_block_version dm_ip_block; +/* enum amdgpu_transfer_function: pre-defined transfer function supported by AMD. + * + * It includes standardized transfer functions and pure power functions. 
The + * transfer function coefficients are available at modules/color/color_gamma.c + */ +enum amdgpu_transfer_function { + AMDGPU_TRANSFER_FUNCTION_DEFAULT, + AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF, + AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF, + AMDGPU_TRANSFER_FUNCTION_PQ_EOTF, + AMDGPU_TRANSFER_FUNCTION_IDENTITY, + AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF, + AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_BT709_OETF, + AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_COUNT +}; + struct dm_plane_state { struct drm_plane_state base; struct dc_plane_state *dc_state; + + /* Plane color mgmt */ + /** + * @degamma_lut: + * + * 1D LUT for mapping framebuffer/plane pixel data before sampling or + * blending operations. It's usually applied to linearize input space. + * The blob (if not NULL) is an array of &struct drm_color_lut. + */ + struct drm_property_blob *degamma_lut; + /** + * @degamma_tf: + * + * Predefined transfer function to tell DC driver the input space to + * linearize. + */ + enum amdgpu_transfer_function degamma_tf; + /** + * @hdr_mult: + * + * Multiplier to 'gain' the plane. When PQ is decoded using the fixed + * func transfer function to the internal FP16 fb, 1.0 -> 80 nits (on + * AMD at least). When sRGB is decoded, 1.0 -> 1.0, obviously. + * Therefore, 1.0 multiplier = 80 nits for SDR content. So if you + * want, 203 nits for SDR content, pass in (203.0 / 80.0). Format is + * S31.32 sign-magnitude. + * + * HDR multiplier can wide range beyond [0.0, 1.0]. This means that PQ + * TF is needed for any subsequent linear-to-non-linear transforms. + */ + __u64 hdr_mult; + /** + * @ctm: + * + * Color transformation matrix. The blob (if not NULL) is a &struct + * drm_color_ctm_3x4. + */ + struct drm_property_blob *ctm; + /** + * @shaper_lut: shaper lookup table blob. The blob (if not NULL) is an + * array of &struct drm_color_lut. + */ + struct drm_property_blob *shaper_lut; + /** + * @shaper_tf: + * + * Predefined transfer function to delinearize color space. + */ + enum amdgpu_transfer_function shaper_tf; + /** + * @lut3d: 3D lookup table blob. The blob (if not NULL) is an array of + * &struct drm_color_lut. + */ + struct drm_property_blob *lut3d; + /** + * @blend_lut: blend lut lookup table blob. The blob (if not NULL) is an + * array of &struct drm_color_lut. + */ + struct drm_property_blob *blend_lut; + /** + * @blend_tf: + * + * Pre-defined transfer function for converting plane pixel data before + * applying blend LUT. + */ + enum amdgpu_transfer_function blend_tf; +}; + +enum amdgpu_dm_cursor_mode { + DM_CURSOR_NATIVE_MODE = 0, + DM_CURSOR_OVERLAY_MODE, }; struct dm_crtc_state { @@ -689,6 +968,16 @@ struct dm_crtc_state { struct dc_info_packet vrr_infopacket; int abm_level; + + /** + * @regamma_tf: + * + * Pre-defined transfer function for converting internal FB -> wire + * encoding. 
+ */ + enum amdgpu_transfer_function regamma_tf; + + enum amdgpu_dm_cursor_mode cursor_mode; }; #define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base) @@ -709,45 +998,13 @@ struct dm_connector_state { uint8_t underscan_hborder; bool underscan_enable; bool freesync_capable; -#ifdef CONFIG_DRM_AMD_DC_HDCP bool update_hdcp; -#endif + bool abm_sysfs_forbidden; uint8_t abm_level; int vcpi_slots; uint64_t pbn; }; -/** - * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info - * - * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this - * struct is useful to keep track of the display-specific information about - * FreeSync. - */ -struct amdgpu_hdmi_vsdb_info { - /** - * @amd_vsdb_version: Vendor Specific Data Block Version, should be - * used to determine which Vendor Specific InfoFrame (VSIF) to send. - */ - unsigned int amd_vsdb_version; - - /** - * @freesync_supported: FreeSync Supported. - */ - bool freesync_supported; - - /** - * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz. - */ - unsigned int min_refresh_rate_hz; - - /** - * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz - */ - unsigned int max_refresh_rate_hz; -}; - - #define to_dm_connector_state(x)\ container_of((x), struct dm_connector_state, base) @@ -773,24 +1030,34 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, int link_index); enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode); + const struct drm_display_mode *mode); void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector); void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, - struct edid *edid); + const struct drm_edid *drm_edid); void amdgpu_dm_trigger_timing_sync(struct drm_device *dev); +/* 3D LUT max size is 17x17x17 (4913 entries) */ +#define MAX_COLOR_3DLUT_SIZE 17 +#define MAX_COLOR_3DLUT_BITDEPTH 12 +int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev, + struct drm_plane_state *plane_state); +/* 1D LUT size */ #define MAX_COLOR_LUT_ENTRIES 4096 /* Legacy gamm LUT users such as X doesn't like large LUT sizes */ #define MAX_COLOR_LEGACY_LUT_ENTRIES 256 void amdgpu_dm_init_color_mod(void); +int amdgpu_dm_create_color_properties(struct amdgpu_device *adev); int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state); int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc); +int amdgpu_dm_check_crtc_color_mgmt(struct dm_crtc_state *crtc, + bool check_only); int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, + struct drm_plane_state *plane_state, struct dc_plane_state *dc_plane_state); void amdgpu_dm_update_connector_after_detect( @@ -798,14 +1065,22 @@ void amdgpu_dm_update_connector_after_detect( extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs; -int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, - struct dc_context *ctx, unsigned int link_index, - void *payload, void *operation_result); +int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index, + struct aux_payload *payload, enum aux_return_code_type *operation_result); + +bool amdgpu_dm_execute_fused_io( + struct amdgpu_device *dev, + struct dc_link *link, + union dmub_rb_cmd *commands, + uint8_t count, + uint32_t timeout_us +); -bool check_seamless_boot_capability(struct amdgpu_device *adev); +int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index, + struct 
set_config_cmd_payload *payload, enum set_config_status *operation_result); struct dc_stream_state * - create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, + create_validate_stream_for_sink(struct drm_connector *connector, const struct drm_display_mode *drm_mode, const struct dm_connector_state *dm_state, const struct dc_stream_state *old_stream); @@ -813,9 +1088,27 @@ struct dc_stream_state * int dm_atomic_get_state(struct drm_atomic_state *state, struct dm_atomic_state **dm_state); -struct amdgpu_dm_connector * +struct drm_connector * amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, struct drm_crtc *crtc); int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth); +struct idle_workqueue *idle_create_workqueue(struct amdgpu_device *adev); + +void *dm_allocate_gpu_mem(struct amdgpu_device *adev, + enum dc_gpu_mem_alloc_type type, + size_t size, + long long *addr); +void dm_free_gpu_mem(struct amdgpu_device *adev, + enum dc_gpu_mem_alloc_type type, + void *addr); + +bool amdgpu_dm_is_headless(struct amdgpu_device *adev); + +void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector); +void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector); +int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector); + +void retrieve_dmi_info(struct amdgpu_display_manager *dm); + #endif /* __AMDGPU_DM_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index a4cb23d059bd..1dcc79b35225 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2018 Advanced Micro Devices, Inc. * @@ -25,13 +26,39 @@ #include "amdgpu.h" #include "amdgpu_mode.h" #include "amdgpu_dm.h" +#include "amdgpu_dm_colorop.h" #include "dc.h" #include "modules/color/color_gamma.h" -#include "basics/conversion.h" /** * DOC: overview * + * We have three types of color management in the AMD display driver. + * 1. the legacy &drm_crtc DEGAMMA, CTM, and GAMMA properties + * 2. AMD driver private color management on &drm_plane and &drm_crtc + * 3. AMD plane color pipeline + * + * The CRTC properties are the original color management. When they were + * implemented per-plane color management was not a thing yet. Because + * of that we could get away with plumbing the DEGAMMA and CTM + * properties to pre-blending HW functions. This is incompatible with + * per-plane color management, such as via the AMD private properties or + * the new drm_plane color pipeline. The only compatible CRTC property + * with per-plane color management is the GAMMA property as it is + * applied post-blending. + * + * The AMD driver private color management properties are only exposed + * when the kernel is built explicitly with -DAMD_PRIVATE_COLOR. They + * are temporary building blocks on the path to full-fledged &drm_plane + * and &drm_crtc color pipelines and lay the driver's groundwork for the + * color pipelines. + * + * The AMD plane color pipeline describes AMD's &drm_colorops via the + * &drm_plane's COLOR_PIPELINE property. + * + * drm_crtc Properties + * ------------------- + * * The DC interface to HW gives us the following color management blocks * per pipe (surface): * @@ -42,36 +69,94 @@ * - Surface regamma LUT (normalized) * - Output CSC (normalized) * - * But these aren't a direct mapping to DRM color properties. 
The current DRM - * interface exposes CRTC degamma, CRTC CTM and CRTC regamma while our hardware - * is essentially giving: + * But these aren't a direct mapping to DRM color properties. The + * current DRM interface exposes CRTC degamma, CRTC CTM and CRTC regamma + * while our hardware is essentially giving: * * Plane CTM -> Plane degamma -> Plane CTM -> Plane regamma -> Plane CTM * - * The input gamma LUT block isn't really applicable here since it operates - * on the actual input data itself rather than the HW fp representation. The - * input and output CSC blocks are technically available to use as part of - * the DC interface but are typically used internally by DC for conversions - * between color spaces. These could be blended together with user - * adjustments in the future but for now these should remain untouched. + * The input gamma LUT block isn't really applicable here since it + * operates on the actual input data itself rather than the HW fp + * representation. The input and output CSC blocks are technically + * available to use as part of the DC interface but are typically used + * internally by DC for conversions between color spaces. These could be + * blended together with user adjustments in the future but for now + * these should remain untouched. + * + * The pipe blending also happens after these blocks so we don't + * actually support any CRTC props with correct blending with multiple + * planes - but we can still support CRTC color management properties in + * DM in most single plane cases correctly with clever management of the + * DC interface in DM. + * + * As per DRM documentation, blocks should be in hardware bypass when + * their respective property is set to NULL. A linear DGM/RGM LUT should + * also considered as putting the respective block into bypass mode. + * + * This means that the following configuration is assumed to be the + * default: + * + * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ... CRTC + * DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass + * + * AMD Private Color Management on drm_plane + * ----------------------------------------- + * + * The AMD private color management properties on a &drm_plane are: + * + * - AMD_PLANE_DEGAMMA_LUT + * - AMD_PLANE_DEGAMMA_LUT_SIZE + * - AMD_PLANE_DEGAMMA_TF + * - AMD_PLANE_HDR_MULT + * - AMD_PLANE_CTM + * - AMD_PLANE_SHAPER_LUT + * - AMD_PLANE_SHAPER_LUT_SIZE + * - AMD_PLANE_SHAPER_TF + * - AMD_PLANE_LUT3D + * - AMD_PLANE_LUT3D_SIZE + * - AMD_PLANE_BLEND_LUT + * - AMD_PLANE_BLEND_LUT_SIZE + * - AMD_PLANE_BLEND_TF + * + * The AMD private color management property on a &drm_crtc is: + * + * - AMD_CRTC_REGAMMA_TF + * + * Use of these properties is discouraged. + * + * AMD plane color pipeline + * ------------------------ + * + * The AMD &drm_plane color pipeline is advertised for DCN generations + * 3.0 and newer. It exposes these elements in this order: + * + * 1. 1D curve colorop + * 2. Multiplier + * 3. 3x4 CTM + * 4. 1D curve colorop + * 5. 1D LUT + * 6. 3D LUT + * 7. 1D curve colorop + * 8. 1D LUT * - * The pipe blending also happens after these blocks so we don't actually - * support any CRTC props with correct blending with multiple planes - but we - * can still support CRTC color management properties in DM in most single - * plane cases correctly with clever management of the DC interface in DM. + * The multiplier (#2) is a simple multiplier that is applied to all + * channels. 
* - * As per DRM documentation, blocks should be in hardware bypass when their - * respective property is set to NULL. A linear DGM/RGM LUT should also - * considered as putting the respective block into bypass mode. + * The 3x4 CTM (#3) is a simple 3x4 matrix. * - * This means that the following - * configuration is assumed to be the default: + * #1, and #7 are non-linear to linear curves. #4 is a linear to + * non-linear curve. They support sRGB, PQ, and BT.709/BT.2020 EOTFs or + * their inverse. + * + * The 1D LUTs (#5 and #8) are plain 4096 entry LUTs. + * + * The 3DLUT (#6) is a tetrahedrally interpolated 17 cube LUT. * - * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ... - * CRTC DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass */ #define MAX_DRM_LUT_VALUE 0xFFFF +#define MAX_DRM_LUT32_VALUE 0xFFFFFFFF +#define SDR_WHITE_LEVEL_INIT_VALUE 80 /** * amdgpu_dm_init_color_mod - Initialize the color module. @@ -84,6 +169,247 @@ void amdgpu_dm_init_color_mod(void) setup_x_points_distribution(); } +static inline struct fixed31_32 amdgpu_dm_fixpt_from_s3132(__u64 x) +{ + struct fixed31_32 val; + + /* If negative, convert to 2's complement. */ + if (x & (1ULL << 63)) + x = -(x & ~(1ULL << 63)); + + val.value = x; + return val; +} + +#ifdef AMD_PRIVATE_COLOR +/* Pre-defined Transfer Functions (TF) + * + * AMD driver supports pre-defined mathematical functions for transferring + * between encoded values and optical/linear space. Depending on HW color caps, + * ROMs and curves built by the AMD color module support these transforms. + * + * The driver-specific color implementation exposes properties for pre-blending + * degamma TF, shaper TF (before 3D LUT), and blend(dpp.ogam) TF and + * post-blending regamma (mpc.ogam) TF. However, only pre-blending degamma + * supports ROM curves. AMD color module uses pre-defined coefficients to build + * curves for the other blocks. What can be done by each color block is + * described by struct dpp_color_capsand struct mpc_color_caps. + * + * AMD driver-specific color API exposes the following pre-defined transfer + * functions: + * + * - Identity: linear/identity relationship between pixel value and + * luminance value; + * - Gamma 2.2, Gamma 2.4, Gamma 2.6: pure power functions; + * - sRGB: 2.4: The piece-wise transfer function from IEC 61966-2-1:1999; + * - BT.709: has a linear segment in the bottom part and then a power function + * with a 0.45 (~1/2.22) gamma for the rest of the range; standardized by + * ITU-R BT.709-6; + * - PQ (Perceptual Quantizer): used for HDR display, allows luminance range + * capability of 0 to 10,000 nits; standardized by SMPTE ST 2084. + * + * The AMD color model is designed with an assumption that SDR (sRGB, BT.709, + * Gamma 2.2, etc.) peak white maps (normalized to 1.0 FP) to 80 nits in the PQ + * system. This has the implication that PQ EOTF (non-linear to linear) maps to + * [0.0..125.0] where 125.0 = 10,000 nits / 80 nits. 
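(Editor's aside, not part of the patch: a worked example of the S31.32
sign-magnitude encoding implied by the 80-nit convention above. The helper
name s3132_from_frac() is made up for illustration only; in the patch itself,
amdgpu_dm_fixpt_from_s3132() performs the matching conversion from
signed-magnitude S31.32 into DC's two's-complement fixed31_32.)

	#include <linux/types.h>

	/* Positive S31.32 value: integer part in bits 62:32, fraction in bits 31:0. */
	static inline __u64 s3132_from_frac(__u64 num, __u64 den)
	{
		return ((num / den) << 32) | (((num % den) << 32) / den);
	}

	/* SDR white at 203 nits: 203 / 80 = 2.5375, so
	 * s3132_from_frac(203, 80) == 0x0000000289999999 (fraction truncated). */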
+ * + * Non-linear and linear forms are described in the table below: + * + * ┌───────────┬─────────────────────┬──────────────────────┐ + * │ │ Non-linear │ Linear │ + * ├───────────┼─────────────────────┼──────────────────────┤ + * │ sRGB │ UNORM or [0.0, 1.0] │ [0.0, 1.0] │ + * ├───────────┼─────────────────────┼──────────────────────┤ + * │ BT709 │ UNORM or [0.0, 1.0] │ [0.0, 1.0] │ + * ├───────────┼─────────────────────┼──────────────────────┤ + * │ Gamma 2.x │ UNORM or [0.0, 1.0] │ [0.0, 1.0] │ + * ├───────────┼─────────────────────┼──────────────────────┤ + * │ PQ │ UNORM or FP16 CCCS* │ [0.0, 125.0] │ + * ├───────────┼─────────────────────┼──────────────────────┤ + * │ Identity │ UNORM or FP16 CCCS* │ [0.0, 1.0] or CCCS** │ + * └───────────┴─────────────────────┴──────────────────────┘ + * * CCCS: Windows canonical composition color space + * ** Respectively + * + * In the driver-specific API, color block names attached to TF properties + * suggest the intention regarding non-linear encoding pixel's luminance + * values. As some newer encodings don't use gamma curve, we make encoding and + * decoding explicit by defining an enum list of transfer functions supported + * in terms of EOTF and inverse EOTF, where: + * + * - EOTF (electro-optical transfer function): is the transfer function to go + * from the encoded value to an optical (linear) value. De-gamma functions + * traditionally do this. + * - Inverse EOTF (simply the inverse of the EOTF): is usually intended to go + * from an optical/linear space (which might have been used for blending) + * back to the encoded values. Gamma functions traditionally do this. + */ +static const char * const +amdgpu_transfer_function_names[] = { + [AMDGPU_TRANSFER_FUNCTION_DEFAULT] = "Default", + [AMDGPU_TRANSFER_FUNCTION_IDENTITY] = "Identity", + [AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF] = "sRGB EOTF", + [AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF] = "BT.709 inv_OETF", + [AMDGPU_TRANSFER_FUNCTION_PQ_EOTF] = "PQ EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF] = "Gamma 2.2 EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF] = "Gamma 2.4 EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF] = "Gamma 2.6 EOTF", + [AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF] = "sRGB inv_EOTF", + [AMDGPU_TRANSFER_FUNCTION_BT709_OETF] = "BT.709 OETF", + [AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF] = "PQ inv_EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF] = "Gamma 2.2 inv_EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF] = "Gamma 2.4 inv_EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF] = "Gamma 2.6 inv_EOTF", +}; + +static const u32 amdgpu_eotf = + BIT(AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF) | + BIT(AMDGPU_TRANSFER_FUNCTION_PQ_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF); + +static const u32 amdgpu_inv_eotf = + BIT(AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_BT709_OETF) | + BIT(AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF); + +static struct drm_property * +amdgpu_create_tf_property(struct drm_device *dev, + const char *name, + u32 supported_tf) +{ + u32 transfer_functions = supported_tf | + BIT(AMDGPU_TRANSFER_FUNCTION_DEFAULT) | + BIT(AMDGPU_TRANSFER_FUNCTION_IDENTITY); + struct drm_prop_enum_list enum_list[AMDGPU_TRANSFER_FUNCTION_COUNT]; + int i, len; + + len = 0; 
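(Editor's note: the loop below walks the transfer_functions bitmask and fills
enum_list[] with the matching names from amdgpu_transfer_function_names[];
because AMDGPU_TRANSFER_FUNCTION_DEFAULT and _IDENTITY were OR'd in above,
every TF property created this way exposes at least those two entries.)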
+ for (i = 0; i < AMDGPU_TRANSFER_FUNCTION_COUNT; i++) { + if ((transfer_functions & BIT(i)) == 0) + continue; + + enum_list[len].type = i; + enum_list[len].name = amdgpu_transfer_function_names[i]; + len++; + } + + return drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, + name, enum_list, len); +} + +int +amdgpu_dm_create_color_properties(struct amdgpu_device *adev) +{ + struct drm_property *prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_DEGAMMA_LUT", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_degamma_lut_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + DRM_MODE_PROP_IMMUTABLE, + "AMD_PLANE_DEGAMMA_LUT_SIZE", + 0, UINT_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_degamma_lut_size_property = prop; + + prop = amdgpu_create_tf_property(adev_to_drm(adev), + "AMD_PLANE_DEGAMMA_TF", + amdgpu_eotf); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_degamma_tf_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + 0, "AMD_PLANE_HDR_MULT", 0, U64_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_hdr_mult_property = prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_CTM", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_ctm_property = prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_SHAPER_LUT", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_shaper_lut_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + DRM_MODE_PROP_IMMUTABLE, + "AMD_PLANE_SHAPER_LUT_SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_shaper_lut_size_property = prop; + + prop = amdgpu_create_tf_property(adev_to_drm(adev), + "AMD_PLANE_SHAPER_TF", + amdgpu_inv_eotf); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_shaper_tf_property = prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_LUT3D", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_lut3d_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + DRM_MODE_PROP_IMMUTABLE, + "AMD_PLANE_LUT3D_SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_lut3d_size_property = prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_BLEND_LUT", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_blend_lut_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + DRM_MODE_PROP_IMMUTABLE, + "AMD_PLANE_BLEND_LUT_SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_blend_lut_size_property = prop; + + prop = amdgpu_create_tf_property(adev_to_drm(adev), + "AMD_PLANE_BLEND_TF", + amdgpu_eotf); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_blend_tf_property = prop; + + prop = amdgpu_create_tf_property(adev_to_drm(adev), + "AMD_CRTC_REGAMMA_TF", + amdgpu_inv_eotf); + if (!prop) + return -ENOMEM; + adev->mode_info.regamma_tf_property = prop; + + return 0; +} +#endif + /** * __extract_blob_lut - Extracts the DRM lut and lut size from a blob. * @blob: DRM color mgmt property blob @@ -100,6 +426,21 @@ __extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size) } /** + * __extract_blob_lut32 - Extracts the DRM lut and lut size from a blob. 
+ * @blob: DRM color mgmt property blob + * @size: lut size + * + * Returns: + * DRM LUT or NULL + */ +static const struct drm_color_lut32 * +__extract_blob_lut32(const struct drm_property_blob *blob, uint32_t *size) +{ + *size = blob ? drm_color_lut32_size(blob) : 0; + return blob ? (struct drm_color_lut32 *)blob->data : NULL; +} + +/** * __is_lut_linear - check if the given lut is a linear mapping of values * @lut: given lut to check values * @size: lut size @@ -173,6 +514,24 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut, } /** + * __drm_lut32_to_dc_gamma - convert the drm_color_lut to dc_gamma. + * @lut: DRM lookup table for color conversion + * @gamma: DC gamma to set entries + * + * The conversion depends on the size of the lut - whether or not it's legacy. + */ +static void __drm_lut32_to_dc_gamma(const struct drm_color_lut32 *lut, struct dc_gamma *gamma) +{ + int i; + + for (i = 0; i < MAX_COLOR_LUT_ENTRIES; i++) { + gamma->entries.red[i] = dc_fixpt_from_fraction(lut[i].red, MAX_DRM_LUT32_VALUE); + gamma->entries.green[i] = dc_fixpt_from_fraction(lut[i].green, MAX_DRM_LUT32_VALUE); + gamma->entries.blue[i] = dc_fixpt_from_fraction(lut[i].blue, MAX_DRM_LUT32_VALUE); + } +} + +/** * __drm_ctm_to_dc_matrix - converts a DRM CTM to a DC CSC float matrix * @ctm: DRM color transformation matrix * @matrix: DC CSC float matrix @@ -182,7 +541,6 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut, static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm, struct fixed31_32 *matrix) { - int64_t val; int i; /* @@ -201,12 +559,29 @@ static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm, } /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */ - val = ctm->matrix[i - (i / 4)]; - /* If negative, convert to 2's complement. */ - if (val & (1ULL << 63)) - val = -(val & ~(1ULL << 63)); + matrix[i] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[i - (i / 4)]); + } +} + +/** + * __drm_ctm_3x4_to_dc_matrix - converts a DRM CTM 3x4 to a DC CSC float matrix + * @ctm: DRM color transformation matrix with 3x4 dimensions + * @matrix: DC CSC float matrix + * + * The matrix needs to be a 3x4 (12 entry) matrix. + */ +static void __drm_ctm_3x4_to_dc_matrix(const struct drm_color_ctm_3x4 *ctm, + struct fixed31_32 *matrix) +{ + int i; - matrix[i].value = val; + /* The format provided is S31.32, using signed-magnitude representation. + * Our fixed31_32 is also S31.32, but is using 2's complement. We have + * to convert from signed-magnitude to 2's complement. + */ + for (i = 0; i < 12; i++) { + /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */ + matrix[i] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[i]); } } @@ -268,16 +643,74 @@ static int __set_output_tf(struct dc_transfer_func *func, struct calculate_buffer cal_buffer = {0}; bool res; - ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES); + cal_buffer.buffer_index = -1; + + if (lut_size) { + ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES); + + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; + + gamma->num_entries = lut_size; + __drm_lut_to_dc_gamma(lut, gamma, false); + } + + if (func->tf == TRANSFER_FUNCTION_LINEAR) { + /* + * Color module doesn't like calculating regamma params + * on top of a linear input. But degamma params can be used + * instead to simulate this. + */ + if (gamma) + gamma->type = GAMMA_CUSTOM; + res = mod_color_calculate_degamma_params(NULL, func, + gamma, gamma != NULL); + } else { + /* + * Assume sRGB. The actual mapping will depend on whether the + * input was legacy or not. 
+ */ + if (gamma) + gamma->type = GAMMA_CS_TFM_1D; + res = mod_color_calculate_regamma_params(func, gamma, gamma != NULL, + has_rom, NULL, &cal_buffer); + } + + if (gamma) + dc_gamma_release(&gamma); + + return res ? 0 : -ENOMEM; +} + +/** + * __set_output_tf_32 - calculates the output transfer function based on expected input space. + * @func: transfer function + * @lut: lookup table that defines the color space + * @lut_size: size of respective lut + * @has_rom: if ROM can be used for hardcoded curve + * + * Returns: + * 0 in case of success. -ENOMEM if fails. + */ +static int __set_output_tf_32(struct dc_transfer_func *func, + const struct drm_color_lut32 *lut, uint32_t lut_size, + bool has_rom) +{ + struct dc_gamma *gamma = NULL; + struct calculate_buffer cal_buffer = {0}; + bool res; cal_buffer.buffer_index = -1; - gamma = dc_create_gamma(); - if (!gamma) - return -ENOMEM; + if (lut_size) { + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; - gamma->num_entries = lut_size; - __drm_lut_to_dc_gamma(lut, gamma, false); + gamma->num_entries = lut_size; + __drm_lut32_to_dc_gamma(lut, gamma); + } if (func->tf == TRANSFER_FUNCTION_LINEAR) { /* @@ -285,27 +718,68 @@ static int __set_output_tf(struct dc_transfer_func *func, * on top of a linear input. But degamma params can be used * instead to simulate this. */ - gamma->type = GAMMA_CUSTOM; + if (gamma) + gamma->type = GAMMA_CUSTOM; res = mod_color_calculate_degamma_params(NULL, func, - gamma, true); + gamma, gamma != NULL); } else { /* * Assume sRGB. The actual mapping will depend on whether the * input was legacy or not. */ - gamma->type = GAMMA_CS_TFM_1D; - res = mod_color_calculate_regamma_params(func, gamma, false, + if (gamma) + gamma->type = GAMMA_CS_TFM_1D; + res = mod_color_calculate_regamma_params(func, gamma, gamma != NULL, has_rom, NULL, &cal_buffer); } - dc_gamma_release(&gamma); + if (gamma) + dc_gamma_release(&gamma); return res ? 0 : -ENOMEM; } + +static int amdgpu_dm_set_atomic_regamma(struct dc_transfer_func *out_tf, + const struct drm_color_lut *regamma_lut, + uint32_t regamma_size, bool has_rom, + enum dc_transfer_func_predefined tf) +{ + int ret = 0; + + if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) { + /* + * CRTC RGM goes into RGM LUT. + * + * Note: there is no implicit sRGB regamma here. We are using + * degamma calculation from color module to calculate the curve + * from a linear base if gamma TF is not set. However, if gamma + * TF (!= Linear) and LUT are set at the same time, we will use + * regamma calculation, and the color module will combine the + * pre-defined TF and the custom LUT values into the LUT that's + * actually programmed. + */ + out_tf->type = TF_TYPE_DISTRIBUTED_POINTS; + out_tf->tf = tf; + out_tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + + ret = __set_output_tf(out_tf, regamma_lut, regamma_size, has_rom); + } else { + /* + * No CRTC RGM means we can just put the block into bypass + * since we don't have any plane level adjustments using it. + */ + out_tf->type = TF_TYPE_BYPASS; + out_tf->tf = TRANSFER_FUNCTION_LINEAR; + } + + return ret; +} + /** * __set_input_tf - calculates the input transfer function based on expected * input space. + * @caps: dc color capabilities * @func: transfer function * @lut: lookup table that defines the color space * @lut_size: size of respective lut. @@ -313,27 +787,350 @@ static int __set_output_tf(struct dc_transfer_func *func, * Returns: * 0 in case of success. -ENOMEM if fails. 
*/ -static int __set_input_tf(struct dc_transfer_func *func, +static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *func, const struct drm_color_lut *lut, uint32_t lut_size) { struct dc_gamma *gamma = NULL; bool res; - gamma = dc_create_gamma(); - if (!gamma) - return -ENOMEM; + if (lut_size) { + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; - gamma->type = GAMMA_CUSTOM; - gamma->num_entries = lut_size; + gamma->type = GAMMA_CUSTOM; + gamma->num_entries = lut_size; - __drm_lut_to_dc_gamma(lut, gamma, false); + __drm_lut_to_dc_gamma(lut, gamma, false); + } - res = mod_color_calculate_degamma_params(NULL, func, gamma, true); - dc_gamma_release(&gamma); + res = mod_color_calculate_degamma_params(caps, func, gamma, gamma != NULL); + + if (gamma) + dc_gamma_release(&gamma); + + return res ? 0 : -ENOMEM; +} + +/** + * __set_input_tf_32 - calculates the input transfer function based on expected + * input space. + * @caps: dc color capabilities + * @func: transfer function + * @lut: lookup table that defines the color space + * @lut_size: size of respective lut. + * + * Returns: + * 0 in case of success. -ENOMEM if fails. + */ +static int __set_input_tf_32(struct dc_color_caps *caps, struct dc_transfer_func *func, + const struct drm_color_lut32 *lut, uint32_t lut_size) +{ + struct dc_gamma *gamma = NULL; + bool res; + + if (lut_size) { + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; + + gamma->type = GAMMA_CUSTOM; + gamma->num_entries = lut_size; + + __drm_lut32_to_dc_gamma(lut, gamma); + } + + res = mod_color_calculate_degamma_params(caps, func, gamma, gamma != NULL); + + if (gamma) + dc_gamma_release(&gamma); return res ? 0 : -ENOMEM; } +static enum dc_transfer_func_predefined +amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf) +{ + switch (tf) { + default: + case AMDGPU_TRANSFER_FUNCTION_DEFAULT: + case AMDGPU_TRANSFER_FUNCTION_IDENTITY: + return TRANSFER_FUNCTION_LINEAR; + case AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF: + case AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF: + return TRANSFER_FUNCTION_SRGB; + case AMDGPU_TRANSFER_FUNCTION_BT709_OETF: + case AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF: + return TRANSFER_FUNCTION_BT709; + case AMDGPU_TRANSFER_FUNCTION_PQ_EOTF: + case AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF: + return TRANSFER_FUNCTION_PQ; + case AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF: + case AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF: + return TRANSFER_FUNCTION_GAMMA22; + case AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF: + case AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF: + return TRANSFER_FUNCTION_GAMMA24; + case AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF: + case AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF: + return TRANSFER_FUNCTION_GAMMA26; + } +} + +static enum dc_transfer_func_predefined +amdgpu_colorop_tf_to_dc_tf(enum drm_colorop_curve_1d_type tf) +{ + switch (tf) { + case DRM_COLOROP_1D_CURVE_SRGB_EOTF: + case DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF: + return TRANSFER_FUNCTION_SRGB; + case DRM_COLOROP_1D_CURVE_PQ_125_EOTF: + case DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF: + return TRANSFER_FUNCTION_PQ; + case DRM_COLOROP_1D_CURVE_BT2020_INV_OETF: + case DRM_COLOROP_1D_CURVE_BT2020_OETF: + return TRANSFER_FUNCTION_BT709; + case DRM_COLOROP_1D_CURVE_GAMMA22: + case DRM_COLOROP_1D_CURVE_GAMMA22_INV: + return TRANSFER_FUNCTION_GAMMA22; + default: + return TRANSFER_FUNCTION_LINEAR; + } +} + +static void __to_dc_lut3d_color(struct dc_rgb *rgb, + const struct drm_color_lut lut, + int bit_precision) +{ + rgb->red = drm_color_lut_extract(lut.red, bit_precision); + rgb->green = 
drm_color_lut_extract(lut.green, bit_precision); + rgb->blue = drm_color_lut_extract(lut.blue, bit_precision); +} + +static void __drm_3dlut_to_dc_3dlut(const struct drm_color_lut *lut, + uint32_t lut3d_size, + struct tetrahedral_params *params, + bool use_tetrahedral_9, + int bit_depth) +{ + struct dc_rgb *lut0; + struct dc_rgb *lut1; + struct dc_rgb *lut2; + struct dc_rgb *lut3; + int lut_i, i; + + + if (use_tetrahedral_9) { + lut0 = params->tetrahedral_9.lut0; + lut1 = params->tetrahedral_9.lut1; + lut2 = params->tetrahedral_9.lut2; + lut3 = params->tetrahedral_9.lut3; + } else { + lut0 = params->tetrahedral_17.lut0; + lut1 = params->tetrahedral_17.lut1; + lut2 = params->tetrahedral_17.lut2; + lut3 = params->tetrahedral_17.lut3; + } + + for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) { + /* + * We should consider the 3D LUT RGB values are distributed + * along four arrays lut0-3 where the first sizes 1229 and the + * other 1228. The bit depth supported for 3dlut channel is + * 12-bit, but DC also supports 10-bit. + * + * TODO: improve color pipeline API to enable the userspace set + * bit depth and 3D LUT size/stride, as specified by VA-API. + */ + __to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth); + __to_dc_lut3d_color(&lut1[lut_i], lut[i + 1], bit_depth); + __to_dc_lut3d_color(&lut2[lut_i], lut[i + 2], bit_depth); + __to_dc_lut3d_color(&lut3[lut_i], lut[i + 3], bit_depth); + } + /* lut0 has 1229 points (lut_size/4 + 1) */ + __to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth); +} + +static void __to_dc_lut3d_32_color(struct dc_rgb *rgb, + const struct drm_color_lut32 lut, + int bit_precision) +{ + rgb->red = drm_color_lut32_extract(lut.red, bit_precision); + rgb->green = drm_color_lut32_extract(lut.green, bit_precision); + rgb->blue = drm_color_lut32_extract(lut.blue, bit_precision); +} + +static void __drm_3dlut32_to_dc_3dlut(const struct drm_color_lut32 *lut, + uint32_t lut3d_size, + struct tetrahedral_params *params, + bool use_tetrahedral_9, + int bit_depth) +{ + struct dc_rgb *lut0; + struct dc_rgb *lut1; + struct dc_rgb *lut2; + struct dc_rgb *lut3; + int lut_i, i; + + + if (use_tetrahedral_9) { + lut0 = params->tetrahedral_9.lut0; + lut1 = params->tetrahedral_9.lut1; + lut2 = params->tetrahedral_9.lut2; + lut3 = params->tetrahedral_9.lut3; + } else { + lut0 = params->tetrahedral_17.lut0; + lut1 = params->tetrahedral_17.lut1; + lut2 = params->tetrahedral_17.lut2; + lut3 = params->tetrahedral_17.lut3; + } + + for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) { + /* + * We should consider the 3D LUT RGB values are distributed + * along four arrays lut0-3 where the first sizes 1229 and the + * other 1228. The bit depth supported for 3dlut channel is + * 12-bit, but DC also supports 10-bit. + * + * TODO: improve color pipeline API to enable the userspace set + * bit depth and 3D LUT size/stride, as specified by VA-API. + */ + __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth); + __to_dc_lut3d_32_color(&lut1[lut_i], lut[i + 1], bit_depth); + __to_dc_lut3d_32_color(&lut2[lut_i], lut[i + 2], bit_depth); + __to_dc_lut3d_32_color(&lut3[lut_i], lut[i + 3], bit_depth); + } + /* lut0 has 1229 points (lut_size/4 + 1) */ + __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth); +} + +/* amdgpu_dm_atomic_lut3d - set DRM 3D LUT to DC stream + * @drm_lut3d: user 3D LUT + * @drm_lut3d_size: size of 3D LUT + * @lut3d: DC 3D LUT + * + * Map user 3D LUT data to DC 3D LUT and all necessary bits to program it + * on DCN accordingly. 
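+ *
+ * Only a 17x17x17 (4913-entry), 12-bit 3D LUT is programmed for now; the
+ * entries are spread across the four tetrahedral_17 sub-tables, with lut0
+ * taking 1229 points and lut1-3 taking 1228 each.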
+ */ +static void amdgpu_dm_atomic_lut3d(const struct drm_color_lut *drm_lut3d, + uint32_t drm_lut3d_size, + struct dc_3dlut *lut) +{ + if (!drm_lut3d_size) { + lut->state.bits.initialized = 0; + } else { + /* Stride and bit depth are not programmable by API yet. + * Therefore, only supports 17x17x17 3D LUT (12-bit). + */ + lut->lut_3d.use_tetrahedral_9 = false; + lut->lut_3d.use_12bits = true; + lut->state.bits.initialized = 1; + __drm_3dlut_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d, + lut->lut_3d.use_tetrahedral_9, + MAX_COLOR_3DLUT_BITDEPTH); + } +} + +static int amdgpu_dm_atomic_shaper_lut(const struct drm_color_lut *shaper_lut, + bool has_rom, + enum dc_transfer_func_predefined tf, + uint32_t shaper_size, + struct dc_transfer_func *func_shaper) +{ + int ret = 0; + + if (shaper_size || tf != TRANSFER_FUNCTION_LINEAR) { + /* + * If user shaper LUT is set, we assume a linear color space + * (linearized by degamma 1D LUT or not). + */ + func_shaper->type = TF_TYPE_DISTRIBUTED_POINTS; + func_shaper->tf = tf; + func_shaper->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + + ret = __set_output_tf(func_shaper, shaper_lut, shaper_size, has_rom); + } else { + func_shaper->type = TF_TYPE_BYPASS; + func_shaper->tf = TRANSFER_FUNCTION_LINEAR; + } + + return ret; +} + +static int amdgpu_dm_atomic_blend_lut(const struct drm_color_lut *blend_lut, + bool has_rom, + enum dc_transfer_func_predefined tf, + uint32_t blend_size, + struct dc_transfer_func *func_blend) +{ + int ret = 0; + + if (blend_size || tf != TRANSFER_FUNCTION_LINEAR) { + /* + * DRM plane gamma LUT or TF means we are linearizing color + * space before blending (similar to degamma programming). As + * we don't have hardcoded curve support, or we use AMD color + * module to fill the parameters that will be translated to HW + * points. + */ + func_blend->type = TF_TYPE_DISTRIBUTED_POINTS; + func_blend->tf = tf; + func_blend->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + + ret = __set_input_tf(NULL, func_blend, blend_lut, blend_size); + } else { + func_blend->type = TF_TYPE_BYPASS; + func_blend->tf = TRANSFER_FUNCTION_LINEAR; + } + + return ret; +} + +/** + * amdgpu_dm_verify_lut3d_size - verifies if 3D LUT is supported and if user + * shaper and 3D LUTs match the hw supported size + * @adev: amdgpu device + * @plane_state: the DRM plane state + * + * Verifies if pre-blending (DPP) 3D LUT is supported by the HW (DCN 2.0 or + * newer) and if the user shaper and 3D LUTs match the supported size. + * + * Returns: + * 0 on success. -EINVAL if lut size are invalid. + */ +int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev, + struct drm_plane_state *plane_state) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state); + const struct drm_color_lut *shaper = NULL, *lut3d = NULL; + uint32_t exp_size, size, dim_size = MAX_COLOR_3DLUT_SIZE; + bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut || adev->dm.dc->caps.color.mpc.preblend; + + /* shaper LUT is only available if 3D LUT color caps */ + exp_size = has_3dlut ? MAX_COLOR_LUT_ENTRIES : 0; + shaper = __extract_blob_lut(dm_plane_state->shaper_lut, &size); + + if (shaper && size != exp_size) { + drm_dbg(&adev->ddev, + "Invalid Shaper LUT size. Should be %u but got %u.\n", + exp_size, size); + return -EINVAL; + } + + /* The number of 3D LUT entries is the dimension size cubed */ + exp_size = has_3dlut ? 
dim_size * dim_size * dim_size : 0; + lut3d = __extract_blob_lut(dm_plane_state->lut3d, &size); + + if (lut3d && size != exp_size) { + drm_dbg(&adev->ddev, + "Invalid 3D LUT size. Should be %u but got %u.\n", + exp_size, size); + return -EINVAL; + } + + return 0; +} + /** * amdgpu_dm_verify_lut_sizes - verifies if DRM luts match the hw supported sizes * @crtc_state: the DRM CRTC state @@ -371,39 +1168,42 @@ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state) } /** - * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream. + * amdgpu_dm_check_crtc_color_mgmt: Check if DRM color props are programmable by DC. * @crtc: amdgpu_dm crtc state + * @check_only: only check color state without update dc stream * - * With no plane level color management properties we're free to use any - * of the HW blocks as long as the CRTC CTM always comes before the - * CRTC RGM and after the CRTC DGM. - * - * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear. - * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear. - * - The CRTC CTM will be placed in the gamut remap block if it is non-linear. + * This function just verifies CRTC LUT sizes, if there is enough space for + * output transfer function and if its parameters can be calculated by AMD + * color module. It also adjusts some settings for programming CRTC degamma at + * plane stage, using plane DGM block. * * The RGM block is typically more fully featured and accurate across * all ASICs - DCE can't support a custom non-linear CRTC DGM. * * For supporting both plane level color management and CRTC level color - * management at once we have to either restrict the usage of CRTC properties - * or blend adjustments together. + * management at once we have to either restrict the usage of some CRTC + * properties or blend adjustments together. * * Returns: - * 0 on success. Error code if setup fails. + * 0 on success. Error code if validation fails. */ -int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) + +int amdgpu_dm_check_crtc_color_mgmt(struct dm_crtc_state *crtc, + bool check_only) { struct dc_stream_state *stream = crtc->stream; struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev); bool has_rom = adev->asic_type <= CHIP_RAVEN; - struct drm_color_ctm *ctm = NULL; + struct dc_transfer_func *out_tf; const struct drm_color_lut *degamma_lut, *regamma_lut; uint32_t degamma_size, regamma_size; bool has_regamma, has_degamma; + enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_LINEAR; bool is_legacy; int r; + tf = amdgpu_tf_to_dc_tf(crtc->regamma_tf); + r = amdgpu_dm_verify_lut_sizes(&crtc->base); if (r) return r; @@ -423,6 +1223,14 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) crtc->cm_has_degamma = false; crtc->cm_is_degamma_srgb = false; + if (check_only) { + out_tf = kvzalloc(sizeof(*out_tf), GFP_KERNEL); + if (!out_tf) + return -ENOMEM; + } else { + out_tf = &stream->out_transfer_func; + } + /* Setup regamma and degamma. */ if (is_legacy) { /* @@ -437,29 +1245,21 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) * inverse color ramp in legacy userspace. */ crtc->cm_is_degamma_srgb = true; - stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; - - r = __set_legacy_tf(stream->out_transfer_func, regamma_lut, - regamma_size, has_rom); - if (r) - return r; - } else if (has_regamma) { - /* If atomic regamma, CRTC RGM goes into RGM LUT. 
*/ - stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; - - r = __set_output_tf(stream->out_transfer_func, regamma_lut, - regamma_size, has_rom); - if (r) - return r; - } else { + out_tf->type = TF_TYPE_DISTRIBUTED_POINTS; + out_tf->tf = TRANSFER_FUNCTION_SRGB; /* - * No CRTC RGM means we can just put the block into bypass - * since we don't have any plane level adjustments using it. + * Note: although we pass has_rom as parameter here, we never + * actually use ROM because the color module only takes the ROM + * path if transfer_func->type == PREDEFINED. + * + * See more in mod_color_calculate_regamma_params() */ - stream->out_transfer_func->type = TF_TYPE_BYPASS; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; + r = __set_legacy_tf(out_tf, regamma_lut, + regamma_size, has_rom); + } else { + regamma_size = has_regamma ? regamma_size : 0; + r = amdgpu_dm_set_atomic_regamma(out_tf, regamma_lut, + regamma_size, has_rom, tf); } /* @@ -468,6 +1268,43 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) * have to place the CTM in the OCSC in that case. */ crtc->cm_has_degamma = has_degamma; + if (check_only) + kvfree(out_tf); + + return r; +} + +/** + * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream. + * @crtc: amdgpu_dm crtc state + * + * With no plane level color management properties we're free to use any + * of the HW blocks as long as the CRTC CTM always comes before the + * CRTC RGM and after the CRTC DGM. + * + * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear. + * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear. + * - The CRTC CTM will be placed in the gamut remap block if it is non-linear. + * + * The RGM block is typically more fully featured and accurate across + * all ASICs - DCE can't support a custom non-linear CRTC DGM. + * + * For supporting both plane level color management and CRTC level color + * management at once we have to either restrict the usage of CRTC properties + * or blend adjustments together. + * + * Returns: + * 0 on success. Error code if setup fails. + */ +int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) +{ + struct dc_stream_state *stream = crtc->stream; + struct drm_color_ctm *ctm = NULL; + int ret; + + ret = amdgpu_dm_check_crtc_color_mgmt(crtc, false); + if (ret) + return ret; /* Setup CRTC CTM. */ if (crtc->base.ctm) { @@ -495,20 +1332,10 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) return 0; } -/** - * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane. - * @crtc: amdgpu_dm crtc state - * @dc_plane_state: target DC surface - * - * Update the underlying dc_stream_state's input transfer function (ITF) in - * preparation for hardware commit. The transfer function used depends on - * the preparation done on the stream for color management. - * - * Returns: - * 0 on success. -ENOMEM if mem allocation fails. 
- */ -int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, - struct dc_plane_state *dc_plane_state) +static int +map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc, + struct dc_plane_state *dc_plane_state, + struct dc_color_caps *caps) { const struct drm_color_lut *degamma_lut; enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; @@ -531,8 +1358,7 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, °amma_size); ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES); - dc_plane_state->in_transfer_func->type = - TF_TYPE_DISTRIBUTED_POINTS; + dc_plane_state->in_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS; /* * This case isn't fully correct, but also fairly @@ -559,32 +1385,671 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, * map these to the atomic one instead. */ if (crtc->cm_is_degamma_srgb) - dc_plane_state->in_transfer_func->tf = tf; + dc_plane_state->in_transfer_func.tf = tf; else - dc_plane_state->in_transfer_func->tf = + dc_plane_state->in_transfer_func.tf = TRANSFER_FUNCTION_LINEAR; - r = __set_input_tf(dc_plane_state->in_transfer_func, + r = __set_input_tf(caps, &dc_plane_state->in_transfer_func, degamma_lut, degamma_size); if (r) return r; - } else if (crtc->cm_is_degamma_srgb) { + } else { /* * For legacy gamma support we need the regamma input * in linear space. Assume that the input is sRGB. */ - dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED; - dc_plane_state->in_transfer_func->tf = tf; + dc_plane_state->in_transfer_func.type = TF_TYPE_PREDEFINED; + dc_plane_state->in_transfer_func.tf = tf; if (tf != TRANSFER_FUNCTION_SRGB && - !mod_color_calculate_degamma_params(NULL, - dc_plane_state->in_transfer_func, NULL, false)) + !mod_color_calculate_degamma_params(caps, + &dc_plane_state->in_transfer_func, + NULL, false)) return -ENOMEM; - } else { - /* ...Otherwise we can just bypass the DGM block. */ - dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS; - dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; } return 0; } + +static int +__set_dm_plane_degamma(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct dc_color_caps *color_caps) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state); + const struct drm_color_lut *degamma_lut; + enum amdgpu_transfer_function tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; + uint32_t degamma_size; + bool has_degamma_lut; + int ret; + + degamma_lut = __extract_blob_lut(dm_plane_state->degamma_lut, + °amma_size); + + has_degamma_lut = degamma_lut && + !__is_lut_linear(degamma_lut, degamma_size); + + tf = dm_plane_state->degamma_tf; + + /* If we don't have plane degamma LUT nor TF to set on DC, we have + * nothing to do here, return. 
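+ * Returning -EINVAL here is not a hard failure: the caller reads it as
+ * "no plane degamma requested" and may still map CRTC degamma to the
+ * pre-blending block; only -ENOMEM aborts the atomic update.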
+ */ + if (!has_degamma_lut && tf == AMDGPU_TRANSFER_FUNCTION_DEFAULT) + return -EINVAL; + + dc_plane_state->in_transfer_func.tf = amdgpu_tf_to_dc_tf(tf); + + if (has_degamma_lut) { + ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES); + + dc_plane_state->in_transfer_func.type = + TF_TYPE_DISTRIBUTED_POINTS; + + ret = __set_input_tf(color_caps, &dc_plane_state->in_transfer_func, + degamma_lut, degamma_size); + if (ret) + return ret; + } else { + dc_plane_state->in_transfer_func.type = + TF_TYPE_PREDEFINED; + + if (!mod_color_calculate_degamma_params(color_caps, + &dc_plane_state->in_transfer_func, NULL, false)) + return -ENOMEM; + } + return 0; +} + +static int +__set_colorop_in_tf_1d_curve(struct dc_plane_state *dc_plane_state, + struct drm_colorop_state *colorop_state) +{ + struct dc_transfer_func *tf = &dc_plane_state->in_transfer_func; + struct drm_colorop *colorop = colorop_state->colorop; + struct drm_device *drm = colorop->dev; + + if (colorop->type != DRM_COLOROP_1D_CURVE) + return -EINVAL; + + if (!(BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs)) + return -EINVAL; + + if (colorop_state->bypass) { + tf->type = TF_TYPE_BYPASS; + tf->tf = TRANSFER_FUNCTION_LINEAR; + return 0; + } + + drm_dbg(drm, "Degamma colorop with ID: %d\n", colorop->base.id); + + tf->type = TF_TYPE_PREDEFINED; + tf->tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type); + + return 0; +} + +static int +__set_dm_plane_colorop_degamma(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + int i = 0; + + old_colorop = colorop; + + /* 1st op: 1d curve - degamma */ + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs)) { + colorop_state = new_colorop_state; + break; + } + } + + if (!colorop_state) + return -EINVAL; + + return __set_colorop_in_tf_1d_curve(dc_plane_state, colorop_state); +} + +static int +__set_dm_plane_colorop_3x4_matrix(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + const struct drm_device *dev = colorop->dev; + const struct drm_property_blob *blob; + struct drm_color_ctm_3x4 *ctm = NULL; + int i = 0; + + /* 3x4 matrix */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_CTM_3X4) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_CTM_3X4) { + drm_dbg(dev, "3x4 matrix colorop with ID: %d\n", colorop->base.id); + blob = colorop_state->data; + if (blob->length == sizeof(struct drm_color_ctm_3x4)) { + ctm = (struct drm_color_ctm_3x4 *) blob->data; + __drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix); + dc_plane_state->gamut_remap_matrix.enable_remap = true; + dc_plane_state->input_csc_color_matrix.enable_adjustment = false; + } else { + drm_warn(dev, "blob->length (%zu) isn't equal to drm_color_ctm_3x4 (%zu)\n", + blob->length, sizeof(struct 
drm_color_ctm_3x4)); + return -EINVAL; + } + } + + return 0; +} + +static int +__set_dm_plane_colorop_multiplier(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + const struct drm_device *dev = colorop->dev; + int i = 0; + + /* Multiplier */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_MULTIPLIER) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_MULTIPLIER) { + drm_dbg(dev, "Multiplier colorop with ID: %d\n", colorop->base.id); + dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(colorop_state->multiplier); + } + + return 0; +} + +static int +__set_dm_plane_colorop_shaper(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR; + struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func; + const struct drm_color_lut32 *shaper_lut; + struct drm_device *dev = colorop->dev; + bool enabled = false; + u32 shaper_size; + int i = 0, ret = 0; + + /* 1D Curve - SHAPER TF */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_shaper_tfs)) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE) { + drm_dbg(dev, "Shaper TF colorop with ID: %d\n", colorop->base.id); + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type); + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + ret = __set_output_tf(tf, 0, 0, false); + if (ret) + return ret; + enabled = true; + } + + /* 1D LUT - SHAPER LUT */ + colorop = old_colorop->next; + if (!colorop) { + drm_dbg(dev, "no Shaper LUT colorop found\n"); + return -EINVAL; + } + + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT) { + drm_dbg(dev, "Shaper LUT colorop with ID: %d\n", colorop->base.id); + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = default_tf; + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + shaper_lut = __extract_blob_lut32(colorop_state->data, &shaper_size); + shaper_size = shaper_lut != NULL ? 
shaper_size : 0; + + /* Custom LUT size must be the same as supported size */ + if (shaper_size == colorop->size) { + ret = __set_output_tf_32(tf, shaper_lut, shaper_size, false); + if (ret) + return ret; + enabled = true; + } + } + + if (!enabled) + tf->type = TF_TYPE_BYPASS; + + return 0; +} + +/* __set_colorop_3dlut - set DRM 3D LUT to DC stream + * @drm_lut3d: user 3D LUT + * @drm_lut3d_size: size of 3D LUT + * @lut3d: DC 3D LUT + * + * Map user 3D LUT data to DC 3D LUT and all necessary bits to program it + * on DCN accordingly. + * + * Returns: + * 0 on success. -EINVAL if drm_lut3d_size is zero. + */ +static int __set_colorop_3dlut(const struct drm_color_lut32 *drm_lut3d, + uint32_t drm_lut3d_size, + struct dc_3dlut *lut) +{ + if (!drm_lut3d_size) { + lut->state.bits.initialized = 0; + return -EINVAL; + } + + /* Only supports 17x17x17 3D LUT (12-bit) now */ + lut->lut_3d.use_12bits = true; + lut->lut_3d.use_tetrahedral_9 = false; + + lut->state.bits.initialized = 1; + __drm_3dlut32_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d, + lut->lut_3d.use_tetrahedral_9, 12); + + return 0; +} + +static int +__set_dm_plane_colorop_3dlut(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func; + struct drm_atomic_state *state = plane_state->state; + const struct amdgpu_device *adev = drm_to_adev(colorop->dev); + const struct drm_device *dev = colorop->dev; + const struct drm_color_lut32 *lut3d; + uint32_t lut3d_size; + int i = 0, ret = 0; + + /* 3D LUT */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_3D_LUT) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_3D_LUT) { + if (!adev->dm.dc->caps.color.dpp.hw_3d_lut) { + drm_dbg(dev, "3D LUT is not supported by hardware\n"); + return -EINVAL; + } + + drm_dbg(dev, "3D LUT colorop with ID: %d\n", colorop->base.id); + lut3d = __extract_blob_lut32(colorop_state->data, &lut3d_size); + lut3d_size = lut3d != NULL ? lut3d_size : 0; + ret = __set_colorop_3dlut(lut3d, lut3d_size, &dc_plane_state->lut3d_func); + if (ret) { + drm_dbg(dev, "3D LUT colorop with ID: %d has LUT size = %d\n", + colorop->base.id, lut3d_size); + return ret; + } + + /* 3D LUT requires shaper. 
If shaper colorop is bypassed, enable shaper curve + * with TRANSFER_FUNCTION_LINEAR + */ + if (tf->type == TF_TYPE_BYPASS) { + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = TRANSFER_FUNCTION_LINEAR; + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + ret = __set_output_tf_32(tf, NULL, 0, false); + } + } + + return ret; +} + +static int +__set_dm_plane_colorop_blend(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR; + struct dc_transfer_func *tf = &dc_plane_state->blend_tf; + const struct drm_color_lut32 *blend_lut = NULL; + struct drm_device *dev = colorop->dev; + uint32_t blend_size = 0; + int i = 0; + + /* 1D Curve - BLND TF */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE && + (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) { + drm_dbg(dev, "Blend TF colorop with ID: %d\n", colorop->base.id); + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type); + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + __set_input_tf_32(NULL, tf, blend_lut, blend_size); + } + + /* 1D Curve - BLND LUT */ + colorop = old_colorop->next; + if (!colorop) { + drm_dbg(dev, "no Blend LUT colorop found\n"); + return -EINVAL; + } + + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT && + (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) { + drm_dbg(dev, "Blend LUT colorop with ID: %d\n", colorop->base.id); + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = default_tf; + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + blend_lut = __extract_blob_lut32(colorop_state->data, &blend_size); + blend_size = blend_lut != NULL ? blend_size : 0; + + /* Custom LUT size must be the same as supported size */ + if (blend_size == colorop->size) + __set_input_tf_32(NULL, tf, blend_lut, blend_size); + } + + return 0; +} + +static int +amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state); + enum amdgpu_transfer_function shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; + enum amdgpu_transfer_function blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; + const struct drm_color_lut *shaper_lut, *lut3d, *blend_lut; + uint32_t shaper_size, lut3d_size, blend_size; + int ret; + + dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(dm_plane_state->hdr_mult); + + shaper_lut = __extract_blob_lut(dm_plane_state->shaper_lut, &shaper_size); + shaper_size = shaper_lut != NULL ? 
shaper_size : 0; + shaper_tf = dm_plane_state->shaper_tf; + lut3d = __extract_blob_lut(dm_plane_state->lut3d, &lut3d_size); + lut3d_size = lut3d != NULL ? lut3d_size : 0; + + amdgpu_dm_atomic_lut3d(lut3d, lut3d_size, &dc_plane_state->lut3d_func); + ret = amdgpu_dm_atomic_shaper_lut(shaper_lut, false, + amdgpu_tf_to_dc_tf(shaper_tf), + shaper_size, + &dc_plane_state->in_shaper_func); + if (ret) { + drm_dbg_kms(plane_state->plane->dev, + "setting plane %d shaper LUT failed.\n", + plane_state->plane->index); + + return ret; + } + + blend_tf = dm_plane_state->blend_tf; + blend_lut = __extract_blob_lut(dm_plane_state->blend_lut, &blend_size); + blend_size = blend_lut != NULL ? blend_size : 0; + + ret = amdgpu_dm_atomic_blend_lut(blend_lut, false, + amdgpu_tf_to_dc_tf(blend_tf), + blend_size, &dc_plane_state->blend_tf); + if (ret) { + drm_dbg_kms(plane_state->plane->dev, + "setting plane %d gamma lut failed.\n", + plane_state->plane->index); + + return ret; + } + + return 0; +} + +static int +amdgpu_dm_plane_set_colorop_properties(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state) +{ + struct drm_colorop *colorop = plane_state->color_pipeline; + struct drm_device *dev = plane_state->plane->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + int ret; + + /* 1D Curve - DEGAM TF */ + if (!colorop) + return -EINVAL; + + ret = __set_dm_plane_colorop_degamma(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + /* Multiplier */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no multiplier colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_multiplier(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + /* 3x4 matrix */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no 3x4 matrix colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_3x4_matrix(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + if (adev->dm.dc->caps.color.dpp.hw_3d_lut) { + /* 1D Curve & LUT - SHAPER TF & LUT */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no Shaper TF colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_shaper(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + /* Shaper LUT colorop is already handled, just skip here */ + colorop = colorop->next; + if (!colorop) + return -EINVAL; + + /* 3D LUT */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no 3D LUT colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_3dlut(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + } + + /* 1D Curve & LUT - BLND TF & LUT */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no Blend TF colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_blend(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + /* BLND LUT colorop is already handled, just skip here */ + colorop = colorop->next; + if (!colorop) + return -EINVAL; + + return 0; +} + +/** + * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane. + * @crtc: amdgpu_dm crtc state + * @plane_state: DRM plane state + * @dc_plane_state: target DC surface + * + * Update the underlying dc_stream_state's input transfer function (ITF) in + * preparation for hardware commit. The transfer function used depends on + * the preparation done on the stream for color management. + * + * Returns: + * 0 on success. -ENOMEM if mem allocation fails. 
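+ * -EINVAL if the shaper or 3D LUT size is invalid, or if plane and CRTC
+ * degamma are set at the same time.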
+ */ +int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, + struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state) +{ + struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev); + struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state); + struct drm_color_ctm_3x4 *ctm = NULL; + struct dc_color_caps *color_caps = NULL; + bool has_crtc_cm_degamma; + int ret; + + ret = amdgpu_dm_verify_lut3d_size(adev, plane_state); + if (ret) { + drm_dbg_driver(&adev->ddev, "amdgpu_dm_verify_lut3d_size() failed\n"); + return ret; + } + + if (dc_plane_state->ctx && dc_plane_state->ctx->dc) + color_caps = &dc_plane_state->ctx->dc->caps.color; + + /* Initially, we can just bypass the DGM block. */ + dc_plane_state->in_transfer_func.type = TF_TYPE_BYPASS; + dc_plane_state->in_transfer_func.tf = TRANSFER_FUNCTION_LINEAR; + + /* After, we start to update values according to color props */ + has_crtc_cm_degamma = (crtc->cm_has_degamma || crtc->cm_is_degamma_srgb); + + ret = __set_dm_plane_degamma(plane_state, dc_plane_state, color_caps); + if (ret == -ENOMEM) + return ret; + + /* We only have one degamma block available (pre-blending) for the + * whole color correction pipeline, so that we can't actually perform + * plane and CRTC degamma at the same time. Explicitly reject atomic + * updates when userspace sets both plane and CRTC degamma properties. + */ + if (has_crtc_cm_degamma && ret != -EINVAL) { + drm_dbg_kms(crtc->base.crtc->dev, + "doesn't support plane and CRTC degamma at the same time\n"); + return -EINVAL; + } + + /* If we are here, it means we don't have plane degamma settings, check + * if we have CRTC degamma waiting for mapping to pre-blending degamma + * block + */ + if (has_crtc_cm_degamma) { + /* + * AMD HW doesn't have post-blending degamma caps. When DRM + * CRTC atomic degamma is set, we maps it to DPP degamma block + * (pre-blending) or, on legacy gamma, we use DPP degamma to + * linearize (implicit degamma) from sRGB/BT709 according to + * the input space. + */ + ret = map_crtc_degamma_to_dc_plane(crtc, dc_plane_state, color_caps); + if (ret) + return ret; + } + + /* Setup CRTC CTM. */ + if (dm_plane_state->ctm) { + ctm = (struct drm_color_ctm_3x4 *)dm_plane_state->ctm->data; + /* + * DCN2 and older don't support both pre-blending and + * post-blending gamut remap. For this HW family, if we have + * the plane and CRTC CTMs simultaneously, CRTC CTM takes + * priority, and we discard plane CTM, as implemented in + * dcn10_program_gamut_remap(). However, DCN3+ has DPP + * (pre-blending) and MPC (post-blending) `gamut remap` blocks; + * therefore, we can program plane and CRTC CTMs together by + * mapping CRTC CTM to MPC and keeping plane CTM setup at DPP, + * as it's done by dcn30_program_gamut_remap(). + */ + __drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix); + + dc_plane_state->gamut_remap_matrix.enable_remap = true; + dc_plane_state->input_csc_color_matrix.enable_adjustment = false; + } else { + /* Bypass CTM. 
*/ + dc_plane_state->gamut_remap_matrix.enable_remap = false; + dc_plane_state->input_csc_color_matrix.enable_adjustment = false; + } + + if (!amdgpu_dm_plane_set_colorop_properties(plane_state, dc_plane_state)) + return 0; + + return amdgpu_dm_plane_set_color_properties(plane_state, dc_plane_state); +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c new file mode 100644 index 000000000000..d585618b8064 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <drm/drm_print.h> +#include <drm/drm_plane.h> +#include <drm/drm_property.h> +#include <drm/drm_colorop.h> + +#include "amdgpu.h" +#include "amdgpu_dm_colorop.h" +#include "dc.h" + +const u64 amdgpu_dm_supported_degam_tfs = + BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) | + BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV); + +const u64 amdgpu_dm_supported_shaper_tfs = + BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_BT2020_OETF) | + BIT(DRM_COLOROP_1D_CURVE_GAMMA22); + +const u64 amdgpu_dm_supported_blnd_tfs = + BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) | + BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV); + +#define MAX_COLOR_PIPELINE_OPS 10 + +#define LUT3D_SIZE 17 + +int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list) +{ + struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS]; + struct drm_device *dev = plane->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + int ret; + int i = 0; + + memset(ops, 0, sizeof(ops)); + + /* 1D curve - DEGAM TF */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, + amdgpu_dm_supported_degam_tfs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + list->type = ops[i]->base.id; + list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id); + + i++; + + /* Multiplier */ + ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = 
drm_plane_colorop_mult_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + + /* 3x4 matrix */ + ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + + if (adev->dm.dc->caps.color.dpp.hw_3d_lut) { + /* 1D curve - SHAPER TF */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, + amdgpu_dm_supported_shaper_tfs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + + /* 1D LUT - SHAPER LUT */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES, + DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + + /* 3D LUT */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_3dlut_init(dev, ops[i], plane, LUT3D_SIZE, + DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + } + + /* 1D curve - BLND TF */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, + amdgpu_dm_supported_blnd_tfs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i - 1], ops[i]); + + i++; + + /* 1D LUT - BLND LUT */ + ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES, + DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + return 0; + +cleanup: + if (ret == -ENOMEM) + drm_err(plane->dev, "KMS: Failed to allocate colorop\n"); + + drm_colorop_pipeline_destroy(dev); + + return ret; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h new file mode 100644 index 000000000000..2e1617ffc8ee --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_COLOROP_H__ +#define __AMDGPU_DM_COLOROP_H__ + +extern const u64 amdgpu_dm_supported_degam_tfs; +extern const u64 amdgpu_dm_supported_shaper_tfs; +extern const u64 amdgpu_dm_supported_blnd_tfs; + +int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list); + +#endif /* __AMDGPU_DM_COLOROP_H__*/ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 8a441a22c46e..e20aa7438066 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2015 Advanced Micro Devices, Inc. * @@ -30,6 +31,7 @@ #include "amdgpu_dm.h" #include "dc.h" #include "amdgpu_securedisplay.h" +#include "amdgpu_dm_psr.h" static const char *const pipe_crc_sources[] = { "none", @@ -83,56 +85,383 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc, } #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY -static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc) +static void update_phy_id_mapping(struct amdgpu_device *adev) +{ + struct drm_device *ddev = adev_to_drm(adev); + struct amdgpu_display_manager *dm = &adev->dm; + struct drm_connector *connector; + struct amdgpu_dm_connector *aconnector; + struct amdgpu_dm_connector *sort_connector[AMDGPU_DM_MAX_CRTC] = {NULL}; + struct drm_connector_list_iter iter; + uint8_t idx = 0, idx_2 = 0, connector_cnt = 0; + + dm->secure_display_ctx.phy_mapping_updated = false; + + mutex_lock(&ddev->mode_config.mutex); + drm_connector_list_iter_begin(ddev, &iter); + drm_for_each_connector_iter(connector, &iter) { + + if (connector->status != connector_status_connected) + continue; + + if (idx >= AMDGPU_DM_MAX_CRTC) { + DRM_WARN("%s connected connectors exceed max crtc\n", __func__); + mutex_unlock(&ddev->mode_config.mutex); + return; + } + + aconnector = to_amdgpu_dm_connector(connector); + + sort_connector[idx] = aconnector; + idx++; + connector_cnt++; + } + drm_connector_list_iter_end(&iter); + + /* sort connectors by link_enc_hw_instance first */ + for (idx = connector_cnt; idx > 1 ; idx--) { + for (idx_2 = 0; idx_2 < (idx - 1); idx_2++) { + if (sort_connector[idx_2]->dc_link->link_enc_hw_inst > + sort_connector[idx_2 + 1]->dc_link->link_enc_hw_inst) + swap(sort_connector[idx_2], sort_connector[idx_2 + 1]); + } + } + + /* + * Sort mst connectors by RAD. mst connectors with the same enc_hw_instance are already + * sorted together above. 
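+ * Within each MST root group, connectors are ordered by tree depth (LCT)
+ * first and then by port number/RAD, so the resulting order follows the
+ * physical topology.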
+ */ + for (idx = 0; idx < connector_cnt; /*Do nothing*/) { + if (sort_connector[idx]->mst_root) { + uint8_t i, j, k; + uint8_t mst_con_cnt = 1; + + for (idx_2 = (idx + 1); idx_2 < connector_cnt; idx_2++) { + if (sort_connector[idx_2]->mst_root == sort_connector[idx]->mst_root) + mst_con_cnt++; + else + break; + } + + for (i = mst_con_cnt; i > 1; i--) { + for (j = idx; j < (idx + i - 2); j++) { + int mstb_lct = sort_connector[j]->mst_output_port->parent->lct; + int next_mstb_lct = sort_connector[j + 1]->mst_output_port->parent->lct; + u8 *rad; + u8 *next_rad; + bool swap = false; + + /* Sort by mst tree depth first. Then compare RAD if depth is the same*/ + if (mstb_lct > next_mstb_lct) { + swap = true; + } else if (mstb_lct == next_mstb_lct) { + if (mstb_lct == 1) { + if (sort_connector[j]->mst_output_port->port_num > sort_connector[j + 1]->mst_output_port->port_num) + swap = true; + } else if (mstb_lct > 1) { + rad = sort_connector[j]->mst_output_port->parent->rad; + next_rad = sort_connector[j + 1]->mst_output_port->parent->rad; + + for (k = 0; k < mstb_lct - 1; k++) { + int shift = (k % 2) ? 0 : 4; + int port_num = (rad[k / 2] >> shift) & 0xf; + int next_port_num = (next_rad[k / 2] >> shift) & 0xf; + + if (port_num > next_port_num) { + swap = true; + break; + } + } + } else { + DRM_ERROR("MST LCT shouldn't be set as < 1"); + mutex_unlock(&ddev->mode_config.mutex); + return; + } + } + + if (swap) + swap(sort_connector[j], sort_connector[j + 1]); + } + } + + idx += mst_con_cnt; + } else { + idx++; + } + } + + /* Complete sorting. Assign relavant result to dm->secure_display_ctx.phy_id_mapping[]*/ + memset(dm->secure_display_ctx.phy_id_mapping, 0, sizeof(dm->secure_display_ctx.phy_id_mapping)); + for (idx = 0; idx < connector_cnt; idx++) { + aconnector = sort_connector[idx]; + + dm->secure_display_ctx.phy_id_mapping[idx].assigned = true; + dm->secure_display_ctx.phy_id_mapping[idx].is_mst = false; + dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst = aconnector->dc_link->link_enc_hw_inst; + + if (sort_connector[idx]->mst_root) { + dm->secure_display_ctx.phy_id_mapping[idx].is_mst = true; + dm->secure_display_ctx.phy_id_mapping[idx].lct = aconnector->mst_output_port->parent->lct; + dm->secure_display_ctx.phy_id_mapping[idx].port_num = aconnector->mst_output_port->port_num; + memcpy(dm->secure_display_ctx.phy_id_mapping[idx].rad, + aconnector->mst_output_port->parent->rad, sizeof(aconnector->mst_output_port->parent->rad)); + } + } + mutex_unlock(&ddev->mode_config.mutex); + + dm->secure_display_ctx.phy_id_mapping_cnt = connector_cnt; + dm->secure_display_ctx.phy_mapping_updated = true; +} + +static bool get_phy_id(struct amdgpu_display_manager *dm, + struct amdgpu_dm_connector *aconnector, uint8_t *phy_id) +{ + int idx, idx_2; + bool found = false; + + /* + * Assume secure display start after all connectors are probed. 
The connection + * config is static as well + */ + if (!dm->secure_display_ctx.phy_mapping_updated) { + DRM_WARN("%s Should update the phy id table before get it's value", __func__); + return false; + } + + for (idx = 0; idx < dm->secure_display_ctx.phy_id_mapping_cnt; idx++) { + if (!dm->secure_display_ctx.phy_id_mapping[idx].assigned) { + DRM_ERROR("phy_id_mapping[%d] should be assigned", idx); + return false; + } + + if (aconnector->dc_link->link_enc_hw_inst == + dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst) { + if (!dm->secure_display_ctx.phy_id_mapping[idx].is_mst) { + found = true; + goto out; + } else { + /* Could caused by wrongly pass mst root connector */ + if (!aconnector->mst_output_port) { + DRM_ERROR("%s Check mst case but connector without a port assigned", __func__); + return false; + } + + if (aconnector->mst_root && + aconnector->mst_root->mst_mgr.mst_primary == NULL) { + DRM_WARN("%s pass in a stale mst connector", __func__); + } + + if (aconnector->mst_output_port->parent->lct == dm->secure_display_ctx.phy_id_mapping[idx].lct && + aconnector->mst_output_port->port_num == dm->secure_display_ctx.phy_id_mapping[idx].port_num) { + if (aconnector->mst_output_port->parent->lct == 1) { + found = true; + goto out; + } else if (aconnector->mst_output_port->parent->lct > 1) { + /* Check RAD */ + for (idx_2 = 0; idx_2 < aconnector->mst_output_port->parent->lct - 1; idx_2++) { + int shift = (idx_2 % 2) ? 0 : 4; + int port_num = (aconnector->mst_output_port->parent->rad[idx_2 / 2] >> shift) & 0xf; + int port_num2 = (dm->secure_display_ctx.phy_id_mapping[idx].rad[idx_2 / 2] >> shift) & 0xf; + + if (port_num != port_num2) + break; + } + + if (idx_2 == aconnector->mst_output_port->parent->lct - 1) { + found = true; + goto out; + } + } else { + DRM_ERROR("lCT should be >= 1"); + return false; + } + } + } + } + } + +out: + if (found) { + DRM_DEBUG_DRIVER("Associated secure display PHY ID as %d", idx); + *phy_id = idx; + } else { + DRM_WARN("Can't find associated phy ID"); + return false; + } + + return true; +} + +static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream) { struct drm_device *drm_dev = crtc->dev; + struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + struct amdgpu_dm_connector *aconnector; + bool was_activated; + uint8_t phy_id; + unsigned long flags; + int i; - spin_lock_irq(&drm_dev->event_lock); - acrtc->dm_irq_params.crc_window.x_start = 0; - acrtc->dm_irq_params.crc_window.y_start = 0; - acrtc->dm_irq_params.crc_window.x_end = 0; - acrtc->dm_irq_params.crc_window.y_end = 0; - acrtc->dm_irq_params.crc_window.activated = false; - acrtc->dm_irq_params.crc_window.update_win = false; - acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0; - spin_unlock_irq(&drm_dev->event_lock); + spin_lock_irqsave(&drm_dev->event_lock, flags); + was_activated = acrtc->dm_irq_params.crc_window_activated; + for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) { + acrtc->dm_irq_params.window_param[i].x_start = 0; + acrtc->dm_irq_params.window_param[i].y_start = 0; + acrtc->dm_irq_params.window_param[i].x_end = 0; + acrtc->dm_irq_params.window_param[i].y_end = 0; + acrtc->dm_irq_params.window_param[i].enable = false; + acrtc->dm_irq_params.window_param[i].update_win = false; + acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 0; + } + acrtc->dm_irq_params.crc_window_activated = false; + spin_unlock_irqrestore(&drm_dev->event_lock, flags); + + /* Disable secure_display if it was enabled */ 
+ if (was_activated && dm->secure_display_ctx.op_mode == LEGACY_MODE) { + /* stop ROI update on this crtc */ + flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].notify_ta_work); + flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].forward_roi_work); + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + + if (aconnector && get_phy_id(dm, aconnector, &phy_id)) { + if (dm->secure_display_ctx.support_mul_roi) + dc_stream_forward_multiple_crc_window(stream, NULL, phy_id, true); + else + dc_stream_forward_crc_window(stream, NULL, phy_id, true); + } else { + DRM_DEBUG_DRIVER("%s Can't find matching phy id", __func__); + } + } } static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work) { - struct crc_rd_work *crc_rd_wrk; - struct amdgpu_device *adev; + struct secure_display_crtc_context *crtc_ctx; struct psp_context *psp; - struct securedisplay_cmd *securedisplay_cmd; + struct ta_securedisplay_cmd *securedisplay_cmd; struct drm_crtc *crtc; - uint8_t phy_id; + struct dc_stream_state *stream; + struct amdgpu_dm_connector *aconnector; + uint8_t phy_inst; + struct amdgpu_display_manager *dm; + struct crc_data crc_cpy[MAX_CRC_WINDOW_NUM]; + unsigned long flags; + uint8_t roi_idx = 0; int ret; + int i; - crc_rd_wrk = container_of(work, struct crc_rd_work, notify_ta_work); - spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); - crtc = crc_rd_wrk->crtc; + crtc_ctx = container_of(work, struct secure_display_crtc_context, notify_ta_work); + crtc = crtc_ctx->crtc; - if (!crtc) { - spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); + if (!crtc) + return; + + psp = &drm_to_adev(crtc->dev)->psp; + + if (!psp->securedisplay_context.context.initialized) { + DRM_DEBUG_DRIVER("Secure Display fails to notify PSP TA\n"); return; } - adev = drm_to_adev(crtc->dev); - psp = &adev->psp; - phy_id = crc_rd_wrk->phy_inst; - spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); - - psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, - TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); - securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = - phy_id; - ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); + dm = &drm_to_adev(crtc->dev)->dm; + stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream; + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + if (!aconnector) + return; + + mutex_lock(&crtc->dev->mode_config.mutex); + if (!get_phy_id(dm, aconnector, &phy_inst)) { + DRM_WARN("%s Can't find mapping phy id!", __func__); + mutex_unlock(&crtc->dev->mode_config.mutex); + return; + } + mutex_unlock(&crtc->dev->mode_config.mutex); + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + memcpy(crc_cpy, crtc_ctx->crc_info.crc, sizeof(struct crc_data) * MAX_CRC_WINDOW_NUM); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + + /* need lock for multiple crtcs to use the command buffer */ + mutex_lock(&psp->securedisplay_context.mutex); + /* PSP TA is expected to finish data transmission over I2C within current frame, + * even there are up to 4 crtcs request to send in this frame. 
+ */ + if (dm->secure_display_ctx.support_mul_roi) { + psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, + TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2); + + securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.phy_id = phy_inst; + + for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) { + if (crc_cpy[i].crc_ready) + roi_idx |= 1 << i; + } + securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.roi_idx = roi_idx; + + ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2); + } else { + psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, + TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); + + securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst; + + ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); + } + if (!ret) { - if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { + if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); - } } + + mutex_unlock(&psp->securedisplay_context.mutex); +} + +static void +amdgpu_dm_forward_crc_window(struct work_struct *work) +{ + struct secure_display_crtc_context *crtc_ctx; + struct amdgpu_display_manager *dm; + struct drm_crtc *crtc; + struct dc_stream_state *stream; + struct amdgpu_dm_connector *aconnector; + struct crc_window roi_cpy[MAX_CRC_WINDOW_NUM]; + unsigned long flags; + uint8_t phy_id; + + crtc_ctx = container_of(work, struct secure_display_crtc_context, forward_roi_work); + crtc = crtc_ctx->crtc; + + if (!crtc) + return; + + dm = &drm_to_adev(crtc->dev)->dm; + stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream; + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + + if (!aconnector) + return; + + mutex_lock(&crtc->dev->mode_config.mutex); + if (!get_phy_id(dm, aconnector, &phy_id)) { + DRM_WARN("%s Can't find mapping phy id!", __func__); + mutex_unlock(&crtc->dev->mode_config.mutex); + return; + } + mutex_unlock(&crtc->dev->mode_config.mutex); + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + memcpy(roi_cpy, crtc_ctx->roi, sizeof(struct crc_window) * MAX_CRC_WINDOW_NUM); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + + mutex_lock(&dm->dc_lock); + if (dm->secure_display_ctx.support_mul_roi) + dc_stream_forward_multiple_crc_window(stream, roi_cpy, + phy_id, false); + else + dc_stream_forward_crc_window(stream, &roi_cpy[0].rect, + phy_id, false); + mutex_unlock(&dm->dc_lock); } bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc) @@ -142,7 +471,7 @@ bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc) bool ret = false; spin_lock_irq(&drm_dev->event_lock); - ret = acrtc->dm_irq_params.crc_window.activated; + ret = acrtc->dm_irq_params.crc_window_activated; spin_unlock_irq(&drm_dev->event_lock); return ret; @@ -180,24 +509,14 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, mutex_lock(&adev->dm.dc_lock); - /* Enable CRTC CRC generation if necessary. 
*/ + /* For PSR1, check that the panel has exited PSR */ + if (stream_state->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1) + amdgpu_dm_psr_wait_disable(stream_state); + + /* Enable or disable CRTC CRC generation */ if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) { -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - if (!enable) { - if (adev->dm.crc_rd_wrk) { - flush_work(&adev->dm.crc_rd_wrk->notify_ta_work); - spin_lock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock); - if (adev->dm.crc_rd_wrk->crtc == crtc) { - dc_stream_stop_dmcu_crc_win_update(stream_state->ctx->dc, - dm_crtc_state->stream); - adev->dm.crc_rd_wrk->crtc = NULL; - } - spin_unlock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock); - } - } -#endif if (!dc_stream_configure_crc(stream_state->ctx->dc, - stream_state, NULL, enable, enable)) { + stream_state, NULL, enable, enable, 0, true)) { ret = -EINVAL; goto unlock; } @@ -228,6 +547,10 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) struct drm_crtc_commit *commit; struct dm_crtc_state *crtc_state; struct drm_device *drm_dev = crtc->dev; +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + struct amdgpu_device *adev = drm_to_adev(drm_dev); + struct amdgpu_display_manager *dm = &adev->dm; +#endif struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct drm_dp_aux *aux = NULL; bool enable = false; @@ -296,6 +619,9 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) if (!connector->state || connector->state->crtc != crtc) continue; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconn = to_amdgpu_dm_connector(connector); break; } @@ -307,7 +633,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) goto cleanup; } - aux = (aconn->port) ? &aconn->port->aux : &aconn->dm_dp_aux.aux; + aux = (aconn->mst_output_port) ? &aconn->mst_output_port->aux : &aconn->dm_dp_aux.aux; if (!aux) { DRM_DEBUG_DRIVER("No dp aux for amd connector\n"); @@ -324,15 +650,6 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) } -#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - amdgpu_dm_set_crc_window_default(crtc); -#endif - - if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) { - ret = -EINVAL; - goto cleanup; - } - /* * Reading the CRC requires the vblank interrupt handler to be * enabled. Keep a reference until CRC capture stops. 
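The update_phy_id_mapping() sort added earlier in this file orders MST connectors by tree depth (LCT) first and then hop-by-hop through the packed relative-address (RAD) nibbles. Below is a minimal, self-contained sketch of that ordering rule; it uses simplified stand-in structs rather than the kernel's drm_dp_mst_port/amdgpu_dm_connector types, so treat it as an illustration of the comparator, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the fields the driver compares. */
struct mst_port_info {
	uint8_t lct;       /* link count total: tree depth, 1 = directly below the root */
	uint8_t port_num;  /* port number on the immediate parent branch */
	uint8_t rad[8];    /* relative address: two 4-bit hops packed per byte */
};

/* Return true when 'a' should sort after 'b' (i.e. a swap is needed). */
static bool mst_port_after(const struct mst_port_info *a,
			   const struct mst_port_info *b)
{
	int k;

	if (a->lct != b->lct)          /* shallower ports sort first */
		return a->lct > b->lct;

	if (a->lct == 1)               /* both directly below the root: compare port numbers */
		return a->port_num > b->port_num;

	for (k = 0; k < a->lct - 1; k++) {
		int shift = (k % 2) ? 0 : 4;                /* high nibble holds the earlier hop */
		int hop_a = (a->rad[k / 2] >> shift) & 0xf;
		int hop_b = (b->rad[k / 2] >> shift) & 0xf;

		if (hop_a != hop_b)
			return hop_a > hop_b;
	}

	return false;                  /* identical addresses keep their relative order */
}

int main(void)
{
	struct mst_port_info a = { .lct = 2, .rad = { 0x20 } }; /* first hop = port 2 */
	struct mst_port_info b = { .lct = 2, .rad = { 0x10 } }; /* first hop = port 1 */

	printf("swap needed: %s\n", mst_port_after(&a, &b) ? "yes" : "no");
	return 0;
}

The driver applies the same comparison inside a simple bubble-sort pass; connectors sharing an MST root were already grouped together by the preceding sort on link_enc_hw_inst, so only those groups get reordered by the RAD comparison.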
@@ -342,7 +659,19 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) ret = drm_crtc_vblank_get(crtc); if (ret) goto cleanup; + } + +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + /* Reset secure_display when we change crc source from debugfs */ + amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream); +#endif + + if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) { + ret = -EINVAL; + goto cleanup; + } + if (!enabled && enable) { if (dm_is_crc_source_dprx(source)) { if (drm_dp_start_crc(aux, crtc)) { DRM_DEBUG_DRIVER("dp start crc failed\n"); @@ -368,6 +697,13 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) /* Reset crc_skipped on dm state */ crtc_state->crc_skip_count = 0; +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + /* Initialize phy id mapping table for secure display*/ + if (dm->secure_display_ctx.op_mode == LEGACY_MODE && + !dm->secure_display_ctx.phy_mapping_updated) + update_phy_id_mapping(adev); +#endif + cleanup: if (commit) drm_crtc_commit_put(commit); @@ -422,7 +758,7 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) } if (dm_is_crc_source_crtc(cur_crc_src)) { - if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, + if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, 0, &crcs[0], &crcs[1], &crcs[2])) return; @@ -434,19 +770,21 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc) { - struct dc_stream_state *stream_state; struct drm_device *drm_dev = NULL; enum amdgpu_dm_pipe_crc_source cur_crc_src; struct amdgpu_crtc *acrtc = NULL; struct amdgpu_device *adev = NULL; - struct crc_rd_work *crc_rd_wrk = NULL; - struct crc_params *crc_window = NULL, tmp_window; - unsigned long flags1, flags2; - struct crtc_position position; - uint32_t v_blank; - uint32_t v_back_porch; - uint32_t crc_window_latch_up_line; - struct dc_crtc_timing *timing_out; + struct secure_display_crtc_context *crtc_ctx = NULL; + bool reset_crc_frame_count[MAX_CRC_WINDOW_NUM] = {false}; + uint32_t crc_r[MAX_CRC_WINDOW_NUM] = {0}; + uint32_t crc_g[MAX_CRC_WINDOW_NUM] = {0}; + uint32_t crc_b[MAX_CRC_WINDOW_NUM] = {0}; + unsigned long flags1; + bool forward_roi_change = false; + bool notify_ta = false; + bool all_crc_ready = true; + struct dc_stream_state *stream_state; + int i; if (crtc == NULL) return; @@ -454,97 +792,160 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc) acrtc = to_amdgpu_crtc(crtc); adev = drm_to_adev(crtc->dev); drm_dev = crtc->dev; + stream_state = to_dm_crtc_state(crtc->state)->stream; spin_lock_irqsave(&drm_dev->event_lock, flags1); - stream_state = acrtc->dm_irq_params.stream; cur_crc_src = acrtc->dm_irq_params.crc_src; - timing_out = &stream_state->timing; /* Early return if CRC capture is not enabled. 
*/ - if (!amdgpu_dm_is_valid_crc_source(cur_crc_src)) - goto cleanup; + if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) || + !dm_is_crc_source_crtc(cur_crc_src)) { + spin_unlock_irqrestore(&drm_dev->event_lock, flags1); + return; + } - if (dm_is_crc_source_crtc(cur_crc_src)) { - if (acrtc->dm_irq_params.crc_window.activated) { - if (acrtc->dm_irq_params.crc_window.update_win) { - if (acrtc->dm_irq_params.crc_window.skip_frame_cnt) { - acrtc->dm_irq_params.crc_window.skip_frame_cnt -= 1; - goto cleanup; - } - crc_window = &tmp_window; - - tmp_window.windowa_x_start = - acrtc->dm_irq_params.crc_window.x_start; - tmp_window.windowa_y_start = - acrtc->dm_irq_params.crc_window.y_start; - tmp_window.windowa_x_end = - acrtc->dm_irq_params.crc_window.x_end; - tmp_window.windowa_y_end = - acrtc->dm_irq_params.crc_window.y_end; - tmp_window.windowb_x_start = - acrtc->dm_irq_params.crc_window.x_start; - tmp_window.windowb_y_start = - acrtc->dm_irq_params.crc_window.y_start; - tmp_window.windowb_x_end = - acrtc->dm_irq_params.crc_window.x_end; - tmp_window.windowb_y_end = - acrtc->dm_irq_params.crc_window.y_end; - - dc_stream_forward_dmcu_crc_window(stream_state->ctx->dc, - stream_state, crc_window); - - acrtc->dm_irq_params.crc_window.update_win = false; - - dc_stream_get_crtc_position(stream_state->ctx->dc, &stream_state, 1, - &position.vertical_count, - &position.nominal_vcount); - - v_blank = timing_out->v_total - timing_out->v_border_top - - timing_out->v_addressable - timing_out->v_border_bottom; - - v_back_porch = v_blank - timing_out->v_front_porch - - timing_out->v_sync_width; - - crc_window_latch_up_line = v_back_porch + timing_out->v_sync_width; - - /* take 3 lines margin*/ - if ((position.vertical_count + 3) >= crc_window_latch_up_line) - acrtc->dm_irq_params.crc_window.skip_frame_cnt = 1; - else - acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0; - } else { - if (acrtc->dm_irq_params.crc_window.skip_frame_cnt == 0) { - if (adev->dm.crc_rd_wrk) { - crc_rd_wrk = adev->dm.crc_rd_wrk; - spin_lock_irqsave(&crc_rd_wrk->crc_rd_work_lock, flags2); - crc_rd_wrk->phy_inst = - stream_state->link->link_enc_hw_inst; - spin_unlock_irqrestore(&crc_rd_wrk->crc_rd_work_lock, flags2); - schedule_work(&crc_rd_wrk->notify_ta_work); - } - } else { - acrtc->dm_irq_params.crc_window.skip_frame_cnt -= 1; - } - } + if (!acrtc->dm_irq_params.crc_window_activated) { + spin_unlock_irqrestore(&drm_dev->event_lock, flags1); + return; + } + + crtc_ctx = &adev->dm.secure_display_ctx.crtc_ctx[acrtc->crtc_id]; + if (WARN_ON(crtc_ctx->crtc != crtc)) { + /* We have set the crtc when creating secure_display_crtc_context, + * don't expect it to be changed here. 
+ */ + crtc_ctx->crtc = crtc; + } + + for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) { + struct crc_params crc_window = { + .windowa_x_start = acrtc->dm_irq_params.window_param[i].x_start, + .windowa_y_start = acrtc->dm_irq_params.window_param[i].y_start, + .windowa_x_end = acrtc->dm_irq_params.window_param[i].x_end, + .windowa_y_end = acrtc->dm_irq_params.window_param[i].y_end, + .windowb_x_start = acrtc->dm_irq_params.window_param[i].x_start, + .windowb_y_start = acrtc->dm_irq_params.window_param[i].y_start, + .windowb_x_end = acrtc->dm_irq_params.window_param[i].x_end, + .windowb_y_end = acrtc->dm_irq_params.window_param[i].y_end, + }; + + crtc_ctx->roi[i].enable = acrtc->dm_irq_params.window_param[i].enable; + + if (!acrtc->dm_irq_params.window_param[i].enable) { + crtc_ctx->crc_info.crc[i].crc_ready = false; + continue; + } + + if (acrtc->dm_irq_params.window_param[i].skip_frame_cnt) { + acrtc->dm_irq_params.window_param[i].skip_frame_cnt -= 1; + crtc_ctx->crc_info.crc[i].crc_ready = false; + continue; + } + + if (acrtc->dm_irq_params.window_param[i].update_win) { + crtc_ctx->roi[i].rect.x = crc_window.windowa_x_start; + crtc_ctx->roi[i].rect.y = crc_window.windowa_y_start; + crtc_ctx->roi[i].rect.width = crc_window.windowa_x_end - + crc_window.windowa_x_start; + crtc_ctx->roi[i].rect.height = crc_window.windowa_y_end - + crc_window.windowa_y_start; + + if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE) + /* forward task to dmub to update ROI */ + forward_roi_change = true; + else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE) + /* update ROI via dm*/ + dc_stream_configure_crc(stream_state->ctx->dc, stream_state, + &crc_window, true, true, i, false); + + reset_crc_frame_count[i] = true; + + acrtc->dm_irq_params.window_param[i].update_win = false; + + /* Statically skip 1 frame, because we may need to wait below things + * before sending ROI to dmub: + * 1. We defer the work by using system workqueue. + * 2. We may need to wait for dc_lock before accessing dmub. + */ + acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 1; + crtc_ctx->crc_info.crc[i].crc_ready = false; + } else { + if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, i, + &crc_r[i], &crc_g[i], &crc_b[i])) + DRM_ERROR("Secure Display: fail to get crc from engine %d\n", i); + + if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE) + /* forward task to psp to read ROI/CRC and output via I2C */ + notify_ta = true; + else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE) + /* Avoid ROI window get changed, keep overwriting. 
*/ + dc_stream_configure_crc(stream_state->ctx->dc, stream_state, + &crc_window, true, true, i, false); + + /* crc ready for psp to read out */ + crtc_ctx->crc_info.crc[i].crc_ready = true; } } -cleanup: spin_unlock_irqrestore(&drm_dev->event_lock, flags1); + + if (forward_roi_change) + schedule_work(&crtc_ctx->forward_roi_work); + + if (notify_ta) + schedule_work(&crtc_ctx->notify_ta_work); + + spin_lock_irqsave(&crtc_ctx->crc_info.lock, flags1); + for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) { + crtc_ctx->crc_info.crc[i].crc_R = crc_r[i]; + crtc_ctx->crc_info.crc[i].crc_G = crc_g[i]; + crtc_ctx->crc_info.crc[i].crc_B = crc_b[i]; + + if (!crtc_ctx->roi[i].enable) { + crtc_ctx->crc_info.crc[i].frame_count = 0; + continue; + } + + if (!crtc_ctx->crc_info.crc[i].crc_ready) + all_crc_ready = false; + + if (reset_crc_frame_count[i] || crtc_ctx->crc_info.crc[i].frame_count == UINT_MAX) + /* Reset the reference frame count after user update the ROI + * or it reaches the maximum value. + */ + crtc_ctx->crc_info.crc[i].frame_count = 0; + else + crtc_ctx->crc_info.crc[i].frame_count += 1; + } + spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags1); + + if (all_crc_ready) + complete_all(&crtc_ctx->crc_info.completion); } -struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void) +void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev) { - struct crc_rd_work *crc_rd_wrk = NULL; + struct secure_display_crtc_context *crtc_ctx = NULL; + int i; - crc_rd_wrk = kzalloc(sizeof(*crc_rd_wrk), GFP_KERNEL); + crtc_ctx = kcalloc(adev->mode_info.num_crtc, + sizeof(struct secure_display_crtc_context), + GFP_KERNEL); - if (!crc_rd_wrk) - return NULL; + if (!crtc_ctx) { + adev->dm.secure_display_ctx.crtc_ctx = NULL; + return; + } + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + INIT_WORK(&crtc_ctx[i].forward_roi_work, amdgpu_dm_forward_crc_window); + INIT_WORK(&crtc_ctx[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read); + crtc_ctx[i].crtc = &adev->mode_info.crtcs[i]->base; + spin_lock_init(&crtc_ctx[i].crc_info.lock); + } - spin_lock_init(&crc_rd_wrk->crc_rd_work_lock); - INIT_WORK(&crc_rd_wrk->notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read); + adev->dm.secure_display_ctx.crtc_ctx = crtc_ctx; - return crc_rd_wrk; + adev->dm.secure_display_ctx.op_mode = DISPLAY_CRC_MODE; } #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h index f07850db60a6..95bdb8699d7f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2019 Advanced Micro Devices, Inc. 
* @@ -40,25 +41,76 @@ enum amdgpu_dm_pipe_crc_source { }; #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY -struct crc_window_parm { +#define MAX_CRTC 6 + +enum secure_display_mode { + /* via dmub + psp */ + LEGACY_MODE = 0, + /* driver directly */ + DISPLAY_CRC_MODE, + SECURE_DISPLAY_MODE_MAX, +}; + +struct phy_id_mapping { + bool assigned; + bool is_mst; + uint8_t enc_hw_inst; + u8 lct; + u8 port_num; + u8 rad[8]; +}; + +struct crc_data { + uint32_t crc_R; + uint32_t crc_G; + uint32_t crc_B; + uint32_t frame_count; + bool crc_ready; +}; + +struct crc_info { + struct crc_data crc[MAX_CRC_WINDOW_NUM]; + struct completion completion; + spinlock_t lock; +}; + +struct crc_window_param { uint16_t x_start; uint16_t y_start; uint16_t x_end; uint16_t y_end; - /* CRC windwo is activated or not*/ - bool activated; + /* CRC window is activated or not*/ + bool enable; /* Update crc window during vertical blank or not */ bool update_win; /* skip reading/writing for few frames */ int skip_frame_cnt; }; -struct crc_rd_work { +struct secure_display_crtc_context { + /* work to notify PSP TA*/ struct work_struct notify_ta_work; - /* To protect crc_rd_work carried fields*/ - spinlock_t crc_rd_work_lock; + + /* work to forward ROI to dmcu/dmub */ + struct work_struct forward_roi_work; + struct drm_crtc *crtc; - uint8_t phy_inst; + + /* Region of Interest (ROI) */ + struct crc_window roi[MAX_CRC_WINDOW_NUM]; + + struct crc_info crc_info; +}; + +struct secure_display_context { + + struct secure_display_crtc_context *crtc_ctx; + /* Whether dmub support multiple ROI setting */ + bool support_mul_roi; + enum secure_display_mode op_mode; + bool phy_mapping_updated; + int phy_id_mapping_cnt; + struct phy_id_mapping phy_id_mapping[MAX_CRTC]; }; #endif @@ -90,11 +142,11 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc); #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc); void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc); -struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void); +void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev); #else #define amdgpu_dm_crc_window_is_activated(x) #define amdgpu_dm_crtc_handle_crc_window_irq(x) -#define amdgpu_dm_crtc_secure_display_create_work() +#define amdgpu_dm_crtc_secure_display_create_contexts(x) #endif #endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 594fe8a4d02b..697e232acebf 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -29,12 +29,16 @@ #include "dc.h" #include "amdgpu.h" #include "amdgpu_dm_psr.h" +#include "amdgpu_dm_replay.h" #include "amdgpu_dm_crtc.h" #include "amdgpu_dm_plane.h" #include "amdgpu_dm_trace.h" #include "amdgpu_dm_debugfs.h" -void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc) +#define HPD_DETECTION_PERIOD_uS 2000000 +#define HPD_DETECTION_TIME_uS 100000 + +void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc) { struct drm_crtc *crtc = &acrtc->base; struct drm_device *dev = crtc->dev; @@ -54,14 +58,14 @@ void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc) spin_unlock_irqrestore(&dev->event_lock, flags); } -bool modeset_required(struct drm_crtc_state *crtc_state, +bool amdgpu_dm_crtc_modeset_required(struct drm_crtc_state *crtc_state, struct dc_stream_state *new_stream, struct dc_stream_state *old_stream) { return 
crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); } -bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc) +bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc) { return acrtc->dm_irq_params.freesync_config.state == @@ -70,13 +74,16 @@ bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc) VRR_STATE_ACTIVE_FIXED; } -int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable) +int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable) { enum dc_irq_source irq_source; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct amdgpu_device *adev = drm_to_adev(crtc->dev); int rc; + if (acrtc->otg_inst == -1) + return 0; + irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst; rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; @@ -86,17 +93,163 @@ int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable) return rc; } -bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state) +bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state) { return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE || dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; } -static void vblank_control_worker(struct work_struct *work) +/** + * amdgpu_dm_crtc_set_panel_sr_feature() - Manage panel self-refresh features. + * + * @vblank_work: is a pointer to a struct vblank_control_work object. + * @vblank_enabled: indicates whether the DRM vblank counter is currently + * enabled (true) or disabled (false). + * @allow_sr_entry: represents whether entry into the self-refresh mode is + * allowed (true) or not allowed (false). + * + * The DRM vblank counter enable/disable action is used as the trigger to enable + * or disable various panel self-refresh features: + * + * Panel Replay and PSR SU + * - Enable when: + * - VRR is disabled + * - vblank counter is disabled + * - entry is allowed: usermode demonstrates an adequate number of fast + * commits) + * - CRC capture window isn't active + * - Keep enabled even when vblank counter gets enabled + * + * PSR1 + * - Enable condition same as above + * - Disable when vblank counter is enabled + */ +static void amdgpu_dm_crtc_set_panel_sr_feature( + struct vblank_control_work *vblank_work, + bool vblank_enabled, bool allow_sr_entry) +{ + struct dc_link *link = vblank_work->stream->link; + bool is_sr_active = (link->replay_settings.replay_allow_active || + link->psr_settings.psr_allow_active); + bool is_crc_window_active = false; + bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(vblank_work->acrtc); + +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY + is_crc_window_active = + amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base); +#endif + + if (link->replay_settings.replay_feature_enabled && !vrr_active && + allow_sr_entry && !is_sr_active && !is_crc_window_active) { + amdgpu_dm_replay_enable(vblank_work->stream, true); + } else if (vblank_enabled) { + if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active) + amdgpu_dm_psr_disable(vblank_work->stream, false); + } else if (link->psr_settings.psr_feature_enabled && !vrr_active && + allow_sr_entry && !is_sr_active && !is_crc_window_active) { + + struct amdgpu_dm_connector *aconn = + (struct amdgpu_dm_connector *) vblank_work->stream->dm_stream_context; + + if (!aconn->disallow_edp_enter_psr) { + struct amdgpu_display_manager *dm = vblank_work->dm; + + amdgpu_dm_psr_enable(vblank_work->stream); + if (dm->idle_workqueue && + (dm->dc->config.disable_ips == DMUB_IPS_ENABLE) && + dm->dc->idle_optimizations_allowed && + dm->idle_workqueue->enable && + 
!dm->idle_workqueue->running) + schedule_work(&dm->idle_workqueue->work); + } + } +} + +bool amdgpu_dm_is_headless(struct amdgpu_device *adev) +{ + struct drm_connector *connector; + struct drm_connector_list_iter iter; + struct drm_device *dev; + bool is_headless = true; + + if (adev == NULL) + return true; + + dev = adev->dm.ddev; + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + if (connector->status == connector_status_connected) { + is_headless = false; + break; + } + } + drm_connector_list_iter_end(&iter); + return is_headless; +} + +static void amdgpu_dm_idle_worker(struct work_struct *work) +{ + struct idle_workqueue *idle_work; + + idle_work = container_of(work, struct idle_workqueue, work); + idle_work->dm->idle_workqueue->running = true; + + while (idle_work->enable) { + fsleep(HPD_DETECTION_PERIOD_uS); + mutex_lock(&idle_work->dm->dc_lock); + if (!idle_work->dm->dc->idle_optimizations_allowed) { + mutex_unlock(&idle_work->dm->dc_lock); + break; + } + dc_allow_idle_optimizations(idle_work->dm->dc, false); + + mutex_unlock(&idle_work->dm->dc_lock); + fsleep(HPD_DETECTION_TIME_uS); + mutex_lock(&idle_work->dm->dc_lock); + + if (!amdgpu_dm_is_headless(idle_work->dm->adev) && + !amdgpu_dm_psr_is_active_allowed(idle_work->dm)) { + mutex_unlock(&idle_work->dm->dc_lock); + break; + } + + if (idle_work->enable) { + dc_post_update_surfaces_to_stream(idle_work->dm->dc); + dc_allow_idle_optimizations(idle_work->dm->dc, true); + } + mutex_unlock(&idle_work->dm->dc_lock); + } + idle_work->dm->idle_workqueue->running = false; +} + +struct idle_workqueue *idle_create_workqueue(struct amdgpu_device *adev) +{ + struct idle_workqueue *idle_work; + + idle_work = kzalloc(sizeof(*idle_work), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(idle_work)) + return NULL; + + idle_work->dm = &adev->dm; + idle_work->enable = false; + idle_work->running = false; + INIT_WORK(&idle_work->work, amdgpu_dm_idle_worker); + + return idle_work; +} + +static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) { struct vblank_control_work *vblank_work = container_of(work, struct vblank_control_work, work); struct amdgpu_display_manager *dm = vblank_work->dm; + struct amdgpu_device *adev = drm_to_adev(dm->ddev); + int r; mutex_lock(&dm->dc_lock); @@ -105,10 +258,8 @@ static void vblank_control_worker(struct work_struct *work) else if (dm->active_vblank_irq_count) dm->active_vblank_irq_count--; - dc_allow_idle_optimizations( - dm->dc, dm->active_vblank_irq_count == 0 ? true : false); - - DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); + if (dm->active_vblank_irq_count > 0) + dc_allow_idle_optimizations(dm->dc, false); /* * Control PSR based on vblank requirements from OS @@ -120,16 +271,24 @@ static void vblank_control_worker(struct work_struct *work) * where the SU region is the full hactive*vactive region. See * fill_dc_dirty_rects(). 
*/ - if (vblank_work->stream && vblank_work->stream->link) { - if (vblank_work->enable) { - if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && - vblank_work->stream->link->psr_settings.psr_allow_active) - amdgpu_dm_psr_disable(vblank_work->stream); - } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled && - !vblank_work->stream->link->psr_settings.psr_allow_active && - vblank_work->acrtc->dm_irq_params.allow_psr_entry) { - amdgpu_dm_psr_enable(vblank_work->stream); - } + if (vblank_work->stream && vblank_work->stream->link && vblank_work->acrtc) { + amdgpu_dm_crtc_set_panel_sr_feature( + vblank_work, vblank_work->enable, + vblank_work->acrtc->dm_irq_params.allow_sr_entry); + } + + if (dm->active_vblank_irq_count == 0) { + dc_post_update_surfaces_to_stream(dm->dc); + + r = amdgpu_dpm_pause_power_profile(adev, true); + if (r) + dev_warn(adev->dev, "failed to set default power profile mode\n"); + + dc_allow_idle_optimizations(dm->dc, true); + + r = amdgpu_dpm_pause_power_profile(adev, false); + if (r) + dev_warn(adev->dev, "failed to restore the power profile mode\n"); } mutex_unlock(&dm->dc_lock); @@ -139,32 +298,106 @@ static void vblank_control_worker(struct work_struct *work) kfree(vblank_work); } -static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) +static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable) { - enum dc_irq_source irq_source; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); struct amdgpu_display_manager *dm = &adev->dm; struct vblank_control_work *work; + int irq_type; int rc = 0; + if (enable && !acrtc->base.enabled) { + drm_dbg_vbl(crtc->dev, + "Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n", + acrtc->crtc_id, acrtc->base.enabled); + return -EINVAL; + } + + irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); + if (enable) { - /* vblank irq on -> Only need vupdate irq in vrr mode */ - if (amdgpu_dm_vrr_active(acrtc_state)) - rc = dm_set_vupdate_irq(crtc, true); + struct dc *dc = adev->dm.dc; + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); + struct psr_settings *psr = &acrtc_state->stream->link->psr_settings; + struct replay_settings *pr = &acrtc_state->stream->link->replay_settings; + bool sr_supported = (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED) || + pr->config.replay_supported; + + /* + * IPS & self-refresh feature can cause vblank counter resets between + * vblank disable and enable. + * It may cause system stuck due to waiting for the vblank counter. + * Call this function to estimate missed vblanks by using timestamps and + * update the vblank counter in DRM. 
+ */ + if (dc->caps.ips_support && + dc->config.disable_ips != DMUB_IPS_DISABLE_ALL && + sr_supported && vblank->config.disable_immediate) + drm_crtc_vblank_restore(crtc); + } + + if (dc_supports_vrr(dm->dc->ctx->dce_version)) { + if (enable) { + /* vblank irq on -> Only need vupdate irq in vrr mode */ + if (amdgpu_dm_crtc_vrr_active(acrtc_state)) + rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true); + } else { + /* vblank irq off -> vupdate irq off */ + rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false); + } + } + + if (rc) + return rc; + + /* crtc vblank or vstartup interrupt */ + if (enable) { + rc = amdgpu_irq_get(adev, &adev->crtc_irq, irq_type); + drm_dbg_vbl(crtc->dev, "Get crtc_irq ret=%d\n", rc); + } else { + rc = amdgpu_irq_put(adev, &adev->crtc_irq, irq_type); + drm_dbg_vbl(crtc->dev, "Put crtc_irq ret=%d\n", rc); + } + + if (rc) + return rc; + + /* + * hubp surface flip interrupt + * + * We have no guarantee that the frontend index maps to the same + * backend index - some even map to more than one. + * + * TODO: Use a different interrupt or check DC itself for the mapping. + */ + if (enable) { + rc = amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type); + drm_dbg_vbl(crtc->dev, "Get pageflip_irq ret=%d\n", rc); } else { - /* vblank irq off -> vupdate irq off */ - rc = dm_set_vupdate_irq(crtc, false); + rc = amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type); + drm_dbg_vbl(crtc->dev, "Put pageflip_irq ret=%d\n", rc); } if (rc) return rc; - irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + /* crtc vline0 interrupt, only available on DCN+ */ + if (amdgpu_ip_version(adev, DCE_HWIP, 0) != 0) { + if (enable) { + rc = amdgpu_irq_get(adev, &adev->vline0_irq, irq_type); + drm_dbg_vbl(crtc->dev, "Get vline0_irq ret=%d\n", rc); + } else { + rc = amdgpu_irq_put(adev, &adev->vline0_irq, irq_type); + drm_dbg_vbl(crtc->dev, "Put vline0_irq ret=%d\n", rc); + } - if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) - return -EBUSY; + if (rc) + return rc; + } +#endif if (amdgpu_in_reset(adev)) return 0; @@ -174,7 +407,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) if (!work) return -ENOMEM; - INIT_WORK(&work->work, vblank_control_worker); + INIT_WORK(&work->work, amdgpu_dm_crtc_vblank_control_worker); work->dm = dm; work->acrtc = acrtc; work->enable = enable; @@ -190,17 +423,17 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) return 0; } -int dm_enable_vblank(struct drm_crtc *crtc) +int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc) { - return dm_set_vblank(crtc, true); + return amdgpu_dm_crtc_set_vblank(crtc, true); } -void dm_disable_vblank(struct drm_crtc *crtc) +void amdgpu_dm_crtc_disable_vblank(struct drm_crtc *crtc) { - dm_set_vblank(crtc, false); + amdgpu_dm_crtc_set_vblank(crtc, false); } -static void dm_crtc_destroy_state(struct drm_crtc *crtc, +static void amdgpu_dm_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) { struct dm_crtc_state *cur = to_dm_crtc_state(state); @@ -216,7 +449,7 @@ static void dm_crtc_destroy_state(struct drm_crtc *crtc, kfree(state); } -static struct drm_crtc_state *dm_crtc_duplicate_state(struct drm_crtc *crtc) +static struct drm_crtc_state *amdgpu_dm_crtc_duplicate_state(struct drm_crtc *crtc) { struct dm_crtc_state *state, *cur; @@ -243,8 +476,10 @@ static struct drm_crtc_state *dm_crtc_duplicate_state(struct drm_crtc *crtc) state->freesync_config = cur->freesync_config; state->cm_has_degamma = cur->cm_has_degamma; state->cm_is_degamma_srgb 
= cur->cm_is_degamma_srgb; + state->regamma_tf = cur->regamma_tf; state->crc_skip_count = cur->crc_skip_count; state->mpo_requested = cur->mpo_requested; + state->cursor_mode = cur->cursor_mode; /* TODO Duplicate dc_stream after objects are stream object is flattened */ return &state->base; @@ -256,12 +491,12 @@ static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc) kfree(crtc); } -static void dm_crtc_reset_state(struct drm_crtc *crtc) +static void amdgpu_dm_crtc_reset_state(struct drm_crtc *crtc) { struct dm_crtc_state *state; if (crtc->state) - dm_crtc_destroy_state(crtc, crtc->state); + amdgpu_dm_crtc_destroy_state(crtc, crtc->state); state = kzalloc(sizeof(*state), GFP_KERNEL); if (WARN_ON(!state)) @@ -279,31 +514,99 @@ static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc) } #endif +#ifdef AMD_PRIVATE_COLOR +/** + * dm_crtc_additional_color_mgmt - enable additional color properties + * @crtc: DRM CRTC + * + * This function lets the driver enable post-blending CRTC regamma transfer + * function property in addition to DRM CRTC gamma LUT. Default value means + * linear transfer function, which is the default CRTC gamma LUT behaviour + * without this property. + */ +static void +dm_crtc_additional_color_mgmt(struct drm_crtc *crtc) +{ + struct amdgpu_device *adev = drm_to_adev(crtc->dev); + + if (adev->dm.dc->caps.color.mpc.ogam_ram) + drm_object_attach_property(&crtc->base, + adev->mode_info.regamma_tf_property, + AMDGPU_TRANSFER_FUNCTION_DEFAULT); +} + +static int +amdgpu_dm_atomic_crtc_set_property(struct drm_crtc *crtc, + struct drm_crtc_state *state, + struct drm_property *property, + uint64_t val) +{ + struct amdgpu_device *adev = drm_to_adev(crtc->dev); + struct dm_crtc_state *acrtc_state = to_dm_crtc_state(state); + + if (property == adev->mode_info.regamma_tf_property) { + if (acrtc_state->regamma_tf != val) { + acrtc_state->regamma_tf = val; + acrtc_state->base.color_mgmt_changed |= 1; + } + } else { + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n", + crtc->base.id, crtc->name, + property->base.id, property->name); + return -EINVAL; + } + + return 0; +} + +static int +amdgpu_dm_atomic_crtc_get_property(struct drm_crtc *crtc, + const struct drm_crtc_state *state, + struct drm_property *property, + uint64_t *val) +{ + struct amdgpu_device *adev = drm_to_adev(crtc->dev); + struct dm_crtc_state *acrtc_state = to_dm_crtc_state(state); + + if (property == adev->mode_info.regamma_tf_property) + *val = acrtc_state->regamma_tf; + else + return -EINVAL; + + return 0; +} +#endif + /* Implemented only the options currently available for the driver */ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { - .reset = dm_crtc_reset_state, + .reset = amdgpu_dm_crtc_reset_state, .destroy = amdgpu_dm_crtc_destroy, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, - .atomic_duplicate_state = dm_crtc_duplicate_state, - .atomic_destroy_state = dm_crtc_destroy_state, + .atomic_duplicate_state = amdgpu_dm_crtc_duplicate_state, + .atomic_destroy_state = amdgpu_dm_crtc_destroy_state, .set_crc_source = amdgpu_dm_crtc_set_crc_source, .verify_crc_source = amdgpu_dm_crtc_verify_crc_source, .get_crc_sources = amdgpu_dm_crtc_get_crc_sources, .get_vblank_counter = amdgpu_get_vblank_counter_kms, - .enable_vblank = dm_enable_vblank, - .disable_vblank = dm_disable_vblank, + .enable_vblank = amdgpu_dm_crtc_enable_vblank, + .disable_vblank = amdgpu_dm_crtc_disable_vblank, .get_vblank_timestamp = 
drm_crtc_vblank_helper_get_vblank_timestamp, #if defined(CONFIG_DEBUG_FS) .late_register = amdgpu_dm_crtc_late_register, #endif +#ifdef AMD_PRIVATE_COLOR + .atomic_set_property = amdgpu_dm_atomic_crtc_set_property, + .atomic_get_property = amdgpu_dm_atomic_crtc_get_property, +#endif }; -static void dm_crtc_helper_disable(struct drm_crtc *crtc) +static void amdgpu_dm_crtc_helper_disable(struct drm_crtc *crtc) { } -static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) +static int amdgpu_dm_crtc_count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) { struct drm_atomic_state *state = new_crtc_state->state; struct drm_plane *plane; @@ -335,8 +638,8 @@ static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) return num_active; } -static void dm_update_crtc_active_planes(struct drm_crtc *crtc, - struct drm_crtc_state *new_crtc_state) +static void amdgpu_dm_crtc_update_crtc_active_planes(struct drm_crtc *crtc, + struct drm_crtc_state *new_crtc_state) { struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); @@ -347,18 +650,18 @@ static void dm_update_crtc_active_planes(struct drm_crtc *crtc, return; dm_new_crtc_state->active_planes = - count_crtc_active_planes(new_crtc_state); + amdgpu_dm_crtc_count_crtc_active_planes(new_crtc_state); } -static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, +static bool amdgpu_dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; } -static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, - struct drm_atomic_state *state) +static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) { struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); @@ -369,10 +672,10 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, trace_amdgpu_dm_crtc_atomic_check(crtc_state); - dm_update_crtc_active_planes(crtc, crtc_state); + amdgpu_dm_crtc_update_crtc_active_planes(crtc, crtc_state); if (WARN_ON(unlikely(!dm_crtc_state->stream && - modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) { + amdgpu_dm_crtc_modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) { return ret; } @@ -388,6 +691,27 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, return -EINVAL; } + /* + * Only allow async flips for fast updates that don't change the FB + * pitch, the DCC state, rotation, etc. 
+ */ + if (crtc_state->async_flip && + dm_crtc_state->update_type != UPDATE_TYPE_FAST) { + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] async flips are only supported for fast updates\n", + crtc->base.id, crtc->name); + return -EINVAL; + } + + if (!state->legacy_cursor_update && amdgpu_dm_crtc_vrr_active(dm_crtc_state)) { + struct drm_plane_state *primary_state; + + /* Pull in primary plane for correct VRR handling */ + primary_state = drm_atomic_get_plane_state(state, crtc->primary); + if (IS_ERR(primary_state)) + return PTR_ERR(primary_state); + } + /* In some use cases, like reset, no stream is attached */ if (!dm_crtc_state->stream) return 0; @@ -400,9 +724,9 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { - .disable = dm_crtc_helper_disable, - .atomic_check = dm_crtc_helper_atomic_check, - .mode_fixup = dm_crtc_helper_mode_fixup, + .disable = amdgpu_dm_crtc_helper_disable, + .atomic_check = amdgpu_dm_crtc_helper_atomic_check, + .mode_fixup = amdgpu_dm_crtc_helper_mode_fixup, .get_scanout_position = amdgpu_crtc_get_scanout_position, }; @@ -412,7 +736,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, { struct amdgpu_crtc *acrtc = NULL; struct drm_plane *cursor_plane; - + bool has_degamma; int res = -ENOMEM; cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); @@ -450,10 +774,26 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, acrtc->otg_inst = -1; dm->adev->mode_info.crtcs[crtc_index] = acrtc; - drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES, + + /* Don't enable DRM CRTC degamma property for + * 1. Degamma is replaced by color pipeline. + * 2. DCE since it doesn't support programmable degamma anywhere. + * 3. DCN401 since pre-blending degamma LUT doesn't apply to cursor. + */ + if (plane->color_pipeline_property) + has_degamma = false; + else + has_degamma = dm->adev->dm.dc->caps.color.dpp.dcn_arch && + dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01; + + drm_crtc_enable_color_mgmt(&acrtc->base, has_degamma ? 
MAX_COLOR_LUT_ENTRIES : 0, true, MAX_COLOR_LUT_ENTRIES); + drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); +#ifdef AMD_PRIVATE_COLOR + dm_crtc_additional_color_mgmt(&acrtc->base); +#endif return 0; fail: diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h index 1ac8692354cf..c1212947a77b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h @@ -27,21 +27,21 @@ #ifndef __AMDGPU_DM_CRTC_H__ #define __AMDGPU_DM_CRTC_H__ -void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc); +void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc); -bool modeset_required(struct drm_crtc_state *crtc_state, +bool amdgpu_dm_crtc_modeset_required(struct drm_crtc_state *crtc_state, struct dc_stream_state *new_stream, struct dc_stream_state *old_stream); -int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable); +int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable); -bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc); +bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc); -bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state); +bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state); -int dm_enable_vblank(struct drm_crtc *crtc); +int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc); -void dm_disable_vblank(struct drm_crtc *crtc); +void amdgpu_dm_crtc_disable_vblank(struct drm_crtc *crtc); int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, struct drm_plane *plane, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index ee242d9d8b06..a9839485f2a2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2018 Advanced Micro Devices, Inc. 
* @@ -25,18 +26,25 @@ #include <linux/string_helpers.h> #include <linux/uaccess.h> +#include <media/cec-notifier.h> #include "dc.h" #include "amdgpu.h" #include "amdgpu_dm.h" #include "amdgpu_dm_debugfs.h" +#include "amdgpu_dm_replay.h" #include "dm_helpers.h" #include "dmub/dmub_srv.h" #include "resource.h" #include "dsc.h" -#include "dc_link_dp.h" #include "link_hwss.h" #include "dc/dc_dmub_srv.h" +#include "link/protocols/link_dp_capability.h" +#include "inc/hw/dchubbub.h" + +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +#include "amdgpu_dm_psr.h" +#endif struct dmub_debugfs_trace_header { uint32_t entry_count; @@ -252,7 +260,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, struct dc_link *link = connector->dc_link; struct amdgpu_device *adev = drm_to_adev(connector->base.dev); struct dc *dc = (struct dc *)link->dc; - struct dc_link_settings prefer_link_settings; + struct dc_link_settings prefer_link_settings = {0}; char *wr_buf = NULL; const uint32_t wr_buf_size = 40; /* 0: lane_count; 1: link_rate */ @@ -299,6 +307,8 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, case LINK_RATE_HIGH2: case LINK_RATE_HIGH3: case LINK_RATE_UHBR10: + case LINK_RATE_UHBR13_5: + case LINK_RATE_UHBR20: break; default: valid_input = false; @@ -330,6 +340,153 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, return size; } +static bool dp_mst_is_end_device(struct amdgpu_dm_connector *aconnector) +{ + bool is_end_device = false; + struct drm_dp_mst_topology_mgr *mgr = NULL; + struct drm_dp_mst_port *port = NULL; + + if (aconnector->mst_root && aconnector->mst_root->mst_mgr.mst_state) { + mgr = &aconnector->mst_root->mst_mgr; + port = aconnector->mst_output_port; + + drm_modeset_lock(&mgr->base.lock, NULL); + if (port->pdt == DP_PEER_DEVICE_SST_SINK || + port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) + is_end_device = true; + drm_modeset_unlock(&mgr->base.lock); + } + + return is_end_device; +} + +/* Change MST link setting + * + * valid lane count value: 1, 2, 4 + * valid link rate value: + * 06h = 1.62Gbps per lane + * 0Ah = 2.7Gbps per lane + * 0Ch = 3.24Gbps per lane + * 14h = 5.4Gbps per lane + * 1Eh = 8.1Gbps per lane + * 3E8h = 10.0Gbps per lane + * 546h = 13.5Gbps per lane + * 7D0h = 20.0Gbps per lane + * + * debugfs is located at /sys/kernel/debug/dri/0/DP-x/mst_link_settings + * + * for example, to force to 2 lane, 10.0GHz, + * echo 2 0x3e8 > /sys/kernel/debug/dri/0/DP-x/mst_link_settings + * + * Valid input will trigger hotplug event to get new link setting applied + * Invalid input will trigger training setting reset + * + * The usage can be referred to link_settings entry + * + */ +static ssize_t dp_mst_link_setting(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct dc_link *link = aconnector->dc_link; + struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); + struct dc *dc = (struct dc *)link->dc; + struct dc_link_settings prefer_link_settings = {0}; + char *wr_buf = NULL; + const uint32_t wr_buf_size = 40; + /* 0: lane_count; 1: link_rate */ + int max_param_num = 2; + uint8_t param_nums = 0; + long param[2]; + bool valid_input = true; + + if (!dp_mst_is_end_device(aconnector)) + return -EINVAL; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + if (!wr_buf) + return -ENOSPC; + + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, + (long *)param, buf, + 
max_param_num, + ¶m_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + kfree(wr_buf); + DRM_DEBUG_DRIVER("user data not be read\n"); + return -EINVAL; + } + + switch (param[0]) { + case LANE_COUNT_ONE: + case LANE_COUNT_TWO: + case LANE_COUNT_FOUR: + break; + default: + valid_input = false; + break; + } + + switch (param[1]) { + case LINK_RATE_LOW: + case LINK_RATE_HIGH: + case LINK_RATE_RBR2: + case LINK_RATE_HIGH2: + case LINK_RATE_HIGH3: + case LINK_RATE_UHBR10: + case LINK_RATE_UHBR13_5: + case LINK_RATE_UHBR20: + break; + default: + valid_input = false; + break; + } + + if (!valid_input) { + kfree(wr_buf); + DRM_DEBUG_DRIVER("Invalid Input value No HW will be programmed\n"); + mutex_lock(&adev->dm.dc_lock); + dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false); + mutex_unlock(&adev->dm.dc_lock); + return -EINVAL; + } + + /* save user force lane_count, link_rate to preferred settings + * spread spectrum will not be changed + */ + prefer_link_settings.link_spread = link->cur_link_settings.link_spread; + prefer_link_settings.use_link_rate_set = false; + prefer_link_settings.lane_count = param[0]; + prefer_link_settings.link_rate = param[1]; + + /* skip immediate retrain, and train to new link setting after hotplug event triggered */ + mutex_lock(&adev->dm.dc_lock); + dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true); + mutex_unlock(&adev->dm.dc_lock); + + mutex_lock(&aconnector->base.dev->mode_config.mutex); + aconnector->base.force = DRM_FORCE_OFF; + mutex_unlock(&aconnector->base.dev->mode_config.mutex); + drm_kms_helper_hotplug_event(aconnector->base.dev); + + msleep(100); + + mutex_lock(&aconnector->base.dev->mode_config.mutex); + aconnector->base.force = DRM_FORCE_UNSPECIFIED; + mutex_unlock(&aconnector->base.dev->mode_config.mutex); + drm_kms_helper_hotplug_event(aconnector->base.dev); + + kfree(wr_buf); + return size; +} + /* function: get current DP PHY settings: voltage swing, pre-emphasis, * post-cursor2 (defined by VESA DP specification) * @@ -413,67 +570,38 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf, return result; } -static int dp_lttpr_status_show(struct seq_file *m, void *d) +static int dp_lttpr_status_show(struct seq_file *m, void *unused) { - char *data; - struct amdgpu_dm_connector *connector = file_inode(m->file)->i_private; - struct dc_link *link = connector->dc_link; - uint32_t read_size = 1; - uint8_t repeater_count = 0; + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = + to_amdgpu_dm_connector(connector); + struct dc_lttpr_caps caps = aconnector->dc_link->dpcd_caps.lttpr_caps; - data = kzalloc(read_size, GFP_KERNEL); - if (!data) - return 0; + if (connector->status != connector_status_connected) + return -ENODEV; - dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0002, data, read_size); + seq_printf(m, "phy repeater count: %u (raw: 0x%x)\n", + dp_parse_lttpr_repeater_count(caps.phy_repeater_cnt), + caps.phy_repeater_cnt); - switch ((uint8_t)*data) { - case 0x80: - repeater_count = 1; - break; - case 0x40: - repeater_count = 2; - break; - case 0x20: - repeater_count = 3; - break; - case 0x10: - repeater_count = 4; - break; - case 0x8: - repeater_count = 5; - break; - case 0x4: - repeater_count = 6; - break; - case 0x2: - repeater_count = 7; + seq_puts(m, "phy repeater mode: "); + + switch (caps.mode) { + case DP_PHY_REPEATER_MODE_TRANSPARENT: + seq_puts(m, "transparent"); break; - case 0x1: - repeater_count = 8; + case 
DP_PHY_REPEATER_MODE_NON_TRANSPARENT: + seq_puts(m, "non-transparent"); break; - case 0x0: - repeater_count = 0; + case 0x00: + seq_puts(m, "non lttpr"); break; default: - repeater_count = (uint8_t)*data; + seq_printf(m, "read error (raw: 0x%x)", caps.mode); break; } - seq_printf(m, "phy repeater count: %d\n", repeater_count); - - dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0003, data, read_size); - - if ((uint8_t)*data == 0x55) - seq_printf(m, "phy repeater mode: transparent\n"); - else if ((uint8_t)*data == 0xAA) - seq_printf(m, "phy repeater mode: non-transparent\n"); - else if ((uint8_t)*data == 0x00) - seq_printf(m, "phy repeater mode: non lttpr\n"); - else - seq_printf(m, "phy repeater mode: read error\n"); - - kfree(data); + seq_puts(m, "\n"); return 0; } @@ -487,7 +615,7 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf, uint32_t wr_buf_size = 40; long param[3]; bool use_prefer_link_setting; - struct link_training_settings link_lane_settings; + struct link_training_settings link_lane_settings = {0}; int max_param_num = 3; uint8_t param_nums = 0; int r = 0; @@ -631,6 +759,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us int max_param_num = 11; enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; bool disable_hpd = false; + bool supports_hpd = link->irq_source_hpd != DC_IRQ_SOURCE_INVALID; bool valid_test_pattern = false; uint8_t param_nums = 0; /* init with default 80bit custom pattern */ @@ -642,7 +771,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED}; struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN, LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED}; - struct link_training_settings link_training_settings; + struct link_training_settings link_training_settings = {0}; int i; if (size == 0) @@ -722,7 +851,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us * because it might have been disabled after a test pattern was set. * AUX depends on HPD * sequence dependent, do not move! */ - if (!disable_hpd) + if (supports_hpd && !disable_hpd) dc_link_enable_hpd(link); prefer_link_settings.lane_count = link->verified_link_cap.lane_count; @@ -747,7 +876,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++) link_training_settings.hw_lane_settings[i] = link->cur_lane_setting[i]; - dc_link_set_test_pattern( + dc_link_dp_set_test_pattern( link, test_pattern, DP_TEST_PATTERN_COLOR_SPACE_RGB, @@ -760,7 +889,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us * Need disable interrupt to avoid SW driver disable DP output. This is * done after the test pattern is set. 
*/ - if (valid_test_pattern && disable_hpd) + if (valid_test_pattern && supports_hpd && disable_hpd) dc_link_disable_hpd(link); kfree(wr_buf); @@ -776,9 +905,10 @@ static int dmub_tracebuffer_show(struct seq_file *m, void *data) { struct amdgpu_device *adev = m->private; struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; + struct dmub_fw_meta_info *fw_meta_info = NULL; struct dmub_debugfs_trace_entry *entries; uint8_t *tbuf_base; - uint32_t tbuf_size, max_entries, num_entries, i; + uint32_t tbuf_size, max_entries, num_entries, first_entry, i; if (!fb_info) return 0; @@ -787,20 +917,42 @@ static int dmub_tracebuffer_show(struct seq_file *m, void *data) if (!tbuf_base) return 0; - tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size; + if (adev->dm.dmub_srv) + fw_meta_info = &adev->dm.dmub_srv->meta_info; + + tbuf_size = fw_meta_info ? fw_meta_info->trace_buffer_size : + DMUB_TRACE_BUFFER_SIZE; max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) / sizeof(struct dmub_debugfs_trace_entry); num_entries = ((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count; + /* DMCUB tracebuffer is a ring. If it rolled over, print a hint that + * entries are being overwritten. + */ + if (num_entries > max_entries) + seq_printf(m, "...\n"); + + first_entry = num_entries % max_entries; num_entries = min(num_entries, max_entries); entries = (struct dmub_debugfs_trace_entry *)(tbuf_base + sizeof(struct dmub_debugfs_trace_header)); - for (i = 0; i < num_entries; ++i) { + /* To print entries chronologically, start from the first entry till the + * top of buffer, then from base of buffer to first entry. + */ + for (i = first_entry; i < num_entries; ++i) { + struct dmub_debugfs_trace_entry *entry = &entries[i]; + + seq_printf(m, + "trace_code=%u tick_count=%u param0=%u param1=%u\n", + entry->trace_code, entry->tick_count, entry->param0, + entry->param1); + } + for (i = 0; i < first_entry; ++i) { struct dmub_debugfs_trace_entry *entry = &entries[i]; seq_printf(m, @@ -835,6 +987,58 @@ static int dmub_fw_state_show(struct seq_file *m, void *data) return seq_write(m, state_base, state_size); } +/* replay_capability_show() - show eDP panel replay capability + * + * The read function: replay_capability_show + * Shows if sink and driver has Replay capability or not. 
+ * + * cat /sys/kernel/debug/dri/0/eDP-X/replay_capability + * + * Expected output: + * "Sink support: no\n" - if panel doesn't support Replay + * "Sink support: yes\n" - if panel supports Replay + * "Driver support: no\n" - if driver doesn't support Replay + * "Driver support: yes\n" - if driver supports Replay + */ +static int replay_capability_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct dc_link *link = aconnector->dc_link; + bool sink_support_replay = false; + bool driver_support_replay = false; + + if (!link) + return -ENODEV; + + if (link->type == dc_connection_none) + return -ENODEV; + + if (!(link->connector_signal & SIGNAL_TYPE_EDP)) + return -ENODEV; + + /* If Replay is already set to support, skip the checks */ + if (link->replay_settings.config.replay_supported) { + sink_support_replay = true; + driver_support_replay = true; + } else if ((amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) { + sink_support_replay = amdgpu_dm_link_supports_replay(link, aconnector); + } else { + struct dc *dc = link->ctx->dc; + + sink_support_replay = amdgpu_dm_link_supports_replay(link, aconnector); + if (dc->ctx->dmub_srv && dc->ctx->dmub_srv->dmub) + driver_support_replay = + (bool)dc->ctx->dmub_srv->dmub->feature_caps.replay_supported; + } + + seq_printf(m, "Sink support: %s\n", str_yes_no(sink_support_replay)); + seq_printf(m, "Driver support: %s\n", str_yes_no(driver_support_replay)); + seq_printf(m, "Config support: %s\n", str_yes_no(link->replay_settings.config.replay_supported)); + + return 0; +} + /* psr_capability_show() - show eDP panel PSR capability * * The read function: sink_psr_capability_show @@ -930,6 +1134,61 @@ unlock: DEFINE_SHOW_ATTRIBUTE(amdgpu_current_bpc); /* + * Returns the current colorspace for the crtc. + * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/amdgpu_current_colorspace + */ +static int amdgpu_current_colorspace_show(struct seq_file *m, void *data) +{ + struct drm_crtc *crtc = m->private; + struct drm_device *dev = crtc->dev; + struct dm_crtc_state *dm_crtc_state = NULL; + int res = -ENODEV; + + mutex_lock(&dev->mode_config.mutex); + drm_modeset_lock(&crtc->mutex, NULL); + if (crtc->state == NULL) + goto unlock; + + dm_crtc_state = to_dm_crtc_state(crtc->state); + if (dm_crtc_state->stream == NULL) + goto unlock; + + switch (dm_crtc_state->stream->output_color_space) { + case COLOR_SPACE_SRGB: + seq_puts(m, "sRGB"); + break; + case COLOR_SPACE_YCBCR601: + case COLOR_SPACE_YCBCR601_LIMITED: + seq_puts(m, "BT601_YCC"); + break; + case COLOR_SPACE_YCBCR709: + case COLOR_SPACE_YCBCR709_LIMITED: + seq_puts(m, "BT709_YCC"); + break; + case COLOR_SPACE_ADOBERGB: + seq_puts(m, "opRGB"); + break; + case COLOR_SPACE_2020_RGB_FULLRANGE: + seq_puts(m, "BT2020_RGB"); + break; + case COLOR_SPACE_2020_YCBCR_LIMITED: + seq_puts(m, "BT2020_YCC"); + break; + default: + goto unlock; + } + res = 0; + +unlock: + drm_modeset_unlock(&crtc->mutex); + mutex_unlock(&dev->mode_config.mutex); + + return res; +} +DEFINE_SHOW_ATTRIBUTE(amdgpu_current_colorspace); + + +/* * Example usage: * Disable dsc passthrough, i.e.,: have dsc decoding at converver, not external RX * echo 1 /sys/kernel/debug/dri/0/DP-1/dsc_disable_passthrough @@ -970,7 +1229,6 @@ static ssize_t dp_dsc_passthrough_set(struct file *f, const char __user *buf, return 0; } -#ifdef CONFIG_DRM_AMD_DC_HDCP /* * Returns the HDCP capability of the Display (1.4 for now). 
* @@ -1007,7 +1265,6 @@ static int hdcp_sink_capability_show(struct seq_file *m, void *data) return 0; } -#endif /* * Returns whether the connected display is internal and not hotpluggable. @@ -1024,6 +1281,36 @@ static int internal_display_show(struct seq_file *m, void *data) return 0; } +/* + * Returns the number of segments used if ODM Combine mode is enabled. + * Example usage: cat /sys/kernel/debug/dri/0/DP-1/odm_combine_segments + */ +static int odm_combine_segments_show(struct seq_file *m, void *unused) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct dc_link *link = aconnector->dc_link; + struct pipe_ctx *pipe_ctx = NULL; + int i, segments = -EOPNOTSUPP; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && + pipe_ctx->stream->link == link) + break; + } + + if (connector->status != connector_status_connected) + return -ENODEV; + + if (pipe_ctx && pipe_ctx->stream_res.tg && + pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments) + pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments(pipe_ctx->stream_res.tg, &segments); + + seq_printf(m, "%d\n", segments); + return 0; +} + /* function description * * generic SDP message access for testing @@ -1042,7 +1329,7 @@ static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *b size_t size, loff_t *pos) { int r; - uint8_t data[36]; + uint8_t data[36] = {0}; struct amdgpu_dm_connector *connector = file_inode(f)->i_private; struct dm_crtc_state *acrtc_state; uint32_t write_size = 36; @@ -1064,88 +1351,6 @@ static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *b return write_size; } -static ssize_t dp_dpcd_address_write(struct file *f, const char __user *buf, - size_t size, loff_t *pos) -{ - int r; - struct amdgpu_dm_connector *connector = file_inode(f)->i_private; - - if (size < sizeof(connector->debugfs_dpcd_address)) - return -EINVAL; - - r = copy_from_user(&connector->debugfs_dpcd_address, - buf, sizeof(connector->debugfs_dpcd_address)); - - return size - r; -} - -static ssize_t dp_dpcd_size_write(struct file *f, const char __user *buf, - size_t size, loff_t *pos) -{ - int r; - struct amdgpu_dm_connector *connector = file_inode(f)->i_private; - - if (size < sizeof(connector->debugfs_dpcd_size)) - return -EINVAL; - - r = copy_from_user(&connector->debugfs_dpcd_size, - buf, sizeof(connector->debugfs_dpcd_size)); - - if (connector->debugfs_dpcd_size > 256) - connector->debugfs_dpcd_size = 0; - - return size - r; -} - -static ssize_t dp_dpcd_data_write(struct file *f, const char __user *buf, - size_t size, loff_t *pos) -{ - int r; - char *data; - struct amdgpu_dm_connector *connector = file_inode(f)->i_private; - struct dc_link *link = connector->dc_link; - uint32_t write_size = connector->debugfs_dpcd_size; - - if (!write_size || size < write_size) - return -EINVAL; - - data = kzalloc(write_size, GFP_KERNEL); - if (!data) - return 0; - - r = copy_from_user(data, buf, write_size); - - dm_helpers_dp_write_dpcd(link->ctx, link, - connector->debugfs_dpcd_address, data, write_size - r); - kfree(data); - return write_size - r; -} - -static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf, - size_t size, loff_t *pos) -{ - int r; - char *data; - struct amdgpu_dm_connector *connector = file_inode(f)->i_private; - struct dc_link *link = connector->dc_link; - uint32_t read_size = connector->debugfs_dpcd_size; - - if (!read_size || size 
< read_size) - return 0; - - data = kzalloc(read_size, GFP_KERNEL); - if (!data) - return 0; - - dm_helpers_dp_read_dpcd(link->ctx, link, - connector->debugfs_dpcd_address, data, read_size); - - r = copy_to_user(buf, data, read_size); - - kfree(data); - return read_size - r; -} - /* function: Read link's DSC & FEC capabilities * * @@ -1186,7 +1391,7 @@ static int dp_dsc_fec_support_show(struct seq_file *m, void *data) break; } dpcd_caps = aconnector->dc_link->dpcd_caps; - if (aconnector->port) { + if (aconnector->mst_output_port) { /* aconnector sets dsc_aux during get_modes call * if MST connector has it means it can either * enable DSC on the sink device or on MST branch @@ -1242,7 +1447,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf, uint8_t param_nums = 0; bool ret = false; - if (!aconnector || !aconnector->dc_link) + if (!aconnector->dc_link) return -EINVAL; if (size == 0) @@ -1273,14 +1478,14 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf, mutex_lock(&aconnector->hpd_lock); /* Don't support for mst end device*/ - if (aconnector->mst_port) { + if (aconnector->mst_root) { mutex_unlock(&aconnector->hpd_lock); return -EINVAL; } if (param[0] == 1) { - if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type) && + if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type) && new_connection_type != dc_connection_none) goto unlock; @@ -1317,7 +1522,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf, /* If the aconnector is the root node in mst topology */ if (aconnector->mst_mgr.mst_state == true) - reset_cur_dp_mst_topology(link); + dc_link_reset_cur_dp_mst_topology(link); drm_modeset_lock_all(dev); dm_restore_drm_connector_state(dev, connector); @@ -1351,42 +1556,35 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; const uint32_t rd_buf_size = 10; struct pipe_ctx *pipe_ctx; ssize_t result = 0; - int i, r, str_len = 30; + int i, r, str_len = 10; rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } - if (!pipe_ctx) { - kfree(rd_buf); - return -ENXIO; - } - dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_clock_en); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -1475,12 +1673,14 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } - if (!pipe_ctx || !pipe_ctx->stream) + if (!pipe_ctx->stream) goto done; // Get CRTC state @@ 
-1542,7 +1742,6 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -1556,28 +1755,22 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } - if (!pipe_ctx) { - kfree(rd_buf); - return -ENXIO; - } - dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_slice_width); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -1664,12 +1857,14 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } - if (!pipe_ctx || !pipe_ctx->stream) + if (!pipe_ctx->stream) goto done; // Safely get CRTC state @@ -1731,7 +1926,6 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -1745,28 +1939,22 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } - if (!pipe_ctx) { - kfree(rd_buf); - return -ENXIO; - } - dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_slice_height); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -1853,12 +2041,14 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } - if (!pipe_ctx || !pipe_ctx->stream) + if (!pipe_ctx->stream) goto done; // Get CRTC state @@ -1916,7 +2106,6 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct 
amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -1930,28 +2119,22 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } - if (!pipe_ctx) { - kfree(rd_buf); - return -ENXIO; - } - dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_bits_per_pixel); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2035,12 +2218,14 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } - if (!pipe_ctx || !pipe_ctx->stream) + if (!pipe_ctx->stream) goto done; // Get CRTC state @@ -2096,7 +2281,6 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2110,28 +2294,22 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } - if (!pipe_ctx) { - kfree(rd_buf); - return -ENXIO; - } - dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_pic_width); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2157,7 +2335,6 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2171,28 +2348,22 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } - if (!pipe_ctx) { - kfree(rd_buf); - return -ENXIO; - } - dsc = pipe_ctx->stream_res.dsc; if (dsc) 
dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_pic_height); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2233,7 +2404,6 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2247,28 +2417,22 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } - if (!pipe_ctx) { - kfree(rd_buf); - return -ENXIO; - } - dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_chunk_size); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2309,7 +2473,6 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; - char *rd_buf_ptr = NULL; struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; struct display_stream_compressor *dsc; struct dcn_dsc_state dsc_state = {0}; @@ -2323,28 +2486,22 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, if (!rd_buf) return -ENOMEM; - rd_buf_ptr = rd_buf; - for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } - if (!pipe_ctx) { - kfree(rd_buf); - return -ENXIO; - } - dsc = pipe_ctx->stream_res.dsc; if (dsc) dsc->funcs->dsc_read_state(dsc, &dsc_state); - snprintf(rd_buf_ptr, str_len, + snprintf(rd_buf, str_len, "%d\n", dsc_state.dsc_slice_bpg_offset); - rd_buf_ptr += str_len; while (size) { if (*pos >= rd_buf_size) @@ -2510,6 +2667,49 @@ unlock: } /* + * IPS status. Read only. 
+ * + * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_ips_status + */ +static int ips_status_show(struct seq_file *m, void *unused) +{ + struct amdgpu_device *adev = m->private; + struct dc *dc = adev->dm.dc; + struct dc_dmub_srv *dc_dmub_srv; + + seq_printf(m, "IPS config: %d\n", dc->config.disable_ips); + seq_printf(m, "Idle optimization: %d\n", dc->idle_optimizations_allowed); + + if (adev->dm.idle_workqueue) { + seq_printf(m, "Idle workqueue - enabled: %d\n", adev->dm.idle_workqueue->enable); + seq_printf(m, "Idle workqueue - running: %d\n", adev->dm.idle_workqueue->running); + } + + dc_dmub_srv = dc->ctx->dmub_srv; + if (dc_dmub_srv && dc_dmub_srv->dmub) { + uint32_t rcg_count, ips1_count, ips2_count; + volatile const struct dmub_shared_state_ips_fw *ips_fw = + &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw; + rcg_count = ips_fw->rcg_entry_count; + ips1_count = ips_fw->ips1_entry_count; + ips2_count = ips_fw->ips2_entry_count; + seq_printf(m, "entry counts: rcg=%u ips1=%u ips2=%u\n", + rcg_count, + ips1_count, + ips2_count); + rcg_count = ips_fw->rcg_exit_count; + ips1_count = ips_fw->ips1_exit_count; + ips2_count = ips_fw->ips2_exit_count; + seq_printf(m, "exit counts: rcg=%u ips1=%u ips2=%u", + rcg_count, + ips1_count, + ips2_count); + seq_puts(m, "\n"); + } + return 0; +} + +/* * Backlight at this moment. Read only. * As written to display, taking ABM and backlight lut into account. * Ranges from 0x0 to 0x10000 (= 100% PWM) @@ -2572,13 +2772,13 @@ static int dp_is_mst_connector_show(struct seq_file *m, void *unused) if (aconnector->mst_mgr.mst_state) { role = "root"; - } else if (aconnector->mst_port && - aconnector->mst_port->mst_mgr.mst_state) { + } else if (aconnector->mst_root && + aconnector->mst_root->mst_mgr.mst_state) { role = "end"; - mgr = &aconnector->mst_port->mst_mgr; - port = aconnector->port; + mgr = &aconnector->mst_root->mst_mgr; + port = aconnector->mst_output_port; drm_modeset_lock(&mgr->base.lock, NULL); if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING && @@ -2633,17 +2833,99 @@ static int dp_mst_progress_status_show(struct seq_file *m, void *unused) return 0; } +/* + * Reports whether the connected display is a USB4 DPIA tunneled display + * Example usage: cat /sys/kernel/debug/dri/0/DP-8/is_dpia_link + */ +static int is_dpia_link_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct dc_link *link = aconnector->dc_link; + + if (connector->status != connector_status_connected) + return -ENODEV; + + seq_printf(m, "%s\n", (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? "yes" : + (link->ep_type == DISPLAY_ENDPOINT_PHY) ? "no" : "unknown"); + + return 0; +} + +/** + * hdmi_cec_state_show - Read out the HDMI-CEC feature status + * @m: sequence file. + * @data: unused. + * + * Return 0 on success + */ +static int hdmi_cec_state_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + seq_printf(m, "%s:%d\n", connector->name, connector->base.id); + seq_printf(m, "HDMI-CEC status: %d\n", aconnector->notifier ? 1 : 0); + + return 0; +} + +/** + * hdmi_cec_state_write - Enable/Disable HDMI-CEC feature from driver side + * @f: file structure. + * @buf: userspace buffer. set to '1' to enable; '0' to disable cec feature. + * @size: size of buffer from userpsace. + * @pos: unused. 
+ * + * Return size on success, error code on failure + */ +static ssize_t hdmi_cec_state_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + int ret; + bool enable; + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct drm_device *ddev = aconnector->base.dev; + + if (size == 0) + return -EINVAL; + + ret = kstrtobool_from_user(buf, size, &enable); + if (ret) { + drm_dbg_driver(ddev, "invalid user data !\n"); + return ret; + } + + if (enable) { + if (aconnector->notifier) + return -EINVAL; + ret = amdgpu_dm_initialize_hdmi_connector(aconnector); + if (ret) + return ret; + hdmi_cec_set_edid(aconnector); + } else { + if (!aconnector->notifier) + return -EINVAL; + cec_notifier_conn_unregister(aconnector->notifier); + aconnector->notifier = NULL; + } + + return size; +} + DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support); DEFINE_SHOW_ATTRIBUTE(dmub_fw_state); DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer); DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status); -#ifdef CONFIG_DRM_AMD_DC_HDCP DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability); -#endif DEFINE_SHOW_ATTRIBUTE(internal_display); +DEFINE_SHOW_ATTRIBUTE(odm_combine_segments); +DEFINE_SHOW_ATTRIBUTE(replay_capability); DEFINE_SHOW_ATTRIBUTE(psr_capability); DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector); DEFINE_SHOW_ATTRIBUTE(dp_mst_progress_status); +DEFINE_SHOW_ATTRIBUTE(is_dpia_link); +DEFINE_SHOW_STORE_ATTRIBUTE(hdmi_cec_state); static const struct file_operations dp_dsc_clock_en_debugfs_fops = { .owner = THIS_MODULE, @@ -2729,25 +3011,6 @@ static const struct file_operations sdp_message_fops = { .llseek = default_llseek }; -static const struct file_operations dp_dpcd_address_debugfs_fops = { - .owner = THIS_MODULE, - .write = dp_dpcd_address_write, - .llseek = default_llseek -}; - -static const struct file_operations dp_dpcd_size_debugfs_fops = { - .owner = THIS_MODULE, - .write = dp_dpcd_size_write, - .llseek = default_llseek -}; - -static const struct file_operations dp_dpcd_data_debugfs_fops = { - .owner = THIS_MODULE, - .read = dp_dpcd_data_read, - .write = dp_dpcd_data_write, - .llseek = default_llseek -}; - static const struct file_operations dp_max_bpc_debugfs_fops = { .owner = THIS_MODULE, .read = dp_max_bpc_read, @@ -2761,6 +3024,12 @@ static const struct file_operations dp_dsc_disable_passthrough_debugfs_fops = { .llseek = default_llseek }; +static const struct file_operations dp_mst_link_settings_debugfs_fops = { + .owner = THIS_MODULE, + .write = dp_mst_link_setting, + .llseek = default_llseek +}; + static const struct { char *name; const struct file_operations *fops; @@ -2769,13 +3038,8 @@ static const struct { {"phy_settings", &dp_phy_settings_debugfs_fop}, {"lttpr_status", &dp_lttpr_status_fops}, {"test_pattern", &dp_phy_test_pattern_fops}, -#ifdef CONFIG_DRM_AMD_DC_HDCP {"hdcp_sink_capability", &hdcp_sink_capability_fops}, -#endif {"sdp_message", &sdp_message_fops}, - {"aux_dpcd_address", &dp_dpcd_address_debugfs_fops}, - {"aux_dpcd_size", &dp_dpcd_size_debugfs_fops}, - {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}, {"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops}, {"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops}, {"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops}, @@ -2788,17 +3052,19 @@ static const struct { {"max_bpc", &dp_max_bpc_debugfs_fops}, {"dsc_disable_passthrough", &dp_dsc_disable_passthrough_debugfs_fops}, {"is_mst_connector", &dp_is_mst_connector_fops}, - {"mst_progress_status", &dp_mst_progress_status_fops} + {"mst_progress_status", &dp_mst_progress_status_fops}, + {"is_dpia_link", 
&is_dpia_link_fops}, + {"mst_link_settings", &dp_mst_link_settings_debugfs_fops} }; -#ifdef CONFIG_DRM_AMD_DC_HDCP static const struct { char *name; const struct file_operations *fops; } hdmi_debugfs_entries[] = { - {"hdcp_sink_capability", &hdcp_sink_capability_fops} + {"hdcp_sink_capability", &hdcp_sink_capability_fops}, + {"hdmi_cec_state", &hdmi_cec_state_fops} }; -#endif + /* * Force YUV420 output if available from the given mode */ @@ -2827,6 +3093,51 @@ DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get, force_yuv420_output_set, "%llu\n"); /* + * Read Replay state + */ +static int replay_get_state(void *data, u64 *val) +{ + struct amdgpu_dm_connector *connector = data; + struct dc_link *link = connector->dc_link; + uint64_t state = REPLAY_STATE_INVALID; + + dc_link_get_replay_state(link, &state); + + *val = state; + + return 0; +} + +/* + * Start / Stop capture Replay residency + */ +static int replay_set_residency(void *data, u64 val) +{ + struct amdgpu_dm_connector *connector = data; + struct dc_link *link = connector->dc_link; + bool is_start = (val != 0); + u32 residency = 0; + + link->dc->link_srv->edp_replay_residency(link, &residency, is_start, PR_RESIDENCY_MODE_PHY); + return 0; +} + +/* + * Read Replay residency + */ +static int replay_get_residency(void *data, u64 *val) +{ + struct amdgpu_dm_connector *connector = data; + struct dc_link *link = connector->dc_link; + u32 residency = 0; + + link->dc->link_srv->edp_replay_residency(link, &residency, false, PR_RESIDENCY_MODE_PHY); + *val = (u64)residency; + + return 0; +} + +/* * Read PSR state */ static int psr_get(void *data, u64 *val) @@ -2843,6 +3154,174 @@ static int psr_get(void *data, u64 *val) } /* + * Read PSR state residency + */ +static int psr_read_residency(void *data, u64 *val) +{ + struct amdgpu_dm_connector *connector = data; + struct dc_link *link = connector->dc_link; + u32 residency = 0; + + link->dc->link_srv->edp_get_psr_residency(link, &residency, PSR_RESIDENCY_MODE_PHY); + + *val = (u64)residency; + + return 0; +} + +/* read allow_edp_hotplug_detection */ +static int allow_edp_hotplug_detection_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *aconnector = data; + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + + *val = adev->dm.dc->config.allow_edp_hotplug_detection; + + return 0; +} + +/* set allow_edp_hotplug_detection */ +static int allow_edp_hotplug_detection_set(void *data, u64 val) +{ + struct amdgpu_dm_connector *aconnector = data; + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + + adev->dm.dc->config.allow_edp_hotplug_detection = (uint32_t) val; + + return 0; +} + +/* check if kernel disallow eDP enter psr state + * cat /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr + * 0: allow edp enter psr; 1: disallow + */ +static int disallow_edp_enter_psr_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *aconnector = data; + + *val = (u64) aconnector->disallow_edp_enter_psr; + return 0; +} + +/* set kernel disallow eDP enter psr state + * echo 0x0 /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr + * 0: allow edp enter psr; 1: disallow + * + * usage: test app read crc from PSR eDP rx. + * + * during kernel boot up, kernel write dpcd 0x170 = 5. + * this notify eDP rx psr enable and let rx check crc. + * rx fw will start checking crc for rx internal logic. 
+ * crc read count within dpcd 0x246 is not updated and + * value is 0. when eDP tx driver wants to read rx crc + * from dpcd 0x246, 0x270, read count 0 lead tx driver + * timeout. + * + * to avoid this, we add this debugfs to let test app to disbable + * rx crc checking for rx internal logic. then test app can read + * non-zero crc read count. + * + * expected app sequence is as below: + * 1. disable eDP PHY and notify eDP rx with dpcd 0x600 = 2. + * 2. echo 0x1 /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr + * 3. enable eDP PHY and notify eDP rx with dpcd 0x600 = 1 but + * without dpcd 0x170 = 5. + * 4. read crc from rx dpcd 0x270, 0x246, etc. + * 5. echo 0x0 /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr. + * this will let eDP back to normal with psr setup dpcd 0x170 = 5. + */ +static int disallow_edp_enter_psr_set(void *data, u64 val) +{ + struct amdgpu_dm_connector *aconnector = data; + + aconnector->disallow_edp_enter_psr = val ? true : false; + return 0; +} + +static int dmub_trace_mask_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub; + enum dmub_gpint_command cmd; + u64 mask = 0xffff; + u8 shift = 0; + u32 res; + int i; + + if (!srv->fw_version) + return -EINVAL; + + for (i = 0; i < 4; i++) { + res = (val & mask) >> shift; + + switch (i) { + case 0: + cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD0; + break; + case 1: + cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1; + break; + case 2: + cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD2; + break; + case 3: + cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD3; + break; + } + + if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, res, NULL, DM_DMUB_WAIT_TYPE_WAIT)) + return -EIO; + + usleep_range(100, 1000); + + mask <<= 16; + shift += 16; + } + + return 0; +} + +static int dmub_trace_mask_show(void *data, u64 *val) +{ + enum dmub_gpint_command cmd = DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD0; + struct amdgpu_device *adev = data; + struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub; + u8 shift = 0; + u64 raw = 0; + u64 res = 0; + int i = 0; + + if (!srv->fw_version) + return -EINVAL; + + while (i < 4) { + uint32_t response; + + if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, 0, &response, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + return -EIO; + + raw = response; + usleep_range(100, 1000); + + cmd++; + res |= (raw << shift); + shift += 16; + i++; + } + + *val = res; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(dmub_trace_mask_fops, dmub_trace_mask_show, + dmub_trace_mask_set, "0x%llx\n"); + +/* * Set dmcub trace event IRQ enable or disable. 
* Usage to enable dmcub trace event IRQ: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en * Usage to disable dmcub trace event IRQ: echo 0 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en @@ -2876,10 +3355,24 @@ static int dmcub_trace_event_state_get(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_get, dmcub_trace_event_state_set, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(replay_state_fops, replay_get_state, NULL, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(replay_residency_fops, replay_get_residency, replay_set_residency, + "%llu\n"); DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL, + "%llu\n"); + +DEFINE_DEBUGFS_ATTRIBUTE(allow_edp_hotplug_detection_fops, + allow_edp_hotplug_detection_get, + allow_edp_hotplug_detection_set, "%llu\n"); + +DEFINE_DEBUGFS_ATTRIBUTE(disallow_edp_enter_psr_fops, + disallow_edp_enter_psr_get, + disallow_edp_enter_psr_set, "%llu\n"); DEFINE_SHOW_ATTRIBUTE(current_backlight); DEFINE_SHOW_ATTRIBUTE(target_backlight); +DEFINE_SHOW_ATTRIBUTE(ips_status); static const struct { char *name; @@ -2887,7 +3380,8 @@ static const struct { } connector_debugfs_entries[] = { {"force_yuv420_output", &force_yuv420_output_fops}, {"trigger_hotplug", &trigger_hotplug_debugfs_fops}, - {"internal_display", &internal_display_fops} + {"internal_display", &internal_display_fops}, + {"odm_combine_segments", &odm_combine_segments_fops} }; /* @@ -2918,7 +3412,7 @@ static int edp_ilr_show(struct seq_file *m, void *unused) seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz); } } else { - seq_printf(m, "ILR is not supported by this eDP panel.\n"); + seq_puts(m, "ILR is not supported by this eDP panel.\n"); } return 0; @@ -3038,14 +3532,25 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) } } if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) { + debugfs_create_file("replay_capability", 0444, dir, connector, + &replay_capability_fops); + debugfs_create_file("replay_state", 0444, dir, connector, &replay_state_fops); + debugfs_create_file_unsafe("replay_residency", 0444, dir, + connector, &replay_residency_fops); debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops); debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops); + debugfs_create_file_unsafe("psr_residency", 0444, dir, + connector, &psr_residency_fops); debugfs_create_file("amdgpu_current_backlight_pwm", 0444, dir, connector, ¤t_backlight_fops); debugfs_create_file("amdgpu_target_backlight_pwm", 0444, dir, connector, &target_backlight_fops); debugfs_create_file("ilr_setting", 0644, dir, connector, &edp_ilr_debugfs_fops); + debugfs_create_file("allow_edp_hotplug_detection", 0644, dir, connector, + &allow_edp_hotplug_detection_fops); + debugfs_create_file("disallow_edp_enter_psr", 0644, dir, connector, + &disallow_edp_enter_psr_fops); } for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) { @@ -3054,10 +3559,6 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) connector_debugfs_entries[i].fops); } - connector->debugfs_dpcd_address = 0; - connector->debugfs_dpcd_size = 0; - -#ifdef CONFIG_DRM_AMD_DC_HDCP if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) { for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) { debugfs_create_file(hdmi_debugfs_entries[i].name, @@ -3065,7 +3566,6 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) 
hdmi_debugfs_entries[i].fops); } } -#endif } #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY @@ -3079,8 +3579,8 @@ static int crc_win_x_start_set(void *data, u64 val) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); spin_lock_irq(&drm_dev->event_lock); - acrtc->dm_irq_params.crc_window.x_start = (uint16_t) val; - acrtc->dm_irq_params.crc_window.update_win = false; + acrtc->dm_irq_params.window_param[0].x_start = (uint16_t) val; + acrtc->dm_irq_params.window_param[0].update_win = false; spin_unlock_irq(&drm_dev->event_lock); return 0; @@ -3096,7 +3596,7 @@ static int crc_win_x_start_get(void *data, u64 *val) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); spin_lock_irq(&drm_dev->event_lock); - *val = acrtc->dm_irq_params.crc_window.x_start; + *val = acrtc->dm_irq_params.window_param[0].x_start; spin_unlock_irq(&drm_dev->event_lock); return 0; @@ -3116,8 +3616,8 @@ static int crc_win_y_start_set(void *data, u64 val) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); spin_lock_irq(&drm_dev->event_lock); - acrtc->dm_irq_params.crc_window.y_start = (uint16_t) val; - acrtc->dm_irq_params.crc_window.update_win = false; + acrtc->dm_irq_params.window_param[0].y_start = (uint16_t) val; + acrtc->dm_irq_params.window_param[0].update_win = false; spin_unlock_irq(&drm_dev->event_lock); return 0; @@ -3133,7 +3633,7 @@ static int crc_win_y_start_get(void *data, u64 *val) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); spin_lock_irq(&drm_dev->event_lock); - *val = acrtc->dm_irq_params.crc_window.y_start; + *val = acrtc->dm_irq_params.window_param[0].y_start; spin_unlock_irq(&drm_dev->event_lock); return 0; @@ -3152,8 +3652,8 @@ static int crc_win_x_end_set(void *data, u64 val) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); spin_lock_irq(&drm_dev->event_lock); - acrtc->dm_irq_params.crc_window.x_end = (uint16_t) val; - acrtc->dm_irq_params.crc_window.update_win = false; + acrtc->dm_irq_params.window_param[0].x_end = (uint16_t) val; + acrtc->dm_irq_params.window_param[0].update_win = false; spin_unlock_irq(&drm_dev->event_lock); return 0; @@ -3169,7 +3669,7 @@ static int crc_win_x_end_get(void *data, u64 *val) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); spin_lock_irq(&drm_dev->event_lock); - *val = acrtc->dm_irq_params.crc_window.x_end; + *val = acrtc->dm_irq_params.window_param[0].x_end; spin_unlock_irq(&drm_dev->event_lock); return 0; @@ -3188,8 +3688,8 @@ static int crc_win_y_end_set(void *data, u64 val) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); spin_lock_irq(&drm_dev->event_lock); - acrtc->dm_irq_params.crc_window.y_end = (uint16_t) val; - acrtc->dm_irq_params.crc_window.update_win = false; + acrtc->dm_irq_params.window_param[0].y_end = (uint16_t) val; + acrtc->dm_irq_params.window_param[0].update_win = false; spin_unlock_irq(&drm_dev->event_lock); return 0; @@ -3205,7 +3705,7 @@ static int crc_win_y_end_get(void *data, u64 *val) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); spin_lock_irq(&drm_dev->event_lock); - *val = acrtc->dm_irq_params.crc_window.y_end; + *val = acrtc->dm_irq_params.window_param[0].y_end; spin_unlock_irq(&drm_dev->event_lock); return 0; @@ -3218,41 +3718,27 @@ DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_end_fops, crc_win_y_end_get, */ static int crc_win_update_set(void *data, u64 val) { - struct drm_crtc *new_crtc = data; - struct drm_crtc *old_crtc = NULL; - struct amdgpu_crtc *new_acrtc, *old_acrtc; - struct amdgpu_device *adev = drm_to_adev(new_crtc->dev); - struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk; - - if (!crc_rd_wrk) - return 0; + struct drm_crtc *crtc = data; 
+ struct amdgpu_crtc *acrtc; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); if (val) { + acrtc = to_amdgpu_crtc(crtc); + mutex_lock(&adev->dm.dc_lock); + /* PSR may write to OTG CRC window control register, + * so close it before starting secure_display. + */ + amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream, true); + spin_lock_irq(&adev_to_drm(adev)->event_lock); - spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); - if (crc_rd_wrk->crtc) { - old_crtc = crc_rd_wrk->crtc; - old_acrtc = to_amdgpu_crtc(old_crtc); - } - new_acrtc = to_amdgpu_crtc(new_crtc); - if (old_crtc && old_crtc != new_crtc) { - old_acrtc->dm_irq_params.crc_window.activated = false; - old_acrtc->dm_irq_params.crc_window.update_win = false; - old_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0; + acrtc->dm_irq_params.window_param[0].enable = true; + acrtc->dm_irq_params.window_param[0].update_win = true; + acrtc->dm_irq_params.window_param[0].skip_frame_cnt = 0; + acrtc->dm_irq_params.crc_window_activated = true; - new_acrtc->dm_irq_params.crc_window.activated = true; - new_acrtc->dm_irq_params.crc_window.update_win = true; - new_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0; - crc_rd_wrk->crtc = new_crtc; - } else { - new_acrtc->dm_irq_params.crc_window.activated = true; - new_acrtc->dm_irq_params.crc_window.update_win = true; - new_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0; - crc_rd_wrk->crtc = new_crtc; - } - spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); spin_unlock_irq(&adev_to_drm(adev)->event_lock); + mutex_unlock(&adev->dm.dc_lock); } return 0; @@ -3292,6 +3778,8 @@ void crtc_debugfs_init(struct drm_crtc *crtc) #endif debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry, crtc, &amdgpu_current_bpc_fops); + debugfs_create_file("amdgpu_current_colorspace", 0644, crtc->debugfs_entry, + crtc, &amdgpu_current_colorspace_fops); } /* @@ -3398,6 +3886,7 @@ static int trigger_hpd_mst_set(void *data, u64 val) struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; struct dc_link *link = NULL; + int ret; if (val == 1) { drm_connector_list_iter_begin(dev, &iter); @@ -3406,10 +3895,15 @@ static int trigger_hpd_mst_set(void *data, u64 val) if (aconnector->dc_link->type == dc_connection_mst_branch && aconnector->mst_mgr.aux) { mutex_lock(&adev->dm.dc_lock); - dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); + ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); mutex_unlock(&adev->dm.dc_lock); - drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); + if (!ret) + DRM_ERROR("DM_MST: Failed to detect dc link!"); + + ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); + if (ret < 0) + DRM_ERROR("DM_MST: Failed to set the device into MST mode!"); } } } else if (val == 0) { @@ -3419,12 +3913,12 @@ static int trigger_hpd_mst_set(void *data, u64 val) if (!aconnector->dc_link) continue; - if (!aconnector->mst_port) + if (!aconnector->mst_root) continue; link = aconnector->dc_link; - dp_receiver_power_ctrl(link, false); - drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_port->mst_mgr, false); + dc_link_dp_receiver_power_ctrl(link, false); + drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_root->mst_mgr, false); link->mst_stream_alloc_table.stream_count = 0; memset(link->mst_stream_alloc_table.stream_allocations, 0, sizeof(link->mst_stream_alloc_table.stream_allocations)); @@ -3516,6 +4010,36 @@ DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get, disable_hpd_set, "%llu\n"); /* + * Prints hardware capabilities. These are used for IGT testing. 
+ */ +static int capabilities_show(struct seq_file *m, void *unused) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct dc *dc = adev->dm.dc; + bool mall_supported = dc->caps.mall_size_total; + bool subvp_supported = dc->caps.subvp_fw_processing_delay_us; + unsigned int mall_in_use = false; + unsigned int subvp_in_use = false; + + struct hubbub *hubbub = dc->res_pool->hubbub; + + if (hubbub && hubbub->funcs->get_mall_en) + hubbub->funcs->get_mall_en(hubbub, &mall_in_use); + + if (dc->cap_funcs.get_subvp_en) + subvp_in_use = dc->cap_funcs.get_subvp_en(dc, dc->current_state); + + seq_printf(m, "mall supported: %s, enabled: %s\n", + mall_supported ? "yes" : "no", mall_in_use ? "yes" : "no"); + seq_printf(m, "sub-viewport supported: %s, enabled: %s\n", + subvp_supported ? "yes" : "no", subvp_in_use ? "yes" : "no"); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(capabilities); + +/* * Temporary w/a to force sst sequence in M42D DP2 mst receiver * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_set_mst_en_for_sst */ @@ -3708,6 +4232,8 @@ void dtn_debugfs_init(struct amdgpu_device *adev) debugfs_create_file("amdgpu_mst_topology", 0444, root, adev, &mst_topo_fops); + debugfs_create_file("amdgpu_dm_capabilities", 0444, root, + adev, &capabilities_fops); debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev, &dtn_log_fops); debugfs_create_file("amdgpu_dm_dp_set_mst_en_for_sst", 0644, root, adev, @@ -3730,6 +4256,9 @@ void dtn_debugfs_init(struct amdgpu_device *adev) debugfs_create_file_unsafe("amdgpu_dm_force_timing_sync", 0644, root, adev, &force_timing_sync_ops); + debugfs_create_file_unsafe("amdgpu_dm_dmub_trace_mask", 0644, root, + adev, &dmub_trace_mask_fops); + debugfs_create_file_unsafe("amdgpu_dm_dmcub_trace_event_en", 0644, root, adev, &dmcub_trace_event_state_fops); @@ -3742,4 +4271,7 @@ void dtn_debugfs_init(struct amdgpu_device *adev) debugfs_create_file_unsafe("amdgpu_dm_disable_hpd", 0644, root, adev, &disable_hpd_ops); + if (adev->dm.dc->caps.ips_support) + debugfs_create_file_unsafe("amdgpu_dm_ips_status", 0644, root, adev, + &ips_status_fops); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h index 071200473c27..122cdc124b3b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2018 Advanced Micro Devices, Inc. * diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 6202e31c7e3a..85ce558cefc5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2019 Advanced Micro Devices, Inc. 
* @@ -26,6 +27,7 @@ #include "amdgpu_dm_hdcp.h" #include "amdgpu.h" #include "amdgpu_dm.h" +#include "dc_fused_io.h" #include "dm_helpers.h" #include <drm/display/drm_hdcp_helper.h> #include "hdcp_psp.h" @@ -39,10 +41,10 @@ static bool lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size) { - struct dc_link *link = handle; struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} }; - struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, + link->dc->caps.i2c_speed_in_khz}; return dm_helpers_submit_i2c(link->ctx, link, &cmd); } @@ -52,8 +54,10 @@ lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint3 { struct dc_link *link = handle; - struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} }; - struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, + {false, address, size, data} }; + struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, + link->dc->caps.i2c_speed_in_khz}; return dm_helpers_submit_i2c(link->ctx, link, &cmd); } @@ -74,9 +78,36 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size) return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size); } -static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size) +static bool lp_atomic_write_poll_read_i2c( + void *handle, + const struct mod_hdcp_atomic_op_i2c *write, + const struct mod_hdcp_atomic_op_i2c *poll, + struct mod_hdcp_atomic_op_i2c *read, + uint32_t poll_timeout_us, + uint8_t poll_mask_msb +) +{ + struct dc_link *link = handle; + + return dm_atomic_write_poll_read_i2c(link, write, poll, read, poll_timeout_us, poll_mask_msb); +} + +static bool lp_atomic_write_poll_read_aux( + void *handle, + const struct mod_hdcp_atomic_op_aux *write, + const struct mod_hdcp_atomic_op_aux *poll, + struct mod_hdcp_atomic_op_aux *read, + uint32_t poll_timeout_us, + uint8_t poll_mask_msb +) { + struct dc_link *link = handle; + + return dm_atomic_write_poll_read_aux(link, write, poll, read, poll_timeout_us, poll_mask_msb); +} +static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size) +{ struct ta_hdcp_shared_memory *hdcp_cmd; if (!psp->hdcp_context.context.initialized) { @@ -96,13 +127,12 @@ static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint *srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version; *srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size; - return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf; } -static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, uint32_t *srm_version) +static int psp_set_srm(struct psp_context *psp, + u8 *srm, uint32_t srm_size, uint32_t *srm_version) { - struct ta_hdcp_shared_memory *hdcp_cmd; if (!psp->hdcp_context.context.initialized) { @@ -119,7 +149,8 @@ static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 || + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || + hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 || hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX) return -EINVAL; @@ -150,7 +181,6 @@ static void 
process_output(struct hdcp_workqueue *hdcp_work) static void link_lock(struct hdcp_workqueue *work, bool lock) { - int i = 0; for (i = 0; i < work->max_link; i++) { @@ -160,72 +190,74 @@ static void link_lock(struct hdcp_workqueue *work, bool lock) mutex_unlock(&work[i].mutex); } } + void hdcp_update_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector, - uint8_t content_type, + u8 content_type, bool enable_encryption) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; - struct mod_hdcp_display *display = &hdcp_work[link_index].display; - struct mod_hdcp_link *link = &hdcp_work[link_index].link; - struct mod_hdcp_display_query query; - - mutex_lock(&hdcp_w->mutex); - hdcp_w->aconnector = aconnector; - - query.display = NULL; - mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query); - - if (query.display != NULL) { - memcpy(display, query.display, sizeof(struct mod_hdcp_display)); - mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); - - hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; - - if (enable_encryption) { - /* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp - * (s3 resume case) - */ - if (hdcp_work->srm_size > 0) - psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size, - &hdcp_work->srm_version); - - display->adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE; - if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) { - hdcp_w->link.adjust.hdcp1.disable = 0; - hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; - } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) { - hdcp_w->link.adjust.hdcp1.disable = 1; - hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1; - } - - schedule_delayed_work(&hdcp_w->property_validate_dwork, - msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); - } else { - display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; - hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; - cancel_delayed_work(&hdcp_w->property_validate_dwork); + struct mod_hdcp_link_adjustment link_adjust; + struct mod_hdcp_display_adjustment display_adjust; + unsigned int conn_index = aconnector->base.index; + const struct dc *dc = aconnector->dc_link->dc; + + guard(mutex)(&hdcp_w->mutex); + drm_connector_get(&aconnector->base); + if (hdcp_w->aconnector[conn_index]) + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = aconnector; + + memset(&link_adjust, 0, sizeof(link_adjust)); + memset(&display_adjust, 0, sizeof(display_adjust)); + + if (enable_encryption) { + /* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp + * (s3 resume case) + */ + if (hdcp_work->srm_size > 0) + psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, + hdcp_work->srm_size, + &hdcp_work->srm_version); + + display_adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE; + + link_adjust.auth_delay = 2; + link_adjust.retry_limit = MAX_NUM_OF_ATTEMPTS; + + if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) { + link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; + } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) { + link_adjust.hdcp1.disable = 1; + link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1; } - - display->state = MOD_HDCP_DISPLAY_ACTIVE; + link_adjust.hdcp2.use_fw_locality_check = + (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable); + link_adjust.hdcp2.use_sw_locality_fallback = 
dc->debug.hdcp_lc_enable_sw_fallback; + + schedule_delayed_work(&hdcp_w->property_validate_dwork, + msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); + } else { + display_adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; + hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + cancel_delayed_work(&hdcp_w->property_validate_dwork); } - mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); + mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output); process_output(hdcp_w); - mutex_unlock(&hdcp_w->mutex); } static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, - unsigned int link_index, + unsigned int link_index, struct amdgpu_dm_connector *aconnector) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; struct drm_connector_state *conn_state = aconnector->base.state; + unsigned int conn_index = aconnector->base.index; - mutex_lock(&hdcp_w->mutex); - hdcp_w->aconnector = aconnector; + guard(mutex)(&hdcp_w->mutex); /* the removal of display will invoke auth reset -> hdcp destroy and * we'd expect the Content Protection (CP) property changed back to @@ -236,28 +268,39 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP 2 -> 1, type %u, DPMS %u\n", - aconnector->base.index, conn_state->hdcp_content_type, aconnector->base.dpms); + aconnector->base.index, conn_state->hdcp_content_type, + aconnector->base.dpms); } mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); - + if (hdcp_w->aconnector[conn_index]) { + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = NULL; + } process_output(hdcp_w); - mutex_unlock(&hdcp_w->mutex); } + void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + unsigned int conn_index; - mutex_lock(&hdcp_w->mutex); + guard(mutex)(&hdcp_w->mutex); mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output); cancel_delayed_work(&hdcp_w->property_validate_dwork); - hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; - process_output(hdcp_w); + for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { + hdcp_w->encryption_status[conn_index] = + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + if (hdcp_w->aconnector[conn_index]) { + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = NULL; + } + } - mutex_unlock(&hdcp_w->mutex); + process_output(hdcp_w); } void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index) @@ -267,17 +310,14 @@ void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index schedule_work(&hdcp_w->cpirq_work); } - - - static void event_callback(struct work_struct *work) { struct hdcp_workqueue *hdcp_work; hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, - callback_dwork); + callback_dwork); - mutex_lock(&hdcp_work->mutex); + guard(mutex)(&hdcp_work->mutex); cancel_delayed_work(&hdcp_work->callback_dwork); @@ -285,54 +325,76 @@ static void event_callback(struct work_struct *work) &hdcp_work->output); process_output(hdcp_work); - - mutex_unlock(&hdcp_work->mutex); - - } + static void event_property_update(struct work_struct *work) { - - struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work); - struct 
amdgpu_dm_connector *aconnector = hdcp_work->aconnector; - struct drm_device *dev = hdcp_work->aconnector->base.dev; + struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, + property_update_work); + struct amdgpu_dm_connector *aconnector = NULL; + struct drm_device *dev; long ret; + unsigned int conn_index; + struct drm_connector *connector; + struct drm_connector_state *conn_state; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - mutex_lock(&hdcp_work->mutex); + for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { + aconnector = hdcp_work->aconnector[conn_index]; + if (!aconnector) + continue; - if (aconnector->base.state && aconnector->base.state->commit) { - ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ); + connector = &aconnector->base; - if (ret == 0) { - DRM_ERROR("HDCP state unknown! Setting it to DESIRED"); - hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; - } - } + /* check if display connected */ + if (connector->status != connector_status_connected) + continue; + + conn_state = aconnector->base.state; - if (aconnector->base.state) { - if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) { - if (aconnector->base.state->hdcp_content_type == + if (!conn_state) + continue; + + dev = connector->dev; + + if (!dev) + continue; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + guard(mutex)(&hdcp_work->mutex); + + if (conn_state->commit) { + ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done, + 10 * HZ); + if (ret == 0) { + DRM_ERROR("HDCP state unknown! Setting it to DESIRED\n"); + hdcp_work->encryption_status[conn_index] = + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + } + } + if (hdcp_work->encryption_status[conn_index] != + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) { + if (conn_state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 && - hdcp_work->encryption_status <= - MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) - drm_hdcp_update_content_protection(&aconnector->base, - DRM_MODE_CONTENT_PROTECTION_ENABLED); - else if (aconnector->base.state->hdcp_content_type == + hdcp_work->encryption_status[conn_index] <= + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) { + DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n"); + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED); + } else if (conn_state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 && - hdcp_work->encryption_status == - MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) - drm_hdcp_update_content_protection(&aconnector->base, - DRM_MODE_CONTENT_PROTECTION_ENABLED); + hdcp_work->encryption_status[conn_index] == + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) { + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED); + } } else { - drm_hdcp_update_content_protection(&aconnector->base, - DRM_MODE_CONTENT_PROTECTION_DESIRED); + DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n"); + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED); } + drm_modeset_unlock(&dev->mode_config.connection_mutex); } - - mutex_unlock(&hdcp_work->mutex); - drm_modeset_unlock(&dev->mode_config.connection_mutex); } static void event_property_validate(struct work_struct *work) @@ -340,22 +402,49 @@ static void event_property_validate(struct work_struct *work) struct hdcp_workqueue *hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, 
property_validate_dwork); struct mod_hdcp_display_query query; - struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; + struct amdgpu_dm_connector *aconnector; + unsigned int conn_index; - if (!aconnector) - return; + guard(mutex)(&hdcp_work->mutex); - mutex_lock(&hdcp_work->mutex); + for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; + conn_index++) { + aconnector = hdcp_work->aconnector[conn_index]; - query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; - mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query); + if (!aconnector) + continue; - if (query.encryption_status != hdcp_work->encryption_status) { - hdcp_work->encryption_status = query.encryption_status; - schedule_work(&hdcp_work->property_update_work); - } + /* check if display connected */ + if (aconnector->base.status != connector_status_connected) + continue; + + if (!aconnector->base.state) + continue; - mutex_unlock(&hdcp_work->mutex); + query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, + &query); + + DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n", + aconnector->base.index, + aconnector->base.state->content_protection, + query.encryption_status, + hdcp_work->encryption_status[conn_index]); + + if (query.encryption_status != + hdcp_work->encryption_status[conn_index]) { + DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n", + hdcp_work->encryption_status[conn_index], + query.encryption_status); + + hdcp_work->encryption_status[conn_index] = + query.encryption_status; + + DRM_DEBUG_DRIVER("[HDCP_DM] trigger property_update_work\n"); + + schedule_work(&hdcp_work->property_update_work); + } + } } static void event_watchdog_timer(struct work_struct *work) @@ -363,10 +452,10 @@ static void event_watchdog_timer(struct work_struct *work) struct hdcp_workqueue *hdcp_work; hdcp_work = container_of(to_delayed_work(work), - struct hdcp_workqueue, + struct hdcp_workqueue, watchdog_timer_dwork); - mutex_lock(&hdcp_work->mutex); + guard(mutex)(&hdcp_work->mutex); cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); @@ -375,9 +464,6 @@ static void event_watchdog_timer(struct work_struct *work) &hdcp_work->output); process_output(hdcp_work); - - mutex_unlock(&hdcp_work->mutex); - } static void event_cpirq(struct work_struct *work) @@ -386,17 +472,13 @@ static void event_cpirq(struct work_struct *work) hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work); - mutex_lock(&hdcp_work->mutex); + guard(mutex)(&hdcp_work->mutex); mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output); process_output(hdcp_work); - - mutex_unlock(&hdcp_work->mutex); - } - void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work) { int i = 0; @@ -404,6 +486,7 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work) for (i = 0; i < hdcp_work->max_link; i++) { cancel_delayed_work_sync(&hdcp_work[i].callback_dwork); cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork); + cancel_delayed_work_sync(&hdcp_work[i].property_validate_dwork); } sysfs_remove_bin_file(kobj, &hdcp_work[0].attr); @@ -412,15 +495,12 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work) kfree(hdcp_work); } - static bool enable_assr(void *handle, struct dc_link *link) { - struct hdcp_workqueue *hdcp_work = handle; struct mod_hdcp hdcp = hdcp_work->hdcp; struct psp_context *psp = hdcp.config.psp.handle; struct 
ta_dtm_shared_memory *dtm_cmd; - bool res = true; if (!psp->dtm_context.context.initialized) { DRM_INFO("Failed to enable ASSR, DTM TA is not initialized."); @@ -429,23 +509,22 @@ static bool enable_assr(void *handle, struct dc_link *link) dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf; - mutex_lock(&psp->dtm_context.mutex); + guard(mutex)(&psp->dtm_context.mutex); memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE; - dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = link->link_enc_hw_inst; + dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = + link->link_enc_hw_inst; dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; psp_dtm_invoke(psp, dtm_cmd->cmd_id); if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) { DRM_INFO("Failed to enable ASSR"); - res = false; + return false; } - mutex_unlock(&psp->dtm_context.mutex); - - return res; + return true; } static void update_config(void *handle, struct cp_psp_stream_config *config) @@ -453,11 +532,13 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) struct hdcp_workqueue *hdcp_work = handle; struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx; int link_index = aconnector->dc_link->link_index; + unsigned int conn_index = aconnector->base.index; struct mod_hdcp_display *display = &hdcp_work[link_index].display; struct mod_hdcp_link *link = &hdcp_work[link_index].link; - struct drm_connector_state *conn_state; + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; struct dc_sink *sink = NULL; bool link_is_hdcp14 = false; + const struct dc *dc = aconnector->dc_link->dc; if (config->dpms_off) { hdcp_remove_display(hdcp_work, link_index, aconnector); @@ -475,7 +556,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) else if (aconnector->dc_em_sink) sink = aconnector->dc_em_sink; - if (sink != NULL) + if (sink) link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal); display->controller = CONTROLLER_ID_D0 + config->otg_inst; @@ -493,23 +574,36 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; link->dp.assr_enabled = config->assr_enabled; link->dp.mst_enabled = config->mst_enabled; + link->dp.dp2_enabled = config->dp2_enabled; link->dp.usb4_enabled = config->usb4_enabled; display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; - link->adjust.auth_delay = 3; + link->adjust.auth_delay = 2; + link->adjust.retry_limit = MAX_NUM_OF_ATTEMPTS; link->adjust.hdcp1.disable = 0; - conn_state = aconnector->base.state; + link->adjust.hdcp2.use_fw_locality_check = (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable); + link->adjust.hdcp2.use_sw_locality_fallback = dc->debug.hdcp_lc_enable_sw_fallback; + hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index, - (!!aconnector->base.state) ? aconnector->base.state->content_protection : -1, - (!!aconnector->base.state) ? aconnector->base.state->hdcp_content_type : -1); + (!!aconnector->base.state) ? + aconnector->base.state->content_protection : -1, + (!!aconnector->base.state) ? 
+ aconnector->base.state->hdcp_content_type : -1); - if (conn_state) - hdcp_update_display(hdcp_work, link_index, aconnector, - conn_state->hdcp_content_type, false); -} + guard(mutex)(&hdcp_w->mutex); + mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); + drm_connector_get(&aconnector->base); + if (hdcp_w->aconnector[conn_index]) + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = aconnector; + process_output(hdcp_w); +} -/* NOTE: From the usermodes prospective you only need to call write *ONCE*, the kernel +/** + * DOC: Add sysfs interface for set/get srm + * + * NOTE: From the usermodes prospective you only need to call write *ONCE*, the kernel * will automatically call once or twice depending on the size * * call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is @@ -520,23 +614,23 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) * sysfs interface doesn't tell us the size we will get so we are sending partial SRMs to psp and on * the last call we will send the full SRM. PSP will fail on every call before the last. * - * This means we don't know if the SRM is good until the last call. And because of this limitation we - * cannot throw errors early as it will stop the kernel from writing to sysfs + * This means we don't know if the SRM is good until the last call. And because of this + * limitation we cannot throw errors early as it will stop the kernel from writing to sysfs * * Example 1: - * Good SRM size = 5096 - * first call to write 4096 -> PSP fails - * Second call to write 1000 -> PSP Pass -> SRM is set + * Good SRM size = 5096 + * first call to write 4096 -> PSP fails + * Second call to write 1000 -> PSP Pass -> SRM is set * * Example 2: - * Bad SRM size = 4096 - * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this - * is the last call) + * Bad SRM size = 4096 + * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this + * is the last call) * * Solution?: - * 1: Parse the SRM? -> It is signed so we don't know the EOF - * 2: We can have another sysfs that passes the size before calling set. -> simpler solution - * below + * 1: Parse the SRM? -> It is signed so we don't know the EOF + * 2: We can have another sysfs that passes the size before calling set. -> simpler solution + * below * * Easy Solution: * Always call get after Set to verify if set was successful. @@ -545,20 +639,21 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) * +----------------------+ * PSP will only update its srm if its older than the one we are trying to load. * Always do set first than get. - * -if we try to "1. SET" a older version PSP will reject it and we can "2. GET" the newer - * version and save it + * -if we try to "1. SET" a older version PSP will reject it and we can "2. GET" the newer + * version and save it * - * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the - * same(newer) version back and save it + * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the + * same(newer) version back and save it * - * -if we try to "1. SET" a newer version and PSP rejects it. That means the format is - * incorrect/corrupted and we should correct our SRM by getting it from PSP + * -if we try to "1. SET" a newer version and PSP rejects it. 
That means the format is + * incorrect/corrupted and we should correct our SRM by getting it from PSP */ -static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, +static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, + const struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { struct hdcp_workqueue *work; - uint32_t srm_version = 0; + u32 srm_version = 0; work = container_of(bin_attr, struct hdcp_workqueue, attr); link_lock(work, true); @@ -572,19 +667,19 @@ static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bi work->srm_version = srm_version; } - link_lock(work, false); return count; } -static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, +static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, + const struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { struct hdcp_workqueue *work; - uint8_t *srm = NULL; - uint32_t srm_version; - uint32_t srm_size; + u8 *srm = NULL; + u32 srm_version; + u32 srm_size; size_t ret = count; work = container_of(bin_attr, struct hdcp_workqueue, attr); @@ -617,12 +712,12 @@ ret: /* From the hdcp spec (5.Renewability) SRM needs to be stored in a non-volatile memory. * * For example, - * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B" - * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent - * across boot/reboots/suspend/resume/shutdown + * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B" + * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent + * across boot/reboots/suspend/resume/shutdown * - * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP we need - * to make the SRM persistent. + * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP + * we need to make the SRM persistent. * * -PSP owns the checking of SRM but doesn't have the ability to store it in a non-volatile memory. * -The kernel cannot write to the file systems. 
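A minimal usage sketch of the "set then get" verification described above, in the same shell style as the comment's own examples; the sysfs node is the one the comment names, while the srmfile and srm_readback file names are illustrative:

cat srmfile > /sys/class/drm/card0/device/hdcp_srm         # 1. SET the SRM
cat /sys/class/drm/card0/device/hdcp_srm > srm_readback    # 2. GET it back
cmp -s srmfile srm_readback && echo "SRM accepted by PSP"  # equal readback means the set took effect

If the readback differs, PSP kept its own (newer or previously valid) SRM, and that readback is the copy userspace should persist across reboots, as the following comment explains.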
@@ -632,8 +727,8 @@ ret: * * Usermode can read/write to/from PSP using the sysfs interface * For example: - * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile - * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm + * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile + * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm */ static const struct bin_attribute data_attr = { .attr = {.name = "hdcp_srm", .mode = 0664}, @@ -642,10 +737,9 @@ static const struct bin_attribute data_attr = { .read = srm_data_read, }; - -struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc) +struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, + struct cp_psp *cp_psp, struct dc *dc) { - int max_caps = dc->caps.max_links; struct hdcp_workqueue *hdcp_work; int i = 0; @@ -654,14 +748,16 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct if (ZERO_OR_NULL_PTR(hdcp_work)) return NULL; - hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL); + hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, + sizeof(*hdcp_work->srm), GFP_KERNEL); - if (hdcp_work->srm == NULL) + if (!hdcp_work->srm) goto fail_alloc_context; - hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL); + hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, + sizeof(*hdcp_work->srm_temp), GFP_KERNEL); - if (hdcp_work->srm_temp == NULL) + if (!hdcp_work->srm_temp) goto fail_alloc_context; hdcp_work->max_link = max_caps; @@ -675,17 +771,37 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer); INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate); - hdcp_work[i].hdcp.config.psp.handle = &adev->psp; - if (dc->ctx->dce_version == DCN_VERSION_3_1 || + struct mod_hdcp_config *config = &hdcp_work[i].hdcp.config; + struct mod_hdcp_ddc_funcs *ddc_funcs = &config->ddc.funcs; + + config->psp.handle = &adev->psp; + if (dc->ctx->dce_version == DCN_VERSION_3_1 || dc->ctx->dce_version == DCN_VERSION_3_14 || dc->ctx->dce_version == DCN_VERSION_3_15 || - dc->ctx->dce_version == DCN_VERSION_3_16) - hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1; - hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i); - hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c; - hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c; - hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd; - hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd; + dc->ctx->dce_version == DCN_VERSION_3_16 || + dc->ctx->dce_version == DCN_VERSION_3_2 || + dc->ctx->dce_version == DCN_VERSION_3_21 || + dc->ctx->dce_version == DCN_VERSION_3_5 || + dc->ctx->dce_version == DCN_VERSION_3_51 || + dc->ctx->dce_version == DCN_VERSION_3_6 || + dc->ctx->dce_version == DCN_VERSION_4_01) + config->psp.caps.dtm_v3_supported = 1; + + config->ddc.handle = dc_get_link_at_index(dc, i); + + ddc_funcs->write_i2c = lp_write_i2c; + ddc_funcs->read_i2c = lp_read_i2c; + ddc_funcs->write_dpcd = lp_write_dpcd; + ddc_funcs->read_dpcd = lp_read_dpcd; + ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c; + ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux; + + memset(hdcp_work[i].aconnector, 0, 
+ sizeof(struct amdgpu_dm_connector *) * + AMDGPU_DM_MAX_DISPLAY_INDEX); + memset(hdcp_work[i].encryption_status, 0, + sizeof(enum mod_hdcp_encryption_status) * + AMDGPU_DM_MAX_DISPLAY_INDEX); } cp_psp->funcs.update_stream_config = update_config; @@ -707,10 +823,5 @@ fail_alloc_context: kfree(hdcp_work); return NULL; - - - } - - diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h index 09294ff122fe..4faa344f196e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2019 Advanced Micro Devices, Inc. * @@ -43,7 +44,7 @@ struct hdcp_workqueue { struct delayed_work callback_dwork; struct delayed_work watchdog_timer_dwork; struct delayed_work property_validate_dwork; - struct amdgpu_dm_connector *aconnector; + struct amdgpu_dm_connector *aconnector[AMDGPU_DM_MAX_DISPLAY_INDEX]; struct mutex mutex; struct mod_hdcp hdcp; @@ -51,7 +52,20 @@ struct hdcp_workqueue { struct mod_hdcp_display display; struct mod_hdcp_link link; - enum mod_hdcp_encryption_status encryption_status; + enum mod_hdcp_encryption_status encryption_status[AMDGPU_DM_MAX_DISPLAY_INDEX]; + /* when display is unplugged from mst hub, connctor will be + * destroyed within dm_dp_mst_connector_destroy. connector + * hdcp perperties, like type, undesired, desired, enabled, + * will be lost. So, save hdcp properties into hdcp_work within + * amdgpu_dm_atomic_commit_tail. if the same display is + * plugged back with same display index, its hdcp properties + * will be retrieved from hdcp_work within dm_dp_mst_get_modes + */ + /* un-desired, desired, enabled */ + unsigned int content_protection[AMDGPU_DM_MAX_DISPLAY_INDEX]; + /* hdcp1.x, hdcp2.x */ + unsigned int hdcp_content_type[AMDGPU_DM_MAX_DISPLAY_INDEX]; + uint8_t max_link; uint8_t *srm; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index f0b01c8dc4a6..ac98c746c3de 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2015 Advanced Micro Devices, Inc. 
* @@ -23,6 +24,8 @@ * */ +#include <acpi/video.h> + #include <linux/string.h> #include <linux/acpi.h> #include <linux/i2c.h> @@ -31,6 +34,7 @@ #include <drm/drm_probe_helper.h> #include <drm/amdgpu_drm.h> #include <drm/drm_edid.h> +#include <drm/drm_fixed.h> #include "dm_services.h" #include "amdgpu.h" @@ -38,52 +42,65 @@ #include "amdgpu_dm.h" #include "amdgpu_dm_irq.h" #include "amdgpu_dm_mst_types.h" +#include "dpcd_defs.h" +#include "dc/inc/core_types.h" #include "dm_helpers.h" #include "ddc_service_types.h" +#include "clk_mgr.h" -struct monitor_patch_info { - unsigned int manufacturer_id; - unsigned int product_id; - void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param); - unsigned int patch_param; -}; -static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param); - -static const struct monitor_patch_info monitor_patch_table[] = { -{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15}, -{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15}, -}; - -static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param) +static u32 edid_extract_panel_id(struct edid *edid) { - if (edid_caps) - edid_caps->panel_patch.max_dsc_target_bpp_limit = param; + return (u32)edid->mfg_id[0] << 24 | + (u32)edid->mfg_id[1] << 16 | + (u32)EDID_PRODUCT_ID(edid); } -static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps) +static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct dc_edid_caps *edid_caps) { - int i, ret = 0; - - for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++) - if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id) - && (edid_caps->product_id == monitor_patch_table[i].product_id)) { - monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param); - ret++; - } - - return ret; + uint32_t panel_id = edid_extract_panel_id(edid); + + switch (panel_id) { + /* Workaround for monitors that need a delay after detecting the link */ + case drm_edid_encode_panel_id('G', 'B', 'T', 0x3215): + drm_dbg_driver(dev, "Add 10s delay for link detection for panel id %X\n", panel_id); + edid_caps->panel_patch.wait_after_dpcd_poweroff_ms = 10000; + break; + /* Workaround for some monitors which does not work well with FAMS */ + case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E): + case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053): + case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC): + drm_dbg_driver(dev, "Disabling FAMS on monitor with panel id %X\n", panel_id); + edid_caps->panel_patch.disable_fams = true; + break; + /* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */ + case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB): + case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B): + case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A): + case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1): + case drm_edid_encode_panel_id('M', 'S', 'F', 0x1003): + drm_dbg_driver(dev, "Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id); + edid_caps->panel_patch.remove_sink_ext_caps = true; + break; + case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154): + case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171): + drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id); + edid_caps->panel_patch.disable_colorimetry = true; + break; + default: + return; + } } -/* dm_helpers_parse_edid_caps - * - * Parse edid caps +/** + * dm_helpers_parse_edid_caps() - Parse edid caps * + * @link: current detected link * @edid: [in] pointer to edid - * edid_caps: 
[in] pointer to edid caps - * @return - * void - * */ + * @edid_caps: [in] pointer to edid caps + * + * Return: void + */ enum dc_edid_status dm_helpers_parse_edid_caps( struct dc_link *link, const struct dc_edid *edid, @@ -91,6 +108,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps( { struct amdgpu_dm_connector *aconnector = link->priv; struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL; struct cea_sad *sads; int sad_count = -1; @@ -113,6 +131,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps( edid_caps->serial_number = edid_buf->serial; edid_caps->manufacture_week = edid_buf->mfg_week; edid_caps->manufacture_year = edid_buf->mfg_year; + edid_caps->analog = !(edid_buf->input & DRM_EDID_INPUT_DIGITAL); drm_edid_get_monitor_name(edid_buf, edid_caps->display_name, @@ -120,11 +139,13 @@ enum dc_edid_status dm_helpers_parse_edid_caps( edid_caps->edid_hdmi = connector->display_info.is_hdmi; + apply_edid_quirks(dev, edid_buf, edid_caps); + sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); if (sad_count <= 0) return result; - edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT; + edid_caps->audio_mode_count = min(sad_count, DC_MAX_AUDIO_DESC_COUNT); for (i = 0; i < edid_caps->audio_mode_count; ++i) { struct cea_sad *sad = &sads[i]; @@ -149,29 +170,54 @@ enum dc_edid_status dm_helpers_parse_edid_caps( kfree(sads); kfree(sadb); - amdgpu_dm_patch_edid_caps(edid_caps); - return result; } static void -fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state, - struct amdgpu_dm_connector *aconnector, +fill_dc_mst_payload_table_from_drm(struct dc_link *link, + bool enable, + struct drm_dp_mst_atomic_payload *target_payload, struct dc_dp_mst_stream_allocation_table *table) { struct dc_dp_mst_stream_allocation_table new_table = { 0 }; struct dc_dp_mst_stream_allocation *sa; - struct drm_dp_mst_atomic_payload *payload; + struct link_mst_stream_allocation_table copy_of_link_table = + link->mst_stream_alloc_table; - /* Fill payload info*/ - list_for_each_entry(payload, &mst_state->payloads, next) { - if (payload->delete) - continue; + int i; + int current_hw_table_stream_cnt = copy_of_link_table.stream_count; + struct link_mst_stream_allocation *dc_alloc; - sa = &new_table.stream_allocations[new_table.stream_count]; - sa->slot_count = payload->time_slots; - sa->vcp_id = payload->vcpi; - new_table.stream_count++; + /* TODO: refactor to set link->mst_stream_alloc_table directly if possible.*/ + if (enable) { + dc_alloc = + &copy_of_link_table.stream_allocations[current_hw_table_stream_cnt]; + dc_alloc->vcp_id = target_payload->vcpi; + dc_alloc->slot_count = target_payload->time_slots; + } else { + for (i = 0; i < copy_of_link_table.stream_count; i++) { + dc_alloc = + &copy_of_link_table.stream_allocations[i]; + + if (dc_alloc->vcp_id == target_payload->vcpi) { + dc_alloc->vcp_id = 0; + dc_alloc->slot_count = 0; + break; + } + } + ASSERT(i != copy_of_link_table.stream_count); + } + + /* Fill payload info*/ + for (i = 0; i < MAX_CONTROLLER_NUM; i++) { + dc_alloc = + &copy_of_link_table.stream_allocations[i]; + if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) { + sa = &new_table.stream_allocations[new_table.stream_count]; + sa->slot_count = dc_alloc->slot_count; + sa->vcp_id = dc_alloc->vcp_id; + new_table.stream_count++; + } } /* Overwrite the old table */ @@ -183,6 +229,38 @@ void
dm_helpers_dp_update_branch_info( const struct dc_link *link) {} +static void dm_helpers_construct_old_payload( + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_atomic_payload *new_payload, + struct drm_dp_mst_atomic_payload *old_payload) +{ + struct drm_dp_mst_atomic_payload *pos; + int pbn_per_slot = dfixed_trunc(mst_state->pbn_div); + u8 next_payload_vc_start = mgr->next_start_slot; + u8 payload_vc_start = new_payload->vc_start_slot; + u8 allocated_time_slots; + + *old_payload = *new_payload; + + /* Set correct time_slots/PBN of old payload. + * other fields (delete & dsc_enabled) in + * struct drm_dp_mst_atomic_payload are don't care fields + * while calling drm_dp_remove_payload_part2() + */ + list_for_each_entry(pos, &mst_state->payloads, next) { + if (pos != new_payload && + pos->vc_start_slot > payload_vc_start && + pos->vc_start_slot < next_payload_vc_start) + next_payload_vc_start = pos->vc_start_slot; + } + + allocated_time_slots = next_payload_vc_start - payload_vc_start; + + old_payload->time_slots = allocated_time_slots; + old_payload->pbn = allocated_time_slots * pbn_per_slot; +} + /* * Writes payload allocation table in immediate downstream device. */ @@ -194,33 +272,43 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( { struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_state *mst_state; - struct drm_dp_mst_atomic_payload *payload; + struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload; struct drm_dp_mst_topology_mgr *mst_mgr; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; /* Accessing the connector state is required for vcpi_slots allocation * and directly relies on behaviour in commit check * that blocks before commit guaranteeing that the state - * is not gonna be swapped while still in use in commit tail */ + * is not gonna be swapped while still in use in commit tail + */ - if (!aconnector || !aconnector->mst_port) + if (!aconnector || !aconnector->mst_root) return false; - mst_mgr = &aconnector->mst_port->mst_mgr; + mst_mgr = &aconnector->mst_root->mst_mgr; mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); + new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); - /* It's OK for this to fail */ - payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port); - if (enable) - drm_dp_add_payload_part1(mst_mgr, mst_state, payload); - else - drm_dp_remove_payload(mst_mgr, mst_state, payload); + if (enable) { + target_payload = new_payload; + + /* It's OK for this to fail */ + drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload); + } else { + /* construct old payload by VCPI*/ + dm_helpers_construct_old_payload(mst_mgr, mst_state, + new_payload, &old_payload); + target_payload = &old_payload; + + drm_dp_remove_payload_part1(mst_mgr, mst_state, new_payload); + } /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or * AUX message. The sequence is slot 1-63 allocated sequence for each * stream. AMD ASIC stream slot allocation should follow the same - * sequence. copy DRM MST allocation to dc */ - fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table); + * sequence. 
copy DRM MST allocation to dc + */ + fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table); return true; } @@ -255,10 +343,10 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; - if (!aconnector || !aconnector->mst_port) + if (!aconnector || !aconnector->mst_root) return ACT_FAILED; - mst_mgr = &aconnector->mst_port->mst_mgr; + mst_mgr = &aconnector->mst_root->mst_mgr; if (!mst_mgr->mst_state) return ACT_FAILED; @@ -271,33 +359,30 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( return ACT_SUCCESS; } -bool dm_helpers_dp_mst_send_payload_allocation( +void dm_helpers_dp_mst_send_payload_allocation( struct dc_context *ctx, - const struct dc_stream_state *stream, - bool enable) + const struct dc_stream_state *stream) { struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_state *mst_state; struct drm_dp_mst_topology_mgr *mst_mgr; - struct drm_dp_mst_atomic_payload *payload; + struct drm_dp_mst_atomic_payload *new_payload; enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD; enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD; + int ret = 0; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; - if (!aconnector || !aconnector->mst_port) - return false; + if (!aconnector || !aconnector->mst_root) + return; - mst_mgr = &aconnector->mst_port->mst_mgr; + mst_mgr = &aconnector->mst_root->mst_mgr; mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); + new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); - payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port); - if (!enable) { - set_flag = MST_CLEAR_ALLOCATED_PAYLOAD; - clr_flag = MST_ALLOCATE_NEW_PAYLOAD; - } + ret = drm_dp_add_payload_part2(mst_mgr, new_payload); - if (enable && drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload)) { + if (ret) { amdgpu_dm_set_mst_status(&aconnector->mst_status, set_flag, false); } else { @@ -306,10 +391,36 @@ bool dm_helpers_dp_mst_send_payload_allocation( amdgpu_dm_set_mst_status(&aconnector->mst_status, clr_flag, false); } - - return true; } +void dm_helpers_dp_mst_update_mst_mgr_for_deallocation( + struct dc_context *ctx, + const struct dc_stream_state *stream) +{ + struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_topology_state *mst_state; + struct drm_dp_mst_topology_mgr *mst_mgr; + struct drm_dp_mst_atomic_payload *new_payload, old_payload; + enum mst_progress_status set_flag = MST_CLEAR_ALLOCATED_PAYLOAD; + enum mst_progress_status clr_flag = MST_ALLOCATE_NEW_PAYLOAD; + + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + + if (!aconnector || !aconnector->mst_root) + return; + + mst_mgr = &aconnector->mst_root->mst_mgr; + mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); + new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); + dm_helpers_construct_old_payload(mst_mgr, mst_state, + new_payload, &old_payload); + + drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload); + + amdgpu_dm_set_mst_status(&aconnector->mst_status, set_flag, true); + amdgpu_dm_set_mst_status(&aconnector->mst_status, clr_flag, false); + } + void dm_dtn_log_begin(struct dc_context *ctx, struct dc_log_buffer_ctx *log_ctx) { @@ -358,7 +469,7 @@ void dm_dtn_log_append_v(struct dc_context *ctx, total = log_ctx->pos + n + 1; if (total > log_ctx->size) { - 
char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL); + char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL); if (buf) { memcpy(buf, log_ctx->buf, log_ctx->pos); @@ -404,6 +515,7 @@ bool dm_helpers_dp_mst_start_top_mgr( bool boot) { struct amdgpu_dm_connector *aconnector = link->priv; + int ret; if (!aconnector) { DRM_ERROR("Failed to find connector for link!"); @@ -419,7 +531,16 @@ bool dm_helpers_dp_mst_start_top_mgr( DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n", aconnector, aconnector->base.base.id); - return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0); + ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); + if (ret < 0) { + DRM_ERROR("DM_MST: Failed to set the device into MST mode!"); + return false; + } + + DRM_INFO("DM_MST: DP%x, %d-lane link detected\n", aconnector->mst_mgr.dpcd[0], + aconnector->mst_mgr.dpcd[2] & DP_MAX_LANE_COUNT_MASK); + + return true; } bool dm_helpers_dp_mst_stop_top_mgr( @@ -454,13 +575,11 @@ bool dm_helpers_dp_read_dpcd( struct amdgpu_dm_connector *aconnector = link->priv; - if (!aconnector) { - DC_LOG_DC("Failed to find connector for link!\n"); + if (!aconnector) return false; - } - return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, - data, size) > 0; + return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data, + size) == size; } bool dm_helpers_dp_write_dpcd( @@ -472,10 +591,8 @@ bool dm_helpers_dp_write_dpcd( { struct amdgpu_dm_connector *aconnector = link->priv; - if (!aconnector) { - DRM_ERROR("Failed to find connector for link!"); + if (!aconnector) return false; - } return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux, address, (uint8_t *)data, size) > 0; @@ -516,7 +633,19 @@ bool dm_helpers_submit_i2c( return result; } -#if defined(CONFIG_DRM_AMD_DC_DCN) +bool dm_helpers_execute_fused_io( + struct dc_context *ctx, + struct dc_link *link, + union dmub_rb_cmd *commands, + uint8_t count, + uint32_t timeout_us +) +{ + struct amdgpu_device *dev = ctx->driver_context; + + return amdgpu_dm_execute_fused_io(dev, link, commands, count, timeout_us); +} + static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, bool is_write_cmd, unsigned char cmd, @@ -537,6 +666,8 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, // write rc data memmove(rc_data, data, length); ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data)); + if (ret < 0) + goto err; } // write rc offset @@ -545,20 +676,21 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF; rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset)); + if (ret < 0) + goto err; // write rc length rc_length[0] = (unsigned char) length & 0xFF; rc_length[1] = (unsigned char) (length >> 8) & 0xFF; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length)); + if (ret < 0) + goto err; // write rc cmd rc_cmd = cmd | 0x80; ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); - - if (ret < 0) { - DRM_ERROR(" execute_synaptics_rc_command - write cmd ..., err = %d\n", ret); - return false; - } + if (ret < 0) + goto err; // poll until active is 0 for (i = 0; i < 10; i++) { @@ -578,16 +710,20 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length); } - DC_LOG_DC(" execute_synaptics_rc_command - success = %d\n", success); + drm_dbg_dp(aux->drm_dev, "success = 
%d\n", success); return success; + +err: + DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret); + return false; } static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) { unsigned char data[16] = {0}; - DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n"); + drm_dbg_dp(aux->drm_dev, "Start\n"); // Step 2 data[0] = 'P'; @@ -633,7 +769,6 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) return; data[0] |= (1 << 1); // set bit 1 to 1 - return; if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data)) return; @@ -646,9 +781,12 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL)) return; - DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n"); + drm_dbg_dp(aux->drm_dev, "Done\n"); } +/* MST Dock */ +static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA"; + static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( struct drm_dp_aux *aux, const struct dc_stream_state *stream, @@ -656,7 +794,8 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( { uint8_t ret = 0; - DC_LOG_DC("Configure DSC to non-virtual dpcd synaptics\n"); + drm_dbg_dp(aux->drm_dev, + "MST_DSC Configure DSC to non-virtual dpcd synaptics\n"); if (enable) { /* When DSC is enabled on previous boot and reboot with the hub, @@ -669,7 +808,7 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( apply_synaptics_fifo_reset_wa(aux); ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); - DRM_INFO("Send DSC enable to synaptics\n"); + DRM_INFO("MST_DSC Send DSC enable to synaptics\n"); } else { /* Synaptics hub not support virtual dpcd, @@ -678,13 +817,12 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( */ if (!stream->link->link_status.link_active) { ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); - DRM_INFO("Send DSC disable to synaptics\n"); + DRM_INFO("MST_DSC Send DSC disable to synaptics\n"); } } return ret; } -#endif bool dm_helpers_dp_write_dsc_enable( struct dc_context *ctx, @@ -695,81 +833,86 @@ bool dm_helpers_dp_write_dsc_enable( static const uint8_t DSC_DECODING = 0x01; static const uint8_t DSC_PASSTHROUGH = 0x02; - struct amdgpu_dm_connector *aconnector; + struct amdgpu_dm_connector *aconnector = + (struct amdgpu_dm_connector *)stream->dm_stream_context; + struct drm_device *dev = aconnector->base.dev; struct drm_dp_mst_port *port; uint8_t enable_dsc = enable ? DSC_DECODING : DSC_DISABLE; uint8_t enable_passthrough = enable ? 
DSC_PASSTHROUGH : DSC_DISABLE; uint8_t ret = 0; - if (!stream) - return false; - if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; - if (!aconnector->dsc_aux) return false; -#if defined(CONFIG_DRM_AMD_DC_DCN) // apply w/a to synaptics if (needs_dsc_aux_workaround(aconnector->dc_link) && (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3) return write_dsc_enable_synaptics_non_virtual_dpcd_mst( aconnector->dsc_aux, stream, enable_dsc); -#endif - port = aconnector->port; + port = aconnector->mst_output_port; if (enable) { if (port->passthrough_aux) { ret = drm_dp_dpcd_write(port->passthrough_aux, DP_DSC_ENABLE, &enable_passthrough, 1); - DC_LOG_DC("Sent DSC pass-through enable to virtual dpcd port, ret = %u\n", - ret); + drm_dbg_dp(dev, + "MST_DSC Sent DSC pass-through enable to virtual dpcd port, ret = %u\n", + ret); } ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); - DC_LOG_DC("Sent DSC decoding enable to %s port, ret = %u\n", - (port->passthrough_aux) ? "remote RX" : - "virtual dpcd", - ret); + drm_dbg_dp(dev, + "MST_DSC Sent DSC decoding enable to %s port, ret = %u\n", + (port->passthrough_aux) ? "remote RX" : + "virtual dpcd", + ret); } else { ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); - DC_LOG_DC("Sent DSC decoding disable to %s port, ret = %u\n", - (port->passthrough_aux) ? "remote RX" : - "virtual dpcd", - ret); + drm_dbg_dp(dev, + "MST_DSC Sent DSC decoding disable to %s port, ret = %u\n", + (port->passthrough_aux) ? "remote RX" : + "virtual dpcd", + ret); if (port->passthrough_aux) { ret = drm_dp_dpcd_write(port->passthrough_aux, DP_DSC_ENABLE, &enable_passthrough, 1); - DC_LOG_DC("Sent DSC pass-through disable to virtual dpcd port, ret = %u\n", - ret); + drm_dbg_dp(dev, + "MST_DSC Sent DSC pass-through disable to virtual dpcd port, ret = %u\n", + ret); } } } if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { -#endif ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); - DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable"); -#if defined(CONFIG_DRM_AMD_DC_DCN) + drm_dbg_dp(dev, + "SST_DSC Send DSC %s to SST RX\n", + enable_dsc ? "enable" : "disable"); } else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); - DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable"); + drm_dbg_dp(dev, + "SST_DSC Send DSC %s to DP-HDMI PCON\n", + enable_dsc ? 
"enable" : "disable"); } -#endif } return ret; } +bool dm_helpers_dp_write_hblank_reduction(struct dc_context *ctx, const struct dc_stream_state *stream) +{ + // TODO + return false; +} + bool dm_helpers_is_dp_sink_present(struct dc_link *link) { bool dp_sink_present; @@ -786,6 +929,67 @@ bool dm_helpers_is_dp_sink_present(struct dc_link *link) return dp_sink_present; } +static int +dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len) +{ + struct drm_connector *connector = data; + struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev); + unsigned short start = block * EDID_LENGTH; + struct edid *edid; + int r; + + if (!acpidev) + return -ENODEV; + + /* fetch the entire edid from BIOS */ + r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, (void *)&edid); + if (r < 0) { + drm_dbg(connector->dev, "Failed to get EDID from ACPI: %d\n", r); + return r; + } + if (len > r || start > r || start + len > r) { + r = -EINVAL; + goto cleanup; + } + + /* sanity check */ + if (edid->revision < 4 || !(edid->input & DRM_EDID_INPUT_DIGITAL) || + (edid->input & DRM_EDID_DIGITAL_TYPE_MASK) == DRM_EDID_DIGITAL_TYPE_UNDEF) { + r = -EINVAL; + goto cleanup; + } + + memcpy(buf, (void *)edid + start, len); + r = 0; + +cleanup: + kfree(edid); + + return r; +} + +static const struct drm_edid * +dm_helpers_read_acpi_edid(struct amdgpu_dm_connector *aconnector) +{ + struct drm_connector *connector = &aconnector->base; + + if (amdgpu_dc_debug_mask & DC_DISABLE_ACPI_EDID) + return NULL; + + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_LVDS: + case DRM_MODE_CONNECTOR_eDP: + break; + default: + return NULL; + } + + if (connector->force == DRM_FORCE_OFF) + return NULL; + + return drm_edid_read_custom(connector, dm_helpers_probe_acpi_edid, connector); +} + enum dc_edid_status dm_helpers_read_local_edid( struct dc_context *ctx, struct dc_link *link, @@ -794,9 +998,10 @@ enum dc_edid_status dm_helpers_read_local_edid( struct amdgpu_dm_connector *aconnector = link->priv; struct drm_connector *connector = &aconnector->base; struct i2c_adapter *ddc; - int retry = 3; - enum dc_edid_status edid_status; - struct edid *edid; + int retry = 25; + enum dc_edid_status edid_status = EDID_NO_RESPONSE; + const struct drm_edid *drm_edid; + const struct edid *edid; if (link->aux_mode) ddc = &aconnector->dm_dp_aux.aux.ddc; @@ -807,42 +1012,75 @@ enum dc_edid_status dm_helpers_read_local_edid( * do check sum and retry to make sure read correct edid. 
*/ do { - - edid = drm_get_edid(&aconnector->base, ddc); + drm_edid = dm_helpers_read_acpi_edid(aconnector); + if (drm_edid) + drm_info(connector->dev, "Using ACPI provided EDID for %s\n", connector->name); + else + drm_edid = drm_edid_read_ddc(connector, ddc); + drm_edid_connector_update(connector, drm_edid); /* DP Compliance Test 4.2.2.6 */ if (link->aux_mode && connector->edid_corrupt) drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum); - if (!edid && connector->edid_corrupt) { + if (!drm_edid && connector->edid_corrupt) { connector->edid_corrupt = false; return EDID_BAD_CHECKSUM; } - if (!edid) - return EDID_NO_RESPONSE; + if (!drm_edid) + continue; + + edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() + if (!edid || + edid->extensions >= sizeof(sink->dc_edid.raw_edid) / EDID_LENGTH) + return EDID_BAD_INPUT; sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1); memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length); /* We don't need the original edid anymore */ - kfree(edid); + drm_edid_free(drm_edid); edid_status = dm_helpers_parse_edid_caps( link, &sink->dc_edid, &sink->edid_caps); - } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0); + } while ((edid_status == EDID_BAD_CHECKSUM || edid_status == EDID_NO_RESPONSE) && --retry > 0); if (edid_status != EDID_OK) DRM_ERROR("EDID err: %d, on connector: %s", edid_status, aconnector->base.name); + if (link->aux_mode) { + union test_request test_request = {0}; + union test_response test_response = {0}; - /* DP Compliance Test 4.2.2.3 */ - if (link->aux_mode) - drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]); + dm_helpers_dp_read_dpcd(ctx, + link, + DP_TEST_REQUEST, + &test_request.raw, + sizeof(union test_request)); + + if (!test_request.bits.EDID_READ) + return edid_status; + + test_response.bits.EDID_CHECKSUM_WRITE = 1; + + dm_helpers_dp_write_dpcd(ctx, + link, + DP_TEST_EDID_CHECKSUM, + &sink->dc_edid.raw_edid[sink->dc_edid.length-1], + 1); + + dm_helpers_dp_write_dpcd(ctx, + link, + DP_TEST_RESPONSE, + &test_response.raw, + sizeof(test_response)); + + } return edid_status; } @@ -852,9 +1090,13 @@ int dm_helper_dmub_aux_transfer_sync( struct aux_payload *payload, enum aux_return_code_type *operation_result) { - return amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx, - link->link_index, (void *)payload, - (void *)operation_result); + if (!link->hpd_status) { + *operation_result = AUX_RET_ERROR_HPD_DISCON; + return -1; + } + + return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload, + operation_result); } int dm_helpers_dmub_set_config_sync(struct dc_context *ctx, @@ -862,9 +1104,8 @@ int dm_helpers_dmub_set_config_sync(struct dc_context *ctx, struct set_config_cmd_payload *payload, enum set_config_status *operation_result) { - return amdgpu_dm_process_dmub_aux_transfer_sync(false, ctx, - link->link_index, (void *)payload, - (void *)operation_result); + return amdgpu_dm_process_dmub_set_config_sync(ctx, link->link_index, payload, + operation_result); } void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks) @@ -901,9 +1142,8 @@ void dm_helpers_override_panel_settings( struct dc_panel_config *panel_config) { // Feature DSC - if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) { + if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) panel_config->dsc.disable_dsc_edp = true; - } } void *dm_helpers_allocate_gpu_mem( @@ -913,30 +1153,8 @@ void *dm_helpers_allocate_gpu_mem( long long 
*addr) { struct amdgpu_device *adev = ctx->driver_context; - struct dal_allocation *da; - u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ? - AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM; - int ret; - - da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL); - if (!da) - return NULL; - - ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, - domain, &da->bo, - &da->gpu_addr, &da->cpu_ptr); - - *addr = da->gpu_addr; - if (ret) { - kfree(da); - return NULL; - } - - /* add da to list in dm */ - list_add(&da->list, &adev->dm.da_list); - - return da->cpu_ptr; + return dm_allocate_gpu_mem(adev, type, size, addr); } void dm_helpers_free_gpu_mem( @@ -945,17 +1163,8 @@ void dm_helpers_free_gpu_mem( void *pvMem) { struct amdgpu_device *adev = ctx->driver_context; - struct dal_allocation *da; - - /* walk the da list in DM */ - list_for_each_entry(da, &adev->dm.da_list, list) { - if (pvMem == da->cpu_ptr) { - amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); - list_del(&da->list); - kfree(da); - break; - } - } + + dm_free_gpu_mem(adev, type, pvMem); } bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable) @@ -997,6 +1206,144 @@ void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream) sizeof(new_downspread)); } +bool dm_helpers_dp_handle_test_pattern_request( + struct dc_context *ctx, + const struct dc_link *link, + union link_test_pattern dpcd_test_pattern, + union test_misc dpcd_test_params) +{ + enum dp_test_pattern test_pattern; + enum dp_test_pattern_color_space test_pattern_color_space = + DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED; + enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED; + enum dc_pixel_encoding requestPixelEncoding = PIXEL_ENCODING_UNDEFINED; + struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; + struct pipe_ctx *pipe_ctx = NULL; + struct amdgpu_dm_connector *aconnector = link->priv; + struct drm_device *dev = aconnector->base.dev; + struct dc_state *dc_state = ctx->dc->current_state; + struct clk_mgr *clk_mgr = ctx->dc->clk_mgr; + int i; + + for (i = 0; i < MAX_PIPES; i++) { + if (pipes[i].stream == NULL) + continue; + + if (pipes[i].stream->link == link && !pipes[i].top_pipe && + !pipes[i].prev_odm_pipe) { + pipe_ctx = &pipes[i]; + break; + } + } + + if (pipe_ctx == NULL) + return false; + + switch (dpcd_test_pattern.bits.PATTERN) { + case LINK_TEST_PATTERN_COLOR_RAMP: + test_pattern = DP_TEST_PATTERN_COLOR_RAMP; + break; + case LINK_TEST_PATTERN_VERTICAL_BARS: + test_pattern = DP_TEST_PATTERN_VERTICAL_BARS; + break; /* black and white */ + case LINK_TEST_PATTERN_COLOR_SQUARES: + test_pattern = (dpcd_test_params.bits.DYN_RANGE == + TEST_DYN_RANGE_VESA ? + DP_TEST_PATTERN_COLOR_SQUARES : + DP_TEST_PATTERN_COLOR_SQUARES_CEA); + break; + default: + test_pattern = DP_TEST_PATTERN_VIDEO_MODE; + break; + } + + if (dpcd_test_params.bits.CLR_FORMAT == 0) + test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB; + else + test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ? 
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 : + DP_TEST_PATTERN_COLOR_SPACE_YCBCR601; + + switch (dpcd_test_params.bits.BPC) { + case 0: // 6 bits + requestColorDepth = COLOR_DEPTH_666; + break; + case 1: // 8 bits + requestColorDepth = COLOR_DEPTH_888; + break; + case 2: // 10 bits + requestColorDepth = COLOR_DEPTH_101010; + break; + case 3: // 12 bits + requestColorDepth = COLOR_DEPTH_121212; + break; + default: + break; + } + + switch (dpcd_test_params.bits.CLR_FORMAT) { + case 0: + requestPixelEncoding = PIXEL_ENCODING_RGB; + break; + case 1: + requestPixelEncoding = PIXEL_ENCODING_YCBCR422; + break; + case 2: + requestPixelEncoding = PIXEL_ENCODING_YCBCR444; + break; + default: + requestPixelEncoding = PIXEL_ENCODING_RGB; + break; + } + + if ((requestColorDepth != COLOR_DEPTH_UNDEFINED + && pipe_ctx->stream->timing.display_color_depth != requestColorDepth) + || (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED + && pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) { + drm_dbg(dev, + "original bpc %d pix encoding %d, changing to %d %d\n", + pipe_ctx->stream->timing.display_color_depth, + pipe_ctx->stream->timing.pixel_encoding, + requestColorDepth, + requestPixelEncoding); + pipe_ctx->stream->timing.display_color_depth = requestColorDepth; + pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding; + + dc_link_update_dsc_config(pipe_ctx); + + aconnector->timing_changed = true; + /* store current timing */ + if (aconnector->timing_requested) + *aconnector->timing_requested = pipe_ctx->stream->timing; + else + drm_err(dev, "timing storage failed\n"); + + } + + pipe_ctx->stream->test_pattern.type = test_pattern; + pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space; + + /* Temp W/A for compliance test failure */ + dc_state->bw_ctx.bw.dcn.clk.p_state_change_support = false; + dc_state->bw_ctx.bw.dcn.clk.dramclk_khz = clk_mgr->dc_mode_softmax_enabled ? 
+ clk_mgr->bw_params->dc_mode_softmax_memclk : clk_mgr->bw_params->max_memclk_mhz; + dc_state->bw_ctx.bw.dcn.clk.idle_dramclk_khz = dc_state->bw_ctx.bw.dcn.clk.dramclk_khz; + ctx->dc->clk_mgr->funcs->update_clocks( + ctx->dc->clk_mgr, + dc_state, + false); + + dc_link_dp_set_test_pattern( + (struct dc_link *) link, + test_pattern, + test_pattern_color_space, + NULL, + NULL, + 0); + + return false; +} + void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz) { // TODO @@ -1004,5 +1351,66 @@ void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz) void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable) { - /* TODO: add periodic detection implementation */ + struct amdgpu_device *adev = ctx->driver_context; + + if (adev->dm.idle_workqueue) { + adev->dm.idle_workqueue->enable = enable; + if (enable && !adev->dm.idle_workqueue->running && amdgpu_dm_is_headless(adev)) + schedule_work(&adev->dm.idle_workqueue->work); + } +} + +void dm_helpers_dp_mst_update_branch_bandwidth( + struct dc_context *ctx, + struct dc_link *link) +{ + // TODO +} + +static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id) +{ + bool ret_val = false; + + switch (branch_dev_id) { + case DP_BRANCH_DEVICE_ID_0060AD: + case DP_BRANCH_DEVICE_ID_00E04C: + case DP_BRANCH_DEVICE_ID_90CC24: + ret_val = true; + break; + default: + break; + } + + return ret_val; +} + +enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link) +{ + struct dpcd_caps *dpcd_caps = &link->dpcd_caps; + enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; + + switch (dpcd_caps->dongle_type) { + case DISPLAY_DONGLE_DP_HDMI_CONVERTER: + if (dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT == true && + dpcd_caps->allow_invalid_MSA_timing_param == true && + dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id)) + as_type = FREESYNC_TYPE_PCON_IN_WHITELIST; + break; + default: + break; + } + + return as_type; +} + +bool dm_helpers_is_fullscreen(struct dc_context *ctx, struct dc_stream_state *stream) +{ + // TODO + return false; +} + +bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream) +{ + // TODO + return false; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 19f543ba7205..0a2a3f233a0e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2015 Advanced Micro Devices, Inc. * @@ -120,7 +121,8 @@ static void dm_irq_work_func(struct work_struct *work) /* Call a DAL subcomponent which registered for interrupt notification * at INTERRUPT_LOW_IRQ_CONTEXT. - * (The most common use is HPD interrupt) */ + * (The most common use is HPD interrupt) + */ } /* @@ -172,7 +174,8 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev, if (handler_removed == false) { /* Not necessarily an error - caller may not - * know the context. */ + * know the context. 
+ */ return NULL; } @@ -261,7 +264,7 @@ validate_irq_registration_params(struct dc_interrupt_params *int_params, static bool validate_irq_unregistration_params(enum dc_irq_source irq_source, irq_handler_idx handler_idx) { - if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) { + if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) { DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n"); return false; } @@ -343,7 +346,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, /* This pointer will be stored by code which requested interrupt * registration. * The same pointer will be needed in order to unregister the - * interrupt. */ + * interrupt. + */ DRM_DEBUG_KMS( "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n", @@ -390,7 +394,8 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, if (handler_list == NULL) { /* If we got here, it means we searched all irq contexts - * for this irq source, but the handler was not found. */ + * for this irq source, but the handler was not found. + */ DRM_ERROR( "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n", ih, irq_source); @@ -450,7 +455,8 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev) DM_IRQ_TABLE_LOCK(adev, irq_table_flags); /* The handler was removed from the table, * it means it is safe to flush all the 'work' - * (because no code can schedule a new one). */ + * (because no code can schedule a new one). + */ lh = &adev->dm.irq_handler_list_low_tab[src]; DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); @@ -468,8 +474,9 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev) unregister_all_irq_handlers(adev); } -int amdgpu_dm_irq_suspend(struct amdgpu_device *adev) +void amdgpu_dm_irq_suspend(struct amdgpu_device *adev) { + struct drm_device *dev = adev_to_drm(adev); int src; struct list_head *hnd_list_h; struct list_head *hnd_list_l; @@ -494,7 +501,7 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev) DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); if (!list_empty(hnd_list_l)) { - list_for_each_safe (entry, tmp, hnd_list_l) { + list_for_each_safe(entry, tmp, hnd_list_l) { handler = list_entry( entry, struct amdgpu_dm_irq_handler_data, @@ -506,10 +513,12 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev) } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); - return 0; + + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_disable(dev); } -int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) +void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) { int src; struct list_head *hnd_list_h, *hnd_list_l; @@ -517,7 +526,7 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) DM_IRQ_TABLE_LOCK(adev, irq_table_flags); - DRM_DEBUG_KMS("DM_IRQ: early resume\n"); + drm_dbg(adev_to_drm(adev), "DM_IRQ: early resume\n"); /* re-enable short pulse interrupts HW interrupt */ for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) { @@ -528,19 +537,18 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); - - return 0; } -int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) +void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) { + struct drm_device *dev = adev_to_drm(adev); int src; struct list_head *hnd_list_h, *hnd_list_l; unsigned long irq_table_flags; DM_IRQ_TABLE_LOCK(adev, irq_table_flags); - DRM_DEBUG_KMS("DM_IRQ: resume\n"); + drm_dbg(adev_to_drm(adev), "DM_IRQ: resume\n"); /** * Renable HW interrupt for HPD and only since FLIP and VBLANK @@ -554,7 +562,9 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device 
*adev) } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); - return 0; + + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(dev); } /* @@ -571,7 +581,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev, if (list_empty(handler_list)) return; - list_for_each_entry (handler_data, handler_list, list) { + list_for_each_entry(handler_data, handler_list, list) { if (queue_work(system_highpri_wq, &handler_data->work)) { work_queued = true; break; @@ -627,7 +637,8 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, &adev->dm.irq_handler_list_high_tab[irq_source], list) { /* Call a subcomponent which registered for immediate - * interrupt notification */ + * interrupt notification + */ handler_data->handler(handler_data->handler_arg); } @@ -664,7 +675,7 @@ static int amdgpu_dm_irq_handler(struct amdgpu_device *adev, return 0; } -static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type) +static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type) { switch (type) { case AMDGPU_HPD_1: @@ -686,7 +697,7 @@ static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type) static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type); @@ -698,14 +709,14 @@ static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev, static inline int dm_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned crtc_id, + unsigned int crtc_id, enum amdgpu_interrupt_state state, const enum irq_type dal_irq_type, const char *func) { bool st; enum dc_irq_source irq_source; - + struct dc *dc = adev->dm.dc; struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id]; if (!acrtc) { @@ -723,13 +734,16 @@ static inline int dm_irq_state(struct amdgpu_device *adev, st = (state == AMDGPU_IRQ_STATE_ENABLE); + if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed) + dc_allow_idle_optimizations(dc, false); + dc_interrupt_set(adev->dm.dc, irq_source, st); return 0; } static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned crtc_id, + unsigned int crtc_id, enum amdgpu_interrupt_state state) { return dm_irq_state( @@ -743,7 +757,7 @@ static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev, static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned crtc_id, + unsigned int crtc_id, enum amdgpu_interrupt_state state) { return dm_irq_state( @@ -885,27 +899,68 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; + int irq_type; + int i; + bool use_polling = false; + + /* First, clear all hpd and hpdrx interrupts */ + for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) { + if (!dc_interrupt_set(adev->dm.dc, i, false)) + drm_err(dev, "Failed to clear hpd(rx) source=%d on init\n", + i); + } drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { - struct amdgpu_dm_connector *amdgpu_dm_connector = - to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *amdgpu_dm_connector; + const struct dc_link *dc_link; - const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; + use_polling |= connector->polled != DRM_CONNECTOR_POLL_HPD; - if (DC_IRQ_SOURCE_INVALID != 
dc_link->irq_source_hpd) { - dc_interrupt_set(adev->dm.dc, - dc_link->irq_source_hpd, - true); + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + + dc_link = amdgpu_dm_connector->dc_link; + + /* + * Get a base driver irq reference for hpd ints for the lifetime + * of dm. Note that only hpd interrupt types are registered with + * base driver; hpd_rx types aren't. IOW, amdgpu_irq_get/put on + * hpd_rx isn't available. DM currently controls hpd_rx + * explicitly with dc_interrupt_set() + */ + if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { + irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1; + /* + * TODO: There's a mismatch between mode_info.num_hpd + * and what bios reports as the # of connectors with hpd + * sources. Since the # of hpd source types registered + * with base driver == mode_info.num_hpd, we have to + * fallback to dc_interrupt_set for the remaining types. + */ + if (irq_type < adev->mode_info.num_hpd) { + if (amdgpu_irq_get(adev, &adev->hpd_irq, irq_type)) + drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", + dc_link->irq_source_hpd); + } else { + dc_interrupt_set(adev->dm.dc, + dc_link->irq_source_hpd, + true); + } } - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { + if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd_rx, true); } } drm_connector_list_iter_end(&iter); + + if (use_polling) + drm_kms_helper_poll_init(dev); } /** @@ -921,24 +976,42 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; + int irq_type; drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { - struct amdgpu_dm_connector *amdgpu_dm_connector = - to_amdgpu_dm_connector(connector); - const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; + struct amdgpu_dm_connector *amdgpu_dm_connector; + const struct dc_link *dc_link; - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { - dc_interrupt_set(adev->dm.dc, - dc_link->irq_source_hpd, - false); + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + dc_link = amdgpu_dm_connector->dc_link; + + if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { + irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1; + + /* TODO: See same TODO in amdgpu_dm_hpd_init() */ + if (irq_type < adev->mode_info.num_hpd) { + if (amdgpu_irq_put(adev, &adev->hpd_irq, irq_type)) + drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", + dc_link->irq_source_hpd); + } else { + dc_interrupt_set(adev->dm.dc, + dc_link->irq_source_hpd, + false); + } } - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { + if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd_rx, false); } } drm_connector_list_iter_end(&iter); + + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_fini(dev); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h index 2349238a626b..4f6b58f4f90d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2015 Advanced Micro Devices, Inc. 
* @@ -90,14 +91,14 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev); * amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend. * */ -int amdgpu_dm_irq_suspend(struct amdgpu_device *adev); +void amdgpu_dm_irq_suspend(struct amdgpu_device *adev); /** * amdgpu_dm_irq_resume_early - enable HPDRX ASIC interrupts during resume. * amdgpu_dm_irq_resume - enable ASIC interrupt during resume. * */ -int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev); -int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev); +void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev); +void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev); #endif /* __AMDGPU_DM_IRQ_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h index 79b5f9999fec..3c9995275cbd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2020 Advanced Micro Devices, Inc. * @@ -33,13 +34,15 @@ struct dm_irq_params { struct mod_vrr_params vrr_params; struct dc_stream_state *stream; int active_planes; - bool allow_psr_entry; + bool allow_sr_entry; struct mod_freesync_config freesync_config; #ifdef CONFIG_DEBUG_FS enum amdgpu_dm_pipe_crc_source crc_src; #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY - struct crc_window_parm crc_window; + struct crc_window_param window_param[MAX_CRC_WINDOW_NUM]; + /* At least one CRC window is activated or not*/ + bool crc_window_activated; #endif #endif }; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 6ff96b4bdda5..dbd1da4d85d3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2012-15 Advanced Micro Devices, Inc. * @@ -23,34 +24,37 @@ * */ +#include <linux/vmalloc.h> #include <drm/display/drm_dp_helper.h> #include <drm/display/drm_dp_mst_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_fixed.h> +#include <drm/drm_edid.h> #include "dm_services.h" #include "amdgpu.h" #include "amdgpu_dm.h" #include "amdgpu_dm_mst_types.h" +#include "amdgpu_dm_hdcp.h" #include "dc.h" #include "dm_helpers.h" -#include "dc_link_ddc.h" -#include "dc_link_dp.h" #include "ddc_service_types.h" #include "dpcd_defs.h" -#include "i2caux_interface.h" #include "dmub_cmd.h" #if defined(CONFIG_DEBUG_FS) #include "amdgpu_dm_debugfs.h" #endif -#include "dc/dcn20/dcn20_resource.h" -bool is_timing_changed(struct dc_stream_state *cur_stream, - struct dc_stream_state *new_stream); +#include "dc/resource/dcn20/dcn20_resource.h" +#define PEAK_FACTOR_X1000 1006 +/* + * This function handles both native AUX and I2C-Over-AUX transactions. 
+ */ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { @@ -59,6 +63,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, enum aux_return_code_type operation_result; struct amdgpu_device *adev; struct ddc_service *ddc; + uint8_t copy[16]; if (WARN_ON(msg->size > 16)) return -E2BIG; @@ -74,6 +79,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, (msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0; payload.defer_delay = 0; + if (payload.write) { + memcpy(copy, msg->buffer, msg->size); + payload.data = copy; + } + result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, &operation_result); @@ -87,15 +97,25 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, if (adev->dm.aux_hpd_discon_quirk) { if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE && operation_result == AUX_RET_ERROR_HPD_DISCON) { - result = 0; + result = msg->size; operation_result = AUX_RET_SUCCESS; } } - if (payload.write && result >= 0) - result = msg->size; + /* + * result equals to 0 includes the cases of AUX_DEFER/I2C_DEFER + */ + if (payload.write && result >= 0) { + if (result) { + /*one byte indicating partially written bytes*/ + drm_dbg_dp(adev_to_drm(adev), "AUX partially written\n"); + result = payload.data[0]; + } else if (!payload.reply[0]) + /*I2C_ACK|AUX_ACK*/ + result = msg->size; + } - if (result < 0) + if (result < 0) { switch (operation_result) { case AUX_RET_SUCCESS: break; @@ -114,6 +134,13 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, break; } + drm_dbg_dp(adev_to_drm(adev), "DP AUX transfer fail:%d\n", operation_result); + } + + if (payload.reply[0]) + drm_dbg_dp(adev_to_drm(adev), "AUX reply command not ACK: 0x%02x.", + payload.reply[0]); + return result; } @@ -129,10 +156,10 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector) dc_sink_release(aconnector->dc_sink); } - kfree(aconnector->edid); + drm_edid_free(aconnector->drm_edid); drm_connector_cleanup(connector); - drm_dp_mst_put_port_malloc(aconnector->port); + drm_dp_mst_put_port_malloc(aconnector->mst_output_port); kfree(aconnector); } @@ -144,7 +171,7 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector) int r; r = drm_dp_mst_connector_late_register(connector, - amdgpu_dm_connector->port); + amdgpu_dm_connector->mst_output_port); if (r < 0) return r; @@ -155,13 +182,24 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector) return 0; } + +static inline void +amdgpu_dm_mst_reset_mst_connector_setting(struct amdgpu_dm_connector *aconnector) +{ + aconnector->drm_edid = NULL; + aconnector->dsc_aux = NULL; + aconnector->mst_output_port->passthrough_aux = NULL; + aconnector->mst_local_bw = 0; + aconnector->vc_full_pbn = 0; +} + static void amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct drm_dp_mst_port *port = aconnector->port; - struct amdgpu_dm_connector *root = aconnector->mst_port; + struct drm_dp_mst_port *port = aconnector->mst_output_port; + struct amdgpu_dm_connector *root = aconnector->mst_root; struct dc_link *dc_link = aconnector->dc_link; struct dc_sink *dc_sink = aconnector->dc_sink; @@ -176,9 +214,13 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) if (dc_link->sink_count) dc_link_remove_remote_sink(dc_link, dc_sink); + drm_dbg_dp(connector->dev, + "DM_MST: remove remote sink 0x%p, %d remaining\n", + dc_sink, dc_link->sink_count); + 
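[Editorial aside — illustrative sketch, not part of the patch.] The reworked dm_dp_aux_transfer() above follows the drm_dp_aux ->transfer() contract for AUX writes: a negative errno on hard failure, 0 when the sink deferred (AUX_DEFER/I2C_DEFER), a short positive count when only part of the buffer was ACKed (taken from payload.data[0]), and msg->size on a full ACK. A minimal, hypothetical helper spelling out how a caller could classify that return value:

	/* Illustrative only: hypothetical helper, not from the patch. */
	#include <linux/types.h>

	static const char *classify_aux_write_result(ssize_t ret, size_t requested)
	{
		if (ret < 0)
			return "failed";		/* -EBUSY, -EIO, -ETIMEDOUT, ... */
		if (ret == 0)
			return "deferred";		/* AUX_DEFER / I2C_DEFER: retry later */
		if ((size_t)ret < requested)
			return "partial write";		/* sink ACKed only 'ret' bytes */
		return "fully written";			/* ret == requested: full ACK */
	}
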
dc_sink_release(dc_sink); aconnector->dc_sink = NULL; - aconnector->edid = NULL; + amdgpu_dm_mst_reset_mst_connector_setting(aconnector); } aconnector->mst_status = MST_STATUS_DEFAULT; @@ -197,7 +239,6 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { .early_unregister = amdgpu_dm_mst_connector_early_unregister, }; -#if defined(CONFIG_DRM_AMD_DC_DCN) bool needs_dsc_aux_workaround(struct dc_link *link) { if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && @@ -208,10 +249,26 @@ bool needs_dsc_aux_workaround(struct dc_link *link) return false; } +#if defined(CONFIG_DRM_AMD_DC_FP) +static bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port) +{ + u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F + + if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) { + if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) { + DRM_INFO("Synaptics Cascaded MST hub\n"); + return true; + } + } + + return false; +} + static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector) { struct dc_sink *dc_sink = aconnector->dc_sink; - struct drm_dp_mst_port *port = aconnector->port; + struct drm_dp_mst_port *port = aconnector->mst_output_port; u8 dsc_caps[16] = { 0 }; u8 dsc_branch_dec_caps_raw[3] = { 0 }; // DSC branch decoder caps 0xA0 ~ 0xA2 u8 *dsc_branch_dec_caps = NULL; @@ -229,7 +286,11 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto */ if (!aconnector->dsc_aux && !port->parent->port_parent && needs_dsc_aux_workaround(aconnector->dc_link)) - aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux; + aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux; + + /* synaptics cascaded MST hub case */ + if (is_synaptics_cascaded_panamera(aconnector->dc_link, port)) + aconnector->dsc_aux = port->mgr->aux; if (!aconnector->dsc_aux) return false; @@ -248,6 +309,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto return true; } +#endif static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector) { @@ -267,7 +329,34 @@ static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnect return true; } -#endif + +static bool retrieve_branch_specific_data(struct amdgpu_dm_connector *aconnector) +{ + struct drm_connector *connector = &aconnector->base; + struct drm_dp_mst_port *port = aconnector->mst_output_port; + struct drm_dp_mst_port *port_parent; + struct drm_dp_aux *immediate_upstream_aux; + struct drm_dp_desc branch_desc; + + if (!port->parent) + return false; + + port_parent = port->parent->port_parent; + + immediate_upstream_aux = port_parent ? 
&port_parent->aux : port->mgr->aux; + + if (drm_dp_read_desc(immediate_upstream_aux, &branch_desc, true)) + return false; + + aconnector->branch_ieee_oui = (branch_desc.ident.oui[0] << 16) + + (branch_desc.ident.oui[1] << 8) + + (branch_desc.ident.oui[2]); + + drm_dbg_dp(port->aux.drm_dev, "MST branch oui 0x%x detected at %s\n", + aconnector->branch_ieee_oui, connector->name); + + return true; +} static int dm_dp_mst_get_modes(struct drm_connector *connector) { @@ -277,15 +366,18 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (!aconnector) return drm_add_edid_modes(connector, NULL); - if (!aconnector->edid) { - struct edid *edid; - edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); + if (!aconnector->drm_edid) { + const struct drm_edid *drm_edid; - if (!edid) { + drm_edid = drm_dp_mst_edid_read(connector, + &aconnector->mst_root->mst_mgr, + aconnector->mst_output_port); + + if (!drm_edid) { amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_REMOTE_EDID, false); - drm_connector_update_edid_property( + drm_edid_connector_update( &aconnector->base, NULL); @@ -307,6 +399,11 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) return 0; } + drm_dbg_dp(connector->dev, + "DM_MST: add remote sink 0x%p, %d remaining\n", + dc_sink, + aconnector->dc_link->sink_count); + dc_sink->priv = aconnector; aconnector->dc_sink = dc_sink; } @@ -314,7 +411,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) return ret; } - aconnector->edid = edid; + aconnector->drm_edid = drm_edid; amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_REMOTE_EDID, true); } @@ -329,10 +426,13 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; + const struct edid *edid; + + edid = drm_edid_raw(aconnector->drm_edid); // FIXME: Get rid of drm_edid_raw() dc_sink = dc_link_add_remote_sink( aconnector->dc_link, - (uint8_t *)aconnector->edid, - (aconnector->edid->extensions + 1) * EDID_LENGTH, + (uint8_t *)edid, + (edid->extensions + 1) * EDID_LENGTH, &init_params); if (!dc_sink) { @@ -340,30 +440,57 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) return 0; } + drm_dbg_dp(connector->dev, + "DM_MST: add remote sink 0x%p, %d remaining\n", + dc_sink, aconnector->dc_link->sink_count); + dc_sink->priv = aconnector; /* dc_link_add_remote_sink returns a new reference */ aconnector->dc_sink = dc_sink; + /* when display is unplugged from mst hub, connctor will be + * destroyed within dm_dp_mst_connector_destroy. connector + * hdcp perperties, like type, undesired, desired, enabled, + * will be lost. So, save hdcp properties into hdcp_work within + * amdgpu_dm_atomic_commit_tail. 
if the same display is + * plugged back with same display index, its hdcp properties + * will be retrieved from hdcp_work within dm_dp_mst_get_modes + */ + if (aconnector->dc_sink && connector->state) { + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + + if (adev->dm.hdcp_workqueue) { + struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; + struct hdcp_workqueue *hdcp_w = + &hdcp_work[aconnector->dc_link->link_index]; + + connector->state->hdcp_content_type = + hdcp_w->hdcp_content_type[connector->index]; + connector->state->content_protection = + hdcp_w->content_protection[connector->index]; + } + } + if (aconnector->dc_sink) { amdgpu_dm_update_freesync_caps( - connector, aconnector->edid); + connector, aconnector->drm_edid); -#if defined(CONFIG_DRM_AMD_DC_DCN) +#if defined(CONFIG_DRM_AMD_DC_FP) if (!validate_dsc_caps_on_connector(aconnector)) memset(&aconnector->dc_sink->dsc_caps, 0, sizeof(aconnector->dc_sink->dsc_caps)); +#endif if (!retrieve_downstream_port_device(aconnector)) memset(&aconnector->mst_downstream_port_present, 0, sizeof(aconnector->mst_downstream_port_present)); -#endif } } - drm_connector_update_edid_property( - &aconnector->base, aconnector->edid); + drm_edid_connector_update(&aconnector->base, aconnector->drm_edid); - ret = drm_add_edid_modes(connector, aconnector->edid); + ret = drm_edid_connector_add_modes(connector); return ret; } @@ -374,8 +501,7 @@ dm_mst_atomic_best_encoder(struct drm_connector *connector, { struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, connector); - struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = drm_to_adev(connector->dev); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc); return &adev->dm.mst_encoders[acrtc->crtc_id].base; @@ -386,15 +512,15 @@ dm_dp_mst_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct amdgpu_dm_connector *master = aconnector->mst_port; - struct drm_dp_mst_port *port = aconnector->port; + struct amdgpu_dm_connector *master = aconnector->mst_root; + struct drm_dp_mst_port *port = aconnector->mst_output_port; int connection_status; if (drm_connector_is_unregistered(connector)) return connector_status_disconnected; connection_status = drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, - aconnector->port); + aconnector->mst_output_port); if (port->pdt != DP_PEER_DEVICE_NONE && !port->dpcd_rev) { uint8_t dpcd_rev; @@ -435,9 +561,14 @@ dm_dp_mst_detect(struct drm_connector *connector, if (aconnector->dc_link->sink_count) dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); + drm_dbg_dp(connector->dev, + "DM_MST: remove remote sink 0x%p, %d remaining\n", + aconnector->dc_link, + aconnector->dc_link->sink_count); + dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; - aconnector->edid = NULL; + amdgpu_dm_mst_reset_mst_connector_setting(aconnector); amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD, @@ -451,8 +582,8 @@ static int dm_dp_mst_atomic_check(struct drm_connector *connector, struct drm_atomic_state *state) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_port->mst_mgr; - struct drm_dp_mst_port *mst_port = aconnector->port; 
+ struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_root->mst_mgr; + struct drm_dp_mst_port *mst_port = aconnector->mst_output_port; return drm_dp_atomic_release_time_slots(state, mst_mgr, mst_port); } @@ -468,7 +599,6 @@ static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); - kfree(encoder); } static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { @@ -514,17 +644,20 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, if (!aconnector) return NULL; + DRM_DEBUG_DRIVER("%s: Create aconnector 0x%p for port 0x%p\n", __func__, aconnector, port); + connector = &aconnector->base; - aconnector->port = port; - aconnector->mst_port = master; + aconnector->mst_output_port = port; + aconnector->mst_root = master; amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_PROBE, true); - if (drm_connector_init( + if (drm_connector_dynamic_init( dev, connector, &dm_dp_mst_connector_funcs, - DRM_MODE_CONNECTOR_DisplayPort)) { + DRM_MODE_CONNECTOR_DisplayPort, + NULL)) { kfree(aconnector); return NULL; } @@ -558,9 +691,15 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, &connector->base, dev->mode_config.tile_property, 0); + connector->colorspace_property = master->base.colorspace_property; + if (connector->colorspace_property) + drm_connector_attach_colorspace_property(connector); drm_connector_set_path_property(connector, pathprop); + if (!retrieve_branch_specific_data(aconnector)) + aconnector->branch_ieee_oui = 0; + /* * Initialize connector state before adding the connectror to drm and * framebuffer lists @@ -572,8 +711,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, return connector; } +void dm_handle_mst_sideband_msg_ready_event( + struct drm_dp_mst_topology_mgr *mgr, + enum mst_msg_ready_type msg_rdy_type) +{ + uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; + uint8_t dret; + bool new_irq_handled = false; + int dpcd_addr; + uint8_t dpcd_bytes_to_read; + const uint8_t max_process_count = 30; + uint8_t process_count = 0; + u8 retry; + struct amdgpu_dm_connector *aconnector = + container_of(mgr, struct amdgpu_dm_connector, mst_mgr); + + + const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link); + + if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) { + dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT; + /* DPCD 0x200 - 0x201 for downstream IRQ */ + dpcd_addr = DP_SINK_COUNT; + } else { + dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI; + /* DPCD 0x2002 - 0x2005 for downstream IRQ */ + dpcd_addr = DP_SINK_COUNT_ESI; + } + + mutex_lock(&aconnector->handle_mst_msg_ready); + + while (process_count < max_process_count) { + u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {}; + + process_count++; + + dret = drm_dp_dpcd_read( + &aconnector->dm_dp_aux.aux, + dpcd_addr, + esi, + dpcd_bytes_to_read); + + if (dret != dpcd_bytes_to_read) { + DRM_DEBUG_KMS("DPCD read and acked number is not as expected!"); + break; + } + + DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]); + + switch (msg_rdy_type) { + case DOWN_REP_MSG_RDY_EVENT: + /* Only handle DOWN_REP_MSG_RDY case*/ + esi[1] &= DP_DOWN_REP_MSG_RDY; + break; + case UP_REQ_MSG_RDY_EVENT: + /* Only handle UP_REQ_MSG_RDY case*/ + esi[1] &= DP_UP_REQ_MSG_RDY; + break; + default: + /* Handle both cases*/ + esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY); + break; + } + + if (!esi[1]) + break; + + /* 
handle MST irq */ + if (aconnector->mst_mgr.mst_state) + drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr, + esi, + ack, + &new_irq_handled); + + if (new_irq_handled) { + /* ACK at DPCD to notify down stream */ + for (retry = 0; retry < 3; retry++) { + ssize_t wret; + + wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux, + dpcd_addr + 1, + ack[1]); + if (wret == 1) + break; + } + + if (retry == 3) { + DRM_ERROR("Failed to ack MST event.\n"); + break; + } + + drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr); + + new_irq_handled = false; + } else { + break; + } + } + + mutex_unlock(&aconnector->handle_mst_msg_ready); + + if (process_count == max_process_count) + DRM_DEBUG_DRIVER("Loop exceeded max iterations\n"); +} + +static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr) +{ + dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT); +} + static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { .add_connector = dm_dp_add_mst_connector, + .poll_hpd_irq = dm_handle_mst_down_rep_msg_ready, }; void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, @@ -592,6 +841,7 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, drm_dp_aux_init(&aconnector->dm_dp_aux.aux); drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux, &aconnector->base); + drm_dp_dpcd_set_probe(&aconnector->dm_dp_aux.aux, false); if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP) return; @@ -604,16 +854,21 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, drm_connector_attach_dp_subconnector_property(&aconnector->base); } -int dm_mst_get_pbn_divider(struct dc_link *link) +uint32_t dm_mst_get_pbn_divider(struct dc_link *link) { + uint32_t pbn_div_x100; + uint64_t dividend, divisor; + if (!link) return 0; - return dc_link_bandwidth_kbps(link, - dc_link_get_link_cap(link)) / (8 * 1000 * 54); -} + dividend = (uint64_t)dc_link_bandwidth_kbps(link, dc_link_get_link_cap(link)) * 100; + divisor = 8 * 1000 * 54; -#if defined(CONFIG_DRM_AMD_DC_DCN) + pbn_div_x100 = div64_u64(dividend, divisor); + + return dfixed_const(pbn_div_x100) / 100; +} struct dsc_mst_fairness_params { struct dc_crtc_timing *timing; @@ -628,13 +883,29 @@ struct dsc_mst_fairness_params { struct amdgpu_dm_connector *aconnector; }; -static int kbps_to_peak_pbn(int kbps) +#if defined(CONFIG_DRM_AMD_DC_FP) +static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn) +{ + uint64_t effective_kbps = (uint64_t)kbps; + + if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps + effective_kbps *= 1006; + effective_kbps = div_u64(effective_kbps, 1000); + } + + return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000)); +} + +static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin) { - u64 peak_kbps = kbps; + uint64_t pbn_effective = (uint64_t)pbn; + + if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn + pbn_effective *= (1000000 / PEAK_FACTOR_X1000); + else + pbn_effective *= 1000; - peak_kbps *= 1006; - peak_kbps = div_u64(peak_kbps, 1000); - return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000)); + return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64); } static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, @@ -642,17 +913,24 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p int count, int k) { + struct drm_connector *drm_connector; int i; + struct dc_dsc_config_options dsc_options = {0}; for (i = 0; i < count; 
i++) { + drm_connector = ¶ms[i].aconnector->base; + + dc_dsc_get_default_config_option(params[i].sink->ctx->dc, &dsc_options); + dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16; + memset(¶ms[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg)); if (vars[i + k].dsc_enabled && dc_dsc_compute_config( params[i].sink->ctx->dc->res_pool->dscs[0], ¶ms[i].sink->dsc_caps.dsc_dec_caps, - params[i].sink->ctx->dc->debug.dsc_min_slice_height_override, - params[i].sink->edid_caps.panel_patch.max_dsc_target_bpp_limit, + &dsc_options, 0, params[i].timing, + dc_link_get_highest_encoding_format(params[i].aconnector->dc_link), ¶ms[i].timing->dsc_cfg)) { params[i].timing->flags.DSC = 1; @@ -676,11 +954,11 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p if (params[i].sink) { if (params[i].sink->sink_signal != SIGNAL_TYPE_VIRTUAL && params[i].sink->sink_signal != SIGNAL_TYPE_NONE) - DRM_DEBUG_DRIVER("%s i=%d dispname=%s\n", __func__, i, + DRM_DEBUG_DRIVER("MST_DSC %s i=%d dispname=%s\n", __func__, i, params[i].sink->edid_caps.display_name); } - DRM_DEBUG_DRIVER("dsc=%d bits_per_pixel=%d pbn=%d\n", + DRM_DEBUG_DRIVER("MST_DSC dsc=%d bits_per_pixel=%d pbn=%d\n", params[i].timing->flags.DSC, params[i].timing->dsc_cfg.bits_per_pixel, vars[i + k].pbn); @@ -692,24 +970,31 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) struct dc_dsc_config dsc_config; u64 kbps; - kbps = div_u64((u64)pbn * 994 * 8 * 54, 64); + struct drm_connector *drm_connector = ¶m.aconnector->base; + struct dc_dsc_config_options dsc_options = {0}; + + dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options); + dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16; + + kbps = pbn_to_kbps(pbn, false); dc_dsc_compute_config( param.sink->ctx->dc->res_pool->dscs[0], ¶m.sink->dsc_caps.dsc_dec_caps, - param.sink->ctx->dc->debug.dsc_min_slice_height_override, - param.sink->edid_caps.panel_patch.max_dsc_target_bpp_limit, - (int) kbps, param.timing, &dsc_config); + &dsc_options, + (int) kbps, param.timing, + dc_link_get_highest_encoding_format(param.aconnector->dc_link), + &dsc_config); return dsc_config.bits_per_pixel; } -static bool increase_dsc_bpp(struct drm_atomic_state *state, - struct drm_dp_mst_topology_state *mst_state, - struct dc_link *dc_link, - struct dsc_mst_fairness_params *params, - struct dsc_mst_fairness_vars *vars, - int count, - int k) +static int increase_dsc_bpp(struct drm_atomic_state *state, + struct drm_dp_mst_topology_state *mst_state, + struct dc_link *dc_link, + struct dsc_mst_fairness_params *params, + struct dsc_mst_fairness_vars *vars, + int count, + int k) { int i; bool bpp_increased[MAX_PIPES]; @@ -719,11 +1004,12 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state, int remaining_to_increase = 0; int link_timeslots_used; int fair_pbn_alloc; + int ret = 0; for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled) { initial_slack[i] = - kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn; + kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn; bpp_increased[i] = false; remaining_to_increase += 1; } else { @@ -750,59 +1036,67 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state, link_timeslots_used = 0; for (i = 0; i < count; i++) - link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div); + link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, dfixed_trunc(mst_state->pbn_div)); fair_pbn_alloc = - (63 - 
link_timeslots_used) / remaining_to_increase * mst_state->pbn_div; + (63 - link_timeslots_used) / remaining_to_increase * dfixed_trunc(mst_state->pbn_div); if (initial_slack[next_index] > fair_pbn_alloc) { vars[next_index].pbn += fair_pbn_alloc; - if (drm_dp_atomic_find_time_slots(state, - params[next_index].port->mgr, - params[next_index].port, - vars[next_index].pbn) < 0) - return false; - if (!drm_dp_mst_atomic_check(state)) { + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) + return ret; + + ret = drm_dp_mst_atomic_check(state); + if (ret == 0) { vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn); } else { vars[next_index].pbn -= fair_pbn_alloc; - if (drm_dp_atomic_find_time_slots(state, - params[next_index].port->mgr, - params[next_index].port, - vars[next_index].pbn) < 0) - return false; + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) + return ret; } } else { vars[next_index].pbn += initial_slack[next_index]; - if (drm_dp_atomic_find_time_slots(state, - params[next_index].port->mgr, - params[next_index].port, - vars[next_index].pbn) < 0) - return false; - if (!drm_dp_mst_atomic_check(state)) { + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) + return ret; + + ret = drm_dp_mst_atomic_check(state); + if (ret == 0) { vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16; } else { vars[next_index].pbn -= initial_slack[next_index]; - if (drm_dp_atomic_find_time_slots(state, - params[next_index].port->mgr, - params[next_index].port, - vars[next_index].pbn) < 0) - return false; + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) + return ret; } } bpp_increased[next_index] = true; remaining_to_increase--; } - return true; + return 0; } -static bool try_disable_dsc(struct drm_atomic_state *state, - struct dc_link *dc_link, - struct dsc_mst_fairness_params *params, - struct dsc_mst_fairness_vars *vars, - int count, - int k) +static int try_disable_dsc(struct drm_atomic_state *state, + struct dc_link *dc_link, + struct dsc_mst_fairness_params *params, + struct dsc_mst_fairness_vars *vars, + int count, + int k) { int i; bool tried[MAX_PIPES]; @@ -810,6 +1104,8 @@ static bool try_disable_dsc(struct drm_atomic_state *state, int max_kbps_increase; int next_index; int remaining_to_try = 0; + int ret; + int var_pbn; for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled @@ -839,57 +1135,77 @@ static bool try_disable_dsc(struct drm_atomic_state *state, if (next_index == -1) break; - vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps); - if (drm_dp_atomic_find_time_slots(state, - params[next_index].port->mgr, - params[next_index].port, - vars[next_index].pbn) < 0) - return false; + DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index); + var_pbn = vars[next_index].pbn; + vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true); + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n", + __func__, __LINE__, next_index, ret); + 
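[Editorial aside — illustrative sketch, not part of the patch.] The kbps_to_pbn()/pbn_to_kbps() helpers introduced earlier in this hunk encode the DP MST payload-bandwidth-number unit, where 1 PBN corresponds to 54/64 MBps (i.e. 54 * 8 * 1000 / 64 kbps = 6750 kbps), and the 1006/1000 factor adds the 0.6% peak overhead margin. A standalone worked example in plain C; the function name and the 1080p60 figures are only for illustration:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t example_kbps_to_pbn(uint64_t kbps, int add_peak_margin)
	{
		if (add_peak_margin)
			kbps = kbps * 1006 / 1000;	/* +0.6% overhead */
		/* divide by 6750 kbps per PBN, rounding up */
		return (kbps * 64 + (54 * 8 * 1000) - 1) / (54 * 8 * 1000);
	}

	int main(void)
	{
		/* 1080p60 @ 148.5 MHz pixel clock, 24 bpp RGB: 148500 kHz * 24 = 3,564,000 kbps */
		printf("PBN = %llu\n",
		       (unsigned long long)example_kbps_to_pbn(3564000, 1));	/* prints 532 */
		return 0;
	}
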
vars[next_index].pbn = var_pbn; + return ret; + } - if (!drm_dp_mst_atomic_check(state)) { + ret = drm_dp_mst_atomic_check(state); + if (ret == 0) { + DRM_DEBUG_DRIVER("MST_DSC index #%d, greedily disable dsc\n", next_index); vars[next_index].dsc_enabled = false; vars[next_index].bpp_x16 = 0; } else { - vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps); - if (drm_dp_atomic_find_time_slots(state, - params[next_index].port->mgr, - params[next_index].port, - vars[next_index].pbn) < 0) - return false; + DRM_DEBUG_DRIVER("MST_DSC index #%d, restore optimized pbn value\n", next_index); + vars[next_index].pbn = var_pbn; + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n", + __func__, __LINE__, next_index, ret); + return ret; + } } tried[next_index] = true; remaining_to_try--; } - return true; + return 0; } -static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, - struct dc_state *dc_state, - struct dc_link *dc_link, - struct dsc_mst_fairness_vars *vars, - struct drm_dp_mst_topology_mgr *mgr, - int *link_vars_start_index) +static void log_dsc_params(int count, struct dsc_mst_fairness_vars *vars, int k) +{ + int i; + + for (i = 0; i < count; i++) + DRM_DEBUG_DRIVER("MST_DSC DSC params: stream #%d --- dsc_enabled = %d, bpp_x16 = %d, pbn = %d\n", + i, vars[i + k].dsc_enabled, vars[i + k].bpp_x16, vars[i + k].pbn); +} + +static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dc_link *dc_link, + struct dsc_mst_fairness_vars *vars, + struct drm_dp_mst_topology_mgr *mgr, + int *link_vars_start_index) { struct dc_stream_state *stream; struct dsc_mst_fairness_params params[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr); int count = 0; - int i, k; + int i, k, ret; bool debugfs_overwrite = false; + struct drm_connector_state *new_conn_state; memset(params, 0, sizeof(params)); if (IS_ERR(mst_state)) - return false; - - mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link); -#if defined(CONFIG_DRM_AMD_DC_DCN) - drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link)); -#endif + return PTR_ERR(mst_state); /* Set up params */ + DRM_DEBUG_DRIVER("%s: MST_DSC Try to set up params from %d streams\n", __func__, dc_state->stream_count); for (i = 0; i < dc_state->stream_count; i++) { struct dc_dsc_policy dsc_policy = {0}; @@ -902,15 +1218,23 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (!aconnector) continue; - if (!aconnector->port) + if (!aconnector->mst_output_port) continue; + new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); + + if (!new_conn_state) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC Skip the stream 0x%p with invalid new_conn_state\n", + __func__, __LINE__, stream); + continue; + } + stream->timing.flags.DSC = 0; params[count].timing = &stream->timing; params[count].sink = stream->sink; params[count].aconnector = aconnector; - params[count].port = aconnector->port; + params[count].port = aconnector->mst_output_port; params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable; if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE) debugfs_overwrite = true; @@ -918,22 +1242,30 @@ static bool 
compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel; params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported; - dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy); + dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link)); if (!dc_dsc_compute_bandwidth_range( stream->sink->ctx->dc->res_pool->dscs[0], stream->sink->ctx->dc->debug.dsc_min_slice_height_override, dsc_policy.min_target_bpp * 16, dsc_policy.max_target_bpp * 16, &stream->sink->dsc_caps.dsc_dec_caps, - &stream->timing, ¶ms[count].bw_range)) - params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); - + &stream->timing, + dc_link_get_highest_encoding_format(dc_link), + ¶ms[count].bw_range)) + params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(dc_link)); + + DRM_DEBUG_DRIVER("MST_DSC #%d stream 0x%p - max_kbps = %u, min_kbps = %u, uncompressed_kbps = %u\n", + count, stream, params[count].bw_range.max_kbps, params[count].bw_range.min_kbps, + params[count].bw_range.stream_kbps); count++; } + DRM_DEBUG_DRIVER("%s: MST_DSC Params set up for %d streams\n", __func__, count); + if (count == 0) { ASSERT(0); - return true; + return 0; } /* k is start index of vars for current phy link used by mst hub */ @@ -942,51 +1274,76 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, *link_vars_start_index += count; /* Try no compression */ + DRM_DEBUG_DRIVER("MST_DSC Try no compression\n"); for (i = 0; i < count; i++) { vars[i + k].aconnector = params[i].aconnector; - vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); + vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; - if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, - vars[i + k].pbn) < 0) - return false; + ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, + vars[i + k].pbn); + if (ret < 0) + return ret; } - if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) { + ret = drm_dp_mst_atomic_check(state); + if (ret == 0 && !debugfs_overwrite) { set_dsc_configs_from_fairness_vars(params, vars, count, k); - return true; + return 0; + } else if (ret != -ENOSPC) { + return ret; } + log_dsc_params(count, vars, k); + /* Try max compression */ + DRM_DEBUG_DRIVER("MST_DSC Try max compression\n"); for (i = 0; i < count; i++) { if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { - vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); + vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false); vars[i + k].dsc_enabled = true; vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; - if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, - params[i].port, vars[i + k].pbn) < 0) - return false; + ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, + params[i].port, vars[i + k].pbn); + if (ret < 0) + return ret; } else { - vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); + vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; - if (drm_dp_atomic_find_time_slots(state, 
params[i].port->mgr, - params[i].port, vars[i + k].pbn) < 0) - return false; + ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, + params[i].port, vars[i + k].pbn); + if (ret < 0) + return ret; } } - if (drm_dp_mst_atomic_check(state)) - return false; + ret = drm_dp_mst_atomic_check(state); + if (ret != 0) + return ret; + + log_dsc_params(count, vars, k); /* Optimize degree of compression */ - if (!increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k)) - return false; + DRM_DEBUG_DRIVER("MST_DSC Try optimize compression\n"); + ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k); + if (ret < 0) { + DRM_DEBUG_DRIVER("MST_DSC Failed to optimize compression\n"); + return ret; + } - if (!try_disable_dsc(state, dc_link, params, vars, count, k)) - return false; + log_dsc_params(count, vars, k); + + DRM_DEBUG_DRIVER("MST_DSC Try disable compression\n"); + ret = try_disable_dsc(state, dc_link, params, vars, count, k); + if (ret < 0) { + DRM_DEBUG_DRIVER("MST_DSC Failed to disable compression\n"); + return ret; + } + + log_dsc_params(count, vars, k); set_dsc_configs_from_fairness_vars(params, vars, count, k); - return true; + return 0; } static bool is_dsc_need_re_compute( @@ -1004,15 +1361,19 @@ static bool is_dsc_need_re_compute( /* only check phy used by dsc mst branch */ if (dc_link->type != dc_connection_mst_branch) - return false; + goto out; - if (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT || - dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)) - return false; + /* add a check for older MST DSC with no virtual DPCDs */ + if (needs_dsc_aux_workaround(dc_link) && + (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT || + dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))) + goto out; for (i = 0; i < MAX_PIPES; i++) stream_on_link[i] = NULL; + DRM_DEBUG_DRIVER("%s: MST_DSC check on %d streams in new dc_state\n", __func__, dc_state->stream_count); + /* check if there is mode change in new request */ for (i = 0; i < dc_state->stream_count; i++) { struct drm_crtc_state *new_crtc_state; @@ -1022,6 +1383,8 @@ static bool is_dsc_need_re_compute( if (!stream) continue; + DRM_DEBUG_DRIVER("%s:%d MST_DSC checking #%d stream 0x%p\n", __func__, __LINE__, i, stream); + /* check if stream using the same link for mst */ if (stream->link != dc_link) continue; @@ -1034,8 +1397,11 @@ static bool is_dsc_need_re_compute( new_stream_on_link_num++; new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); - if (!new_conn_state) + if (!new_conn_state) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC no new_conn_state for stream 0x%p, aconnector 0x%p\n", + __func__, __LINE__, stream, aconnector); continue; + } if (IS_ERR(new_conn_state)) continue; @@ -1044,19 +1410,37 @@ static bool is_dsc_need_re_compute( continue; new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); - if (!new_crtc_state) + if (!new_crtc_state) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC no new_crtc_state for crtc of stream 0x%p, aconnector 0x%p\n", + __func__, __LINE__, stream, aconnector); continue; + } if (IS_ERR(new_crtc_state)) continue; if (new_crtc_state->enable && new_crtc_state->active) { if (new_crtc_state->mode_changed || new_crtc_state->active_changed || - new_crtc_state->connectors_changed) - return true; + new_crtc_state->connectors_changed) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC dsc recompute required." 
+ "stream 0x%p in new dc_state\n", + __func__, __LINE__, stream); + is_dsc_need_re_compute = true; + goto out; + } } } + if (new_stream_on_link_num == 0) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC no mode change request for streams in new dc_state\n", + __func__, __LINE__); + is_dsc_need_re_compute = false; + goto out; + } + + DRM_DEBUG_DRIVER("%s: MST_DSC check on %d streams in current dc_state\n", + __func__, dc->current_state->stream_count); + /* check current_state if there stream on link but it is not in * new request state */ @@ -1079,36 +1463,50 @@ static bool is_dsc_need_re_compute( if (j == new_stream_on_link_num) { /* not in new state */ + DRM_DEBUG_DRIVER("%s:%d MST_DSC dsc recompute required." + "stream 0x%p in current dc_state but not in new dc_state\n", + __func__, __LINE__, stream); is_dsc_need_re_compute = true; break; } } +out: + DRM_DEBUG_DRIVER("%s: MST_DSC dsc recompute %s\n", + __func__, is_dsc_need_re_compute ? "required" : "not required"); + return is_dsc_need_re_compute; } -bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, - struct dc_state *dc_state, - struct dsc_mst_fairness_vars *vars) +int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars) { int i, j; struct dc_stream_state *stream; bool computed_streams[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_topology_mgr *mst_mgr; + struct resource_pool *res_pool; int link_vars_start_index = 0; + int ret = 0; for (i = 0; i < dc_state->stream_count; i++) computed_streams[i] = false; for (i = 0; i < dc_state->stream_count; i++) { stream = dc_state->streams[i]; + res_pool = stream->ctx->dc->res_pool; if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) continue; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; - if (!aconnector || !aconnector->dc_sink) + DRM_DEBUG_DRIVER("%s: MST_DSC compute mst dsc configs for stream 0x%p, aconnector 0x%p\n", + __func__, stream, aconnector); + + if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port) continue; if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) @@ -1117,20 +1515,18 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, if (computed_streams[i]) continue; - if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK) - return false; + if (res_pool->funcs->remove_stream_from_ctx && + res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK) + return -EINVAL; if (!is_dsc_need_re_compute(state, dc_state, stream->link)) continue; - mutex_lock(&aconnector->mst_mgr.lock); - if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, - &aconnector->mst_mgr, - &link_vars_start_index)) { - mutex_unlock(&aconnector->mst_mgr.lock); - return false; - } - mutex_unlock(&aconnector->mst_mgr.lock); + mst_mgr = aconnector->mst_output_port->mgr; + ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr, + &link_vars_start_index); + if (ret != 0) + return ret; for (j = 0; j < dc_state->stream_count; j++) { if (dc_state->streams[j]->link == stream->link) @@ -1142,23 +1538,27 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, stream = dc_state->streams[i]; if (stream->timing.flags.DSC == 1) - if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) - return false; + if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) { + DRM_DEBUG_DRIVER("%s:%d 
MST_DSC Failed to request dsc hw resource for stream 0x%p\n", + __func__, __LINE__, stream); + return -EINVAL; + } } - return true; + return ret; } -static bool - pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, - struct dc_state *dc_state, - struct dsc_mst_fairness_vars *vars) +static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars) { int i, j; struct dc_stream_state *stream; bool computed_streams[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_topology_mgr *mst_mgr; int link_vars_start_index = 0; + int ret = 0; for (i = 0; i < dc_state->stream_count; i++) computed_streams[i] = false; @@ -1171,7 +1571,10 @@ static bool aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; - if (!aconnector || !aconnector->dc_sink) + DRM_DEBUG_DRIVER("MST_DSC pre compute mst dsc configs for #%d stream 0x%p, aconnector 0x%p\n", + i, stream, aconnector); + + if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port) continue; if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) @@ -1183,14 +1586,11 @@ static bool if (!is_dsc_need_re_compute(state, dc_state, stream->link)) continue; - mutex_lock(&aconnector->mst_mgr.lock); - if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, - &aconnector->mst_mgr, - &link_vars_start_index)) { - mutex_unlock(&aconnector->mst_mgr.lock); - return false; - } - mutex_unlock(&aconnector->mst_mgr.lock); + mst_mgr = aconnector->mst_output_port->mgr; + ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr, + &link_vars_start_index); + if (ret != 0) + return ret; for (j = 0; j < dc_state->stream_count; j++) { if (dc_state->streams[j]->link == stream->link) @@ -1198,7 +1598,7 @@ static bool } } - return true; + return ret; } static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state, @@ -1253,9 +1653,9 @@ static bool is_dsc_precompute_needed(struct drm_atomic_state *state) return ret; } -bool pre_validate_dsc(struct drm_atomic_state *state, - struct dm_atomic_state **dm_state_ptr, - struct dsc_mst_fairness_vars *vars) +int pre_validate_dsc(struct drm_atomic_state *state, + struct dm_atomic_state **dm_state_ptr, + struct dsc_mst_fairness_vars *vars) { int i; struct dm_atomic_state *dm_state; @@ -1263,12 +1663,13 @@ bool pre_validate_dsc(struct drm_atomic_state *state, int ret = 0; if (!is_dsc_precompute_needed(state)) { - DRM_INFO_ONCE("DSC precompute is not needed.\n"); - return true; + DRM_INFO_ONCE("%s:%d MST_DSC dsc precompute is not needed\n", __func__, __LINE__); + return 0; } - if (dm_atomic_get_state(state, dm_state_ptr)) { - DRM_INFO_ONCE("dm_atomic_get_state() failed\n"); - return false; + ret = dm_atomic_get_state(state, dm_state_ptr); + if (ret != 0) { + DRM_INFO_ONCE("%s:%d MST_DSC dm_atomic_get_state() failed\n", __func__, __LINE__); + return ret; } dm_state = *dm_state_ptr; @@ -1278,31 +1679,35 @@ bool pre_validate_dsc(struct drm_atomic_state *state, * from dm_state->context. 
*/ - local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL); + local_dc_state = vmalloc(sizeof(struct dc_state)); if (!local_dc_state) - return false; + return -ENOMEM; + memcpy(local_dc_state, dm_state->context, sizeof(struct dc_state)); for (i = 0; i < local_dc_state->stream_count; i++) { struct dc_stream_state *stream = dm_state->context->streams[i]; int ind = find_crtc_index_in_state_by_stream(state, stream); if (ind >= 0) { - struct amdgpu_dm_connector *aconnector; + struct drm_connector *connector; struct drm_connector_state *drm_new_conn_state; struct dm_connector_state *dm_new_conn_state; struct dm_crtc_state *dm_old_crtc_state; - aconnector = + connector = amdgpu_dm_find_first_crtc_matching_connector(state, state->crtcs[ind].ptr); + if (!connector) + continue; + drm_new_conn_state = drm_atomic_get_new_connector_state(state, - &aconnector->base); + connector); dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state); local_dc_state->streams[i] = - create_validate_stream_for_sink(aconnector, + create_validate_stream_for_sink(connector, &state->crtcs[ind].new_state->mode, dm_new_conn_state, dm_old_crtc_state->stream); @@ -1316,8 +1721,10 @@ bool pre_validate_dsc(struct drm_atomic_state *state, if (ret != 0) goto clean_exit; - if (!pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars)) { - DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n"); + ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars); + if (ret != 0) { + DRM_INFO_ONCE("%s:%d MST_DSC dsc pre_compute_mst_dsc_configs_for_state() failed\n", + __func__, __LINE__); ret = -EINVAL; goto clean_exit; } @@ -1330,13 +1737,16 @@ bool pre_validate_dsc(struct drm_atomic_state *state, struct dc_stream_state *stream = dm_state->context->streams[i]; if (local_dc_state->streams[i] && - is_timing_changed(stream, local_dc_state->streams[i])) { - DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i); + dc_is_timing_changed(stream, local_dc_state->streams[i])) { + DRM_INFO_ONCE("%s:%d MST_DSC crtc[%d] needs mode_change\n", __func__, __LINE__, i); } else { int ind = find_crtc_index_in_state_by_stream(state, stream); - if (ind >= 0) + if (ind >= 0) { + DRM_INFO_ONCE("%s:%d MST_DSC no mode changed for stream 0x%p\n", + __func__, __LINE__, stream); state->crtcs[ind].new_state->mode_changed = 0; + } } } clean_exit: @@ -1347,115 +1757,218 @@ clean_exit: dc_stream_release(local_dc_state->streams[i]); } - kfree(local_dc_state); + vfree(local_dc_state); - return (ret == 0); + return ret; } -static unsigned int kbps_from_pbn(unsigned int pbn) +static bool is_dsc_common_config_possible(struct dc_stream_state *stream, + struct dc_dsc_bw_range *bw_range) { - unsigned int kbps = pbn; + struct dc_dsc_policy dsc_policy = {0}; + bool is_dsc_possible; - kbps *= (1000000 / PEAK_FACTOR_X1000); - kbps *= 8; - kbps *= 54; - kbps /= 64; + dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link)); + is_dsc_possible = dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0], + stream->sink->ctx->dc->debug.dsc_min_slice_height_override, + dsc_policy.min_target_bpp * 16, + dsc_policy.max_target_bpp * 16, + &stream->sink->dsc_caps.dsc_dec_caps, + &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range); - return kbps; + return is_dsc_possible; } +#endif -static bool is_dsc_common_config_possible(struct dc_stream_state *stream, - struct 
dc_dsc_bw_range *bw_range) +#if defined(CONFIG_DRM_AMD_DC_FP) +static bool dp_get_link_current_set_bw(struct drm_dp_aux *aux, uint32_t *cur_link_bw) { - struct dc_dsc_policy dsc_policy = {0}; + uint32_t total_data_bw_efficiency_x10000 = 0; + uint32_t link_rate_per_lane_kbps = 0; + enum dc_link_rate link_rate; + union lane_count_set lane_count; + u8 dp_link_encoding; + u8 link_bw_set = 0; + u8 data[16] = {0}; + + *cur_link_bw = 0; + + if (drm_dp_dpcd_read(aux, DP_LINK_BW_SET, data, 16) != 16) + return false; + + dp_link_encoding = data[DP_MAIN_LINK_CHANNEL_CODING_SET - DP_LINK_BW_SET]; + link_bw_set = data[DP_LINK_BW_SET - DP_LINK_BW_SET]; + lane_count.raw = data[DP_LANE_COUNT_SET - DP_LINK_BW_SET]; + + drm_dbg_dp(aux->drm_dev, "MST_DSC downlink setting: %d, 0x%x x %d\n", + dp_link_encoding, link_bw_set, lane_count.bits.LANE_COUNT_SET); + + switch (dp_link_encoding) { + case DP_8b_10b_ENCODING: + link_rate = link_bw_set; + link_rate_per_lane_kbps = link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE; + total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000; + total_data_bw_efficiency_x10000 /= 100; + total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100; + break; + case DP_128b_132b_ENCODING: + switch (link_bw_set) { + case DP_LINK_BW_10: + link_rate = LINK_RATE_UHBR10; + break; + case DP_LINK_BW_13_5: + link_rate = LINK_RATE_UHBR13_5; + break; + case DP_LINK_BW_20: + link_rate = LINK_RATE_UHBR20; + break; + default: + return false; + } - dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy); - dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0], - stream->sink->ctx->dc->debug.dsc_min_slice_height_override, - dsc_policy.min_target_bpp * 16, - dsc_policy.max_target_bpp * 16, - &stream->sink->dsc_caps.dsc_dec_caps, - &stream->timing, bw_range); + link_rate_per_lane_kbps = link_rate * 10000; + total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000; + break; + default: + return false; + } - return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16; + *cur_link_bw = link_rate_per_lane_kbps * lane_count.bits.LANE_COUNT_SET / 10000 * total_data_bw_efficiency_x10000; + return true; } -#endif /* CONFIG_DRM_AMD_DC_DCN */ +#endif enum dc_status dm_dp_mst_is_port_support_mode( struct amdgpu_dm_connector *aconnector, struct dc_stream_state *stream) { - int bpp, pbn, branch_max_throughput_mps = 0; -#if defined(CONFIG_DRM_AMD_DC_DCN) +#if defined(CONFIG_DRM_AMD_DC_FP) + int branch_max_throughput_mps = 0; struct dc_link_settings cur_link_settings; - unsigned int end_to_end_bw_in_kbps = 0; - unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0; - unsigned int max_compressed_bw_in_kbps = 0; + uint32_t end_to_end_bw_in_kbps = 0; + uint32_t root_link_bw_in_kbps = 0; + uint32_t virtual_channel_bw_in_kbps = 0; struct dc_dsc_bw_range bw_range = {0}; + struct dc_dsc_config_options dsc_options = {0}; + uint32_t stream_kbps; - /* - * check if the mode could be supported if DSC pass-through is supported - * AND check if there enough bandwidth available to support the mode - * with DSC enabled. 
+ /* DSC unnecessary case + * Check if timing could be supported within end-to-end BW */ - if (is_dsc_common_config_possible(stream, &bw_range) && - aconnector->port->passthrough_aux) { - mutex_lock(&aconnector->mst_mgr.lock); + stream_kbps = + dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(stream->link)); + cur_link_settings = stream->link->verified_link_cap; + root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings); + virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true); + + /* pick the end to end bw bottleneck */ + end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); + + if (stream_kbps <= end_to_end_bw_in_kbps) { + DRM_DEBUG_DRIVER("MST_DSC no dsc required. End-to-end bw sufficient\n"); + return DC_OK; + } + + /*DSC necessary case*/ + if (!aconnector->dsc_aux) + return DC_FAIL_BANDWIDTH_VALIDATE; + + if (is_dsc_common_config_possible(stream, &bw_range)) { - cur_link_settings = stream->link->verified_link_cap; + /*capable of dsc passthough. dsc bitstream along the entire path*/ + if (aconnector->mst_output_port->passthrough_aux) { + if (bw_range.min_kbps > end_to_end_bw_in_kbps) { + DRM_DEBUG_DRIVER("MST_DSC dsc passthrough and decode at endpoint" + "Max dsc compression bw can't fit into end-to-end bw\n"); + return DC_FAIL_BANDWIDTH_VALIDATE; + } + } else { + /*dsc bitstream decoded at the dp last link*/ + struct drm_dp_mst_port *immediate_upstream_port = NULL; + uint32_t end_link_bw = 0; + + /*Get last DP link BW capability. Mode shall be supported by Legacy peer*/ + if (aconnector->mst_output_port->pdt != DP_PEER_DEVICE_DP_LEGACY_CONV && + aconnector->mst_output_port->pdt != DP_PEER_DEVICE_NONE) { + if (aconnector->vc_full_pbn != aconnector->mst_output_port->full_pbn) { + dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw); + aconnector->vc_full_pbn = aconnector->mst_output_port->full_pbn; + aconnector->mst_local_bw = end_link_bw; + } else { + end_link_bw = aconnector->mst_local_bw; + } - upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, - &cur_link_settings - ); - down_link_bw_in_kbps = kbps_from_pbn(aconnector->port->full_pbn); + if (end_link_bw > 0 && + stream_kbps > end_link_bw && + aconnector->branch_ieee_oui != DP_BRANCH_DEVICE_ID_90CC24) { + DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. " + "Mode required bw can't fit into last link\n"); + return DC_FAIL_BANDWIDTH_VALIDATE; + } + } - /* pick the bottleneck */ - end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, - down_link_bw_in_kbps); + /*Get virtual channel bandwidth between source and the link before the last link*/ + if (aconnector->mst_output_port->parent->port_parent) + immediate_upstream_port = aconnector->mst_output_port->parent->port_parent; - mutex_unlock(&aconnector->mst_mgr.lock); + if (immediate_upstream_port) { + virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true); + virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); + } else { + /* For topology LCT 1 case - only one mstb*/ + virtual_channel_bw_in_kbps = root_link_bw_in_kbps; + } - /* - * use the maximum dsc compression bandwidth as the required - * bandwidth for the mode - */ - max_compressed_bw_in_kbps = bw_range.min_kbps; + if (bw_range.min_kbps > virtual_channel_bw_in_kbps) { + DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link." 
+ "Max dsc compression can't fit into MST available bw\n"); + return DC_FAIL_BANDWIDTH_VALIDATE; + } + } - if (end_to_end_bw_in_kbps < max_compressed_bw_in_kbps) { - DRM_DEBUG_DRIVER("Mode does not fit into DSC pass-through bandwidth validation\n"); + /*Confirm if we can obtain dsc config*/ + dc_dsc_get_default_config_option(stream->link->dc, &dsc_options); + dsc_options.max_target_bpp_limit_override_x16 = aconnector->base.display_info.max_dsc_bpp * 16; + if (dc_dsc_compute_config(stream->sink->ctx->dc->res_pool->dscs[0], + &stream->sink->dsc_caps.dsc_dec_caps, + &dsc_options, + end_to_end_bw_in_kbps, + &stream->timing, + dc_link_get_highest_encoding_format(stream->link), + &stream->timing.dsc_cfg)) { + stream->timing.flags.DSC = 1; + DRM_DEBUG_DRIVER("MST_DSC require dsc and dsc config found\n"); + } else { + DRM_DEBUG_DRIVER("MST_DSC require dsc but can't find appropriate dsc config\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } - } else { -#endif - /* check if mode could be supported within full_pbn */ - bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3; - pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false); - if (pbn > aconnector->port->full_pbn) + /* check is mst dsc output bandwidth branch_overall_throughput_0_mps */ + switch (stream->timing.pixel_encoding) { + case PIXEL_ENCODING_RGB: + case PIXEL_ENCODING_YCBCR444: + branch_max_throughput_mps = + aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps; + break; + case PIXEL_ENCODING_YCBCR422: + case PIXEL_ENCODING_YCBCR420: + branch_max_throughput_mps = + aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps; + break; + default: + break; + } + + if (branch_max_throughput_mps != 0 && + ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) { + DRM_DEBUG_DRIVER("MST_DSC require dsc but max throughput mps fails\n"); return DC_FAIL_BANDWIDTH_VALIDATE; -#if defined(CONFIG_DRM_AMD_DC_DCN) + } + } else { + DRM_DEBUG_DRIVER("MST_DSC require dsc but can't find common dsc config\n"); + return DC_FAIL_BANDWIDTH_VALIDATE; } #endif - - /* check is mst dsc output bandwidth branch_overall_throughput_0_mps */ - switch (stream->timing.pixel_encoding) { - case PIXEL_ENCODING_RGB: - case PIXEL_ENCODING_YCBCR444: - branch_max_throughput_mps = - aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps; - break; - case PIXEL_ENCODING_YCBCR422: - case PIXEL_ENCODING_YCBCR420: - branch_max_throughput_mps = - aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps; - break; - default: - break; - } - - if (branch_max_throughput_mps != 0 && - ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) - return DC_FAIL_BANDWIDTH_VALIDATE; - return DC_OK; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index b92a7c5671aa..6f7ea684b555 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2012-15 Advanced Micro Devices, Inc. 
* @@ -34,10 +35,32 @@ #define SYNAPTICS_RC_OFFSET 0x4BC #define SYNAPTICS_RC_DATA 0x4C0 +#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C + +/** + * Panamera MST Hub detection + * Offset DPCD 050Eh == 0x5A indicates cascaded MST hub case + * Check from beginning of branch device vendor specific field (050Ch) + */ +#define IS_SYNAPTICS_PANAMERA(branchDevName) (((int)branchDevName[4] & 0xF0) == 0x50 ? 1 : 0) +#define BRANCH_HW_REVISION_PANAMERA_A2 0x10 +#define SYNAPTICS_CASCADED_HUB_ID 0x5A +#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0) + +#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031 +#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000 + +enum mst_msg_ready_type { + NONE_MSG_RDY_EVENT = 0, + DOWN_REP_MSG_RDY_EVENT = 1, + UP_REQ_MSG_RDY_EVENT = 2, + DOWN_OR_UP_MSG_RDY_EVENT = 3 +}; + struct amdgpu_display_manager; struct amdgpu_dm_connector; -int dm_mst_get_pbn_divider(struct dc_link *link); +uint32_t dm_mst_get_pbn_divider(struct dc_link *link); void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector, @@ -46,6 +69,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, void dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev); +void dm_handle_mst_sideband_msg_ready_event( + struct drm_dp_mst_topology_mgr *mgr, + enum mst_msg_ready_type msg_rdy_type); + struct dsc_mst_fairness_vars { int pbn; bool dsc_enabled; @@ -53,15 +80,15 @@ struct dsc_mst_fairness_vars { struct amdgpu_dm_connector *aconnector; }; -bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, - struct dc_state *dc_state, - struct dsc_mst_fairness_vars *vars); +int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars); bool needs_dsc_aux_workaround(struct dc_link *link); -bool pre_validate_dsc(struct drm_atomic_state *state, - struct dm_atomic_state **dm_state_ptr, - struct dsc_mst_fairness_vars *vars); +int pre_validate_dsc(struct drm_atomic_state *state, + struct dm_atomic_state **dm_state_ptr, + struct dsc_mst_fairness_vars *vars); enum dc_status dm_dp_mst_is_port_support_mode( struct amdgpu_dm_connector *aconnector, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index e6854f7270a6..2e3ee78999d9 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -26,8 +26,10 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> +#include "drm/drm_framebuffer.h" #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_plane_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_fourcc.h> #include "amdgpu.h" @@ -35,6 +37,7 @@ #include "amdgpu_display.h" #include "amdgpu_dm_trace.h" #include "amdgpu_dm_plane.h" +#include "amdgpu_dm_colorop.h" #include "gc/gc_11_0_0_offset.h" #include "gc/gc_11_0_0_sh_mask.h" @@ -67,7 +70,16 @@ static const uint32_t overlay_formats[] = { DRM_FORMAT_RGBA8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ABGR8888, - DRM_FORMAT_RGB565 + DRM_FORMAT_RGB565, + DRM_FORMAT_NV21, + DRM_FORMAT_NV12, + DRM_FORMAT_P010 +}; + +static const uint32_t video_formats[] = { + DRM_FORMAT_NV21, + DRM_FORMAT_NV12, + DRM_FORMAT_P010 }; static const u32 cursor_formats[] = { @@ -81,12 +93,12 @@ enum dm_micro_swizzle { MICRO_SWIZZLE_R = 3 }; -const struct drm_format_info 
*amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd) +const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier) { - return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]); + return amdgpu_lookup_format_info(pixel_format, modifier); } -void fill_blending_from_plane_state(const struct drm_plane_state *plane_state, +void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state, bool *per_pixel_alpha, bool *pre_multiplied_alpha, bool *global_alpha, int *global_alpha_value) { @@ -95,8 +107,6 @@ void fill_blending_from_plane_state(const struct drm_plane_state *plane_state, *global_alpha = false; *global_alpha_value = 0xff; - if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY) - return; if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI || plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) { @@ -104,6 +114,11 @@ void fill_blending_from_plane_state(const struct drm_plane_state *plane_state, DRM_FORMAT_ARGB8888, DRM_FORMAT_RGBA8888, DRM_FORMAT_ABGR8888, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_ARGB16161616, + DRM_FORMAT_ABGR16161616, + DRM_FORMAT_ARGB16161616F, }; uint32_t format = plane_state->fb->format->format; unsigned int i; @@ -125,14 +140,14 @@ void fill_blending_from_plane_state(const struct drm_plane_state *plane_state, } } -static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) +static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) { if (!*mods) return; if (*cap - *size < 1) { uint64_t new_cap = *cap * 2; - uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL); + uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL); if (!new_mods) { kfree(*mods); @@ -150,12 +165,12 @@ static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_ *size += 1; } -static bool modifier_has_dcc(uint64_t modifier) +static bool amdgpu_dm_plane_modifier_has_dcc(uint64_t modifier) { return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); } -static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier) +static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier) { if (modifier == DRM_FORMAT_MOD_LINEAR) return 0; @@ -163,8 +178,8 @@ static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier) return AMD_FMT_MOD_GET(TILE, modifier); } -static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, - uint64_t tiling_flags) +static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(struct dc_tiling_info *tiling_info, + uint64_t tiling_flags) { /* Fill GFX8 params */ if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { @@ -176,6 +191,7 @@ static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); + tiling_info->gfxversion = DcGfxVersion8; /* XXX fix me for VI */ tiling_info->gfx8.num_banks = num_banks; tiling_info->gfx8.array_mode = @@ -195,8 +211,8 @@ static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); } -static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, - union dc_tiling_info *tiling_info) +static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, + struct dc_tiling_info *tiling_info) { /* Fill GFX9 params 
*/ tiling_info->gfx9.num_pipes = @@ -212,13 +228,13 @@ static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, tiling_info->gfx9.num_rb_per_se = adev->gfx.config.gb_addr_config_fields.num_rb_per_se; tiling_info->gfx9.shaderEnable = 1; - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; } -static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, - union dc_tiling_info *tiling_info, - uint64_t modifier) +static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, + struct dc_tiling_info *tiling_info, + uint64_t modifier) { unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier); unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier); @@ -227,7 +243,7 @@ static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev pipes_log2 = min(5u, mod_pipe_xor_bits); - fill_gfx9_tiling_info_from_device(adev, tiling_info); + amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info); if (!IS_AMD_FMT_MOD(modifier)) return; @@ -244,13 +260,13 @@ static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev } } -static int validate_dcc(struct amdgpu_device *adev, - const enum surface_pixel_format format, - const enum dc_rotation_angle rotation, - const union dc_tiling_info *tiling_info, - const struct dc_plane_dcc_param *dcc, - const struct dc_plane_address *address, - const struct plane_size *plane_size) +static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const struct dc_tiling_info *tiling_info, + const struct dc_plane_dcc_param *dcc, + const struct dc_plane_address *address, + const struct plane_size *plane_size) { struct dc *dc = adev->dm.dc; struct dc_dcc_surface_param input; @@ -262,8 +278,11 @@ static int validate_dcc(struct amdgpu_device *adev, if (!dcc->enable) return 0; - if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || - !dc->cap_funcs.get_dcc_compression_cap) + if (adev->family < AMDGPU_FAMILY_GC_12_0_0 && + format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) + return -EINVAL; + + if (!dc->cap_funcs.get_dcc_compression_cap) return -EINVAL; input.format = format; @@ -289,23 +308,23 @@ static int validate_dcc(struct amdgpu_device *adev, return 0; } -static int fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, - const struct amdgpu_framebuffer *afb, - const enum surface_pixel_format format, - const enum dc_rotation_angle rotation, - const struct plane_size *plane_size, - union dc_tiling_info *tiling_info, - struct dc_plane_dcc_param *dcc, - struct dc_plane_address *address, - const bool force_disable_dcc) +static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, + const struct amdgpu_framebuffer *afb, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const struct plane_size *plane_size, + struct dc_tiling_info *tiling_info, + struct dc_plane_dcc_param *dcc, + struct dc_plane_address *address) { const uint64_t modifier = afb->base.modifier; int ret = 0; - fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); - tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier); + amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); + 
tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier); + tiling_info->gfxversion = DcGfxVersion9; - if (modifier_has_dcc(modifier) && !force_disable_dcc) { + if (amdgpu_dm_plane_modifier_has_dcc(modifier)) { uint64_t dcc_address = afb->address + afb->base.offsets[1]; bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier); @@ -333,60 +352,104 @@ static int fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, address->grph.meta_addr.high_part = upper_32_bits(dcc_address); } - ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); + ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); if (ret) - drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret); + drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret); return ret; } -static void add_gfx10_1_modifiers(const struct amdgpu_device *adev, - uint64_t **mods, uint64_t *size, uint64_t *capacity) +static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amdgpu_device *adev, + const struct amdgpu_framebuffer *afb, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const struct plane_size *plane_size, + struct dc_tiling_info *tiling_info, + struct dc_plane_dcc_param *dcc, + struct dc_plane_address *address) +{ + const uint64_t modifier = afb->base.modifier; + int ret = 0; + + /* TODO: Most of this function shouldn't be needed on GFX12. */ + amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info); + + tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier); + tiling_info->gfxversion = DcGfxAddr3; + + if (amdgpu_dm_plane_modifier_has_dcc(modifier)) { + int max_compressed_block = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier); + + dcc->enable = 1; + dcc->independent_64b_blks = max_compressed_block == 0; + + if (max_compressed_block == 0) + dcc->dcc_ind_blk = hubp_ind_block_64b; + else if (max_compressed_block == 1) + dcc->dcc_ind_blk = hubp_ind_block_128b; + else + dcc->dcc_ind_blk = hubp_ind_block_unconstrained; + } + + /* TODO: This seems wrong because there is no DCC plane on GFX12. 
*/ + ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); + if (ret) + drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret); + + return ret; +} + +static void amdgpu_dm_plane_add_gfx10_1_modifiers(const struct amdgpu_device *adev, + uint64_t **mods, + uint64_t *size, + uint64_t *capacity) { int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); - - - /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); + + + /* Only 
supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */ + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); } -static void add_gfx9_modifiers(const struct amdgpu_device *adev, - uint64_t **mods, uint64_t *size, uint64_t *capacity) +static void amdgpu_dm_plane_add_gfx9_modifiers(const struct amdgpu_device *adev, + uint64_t **mods, + uint64_t *size, + uint64_t *capacity) { int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); int pipe_xor_bits = min(8, pipes + @@ -407,163 +470,164 @@ static void add_gfx9_modifiers(const struct amdgpu_device *adev, */ if (has_constant_encode) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); } - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); if (has_constant_encode) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(RB, rb) | - AMD_FMT_MOD_SET(PIPE, pipes)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, 
AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(RB, rb) | + AMD_FMT_MOD_SET(PIPE, pipes)); } - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | - AMD_FMT_MOD_SET(RB, rb) | - AMD_FMT_MOD_SET(PIPE, pipes)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | + AMD_FMT_MOD_SET(RB, rb) | + AMD_FMT_MOD_SET(PIPE, pipes)); } /* * Only supported for 64bpp on Raven, will be filtered on format in - * dm_plane_format_mod_supported. + * amdgpu_dm_plane_format_mod_supported. */ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); if (adev->family == AMDGPU_FAMILY_RV) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); } /* * Only supported for 64bpp on Raven, will be filtered on format in - * dm_plane_format_mod_supported. + * amdgpu_dm_plane_format_mod_supported. 
*/ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); if (adev->family == AMDGPU_FAMILY_RV) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); } } -static void add_gfx10_3_modifiers(const struct amdgpu_device *adev, - uint64_t **mods, uint64_t *size, uint64_t *capacity) +static void amdgpu_dm_plane_add_gfx10_3_modifiers(const struct amdgpu_device *adev, + uint64_t **mods, + uint64_t *size, + uint64_t *capacity) { int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs); - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, 
AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs)); - - /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); - - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs)); + + /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */ + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + 
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); } -static void add_gfx11_modifiers(struct amdgpu_device *adev, +static void amdgpu_dm_plane_add_gfx11_modifiers(struct amdgpu_device *adev, uint64_t **mods, uint64_t *size, uint64_t *capacity) { int num_pipes = 0; @@ -572,7 +636,7 @@ static void add_gfx11_modifiers(struct amdgpu_device *adev, int pkrs = 0; u32 gb_addr_config; u8 i = 0; - unsigned swizzle_r_x; + unsigned int swizzle_r_x; uint64_t modifier_r_x; uint64_t modifier_dcc_best; uint64_t modifier_dcc_4k; @@ -614,21 +678,53 @@ static void add_gfx11_modifiers(struct amdgpu_device *adev, AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B); - add_modifier(mods, size, capacity, modifier_dcc_best); - add_modifier(mods, size, capacity, modifier_dcc_4k); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k); - add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1)); - add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1)); - add_modifier(mods, size, capacity, modifier_r_x); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_r_x); } - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D)); } -static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) +static void amdgpu_dm_plane_add_gfx12_modifiers(struct amdgpu_device *adev, + uint64_t **mods, uint64_t *size, uint64_t *capacity) +{ + uint64_t ver = AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12); + uint64_t mod_256k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256K_2D); + uint64_t mod_64k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_64K_2D); + uint64_t mod_4k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D); + uint64_t mod_256b = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D); + uint64_t dcc = ver | AMD_FMT_MOD_SET(DCC, 1); + uint8_t max_comp_block[] = {2, 1, 0}; + uint64_t max_comp_block_mod[ARRAY_SIZE(max_comp_block)] = {0}; + uint8_t i = 0, j = 0; + uint64_t gfx12_modifiers[] = {mod_256k, mod_64k, mod_4k, mod_256b, DRM_FORMAT_MOD_LINEAR}; + + for (i = 0; i < ARRAY_SIZE(max_comp_block); i++) + max_comp_block_mod[i] = AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_comp_block[i]); + + /* With DCC: Best choice should be kept first. Hence, add all 256k modifiers of different + * max compressed blocks first and then move on to the next smaller sized layouts. + * Do not add the linear modifier here, and hence the condition of size-1 for the loop + */ + for (j = 0; j < ARRAY_SIZE(gfx12_modifiers) - 1; j++) + for (i = 0; i < ARRAY_SIZE(max_comp_block); i++) + amdgpu_dm_plane_add_modifier(mods, size, capacity, + ver | dcc | max_comp_block_mod[i] | gfx12_modifiers[j]); + + /* Without DCC. 
Add all modifiers including linear at the end */ + for (i = 0; i < ARRAY_SIZE(gfx12_modifiers); i++) + amdgpu_dm_plane_add_modifier(mods, size, capacity, gfx12_modifiers[i]); + +} + +static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) { uint64_t size = 0, capacity = 128; *mods = NULL; @@ -637,39 +733,43 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty if (adev->family < AMDGPU_FAMILY_AI) return 0; - *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL); + *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL); if (plane_type == DRM_PLANE_TYPE_CURSOR) { - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); + amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); + amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); return *mods ? 0 : -ENOMEM; } switch (adev->family) { case AMDGPU_FAMILY_AI: case AMDGPU_FAMILY_RV: - add_gfx9_modifiers(adev, mods, &size, &capacity); + amdgpu_dm_plane_add_gfx9_modifiers(adev, mods, &size, &capacity); break; case AMDGPU_FAMILY_NV: case AMDGPU_FAMILY_VGH: case AMDGPU_FAMILY_YC: case AMDGPU_FAMILY_GC_10_3_6: case AMDGPU_FAMILY_GC_10_3_7: - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) - add_gfx10_3_modifiers(adev, mods, &size, &capacity); + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) + amdgpu_dm_plane_add_gfx10_3_modifiers(adev, mods, &size, &capacity); else - add_gfx10_1_modifiers(adev, mods, &size, &capacity); + amdgpu_dm_plane_add_gfx10_1_modifiers(adev, mods, &size, &capacity); break; case AMDGPU_FAMILY_GC_11_0_0: case AMDGPU_FAMILY_GC_11_0_1: - add_gfx11_modifiers(adev, mods, &size, &capacity); + case AMDGPU_FAMILY_GC_11_5_0: + amdgpu_dm_plane_add_gfx11_modifiers(adev, mods, &size, &capacity); + break; + case AMDGPU_FAMILY_GC_12_0_0: + amdgpu_dm_plane_add_gfx12_modifiers(adev, mods, &size, &capacity); break; } - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); + amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); /* INVALID marks the end of the list. */ - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); + amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); if (!*mods) return -ENOMEM; @@ -677,9 +777,9 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty return 0; } -static int get_plane_formats(const struct drm_plane *plane, - const struct dc_plane_cap *plane_cap, - uint32_t *formats, int max_formats) +static int amdgpu_dm_plane_get_plane_formats(const struct drm_plane *plane, + const struct dc_plane_cap *plane_cap, + uint32_t *formats, int max_formats) { int i, num_formats = 0; @@ -689,8 +789,8 @@ static int get_plane_formats(const struct drm_plane *plane, * caps list. 
*/ - switch (plane->type) { - case DRM_PLANE_TYPE_PRIMARY: + if (plane->type == DRM_PLANE_TYPE_PRIMARY || + (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) { for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { if (num_formats >= max_formats) break; @@ -708,59 +808,44 @@ static int get_plane_formats(const struct drm_plane *plane, formats[num_formats++] = DRM_FORMAT_XBGR16161616F; formats[num_formats++] = DRM_FORMAT_ABGR16161616F; } - break; + } else { + switch (plane->type) { + case DRM_PLANE_TYPE_OVERLAY: + for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { + if (num_formats >= max_formats) + break; - case DRM_PLANE_TYPE_OVERLAY: - for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { - if (num_formats >= max_formats) - break; + formats[num_formats++] = overlay_formats[i]; + } + break; - formats[num_formats++] = overlay_formats[i]; - } - break; + case DRM_PLANE_TYPE_CURSOR: + for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { + if (num_formats >= max_formats) + break; - case DRM_PLANE_TYPE_CURSOR: - for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { - if (num_formats >= max_formats) - break; + formats[num_formats++] = cursor_formats[i]; + } + break; - formats[num_formats++] = cursor_formats[i]; + default: + break; } - break; } return num_formats; } -#ifdef CONFIG_DRM_AMD_DC_HDR -static int attach_color_mgmt_properties(struct amdgpu_display_manager *dm, struct drm_plane *plane) -{ - drm_object_attach_property(&plane->base, - dm->degamma_lut_property, - 0); - drm_object_attach_property(&plane->base, - dm->degamma_lut_size_property, - MAX_COLOR_LUT_ENTRIES); - drm_object_attach_property(&plane->base, dm->ctm_property, - 0); - drm_object_attach_property(&plane->base, dm->sdr_boost_property, - DEFAULT_SDR_BOOST); - - return 0; -} -#endif - -int fill_plane_buffer_attributes(struct amdgpu_device *adev, +int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev, const struct amdgpu_framebuffer *afb, const enum surface_pixel_format format, const enum dc_rotation_angle rotation, const uint64_t tiling_flags, - union dc_tiling_info *tiling_info, + struct dc_tiling_info *tiling_info, struct plane_size *plane_size, struct dc_plane_dcc_param *dcc, struct dc_plane_address *address, - bool tmz_surface, - bool force_disable_dcc) + bool tmz_surface) { const struct drm_framebuffer *fb = &afb->base; int ret; @@ -816,23 +901,29 @@ int fill_plane_buffer_attributes(struct amdgpu_device *adev, upper_32_bits(chroma_addr); } - if (adev->family >= AMDGPU_FAMILY_AI) { - ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, - rotation, plane_size, - tiling_info, dcc, - address, - force_disable_dcc); + if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) { + ret = amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(adev, afb, format, + rotation, plane_size, + tiling_info, dcc, + address); + if (ret) + return ret; + } else if (adev->family >= AMDGPU_FAMILY_AI) { + ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, + rotation, plane_size, + tiling_info, dcc, + address); if (ret) return ret; } else { - fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); + amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); } return 0; } -static int dm_plane_helper_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) +static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) { struct amdgpu_framebuffer *afb; struct drm_gem_object 
*obj; @@ -848,19 +939,23 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, } afb = to_amdgpu_framebuffer(new_state->fb); - obj = new_state->fb->obj[0]; + obj = drm_gem_fb_get_obj(new_state->fb, 0); + if (!obj) { + DRM_ERROR("Failed to get obj from framebuffer\n"); + return -EINVAL; + } + rbo = gem_to_amdgpu_bo(obj); adev = amdgpu_ttm_adev(rbo->tbo.bdev); - r = amdgpu_bo_reserve(rbo, true); if (r) { - dev_err(adev->dev, "fail to reserve bo (%d)\n", r); + drm_err(adev_to_drm(adev), "fail to reserve bo (%d)\n", r); return r; } r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); if (r) { - dev_err(adev->dev, "reserving fence slot failed (%d)\n", r); + drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r); goto error_unlock; } @@ -869,6 +964,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, else domain = AMDGPU_GEM_DOMAIN_VRAM; + rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(rbo, domain); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) @@ -907,14 +1003,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { struct dc_plane_state *plane_state = dm_plane_state_new->dc_state; - bool force_disable_dcc = !plane_state->dcc.enable; - fill_plane_buffer_attributes( + amdgpu_dm_plane_fill_plane_buffer_attributes( adev, afb, plane_state->format, plane_state->rotation, afb->tiling_flags, &plane_state->tiling_info, &plane_state->plane_size, &plane_state->dcc, &plane_state->address, - afb->tmz_surface, force_disable_dcc); + afb->tmz_surface); } return 0; @@ -927,8 +1022,8 @@ error_unlock: return r; } -static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) +static void amdgpu_dm_plane_helper_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) { struct amdgpu_bo *rbo; int r; @@ -948,7 +1043,7 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, amdgpu_bo_unref(&rbo); } -static void get_min_max_dc_plane_scaling(struct drm_device *dev, +static void amdgpu_dm_plane_get_min_max_dc_plane_scaling(struct drm_device *dev, struct drm_framebuffer *fb, int *min_downscale, int *max_upscale) { @@ -990,7 +1085,7 @@ static void get_min_max_dc_plane_scaling(struct drm_device *dev, *min_downscale = 1000; } -int dm_plane_helper_check_state(struct drm_plane_state *state, +int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state, struct drm_crtc_state *new_crtc_state) { struct drm_framebuffer *fb = state->fb; @@ -1029,8 +1124,8 @@ int dm_plane_helper_check_state(struct drm_plane_state *state, } /* Get min/max allowed scaling factors from plane caps. */ - get_min_max_dc_plane_scaling(state->crtc->dev, fb, - &min_downscale, &max_upscale); + amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->crtc->dev, fb, + &min_downscale, &max_upscale); /* * Convert to drm convention: 16.16 fixed point, instead of dc's * 1.0 == 1000. Also drm scaling is src/dst instead of dc's @@ -1044,7 +1139,7 @@ int dm_plane_helper_check_state(struct drm_plane_state *state, state, new_crtc_state, min_scale, max_scale, true, true); } -int fill_dc_scaling_info(struct amdgpu_device *adev, +int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev, const struct drm_plane_state *state, struct dc_scaling_info *scaling_info) { @@ -1068,8 +1163,8 @@ int fill_dc_scaling_info(struct amdgpu_device *adev, * is to gesture the YouTube Android app into full screen * on ChromeOS. 
*/ - if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || - (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) && + if (((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) || + (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) && (state->fb && state->fb->format->format == DRM_FORMAT_NV12 && (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))) return -EINVAL; @@ -1100,8 +1195,8 @@ int fill_dc_scaling_info(struct amdgpu_device *adev, /* Validate scaling per-format with DC plane caps */ if (state->plane && state->plane->dev && state->fb) { - get_min_max_dc_plane_scaling(state->plane->dev, state->fb, - &min_downscale, &max_upscale); + amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->plane->dev, state->fb, + &min_downscale, &max_upscale); } else { min_downscale = 250; max_upscale = 16000; @@ -1127,8 +1222,8 @@ int fill_dc_scaling_info(struct amdgpu_device *adev, return 0; } -static int dm_plane_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *state) +static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) { struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); @@ -1152,11 +1247,11 @@ static int dm_plane_atomic_check(struct drm_plane *plane, if (!new_crtc_state) return -EINVAL; - ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state); + ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state); if (ret) return ret; - ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info); + ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, new_plane_state, &scaling_info); if (ret) return ret; @@ -1166,20 +1261,35 @@ static int dm_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } -static int dm_plane_atomic_async_check(struct drm_plane *plane, - struct drm_atomic_state *state) +static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane, + struct drm_atomic_state *state, bool flip) { - /* Only support async updates on cursor planes. 
*/ - if (plane->type != DRM_PLANE_TYPE_CURSOR) + struct drm_crtc_state *new_crtc_state; + struct drm_plane_state *new_plane_state; + struct dm_crtc_state *dm_new_crtc_state; + + if (flip) { + if (plane->type != DRM_PLANE_TYPE_OVERLAY) + return -EINVAL; + } else if (plane->type != DRM_PLANE_TYPE_CURSOR) { + return -EINVAL; + } + + new_plane_state = drm_atomic_get_new_plane_state(state, plane); + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + /* Reject overlay cursors for now*/ + if (!flip && dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) return -EINVAL; return 0; } -static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, - struct dc_cursor_position *position) +int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, + struct dc_cursor_position *position) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = drm_to_adev(plane->dev); int x, y; int xorigin = 0, yorigin = 0; @@ -1211,16 +1321,18 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, y = 0; } position->enable = true; - position->translate_by_source = true; position->x = x; position->y = y; position->x_hotspot = xorigin; position->y_hotspot = yorigin; + if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(4, 0, 1)) + position->translate_by_source = true; + return 0; } -void handle_cursor_update(struct drm_plane *plane, +void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, struct drm_plane_state *old_plane_state) { struct amdgpu_device *adev = drm_to_adev(plane->dev); @@ -1236,13 +1348,11 @@ void handle_cursor_update(struct drm_plane *plane, if (!plane->state->fb && !old_plane_state->fb) return; - DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n", - __func__, - amdgpu_crtc->crtc_id, - plane->state->crtc_w, - plane->state->crtc_h); + drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n", + amdgpu_crtc->crtc_id, plane->state->crtc_w, + plane->state->crtc_h); - ret = get_cursor_position(plane, crtc, &position); + ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position); if (ret) return; @@ -1250,7 +1360,7 @@ void handle_cursor_update(struct drm_plane *plane, /* turn off cursor */ if (crtc_state && crtc_state->stream) { mutex_lock(&adev->dm.dc_lock); - dc_stream_set_cursor_position(crtc_state->stream, + dc_stream_program_cursor_position(crtc_state->stream, &position); mutex_unlock(&adev->dm.dc_lock); } @@ -1269,23 +1379,31 @@ void handle_cursor_update(struct drm_plane *plane, attributes.rotation_angle = 0; attributes.attribute_flags.value = 0; - attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; + /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM + * legacy gamma setup. 
+ */ + if (crtc_state->cm_is_degamma_srgb && + adev->dm.dc->caps.color.dpp.gamma_corr) + attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; + + if (afb) + attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; if (crtc_state->stream) { mutex_lock(&adev->dm.dc_lock); - if (!dc_stream_set_cursor_attributes(crtc_state->stream, + if (!dc_stream_program_cursor_attributes(crtc_state->stream, &attributes)) DRM_ERROR("DC failed to set cursor attributes\n"); - if (!dc_stream_set_cursor_position(crtc_state->stream, + if (!dc_stream_program_cursor_position(crtc_state->stream, &position)) DRM_ERROR("DC failed to set cursor position\n"); mutex_unlock(&adev->dm.dc_lock); } } -static void dm_plane_atomic_async_update(struct drm_plane *plane, - struct drm_atomic_state *state) +static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane, + struct drm_atomic_state *state) { struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); @@ -1305,18 +1423,42 @@ static void dm_plane_atomic_async_update(struct drm_plane *plane, plane->state->crtc_w = new_state->crtc_w; plane->state->crtc_h = new_state->crtc_h; - handle_cursor_update(plane, old_state); + amdgpu_dm_plane_handle_cursor_update(plane, old_state); +} + +static void amdgpu_dm_plane_panic_flush(struct drm_plane *plane) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane->state); + struct drm_framebuffer *fb = plane->state->fb; + struct dc_plane_state *dc_plane_state; + + if (!dm_plane_state || !dm_plane_state->dc_state) + return; + + dc_plane_state = dm_plane_state->dc_state; + + dc_plane_force_dcc_and_tiling_disable(dc_plane_state, fb->modifier ? true : false); } static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { - .prepare_fb = dm_plane_helper_prepare_fb, - .cleanup_fb = dm_plane_helper_cleanup_fb, - .atomic_check = dm_plane_atomic_check, - .atomic_async_check = dm_plane_atomic_async_check, - .atomic_async_update = dm_plane_atomic_async_update + .prepare_fb = amdgpu_dm_plane_helper_prepare_fb, + .cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb, + .atomic_check = amdgpu_dm_plane_atomic_check, + .atomic_async_check = amdgpu_dm_plane_atomic_async_check, + .atomic_async_update = amdgpu_dm_plane_atomic_async_update +}; + +static const struct drm_plane_helper_funcs dm_primary_plane_helper_funcs = { + .prepare_fb = amdgpu_dm_plane_helper_prepare_fb, + .cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb, + .atomic_check = amdgpu_dm_plane_atomic_check, + .atomic_async_check = amdgpu_dm_plane_atomic_async_check, + .atomic_async_update = amdgpu_dm_plane_atomic_async_update, + .get_scanout_buffer = amdgpu_display_get_scanout_buffer, + .panic_flush = amdgpu_dm_plane_panic_flush, }; -static void dm_drm_plane_reset(struct drm_plane *plane) +static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane) { struct dm_plane_state *amdgpu_state = NULL; @@ -1326,16 +1468,17 @@ static void dm_drm_plane_reset(struct drm_plane *plane) amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); WARN_ON(amdgpu_state == NULL); - if (amdgpu_state) - __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); -#ifdef CONFIG_DRM_AMD_DC_HDR - if (amdgpu_state) - amdgpu_state->sdr_boost = DEFAULT_SDR_BOOST; -#endif + if (!amdgpu_state) + return; + + __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); + amdgpu_state->degamma_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; + amdgpu_state->hdr_mult = AMDGPU_HDR_MULT_DEFAULT; + amdgpu_state->shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; + 
amdgpu_state->blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; } -static struct drm_plane_state * -dm_drm_plane_duplicate_state(struct drm_plane *plane) +static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane) { struct dm_plane_state *dm_plane_state, *old_dm_plane_state; @@ -1351,28 +1494,38 @@ dm_drm_plane_duplicate_state(struct drm_plane *plane) dc_plane_state_retain(dm_plane_state->dc_state); } -#ifdef CONFIG_DRM_AMD_DC_HDR - if (dm_plane_state->degamma_lut) - drm_property_blob_get(dm_plane_state->degamma_lut); - if (dm_plane_state->ctm) - drm_property_blob_get(dm_plane_state->ctm); - - dm_plane_state->sdr_boost = old_dm_plane_state->sdr_boost; -#endif + if (old_dm_plane_state->degamma_lut) + dm_plane_state->degamma_lut = + drm_property_blob_get(old_dm_plane_state->degamma_lut); + if (old_dm_plane_state->ctm) + dm_plane_state->ctm = + drm_property_blob_get(old_dm_plane_state->ctm); + if (old_dm_plane_state->shaper_lut) + dm_plane_state->shaper_lut = + drm_property_blob_get(old_dm_plane_state->shaper_lut); + if (old_dm_plane_state->lut3d) + dm_plane_state->lut3d = + drm_property_blob_get(old_dm_plane_state->lut3d); + if (old_dm_plane_state->blend_lut) + dm_plane_state->blend_lut = + drm_property_blob_get(old_dm_plane_state->blend_lut); + + dm_plane_state->degamma_tf = old_dm_plane_state->degamma_tf; + dm_plane_state->hdr_mult = old_dm_plane_state->hdr_mult; + dm_plane_state->shaper_tf = old_dm_plane_state->shaper_tf; + dm_plane_state->blend_tf = old_dm_plane_state->blend_tf; return &dm_plane_state->base; } -static bool dm_plane_format_mod_supported(struct drm_plane *plane, - uint32_t format, - uint64_t modifier) +static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane, + uint32_t format, + uint64_t modifier) { struct amdgpu_device *adev = drm_to_adev(plane->dev); const struct drm_format_info *info = drm_format_info(format); int i; - enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3; - if (!info) return false; @@ -1394,147 +1547,288 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane, if (i == plane->modifier_count) return false; - /* - * For D swizzle the canonical modifier depends on the bpp, so check - * it here. - */ - if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 && - adev->family >= AMDGPU_FAMILY_NV) { - if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4) - return false; - } + /* GFX12 doesn't have these limitations. */ + if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11) { + enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3; - if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D && - info->cpp[0] < 8) - return false; - - if (modifier_has_dcc(modifier)) { - /* Per radeonsi comments 16/64 bpp are more complicated. */ - if (info->cpp[0] != 4) - return false; - /* We support multi-planar formats, but not when combined with - * additional DCC metadata planes. + /* + * For D swizzle the canonical modifier depends on the bpp, so check + * it here. 
*/ - if (info->num_planes > 1) + if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 && + adev->family >= AMDGPU_FAMILY_NV) { + if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4) + return false; + } + + if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D && + info->cpp[0] < 8) return false; + + if (amdgpu_dm_plane_modifier_has_dcc(modifier)) { + /* Per radeonsi comments 16/64 bpp are more complicated. */ + if (info->cpp[0] != 4) + return false; + /* We support multi-planar formats, but not when combined with + * additional DCC metadata planes. + */ + if (info->num_planes > 1) + return false; + } } return true; } -static void dm_drm_plane_destroy_state(struct drm_plane *plane, - struct drm_plane_state *state) +static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) { struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); -#ifdef CONFIG_DRM_AMD_DC_HDR - drm_property_blob_put(dm_plane_state->degamma_lut); - drm_property_blob_put(dm_plane_state->ctm); -#endif + if (dm_plane_state->degamma_lut) + drm_property_blob_put(dm_plane_state->degamma_lut); + if (dm_plane_state->ctm) + drm_property_blob_put(dm_plane_state->ctm); + if (dm_plane_state->lut3d) + drm_property_blob_put(dm_plane_state->lut3d); + if (dm_plane_state->shaper_lut) + drm_property_blob_put(dm_plane_state->shaper_lut); + if (dm_plane_state->blend_lut) + drm_property_blob_put(dm_plane_state->blend_lut); + if (dm_plane_state->dc_state) dc_plane_state_release(dm_plane_state->dc_state); drm_atomic_helper_plane_destroy_state(plane, state); } -#ifdef CONFIG_DRM_AMD_DC_HDR -/* copied from drm_atomic_uapi.c */ -static int atomic_replace_property_blob_from_id(struct drm_device *dev, - struct drm_property_blob **blob, - uint64_t blob_id, - ssize_t expected_size, - ssize_t expected_elem_size, - bool *replaced) +#ifdef AMD_PRIVATE_COLOR +static void +dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm, + struct drm_plane *plane) { - struct drm_property_blob *new_blob = NULL; - - if (blob_id != 0) { - new_blob = drm_property_lookup_blob(dev, blob_id); - if (new_blob == NULL) - return -EINVAL; + struct amdgpu_mode_info mode_info = dm->adev->mode_info; + struct dpp_color_caps dpp_color_caps = dm->dc->caps.color.dpp; - if (expected_size > 0 && - new_blob->length != expected_size) { - drm_property_blob_put(new_blob); - return -EINVAL; - } - if (expected_elem_size > 0 && - new_blob->length % expected_elem_size != 0) { - drm_property_blob_put(new_blob); - return -EINVAL; - } + /* Check HW color pipeline capabilities on DPP block (pre-blending) + * before exposing related properties. + */ + if (dpp_color_caps.dgam_ram || dpp_color_caps.gamma_corr) { + drm_object_attach_property(&plane->base, + mode_info.plane_degamma_lut_property, + 0); + drm_object_attach_property(&plane->base, + mode_info.plane_degamma_lut_size_property, + MAX_COLOR_LUT_ENTRIES); + drm_object_attach_property(&plane->base, + dm->adev->mode_info.plane_degamma_tf_property, + AMDGPU_TRANSFER_FUNCTION_DEFAULT); + } + /* HDR MULT is always available */ + drm_object_attach_property(&plane->base, + dm->adev->mode_info.plane_hdr_mult_property, + AMDGPU_HDR_MULT_DEFAULT); + + /* Only enable plane CTM if both DPP and MPC gamut remap is available. 
*/ + if (dm->dc->caps.color.mpc.gamut_remap) + drm_object_attach_property(&plane->base, + dm->adev->mode_info.plane_ctm_property, 0); + + if (dpp_color_caps.hw_3d_lut || dm->dc->caps.color.mpc.preblend) { + drm_object_attach_property(&plane->base, + mode_info.plane_shaper_lut_property, 0); + drm_object_attach_property(&plane->base, + mode_info.plane_shaper_lut_size_property, + MAX_COLOR_LUT_ENTRIES); + drm_object_attach_property(&plane->base, + mode_info.plane_shaper_tf_property, + AMDGPU_TRANSFER_FUNCTION_DEFAULT); + drm_object_attach_property(&plane->base, + mode_info.plane_lut3d_property, 0); + drm_object_attach_property(&plane->base, + mode_info.plane_lut3d_size_property, + MAX_COLOR_3DLUT_SIZE); } - *replaced |= drm_property_replace_blob(blob, new_blob); - drm_property_blob_put(new_blob); - - return 0; + if (dpp_color_caps.ogam_ram) { + drm_object_attach_property(&plane->base, + mode_info.plane_blend_lut_property, 0); + drm_object_attach_property(&plane->base, + mode_info.plane_blend_lut_size_property, + MAX_COLOR_LUT_ENTRIES); + drm_object_attach_property(&plane->base, + mode_info.plane_blend_tf_property, + AMDGPU_TRANSFER_FUNCTION_DEFAULT); + } } -int dm_drm_plane_set_property(struct drm_plane *plane, - struct drm_plane_state *state, - struct drm_property *property, - uint64_t val) +static int +dm_atomic_plane_set_property(struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + uint64_t val) { - struct amdgpu_device *adev = drm_to_adev(plane->dev); struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); - int ret = 0; - bool replaced; - - if (property == adev->dm.degamma_lut_property) { - ret = atomic_replace_property_blob_from_id(adev_to_drm(adev), - &dm_plane_state->degamma_lut, - val, -1, sizeof(struct drm_color_lut), - &replaced); - } else if (property == adev->dm.ctm_property) { - ret = atomic_replace_property_blob_from_id(adev_to_drm(adev), - &dm_plane_state->ctm, - val, - sizeof(struct drm_color_ctm), -1, - &replaced); - } else if (property == adev->dm.sdr_boost_property) { - dm_plane_state->sdr_boost = val; + struct amdgpu_device *adev = drm_to_adev(plane->dev); + bool replaced = false; + int ret; + + if (property == adev->mode_info.plane_degamma_lut_property) { + ret = drm_property_replace_blob_from_id(plane->dev, + &dm_plane_state->degamma_lut, + val, -1, + sizeof(struct drm_color_lut), + &replaced); + dm_plane_state->base.color_mgmt_changed |= replaced; + return ret; + } else if (property == adev->mode_info.plane_degamma_tf_property) { + if (dm_plane_state->degamma_tf != val) { + dm_plane_state->degamma_tf = val; + dm_plane_state->base.color_mgmt_changed = 1; + } + } else if (property == adev->mode_info.plane_hdr_mult_property) { + if (dm_plane_state->hdr_mult != val) { + dm_plane_state->hdr_mult = val; + dm_plane_state->base.color_mgmt_changed = 1; + } + } else if (property == adev->mode_info.plane_ctm_property) { + ret = drm_property_replace_blob_from_id(plane->dev, + &dm_plane_state->ctm, + val, + sizeof(struct drm_color_ctm_3x4), -1, + &replaced); + dm_plane_state->base.color_mgmt_changed |= replaced; + return ret; + } else if (property == adev->mode_info.plane_shaper_lut_property) { + ret = drm_property_replace_blob_from_id(plane->dev, + &dm_plane_state->shaper_lut, + val, -1, + sizeof(struct drm_color_lut), + &replaced); + dm_plane_state->base.color_mgmt_changed |= replaced; + return ret; + } else if (property == adev->mode_info.plane_shaper_tf_property) { + if (dm_plane_state->shaper_tf != val) { + 
dm_plane_state->shaper_tf = val; + dm_plane_state->base.color_mgmt_changed = 1; + } + } else if (property == adev->mode_info.plane_lut3d_property) { + ret = drm_property_replace_blob_from_id(plane->dev, + &dm_plane_state->lut3d, + val, -1, + sizeof(struct drm_color_lut), + &replaced); + dm_plane_state->base.color_mgmt_changed |= replaced; + return ret; + } else if (property == adev->mode_info.plane_blend_lut_property) { + ret = drm_property_replace_blob_from_id(plane->dev, + &dm_plane_state->blend_lut, + val, -1, + sizeof(struct drm_color_lut), + &replaced); + dm_plane_state->base.color_mgmt_changed |= replaced; + return ret; + } else if (property == adev->mode_info.plane_blend_tf_property) { + if (dm_plane_state->blend_tf != val) { + dm_plane_state->blend_tf = val; + dm_plane_state->base.color_mgmt_changed = 1; + } } else { + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n", + plane->base.id, plane->name, + property->base.id, property->name); return -EINVAL; } - return ret; + return 0; } -int dm_drm_plane_get_property(struct drm_plane *plane, - const struct drm_plane_state *state, - struct drm_property *property, - uint64_t *val) +static int +dm_atomic_plane_get_property(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, + uint64_t *val) { struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); struct amdgpu_device *adev = drm_to_adev(plane->dev); - if (property == adev->dm.degamma_lut_property) { + if (property == adev->mode_info.plane_degamma_lut_property) { *val = (dm_plane_state->degamma_lut) ? dm_plane_state->degamma_lut->base.id : 0; - } else if (property == adev->dm.ctm_property) { - *val = (dm_plane_state->ctm) ? dm_plane_state->ctm->base.id : 0; - } else if (property == adev->dm.sdr_boost_property) { - *val = dm_plane_state->sdr_boost; + } else if (property == adev->mode_info.plane_degamma_tf_property) { + *val = dm_plane_state->degamma_tf; + } else if (property == adev->mode_info.plane_hdr_mult_property) { + *val = dm_plane_state->hdr_mult; + } else if (property == adev->mode_info.plane_ctm_property) { + *val = (dm_plane_state->ctm) ? + dm_plane_state->ctm->base.id : 0; + } else if (property == adev->mode_info.plane_shaper_lut_property) { + *val = (dm_plane_state->shaper_lut) ? + dm_plane_state->shaper_lut->base.id : 0; + } else if (property == adev->mode_info.plane_shaper_tf_property) { + *val = dm_plane_state->shaper_tf; + } else if (property == adev->mode_info.plane_lut3d_property) { + *val = (dm_plane_state->lut3d) ? + dm_plane_state->lut3d->base.id : 0; + } else if (property == adev->mode_info.plane_blend_lut_property) { + *val = (dm_plane_state->blend_lut) ? 
+ dm_plane_state->blend_lut->base.id : 0; + } else if (property == adev->mode_info.plane_blend_tf_property) { + *val = dm_plane_state->blend_tf; + } else { return -EINVAL; } return 0; } +#else + +#define MAX_COLOR_PIPELINES 5 + +static int +dm_plane_init_colorops(struct drm_plane *plane) +{ + struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES]; + struct drm_device *dev = plane->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct dc *dc = adev->dm.dc; + int len = 0; + int ret; + + if (plane->type == DRM_PLANE_TYPE_CURSOR) + return 0; + + /* initialize pipeline */ + if (dc->ctx->dce_version >= DCN_VERSION_3_0) { + ret = amdgpu_dm_initialize_default_pipeline(plane, &pipelines[len]); + if (ret) { + drm_err(plane->dev, "Failed to create color pipeline for plane %d: %d\n", + plane->base.id, ret); + return ret; + } + len++; + + /* Create COLOR_PIPELINE property and attach */ + drm_plane_create_color_pipeline_property(plane, pipelines, len); + } + + return 0; +} #endif static const struct drm_plane_funcs dm_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_helper_destroy, - .reset = dm_drm_plane_reset, - .atomic_duplicate_state = dm_drm_plane_duplicate_state, - .atomic_destroy_state = dm_drm_plane_destroy_state, - .format_mod_supported = dm_plane_format_mod_supported, -#ifdef CONFIG_DRM_AMD_DC_HDR - .atomic_set_property = dm_drm_plane_set_property, - .atomic_get_property = dm_drm_plane_get_property, + .reset = amdgpu_dm_plane_drm_plane_reset, + .atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state, + .atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state, + .format_mod_supported = amdgpu_dm_plane_format_mod_supported, +#ifdef AMD_PRIVATE_COLOR + .atomic_set_property = dm_atomic_plane_set_property, + .atomic_get_property = dm_atomic_plane_get_property, #endif }; @@ -1548,11 +1842,12 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, int res = -EPERM; unsigned int supported_rotations; uint64_t *modifiers = NULL; + unsigned int primary_zpos = dm->dc->caps.max_slave_planes; - num_formats = get_plane_formats(plane, plane_cap, formats, - ARRAY_SIZE(formats)); + num_formats = amdgpu_dm_plane_get_plane_formats(plane, plane_cap, formats, + ARRAY_SIZE(formats)); - res = get_plane_modifiers(dm->adev, plane->type, &modifiers); + res = amdgpu_dm_plane_get_plane_modifiers(dm->adev, plane->type, &modifiers); if (res) return res; @@ -1576,6 +1871,24 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, drm_plane_create_blend_mode_property(plane, blend_caps); } + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + /* + * Allow OVERLAY planes to be used as underlays by assigning an + * immutable zpos = # of OVERLAY planes to the PRIMARY plane. + */ + drm_plane_create_zpos_immutable_property(plane, primary_zpos); + } else if (plane->type == DRM_PLANE_TYPE_OVERLAY) { + /* + * OVERLAY planes can be below or above the PRIMARY, but cannot + * be above the CURSOR plane. 
+ */ + unsigned int zpos = primary_zpos + 1 + drm_plane_index(plane); + + drm_plane_create_zpos_property(plane, zpos, 0, 254); + } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { + drm_plane_create_zpos_immutable_property(plane, 255); + } + if (plane->type == DRM_PLANE_TYPE_PRIMARY && plane_cap && (plane_cap->pixel_format_support.nv12 || @@ -1600,11 +1913,23 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, supported_rotations); - drm_plane_helper_add(plane, &dm_plane_helper_funcs); + if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) > IP_VERSION(3, 0, 1) && + plane->type != DRM_PLANE_TYPE_CURSOR) + drm_plane_enable_fb_damage_clips(plane); -#ifdef CONFIG_DRM_AMD_DC_HDR - attach_color_mgmt_properties(dm, plane); + if (plane->type == DRM_PLANE_TYPE_PRIMARY) + drm_plane_helper_add(plane, &dm_primary_plane_helper_funcs); + else + drm_plane_helper_add(plane, &dm_plane_helper_funcs); + +#ifdef AMD_PRIVATE_COLOR + dm_atomic_plane_attach_color_mgmt_properties(dm, plane); +#else + res = dm_plane_init_colorops(plane); + if (res) + return res; #endif + /* Create (reset) the plane state */ if (plane->funcs->reset) plane->funcs->reset(plane); @@ -1612,3 +1937,14 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, return 0; } +bool amdgpu_dm_plane_is_video_format(uint32_t format) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(video_formats); i++) + if (format == video_formats[i]) + return true; + + return false; +} + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h index 286981a2dd40..ea2619b507db 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h @@ -29,37 +29,40 @@ #include "dc.h" -void handle_cursor_update(struct drm_plane *plane, +int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, + struct dc_cursor_position *position); + +void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, struct drm_plane_state *old_plane_state); -int fill_dc_scaling_info(struct amdgpu_device *adev, +int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev, const struct drm_plane_state *state, struct dc_scaling_info *scaling_info); -int dm_plane_helper_check_state(struct drm_plane_state *state, +int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state, struct drm_crtc_state *new_crtc_state); -int fill_plane_buffer_attributes(struct amdgpu_device *adev, +int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev, const struct amdgpu_framebuffer *afb, const enum surface_pixel_format format, const enum dc_rotation_angle rotation, const uint64_t tiling_flags, - union dc_tiling_info *tiling_info, + struct dc_tiling_info *tiling_info, struct plane_size *plane_size, struct dc_plane_dcc_param *dcc, struct dc_plane_address *address, - bool tmz_surface, - bool force_disable_dcc); + bool tmz_surface); int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, struct drm_plane *plane, unsigned long possible_crtcs, const struct dc_plane_cap *plane_cap); -const struct drm_format_info *amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd); +const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier); -void fill_blending_from_plane_state(const struct drm_plane_state *plane_state, +void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state, bool 
*per_pixel_alpha, bool *pre_multiplied_alpha, bool *global_alpha, int *global_alpha_value); +bool amdgpu_dm_plane_is_video_format(uint32_t format); #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 75284e2cec74..11b2ea6edf95 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2018 Advanced Micro Devices, Inc. * @@ -97,6 +98,7 @@ bool dm_pp_apply_display_requirements( const struct dm_pp_single_disp_config *dc_cfg = &pp_display_cfg->disp_configs[i]; adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1; + adev->pm.pm_display_cfg.displays[i].pixel_clock = dc_cfg->pixel_clock; } amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg); @@ -334,7 +336,8 @@ bool dm_pp_get_clock_levels_by_type( if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) { /* This clock is higher the validation clock. * Than means the previous one is the highest - * non-boosted one. */ + * non-boosted one. + */ DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n", dc_clks->num_levels, i); dc_clks->num_levels = i > 0 ? i : 1; @@ -406,10 +409,10 @@ bool dm_pp_notify_wm_clock_changes( * TODO: expand this to other ASICs */ if ((adev->asic_type >= CHIP_POLARIS10) && - (adev->asic_type <= CHIP_VEGAM) && - !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, - (void *)wm_with_clock_ranges)) - return true; + (adev->asic_type <= CHIP_VEGAM) && + !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, + (void *)wm_with_clock_ranges)) + return true; return false; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index 26291db0a3cf..fd491b7a3cd7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MIT /* * Copyright 2021 Advanced Micro Devices, Inc. 
* @@ -24,8 +25,8 @@ */ #include "amdgpu_dm_psr.h" +#include "dc_dmub_srv.h" #include "dc.h" -#include "dm_helpers.h" #include "amdgpu_dm.h" #include "modules/power/power_helpers.h" @@ -50,7 +51,11 @@ static bool link_supports_psrsu(struct dc_link *link) !link->dpcd_caps.psr_info.psr2_su_y_granularity_cap) return false; - return true; + if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU) + return false; + + /* Temporarily disable PSR-SU to avoid glitches */ + return false; } /* @@ -82,14 +87,6 @@ void amdgpu_dm_set_psr_caps(struct dc_link *link) link->psr_settings.psr_feature_enabled = true; } - - DRM_INFO("PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n", - link->psr_settings.psr_feature_enabled, - link->psr_settings.psr_version, - link->dpcd_caps.psr_info.psr_version, - link->dpcd_caps.psr_info.psr_dpcd_caps.raw, - link->dpcd_caps.psr_info.psr2_su_y_granularity_cap); - } /* @@ -122,6 +119,11 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) psr_config.allow_multi_disp_optimizations = (amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT); + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { + if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config)) + return false; + } + ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); } @@ -134,9 +136,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) * amdgpu_dm_psr_enable() - enable psr f/w * @stream: stream state * - * Return: true if success */ -bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) +void amdgpu_dm_psr_enable(struct dc_stream_state *stream) { struct dc_link *link = stream->link; unsigned int vsync_rate_hz = 0; @@ -152,7 +153,7 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) DRM_DEBUG_DRIVER("Enabling psr...\n"); vsync_rate_hz = div64_u64(div64_u64(( - stream->timing.pix_clk_100hz * 100), + stream->timing.pix_clk_100hz * (uint64_t)100), stream->timing.v_total), stream->timing.h_total); @@ -162,6 +163,7 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) */ if (vsync_rate_hz != 0) { unsigned int frame_time_microsec = 1000000 / vsync_rate_hz; + num_frames_static = (30000 / frame_time_microsec) + 1; } @@ -182,7 +184,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1) power_opt |= psr_power_opt_z10_static_screen; - return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt); + dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt); + + if (link->ctx->dc->caps.ips_support) + dc_allow_idle_optimizations(link->ctx->dc, true); } /* @@ -191,18 +196,17 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) * * Return: true if success */ -bool amdgpu_dm_psr_disable(struct dc_stream_state *stream) +bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait) { - unsigned int power_opt = 0; bool psr_enable = false; DRM_DEBUG_DRIVER("Disabling psr...\n"); - return dc_link_set_psr_allow_active(stream->link, &psr_enable, true, false, &power_opt); + return dc_link_set_psr_allow_active(stream->link, &psr_enable, wait, false, NULL); } /* - * amdgpu_dm_psr_disable() - disable psr f/w + * amdgpu_dm_psr_disable_all() - disable psr f/w for all streams * if psr is enabled on any stream * * Return: true if success @@ -213,3 +217,61 @@ bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm) return dc_set_psr_allow_active(dm->dc, false); } +/* + * amdgpu_dm_psr_is_active_allowed() - check if psr is allowed on 
any stream + * @dm: pointer to amdgpu_display_manager + * + * Return: true if allowed + */ + +bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm) +{ + unsigned int i; + bool allow_active = false; + + for (i = 0; i < dm->dc->current_state->stream_count ; i++) { + struct dc_link *link; + struct dc_stream_state *stream = dm->dc->current_state->streams[i]; + + link = stream->link; + if (!link) + continue; + if (link->psr_settings.psr_feature_enabled && + link->psr_settings.psr_allow_active) { + allow_active = true; + break; + } + } + + return allow_active; +} + +/** + * amdgpu_dm_psr_wait_disable() - Wait for eDP panel to exit PSR + * @stream: stream state attached to the eDP link + * + * Waits for a max of 500ms for the eDP panel to exit PSR. + * + * Return: true if panel exited PSR, false otherwise. + */ +bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream) +{ + enum dc_psr_state psr_state = PSR_STATE0; + struct dc_link *link = stream->link; + int retry_count; + + if (link == NULL) + return false; + + for (retry_count = 0; retry_count <= 1000; retry_count++) { + dc_link_get_psr_state(link, &psr_state); + if (psr_state == PSR_STATE0) + break; + udelay(500); + } + + if (retry_count == 1000) + return false; + + return true; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h index 6806b3c9c84b..4fb8626913cf 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2021 Advanced Micro Devices, Inc. * @@ -32,9 +33,11 @@ #define AMDGPU_DM_PSR_ENTRY_DELAY 5 void amdgpu_dm_set_psr_caps(struct dc_link *link); -bool amdgpu_dm_psr_enable(struct dc_stream_state *stream); +void amdgpu_dm_psr_enable(struct dc_stream_state *stream); bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream); -bool amdgpu_dm_psr_disable(struct dc_stream_state *stream); +bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait); bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm); +bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm); +bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream); #endif /* AMDGPU_DM_AMDGPU_DM_PSR_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c new file mode 100644 index 000000000000..1da07ebf9217 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <linux/dmi.h> + +#include "amdgpu.h" +#include "amdgpu_dm.h" + +struct amdgpu_dm_quirks { + bool aux_hpd_discon; + bool support_edp0_on_dp1; +}; + +static struct amdgpu_dm_quirks quirk_entries = { + .aux_hpd_discon = false, + .support_edp0_on_dp1 = false +}; + +static int edp0_on_dp1_callback(const struct dmi_system_id *id) +{ + quirk_entries.support_edp0_on_dp1 = true; + return 0; +} + +static int aux_hpd_discon_callback(const struct dmi_system_id *id) +{ + quirk_entries.aux_hpd_discon = true; + return 0; +} + +static const struct dmi_system_id dmi_quirk_table[] = { + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"), + }, + }, + {} + /* TODO: refactor this from a fixed table to a dynamic option */ +}; + +void retrieve_dmi_info(struct amdgpu_display_manager *dm) +{ + struct drm_device *dev = dm->ddev; + int dmi_id; + + dm->aux_hpd_discon_quirk = false; + dm->edp0_on_dp1_quirk = false; + + dmi_id = dmi_check_system(dmi_quirk_table); + + if 
(!dmi_id) + return; + + if (quirk_entries.aux_hpd_discon) { + dm->aux_hpd_discon_quirk = true; + drm_info(dev, "aux_hpd_discon_quirk attached\n"); + } + if (quirk_entries.support_edp0_on_dp1) { + dm->edp0_on_dp1_quirk = true; + drm_info(dev, "support_edp0_on_dp1 attached\n"); + } +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c new file mode 100644 index 000000000000..da94e3544b65 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "amdgpu_dm_replay.h" +#include "dc_dmub_srv.h" +#include "dc.h" +#include "dm_helpers.h" +#include "amdgpu_dm.h" +#include "modules/power/power_helpers.h" +#include "dmub/inc/dmub_cmd.h" +#include "dc/inc/link_service.h" + +/* + * amdgpu_dm_link_supports_replay() - check if the link supports replay + * @link: link + * @aconnector: aconnector + * + */ +bool amdgpu_dm_link_supports_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector) +{ + struct dm_connector_state *state = to_dm_connector_state(aconnector->base.state); + struct dpcd_caps *dpcd_caps = &link->dpcd_caps; + struct adaptive_sync_caps *as_caps = &link->dpcd_caps.adaptive_sync_caps; + + if (!state->freesync_capable) + return false; + + if (!aconnector->vsdb_info.replay_mode) + return false; + + // Check the eDP version + if (dpcd_caps->edp_rev < EDP_REVISION_13) + return false; + + if (!dpcd_caps->alpm_caps.bits.AUX_WAKE_ALPM_CAP) + return false; + + // Check adaptive sync support cap + if (!as_caps->dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT) + return false; + + // Sink shall populate line deviation information + if (dpcd_caps->pr_info.pixel_deviation_per_line == 0 || + dpcd_caps->pr_info.max_deviation_line == 0) + return false; + + return true; +} + +/* + * amdgpu_dm_set_replay_caps() - setup Replay capabilities + * @link: link + * @aconnector: aconnector + * + */ +bool amdgpu_dm_set_replay_caps(struct dc_link *link, struct amdgpu_dm_connector *aconnector) +{ + struct replay_config pr_config = { 0 }; + union replay_debug_flags *debug_flags = NULL; + struct dc *dc = link->ctx->dc; + + // If Replay is already set to support, return true to skip checks + if (link->replay_settings.config.replay_supported) + return true; + + if 
(!dc_is_embedded_signal(link->connector_signal)) + return false; + + if (link->panel_config.psr.disallow_replay) + return false; + + if (!amdgpu_dm_link_supports_replay(link, aconnector)) + return false; + + if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub || + !dc->ctx->dmub_srv->dmub->feature_caps.replay_supported) + return false; + + // Mark Replay is supported in pr_config + pr_config.replay_supported = true; + + debug_flags = (union replay_debug_flags *)&pr_config.debug_flags; + debug_flags->u32All = 0; + debug_flags->bitfields.visual_confirm = + link->ctx->dc->debug.visual_confirm == VISUAL_CONFIRM_REPLAY; + + init_replay_config(link, &pr_config); + + return true; +} + +/* + * amdgpu_dm_link_setup_replay() - configure replay link + * @link: link + * @aconnector: aconnector + * + */ +bool amdgpu_dm_link_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector) +{ + struct replay_config *pr_config; + + if (link == NULL || aconnector == NULL) + return false; + + pr_config = &link->replay_settings.config; + + if (!pr_config->replay_supported) + return false; + + pr_config->replay_power_opt_supported = 0x11; + pr_config->replay_smu_opt_supported = false; + pr_config->replay_enable_option |= pr_enable_option_static_screen; + pr_config->replay_support_fast_resync_in_ultra_sleep_mode = aconnector->max_vfreq >= 2 * aconnector->min_vfreq; + pr_config->replay_timing_sync_supported = false; + + if (!pr_config->replay_timing_sync_supported) + pr_config->replay_enable_option &= ~pr_enable_option_general_ui; + + link->replay_settings.replay_feature_enabled = true; + + return true; +} + +/* + * amdgpu_dm_replay_enable() - enable replay f/w + * @stream: stream state + * + * Return: true if success + */ +bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait) +{ + bool replay_active = true; + struct dc_link *link = NULL; + + if (stream == NULL) + return false; + + link = stream->link; + + if (link) { + link->dc->link_srv->edp_setup_replay(link, stream); + link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total, 0); + DRM_DEBUG_DRIVER("Enabling replay...\n"); + link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, wait, false, NULL); + return true; + } + + return false; +} + +/* + * amdgpu_dm_replay_disable() - disable replay f/w + * @stream: stream state + * + * Return: true if success + */ +bool amdgpu_dm_replay_disable(struct dc_stream_state *stream) +{ + bool replay_active = false; + struct dc_link *link = NULL; + + if (stream == NULL) + return false; + + link = stream->link; + + if (link) { + DRM_DEBUG_DRIVER("Disabling replay...\n"); + link->dc->link_srv->edp_set_replay_allow_active(stream->link, &replay_active, true, false, NULL); + return true; + } + + return false; +} + +/* + * amdgpu_dm_replay_disable_all() - disable replay f/w + * if replay is enabled on any stream + * + * Return: true if success + */ +bool amdgpu_dm_replay_disable_all(struct amdgpu_display_manager *dm) +{ + DRM_DEBUG_DRIVER("Disabling replay if replay is enabled on any stream\n"); + return dc_set_replay_allow_active(dm->dc, false); +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h new file mode 100644 index 000000000000..73b6c67ae5e7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef AMDGPU_DM_AMDGPU_DM_REPLAY_H_ +#define AMDGPU_DM_AMDGPU_DM_REPLAY_H_ + +#include "amdgpu.h" + +enum replay_enable_option { + pr_enable_option_static_screen = 0x1, + pr_enable_option_mpo_video = 0x2, + pr_enable_option_full_screen_video = 0x4, + pr_enable_option_general_ui = 0x8, + pr_enable_option_static_screen_coasting = 0x10000, + pr_enable_option_mpo_video_coasting = 0x20000, + pr_enable_option_full_screen_video_coasting = 0x40000, +}; + +bool amdgpu_dm_link_supports_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector); +bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool enable); +bool amdgpu_dm_set_replay_caps(struct dc_link *link, struct amdgpu_dm_connector *aconnector); +bool amdgpu_dm_link_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector); +bool amdgpu_dm_replay_disable(struct dc_stream_state *stream); +bool amdgpu_dm_replay_disable_all(struct amdgpu_display_manager *dm); + +#endif /* AMDGPU_DM_AMDGPU_DM_REPLAY_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c index d9e33c6bccd9..8550d5e8b753 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c @@ -1,3 +1,4 @@ +//SPDX-License-Identifier: MIT /* * Copyright 2015 Advanced Micro Devices, Inc. * @@ -52,4 +53,12 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc func_name, line); } +void dm_trace_smu_enter(uint32_t msg_id, uint32_t param_in, unsigned int delay, struct dc_context *ctx) +{ +} + +void dm_trace_smu_exit(bool success, uint32_t response, struct dc_context *ctx) +{ +} + /**** power component interfaces ****/ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h index d3bc9dc21771..aa56fd6d56c3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h @@ -1,3 +1,4 @@ +//SPDX-License-Identifier: MIT /* * Copyright 2018 Advanced Micro Devices, Inc. 
* @@ -37,6 +38,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_encoder.h> #include <drm/drm_atomic.h> +#include "dc/inc/hw/optc.h" #include "dc/inc/core_types.h" @@ -86,7 +88,7 @@ TRACE_EVENT(amdgpu_dc_performance, __entry->writes = write_count; __entry->read_delta = read_count - *last_read; __entry->write_delta = write_count - *last_write; - __assign_str(func, func); + __assign_str(func); __entry->line = line; *last_read = read_count; *last_write = write_count; @@ -662,6 +664,95 @@ TRACE_EVENT(dcn_fpu, ) ); +TRACE_EVENT(dcn_optc_lock_unlock_state, + TP_PROTO(const struct optc *optc_state, int instance, bool lock, const char *function, const int line), + TP_ARGS(optc_state, instance, lock, function, line), + + TP_STRUCT__entry( + __field(const char *, function) + __field(int, instance) + __field(bool, lock) + __field(int, line) + __field(int, opp_count) + __field(int, max_h_total) + __field(int, max_v_total) + __field(int, min_h_blank) + __field(int, min_h_sync_width) + __field(int, min_v_sync_width) + __field(int, min_v_blank) + __field(int, min_v_blank_interlace) + __field(int, vstartup_start) + __field(int, vupdate_offset) + __field(int, vupdate_width) + __field(int, vready_offset) + ), + TP_fast_assign( + __entry->function = function; + __entry->instance = instance; + __entry->lock = lock; + __entry->line = line; + __entry->opp_count = optc_state->opp_count; + __entry->max_h_total = optc_state->max_h_total; + __entry->max_v_total = optc_state->max_v_total; + __entry->min_h_blank = optc_state->min_h_blank; + __entry->min_h_sync_width = optc_state->min_h_sync_width; + __entry->min_v_sync_width = optc_state->min_v_sync_width; + __entry->min_v_blank = optc_state->min_v_blank; + __entry->min_v_blank_interlace = optc_state->min_v_blank_interlace; + __entry->vstartup_start = optc_state->vstartup_start; + __entry->vupdate_offset = optc_state->vupdate_offset; + __entry->vupdate_width = optc_state->vupdate_width; + __entry->vready_offset = optc_state->vupdate_offset; + ), + TP_printk("%s: %s()+%d: optc_instance=%d opp_count=%d max_h_total=%d max_v_total=%d " + "min_h_blank=%d min_h_sync_width=%d min_v_sync_width=%d min_v_blank=%d " + "min_v_blank_interlace=%d vstartup_start=%d vupdate_offset=%d vupdate_width=%d " + "vready_offset=%d", + __entry->lock ? "Lock" : "Unlock", + __entry->function, + __entry->line, + __entry->instance, + __entry->opp_count, + __entry->max_h_total, + __entry->max_v_total, + __entry->min_h_blank, + __entry->min_h_sync_width, + __entry->min_v_sync_width, + __entry->min_v_blank, + __entry->min_v_blank_interlace, + __entry->vstartup_start, + __entry->vupdate_offset, + __entry->vupdate_width, + __entry->vready_offset + ) +); + +TRACE_EVENT(amdgpu_dm_brightness, + TP_PROTO(void *function, u32 user_brightness, u32 converted_brightness, bool aux, bool ac), + TP_ARGS(function, user_brightness, converted_brightness, aux, ac), + TP_STRUCT__entry( + __field(void *, function) + __field(u32, user_brightness) + __field(u32, converted_brightness) + __field(bool, aux) + __field(bool, ac) + ), + TP_fast_assign( + __entry->function = function; + __entry->user_brightness = user_brightness; + __entry->converted_brightness = converted_brightness; + __entry->aux = aux; + __entry->ac = ac; + ), + TP_printk("%ps: brightness requested=%u converted=%u aux=%s power=%s", + (void *)__entry->function, + (u32)__entry->user_brightness, + (u32)__entry->converted_brightness, + (__entry->aux) ? "true" : "false", + (__entry->ac) ? 
"AC" : "DC" + ) +); + #endif /* _AMDGPU_DM_TRACE_H_ */ #undef TRACE_INCLUDE_PATH diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c new file mode 100644 index 000000000000..d9527c05fc87 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services_types.h" + +#include "amdgpu.h" +#include "amdgpu_dm.h" +#include "amdgpu_dm_wb.h" +#include "amdgpu_display.h" +#include "dc.h" + +#include <drm/drm_edid.h> +#include <drm/drm_atomic_state_helper.h> +#include <drm/drm_modeset_helper_vtables.h> + +static const u32 amdgpu_dm_wb_formats[] = { + DRM_FORMAT_XRGB2101010, +}; + +static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct drm_framebuffer *fb; + const struct drm_display_mode *mode = &crtc_state->mode; + bool found = false; + uint8_t i; + + if (!conn_state->writeback_job || !conn_state->writeback_job->fb) + return 0; + + fb = conn_state->writeback_job->fb; + if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) { + DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n", + fb->width, fb->height); + return -EINVAL; + } + + for (i = 0; i < sizeof(amdgpu_dm_wb_formats) / sizeof(u32); i++) { + if (fb->format->format == amdgpu_dm_wb_formats[i]) + found = true; + } + + if (!found) { + DRM_DEBUG_KMS("Invalid pixel format %p4cc\n", + &fb->format->format); + return -EINVAL; + } + + return 0; +} + + +static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector) +{ + /* Maximum resolution supported by DWB */ + return drm_add_modes_noedid(connector, 3840, 2160); +} + +static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector, + struct drm_writeback_job *job) +{ + struct amdgpu_framebuffer *afb; + struct drm_gem_object *obj; + struct amdgpu_device *adev; + struct amdgpu_bo *rbo; + uint32_t domain; + int r; + + if (!job->fb) { + DRM_DEBUG_KMS("No FB bound\n"); + return 0; + } + + afb = to_amdgpu_framebuffer(job->fb); + obj = job->fb->obj[0]; + rbo = gem_to_amdgpu_bo(obj); + adev = amdgpu_ttm_adev(rbo->tbo.bdev); + + r = amdgpu_bo_reserve(rbo, true); + if (r) { + drm_err(adev_to_drm(adev), "fail to reserve bo (%d)\n", r); + return r; + } + + r = 
dma_resv_reserve_fences(rbo->tbo.base.resv, 1); + if (r) { + drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r); + goto error_unlock; + } + + domain = amdgpu_display_supported_domains(adev, rbo->flags); + + rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; + r = amdgpu_bo_pin(rbo, domain); + if (unlikely(r != 0)) { + if (r != -ERESTARTSYS) + DRM_ERROR("Failed to pin framebuffer with error %d\n", r); + goto error_unlock; + } + + r = amdgpu_ttm_alloc_gart(&rbo->tbo); + if (unlikely(r != 0)) { + DRM_ERROR("%p bind failed\n", rbo); + goto error_unpin; + } + + amdgpu_bo_unreserve(rbo); + + afb->address = amdgpu_bo_gpu_offset(rbo); + + amdgpu_bo_ref(rbo); + + return 0; + +error_unpin: + amdgpu_bo_unpin(rbo); + +error_unlock: + amdgpu_bo_unreserve(rbo); + return r; +} + +static void amdgpu_dm_wb_cleanup_job(struct drm_writeback_connector *connector, + struct drm_writeback_job *job) +{ + struct amdgpu_bo *rbo; + int r; + + if (!job->fb) + return; + + rbo = gem_to_amdgpu_bo(job->fb->obj[0]); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r)) { + DRM_ERROR("failed to reserve rbo before unpin\n"); + return; + } + + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + amdgpu_bo_unref(&rbo); +} + +static const struct drm_encoder_helper_funcs amdgpu_dm_wb_encoder_helper_funcs = { + .atomic_check = amdgpu_dm_wb_encoder_atomic_check, +}; + +static const struct drm_connector_funcs amdgpu_dm_wb_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = amdgpu_dm_connector_funcs_reset, + .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_connector_helper_funcs amdgpu_dm_wb_conn_helper_funcs = { + .get_modes = amdgpu_dm_wb_connector_get_modes, + .prepare_writeback_job = amdgpu_dm_wb_prepare_job, + .cleanup_writeback_job = amdgpu_dm_wb_cleanup_job, +}; + +int amdgpu_dm_wb_connector_init(struct amdgpu_display_manager *dm, + struct amdgpu_dm_wb_connector *wbcon, + uint32_t link_index) +{ + struct dc *dc = dm->dc; + struct dc_link *link = dc_get_link_at_index(dc, link_index); + int res = 0; + + wbcon->link = link; + + drm_connector_helper_add(&wbcon->base.base, &amdgpu_dm_wb_conn_helper_funcs); + + res = drm_writeback_connector_init(&dm->adev->ddev, &wbcon->base, + &amdgpu_dm_wb_connector_funcs, + &amdgpu_dm_wb_encoder_helper_funcs, + amdgpu_dm_wb_formats, + ARRAY_SIZE(amdgpu_dm_wb_formats), + amdgpu_dm_get_encoder_crtc_mask(dm->adev)); + + if (res) + return res; + /* + * Some of the properties below require access to state, like bpc. + * Allocate some default initial connector state with our reset helper. + */ + if (wbcon->base.base.funcs->reset) + wbcon->base.base.funcs->reset(&wbcon->base.base); + + return 0; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h new file mode 100644 index 000000000000..13d31c857dee --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_WB_H__ +#define __AMDGPU_DM_WB_H__ + +#include <drm/drm_writeback.h> + +int amdgpu_dm_wb_connector_init(struct amdgpu_display_manager *dm, + struct amdgpu_dm_wb_connector *dm_wbcon, + uint32_t link_index); + +#endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c index ab0c6d191038..e46f8ce41d87 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c @@ -26,12 +26,7 @@ #include "dc_trace.h" -#if defined(CONFIG_X86) -#include <asm/fpu/api.h> -#elif defined(CONFIG_PPC64) -#include <asm/switch_to.h> -#include <asm/cputable.h> -#endif +#include <linux/fpu.h> /** * DOC: DC FPU manipulation overview @@ -56,11 +51,9 @@ static DEFINE_PER_CPU(int, fpu_recursion_depth); */ inline void dc_assert_fp_enabled(void) { - int *pcpu, depth = 0; + int depth; - pcpu = get_cpu_ptr(&fpu_recursion_depth); - depth = *pcpu; - put_cpu_ptr(&fpu_recursion_depth); + depth = __this_cpu_read(fpu_recursion_depth); ASSERT(depth >= 1); } @@ -80,30 +73,17 @@ inline void dc_assert_fp_enabled(void) */ void dc_fpu_begin(const char *function_name, const int line) { - int *pcpu; + int depth; - pcpu = get_cpu_ptr(&fpu_recursion_depth); - *pcpu += 1; - - if (*pcpu == 1) { -#if defined(CONFIG_X86) + WARN_ON_ONCE(!in_task()); + preempt_disable(); + depth = __this_cpu_inc_return(fpu_recursion_depth); + if (depth == 1) { + BUG_ON(!kernel_fpu_available()); kernel_fpu_begin(); -#elif defined(CONFIG_PPC64) - if (cpu_has_feature(CPU_FTR_VSX_COMP)) { - preempt_disable(); - enable_kernel_vsx(); - } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { - preempt_disable(); - enable_kernel_altivec(); - } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { - preempt_disable(); - enable_kernel_fp(); - } -#endif } - TRACE_DCN_FPU(true, function_name, line, *pcpu); - put_cpu_ptr(&fpu_recursion_depth); + TRACE_DCN_FPU(true, function_name, line, depth); } /** @@ -118,27 +98,15 @@ void dc_fpu_begin(const char *function_name, const int line) */ void dc_fpu_end(const char *function_name, const int line) { - int *pcpu; + int depth; - pcpu = get_cpu_ptr(&fpu_recursion_depth); - *pcpu -= 1; - if (*pcpu <= 0) { -#if defined(CONFIG_X86) + depth = __this_cpu_dec_return(fpu_recursion_depth); + if (depth == 0) { kernel_fpu_end(); -#elif defined(CONFIG_PPC64) - if (cpu_has_feature(CPU_FTR_VSX_COMP)) { - 
disable_kernel_vsx();
- preempt_enable();
- } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
- disable_kernel_altivec();
- preempt_enable();
- } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
- disable_kernel_fp();
- preempt_enable();
- }
-#endif
+ } else {
+ WARN_ON_ONCE(depth < 0);
 }
- TRACE_DCN_FPU(false, function_name, line, *pcpu);
- put_cpu_ptr(&fpu_recursion_depth);
+ TRACE_DCN_FPU(false, function_name, line, depth);
+ preempt_enable();
 }
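
For context, the dc_fpu.c rework above replaces the per-architecture FPU plumbing with the generic kernel_fpu_begin()/kernel_fpu_end() API guarded by a per-CPU recursion counter, so nested begin/end pairs only touch the hardware FPU state at the outermost level. The sketch below is hypothetical and not part of this patch: DC_FP_START()/DC_FP_END() are assumed to be DC's existing wrappers around dc_fpu_begin(__func__, __LINE__) and dc_fpu_end(__func__, __LINE__) (as in dc/os_types.h), and the dcn_calc_* functions are invented purely to illustrate the nesting behaviour.

/*
 * Hypothetical usage sketch (not part of this patch). DC_FP_START() and
 * DC_FP_END() are assumed to expand to dc_fpu_begin(__func__, __LINE__)
 * and dc_fpu_end(__func__, __LINE__).
 */
static void dcn_calc_helper(void)
{
	DC_FP_START();	/* depth becomes 2: kernel_fpu_begin() is NOT called again */
	/* ... more floating-point math ... */
	DC_FP_END();	/* depth back to 1: hardware FPU state stays active */
}

static void dcn_calc_example(void)
{
	DC_FP_START();	/* depth becomes 1: preempt_disable() + kernel_fpu_begin() */
	/* ... floating-point bandwidth/watermark math ... */
	dcn_calc_helper();
	DC_FP_END();	/* depth back to 0: kernel_fpu_end(), preemption re-enabled */
}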

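Similarly, the new amdgpu_dm_wb.c/amdgpu_dm_wb.h files above only export amdgpu_dm_wb_connector_init(); a hypothetical call-site sketch follows. The dm pointer, link_index value, allocation with kzalloc (from <linux/slab.h>) and the error handling are assumptions for illustration, not code from this patch.

/* Hypothetical call site, e.g. while registering connectors for a DWB-capable link. */
struct amdgpu_dm_wb_connector *wbcon;
int ret;

wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
if (!wbcon)
	return -ENOMEM;

ret = amdgpu_dm_wb_connector_init(dm, wbcon, link_index);
if (ret) {
	kfree(wbcon);
	return ret;
}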