Diffstat (limited to 'drivers/gpu/drm/amd/display')
111 files changed, 5136 insertions, 877 deletions
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index e509a175ed17..7dffc04a557e 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -38,4 +38,18 @@ config DEBUG_KERNEL_DC help Choose this option if you want to hit kdgb_break in assert. +config DRM_AMD_SECURE_DISPLAY + bool "Enable secure display support" + default n + depends on DEBUG_FS + depends on DRM_AMD_DC_DCN + help + Choose this option if you want to + support secure display + + This option enables the calculation + of crc of specific region via debugfs. + Cooperate with specific DMCU FW. + + endmenu diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 55e39b462a5e..00edf78975b1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -34,6 +34,7 @@ #include "dc/inc/hw/dmcu.h" #include "dc/inc/hw/abm.h" #include "dc/dc_dmub_srv.h" +#include "dc/dc_edid_parser.h" #include "amdgpu_dm_trace.h" #include "vid.h" @@ -75,7 +76,6 @@ #include <drm/drm_edid.h> #include <drm/drm_vblank.h> #include <drm/drm_audio_component.h> -#include <drm/drm_hdcp.h> #if defined(CONFIG_DRM_AMD_DC_DCN) #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" @@ -212,6 +212,9 @@ static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm); static const struct drm_format_info * amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd); +static bool +is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, + struct drm_crtc_state *new_crtc_state); /* * dm_vblank_get_counter * @@ -335,6 +338,17 @@ static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state) dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; } +static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state, + struct dm_crtc_state *new_state) +{ + if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) + return true; + else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state)) + return true; + else + return false; +} + /** * dm_pflip_high_irq() - Handle pageflip interrupt * @interrupt_params: ignored @@ -566,6 +580,31 @@ static void dm_crtc_high_irq(void *interrupt_params) spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } +#if defined(CONFIG_DRM_AMD_DC_DCN) +/** + * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for + * DCN generation ASICs + * @interrupt params - interrupt parameters + * + * Used to set crc window/read out crc value at vertical line 0 position + */ +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) +static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) +{ + struct common_irq_params *irq_params = interrupt_params; + struct amdgpu_device *adev = irq_params->adev; + struct amdgpu_crtc *acrtc; + + acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0); + + if (!acrtc) + return; + + amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base); +} +#endif +#endif + static int dm_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -951,9 +990,7 @@ static void event_mall_stutter(struct work_struct *work) else dm->active_vblank_irq_count--; - - dc_allow_idle_optimizations( - dm->dc, dm->active_vblank_irq_count == 0 ? 
true : false); + dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0); DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); @@ -1060,6 +1097,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.flags.power_down_display_on_boot = true; + INIT_LIST_HEAD(&adev->dm.da_list); /* Display Core create. */ adev->dm.dc = dc_create(&init_data); @@ -1139,6 +1177,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) dc_init_callbacks(adev->dm.dc, &init_params); } #endif +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work(); +#endif if (amdgpu_dm_initialize_drm_device(adev)) { DRM_ERROR( "amdgpu: failed to initialize sw for display support.\n"); @@ -1182,6 +1223,13 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) amdgpu_dm_destroy_drm_device(&adev->dm); +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + if (adev->dm.crc_rd_wrk) { + flush_work(&adev->dm.crc_rd_wrk->notify_ta_work); + kfree(adev->dm.crc_rd_wrk); + adev->dm.crc_rd_wrk = NULL; + } +#endif #ifdef CONFIG_DRM_AMD_DC_HDCP if (adev->dm.hdcp_workqueue) { hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue); @@ -1191,6 +1239,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) if (adev->dm.dc) dc_deinit_callbacks(adev->dm.dc); #endif + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (adev->dm.vblank_workqueue) { + adev->dm.vblank_workqueue->dm = NULL; + kfree(adev->dm.vblank_workqueue); + adev->dm.vblank_workqueue = NULL; + } +#endif + if (adev->dm.dc->ctx->dmub_srv) { dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); adev->dm.dc->ctx->dmub_srv = NULL; @@ -1847,6 +1904,9 @@ static int dm_suspend(void *handle) return ret; } +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY + amdgpu_dm_crtc_secure_display_suspend(adev); +#endif WARN_ON(adev->dm.cached_state); adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); @@ -2171,6 +2231,10 @@ static int dm_resume(void *handle) dm->cached_state = NULL; +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY + amdgpu_dm_crtc_secure_display_resume(adev); +#endif + amdgpu_dm_irq_resume_late(adev); amdgpu_dm_smu_write_watermarks_table(adev); @@ -2907,6 +2971,16 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) struct dc_interrupt_params int_params = {0}; int r; int i; +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + static const unsigned int vrtl_int_srcid[] = { + DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL, + DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL, + DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL, + DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL, + DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL, + DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL + }; +#endif int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; @@ -2947,6 +3021,37 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) adev, &int_params, dm_crtc_high_irq, c_irq_params); } + /* Use otg vertical line interrupt */ +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) { + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, + vrtl_int_srcid[i], &adev->vline0_irq); + + if (r) { + DRM_ERROR("Failed to add vline0 irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0); + + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) { + DRM_ERROR("Failed to register 
vline0 irq %d!\n", vrtl_int_srcid[i]); + break; + } + + c_irq_params = &adev->dm.vline0_params[int_params.irq_source + - DC_IRQ_SOURCE_DC1_VLINE0]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_dcn_vertical_interrupt0_high_irq, c_irq_params); + } +#endif + /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx * to trigger at end of each vblank, regardless of state of the lock, @@ -5001,19 +5106,16 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->hdmi_vic = hv_frame.vic; } - timing_out->h_addressable = mode_in->crtc_hdisplay; - timing_out->h_total = mode_in->crtc_htotal; - timing_out->h_sync_width = - mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; - timing_out->h_front_porch = - mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; - timing_out->v_total = mode_in->crtc_vtotal; - timing_out->v_addressable = mode_in->crtc_vdisplay; - timing_out->v_front_porch = - mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; - timing_out->v_sync_width = - mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; - timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; + timing_out->h_addressable = mode_in->hdisplay; + timing_out->h_total = mode_in->htotal; + timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start; + timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay; + timing_out->v_total = mode_in->vtotal; + timing_out->v_addressable = mode_in->vdisplay; + timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay; + timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start; + timing_out->pix_clk_100hz = mode_in->clock * 10; + timing_out->aspect_ratio = get_aspect_ratio(mode_in); stream->output_color_space = get_output_color_space(timing_out); @@ -5180,6 +5282,86 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context) set_master_stream(context->streams, context->stream_count); } +static struct drm_display_mode * +get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, + bool use_probed_modes) +{ + struct drm_display_mode *m, *m_pref = NULL; + u16 current_refresh, highest_refresh; + struct list_head *list_head = use_probed_modes ? + &aconnector->base.probed_modes : + &aconnector->base.modes; + + if (aconnector->freesync_vid_base.clock != 0) + return &aconnector->freesync_vid_base; + + /* Find the preferred mode */ + list_for_each_entry (m, list_head, head) { + if (m->type & DRM_MODE_TYPE_PREFERRED) { + m_pref = m; + break; + } + } + + if (!m_pref) { + /* Probably an EDID with no preferred mode. Fallback to first entry */ + m_pref = list_first_entry_or_null( + &aconnector->base.modes, struct drm_display_mode, head); + if (!m_pref) { + DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); + return NULL; + } + } + + highest_refresh = drm_mode_vrefresh(m_pref); + + /* + * Find the mode with highest refresh rate with same resolution. + * For some monitors, preferred mode is not the mode with highest + * supported refresh rate. 
+ */ + list_for_each_entry (m, list_head, head) { + current_refresh = drm_mode_vrefresh(m); + + if (m->hdisplay == m_pref->hdisplay && + m->vdisplay == m_pref->vdisplay && + highest_refresh < current_refresh) { + highest_refresh = current_refresh; + m_pref = m; + } + } + + aconnector->freesync_vid_base = *m_pref; + return m_pref; +} + +static bool is_freesync_video_mode(struct drm_display_mode *mode, + struct amdgpu_dm_connector *aconnector) +{ + struct drm_display_mode *high_mode; + int timing_diff; + + high_mode = get_highest_refresh_rate_mode(aconnector, false); + if (!high_mode || !mode) + return false; + + timing_diff = high_mode->vtotal - mode->vtotal; + + if (high_mode->clock == 0 || high_mode->clock != mode->clock || + high_mode->hdisplay != mode->hdisplay || + high_mode->vdisplay != mode->vdisplay || + high_mode->hsync_start != mode->hsync_start || + high_mode->hsync_end != mode->hsync_end || + high_mode->htotal != mode->htotal || + high_mode->hskew != mode->hskew || + high_mode->vscan != mode->vscan || + high_mode->vsync_start - mode->vsync_start != timing_diff || + high_mode->vsync_end - mode->vsync_end != timing_diff) + return false; + else + return true; +} + static struct dc_stream_state * create_stream_for_sink(struct amdgpu_dm_connector *aconnector, const struct drm_display_mode *drm_mode, @@ -5193,8 +5375,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, dm_state ? &dm_state->base : NULL; struct dc_stream_state *stream = NULL; struct drm_display_mode mode = *drm_mode; + struct drm_display_mode saved_mode; + struct drm_display_mode *freesync_mode = NULL; bool native_mode_found = false; - bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false; + bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false; int mode_refresh; int preferred_refresh = 0; #if defined(CONFIG_DRM_AMD_DC_DCN) @@ -5202,6 +5386,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, uint32_t link_bandwidth_kbps; #endif struct dc_sink *sink = NULL; + + memset(&saved_mode, 0, sizeof(saved_mode)); + if (aconnector == NULL) { DRM_ERROR("aconnector is NULL!\n"); return stream; @@ -5254,25 +5441,38 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, */ DRM_DEBUG_DRIVER("No preferred mode found\n"); } else { - decide_crtc_timing_for_drm_display_mode( + recalculate_timing |= amdgpu_freesync_vid_mode && + is_freesync_video_mode(&mode, aconnector); + if (recalculate_timing) { + freesync_mode = get_highest_refresh_rate_mode(aconnector, false); + saved_mode = mode; + mode = *freesync_mode; + } else { + decide_crtc_timing_for_drm_display_mode( &mode, preferred_mode, dm_state ? 
(dm_state->scaling != RMX_OFF) : false); + } + preferred_refresh = drm_mode_vrefresh(preferred_mode); } - if (!dm_state) + if (recalculate_timing) + drm_mode_set_crtcinfo(&saved_mode, 0); + else drm_mode_set_crtcinfo(&mode, 0); - /* + /* * If scaling is enabled and refresh rate didn't change * we copy the vic and polarities of the old timings */ - if (!scale || mode_refresh != preferred_refresh) - fill_stream_properties_from_drm_display_mode(stream, - &mode, &aconnector->base, con_state, NULL, requested_bpc); + if (!recalculate_timing || mode_refresh != preferred_refresh) + fill_stream_properties_from_drm_display_mode( + stream, &mode, &aconnector->base, con_state, NULL, + requested_bpc); else - fill_stream_properties_from_drm_display_mode(stream, - &mode, &aconnector->base, con_state, old_stream, requested_bpc); + fill_stream_properties_from_drm_display_mode( + stream, &mode, &aconnector->base, con_state, old_stream, + requested_bpc); stream->timing.flags.DSC = 0; @@ -5409,15 +5609,22 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc) state->abm_level = cur->abm_level; state->vrr_supported = cur->vrr_supported; state->freesync_config = cur->freesync_config; - state->crc_src = cur->crc_src; state->cm_has_degamma = cur->cm_has_degamma; state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb; - /* TODO Duplicate dc_stream after objects are stream object is flattened */ return &state->base; } +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc) +{ + crtc_debugfs_init(crtc); + + return 0; +} +#endif + static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable) { enum dc_irq_source irq_source; @@ -5503,6 +5710,9 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { .enable_vblank = dm_enable_vblank, .disable_vblank = dm_disable_vblank, .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + .late_register = amdgpu_dm_crtc_late_register, +#endif }; static enum drm_connector_status @@ -6488,13 +6698,17 @@ static int dm_plane_helper_check_state(struct drm_plane_state *state, else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay) viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y; - /* If completely outside of screen, viewport_width and/or viewport_height will be negative, - * which is still OK to satisfy the condition below, thereby also covering these cases - * (when plane is completely outside of screen). - * x2 for width is because of pipe-split. - */ - if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE) + if (viewport_width < 0 || viewport_height < 0) { + DRM_DEBUG_ATOMIC("Plane completely outside of screen\n"); + return -EINVAL; + } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */ + DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2); return -EINVAL; + } else if (viewport_height < MIN_VIEWPORT_SIZE) { + DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE); + return -EINVAL; + } + } /* Get min/max allowed scaling factors from plane caps. */ @@ -6975,11 +7189,118 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, */ drm_mode_sort(&connector->probed_modes); amdgpu_dm_get_native_mode(connector); + + /* Freesync capabilities are reset by calling + * drm_add_edid_modes() and need to be + * restored here. 
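For concreteness, a minimal standalone sketch of the front-porch stretching arithmetic that add_fs_modes() below uses to synthesize lower-rate freesync video modes. The 1080p60 CEA timing numbers (148.5 MHz, 2200x1125 total) are hypothetical, not taken from the patch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical base mode: 1080p60 CEA timing */
		uint64_t clock_khz = 148500;           /* pixel clock, kHz */
		uint64_t htotal = 2200, vtotal = 1125;
		uint64_t target_mhz = 48000;           /* wanted rate, millihertz */

		/* target_vtotal = pixel_clock_Hz * 1000 / (rate_mHz * htotal),
		 * i.e. the div_u64(num, den) step in add_fs_modes() */
		uint64_t num = clock_khz * 1000 * 1000;
		uint64_t target_vtotal = num / (target_mhz * htotal);   /* 1406 */

		/* Only vtotal/vsync_start/vsync_end grow; the extra 281 lines
		 * land in the vertical front porch, pixel clock is unchanged. */
		printf("48 Hz variant: vtotal %llu -> %llu\n",
		       (unsigned long long)vtotal,
		       (unsigned long long)target_vtotal);
		return 0;
	}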
+ */ + amdgpu_dm_update_freesync_caps(connector, edid); } else { amdgpu_dm_connector->num_modes = 0; } } +static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector, + struct drm_display_mode *mode) +{ + struct drm_display_mode *m; + + list_for_each_entry (m, &aconnector->base.probed_modes, head) { + if (drm_mode_equal(m, mode)) + return true; + } + + return false; +} + +static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) +{ + const struct drm_display_mode *m; + struct drm_display_mode *new_mode; + uint i; + uint32_t new_modes_count = 0; + + /* Standard FPS values + * + * 23.976 - TV/NTSC + * 24 - Cinema + * 25 - TV/PAL + * 29.97 - TV/NTSC + * 30 - TV/NTSC + * 48 - Cinema HFR + * 50 - TV/PAL + * 60 - Commonly used + * 48,72,96 - Multiples of 24 + */ + const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000, + 48000, 50000, 60000, 72000, 96000 }; + + /* + * Find mode with highest refresh rate with the same resolution + * as the preferred mode. Some monitors report a preferred mode + * with lower resolution than the highest refresh rate supported. + */ + + m = get_highest_refresh_rate_mode(aconnector, true); + if (!m) + return 0; + + for (i = 0; i < ARRAY_SIZE(common_rates); i++) { + uint64_t target_vtotal, target_vtotal_diff; + uint64_t num, den; + + if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) + continue; + + if (common_rates[i] < aconnector->min_vfreq * 1000 || + common_rates[i] > aconnector->max_vfreq * 1000) + continue; + + num = (unsigned long long)m->clock * 1000 * 1000; + den = common_rates[i] * (unsigned long long)m->htotal; + target_vtotal = div_u64(num, den); + target_vtotal_diff = target_vtotal - m->vtotal; + + /* Check for illegal modes */ + if (m->vsync_start + target_vtotal_diff < m->vdisplay || + m->vsync_end + target_vtotal_diff < m->vsync_start || + m->vtotal + target_vtotal_diff < m->vsync_end) + continue; + + new_mode = drm_mode_duplicate(aconnector->base.dev, m); + if (!new_mode) + goto out; + + new_mode->vtotal += (u16)target_vtotal_diff; + new_mode->vsync_start += (u16)target_vtotal_diff; + new_mode->vsync_end += (u16)target_vtotal_diff; + new_mode->type &= ~DRM_MODE_TYPE_PREFERRED; + new_mode->type |= DRM_MODE_TYPE_DRIVER; + + if (!is_duplicate_mode(aconnector, new_mode)) { + drm_mode_probed_add(&aconnector->base, new_mode); + new_modes_count += 1; + } else + drm_mode_destroy(aconnector->base.dev, new_mode); + } + out: + return new_modes_count; +} + +static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, + struct edid *edid) +{ + struct amdgpu_dm_connector *amdgpu_dm_connector = + to_amdgpu_dm_connector(connector); + + if (!(amdgpu_freesync_vid_mode && edid)) + return; + + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) + amdgpu_dm_connector->num_modes += + add_fs_modes(amdgpu_dm_connector); +} + static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = @@ -6995,6 +7316,7 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) } else { amdgpu_dm_connector_ddc_get_modes(connector, edid); amdgpu_dm_connector_add_common_modes(encoder, connector); + amdgpu_dm_connector_add_freesync_modes(connector, edid); } amdgpu_dm_fbc_init(connector); @@ -7299,8 +7621,19 @@ static void manage_dm_interrupts(struct amdgpu_device *adev, adev, &adev->pageflip_irq, irq_type); +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + amdgpu_irq_get( + adev, + &adev->vline0_irq, + irq_type); +#endif } else { - +#if 
defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + amdgpu_irq_put( + adev, + &adev->vline0_irq, + irq_type); +#endif amdgpu_irq_put( adev, &adev->pageflip_irq, @@ -7424,10 +7757,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, int x, y; int xorigin = 0, yorigin = 0; - position->enable = false; - position->x = 0; - position->y = 0; - if (!crtc || !plane->state->fb) return 0; @@ -7474,7 +7803,7 @@ static void handle_cursor_update(struct drm_plane *plane, struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); uint64_t address = afb ? afb->address : 0; - struct dc_cursor_position position; + struct dc_cursor_position position = {0}; struct dc_cursor_attributes attributes; int ret; @@ -7559,6 +7888,7 @@ static void update_freesync_state_on_stream( struct amdgpu_device *adev = dm->adev; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); unsigned long flags; + bool pack_sdp_v1_3 = false; if (!new_stream) return; @@ -7600,7 +7930,8 @@ static void update_freesync_state_on_stream( &vrr_params, PACKET_TYPE_VRR, TRANSFER_FUNC_UNKNOWN, - &vrr_infopacket); + &vrr_infopacket, + pack_sdp_v1_3); new_crtc_state->freesync_timing_changed |= (memcmp(&acrtc->dm_irq_params.vrr_params.adjust, @@ -7654,9 +7985,22 @@ static void update_stream_irq_parameters( if (new_crtc_state->vrr_supported && config.min_refresh_in_uhz && config.max_refresh_in_uhz) { - config.state = new_crtc_state->base.vrr_enabled ? - VRR_STATE_ACTIVE_VARIABLE : - VRR_STATE_INACTIVE; + /* + * if freesync compatible mode was set, config.state will be set + * in atomic check + */ + if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz && + (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) || + new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) { + vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz; + vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz; + vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz; + vrr_params.state = VRR_STATE_ACTIVE_FIXED; + } else { + config.state = new_crtc_state->base.vrr_enabled ? + VRR_STATE_ACTIVE_VARIABLE : + VRR_STATE_INACTIVE; + } } else { config.state = VRR_STATE_UNSUPPORTED; } @@ -7977,8 +8321,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * re-adjust the min/max bounds now that DC doesn't handle this * as part of commit. */ - if (amdgpu_dm_vrr_active(dm_old_crtc_state) != - amdgpu_dm_vrr_active(acrtc_state)) { + if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) { spin_lock_irqsave(&pcrtc->dev->event_lock, flags); dc_stream_adjust_vmin_vmax( dm->dc, acrtc_state->stream, @@ -8263,6 +8606,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) /* i.e. reset mode */ if (dm_old_crtc_state->stream) remove_stream(adev, acrtc, dm_old_crtc_state->stream); + mode_set_reset_required = true; } } /* for_each_crtc_in_state() */ @@ -8321,8 +8665,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) hdcp_update_display( adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, new_con_state->hdcp_content_type, - new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? 
true - : false); + new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED); } #endif @@ -8432,7 +8775,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) */ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); - +#ifdef CONFIG_DEBUG_FS + bool configure_crc = false; + enum amdgpu_dm_pipe_crc_source cur_crc_src; +#endif dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (new_crtc_state->active && @@ -8448,12 +8794,21 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * settings for the stream. */ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); + cur_crc_src = acrtc->dm_irq_params.crc_src; + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); + + if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { + configure_crc = true; +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + if (amdgpu_dm_crc_window_is_activated(crtc)) + configure_crc = false; +#endif + } - if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) { + if (configure_crc) amdgpu_dm_crtc_configure_crc_source( - crtc, dm_new_crtc_state, - dm_new_crtc_state->crc_src); - } + crtc, dm_new_crtc_state, cur_crc_src); #endif } } @@ -8662,6 +9017,7 @@ static void get_freesync_config_for_crtc( to_amdgpu_dm_connector(new_con_state->base.connector); struct drm_display_mode *mode = &new_crtc_state->base.mode; int vrefresh = drm_mode_vrefresh(mode); + bool fs_vid_mode = false; new_crtc_state->vrr_supported = new_con_state->freesync_capable && vrefresh >= aconnector->min_vfreq && @@ -8669,17 +9025,24 @@ static void get_freesync_config_for_crtc( if (new_crtc_state->vrr_supported) { new_crtc_state->stream->ignore_msa_timing_param = true; - config.state = new_crtc_state->base.vrr_enabled ? 
- VRR_STATE_ACTIVE_VARIABLE : - VRR_STATE_INACTIVE; - config.min_refresh_in_uhz = - aconnector->min_vfreq * 1000000; - config.max_refresh_in_uhz = - aconnector->max_vfreq * 1000000; + fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; + + config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000; + config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000; config.vsif_supported = true; config.btr = true; - } + if (fs_vid_mode) { + config.state = VRR_STATE_ACTIVE_FIXED; + config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz; + goto out; + } else if (new_crtc_state->base.vrr_enabled) { + config.state = VRR_STATE_ACTIVE_VARIABLE; + } else { + config.state = VRR_STATE_INACTIVE; + } + } +out: new_crtc_state->freesync_config = config; } @@ -8692,6 +9055,50 @@ static void reset_freesync_config_for_crtc( sizeof(new_crtc_state->vrr_infopacket)); } +static bool +is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, + struct drm_crtc_state *new_crtc_state) +{ + struct drm_display_mode old_mode, new_mode; + + if (!old_crtc_state || !new_crtc_state) + return false; + + old_mode = old_crtc_state->mode; + new_mode = new_crtc_state->mode; + + if (old_mode.clock == new_mode.clock && + old_mode.hdisplay == new_mode.hdisplay && + old_mode.vdisplay == new_mode.vdisplay && + old_mode.htotal == new_mode.htotal && + old_mode.vtotal != new_mode.vtotal && + old_mode.hsync_start == new_mode.hsync_start && + old_mode.vsync_start != new_mode.vsync_start && + old_mode.hsync_end == new_mode.hsync_end && + old_mode.vsync_end != new_mode.vsync_end && + old_mode.hskew == new_mode.hskew && + old_mode.vscan == new_mode.vscan && + (old_mode.vsync_end - old_mode.vsync_start) == + (new_mode.vsync_end - new_mode.vsync_start)) + return true; + + return false; +} + +static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) { + uint64_t num, den, res; + struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base; + + dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED; + + num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000; + den = (unsigned long long)new_crtc_state->mode.htotal * + (unsigned long long)new_crtc_state->mode.vtotal; + + res = div_u64(num, den); + dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res; +} + static int dm_update_crtc_state(struct amdgpu_display_manager *dm, struct drm_atomic_state *state, struct drm_crtc *crtc, @@ -8782,6 +9189,11 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * TODO: Refactor this function to allow this check to work * in all conditions. 
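A minimal sketch of the fixed_refresh_in_uhz computation performed by set_freesync_fixed_config() above, checked against a hypothetical 1080p60 timing (148.5 MHz pixel clock, 2200x1125 total; the numbers are illustrative assumptions):

	#include <assert.h>
	#include <stdint.h>

	static uint64_t fixed_refresh_in_uhz(uint64_t clock_khz,
					     uint64_t htotal, uint64_t vtotal)
	{
		/* refresh_uhz = pixel_clock_Hz * 1e6 / (htotal * vtotal) */
		uint64_t num = clock_khz * 1000 * 1000000ULL;
		return num / (htotal * vtotal);
	}

	int main(void)
	{
		/* 1080p60: 148500 kHz / (2200 * 1125) = 60 Hz = 60,000,000 uHz */
		assert(fixed_refresh_in_uhz(148500, 2200, 1125) == 60000000ULL);
		return 0;
	}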
*/ + if (amdgpu_freesync_vid_mode && + dm_new_crtc_state->stream && + is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) + goto skip_modeset; + if (dm_new_crtc_state->stream && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { @@ -8813,6 +9225,24 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, if (!dm_old_crtc_state->stream) goto skip_modeset; + if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && + is_timing_unchanged_for_freesync(new_crtc_state, + old_crtc_state)) { + new_crtc_state->mode_changed = false; + DRM_DEBUG_DRIVER( + "Mode change not required for front porch change, " + "setting mode_changed to %d", + new_crtc_state->mode_changed); + + set_freesync_fixed_config(dm_new_crtc_state); + + goto skip_modeset; + } else if (amdgpu_freesync_vid_mode && aconnector && + is_freesync_video_mode(&new_crtc_state->mode, + aconnector)) { + set_freesync_fixed_config(dm_new_crtc_state); + } + ret = dm_atomic_get_state(state, &dm_state); if (ret) goto fail; @@ -9390,7 +9820,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, } #if defined(CONFIG_DRM_AMD_DC_DCN) - if (adev->asic_type >= CHIP_NAVI10) { + if (dc_resource_is_dsc_encoding_supported(dc)) { for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { ret = add_affected_mst_dsc_crtcs(state, crtc); @@ -9696,11 +10126,85 @@ static bool is_dp_capable_without_timing_msa(struct dc *dc, return capable; } + +static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, + uint8_t *edid_ext, int len, + struct amdgpu_hdmi_vsdb_info *vsdb_info) +{ + int i; + struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); + struct dc *dc = adev->dm.dc; + + /* send extension block to DMCU for parsing */ + for (i = 0; i < len; i += 8) { + bool res; + int offset; + + /* send 8 bytes a time */ + if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8)) + return false; + + if (i+8 == len) { + /* EDID block sent completed, expect result */ + int version, min_rate, max_rate; + + res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate); + if (res) { + /* amd vsdb found */ + vsdb_info->freesync_supported = 1; + vsdb_info->amd_vsdb_version = version; + vsdb_info->min_refresh_rate_hz = min_rate; + vsdb_info->max_refresh_rate_hz = max_rate; + return true; + } + /* not amd vsdb */ + return false; + } + + /* check for ack*/ + res = dc_edid_parser_recv_cea_ack(dc, &offset); + if (!res) + return false; + } + + return false; +} + +static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, + struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) +{ + uint8_t *edid_ext = NULL; + int i; + bool valid_vsdb_found = false; + + /*----- drm_find_cea_extension() -----*/ + /* No EDID or EDID extensions */ + if (edid == NULL || edid->extensions == 0) + return -ENODEV; + + /* Find CEA extension */ + for (i = 0; i < edid->extensions; i++) { + edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1); + if (edid_ext[0] == CEA_EXT) + break; + } + + if (i == edid->extensions) + return -ENODEV; + + /*----- cea_db_offsets() -----*/ + if (edid_ext[0] != CEA_EXT) + return -ENODEV; + + valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info); + + return valid_vsdb_found ? 
i : -ENODEV; +} + void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct edid *edid) { - int i; - bool edid_check_required; + int i = 0; struct detailed_timing *timing; struct detailed_non_pixel *data; struct detailed_data_monitor_range *range; @@ -9711,6 +10215,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); bool freesync_capable = false; + struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; if (!connector->state) { DRM_ERROR("%s - Connector has no state", __func__); @@ -9729,60 +10234,75 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, dm_con_state = to_dm_connector_state(connector->state); - edid_check_required = false; if (!amdgpu_dm_connector->dc_sink) { DRM_ERROR("dc_sink NULL, could not add free_sync module.\n"); goto update; } if (!adev->dm.freesync_module) goto update; - /* - * if edid non zero restrict freesync only for dp and edp - */ - if (edid) { - if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT - || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) { + + + if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT + || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) { + bool edid_check_required = false; + + if (edid) { edid_check_required = is_dp_capable_without_timing_msa( adev->dm.dc, amdgpu_dm_connector); } - } - if (edid_check_required == true && (edid->version > 1 || - (edid->version == 1 && edid->revision > 1))) { - for (i = 0; i < 4; i++) { - timing = &edid->detailed_timings[i]; - data = &timing->data.other_data; - range = &data->data.range; - /* - * Check if monitor has continuous frequency mode - */ - if (data->type != EDID_DETAIL_MONITOR_RANGE) - continue; - /* - * Check for flag range limits only. If flag == 1 then - * no additional timing information provided. - * Default GTF, GTF Secondary curve and CVT are not - * supported - */ - if (range->flags != 1) - continue; + if (edid_check_required == true && (edid->version > 1 || + (edid->version == 1 && edid->revision > 1))) { + for (i = 0; i < 4; i++) { - amdgpu_dm_connector->min_vfreq = range->min_vfreq; - amdgpu_dm_connector->max_vfreq = range->max_vfreq; - amdgpu_dm_connector->pixel_clock_mhz = - range->pixel_clock_mhz * 10; + timing = &edid->detailed_timings[i]; + data = &timing->data.other_data; + range = &data->data.range; + /* + * Check if monitor has continuous frequency mode + */ + if (data->type != EDID_DETAIL_MONITOR_RANGE) + continue; + /* + * Check for flag range limits only. If flag == 1 then + * no additional timing information provided. 
+ * Default GTF, GTF Secondary curve and CVT are not + * supported + */ + if (range->flags != 1) + continue; - connector->display_info.monitor_range.min_vfreq = range->min_vfreq; - connector->display_info.monitor_range.max_vfreq = range->max_vfreq; + amdgpu_dm_connector->min_vfreq = range->min_vfreq; + amdgpu_dm_connector->max_vfreq = range->max_vfreq; + amdgpu_dm_connector->pixel_clock_mhz = + range->pixel_clock_mhz * 10; - break; - } + connector->display_info.monitor_range.min_vfreq = range->min_vfreq; + connector->display_info.monitor_range.max_vfreq = range->max_vfreq; - if (amdgpu_dm_connector->max_vfreq - - amdgpu_dm_connector->min_vfreq > 10) { + break; + } - freesync_capable = true; + if (amdgpu_dm_connector->max_vfreq - + amdgpu_dm_connector->min_vfreq > 10) { + + freesync_capable = true; + } + } + } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { + i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); + if (i >= 0 && vsdb_info.freesync_supported) { + timing = &edid->detailed_timings[i]; + data = &timing->data.other_data; + + amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; + amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) + freesync_capable = true; + + connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; + connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; } } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 8bfe901cf237..8f98d44490aa 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -69,18 +69,6 @@ struct common_irq_params { }; /** - * struct irq_list_head - Linked-list for low context IRQ handlers. - * - * @head: The list_head within &struct handler_data - * @work: A work_struct containing the deferred handler work - */ -struct irq_list_head { - struct list_head head; - /* In case this interrupt needs post-processing, 'work' will be queued*/ - struct work_struct work; -}; - -/** * struct dm_compressor_info - Buffer info used by frame buffer compression * @cpu_addr: MMIO cpu addr * @bo_ptr: Pointer to the buffer object @@ -145,6 +133,16 @@ struct amdgpu_dm_backlight_caps { }; /** + * struct dal_allocation - Tracks mapped FB memory for SMU communication + */ +struct dal_allocation { + struct list_head list; + struct amdgpu_bo *bo; + void *cpu_ptr; + u64 gpu_addr; +}; + +/** * struct amdgpu_display_manager - Central amdgpu display manager device * * @dc: Display Core control structure @@ -257,12 +255,12 @@ struct amdgpu_display_manager { */ struct mutex audio_lock; +#if defined(CONFIG_DRM_AMD_DC_DCN) /** - * @vblank_work_lock: + * @vblank_lock: * * Guards access to deferred vblank work state. */ -#if defined(CONFIG_DRM_AMD_DC_DCN) spinlock_t vblank_lock; #endif @@ -293,7 +291,7 @@ struct amdgpu_display_manager { * Note that handlers are called in the same order as they were * registered (FIFO). */ - struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER]; + struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER]; /** * @irq_handler_list_high_tab: @@ -324,6 +322,15 @@ struct amdgpu_display_manager { vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1]; /** + * @vline0_params: + * + * OTG vertical interrupt0 IRQ parameters, passed to registered + * handlers when triggered. 
+ */ + struct common_irq_params + vline0_params[DC_IRQ_SOURCE_DC6_VLINE0 - DC_IRQ_SOURCE_DC1_VLINE0 + 1]; + + /** * @vupdate_params: * * Vertical update IRQ parameters, passed to registered handlers when @@ -345,6 +352,11 @@ struct amdgpu_display_manager { #endif #if defined(CONFIG_DRM_AMD_DC_DCN) + /** + * @vblank_workqueue: + * + * amdgpu workqueue during vblank + */ struct vblank_workqueue *vblank_workqueue; #endif @@ -363,12 +375,18 @@ struct amdgpu_display_manager { */ const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; +#if defined(CONFIG_DRM_AMD_DC_DCN) /** * @active_vblank_irq_count: * * number of currently active vblank irqs */ uint32_t active_vblank_irq_count; +#endif + +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + struct crc_rd_work *crc_rd_wrk; +#endif /** * @mst_encoders: @@ -377,6 +395,13 @@ struct amdgpu_display_manager { */ struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC]; bool force_timing_sync; + bool dmcub_trace_event_en; + /** + * @da_list: + * + * DAL fb memory allocation list, for communication with SMU. + */ + struct list_head da_list; }; enum dsc_clock_force_state { @@ -440,6 +465,8 @@ struct amdgpu_dm_connector { #endif bool force_yuv420_output; struct dsc_preferred_settings dsc_settings; + /* Cached display modes */ + struct drm_display_mode freesync_vid_base; }; #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) @@ -462,7 +489,6 @@ struct dm_crtc_state { int active_planes; int crc_skip_count; - enum amdgpu_dm_pipe_crc_source crc_src; bool freesync_timing_changed; bool freesync_vrr_info_changed; @@ -501,6 +527,14 @@ struct dm_connector_state { uint64_t pbn; }; +struct amdgpu_hdmi_vsdb_info { + unsigned int amd_vsdb_version; /* VSDB version, should be used to determine which VSIF to send */ + bool freesync_supported; /* FreeSync Supported */ + unsigned int min_refresh_rate_hz; /* FreeSync Minimum Refresh Rate in Hz */ + unsigned int max_refresh_rate_hz; /* FreeSync Maximum Refresh Rate in Hz */ +}; + + #define to_dm_connector_state(x)\ container_of((x), struct dm_connector_state, base) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 66cb8730586b..c6d6baab106e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -29,6 +29,7 @@ #include "amdgpu.h" #include "amdgpu_dm.h" #include "dc.h" +#include "amdgpu_securedisplay.h" static const char *const pipe_crc_sources[] = { "none", @@ -81,6 +82,73 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc, return pipe_crc_sources; } +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc) +{ + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + acrtc->dm_irq_params.crc_window.x_start = 0; + acrtc->dm_irq_params.crc_window.y_start = 0; + acrtc->dm_irq_params.crc_window.x_end = 0; + acrtc->dm_irq_params.crc_window.y_end = 0; + acrtc->dm_irq_params.crc_window.activated = false; + acrtc->dm_irq_params.crc_window.update_win = false; + acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0; + spin_unlock_irq(&drm_dev->event_lock); +} + +static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work) +{ + struct crc_rd_work *crc_rd_wrk; + struct amdgpu_device *adev; + struct psp_context *psp; + struct securedisplay_cmd *securedisplay_cmd; + struct drm_crtc *crtc; + uint8_t 
phy_id; + int ret; + + crc_rd_wrk = container_of(work, struct crc_rd_work, notify_ta_work); + spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); + crtc = crc_rd_wrk->crtc; + + if (!crtc) { + spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); + return; + } + + adev = drm_to_adev(crtc->dev); + psp = &adev->psp; + phy_id = crc_rd_wrk->phy_inst; + spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); + + psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, + TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); + securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = + phy_id; + ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); + if (!ret) { + if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { + psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); + } + } +} + +bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc) +{ + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + bool ret = false; + + spin_lock_irq(&drm_dev->event_lock); + ret = acrtc->dm_irq_params.crc_window.activated; + spin_unlock_irq(&drm_dev->event_lock); + + return ret; +} +#endif + int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name, size_t *values_cnt) @@ -114,6 +182,20 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, /* Enable CRTC CRC generation if necessary. */ if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) { +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + if (!enable) { + if (adev->dm.crc_rd_wrk) { + flush_work(&adev->dm.crc_rd_wrk->notify_ta_work); + spin_lock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock); + if (adev->dm.crc_rd_wrk->crtc == crtc) { + dc_stream_stop_dmcu_crc_win_update(stream_state->ctx->dc, + dm_crtc_state->stream); + adev->dm.crc_rd_wrk->crtc = NULL; + } + spin_unlock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock); + } + } +#endif if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state, NULL, enable, enable)) { ret = -EINVAL; @@ -142,8 +224,11 @@ unlock: int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) { enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name); + enum amdgpu_dm_pipe_crc_source cur_crc_src; struct drm_crtc_commit *commit; struct dm_crtc_state *crtc_state; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct drm_dp_aux *aux = NULL; bool enable = false; bool enabled = false; @@ -182,6 +267,9 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) enable = amdgpu_dm_is_valid_crc_source(source); crtc_state = to_dm_crtc_state(crtc->state); + spin_lock_irq(&drm_dev->event_lock); + cur_crc_src = acrtc->dm_irq_params.crc_src; + spin_unlock_irq(&drm_dev->event_lock); /* * USER REQ SRC | CURRENT SRC | BEHAVIOR @@ -198,7 +286,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) */ if (dm_is_crc_source_dprx(source) || (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE && - dm_is_crc_source_dprx(crtc_state->crc_src))) { + dm_is_crc_source_dprx(cur_crc_src))) { struct amdgpu_dm_connector *aconn = NULL; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; @@ -228,6 +316,10 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) } } +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + amdgpu_dm_set_crc_window_default(crtc); +#endif + if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) { ret = -EINVAL; goto cleanup; @@ -237,7 +329,7 @@ int 
amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) * Reading the CRC requires the vblank interrupt handler to be * enabled. Keep a reference until CRC capture stops. */ - enabled = amdgpu_dm_is_valid_crc_source(crtc_state->crc_src); + enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src); if (!enabled && enable) { ret = drm_crtc_vblank_get(crtc); if (ret) @@ -261,7 +353,9 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) } } - crtc_state->crc_src = source; + spin_lock_irq(&drm_dev->event_lock); + acrtc->dm_irq_params.crc_src = source; + spin_unlock_irq(&drm_dev->event_lock); /* Reset crc_skipped on dm state */ crtc_state->crc_skip_count = 0; @@ -286,16 +380,26 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) { struct dm_crtc_state *crtc_state; struct dc_stream_state *stream_state; + struct drm_device *drm_dev = NULL; + enum amdgpu_dm_pipe_crc_source cur_crc_src; + struct amdgpu_crtc *acrtc = NULL; uint32_t crcs[3]; + unsigned long flags; if (crtc == NULL) return; crtc_state = to_dm_crtc_state(crtc->state); stream_state = crtc_state->stream; + acrtc = to_amdgpu_crtc(crtc); + drm_dev = crtc->dev; + + spin_lock_irqsave(&drm_dev->event_lock, flags); + cur_crc_src = acrtc->dm_irq_params.crc_src; + spin_unlock_irqrestore(&drm_dev->event_lock, flags); /* Early return if CRC capture is not enabled. */ - if (!amdgpu_dm_is_valid_crc_source(crtc_state->crc_src)) + if (!amdgpu_dm_is_valid_crc_source(cur_crc_src)) return; /* @@ -309,7 +413,7 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) return; } - if (dm_is_crc_source_crtc(crtc_state->crc_src)) { + if (dm_is_crc_source_crtc(cur_crc_src)) { if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, &crcs[0], &crcs[1], &crcs[2])) return; @@ -318,3 +422,182 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) drm_crtc_accurate_vblank_count(crtc), crcs); } } + +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) +void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc) +{ + struct dc_stream_state *stream_state; + struct drm_device *drm_dev = NULL; + enum amdgpu_dm_pipe_crc_source cur_crc_src; + struct amdgpu_crtc *acrtc = NULL; + struct amdgpu_device *adev = NULL; + struct crc_rd_work *crc_rd_wrk = NULL; + struct crc_params *crc_window = NULL, tmp_window; + unsigned long flags1, flags2; + struct crtc_position position; + uint32_t v_blank; + uint32_t v_back_porch; + uint32_t crc_window_latch_up_line; + struct dc_crtc_timing *timing_out; + + if (crtc == NULL) + return; + + acrtc = to_amdgpu_crtc(crtc); + adev = drm_to_adev(crtc->dev); + drm_dev = crtc->dev; + + spin_lock_irqsave(&drm_dev->event_lock, flags1); + stream_state = acrtc->dm_irq_params.stream; + cur_crc_src = acrtc->dm_irq_params.crc_src; + timing_out = &stream_state->timing; + + /* Early return if CRC capture is not enabled. 
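Worked numbers for the latch-line margin check that amdgpu_dm_crtc_handle_crc_window_irq() applies below after forwarding a new window; the timing values (1080p, no borders, 4-line front porch, 5-line sync width) are hypothetical:

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the 3-line-margin check in the handler below. */
	static uint32_t frames_to_skip(uint32_t vertical_count)
	{
		uint32_t v_total = 1125, v_addressable = 1080;
		uint32_t v_front_porch = 4, v_sync_width = 5;
		uint32_t v_blank = v_total - v_addressable;                     /* 45 */
		uint32_t v_back_porch = v_blank - v_front_porch - v_sync_width; /* 36 */
		uint32_t latch_line = v_back_porch + v_sync_width;              /* 41 */

		/* Programming the window within 3 lines of the latch point
		 * means it misses this frame, so one frame is skipped. */
		return (vertical_count + 3 >= latch_line) ? 1 : 0;
	}

	int main(void)
	{
		printf("at line 10: skip %u frame(s); at line 40: skip %u\n",
		       frames_to_skip(10), frames_to_skip(40));
		return 0;
	}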
*/ + if (!amdgpu_dm_is_valid_crc_source(cur_crc_src)) + goto cleanup; + + if (dm_is_crc_source_crtc(cur_crc_src)) { + if (acrtc->dm_irq_params.crc_window.activated) { + if (acrtc->dm_irq_params.crc_window.update_win) { + if (acrtc->dm_irq_params.crc_window.skip_frame_cnt) { + acrtc->dm_irq_params.crc_window.skip_frame_cnt -= 1; + goto cleanup; + } + crc_window = &tmp_window; + + tmp_window.windowa_x_start = + acrtc->dm_irq_params.crc_window.x_start; + tmp_window.windowa_y_start = + acrtc->dm_irq_params.crc_window.y_start; + tmp_window.windowa_x_end = + acrtc->dm_irq_params.crc_window.x_end; + tmp_window.windowa_y_end = + acrtc->dm_irq_params.crc_window.y_end; + tmp_window.windowb_x_start = + acrtc->dm_irq_params.crc_window.x_start; + tmp_window.windowb_y_start = + acrtc->dm_irq_params.crc_window.y_start; + tmp_window.windowb_x_end = + acrtc->dm_irq_params.crc_window.x_end; + tmp_window.windowb_y_end = + acrtc->dm_irq_params.crc_window.y_end; + + dc_stream_forward_dmcu_crc_window(stream_state->ctx->dc, + stream_state, crc_window); + + acrtc->dm_irq_params.crc_window.update_win = false; + + dc_stream_get_crtc_position(stream_state->ctx->dc, &stream_state, 1, + &position.vertical_count, + &position.nominal_vcount); + + v_blank = timing_out->v_total - timing_out->v_border_top - + timing_out->v_addressable - timing_out->v_border_bottom; + + v_back_porch = v_blank - timing_out->v_front_porch - + timing_out->v_sync_width; + + crc_window_latch_up_line = v_back_porch + timing_out->v_sync_width; + + /* take 3 lines margin*/ + if ((position.vertical_count + 3) >= crc_window_latch_up_line) + acrtc->dm_irq_params.crc_window.skip_frame_cnt = 1; + else + acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0; + } else { + if (acrtc->dm_irq_params.crc_window.skip_frame_cnt == 0) { + if (adev->dm.crc_rd_wrk) { + crc_rd_wrk = adev->dm.crc_rd_wrk; + spin_lock_irqsave(&crc_rd_wrk->crc_rd_work_lock, flags2); + crc_rd_wrk->phy_inst = + stream_state->link->link_enc_hw_inst; + spin_unlock_irqrestore(&crc_rd_wrk->crc_rd_work_lock, flags2); + schedule_work(&crc_rd_wrk->notify_ta_work); + } + } else { + acrtc->dm_irq_params.crc_window.skip_frame_cnt -= 1; + } + } + } + } + +cleanup: + spin_unlock_irqrestore(&drm_dev->event_lock, flags1); +} + +void amdgpu_dm_crtc_secure_display_resume(struct amdgpu_device *adev) +{ + struct drm_crtc *crtc; + enum amdgpu_dm_pipe_crc_source cur_crc_src; + struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk; + struct crc_window_parm cur_crc_window; + struct amdgpu_crtc *acrtc = NULL; + + drm_for_each_crtc(crtc, &adev->ddev) { + acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&adev_to_drm(adev)->event_lock); + cur_crc_src = acrtc->dm_irq_params.crc_src; + cur_crc_window = acrtc->dm_irq_params.crc_window; + spin_unlock_irq(&adev_to_drm(adev)->event_lock); + + if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { + amdgpu_dm_crtc_set_crc_source(crtc, + pipe_crc_sources[cur_crc_src]); + spin_lock_irq(&adev_to_drm(adev)->event_lock); + acrtc->dm_irq_params.crc_window = cur_crc_window; + if (acrtc->dm_irq_params.crc_window.activated) { + acrtc->dm_irq_params.crc_window.update_win = true; + acrtc->dm_irq_params.crc_window.skip_frame_cnt = 1; + spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); + crc_rd_wrk->crtc = crtc; + spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); + } + spin_unlock_irq(&adev_to_drm(adev)->event_lock); + } + } +} + +void amdgpu_dm_crtc_secure_display_suspend(struct amdgpu_device *adev) +{ + struct drm_crtc *crtc; + struct crc_window_parm cur_crc_window; + enum 
amdgpu_dm_pipe_crc_source cur_crc_src; + struct amdgpu_crtc *acrtc = NULL; + + drm_for_each_crtc(crtc, &adev->ddev) { + acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&adev_to_drm(adev)->event_lock); + cur_crc_src = acrtc->dm_irq_params.crc_src; + cur_crc_window = acrtc->dm_irq_params.crc_window; + cur_crc_window.update_win = false; + spin_unlock_irq(&adev_to_drm(adev)->event_lock); + + if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { + amdgpu_dm_crtc_set_crc_source(crtc, NULL); + spin_lock_irq(&adev_to_drm(adev)->event_lock); + /* For resume to set back crc source*/ + acrtc->dm_irq_params.crc_src = cur_crc_src; + acrtc->dm_irq_params.crc_window = cur_crc_window; + spin_unlock_irq(&adev_to_drm(adev)->event_lock); + } + } + +} + +struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void) +{ + struct crc_rd_work *crc_rd_wrk = NULL; + + crc_rd_wrk = kzalloc(sizeof(*crc_rd_wrk), GFP_KERNEL); + + if (!crc_rd_wrk) + return NULL; + + spin_lock_init(&crc_rd_wrk->crc_rd_work_lock); + INIT_WORK(&crc_rd_wrk->notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read); + + return crc_rd_wrk; +} +#endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h index f7d731797d3f..737e701fb0f0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h @@ -39,6 +39,29 @@ enum amdgpu_dm_pipe_crc_source { AMDGPU_DM_PIPE_CRC_SOURCE_INVALID = -1, }; +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +struct crc_window_parm { + uint16_t x_start; + uint16_t y_start; + uint16_t x_end; + uint16_t y_end; + /* CRC windwo is activated or not*/ + bool activated; + /* Update crc window during vertical blank or not */ + bool update_win; + /* skip reading/writing for few frames */ + int skip_frame_cnt; +}; + +struct crc_rd_work { + struct work_struct notify_ta_work; + /* To protect crc_rd_work carried fields*/ + spinlock_t crc_rd_work_lock; + struct drm_crtc *crtc; + uint8_t phy_inst; +}; +#endif + static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source source) { return (source > AMDGPU_DM_PIPE_CRC_SOURCE_NONE) && @@ -64,4 +87,18 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc); #define amdgpu_dm_crtc_handle_crc_irq(x) #endif +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc); +void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc); +struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void); +void amdgpu_dm_crtc_secure_display_resume(struct amdgpu_device *adev); +void amdgpu_dm_crtc_secure_display_suspend(struct amdgpu_device *adev); +#else +#define amdgpu_dm_crc_window_is_activated(x) +#define amdgpu_dm_crtc_handle_crc_window_irq(x) +#define amdgpu_dm_crtc_secure_display_create_work() +#define amdgpu_dm_crtc_secure_display_resume(x) +#define amdgpu_dm_crtc_secure_display_suspend(x) +#endif + #endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 360952129b6d..927de7678a4f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -25,8 +25,6 @@ #include <linux/uaccess.h> -#include <drm/drm_debugfs.h> - #include "dc.h" #include "amdgpu.h" #include "amdgpu_dm.h" @@ -36,6 +34,7 @@ #include "resource.h" #include "dsc.h" #include "dc_link_dp.h" +#include "dc/dc_dmub_srv.h" struct 
dmub_debugfs_trace_header { uint32_t entry_count; @@ -2154,6 +2153,149 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, return result; } + +/* + * function description: Read max_requested_bpc property from the connector + * + * Access it with the following command: + * + * cat /sys/kernel/debug/dri/0/DP-X/max_bpc + * + */ +static ssize_t dp_max_bpc_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + struct dm_connector_state *state; + ssize_t result = 0; + char *rd_buf = NULL; + char *rd_buf_ptr = NULL; + const uint32_t rd_buf_size = 10; + int r; + + rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); + + if (!rd_buf) + return -ENOMEM; + + mutex_lock(&dev->mode_config.mutex); + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + + if (connector->state == NULL) + goto unlock; + + state = to_dm_connector_state(connector->state); + + rd_buf_ptr = rd_buf; + snprintf(rd_buf_ptr, rd_buf_size, + "%u\n", + state->base.max_requested_bpc); + + while (size) { + if (*pos >= rd_buf_size) + break; + + r = put_user(*(rd_buf + result), buf); + if (r) { + result = r; /* r = -EFAULT */ + goto unlock; + } + buf += 1; + size -= 1; + *pos += 1; + result += 1; + } +unlock: + drm_modeset_unlock(&dev->mode_config.connection_mutex); + mutex_unlock(&dev->mode_config.mutex); + kfree(rd_buf); + return result; +} + + +/* + * function description: Set max_requested_bpc property on the connector + * + * This function will not force the input BPC on connector, it will only + * change the max value. This is equivalent to setting max_bpc through + * xrandr. + * + * The BPC value written must be >= 6 and <= 16. Values outside of this + * range will result in errors. 
+ * + * BPC values: + * 0x6 - 6 BPC + * 0x8 - 8 BPC + * 0xa - 10 BPC + * 0xc - 12 BPC + * 0x10 - 16 BPC + * + * Write the max_bpc in the following way: + * + * echo 0x6 > /sys/kernel/debug/dri/0/DP-X/max_bpc + * + */ +static ssize_t dp_max_bpc_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct drm_connector *connector = &aconnector->base; + struct dm_connector_state *state; + struct drm_device *dev = connector->dev; + char *wr_buf = NULL; + uint32_t wr_buf_size = 42; + int max_param_num = 1; + long param[1] = {0}; + uint8_t param_nums = 0; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + + if (!wr_buf) { + DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); + return -ENOSPC; + } + + if (parse_write_buffer_into_params(wr_buf, size, + (long *)param, buf, + max_param_num, + ¶m_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + DRM_DEBUG_DRIVER("user data not be read\n"); + kfree(wr_buf); + return -EINVAL; + } + + if (param[0] < 6 || param[0] > 16) { + DRM_DEBUG_DRIVER("bad max_bpc value\n"); + kfree(wr_buf); + return -EINVAL; + } + + mutex_lock(&dev->mode_config.mutex); + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + + if (connector->state == NULL) + goto unlock; + + state = to_dm_connector_state(connector->state); + state->base.max_requested_bpc = param[0]; +unlock: + drm_modeset_unlock(&dev->mode_config.connection_mutex); + mutex_unlock(&dev->mode_config.mutex); + + kfree(wr_buf); + return size; +} + DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support); DEFINE_SHOW_ATTRIBUTE(dmub_fw_state); DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer); @@ -2265,6 +2407,13 @@ static const struct file_operations dp_dpcd_data_debugfs_fops = { .llseek = default_llseek }; +static const struct file_operations dp_max_bpc_debugfs_fops = { + .owner = THIS_MODULE, + .read = dp_max_bpc_read, + .write = dp_max_bpc_write, + .llseek = default_llseek +}; + static const struct { char *name; const struct file_operations *fops; @@ -2287,7 +2436,8 @@ static const struct { {"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops}, {"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops}, {"dsc_slice_bpg", &dp_dsc_slice_bpg_offset_debugfs_fops}, - {"dp_dsc_fec_support", &dp_dsc_fec_support_fops} + {"dp_dsc_fec_support", &dp_dsc_fec_support_fops}, + {"max_bpc", &dp_max_bpc_debugfs_fops} }; #ifdef CONFIG_DRM_AMD_DC_HDCP @@ -2341,9 +2491,51 @@ static int psr_get(void *data, u64 *val) return 0; } +/* + * Set dmcub trace event IRQ enable or disable. 
+ * Usage to enable dmcub trace event IRQ: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en + * Usage to disable dmcub trace event IRQ: echo 0 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en + */ +static int dmcub_trace_event_state_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + + if (val == 1 || val == 0) { + dc_dmub_trace_event_control(adev->dm.dc, val); + adev->dm.dmcub_trace_event_en = (bool)val; + } else + return 0; + + return 0; +} + +/* + * Get the current dmcub trace event IRQ enable state + * Usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en + */ +static int dmcub_trace_event_state_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = data; + + *val = adev->dm.dmcub_trace_event_en; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_get, + dmcub_trace_event_state_set, "%llu\n"); DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n"); +static const struct { + char *name; + const struct file_operations *fops; +} connector_debugfs_entries[] = { + {"force_yuv420_output", &force_yuv420_output_fops}, + {"output_bpc", &output_bpc_fops}, + {"trigger_hotplug", &trigger_hotplug_debugfs_fops} +}; + void connector_debugfs_init(struct amdgpu_dm_connector *connector) { int i; @@ -2360,14 +2552,11 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops); - debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector, - &force_yuv420_output_fops); - - debugfs_create_file("output_bpc", 0644, dir, connector, - &output_bpc_fops); - - debugfs_create_file("trigger_hotplug", 0644, dir, connector, - &trigger_hotplug_debugfs_fops); + for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) { + debugfs_create_file(connector_debugfs_entries[i].name, + 0644, dir, connector, + connector_debugfs_entries[i].fops); + } connector->debugfs_dpcd_address = 0; connector->debugfs_dpcd_size = 0; @@ -2383,6 +2572,225 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) #endif } +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +/* + * Set crc window coordinate x start + */ +static int crc_win_x_start_set(void *data, u64 val) +{ + struct drm_crtc *crtc = data; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + acrtc->dm_irq_params.crc_window.x_start = (uint16_t) val; + acrtc->dm_irq_params.crc_window.update_win = false; + spin_unlock_irq(&drm_dev->event_lock); + + return 0; +} + +/* + * Get crc window coordinate x start + */ +static int crc_win_x_start_get(void *data, u64 *val) +{ + struct drm_crtc *crtc = data; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + *val = acrtc->dm_irq_params.crc_window.x_start; + spin_unlock_irq(&drm_dev->event_lock); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(crc_win_x_start_fops, crc_win_x_start_get, + crc_win_x_start_set, "%llu\n"); + + +/* + * Set crc window coordinate y start + */ +static int crc_win_y_start_set(void *data, u64 val) +{ + struct drm_crtc *crtc = data; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + acrtc->dm_irq_params.crc_window.y_start = (uint16_t) val; + acrtc->dm_irq_params.crc_window.update_win = 
false; + spin_unlock_irq(&drm_dev->event_lock); + + return 0; +} + +/* + * Get crc window coordinate y start + */ +static int crc_win_y_start_get(void *data, u64 *val) +{ + struct drm_crtc *crtc = data; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + *val = acrtc->dm_irq_params.crc_window.y_start; + spin_unlock_irq(&drm_dev->event_lock); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_start_fops, crc_win_y_start_get, + crc_win_y_start_set, "%llu\n"); + +/* + * Set crc window coordinate x end + */ +static int crc_win_x_end_set(void *data, u64 val) +{ + struct drm_crtc *crtc = data; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + acrtc->dm_irq_params.crc_window.x_end = (uint16_t) val; + acrtc->dm_irq_params.crc_window.update_win = false; + spin_unlock_irq(&drm_dev->event_lock); + + return 0; +} + +/* + * Get crc window coordinate x end + */ +static int crc_win_x_end_get(void *data, u64 *val) +{ + struct drm_crtc *crtc = data; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + *val = acrtc->dm_irq_params.crc_window.x_end; + spin_unlock_irq(&drm_dev->event_lock); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(crc_win_x_end_fops, crc_win_x_end_get, + crc_win_x_end_set, "%llu\n"); + +/* + * Set crc window coordinate y end + */ +static int crc_win_y_end_set(void *data, u64 val) +{ + struct drm_crtc *crtc = data; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + acrtc->dm_irq_params.crc_window.y_end = (uint16_t) val; + acrtc->dm_irq_params.crc_window.update_win = false; + spin_unlock_irq(&drm_dev->event_lock); + + return 0; +} + +/* + * Get crc window coordinate y end + */ +static int crc_win_y_end_get(void *data, u64 *val) +{ + struct drm_crtc *crtc = data; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + *val = acrtc->dm_irq_params.crc_window.y_end; + spin_unlock_irq(&drm_dev->event_lock); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_end_fops, crc_win_y_end_get, + crc_win_y_end_set, "%llu\n"); +/* + * Trigger to commit crc window + */ +static int crc_win_update_set(void *data, u64 val) +{ + struct drm_crtc *new_crtc = data; + struct drm_crtc *old_crtc = NULL; + struct amdgpu_crtc *new_acrtc, *old_acrtc; + struct amdgpu_device *adev = drm_to_adev(new_crtc->dev); + struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk; + + if (!crc_rd_wrk) + return 0; + + if (val) { + spin_lock_irq(&adev_to_drm(adev)->event_lock); + spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); + if (crc_rd_wrk->crtc) { + old_crtc = crc_rd_wrk->crtc; + old_acrtc = to_amdgpu_crtc(old_crtc); + } + new_acrtc = to_amdgpu_crtc(new_crtc); + + if (old_crtc && old_crtc != new_crtc) { + old_acrtc->dm_irq_params.crc_window.activated = false; + old_acrtc->dm_irq_params.crc_window.update_win = false; + old_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0; + + new_acrtc->dm_irq_params.crc_window.activated = true; + new_acrtc->dm_irq_params.crc_window.update_win = true; + new_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0; + crc_rd_wrk->crtc = new_crtc; + } else { + new_acrtc->dm_irq_params.crc_window.activated = true; + new_acrtc->dm_irq_params.crc_window.update_win = true; + 
new_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0; + crc_rd_wrk->crtc = new_crtc; + } + spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); + spin_unlock_irq(&adev_to_drm(adev)->event_lock); + } + + return 0; +} + +/* + * Get crc window update flag + */ +static int crc_win_update_get(void *data, u64 *val) +{ + *val = 0; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(crc_win_update_fops, crc_win_update_get, + crc_win_update_set, "%llu\n"); + +void crtc_debugfs_init(struct drm_crtc *crtc) +{ + struct dentry *dir = debugfs_lookup("crc", crtc->debugfs_entry); + + if (!dir) + return; + + debugfs_create_file_unsafe("crc_win_x_start", 0644, dir, crtc, + &crc_win_x_start_fops); + debugfs_create_file_unsafe("crc_win_y_start", 0644, dir, crtc, + &crc_win_y_start_fops); + debugfs_create_file_unsafe("crc_win_x_end", 0644, dir, crtc, + &crc_win_x_end_fops); + debugfs_create_file_unsafe("crc_win_y_end", 0644, dir, crtc, + &crc_win_y_end_fops); + debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc, + &crc_win_update_fops); + +} +#endif /* * Writes DTN log state to the user supplied buffer. * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log @@ -2450,11 +2858,9 @@ static ssize_t dtn_log_write( * As written to display, taking ABM and backlight lut into account. * Ranges from 0x0 to 0x10000 (= 100% PWM) */ -static int current_backlight_read(struct seq_file *m, void *data) +static int current_backlight_show(struct seq_file *m, void *unused) { - struct drm_info_node *node = (struct drm_info_node *)m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; struct amdgpu_display_manager *dm = &adev->dm; unsigned int backlight = dc_link_get_backlight_level(dm->backlight_link); @@ -2468,11 +2874,9 @@ static int current_backlight_read(struct seq_file *m, void *data) * As written to display, taking ABM and backlight lut into account. * Ranges from 0x0 to 0x10000 (= 100% PWM) */ -static int target_backlight_read(struct seq_file *m, void *data) +static int target_backlight_show(struct seq_file *m, void *unused) { - struct drm_info_node *node = (struct drm_info_node *)m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; struct amdgpu_display_manager *dm = &adev->dm; unsigned int backlight = dc_link_get_target_backlight_pwm(dm->backlight_link); @@ -2481,10 +2885,10 @@ static int target_backlight_read(struct seq_file *m, void *data) return 0; } -static int mst_topo(struct seq_file *m, void *unused) +static int mst_topo_show(struct seq_file *m, void *unused) { - struct drm_info_node *node = (struct drm_info_node *)m->private; - struct drm_device *dev = node->minor->dev; + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct amdgpu_dm_connector *aconnector; @@ -2504,12 +2908,6 @@ static int mst_topo(struct seq_file *m, void *unused) return 0; } -static const struct drm_info_list amdgpu_dm_debugfs_list[] = { - {"amdgpu_current_backlight_pwm", &current_backlight_read}, - {"amdgpu_target_backlight_pwm", &target_backlight_read}, - {"amdgpu_mst_topology", &mst_topo}, -}; - /* * Sets the force_timing_sync debug option from the given string. * All connected displays will be force synchronized immediately. 
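The u64 debugfs knobs in this file — the crc_win_* coordinates above, force_timing_sync here, and visual_confirm in the next hunk — all follow the same DEFINE_DEBUGFS_ATTRIBUTE recipe: a get/set pair that touches driver state under a lock, bound to a file created with debugfs_create_file_unsafe(). A minimal free-standing sketch of that recipe; example_state, example_lock and example_fops are illustrative names, not part of the patch:

	static u64 example_state;
	static DEFINE_SPINLOCK(example_lock);

	/* set callback: runs when user space writes the file */
	static int example_set(void *data, u64 val)
	{
		spin_lock_irq(&example_lock);
		example_state = val;	/* e.g. crc_window.x_start = (uint16_t)val */
		spin_unlock_irq(&example_lock);
		return 0;
	}

	/* get callback: runs when user space reads the file */
	static int example_get(void *data, u64 *val)
	{
		spin_lock_irq(&example_lock);
		*val = example_state;
		spin_unlock_irq(&example_lock);
		return 0;
	}

	DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");

DEFINE_DEBUGFS_ATTRIBUTE wraps the callbacks with debugfs_file_get()/debugfs_file_put(), which is what makes the cheaper debugfs_create_file_unsafe() binding safe for these entries.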
@@ -2568,10 +2966,13 @@ static int visual_confirm_get(void *data, u64 *val) return 0; } +DEFINE_SHOW_ATTRIBUTE(current_backlight); +DEFINE_SHOW_ATTRIBUTE(target_backlight); +DEFINE_SHOW_ATTRIBUTE(mst_topo); DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get, visual_confirm_set, "%llu\n"); -int dtn_debugfs_init(struct amdgpu_device *adev) +void dtn_debugfs_init(struct amdgpu_device *adev) { static const struct file_operations dtn_log_fops = { .owner = THIS_MODULE, @@ -2582,13 +2983,13 @@ int dtn_debugfs_init(struct amdgpu_device *adev) struct drm_minor *minor = adev_to_drm(adev)->primary; struct dentry *root = minor->debugfs_root; - int ret; - - ret = amdgpu_debugfs_add_files(adev, amdgpu_dm_debugfs_list, - ARRAY_SIZE(amdgpu_dm_debugfs_list)); - if (ret) - return ret; + debugfs_create_file("amdgpu_current_backlight_pwm", 0444, + root, adev, &current_backlight_fops); + debugfs_create_file("amdgpu_target_backlight_pwm", 0444, + root, adev, &target_backlight_fops); + debugfs_create_file("amdgpu_mst_topology", 0444, root, + adev, &mst_topo_fops); debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev, &dtn_log_fops); @@ -2604,5 +3005,6 @@ int dtn_debugfs_init(struct amdgpu_device *adev) debugfs_create_file_unsafe("amdgpu_dm_force_timing_sync", 0644, root, adev, &force_timing_sync_ops); - return 0; + debugfs_create_file_unsafe("amdgpu_dm_dmcub_trace_event_en", 0644, root, + adev, &dmcub_trace_event_state_fops); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h index 5e5b2b2afa31..3366cb644053 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h @@ -30,6 +30,9 @@ #include "amdgpu_dm.h" void connector_debugfs_init(struct amdgpu_dm_connector *connector); -int dtn_debugfs_init(struct amdgpu_device *adev); +void dtn_debugfs_init(struct amdgpu_device *adev); +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) +void crtc_debugfs_init(struct drm_crtc *crtc); +#endif #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 5750818db8f6..09bdffb3a09e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -652,8 +652,31 @@ void *dm_helpers_allocate_gpu_mem( size_t size, long long *addr) { - // TODO - return NULL; + struct amdgpu_device *adev = ctx->driver_context; + struct dal_allocation *da; + u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ? 
+ AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM; + int ret; + + da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL); + if (!da) + return NULL; + + ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, + domain, &da->bo, + &da->gpu_addr, &da->cpu_ptr); + + *addr = da->gpu_addr; + + if (ret) { + kfree(da); + return NULL; + } + + /* add da to list in dm */ + list_add(&da->list, &adev->dm.da_list); + + return da->cpu_ptr; } void dm_helpers_free_gpu_mem( @@ -661,5 +684,22 @@ void dm_helpers_free_gpu_mem( enum dc_gpu_mem_alloc_type type, void *pvMem) { + struct amdgpu_device *adev = ctx->driver_context; + struct dal_allocation *da; + + /* walk the da list in DM */ + list_for_each_entry(da, &adev->dm.da_list, list) { + if (pvMem == da->cpu_ptr) { + amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); + list_del(&da->list); + kfree(da); + break; + } + } +} + +bool dm_helpers_dmub_outbox0_interrupt_control(struct dc_context *ctx, bool enable) +{ // TODO + return true; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index e0000c180ed1..d3c687d07ee6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -82,6 +82,7 @@ struct amdgpu_dm_irq_handler_data { struct amdgpu_display_manager *dm; /* DAL irq source which registered for this interrupt. */ enum dc_irq_source irq_source; + struct work_struct work; }; #define DM_IRQ_TABLE_LOCK(adev, flags) \ @@ -111,20 +112,10 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd, */ static void dm_irq_work_func(struct work_struct *work) { - struct irq_list_head *irq_list_head = - container_of(work, struct irq_list_head, work); - struct list_head *handler_list = &irq_list_head->head; - struct amdgpu_dm_irq_handler_data *handler_data; - - list_for_each_entry(handler_data, handler_list, list) { - DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n", - handler_data->irq_source); + struct amdgpu_dm_irq_handler_data *handler_data = + container_of(work, struct amdgpu_dm_irq_handler_data, work); - DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n", - handler_data->irq_source); - - handler_data->handler(handler_data->handler_arg); - } + handler_data->handler(handler_data->handler_arg); /* Call a DAL subcomponent which registered for interrupt notification * at INTERRUPT_LOW_IRQ_CONTEXT. 
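The rewritten dm_irq_work_func() works because each handler now embeds its own work_struct and recovers itself with container_of(). A short sketch of the idiom, with hypothetical names standing in for amdgpu_dm_irq_handler_data:

	struct example_handler {
		void (*handler)(void *arg);	/* registered callback */
		void *handler_arg;
		struct work_struct work;	/* one work item per handler */
	};

	static void example_work_func(struct work_struct *work)
	{
		/* map the work pointer back to the structure embedding it */
		struct example_handler *h =
			container_of(work, struct example_handler, work);

		h->handler(h->handler_arg);
	}

Each handler is then queued independently (queue_work(system_highpri_wq, &h->work), as the later hunks show), so the handlers registered for one interrupt source no longer share a single work item.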
@@ -156,7 +147,7 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev, break; case INTERRUPT_LOW_IRQ_CONTEXT: default: - hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head; + hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source]; break; } @@ -290,7 +281,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, break; case INTERRUPT_LOW_IRQ_CONTEXT: default: - hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head; + hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source]; + INIT_WORK(&handler_data->work, dm_irq_work_func); break; } @@ -372,7 +364,7 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, int amdgpu_dm_irq_init(struct amdgpu_device *adev) { int src; - struct irq_list_head *lh; + struct list_head *lh; DRM_DEBUG_KMS("DM_IRQ\n"); @@ -381,9 +373,7 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev) for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { /* low context handler list init */ lh = &adev->dm.irq_handler_list_low_tab[src]; - INIT_LIST_HEAD(&lh->head); - INIT_WORK(&lh->work, dm_irq_work_func); - + INIT_LIST_HEAD(lh); /* high context handler init */ INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]); } @@ -400,8 +390,11 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev) void amdgpu_dm_irq_fini(struct amdgpu_device *adev) { int src; - struct irq_list_head *lh; + struct list_head *lh; + struct list_head *entry, *tmp; + struct amdgpu_dm_irq_handler_data *handler; unsigned long irq_table_flags; + DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n"); for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { DM_IRQ_TABLE_LOCK(adev, irq_table_flags); @@ -410,7 +403,16 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev) * (because no code can schedule a new one). */ lh = &adev->dm.irq_handler_list_low_tab[src]; DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); - flush_work(&lh->work); + + if (!list_empty(lh)) { + list_for_each_safe(entry, tmp, lh) { + handler = list_entry( + entry, + struct amdgpu_dm_irq_handler_data, + list); + flush_work(&handler->work); + } + } } } @@ -420,6 +422,8 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev) struct list_head *hnd_list_h; struct list_head *hnd_list_l; unsigned long irq_table_flags; + struct list_head *entry, *tmp; + struct amdgpu_dm_irq_handler_data *handler; DM_IRQ_TABLE_LOCK(adev, irq_table_flags); @@ -430,14 +434,22 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev) * will be disabled from manage_dm_interrupts on disable CRTC. 
*/ for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) { - hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head; + hnd_list_l = &adev->dm.irq_handler_list_low_tab[src]; hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) dc_interrupt_set(adev->dm.dc, src, false); DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); - flush_work(&adev->dm.irq_handler_list_low_tab[src].work); + if (!list_empty(hnd_list_l)) { + list_for_each_safe (entry, tmp, hnd_list_l) { + handler = list_entry( + entry, + struct amdgpu_dm_irq_handler_data, + list); + flush_work(&handler->work); + } + } DM_IRQ_TABLE_LOCK(adev, irq_table_flags); } @@ -457,7 +469,7 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) /* re-enable short pulse interrupts HW interrupt */ for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) { - hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head; + hnd_list_l = &adev->dm.irq_handler_list_low_tab[src]; hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) dc_interrupt_set(adev->dm.dc, src, true); @@ -483,7 +495,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) * will be enabled from manage_dm_interrupts on enable CRTC. */ for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) { - hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head; + hnd_list_l = &adev->dm.irq_handler_list_low_tab[src]; hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) dc_interrupt_set(adev->dm.dc, src, true); @@ -500,22 +512,51 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev, enum dc_irq_source irq_source) { - unsigned long irq_table_flags; - struct work_struct *work = NULL; + struct list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source]; + struct amdgpu_dm_irq_handler_data *handler_data; + bool work_queued = false; - DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + if (list_empty(handler_list)) + return; - if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head)) - work = &adev->dm.irq_handler_list_low_tab[irq_source].work; + list_for_each_entry (handler_data, handler_list, list) { + if (queue_work(system_highpri_wq, &handler_data->work)) { + work_queued = true; + break; + } + } - DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + if (!work_queued) { + struct amdgpu_dm_irq_handler_data *handler_data_add; + /* get the amdgpu_dm_irq_handler_data of the first item in handler_list */ + handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list); - if (work) { - if (!schedule_work(work)) - DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n", - irq_source); - } + /* allocate a new amdgpu_dm_irq_handler_data */ + handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL); + if (!handler_data_add) { + DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n"); + return; + } + + /* copy the members of handler_data into the new amdgpu_dm_irq_handler_data */ + handler_data_add->handler = handler_data->handler; + handler_data_add->handler_arg = handler_data->handler_arg; + handler_data_add->dm = handler_data->dm; + handler_data_add->irq_source = irq_source; + list_add_tail(&handler_data_add->list, handler_list); + + INIT_WORK(&handler_data_add->work, dm_irq_work_func); + + if (queue_work(system_highpri_wq, &handler_data_add->work)) + DRM_DEBUG("Queued work for handling interrupt 
from " + "display for IRQ source %d\n", + irq_source); + else + DRM_ERROR("Failed to queue work for handling interrupt " + "from display for IRQ source %d\n", + irq_source); + } } /* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h index 45825a34f8eb..f3b93ba69a27 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h @@ -26,12 +26,21 @@ #ifndef __AMDGPU_DM_IRQ_PARAMS_H__ #define __AMDGPU_DM_IRQ_PARAMS_H__ +#include "amdgpu_dm_crc.h" + struct dm_irq_params { u32 last_flip_vblank; struct mod_vrr_params vrr_params; struct dc_stream_state *stream; int active_planes; struct mod_freesync_config freesync_config; + +#ifdef CONFIG_DEBUG_FS + enum amdgpu_dm_pipe_crc_source crc_src; +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY + struct crc_window_parm crc_window; +#endif +#endif }; #endif /* __AMDGPU_DM_IRQ_PARAMS_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 41b09ab22233..73cdb9fe981a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -38,6 +38,7 @@ #include "dc_link_ddc.h" #include "i2caux_interface.h" +#include "dmub_cmd.h" #if defined(CONFIG_DEBUG_FS) #include "amdgpu_dm_debugfs.h" #endif @@ -51,7 +52,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, { ssize_t result = 0; struct aux_payload payload; - enum aux_channel_operation_result operation_result; + enum aux_return_code_type operation_result; if (WARN_ON(msg->size > 16)) return -E2BIG; @@ -73,17 +74,19 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, if (result < 0) switch (operation_result) { - case AUX_CHANNEL_OPERATION_SUCCEEDED: + case AUX_RET_SUCCESS: break; - case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON: - case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN: + case AUX_RET_ERROR_HPD_DISCON: + case AUX_RET_ERROR_UNKNOWN: + case AUX_RET_ERROR_INVALID_OPERATION: + case AUX_RET_ERROR_PROTOCOL_ERROR: result = -EIO; break; - case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY: - case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE: + case AUX_RET_ERROR_INVALID_REPLY: + case AUX_RET_ERROR_ENGINE_ACQUIRE: result = -EBUSY; break; - case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: + case AUX_RET_ERROR_TIMEOUT: result = -ETIMEDOUT; break; } diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 5bf2f2375b40..bbde6e6a4e43 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -54,7 +54,7 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI include $(AMD_DC) -DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ +DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o ifdef CONFIG_DRM_AMD_DC_DCN diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c index ad04ef98e652..b2fc4f8e6482 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c +++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c @@ -49,24 +49,20 @@ bool is_rgb_cspace(enum dc_color_space output_color_space) } } -bool is_child_pipe_tree_visible(struct pipe_ctx *pipe_ctx) +bool is_lower_pipe_tree_visible(struct 
pipe_ctx *pipe_ctx) { if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) return true; - if (pipe_ctx->bottom_pipe && is_child_pipe_tree_visible(pipe_ctx->bottom_pipe)) - return true; - if (pipe_ctx->next_odm_pipe && is_child_pipe_tree_visible(pipe_ctx->next_odm_pipe)) + if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) return true; return false; } -bool is_parent_pipe_tree_visible(struct pipe_ctx *pipe_ctx) +bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) { if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) return true; - if (pipe_ctx->top_pipe && is_parent_pipe_tree_visible(pipe_ctx->top_pipe)) - return true; - if (pipe_ctx->prev_odm_pipe && is_parent_pipe_tree_visible(pipe_ctx->prev_odm_pipe)) + if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) return true; return false; } @@ -75,13 +71,9 @@ bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx) { if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) return true; - if (pipe_ctx->top_pipe && is_parent_pipe_tree_visible(pipe_ctx->top_pipe)) - return true; - if (pipe_ctx->bottom_pipe && is_child_pipe_tree_visible(pipe_ctx->bottom_pipe)) - return true; - if (pipe_ctx->prev_odm_pipe && is_parent_pipe_tree_visible(pipe_ctx->prev_odm_pipe)) + if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) return true; - if (pipe_ctx->next_odm_pipe && is_child_pipe_tree_visible(pipe_ctx->next_odm_pipe)) + if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) return true; return false; } diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.h b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h index b061497480b8..7c0cbf47e8ce 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/dc_common.h +++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h @@ -30,9 +30,9 @@ bool is_rgb_cspace(enum dc_color_space output_color_space); -bool is_child_pipe_tree_visible(struct pipe_ctx *pipe_ctx); +bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx); -bool is_parent_pipe_tree_visible(struct pipe_ctx *pipe_ctx); +bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx); bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 9f9fda3118d1..b208f06ed514 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1180,14 +1180,15 @@ static enum bp_result bios_parser_enable_disp_power_gating( static enum bp_result bios_parser_enable_lvtma_control( struct dc_bios *dcb, - uint8_t uc_pwr_on) + uint8_t uc_pwr_on, + uint8_t panel_instance) { struct bios_parser *bp = BP_FROM_DCB(dcb); if (!bp->cmd_tbl.enable_lvtma_control) return BP_RESULT_FAILURE; - return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on); + return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance); } static bool bios_parser_is_accelerated_mode( diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c index fce46ab54c54..53d7513b5083 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c @@ -64,9 +64,10 @@ bool bios_is_accelerated_mode( void bios_set_scratch_acc_mode_change( - struct dc_bios *bios) + struct dc_bios *bios, + uint32_t state) { - REG_UPDATE(BIOS_SCRATCH_6, S6_ACC_MODE, 1); + REG_UPDATE(BIOS_SCRATCH_6, S6_ACC_MODE, 
state); } diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h index 75a29e68fb27..e1b4a40a353d 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h @@ -32,7 +32,7 @@ uint8_t *bios_get_image(struct dc_bios *bp, uint32_t offset, uint32_t size); bool bios_is_accelerated_mode(struct dc_bios *bios); -void bios_set_scratch_acc_mode_change(struct dc_bios *bios); +void bios_set_scratch_acc_mode_change(struct dc_bios *bios, uint32_t state); void bios_set_scratch_critical_state(struct dc_bios *bios, bool state); uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios); diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index afc10b954ffa..ad13e4e36d77 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -1531,6 +1531,27 @@ static enum bp_result adjust_display_pll_v2( params.ucEncodeMode = (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom( bp_params->signal_type, false); + + if (EXEC_BIOS_CMD_TABLE(AdjustDisplayPll, params)) { + /* Convert output pixel clock back 10KHz-->KHz: multiply + * original pixel clock in KHz by ratio + * [output pxlClk/input pxlClk] */ + uint64_t pixel_clk_10_khz_out = + (uint64_t)le16_to_cpu(params.usPixelClock); + uint64_t pixel_clk = (uint64_t)bp_params->pixel_clock; + + if (pixel_clock_10KHz_in != 0) { + bp_params->adjusted_pixel_clock = + div_u64(pixel_clk * pixel_clk_10_khz_out, + pixel_clock_10KHz_in); + } else { + bp_params->adjusted_pixel_clock = 0; + BREAK_TO_DEBUGGER(); + } + + result = BP_RESULT_OK; + } + return result; } diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 25bdf1c38e0a..f1f672a997d7 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -218,6 +218,10 @@ static enum bp_result transmitter_control_v1_6( struct bios_parser *bp, struct bp_transmitter_control *cntl); +static enum bp_result transmitter_control_v1_7( + struct bios_parser *bp, + struct bp_transmitter_control *cntl); + static enum bp_result transmitter_control_fallback( struct bios_parser *bp, struct bp_transmitter_control *cntl); @@ -233,6 +237,9 @@ static void init_transmitter_control(struct bios_parser *bp) case 6: bp->cmd_tbl.transmitter_control = transmitter_control_v1_6; break; + case 7: + bp->cmd_tbl.transmitter_control = transmitter_control_v1_7; + break; default: dm_output_to_console("Don't have transmitter_control for v%d\n", crev); bp->cmd_tbl.transmitter_control = transmitter_control_fallback; @@ -304,13 +311,76 @@ static enum bp_result transmitter_control_v1_6( return result; } +static void transmitter_control_dmcub_v1_7( + struct dc_dmub_srv *dmcub, + struct dmub_dig_transmitter_control_data_v1_7 *dig) +{ + union dmub_rb_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.dig1_transmitter_control.header.type = DMUB_CMD__VBIOS; + cmd.dig1_transmitter_control.header.sub_type = + DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL; + cmd.dig1_transmitter_control.header.payload_bytes = + sizeof(cmd.dig1_transmitter_control) - + sizeof(cmd.dig1_transmitter_control.header); + cmd.dig1_transmitter_control.transmitter_control.dig_v1_7 = *dig; + + dc_dmub_srv_cmd_queue(dmcub, &cmd); + dc_dmub_srv_cmd_execute(dmcub); + dc_dmub_srv_wait_idle(dmcub); +} + 
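transmitter_control_dmcub_v1_7() above, and enable_lvtma_control_dmcub() later in this file, open-code the same three-step DMUB ring-buffer handshake. Expressed as a hypothetical shared helper (not something this patch introduces), the sequence is:

	/* Sketch of the common submission sequence; the patch repeats these
	 * three calls inside each *_dmcub() wrapper rather than sharing a helper.
	 */
	static void example_submit_vbios_cmd(struct dc_dmub_srv *dmcub,
					     union dmub_rb_cmd *cmd)
	{
		dc_dmub_srv_cmd_queue(dmcub, cmd);	/* copy command into the ring */
		dc_dmub_srv_cmd_execute(dmcub);		/* tell the DMCUB to run it */
		dc_dmub_srv_wait_idle(dmcub);		/* block until it completes */
	}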
+static enum bp_result transmitter_control_v1_7( + struct bios_parser *bp, + struct bp_transmitter_control *cntl) +{ + enum bp_result result = BP_RESULT_FAILURE; + const struct command_table_helper *cmd = bp->cmd_helper; + struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7 = {0}; + + dig_v1_7.phyid = cmd->phy_id_to_atom(cntl->transmitter); + dig_v1_7.action = (uint8_t)cntl->action; + + if (cntl->action == TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS) + dig_v1_7.mode_laneset.dplaneset = (uint8_t)cntl->lane_settings; + else + dig_v1_7.mode_laneset.digmode = + cmd->signal_type_to_atom_dig_mode(cntl->signal); + + dig_v1_7.lanenum = (uint8_t)cntl->lanes_number; + dig_v1_7.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel); + dig_v1_7.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id); + dig_v1_7.connobj_id = (uint8_t)cntl->connector_obj_id.id; + dig_v1_7.symclk_units.symclk_10khz = cntl->pixel_clock/10; + + if (cntl->action == TRANSMITTER_CONTROL_ENABLE || + cntl->action == TRANSMITTER_CONTROL_ACTIAVATE || + cntl->action == TRANSMITTER_CONTROL_DEACTIVATE) { + DC_LOG_BIOS("%s:dig_v1_7.symclk_units.symclk_10khz = %d\n", + __func__, dig_v1_7.symclk_units.symclk_10khz); + } + + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + transmitter_control_dmcub_v1_7(bp->base.ctx->dmub_srv, &dig_v1_7); + return BP_RESULT_OK; + } + +/* color_depth is not used any more; the driver has the deep color factor in the Phyclk */ + if (EXEC_BIOS_CMD_TABLE(dig1transmittercontrol, dig_v1_7)) + result = BP_RESULT_OK; + return result; +} + static enum bp_result transmitter_control_fallback( struct bios_parser *bp, struct bp_transmitter_control *cntl) { if (bp->base.ctx->dc->ctx->dmub_srv && bp->base.ctx->dc->debug.dmub_command_table) { - return transmitter_control_v1_6(bp, cntl); + return transmitter_control_v1_7(bp, cntl); } return BP_RESULT_FAILURE; @@ -911,7 +981,8 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id) static enum bp_result enable_lvtma_control( struct bios_parser *bp, - uint8_t uc_pwr_on); + uint8_t uc_pwr_on, + uint8_t panel_instance); static void init_enable_lvtma_control(struct bios_parser *bp) { @@ -922,19 +993,21 @@ static void init_enable_lvtma_control(struct bios_parser *bp) static void enable_lvtma_control_dmcub( struct dc_dmub_srv *dmcub, - uint8_t uc_pwr_on) + uint8_t uc_pwr_on, + uint8_t panel_instance) { union dmub_rb_cmd cmd; memset(&cmd, 0, sizeof(cmd)); - cmd.cmd_common.header.type = DMUB_CMD__VBIOS; - cmd.cmd_common.header.sub_type = + cmd.lvtma_control.header.type = DMUB_CMD__VBIOS; + cmd.lvtma_control.header.sub_type = DMUB_CMD__VBIOS_LVTMA_CONTROL; - cmd.cmd_common.cmd_buffer[0] = + cmd.lvtma_control.data.uc_pwr_action = uc_pwr_on; - + cmd.lvtma_control.data.panel_inst = + panel_instance; dc_dmub_srv_cmd_queue(dmcub, &cmd); dc_dmub_srv_cmd_execute(dmcub); dc_dmub_srv_wait_idle(dmcub); @@ -943,14 +1016,16 @@ static void enable_lvtma_control_dmcub( static enum bp_result enable_lvtma_control( struct bios_parser *bp, - uint8_t uc_pwr_on) + uint8_t uc_pwr_on, + uint8_t panel_instance) { enum bp_result result = BP_RESULT_FAILURE; if (bp->base.ctx->dc->ctx->dmub_srv && bp->base.ctx->dc->debug.dmub_command_table) { enable_lvtma_control_dmcub(bp->base.ctx->dmub_srv, - uc_pwr_on, + uc_pwr_on, + panel_instance); return BP_RESULT_OK; } return result; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h index 7bdce013cde5..be060b4b87db 100644 --- 
a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h @@ -95,7 +95,8 @@ struct cmd_tbl { unsigned int (*get_smu_clock_info)( struct bios_parser *bp, uint8_t id); enum bp_result (*enable_lvtma_control)(struct bios_parser *bp, - uint8_t uc_pwr_on); + uint8_t uc_pwr_on, + uint8_t panel_instance); }; void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 995ffbbf64e7..f7c728d4f50a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -87,12 +87,16 @@ int clk_mgr_helper_get_active_plane_cnt( void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr) { - struct dc_link *edp_link = get_edp_link(dc); + struct dc_link *edp_links[MAX_NUM_EDP]; + struct dc_link *edp_link = NULL; + int edp_num; + get_edp_links(dc, edp_links, &edp_num); if (dc->hwss.exit_optimized_pwr_state) dc->hwss.exit_optimized_pwr_state(dc, dc->current_state); - if (edp_link) { + if (edp_num) { + edp_link = edp_links[0]; clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active; dc_link_set_psr_allow_active(edp_link, false, false, false); } @@ -101,11 +105,16 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr) { - struct dc_link *edp_link = get_edp_link(dc); + struct dc_link *edp_links[MAX_NUM_EDP]; + struct dc_link *edp_link = NULL; + int edp_num; - if (edp_link) + get_edp_links(dc, edp_links, &edp_num); + if (edp_num) { + edp_link = edp_links[0]; dc_link_set_psr_allow_active(edp_link, clk_mgr->psr_allow_active_cache, false, false); + } if (dc->hwss.optimize_pwr_state) dc->hwss.optimize_pwr_state(dc, dc->current_state); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index ec9dc265cde0..372d53b5a34d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -361,7 +361,7 @@ void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base) REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, &dppclk_wdivider); disp_divider = dentist_get_divider_from_did(dispclk_wdivider); - dpp_divider = dentist_get_divider_from_did(dispclk_wdivider); + dpp_divider = dentist_get_divider_from_did(dppclk_wdivider); if (disp_divider && dpp_divider) { /* Calculate the current DFS clock, in kHz.*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index c7e5a64e06af..81ea5d3a1947 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -252,6 +252,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base, bool force_reset = false; bool update_uclk = false; bool p_state_change_support; + int total_plane_count; if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present) return; @@ -292,7 +293,8 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base, clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz; clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; - p_state_change_support = new_clocks->p_state_change_support || (display_count == 0); + 
total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context); + p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0); if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { clk_mgr_base->clks.p_state_change_support = p_state_change_support; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 8f8a13c7cf73..8e6c815b55d2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -41,6 +41,7 @@ #include "dc_bios_types.h" #include "bios_parser_interface.h" +#include "bios/bios_parser_helper.h" #include "include/irq_service_interface.h" #include "transform.h" #include "dmcu.h" @@ -68,6 +69,7 @@ #include "dmub/dmub_srv.h" +#include "i2caux_interface.h" #include "dce/dmub_hw_lock_mgr.h" #include "dc_trace.h" @@ -163,6 +165,18 @@ static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_li return count; } +static int get_seamless_boot_stream_count(struct dc_state *ctx) +{ + uint8_t i; + uint8_t seamless_boot_stream_count = 0; + + for (i = 0; i < ctx->stream_count; i++) + if (ctx->streams[i]->apply_seamless_boot_optimization) + seamless_boot_stream_count++; + + return seamless_boot_stream_count; +} + static bool create_links( struct dc *dc, uint32_t num_virtual_links) @@ -334,6 +348,88 @@ bool dc_stream_get_crtc_position(struct dc *dc, return ret; } +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) +bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream, + struct crc_params *crc_window) +{ + int i; + struct dmcu *dmcu = dc->res_pool->dmcu; + struct pipe_ctx *pipe; + struct crc_region tmp_win, *crc_win; + struct otg_phy_mux mapping_tmp, *mux_mapping; + + /*crc window can't be null*/ + if (!crc_window) + return false; + + if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) { + crc_win = &tmp_win; + mux_mapping = &mapping_tmp; + /*set crc window*/ + tmp_win.x_start = crc_window->windowa_x_start; + tmp_win.y_start = crc_window->windowa_y_start; + tmp_win.x_end = crc_window->windowa_x_end; + tmp_win.y_end = crc_window->windowa_y_end; + + for (i = 0; i < MAX_PIPES; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) + break; + } + + /* Stream not found */ + if (i == MAX_PIPES) + return false; + + + /*set mux routing info*/ + mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst; + mapping_tmp.otg_output_num = pipe->stream_res.tg->inst; + + dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping); + } else { + DC_LOG_DC("dmcu is not initialized"); + return false; + } + + return true; +} + +bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream) +{ + int i; + struct dmcu *dmcu = dc->res_pool->dmcu; + struct pipe_ctx *pipe; + struct otg_phy_mux mapping_tmp, *mux_mapping; + + if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) { + mux_mapping = &mapping_tmp; + + for (i = 0; i < MAX_PIPES; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) + break; + } + + /* Stream not found */ + if (i == MAX_PIPES) + return false; + + + /*set mux routing info*/ + mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst; + mapping_tmp.otg_output_num = pipe->stream_res.tg->inst; + + dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping); + } else { + DC_LOG_DC("dmcu is not 
initialized"); + return false; + } + + return true; +} +#endif + /** * dc_stream_configure_crc() - Configure CRC capture for the given stream. * @dc: DC Object @@ -970,7 +1066,6 @@ struct dc *dc_create(const struct dc_init_data *init_params) full_pipe_count, dc->res_pool->stream_enc_count); - dc->optimize_seamless_boot_streams = 0; dc->caps.max_links = dc->link_count; dc->caps.max_audios = dc->res_pool->audio_count; dc->caps.linear_pitch_alignment = 64; @@ -1000,22 +1095,25 @@ destruct_dc: static void detect_edp_presence(struct dc *dc) { - struct dc_link *edp_link = get_edp_link(dc); - bool edp_sink_present = true; + struct dc_link *edp_links[MAX_NUM_EDP]; + struct dc_link *edp_link = NULL; + enum dc_connection_type type; + int i; + int edp_num; - if (!edp_link) + get_edp_links(dc, edp_links, &edp_num); + if (!edp_num) return; - if (dc->config.edp_not_connected) { - edp_sink_present = false; - } else { - enum dc_connection_type type; - dc_link_detect_sink(edp_link, &type); - if (type == dc_connection_none) - edp_sink_present = false; + for (i = 0; i < edp_num; i++) { + edp_link = edp_links[i]; + if (dc->config.edp_not_connected) { + edp_link->edp_sink_present = false; + } else { + dc_link_detect_sink(edp_link, &type); + edp_link->edp_sink_present = (type != dc_connection_none); + } } - - edp_link->edp_sink_present = edp_sink_present; } void dc_hardware_init(struct dc *dc) @@ -1091,6 +1189,7 @@ static void program_timing_sync( for (i = 0; i < pipe_count; i++) { int group_size = 1; + enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE; struct pipe_ctx *pipe_set[MAX_PIPES]; if (!unsynced_pipes[i]) @@ -1105,10 +1204,22 @@ static void program_timing_sync( for (j = i + 1; j < pipe_count; j++) { if (!unsynced_pipes[j]) continue; - - if (resource_are_streams_timing_synchronizable( + if (sync_type != TIMING_SYNCHRONIZABLE && + dc->hwss.enable_vblanks_synchronization && + unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks && + resource_are_vblanks_synchronizable( + unsynced_pipes[j]->stream, + pipe_set[0]->stream)) { + sync_type = VBLANK_SYNCHRONIZABLE; + pipe_set[group_size] = unsynced_pipes[j]; + unsynced_pipes[j] = NULL; + group_size++; + } else + if (sync_type != VBLANK_SYNCHRONIZABLE && + resource_are_streams_timing_synchronizable( unsynced_pipes[j]->stream, pipe_set[0]->stream)) { + sync_type = TIMING_SYNCHRONIZABLE; pipe_set[group_size] = unsynced_pipes[j]; unsynced_pipes[j] = NULL; group_size++; @@ -1134,7 +1245,6 @@ static void program_timing_sync( } } - for (k = 0; k < group_size; k++) { struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream); @@ -1164,8 +1274,14 @@ static void program_timing_sync( } if (group_size > 1) { - dc->hwss.enable_timing_synchronization( - dc, group_index, group_size, pipe_set); + if (sync_type == TIMING_SYNCHRONIZABLE) { + dc->hwss.enable_timing_synchronization( + dc, group_index, group_size, pipe_set); + } else + if (sync_type == VBLANK_SYNCHRONIZABLE) { + dc->hwss.enable_vblanks_synchronization( + dc, group_index, group_size, pipe_set); + } group_index++; } num_group++; @@ -1202,8 +1318,9 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc, unsigned int i, enc_inst, tg_inst = 0; // Seamless port only support single DP and EDP so far - if (sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT && - sink->sink_signal != SIGNAL_TYPE_EDP) + if ((sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT && + sink->sink_signal != SIGNAL_TYPE_EDP) || + sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) return false; /* Check for 
enabled DIG to identify enabled display */ @@ -1377,11 +1494,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc->hwss.enable_accelerated_mode(dc, context); } - for (i = 0; i < context->stream_count; i++) - if (context->streams[i]->apply_seamless_boot_optimization) - dc->optimize_seamless_boot_streams++; - - if (context->stream_count > dc->optimize_seamless_boot_streams || + if (context->stream_count > get_seamless_boot_stream_count(context) || context->stream_count == 0) dc->hwss.prepare_bandwidth(dc, context); @@ -1464,7 +1577,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc_enable_stereo(dc, context, dc_streams, context->stream_count); - if (context->stream_count > dc->optimize_seamless_boot_streams || + if (context->stream_count > get_seamless_boot_stream_count(context) || context->stream_count == 0) { /* Must wait for no flips to be pending before doing optimize bw */ wait_for_no_pipes_pending(dc, context); @@ -1578,7 +1691,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc) int i; struct dc_state *context = dc->current_state; - if ((!dc->optimized_required) || dc->optimize_seamless_boot_streams > 0) + if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0) return; post_surface_trace(dc); @@ -2277,8 +2390,6 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->dither_option) stream->dither_option = *update->dither_option; - if (update->pending_test_pattern) - stream->test_pattern = *update->pending_test_pattern; /* update current stream with writeback info */ if (update->wb_update) { int i; @@ -2324,7 +2435,6 @@ static void commit_planes_do_stream_update(struct dc *dc, struct dc_state *context) { int j; - bool should_program_abm; // Stream updates for (j = 0; j < dc->res_pool->pipe_count; j++) { @@ -2375,15 +2485,6 @@ static void commit_planes_do_stream_update(struct dc *dc, } } - if (stream_update->pending_test_pattern) { - dc_link_dp_set_test_pattern(stream->link, - stream->test_pattern.type, - stream->test_pattern.color_space, - stream->test_pattern.p_link_settings, - stream->test_pattern.p_custom_pattern, - stream->test_pattern.cust_pattern_size); - } - /* Full fe update*/ if (update_type == UPDATE_TYPE_FAST) continue; @@ -2398,9 +2499,10 @@ static void commit_planes_do_stream_update(struct dc *dc, if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only) pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); - dc->hwss.optimize_bandwidth(dc, dc->current_state); + dc->optimized_required = true; + } else { - if (dc->optimize_seamless_boot_streams == 0) + if (get_seamless_boot_stream_count(context) == 0) dc->hwss.prepare_bandwidth(dc, dc->current_state); core_link_enable_stream(dc->current_state, pipe_ctx); @@ -2408,7 +2510,7 @@ static void commit_planes_do_stream_update(struct dc *dc, } if (stream_update->abm_level && pipe_ctx->stream_res.abm) { - should_program_abm = true; + bool should_program_abm = true; // if otg funcs defined check if blanked before programming if (pipe_ctx->stream_res.tg->funcs->is_blanked) @@ -2439,7 +2541,7 @@ static void commit_planes_for_stream(struct dc *dc, int i, j; struct pipe_ctx *top_pipe_to_program = NULL; - if (dc->optimize_seamless_boot_streams > 0 && surface_count > 0) { + if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { /* Optimize seamless boot flag keeps clocks and watermarks high until * first flip. After first flip, optimization is required to lower * bandwidth. 
Important to note that it is expected UEFI will @@ -2448,9 +2550,8 @@ static void commit_planes_for_stream(struct dc *dc, */ if (stream->apply_seamless_boot_optimization) { stream->apply_seamless_boot_optimization = false; - dc->optimize_seamless_boot_streams--; - if (dc->optimize_seamless_boot_streams == 0) + if (get_seamless_boot_stream_count(context) == 0) dc->optimized_required = true; } } @@ -2460,7 +2561,7 @@ static void commit_planes_for_stream(struct dc *dc, dc_allow_idle_optimizations(dc, false); #endif - if (dc->optimize_seamless_boot_streams == 0) + if (get_seamless_boot_stream_count(context) == 0) dc->hwss.prepare_bandwidth(dc, context); context_clock_trace(dc, context); @@ -2868,6 +2969,9 @@ void dc_set_power_state( struct kref refcount; struct display_mode_lib *dml; + if (!dc->current_state) + return; + switch (power_state) { case DC_ACPI_CM_POWER_STATE_D0: dc_resource_state_construct(dc, dc->current_state); @@ -3176,3 +3280,113 @@ void dc_hardware_release(struct dc *dc) dc->hwss.hardware_release(dc); } #endif + +/** + ***************************************************************************** + * Function: dc_enable_dmub_notifications + * + * @brief + * Returns whether dmub notification can be enabled + * + * @param + * [in] dc: dc structure + * + * @return + * True to enable dmub notifications, False otherwise + ***************************************************************************** + */ +bool dc_enable_dmub_notifications(struct dc *dc) +{ + /* dmub aux needs dmub notifications to be enabled */ + return dc->debug.enable_dmub_aux_for_legacy_ddc; +} + +/** + ***************************************************************************** + * Function: dc_process_dmub_aux_transfer_async + * + * @brief + * Submits aux command to dmub via inbox message + * Sets port index appropriately for legacy DDC + * + * @param + * [in] dc: dc structure + * [in] link_index: link index + * [in] payload: aux payload + * + * @return + * True if successful, False if failure + ***************************************************************************** + */ +bool dc_process_dmub_aux_transfer_async(struct dc *dc, + uint32_t link_index, + struct aux_payload *payload) +{ + uint8_t action; + union dmub_rb_cmd cmd = {0}; + struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; + + ASSERT(payload->length <= 16); + + cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS; + cmd.dp_aux_access.header.payload_bytes = 0; + cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC; + cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst; + cmd.dp_aux_access.aux_control.sw_crc_enabled = 0; + cmd.dp_aux_access.aux_control.timeout = 0; + cmd.dp_aux_access.aux_control.dpaux.address = payload->address; + cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux; + cmd.dp_aux_access.aux_control.dpaux.length = payload->length; + + /* set aux action */ + if (payload->i2c_over_aux) { + if (payload->write) { + if (payload->mot) + action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT; + else + action = DP_AUX_REQ_ACTION_I2C_WRITE; + } else { + if (payload->mot) + action = DP_AUX_REQ_ACTION_I2C_READ_MOT; + else + action = DP_AUX_REQ_ACTION_I2C_READ; + } + } else { + if (payload->write) + action = DP_AUX_REQ_ACTION_DPCD_WRITE; + else + action = DP_AUX_REQ_ACTION_DPCD_READ; + } + + cmd.dp_aux_access.aux_control.dpaux.action = action; + + if (payload->length && payload->write) { + memcpy(cmd.dp_aux_access.aux_control.dpaux.data, + payload->data, + payload->length + ); + } + + 
dc_dmub_srv_cmd_queue(dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dmub_srv); + dc_dmub_srv_wait_idle(dmub_srv); + + return true; +} + +/** + ***************************************************************************** + * Function: dc_disable_accelerated_mode + * + * @brief + * disable accelerated mode + * + * @param + * [in] dc: dc structure + * + ***************************************************************************** + */ +void dc_disable_accelerated_mode(struct dc *dc) +{ + bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0); +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index bd0101013ec8..f9a33dc52c45 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -91,8 +91,14 @@ static void dc_link_destruct(struct dc_link *link) if (link->panel_cntl) link->panel_cntl->funcs->destroy(&link->panel_cntl); - if (link->link_enc) + if (link->link_enc) { + /* Update link encoder tracking variables. These are used for the dynamic + * assignment of link encoders to streams. + */ + link->dc->res_pool->link_encoders[link->link_enc->preferred_engine] = NULL; + link->dc->res_pool->dig_link_enc_count--; link->link_enc->funcs->destroy(&link->link_enc); + } if (link->local_sink) dc_sink_release(link->local_sink); @@ -1500,7 +1506,7 @@ static bool dc_link_construct(struct dc_link *link, (link->link_id.id == CONNECTOR_ID_EDP || link->link_id.id == CONNECTOR_ID_LVDS)) { panel_cntl_init_data.ctx = dc_ctx; - panel_cntl_init_data.inst = 0; + panel_cntl_init_data.inst = link->link_index; link->panel_cntl = link->dc->res_pool->funcs->panel_cntl_create( &panel_cntl_init_data); @@ -1532,6 +1538,12 @@ static bool dc_link_construct(struct dc_link *link, DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C); + /* Update link encoder tracking variables. These are used for the dynamic + * assignment of link encoders to streams. + */ + link->dc->res_pool->link_encoders[link->link_enc->preferred_engine] = link->link_enc; + link->dc->res_pool->dig_link_enc_count++; + link->link_enc_hw_inst = link->link_enc->transmitter; for (i = 0; i < 4; i++) { @@ -1603,6 +1615,7 @@ static bool dc_link_construct(struct dc_link *link, link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__); + kfree(info); return true; device_tag_fail: link->link_enc->funcs->destroy(&link->link_enc); @@ -3257,6 +3270,16 @@ void core_link_enable_stream( /* Do not touch link on seamless boot optimization. 
*/ if (pipe_ctx->stream->apply_seamless_boot_optimization) { pipe_ctx->stream->dpms_off = false; + + /* Still enable stream features & audio on seamless boot for DP external displays */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { + enable_stream_features(pipe_ctx); + if (pipe_ctx->stream_res.audio != NULL) { + pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc); + dc->hwss.enable_audio_stream(pipe_ctx); + } + } + #if defined(CONFIG_DRM_AMD_DC_HDCP) update_psp_stream_config(pipe_ctx, false); #endif @@ -3480,15 +3503,12 @@ uint32_t dc_bandwidth_in_kbps_from_timing( { uint32_t bits_per_channel = 0; uint32_t kbps; - struct fixed31_32 link_bw_kbps; +#if defined(CONFIG_DRM_AMD_DC_DCN) if (timing->flags.DSC) { - link_bw_kbps = dc_fixpt_from_int(timing->pix_clk_100hz); - link_bw_kbps = dc_fixpt_div_int(link_bw_kbps, 160); - link_bw_kbps = dc_fixpt_mul_int(link_bw_kbps, timing->dsc_cfg.bits_per_pixel); - kbps = dc_fixpt_ceil(link_bw_kbps); - return kbps; + return dc_dsc_stream_bandwidth_in_kbps(timing->pix_clk_100hz, timing->dsc_cfg.bits_per_pixel); } +#endif switch (timing->display_color_depth) { case COLOR_DEPTH_666: diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index ae6484ab567b..64414c51312d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -36,6 +36,7 @@ #include "core_types.h" #include "dc_link_ddc.h" #include "dce/dce_aux.h" +#include "dmub/inc/dmub_cmd.h" #define DC_LOGGER_INIT(logger) @@ -558,7 +559,7 @@ bool dal_ddc_service_query_ddc_data( /* should not set mot (middle of transaction) to 0 * if there are pending read payloads */ - payload.mot = read_size == 0 ? 
false : true; + payload.mot = !(read_size == 0); payload.length = write_size; payload.data = write_buf; @@ -655,7 +656,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc, */ int dc_link_aux_transfer_raw(struct ddc_service *ddc, struct aux_payload *payload, - enum aux_channel_operation_result *operation_result) + enum aux_return_code_type *operation_result) { return dce_aux_transfer_raw(ddc, payload, operation_result); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index c1391bfb7a9b..47e6c33f73cb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1669,6 +1669,22 @@ bool perform_link_training_with_retries( msleep(delay_dp_power_up_in_ms); } +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (panel_mode == DP_PANEL_MODE_EDP) { + struct cp_psp *cp_psp = &stream->ctx->cp_psp; + + if (cp_psp && cp_psp->funcs.enable_assr) { + if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) { + /* since eDP implies ASSR on, change panel + * mode to disable ASSR + */ + panel_mode = DP_PANEL_MODE_DEFAULT; + } + } else + panel_mode = DP_PANEL_MODE_DEFAULT; + } +#endif + dp_set_panel_mode(link, panel_mode); if (link->aux_access_disabled) { @@ -4265,7 +4281,7 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode) if (edp_config_set.bits.PANEL_MODE_EDP != panel_mode_edp) { - enum dc_status result = DC_ERROR_UNEXPECTED; + enum dc_status result; edp_config_set.bits.PANEL_MODE_EDP = panel_mode_edp; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 0c26c2ade782..3c91d16c2710 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -417,6 +417,49 @@ int resource_get_clock_source_reference( return -1; } +bool resource_are_vblanks_synchronizable( + struct dc_stream_state *stream1, + struct dc_stream_state *stream2) +{ + uint32_t base60_refresh_rates[] = {10, 20, 5}; + uint8_t i; + uint8_t rr_count = sizeof(base60_refresh_rates)/sizeof(base60_refresh_rates[0]); + uint64_t frame_time_diff; + + if (stream1->ctx->dc->config.vblank_alignment_dto_params && + stream1->ctx->dc->config.vblank_alignment_max_frame_time_diff > 0 && + dc_is_dp_signal(stream1->signal) && + dc_is_dp_signal(stream2->signal) && + false == stream1->has_non_synchronizable_pclk && + false == stream2->has_non_synchronizable_pclk && + stream1->timing.flags.VBLANK_SYNCHRONIZABLE && + stream2->timing.flags.VBLANK_SYNCHRONIZABLE) { + /* disable refresh rates higher than 60Hz for now */ + if (stream1->timing.pix_clk_100hz*100/stream1->timing.h_total/ + stream1->timing.v_total > 60) + return false; + if (stream2->timing.pix_clk_100hz*100/stream2->timing.h_total/ + stream2->timing.v_total > 60) + return false; + frame_time_diff = (uint64_t)10000 * + stream1->timing.h_total * + stream1->timing.v_total * + stream2->timing.pix_clk_100hz; + frame_time_diff = div_u64(frame_time_diff, stream1->timing.pix_clk_100hz); + frame_time_diff = div_u64(frame_time_diff, stream2->timing.h_total); + frame_time_diff = div_u64(frame_time_diff, stream2->timing.v_total); + for (i = 0; i < rr_count; i++) { + int64_t diff = (int64_t)div_u64(frame_time_diff * base60_refresh_rates[i], 10) - 10000; + + if (diff < 0) + diff = -diff; + if (diff < stream1->ctx->dc->config.vblank_alignment_max_frame_time_diff) + return true; + } + } + return false; +} + bool resource_are_streams_timing_synchronizable( 
struct dc_stream_state *stream1, struct dc_stream_state *stream2) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c new file mode 100644 index 000000000000..31761f3595a6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c @@ -0,0 +1,64 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + */ + +#include "dc/dc_stat.h" +#include "dmub/dmub_srv_stat.h" +#include "dc_dmub_srv.h" + +/** + * DOC: DC STAT Interface + * + * These interfaces are called without acquiring DAL and DC locks. + * Hence, there are limitations on what these interfaces can access. Only + * variables exclusively defined for these interfaces can be modified. 
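
Earlier in this diff, resource_are_vblanks_synchronizable() boils down to one ratio test: the two frame periods, scaled to a fixed-point 10000, must be equal or one exactly double the other (the base60_refresh_rates table {10, 20, 5}, in tenths of a 60 Hz multiple). A worked check with hypothetical CEA-style totals; note this sketch multiplies in one expression, while the kernel version divides through div_u64 in stages to keep intermediates in range for arbitrary timings:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Scaled frame-period ratio, as in the hunk above:
 * 10000 * (h1*v1/pclk1) / (h2*v2/pclk2), all in integer math.
 */
static uint64_t scaled_period_ratio(uint64_t h1, uint64_t v1, uint64_t pclk1_100hz,
				    uint64_t h2, uint64_t v2, uint64_t pclk2_100hz)
{
	return 10000ull * h1 * v1 * pclk2_100hz / (pclk1_100hz * h2 * v2);
}

int main(void)
{
	/* hypothetical 2200x1125 totals: ~60 Hz (148.5 MHz) vs ~30 Hz (74.25 MHz) */
	uint64_t r = scaled_period_ratio(2200, 1125, 1485000,
					 2200, 1125, 742500);
	static const int base60[] = { 10, 20, 5 };	/* x1, x2, x0.5 */

	for (int i = 0; i < 3; i++) {
		int64_t diff = (int64_t)(r * base60[i] / 10) - 10000;

		printf("rate[%d]: |diff| = %lld\n", i, (long long)llabs(diff));
	}
	return 0;	/* the 60/30 pair hits diff == 0 at base60[1] == 20 */
}
```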
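
The DOC block above is the whole contract for dc_stat. A hypothetical caller, shaped like an outbox1 low-IRQ handler with the service stubbed out; the names here are illustrative, not the driver's:

```c
#include <stddef.h>

struct dmub_notification { int type; };	/* trimmed stand-in */
struct dc { int dummy; };

/* stub standing in for the real dc_stat_get_dmub_notification() */
static void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification *notify)
{
	(void)dc;
	notify->type = 0;
}

/* Hypothetical low-IRQ outbox1 handler: drain one notification while
 * honoring the DOC rule above, i.e. no DC/DAL locks taken, and only
 * state owned by the stat path may be written.
 */
static void handle_dmub_outbox1_irq(struct dc *dc)
{
	struct dmub_notification notify;

	dc_stat_get_dmub_notification(dc, &notify);
	switch (notify.type) {
	default:	/* dispatch per notification type */
		break;
	}
}

int main(void)
{
	struct dc dc = { 0 };

	handle_dmub_outbox1_irq(&dc);
	return 0;
}
```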
+ */ + +/** + ***************************************************************************** + * Function: dc_stat_get_dmub_notification + * + * @brief + * Calls dmub layer to retrieve dmub notification + * + * @param + * [in] dc: dc structure + * [in] notify: dmub notification structure + * + * @return + * None + ***************************************************************************** + */ +void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification *notify) +{ + /** + * This function is called without dal and dc locks, so + * we shall not modify any dc, dc_dmub_srv or dmub variables + * except variables exclusively accessed by this function + */ + struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub; + enum dmub_status status; + + status = dmub_srv_stat_get_notification(dmub, notify); + ASSERT(status == DMUB_STATUS_OK); +} diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 18ed0d3f247e..d163007e057c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -42,13 +42,17 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.122" +/* forward declaration */ +struct aux_payload; + +#define DC_VER "3.2.127" #define MAX_SURFACES 3 #define MAX_PLANES 6 #define MAX_STREAMS 6 #define MAX_SINKS_PER_LINK 4 #define MIN_VIEWPORT_SIZE 12 +#define MAX_NUM_EDP 2 /******************************************************************************* * Display Core Interfaces @@ -151,6 +155,8 @@ struct dc_caps { uint32_t max_links; uint32_t max_audios; uint32_t max_slave_planes; + uint32_t max_slave_yuv_planes; + uint32_t max_slave_rgb_planes; uint32_t max_planes; uint32_t max_downscale_ratio; uint32_t i2c_speed_in_khz; @@ -301,6 +307,8 @@ struct dc_config { #if defined(CONFIG_DRM_AMD_DC_DCN) bool clamp_min_dcfclk; #endif + uint64_t vblank_alignment_dto_params; + uint8_t vblank_alignment_max_frame_time_diff; }; enum visual_confirm { @@ -528,6 +536,10 @@ struct dc_debug_options { bool disable_dsc; bool enable_dram_clock_change_one_display_vactive; union mem_low_power_enable_options enable_mem_low_power; + bool force_vblank_alignment; + + /* Enable dmub aux for legacy ddc */ + bool enable_dmub_aux_for_legacy_ddc; }; struct dc_debug_data { @@ -628,7 +640,6 @@ struct dc { #endif /* Require to maintain clocks and bandwidth for UEFI enabled HW */ - int optimize_seamless_boot_streams; /* FBC compressor */ struct compressor *fbc_compressor; @@ -1292,8 +1303,20 @@ void dc_hardware_release(struct dc *dc); bool dc_set_psr_allow_active(struct dc *dc, bool enable); +bool dc_enable_dmub_notifications(struct dc *dc); + +bool dc_process_dmub_aux_transfer_async(struct dc *dc, + uint32_t link_index, + struct aux_payload *payload); + /******************************************************************************* * DSC Interfaces ******************************************************************************/ #include "dc_dsc.h" + +/******************************************************************************* + * Disable acc mode Interfaces + ******************************************************************************/ +void dc_disable_accelerated_mode(struct dc *dc); + #endif /* DC_INTERFACE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index 509d23fdd3c9..86ab8f16f621 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h @@ -139,7 +139,8 @@ struct dc_vbios_funcs { enum 
bp_result (*enable_lvtma_control)( struct dc_bios *bios, - uint8_t uc_pwr_on); + uint8_t uc_pwr_on, + uint8_t panel_instance); enum bp_result (*get_soc_bb_info)( struct dc_bios *dcb, diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h index 4f8f576d5fcf..7769bd099a5a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h @@ -44,16 +44,6 @@ enum i2caux_transaction_action { I2CAUX_TRANSACTION_ACTION_DP_READ = 0x90 }; -enum aux_channel_operation_result { - AUX_CHANNEL_OPERATION_SUCCEEDED, - AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN, - AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY, - AUX_CHANNEL_OPERATION_FAILED_TIMEOUT, - AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON, - AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE -}; - - struct aux_request_transaction_data { enum aux_transaction_type type; enum i2caux_transaction_action action; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index b98754811977..6b72af2b3f4c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -26,6 +26,10 @@ #include "dc.h" #include "dc_dmub_srv.h" #include "../dmub/dmub_srv.h" +#include "dm_helpers.h" + +#define CTX dc_dmub_srv->ctx +#define DC_LOGGER CTX->logger static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc, struct dmub_srv *dmub) @@ -106,6 +110,25 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) DC_ERROR("Error waiting for DMUB idle: status=%d\n", status); } +bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd) +{ + struct dmub_srv *dmub; + enum dmub_status status; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return false; + + dmub = dc_dmub_srv->dmub; + + status = dmub_srv_cmd_with_reply_data(dmub, cmd); + if (status != DMUB_STATUS_OK) { + DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); + return false; + } + + return true; +} + void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv) { struct dmub_srv *dmub = dc_dmub_srv->dmub; @@ -148,3 +171,14 @@ bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv, dmub, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK, stream_mask, timeout) == DMUB_STATUS_OK; } + +bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry) +{ + struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub; + return dmub_srv_get_outbox0_msg(dmub, entry); +} + +void dc_dmub_trace_event_control(struct dc *dc, bool enable) +{ + dm_helpers_dmub_outbox0_interrupt_control(dc->ctx, enable); +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index bb4ab61887e4..338f776990db 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -30,6 +30,7 @@ #include "dmub/dmub_srv.h" struct dmub_srv; +struct dc; struct dc_reg_helper_state { bool gather_in_progress; @@ -56,6 +57,13 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv); +bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd); + bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv, unsigned int stream_mask); + +bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry); + +void dc_dmub_trace_event_control(struct dc *dc, bool enable); + 
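
dc_dmub_srv_cmd_with_reply_data() above turns the usual queue/execute/wait dance into a single blocking round trip whose reply lands back in the caller's command buffer. A self-contained usage sketch; the command union and the query itself are stand-ins, the real layout lives in dmub_cmd.h:

```c
#include <stdbool.h>
#include <string.h>

union dmub_rb_cmd { unsigned char raw[64]; };	/* stand-in for dmub_cmd.h */
struct dc_dmub_srv { int dummy; };

/* stub for the real blocking round trip in dc_dmub_srv.c */
static bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *srv, union dmub_rb_cmd *cmd)
{
	(void)srv; (void)cmd;
	return true;	/* real version: queue, execute, wait, copy reply */
}

/* Hypothetical query: send one command and read what the firmware
 * wrote back into the same slot.
 */
static bool query_fw(struct dc_dmub_srv *srv)
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	/* ...fill header type/sub_type/payload_bytes for the query... */

	if (!dc_dmub_srv_cmd_with_reply_data(srv, &cmd))
		return false;	/* no DMUB, or no reply before timeout */

	/* on success, cmd now carries the reply payload in place */
	return true;
}

int main(void)
{
	struct dc_dmub_srv srv = { 0 };

	return query_fw(&srv) ? 0 : 1;
}
```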
#endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index ec55b77727d5..c51d2d961b7a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -51,6 +51,7 @@ struct dc_dsc_policy { int min_slice_height; // Must not be less than 8 uint32_t max_target_bpp; uint32_t min_target_bpp; + uint32_t preferred_bpp_x16; bool enable_dsc_when_not_needed; }; @@ -62,8 +63,8 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, bool dc_dsc_compute_bandwidth_range( const struct display_stream_compressor *dsc, uint32_t dsc_min_slice_height_override, - uint32_t min_bpp, - uint32_t max_bpp, + uint32_t min_bpp_x16, + uint32_t max_bpp_x16, const struct dsc_dec_dpcd_caps *dsc_sink_caps, const struct dc_crtc_timing *timing, struct dc_dsc_bw_range *range); @@ -77,12 +78,16 @@ bool dc_dsc_compute_config( const struct dc_crtc_timing *timing, struct dc_dsc_config *dsc_cfg); +uint32_t dc_dsc_stream_bandwidth_in_kbps(uint32_t pix_clk_100hz, uint32_t bpp_x16); + void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, - uint32_t max_target_bpp_limit_override, + uint32_t max_target_bpp_limit_override_x16, struct dc_dsc_policy *policy); void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit); void dc_dsc_policy_set_enable_dsc_when_not_needed(bool enable); +uint32_t dc_dsc_stream_bandwidth_in_kbps(uint32_t pix_clk_100hz, uint32_t bpp_x16); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index b41e6367b15e..bcec019efa6f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -705,6 +705,7 @@ struct dc_crtc_timing_flags { #ifndef TRIM_FSFT uint32_t FAST_TRANSPORT: 1; #endif + uint32_t VBLANK_SYNCHRONIZABLE: 1; }; enum dc_timing_3d_format { @@ -769,6 +770,7 @@ struct dc_crtc_timing { #endif struct dc_crtc_timing_flags flags; + uint32_t dsc_fixed_bits_per_pixel_x16; /* DSC target bitrate in 1/16 of bpp (e.g. 128 -> 8bpp) */ struct dc_dsc_config dsc_cfg; }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index e189f16bc026..c50ef5a909a6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -103,6 +103,10 @@ struct dc_link { bool lttpr_non_transparent_mode; bool is_internal_display; + /* TODO: Rename. Flag an endpoint as having a programmable mapping to a + * DIG encoder. */ + bool is_dig_mapping_flexible; + bool edp_sink_present; /* caps is the same as reported_link_cap. link_traing use @@ -183,16 +187,21 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_ return dc->links[link_index]; } -static inline struct dc_link *get_edp_link(const struct dc *dc) +static inline void get_edp_links(const struct dc *dc, + struct dc_link **edp_links, + int *edp_num) { int i; - // report any eDP links, even unconnected DDI's + *edp_num = 0; for (i = 0; i < dc->link_count; i++) { - if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) - return dc->links[i]; + // report any eDP links, even unconnected DDI's + if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) { + edp_links[*edp_num] = dc->links[i]; + if (++(*edp_num) == MAX_NUM_EDP) + return; + } } - return NULL; } /* Set backlight level of an embedded panel (eDP, LVDS). 
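
get_edp_links() above trades get_edp_link()'s single pointer for an array of up to MAX_NUM_EDP entries plus a count. The calling pattern, reduced to a runnable toy with trimmed types; the hwseq changes later in this diff follow the same loop:

```c
#include <stdio.h>

#define MAX_NUM_EDP 2
enum signal { SIGNAL_TYPE_EDP, SIGNAL_TYPE_OTHER };
struct dc_link { enum signal connector_signal; };
struct dc { struct dc_link *links[4]; int link_count; };

/* same shape as the inline helper above */
static void get_edp_links(const struct dc *dc, struct dc_link **edp_links, int *edp_num)
{
	int i;

	*edp_num = 0;
	for (i = 0; i < dc->link_count; i++)
		if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) {
			edp_links[(*edp_num)++] = dc->links[i];
			if (*edp_num == MAX_NUM_EDP)
				return;
		}
}

int main(void)
{
	struct dc_link l0 = { SIGNAL_TYPE_OTHER }, l1 = { SIGNAL_TYPE_EDP }, l2 = { SIGNAL_TYPE_EDP };
	struct dc dc = { { &l0, &l1, &l2 }, 3 };
	struct dc_link *edp_links[MAX_NUM_EDP];
	int edp_num, i;

	get_edp_links(&dc, edp_links, &edp_num);
	for (i = 0; i < edp_num; i++)	/* callers now loop instead of testing one pointer */
		printf("eDP panel %d found\n", i);
	return 0;
}
```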
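
dc_dsc_stream_bandwidth_in_kbps(), declared above, centralizes the arithmetic that the dc_link.c hunk earlier in this diff deleted: per that removed fixed31_32 code, kbps = ceil(pix_clk_100hz * bpp_x16 / 160). A plain-integer restatement (my paraphrase of the removed math, not the library routine):

```c
#include <stdint.h>
#include <stdio.h>

/* kbps = ceil(pix_clk_100hz * bpp_x16 / 160): pixels/s times bits/pixel,
 * with the clock carried in 100 Hz units and bpp in 1/16ths, which folds
 * the unit conversions into a single /160.
 */
static uint32_t dsc_stream_kbps(uint32_t pix_clk_100hz, uint32_t bpp_x16)
{
	uint64_t num = (uint64_t)pix_clk_100hz * bpp_x16;

	return (uint32_t)((num + 159) / 160);	/* round up like dc_fixpt_ceil */
}

int main(void)
{
	/* 4k60 (~594 MHz pixel clock) compressed to a 12 bpp target (x16 -> 192) */
	printf("%u kbps\n", dsc_stream_kbps(5940000, 192));	/* 7128000 kbps */
	return 0;
}
```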
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stat.h b/drivers/gpu/drm/amd/display/dc/dc_stat.h new file mode 100644 index 000000000000..2a000ba54ddb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_stat.h @@ -0,0 +1,42 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DC_STAT_H_ +#define _DC_STAT_H_ + +/** + * DOC: DC STAT Interface + * + * These interfaces are called without acquiring DAL and DC locks. + * Hence, there are limitations on what these interfaces can access. Only + * variables exclusively defined for these interfaces can be modified. + */ + +#include "dc.h" +#include "dmub/dmub_srv.h" + +void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification *notify); + +#endif /* _DC_STAT_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 80b67b860091..e747370fc43b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -130,14 +130,6 @@ union stream_update_flags { uint32_t raw; }; -struct test_pattern { - enum dp_test_pattern type; - enum dp_test_pattern_color_space color_space; - struct link_training_settings const *p_link_settings; - unsigned char const *p_custom_pattern; - unsigned int cust_pattern_size; -}; - struct dc_stream_state { // sink is deprecated, new code should not reference // this pointer @@ -235,9 +227,10 @@ struct dc_stream_state { uint32_t stream_id; bool is_dsc_enabled; - - struct test_pattern test_pattern; union stream_update_flags update_flags; + + bool has_non_synchronizable_pclk; + bool vblank_synchronized; }; #define ABM_LEVEL_IMMEDIATE_DISABLE 255 @@ -271,7 +264,6 @@ struct dc_stream_update { struct dc_dsc_config *dsc_config; struct dc_transfer_func *func_shaper; struct dc_3dlut *lut3d_func; - struct test_pattern *pending_test_pattern; }; bool dc_is_stream_unchanged( @@ -461,6 +453,13 @@ bool dc_stream_get_crtc_position(struct dc *dc, unsigned int *v_pos, unsigned int *nom_v_pos); +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) +bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream, + struct crc_params *crc_window); +bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, + struct dc_stream_state *stream); +#endif + bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, struct crc_params *crc_window, diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile 
b/drivers/gpu/drm/amd/display/dc/dce/Makefile index 973be8f9fd10..0d7db132a20f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile @@ -30,7 +30,7 @@ DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \ dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \ dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \ dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dce_panel_cntl.o \ -dmub_hw_lock_mgr.o +dmub_hw_lock_mgr.o dmub_outbox.o AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE)) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index d51b5fe91287..87d57e81de12 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -31,6 +31,8 @@ #include "dce_aux.h" #include "dce/dce_11_0_sh_mask.h" #include "dm_event_log.h" +#include "dm_helpers.h" +#include "dmub/inc/dmub_cmd.h" #define CTX \ aux110->base.ctx @@ -324,7 +326,7 @@ static int read_channel_reply(struct dce_aux *engine, uint32_t size, return 0; } -static enum aux_channel_operation_result get_channel_status( +static enum aux_return_code_type get_channel_status( struct dce_aux *engine, uint8_t *returned_bytes) { @@ -335,7 +337,7 @@ static enum aux_channel_operation_result get_channel_status( if (returned_bytes == NULL) { /*caller pass NULL pointer*/ ASSERT_CRITICAL(false); - return AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN; + return AUX_RET_ERROR_UNKNOWN; } *returned_bytes = 0; @@ -346,7 +348,7 @@ static enum aux_channel_operation_result get_channel_status( value = REG_READ(AUX_SW_STATUS); /* in case HPD is LOW, exit AUX transaction */ if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) - return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON; + return AUX_RET_ERROR_HPD_DISCON; /* Note that the following bits are set in 'status.bits' * during CTS 4.2.1.2 (FW 3.3.1): @@ -359,14 +361,14 @@ static enum aux_channel_operation_result get_channel_status( if (value & AUX_SW_STATUS__AUX_SW_DONE_MASK) { if ((value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK) || (value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK)) - return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT; + return AUX_RET_ERROR_TIMEOUT; else if ((value & AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK) || (value & AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK) || (value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK) || (value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK)) - return AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY; + return AUX_RET_ERROR_INVALID_REPLY; *returned_bytes = get_reg_field_value(value, AUX_SW_STATUS, @@ -374,17 +376,17 @@ static enum aux_channel_operation_result get_channel_status( if (*returned_bytes == 0) return - AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY; + AUX_RET_ERROR_INVALID_REPLY; else { *returned_bytes -= 1; - return AUX_CHANNEL_OPERATION_SUCCEEDED; + return AUX_RET_SUCCESS; } } else { /*time_elapsed >= aux_engine->timeout_period * AUX_SW_STATUS__AUX_SW_HPD_DISCON = at this point */ ASSERT_CRITICAL(false); - return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT; + return AUX_RET_ERROR_TIMEOUT; } } @@ -541,7 +543,7 @@ static enum i2caux_transaction_action i2caux_action_from_payload(struct aux_payl int dce_aux_transfer_raw(struct ddc_service *ddc, struct aux_payload *payload, - enum aux_channel_operation_result *operation_result) + enum aux_return_code_type *operation_result) { struct ddc *ddc_pin = ddc->ddc_pin; struct dce_aux *aux_engine; @@ -556,7 +558,7 @@ int 
dce_aux_transfer_raw(struct ddc_service *ddc, aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; if (!acquire(aux_engine, ddc_pin)) { - *operation_result = AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE; + *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; return -1; } @@ -575,8 +577,9 @@ int dce_aux_transfer_raw(struct ddc_service *ddc, submit_channel_request(aux_engine, &aux_req); *operation_result = get_channel_status(aux_engine, &returned_bytes); - if (*operation_result == AUX_CHANNEL_OPERATION_SUCCEEDED) { + if (*operation_result == AUX_RET_SUCCESS) { int __maybe_unused bytes_replied = 0; + bytes_replied = read_channel_reply(aux_engine, payload->length, payload->data, payload->reply, &status); @@ -604,7 +607,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, int i, ret = 0; uint8_t reply; bool payload_reply = true; - enum aux_channel_operation_result operation_result; + enum aux_return_code_type operation_result; bool retry_on_defer = false; int aux_ack_retries = 0, @@ -620,8 +623,9 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, for (i = 0; i < AUX_MAX_RETRIES; i++) { ret = dce_aux_transfer_raw(ddc, payload, &operation_result); + switch (operation_result) { - case AUX_CHANNEL_OPERATION_SUCCEEDED: + case AUX_RET_SUCCESS: aux_timeout_retries = 0; aux_invalid_reply_retries = 0; @@ -667,14 +671,14 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, } break; - case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY: + case AUX_RET_ERROR_INVALID_REPLY: if (++aux_invalid_reply_retries >= AUX_MAX_INVALID_REPLY_RETRIES) goto fail; else udelay(400); break; - case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: + case AUX_RET_ERROR_TIMEOUT: // Check whether a DEFER had occurred before the timeout. // If so, treat timeout as a DEFER. 
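
Stepping back from the retry loop for a moment: the payoff of the aux_channel_operation_result to aux_return_code_type rename is that callers outside dce_aux, such as dc_link_aux_transfer_raw() above, now share one result vocabulary with the DMUB command definitions. One plausible consumer-side mapping into errno values, purely illustrative; the enum body below is a local stand-in listing only the members this hunk uses (order in dmub_cmd.h may differ), and no such conversion is part of this patch:

```c
#include <errno.h>

/* local stand-in for the real enum in dmub_cmd.h */
enum aux_return_code_type {
	AUX_RET_SUCCESS,
	AUX_RET_ERROR_UNKNOWN,
	AUX_RET_ERROR_INVALID_REPLY,
	AUX_RET_ERROR_TIMEOUT,
	AUX_RET_ERROR_HPD_DISCON,
	AUX_RET_ERROR_ENGINE_ACQUIRE,
};

/* Illustrative only: collapse HW-level results into errno for a
 * drm_dp_aux-style ->transfer() callback.
 */
static int aux_result_to_errno(enum aux_return_code_type r)
{
	switch (r) {
	case AUX_RET_SUCCESS:			return 0;
	case AUX_RET_ERROR_TIMEOUT:		return -ETIMEDOUT;
	case AUX_RET_ERROR_HPD_DISCON:		return -ENXIO;	/* sink went away */
	case AUX_RET_ERROR_ENGINE_ACQUIRE:	return -EBUSY;
	default:				return -EIO;
	}
}

int main(void)
{
	return aux_result_to_errno(AUX_RET_SUCCESS);
}
```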
if (retry_on_defer) { @@ -696,9 +700,9 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, } break; - case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON: - case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE: - case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN: + case AUX_RET_ERROR_HPD_DISCON: + case AUX_RET_ERROR_ENGINE_ACQUIRE: + case AUX_RET_ERROR_UNKNOWN: default: goto fail; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index 277484cf853e..566b1bddd8cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h @@ -29,6 +29,7 @@ #include "i2caux_interface.h" #include "inc/hw/aux_engine.h" +enum aux_return_code_type; #define AUX_COMMON_REG_LIST0(id)\ SRI(AUX_CONTROL, DP_AUX, id), \ @@ -99,7 +100,6 @@ struct dce110_aux_registers { AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ - AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ @@ -302,7 +302,7 @@ bool dce110_aux_engine_acquire( int dce_aux_transfer_raw(struct ddc_service *ddc, struct aux_payload *cmd, - enum aux_channel_operation_result *operation_result); + enum aux_return_code_type *operation_result); bool dce_aux_transfer_with_retries(struct ddc_service *ddc, struct aux_payload *cmd); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index dec58b3c42e4..2c7eb982eabc 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -1002,15 +1002,27 @@ static bool get_pixel_clk_frequency_100hz( { struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); unsigned int clock_hz = 0; + unsigned int modulo_hz = 0; if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) { clock_hz = REG_READ(PHASE[inst]); - /* NOTE: There is agreement with VBIOS here that MODULO is - * programmed equal to DPREFCLK, in which case PHASE will be - * equivalent to pixel clock. - */ - *pixel_clk_khz = clock_hz / 100; + if (clock_source->ctx->dc->hwss.enable_vblanks_synchronization && + clock_source->ctx->dc->config.vblank_alignment_max_frame_time_diff > 0) { + /* NOTE: In case VBLANK synchronization is enabled, MODULO may + * not be programmed equal to DPREFCLK + */ + modulo_hz = REG_READ(MODULO[inst]); + *pixel_clk_khz = div_u64((uint64_t)clock_hz* + clock_source->ctx->dc->clk_mgr->dprefclk_khz*10, + modulo_hz); + } else { + /* NOTE: There is agreement with VBIOS here that MODULO is + * programmed equal to DPREFCLK, in which case PHASE will be + * equivalent to pixel clock. 
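
Right above, once vblank alignment has retuned the DTO, MODULO no longer equals DPREFCLK, so the readback reconstructs the rate as PHASE / MODULO scaled by the DP reference clock. Worked integers; the 600 MHz DPREFCLK is a stand-in value, not a claim about any specific ASIC:

```c
#include <stdint.h>
#include <stdio.h>

/* DTO readback: output = DPREFCLK * PHASE / MODULO. Matching the hunk
 * above, passing dprefclk as kHz * 10 lands the result in 100 Hz units.
 */
static uint32_t dto_pix_clk_100hz(uint64_t phase, uint64_t modulo, uint32_t dprefclk_khz)
{
	return (uint32_t)(phase * dprefclk_khz * 10 / modulo);
}

int main(void)
{
	/* stand-in numbers: 600 MHz DPREFCLK, DTO ratio set for 148.5 MHz */
	uint32_t pix = dto_pix_clk_100hz(148500000ull, 600000000ull, 600000);

	printf("%u (100 Hz units) = %u kHz\n", pix, pix / 10);	/* 1485000 -> 148500 kHz */
	return 0;
}
```

This also explains the default branch: with MODULO programmed to DPREFCLK in Hz, the expression degenerates to PHASE / 100.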
+ */ + *pixel_clk_khz = clock_hz / 100; + } return true; } @@ -1074,8 +1086,35 @@ static bool dcn20_program_pix_clk( struct pixel_clk_params *pix_clk_params, struct pll_settings *pll_settings) { + struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); + unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; + dce112_program_pix_clk(clock_source, pix_clk_params, pll_settings); + if (clock_source->ctx->dc->hwss.enable_vblanks_synchronization && + clock_source->ctx->dc->config.vblank_alignment_max_frame_time_diff > 0) { + /* NOTE: In case VBLANK synchronization is enabled, + * we need to set modulo to default DPREFCLK first; + * dce112_program_pix_clk does not set default DPREFCLK + */ + REG_WRITE(MODULO[inst], + clock_source->ctx->dc->clk_mgr->dprefclk_khz*1000); + } + return true; +} + +static bool dcn20_override_dp_pix_clk( + struct clock_source *clock_source, + unsigned int inst, + unsigned int pixel_clk, + unsigned int ref_clk) +{ + struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); + + REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 0); + REG_WRITE(PHASE[inst], pixel_clk); + REG_WRITE(MODULO[inst], ref_clk); + REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1); return true; } @@ -1083,7 +1122,8 @@ static const struct clock_source_funcs dcn20_clk_src_funcs = { .cs_power_down = dce110_clock_source_power_down, .program_pix_clk = dcn20_program_pix_clk, .get_pix_clk_dividers = dce112_get_pix_clk_dividers, - .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz + .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz, + .override_dp_pix_clk = dcn20_override_dp_pix_clk }; #if defined(CONFIG_DRM_AMD_DC_DCN) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index ddc789daf3b1..4f864501e046 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -57,6 +57,8 @@ #define MCP_SYNC_PHY_LOCK 0x90 #define MCP_SYNC_PHY_UNLOCK 0x91 #define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */ +#define CRC_WIN_NOTIFY 0x92 +#define CRC_STOP_UPDATE 0x93 #define MCP_SEND_EDID_CEA 0xA0 #define EDID_CEA_CMD_ACK 1 #define EDID_CEA_CMD_NACK 2 @@ -930,6 +932,84 @@ static bool dcn10_recv_edid_cea_ack(struct dmcu *dmcu, int *offset) #endif //(CONFIG_DRM_AMD_DC_DCN) +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) +static void dcn10_forward_crc_window(struct dmcu *dmcu, + struct crc_region *crc_win, + struct otg_phy_mux *mux_mapping) +{ + struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); + unsigned int dmcu_max_retry_on_wait_reg_ready = 801; + unsigned int dmcu_wait_reg_ready_interval = 100; + unsigned int crc_start = 0, crc_end = 0, otg_phy_mux = 0; + + /* If microcontroller is not running, do nothing */ + if (dmcu->dmcu_state != DMCU_RUNNING) + return; + + if (!crc_win) + return; + + /* waitDMCUReadyForCmd */ + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, + dmcu_wait_reg_ready_interval, + dmcu_max_retry_on_wait_reg_ready); + + /* build up notification data */ + crc_start = (((unsigned int) crc_win->x_start) << 16) | crc_win->y_start; + crc_end = (((unsigned int) crc_win->x_end) << 16) | crc_win->y_end; + otg_phy_mux = + (((unsigned int) mux_mapping->otg_output_num) << 16) | mux_mapping->phy_output_num; + + dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1), + crc_start); + + dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG2), + crc_end); + + dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG3), + otg_phy_mux); + + /* 
setDMCUParam_Cmd */ + REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, + CRC_WIN_NOTIFY); + + /* notifyDMCUMsg */ + REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); +} + +static void dcn10_stop_crc_win_update(struct dmcu *dmcu, + struct otg_phy_mux *mux_mapping) +{ + struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); + unsigned int dmcu_max_retry_on_wait_reg_ready = 801; + unsigned int dmcu_wait_reg_ready_interval = 100; + unsigned int otg_phy_mux = 0; + + /* If microcontroller is not running, do nothing */ + if (dmcu->dmcu_state != DMCU_RUNNING) + return; + + /* waitDMCUReadyForCmd */ + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, + dmcu_wait_reg_ready_interval, + dmcu_max_retry_on_wait_reg_ready); + + /* build up notification data */ + otg_phy_mux = + (((unsigned int) mux_mapping->otg_output_num) << 16) | mux_mapping->phy_output_num; + + dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1), + otg_phy_mux); + + /* setDMCUParam_Cmd */ + REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, + CRC_STOP_UPDATE); + + /* notifyDMCUMsg */ + REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); +} +#endif + static const struct dmcu_funcs dce_funcs = { .dmcu_init = dce_dmcu_init, .load_iram = dce_dmcu_load_iram, @@ -953,6 +1033,10 @@ static const struct dmcu_funcs dcn10_funcs = { .send_edid_cea = dcn10_send_edid_cea, .recv_amd_vsdb = dcn10_recv_amd_vsdb, .recv_edid_cea_ack = dcn10_recv_edid_cea_ack, +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + .forward_crc_window = dcn10_forward_crc_window, + .stop_crc_win_update = dcn10_stop_crc_win_update, +#endif + .is_dmcu_initialized = dcn10_is_dmcu_initialized }; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c index 4600231da6cb..895b015b02e8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c @@ -216,9 +216,7 @@ static void set_spatial_dither( REG_UPDATE(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, 0); - /* no 10bpc on DCE11*/ - if (params->flags.SPATIAL_DITHER_ENABLED == 0 || - params->flags.SPATIAL_DITHER_DEPTH == 2) + if (params->flags.SPATIAL_DITHER_ENABLED == 0) return; /* only use FRAME_COUNTER_MAX if frameRandom == 1*/ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c index 453aaa5757bd..eb1698d54a48 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c @@ -72,11 +72,11 @@ static void dmub_abm_init(struct abm *abm, uint32_t backlight) { struct dce_abm *dce_abm = TO_DMUB_ABM(abm); - REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103); - REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101); - REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x103); - REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x101); - REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x101); + REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x3); + REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x1); + REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x3); + REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x1); + REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x1); REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0, ABM1_HG_NUM_OF_BINS_SEL, 0, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c new file mode 100644 index 000000000000..295596d1f47f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c @@ -0,0 +1,60 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
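
dcn10_forward_crc_window() and dcn10_stop_crc_win_update() above are the same four-step DMCU mailbox handshake with different payloads. A distilled, compilable sketch with the register traffic abstracted behind callbacks, since REG_WAIT/dm_write_reg/REG_UPDATE are driver macros:

```c
#include <stdint.h>
#include <stdbool.h>

/* Abstracted DMCU mailbox; the real code drives the MASTER_COMM_* registers. */
struct dmcu_mbox {
	bool (*wait_ready)(void);		 /* MASTER_COMM_INTERRUPT == 0 */
	void (*write_data)(int idx, uint32_t v); /* MASTER_COMM_DATA_REG1..3 */
	void (*write_cmd)(uint8_t cmd);		 /* MASTER_COMM_CMD_REG byte 0 */
	void (*ring_doorbell)(void);		 /* MASTER_COMM_INTERRUPT <- 1 */
};

/* One command plus up to three payload words whose meaning depends on cmd:
 * CRC_WIN_NOTIFY packs the x/y corners and the OTG/PHY mux, while
 * CRC_STOP_UPDATE only needs the mux word.
 */
static bool dmcu_send(struct dmcu_mbox *m, uint8_t cmd,
		      uint32_t d1, uint32_t d2, uint32_t d3)
{
	if (!m->wait_ready())		/* previous command still in flight */
		return false;
	m->write_data(1, d1);
	m->write_data(2, d2);
	m->write_data(3, d3);
	m->write_cmd(cmd);		/* 0x92 CRC_WIN_NOTIFY / 0x93 CRC_STOP_UPDATE */
	m->ring_doorbell();		/* firmware latches data + cmd */
	return true;
}

static bool ready(void) { return true; }
static void wr_data(int idx, uint32_t v) { (void)idx; (void)v; }
static void wr_cmd(uint8_t c) { (void)c; }
static void bell(void) { }

int main(void)
{
	struct dmcu_mbox m = { ready, wr_data, wr_cmd, bell };

	/* stop CRC updates for OTG 1 / PHY 0 */
	return dmcu_send(&m, 0x93, 1u << 16, 0, 0) ? 0 : 1;
}
```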
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + */ + +#include "dmub_outbox.h" +#include "dc_dmub_srv.h" +#include "dmub/inc/dmub_cmd.h" + +/** + ***************************************************************************** + * Function: dmub_enable_outbox_notification + * + * @brief + * Sends inbox cmd to dmub to enable outbox1 messages with interrupt. + * Dmub sends outbox1 message and triggers outbox1 interrupt. + * + * @param + * [in] dc: dc structure + * + * @return + * None + ***************************************************************************** + */ +void dmub_enable_outbox_notification(struct dc *dc) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc_ctx = dc->ctx; + + memset(&cmd, 0x0, sizeof(cmd)); + cmd.outbox1_enable.header.type = DMUB_CMD__OUTBOX1_ENABLE; + cmd.outbox1_enable.header.sub_type = 0; + cmd.outbox1_enable.header.payload_bytes = + sizeof(cmd.outbox1_enable) - + sizeof(cmd.outbox1_enable.header); + cmd.outbox1_enable.enable = true; + + dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv); + dc_dmub_srv_wait_idle(dc_ctx->dmub_srv); +} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.h new file mode 100644 index 000000000000..4e0aa0d1a2d5 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.h @@ -0,0 +1,33 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
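
dmub_enable_outbox_notification() above is the canonical fire-and-forget inbox sequence: zero the command, fill in the header, then queue/execute/wait. The payload_bytes accounting is the subtle part; a stand-alone restatement with a trimmed stand-in layout (field sizes and the type value here are placeholders, not the dmub_cmd.h definitions):

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Trimmed stand-in for the command layout in dmub_cmd.h */
struct cmd_header { uint8_t type, sub_type, payload_bytes; };
struct outbox1_enable_cmd { struct cmd_header header; uint8_t enable; };

static void build_outbox1_enable(struct outbox1_enable_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->header.type = 0x0;	/* placeholder for DMUB_CMD__OUTBOX1_ENABLE */
	cmd->header.sub_type = 0;
	/* payload size excludes the header, so the firmware knows how many
	 * bytes past the header in this ringbuffer slot are meaningful */
	cmd->header.payload_bytes = sizeof(*cmd) - sizeof(cmd->header);
	cmd->enable = 1;
}

int main(void)
{
	struct outbox1_enable_cmd cmd;

	build_outbox1_enable(&cmd);
	assert(cmd.header.payload_bytes == 1);	/* just the enable byte */
	/* the real function then does: queue -> execute -> wait_idle */
	return 0;
}
```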
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_OUTBOX_H_ +#define _DMUB_OUTBOX_H_ + +#include "dc.h" + +void dmub_enable_outbox_notification(struct dc *dc); + +#endif /* _DMUB_OUTBOX_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index 69e34bef274c..15ed09b7a452 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -81,13 +81,18 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state) { struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub; uint32_t raw_state; + enum dmub_status status; // Send gpint command and wait for ack - dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30); - - dmub_srv_get_gpint_response(srv, &raw_state); - - *state = convert_psr_state(raw_state); + status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30); + + if (status == DMUB_STATUS_OK) { + // GPINT was executed, get response + dmub_srv_get_gpint_response(srv, &raw_state); + *state = convert_psr_state(raw_state); + } else + // Return invalid state when GPINT times out + *state = 0xFF; } /* @@ -216,6 +221,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, res_ctx->pipe_ctx[i].stream->link == link && res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { pipe_ctx = &res_ctx->pipe_ctx[i]; + //TODO: refactor for multi edp support break; } } @@ -269,8 +275,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq; copy_settings_data->init_sdp_deadline = psr_context->sdpTransmitLineNumDeadline; copy_settings_data->debug.u32All = 0; - copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ? 
- true : false; + copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR; copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1; dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index caee1c9f54bd..804092f81f85 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -797,6 +797,7 @@ void dce110_edp_power_control( struct dc_context *ctx = link->ctx; struct bp_transmitter_control cntl = { 0 }; enum bp_result bp_result; + uint8_t panel_instance; if (dal_graphics_object_id_get_connector_id(link->link_enc->connector) @@ -807,7 +808,6 @@ void dce110_edp_power_control( if (!link->panel_cntl) return; - if (power_up != link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl)) { @@ -880,15 +880,18 @@ void dce110_edp_power_control( cntl.coherent = false; cntl.lanes_number = LANE_COUNT_FOUR; cntl.hpd_sel = link->link_enc->hpd_source; + panel_instance = link->panel_cntl->inst; if (ctx->dc->ctx->dmub_srv && ctx->dc->debug.dmub_command_table) { if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, - LVTMA_CONTROL_POWER_ON); + LVTMA_CONTROL_POWER_ON, + panel_instance); else bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, - LVTMA_CONTROL_POWER_OFF); + LVTMA_CONTROL_POWER_OFF, + panel_instance); } bp_result = link_transmitter_control(ctx->dc_bios, &cntl); @@ -963,6 +966,7 @@ void dce110_edp_backlight_control( { struct dc_context *ctx = link->ctx; struct bp_transmitter_control cntl = { 0 }; + uint8_t panel_instance; if (dal_graphics_object_id_get_connector_id(link->link_enc->connector) != CONNECTOR_ID_EDP) { @@ -1011,6 +1015,7 @@ void dce110_edp_backlight_control( */ /* dc_service_sleep_in_milliseconds(50); */ /*edp 1.2*/ + panel_instance = link->panel_cntl->inst; if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) edp_receiver_ready_T7(link); @@ -1018,10 +1023,12 @@ void dce110_edp_backlight_control( ctx->dc->debug.dmub_command_table) { if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, - LVTMA_CONTROL_LCD_BLON); + LVTMA_CONTROL_LCD_BLON, + panel_instance); else ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, - LVTMA_CONTROL_LCD_BLOFF); + LVTMA_CONTROL_LCD_BLOFF, + panel_instance); } link_transmitter_control(ctx->dc_bios, &cntl); @@ -1629,34 +1636,39 @@ static void disable_vga_and_power_gate_all_controllers( } -static struct dc_stream_state *get_edp_stream(struct dc_state *context) +static void get_edp_streams(struct dc_state *context, + struct dc_stream_state **edp_streams, + int *edp_stream_num) { int i; + *edp_stream_num = 0; for (i = 0; i < context->stream_count; i++) { - if (context->streams[i]->signal == SIGNAL_TYPE_EDP) - return context->streams[i]; + if (context->streams[i]->signal == SIGNAL_TYPE_EDP) { + edp_streams[*edp_stream_num] = context->streams[i]; + if (++(*edp_stream_num) == MAX_NUM_EDP) + return; + } } - return NULL; } -static struct dc_link *get_edp_link_with_sink( +static void get_edp_links_with_sink( struct dc *dc, - struct dc_state *context) + struct dc_link **edp_links_with_sink, + int *edp_with_sink_num) { int i; - struct dc_link *link = NULL; /* check if there is an eDP panel not in use */ + *edp_with_sink_num = 0; for (i = 0; i < dc->link_count; i++) { if 
(dc->links[i]->local_sink && dc->links[i]->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - link = dc->links[i]; - break; + edp_links_with_sink[*edp_with_sink_num] = dc->links[i]; + if (++(*edp_with_sink_num) == MAX_NUM_EDP) + return; } } - - return link; } /* @@ -1668,36 +1680,48 @@ static struct dc_link *get_edp_link_with_sink( */ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) { - int i; - struct dc_link *edp_link_with_sink = get_edp_link_with_sink(dc, context); - struct dc_link *edp_link = get_edp_link(dc); + struct dc_link *edp_links_with_sink[MAX_NUM_EDP]; + struct dc_link *edp_links[MAX_NUM_EDP]; + struct dc_stream_state *edp_streams[MAX_NUM_EDP]; + struct dc_link *edp_link_with_sink = NULL; + struct dc_link *edp_link = NULL; struct dc_stream_state *edp_stream = NULL; + struct dce_hwseq *hws = dc->hwseq; + int edp_with_sink_num; + int edp_num; + int edp_stream_num; + int i; bool can_apply_edp_fast_boot = false; bool can_apply_seamless_boot = false; bool keep_edp_vdd_on = false; - struct dce_hwseq *hws = dc->hwseq; + + get_edp_links_with_sink(dc, edp_links_with_sink, &edp_with_sink_num); + get_edp_links(dc, edp_links, &edp_num); if (hws->funcs.init_pipes) hws->funcs.init_pipes(dc, context); - edp_stream = get_edp_stream(context); + get_edp_streams(context, edp_streams, &edp_stream_num); // Check fastboot support, disable on DCE8 because of blank screens - if (edp_link && dc->ctx->dce_version != DCE_VERSION_8_0 && + if (edp_num && dc->ctx->dce_version != DCE_VERSION_8_0 && dc->ctx->dce_version != DCE_VERSION_8_1 && dc->ctx->dce_version != DCE_VERSION_8_3) { - - // enable fastboot if backend is enabled on eDP - if (edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc)) { - /* Set optimization flag on eDP stream*/ - if (edp_stream && edp_link->link_status.link_active) { - edp_stream->apply_edp_fast_boot_optimization = true; - can_apply_edp_fast_boot = true; + for (i = 0; i < edp_num; i++) { + edp_link = edp_links[i]; + // enable fastboot if backend is enabled on eDP + if (edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc)) { + /* Set optimization flag on eDP stream*/ + if (edp_stream_num && edp_link->link_status.link_active) { + edp_stream = edp_streams[0]; + edp_stream->apply_edp_fast_boot_optimization = true; + can_apply_edp_fast_boot = true; + break; + } } } - // We are trying to enable eDP, don't power down VDD - if (edp_stream) + if (edp_stream_num) keep_edp_vdd_on = true; } @@ -1712,6 +1736,9 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) /* eDP should not have stream in resume from S4 and so even with VBios post * it should get turned off */ + if (edp_with_sink_num) + edp_link_with_sink = edp_links_with_sink[0]; + if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) { if (edp_link_with_sink && !keep_edp_vdd_on) { /*turn off backlight before DP_blank and encoder powered down*/ @@ -1723,7 +1750,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) if (edp_link_with_sink && !keep_edp_vdd_on) dc->hwss.edp_power_control(edp_link_with_sink, false); } - bios_set_scratch_acc_mode_change(dc->ctx->dc_bios); + bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1); } static uint32_t compute_pstate_blackout_duration( diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index d7fcc5cccdce..ef56eab4e5da 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -1272,6 +1272,8 @@ static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool) /* update the public caps to indicate an underlay is available */ ctx->dc->caps.max_slave_planes = 1; + ctx->dc->caps.max_slave_yuv_planes = 1; + ctx->dc->caps.max_slave_rgb_planes = 0; return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 9ba5c624770d..9eb33eae0e81 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -53,6 +53,7 @@ #include "dsc.h" #include "dce/dmub_hw_lock_mgr.h" #include "dc_trace.h" +#include "dce/dmub_outbox.h" #define DC_LOGGER_INIT(logger) @@ -1355,6 +1356,10 @@ void dcn10_init_hw(struct dc *dc) hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); } + /* Enable outbox notification feature of dmub */ + if (dc->debug.enable_dmub_aux_for_legacy_ddc) + dmub_enable_outbox_notification(dc); + /* we want to turn off all dp displays before doing detection */ if (dc->config.power_down_display_on_boot) { uint8_t dpcd_power_state = '\0'; @@ -1457,19 +1462,26 @@ void dcn10_init_hw(struct dc *dc) */ void dcn10_power_down_on_boot(struct dc *dc) { - int i = 0; + struct dc_link *edp_links[MAX_NUM_EDP]; struct dc_link *edp_link; + int edp_num; + int i = 0; - edp_link = get_edp_link(dc); - if (edp_link && - edp_link->link_enc->funcs->is_dig_enabled && - edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && - dc->hwseq->funcs.edp_backlight_control && - dc->hwss.power_down && - dc->hwss.edp_power_control) { - dc->hwseq->funcs.edp_backlight_control(edp_link, false); - dc->hwss.power_down(dc); - dc->hwss.edp_power_control(edp_link, false); + get_edp_links(dc, edp_links, &edp_num); + + if (edp_num) { + for (i = 0; i < edp_num; i++) { + edp_link = edp_links[i]; + if (edp_link->link_enc->funcs->is_dig_enabled && + edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && + dc->hwseq->funcs.edp_backlight_control && + dc->hwss.power_down && + dc->hwss.edp_power_control) { + dc->hwseq->funcs.edp_backlight_control(edp_link, false); + dc->hwss.power_down(dc); + dc->hwss.edp_power_control(edp_link, false); + } + } } else { for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; @@ -1851,6 +1863,230 @@ static bool wait_for_reset_trigger_to_occur( return rc; } +uint64_t reduceSizeAndFraction( + uint64_t *numerator, + uint64_t *denominator, + bool checkUint32Bounary) +{ + int i; + bool ret = checkUint32Bounary == false; + uint64_t max_int32 = 0xffffffff; + uint64_t num, denom; + static const uint16_t prime_numbers[] = { + 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, + 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, + 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, + 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, + 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, + 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, + 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, + 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, + 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, + 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, + 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, + 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, + 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, + 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, + 941, 947, 
953, 967, 971, 977, 983, 991, 997}; + int count = ARRAY_SIZE(prime_numbers); + + num = *numerator; + denom = *denominator; + for (i = 0; i < count; i++) { + uint32_t num_reminder, denom_reminder; + uint64_t num_result, denom_result; + if (checkUint32Bounary && + num <= max_int32 && denom <= max_int32) { + ret = true; + break; + } + do { + num_result = div_u64_rem(num, prime_numbers[i], &num_reminder); + denom_result = div_u64_rem(denom, prime_numbers[i], &denom_reminder); + if (num_reminder == 0 && denom_reminder == 0) { + num = num_result; + denom = denom_result; + } + } while (num_reminder == 0 && denom_reminder == 0); + } + *numerator = num; + *denominator = denom; + return ret; +} + +bool is_low_refresh_rate(struct pipe_ctx *pipe) +{ + uint32_t master_pipe_refresh_rate = + pipe->stream->timing.pix_clk_100hz * 100 / + pipe->stream->timing.h_total / + pipe->stream->timing.v_total; + return master_pipe_refresh_rate <= 30; +} + +uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate) +{ + uint32_t clock_divider = 1; + uint32_t numpipes = 1; + + if (account_low_refresh_rate && is_low_refresh_rate(pipe)) + clock_divider *= 2; + + if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420) + clock_divider *= 2; + + while (pipe->next_odm_pipe) { + pipe = pipe->next_odm_pipe; + numpipes++; + } + clock_divider *= numpipes; + + return clock_divider; +} + +int dcn10_align_pixel_clocks( + struct dc *dc, + int group_size, + struct pipe_ctx *grouped_pipes[]) +{ + struct dc_context *dc_ctx = dc->ctx; + int i, master = -1, embedded = -1; + struct dc_crtc_timing hw_crtc_timing[MAX_PIPES] = {0}; + uint64_t phase[MAX_PIPES]; + uint64_t modulo[MAX_PIPES]; + unsigned int pclk; + + uint32_t embedded_pix_clk_100hz; + uint16_t embedded_h_total; + uint16_t embedded_v_total; + bool clamshell_closed = false; + uint32_t dp_ref_clk_100hz = + dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10; + + if (dc->config.vblank_alignment_dto_params && + dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) { + clamshell_closed = + (dc->config.vblank_alignment_dto_params >> 63); + embedded_h_total = + (dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF; + embedded_v_total = + (dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF; + embedded_pix_clk_100hz = + dc->config.vblank_alignment_dto_params & 0xFFFFFFFF; + + for (i = 0; i < group_size; i++) { + grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing( + grouped_pipes[i]->stream_res.tg, + &hw_crtc_timing[i]); + dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( + dc->res_pool->dp_clock_source, + grouped_pipes[i]->stream_res.tg->inst, + &pclk); + hw_crtc_timing[i].pix_clk_100hz = pclk; + if (dc_is_embedded_signal( + grouped_pipes[i]->stream->signal)) { + embedded = i; + master = i; + phase[i] = embedded_pix_clk_100hz*100; + modulo[i] = dp_ref_clk_100hz*100; + } else { + + phase[i] = (uint64_t)embedded_pix_clk_100hz* + hw_crtc_timing[i].h_total* + hw_crtc_timing[i].v_total; + phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true)); + modulo[i] = (uint64_t)dp_ref_clk_100hz* + embedded_h_total* + embedded_v_total; + + if (reduceSizeAndFraction(&phase[i], + &modulo[i], true) == false) { + /* + * this will help to stop reporting + * this timing synchronizable + */ + DC_SYNC_INFO("Failed to reduce DTO parameters\n"); + grouped_pipes[i]->stream->has_non_synchronizable_pclk = true; + } + } + } + + for (i = 0; i < group_size; i++) { + if (i != embedded && 
!grouped_pipes[i]->stream->has_non_synchronizable_pclk) { + dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk( + dc->res_pool->dp_clock_source, + grouped_pipes[i]->stream_res.tg->inst, + phase[i], modulo[i]); + dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( + dc->res_pool->dp_clock_source, + grouped_pipes[i]->stream_res.tg->inst, &pclk); + grouped_pipes[i]->stream->timing.pix_clk_100hz = + pclk*get_clock_divider(grouped_pipes[i], false); + if (master == -1) + master = i; + } + } + + } + return master; +} + +void dcn10_enable_vblanks_synchronization( + struct dc *dc, + int group_index, + int group_size, + struct pipe_ctx *grouped_pipes[]) +{ + struct dc_context *dc_ctx = dc->ctx; + struct output_pixel_processor *opp; + struct timing_generator *tg; + int i, width, height, master; + + for (i = 1; i < group_size; i++) { + opp = grouped_pipes[i]->stream_res.opp; + tg = grouped_pipes[i]->stream_res.tg; + tg->funcs->get_otg_active_size(tg, &width, &height); + if (opp->funcs->opp_program_dpg_dimensions) + opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1); + } + + for (i = 0; i < group_size; i++) { + if (grouped_pipes[i]->stream == NULL) + continue; + grouped_pipes[i]->stream->vblank_synchronized = false; + grouped_pipes[i]->stream->has_non_synchronizable_pclk = false; + } + + DC_SYNC_INFO("Aligning DP DTOs\n"); + + master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes); + + DC_SYNC_INFO("Synchronizing VBlanks\n"); + + if (master >= 0) { + for (i = 0; i < group_size; i++) { + if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) + grouped_pipes[i]->stream_res.tg->funcs->align_vblanks( + grouped_pipes[master]->stream_res.tg, + grouped_pipes[i]->stream_res.tg, + grouped_pipes[master]->stream->timing.pix_clk_100hz, + grouped_pipes[i]->stream->timing.pix_clk_100hz, + get_clock_divider(grouped_pipes[master], false), + get_clock_divider(grouped_pipes[i], false)); + grouped_pipes[i]->stream->vblank_synchronized = true; + } + grouped_pipes[master]->stream->vblank_synchronized = true; + DC_SYNC_INFO("Sync complete\n"); + } + + for (i = 1; i < group_size; i++) { + opp = grouped_pipes[i]->stream_res.opp; + tg = grouped_pipes[i]->stream_res.tg; + tg->funcs->get_otg_active_size(tg, &width, &height); + if (opp->funcs->opp_program_dpg_dimensions) + opp->funcs->opp_program_dpg_dimensions(opp, width, height); + } +} + void dcn10_enable_timing_synchronization( struct dc *dc, int group_index, @@ -1872,6 +2108,12 @@ void dcn10_enable_timing_synchronization( opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1); } + for (i = 0; i < group_size; i++) { + if (grouped_pipes[i]->stream == NULL) + continue; + grouped_pipes[i]->stream->vblank_synchronized = false; + } + for (i = 1; i < group_size; i++) grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger( grouped_pipes[i]->stream_res.tg, @@ -2642,7 +2884,7 @@ static void dcn10_update_dchubp_dpp( hws->funcs.update_plane_addr(dc, pipe_ctx); if (is_pipe_tree_visible(pipe_ctx)) - dc->hwss.set_hubp_blank(dc, pipe_ctx, false); + hubp->funcs->set_blank(hubp, false); } void dcn10_blank_pixel_data( @@ -3153,16 +3395,13 @@ void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc) return; } -static struct pipe_ctx *get_pipe_ctx_by_hubp_inst(struct dc_state *context, int mpcc_inst) +static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst) { int i; - for (i = 0; i < MAX_PIPES; i++) { - if (context->res_ctx.pipe_ctx[i].plane_res.hubp - && 
context->res_ctx.pipe_ctx[i].plane_res.hubp->inst == mpcc_inst) { - return &context->res_ctx.pipe_ctx[i]; - } - + for (i = 0; i < res_pool->pipe_count; i++) { + if (res_pool->hubps[i]->inst == mpcc_inst) + return res_pool->hubps[i]; } ASSERT(false); return NULL; @@ -3185,23 +3424,11 @@ void dcn10_wait_for_mpcc_disconnect( for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) { if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) { - struct pipe_ctx *restore_bottom_pipe; - struct pipe_ctx *restore_top_pipe; - struct pipe_ctx *inst_pipe_ctx = get_pipe_ctx_by_hubp_inst(dc->current_state, mpcc_inst); + struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst); - ASSERT(inst_pipe_ctx); res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst); pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; - /* - * Set top and bottom pipes NULL, as we don't want - * to blank those pipes when disconnecting from MPCC - */ - restore_bottom_pipe = inst_pipe_ctx->bottom_pipe; - restore_top_pipe = inst_pipe_ctx->top_pipe; - inst_pipe_ctx->top_pipe = inst_pipe_ctx->bottom_pipe = NULL; - dc->hwss.set_hubp_blank(dc, inst_pipe_ctx, true); - inst_pipe_ctx->top_pipe = restore_top_pipe; - inst_pipe_ctx->bottom_pipe = restore_bottom_pipe; + hubp->funcs->set_blank(hubp, true); } } @@ -3754,10 +3981,3 @@ void dcn10_get_clock(struct dc *dc, dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg); } - -void dcn10_set_hubp_blank(const struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool blank_enable) -{ - pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, blank_enable); -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 89e6dfb63da0..e0800cd1cc02 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -123,6 +123,11 @@ void dcn10_enable_timing_synchronization( int group_index, int group_size, struct pipe_ctx *grouped_pipes[]); +void dcn10_enable_vblanks_synchronization( + struct dc *dc, + int group_index, + int group_size, + struct pipe_ctx *grouped_pipes[]); void dcn10_enable_per_frame_crtc_position_reset( struct dc *dc, int group_size, @@ -204,8 +209,5 @@ void dcn10_wait_for_pending_cleared(struct dc *dc, struct dc_state *context); void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx); void dcn10_verify_allow_pstate_change_high(struct dc *dc); -void dcn10_set_hubp_blank(const struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool blank_enable); #endif /* __DC_HWSS_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c index 2f1b802e66a1..254300b06b43 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c @@ -79,7 +79,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .set_backlight_level = dce110_set_backlight_level, .set_abm_immediate_disable = dce110_set_abm_immediate_disable, .set_pipe = dce110_set_pipe, - .set_hubp_blank = dcn10_set_hubp_blank, }; static const struct hwseq_private_funcs dcn10_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index b222c67973d4..2529723beeb1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -212,6 +212,7 @@ struct dcn_optc_registers { 
SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\ SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\ SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_CURRENT_MASTER_EN_STATE, mask_sh),\ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\ @@ -352,6 +353,7 @@ struct dcn_optc_registers { type OTG_START_POINT_CNTL;\ type OTG_DISABLE_POINT_CNTL;\ type OTG_FIELD_NUMBER_CNTL;\ + type OTG_CURRENT_MASTER_EN_STATE;\ type OTG_STEREO_EN;\ type OTG_STEREO_SYNC_OUTPUT_LINE_NUM;\ type OTG_STEREO_SYNC_OUTPUT_POLARITY;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 90e912fef2b3..d079f4e491e5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -82,7 +82,7 @@ const struct _vcs_dpi_ip_params_st dcn1_0_ip = { .meta_chunk_size_kbytes = 2, .writeback_chunk_size_kbytes = 2, .line_buffer_size_bits = 589824, - .max_line_buffer_lines = 12, + .max_line_buffer_lines = 32, .IsLineBufferBppFixed = 0, .LineBufferFixedBpp = -1, .writeback_luma_buffer_size_kbytes = 12, @@ -619,6 +619,7 @@ static const struct dc_debug_options debug_defaults_drv = { .recovery_enabled = false, /*enable this by default after testing.*/ .max_downscale_src_width = 3840, .underflow_assert_delay_us = 0xFFFFFFFF, + .use_max_lb = true }; static const struct dc_debug_options debug_defaults_diags = { @@ -630,6 +631,7 @@ static const struct dc_debug_options debug_defaults_diags = { .disable_pplib_clock_request = true, .disable_pplib_wm_range = true, .underflow_assert_delay_us = 0xFFFFFFFF, + .use_max_lb = true }; static void dcn10_dpp_destroy(struct dpp **dpp) @@ -1420,6 +1422,8 @@ static bool dcn10_resource_construct( dc->caps.max_cursor_size = 256; dc->caps.min_horizontal_blanking_period = 80; dc->caps.max_slave_planes = 1; + dc->caps.max_slave_yuv_planes = 1; + dc->caps.max_slave_rgb_planes = 0; dc->caps.is_apu = true; dc->caps.post_blend_color_processing = false; dc->caps.extended_aux_timeout_support = false; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 73ac78b16bd4..f1a08a7736ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -738,7 +738,6 @@ void enc1_stream_encoder_update_dp_info_packets( REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid); REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid); - /* This bit is the master enable bit. * When enabling secondary stream engine, * this master bit must also be set. 
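
The reduceSizeAndFraction() helper added to dcn10_hwseq.c above exists because the DTO phase/modulo pair programmed by dcn10_align_pixel_clocks() must fit into 32-bit clock-source registers: common prime factors are divided out of numerator and denominator until both fit, and if the prime table (ending at 997 in the patch) is exhausted first, the caller marks the stream has_non_synchronizable_pclk instead of programming a truncated DTO. A minimal standalone sketch of the same reduction, assuming plain userspace C (64-bit division instead of div_u64_rem) and a shortened prime table:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool reduce_fraction(uint64_t *num, uint64_t *den)
{
	/* shortened table; the patch walks primes all the way up to 997 */
	static const uint32_t primes[] = { 2, 3, 5, 7, 11, 13, 17, 19, 23, 29 };
	size_t i;

	for (i = 0; i < sizeof(primes) / sizeof(primes[0]); i++) {
		/* stop as soon as both halves fit the 32-bit DTO registers */
		if (*num <= UINT32_MAX && *den <= UINT32_MAX)
			return true;
		/* strip this prime while it divides both values exactly */
		while (*num % primes[i] == 0 && *den % primes[i] == 0) {
			*num /= primes[i];
			*den /= primes[i];
		}
	}
	return *num <= UINT32_MAX && *den <= UINT32_MAX;
}

Reducing the fraction preserves the phase/modulo ratio exactly, so the DTO output frequency is unchanged; only its representability in the 32-bit registers improves.
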
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 5342c309b78c..6a10daec15cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1507,38 +1507,8 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || pipe_ctx->stream->update_flags.bits.gamut_remap || pipe_ctx->stream->update_flags.bits.out_csc) { - struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; - - if (mpc->funcs->set_gamut_remap) { - int i; - int mpcc_id = hubp->inst; - struct mpc_grph_gamut_adjustment adjust; - bool enable_remap_dpp = false; - - memset(&adjust, 0, sizeof(adjust)); - adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; - - /* save the enablement of gamut remap for dpp */ - enable_remap_dpp = pipe_ctx->stream->gamut_remap_matrix.enable_remap; - - /* force bypass gamut remap for dpp/cm */ - pipe_ctx->stream->gamut_remap_matrix.enable_remap = false; - dc->hwss.program_gamut_remap(pipe_ctx); - - /* restore gamut remap flag and use this remap into mpc */ - pipe_ctx->stream->gamut_remap_matrix.enable_remap = enable_remap_dpp; - - /* build remap matrix for top plane if enabled */ - if (enable_remap_dpp && pipe_ctx->top_pipe == NULL) { - adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; - for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++) - adjust.temperature_matrix[i] = - pipe_ctx->stream->gamut_remap_matrix.matrix[i]; - } - mpc->funcs->set_gamut_remap(mpc, mpcc_id, &adjust); - } else - /* dpp/cm gamut remap*/ - dc->hwss.program_gamut_remap(pipe_ctx); + /* dpp/cm gamut remap*/ + dc->hwss.program_gamut_remap(pipe_ctx); /*call the dcn2 method which uses mpc csc*/ dc->hwss.program_output_csc(dc, @@ -1581,8 +1551,8 @@ static void dcn20_update_dchubp_dpp( - if (is_pipe_tree_visible(pipe_ctx)) - dc->hwss.set_hubp_blank(dc, pipe_ctx, false); + if (pipe_ctx->update_flags.bits.enable) + hubp->funcs->set_blank(hubp, false); } @@ -1778,10 +1748,19 @@ void dcn20_post_unlock_program_front_end( for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *mpcc_pipe; if (pipe->vtp_locked) { - dc->hwss.set_hubp_blank(dc, pipe, true); + dc->hwseq->funcs.wait_for_blank_complete(pipe->stream_res.opp); + pipe->plane_res.hubp->funcs->set_blank(pipe->plane_res.hubp, true); pipe->vtp_locked = false; + + for (mpcc_pipe = pipe->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) + mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, true); + + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) + dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); } } /* WA to apply WM setting*/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 51a4166e9750..7218ed9e43dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -42,6 +42,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .program_output_csc = dcn20_program_output_csc, .enable_accelerated_mode = dce110_enable_accelerated_mode, .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_vblanks_synchronization = dcn10_enable_vblanks_synchronization, .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, 
.update_info_frame = dce110_update_info_frame, .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, @@ -94,7 +95,6 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft, #endif .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, - .set_hubp_blank = dcn10_set_hubp_blank, }; static const struct hwseq_private_funcs dcn20_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index d8b18c515d06..ea7eaf7d755f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -309,6 +309,129 @@ void optc2_set_dwb_source(struct timing_generator *optc, OPTC_DWB1_SOURCE_SELECT, optc->inst); } +void optc2_align_vblanks( + struct timing_generator *optc_master, + struct timing_generator *optc_slave, + uint32_t master_pixel_clock_100Hz, + uint32_t slave_pixel_clock_100Hz, + uint8_t master_clock_divider, + uint8_t slave_clock_divider) +{ + /* accessing slave OTG registers */ + struct optc *optc1 = DCN10TG_FROM_TG(optc_slave); + + uint32_t master_v_active = 0; + uint32_t master_h_total = 0; + uint32_t slave_h_total = 0; + uint64_t L, XY; + uint32_t X, Y, p = 10000; + uint32_t master_update_lock; + + /* disable slave OTG */ + REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0); + /* wait until disabled */ + REG_WAIT(OTG_CONTROL, + OTG_CURRENT_MASTER_EN_STATE, + 0, 10, 5000); + + REG_GET(OTG_H_TOTAL, OTG_H_TOTAL, &slave_h_total); + + /* assign slave OTG to be controlled by master update lock */ + REG_SET(OTG_GLOBAL_CONTROL0, 0, + OTG_MASTER_UPDATE_LOCK_SEL, optc_master->inst); + + /* accessing master OTG registers */ + optc1 = DCN10TG_FROM_TG(optc_master); + + /* saving update lock state, not sure if it's needed */ + REG_GET(OTG_MASTER_UPDATE_LOCK, + OTG_MASTER_UPDATE_LOCK, &master_update_lock); + /* unlocking master OTG */ + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, 0); + + REG_GET(OTG_V_BLANK_START_END, + OTG_V_BLANK_START, &master_v_active); + REG_GET(OTG_H_TOTAL, OTG_H_TOTAL, &master_h_total); + + /* calculate when to enable slave OTG */ + L = (uint64_t)p * slave_h_total * master_pixel_clock_100Hz; + L = div_u64(L, master_h_total); + L = div_u64(L, slave_pixel_clock_100Hz); + XY = div_u64(L, p); + Y = master_v_active - XY - 1; + X = div_u64(((XY + 1) * p - L) * master_h_total, p * master_clock_divider); + + /* + * set master OTG to unlock when V/H + * counters reach calculated values + */ + REG_UPDATE(OTG_GLOBAL_CONTROL1, + MASTER_UPDATE_LOCK_DB_EN, 1); + REG_UPDATE_2(OTG_GLOBAL_CONTROL1, + MASTER_UPDATE_LOCK_DB_X, + X, + MASTER_UPDATE_LOCK_DB_Y, + Y); + + /* lock master OTG */ + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, 1); + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, 1, 10); + + /* accessing slave OTG registers */ + optc1 = DCN10TG_FROM_TG(optc_slave); + + /* + * enable slave OTG, the OTG is locked with + * master's update lock, so it will not run + */ + REG_UPDATE(OTG_CONTROL, + OTG_MASTER_EN, 1); + + /* accessing master OTG registers */ + optc1 = DCN10TG_FROM_TG(optc_master); + + /* + * unlock master OTG. 
When master H/V counters reach + * DB_XY point, slave OTG will start + */ + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, 0); + + /* accessing slave OTG registers */ + optc1 = DCN10TG_FROM_TG(optc_slave); + + /* wait for slave OTG to start running*/ + REG_WAIT(OTG_CONTROL, + OTG_CURRENT_MASTER_EN_STATE, + 1, 10, 5000); + + /* accessing master OTG registers */ + optc1 = DCN10TG_FROM_TG(optc_master); + + /* disable the XY point*/ + REG_UPDATE(OTG_GLOBAL_CONTROL1, + MASTER_UPDATE_LOCK_DB_EN, 0); + REG_UPDATE_2(OTG_GLOBAL_CONTROL1, + MASTER_UPDATE_LOCK_DB_X, + 0, + MASTER_UPDATE_LOCK_DB_Y, + 0); + + /*restore master update lock*/ + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, master_update_lock); + + /* accessing slave OTG registers */ + optc1 = DCN10TG_FROM_TG(optc_slave); + /* restore slave to be controlled by it's own */ + REG_SET(OTG_GLOBAL_CONTROL0, 0, + OTG_MASTER_UPDATE_LOCK_SEL, optc_slave->inst); + +} + void optc2_triplebuffer_lock(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -468,6 +591,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = { .program_manual_trigger = optc2_program_manual_trigger, .setup_manual_trigger = optc2_setup_manual_trigger, .get_hw_timing = optc1_get_hw_timing, + .align_vblanks = optc2_align_vblanks, }; void dcn20_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 2c2dbfcd8957..2307b3517821 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -112,7 +112,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = { .is_line_buffer_bpp_fixed = 0, .line_buffer_fixed_bpp = 0, .dcc_supported = true, - .max_line_buffer_lines = 12, + .max_line_buffer_lines = 32, .writeback_luma_buffer_size_kbytes = 12, .writeback_chroma_buffer_size_kbytes = 8, .writeback_chroma_line_buffer_width_pixels = 4, @@ -180,7 +180,7 @@ static struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = { .is_line_buffer_bpp_fixed = 0, .line_buffer_fixed_bpp = 0, .dcc_supported = true, - .max_line_buffer_lines = 12, + .max_line_buffer_lines = 32, .writeback_luma_buffer_size_kbytes = 12, .writeback_chroma_buffer_size_kbytes = 8, .writeback_chroma_line_buffer_width_pixels = 4, @@ -1075,6 +1075,7 @@ static const struct dc_debug_options debug_defaults_drv = { .scl_reset_length10 = true, .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, + .use_max_lb = true }; static const struct dc_debug_options debug_defaults_diags = { @@ -1091,6 +1092,7 @@ static const struct dc_debug_options debug_defaults_diags = { .scl_reset_length10 = true, .underflow_assert_delay_us = 0xFFFFFFFF, .enable_tri_buf = true, + .use_max_lb = true }; void dcn20_dpp_destroy(struct dpp **dpp) @@ -2033,9 +2035,13 @@ int dcn20_populate_dml_pipes_from_context( if (res_ctx->pipe_ctx[pipe_cnt].stream == res_ctx->pipe_ctx[i].stream) continue; - if (dc->debug.disable_timing_sync || !resource_are_streams_timing_synchronizable( + if (dc->debug.disable_timing_sync || + (!resource_are_streams_timing_synchronizable( res_ctx->pipe_ctx[pipe_cnt].stream, - res_ctx->pipe_ctx[i].stream)) { + res_ctx->pipe_ctx[i].stream) && + !resource_are_vblanks_synchronizable( + res_ctx->pipe_ctx[pipe_cnt].stream, + res_ctx->pipe_ctx[i].stream))) { synchronized_vblank = false; break; } @@ -3697,6 +3703,8 @@ static bool dcn20_resource_construct( dc->caps.dmdata_alloc_size = 2048; dc->caps.max_slave_planes = 1; + 
dc->caps.max_slave_yuv_planes = 1; + dc->caps.max_slave_rgb_planes = 1; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index 0597391b2171..074e2713257f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -99,7 +99,6 @@ static const struct hw_sequencer_funcs dcn21_funcs = { #endif .is_abm_supported = dcn21_is_abm_supported, .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, - .set_hubp_blank = dcn10_set_hubp_blank, }; static const struct hwseq_private_funcs dcn21_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 173488ab787a..e62f931fc269 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -61,7 +61,6 @@ #include "dcn21/dcn21_dccg.h" #include "dcn21_hubbub.h" #include "dcn10/dcn10_resource.h" -#include "dce110/dce110_resource.h" #include "dce/dce_panel_cntl.h" #include "dcn20/dcn20_dwb.h" @@ -116,7 +115,7 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = { .is_line_buffer_bpp_fixed = 0, .line_buffer_fixed_bpp = 0, .dcc_supported = true, - .max_line_buffer_lines = 12, + .max_line_buffer_lines = 32, .writeback_luma_buffer_size_kbytes = 12, .writeback_chroma_buffer_size_kbytes = 8, .writeback_chroma_line_buffer_width_pixels = 4, @@ -883,7 +882,9 @@ static const struct dc_debug_options debug_defaults_drv = { .scl_reset_length10 = true, .sanity_checks = true, .disable_48mhz_pwrdwn = false, - .usbc_combo_phy_reset_wa = true + .usbc_combo_phy_reset_wa = true, + .dmub_command_table = true, + .use_max_lb = true }; static const struct dc_debug_options debug_defaults_diags = { @@ -899,7 +900,8 @@ static const struct dc_debug_options debug_defaults_diags = { .disable_stutter = true, .disable_48mhz_pwrdwn = true, .disable_psr = true, - .enable_tri_buf = true + .enable_tri_buf = true, + .use_max_lb = true }; enum dcn20_clk_src_array_id { @@ -1595,6 +1597,11 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn2_1_soc.num_chans = bw_params->num_channels; ASSERT(clk_table->num_entries); + /* Copy dcn2_1_soc.clock_limits to clock_limits to avoid copying over null states later */ + for (i = 0; i < dcn2_1_soc.num_states + 1; i++) { + clock_limits[i] = dcn2_1_soc.clock_limits[i]; + } + for (i = 0; i < clk_table->num_entries; i++) { /* loop backwards*/ for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) { @@ -1628,11 +1635,11 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn2_1_soc.clock_limits[i] = clock_limits[i]; if (clk_table->num_entries) { dcn2_1_soc.num_states = clk_table->num_entries + 1; + /* fill in min DF PState */ + dcn2_1_soc.clock_limits[1] = construct_low_pstate_lvl(clk_table, closest_clk_lvl); /* duplicate last level */ dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1]; dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states; - /* fill in min DF PState */ - dcn2_1_soc.clock_limits[1] = construct_low_pstate_lvl(clk_table, closest_clk_lvl); } dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21); @@ -1975,6 +1982,8 @@ static bool dcn21_resource_construct( 
dc->caps.dmdata_alloc_size = 2048; dc->caps.max_slave_planes = 1; + dc->caps.max_slave_yuv_planes = 1; + dc->caps.max_slave_rgb_planes = 1; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c index 41a1d0e9b7e2..e0df9b0065f9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c @@ -113,6 +113,7 @@ bool cm3_helper_translate_curve_to_hw_format( struct pwl_result_data *rgb_resulted; struct pwl_result_data *rgb; struct pwl_result_data *rgb_plus_1; + struct pwl_result_data *rgb_minus_1; struct fixed31_32 end_value; int32_t region_start, region_end; @@ -140,7 +141,7 @@ bool cm3_helper_translate_curve_to_hw_format( region_start = -MAX_LOW_POINT; region_end = NUMBER_REGIONS - MAX_LOW_POINT; } else { - /* 10 segments + /* 11 segments * segment is from 2^-10 to 2^0 * There are less than 256 points, for optimization */ @@ -154,9 +155,10 @@ bool cm3_helper_translate_curve_to_hw_format( seg_distr[7] = 4; seg_distr[8] = 4; seg_distr[9] = 4; + seg_distr[10] = 1; region_start = -10; - region_end = 0; + region_end = 1; } for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++) @@ -189,6 +191,10 @@ bool cm3_helper_translate_curve_to_hw_format( rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; + rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red; + rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green; + rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue; + // All 3 color channels have same x corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), dc_fixpt_from_int(region_start)); @@ -259,15 +265,18 @@ bool cm3_helper_translate_curve_to_hw_format( rgb = rgb_resulted; rgb_plus_1 = rgb_resulted + 1; + rgb_minus_1 = rgb; i = 1; while (i != hw_points + 1) { - if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) - rgb_plus_1->red = rgb->red; - if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) - rgb_plus_1->green = rgb->green; - if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) - rgb_plus_1->blue = rgb->blue; + if (i >= hw_points - 1) { + if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) + rgb_plus_1->red = dc_fixpt_add(rgb->red, rgb_minus_1->delta_red); + if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) + rgb_plus_1->green = dc_fixpt_add(rgb->green, rgb_minus_1->delta_green); + if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) + rgb_plus_1->blue = dc_fixpt_add(rgb->blue, rgb_minus_1->delta_blue); + } rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); @@ -283,6 +292,7 @@ bool cm3_helper_translate_curve_to_hw_format( } ++rgb_plus_1; + rgb_minus_1 = rgb; ++rgb; ++i; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index 6c0f7ef0a3df..72bee637c1e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -454,7 +454,6 @@ static void enc3_stream_encoder_update_dp_info_packets( REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid); REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid); - /* This bit is the master enable bit. 
* When enabling secondary stream engine, * this master bit must also be set. diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c index 6e864b1a95c4..434d3c46cad4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c @@ -718,7 +718,7 @@ bool dpp3_program_blnd_lut( next_mode = LUT_RAM_B; dpp3_power_on_blnd_lut(dpp_base, true); - dpp3_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A ? true:false); + dpp3_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A); if (next_mode == LUT_RAM_A) dpp3_program_blnd_luta_settings(dpp_base, params); @@ -1136,7 +1136,7 @@ bool dpp3_program_shaper( else next_mode = LUT_RAM_A; - dpp3_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A ? true:false); + dpp3_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A); if (next_mode == LUT_RAM_A) dpp3_program_shaper_luta_settings(dpp_base, params); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c index 33985401f25c..72c5687adc68 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c @@ -240,7 +240,7 @@ bool dpp3_program_gamcor_lut( next_mode = LUT_RAM_A; dpp3_power_on_gamcor_lut(dpp_base, true); - dpp3_configure_gamcor_lut(dpp_base, next_mode == LUT_RAM_A ? true:false); + dpp3_configure_gamcor_lut(dpp_base, next_mode == LUT_RAM_A); if (next_mode == LUT_RAM_B) { gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMB_START_CNTL_B); @@ -295,7 +295,7 @@ bool dpp3_program_gamcor_lut( cm_helper_program_gamcor_xfer_func(dpp_base->ctx, params, &gam_regs); dpp3_program_gammcor_lut(dpp_base, params->rgb_resulted, params->hw_points_num, - next_mode == LUT_RAM_A ? true:false); + next_mode == LUT_RAM_A); //select Gamma LUT to use for next frame REG_UPDATE(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, next_mode == LUT_RAM_A ? 0:1); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c index 8593145379d9..3fe9e41e4dbd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c @@ -217,7 +217,7 @@ static bool dwb3_program_ogam_lut( else next_mode = LUT_RAM_A; - dwb3_configure_ogam_lut(dwbc30, next_mode == LUT_RAM_A ? 
true : false); + dwb3_configure_ogam_lut(dwbc30, next_mode == LUT_RAM_A); if (next_mode == LUT_RAM_A) dwb3_program_ogam_luta_settings(dwbc30, params); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 06dc1e2e8383..d53f8b39699b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -421,11 +421,12 @@ void dcn30_program_all_writeback_pipes_in_tree( void dcn30_init_hw(struct dc *dc) { - int i, j; struct abm **abms = dc->res_pool->multiple_abms; struct dce_hwseq *hws = dc->hwseq; struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; + int i, j; + int edp_num; uint32_t backlight = MAX_BACKLIGHT_LEVEL; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) @@ -574,17 +575,23 @@ void dcn30_init_hw(struct dc *dc) * if DIG is turned on and seamless boot not enabled */ if (dc->config.power_down_display_on_boot) { - struct dc_link *edp_link = get_edp_link(dc); - - if (edp_link && - edp_link->link_enc->funcs->is_dig_enabled && - edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && - dc->hwss.edp_backlight_control && - dc->hwss.power_down && - dc->hwss.edp_power_control) { - dc->hwss.edp_backlight_control(edp_link, false); - dc->hwss.power_down(dc); - dc->hwss.edp_power_control(edp_link, false); + struct dc_link *edp_links[MAX_NUM_EDP]; + struct dc_link *edp_link; + + get_edp_links(dc, edp_links, &edp_num); + if (edp_num) { + for (i = 0; i < edp_num; i++) { + edp_link = edp_links[i]; + if (edp_link->link_enc->funcs->is_dig_enabled && + edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && + dc->hwss.edp_backlight_control && + dc->hwss.power_down && + dc->hwss.edp_power_control) { + dc->hwss.edp_backlight_control(edp_link, false); + dc->hwss.power_down(dc); + dc->hwss.edp_power_control(edp_link, false); + } + } } else { for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; @@ -651,7 +658,7 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) if (pipe_ctx == NULL) return; - if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) + if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) pipe_ctx->stream_res.stream_enc->funcs->set_avmute( pipe_ctx->stream_res.stream_enc, enable); @@ -848,7 +855,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.mall.cursor_copy_src.quad_part = cursor_attr.address.quad_part; cmd.mall.cursor_copy_dst.quad_part = - plane->address.grph.cursor_cache_addr.quad_part; + (plane->address.grph.cursor_cache_addr.quad_part + 2047) & ~2047; cmd.mall.cursor_width = cursor_attr.width; cmd.mall.cursor_height = cursor_attr.height; cmd.mall.cursor_pitch = cursor_attr.pitch; @@ -858,8 +865,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); /* Use copied cursor, and it's okay to not switch back */ - cursor_attr.address.quad_part = - plane->address.grph.cursor_cache_addr.quad_part; + cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part; dc_stream_set_cursor_attributes(stream, &cursor_attr); } @@ -940,53 +946,6 @@ void dcn30_hardware_release(struct dc *dc) dc->res_pool->hubbub, true, true); } -void dcn30_set_hubp_blank(const struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool blank_enable) -{ - struct pipe_ctx *mpcc_pipe; - struct pipe_ctx *odm_pipe; - - if (blank_enable) { - struct 
plane_resource *plane_res = &pipe_ctx->plane_res; - struct stream_resource *stream_res = &pipe_ctx->stream_res; - - /* Wait for enter vblank */ - stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK); - - /* Blank HUBP to allow p-state during blank on all timings */ - pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, true); - /* Confirm hubp in blank */ - ASSERT(plane_res->hubp->funcs->hubp_in_blank(plane_res->hubp)); - /* Toggle HUBP_DISABLE */ - plane_res->hubp->funcs->hubp_soft_reset(plane_res->hubp, true); - plane_res->hubp->funcs->hubp_soft_reset(plane_res->hubp, false); - for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) { - mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, true); - /* Confirm hubp in blank */ - ASSERT(mpcc_pipe->plane_res.hubp->funcs->hubp_in_blank(mpcc_pipe->plane_res.hubp)); - /* Toggle HUBP_DISABLE */ - mpcc_pipe->plane_res.hubp->funcs->hubp_soft_reset(mpcc_pipe->plane_res.hubp, true); - mpcc_pipe->plane_res.hubp->funcs->hubp_soft_reset(mpcc_pipe->plane_res.hubp, false); - - } - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { - odm_pipe->plane_res.hubp->funcs->set_blank(odm_pipe->plane_res.hubp, true); - /* Confirm hubp in blank */ - ASSERT(odm_pipe->plane_res.hubp->funcs->hubp_in_blank(odm_pipe->plane_res.hubp)); - /* Toggle HUBP_DISABLE */ - odm_pipe->plane_res.hubp->funcs->hubp_soft_reset(odm_pipe->plane_res.hubp, true); - odm_pipe->plane_res.hubp->funcs->hubp_soft_reset(odm_pipe->plane_res.hubp, false); - } - } else { - pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, false); - for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) - mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false); - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - odm_pipe->plane_res.hubp->funcs->set_blank(odm_pipe->plane_res.hubp, false); - } -} - void dcn30_set_disp_pattern_generator(const struct dc *dc, struct pipe_ctx *pipe_ctx, enum controller_dp_test_pattern test_pattern, @@ -996,6 +955,7 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, int width, int height, int offset) { struct stream_resource *stream_res = &pipe_ctx->stream_res; + struct pipe_ctx *mpcc_pipe; if (test_pattern != CONTROLLER_DP_TEST_PATTERN_VIDEOMODE) { pipe_ctx->vtp_locked = false; @@ -1007,12 +967,20 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, if (stream_res->tg->funcs->is_tg_enabled(stream_res->tg)) { if (stream_res->tg->funcs->is_locked(stream_res->tg)) pipe_ctx->vtp_locked = true; - else - dc->hwss.set_hubp_blank(dc, pipe_ctx, true); + else { + /* Blank HUBP to allow p-state during blank on all timings */ + pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, true); + + for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) + mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, true); + } } } else { - dc->hwss.set_hubp_blank(dc, pipe_ctx, false); /* turning off DPG */ + pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, false); + for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) + mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false); + stream_res->opp->funcs->opp_set_disp_pattern_generator(stream_res->opp, test_pattern, color_space, color_depth, solid_color, width, height, offset); } 
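
One detail worth calling out from the dcn30_apply_idle_power_optimizations() hunk above: the MALL cursor-copy destination is now rounded up with (addr + 2047) & ~2047, i.e. to the next 2 KiB boundary, and that rounded address is then reused as the cursor source so the two always agree. The bit trick works for any power-of-two alignment; a small self-contained illustration (align_up() is a hypothetical helper, not a DC function):

#include <stdint.h>

/* Round addr up to the next multiple of align (a power of two). */
static inline uint64_t align_up(uint64_t addr, uint64_t align)
{
	return (addr + align - 1) & ~(align - 1);
}

/*
 * align_up(0x1001, 2048) == 0x1800, and already-aligned inputs pass
 * through unchanged: align_up(0x1800, 2048) == 0x1800.
 */

Adding align - 1 guarantees the next boundary is reached whenever addr is misaligned, and masking with ~(align - 1) clears the low bits back down to that boundary.
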
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h index 3b7d4812e311..e9a0005288d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h @@ -80,8 +80,4 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, const struct tg_color *solid_color, int width, int height, int offset); -void dcn30_set_hubp_blank(const struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool blank_enable); - #endif /* __DC_HWSS_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c index 204444fead97..c4c14e9c1309 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c @@ -98,7 +98,6 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .hardware_release = dcn30_hardware_release, .set_pipe = dcn21_set_pipe, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, - .set_hubp_blank = dcn30_set_hubp_blank, }; static const struct hwseq_private_funcs dcn30_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c index 3e6f76096119..910c17fd4278 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c @@ -143,16 +143,18 @@ static void mpc3_power_on_ogam_lut( { struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) { - // Force power on - REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_DIS, power_on == true ? 1:0); - // Wait for confirmation when powering on - if (power_on) - REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10); - } else { - REG_SET(MPCC_MEM_PWR_CTRL[mpcc_id], 0, - MPCC_OGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1); - } + /* + * Powering on: force memory active so the LUT can be updated. + * Powering off: allow entering memory low power mode + * + * Memory low power mode is controlled during MPC OGAM LUT init. + */ + REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id], + MPCC_OGAM_MEM_PWR_DIS, power_on != 0); + + /* Wait for memory to be powered on - we won't be able to write to it otherwise. */ + if (power_on) + REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10); } static void mpc3_configure_ogam_lut( @@ -355,7 +357,7 @@ void mpc3_set_output_gamma( next_mode = LUT_RAM_A; mpc3_power_on_ogam_lut(mpc, mpcc_id, true); - mpc3_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A ? 
true:false); + mpc3_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A); if (next_mode == LUT_RAM_A) mpc3_program_luta(mpc, mpcc_id, params); @@ -1427,7 +1429,7 @@ const struct mpc_funcs dcn30_mpc_funcs = { .acquire_rmu = mpcc3_acquire_rmu, .program_3dlut = mpc3_program_3dlut, .release_rmu = mpcc3_release_rmu, - .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut, + .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index fb7f1dea3c46..263c2986682d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -120,7 +120,7 @@ struct _vcs_dpi_ip_params_st dcn3_0_ip = { .dcc_supported = true, .writeback_interface_buffer_size_kbytes = 90, .writeback_line_buffer_buffer_size = 0, - .max_line_buffer_lines = 12, + .max_line_buffer_lines = 32, .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640 .writeback_chroma_buffer_size_kbytes = 8, .writeback_chroma_line_buffer_width_pixels = 4, @@ -181,7 +181,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = { }, .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */ .num_states = 1, - .sr_exit_time_us = 12, + .sr_exit_time_us = 15.5, .sr_enter_plus_exit_time_us = 20, .urgent_latency_us = 4.0, .urgent_latency_pixel_data_only_us = 4.0, @@ -852,6 +852,7 @@ static const struct dc_debug_options debug_defaults_drv = { .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, .disable_psr = false, + .use_max_lb = true }; static const struct dc_debug_options debug_defaults_diags = { @@ -870,6 +871,7 @@ static const struct dc_debug_options debug_defaults_diags = { .dmub_command_table = true, .disable_psr = true, .enable_tri_buf = true, + .use_max_lb = true }; void dcn30_dpp_destroy(struct dpp **dpp) @@ -1874,6 +1876,7 @@ static noinline bool dcn30_internal_validate_bw( if (!pipes) return false; + dc->res_pool->funcs->update_soc_for_wm_a(dc, context); pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); DC_FP_START(); @@ -2223,11 +2226,7 @@ static noinline void dcn30_calculate_wm_and_dlg_fp( * * Set A calculated last so that following calculations are based on Set A */ - if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) { - context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; - context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us; - context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us; - } + dc->res_pool->funcs->update_soc_for_wm_a(dc, context); context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; @@ -2270,6 +2269,15 @@ static noinline void dcn30_calculate_wm_and_dlg_fp( dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; } +void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context) +{ + if 
(dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) { + context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us; + context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us; + } +} + void dcn30_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -2494,6 +2502,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = { .panel_cntl_create = dcn30_panel_cntl_create, .validate_bandwidth = dcn30_validate_bandwidth, .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, + .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, @@ -2566,6 +2575,8 @@ static bool dcn30_resource_construct( dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; dc->caps.max_slave_planes = 1; + dc->caps.max_slave_yuv_planes = 1; + dc->caps.max_slave_rgb_planes = 1; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h index 8ce7f6d39a20..b754b89beadf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h @@ -60,6 +60,7 @@ void dcn30_calculate_wm_and_dlg( display_e2e_pipe_params_st *pipes, int pipe_cnt, int vlevel); +void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context); void dcn30_populate_dml_writeback_from_context( struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes); diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c index b8bf6d61005b..bdad72140cbc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c @@ -98,7 +98,6 @@ static const struct hw_sequencer_funcs dcn301_funcs = { .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, - .set_hubp_blank = dcn30_set_hubp_blank, }; static const struct hwseq_private_funcs dcn301_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index c494235016e0..622a5bf9737f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -116,7 +116,7 @@ struct _vcs_dpi_ip_params_st dcn3_01_ip = { .dcc_supported = true, .writeback_interface_buffer_size_kbytes = 90, .writeback_line_buffer_buffer_size = 656640, - .max_line_buffer_lines = 12, + .max_line_buffer_lines = 32, .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640 .writeback_chroma_buffer_size_kbytes = 8, .writeback_chroma_line_buffer_width_pixels = 4, @@ -873,6 +873,7 @@ static const struct dc_debug_options debug_defaults_drv = { .underflow_assert_delay_us = 0xFFFFFFFF, .dwb_fi_phase = -1, // -1 = disable .dmub_command_table = true, + .use_max_lb = false, }; 
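
The mpc3_power_on_ogam_lut() rewrite a few hunks above follows a recurring DC idiom: force the LUT memory active with a register update, then poll a power-state status field before issuing any LUT writes, since writes to a powered-down RAM would be dropped (here, REG_WAIT on MPCC_OGAM_MEM_PWR_STATE reading 0). A rough standalone sketch of what such a bounded poll boils down to; wait_for_field() and its delay callback are illustrative stand-ins, not the DC macros:

#include <stdbool.h>
#include <stdint.h>

/*
 * Poll a masked status field until it reads `expected`, retrying up to
 * `retries` times with `delay_us` microseconds between attempts. The
 * register is modelled as a volatile pointer and the delay routine is
 * supplied by the caller to keep the sketch platform-neutral.
 */
static bool wait_for_field(volatile const uint32_t *reg, uint32_t mask,
			   uint32_t expected, unsigned int delay_us,
			   unsigned int retries,
			   void (*delay)(unsigned int us))
{
	while (retries--) {
		if ((*reg & mask) == expected)
			return true;	/* hardware reached the wanted state */
		delay(delay_us);
	}
	return false;	/* timed out, analogous to REG_WAIT's warning path */
}

The bounded retry count matters in this context: an unresponsive power-gating sequencer should surface as a timeout warning, not as a hung display pipeline.
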
static const struct dc_debug_options debug_defaults_diags = { @@ -889,6 +890,7 @@ static const struct dc_debug_options debug_defaults_diags = { .scl_reset_length10 = true, .dwb_fi_phase = -1, // -1 = disable .dmub_command_table = true, + .use_max_lb = false, }; void dcn301_dpp_destroy(struct dpp **dpp) @@ -1719,6 +1721,7 @@ static struct resource_funcs dcn301_res_pool_funcs = { .panel_cntl_create = dcn301_panel_cntl_create, .validate_bandwidth = dcn30_validate_bandwidth, .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg, + .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, @@ -1764,6 +1767,8 @@ static bool dcn301_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.max_slave_planes = 1; + dc->caps.max_slave_yuv_planes = 1; + dc->caps.max_slave_rgb_planes = 1; dc->caps.is_apu = true; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index 4b659b63f75b..0723e29fd42e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -101,7 +101,7 @@ struct _vcs_dpi_ip_params_st dcn3_02_ip = { .dcc_supported = true, .writeback_interface_buffer_size_kbytes = 90, .writeback_line_buffer_buffer_size = 0, - .max_line_buffer_lines = 12, + .max_line_buffer_lines = 32, .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640 .writeback_chroma_buffer_size_kbytes = 8, .writeback_chroma_line_buffer_width_pixels = 4, @@ -223,6 +223,7 @@ static const struct dc_debug_options debug_defaults_drv = { .underflow_assert_delay_us = 0xFFFFFFFF, .dwb_fi_phase = -1, // -1 = disable, .dmub_command_table = true, + .use_max_lb = true }; static const struct dc_debug_options debug_defaults_diags = { @@ -241,6 +242,7 @@ static const struct dc_debug_options debug_defaults_diags = { .dmub_command_table = true, .enable_tri_buf = true, .disable_psr = true, + .use_max_lb = true }; enum dcn302_clk_src_array_id { @@ -1395,6 +1397,7 @@ static struct resource_funcs dcn302_res_pool_funcs = { .panel_cntl_create = dcn302_panel_cntl_create, .validate_bandwidth = dcn30_validate_bandwidth, .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, + .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, @@ -1481,6 +1484,8 @@ static bool dcn302_resource_construct( dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576; dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; dc->caps.max_slave_planes = 1; + dc->caps.max_slave_yuv_planes = 1; + dc->caps.max_slave_rgb_planes = 1; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 07e349b1067b..f41db27c44de 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -156,4 +156,6 @@ void dm_set_dcn_clocks( struct dc_context *ctx, 
struct dc_clocks *clks); +bool dm_helpers_dmub_outbox0_interrupt_control(struct dc_context *ctx, bool enable); + #endif /* __DM_HELPERS__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index 0f3f510fd83b..9729cf292e84 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -3437,6 +3437,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.DCCEnabledInAnyPlane = true; } } + mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly; for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { locals->FabricAndDRAMBandwidthPerState[i] = dml_min( mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 210c96cd5b03..51098c2c9854 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -3544,6 +3544,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.DCCEnabledInAnyPlane = true; } } + mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly; for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { locals->FabricAndDRAMBandwidthPerState[i] = dml_min( mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index bc07082c1357..cb3f70a71b51 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -4050,7 +4050,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->RequiredDPPCLK[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = v->MinDPPCLKUsingSingleDPP[NumberOfNonSplitPlaneOfMaximumBandwidth] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100) / 2; v->TotalNumberOfActiveDPP[i][j] = v->TotalNumberOfActiveDPP[i][j] + 1; - v->TotalNumberOfSingleDPPPlanes[i][j] = v->TotalNumberOfSingleDPPPlanes[i][j] + 1; + v->TotalNumberOfSingleDPPPlanes[i][j] = v->TotalNumberOfSingleDPPPlanes[i][j] - 1; } } if (v->TotalNumberOfActiveDPP[i][j] > v->MaxNumDPP) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index bc0485a59018..94036a9612cf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -599,7 +599,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) for (k = j + 1; k < mode_lib->vba.cache_num_pipes; ++k) { display_pipe_source_params_st *src_k = &pipes[k].pipe.src; display_pipe_dest_params_st *dst_k = &pipes[k].pipe.dest; - display_output_params_st *dout_k = &pipes[j].dout; if (src_k->is_hsplit && !visited[k] && src->hsplit_grp == src_k->hsplit_grp) { @@ -620,8 +619,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] += src_k->viewport_height_c; } - mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] += - dout_k->dsc_slices; visited[k] = true; } diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c 
b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index c62d0eddc9c6..be57088d185d 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -37,59 +37,6 @@ static uint32_t dsc_policy_max_target_bpp_limit = 16; /* default DSC policy enables DSC only when needed */ static bool dsc_policy_enable_dsc_when_not_needed; -static uint32_t dc_dsc_bandwidth_in_kbps_from_timing( - const struct dc_crtc_timing *timing) -{ - uint32_t bits_per_channel = 0; - uint32_t kbps; - - if (timing->flags.DSC) { - kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel); - kbps = kbps / 160 + ((kbps % 160) ? 1 : 0); - return kbps; - } - - switch (timing->display_color_depth) { - case COLOR_DEPTH_666: - bits_per_channel = 6; - break; - case COLOR_DEPTH_888: - bits_per_channel = 8; - break; - case COLOR_DEPTH_101010: - bits_per_channel = 10; - break; - case COLOR_DEPTH_121212: - bits_per_channel = 12; - break; - case COLOR_DEPTH_141414: - bits_per_channel = 14; - break; - case COLOR_DEPTH_161616: - bits_per_channel = 16; - break; - default: - break; - } - - ASSERT(bits_per_channel != 0); - - kbps = timing->pix_clk_100hz / 10; - kbps *= bits_per_channel; - - if (timing->flags.Y_ONLY != 1) { - /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/ - kbps *= 3; - if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) - kbps /= 2; - else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) - kbps = kbps * 2 / 3; - } - - return kbps; - -} - static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size) { @@ -315,18 +262,18 @@ static inline uint32_t dsc_div_by_10_round_up(uint32_t value) * and uncompressed bandwidth. */ static void get_dsc_bandwidth_range( - const uint32_t min_bpp, - const uint32_t max_bpp, + const uint32_t min_bpp_x16, + const uint32_t max_bpp_x16, const struct dsc_enc_caps *dsc_caps, const struct dc_crtc_timing *timing, struct dc_dsc_bw_range *range) { /* native stream bandwidth */ - range->stream_kbps = dc_dsc_bandwidth_in_kbps_from_timing(timing); + range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing); /* max dsc target bpp */ - range->max_kbps = dsc_div_by_10_round_up(max_bpp * timing->pix_clk_100hz); - range->max_target_bpp_x16 = max_bpp * 16; + range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing->pix_clk_100hz, max_bpp_x16); + range->max_target_bpp_x16 = max_bpp_x16; if (range->max_kbps > range->stream_kbps) { /* max dsc target bpp is capped to native bandwidth */ range->max_kbps = range->stream_kbps; @@ -334,8 +281,8 @@ static void get_dsc_bandwidth_range( } /* min dsc target bpp */ - range->min_kbps = dsc_div_by_10_round_up(min_bpp * timing->pix_clk_100hz); - range->min_target_bpp_x16 = min_bpp * 16; + range->min_kbps = dc_dsc_stream_bandwidth_in_kbps(timing->pix_clk_100hz, min_bpp_x16); + range->min_target_bpp_x16 = min_bpp_x16; if (range->min_kbps > range->max_kbps) { /* min dsc target bpp is capped to max dsc bandwidth*/ range->min_kbps = range->max_kbps; @@ -363,12 +310,17 @@ static bool decide_dsc_target_bpp_x16( memset(&range, 0, sizeof(range)); - get_dsc_bandwidth_range(policy->min_target_bpp, policy->max_target_bpp, + get_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16, dsc_common_caps, timing, &range); if (!policy->enable_dsc_when_not_needed && target_bandwidth_kbps >= range.stream_kbps) { /* enough bandwidth without dsc */ *target_bpp_x16 = 0; should_use_dsc = false; + } else if (policy->preferred_bpp_x16 > 0 && + policy->preferred_bpp_x16 <= 
range.max_target_bpp_x16 && + policy->preferred_bpp_x16 >= range.min_target_bpp_x16) { + *target_bpp_x16 = policy->preferred_bpp_x16; + should_use_dsc = true; } else if (target_bandwidth_kbps >= range.max_kbps) { /* use max target bpp allowed */ *target_bpp_x16 = range.max_target_bpp_x16; @@ -545,7 +497,7 @@ static bool setup_dsc_config( int target_bandwidth_kbps, const struct dc_crtc_timing *timing, int min_slice_height_override, - int max_dsc_target_bpp_limit_override, + int max_dsc_target_bpp_limit_override_x16, struct dc_dsc_config *dsc_cfg) { struct dsc_enc_caps dsc_common_caps; @@ -564,7 +516,7 @@ static bool setup_dsc_config( memset(dsc_cfg, 0, sizeof(struct dc_dsc_config)); - dc_dsc_get_policy_for_timing(timing, max_dsc_target_bpp_limit_override, &policy); + dc_dsc_get_policy_for_timing(timing, max_dsc_target_bpp_limit_override_x16, &policy); pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right; pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; @@ -865,8 +817,8 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_da bool dc_dsc_compute_bandwidth_range( const struct display_stream_compressor *dsc, uint32_t dsc_min_slice_height_override, - uint32_t min_bpp, - uint32_t max_bpp, + uint32_t min_bpp_x16, + uint32_t max_bpp_x16, const struct dsc_dec_dpcd_caps *dsc_sink_caps, const struct dc_crtc_timing *timing, struct dc_dsc_bw_range *range) @@ -883,10 +835,10 @@ bool dc_dsc_compute_bandwidth_range( if (is_dsc_possible) is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing, - dsc_min_slice_height_override, max_bpp, &config); + dsc_min_slice_height_override, max_bpp_x16, &config); if (is_dsc_possible) - get_dsc_bandwidth_range(min_bpp, max_bpp, &dsc_common_caps, timing, range); + get_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, &dsc_common_caps, timing, range); return is_dsc_possible; } @@ -908,11 +860,20 @@ bool dc_dsc_compute_config( &dsc_enc_caps, target_bandwidth_kbps, timing, dsc_min_slice_height_override, - max_target_bpp_limit_override, dsc_cfg); + max_target_bpp_limit_override * 16, dsc_cfg); return is_dsc_possible; } -void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override, struct dc_dsc_policy *policy) +uint32_t dc_dsc_stream_bandwidth_in_kbps(uint32_t pix_clk_100hz, uint32_t bpp_x16) +{ + struct fixed31_32 link_bw_kbps; + link_bw_kbps = dc_fixpt_from_int(pix_clk_100hz); + link_bw_kbps = dc_fixpt_div_int(link_bw_kbps, 160); + link_bw_kbps = dc_fixpt_mul_int(link_bw_kbps, bpp_x16); + return dc_fixpt_ceil(link_bw_kbps); +} + +void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override_x16, struct dc_dsc_policy *policy) { uint32_t bpc = 0; @@ -967,13 +928,15 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t return; } + policy->preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16; + /* internal upper limit, default 16 bpp */ if (policy->max_target_bpp > dsc_policy_max_target_bpp_limit) policy->max_target_bpp = dsc_policy_max_target_bpp_limit; /* apply override */ - if (max_target_bpp_limit_override && policy->max_target_bpp > max_target_bpp_limit_override) - policy->max_target_bpp = max_target_bpp_limit_override; + if (max_target_bpp_limit_override_x16 && policy->max_target_bpp > max_target_bpp_limit_override_x16 / 16) + policy->max_target_bpp = max_target_bpp_limit_override_x16 / 16; /* enable DSC when not needed, default false */ 
if (dsc_policy_enable_dsc_when_not_needed) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c index 66e4841f41e4..ca335ea60412 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c @@ -48,10 +48,6 @@ #define REGI(reg_name, block, id)\ mm ## block ## id ## _ ## reg_name -#include "../hw_gpio.h" -#include "../hw_ddc.h" -#include "../hw_hpd.h" - #include "reg_helper.h" #include "../hpd_regs.h" diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c index 5e384a8a83dc..51855a2624cf 100644 --- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c @@ -39,7 +39,7 @@ #define HDCP14_KSV_SIZE 5 #define HDCP14_MAX_KSV_FIFO_SIZE 127*HDCP14_KSV_SIZE -static const bool hdcp_cmd_is_read[] = { +static const bool hdcp_cmd_is_read[HDCP_MESSAGE_ID_MAX] = { [HDCP_MESSAGE_ID_READ_BKSV] = true, [HDCP_MESSAGE_ID_READ_RI_R0] = true, [HDCP_MESSAGE_ID_READ_PJ] = true, @@ -75,7 +75,7 @@ static const bool hdcp_cmd_is_read[] = { [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false }; -static const uint8_t hdcp_i2c_offsets[] = { +static const uint8_t hdcp_i2c_offsets[HDCP_MESSAGE_ID_MAX] = { [HDCP_MESSAGE_ID_READ_BKSV] = 0x0, [HDCP_MESSAGE_ID_READ_RI_R0] = 0x8, [HDCP_MESSAGE_ID_READ_PJ] = 0xA, @@ -106,7 +106,8 @@ static const uint8_t hdcp_i2c_offsets[] = { [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60, [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60, [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80, - [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70 + [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70, + [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x0, }; struct protection_properties { @@ -184,7 +185,7 @@ static const struct protection_properties hdmi_14_protection = { .process_transaction = hdmi_14_process_transaction }; -static const uint32_t hdcp_dpcd_addrs[] = { +static const uint32_t hdcp_dpcd_addrs[HDCP_MESSAGE_ID_MAX] = { [HDCP_MESSAGE_ID_READ_BKSV] = 0x68000, [HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005, [HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF, diff --git a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h index 1b01a9a58d14..e2b3a2c7a927 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h +++ b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h @@ -170,6 +170,11 @@ struct clock_source_funcs { const struct clock_source *clock_source, unsigned int inst, unsigned int *pixel_clk_khz); + bool (*override_dp_pix_clk)( + struct clock_source *clock_source, + unsigned int inst, + unsigned int pixel_clk, + unsigned int ref_clk); }; struct clock_source { diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 8efa1b80546d..eb1a19bf0d81 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -97,6 +97,10 @@ struct resource_funcs { const struct panel_cntl_init_data *panel_cntl_init_data); struct link_encoder *(*link_enc_create)( const struct encoder_init_data *init); + /* Create a minimal link encoder object with no dc_link object + * associated with it. 
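/*
 * Why the hdcp_msg.c tables above gained an explicit [HDCP_MESSAGE_ID_MAX]
 * bound: with an inferred length, any message ID past the last initializer
 * would index out of bounds, while with the explicit bound unlisted IDs
 * read back as zero/false.  A minimal sketch of the pattern, using
 * hypothetical names:
 */
enum example_msg_id {
	EXAMPLE_MSG_READ,
	EXAMPLE_MSG_WRITE,
	EXAMPLE_MSG_ID_MAX	/* table bound, not a real message */
};

static const bool example_msg_is_read[EXAMPLE_MSG_ID_MAX] = {
	[EXAMPLE_MSG_READ] = true,
	/* EXAMPLE_MSG_WRITE stays false via zero-initialization */
};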
*/ + struct link_encoder *(*link_enc_create_minimal)(struct dc_context *ctx, enum engine_id eng_id); + bool (*validate_bandwidth)( struct dc *dc, struct dc_state *context, @@ -106,6 +110,8 @@ struct resource_funcs { display_e2e_pipe_params_st *pipes, int pipe_cnt, int vlevel); + void (*update_soc_for_wm_a)( + struct dc *dc, struct dc_state *context); int (*populate_dml_pipes)( struct dc *dc, struct dc_state *context, @@ -210,6 +216,15 @@ struct resource_pool { unsigned int underlay_pipe_index; unsigned int stream_enc_count; + /* An array for accessing the link encoder objects that have been created. + * Index in array corresponds to engine ID - viz. 0: ENGINE_ID_DIGA + */ + struct link_encoder *link_encoders[MAX_DIG_LINK_ENCODERS]; + /* Number of DIG link encoder objects created - i.e. number of valid + * entries in link_encoders array. + */ + unsigned int dig_link_enc_count; + #if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_3dlut *mpc_lut[MAX_PIPES]; struct dc_transfer_func *mpc_shaper[MAX_PIPES]; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h index b324e13f3f78..4d7b271b6409 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h @@ -56,6 +56,7 @@ struct dp_receiver_id_info; struct i2c_payloads; struct aux_payloads; +enum aux_return_code_type; void dal_ddc_i2c_payloads_add( struct i2c_payloads *payloads, @@ -100,7 +101,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc, int dc_link_aux_transfer_raw(struct ddc_service *ddc, struct aux_payload *payload, - enum aux_channel_operation_result *operation_result); + enum aux_return_code_type *operation_result); bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, struct aux_payload *payload); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h index e77b3a76766d..2ae630bf2aee 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h @@ -29,6 +29,8 @@ #include "dc_ddc_types.h" #include "include/i2caux_interface.h" +enum aux_return_code_type; + enum i2caux_transaction_operation { I2CAUX_TRANSACTION_READ, I2CAUX_TRANSACTION_WRITE @@ -162,7 +164,7 @@ struct aux_engine_funcs { uint8_t *buffer, uint8_t *reply_result, uint32_t *sw_status); - enum aux_channel_operation_result (*get_channel_status)( + enum aux_return_code_type (*get_channel_status)( struct aux_engine *engine, uint8_t *returned_bytes); bool (*is_engine_available)(struct aux_engine *engine); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h index cd1c0dc32bf8..8df2765cce78 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h @@ -56,6 +56,20 @@ struct dmcu { bool auto_load_dmcu; }; +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) +struct crc_region { + uint16_t x_start; + uint16_t y_start; + uint16_t x_end; + uint16_t y_end; +}; + +struct otg_phy_mux { + uint8_t phy_output_num; + uint8_t otg_output_num; +}; +#endif + struct dmcu_funcs { bool (*dmcu_init)(struct dmcu *dmcu); bool (*load_iram)(struct dmcu *dmcu, @@ -84,6 +98,13 @@ struct dmcu_funcs { int *min_frame_rate, int *max_frame_rate); bool (*recv_edid_cea_ack)(struct dmcu *dmcu, int *offset); +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + void (*forward_crc_window)(struct dmcu *dmcu, + struct crc_region *crc_win, + struct otg_phy_mux *mux_mapping); + void 
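/*
 * Usage sketch for the secure-display DMCU hooks declared above: the
 * driver describes the screen region to be CRC'd plus the OTG-to-PHY
 * pairing, then forwards both to the DMCU firmware.  All values below are
 * illustrative only.
 */
static void example_forward_crc_window(struct dmcu *dmcu)
{
	struct crc_region win = {
		.x_start = 0,
		.y_start = 0,
		.x_end = 256,
		.y_end = 256,
	};
	struct otg_phy_mux mux = {
		.otg_output_num = 0,	/* OTG0 */
		.phy_output_num = 0,	/* first PHY */
	};

	if (dmcu->funcs->forward_crc_window)
		dmcu->funcs->forward_crc_window(dmcu, &win, &mux);
}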
(*stop_crc_win_update)(struct dmcu *dmcu, + struct otg_phy_mux *mux_mapping); +#endif }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 43e33f47734d..31a1713bb49f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -36,6 +36,7 @@ #define MAX_AUDIOS 7 #define MAX_PIPES 6 +#define MAX_DIG_LINK_ENCODERS 7 #define MAX_DWB_PIPES 1 struct gamma_curve { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 754832d216fd..9ff68b67780c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -109,6 +109,12 @@ enum h_timing_div_mode { H_TIMING_DIV_BY4, }; +enum timing_synchronization_type { + NOT_SYNCHRONIZABLE, + TIMING_SYNCHRONIZABLE, + VBLANK_SYNCHRONIZABLE +}; + struct crc_params { /* Regions used to calculate CRC*/ uint16_t windowa_x_start; @@ -292,6 +298,12 @@ struct timing_generator_funcs { uint32_t window_start, uint32_t window_end); void (*set_vtotal_change_limit)(struct timing_generator *optc, uint32_t limit); + void (*align_vblanks)(struct timing_generator *master_optc, + struct timing_generator *slave_optc, + uint32_t master_pixel_clock_100Hz, + uint32_t slave_pixel_clock_100Hz, + uint8_t master_clock_divider, + uint8_t slave_clock_divider); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 0586ab2ffd6a..2fedfcac6705 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -111,6 +111,9 @@ struct hw_sequencer_funcs { void (*enable_timing_synchronization)(struct dc *dc, int group_index, int group_size, struct pipe_ctx *grouped_pipes[]); + void (*enable_vblanks_synchronization)(struct dc *dc, + int group_index, int group_size, + struct pipe_ctx *grouped_pipes[]); void (*setup_periodic_interrupt)(struct dc *dc, struct pipe_ctx *pipe_ctx, enum vline_select vline); @@ -231,10 +234,6 @@ struct hw_sequencer_funcs { enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, int height, int offset); - - void (*set_hubp_blank)(const struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool blank_enable); }; void color_space_to_black_color( diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index d89815a46190..fe1e5833c96a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -48,6 +48,7 @@ struct resource_caps { int num_ddc; int num_vmid; int num_dsc; + unsigned int num_dig_link_enc; // Total number of DIGs (digital encoders) in DIO (Display Input/Output). 
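/*
 * The new resource_pool::link_encoders table above is indexed by engine
 * ID, with ENGINE_ID_DIGA at index 0 and dig_link_enc_count valid entries.
 * A hypothetical bounds-checked lookup built on that convention:
 */
static inline struct link_encoder *
example_link_enc_by_engine(const struct resource_pool *pool,
			   enum engine_id eng_id)
{
	int idx = (int)eng_id - (int)ENGINE_ID_DIGA;

	if (idx < 0 || idx >= MAX_DIG_LINK_ENCODERS)
		return NULL;

	return pool->link_encoders[idx];	/* NULL if never created */
}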
int num_mpc_3dlut; }; @@ -115,6 +116,10 @@ bool resource_are_streams_timing_synchronizable( struct dc_stream_state *stream1, struct dc_stream_state *stream2); +bool resource_are_vblanks_synchronizable( + struct dc_stream_state *stream1, + struct dc_stream_state *stream2); + struct clock_source *resource_find_used_clk_src_for_sharing( struct resource_context *res_ctx, struct pipe_ctx *pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index 3f1e7a196a23..c4b067d01895 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -58,6 +58,18 @@ enum dc_irq_source to_dal_irq_source_dcn20( return DC_IRQ_SOURCE_VBLANK5; case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP: return DC_IRQ_SOURCE_VBLANK6; + case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC1_VLINE0; + case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC2_VLINE0; + case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC3_VLINE0; + case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC4_VLINE0; + case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC5_VLINE0; + case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC6_VLINE0; case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT: return DC_IRQ_SOURCE_PFLIP1; case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT: @@ -172,6 +184,11 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { .ack = NULL }; +static const struct irq_source_info_funcs vline0_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + #undef BASE_INNER #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg @@ -245,6 +262,14 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { .funcs = &vblank_irq_info_funcs\ } +#define vline0_int_entry(reg_num)\ + [DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\ + OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\ + .funcs = &vline0_irq_info_funcs\ + } + #define dummy_irq_entry() \ {\ .funcs = &dummy_irq_info_funcs\ @@ -353,6 +378,12 @@ irq_source_info_dcn20[DAL_IRQ_SOURCES_NUMBER] = { vblank_int_entry(3), vblank_int_entry(4), vblank_int_entry(5), + vline0_int_entry(0), + vline0_int_entry(1), + vline0_int_entry(2), + vline0_int_entry(3), + vline0_int_entry(4), + vline0_int_entry(5), }; static const struct irq_service_funcs irq_service_funcs_dcn20 = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index 0e0f494fbb5e..6ee9dd833b85 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -58,6 +58,20 @@ enum dc_irq_source to_dal_irq_source_dcn21( return DC_IRQ_SOURCE_VBLANK5; case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP: return DC_IRQ_SOURCE_VBLANK6; + case DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT: + return DC_IRQ_SOURCE_DMCUB_OUTBOX0; + case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC1_VLINE0; + case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC2_VLINE0; + case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC3_VLINE0; + case 
DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC4_VLINE0; + case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC5_VLINE0; + case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC6_VLINE0; case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT: return DC_IRQ_SOURCE_PFLIP1; case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT: @@ -173,6 +187,12 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { .ack = NULL }; + +static const struct irq_source_info_funcs vline0_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + #undef BASE_INNER #define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg @@ -254,6 +274,14 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { .funcs = &vblank_irq_info_funcs\ } +#define vline0_int_entry(reg_num)\ + [DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\ + OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\ + .funcs = &vline0_irq_info_funcs\ + } + #define dummy_irq_entry() \ {\ .funcs = &dummy_irq_info_funcs\ @@ -366,6 +394,12 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = { vblank_int_entry(3), vblank_int_entry(4), vblank_int_entry(5), + vline0_int_entry(0), + vline0_int_entry(1), + vline0_int_entry(2), + vline0_int_entry(3), + vline0_int_entry(4), + vline0_int_entry(5), }; static const struct irq_service_funcs irq_service_funcs_dcn21 = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c index a35b76772b9d..4ec6f6ad8c48 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c @@ -65,6 +65,20 @@ enum dc_irq_source to_dal_irq_source_dcn30( return DC_IRQ_SOURCE_VBLANK5; case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP: return DC_IRQ_SOURCE_VBLANK6; + case DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT: + return DC_IRQ_SOURCE_DMCUB_OUTBOX0; + case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC1_VLINE0; + case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC2_VLINE0; + case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC3_VLINE0; + case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC4_VLINE0; + case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC5_VLINE0; + case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC6_VLINE0; case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT: return DC_IRQ_SOURCE_PFLIP1; case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT: @@ -179,6 +193,16 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { .ack = NULL }; +static const struct irq_source_info_funcs dmub_trace_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static const struct irq_source_info_funcs vline0_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + #undef BASE_INNER #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg @@ -252,6 +276,14 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { .funcs = &vblank_irq_info_funcs\ } +#define vline0_int_entry(reg_num)\ + [DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\ + OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\ + .funcs = &vline0_irq_info_funcs\ + } + #define 
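/*
 * The vline0_int_entry(reg_num) macro above fills IRQ table slot
 * DC_IRQ_SOURCE_DC1_VLINE0 + reg_num, so the VLINE0 interrupt of OTG
 * instance N lands N entries past DC_IRQ_SOURCE_DC1_VLINE0.  The inverse
 * mapping, useful in a handler, is a simple subtraction (sketch):
 */
static inline int example_otg_inst_from_vline0_src(enum dc_irq_source src)
{
	if (src < DC_IRQ_SOURCE_DC1_VLINE0 || src > DC_IRQ_SOURCE_DC6_VLINE0)
		return -1;

	return (int)(src - DC_IRQ_SOURCE_DC1_VLINE0);	/* 0-based OTG index */
}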
dummy_irq_entry() \ {\ .funcs = &dummy_irq_info_funcs\ @@ -360,6 +392,12 @@ irq_source_info_dcn30[DAL_IRQ_SOURCES_NUMBER] = { vblank_int_entry(3), vblank_int_entry(4), vblank_int_entry(5), + vline0_int_entry(0), + vline0_int_entry(1), + vline0_int_entry(2), + vline0_int_entry(3), + vline0_int_entry(4), + vline0_int_entry(5), }; static const struct irq_service_funcs irq_service_funcs_dcn30 = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c index 927fdc43fb9f..2313a5664f44 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c @@ -50,6 +50,18 @@ static enum dc_irq_source to_dal_irq_source_dcn302(struct irq_service *irq_servi return DC_IRQ_SOURCE_VBLANK5; case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP: return DC_IRQ_SOURCE_VBLANK6; + case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC1_VLINE0; + case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC2_VLINE0; + case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC3_VLINE0; + case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC4_VLINE0; + case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC5_VLINE0; + case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC6_VLINE0; case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT: return DC_IRQ_SOURCE_PFLIP1; case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT: @@ -154,6 +166,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { .ack = NULL }; +static const struct irq_source_info_funcs vline0_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + #undef BASE_INNER #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg @@ -222,6 +239,14 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { .funcs = &vblank_irq_info_funcs\ } +#define vline0_int_entry(reg_num)\ + [DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\ + OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\ + .funcs = &vline0_irq_info_funcs\ + } + #define dummy_irq_entry() { .funcs = &dummy_irq_info_funcs } #define i2c_int_entry(reg_num) \ @@ -318,6 +343,11 @@ static const struct irq_source_info irq_source_info_dcn302[DAL_IRQ_SOURCES_NUMBE vblank_int_entry(2), vblank_int_entry(3), vblank_int_entry(4), + vline0_int_entry(0), + vline0_int_entry(1), + vline0_int_entry(2), + vline0_int_entry(3), + vline0_int_entry(4), }; static const struct irq_service_funcs irq_service_funcs_dcn302 = { diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h index 87812d81fed3..ae8f47ec0f8c 100644 --- a/drivers/gpu/drm/amd/display/dc/irq_types.h +++ b/drivers/gpu/drm/amd/display/dc/irq_types.h @@ -150,7 +150,8 @@ enum dc_irq_source { DC_IRQ_SOURCE_DC4_VLINE1, DC_IRQ_SOURCE_DC5_VLINE1, DC_IRQ_SOURCE_DC6_VLINE1, - + DC_IRQ_DMCUB_OUTBOX1, + DC_IRQ_SOURCE_DMCUB_OUTBOX0, DAL_IRQ_SOURCES_NUMBER }; diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 863cd9cc93ff..b4e14960b164 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -74,6 +74,8 @@ extern "C" { struct dmub_srv; struct dmub_srv_common_regs; +struct dmcub_trace_buf_entry; + /* enum dmub_status - return code for 
dmcub functions */ enum dmub_status { DMUB_STATUS_OK = 0, @@ -107,6 +109,15 @@ enum dmub_window_id { DMUB_WINDOW_TOTAL, }; +/* enum dmub_notification_type - dmub outbox notification identifier */ +enum dmub_notification_type { + DMUB_NOTIFICATION_NO_DATA = 0, + DMUB_NOTIFICATION_AUX_REPLY, + DMUB_NOTIFICATION_HPD, + DMUB_NOTIFICATION_HPD_IRQ, + DMUB_NOTIFICATION_MAX +}; + /** * struct dmub_region - dmub hw memory region * @base: base address for region, must be 256 byte aligned @@ -256,6 +267,20 @@ struct dmub_srv_hw_funcs { void (*set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset); + void (*setup_out_mailbox)(struct dmub_srv *dmub, + const struct dmub_region *outbox1); + + uint32_t (*get_outbox1_wptr)(struct dmub_srv *dmub); + + void (*set_outbox1_rptr)(struct dmub_srv *dmub, uint32_t rptr_offset); + + void (*setup_outbox0)(struct dmub_srv *dmub, + const struct dmub_region *outbox0); + + uint32_t (*get_outbox0_wptr)(struct dmub_srv *dmub); + + void (*set_outbox0_rptr)(struct dmub_srv *dmub, uint32_t rptr_offset); + uint32_t (*emul_get_inbox1_rptr)(struct dmub_srv *dmub); void (*emul_set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset); @@ -279,6 +304,7 @@ struct dmub_srv_hw_funcs { union dmub_gpint_data_register reg); uint32_t (*get_gpint_response)(struct dmub_srv *dmub); + }; /** @@ -338,6 +364,13 @@ struct dmub_srv { struct dmub_srv_base_funcs funcs; struct dmub_srv_hw_funcs hw_funcs; struct dmub_rb inbox1_rb; + /** + * outbox1_rb is accessed without locks (dal & dc) + * and to be used only in dmub_srv_stat_get_notification() + */ + struct dmub_rb outbox1_rb; + + struct dmub_rb outbox0_rb; bool sw_init; bool hw_init; @@ -351,6 +384,26 @@ struct dmub_srv { }; /** + * struct dmub_notification - dmub notification data + * @type: dmub notification type + * @link_index: link index to identify aux connection + * @result: USB4 status returned from dmub + * @pending_notification: Indicates there are other pending notifications + * @aux_reply: aux reply + * @hpd_status: hpd status + */ +struct dmub_notification { + enum dmub_notification_type type; + uint8_t link_index; + uint8_t result; + bool pending_notification; + union { + struct aux_reply_data aux_reply; + enum dp_hpd_status hpd_status; + }; +}; + +/** * DMUB firmware version helper macro - useful for checking if the version * of a firmware to know if feature or functionality is supported or present. */ @@ -614,6 +667,8 @@ enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub, enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub, union dmub_rb_cmd *cmd); +bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry); + #if defined(__cplusplus) } #endif diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv_stat.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv_stat.h new file mode 100644 index 000000000000..6c78aa406e90 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv_stat.h @@ -0,0 +1,41 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_SRV_STAT_H_ +#define _DMUB_SRV_STAT_H_ + +/** + * DOC: DMUB_SRV STAT Interface + * + * These interfaces are called without acquiring DAL and DC locks. + * Hence, there are limitations on what these interfaces can access. Only + * variables exclusively defined for these interfaces can be modified. + */ +#include "dmub_srv.h" + +enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub, + struct dmub_notification *notify); + +#endif /* _DMUB_SRV_STAT_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv_stat.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 072b4e7e624b..f07b348f7c29 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -47,10 +47,10 @@ /* Firmware versioning. */ #ifdef DMUB_EXPOSE_VERSION -#define DMUB_FW_VERSION_GIT_HASH 0x6444c02e7 +#define DMUB_FW_VERSION_GIT_HASH 0xc29b1734b #define DMUB_FW_VERSION_MAJOR 0 #define DMUB_FW_VERSION_MINOR 0 -#define DMUB_FW_VERSION_REVISION 51 +#define DMUB_FW_VERSION_REVISION 56 #define DMUB_FW_VERSION_TEST 0 #define DMUB_FW_VERSION_VBIOS 0 #define DMUB_FW_VERSION_HOTFIX 0 @@ -68,25 +68,75 @@ #define __forceinline inline +/** + * Flag from driver to indicate that ABM should be disabled gradually + * by slowly reversing all backlight programming and pixel compensation. + */ #define SET_ABM_PIPE_GRADUALLY_DISABLE 0 + +/** + * Flag from driver to indicate that ABM should be disabled immediately + * and undo all backlight programming and pixel compensation. + */ #define SET_ABM_PIPE_IMMEDIATELY_DISABLE 255 + +/** + * Flag from driver to indicate that ABM should be disabled immediately + * and keep the current backlight programming and pixel compensation. + */ #define SET_ABM_PIPE_IMMEDIATE_KEEP_GAIN_DISABLE 254 + +/** + * Flag from driver to set the current ABM pipe index or ABM operating level. + */ #define SET_ABM_PIPE_NORMAL 1 +/** + * Number of ambient light levels in ABM algorithm. + */ +#define NUM_AMBI_LEVEL 5 + +/** + * Number of operating/aggression levels in ABM algorithm. + */ +#define NUM_AGGR_LEVEL 4 + +/** + * Number of segments in the gamma curve. + */ +#define NUM_POWER_FN_SEGS 8 + +/** + * Number of segments in the backlight curve. + */ +#define NUM_BL_CURVE_SEGS 16 + /* Maximum number of streams on any ASIC. */ #define DMUB_MAX_STREAMS 6 /* Maximum number of planes on any ASIC. 
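/*
 * Hypothetical consumer of the stat interface declared above: drain
 * notifications until none are pending.  Per the DOC comment, this must
 * run without DAL/DC locks held.  Sketch only.
 */
static void example_drain_dmub_notifications(struct dmub_srv *dmub)
{
	struct dmub_notification notify;

	do {
		if (dmub_srv_stat_get_notification(dmub, &notify) != DMUB_STATUS_OK)
			break;

		switch (notify.type) {
		case DMUB_NOTIFICATION_AUX_REPLY:
			/* notify.aux_reply carries the AUX payload */
			break;
		case DMUB_NOTIFICATION_HPD:
			/* notify.hpd_status is DP_HPD_PLUG or DP_HPD_UNPLUG */
			break;
		case DMUB_NOTIFICATION_NO_DATA:
		default:
			return;	/* nothing (more) to process */
		}
	} while (notify.pending_notification);
}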
*/ #define DMUB_MAX_PLANES 6 +/* Trace buffer offset for entry */ +#define TRACE_BUFFER_ENTRY_OFFSET 16 + +/** + * Physical framebuffer address location, 64-bit. + */ #ifndef PHYSICAL_ADDRESS_LOC #define PHYSICAL_ADDRESS_LOC union large_integer #endif +/** + * OS/FW agnostic memcpy + */ #ifndef dmub_memcpy #define dmub_memcpy(dest, source, bytes) memcpy((dest), (source), (bytes)) #endif +/** + * OS/FW agnostic memset + */ #ifndef dmub_memset #define dmub_memset(dest, val, bytes) memset((dest), (val), (bytes)) #endif @@ -95,31 +145,69 @@ extern "C" { #endif +/** + * OS/FW agnostic udelay + */ #ifndef dmub_udelay #define dmub_udelay(microseconds) udelay(microseconds) #endif +/** + * union dmub_addr - DMUB physical/virtual 64-bit address. + */ union dmub_addr { struct { - uint32_t low_part; - uint32_t high_part; - } u; - uint64_t quad_part; + uint32_t low_part; /**< Lower 32 bits */ + uint32_t high_part; /**< Upper 32 bits */ + } u; /*<< Low/high bit access */ + uint64_t quad_part; /*<< 64 bit address */ }; +/** + * Flags that can be set by driver to change some PSR behaviour. + */ union dmub_psr_debug_flags { + /** + * Debug flags. + */ struct { + /** + * Enable visual confirm in FW. + */ uint32_t visual_confirm : 1; + /** + * Use HW Lock Mgr object to do HW locking in FW. + */ uint32_t use_hw_lock_mgr : 1; + + /** + * Unused. + * TODO: Remove. + */ uint32_t log_line_nums : 1; } bitfields; + /** + * Union for debug flags. + */ uint32_t u32All; }; +/** + * DMUB feature capabilities. + * After DMUB init, driver will query FW capabilities prior to enabling certain features. + */ struct dmub_feature_caps { + /** + * Max PSR version supported by FW. + */ uint8_t psr; +#ifndef TRIM_FAMS + uint8_t fw_assisted_mclk_switch; + uint8_t reserved[6]; +#else uint8_t reserved[7]; +#endif }; #if defined(__cplusplus) @@ -153,23 +241,43 @@ struct dmub_feature_caps { * @dal_fw: 1 if the firmware is DAL */ struct dmub_fw_meta_info { - uint32_t magic_value; - uint32_t fw_region_size; - uint32_t trace_buffer_size; - uint32_t fw_version; - uint8_t dal_fw; - uint8_t reserved[3]; + uint32_t magic_value; /**< magic value identifying DMUB firmware meta info */ + uint32_t fw_region_size; /**< size of the firmware state region */ + uint32_t trace_buffer_size; /**< size of the tracebuffer region */ + uint32_t fw_version; /**< the firmware version information */ + uint8_t dal_fw; /**< 1 if the firmware is DAL */ + uint8_t reserved[3]; /**< padding bits */ }; -/* Ensure that the structure remains 64 bytes. 
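/*
 * union dmub_addr above lets one 64-bit GPU address be programmed as two
 * 32-bit register writes.  Sketch:
 */
static inline void example_split_dmub_addr(uint64_t gpu_addr,
					   uint32_t *lo, uint32_t *hi)
{
	union dmub_addr addr;

	addr.quad_part = gpu_addr;
	*lo = addr.u.low_part;	/* bits 31:0 */
	*hi = addr.u.high_part;	/* bits 63:32 */
}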
*/ +/** + * union dmub_fw_meta - ensures that dmub_fw_meta_info remains 64 bytes + */ union dmub_fw_meta { - struct dmub_fw_meta_info info; - uint8_t reserved[64]; + struct dmub_fw_meta_info info; /**< metadata info */ + uint8_t reserved[64]; /**< padding bits */ }; #pragma pack(pop) //============================================================================== +//< DMUB Trace Buffer>================================================================ +//============================================================================== +/** + * dmub_trace_code_t - firmware trace code, 32-bits + */ +typedef uint32_t dmub_trace_code_t; + +/** + * struct dmcub_trace_buf_entry - Firmware trace entry + */ +struct dmcub_trace_buf_entry { + dmub_trace_code_t trace_code; /**< trace code for the event */ + uint32_t tick_count; /**< the tick count at time of trace */ + uint32_t param0; /**< trace defined parameter 0 */ + uint32_t param1; /**< trace defined parameter 1 */ +}; + +//============================================================================== //< DMUB_STATUS>================================================================ //============================================================================== @@ -181,42 +289,49 @@ union dmub_fw_meta { * SCRATCH15: FW Boot Options register */ -/* Register bit definition for SCRATCH0 */ +/** + * union dmub_fw_boot_status - Status bit definitions for SCRATCH0. + */ union dmub_fw_boot_status { struct { - uint32_t dal_fw : 1; - uint32_t mailbox_rdy : 1; - uint32_t optimized_init_done : 1; - uint32_t restore_required : 1; - } bits; - uint32_t all; + uint32_t dal_fw : 1; /**< 1 if DAL FW */ + uint32_t mailbox_rdy : 1; /**< 1 if mailbox ready */ + uint32_t optimized_init_done : 1; /**< 1 if optimized init done */ + uint32_t restore_required : 1; /**< 1 if driver should call restore */ + } bits; /**< status bits */ + uint32_t all; /**< 32-bit access to status bits */ }; +/** + * enum dmub_fw_boot_status_bit - Enum bit definitions for SCRATCH0. 
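/*
 * Each struct dmcub_trace_buf_entry above is four 32-bit words, matching
 * the TRACE_BUFFER_ENTRY_OFFSET (16 bytes) used to step through the trace
 * buffer.  A compile-time check of that invariant (sketch, using the
 * kernel's BUILD_BUG_ON from <linux/build_bug.h>):
 */
static inline void example_check_trace_entry_size(void)
{
	BUILD_BUG_ON(sizeof(struct dmcub_trace_buf_entry) !=
		     TRACE_BUFFER_ENTRY_OFFSET);
}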
+ */ enum dmub_fw_boot_status_bit { - DMUB_FW_BOOT_STATUS_BIT_DAL_FIRMWARE = (1 << 0), - DMUB_FW_BOOT_STATUS_BIT_MAILBOX_READY = (1 << 1), - DMUB_FW_BOOT_STATUS_BIT_OPTIMIZED_INIT_DONE = (1 << 2), - DMUB_FW_BOOT_STATUS_BIT_RESTORE_REQUIRED = (1 << 3), + DMUB_FW_BOOT_STATUS_BIT_DAL_FIRMWARE = (1 << 0), /**< 1 if DAL FW */ + DMUB_FW_BOOT_STATUS_BIT_MAILBOX_READY = (1 << 1), /**< 1 if mailbox ready */ + DMUB_FW_BOOT_STATUS_BIT_OPTIMIZED_INIT_DONE = (1 << 2), /**< 1 if init done */ + DMUB_FW_BOOT_STATUS_BIT_RESTORE_REQUIRED = (1 << 3), /**< 1 if driver should call restore */ }; -/* Register bit definition for SCRATCH15 */ +/** + * union dmub_fw_boot_options - Boot option definitions for SCRATCH15 + */ union dmub_fw_boot_options { struct { - uint32_t pemu_env : 1; - uint32_t fpga_env : 1; - uint32_t optimized_init : 1; - uint32_t skip_phy_access : 1; - uint32_t disable_clk_gate: 1; - uint32_t skip_phy_init_panel_sequence: 1; - uint32_t reserved : 26; - } bits; - uint32_t all; + uint32_t pemu_env : 1; /**< 1 if PEMU */ + uint32_t fpga_env : 1; /**< 1 if FPGA */ + uint32_t optimized_init : 1; /**< 1 if optimized init */ + uint32_t skip_phy_access : 1; /**< 1 if PHY access should be skipped */ + uint32_t disable_clk_gate: 1; /**< 1 if clock gating should be disabled */ + uint32_t skip_phy_init_panel_sequence: 1; /**< 1 to skip panel init seq */ + uint32_t reserved : 26; /**< reserved */ + } bits; /**< boot bits */ + uint32_t all; /**< 32-bit access to bits */ }; enum dmub_fw_boot_options_bit { - DMUB_FW_BOOT_OPTION_BIT_PEMU_ENV = (1 << 0), - DMUB_FW_BOOT_OPTION_BIT_FPGA_ENV = (1 << 1), - DMUB_FW_BOOT_OPTION_BIT_OPTIMIZED_INIT_DONE = (1 << 2), + DMUB_FW_BOOT_OPTION_BIT_PEMU_ENV = (1 << 0), /**< 1 if PEMU */ + DMUB_FW_BOOT_OPTION_BIT_FPGA_ENV = (1 << 1), /**< 1 if FPGA */ + DMUB_FW_BOOT_OPTION_BIT_OPTIMIZED_INIT_DONE = (1 << 2), /**< 1 if optimized init done */ }; //============================================================================== @@ -226,14 +341,27 @@ enum dmub_fw_boot_options_bit { //============================================================================== /* + * enum dmub_cmd_vbios_type - VBIOS commands. + * * Command IDs should be treated as stable ABI. * Do not reuse or modify IDs. */ - enum dmub_cmd_vbios_type { + /** + * Configures the DIG encoder. + */ DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL = 0, + /** + * Controls the PHY. + */ DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL = 1, + /** + * Sets the pixel clock/symbol clock. + */ DMUB_CMD__VBIOS_SET_PIXEL_CLOCK = 2, + /** + * Enables or disables power gating. + */ DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING = 3, DMUB_CMD__VBIOS_LVTMA_CONTROL = 15, }; @@ -262,35 +390,60 @@ enum dmub_cmd_vbios_type { * Command responses. */ +/** + * Return response for DMUB_GPINT__STOP_FW command. + */ #define DMUB_GPINT__STOP_FW_RESPONSE 0xDEADDEAD /** - * The register format for sending a command via the GPINT. + * union dmub_gpint_data_register - Format for sending a command via the GPINT. */ union dmub_gpint_data_register { struct { - uint32_t param : 16; - uint32_t command_code : 12; - uint32_t status : 4; - } bits; - uint32_t all; + uint32_t param : 16; /**< 16-bit parameter */ + uint32_t command_code : 12; /**< GPINT command */ + uint32_t status : 4; /**< Command status bit */ + } bits; /**< GPINT bit access */ + uint32_t all; /**< GPINT 32-bit access */ }; /* + * enum dmub_gpint_command - GPINT command to DMCUB FW + * * Command IDs should be treated as stable ABI. * Do not reuse or modify IDs. 
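/*
 * union dmub_fw_boot_status can be read through the bitfield view or the
 * 32-bit view with the DMUB_FW_BOOT_STATUS_BIT_* masks; both decode the
 * same SCRATCH0 value.  Sketch:
 */
static inline bool example_dmub_mailbox_ready(union dmub_fw_boot_status status)
{
	/* equivalent to: status.bits.mailbox_rdy */
	return (status.all & DMUB_FW_BOOT_STATUS_BIT_MAILBOX_READY) != 0;
}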
*/ - enum dmub_gpint_command { + /** + * Invalid command, ignored. + */ DMUB_GPINT__INVALID_COMMAND = 0, + /** + * DESC: Queries the firmware version. + * RETURN: Firmware version. + */ DMUB_GPINT__GET_FW_VERSION = 1, + /** + * DESC: Halts the firmware. + * RETURN: DMUB_GPINT__STOP_FW_RESPONSE (0xDEADDEAD) when halted + */ DMUB_GPINT__STOP_FW = 2, + /** + * DESC: Get PSR state from FW. + * RETURN: PSR state enum. This enum may need to be converted to the legacy PSR state value. + */ DMUB_GPINT__GET_PSR_STATE = 7, /** * DESC: Notifies DMCUB of the currently active streams. * ARGS: Stream mask, 1 bit per active stream index. */ DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK = 8, + /** + * DESC: Start PSR residency counter. Stop PSR residency counter and get value. + * ARGS: We can measure residency from various points. The argument will specify the residency mode. + * By default, it is measured from after we power down the PHY, to just before we power up the PHY. + * RETURN: PSR residency in milli-percent. + */ DMUB_GPINT__PSR_RESIDENCY = 9, }; @@ -300,52 +453,129 @@ enum dmub_gpint_command { //< DMUB_CMD>=================================================================== //============================================================================== +/** + * Size in bytes of each DMUB command. + */ #define DMUB_RB_CMD_SIZE 64 + +/** + * Maximum number of items in the DMUB ringbuffer. + */ #define DMUB_RB_MAX_ENTRY 128 + +/** + * Ringbuffer size in bytes. + */ #define DMUB_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_RB_MAX_ENTRY) + +/** + * REG_SET mask for reg offload. + */ #define REG_SET_MASK 0xFFFF /* + * enum dmub_cmd_type - DMUB inbox command. + * * Command IDs should be treated as stable ABI. * Do not reuse or modify IDs. */ - enum dmub_cmd_type { + /** + * Invalid command. + */ DMUB_CMD__NULL = 0, + /** + * Read modify write register sequence offload. + */ DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE = 1, + /** + * Field update register sequence offload. + */ DMUB_CMD__REG_SEQ_FIELD_UPDATE_SEQ = 2, + /** + * Burst write sequence offload. + */ DMUB_CMD__REG_SEQ_BURST_WRITE = 3, + /** + * Reg wait sequence offload. + */ DMUB_CMD__REG_REG_WAIT = 4, + /** + * Workaround to avoid HUBP underflow during NV12 playback. + */ DMUB_CMD__PLAT_54186_WA = 5, + /** + * Command type used to query FW feature caps. + */ DMUB_CMD__QUERY_FEATURE_CAPS = 6, + /** + * Command type used for all PSR commands. + */ DMUB_CMD__PSR = 64, + /** + * Command type used for all MALL commands. + */ DMUB_CMD__MALL = 65, + /** + * Command type used for all ABM commands. + */ DMUB_CMD__ABM = 66, + /** + * Command type used for HW locking in FW. + */ DMUB_CMD__HW_LOCK = 69, + /** + * Command type used to access DP AUX. + */ DMUB_CMD__DP_AUX_ACCESS = 70, + /** + * Command type used for OUTBOX1 notification enable + */ DMUB_CMD__OUTBOX1_ENABLE = 71, +#ifndef TRIM_FAMS + DMUB_CMD__FW_ASSISTED_MCLK_SWITCH = 76, +#endif + + /** + * Command type used for all VBIOS interface commands. + */ DMUB_CMD__VBIOS = 128, }; +/** + * enum dmub_out_cmd_type - DMUB outbox commands. + */ enum dmub_out_cmd_type { + /** + * Invalid outbox command, ignored. + */ DMUB_OUT_CMD__NULL = 0, + /** + * Command type used for DP AUX Reply data notification + */ DMUB_OUT_CMD__DP_AUX_REPLY = 1, + /** + * Command type used for DP HPD event notification + */ DMUB_OUT_CMD__DP_HPD_NOTIFY = 2, }; #pragma pack(push, 1) +/** + * struct dmub_cmd_header - Common command header fields. 
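/*
 * Composing a GPINT request from the pieces defined above: a 12-bit
 * command code plus a 16-bit parameter, packed into one 32-bit register
 * write.  Sketch:
 */
static inline uint32_t example_gpint_encode(enum dmub_gpint_command cmd,
					    uint16_t param)
{
	union dmub_gpint_data_register reg = { .all = 0 };

	reg.bits.command_code = cmd;	/* e.g. DMUB_GPINT__GET_FW_VERSION */
	reg.bits.param = param;

	return reg.all;
}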
+ */ struct dmub_cmd_header { - unsigned int type : 8; - unsigned int sub_type : 8; - unsigned int ret_status : 1; - unsigned int reserved0 : 7; - unsigned int payload_bytes : 6; /* up to 60 bytes */ - unsigned int reserved1 : 2; + unsigned int type : 8; /**< command type */ + unsigned int sub_type : 8; /**< command sub type */ + unsigned int ret_status : 1; /**< 1 if returned data, 0 otherwise */ + unsigned int reserved0 : 7; /**< reserved bits */ + unsigned int payload_bytes : 6; /* payload excluding header - up to 60 bytes */ + unsigned int reserved1 : 2; /**< reserved bits */ }; /* - * Read modify write + * struct dmub_cmd_read_modify_write_sequence - Read modify write * * 60 payload bytes can hold up to 5 sets of read modify writes, * each take 3 dwords. @@ -356,14 +586,24 @@ struct dmub_cmd_header { * command parser will skip the read and we can use modify_mask = 0xffff'ffff as reg write */ struct dmub_cmd_read_modify_write_sequence { - uint32_t addr; - uint32_t modify_mask; - uint32_t modify_value; + uint32_t addr; /**< register address */ + uint32_t modify_mask; /**< modify mask */ + uint32_t modify_value; /**< modify value */ }; -#define DMUB_READ_MODIFY_WRITE_SEQ__MAX 5 +/** + * Maximum number of ops in read modify write sequence. + */ +#define DMUB_READ_MODIFY_WRITE_SEQ__MAX 5 + +/** + * struct dmub_cmd_read_modify_write_sequence - Read modify write command. + */ struct dmub_rb_cmd_read_modify_write { - struct dmub_cmd_header header; // type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE + struct dmub_cmd_header header; /**< command header */ + /** + * Read modify write sequence. + */ struct dmub_cmd_read_modify_write_sequence seq[DMUB_READ_MODIFY_WRITE_SEQ__MAX]; }; @@ -381,19 +621,35 @@ struct dmub_rb_cmd_read_modify_write { */ struct dmub_cmd_reg_field_update_sequence { - uint32_t modify_mask; // 0xffff'ffff to skip initial read - uint32_t modify_value; + uint32_t modify_mask; /**< 0xffff'ffff to skip initial read */ + uint32_t modify_value; /**< value to update with */ }; -#define DMUB_REG_FIELD_UPDATE_SEQ__MAX 7 +/** + * Maximum number of ops in field update sequence. + */ +#define DMUB_REG_FIELD_UPDATE_SEQ__MAX 7 + +/** + * struct dmub_rb_cmd_reg_field_update_sequence - Field update command. + */ struct dmub_rb_cmd_reg_field_update_sequence { - struct dmub_cmd_header header; - uint32_t addr; + struct dmub_cmd_header header; /**< command header */ + uint32_t addr; /**< register address */ + /** + * Field update sequence. + */ struct dmub_cmd_reg_field_update_sequence seq[DMUB_REG_FIELD_UPDATE_SEQ__MAX]; }; + +/** + * Maximum number of burst write values. + */ +#define DMUB_BURST_WRITE_VALUES__MAX 14 + /* - * Burst write + * struct dmub_rb_cmd_burst_write - Burst write * * support use case such as writing out LUTs. * @@ -401,96 +657,141 @@ struct dmub_rb_cmd_reg_field_update_sequence { * * number of payload = header.payload_bytes / sizeof(struct read_modify_write_sequence) */ -#define DMUB_BURST_WRITE_VALUES__MAX 14 struct dmub_rb_cmd_burst_write { - struct dmub_cmd_header header; // type = DMUB_CMD__REG_SEQ_BURST_WRITE - uint32_t addr; + struct dmub_cmd_header header; /**< command header */ + uint32_t addr; /**< register start address */ + /** + * Burst write register values. 
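/*
 * Sizing sanity for the reg-offload commands above: a dmub_rb_cmd is
 * DMUB_RB_CMD_SIZE (64) bytes, the header takes 4, leaving 60 payload
 * bytes.  One read/modify/write element is 3 dwords (12 bytes), so
 * 60 / 12 = 5 = DMUB_READ_MODIFY_WRITE_SEQ__MAX.  Recovering the element
 * count of a received command, as the comment above describes (sketch):
 */
static inline unsigned int
example_rmw_seq_count(const struct dmub_rb_cmd_read_modify_write *cmd)
{
	return cmd->header.payload_bytes /
	       sizeof(struct dmub_cmd_read_modify_write_sequence);
}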
+ */ uint32_t write_values[DMUB_BURST_WRITE_VALUES__MAX]; }; - +/** + * struct dmub_rb_cmd_common - Common command header + */ struct dmub_rb_cmd_common { - struct dmub_cmd_header header; + struct dmub_cmd_header header; /**< command header */ + /** + * Padding to RB_CMD_SIZE + */ uint8_t cmd_buffer[DMUB_RB_CMD_SIZE - sizeof(struct dmub_cmd_header)]; }; +/** + * struct dmub_cmd_reg_wait_data - Register wait data + */ struct dmub_cmd_reg_wait_data { - uint32_t addr; - uint32_t mask; - uint32_t condition_field_value; - uint32_t time_out_us; + uint32_t addr; /**< Register address */ + uint32_t mask; /**< Mask for register bits */ + uint32_t condition_field_value; /**< Value to wait for */ + uint32_t time_out_us; /**< Time out for reg wait in microseconds */ }; +/** + * struct dmub_rb_cmd_reg_wait - Register wait command + */ struct dmub_rb_cmd_reg_wait { - struct dmub_cmd_header header; - struct dmub_cmd_reg_wait_data reg_wait; + struct dmub_cmd_header header; /**< Command header */ + struct dmub_cmd_reg_wait_data reg_wait; /**< Register wait data */ }; +/** + * struct dmub_cmd_PLAT_54186_wa - Underflow workaround + * + * Reprograms surface parameters to avoid underflow. + */ struct dmub_cmd_PLAT_54186_wa { - uint32_t DCSURF_SURFACE_CONTROL; - uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; - uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; - uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; - uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; + uint32_t DCSURF_SURFACE_CONTROL; /**< reg value */ + uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; /**< reg value */ + uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; /**< reg value */ + uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; /**< reg value */ + uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; /**< reg value */ struct { - uint8_t hubp_inst : 4; - uint8_t tmz_surface : 1; - uint8_t immediate :1; - uint8_t vmid : 4; - uint8_t grph_stereo : 1; - uint32_t reserved : 21; - } flip_params; - uint32_t reserved[9]; + uint8_t hubp_inst : 4; /**< HUBP instance */ + uint8_t tmz_surface : 1; /**< TMZ enable or disable */ + uint8_t immediate :1; /**< Immediate flip */ + uint8_t vmid : 4; /**< VMID */ + uint8_t grph_stereo : 1; /**< 1 if stereo */ + uint32_t reserved : 21; /**< Reserved */ + } flip_params; /**< Pageflip parameters */ + uint32_t reserved[9]; /**< Reserved bits */ }; +/** + * struct dmub_rb_cmd_PLAT_54186_wa - Underflow workaround command + */ struct dmub_rb_cmd_PLAT_54186_wa { - struct dmub_cmd_header header; - struct dmub_cmd_PLAT_54186_wa flip; + struct dmub_cmd_header header; /**< Command header */ + struct dmub_cmd_PLAT_54186_wa flip; /**< Flip data */ }; +/** + * struct dmub_rb_cmd_mall - MALL command data. 
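/*
 * Illustrative fill of an offloaded register wait: firmware polls addr
 * until (value & mask) == condition_field_value or time_out_us expires.
 * The register offset and values below are placeholders.
 */
static void example_fill_reg_wait(struct dmub_cmd_reg_wait_data *w)
{
	w->addr = 0x1234;		/* hypothetical register offset */
	w->mask = 0x00000001;		/* watch bit 0 */
	w->condition_field_value = 1;	/* wait until it reads 1 */
	w->time_out_us = 100;		/* bail out after 100 us */
}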
+ */ struct dmub_rb_cmd_mall { - struct dmub_cmd_header header; - union dmub_addr cursor_copy_src; - union dmub_addr cursor_copy_dst; - uint32_t tmr_delay; - uint32_t tmr_scale; - uint16_t cursor_width; - uint16_t cursor_pitch; - uint16_t cursor_height; - uint8_t cursor_bpp; - uint8_t debug_bits; + struct dmub_cmd_header header; /**< Common command header */ + union dmub_addr cursor_copy_src; /**< Cursor copy address */ + union dmub_addr cursor_copy_dst; /**< Cursor copy destination */ + uint32_t tmr_delay; /**< Timer delay */ + uint32_t tmr_scale; /**< Timer scale */ + uint16_t cursor_width; /**< Cursor width in pixels */ + uint16_t cursor_pitch; /**< Cursor pitch in pixels */ + uint16_t cursor_height; /**< Cursor height in pixels */ + uint8_t cursor_bpp; /**< Cursor bits per pixel */ + uint8_t debug_bits; /**< Debug bits */ - uint8_t reserved1; - uint8_t reserved2; + uint8_t reserved1; /**< Reserved bits */ + uint8_t reserved2; /**< Reserved bits */ }; +/** + * struct dmub_cmd_digx_encoder_control_data - Encoder control data. + */ struct dmub_cmd_digx_encoder_control_data { - union dig_encoder_control_parameters_v1_5 dig; + union dig_encoder_control_parameters_v1_5 dig; /**< payload */ }; +/** + * struct dmub_rb_cmd_digx_encoder_control - Encoder control command. + */ struct dmub_rb_cmd_digx_encoder_control { - struct dmub_cmd_header header; - struct dmub_cmd_digx_encoder_control_data encoder_control; + struct dmub_cmd_header header; /**< header */ + struct dmub_cmd_digx_encoder_control_data encoder_control; /**< payload */ }; +/** + * struct dmub_cmd_set_pixel_clock_data - Set pixel clock data. + */ struct dmub_cmd_set_pixel_clock_data { - struct set_pixel_clock_parameter_v1_7 clk; + struct set_pixel_clock_parameter_v1_7 clk; /**< payload */ }; +/** + * struct dmub_cmd_set_pixel_clock_data - Set pixel clock command. + */ struct dmub_rb_cmd_set_pixel_clock { - struct dmub_cmd_header header; - struct dmub_cmd_set_pixel_clock_data pixel_clock; + struct dmub_cmd_header header; /**< header */ + struct dmub_cmd_set_pixel_clock_data pixel_clock; /**< payload */ }; +/** + * struct dmub_cmd_enable_disp_power_gating_data - Display power gating. + */ struct dmub_cmd_enable_disp_power_gating_data { - struct enable_disp_power_gating_parameters_v2_1 pwr; + struct enable_disp_power_gating_parameters_v2_1 pwr; /**< payload */ }; +/** + * struct dmub_rb_cmd_enable_disp_power_gating - Display power command. + */ struct dmub_rb_cmd_enable_disp_power_gating { - struct dmub_cmd_header header; - struct dmub_cmd_enable_disp_power_gating_data power_gating; + struct dmub_cmd_header header; /**< header */ + struct dmub_cmd_enable_disp_power_gating_data power_gating; /**< payload */ }; +/** + * struct dmub_dig_transmitter_control_data_v1_7 - Transmitter control. + */ struct dmub_dig_transmitter_control_data_v1_7 { uint8_t phyid; /**< 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF */ uint8_t action; /**< Defined as ATOM_TRANSMITER_ACTION_xxx */ @@ -511,118 +812,266 @@ struct dmub_dig_transmitter_control_data_v1_7 { uint32_t reserved3[11]; /**< For future use */ }; +/** + * union dmub_cmd_dig1_transmitter_control_data - Transmitter control data. 
+ */ union dmub_cmd_dig1_transmitter_control_data { - struct dig_transmitter_control_parameters_v1_6 dig; - struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7; + struct dig_transmitter_control_parameters_v1_6 dig; /**< payload */ + struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7; /**< payload 1.7 */ }; +/** + * struct dmub_rb_cmd_dig1_transmitter_control - Transmitter control command. + */ struct dmub_rb_cmd_dig1_transmitter_control { - struct dmub_cmd_header header; - union dmub_cmd_dig1_transmitter_control_data transmitter_control; + struct dmub_cmd_header header; /**< header */ + union dmub_cmd_dig1_transmitter_control_data transmitter_control; /**< payload */ }; +/** + * struct dmub_rb_cmd_dpphy_init - DPPHY init. + */ struct dmub_rb_cmd_dpphy_init { - struct dmub_cmd_header header; - uint8_t reserved[60]; + struct dmub_cmd_header header; /**< header */ + uint8_t reserved[60]; /**< reserved bits */ }; +/** + * enum dp_aux_request_action - DP AUX request command listing. + * + * 4 AUX request command bits are shifted to high nibble. + */ enum dp_aux_request_action { + /** I2C-over-AUX write request */ DP_AUX_REQ_ACTION_I2C_WRITE = 0x00, + /** I2C-over-AUX read request */ DP_AUX_REQ_ACTION_I2C_READ = 0x10, + /** I2C-over-AUX write status request */ DP_AUX_REQ_ACTION_I2C_STATUS_REQ = 0x20, + /** I2C-over-AUX write request with MOT=1 */ DP_AUX_REQ_ACTION_I2C_WRITE_MOT = 0x40, + /** I2C-over-AUX read request with MOT=1 */ DP_AUX_REQ_ACTION_I2C_READ_MOT = 0x50, + /** I2C-over-AUX write status request with MOT=1 */ DP_AUX_REQ_ACTION_I2C_STATUS_REQ_MOT = 0x60, + /** Native AUX write request */ DP_AUX_REQ_ACTION_DPCD_WRITE = 0x80, + /** Native AUX read request */ DP_AUX_REQ_ACTION_DPCD_READ = 0x90 }; +/** + * enum aux_return_code_type - DP AUX process return code listing. + */ enum aux_return_code_type { + /** AUX process succeeded */ AUX_RET_SUCCESS = 0, + /** AUX process failed with unknown reason */ AUX_RET_ERROR_UNKNOWN, + /** AUX process completed with invalid reply */ AUX_RET_ERROR_INVALID_REPLY, + /** AUX process timed out */ AUX_RET_ERROR_TIMEOUT, + /** HPD was low during AUX process */ AUX_RET_ERROR_HPD_DISCON, + /** Failed to acquire AUX engine */ AUX_RET_ERROR_ENGINE_ACQUIRE, + /** AUX request not supported */ AUX_RET_ERROR_INVALID_OPERATION, + /** AUX process not available */ AUX_RET_ERROR_PROTOCOL_ERROR, }; +/** + * enum aux_channel_type - DP AUX channel type listing. + */ enum aux_channel_type { + /** AUX thru Legacy DP AUX */ AUX_CHANNEL_LEGACY_DDC, + /** AUX thru DPIA DP tunneling */ AUX_CHANNEL_DPIA }; -/* DP AUX command */ +/** + * struct aux_transaction_parameters - DP AUX request transaction data + */ struct aux_transaction_parameters { - uint8_t is_i2c_over_aux; - uint8_t action; - uint8_t length; - uint8_t pad; - uint32_t address; - uint8_t data[16]; + uint8_t is_i2c_over_aux; /**< 0=native AUX, 1=I2C-over-AUX */ + uint8_t action; /**< enum dp_aux_request_action */ + uint8_t length; /**< DP AUX request data length */ + uint8_t reserved; /**< For future use */ + uint32_t address; /**< DP AUX address */ + uint8_t data[16]; /**< DP AUX write data */ }; +/** + * Data passed from driver to FW in a DMUB_CMD__DP_AUX_ACCESS command. 
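/*
 * As the dp_aux_request_action comment above says, the 4 AUX command bits
 * sit in the high nibble, leaving the low nibble clear.  Extracting the
 * raw DP AUX command from an action value (sketch):
 */
static inline uint8_t example_aux_raw_command(enum dp_aux_request_action action)
{
	return ((uint8_t)action >> 4) & 0xf;	/* DP_AUX_REQ_ACTION_DPCD_READ (0x90) -> 0x9 */
}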
+ */ struct dmub_cmd_dp_aux_control_data { - uint32_t handle; - uint8_t instance; - uint8_t sw_crc_enabled; - uint16_t timeout; - enum aux_channel_type type; - struct aux_transaction_parameters dpaux; + uint8_t instance; /**< AUX instance or DPIA instance */ + uint8_t manual_acq_rel_enable; /**< manual control for acquiring or releasing AUX channel */ + uint8_t sw_crc_enabled; /**< Use software CRC for tunneling packet instead of hardware CRC */ + uint8_t reserved0; /**< For future use */ + uint16_t timeout; /**< timeout time in us */ + uint16_t reserved1; /**< For future use */ + enum aux_channel_type type; /**< enum aux_channel_type */ + struct aux_transaction_parameters dpaux; /**< struct aux_transaction_parameters */ }; +/** + * Definition of a DMUB_CMD__DP_AUX_ACCESS command. + */ struct dmub_rb_cmd_dp_aux_access { + /** + * Command header. + */ struct dmub_cmd_header header; + /** + * Data passed from driver to FW in a DMUB_CMD__DP_AUX_ACCESS command. + */ struct dmub_cmd_dp_aux_control_data aux_control; }; +/** + * Definition of a DMUB_CMD__OUTBOX1_ENABLE command. + */ struct dmub_rb_cmd_outbox1_enable { + /** + * Command header. + */ struct dmub_cmd_header header; + /** + * enable: 0x0 -> disable outbox1 notification (default value) + * 0x1 -> enable outbox1 notification + */ uint32_t enable; }; /* DP AUX Reply command - OutBox Cmd */ +/** + * Data passed to driver from FW in a DMUB_OUT_CMD__DP_AUX_REPLY command. + */ struct aux_reply_data { + /** + * Aux cmd + */ uint8_t command; + /** + * Aux reply data length (max: 16 bytes) + */ uint8_t length; + /** + * Alignment only + */ uint8_t pad[2]; + /** + * Aux reply data + */ uint8_t data[16]; }; +/** + * Control Data passed to driver from FW in a DMUB_OUT_CMD__DP_AUX_REPLY command. + */ struct aux_reply_control_data { + /** + * Reserved for future use + */ uint32_t handle; + /** + * Aux Instance + */ uint8_t instance; + /** + * Aux transaction result: definition in enum aux_return_code_type + */ uint8_t result; + /** + * Alignment only + */ uint16_t pad; }; +/** + * Definition of a DMUB_OUT_CMD__DP_AUX_REPLY command. + */ struct dmub_rb_cmd_dp_aux_reply { + /** + * Command header. + */ struct dmub_cmd_header header; + /** + * Control Data passed to driver from FW in a DMUB_OUT_CMD__DP_AUX_REPLY command. + */ struct aux_reply_control_data control; + /** + * Data passed to driver from FW in a DMUB_OUT_CMD__DP_AUX_REPLY command. + */ struct aux_reply_data reply_data; }; /* DP HPD Notify command - OutBox Cmd */ +/** + * DP HPD Type + */ enum dp_hpd_type { + /** + * Normal DP HPD + */ DP_HPD = 0, + /** + * DP HPD short pulse + */ DP_IRQ }; +/** + * DP HPD Status + */ enum dp_hpd_status { + /** + * DP_HPD status low + */ DP_HPD_UNPLUG = 0, + /** + * DP_HPD status high + */ DP_HPD_PLUG }; +/** + * Data passed to driver from FW in a DMUB_OUT_CMD__DP_HPD_NOTIFY command. + */ struct dp_hpd_data { + /** + * DP HPD instance + */ uint8_t instance; + /** + * HPD type + */ uint8_t hpd_type; + /** + * HPD status: only for type: DP_HPD to indicate status + */ uint8_t hpd_status; + /** + * Alignment only + */ uint8_t pad; }; +/** + * Definition of a DMUB_OUT_CMD__DP_HPD_NOTIFY command. + */ struct dmub_rb_cmd_dp_hpd_notify { + /** + * Command header. + */ struct dmub_cmd_header header; + /** + * Data passed to driver from FW in a DMUB_OUT_CMD__DP_HPD_NOTIFY command. + */ struct dp_hpd_data hpd_data; }; @@ -631,270 +1080,901 @@ struct dmub_rb_cmd_dp_hpd_notify { * Do not reuse or modify IDs. */ +/** + * PSR command sub-types. 
+ */ enum dmub_cmd_psr_type { + /** + * Set PSR version support. + */ DMUB_CMD__PSR_SET_VERSION = 0, + /** + * Copy driver-calculated parameters to PSR state. + */ DMUB_CMD__PSR_COPY_SETTINGS = 1, + /** + * Enable PSR. + */ DMUB_CMD__PSR_ENABLE = 2, + + /** + * Disable PSR. + */ DMUB_CMD__PSR_DISABLE = 3, + + /** + * Set PSR level. + * PSR level is a 16-bit value dicated by driver that + * will enable/disable different functionality. + */ DMUB_CMD__PSR_SET_LEVEL = 4, + + /** + * Forces PSR enabled until an explicit PSR disable call. + */ DMUB_CMD__PSR_FORCE_STATIC = 5, }; +#ifndef TRIM_FAMS +enum dmub_cmd_fams_type { + DMUB_CMD__FAMS_SETUP_FW_CTRL = 0, + DMUB_CMD__FAMS_DRR_UPDATE = 1, +}; +#endif + +/** + * PSR versions. + */ enum psr_version { + /** + * PSR version 1. + */ PSR_VERSION_1 = 0, + /** + * PSR not supported. + */ PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF, }; +/** + * enum dmub_cmd_mall_type - MALL commands + */ enum dmub_cmd_mall_type { + /** + * Allows display refresh from MALL. + */ DMUB_CMD__MALL_ACTION_ALLOW = 0, + /** + * Disallows display refresh from MALL. + */ DMUB_CMD__MALL_ACTION_DISALLOW = 1, + /** + * Cursor copy for MALL. + */ DMUB_CMD__MALL_ACTION_COPY_CURSOR = 2, + /** + * Controls DF requests. + */ DMUB_CMD__MALL_ACTION_NO_DF_REQ = 3, }; + +/** + * Data passed from driver to FW in a DMUB_CMD__PSR_COPY_SETTINGS command. + */ struct dmub_cmd_psr_copy_settings_data { + /** + * Flags that can be set by driver to change some PSR behaviour. + */ union dmub_psr_debug_flags debug; + /** + * 16-bit value dicated by driver that will enable/disable different functionality. + */ uint16_t psr_level; + /** + * DPP HW instance. + */ uint8_t dpp_inst; - /* opp_inst and mpcc_inst will not be used in dmub fw, + /** + * MPCC HW instance. + * Not used in dmub fw, * dmub fw will get active opp by reading odm registers. */ uint8_t mpcc_inst; + /** + * OPP HW instance. + * Not used in dmub fw, + * dmub fw will get active opp by reading odm registers. + */ uint8_t opp_inst; - + /** + * OTG HW instance. + */ uint8_t otg_inst; + /** + * DIG FE HW instance. + */ uint8_t digfe_inst; + /** + * DIG BE HW instance. + */ uint8_t digbe_inst; + /** + * DP PHY HW instance. + */ uint8_t dpphy_inst; + /** + * AUX HW instance. + */ uint8_t aux_inst; + /** + * Determines if SMU optimzations are enabled/disabled. + */ uint8_t smu_optimizations_en; + /** + * Unused. + * TODO: Remove. + */ uint8_t frame_delay; + /** + * If RFB setup time is greater than the total VBLANK time, + * it is not possible for the sink to capture the video frame + * in the same frame the SDP is sent. In this case, + * the frame capture indication bit should be set and an extra + * static frame should be transmitted to the sink. + */ uint8_t frame_cap_ind; + /** + * Explicit padding to 4 byte boundary. + */ uint8_t pad[2]; + /** + * Multi-display optimizations are implemented on certain ASICs. + */ uint8_t multi_disp_optimizations_en; + /** + * The last possible line SDP may be transmitted without violating + * the RFB setup time or entering the active video frame. + */ uint16_t init_sdp_deadline; + /** + * Explicit padding to 4 byte boundary. + */ uint16_t pad2; + /** + * Length of each horizontal line in us. + */ uint32_t line_time_in_us; }; +/** + * Definition of a DMUB_CMD__PSR_COPY_SETTINGS command. + */ struct dmub_rb_cmd_psr_copy_settings { + /** + * Command header. + */ struct dmub_cmd_header header; + /** + * Data passed from driver to FW in a DMUB_CMD__PSR_COPY_SETTINGS command. 
+/** + * Data passed from driver to FW in a DMUB_CMD__PSR_SET_LEVEL command. + */ struct dmub_cmd_psr_set_level_data { + /** + * 16-bit value dictated by driver that will enable/disable different functionality. + */ uint16_t psr_level; + /** + * Explicit padding to 4 byte boundary. + */ uint8_t pad[2]; }; +/** + * Definition of a DMUB_CMD__PSR_SET_LEVEL command. + */ struct dmub_rb_cmd_psr_set_level { + /** + * Command header. + */ struct dmub_cmd_header header; + /** + * Data passed from driver to FW in a DMUB_CMD__PSR_SET_LEVEL command. + */ struct dmub_cmd_psr_set_level_data psr_set_level_data; }; +/** + * Definition of a DMUB_CMD__PSR_ENABLE command. + * PSR enable/disable is controlled using the sub_type. + */ struct dmub_rb_cmd_psr_enable { + /** + * Command header. + */ struct dmub_cmd_header header; }; +/** + * Data passed from driver to FW in a DMUB_CMD__PSR_SET_VERSION command. + */ struct dmub_cmd_psr_set_version_data { - enum psr_version version; // PSR version 1 or 2 + /** + * PSR version that FW should implement. + */ + enum psr_version version; }; +/** + * Definition of a DMUB_CMD__PSR_SET_VERSION command. + */ struct dmub_rb_cmd_psr_set_version { + /** + * Command header. + */ struct dmub_cmd_header header; + /** + * Data passed from driver to FW in a DMUB_CMD__PSR_SET_VERSION command. + */ struct dmub_cmd_psr_set_version_data psr_set_version_data; }; +/** + * Definition of a DMUB_CMD__PSR_FORCE_STATIC command. + */ struct dmub_rb_cmd_psr_force_static { + /** + * Command header. + */ struct dmub_cmd_header header; }; +/** + * Set of HW components that can be locked. + */ union dmub_hw_lock_flags { + /** + * Set of HW components that can be locked. + */ struct { + /** + * Lock/unlock OTG master update lock. + */ uint8_t lock_pipe : 1; + /** + * Lock/unlock cursor. + */ uint8_t lock_cursor : 1; + /** + * Lock/unlock global update lock. + */ uint8_t lock_dig : 1; + /** + * Triple buffer lock requires additional hw programming on top of the usual OTG master lock. + */ uint8_t triple_buffer_lock : 1; } bits; + /** + * Union for HW Lock flags. + */ uint8_t u8All; }; +/** + * Instances of HW to be locked. + */ struct dmub_hw_lock_inst_flags { + /** + * OTG HW instance for OTG master update lock. + */ uint8_t otg_inst; + /** + * OPP instance for cursor lock. + */ uint8_t opp_inst; + /** + * OTG HW instance for global update lock. + * TODO: Remove, and re-use otg_inst. + */ uint8_t dig_inst; + /** + * Explicit pad to 4 byte boundary. + */ uint8_t pad; }; +/** + * Clients that can acquire the HW Lock Manager. + */ enum hw_lock_client { + /** + * Driver is the client of HW Lock Manager. + */ HW_LOCK_CLIENT_DRIVER = 0, + /** + * FW is the client of HW Lock Manager. + */ HW_LOCK_CLIENT_FW, + /** + * Invalid client. + */ HW_LOCK_CLIENT_INVALID = 0xFFFFFFFF, }; +/** + * Data passed to HW Lock Mgr in a DMUB_CMD__HW_LOCK command. + */ struct dmub_cmd_lock_hw_data { + /** + * Specifies the client accessing HW Lock Manager. + */ enum hw_lock_client client; + /** + * HW instances to be locked. + */ struct dmub_hw_lock_inst_flags inst_flags; + /** + * Which components are to be locked. + */ union dmub_hw_lock_flags hw_locks; + /** + * Specifies lock/unlock. + */ uint8_t lock; + /** + * HW can be unlocked separately from releasing the HW Lock Mgr. + * This flag is set if the client wishes to release the object. + */ uint8_t should_release; + /** + * Explicit padding to 4 byte boundary. + */ uint8_t pad; }; +/** + * Definition of a DMUB_CMD__HW_LOCK command. + * Command is used by driver and FW. + */ struct dmub_rb_cmd_lock_hw { + /** + * Command header. + */ struct dmub_cmd_header header; + /** + * Data passed to HW Lock Mgr in a DMUB_CMD__HW_LOCK command. + */ struct dmub_cmd_lock_hw_data lock_hw_data; };
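/*
 * Illustrative sketch, not part of this patch: acquiring/releasing the OTG
 * master update lock through DMUB_CMD__HW_LOCK on behalf of the driver. This
 * mirrors the shape of the dc HW Lock Manager code; example_hw_lock() is a
 * made-up name, while the dc_dmub_srv_* helpers are the existing dc
 * submission entry points.
 */
static void example_hw_lock(struct dc_dmub_srv *dmub_srv,
			    uint8_t otg_inst, bool lock)
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.lock_hw.header.type = DMUB_CMD__HW_LOCK;
	cmd.lock_hw.header.payload_bytes = sizeof(struct dmub_cmd_lock_hw_data);

	cmd.lock_hw.lock_hw_data.client = HW_LOCK_CLIENT_DRIVER;
	cmd.lock_hw.lock_hw_data.lock = lock ? 1 : 0;
	cmd.lock_hw.lock_hw_data.hw_locks.bits.lock_pipe = 1;
	cmd.lock_hw.lock_hw_data.inst_flags.otg_inst = otg_inst;
	/* Release the HW Lock Mgr object when the unlock is requested. */
	cmd.lock_hw.lock_hw_data.should_release = lock ? 0 : 1;

	dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dmub_srv);
	dc_dmub_srv_wait_idle(dmub_srv);
}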
+/** + * ABM command sub-types. + */ enum dmub_cmd_abm_type { + /** + * Initialize parameters for ABM algorithm. + * Data is passed through an indirect buffer. + */ DMUB_CMD__ABM_INIT_CONFIG = 0, + /** + * Set OTG and panel HW instance. + */ DMUB_CMD__ABM_SET_PIPE = 1, + /** + * Set user requested backlight level. + */ DMUB_CMD__ABM_SET_BACKLIGHT = 2, + /** + * Set ABM operating/aggression level. + */ DMUB_CMD__ABM_SET_LEVEL = 3, + /** + * Set ambient light level. + */ DMUB_CMD__ABM_SET_AMBIENT_LEVEL = 4, + /** + * Enable/disable fractional duty cycle for backlight PWM. + */ DMUB_CMD__ABM_SET_PWM_FRAC = 5, }; -#define NUM_AMBI_LEVEL 5 -#define NUM_AGGR_LEVEL 4 -#define NUM_POWER_FN_SEGS 8 -#define NUM_BL_CURVE_SEGS 16 - -/* - * Parameters for ABM2.4 algorithm. - * Padded explicitly to 32-bit boundary. +/** + * Parameters for ABM2.4 algorithm. Passed from driver to FW via an indirect buffer. + * Requirements: + * - Padded explicitly to 32-bit boundary. + * - Must ensure this structure matches the one on driver-side, + * otherwise it won't be aligned. */ struct abm_config_table { - /* Parameters for crgb conversion */ + /** + * Gamma curve thresholds, used for crgb conversion. + */ uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; // 0B + /** + * Gamma curve offsets, used for crgb conversion. + */ uint16_t crgb_offset[NUM_POWER_FN_SEGS]; // 16B + /** + * Gamma curve slopes, used for crgb conversion. + */ uint16_t crgb_slope[NUM_POWER_FN_SEGS]; // 32B - - /* Parameters for custom curve */ + /** + * Custom backlight curve thresholds. + */ uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; // 48B + /** + * Custom backlight curve offsets. + */ uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; // 80B - + /** + * Ambient light thresholds. + */ uint16_t ambient_thresholds_lux[NUM_AMBI_LEVEL]; // 112B + /** + * Minimum programmable backlight. + */ uint16_t min_abm_backlight; // 122B - + /** + * Minimum reduction values. + */ uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 124B + /** + * Maximum reduction values. + */ uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 144B + /** + * Bright positive gain. + */ uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 164B + /** + * Dark positive gain. + */ uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 184B + /** + * Hybrid factor. + */ uint8_t hybrid_factor[NUM_AGGR_LEVEL]; // 204B + /** + * Contrast factor. + */ uint8_t contrast_factor[NUM_AGGR_LEVEL]; // 208B + /** + * Deviation gain. + */ uint8_t deviation_gain[NUM_AGGR_LEVEL]; // 212B + /** + * Minimum knee. + */ uint8_t min_knee[NUM_AGGR_LEVEL]; // 216B + /** + * Maximum knee. + */ uint8_t max_knee[NUM_AGGR_LEVEL]; // 220B + /** + * Unused. + */ uint8_t iir_curve[NUM_AMBI_LEVEL]; // 224B + /** + * Explicit padding to 4 byte boundary. + */ uint8_t pad3[3]; // 229B - + /** + * Backlight ramp reduction. + */ uint16_t blRampReduction[NUM_AGGR_LEVEL]; // 232B + /** + * Backlight ramp start. + */ uint16_t blRampStart[NUM_AGGR_LEVEL]; // 240B }; +/** + * Data passed from driver to FW in a DMUB_CMD__ABM_SET_PIPE command. + */ struct dmub_cmd_abm_set_pipe_data { + /** + * OTG HW instance. + */ uint8_t otg_inst; + + /** + * Panel Control HW instance.
+ */ uint8_t panel_inst; + + /** + * Controls how ABM will interpret a set pipe or set level command. + */ uint8_t set_pipe_option; - uint8_t ramping_boundary; // TODO: Remove this + + /** + * Unused. + * TODO: Remove. + */ + uint8_t ramping_boundary; }; +/** + * Definition of a DMUB_CMD__ABM_SET_PIPE command. + */ struct dmub_rb_cmd_abm_set_pipe { + /** + * Command header. + */ struct dmub_cmd_header header; + + /** + * Data passed from driver to FW in a DMUB_CMD__ABM_SET_PIPE command. + */ struct dmub_cmd_abm_set_pipe_data abm_set_pipe_data; }; +/** + * Data passed from driver to FW in a DMUB_CMD__ABM_SET_BACKLIGHT command. + */ struct dmub_cmd_abm_set_backlight_data { + /** + * Number of frames to ramp to backlight user level. + */ uint32_t frame_ramp; + + /** + * Requested backlight level from user. + */ uint32_t backlight_user_level; }; +/** + * Definition of a DMUB_CMD__ABM_SET_BACKLIGHT command. + */ struct dmub_rb_cmd_abm_set_backlight { + /** + * Command header. + */ struct dmub_cmd_header header; + + /** + * Data passed from driver to FW in a DMUB_CMD__ABM_SET_BACKLIGHT command. + */ struct dmub_cmd_abm_set_backlight_data abm_set_backlight_data; }; +/** + * Data passed from driver to FW in a DMUB_CMD__ABM_SET_LEVEL command. + */ struct dmub_cmd_abm_set_level_data { + /** + * Set current ABM operating/aggression level. + */ uint32_t level; }; +/** + * Definition of a DMUB_CMD__ABM_SET_LEVEL command. + */ struct dmub_rb_cmd_abm_set_level { + /** + * Command header. + */ struct dmub_cmd_header header; + + /** + * Data passed from driver to FW in a DMUB_CMD__ABM_SET_LEVEL command. + */ struct dmub_cmd_abm_set_level_data abm_set_level_data; }; +/** + * Data passed from driver to FW in a DMUB_CMD__ABM_SET_AMBIENT_LEVEL command. + */ struct dmub_cmd_abm_set_ambient_level_data { + /** + * Ambient light sensor reading from OS. + */ uint32_t ambient_lux; }; +/** + * Definition of a DMUB_CMD__ABM_SET_AMBIENT_LEVEL command. + */ struct dmub_rb_cmd_abm_set_ambient_level { + /** + * Command header. + */ struct dmub_cmd_header header; + + /** + * Data passed from driver to FW in a DMUB_CMD__ABM_SET_AMBIENT_LEVEL command. + */ struct dmub_cmd_abm_set_ambient_level_data abm_set_ambient_level_data; }; +/** + * Data passed from driver to FW in a DMUB_CMD__ABM_SET_PWM_FRAC command. + */ struct dmub_cmd_abm_set_pwm_frac_data { + /** + * Enable/disable fractional duty cycle for backlight PWM. + * TODO: Convert to uint8_t. + */ uint32_t fractional_pwm; }; +/** + * Definition of a DMUB_CMD__ABM_SET_PWM_FRAC command. + */ struct dmub_rb_cmd_abm_set_pwm_frac { + /** + * Command header. + */ struct dmub_cmd_header header; + + /** + * Data passed from driver to FW in a DMUB_CMD__ABM_SET_PWM_FRAC command. + */ struct dmub_cmd_abm_set_pwm_frac_data abm_set_pwm_frac_data; }; +/** + * Data passed from driver to FW in a DMUB_CMD__ABM_INIT_CONFIG command. + */ struct dmub_cmd_abm_init_config_data { + /** + * Location of indirect buffer used to pass init data to ABM. + */ union dmub_addr src; + + /** + * Indirect buffer length. + */ uint16_t bytes; }; +/** + * Definition of a DMUB_CMD__ABM_INIT_CONFIG command. + */ struct dmub_rb_cmd_abm_init_config { + /** + * Command header. + */ struct dmub_cmd_header header; + + /** + * Data passed from driver to FW in a DMUB_CMD__ABM_INIT_CONFIG command. + */ struct dmub_cmd_abm_init_config_data abm_init_config_data; }; +/** + * Data passed from driver to FW in a DMUB_CMD__QUERY_FEATURE_CAPS command. 
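/*
 * Illustrative sketch, not part of this patch: issuing a
 * DMUB_CMD__ABM_SET_BACKLIGHT request with a user backlight level and a ramp
 * duration in frames. The ABM code in dc/dce/dmub_abm.c builds the command
 * the same way; example_abm_set_backlight() is a made-up name and error
 * handling is omitted.
 */
static void example_abm_set_backlight(struct dc_dmub_srv *dmub_srv,
				      uint32_t backlight_user_level,
				      uint32_t frame_ramp)
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
	cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
	cmd.abm_set_backlight.header.payload_bytes =
		sizeof(struct dmub_cmd_abm_set_backlight_data);

	cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
	cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level =
		backlight_user_level;

	dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dmub_srv);
	dc_dmub_srv_wait_idle(dmub_srv);
}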
+ */ struct dmub_cmd_query_feature_caps_data { - struct dmub_feature_caps feature_caps; + /** + * DMUB feature capabilities. + * After DMUB init, driver will query FW capabilities prior to enabling certain features. + */ + struct dmub_feature_caps feature_caps; }; +/** + * Definition of a DMUB_CMD__QUERY_FEATURE_CAPS command. + */ struct dmub_rb_cmd_query_feature_caps { - struct dmub_cmd_header header; - struct dmub_cmd_query_feature_caps_data query_feature_caps_data; + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Data passed from driver to FW in a DMUB_CMD__QUERY_FEATURE_CAPS command. + */ + struct dmub_cmd_query_feature_caps_data query_feature_caps_data; +}; + +struct dmub_optc_state { + uint32_t v_total_max; + uint32_t v_total_min; + uint32_t v_total_mid; + uint32_t v_total_mid_frame_num; + uint32_t tg_inst; + uint32_t enable_manual_trigger; + uint32_t clear_force_vsync; +}; + +struct dmub_rb_cmd_drr_update { + struct dmub_cmd_header header; + struct dmub_optc_state dmub_optc_state_req; +}; + +#ifndef TRIM_FAMS +struct dmub_cmd_fw_assisted_mclk_switch_pipe_data { + uint32_t pix_clk_100hz; + uint32_t min_refresh_in_uhz; + uint32_t max_ramp_step; +}; + +struct dmub_cmd_fw_assisted_mclk_switch_config { + uint32_t fams_enabled; + struct dmub_cmd_fw_assisted_mclk_switch_pipe_data pipe_data[DMUB_MAX_STREAMS]; +}; + +struct dmub_rb_cmd_fw_assisted_mclk_switch { + struct dmub_cmd_header header; + struct dmub_cmd_fw_assisted_mclk_switch_config config_data; +}; +#endif + +/** + * Data passed from driver to FW in a DMUB_CMD__VBIOS_LVTMA_CONTROL command. + */ +struct dmub_cmd_lvtma_control_data { + uint8_t uc_pwr_action; /**< LVTMA_ACTION */ + uint8_t reserved_0[3]; /**< For future use */ + uint8_t panel_inst; /**< LVTMA control instance */ + uint8_t reserved_1[3]; /**< For future use */ }; - union dmub_rb_cmd { +/** + * Definition of a DMUB_CMD__VBIOS_LVTMA_CONTROL command. + */ +struct dmub_rb_cmd_lvtma_control { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Data passed from driver to FW in a DMUB_CMD__VBIOS_LVTMA_CONTROL command. + */ + struct dmub_cmd_lvtma_control_data data; +}; + +/** + * union dmub_rb_cmd - DMUB inbox command. + */ +union dmub_rb_cmd { struct dmub_rb_cmd_lock_hw lock_hw; + /** + * Elements shared with all commands. + */ + struct dmub_rb_cmd_common cmd_common; + /** + * Definition of a DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE command. + */ struct dmub_rb_cmd_read_modify_write read_modify_write; + /** + * Definition of a DMUB_CMD__REG_SEQ_FIELD_UPDATE_SEQ command. + */ struct dmub_rb_cmd_reg_field_update_sequence reg_field_update_seq; + /** + * Definition of a DMUB_CMD__REG_SEQ_BURST_WRITE command. + */ struct dmub_rb_cmd_burst_write burst_write; + /** + * Definition of a DMUB_CMD__REG_REG_WAIT command. + */ struct dmub_rb_cmd_reg_wait reg_wait; - struct dmub_rb_cmd_common cmd_common; + /** + * Definition of a DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL command. + */ struct dmub_rb_cmd_digx_encoder_control digx_encoder_control; + /** + * Definition of a DMUB_CMD__VBIOS_SET_PIXEL_CLOCK command. + */ struct dmub_rb_cmd_set_pixel_clock set_pixel_clock; + /** + * Definition of a DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING command. + */ struct dmub_rb_cmd_enable_disp_power_gating enable_disp_power_gating; + /** + * Definition of a DMUB_CMD__VBIOS_DPPHY_INIT command. + */ struct dmub_rb_cmd_dpphy_init dpphy_init; + /** + * Definition of a DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL command. 
+ */ struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control; + /** + * Definition of a DMUB_CMD__PSR_SET_VERSION command. + */ struct dmub_rb_cmd_psr_set_version psr_set_version; + /** + * Definition of a DMUB_CMD__PSR_COPY_SETTINGS command. + */ struct dmub_rb_cmd_psr_copy_settings psr_copy_settings; + /** + * Definition of a DMUB_CMD__PSR_ENABLE command. + */ struct dmub_rb_cmd_psr_enable psr_enable; + /** + * Definition of a DMUB_CMD__PSR_SET_LEVEL command. + */ struct dmub_rb_cmd_psr_set_level psr_set_level; + /** + * Definition of a DMUB_CMD__PSR_FORCE_STATIC command. + */ struct dmub_rb_cmd_psr_force_static psr_force_static; + /** + * Definition of a DMUB_CMD__PLAT_54186_WA command. + */ struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa; + /** + * Definition of a DMUB_CMD__MALL command. + */ struct dmub_rb_cmd_mall mall; + /** + * Definition of a DMUB_CMD__ABM_SET_PIPE command. + */ struct dmub_rb_cmd_abm_set_pipe abm_set_pipe; + + /** + * Definition of a DMUB_CMD__ABM_SET_BACKLIGHT command. + */ struct dmub_rb_cmd_abm_set_backlight abm_set_backlight; + + /** + * Definition of a DMUB_CMD__ABM_SET_LEVEL command. + */ struct dmub_rb_cmd_abm_set_level abm_set_level; + + /** + * Definition of a DMUB_CMD__ABM_SET_AMBIENT_LEVEL command. + */ struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level; + + /** + * Definition of a DMUB_CMD__ABM_SET_PWM_FRAC command. + */ struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac; + + /** + * Definition of a DMUB_CMD__ABM_INIT_CONFIG command. + */ struct dmub_rb_cmd_abm_init_config abm_init_config; + + /** + * Definition of a DMUB_CMD__DP_AUX_ACCESS command. + */ struct dmub_rb_cmd_dp_aux_access dp_aux_access; + + /** + * Definition of a DMUB_CMD__OUTBOX1_ENABLE command. + */ struct dmub_rb_cmd_outbox1_enable outbox1_enable; + + /** + * Definition of a DMUB_CMD__QUERY_FEATURE_CAPS command. + */ struct dmub_rb_cmd_query_feature_caps query_feature_caps; + struct dmub_rb_cmd_drr_update drr_update; +#ifndef TRIM_FAMS + struct dmub_rb_cmd_fw_assisted_mclk_switch fw_assisted_mclk_switch; +#endif + /** + * Definition of a DMUB_CMD__VBIOS_LVTMA_CONTROL command. + */ + struct dmub_rb_cmd_lvtma_control lvtma_control; }; +/** + * union dmub_rb_out_cmd - Outbox command + */ union dmub_rb_out_cmd { + /** + * Parameters common to every command. + */ struct dmub_rb_cmd_common cmd_common; + /** + * AUX reply command. + */ struct dmub_rb_cmd_dp_aux_reply dp_aux_reply; + /** + * HPD notify command. 
+ */ struct dmub_rb_cmd_dp_hpd_notify dp_hpd_notify; }; #pragma pack(pop) @@ -910,31 +1990,49 @@ union dmub_rb_out_cmd { extern "C" { #endif +/** + * struct dmub_rb_init_params - Initialization params for DMUB ringbuffer + */ struct dmub_rb_init_params { - void *ctx; - void *base_address; - uint32_t capacity; - uint32_t read_ptr; - uint32_t write_ptr; + void *ctx; /**< Caller provided context pointer */ + void *base_address; /**< CPU base address for ring's data */ + uint32_t capacity; /**< Ringbuffer capacity in bytes */ + uint32_t read_ptr; /**< Initial read pointer for consumer in bytes */ + uint32_t write_ptr; /**< Initial write pointer for producer in bytes */ }; +/** + * struct dmub_rb - Inbox or outbox DMUB ringbuffer + */ struct dmub_rb { - void *base_address; - uint32_t data_count; - uint32_t rptr; - uint32_t wrpt; - uint32_t capacity; + void *base_address; /**< CPU address for the ring's data */ + uint32_t rptr; /**< Read pointer for consumer in bytes */ + uint32_t wrpt; /**< Write pointer for producer in bytes */ + uint32_t capacity; /**< Ringbuffer capacity in bytes */ - void *ctx; - void *dmub; + void *ctx; /**< Caller provided context pointer */ + void *dmub; /**< Pointer to the DMUB interface */ }; - +/** + * @brief Checks if the ringbuffer is empty. + * + * @param rb DMUB Ringbuffer + * @return true if empty + * @return false otherwise + */ static inline bool dmub_rb_empty(struct dmub_rb *rb) { return (rb->wrpt == rb->rptr); } +/** + * @brief Checks if the ringbuffer is full + * + * @param rb DMUB Ringbuffer + * @return true if full + * @return false otherwise + */ static inline bool dmub_rb_full(struct dmub_rb *rb) { uint32_t data_count; @@ -947,6 +2045,14 @@ static inline bool dmub_rb_full(struct dmub_rb *rb) return (data_count == (rb->capacity - DMUB_RB_CMD_SIZE)); } +/** + * @brief Pushes a command into the ringbuffer + * + * @param rb DMUB ringbuffer + * @param cmd The command to push + * @return true if the ringbuffer was not full + * @return false otherwise + */ static inline bool dmub_rb_push_front(struct dmub_rb *rb, const union dmub_rb_cmd *cmd) { @@ -969,6 +2075,14 @@ static inline bool dmub_rb_push_front(struct dmub_rb *rb, return true; } +/** + * @brief Pushes a command into the DMUB outbox ringbuffer + * + * @param rb DMUB outbox ringbuffer + * @param cmd Outbox command + * @return true if not full + * @return false otherwise + */ static inline bool dmub_rb_out_push_front(struct dmub_rb *rb, const union dmub_rb_out_cmd *cmd) { @@ -988,6 +2102,14 @@ static inline bool dmub_rb_out_push_front(struct dmub_rb *rb, return true; } +/** + * @brief Returns the next unprocessed command in the ringbuffer. + * + * @param rb DMUB ringbuffer + * @param cmd The command to return + * @return true if not empty + * @return false otherwise + */ static inline bool dmub_rb_front(struct dmub_rb *rb, union dmub_rb_cmd **cmd) { @@ -1001,6 +2123,14 @@ static inline bool dmub_rb_front(struct dmub_rb *rb, return true; } +/** + * @brief Returns the next unprocessed command in the outbox. + * + * @param rb DMUB outbox ringbuffer + * @param cmd The outbox command to return + * @return true if not empty + * @return false otherwise + */ static inline bool dmub_rb_out_front(struct dmub_rb *rb, union dmub_rb_out_cmd *cmd) { @@ -1018,6 +2148,13 @@ static inline bool dmub_rb_out_front(struct dmub_rb *rb, return true; } +/** + * @brief Removes the front entry in the ringbuffer. 
+ * + * @param rb DMUB ringbuffer + * @return true if the command was removed + * @return false if there were no commands + */ static inline bool dmub_rb_pop_front(struct dmub_rb *rb) { if (dmub_rb_empty(rb)) @@ -1031,6 +2168,14 @@ static inline bool dmub_rb_pop_front(struct dmub_rb *rb) return true; } +/** + * @brief Flushes commands in the ringbuffer to framebuffer memory. + * + * Avoids a race condition where DMCUB accesses memory while + * there are still writes in flight to framebuffer. + * + * @param rb DMUB ringbuffer + */ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb) { uint32_t rptr = rb->rptr; @@ -1049,6 +2194,12 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb) } } +/** + * @brief Initializes a DMCUB ringbuffer + * + * @param rb DMUB ringbuffer + * @param init_params initial configuration for the ringbuffer + */ static inline void dmub_rb_init(struct dmub_rb *rb, struct dmub_rb_init_params *init_params) { @@ -1058,6 +2209,12 @@ static inline void dmub_rb_init(struct dmub_rb *rb, rb->wrpt = init_params->write_ptr; } +/** + * @brief Copies output data from in/out commands into the given command. + * + * @param rb DMUB ringbuffer + * @param cmd Command to copy data into + */ static inline void dmub_rb_get_return_data(struct dmub_rb *rb, union dmub_rb_cmd *cmd) { diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h index 6b3ee42db350..8a122ceabb3a 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h @@ -25,7 +25,7 @@ #ifndef _DMUB_TRACE_BUFFER_H_ #define _DMUB_TRACE_BUFFER_H_ -#include "dmub_types.h" +#include "dmub_cmd.h" #define LOAD_DMCU_FW 1 #define LOAD_PHY_FW 2 @@ -65,5 +65,4 @@ struct dmcub_trace_buf { struct dmcub_trace_buf_entry entries[PERF_TRACE_MAX_ENTRY]; }; - #endif /* _DMUB_TRACE_BUFFER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile index 945287164cf2..7495c23c73a9 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/Makefile +++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile @@ -20,7 +20,7 @@ # OTHER DEALINGS IN THE SOFTWARE. 
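/*
 * Illustrative sketch, not part of this patch: the producer/consumer pattern
 * built from the dmub_rb_* inline helpers above. The driver is the producer
 * for the inbox (push + flush); the consumer loop below is what the firmware
 * side conceptually runs. Buffer placement is made up for illustration; in
 * the real driver the ring lives in framebuffer memory.
 */
static void example_rb_roundtrip(void)
{
	static uint64_t buffer[DMUB_RB_SIZE / sizeof(uint64_t)];
	struct dmub_rb_init_params init = {
		.base_address = buffer,
		.capacity = DMUB_RB_SIZE,
	};
	struct dmub_rb rb;
	union dmub_rb_cmd cmd = { 0 };
	union dmub_rb_cmd *front;

	dmub_rb_init(&rb, &init);

	/* Producer: queue one command, then make sure the writes land. */
	cmd.cmd_common.header.type = DMUB_CMD__PSR;
	if (dmub_rb_push_front(&rb, &cmd))
		dmub_rb_flush_pending(&rb);

	/* Consumer: peek at each entry, process it, then retire it. */
	while (dmub_rb_front(&rb, &front)) {
		/* ... handle *front ... */
		dmub_rb_pop_front(&rb);
	}
}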
# -DMUB = dmub_srv.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o +DMUB = dmub_srv.o dmub_srv_stat.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o DMUB += dmub_dcn30.o dmub_dcn301.o DMUB += dmub_dcn302.o diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index 8e8e65fa83c0..6934906c665e 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -135,6 +135,8 @@ void dmub_dcn20_reset(struct dmub_srv *dmub) REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); REG_WRITE(DMCUB_INBOX1_RPTR, 0); REG_WRITE(DMCUB_INBOX1_WPTR, 0); + REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); + REG_WRITE(DMCUB_OUTBOX1_WPTR, 0); REG_WRITE(DMCUB_SCRATCH0, 0); } @@ -248,6 +250,13 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub, DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top, DMCUB_REGION3_CW5_ENABLE, 1); + REG_WRITE(DMCUB_REGION5_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION5_OFFSET_HIGH, offset.u.high_part); + REG_SET_2(DMCUB_REGION5_TOP_ADDRESS, 0, + DMCUB_REGION5_TOP_ADDRESS, + cw5->region.top - cw5->region.base - 1, + DMCUB_REGION5_ENABLE, 1); + dmub_dcn20_translate_addr(&cw6->offset, fb_base, fb_offset, &offset); REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part); @@ -280,6 +289,54 @@ void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset) REG_WRITE(DMCUB_INBOX1_WPTR, wptr_offset); } +void dmub_dcn20_setup_out_mailbox(struct dmub_srv *dmub, + const struct dmub_region *outbox1) +{ + /* New firmware can support CW4 for the outbox. */ + if (dmub_dcn20_use_cached_inbox(dmub)) + REG_WRITE(DMCUB_OUTBOX1_BASE_ADDRESS, outbox1->base); + else + REG_WRITE(DMCUB_OUTBOX1_BASE_ADDRESS, 0x80002000); + + REG_WRITE(DMCUB_OUTBOX1_SIZE, outbox1->top - outbox1->base); +} + +uint32_t dmub_dcn20_get_outbox1_wptr(struct dmub_srv *dmub) +{ + /** + * outbox1 wptr register is accessed without locks (dal & dc) + * and to be called only by dmub_srv_stat_get_notification() + */ + return REG_READ(DMCUB_OUTBOX1_WPTR); +} + +void dmub_dcn20_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset) +{ + /** + * outbox1 rptr register is accessed without locks (dal & dc) + * and to be called only by dmub_srv_stat_get_notification() + */ + REG_WRITE(DMCUB_OUTBOX1_RPTR, rptr_offset); +} + +void dmub_dcn20_setup_outbox0(struct dmub_srv *dmub, + const struct dmub_region *outbox0) +{ + REG_WRITE(DMCUB_OUTBOX0_BASE_ADDRESS, outbox0->base); + + REG_WRITE(DMCUB_OUTBOX0_SIZE, outbox0->top - outbox0->base); +} + +uint32_t dmub_dcn20_get_outbox0_wptr(struct dmub_srv *dmub) +{ + return REG_READ(DMCUB_OUTBOX0_WPTR); +} + +void dmub_dcn20_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset) +{ + REG_WRITE(DMCUB_OUTBOX0_RPTR, rptr_offset); +} + bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub) { uint32_t is_hw_init; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h index a62be9c0652e..de5351cd5abc 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h @@ -40,6 +40,14 @@ struct dmub_srv; DMUB_SR(DMCUB_INBOX1_SIZE) \ DMUB_SR(DMCUB_INBOX1_RPTR) \ DMUB_SR(DMCUB_INBOX1_WPTR) \ + DMUB_SR(DMCUB_OUTBOX0_BASE_ADDRESS) \ + DMUB_SR(DMCUB_OUTBOX0_SIZE) \ + DMUB_SR(DMCUB_OUTBOX0_RPTR) \ + DMUB_SR(DMCUB_OUTBOX0_WPTR) \ + DMUB_SR(DMCUB_OUTBOX1_BASE_ADDRESS) \ + DMUB_SR(DMCUB_OUTBOX1_SIZE) \ + DMUB_SR(DMCUB_OUTBOX1_RPTR) \ + DMUB_SR(DMCUB_OUTBOX1_WPTR) \ DMUB_SR(DMCUB_REGION3_CW0_OFFSET) \ 
DMUB_SR(DMCUB_REGION3_CW1_OFFSET) \ DMUB_SR(DMCUB_REGION3_CW2_OFFSET) \ @@ -75,6 +83,9 @@ struct dmub_srv; DMUB_SR(DMCUB_REGION4_OFFSET) \ DMUB_SR(DMCUB_REGION4_OFFSET_HIGH) \ DMUB_SR(DMCUB_REGION4_TOP_ADDRESS) \ + DMUB_SR(DMCUB_REGION5_OFFSET) \ + DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \ + DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \ DMUB_SR(DMCUB_SCRATCH0) \ DMUB_SR(DMCUB_SCRATCH1) \ DMUB_SR(DMCUB_SCRATCH2) \ @@ -95,7 +106,8 @@ struct dmub_srv; DMUB_SR(CC_DC_PIPE_DIS) \ DMUB_SR(MMHUBBUB_SOFT_RESET) \ DMUB_SR(DCN_VM_FB_LOCATION_BASE) \ - DMUB_SR(DCN_VM_FB_OFFSET) + DMUB_SR(DCN_VM_FB_OFFSET) \ + DMUB_SR(DMCUB_INTERRUPT_ACK) #define DMUB_COMMON_FIELDS() \ DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \ @@ -123,10 +135,13 @@ struct dmub_srv; DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE) \ DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_TOP_ADDRESS) \ DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \ + DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \ + DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \ DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \ DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \ DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \ - DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) + DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) \ + DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX0_READY_INT_ACK) struct dmub_srv_common_reg_offset { #define DMUB_SR(reg) uint32_t reg; @@ -180,6 +195,20 @@ uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub); void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset); +void dmub_dcn20_setup_out_mailbox(struct dmub_srv *dmub, + const struct dmub_region *outbox1); + +uint32_t dmub_dcn20_get_outbox1_wptr(struct dmub_srv *dmub); + +void dmub_dcn20_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset); + +void dmub_dcn20_setup_outbox0(struct dmub_srv *dmub, + const struct dmub_region *outbox0); + +uint32_t dmub_dcn20_get_outbox0_wptr(struct dmub_srv *dmub); + +void dmub_dcn20_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset); + bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub); bool dmub_dcn20_is_supported(struct dmub_srv *dmub); @@ -200,4 +229,6 @@ union dmub_fw_boot_status dmub_dcn20_get_fw_boot_status(struct dmub_srv *dmub); bool dmub_dcn20_use_cached_inbox(struct dmub_srv *dmub); +bool dmub_dcn20_use_cached_trace_buffer(struct dmub_srv *dmub); + #endif /* _DMUB_DCN20_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c index b4bc0df2f14a..fb11c8d39208 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c @@ -180,6 +180,13 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub, DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top, DMCUB_REGION3_CW5_ENABLE, 1); + REG_WRITE(DMCUB_REGION5_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION5_OFFSET_HIGH, offset.u.high_part); + REG_SET_2(DMCUB_REGION5_TOP_ADDRESS, 0, + DMCUB_REGION5_TOP_ADDRESS, + cw5->region.top - cw5->region.base - 1, + DMCUB_REGION5_ENABLE, 1); + offset = cw6->offset; REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 61f64a295f06..8ba0a9e2da54 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -46,8 +46,8 @@ /* Context size. 
*/ #define DMUB_CONTEXT_SIZE (512 * 1024) -/* Mailbox size */ -#define DMUB_MAILBOX_SIZE (DMUB_RB_SIZE) +/* Mailbox size : Ring buffers are required for both inbox and outbox */ +#define DMUB_MAILBOX_SIZE ((2 * DMUB_RB_SIZE)) /* Default state size if meta is absent. */ #define DMUB_FW_STATE_SIZE (64 * 1024) @@ -55,6 +55,7 @@ /* Default tracebuffer size if meta is absent. */ #define DMUB_TRACE_BUFFER_SIZE (64 * 1024) + /* Default scratch mem size. */ #define DMUB_SCRATCH_MEM_SIZE (256) @@ -69,6 +70,8 @@ #define DMUB_CW5_BASE (0x65000000) #define DMUB_CW6_BASE (0x66000000) +#define DMUB_REGION5_BASE (0xA0000000) + static inline uint32_t dmub_align(uint32_t val, uint32_t factor) { return (val + factor - 1) / factor * factor; @@ -157,6 +160,16 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->enable_dmub_boot_options = dmub_dcn20_enable_dmub_boot_options; funcs->skip_dmub_panel_power_sequence = dmub_dcn20_skip_dmub_panel_power_sequence; + // Out mailbox register access functions for RN and above + funcs->setup_out_mailbox = dmub_dcn20_setup_out_mailbox; + funcs->get_outbox1_wptr = dmub_dcn20_get_outbox1_wptr; + funcs->set_outbox1_rptr = dmub_dcn20_set_outbox1_rptr; + + // Outbox0 register access functions + funcs->setup_outbox0 = dmub_dcn20_setup_outbox0; + funcs->get_outbox0_wptr = dmub_dcn20_get_outbox0_wptr; + funcs->set_outbox0_rptr = dmub_dcn20_set_outbox0_rptr; + if (asic == DMUB_ASIC_DCN21) { dmub->regs = &dmub_srv_dcn21_regs; @@ -395,9 +408,9 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE]; struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM]; - struct dmub_rb_init_params rb_params; + struct dmub_rb_init_params rb_params, outbox0_rb_params; struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6; - struct dmub_region inbox1; + struct dmub_region inbox1, outbox1, outbox0; if (!dmub->sw_init) return DMUB_STATUS_INVALID; @@ -444,13 +457,26 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, cw4.region.base = DMUB_CW4_BASE; cw4.region.top = cw4.region.base + mail_fb->size; + /** + * Doubled the mailbox region to accommodate inbox and outbox. + * Note: Currently the total mailbox size is 16KB, split equally + * into 8KB for the inbox and 8KB for the outbox. If this config is + * changed, then the uncached base address configuration of outbox1 + * has to be updated in funcs->setup_out_mailbox.
+ */ inbox1.base = cw4.region.base; - inbox1.top = cw4.region.top; + inbox1.top = cw4.region.base + DMUB_RB_SIZE; + outbox1.base = inbox1.top; + outbox1.top = cw4.region.top; cw5.offset.quad_part = tracebuff_fb->gpu_addr; cw5.region.base = DMUB_CW5_BASE; cw5.region.top = cw5.region.base + tracebuff_fb->size; + outbox0.base = DMUB_REGION5_BASE + TRACE_BUFFER_ENTRY_OFFSET; + outbox0.top = outbox0.base + tracebuff_fb->size - TRACE_BUFFER_ENTRY_OFFSET; + + cw6.offset.quad_part = fw_state_fb->gpu_addr; cw6.region.base = DMUB_CW6_BASE; cw6.region.top = cw6.region.base + fw_state_fb->size; @@ -463,8 +489,13 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6); + if (dmub->hw_funcs.setup_outbox0) + dmub->hw_funcs.setup_outbox0(dmub, &outbox0); + if (dmub->hw_funcs.setup_mailbox) dmub->hw_funcs.setup_mailbox(dmub, &inbox1); + if (dmub->hw_funcs.setup_out_mailbox) + dmub->hw_funcs.setup_out_mailbox(dmub, &outbox1); } if (mail_fb) { @@ -474,8 +505,21 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, rb_params.capacity = DMUB_RB_SIZE; dmub_rb_init(&dmub->inbox1_rb, &rb_params); + + // Initialize outbox1 ring buffer + rb_params.ctx = dmub; + rb_params.base_address = (void *) ((uint8_t *) (mail_fb->cpu_addr) + DMUB_RB_SIZE); + rb_params.capacity = DMUB_RB_SIZE; + dmub_rb_init(&dmub->outbox1_rb, &rb_params); + } + dmub_memset(&outbox0_rb_params, 0, sizeof(outbox0_rb_params)); + outbox0_rb_params.ctx = dmub; + outbox0_rb_params.base_address = (void *)((uint64_t)(tracebuff_fb->cpu_addr) + TRACE_BUFFER_ENTRY_OFFSET); + outbox0_rb_params.capacity = tracebuff_fb->size - dmub_align(TRACE_BUFFER_ENTRY_OFFSET, 64); + dmub_rb_init(&dmub->outbox0_rb, &outbox0_rb_params); + if (dmub->hw_funcs.reset_release) dmub->hw_funcs.reset_release(dmub); @@ -674,3 +718,33 @@ enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub, return status; } + +static inline bool dmub_rb_out_trace_buffer_front(struct dmub_rb *rb, + void *entry) +{ + const uint64_t *src = (const uint64_t *)(rb->base_address) + rb->rptr / sizeof(uint64_t); + uint64_t *dst = (uint64_t *)entry; + uint8_t i; + uint8_t loop_count; + + if (rb->rptr == rb->wrpt) + return false; + + loop_count = sizeof(struct dmcub_trace_buf_entry) / sizeof(uint64_t); + // copying data + for (i = 0; i < loop_count; i++) + *dst++ = *src++; + + rb->rptr += sizeof(struct dmcub_trace_buf_entry); + + rb->rptr %= rb->capacity; + + return true; +} + +bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry) +{ + dmub->outbox0_rb.wrpt = dmub->hw_funcs.get_outbox0_wptr(dmub); + + return dmub_rb_out_trace_buffer_front(&dmub->outbox0_rb, (void *)entry); +} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c new file mode 100644 index 000000000000..e6f3bfab33d3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c @@ -0,0 +1,105 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dmub/dmub_srv_stat.h" +#include "dmub/inc/dmub_cmd.h" + +/** + * DOC: DMUB_SRV STAT Interface + * + * These interfaces are called without acquiring DAL and DC locks. + * Hence, there are limitations on what these interfaces can access. Only + * variables exclusively defined for these interfaces can be modified. + */ + +/** + ***************************************************************************** + * Function: dmub_srv_stat_get_notification + * + * @brief + * Retrieves a dmub outbox notification and sets up the dmub notification + * structure with the message information. Also sets a pending bit if the + * queue holds more notifications + * + * @param [in] dmub: dmub srv structure + * @param [out] notify: dmub notification structure to be filled up + * + * @return + * dmub_status + ***************************************************************************** + */ +enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub, + struct dmub_notification *notify) +{ + /** + * This function is called without dal and dc locks, so + * we must not modify any dmub variables; only dmub->outbox1_rb + * is exempted, as it is exclusively accessed by this function + */ + union dmub_rb_out_cmd cmd = {0}; + + if (!dmub->hw_init) { + notify->type = DMUB_NOTIFICATION_NO_DATA; + notify->pending_notification = false; + return DMUB_STATUS_INVALID; + } + + /* Get write pointer which is updated by dmub */ + dmub->outbox1_rb.wrpt = dmub->hw_funcs.get_outbox1_wptr(dmub); + + if (!dmub_rb_out_front(&dmub->outbox1_rb, &cmd)) { + notify->type = DMUB_NOTIFICATION_NO_DATA; + notify->pending_notification = false; + return DMUB_STATUS_OK; + } + + switch (cmd.cmd_common.header.type) { + case DMUB_OUT_CMD__DP_AUX_REPLY: + notify->type = DMUB_NOTIFICATION_AUX_REPLY; + notify->link_index = cmd.dp_aux_reply.control.instance; + notify->result = cmd.dp_aux_reply.control.result; + dmub_memcpy((void *)&notify->aux_reply, + (void *)&cmd.dp_aux_reply.reply_data, sizeof(struct aux_reply_data)); + break; + default: + notify->type = DMUB_NOTIFICATION_NO_DATA; + break; + } + + /* Pop outbox1 ringbuffer and update read pointer */ + dmub_rb_pop_front(&dmub->outbox1_rb); + dmub->hw_funcs.set_outbox1_rptr(dmub, dmub->outbox1_rb.rptr); + + /** + * Notify dc whether dmub has a pending outbox message, + * this is to avoid one more call to dmub_srv_stat_get_notification + */ + if (dmub_rb_empty(&dmub->outbox1_rb)) + notify->pending_notification = false; + else + notify->pending_notification = true; + + return DMUB_STATUS_OK; +}
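/*
 * Illustrative sketch, not part of this patch: how a driver-side caller might
 * drain outbox1 with this interface. The pending_notification flag saves an
 * extra polling round trip when the queue still holds entries.
 * example_drain_outbox1() is a made-up name; only the AUX reply notification
 * type exists at this point in the series.
 */
static void example_drain_outbox1(struct dmub_srv *dmub)
{
	struct dmub_notification notify;

	do {
		if (dmub_srv_stat_get_notification(dmub, &notify) != DMUB_STATUS_OK)
			return;

		if (notify.type == DMUB_NOTIFICATION_AUX_REPLY) {
			/* Hand notify.aux_reply to the waiting AUX transfer. */
		}
	} while (notify.pending_notification);
}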
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 4762273b5bb9..e5f9d7704a63 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -903,12 +903,31 @@ static void build_vrr_infopacket_v3(enum signal_type signal, infopacket->valid = true; } +static void build_vrr_infopacket_sdp_v1_3(enum vrr_packet_type packet_type, + struct dc_info_packet *infopacket) +{ + uint8_t idx = 0, size = 0; + + size = ((packet_type == PACKET_TYPE_FS_V1) ? 0x08 : + (packet_type == PACKET_TYPE_FS_V3) ? 0x10 : + 0x09); + + for (idx = infopacket->hb2; idx > 1; idx--) // Data Byte Count: 0x1B + infopacket->sb[idx] = infopacket->sb[idx-1]; + + infopacket->sb[1] = size; // Length + infopacket->sb[0] = (infopacket->hb3 >> 2) & 0x3F;//Version + infopacket->hb3 = (0x13 << 2); // Header,SDP 1.3 + infopacket->hb2 = 0x1D; +} + void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, const struct dc_stream_state *stream, const struct mod_vrr_params *vrr, enum vrr_packet_type packet_type, enum color_transfer_func app_tf, - struct dc_info_packet *infopacket) + struct dc_info_packet *infopacket, + bool pack_sdp_v1_3) { /* SPD info packet for FreeSync * VTEM info packet for HdmiVRR @@ -941,6 +960,12 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, default: build_vrr_infopacket_v1(stream->signal, vrr, infopacket); } + + if (true == pack_sdp_v1_3 && + true == dc_is_dp_signal(stream->signal) && + packet_type != PACKET_TYPE_VRR && + packet_type != PACKET_TYPE_VTEM) + build_vrr_infopacket_sdp_v1_3(packet_type, infopacket); } void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, @@ -1304,4 +1329,3 @@ bool mod_freesync_is_valid_range(uint32_t min_refresh_cap_in_uhz, return true; } - diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index c80fc10d732c..b64cd5bdc7b5 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -150,7 +150,8 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, const struct mod_vrr_params *vrr, enum vrr_packet_type packet_type, enum color_transfer_func app_tf, - struct dc_info_packet *infopacket); + struct dc_info_packet *infopacket, + bool pack_sdp_v1_3); void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, const struct dc_stream_state *stream,
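/*
 * Illustrative sketch, not part of this patch: a caller updated for the new
 * pack_sdp_v1_3 parameter. Passing true requests the SDP 1.3 repack for DP
 * FreeSync packet types; HDMI VTEM and plain VRR packets are left untouched
 * by the guard above. example_build_fs_v1_infopacket() is a made-up name and
 * TRANSFER_FUNC_UNKNOWN is assumed from mod_shared.h.
 */
static void example_build_fs_v1_infopacket(struct mod_freesync *mod_freesync,
					   const struct dc_stream_state *stream,
					   const struct mod_vrr_params *vrr,
					   struct dc_info_packet *infopacket)
{
	/* FS v1 over DP: the SPD payload is shifted and rewrapped as SDP 1.3. */
	mod_freesync_build_vrr_infopacket(mod_freesync, stream, vrr,
					  PACKET_TYPE_FS_V1,
					  TRANSFER_FUNC_UNKNOWN,
					  infopacket, true);
}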