Diffstat (limited to 'drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c')
 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 605
 1 file changed, 487 insertions(+), 118 deletions(-)
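One of the behavioural changes in this diff: _dpu_crtc_setup_blend_cfg() now scales the plane's constant alpha differently depending on the MDSS core revision, because mixers on MDSS v12+ take a 10-bit constant alpha instead of an 8-bit one. A minimal standalone sketch of that scaling follows (the struct and helper below are illustrative, not part of the driver):

#include <stdint.h>

/*
 * DRM exposes plane alpha as a 16-bit value (0x0000..0xffff). Mixers on
 * MDSS cores older than v12 take an 8-bit constant alpha (max 0xff);
 * v12+ mixers take a 10-bit one (max 0x3ff). The background constant is
 * the complement of the foreground, as in the hunk below.
 */
struct blend_alpha {
	uint32_t fg;
	uint32_t bg;
	uint32_t max;
};

static inline struct blend_alpha scale_plane_alpha(uint16_t drm_alpha,
						   unsigned int core_major_ver)
{
	struct blend_alpha a;

	if (core_major_ver < 12) {
		a.max = 0xff;
		a.fg = drm_alpha >> 8;	/* 16-bit -> 8-bit */
	} else {
		a.max = 0x3ff;
		a.fg = drm_alpha >> 6;	/* 16-bit -> 10-bit */
	}
	a.bg = a.max - a.fg;

	return a;
}

With a fully opaque plane (alpha 0xffff) this yields fg == max and bg == 0 on both generations, so the "fg_alpha != max_alpha" tests in the patch are the generation-independent replacement for the old "!= 0xff" checks.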
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 8ce7586e2ddf..c39f1908ea65 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> @@ -51,17 +51,6 @@ static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc) return to_dpu_kms(priv->kms); } -static void dpu_crtc_destroy(struct drm_crtc *crtc) -{ - struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); - - if (!crtc) - return; - - drm_crtc_cleanup(crtc); - kfree(dpu_crtc); -} - static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -125,7 +114,7 @@ static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state) continue; /* Calculate MISR over 1 frame */ - m->hw_lm->ops.setup_misr(m->hw_lm, true, 1); + m->hw_lm->ops.setup_misr(m->hw_lm); } } @@ -211,7 +200,7 @@ static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc, struct dpu_crtc_state *crtc_state) { struct dpu_crtc_mixer *m; - u32 crcs[CRTC_DUAL_MIXERS]; + u32 crcs[CRTC_QUAD_MIXERS]; int rc = 0; int i; @@ -331,14 +320,22 @@ static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc, } static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer, - struct dpu_plane_state *pstate, struct dpu_format *format) + struct dpu_plane_state *pstate, + const struct msm_format *format, + const struct dpu_mdss_version *mdss_ver) { struct dpu_hw_mixer *lm = mixer->hw_lm; - uint32_t blend_op; - uint32_t fg_alpha, bg_alpha; + u32 blend_op; + u32 fg_alpha, bg_alpha, max_alpha; - fg_alpha = pstate->base.alpha >> 8; - bg_alpha = 0xff - fg_alpha; + if (mdss_ver->core_major_ver < 12) { + max_alpha = 0xff; + fg_alpha = pstate->base.alpha >> 8; + } else { + max_alpha = 0x3ff; + fg_alpha = pstate->base.alpha >> 6; + } + bg_alpha = max_alpha - fg_alpha; /* default to opaque blending */ if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE || @@ -348,7 +345,7 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer, } else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) { blend_op = DPU_BLEND_FG_ALPHA_FG_CONST | DPU_BLEND_BG_ALPHA_FG_PIXEL; - if (fg_alpha != 0xff) { + if (fg_alpha != max_alpha) { bg_alpha = fg_alpha; blend_op |= DPU_BLEND_BG_MOD_ALPHA | DPU_BLEND_BG_INV_MOD_ALPHA; @@ -359,7 +356,7 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer, /* coverage blending */ blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL | DPU_BLEND_BG_ALPHA_FG_PIXEL; - if (fg_alpha != 0xff) { + if (fg_alpha != max_alpha) { bg_alpha = fg_alpha; blend_op |= DPU_BLEND_FG_MOD_ALPHA | DPU_BLEND_FG_INV_MOD_ALPHA | @@ -374,17 +371,16 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer, fg_alpha, bg_alpha, blend_op); DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n", - &format->base.pixel_format, format->alpha_enable, blend_op); + &format->pixel_format, format->alpha_enable, blend_op); } static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc) { struct dpu_crtc_state *crtc_state; - int lm_idx, lm_horiz_position; + int lm_idx; crtc_state = to_dpu_crtc_state(crtc->state); - lm_horiz_position = 0; for (lm_idx = 0; lm_idx < 
crtc_state->num_mixers; lm_idx++) { const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx]; struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm; @@ -395,7 +391,7 @@ static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc) cfg.out_width = drm_rect_width(lm_roi); cfg.out_height = drm_rect_height(lm_roi); - cfg.right_mixer = lm_horiz_position++; + cfg.right_mixer = lm_idx & 0x1; cfg.flags = 0; hw_lm->ops.setup_mixer_out(hw_lm, &cfg); } @@ -404,16 +400,16 @@ static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc) static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc, struct drm_plane *plane, struct dpu_crtc_mixer *mixer, - u32 num_mixers, + u32 lms_in_pair, enum dpu_stage stage, - struct dpu_format *format, + const struct msm_format *format, uint64_t modifier, struct dpu_sw_pipe *pipe, unsigned int stage_idx, struct dpu_hw_stage_cfg *stage_cfg ) { - uint32_t lm_idx; + u32 lm_idx; enum dpu_sspp sspp_idx; struct drm_plane_state *state; @@ -423,7 +419,7 @@ static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc, trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane), state, to_dpu_plane_state(state), stage_idx, - format->base.pixel_format, + format->pixel_format, pipe, modifier); DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d multirect_idx %d\n", @@ -438,7 +434,7 @@ static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc, stage_cfg->multirect_index[stage][stage_idx] = pipe->multirect_index; /* blend config update */ - for (lm_idx = 0; lm_idx < num_mixers; lm_idx++) + for (lm_idx = 0; lm_idx < lms_in_pair; lm_idx++) mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl, sspp_idx); } @@ -451,14 +447,15 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, struct drm_plane_state *state; struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); struct dpu_plane_state *pstate = NULL; - struct dpu_format *format; + const struct msm_format *format; struct dpu_hw_ctl *ctl = mixer->lm_ctl; - - uint32_t lm_idx; + u32 lm_idx, stage, i, pipe_idx, head_pipe_in_stage, lms_in_pair; bool bg_alpha_enable = false; - DECLARE_BITMAP(fetch_active, SSPP_MAX); + DECLARE_BITMAP(active_fetch, SSPP_MAX); + DECLARE_BITMAP(active_pipes, SSPP_MAX); - memset(fetch_active, 0, sizeof(fetch_active)); + memset(active_fetch, 0, sizeof(active_fetch)); + memset(active_pipes, 0, sizeof(active_pipes)); drm_atomic_crtc_for_each_plane(plane, crtc) { state = plane->state; if (!state) @@ -470,30 +467,36 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, pstate = to_dpu_plane_state(state); fb = state->fb; - format = to_dpu_format(msm_framebuffer_format(pstate->base.fb)); + format = msm_framebuffer_format(pstate->base.fb); if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable) bg_alpha_enable = true; - set_bit(pstate->pipe.sspp->idx, fetch_active); - _dpu_crtc_blend_setup_pipe(crtc, plane, - mixer, cstate->num_mixers, - pstate->stage, - format, fb ? fb->modifier : 0, - &pstate->pipe, 0, stage_cfg); - - if (pstate->r_pipe.sspp) { - set_bit(pstate->r_pipe.sspp->idx, fetch_active); - _dpu_crtc_blend_setup_pipe(crtc, plane, - mixer, cstate->num_mixers, - pstate->stage, - format, fb ? 
fb->modifier : 0, - &pstate->r_pipe, 1, stage_cfg); + /* loop pipe per mixer pair with config in stage structure */ + for (stage = 0; stage < STAGES_PER_PLANE; stage++) { + head_pipe_in_stage = stage * PIPES_PER_STAGE; + for (i = 0; i < PIPES_PER_STAGE; i++) { + pipe_idx = i + head_pipe_in_stage; + if (!pstate->pipe[pipe_idx].sspp) + continue; + lms_in_pair = min(cstate->num_mixers - (stage * PIPES_PER_STAGE), + PIPES_PER_STAGE); + set_bit(pstate->pipe[pipe_idx].sspp->idx, active_fetch); + set_bit(pstate->pipe[pipe_idx].sspp->idx, active_pipes); + _dpu_crtc_blend_setup_pipe(crtc, plane, + &mixer[head_pipe_in_stage], + lms_in_pair, + pstate->stage, + format, fb ? fb->modifier : 0, + &pstate->pipe[pipe_idx], i, + &stage_cfg[stage]); + } } /* blend config update */ for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) { - _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format); + _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format, + ctl->mdss_ver); if (bg_alpha_enable && !format->alpha_enable) mixer[lm_idx].mixer_op_mode = 0; @@ -503,8 +506,11 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, } } + if (ctl->ops.set_active_fetch_pipes) + ctl->ops.set_active_fetch_pipes(ctl, active_fetch); + if (ctl->ops.set_active_pipes) - ctl->ops.set_active_pipes(ctl, fetch_active); + ctl->ops.set_active_pipes(ctl, active_pipes); _dpu_crtc_program_lm_output_roi(crtc); } @@ -520,7 +526,8 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) struct dpu_crtc_mixer *mixer = cstate->mixers; struct dpu_hw_ctl *ctl; struct dpu_hw_mixer *lm; - struct dpu_hw_stage_cfg stage_cfg; + struct dpu_hw_stage_cfg stage_cfg[STAGES_PER_PLANE]; + DECLARE_BITMAP(active_lms, LM_MAX); int i; DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name); @@ -530,12 +537,20 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) if (mixer[i].lm_ctl->ops.clear_all_blendstages) mixer[i].lm_ctl->ops.clear_all_blendstages( mixer[i].lm_ctl); + if (mixer[i].lm_ctl->ops.set_active_fetch_pipes) + mixer[i].lm_ctl->ops.set_active_fetch_pipes(mixer[i].lm_ctl, NULL); + if (mixer[i].lm_ctl->ops.set_active_pipes) + mixer[i].lm_ctl->ops.set_active_pipes(mixer[i].lm_ctl, NULL); + + if (mixer[i].hw_lm->ops.clear_all_blendstages) + mixer[i].hw_lm->ops.clear_all_blendstages(mixer[i].hw_lm); } /* initialize stage cfg */ - memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg)); + memset(&stage_cfg, 0, sizeof(stage_cfg)); + memset(active_lms, 0, sizeof(active_lms)); - _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg); + _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, stage_cfg); for (i = 0; i < cstate->num_mixers; i++) { ctl = mixer[i].lm_ctl; @@ -547,13 +562,26 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) ctl->ops.update_pending_flush_mixer(ctl, mixer[i].hw_lm->idx); + set_bit(lm->idx, active_lms); + if (ctl->ops.set_active_lms) + ctl->ops.set_active_lms(ctl, active_lms); + DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n", mixer[i].hw_lm->idx - LM_0, mixer[i].mixer_op_mode, ctl->idx - CTL_0); - ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx, - &stage_cfg); + /* + * call dpu_hw_ctl_setup_blendstage() to blend layers per stage cfg. + * stage data is shared between PIPES_PER_STAGE pipes. 
+ */ + if (ctl->ops.setup_blendstage) + ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx, + &stage_cfg[i / PIPES_PER_STAGE]); + + if (lm->ops.setup_blendstage) + lm->ops.setup_blendstage(lm, mixer[i].hw_lm->idx, + &stage_cfg[i / PIPES_PER_STAGE]); } } @@ -574,7 +602,7 @@ static void _dpu_crtc_complete_flip(struct drm_crtc *crtc) spin_lock_irqsave(&dev->event_lock, flags); if (dpu_crtc->event) { - DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name, + DRM_DEBUG_VBL("%s: send event: %p\n", dpu_crtc->name, dpu_crtc->event); trace_dpu_crtc_complete_flip(DRMID(crtc)); drm_crtc_send_vblank_event(crtc, dpu_crtc->event); @@ -583,6 +611,10 @@ static void _dpu_crtc_complete_flip(struct drm_crtc *crtc) spin_unlock_irqrestore(&dev->event_lock, flags); } +/** + * dpu_crtc_get_intf_mode - get interface mode of the given crtc + * @crtc: Pointert to crtc + */ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc) { struct drm_encoder *encoder; @@ -605,6 +637,10 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc) return INTF_MODE_NONE; } +/** + * dpu_crtc_vblank_callback - called on vblank irq, issues completion events + * @crtc: Pointer to drm crtc object + */ void dpu_crtc_vblank_callback(struct drm_crtc *crtc) { struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); @@ -669,18 +705,18 @@ static void dpu_crtc_frame_event_work(struct kthread_work *work) DPU_ATRACE_END("crtc_frame_event"); } -/* - * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module - * registers this API to encoder for all frame event callbacks like - * frame_error, frame_done, idle_timeout, etc. Encoder may call different events - * from different context - IRQ, user thread, commit_thread, etc. Each event - * should be carefully reviewed and should be processed in proper task context - * to avoid schedulin delay or properly manage the irq context's bottom half - * processing. +/** + * dpu_crtc_frame_event_cb - crtc frame event callback API + * @crtc: Pointer to crtc + * @event: Event to process + * + * Encoder may call this for different events from different context - IRQ, + * user thread, commit_thread, etc. Each event should be carefully reviewed and + * should be processed in proper task context to avoid schedulin delay or + * properly manage the irq context's bottom half processing. 
*/ -static void dpu_crtc_frame_event_cb(void *data, u32 event) +void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event) { - struct drm_crtc *crtc = (struct drm_crtc *)data; struct dpu_crtc *dpu_crtc; struct msm_drm_private *priv; struct dpu_crtc_frame_event *fevent; @@ -712,9 +748,13 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event) fevent->event = event; fevent->crtc = crtc; fevent->ts = ktime_get(); - kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work); + kthread_queue_work(priv->kms->event_thread[crtc_id].worker, &fevent->work); } +/** + * dpu_crtc_complete_commit - callback signalling completion of current commit + * @crtc: Pointer to drm crtc object + */ void dpu_crtc_complete_commit(struct drm_crtc *crtc) { trace_dpu_crtc_complete_commit(DRMID(crtc)); @@ -722,14 +762,22 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc) _dpu_crtc_complete_flip(crtc); } -static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, +static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc, struct drm_crtc_state *state) { struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); struct drm_display_mode *adj_mode = &state->adjusted_mode; u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers; + struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); int i; + /* if we cannot merge 2 LMs (no 3d mux) better to fail earlier + * before even checking the width after the split + */ + if (!dpu_kms->catalog->caps->has_3d_merge && + adj_mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width) + return -E2BIG; + for (i = 0; i < cstate->num_mixers; i++) { struct drm_rect *r = &cstate->lm_bounds[i]; r->x1 = crtc_split_width * i; @@ -738,7 +786,12 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, r->y2 = adj_mode->vdisplay; trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r); + + if (drm_rect_width(r) > dpu_kms->catalog->caps->max_mixer_width) + return -E2BIG; } + + return 0; } static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state, @@ -814,7 +867,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id); - _dpu_crtc_setup_lm_bounds(crtc, crtc->state); + _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc->state); /* encoder will trigger pending mask now */ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) @@ -864,7 +917,7 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, dev = crtc->dev; priv = dev->dev_private; - if (crtc->index >= ARRAY_SIZE(priv->event_thread)) { + if (crtc->index >= ARRAY_SIZE(priv->kms->event_thread)) { DPU_ERROR("invalid crtc index[%d]\n", crtc->index); return; } @@ -939,6 +992,49 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc) return rc; } +static int dpu_crtc_kickoff_clone_mode(struct drm_crtc *crtc) +{ + struct drm_encoder *encoder; + struct drm_encoder *rt_encoder = NULL, *wb_encoder = NULL; + struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); + + /* Find encoder for real time display */ + drm_for_each_encoder_mask(encoder, crtc->dev, + crtc->state->encoder_mask) { + if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) + wb_encoder = encoder; + else + rt_encoder = encoder; + } + + if (!rt_encoder || !wb_encoder) { + DRM_DEBUG_ATOMIC("real time or wb encoder not found\n"); + return -EINVAL; + } + + dpu_encoder_prepare_for_kickoff(wb_encoder); + dpu_encoder_prepare_for_kickoff(rt_encoder); + + dpu_vbif_clear_errors(dpu_kms); + + /* + * Kickoff real time encoder last as it's the encoder that + * will do the flush + */ + 
dpu_encoder_kickoff(wb_encoder); + dpu_encoder_kickoff(rt_encoder); + + /* Don't start frame done timers until the kickoffs have finished */ + dpu_encoder_start_frame_done_timer(wb_encoder); + dpu_encoder_start_frame_done_timer(rt_encoder); + + return 0; +} + +/** + * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc + * @crtc: Pointer to drm crtc object + */ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc) { struct drm_encoder *encoder; @@ -963,13 +1059,27 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc) goto end; } } - /* - * Encoder will flush/start now, unless it has a tx pending. If so, it - * may delay and flush at an irq event (e.g. ppdone) - */ - drm_for_each_encoder_mask(encoder, crtc->dev, - crtc->state->encoder_mask) - dpu_encoder_prepare_for_kickoff(encoder); + + if (drm_crtc_in_clone_mode(crtc->state)) { + if (dpu_crtc_kickoff_clone_mode(crtc)) + goto end; + } else { + /* + * Encoder will flush/start now, unless it has a tx pending. + * If so, it may delay and flush at an irq event (e.g. ppdone) + */ + drm_for_each_encoder_mask(encoder, crtc->dev, + crtc->state->encoder_mask) + dpu_encoder_prepare_for_kickoff(encoder); + + dpu_vbif_clear_errors(dpu_kms); + + drm_for_each_encoder_mask(encoder, crtc->dev, + crtc->state->encoder_mask) { + dpu_encoder_kickoff(encoder); + dpu_encoder_start_frame_done_timer(encoder); + } + } if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) { /* acquire bandwidth and other resources */ @@ -979,11 +1089,6 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc) dpu_crtc->play_count++; - dpu_vbif_clear_errors(dpu_kms); - - drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) - dpu_encoder_kickoff(encoder); - reinit_completion(&dpu_crtc->frame_done_comp); end: @@ -1102,12 +1207,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc, dpu_core_perf_crtc_update(crtc, 0); - drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) - dpu_encoder_register_frame_event_callback(encoder, NULL, NULL); - - memset(cstate->mixers, 0, sizeof(cstate->mixers)); - cstate->num_mixers = 0; - /* disable clk & bw control until clk & bw properties are set */ cstate->bw_control = false; cstate->bw_split_vote = false; @@ -1143,8 +1242,6 @@ static void dpu_crtc_enable(struct drm_crtc *crtc, */ if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO) request_bandwidth = true; - dpu_encoder_register_frame_event_callback(encoder, - dpu_crtc_frame_event_cb, (void *)crtc); } if (request_bandwidth) @@ -1179,6 +1276,205 @@ static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate) return false; } +static int dpu_crtc_reassign_planes(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) +{ + int total_planes = crtc->dev->mode_config.num_total_plane; + struct drm_atomic_state *state = crtc_state->state; + struct dpu_global_state *global_state; + struct drm_plane_state **states; + struct drm_plane *plane; + int ret; + + global_state = dpu_kms_get_global_state(crtc_state->state); + if (IS_ERR(global_state)) + return PTR_ERR(global_state); + + dpu_rm_release_all_sspp(global_state, crtc); + + if (!crtc_state->enable) + return 0; + + states = kcalloc(total_planes, sizeof(*states), GFP_KERNEL); + if (!states) + return -ENOMEM; + + drm_atomic_crtc_state_for_each_plane(plane, crtc_state) { + struct drm_plane_state *plane_state = + drm_atomic_get_plane_state(state, plane); + + if (IS_ERR(plane_state)) { + ret = PTR_ERR(plane_state); + goto done; + } + + states[plane_state->normalized_zpos] = plane_state; + } + + ret 
= dpu_assign_plane_resources(global_state, state, crtc, states, total_planes); + +done: + kfree(states); + return ret; +} + +#define MAX_CHANNELS_PER_CRTC PIPES_PER_PLANE +#define MAX_HDISPLAY_SPLIT 1080 + +static struct msm_display_topology dpu_crtc_get_topology( + struct drm_crtc *crtc, + struct dpu_kms *dpu_kms, + struct drm_crtc_state *crtc_state) +{ + struct drm_display_mode *mode = &crtc_state->adjusted_mode; + struct msm_display_topology topology = {0}; + struct drm_encoder *drm_enc; + u32 num_rt_intf; + + drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) + dpu_encoder_update_topology(drm_enc, &topology, crtc_state->state, + &crtc_state->adjusted_mode); + + topology.cwb_enabled = drm_crtc_in_clone_mode(crtc_state); + + /* + * Datapath topology selection + * + * Dual display + * 2 LM, 2 INTF ( Split display using 2 interfaces) + * + * If DSC is enabled, try to use 4:4:2 topology if there is enough + * resource. Otherwise, use 2:2:2 topology. + * + * Single display + * 1 LM, 1 INTF + * 2 LM, 1 INTF (stream merge to support high resolution interfaces) + * + * If DSC is enabled, use 2:2:1 topology + * + * Add dspps to the reservation requirements if ctm is requested + * + * Only hardcode num_lm to 2 for cases where num_intf == 2 and CWB is not + * enabled. This is because in cases where CWB is enabled, num_intf will + * count both the WB and real-time phys encoders. + * + * For non-DSC CWB usecases, have the num_lm be decided by the + * (mode->hdisplay > MAX_HDISPLAY_SPLIT) check. + */ + + num_rt_intf = topology.num_intf; + if (topology.cwb_enabled) + num_rt_intf--; + + if (topology.num_dsc) { + if (dpu_kms->catalog->dsc_count >= num_rt_intf * 2) + topology.num_dsc = num_rt_intf * 2; + else + topology.num_dsc = num_rt_intf; + topology.num_lm = topology.num_dsc; + } else if (num_rt_intf == 2) { + topology.num_lm = 2; + } else if (dpu_kms->catalog->caps->has_3d_merge) { + topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 
2 : 1; + } else { + topology.num_lm = 1; + } + + if (crtc_state->ctm) + topology.num_dspp = topology.num_lm; + + return topology; +} + +static int dpu_crtc_assign_resources(struct drm_crtc *crtc, + struct drm_crtc_state *crtc_state) +{ + struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_CRTC]; + struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_CRTC]; + struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_CRTC]; + int i, num_lm, num_ctl, num_dspp; + struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); + struct dpu_global_state *global_state; + struct dpu_crtc_state *cstate; + struct msm_display_topology topology; + int ret; + + /* + * Release and Allocate resources on every modeset + */ + global_state = dpu_kms_get_global_state(crtc_state->state); + if (IS_ERR(global_state)) + return PTR_ERR(global_state); + + dpu_rm_release(global_state, crtc); + + if (!crtc_state->enable) + return 0; + + topology = dpu_crtc_get_topology(crtc, dpu_kms, crtc_state); + ret = dpu_rm_reserve(&dpu_kms->rm, global_state, + crtc_state->crtc, &topology); + if (ret) + return ret; + + cstate = to_dpu_crtc_state(crtc_state); + + num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + crtc_state->crtc, + DPU_HW_BLK_CTL, hw_ctl, + ARRAY_SIZE(hw_ctl)); + num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + crtc_state->crtc, + DPU_HW_BLK_LM, hw_lm, + ARRAY_SIZE(hw_lm)); + num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + crtc_state->crtc, + DPU_HW_BLK_DSPP, hw_dspp, + ARRAY_SIZE(hw_dspp)); + + for (i = 0; i < num_lm; i++) { + int ctl_idx = (i < num_ctl) ? i : (num_ctl-1); + + cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]); + cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]); + if (i < num_dspp) + cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]); + } + + cstate->num_mixers = num_lm; + + return 0; +} + +/** + * dpu_crtc_check_mode_changed: check if full modeset is required + * @old_crtc_state: Previous CRTC state + * @new_crtc_state: Corresponding CRTC state to be checked + * + * Check if the changes in the object properties demand full mode set. 
+ */ +int dpu_crtc_check_mode_changed(struct drm_crtc_state *old_crtc_state, + struct drm_crtc_state *new_crtc_state) +{ + struct drm_encoder *drm_enc; + struct drm_crtc *crtc = new_crtc_state->crtc; + bool clone_mode_enabled = drm_crtc_in_clone_mode(old_crtc_state); + bool clone_mode_requested = drm_crtc_in_clone_mode(new_crtc_state); + + DRM_DEBUG_ATOMIC("%d\n", crtc->base.id); + + /* there might be cases where encoder needs a modeset too */ + drm_for_each_encoder_mask(drm_enc, crtc->dev, new_crtc_state->encoder_mask) { + if (dpu_encoder_needs_modeset(drm_enc, new_crtc_state->state)) + new_crtc_state->mode_changed = true; + } + + if ((clone_mode_requested && !clone_mode_enabled) || + (!clone_mode_requested && clone_mode_enabled)) + new_crtc_state->mode_changed = true; + + return 0; +} + static int dpu_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) { @@ -1194,6 +1490,20 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state); + /* don't reallocate resources if only ACTIVE has beeen changed */ + if (crtc_state->mode_changed || crtc_state->connectors_changed) { + rc = dpu_crtc_assign_resources(crtc, crtc_state); + if (rc < 0) + return rc; + } + + if (dpu_use_virtual_planes && + (crtc_state->planes_changed || crtc_state->zpos_changed)) { + rc = dpu_crtc_reassign_planes(crtc, crtc_state); + if (rc < 0) + return rc; + } + if (!crtc_state->enable || !drm_atomic_crtc_effectively_active(crtc_state)) { DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n", crtc->base.id, crtc_state->enable, @@ -1204,12 +1514,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name); - /* force a full mode set if active state changed */ - if (crtc_state->active_changed) - crtc_state->mode_changed = true; - - if (cstate->num_mixers) - _dpu_crtc_setup_lm_bounds(crtc, crtc_state); + if (cstate->num_mixers) { + rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state); + if (rc) + return rc; + } /* FIXME: move this to dpu_plane_atomic_check? 
*/ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { @@ -1240,6 +1549,45 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, return 0; } +static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); + u64 adjusted_mode_clk; + + /* if there is no 3d_mux block we cannot merge LMs so we cannot + * split the large layer into 2 LMs, filter out such modes + */ + if (!dpu_kms->catalog->caps->has_3d_merge && + mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width) + return MODE_BAD_HVALUE; + + adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock, + dpu_kms->perf.perf_cfg); + + if (dpu_kms->catalog->caps->has_3d_merge) + adjusted_mode_clk /= 2; + + /* + * The given mode, adjusted for the perf clock factor, should not exceed + * the max core clock rate + */ + if (dpu_kms->perf.max_core_clk_rate < adjusted_mode_clk * 1000) + return MODE_CLOCK_HIGH; + + /* + * max crtc width is equal to the max mixer width * 2 and max height is 4K + */ + return drm_mode_validate_size(mode, + 2 * dpu_kms->catalog->caps->max_mixer_width, + 4096); +} + +/** + * dpu_crtc_vblank - enable or disable vblanks for this crtc + * @crtc: Pointer to drm crtc object + * @en: true to enable vblanks, false to disable + */ int dpu_crtc_vblank(struct drm_crtc *crtc, bool en) { struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); @@ -1272,6 +1620,17 @@ int dpu_crtc_vblank(struct drm_crtc *crtc, bool en) return 0; } +/** + * dpu_crtc_get_num_lm - Get mixer number in this CRTC pipeline + * @state: Pointer to drm crtc state object + */ +unsigned int dpu_crtc_get_num_lm(const struct drm_crtc_state *state) +{ + struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); + + return cstate->num_mixers; +} + #ifdef CONFIG_DEBUG_FS static int _dpu_debugfs_status_show(struct seq_file *s, void *data) { @@ -1354,15 +1713,15 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data) seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n", state->crtc_x, state->crtc_y, state->crtc_w, state->crtc_h); - seq_printf(s, "\tsspp[0]:%s\n", - pstate->pipe.sspp->cap->name); - seq_printf(s, "\tmultirect[0]: mode: %d index: %d\n", - pstate->pipe.multirect_mode, pstate->pipe.multirect_index); - if (pstate->r_pipe.sspp) { - seq_printf(s, "\tsspp[1]:%s\n", - pstate->r_pipe.sspp->cap->name); - seq_printf(s, "\tmultirect[1]: mode: %d index: %d\n", - pstate->r_pipe.multirect_mode, pstate->r_pipe.multirect_index); + + for (i = 0; i < PIPES_PER_PLANE; i++) { + if (!pstate->pipe[i].sspp) + continue; + seq_printf(s, "\tsspp[%d]:%s\n", + i, pstate->pipe[i].sspp->cap->name); + seq_printf(s, "\tmultirect[%d]: mode: %d index: %d\n", + i, pstate->pipe[i].multirect_mode, + pstate->pipe[i].multirect_index); } seq_puts(s, "\n"); @@ -1399,8 +1758,9 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v) seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc)); seq_printf(s, "core_clk_rate: %llu\n", dpu_crtc->cur_perf.core_clk_rate); - seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl); - seq_printf(s, "max_per_pipe_ib: %llu\n", + seq_printf(s, "bw_ctl: %uk\n", + (u32)DIV_ROUND_UP_ULL(dpu_crtc->cur_perf.bw_ctl, 1000)); + seq_printf(s, "max_per_pipe_ib: %u\n", dpu_crtc->cur_perf.max_per_pipe_ib); return 0; @@ -1435,7 +1795,6 @@ static int dpu_crtc_late_register(struct drm_crtc *crtc) static const struct drm_crtc_funcs dpu_crtc_funcs = { .set_config = drm_atomic_helper_set_config, - .destroy = 
dpu_crtc_destroy, .page_flip = drm_atomic_helper_page_flip, .reset = dpu_crtc_reset, .atomic_duplicate_state = dpu_crtc_duplicate_state, @@ -1456,22 +1815,35 @@ static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = { .atomic_check = dpu_crtc_atomic_check, .atomic_begin = dpu_crtc_atomic_begin, .atomic_flush = dpu_crtc_atomic_flush, + .mode_valid = dpu_crtc_mode_valid, .get_scanout_position = dpu_crtc_get_scanout_position, }; -/* initialize crtc */ +/** + * dpu_crtc_init - create a new crtc object + * @dev: dpu device + * @plane: base plane + * @cursor: cursor plane + * @return: new crtc object or error + * + * initialize CRTC + */ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, struct drm_plane *cursor) { struct msm_drm_private *priv = dev->dev_private; struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); struct drm_crtc *crtc = NULL; - struct dpu_crtc *dpu_crtc = NULL; + struct dpu_crtc *dpu_crtc; int i, ret; - dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL); - if (!dpu_crtc) - return ERR_PTR(-ENOMEM); + dpu_crtc = drmm_crtc_alloc_with_planes(dev, struct dpu_crtc, base, + plane, cursor, + &dpu_crtc_funcs, + NULL); + + if (IS_ERR(dpu_crtc)) + return ERR_CAST(dpu_crtc); crtc = &dpu_crtc->base; crtc->dev = dev; @@ -1491,9 +1863,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, dpu_crtc_frame_event_work); } - drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs, - NULL); - drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs); if (dpu_kms->catalog->dspp_count) |
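The layer-mixer count chosen by the new dpu_crtc_get_topology() follows the long datapath-topology comment in the hunk above. A simplified sketch of just that decision, assuming the interface/DSC counts have already been gathered (this mirrors the ordering of the checks, it is not the driver function itself):

#include <stdbool.h>

#define MAX_HDISPLAY_SPLIT 1080

/* Simplified mirror of the layer-mixer count selection in the diff. */
static unsigned int pick_num_lm(unsigned int num_intf, bool cwb_enabled,
				unsigned int num_dsc, unsigned int dsc_count,
				bool has_3d_merge, int hdisplay)
{
	/* CWB adds a writeback encoder that is not a real-time interface. */
	unsigned int num_rt_intf = cwb_enabled ? num_intf - 1 : num_intf;

	if (num_dsc) {
		/* Prefer two DSC blocks per interface when enough exist. */
		unsigned int dsc = (dsc_count >= num_rt_intf * 2) ?
					num_rt_intf * 2 : num_rt_intf;
		return dsc;		/* num_lm follows num_dsc */
	}

	if (num_rt_intf == 2)
		return 2;		/* split display: one LM per interface */

	if (has_3d_merge)
		return hdisplay > MAX_HDISPLAY_SPLIT ? 2 : 1;

	return 1;			/* no 3D mux: a single LM must fit the mode */
}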

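Similarly, the new dpu_crtc_mode_valid() rejects modes whose perf-adjusted pixel clock exceeds the maximum MDP core clock, halving the requirement when 3D merge lets two LMs share the pixel rate. A back-of-the-envelope version of that check, with hypothetical numbers and the dpu_core_perf_adjusted_mode_clk() adjustment factor left out:

#include <stdbool.h>
#include <stdint.h>

/*
 * adjusted_mode_clk_khz is the DRM mode clock in kHz (here assumed to be
 * already perf-adjusted); max_core_clk_rate_hz is the MDP core clock cap
 * in Hz, matching the units compared in the hunk above.
 */
static bool core_clk_can_drive_mode(uint64_t adjusted_mode_clk_khz,
				    bool has_3d_merge,
				    uint64_t max_core_clk_rate_hz)
{
	if (has_3d_merge)
		adjusted_mode_clk_khz /= 2;	/* two LMs share the pixel rate */

	/* compare in Hz: kHz * 1000 */
	return adjusted_mode_clk_khz * 1000 <= max_core_clk_rate_hz;
}

/*
 * Example: a 4K@60 mode (~594000 kHz) with 3D merge needs roughly 297 MHz
 * of core clock, so a DPU capped at 300 MHz would accept it while one
 * capped at 200 MHz would return MODE_CLOCK_HIGH.
 */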