author	Moudy Ho <moudy.ho@mediatek.com>	2023-12-20 11:18:38 +0100
committer	Mauro Carvalho Chehab <mchehab@kernel.org>	2024-02-01 07:53:49 +0100
commit	9288eae430cbc4de82e82f295ccf7c5d877c0c2b (patch)
tree	99501d43adf4da8d598d9daaff659dcbe2b5c415 /drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
parent	0e9bd2fcda5dfc0d703c659d4b7ba2cf07dfdb9f (diff)
media: platform: mtk-mdp3: add support for parallel pipe to improve FPS
On some chips, MDP3 can use two pipelines to process a single
frame in parallel.
To enable this feature, multiple CMDQ clients and packets need to
be configured at the same time.
Signed-off-by: Moudy Ho <moudy.ho@mediatek.com>
Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Signed-off-by: Sebastian Fricke <sebastian.fricke@collabora.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
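
The heart of the change is the dispatch pattern in the reworked mdp_cmdq_send()
below: prepare one CMDQ packet per pipe before touching the hardware, start
every pipe, and on failure unwind in reverse only what was already started.
A minimal user-space sketch of that pattern follows; pipe_prepare(),
pipe_start(), pipe_stop() and NUM_PIPES are hypothetical stand-ins for the
driver's mdp_cmdq_prepare() / mdp_comp_clocks_on() / mbox_send_message() flow,
not real MDP3 APIs:

/*
 * Illustrative sketch only: models the prepare-all/start-all/unwind
 * pattern of mdp_cmdq_send(). pipe_prepare(), pipe_start(), pipe_stop()
 * and NUM_PIPES are hypothetical stand-ins, not MDP3 driver APIs.
 */
#include <stdio.h>

#define NUM_PIPES 2	/* MDP_STREAM_TYPE_DUAL_BITBLT uses two pipes */

static int pipe_prepare(int idx) { printf("prepare pipe %d\n", idx); return 0; }
static int pipe_start(int idx)   { printf("start pipe %d\n", idx);   return 0; }
static void pipe_stop(int idx)   { printf("stop pipe %d\n", idx); }

static int submit_frame(void)
{
	int i, ret;

	/* Phase 1: build every per-pipe packet before any is sent. */
	for (i = 0; i < NUM_PIPES; i++) {
		ret = pipe_prepare(i);
		if (ret)
			return ret;	/* nothing started yet, nothing to undo */
	}

	/* Phase 2: kick all pipes; a failure unwinds the started ones. */
	for (i = 0; i < NUM_PIPES; i++) {
		ret = pipe_start(i);
		if (ret)
			goto err_stop;
	}
	return 0;

err_stop:
	while (--i >= 0)	/* reverse unwind, like the driver's err_clock_off */
		pipe_stop(i);
	return ret;
}

int main(void)
{
	return submit_frame() ? 1 : 0;
}

The reverse while (--i >= 0) unwind mirrors the driver's err_clock_off label,
so a failure on pipe N touches only the pipes that were actually started.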
Diffstat (limited to 'drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c')
-rw-r--r--	drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c	188
1 file changed, 140 insertions(+), 48 deletions(-)
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
index 8114c288da9c..1d64bac34b90 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
@@ -55,6 +55,16 @@ static struct mtk_mutex *__get_mutex(const struct mdp_dev *mdp_dev,
 	return mdp_dev->mm_subsys[p->sub_id].mdp_mutex[p->mutex_id];
 }
 
+static u8 __get_pp_num(enum mdp_stream_type type)
+{
+	switch (type) {
+	case MDP_STREAM_TYPE_DUAL_BITBLT:
+		return MDP_PP_USED_2;
+	default:
+		return MDP_PP_USED_1;
+	}
+}
+
 static enum mdp_pipe_id __get_pipe(const struct mdp_dev *mdp_dev,
 				   enum mtk_mdp_comp_id id)
 {
@@ -92,6 +102,44 @@ static enum mdp_pipe_id __get_pipe(const struct mdp_dev *mdp_dev,
 	return pipe_id;
 }
 
+static struct img_config *__get_config_offset(struct mdp_dev *mdp,
+					      struct mdp_cmdq_param *param,
+					      u8 pp_idx)
+{
+	const int p_id = mdp->mdp_data->mdp_plat_id;
+	struct device *dev = &mdp->pdev->dev;
+	void *cfg_c, *cfg_n;
+	long bound = mdp->vpu.config_size;
+
+	if (pp_idx >= mdp->mdp_data->pp_used)
+		goto err_param;
+
+	if (CFG_CHECK(MT8183, p_id))
+		cfg_c = CFG_OFST(MT8183, param->config, pp_idx);
+	else if (CFG_CHECK(MT8195, p_id))
+		cfg_c = CFG_OFST(MT8195, param->config, pp_idx);
+	else
+		goto err_param;
+
+	if (CFG_CHECK(MT8183, p_id))
+		cfg_n = CFG_OFST(MT8183, param->config, pp_idx + 1);
+	else if (CFG_CHECK(MT8195, p_id))
+		cfg_n = CFG_OFST(MT8195, param->config, pp_idx + 1);
+	else
+		goto err_param;
+
+	if ((long)cfg_n - (long)mdp->vpu.config > bound) {
+		dev_err(dev, "config offset %ld OOB %ld\n", (long)cfg_n, bound);
+		cfg_c = ERR_PTR(-EFAULT);
+	}
+
+	return (struct img_config *)cfg_c;
+
+err_param:
+	cfg_c = ERR_PTR(-EINVAL);
+	return (struct img_config *)cfg_c;
+}
+
 static int mdp_path_subfrm_require(const struct mdp_path *path,
 				   struct mdp_cmdq_cmd *cmd,
 				   struct mdp_pipe_info *p, u32 count)
@@ -476,8 +524,19 @@ static void mdp_auto_release_work(struct work_struct *work)
 	mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
 			    cmd->num_comps);
 
-	atomic_dec(&mdp->job_count);
-	wake_up(&mdp->callback_wq);
+	if (atomic_dec_and_test(&mdp->job_count)) {
+		if (cmd->mdp_ctx)
+			mdp_m2m_job_finish(cmd->mdp_ctx);
+
+		if (cmd->user_cmdq_cb) {
+			struct cmdq_cb_data user_cb_data;
+
+			user_cb_data.sta = cmd->data->sta;
+			user_cb_data.pkt = cmd->data->pkt;
+			cmd->user_cmdq_cb(user_cb_data);
+		}
+		wake_up(&mdp->callback_wq);
+	}
 
 	mdp_cmdq_pkt_destroy(&cmd->pkt);
 	kfree(cmd->comps);
@@ -501,20 +560,10 @@ static void mdp_handle_cmdq_callback(struct mbox_client *cl, void *mssg)
 
 	data = (struct cmdq_cb_data *)mssg;
 	cmd = container_of(data->pkt, struct mdp_cmdq_cmd, pkt);
+	cmd->data = data;
 	mdp = cmd->mdp;
 	dev = &mdp->pdev->dev;
 
-	if (cmd->mdp_ctx)
-		mdp_m2m_job_finish(cmd->mdp_ctx);
-
-	if (cmd->user_cmdq_cb) {
-		struct cmdq_cb_data user_cb_data;
-
-		user_cb_data.sta = data->sta;
-		user_cb_data.pkt = data->pkt;
-		cmd->user_cmdq_cb(user_cb_data);
-	}
-
 	INIT_WORK(&cmd->auto_release_work, mdp_auto_release_work);
 	if (!queue_work(mdp->clock_wq, &cmd->auto_release_work)) {
 		struct mtk_mutex *mutex;
@@ -526,8 +575,8 @@ static void mdp_handle_cmdq_callback(struct mbox_client *cl, void *mssg)
 		mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
 				    cmd->num_comps);
 
-		atomic_dec(&mdp->job_count);
-		wake_up(&mdp->callback_wq);
+		if (atomic_dec_and_test(&mdp->job_count))
+			wake_up(&mdp->callback_wq);
 
 		mdp_cmdq_pkt_destroy(&cmd->pkt);
 		kfree(cmd->comps);
@@ -537,31 +586,41 @@ static void mdp_handle_cmdq_callback(struct mbox_client *cl, void *mssg)
 	}
 }
 
-int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
+static struct mdp_cmdq_cmd *mdp_cmdq_prepare(struct mdp_dev *mdp,
+					     struct mdp_cmdq_param *param,
+					     u8 pp_idx)
 {
 	struct mdp_path *path = NULL;
 	struct mdp_cmdq_cmd *cmd = NULL;
 	struct mdp_comp *comps = NULL;
 	struct device *dev = &mdp->pdev->dev;
 	const int p_id = mdp->mdp_data->mdp_plat_id;
+	struct img_config *config;
 	struct mtk_mutex *mutex = NULL;
 	enum mdp_pipe_id pipe_id;
-	int i, ret;
-	u32 num_comp = 0;
+	int i, ret = -ECANCELED;
+	u32 num_comp;
 
-	atomic_inc(&mdp->job_count);
-	if (atomic_read(&mdp->suspended)) {
-		atomic_dec(&mdp->job_count);
-		return -ECANCELED;
+	config = __get_config_offset(mdp, param, pp_idx);
+	if (IS_ERR(config)) {
+		ret = PTR_ERR(config);
+		goto err_uninit;
 	}
 
+	if (CFG_CHECK(MT8183, p_id))
+		num_comp = CFG_GET(MT8183, config, num_components);
+	else if (CFG_CHECK(MT8195, p_id))
+		num_comp = CFG_GET(MT8195, config, num_components);
+	else
+		goto err_uninit;
+
 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 	if (!cmd) {
 		ret = -ENOMEM;
-		goto err_cancel_job;
+		goto err_uninit;
 	}
 
-	ret = mdp_cmdq_pkt_create(mdp->cmdq_clt, &cmd->pkt, SZ_16K);
+	ret = mdp_cmdq_pkt_create(mdp->cmdq_clt[pp_idx], &cmd->pkt, SZ_16K);
 	if (ret)
 		goto err_free_cmd;
@@ -586,7 +645,7 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
 	}
 
 	path->mdp_dev = mdp;
-	path->config = param->config;
+	path->config = config;
 	path->param = param->param;
 	for (i = 0; i < param->param->num_outputs; i++) {
 		path->bounds[i].left = 0;
@@ -600,7 +659,7 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
 	}
 	ret = mdp_path_ctx_init(mdp, path);
 	if (ret) {
-		dev_err(dev, "mdp_path_ctx_init error\n");
+		dev_err(dev, "mdp_path_ctx_init error %d\n", pp_idx);
 		goto err_free_path;
 	}
 
@@ -608,13 +667,13 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
 	mutex = __get_mutex(mdp, &mdp->mdp_data->pipe_info[pipe_id]);
 	ret = mtk_mutex_prepare(mutex);
 	if (ret) {
-		dev_err(dev, "Fail to enable mutex clk\n");
+		dev_err(dev, "Fail to enable mutex %d clk\n", pp_idx);
 		goto err_free_path;
 	}
 
 	ret = mdp_path_config(mdp, cmd, path);
 	if (ret) {
-		dev_err(dev, "mdp_path_config error\n");
+		dev_err(dev, "mdp_path_config error %d\n", pp_idx);
 		goto err_free_path;
 	}
 	cmdq_pkt_finalize(&cmd->pkt);
@@ -633,7 +692,7 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
 		       sizeof(struct mdp_comp));
 	}
 
-	mdp->cmdq_clt->client.rx_callback = mdp_handle_cmdq_callback;
+	mdp->cmdq_clt[pp_idx]->client.rx_callback = mdp_handle_cmdq_callback;
 	cmd->mdp = mdp;
 	cmd->user_cmdq_cb = param->cmdq_cb;
 	cmd->user_cb_data = param->cb_data;
@@ -641,26 +700,9 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
 	cmd->num_comps = num_comp;
 	cmd->mdp_ctx = param->mdp_ctx;
 
-	ret = mdp_comp_clocks_on(&mdp->pdev->dev, cmd->comps, cmd->num_comps);
-	if (ret)
-		goto err_free_path;
-
-	dma_sync_single_for_device(mdp->cmdq_clt->chan->mbox->dev,
-				   cmd->pkt.pa_base, cmd->pkt.cmd_buf_size,
-				   DMA_TO_DEVICE);
-	ret = mbox_send_message(mdp->cmdq_clt->chan, &cmd->pkt);
-	if (ret < 0) {
-		dev_err(dev, "mbox send message fail %d!\n", ret);
-		goto err_clock_off;
-	}
-	mbox_client_txdone(mdp->cmdq_clt->chan, 0);
-
 	kfree(path);
-	return 0;
+	return cmd;
 
-err_clock_off:
-	mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
-			    cmd->num_comps);
 err_free_path:
 	if (mutex)
 		mtk_mutex_unprepare(mutex);
@@ -671,8 +713,58 @@ err_destroy_pkt:
 	mdp_cmdq_pkt_destroy(&cmd->pkt);
 err_free_cmd:
 	kfree(cmd);
+err_uninit:
+	return ERR_PTR(ret);
+}
+
+int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
+{
+	struct mdp_cmdq_cmd *cmd[MDP_PP_MAX] = {NULL};
+	struct device *dev = &mdp->pdev->dev;
+	int i, ret;
+	u8 pp_used = __get_pp_num(param->param->type);
+
+	atomic_set(&mdp->job_count, pp_used);
+	if (atomic_read(&mdp->suspended)) {
+		atomic_set(&mdp->job_count, 0);
+		return -ECANCELED;
+	}
+
+	for (i = 0; i < pp_used; i++) {
+		cmd[i] = mdp_cmdq_prepare(mdp, param, i);
+		if (IS_ERR_OR_NULL(cmd[i])) {
+			ret = PTR_ERR(cmd[i]);
+			goto err_cancel_job;
+		}
+	}
+
+	for (i = 0; i < pp_used; i++) {
+		ret = mdp_comp_clocks_on(&mdp->pdev->dev, cmd[i]->comps, cmd[i]->num_comps);
+		if (ret)
+			goto err_clock_off;
+	}
+
+	for (i = 0; i < pp_used; i++) {
+		dma_sync_single_for_device(mdp->cmdq_clt[i]->chan->mbox->dev,
+					   cmd[i]->pkt.pa_base, cmd[i]->pkt.cmd_buf_size,
+					   DMA_TO_DEVICE);
+
+		ret = mbox_send_message(mdp->cmdq_clt[i]->chan, &cmd[i]->pkt);
+		if (ret < 0) {
+			dev_err(dev, "mbox send message fail %d!\n", ret);
+			i = pp_used;
+			goto err_clock_off;
+		}
+		mbox_client_txdone(mdp->cmdq_clt[i]->chan, 0);
+	}
+	return 0;
+
+err_clock_off:
+	while (--i >= 0)
+		mdp_comp_clocks_off(&mdp->pdev->dev, cmd[i]->comps,
+				    cmd[i]->num_comps);
 err_cancel_job:
-	atomic_dec(&mdp->job_count);
+	atomic_set(&mdp->job_count, 0);
 
 	return ret;
 }