Diffstat (limited to 'drivers/gpu/drm/mediatek')
41 files changed, 20004 insertions, 0 deletions
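For orientation, a minimal kernel .config fragment that enables the new driver together with its optional DisplayPort and HDMI pieces. This is illustrative only: the option names come from the Kconfig entries added below, while building everything as modules and having the listed prerequisites (DRM, MTK_MMSYS, COMMON_CLK, OF, ARCH_MEDIATEK) already enabled are assumptions, not part of this patch.

    # Core MediaTek DRM/KMS driver (mediatek-drm.ko)
    CONFIG_DRM_MEDIATEK=m
    # Optional DisplayPort transmitter support (mtk_dp.ko)
    CONFIG_DRM_MEDIATEK_DP=m
    # HDMI v1 path: mtk_hdmi.ko, mtk_hdmi_ddc.ko, mtk_cec.ko
    CONFIG_DRM_MEDIATEK_HDMI=m
    # HDMI v2 path for MT8195/MT8188: mtk_hdmi_v2.ko, mtk_hdmi_ddc_v2.ko
    CONFIG_DRM_MEDIATEK_HDMI_V2=m

DRM_MEDIATEK_HDMI_COMMON does not need to be set by hand; it is selected automatically by either HDMI option.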
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
new file mode 100644
index 000000000000..96188bf9274a
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DRM_MEDIATEK
+	tristate "DRM Support for Mediatek SoCs"
+	depends on DRM
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	depends on COMMON_CLK
+	depends on HAVE_ARM_SMCCC || COMPILE_TEST
+	depends on OF
+	depends on MTK_MMSYS
+	select DRM_CLIENT_SELECTION
+	select DRM_GEM_DMA_HELPER if DRM_FBDEV_EMULATION
+	select DRM_KMS_HELPER
+	select DRM_DISPLAY_HELPER
+	select DRM_BRIDGE_CONNECTOR
+	select DRM_MIPI_DSI
+	select DRM_PANEL
+	select VIDEOMODE_HELPERS
+	help
+	  Choose this option if you have a Mediatek SoCs.
+	  The module will be called mediatek-drm
+	  This driver provides kernel mode setting and
+	  buffer management to userspace.
+
+config DRM_MEDIATEK_DP
+	tristate "DRM DPTX Support for MediaTek SoCs"
+	depends on DRM_MEDIATEK
+	select DRM_DISPLAY_HELPER
+	select DRM_DISPLAY_DP_HELPER
+	select DRM_DISPLAY_DP_AUX_BUS
+	help
+	  DRM/KMS Display Port driver for MediaTek SoCs.
+
+config DRM_MEDIATEK_HDMI_COMMON
+	tristate
+	depends on DRM_MEDIATEK
+	select DRM_DISPLAY_HDMI_HELPER
+	select DRM_DISPLAY_HELPER
+	select SND_SOC_HDMI_CODEC if SND_SOC
+	help
+	  MediaTek SoC HDMI common library
+
+config DRM_MEDIATEK_HDMI
+	tristate "DRM HDMI Support for Mediatek SoCs"
+	depends on DRM_MEDIATEK
+	select DRM_MEDIATEK_HDMI_COMMON
+	help
+	  DRM/KMS HDMI driver for Mediatek SoCs
+
+config DRM_MEDIATEK_HDMI_V2
+	tristate "DRM HDMI v2 IP support for MediaTek SoCs"
+	depends on DRM_MEDIATEK
+	select DRM_MEDIATEK_HDMI_COMMON
+	help
+	  Say yes here to enable support for the HDMIv2 IP and related
+	  DDCv2 as found in the MediaTek MT8195, MT8188 SoCs and other
+	  variants.
+	  This driver can also be built as a module. If so, the HDMIv2
+	  module will be called "mtk_hdmi_v2", and the DDCv2 module
+	  will be called "mtk_hdmi_ddc_v2".
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
new file mode 100644
index 000000000000..e0ac49b07d50
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+
+mediatek-drm-y := mtk_crtc.o \
+		  mtk_ddp_comp.o \
+		  mtk_disp_aal.o \
+		  mtk_disp_ccorr.o \
+		  mtk_disp_color.o \
+		  mtk_disp_gamma.o \
+		  mtk_disp_merge.o \
+		  mtk_disp_ovl.o \
+		  mtk_disp_ovl_adaptor.o \
+		  mtk_disp_rdma.o \
+		  mtk_drm_drv.o \
+		  mtk_dsi.o \
+		  mtk_dpi.o \
+		  mtk_ethdr.o \
+		  mtk_gem.o \
+		  mtk_mdp_rdma.o \
+		  mtk_padding.o \
+		  mtk_plane.o
+
+obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
+
+obj-$(CONFIG_DRM_MEDIATEK_HDMI_COMMON) += mtk_hdmi_common.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_cec.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_hdmi.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_hdmi_ddc.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI_V2) += mtk_hdmi_v2.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI_V2) += mtk_hdmi_ddc_v2.o
+
+obj-$(CONFIG_DRM_MEDIATEK_DP) += mtk_dp.o
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
new file mode 100644
index 000000000000..c7be530ca041
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com> + */ +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/platform_device.h> + +#include "mtk_cec.h" +#include "mtk_drm_drv.h" + +#define TR_CONFIG 0x00 +#define CLEAR_CEC_IRQ BIT(15) + +#define CEC_CKGEN 0x04 +#define CEC_32K_PDN BIT(19) +#define PDN BIT(16) + +#define RX_EVENT 0x54 +#define HDMI_PORD BIT(25) +#define HDMI_HTPLG BIT(24) +#define HDMI_PORD_INT_EN BIT(9) +#define HDMI_HTPLG_INT_EN BIT(8) + +#define RX_GEN_WD 0x58 +#define HDMI_PORD_INT_32K_STATUS BIT(26) +#define RX_RISC_INT_32K_STATUS BIT(25) +#define HDMI_HTPLG_INT_32K_STATUS BIT(24) +#define HDMI_PORD_INT_32K_CLR BIT(18) +#define RX_INT_32K_CLR BIT(17) +#define HDMI_HTPLG_INT_32K_CLR BIT(16) +#define HDMI_PORD_INT_32K_STA_MASK BIT(10) +#define RX_RISC_INT_32K_STA_MASK BIT(9) +#define HDMI_HTPLG_INT_32K_STA_MASK BIT(8) +#define HDMI_PORD_INT_32K_EN BIT(2) +#define RX_INT_32K_EN BIT(1) +#define HDMI_HTPLG_INT_32K_EN BIT(0) + +#define NORMAL_INT_CTRL 0x5C +#define HDMI_HTPLG_INT_STA BIT(0) +#define HDMI_PORD_INT_STA BIT(1) +#define HDMI_HTPLG_INT_CLR BIT(16) +#define HDMI_PORD_INT_CLR BIT(17) +#define HDMI_FULL_INT_CLR BIT(20) + +struct mtk_cec { + void __iomem *regs; + struct clk *clk; + int irq; + bool hpd; + void (*hpd_event)(bool hpd, struct device *dev); + struct device *hdmi_dev; + spinlock_t lock; +}; + +static void mtk_cec_clear_bits(struct mtk_cec *cec, unsigned int offset, + unsigned int bits) +{ + void __iomem *reg = cec->regs + offset; + u32 tmp; + + tmp = readl(reg); + tmp &= ~bits; + writel(tmp, reg); +} + +static void mtk_cec_set_bits(struct mtk_cec *cec, unsigned int offset, + unsigned int bits) +{ + void __iomem *reg = cec->regs + offset; + u32 tmp; + + tmp = readl(reg); + tmp |= bits; + writel(tmp, reg); +} + +static void mtk_cec_mask(struct mtk_cec *cec, unsigned int offset, + unsigned int val, unsigned int mask) +{ + u32 tmp = readl(cec->regs + offset) & ~mask; + + tmp |= val & mask; + writel(tmp, cec->regs + offset); +} + +void mtk_cec_set_hpd_event(struct device *dev, + void (*hpd_event)(bool hpd, struct device *dev), + struct device *hdmi_dev) +{ + struct mtk_cec *cec = dev_get_drvdata(dev); + unsigned long flags; + + spin_lock_irqsave(&cec->lock, flags); + cec->hdmi_dev = hdmi_dev; + cec->hpd_event = hpd_event; + spin_unlock_irqrestore(&cec->lock, flags); +} +EXPORT_SYMBOL_NS_GPL(mtk_cec_set_hpd_event, "DRM_MTK_HDMI_V1"); + +bool mtk_cec_hpd_high(struct device *dev) +{ + struct mtk_cec *cec = dev_get_drvdata(dev); + unsigned int status; + + status = readl(cec->regs + RX_EVENT); + + return (status & (HDMI_PORD | HDMI_HTPLG)) == (HDMI_PORD | HDMI_HTPLG); +} +EXPORT_SYMBOL_NS_GPL(mtk_cec_hpd_high, "DRM_MTK_HDMI_V1"); + +static void mtk_cec_htplg_irq_init(struct mtk_cec *cec) +{ + mtk_cec_mask(cec, CEC_CKGEN, 0 | CEC_32K_PDN, PDN | CEC_32K_PDN); + mtk_cec_set_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR | + RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR); + mtk_cec_mask(cec, RX_GEN_WD, 0, HDMI_PORD_INT_32K_CLR | RX_INT_32K_CLR | + HDMI_HTPLG_INT_32K_CLR | HDMI_PORD_INT_32K_EN | + RX_INT_32K_EN | HDMI_HTPLG_INT_32K_EN); +} + +static void mtk_cec_htplg_irq_enable(struct mtk_cec *cec) +{ + mtk_cec_set_bits(cec, RX_EVENT, HDMI_PORD_INT_EN | HDMI_HTPLG_INT_EN); +} + +static void mtk_cec_htplg_irq_disable(struct mtk_cec *cec) +{ + mtk_cec_clear_bits(cec, RX_EVENT, HDMI_PORD_INT_EN | HDMI_HTPLG_INT_EN); +} + +static void 
mtk_cec_clear_htplg_irq(struct mtk_cec *cec) +{ + mtk_cec_set_bits(cec, TR_CONFIG, CLEAR_CEC_IRQ); + mtk_cec_set_bits(cec, NORMAL_INT_CTRL, HDMI_HTPLG_INT_CLR | + HDMI_PORD_INT_CLR | HDMI_FULL_INT_CLR); + mtk_cec_set_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR | + RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR); + usleep_range(5, 10); + mtk_cec_clear_bits(cec, NORMAL_INT_CTRL, HDMI_HTPLG_INT_CLR | + HDMI_PORD_INT_CLR | HDMI_FULL_INT_CLR); + mtk_cec_clear_bits(cec, TR_CONFIG, CLEAR_CEC_IRQ); + mtk_cec_clear_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR | + RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR); +} + +static void mtk_cec_hpd_event(struct mtk_cec *cec, bool hpd) +{ + void (*hpd_event)(bool hpd, struct device *dev); + struct device *hdmi_dev; + unsigned long flags; + + spin_lock_irqsave(&cec->lock, flags); + hpd_event = cec->hpd_event; + hdmi_dev = cec->hdmi_dev; + spin_unlock_irqrestore(&cec->lock, flags); + + if (hpd_event) + hpd_event(hpd, hdmi_dev); +} + +static irqreturn_t mtk_cec_htplg_isr_thread(int irq, void *arg) +{ + struct device *dev = arg; + struct mtk_cec *cec = dev_get_drvdata(dev); + bool hpd; + + mtk_cec_clear_htplg_irq(cec); + hpd = mtk_cec_hpd_high(dev); + + if (cec->hpd != hpd) { + dev_dbg(dev, "hotplug event! cur hpd = %d, hpd = %d\n", + cec->hpd, hpd); + cec->hpd = hpd; + mtk_cec_hpd_event(cec, hpd); + } + return IRQ_HANDLED; +} + +static int mtk_cec_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_cec *cec; + int ret; + + cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL); + if (!cec) + return -ENOMEM; + + platform_set_drvdata(pdev, cec); + spin_lock_init(&cec->lock); + + cec->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(cec->regs)) + return dev_err_probe(dev, PTR_ERR(cec->regs), + "Failed to ioremap cec\n"); + + cec->clk = devm_clk_get(dev, NULL); + if (IS_ERR(cec->clk)) + return dev_err_probe(dev, PTR_ERR(cec->clk), + "Failed to get cec clock\n"); + + cec->irq = platform_get_irq(pdev, 0); + if (cec->irq < 0) + return cec->irq; + + ret = devm_request_threaded_irq(dev, cec->irq, NULL, + mtk_cec_htplg_isr_thread, + IRQF_SHARED | IRQF_TRIGGER_LOW | + IRQF_ONESHOT, "hdmi hpd", dev); + if (ret) + return dev_err_probe(dev, ret, "Failed to register cec irq\n"); + + ret = clk_prepare_enable(cec->clk); + if (ret) + return dev_err_probe(dev, ret, "Failed to enable cec clock\n"); + + mtk_cec_htplg_irq_init(cec); + mtk_cec_htplg_irq_enable(cec); + + return 0; +} + +static void mtk_cec_remove(struct platform_device *pdev) +{ + struct mtk_cec *cec = platform_get_drvdata(pdev); + + mtk_cec_htplg_irq_disable(cec); + clk_disable_unprepare(cec->clk); +} + +static const struct of_device_id mtk_cec_of_ids[] = { + { .compatible = "mediatek,mt8173-cec", }, + {} +}; +MODULE_DEVICE_TABLE(of, mtk_cec_of_ids); + +struct platform_driver mtk_cec_driver = { + .probe = mtk_cec_probe, + .remove = mtk_cec_remove, + .driver = { + .name = "mediatek-cec", + .of_match_table = mtk_cec_of_ids, + }, +}; +module_platform_driver(mtk_cec_driver); + +MODULE_DESCRIPTION("MediaTek HDMI CEC Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/mediatek/mtk_cec.h b/drivers/gpu/drm/mediatek/mtk_cec.h new file mode 100644 index 000000000000..c6412dddb388 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_cec.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014 MediaTek Inc. 
+ * Author: Jie Qiu <jie.qiu@mediatek.com> + */ +#ifndef _MTK_CEC_H +#define _MTK_CEC_H + +#include <linux/types.h> + +struct device; + +void mtk_cec_set_hpd_event(struct device *dev, + void (*hotplug_event)(bool hpd, struct device *dev), + struct device *hdmi_dev); +bool mtk_cec_hpd_high(struct device *dev); + +#endif /* _MTK_CEC_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c new file mode 100644 index 000000000000..991cdb3d7d5f --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_crtc.c @@ -0,0 +1,1186 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015 MediaTek Inc. + */ + +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/mailbox_controller.h> +#include <linux/of.h> +#include <linux/pm_runtime.h> +#include <linux/soc/mediatek/mtk-cmdq.h> +#include <linux/soc/mediatek/mtk-mmsys.h> +#include <linux/soc/mediatek/mtk-mutex.h> + +#include <asm/barrier.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" +#include "mtk_drm_drv.h" +#include "mtk_gem.h" +#include "mtk_plane.h" + +/* + * struct mtk_crtc - MediaTek specific crtc structure. + * @base: crtc object. + * @enabled: records whether crtc_enable succeeded + * @planes: array of 4 drm_plane structures, one for each overlay plane + * @pending_planes: whether any plane has pending changes to be applied + * @mmsys_dev: pointer to the mmsys device for configuration registers + * @mutex: handle to one of the ten disp_mutex streams + * @ddp_comp_nr: number of components in ddp_comp + * @ddp_comp: array of pointers the mtk_ddp_comp structures used by this crtc + * + * TODO: Needs update: this header is missing a bunch of member descriptions. 
+ */ +struct mtk_crtc { + struct drm_crtc base; + bool enabled; + + bool pending_needs_vblank; + struct drm_pending_vblank_event *event; + + struct drm_plane *planes; + unsigned int layer_nr; + bool pending_planes; + bool pending_async_planes; + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct cmdq_client cmdq_client; + struct cmdq_pkt cmdq_handle; + u32 cmdq_event; + u32 cmdq_vblank_cnt; + wait_queue_head_t cb_blocking_queue; +#endif + + struct device *mmsys_dev; + struct device *dma_dev; + struct mtk_mutex *mutex; + unsigned int ddp_comp_nr; + struct mtk_ddp_comp **ddp_comp; + unsigned int num_conn_routes; + const struct mtk_drm_route *conn_routes; + + /* lock for display hardware access */ + struct mutex hw_lock; + bool config_updating; + /* lock for config_updating to cmd buffer */ + spinlock_t config_lock; +}; + +struct mtk_crtc_state { + struct drm_crtc_state base; + + bool pending_config; + unsigned int pending_width; + unsigned int pending_height; + unsigned int pending_vrefresh; +}; + +static inline struct mtk_crtc *to_mtk_crtc(struct drm_crtc *c) +{ + return container_of(c, struct mtk_crtc, base); +} + +static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s) +{ + return container_of(s, struct mtk_crtc_state, base); +} + +static void mtk_crtc_finish_page_flip(struct mtk_crtc *mtk_crtc) +{ + struct drm_crtc *crtc = &mtk_crtc->base; + unsigned long flags; + + if (mtk_crtc->event) { + spin_lock_irqsave(&crtc->dev->event_lock, flags); + drm_crtc_send_vblank_event(crtc, mtk_crtc->event); + drm_crtc_vblank_put(crtc); + mtk_crtc->event = NULL; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } +} + +static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc) +{ + unsigned long flags; + + drm_crtc_handle_vblank(&mtk_crtc->base); + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (mtk_crtc->cmdq_client.chan) + return; +#endif + + spin_lock_irqsave(&mtk_crtc->config_lock, flags); + if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) { + mtk_crtc_finish_page_flip(mtk_crtc); + mtk_crtc->pending_needs_vblank = false; + } + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); +} + +static void mtk_crtc_destroy(struct drm_crtc *crtc) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + int i; + + mtk_mutex_put(mtk_crtc->mutex); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (mtk_crtc->cmdq_client.chan) { + cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle); + mbox_free_channel(mtk_crtc->cmdq_client.chan); + mtk_crtc->cmdq_client.chan = NULL; + } +#endif + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + struct mtk_ddp_comp *comp; + + comp = mtk_crtc->ddp_comp[i]; + mtk_ddp_comp_unregister_vblank_cb(comp); + } + + drm_crtc_cleanup(crtc); +} + +static void mtk_crtc_reset(struct drm_crtc *crtc) +{ + struct mtk_crtc_state *state; + + if (crtc->state) + __drm_atomic_helper_crtc_destroy_state(crtc->state); + + kfree(to_mtk_crtc_state(crtc->state)); + crtc->state = NULL; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state) + __drm_atomic_helper_crtc_reset(crtc, &state->base); +} + +static struct drm_crtc_state *mtk_crtc_duplicate_state(struct drm_crtc *crtc) +{ + struct mtk_crtc_state *state; + + state = kmalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); + + WARN_ON(state->base.crtc != crtc); + state->base.crtc = crtc; + state->pending_config = false; + + return &state->base; +} + +static void mtk_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state 
*state) +{ + __drm_atomic_helper_crtc_destroy_state(state); + kfree(to_mtk_crtc_state(state)); +} + +static enum drm_mode_status +mtk_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + enum drm_mode_status status = MODE_OK; + int i; + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + status = mtk_ddp_comp_mode_valid(mtk_crtc->ddp_comp[i], mode); + if (status != MODE_OK) + break; + } + return status; +} + +static bool mtk_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + /* Nothing to do here, but this callback is mandatory. */ + return true; +} + +static void mtk_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state); + + state->pending_width = crtc->mode.hdisplay; + state->pending_height = crtc->mode.vdisplay; + state->pending_vrefresh = drm_mode_vrefresh(&crtc->mode); + wmb(); /* Make sure the above parameters are set before update */ + state->pending_config = true; +} + +static int mtk_crtc_ddp_clk_enable(struct mtk_crtc *mtk_crtc) +{ + int ret; + int i; + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + ret = mtk_ddp_comp_clk_enable(mtk_crtc->ddp_comp[i]); + if (ret) { + DRM_ERROR("Failed to enable clock %d: %d\n", i, ret); + goto err; + } + } + + return 0; +err: + while (--i >= 0) + mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]); + return ret; +} + +static void mtk_crtc_ddp_clk_disable(struct mtk_crtc *mtk_crtc) +{ + int i; + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) + mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]); +} + +static +struct mtk_ddp_comp *mtk_ddp_comp_for_plane(struct drm_crtc *crtc, + struct drm_plane *plane, + unsigned int *local_layer) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_ddp_comp *comp; + int i, count = 0; + unsigned int local_index = plane - mtk_crtc->planes; + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + comp = mtk_crtc->ddp_comp[i]; + if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) { + *local_layer = local_index - count; + return comp; + } + count += mtk_ddp_comp_layer_nr(comp); + } + + WARN(1, "Failed to find component for plane %d\n", plane->index); + return NULL; +} + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) +static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg) +{ + struct cmdq_cb_data *data = mssg; + struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client); + struct mtk_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_crtc, cmdq_client); + struct mtk_crtc_state *state; + unsigned int i; + unsigned long flags; + + /* release GCE HW usage and start autosuspend */ + pm_runtime_mark_last_busy(cmdq_cl->chan->mbox->dev); + pm_runtime_put_autosuspend(cmdq_cl->chan->mbox->dev); + + if (data->sta < 0) + return; + + state = to_mtk_crtc_state(mtk_crtc->base.state); + + spin_lock_irqsave(&mtk_crtc->config_lock, flags); + if (mtk_crtc->config_updating) + goto ddp_cmdq_cb_out; + + state->pending_config = false; + + if (mtk_crtc->pending_planes) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + + plane_state->pending.config = false; + } + mtk_crtc->pending_planes = false; + } + + if (mtk_crtc->pending_async_planes) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = 
to_mtk_plane_state(plane->state); + + plane_state->pending.async_config = false; + } + mtk_crtc->pending_async_planes = false; + } + +ddp_cmdq_cb_out: + + if (mtk_crtc->pending_needs_vblank) { + mtk_crtc_finish_page_flip(mtk_crtc); + mtk_crtc->pending_needs_vblank = false; + } + + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); + + mtk_crtc->cmdq_vblank_cnt = 0; + wake_up(&mtk_crtc->cb_blocking_queue); +} +#endif + +static int mtk_crtc_ddp_hw_init(struct mtk_crtc *mtk_crtc) +{ + struct drm_crtc *crtc = &mtk_crtc->base; + struct drm_connector *connector; + struct drm_encoder *encoder; + struct drm_connector_list_iter conn_iter; + unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC; + int ret; + int i; + + if (WARN_ON(!crtc->state)) + return -EINVAL; + + width = crtc->state->adjusted_mode.hdisplay; + height = crtc->state->adjusted_mode.vdisplay; + vrefresh = drm_mode_vrefresh(&crtc->state->adjusted_mode); + + drm_for_each_encoder(encoder, crtc->dev) { + if (encoder->crtc != crtc) + continue; + + drm_connector_list_iter_begin(crtc->dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->encoder != encoder) + continue; + if (connector->display_info.bpc != 0 && + bpc > connector->display_info.bpc) + bpc = connector->display_info.bpc; + } + drm_connector_list_iter_end(&conn_iter); + } + + ret = pm_runtime_resume_and_get(crtc->dev->dev); + if (ret < 0) { + DRM_ERROR("Failed to enable power domain: %d\n", ret); + return ret; + } + + ret = mtk_mutex_prepare(mtk_crtc->mutex); + if (ret < 0) { + DRM_ERROR("Failed to enable mutex clock: %d\n", ret); + goto err_pm_runtime_put; + } + + ret = mtk_crtc_ddp_clk_enable(mtk_crtc); + if (ret < 0) { + DRM_ERROR("Failed to enable component clocks: %d\n", ret); + goto err_mutex_unprepare; + } + + for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) { + if (!mtk_ddp_comp_connect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev, + mtk_crtc->ddp_comp[i + 1]->id)) + mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev, + mtk_crtc->ddp_comp[i]->id, + mtk_crtc->ddp_comp[i + 1]->id); + if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex)) + mtk_mutex_add_comp(mtk_crtc->mutex, + mtk_crtc->ddp_comp[i]->id); + } + if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex)) + mtk_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id); + mtk_mutex_enable(mtk_crtc->mutex); + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i]; + + if (i == 1) + mtk_ddp_comp_bgclr_in_on(comp); + + mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL); + mtk_ddp_comp_start(comp); + } + + /* Initially configure all planes */ + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + struct mtk_ddp_comp *comp; + unsigned int local_layer; + + plane_state = to_mtk_plane_state(plane->state); + + /* should not enable layer before crtc enabled */ + plane_state->pending.enable = false; + comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer); + if (comp) + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state, NULL); + } + + return 0; + +err_mutex_unprepare: + mtk_mutex_unprepare(mtk_crtc->mutex); +err_pm_runtime_put: + pm_runtime_put(crtc->dev->dev); + return ret; +} + +static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc) +{ + struct drm_device *drm = mtk_crtc->base.dev; + struct drm_crtc *crtc = &mtk_crtc->base; + unsigned long flags; + int i; + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + 
mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]); + if (i == 1) + mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]); + } + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) + if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex)) + mtk_mutex_remove_comp(mtk_crtc->mutex, + mtk_crtc->ddp_comp[i]->id); + mtk_mutex_disable(mtk_crtc->mutex); + for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) { + if (!mtk_ddp_comp_disconnect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev, + mtk_crtc->ddp_comp[i + 1]->id)) + mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev, + mtk_crtc->ddp_comp[i]->id, + mtk_crtc->ddp_comp[i + 1]->id); + if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex)) + mtk_mutex_remove_comp(mtk_crtc->mutex, + mtk_crtc->ddp_comp[i]->id); + } + if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex)) + mtk_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id); + mtk_crtc_ddp_clk_disable(mtk_crtc); + mtk_mutex_unprepare(mtk_crtc->mutex); + + pm_runtime_put(drm->dev); + + if (crtc->state->event && !crtc->state->active) { + spin_lock_irqsave(&crtc->dev->event_lock, flags); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } +} + +static void mtk_crtc_ddp_config(struct drm_crtc *crtc, + struct cmdq_pkt *cmdq_handle) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; + unsigned int i; + unsigned int local_layer; + + /* + * TODO: instead of updating the registers here, we should prepare + * working registers in atomic_commit and let the hardware command + * queue update module registers on vblank. + */ + if (state->pending_config) { + mtk_ddp_comp_config(comp, state->pending_width, + state->pending_height, + state->pending_vrefresh, 0, + cmdq_handle); + + if (!cmdq_handle) + state->pending_config = false; + } + + if (mtk_crtc->pending_planes) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + + if (!plane_state->pending.config) + continue; + + comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer); + + if (comp) + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state, + cmdq_handle); + if (!cmdq_handle) + plane_state->pending.config = false; + } + + if (!cmdq_handle) + mtk_crtc->pending_planes = false; + } + + if (mtk_crtc->pending_async_planes) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + + if (!plane_state->pending.async_config) + continue; + + comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer); + + if (comp) + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state, + cmdq_handle); + if (!cmdq_handle) + plane_state->pending.async_config = false; + } + + if (!cmdq_handle) + mtk_crtc->pending_async_planes = false; + } +} + +static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle; +#endif + struct drm_crtc *crtc = &mtk_crtc->base; + struct mtk_drm_private *priv = crtc->dev->dev_private; + unsigned int pending_planes = 0, pending_async_planes = 0; + int i; + unsigned long flags; + + mutex_lock(&mtk_crtc->hw_lock); + + 
spin_lock_irqsave(&mtk_crtc->config_lock, flags); + mtk_crtc->config_updating = true; + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); + + if (needs_vblank) + mtk_crtc->pending_needs_vblank = true; + + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + if (plane_state->pending.dirty) { + plane_state->pending.config = true; + plane_state->pending.dirty = false; + pending_planes |= BIT(i); + } else if (plane_state->pending.async_dirty) { + plane_state->pending.async_config = true; + plane_state->pending.async_dirty = false; + pending_async_planes |= BIT(i); + } + } + if (pending_planes) + mtk_crtc->pending_planes = true; + if (pending_async_planes) + mtk_crtc->pending_async_planes = true; + + if (priv->data->shadow_register) { + mtk_mutex_acquire(mtk_crtc->mutex); + mtk_crtc_ddp_config(crtc, NULL); + mtk_mutex_release(mtk_crtc->mutex); + } +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (mtk_crtc->cmdq_client.chan) { + mbox_flush(mtk_crtc->cmdq_client.chan, 2000); + cmdq_handle->cmd_buf_size = 0; + cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); + cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false); + mtk_crtc_ddp_config(crtc, cmdq_handle); + cmdq_pkt_eoc(cmdq_handle); + dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev, + cmdq_handle->pa_base, + cmdq_handle->cmd_buf_size, + DMA_TO_DEVICE); + /* + * CMDQ command should execute in next 3 vblank. + * One vblank interrupt before send message (occasionally) + * and one vblank interrupt after cmdq done, + * so it's timeout after 3 vblank interrupt. + * If it fail to execute in next 3 vblank, timeout happen. + */ + mtk_crtc->cmdq_vblank_cnt = 3; + + spin_lock_irqsave(&mtk_crtc->config_lock, flags); + mtk_crtc->config_updating = false; + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); + + if (pm_runtime_resume_and_get(mtk_crtc->cmdq_client.chan->mbox->dev) < 0) + goto update_config_out; + + mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle); + mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0); + goto update_config_out; + } +#endif + spin_lock_irqsave(&mtk_crtc->config_lock, flags); + mtk_crtc->config_updating = false; + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) +update_config_out: +#endif + mutex_unlock(&mtk_crtc->hw_lock); +} + +static void mtk_crtc_ddp_irq(void *data) +{ + struct drm_crtc *crtc = data; + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_drm_private *priv = crtc->dev->dev_private; + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan) + mtk_crtc_ddp_config(crtc, NULL); + else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0) + DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n", + drm_crtc_index(&mtk_crtc->base)); +#else + if (!priv->data->shadow_register) + mtk_crtc_ddp_config(crtc, NULL); +#endif + mtk_drm_finish_page_flip(mtk_crtc); +} + +static int mtk_crtc_enable_vblank(struct drm_crtc *crtc) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; + + mtk_ddp_comp_enable_vblank(comp); + + return 0; +} + +static void mtk_crtc_disable_vblank(struct drm_crtc *crtc) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; + + mtk_ddp_comp_disable_vblank(comp); +} + +static void mtk_crtc_update_output(struct drm_crtc *crtc, + 
struct drm_atomic_state *state) +{ + int crtc_index = drm_crtc_index(crtc); + int i; + struct device *dev; + struct drm_crtc_state *crtc_state = state->crtcs[crtc_index].new_state; + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_drm_private *priv; + unsigned int encoder_mask = crtc_state->encoder_mask; + + if (!crtc_state->connectors_changed) + return; + + if (!mtk_crtc->num_conn_routes) + return; + + priv = ((struct mtk_drm_private *)crtc->dev->dev_private)->all_drm_private[crtc_index]; + dev = priv->dev; + + dev_dbg(dev, "connector change:%d, encoder mask:0x%x for crtc:%d\n", + crtc_state->connectors_changed, encoder_mask, crtc_index); + + for (i = 0; i < mtk_crtc->num_conn_routes; i++) { + unsigned int comp_id = mtk_crtc->conn_routes[i].route_ddp; + struct mtk_ddp_comp *comp = &priv->ddp_comp[comp_id]; + + if (comp->encoder_index >= 0 && + (encoder_mask & BIT(comp->encoder_index))) { + mtk_crtc->ddp_comp[mtk_crtc->ddp_comp_nr - 1] = comp; + dev_dbg(dev, "Add comp_id: %d at path index %d\n", + comp->id, mtk_crtc->ddp_comp_nr - 1); + break; + } + } +} + +int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, + struct mtk_plane_state *state) +{ + unsigned int local_layer; + struct mtk_ddp_comp *comp; + + comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer); + if (comp) + return mtk_ddp_comp_layer_check(comp, local_layer, state); + return 0; +} + +void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_plane_state *plane_state = to_mtk_plane_state(plane->state); + int i; + + /* no need to wait for disabling the plane by CPU */ + if (!mtk_crtc->cmdq_client.chan) + return; + + if (!mtk_crtc->enabled) + return; + + /* set pending plane state to disabled */ + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *mtk_plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(mtk_plane->state); + + if (mtk_plane->index == plane->index) { + memcpy(mtk_plane_state, plane_state, sizeof(*plane_state)); + break; + } + } + mtk_crtc_update_config(mtk_crtc, false); + + /* wait for planes to be disabled by CMDQ */ + wait_event_timeout(mtk_crtc->cb_blocking_queue, + mtk_crtc->cmdq_vblank_cnt == 0, + msecs_to_jiffies(500)); +#endif +} + +void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + + if (!mtk_crtc->enabled) + return; + + mtk_crtc_update_config(mtk_crtc, false); +} + +static void mtk_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; + int ret; + + DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); + + ret = mtk_ddp_comp_power_on(comp); + if (ret < 0) { + DRM_DEV_ERROR(comp->dev, "Failed to enable power domain: %d\n", ret); + return; + } + + mtk_crtc_update_output(crtc, state); + + ret = mtk_crtc_ddp_hw_init(mtk_crtc); + if (ret) { + mtk_ddp_comp_power_off(comp); + return; + } + + drm_crtc_vblank_on(crtc); + mtk_crtc->enabled = true; +} + +static void mtk_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; + int i; + + DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); + if (!mtk_crtc->enabled) + return; + + /* Set all pending plane 
state to disabled */ + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + plane_state->pending.enable = false; + plane_state->pending.config = true; + } + mtk_crtc->pending_planes = true; + + mtk_crtc_update_config(mtk_crtc, false); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + /* Wait for planes to be disabled by cmdq */ + if (mtk_crtc->cmdq_client.chan) + wait_event_timeout(mtk_crtc->cb_blocking_queue, + mtk_crtc->cmdq_vblank_cnt == 0, + msecs_to_jiffies(500)); +#endif + /* Wait for planes to be disabled */ + drm_crtc_wait_one_vblank(crtc); + + drm_crtc_vblank_off(crtc); + mtk_crtc_ddp_hw_fini(mtk_crtc); + mtk_ddp_comp_power_off(comp); + + mtk_crtc->enabled = false; +} + +static void mtk_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state); + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + unsigned long flags; + + if (mtk_crtc->event && mtk_crtc_state->base.event) + DRM_ERROR("new event while there is still a pending event\n"); + + if (mtk_crtc_state->base.event) { + mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc); + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + mtk_crtc->event = mtk_crtc_state->base.event; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + + mtk_crtc_state->base.event = NULL; + } +} + +static void mtk_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + int i; + + if (crtc->state->color_mgmt_changed) + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state); + mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state); + } + mtk_crtc_update_config(mtk_crtc, !!mtk_crtc->event); +} + +static const struct drm_crtc_funcs mtk_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .destroy = mtk_crtc_destroy, + .reset = mtk_crtc_reset, + .atomic_duplicate_state = mtk_crtc_duplicate_state, + .atomic_destroy_state = mtk_crtc_destroy_state, + .enable_vblank = mtk_crtc_enable_vblank, + .disable_vblank = mtk_crtc_disable_vblank, +}; + +static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = { + .mode_fixup = mtk_crtc_mode_fixup, + .mode_set_nofb = mtk_crtc_mode_set_nofb, + .mode_valid = mtk_crtc_mode_valid, + .atomic_begin = mtk_crtc_atomic_begin, + .atomic_flush = mtk_crtc_atomic_flush, + .atomic_enable = mtk_crtc_atomic_enable, + .atomic_disable = mtk_crtc_atomic_disable, +}; + +static int mtk_crtc_init(struct drm_device *drm, struct mtk_crtc *mtk_crtc, + unsigned int pipe) +{ + struct drm_plane *primary = NULL; + struct drm_plane *cursor = NULL; + int i, ret; + + for (i = 0; i < mtk_crtc->layer_nr; i++) { + if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY) + primary = &mtk_crtc->planes[i]; + else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR) + cursor = &mtk_crtc->planes[i]; + } + + ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor, + &mtk_crtc_funcs, NULL); + if (ret) + goto err_cleanup_crtc; + + drm_crtc_helper_add(&mtk_crtc->base, &mtk_crtc_helper_funcs); + + return 0; + +err_cleanup_crtc: + drm_crtc_cleanup(&mtk_crtc->base); + return ret; +} + +static int mtk_crtc_num_comp_planes(struct mtk_crtc 
*mtk_crtc, int comp_idx) +{ + struct mtk_ddp_comp *comp; + + if (comp_idx > 1) + return 0; + + comp = mtk_crtc->ddp_comp[comp_idx]; + if (!comp->funcs) + return 0; + + if (comp_idx == 1 && !comp->funcs->bgclr_in_on) + return 0; + + return mtk_ddp_comp_layer_nr(comp); +} + +static inline +enum drm_plane_type mtk_crtc_plane_type(unsigned int plane_idx, + unsigned int num_planes) +{ + if (plane_idx == 0) + return DRM_PLANE_TYPE_PRIMARY; + else if (plane_idx == (num_planes - 1)) + return DRM_PLANE_TYPE_CURSOR; + else + return DRM_PLANE_TYPE_OVERLAY; + +} + +static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev, + struct mtk_crtc *mtk_crtc, + int comp_idx, int pipe) +{ + int num_planes = mtk_crtc_num_comp_planes(mtk_crtc, comp_idx); + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx]; + int i, ret; + + for (i = 0; i < num_planes; i++) { + ret = mtk_plane_init(drm_dev, + &mtk_crtc->planes[mtk_crtc->layer_nr], + BIT(pipe), + mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes), + mtk_ddp_comp_supported_rotations(comp), + mtk_ddp_comp_get_blend_modes(comp), + mtk_ddp_comp_get_formats(comp), + mtk_ddp_comp_get_num_formats(comp), + mtk_ddp_comp_is_afbc_supported(comp), i); + if (ret) + return ret; + + mtk_crtc->layer_nr++; + } + return 0; +} + +struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc) +{ + struct mtk_crtc *mtk_crtc = NULL; + + if (!crtc) + return NULL; + + mtk_crtc = to_mtk_crtc(crtc); + if (!mtk_crtc) + return NULL; + + return mtk_crtc->dma_dev; +} + +int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path, + unsigned int path_len, int priv_data_index, + const struct mtk_drm_route *conn_routes, + unsigned int num_conn_routes) +{ + struct mtk_drm_private *priv = drm_dev->dev_private; + struct device *dev = drm_dev->dev; + struct mtk_crtc *mtk_crtc; + unsigned int num_comp_planes = 0; + int ret; + int i; + bool has_ctm = false; + uint gamma_lut_size = 0; + struct drm_crtc *tmp; + int crtc_i = 0; + + if (!path) + return 0; + + priv = priv->all_drm_private[priv_data_index]; + + drm_for_each_crtc(tmp, drm_dev) + crtc_i++; + + for (i = 0; i < path_len; i++) { + enum mtk_ddp_comp_id comp_id = path[i]; + struct device_node *node; + struct mtk_ddp_comp *comp; + + node = priv->comp_node[comp_id]; + comp = &priv->ddp_comp[comp_id]; + + /* Not all drm components have a DTS device node, such as ovl_adaptor, + * which is the drm bring up sub driver + */ + if (!node && comp_id != DDP_COMPONENT_DRM_OVL_ADAPTOR) { + dev_info(dev, + "Not creating crtc %d because component %d is disabled or missing\n", + crtc_i, comp_id); + return 0; + } + + if (!comp->dev) { + dev_err(dev, "Component %pOF not initialized\n", node); + return -ENODEV; + } + } + + mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL); + if (!mtk_crtc) + return -ENOMEM; + + mtk_crtc->mmsys_dev = priv->mmsys_dev; + mtk_crtc->ddp_comp_nr = path_len; + mtk_crtc->ddp_comp = devm_kcalloc(dev, + mtk_crtc->ddp_comp_nr + (conn_routes ? 
1 : 0), + sizeof(*mtk_crtc->ddp_comp), + GFP_KERNEL); + if (!mtk_crtc->ddp_comp) + return -ENOMEM; + + mtk_crtc->mutex = mtk_mutex_get(priv->mutex_dev); + if (IS_ERR(mtk_crtc->mutex)) { + ret = PTR_ERR(mtk_crtc->mutex); + dev_err(dev, "Failed to get mutex: %d\n", ret); + return ret; + } + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + unsigned int comp_id = path[i]; + struct mtk_ddp_comp *comp; + + comp = &priv->ddp_comp[comp_id]; + mtk_crtc->ddp_comp[i] = comp; + + if (comp->funcs) { + if (comp->funcs->gamma_set && comp->funcs->gamma_get_lut_size) { + unsigned int lut_sz = mtk_ddp_gamma_get_lut_size(comp); + + if (lut_sz) + gamma_lut_size = lut_sz; + } + + if (comp->funcs->ctm_set) + has_ctm = true; + } + + mtk_ddp_comp_register_vblank_cb(comp, mtk_crtc_ddp_irq, + &mtk_crtc->base); + } + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) + num_comp_planes += mtk_crtc_num_comp_planes(mtk_crtc, i); + + mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes, + sizeof(struct drm_plane), GFP_KERNEL); + if (!mtk_crtc->planes) + return -ENOMEM; + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + ret = mtk_crtc_init_comp_planes(drm_dev, mtk_crtc, i, crtc_i); + if (ret) + return ret; + } + + /* + * Default to use the first component as the dma dev. + * In the case of ovl_adaptor sub driver, it needs to use the + * dma_dev_get function to get representative dma dev. + */ + mtk_crtc->dma_dev = mtk_ddp_comp_dma_dev_get(&priv->ddp_comp[path[0]]); + + ret = mtk_crtc_init(drm_dev, mtk_crtc, crtc_i); + if (ret < 0) + return ret; + + if (gamma_lut_size) + drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size); + drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size); + mutex_init(&mtk_crtc->hw_lock); + spin_lock_init(&mtk_crtc->config_lock); + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + i = priv->mbox_index++; + mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev; + mtk_crtc->cmdq_client.client.tx_block = false; + mtk_crtc->cmdq_client.client.knows_txdone = true; + mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb; + mtk_crtc->cmdq_client.chan = + mbox_request_channel(&mtk_crtc->cmdq_client.client, i); + if (IS_ERR(mtk_crtc->cmdq_client.chan)) { + dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n", + drm_crtc_index(&mtk_crtc->base)); + mtk_crtc->cmdq_client.chan = NULL; + } + + if (mtk_crtc->cmdq_client.chan) { + ret = of_property_read_u32_index(priv->mutex_node, + "mediatek,gce-events", + i, + &mtk_crtc->cmdq_event); + if (ret) { + dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n", + drm_crtc_index(&mtk_crtc->base)); + mbox_free_channel(mtk_crtc->cmdq_client.chan); + mtk_crtc->cmdq_client.chan = NULL; + } else { + ret = cmdq_pkt_create(&mtk_crtc->cmdq_client, + &mtk_crtc->cmdq_handle, + PAGE_SIZE); + if (ret) { + dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n", + drm_crtc_index(&mtk_crtc->base)); + mbox_free_channel(mtk_crtc->cmdq_client.chan); + mtk_crtc->cmdq_client.chan = NULL; + } + } + + /* for sending blocking cmd in crtc disable */ + init_waitqueue_head(&mtk_crtc->cb_blocking_queue); + } +#endif + + if (conn_routes) { + for (i = 0; i < num_conn_routes; i++) { + unsigned int comp_id = conn_routes[i].route_ddp; + struct device_node *node = priv->comp_node[comp_id]; + struct mtk_ddp_comp *comp = &priv->ddp_comp[comp_id]; + + if (!comp->dev) { + dev_dbg(dev, "comp_id:%d, Component %pOF not initialized\n", + comp_id, node); + /* mark encoder_index to -1, if route comp device is not enabled */ + 
comp->encoder_index = -1; + continue; + } + + mtk_ddp_comp_encoder_index_set(&priv->ddp_comp[comp_id]); + } + + mtk_crtc->num_conn_routes = num_conn_routes; + mtk_crtc->conn_routes = conn_routes; + + /* increase ddp_comp_nr at the end of mtk_crtc_create */ + mtk_crtc->ddp_comp_nr++; + } + + return 0; +} diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.h b/drivers/gpu/drm/mediatek/mtk_crtc.h new file mode 100644 index 000000000000..828f109b83e7 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_crtc.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015 MediaTek Inc. + */ + +#ifndef MTK_CRTC_H +#define MTK_CRTC_H + +#include <drm/drm_crtc.h> +#include "mtk_ddp_comp.h" +#include "mtk_drm_drv.h" +#include "mtk_plane.h" + +#define MTK_MAX_BPC 10 +#define MTK_MIN_BPC 3 + +void mtk_crtc_commit(struct drm_crtc *crtc); +int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path, + unsigned int path_len, int priv_data_index, + const struct mtk_drm_route *conn_routes, + unsigned int num_conn_routes); +int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, + struct mtk_plane_state *state); +void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane); +void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, + struct drm_atomic_state *plane_state); +struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc); + +#endif /* MTK_CRTC_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c new file mode 100644 index 000000000000..9672ea1f91a2 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c @@ -0,0 +1,710 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015 MediaTek Inc. + * Authors: + * YT Shen <yt.shen@mediatek.com> + * CK Hu <ck.hu@mediatek.com> + */ + +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/soc/mediatek/mtk-cmdq.h> +#include <drm/drm_print.h> + +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" +#include "mtk_disp_drv.h" +#include "mtk_drm_drv.h" +#include "mtk_plane.h" + + +#define DISP_REG_DITHER_EN 0x0000 +#define DITHER_EN BIT(0) +#define DISP_REG_DITHER_CFG 0x0020 +#define DITHER_RELAY_MODE BIT(0) +#define DITHER_ENGINE_EN BIT(1) +#define DISP_DITHERING BIT(2) +#define DISP_REG_DITHER_SIZE 0x0030 +#define DISP_REG_DITHER_5 0x0114 +#define DISP_REG_DITHER_7 0x011c +#define DISP_REG_DITHER_15 0x013c +#define DITHER_LSB_ERR_SHIFT_R(x) (((x) & 0x7) << 28) +#define DITHER_ADD_LSHIFT_R(x) (((x) & 0x7) << 20) +#define DITHER_NEW_BIT_MODE BIT(0) +#define DISP_REG_DITHER_16 0x0140 +#define DITHER_LSB_ERR_SHIFT_B(x) (((x) & 0x7) << 28) +#define DITHER_ADD_LSHIFT_B(x) (((x) & 0x7) << 20) +#define DITHER_LSB_ERR_SHIFT_G(x) (((x) & 0x7) << 12) +#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4) + +#define DISP_REG_DSC_CON 0x0000 +#define DSC_EN BIT(0) +#define DSC_DUAL_INOUT BIT(2) +#define DSC_BYPASS BIT(4) +#define DSC_UFOE_SEL BIT(16) + +#define DISP_REG_OD_EN 0x0000 +#define DISP_REG_OD_CFG 0x0020 +#define OD_RELAYMODE BIT(0) +#define DISP_REG_OD_SIZE 0x0030 + +#define DISP_REG_POSTMASK_EN 0x0000 +#define POSTMASK_EN BIT(0) +#define DISP_REG_POSTMASK_CFG 0x0020 +#define POSTMASK_RELAY_MODE BIT(0) +#define DISP_REG_POSTMASK_SIZE 0x0030 + +#define DISP_REG_UFO_START 0x0000 +#define UFO_BYPASS BIT(2) + +struct mtk_ddp_comp_dev { + struct clk *clk; + void __iomem *regs; + struct cmdq_client_reg cmdq_reg; +}; + +void 
mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct cmdq_client_reg *cmdq_reg, void __iomem *regs, + unsigned int offset) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (cmdq_pkt) + cmdq_pkt_write(cmdq_pkt, cmdq_reg->subsys, + cmdq_reg->offset + offset, value); + else +#endif + writel(value, regs + offset); +} + +void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct cmdq_client_reg *cmdq_reg, void __iomem *regs, + unsigned int offset) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (cmdq_pkt) + cmdq_pkt_write(cmdq_pkt, cmdq_reg->subsys, + cmdq_reg->offset + offset, value); + else +#endif + writel_relaxed(value, regs + offset); +} + +void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct cmdq_client_reg *cmdq_reg, void __iomem *regs, + unsigned int offset, unsigned int mask) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (cmdq_pkt) { + cmdq_pkt_write_mask(cmdq_pkt, cmdq_reg->subsys, + cmdq_reg->offset + offset, value, mask); + } else { +#endif + u32 tmp = readl(regs + offset); + + tmp = (tmp & ~mask) | (value & mask); + writel(tmp, regs + offset); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + } +#endif +} + +static int mtk_ddp_clk_enable(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + return clk_prepare_enable(priv->clk); +} + +static void mtk_ddp_clk_disable(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + clk_disable_unprepare(priv->clk); +} + +void mtk_dither_set_common(void __iomem *regs, struct cmdq_client_reg *cmdq_reg, + unsigned int bpc, unsigned int cfg, + unsigned int dither_en, struct cmdq_pkt *cmdq_pkt) +{ + /* If bpc equal to 0, the dithering function didn't be enabled */ + if (bpc == 0) + return; + + if (bpc >= MTK_MIN_BPC) { + mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_5); + mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_7); + mtk_ddp_write(cmdq_pkt, + DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) | + DITHER_NEW_BIT_MODE, + cmdq_reg, regs, DISP_REG_DITHER_15); + mtk_ddp_write(cmdq_pkt, + DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) | + DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc), + cmdq_reg, regs, DISP_REG_DITHER_16); + mtk_ddp_write(cmdq_pkt, dither_en, cmdq_reg, regs, cfg); + } +} + +static void mtk_dither_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE); + mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs, + DISP_REG_DITHER_CFG); + mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG, + DITHER_ENGINE_EN, cmdq_pkt); +} + +static void mtk_dither_start(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel(DITHER_EN, priv->regs + DISP_REG_DITHER_EN); +} + +static void mtk_dither_stop(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel_relaxed(0x0, priv->regs + DISP_REG_DITHER_EN); +} + +static void mtk_dither_set(struct device *dev, unsigned int bpc, + unsigned int cfg, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, cfg, + DISP_DITHERING, cmdq_pkt); +} + +static void 
mtk_dsc_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + /* dsc bypass mode */ + mtk_ddp_write_mask(cmdq_pkt, DSC_BYPASS, &priv->cmdq_reg, priv->regs, + DISP_REG_DSC_CON, DSC_BYPASS); + mtk_ddp_write_mask(cmdq_pkt, DSC_UFOE_SEL, &priv->cmdq_reg, priv->regs, + DISP_REG_DSC_CON, DSC_UFOE_SEL); + mtk_ddp_write_mask(cmdq_pkt, DSC_DUAL_INOUT, &priv->cmdq_reg, priv->regs, + DISP_REG_DSC_CON, DSC_DUAL_INOUT); +} + +static void mtk_dsc_start(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + /* write with mask to reserve the value set in mtk_dsc_config */ + mtk_ddp_write_mask(NULL, DSC_EN, &priv->cmdq_reg, priv->regs, DISP_REG_DSC_CON, DSC_EN); +} + +static void mtk_dsc_stop(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel_relaxed(0x0, priv->regs + DISP_REG_DSC_CON); +} + +static void mtk_od_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_OD_SIZE); + mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, &priv->cmdq_reg, priv->regs, DISP_REG_OD_CFG); + mtk_dither_set(dev, bpc, DISP_REG_OD_CFG, cmdq_pkt); +} + +static void mtk_od_start(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel(1, priv->regs + DISP_REG_OD_EN); +} + +static void mtk_postmask_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, + DISP_REG_POSTMASK_SIZE); + mtk_ddp_write(cmdq_pkt, POSTMASK_RELAY_MODE, &priv->cmdq_reg, + priv->regs, DISP_REG_POSTMASK_CFG); +} + +static void mtk_postmask_start(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel(POSTMASK_EN, priv->regs + DISP_REG_POSTMASK_EN); +} + +static void mtk_postmask_stop(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel_relaxed(0x0, priv->regs + DISP_REG_POSTMASK_EN); +} + +static void mtk_ufoe_start(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START); +} + +static const struct mtk_ddp_comp_funcs ddp_aal = { + .clk_enable = mtk_aal_clk_enable, + .clk_disable = mtk_aal_clk_disable, + .gamma_get_lut_size = mtk_aal_gamma_get_lut_size, + .gamma_set = mtk_aal_gamma_set, + .config = mtk_aal_config, + .start = mtk_aal_start, + .stop = mtk_aal_stop, +}; + +static const struct mtk_ddp_comp_funcs ddp_ccorr = { + .clk_enable = mtk_ccorr_clk_enable, + .clk_disable = mtk_ccorr_clk_disable, + .config = mtk_ccorr_config, + .start = mtk_ccorr_start, + .stop = mtk_ccorr_stop, + .ctm_set = mtk_ccorr_ctm_set, +}; + +static const struct mtk_ddp_comp_funcs ddp_color = { + .clk_enable = mtk_color_clk_enable, + .clk_disable = mtk_color_clk_disable, + .config = mtk_color_config, + .start = mtk_color_start, +}; + +static const struct mtk_ddp_comp_funcs ddp_dither = { + .clk_enable = mtk_ddp_clk_enable, + .clk_disable = mtk_ddp_clk_disable, + .config = mtk_dither_config, + .start = mtk_dither_start, + .stop = mtk_dither_stop, +}; + +static const struct mtk_ddp_comp_funcs ddp_dpi = { + 
.start = mtk_dpi_start, + .stop = mtk_dpi_stop, + .encoder_index = mtk_dpi_encoder_index, +}; + +static const struct mtk_ddp_comp_funcs ddp_dsc = { + .clk_enable = mtk_ddp_clk_enable, + .clk_disable = mtk_ddp_clk_disable, + .config = mtk_dsc_config, + .start = mtk_dsc_start, + .stop = mtk_dsc_stop, +}; + +static const struct mtk_ddp_comp_funcs ddp_dsi = { + .start = mtk_dsi_ddp_start, + .stop = mtk_dsi_ddp_stop, + .encoder_index = mtk_dsi_encoder_index, +}; + +static const struct mtk_ddp_comp_funcs ddp_gamma = { + .clk_enable = mtk_gamma_clk_enable, + .clk_disable = mtk_gamma_clk_disable, + .gamma_get_lut_size = mtk_gamma_get_lut_size, + .gamma_set = mtk_gamma_set, + .config = mtk_gamma_config, + .start = mtk_gamma_start, + .stop = mtk_gamma_stop, +}; + +static const struct mtk_ddp_comp_funcs ddp_merge = { + .clk_enable = mtk_merge_clk_enable, + .clk_disable = mtk_merge_clk_disable, + .start = mtk_merge_start, + .stop = mtk_merge_stop, + .config = mtk_merge_config, +}; + +static const struct mtk_ddp_comp_funcs ddp_od = { + .clk_enable = mtk_ddp_clk_enable, + .clk_disable = mtk_ddp_clk_disable, + .config = mtk_od_config, + .start = mtk_od_start, +}; + +static const struct mtk_ddp_comp_funcs ddp_ovl = { + .clk_enable = mtk_ovl_clk_enable, + .clk_disable = mtk_ovl_clk_disable, + .config = mtk_ovl_config, + .start = mtk_ovl_start, + .stop = mtk_ovl_stop, + .register_vblank_cb = mtk_ovl_register_vblank_cb, + .unregister_vblank_cb = mtk_ovl_unregister_vblank_cb, + .enable_vblank = mtk_ovl_enable_vblank, + .disable_vblank = mtk_ovl_disable_vblank, + .supported_rotations = mtk_ovl_supported_rotations, + .layer_nr = mtk_ovl_layer_nr, + .layer_check = mtk_ovl_layer_check, + .layer_config = mtk_ovl_layer_config, + .bgclr_in_on = mtk_ovl_bgclr_in_on, + .bgclr_in_off = mtk_ovl_bgclr_in_off, + .get_blend_modes = mtk_ovl_get_blend_modes, + .get_formats = mtk_ovl_get_formats, + .get_num_formats = mtk_ovl_get_num_formats, + .is_afbc_supported = mtk_ovl_is_afbc_supported, +}; + +static const struct mtk_ddp_comp_funcs ddp_postmask = { + .clk_enable = mtk_ddp_clk_enable, + .clk_disable = mtk_ddp_clk_disable, + .config = mtk_postmask_config, + .start = mtk_postmask_start, + .stop = mtk_postmask_stop, +}; + +static const struct mtk_ddp_comp_funcs ddp_rdma = { + .clk_enable = mtk_rdma_clk_enable, + .clk_disable = mtk_rdma_clk_disable, + .config = mtk_rdma_config, + .start = mtk_rdma_start, + .stop = mtk_rdma_stop, + .register_vblank_cb = mtk_rdma_register_vblank_cb, + .unregister_vblank_cb = mtk_rdma_unregister_vblank_cb, + .enable_vblank = mtk_rdma_enable_vblank, + .disable_vblank = mtk_rdma_disable_vblank, + .layer_nr = mtk_rdma_layer_nr, + .layer_config = mtk_rdma_layer_config, + .get_formats = mtk_rdma_get_formats, + .get_num_formats = mtk_rdma_get_num_formats, +}; + +static const struct mtk_ddp_comp_funcs ddp_ufoe = { + .clk_enable = mtk_ddp_clk_enable, + .clk_disable = mtk_ddp_clk_disable, + .start = mtk_ufoe_start, +}; + +static const struct mtk_ddp_comp_funcs ddp_ovl_adaptor = { + .power_on = mtk_ovl_adaptor_power_on, + .power_off = mtk_ovl_adaptor_power_off, + .clk_enable = mtk_ovl_adaptor_clk_enable, + .clk_disable = mtk_ovl_adaptor_clk_disable, + .config = mtk_ovl_adaptor_config, + .start = mtk_ovl_adaptor_start, + .stop = mtk_ovl_adaptor_stop, + .layer_nr = mtk_ovl_adaptor_layer_nr, + .layer_config = mtk_ovl_adaptor_layer_config, + .register_vblank_cb = mtk_ovl_adaptor_register_vblank_cb, + .unregister_vblank_cb = mtk_ovl_adaptor_unregister_vblank_cb, + .enable_vblank = mtk_ovl_adaptor_enable_vblank, 
+ .disable_vblank = mtk_ovl_adaptor_disable_vblank, + .dma_dev_get = mtk_ovl_adaptor_dma_dev_get, + .connect = mtk_ovl_adaptor_connect, + .disconnect = mtk_ovl_adaptor_disconnect, + .add = mtk_ovl_adaptor_add_comp, + .remove = mtk_ovl_adaptor_remove_comp, + .get_blend_modes = mtk_ovl_adaptor_get_blend_modes, + .get_formats = mtk_ovl_adaptor_get_formats, + .get_num_formats = mtk_ovl_adaptor_get_num_formats, + .mode_valid = mtk_ovl_adaptor_mode_valid, +}; + +static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = { + [MTK_DISP_AAL] = "aal", + [MTK_DISP_BLS] = "bls", + [MTK_DISP_CCORR] = "ccorr", + [MTK_DISP_COLOR] = "color", + [MTK_DISP_DITHER] = "dither", + [MTK_DISP_DSC] = "dsc", + [MTK_DISP_GAMMA] = "gamma", + [MTK_DISP_MERGE] = "merge", + [MTK_DISP_MUTEX] = "mutex", + [MTK_DISP_OD] = "od", + [MTK_DISP_OVL] = "ovl", + [MTK_DISP_OVL_2L] = "ovl-2l", + [MTK_DISP_OVL_ADAPTOR] = "ovl_adaptor", + [MTK_DISP_POSTMASK] = "postmask", + [MTK_DISP_PWM] = "pwm", + [MTK_DISP_RDMA] = "rdma", + [MTK_DISP_UFOE] = "ufoe", + [MTK_DISP_WDMA] = "wdma", + [MTK_DP_INTF] = "dp-intf", + [MTK_DPI] = "dpi", + [MTK_DSI] = "dsi", +}; + +struct mtk_ddp_comp_match { + enum mtk_ddp_comp_type type; + int alias_id; + const struct mtk_ddp_comp_funcs *funcs; +}; + +static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_DRM_ID_MAX] = { + [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal }, + [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal }, + [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL }, + [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr }, + [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color }, + [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color }, + [DDP_COMPONENT_DITHER0] = { MTK_DISP_DITHER, 0, &ddp_dither }, + [DDP_COMPONENT_DP_INTF0] = { MTK_DP_INTF, 0, &ddp_dpi }, + [DDP_COMPONENT_DP_INTF1] = { MTK_DP_INTF, 1, &ddp_dpi }, + [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, &ddp_dpi }, + [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, &ddp_dpi }, + [DDP_COMPONENT_DRM_OVL_ADAPTOR] = { MTK_DISP_OVL_ADAPTOR, 0, &ddp_ovl_adaptor }, + [DDP_COMPONENT_DSC0] = { MTK_DISP_DSC, 0, &ddp_dsc }, + [DDP_COMPONENT_DSC1] = { MTK_DISP_DSC, 1, &ddp_dsc }, + [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, &ddp_dsi }, + [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, &ddp_dsi }, + [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, &ddp_dsi }, + [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, &ddp_dsi }, + [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma }, + [DDP_COMPONENT_MERGE0] = { MTK_DISP_MERGE, 0, &ddp_merge }, + [DDP_COMPONENT_MERGE1] = { MTK_DISP_MERGE, 1, &ddp_merge }, + [DDP_COMPONENT_MERGE2] = { MTK_DISP_MERGE, 2, &ddp_merge }, + [DDP_COMPONENT_MERGE3] = { MTK_DISP_MERGE, 3, &ddp_merge }, + [DDP_COMPONENT_MERGE4] = { MTK_DISP_MERGE, 4, &ddp_merge }, + [DDP_COMPONENT_MERGE5] = { MTK_DISP_MERGE, 5, &ddp_merge }, + [DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od }, + [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od }, + [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, &ddp_ovl }, + [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, &ddp_ovl }, + [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, &ddp_ovl }, + [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, &ddp_ovl }, + [DDP_COMPONENT_OVL_2L2] = { MTK_DISP_OVL_2L, 2, &ddp_ovl }, + [DDP_COMPONENT_POSTMASK0] = { MTK_DISP_POSTMASK, 0, &ddp_postmask }, + [DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL }, + [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL }, + [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL }, + [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, &ddp_rdma }, + [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, 
&ddp_rdma }, + [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, &ddp_rdma }, + [DDP_COMPONENT_RDMA4] = { MTK_DISP_RDMA, 4, &ddp_rdma }, + [DDP_COMPONENT_UFOE] = { MTK_DISP_UFOE, 0, &ddp_ufoe }, + [DDP_COMPONENT_WDMA0] = { MTK_DISP_WDMA, 0, NULL }, + [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL }, +}; + +static bool mtk_ddp_comp_find(struct device *dev, + const unsigned int *path, + unsigned int path_len, + struct mtk_ddp_comp *ddp_comp) +{ + unsigned int i; + + if (path == NULL) + return false; + + for (i = 0U; i < path_len; i++) + if (dev == ddp_comp[path[i]].dev) + return true; + + return false; +} + +static int mtk_ddp_comp_find_in_route(struct device *dev, + const struct mtk_drm_route *routes, + unsigned int num_routes, + struct mtk_ddp_comp *ddp_comp) +{ + unsigned int i; + + if (!routes) + return -EINVAL; + + for (i = 0; i < num_routes; i++) + if (dev == ddp_comp[routes[i].route_ddp].dev) + return BIT(routes[i].crtc_id); + + return -ENODEV; +} + +static bool mtk_ddp_path_available(const unsigned int *path, + unsigned int path_len, + struct device_node **comp_node) +{ + unsigned int i; + + if (!path || !path_len) + return false; + + for (i = 0U; i < path_len; i++) { + /* OVL_ADAPTOR doesn't have a device node */ + if (path[i] == DDP_COMPONENT_DRM_OVL_ADAPTOR) + continue; + + if (!comp_node[path[i]]) + return false; + } + + return true; +} + +int mtk_ddp_comp_get_id(struct device_node *node, + enum mtk_ddp_comp_type comp_type) +{ + int id = of_alias_get_id(node, mtk_ddp_comp_stem[comp_type]); + int i; + + for (i = 0; i < ARRAY_SIZE(mtk_ddp_matches); i++) { + if (comp_type == mtk_ddp_matches[i].type && + (id < 0 || id == mtk_ddp_matches[i].alias_id)) + return i; + } + + return -EINVAL; +} + +int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev) +{ + struct mtk_drm_private *private = drm->dev_private; + const struct mtk_mmsys_driver_data *data; + struct mtk_drm_private *priv_n; + int i = 0, j; + int ret; + + for (j = 0; j < private->data->mmsys_dev_num; j++) { + priv_n = private->all_drm_private[j]; + data = priv_n->data; + + if (mtk_ddp_path_available(data->main_path, data->main_len, + priv_n->comp_node)) { + if (mtk_ddp_comp_find(dev, data->main_path, + data->main_len, + priv_n->ddp_comp)) + return BIT(i); + i++; + } + + if (mtk_ddp_path_available(data->ext_path, data->ext_len, + priv_n->comp_node)) { + if (mtk_ddp_comp_find(dev, data->ext_path, + data->ext_len, + priv_n->ddp_comp)) + return BIT(i); + i++; + } + + if (mtk_ddp_path_available(data->third_path, data->third_len, + priv_n->comp_node)) { + if (mtk_ddp_comp_find(dev, data->third_path, + data->third_len, + priv_n->ddp_comp)) + return BIT(i); + i++; + } + } + + ret = mtk_ddp_comp_find_in_route(dev, + private->data->conn_routes, + private->data->num_conn_routes, + private->ddp_comp); + + if (ret < 0) + DRM_INFO("Failed to find comp in ddp table, ret = %d\n", ret); + + return ret; +} + +static void mtk_ddp_comp_put_device(void *_dev) +{ + struct device *dev = _dev; + + put_device(dev); +} + +static void mtk_ddp_comp_clk_put(void *_clk) +{ + struct clk *clk = _clk; + + clk_put(clk); +} + +int mtk_ddp_comp_init(struct device *dev, struct device_node *node, struct mtk_ddp_comp *comp, + unsigned int comp_id) +{ + struct platform_device *comp_pdev; + enum mtk_ddp_comp_type type; + struct mtk_ddp_comp_dev *priv; + int ret; + + if (comp_id >= DDP_COMPONENT_DRM_ID_MAX) + return -EINVAL; + + type = mtk_ddp_matches[comp_id].type; + + comp->id = comp_id; + comp->funcs = mtk_ddp_matches[comp_id].funcs; + /* Not all drm 
components have a DTS device node, such as ovl_adaptor, + * which is the drm bring up sub driver + */ + if (!node) + return 0; + + comp_pdev = of_find_device_by_node(node); + if (!comp_pdev) { + DRM_INFO("Waiting for device %s\n", node->full_name); + return -EPROBE_DEFER; + } + comp->dev = &comp_pdev->dev; + + ret = devm_add_action_or_reset(dev, mtk_ddp_comp_put_device, comp->dev); + if (ret) + return ret; + + if (type == MTK_DISP_AAL || + type == MTK_DISP_BLS || + type == MTK_DISP_CCORR || + type == MTK_DISP_COLOR || + type == MTK_DISP_GAMMA || + type == MTK_DISP_MERGE || + type == MTK_DISP_OVL || + type == MTK_DISP_OVL_2L || + type == MTK_DISP_PWM || + type == MTK_DISP_RDMA || + type == MTK_DPI || + type == MTK_DP_INTF || + type == MTK_DSI) + return 0; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->regs = devm_of_iomap(dev, node, 0, NULL); + if (IS_ERR(priv->regs)) + return PTR_ERR(priv->regs); + + priv->clk = of_clk_get(node, 0); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + ret = devm_add_action_or_reset(dev, mtk_ddp_comp_clk_put, priv->clk); + if (ret) + return ret; + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + ret = cmdq_dev_get_client_reg(comp->dev, &priv->cmdq_reg, 0); + if (ret) + dev_dbg(comp->dev, "get mediatek,gce-client-reg fail!\n"); +#endif + + platform_set_drvdata(comp_pdev, priv); + + return 0; +} diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h new file mode 100644 index 000000000000..3f3d43f4330d --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h @@ -0,0 +1,365 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015 MediaTek Inc. + */ + +#ifndef MTK_DDP_COMP_H +#define MTK_DDP_COMP_H + +#include <linux/io.h> +#include <linux/pm_runtime.h> +#include <linux/soc/mediatek/mtk-cmdq.h> +#include <linux/soc/mediatek/mtk-mmsys.h> +#include <linux/soc/mediatek/mtk-mutex.h> + +#include <drm/drm_modes.h> + +struct device; +struct device_node; +struct drm_crtc; +struct drm_device; +struct mtk_plane_state; +struct drm_crtc_state; + +enum mtk_ddp_comp_type { + MTK_DISP_AAL, + MTK_DISP_BLS, + MTK_DISP_CCORR, + MTK_DISP_COLOR, + MTK_DISP_DITHER, + MTK_DISP_DSC, + MTK_DISP_GAMMA, + MTK_DISP_MERGE, + MTK_DISP_MUTEX, + MTK_DISP_OD, + MTK_DISP_OVL, + MTK_DISP_OVL_2L, + MTK_DISP_OVL_ADAPTOR, + MTK_DISP_POSTMASK, + MTK_DISP_PWM, + MTK_DISP_RDMA, + MTK_DISP_UFOE, + MTK_DISP_WDMA, + MTK_DPI, + MTK_DP_INTF, + MTK_DSI, + MTK_DDP_COMP_TYPE_MAX, +}; + +struct mtk_ddp_comp; +struct cmdq_pkt; +struct mtk_ddp_comp_funcs { + int (*power_on)(struct device *dev); + void (*power_off)(struct device *dev); + int (*clk_enable)(struct device *dev); + void (*clk_disable)(struct device *dev); + void (*config)(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt); + void (*start)(struct device *dev); + void (*stop)(struct device *dev); + void (*register_vblank_cb)(struct device *dev, + void (*vblank_cb)(void *), + void *vblank_cb_data); + void (*unregister_vblank_cb)(struct device *dev); + void (*enable_vblank)(struct device *dev); + void (*disable_vblank)(struct device *dev); + unsigned int (*supported_rotations)(struct device *dev); + unsigned int (*layer_nr)(struct device *dev); + int (*layer_check)(struct device *dev, + unsigned int idx, + struct mtk_plane_state *state); + void (*layer_config)(struct device *dev, unsigned int idx, + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt); + unsigned int 
(*gamma_get_lut_size)(struct device *dev); + void (*gamma_set)(struct device *dev, + struct drm_crtc_state *state); + void (*bgclr_in_on)(struct device *dev); + void (*bgclr_in_off)(struct device *dev); + void (*ctm_set)(struct device *dev, + struct drm_crtc_state *state); + struct device * (*dma_dev_get)(struct device *dev); + u32 (*get_blend_modes)(struct device *dev); + const u32 *(*get_formats)(struct device *dev); + size_t (*get_num_formats)(struct device *dev); + bool (*is_afbc_supported)(struct device *dev); + void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next); + void (*disconnect)(struct device *dev, struct device *mmsys_dev, unsigned int next); + void (*add)(struct device *dev, struct mtk_mutex *mutex); + void (*remove)(struct device *dev, struct mtk_mutex *mutex); + unsigned int (*encoder_index)(struct device *dev); + enum drm_mode_status (*mode_valid)(struct device *dev, const struct drm_display_mode *mode); +}; + +struct mtk_ddp_comp { + struct device *dev; + int irq; + unsigned int id; + int encoder_index; + const struct mtk_ddp_comp_funcs *funcs; +}; + +static inline int mtk_ddp_comp_power_on(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->power_on) + return comp->funcs->power_on(comp->dev); + else + return pm_runtime_resume_and_get(comp->dev); + return 0; +} + +static inline void mtk_ddp_comp_power_off(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->power_off) + comp->funcs->power_off(comp->dev); + else + pm_runtime_put(comp->dev); +} + +static inline int mtk_ddp_comp_clk_enable(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->clk_enable) + return comp->funcs->clk_enable(comp->dev); + + return 0; +} + +static inline void mtk_ddp_comp_clk_disable(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->clk_disable) + comp->funcs->clk_disable(comp->dev); +} + +static inline +enum drm_mode_status mtk_ddp_comp_mode_valid(struct mtk_ddp_comp *comp, + const struct drm_display_mode *mode) +{ + if (comp && comp->funcs && comp->funcs->mode_valid) + return comp->funcs->mode_valid(comp->dev, mode); + return MODE_OK; +} + +static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp, + unsigned int w, unsigned int h, + unsigned int vrefresh, unsigned int bpc, + struct cmdq_pkt *cmdq_pkt) +{ + if (comp->funcs && comp->funcs->config) + comp->funcs->config(comp->dev, w, h, vrefresh, bpc, cmdq_pkt); +} + +static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->start) + comp->funcs->start(comp->dev); +} + +static inline void mtk_ddp_comp_stop(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->stop) + comp->funcs->stop(comp->dev); +} + +static inline void mtk_ddp_comp_register_vblank_cb(struct mtk_ddp_comp *comp, + void (*vblank_cb)(void *), + void *vblank_cb_data) +{ + if (comp->funcs && comp->funcs->register_vblank_cb) + comp->funcs->register_vblank_cb(comp->dev, vblank_cb, + vblank_cb_data); +} + +static inline void mtk_ddp_comp_unregister_vblank_cb(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->unregister_vblank_cb) + comp->funcs->unregister_vblank_cb(comp->dev); +} + +static inline void mtk_ddp_comp_enable_vblank(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->enable_vblank) + comp->funcs->enable_vblank(comp->dev); +} + +static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->disable_vblank) + comp->funcs->disable_vblank(comp->dev); +} + +static inline 
+unsigned int mtk_ddp_comp_supported_rotations(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->supported_rotations) + return comp->funcs->supported_rotations(comp->dev); + + /* + * In order to pass IGT tests, DRM_MODE_ROTATE_0 is required when + * rotation is not supported. + */ + return DRM_MODE_ROTATE_0; +} + +static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->layer_nr) + return comp->funcs->layer_nr(comp->dev); + + return 0; +} + +static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp, + unsigned int idx, + struct mtk_plane_state *state) +{ + if (comp->funcs && comp->funcs->layer_check) + return comp->funcs->layer_check(comp->dev, idx, state); + return 0; +} + +static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp, + unsigned int idx, + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt) +{ + if (comp->funcs && comp->funcs->layer_config) + comp->funcs->layer_config(comp->dev, idx, state, cmdq_pkt); +} + +static inline unsigned int mtk_ddp_gamma_get_lut_size(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->gamma_get_lut_size) + return comp->funcs->gamma_get_lut_size(comp->dev); + + return 0; +} + +static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp, + struct drm_crtc_state *state) +{ + if (comp->funcs && comp->funcs->gamma_set) + comp->funcs->gamma_set(comp->dev, state); +} + +static inline void mtk_ddp_comp_bgclr_in_on(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->bgclr_in_on) + comp->funcs->bgclr_in_on(comp->dev); +} + +static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->bgclr_in_off) + comp->funcs->bgclr_in_off(comp->dev); +} + +static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp, + struct drm_crtc_state *state) +{ + if (comp->funcs && comp->funcs->ctm_set) + comp->funcs->ctm_set(comp->dev, state); +} + +static inline struct device *mtk_ddp_comp_dma_dev_get(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->dma_dev_get) + return comp->funcs->dma_dev_get(comp->dev); + return comp->dev; +} + +static inline +u32 mtk_ddp_comp_get_blend_modes(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->get_blend_modes) + return comp->funcs->get_blend_modes(comp->dev); + + return 0; +} + +static inline +const u32 *mtk_ddp_comp_get_formats(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->get_formats) + return comp->funcs->get_formats(comp->dev); + + return NULL; +} + +static inline +size_t mtk_ddp_comp_get_num_formats(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->get_num_formats) + return comp->funcs->get_num_formats(comp->dev); + + return 0; +} + +static inline bool mtk_ddp_comp_is_afbc_supported(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->is_afbc_supported) + return comp->funcs->is_afbc_supported(comp->dev); + + return false; +} + +static inline bool mtk_ddp_comp_add(struct mtk_ddp_comp *comp, struct mtk_mutex *mutex) +{ + if (comp->funcs && comp->funcs->add) { + comp->funcs->add(comp->dev, mutex); + return true; + } + return false; +} + +static inline bool mtk_ddp_comp_remove(struct mtk_ddp_comp *comp, struct mtk_mutex *mutex) +{ + if (comp->funcs && comp->funcs->remove) { + comp->funcs->remove(comp->dev, mutex); + return true; + } + return false; +} + +static inline bool mtk_ddp_comp_connect(struct mtk_ddp_comp *comp, struct device *mmsys_dev, + unsigned int next) +{ + if (comp->funcs && 
comp->funcs->connect) { + comp->funcs->connect(comp->dev, mmsys_dev, next); + return true; + } + return false; +} + +static inline bool mtk_ddp_comp_disconnect(struct mtk_ddp_comp *comp, struct device *mmsys_dev, + unsigned int next) +{ + if (comp->funcs && comp->funcs->disconnect) { + comp->funcs->disconnect(comp->dev, mmsys_dev, next); + return true; + } + return false; +} + +static inline void mtk_ddp_comp_encoder_index_set(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->encoder_index) + comp->encoder_index = (int)comp->funcs->encoder_index(comp->dev); +} + +int mtk_ddp_comp_get_id(struct device_node *node, + enum mtk_ddp_comp_type comp_type); +int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev); +int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, struct mtk_ddp_comp *comp, + unsigned int comp_id); +enum mtk_ddp_comp_type mtk_ddp_comp_get_type(unsigned int comp_id); +void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct cmdq_client_reg *cmdq_reg, void __iomem *regs, + unsigned int offset); +void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct cmdq_client_reg *cmdq_reg, void __iomem *regs, + unsigned int offset); +void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct cmdq_client_reg *cmdq_reg, void __iomem *regs, + unsigned int offset, unsigned int mask); +#endif /* MTK_DDP_COMP_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_disp_aal.c b/drivers/gpu/drm/mediatek/mtk_disp_aal.c new file mode 100644 index 000000000000..abc9e5525d03 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_disp_aal.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 MediaTek Inc. + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/soc/mediatek/mtk-cmdq.h> + +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" +#include "mtk_disp_drv.h" +#include "mtk_drm_drv.h" + +#define DISP_AAL_EN 0x0000 +#define AAL_EN BIT(0) +#define DISP_AAL_CFG 0x0020 +#define AAL_RELAY_MODE BIT(0) +#define AAL_GAMMA_LUT_EN BIT(1) +#define DISP_AAL_SIZE 0x0030 +#define DISP_AAL_SIZE_HSIZE GENMASK(28, 16) +#define DISP_AAL_SIZE_VSIZE GENMASK(12, 0) +#define DISP_AAL_OUTPUT_SIZE 0x04d8 +#define DISP_AAL_GAMMA_LUT 0x0700 +#define DISP_AAL_GAMMA_LUT_R GENMASK(29, 20) +#define DISP_AAL_GAMMA_LUT_G GENMASK(19, 10) +#define DISP_AAL_GAMMA_LUT_B GENMASK(9, 0) +#define DISP_AAL_LUT_BITS 10 +#define DISP_AAL_LUT_SIZE 512 + +struct mtk_disp_aal_data { + bool has_gamma; +}; + + /** + * struct mtk_disp_aal - Display Adaptive Ambient Light driver structure + * @clk: clock for DISP_AAL controller + * @regs: MMIO registers base + * @cmdq_reg: CMDQ Client register + * @data: platform specific data for DISP_AAL + */ +struct mtk_disp_aal { + struct clk *clk; + void __iomem *regs; + struct cmdq_client_reg cmdq_reg; + const struct mtk_disp_aal_data *data; +}; + +int mtk_aal_clk_enable(struct device *dev) +{ + struct mtk_disp_aal *aal = dev_get_drvdata(dev); + + return clk_prepare_enable(aal->clk); +} + +void mtk_aal_clk_disable(struct device *dev) +{ + struct mtk_disp_aal *aal = dev_get_drvdata(dev); + + clk_disable_unprepare(aal->clk); +} + +void mtk_aal_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_disp_aal *aal = dev_get_drvdata(dev); + u32 sz; + + sz = 
FIELD_PREP(DISP_AAL_SIZE_HSIZE, w); + sz |= FIELD_PREP(DISP_AAL_SIZE_VSIZE, h); + + mtk_ddp_write(cmdq_pkt, sz, &aal->cmdq_reg, aal->regs, DISP_AAL_SIZE); + mtk_ddp_write(cmdq_pkt, sz, &aal->cmdq_reg, aal->regs, DISP_AAL_OUTPUT_SIZE); +} + +/** + * mtk_aal_gamma_get_lut_size() - Get gamma LUT size for AAL + * @dev: Pointer to struct device + * + * Return: 0 if gamma control not supported in AAL or gamma LUT size + */ +unsigned int mtk_aal_gamma_get_lut_size(struct device *dev) +{ + struct mtk_disp_aal *aal = dev_get_drvdata(dev); + + if (aal->data && aal->data->has_gamma) + return DISP_AAL_LUT_SIZE; + return 0; +} + +void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state) +{ + struct mtk_disp_aal *aal = dev_get_drvdata(dev); + struct drm_color_lut *lut; + unsigned int i; + u32 cfg_val; + + /* If gamma is not supported in AAL, go out immediately */ + if (!(aal->data && aal->data->has_gamma)) + return; + + /* Also, if there's no gamma lut there's nothing to do here. */ + if (!state->gamma_lut) + return; + + lut = (struct drm_color_lut *)state->gamma_lut->data; + for (i = 0; i < DISP_AAL_LUT_SIZE; i++) { + struct drm_color_lut hwlut = { + .red = drm_color_lut_extract(lut[i].red, DISP_AAL_LUT_BITS), + .green = drm_color_lut_extract(lut[i].green, DISP_AAL_LUT_BITS), + .blue = drm_color_lut_extract(lut[i].blue, DISP_AAL_LUT_BITS) + }; + u32 word; + + word = FIELD_PREP(DISP_AAL_GAMMA_LUT_R, hwlut.red); + word |= FIELD_PREP(DISP_AAL_GAMMA_LUT_G, hwlut.green); + word |= FIELD_PREP(DISP_AAL_GAMMA_LUT_B, hwlut.blue); + writel(word, aal->regs + DISP_AAL_GAMMA_LUT + i * 4); + } + + cfg_val = readl(aal->regs + DISP_AAL_CFG); + + /* Enable the gamma table */ + cfg_val |= FIELD_PREP(AAL_GAMMA_LUT_EN, 1); + + /* Disable RELAY mode to pass the processed image */ + cfg_val &= ~AAL_RELAY_MODE; + + writel(cfg_val, aal->regs + DISP_AAL_CFG); +} + +void mtk_aal_start(struct device *dev) +{ + struct mtk_disp_aal *aal = dev_get_drvdata(dev); + + writel(AAL_EN, aal->regs + DISP_AAL_EN); +} + +void mtk_aal_stop(struct device *dev) +{ + struct mtk_disp_aal *aal = dev_get_drvdata(dev); + + writel_relaxed(0x0, aal->regs + DISP_AAL_EN); +} + +static int mtk_disp_aal_bind(struct device *dev, struct device *master, + void *data) +{ + return 0; +} + +static void mtk_disp_aal_unbind(struct device *dev, struct device *master, + void *data) +{ +} + +static const struct component_ops mtk_disp_aal_component_ops = { + .bind = mtk_disp_aal_bind, + .unbind = mtk_disp_aal_unbind, +}; + +static int mtk_disp_aal_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_disp_aal *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), + "failed to get aal clk\n"); + + priv->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->regs)) + return dev_err_probe(dev, PTR_ERR(priv->regs), + "failed to ioremap aal\n"); + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0); + if (ret) + dev_dbg(dev, "get mediatek,gce-client-reg fail!\n"); +#endif + + priv->data = of_device_get_match_data(dev); + platform_set_drvdata(pdev, priv); + + ret = component_add(dev, &mtk_disp_aal_component_ops); + if (ret) + return dev_err_probe(dev, ret, "Failed to add component\n"); + + return 0; +} + +static void mtk_disp_aal_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, 
&mtk_disp_aal_component_ops); +} + +static const struct mtk_disp_aal_data mt8173_aal_driver_data = { + .has_gamma = true, +}; + +static const struct of_device_id mtk_disp_aal_driver_dt_match[] = { + { .compatible = "mediatek,mt8173-disp-aal", .data = &mt8173_aal_driver_data }, + { .compatible = "mediatek,mt8183-disp-aal" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mtk_disp_aal_driver_dt_match); + +struct platform_driver mtk_disp_aal_driver = { + .probe = mtk_disp_aal_probe, + .remove = mtk_disp_aal_remove, + .driver = { + .name = "mediatek-disp-aal", + .of_match_table = mtk_disp_aal_driver_dt_match, + }, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c new file mode 100644 index 000000000000..6d7bf4afa78d --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 MediaTek Inc. + */ + +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/soc/mediatek/mtk-cmdq.h> + +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" +#include "mtk_disp_drv.h" +#include "mtk_drm_drv.h" + +#define DISP_CCORR_EN 0x0000 +#define CCORR_EN BIT(0) +#define DISP_CCORR_CFG 0x0020 +#define CCORR_RELAY_MODE BIT(0) +#define CCORR_ENGINE_EN BIT(1) +#define CCORR_GAMMA_OFF BIT(2) +#define CCORR_WGAMUT_SRC_CLIP BIT(3) +#define DISP_CCORR_SIZE 0x0030 +#define DISP_CCORR_COEF_0 0x0080 +#define DISP_CCORR_COEF_1 0x0084 +#define DISP_CCORR_COEF_2 0x0088 +#define DISP_CCORR_COEF_3 0x008C +#define DISP_CCORR_COEF_4 0x0090 + +struct mtk_disp_ccorr_data { + u32 matrix_bits; +}; + +struct mtk_disp_ccorr { + struct clk *clk; + void __iomem *regs; + struct cmdq_client_reg cmdq_reg; + const struct mtk_disp_ccorr_data *data; +}; + +int mtk_ccorr_clk_enable(struct device *dev) +{ + struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev); + + return clk_prepare_enable(ccorr->clk); +} + +void mtk_ccorr_clk_disable(struct device *dev) +{ + struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev); + + clk_disable_unprepare(ccorr->clk); +} + +void mtk_ccorr_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev); + + mtk_ddp_write(cmdq_pkt, w << 16 | h, &ccorr->cmdq_reg, ccorr->regs, + DISP_CCORR_SIZE); + mtk_ddp_write(cmdq_pkt, CCORR_ENGINE_EN, &ccorr->cmdq_reg, ccorr->regs, + DISP_CCORR_CFG); +} + +void mtk_ccorr_start(struct device *dev) +{ + struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev); + + writel(CCORR_EN, ccorr->regs + DISP_CCORR_EN); +} + +void mtk_ccorr_stop(struct device *dev) +{ + struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev); + + writel_relaxed(0x0, ccorr->regs + DISP_CCORR_EN); +} + +void mtk_ccorr_ctm_set(struct device *dev, struct drm_crtc_state *state) +{ + struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev); + struct drm_property_blob *blob = state->ctm; + struct drm_color_ctm *ctm; + const u64 *input; + uint16_t coeffs[9] = { 0 }; + int i; + struct cmdq_pkt *cmdq_pkt = NULL; + u32 matrix_bits = ccorr->data->matrix_bits; + + if (!blob) + return; + + ctm = (struct drm_color_ctm *)blob->data; + input = ctm->matrix; + + for (i = 0; i < ARRAY_SIZE(coeffs); i++) + coeffs[i] = drm_color_ctm_s31_32_to_qm_n(input[i], 2, matrix_bits); + + mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1], + &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_0); + 
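+	/*
+	 * The remaining coefficient registers follow the same layout as
+	 * DISP_CCORR_COEF_0 above: two 16-bit matrix coefficients packed per
+	 * 32-bit word, upper halfword first, with DISP_CCORR_COEF_4 carrying
+	 * only coeffs[8] in its upper halfword.
+	 */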
mtk_ddp_write(cmdq_pkt, coeffs[2] << 16 | coeffs[3], + &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_1); + mtk_ddp_write(cmdq_pkt, coeffs[4] << 16 | coeffs[5], + &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_2); + mtk_ddp_write(cmdq_pkt, coeffs[6] << 16 | coeffs[7], + &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_3); + mtk_ddp_write(cmdq_pkt, coeffs[8] << 16, + &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_4); +} + +static int mtk_disp_ccorr_bind(struct device *dev, struct device *master, + void *data) +{ + return 0; +} + +static void mtk_disp_ccorr_unbind(struct device *dev, struct device *master, + void *data) +{ +} + +static const struct component_ops mtk_disp_ccorr_component_ops = { + .bind = mtk_disp_ccorr_bind, + .unbind = mtk_disp_ccorr_unbind, +}; + +static int mtk_disp_ccorr_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_disp_ccorr *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), + "failed to get ccorr clk\n"); + + priv->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->regs)) + return dev_err_probe(dev, PTR_ERR(priv->regs), + "failed to ioremap ccorr\n"); + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0); + if (ret) + dev_dbg(dev, "get mediatek,gce-client-reg fail!\n"); +#endif + + priv->data = of_device_get_match_data(dev); + platform_set_drvdata(pdev, priv); + + ret = component_add(dev, &mtk_disp_ccorr_component_ops); + if (ret) + return dev_err_probe(dev, ret, "Failed to add component\n"); + + return 0; +} + +static void mtk_disp_ccorr_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &mtk_disp_ccorr_component_ops); +} + +static const struct mtk_disp_ccorr_data mt8183_ccorr_driver_data = { + .matrix_bits = 10, +}; + +static const struct mtk_disp_ccorr_data mt8192_ccorr_driver_data = { + .matrix_bits = 11, +}; + +static const struct of_device_id mtk_disp_ccorr_driver_dt_match[] = { + { .compatible = "mediatek,mt8183-disp-ccorr", + .data = &mt8183_ccorr_driver_data}, + { .compatible = "mediatek,mt8192-disp-ccorr", + .data = &mt8192_ccorr_driver_data}, + {}, +}; +MODULE_DEVICE_TABLE(of, mtk_disp_ccorr_driver_dt_match); + +struct platform_driver mtk_disp_ccorr_driver = { + .probe = mtk_disp_ccorr_probe, + .remove = mtk_disp_ccorr_remove, + .driver = { + .name = "mediatek-disp-ccorr", + .of_match_table = mtk_disp_ccorr_driver_dt_match, + }, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c new file mode 100644 index 000000000000..39c7de4cdcc1 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017 MediaTek Inc. 
+ */ + +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/soc/mediatek/mtk-cmdq.h> + +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" +#include "mtk_disp_drv.h" +#include "mtk_drm_drv.h" + +#define DISP_COLOR_CFG_MAIN 0x0400 +#define DISP_COLOR_START_MT2701 0x0f00 +#define DISP_COLOR_START_MT8167 0x0400 +#define DISP_COLOR_START_MT8173 0x0c00 +#define DISP_COLOR_START(comp) ((comp)->data->color_offset) +#define DISP_COLOR_WIDTH(comp) (DISP_COLOR_START(comp) + 0x50) +#define DISP_COLOR_HEIGHT(comp) (DISP_COLOR_START(comp) + 0x54) + +#define COLOR_BYPASS_ALL BIT(7) +#define COLOR_SEQ_SEL BIT(13) + +struct mtk_disp_color_data { + unsigned int color_offset; +}; + +/* + * struct mtk_disp_color - DISP_COLOR driver structure + * @crtc: associated crtc to report irq events to + * @data: platform colour driver data + */ +struct mtk_disp_color { + struct drm_crtc *crtc; + struct clk *clk; + void __iomem *regs; + struct cmdq_client_reg cmdq_reg; + const struct mtk_disp_color_data *data; +}; + +int mtk_color_clk_enable(struct device *dev) +{ + struct mtk_disp_color *color = dev_get_drvdata(dev); + + return clk_prepare_enable(color->clk); +} + +void mtk_color_clk_disable(struct device *dev) +{ + struct mtk_disp_color *color = dev_get_drvdata(dev); + + clk_disable_unprepare(color->clk); +} + +void mtk_color_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_disp_color *color = dev_get_drvdata(dev); + + mtk_ddp_write(cmdq_pkt, w, &color->cmdq_reg, color->regs, DISP_COLOR_WIDTH(color)); + mtk_ddp_write(cmdq_pkt, h, &color->cmdq_reg, color->regs, DISP_COLOR_HEIGHT(color)); +} + +void mtk_color_start(struct device *dev) +{ + struct mtk_disp_color *color = dev_get_drvdata(dev); + + writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL, + color->regs + DISP_COLOR_CFG_MAIN); + writel(0x1, color->regs + DISP_COLOR_START(color)); +} + +static int mtk_disp_color_bind(struct device *dev, struct device *master, + void *data) +{ + return 0; +} + +static void mtk_disp_color_unbind(struct device *dev, struct device *master, + void *data) +{ +} + +static const struct component_ops mtk_disp_color_component_ops = { + .bind = mtk_disp_color_bind, + .unbind = mtk_disp_color_unbind, +}; + +static int mtk_disp_color_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_disp_color *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), + "failed to get color clk\n"); + + priv->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->regs)) + return dev_err_probe(dev, PTR_ERR(priv->regs), + "failed to ioremap color\n"); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0); + if (ret) + dev_dbg(dev, "get mediatek,gce-client-reg fail!\n"); +#endif + + priv->data = of_device_get_match_data(dev); + platform_set_drvdata(pdev, priv); + + ret = component_add(dev, &mtk_disp_color_component_ops); + if (ret) + return dev_err_probe(dev, ret, "Failed to add component\n"); + + return 0; +} + +static void mtk_disp_color_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &mtk_disp_color_component_ops); +} + +static const struct mtk_disp_color_data mt2701_color_driver_data = { + .color_offset = 
DISP_COLOR_START_MT2701, +}; + +static const struct mtk_disp_color_data mt8167_color_driver_data = { + .color_offset = DISP_COLOR_START_MT8167, +}; + +static const struct mtk_disp_color_data mt8173_color_driver_data = { + .color_offset = DISP_COLOR_START_MT8173, +}; + +static const struct of_device_id mtk_disp_color_driver_dt_match[] = { + { .compatible = "mediatek,mt2701-disp-color", + .data = &mt2701_color_driver_data}, + { .compatible = "mediatek,mt8167-disp-color", + .data = &mt8167_color_driver_data}, + { .compatible = "mediatek,mt8173-disp-color", + .data = &mt8173_color_driver_data}, + {}, +}; +MODULE_DEVICE_TABLE(of, mtk_disp_color_driver_dt_match); + +struct platform_driver mtk_disp_color_driver = { + .probe = mtk_disp_color_probe, + .remove = mtk_disp_color_remove, + .driver = { + .name = "mediatek-disp-color", + .of_match_table = mtk_disp_color_driver_dt_match, + }, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h new file mode 100644 index 000000000000..679d413bf10b --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 MediaTek Inc. + */ + +#ifndef _MTK_DISP_DRV_H_ +#define _MTK_DISP_DRV_H_ + +#include <linux/soc/mediatek/mtk-cmdq.h> +#include <linux/soc/mediatek/mtk-mmsys.h> +#include <linux/soc/mediatek/mtk-mutex.h> +#include "mtk_mdp_rdma.h" +#include "mtk_plane.h" + +int mtk_aal_clk_enable(struct device *dev); +void mtk_aal_clk_disable(struct device *dev); +void mtk_aal_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt); +unsigned int mtk_aal_gamma_get_lut_size(struct device *dev); +void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state); +void mtk_aal_start(struct device *dev); +void mtk_aal_stop(struct device *dev); + +void mtk_ccorr_ctm_set(struct device *dev, struct drm_crtc_state *state); +int mtk_ccorr_clk_enable(struct device *dev); +void mtk_ccorr_clk_disable(struct device *dev); +void mtk_ccorr_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt); +void mtk_ccorr_start(struct device *dev); +void mtk_ccorr_stop(struct device *dev); + +void mtk_color_bypass_shadow(struct device *dev); +int mtk_color_clk_enable(struct device *dev); +void mtk_color_clk_disable(struct device *dev); +void mtk_color_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt); +void mtk_color_start(struct device *dev); + +void mtk_dither_set_common(void __iomem *regs, struct cmdq_client_reg *cmdq_reg, + unsigned int bpc, unsigned int cfg, + unsigned int dither_en, struct cmdq_pkt *cmdq_pkt); + +void mtk_dpi_start(struct device *dev); +void mtk_dpi_stop(struct device *dev); +unsigned int mtk_dpi_encoder_index(struct device *dev); + +void mtk_dsi_ddp_start(struct device *dev); +void mtk_dsi_ddp_stop(struct device *dev); +unsigned int mtk_dsi_encoder_index(struct device *dev); + +int mtk_gamma_clk_enable(struct device *dev); +void mtk_gamma_clk_disable(struct device *dev); +void mtk_gamma_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt); +unsigned int mtk_gamma_get_lut_size(struct device *dev); +void mtk_gamma_set(struct device *dev, struct drm_crtc_state *state); +void mtk_gamma_start(struct device *dev); +void 
mtk_gamma_stop(struct device *dev); + +int mtk_merge_clk_enable(struct device *dev); +void mtk_merge_clk_disable(struct device *dev); +void mtk_merge_config(struct device *dev, unsigned int width, + unsigned int height, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt); +void mtk_merge_start(struct device *dev); +void mtk_merge_stop(struct device *dev); +void mtk_merge_advance_config(struct device *dev, unsigned int l_w, unsigned int r_w, + unsigned int h, unsigned int vrefresh, unsigned int bpc, + struct cmdq_pkt *cmdq_pkt); +void mtk_merge_start_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt); +void mtk_merge_stop_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt); +enum drm_mode_status mtk_merge_mode_valid(struct device *dev, + const struct drm_display_mode *mode); + +void mtk_ovl_bgclr_in_on(struct device *dev); +void mtk_ovl_bgclr_in_off(struct device *dev); +void mtk_ovl_bypass_shadow(struct device *dev); +int mtk_ovl_clk_enable(struct device *dev); +void mtk_ovl_clk_disable(struct device *dev); +void mtk_ovl_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt); +int mtk_ovl_layer_check(struct device *dev, unsigned int idx, + struct mtk_plane_state *mtk_state); +void mtk_ovl_layer_config(struct device *dev, unsigned int idx, + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt); +unsigned int mtk_ovl_layer_nr(struct device *dev); +void mtk_ovl_layer_on(struct device *dev, unsigned int idx, + struct cmdq_pkt *cmdq_pkt); +void mtk_ovl_layer_off(struct device *dev, unsigned int idx, + struct cmdq_pkt *cmdq_pkt); +void mtk_ovl_start(struct device *dev); +void mtk_ovl_stop(struct device *dev); +unsigned int mtk_ovl_supported_rotations(struct device *dev); +void mtk_ovl_register_vblank_cb(struct device *dev, + void (*vblank_cb)(void *), + void *vblank_cb_data); +void mtk_ovl_unregister_vblank_cb(struct device *dev); +void mtk_ovl_enable_vblank(struct device *dev); +void mtk_ovl_disable_vblank(struct device *dev); +u32 mtk_ovl_get_blend_modes(struct device *dev); +const u32 *mtk_ovl_get_formats(struct device *dev); +size_t mtk_ovl_get_num_formats(struct device *dev); +bool mtk_ovl_is_afbc_supported(struct device *dev); + +void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex); +void mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex); +bool mtk_ovl_adaptor_is_comp_present(struct device_node *node); +void mtk_ovl_adaptor_connect(struct device *dev, struct device *mmsys_dev, + unsigned int next); +void mtk_ovl_adaptor_disconnect(struct device *dev, struct device *mmsys_dev, + unsigned int next); +int mtk_ovl_adaptor_power_on(struct device *dev); +void mtk_ovl_adaptor_power_off(struct device *dev); +int mtk_ovl_adaptor_clk_enable(struct device *dev); +void mtk_ovl_adaptor_clk_disable(struct device *dev); +void mtk_ovl_adaptor_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt); +void mtk_ovl_adaptor_layer_config(struct device *dev, unsigned int idx, + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt); +void mtk_ovl_adaptor_register_vblank_cb(struct device *dev, void (*vblank_cb)(void *), + void *vblank_cb_data); +void mtk_ovl_adaptor_unregister_vblank_cb(struct device *dev); +void mtk_ovl_adaptor_enable_vblank(struct device *dev); +void mtk_ovl_adaptor_disable_vblank(struct device *dev); +void mtk_ovl_adaptor_start(struct device *dev); +void 
mtk_ovl_adaptor_stop(struct device *dev); +unsigned int mtk_ovl_adaptor_layer_nr(struct device *dev); +struct device *mtk_ovl_adaptor_dma_dev_get(struct device *dev); +u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev); +const u32 *mtk_ovl_adaptor_get_formats(struct device *dev); +size_t mtk_ovl_adaptor_get_num_formats(struct device *dev); +enum drm_mode_status mtk_ovl_adaptor_mode_valid(struct device *dev, + const struct drm_display_mode *mode); + +void mtk_rdma_bypass_shadow(struct device *dev); +int mtk_rdma_clk_enable(struct device *dev); +void mtk_rdma_clk_disable(struct device *dev); +void mtk_rdma_config(struct device *dev, unsigned int width, + unsigned int height, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt); +unsigned int mtk_rdma_layer_nr(struct device *dev); +void mtk_rdma_layer_config(struct device *dev, unsigned int idx, + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt); +void mtk_rdma_start(struct device *dev); +void mtk_rdma_stop(struct device *dev); +void mtk_rdma_register_vblank_cb(struct device *dev, + void (*vblank_cb)(void *), + void *vblank_cb_data); +void mtk_rdma_unregister_vblank_cb(struct device *dev); +void mtk_rdma_enable_vblank(struct device *dev); +void mtk_rdma_disable_vblank(struct device *dev); +const u32 *mtk_rdma_get_formats(struct device *dev); +size_t mtk_rdma_get_num_formats(struct device *dev); + +int mtk_mdp_rdma_power_on(struct device *dev); +void mtk_mdp_rdma_power_off(struct device *dev); +int mtk_mdp_rdma_clk_enable(struct device *dev); +void mtk_mdp_rdma_clk_disable(struct device *dev); +void mtk_mdp_rdma_start(struct device *dev, struct cmdq_pkt *cmdq_pkt); +void mtk_mdp_rdma_stop(struct device *dev, struct cmdq_pkt *cmdq_pkt); +void mtk_mdp_rdma_config(struct device *dev, struct mtk_mdp_rdma_cfg *cfg, + struct cmdq_pkt *cmdq_pkt); +const u32 *mtk_mdp_rdma_get_formats(struct device *dev); +size_t mtk_mdp_rdma_get_num_formats(struct device *dev); + +int mtk_padding_clk_enable(struct device *dev); +void mtk_padding_clk_disable(struct device *dev); +void mtk_padding_start(struct device *dev); +void mtk_padding_stop(struct device *dev); +#endif diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c new file mode 100644 index 000000000000..8afd15006df2 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c @@ -0,0 +1,335 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 MediaTek Inc. 
+ */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/soc/mediatek/mtk-cmdq.h> + +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" +#include "mtk_disp_drv.h" +#include "mtk_drm_drv.h" + +#define DISP_GAMMA_EN 0x0000 +#define GAMMA_EN BIT(0) +#define DISP_GAMMA_CFG 0x0020 +#define GAMMA_RELAY_MODE BIT(0) +#define GAMMA_LUT_EN BIT(1) +#define GAMMA_DITHERING BIT(2) +#define GAMMA_LUT_TYPE BIT(2) +#define DISP_GAMMA_SIZE 0x0030 +#define DISP_GAMMA_SIZE_HSIZE GENMASK(28, 16) +#define DISP_GAMMA_SIZE_VSIZE GENMASK(12, 0) +#define DISP_GAMMA_BANK 0x0100 +#define DISP_GAMMA_BANK_BANK GENMASK(1, 0) +#define DISP_GAMMA_BANK_DATA_MODE BIT(2) +#define DISP_GAMMA_LUT 0x0700 +#define DISP_GAMMA_LUT1 0x0b00 + +/* For 10 bit LUT layout, R/G/B are in the same register */ +#define DISP_GAMMA_LUT_10BIT_R GENMASK(29, 20) +#define DISP_GAMMA_LUT_10BIT_G GENMASK(19, 10) +#define DISP_GAMMA_LUT_10BIT_B GENMASK(9, 0) + +/* For 12 bit LUT layout, R/G are in LUT, B is in LUT1 */ +#define DISP_GAMMA_LUT_12BIT_R GENMASK(11, 0) +#define DISP_GAMMA_LUT_12BIT_G GENMASK(23, 12) +#define DISP_GAMMA_LUT_12BIT_B GENMASK(11, 0) + +struct mtk_disp_gamma_data { + bool has_dither; + bool lut_diff; + u16 lut_bank_size; + u16 lut_size; + u8 lut_bits; +}; + +/** + * struct mtk_disp_gamma - Display Gamma driver structure + * @clk: clock for DISP_GAMMA block + * @regs: MMIO registers base + * @cmdq_reg: CMDQ Client register + * @data: platform data for DISP_GAMMA + */ +struct mtk_disp_gamma { + struct clk *clk; + void __iomem *regs; + struct cmdq_client_reg cmdq_reg; + const struct mtk_disp_gamma_data *data; +}; + +int mtk_gamma_clk_enable(struct device *dev) +{ + struct mtk_disp_gamma *gamma = dev_get_drvdata(dev); + + return clk_prepare_enable(gamma->clk); +} + +void mtk_gamma_clk_disable(struct device *dev) +{ + struct mtk_disp_gamma *gamma = dev_get_drvdata(dev); + + clk_disable_unprepare(gamma->clk); +} + +unsigned int mtk_gamma_get_lut_size(struct device *dev) +{ + struct mtk_disp_gamma *gamma = dev_get_drvdata(dev); + + if (gamma && gamma->data) + return gamma->data->lut_size; + return 0; +} + +static bool mtk_gamma_lut_is_descending(struct drm_color_lut *lut, u32 lut_size) +{ + u64 first, last; + int last_entry = lut_size - 1; + + first = lut[0].red + lut[0].green + lut[0].blue; + last = lut[last_entry].red + lut[last_entry].green + lut[last_entry].blue; + + return !!(first > last); +} + +/* + * SoCs supporting 12-bits LUTs are using a new register layout that does + * always support (by HW) both 12-bits and 10-bits LUT but, on those, we + * ignore the support for 10-bits in this driver and always use 12-bits. + * + * Summarizing: + * - SoC HW support 9/10-bits LUT only + * - Old register layout + * - 10-bits LUT supported + * - 9-bits LUT not supported + * - SoC HW support both 10/12bits LUT + * - New register layout + * - 12-bits LUT supported + * - 10-its LUT not supported + */ +void mtk_gamma_set(struct device *dev, struct drm_crtc_state *state) +{ + struct mtk_disp_gamma *gamma = dev_get_drvdata(dev); + void __iomem *lut0_base = gamma->regs + DISP_GAMMA_LUT; + void __iomem *lut1_base = gamma->regs + DISP_GAMMA_LUT1; + u32 cfg_val, data_mode, lbank_val, word[2]; + u8 lut_bits = gamma->data->lut_bits; + int cur_bank, num_lut_banks; + struct drm_color_lut *lut; + unsigned int i; + + /* If there's no gamma lut there's nothing to do here. 
*/ + if (!state->gamma_lut) + return; + + num_lut_banks = gamma->data->lut_size / gamma->data->lut_bank_size; + lut = (struct drm_color_lut *)state->gamma_lut->data; + + /* Switch to 12 bits data mode if supported */ + data_mode = FIELD_PREP(DISP_GAMMA_BANK_DATA_MODE, !!(lut_bits == 12)); + + for (cur_bank = 0; cur_bank < num_lut_banks; cur_bank++) { + + /* Switch gamma bank and set data mode before writing LUT */ + if (num_lut_banks > 1) { + lbank_val = FIELD_PREP(DISP_GAMMA_BANK_BANK, cur_bank); + lbank_val |= data_mode; + writel(lbank_val, gamma->regs + DISP_GAMMA_BANK); + } + + for (i = 0; i < gamma->data->lut_bank_size; i++) { + int n = cur_bank * gamma->data->lut_bank_size + i; + struct drm_color_lut diff, hwlut; + + hwlut.red = drm_color_lut_extract(lut[n].red, lut_bits); + hwlut.green = drm_color_lut_extract(lut[n].green, lut_bits); + hwlut.blue = drm_color_lut_extract(lut[n].blue, lut_bits); + + if (!gamma->data->lut_diff || (i % 2 == 0)) { + if (lut_bits == 12) { + word[0] = FIELD_PREP(DISP_GAMMA_LUT_12BIT_R, hwlut.red); + word[0] |= FIELD_PREP(DISP_GAMMA_LUT_12BIT_G, hwlut.green); + word[1] = FIELD_PREP(DISP_GAMMA_LUT_12BIT_B, hwlut.blue); + } else { + word[0] = FIELD_PREP(DISP_GAMMA_LUT_10BIT_R, hwlut.red); + word[0] |= FIELD_PREP(DISP_GAMMA_LUT_10BIT_G, hwlut.green); + word[0] |= FIELD_PREP(DISP_GAMMA_LUT_10BIT_B, hwlut.blue); + } + } else { + diff.red = lut[n].red - lut[n - 1].red; + diff.red = drm_color_lut_extract(diff.red, lut_bits); + + diff.green = lut[n].green - lut[n - 1].green; + diff.green = drm_color_lut_extract(diff.green, lut_bits); + + diff.blue = lut[n].blue - lut[n - 1].blue; + diff.blue = drm_color_lut_extract(diff.blue, lut_bits); + + if (lut_bits == 12) { + word[0] = FIELD_PREP(DISP_GAMMA_LUT_12BIT_R, diff.red); + word[0] |= FIELD_PREP(DISP_GAMMA_LUT_12BIT_G, diff.green); + word[1] = FIELD_PREP(DISP_GAMMA_LUT_12BIT_B, diff.blue); + } else { + word[0] = FIELD_PREP(DISP_GAMMA_LUT_10BIT_R, diff.red); + word[0] |= FIELD_PREP(DISP_GAMMA_LUT_10BIT_G, diff.green); + word[0] |= FIELD_PREP(DISP_GAMMA_LUT_10BIT_B, diff.blue); + } + } + writel(word[0], lut0_base + i * 4); + if (lut_bits == 12) + writel(word[1], lut1_base + i * 4); + } + } + + cfg_val = readl(gamma->regs + DISP_GAMMA_CFG); + + if (!gamma->data->has_dither) { + /* Descending or Rising LUT */ + if (mtk_gamma_lut_is_descending(lut, gamma->data->lut_size - 1)) + cfg_val |= FIELD_PREP(GAMMA_LUT_TYPE, 1); + else + cfg_val &= ~GAMMA_LUT_TYPE; + } + + /* Enable the gamma table */ + cfg_val |= FIELD_PREP(GAMMA_LUT_EN, 1); + + /* Disable RELAY mode to pass the processed image */ + cfg_val &= ~GAMMA_RELAY_MODE; + + writel(cfg_val, gamma->regs + DISP_GAMMA_CFG); +} + +void mtk_gamma_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_disp_gamma *gamma = dev_get_drvdata(dev); + u32 sz; + + sz = FIELD_PREP(DISP_GAMMA_SIZE_HSIZE, w); + sz |= FIELD_PREP(DISP_GAMMA_SIZE_VSIZE, h); + + mtk_ddp_write(cmdq_pkt, sz, &gamma->cmdq_reg, gamma->regs, DISP_GAMMA_SIZE); + if (gamma->data && gamma->data->has_dither) + mtk_dither_set_common(gamma->regs, &gamma->cmdq_reg, bpc, + DISP_GAMMA_CFG, GAMMA_DITHERING, cmdq_pkt); +} + +void mtk_gamma_start(struct device *dev) +{ + struct mtk_disp_gamma *gamma = dev_get_drvdata(dev); + + writel(GAMMA_EN, gamma->regs + DISP_GAMMA_EN); +} + +void mtk_gamma_stop(struct device *dev) +{ + struct mtk_disp_gamma *gamma = dev_get_drvdata(dev); + + writel_relaxed(0x0, gamma->regs + DISP_GAMMA_EN); +} + 
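For reference, the 10-bit absolute-value packing performed by mtk_gamma_set() above can be reproduced in a few lines. The stand-alone sketch below is illustrative only and not part of this patch: lut_extract() and pack_gamma_10bit() are made-up names, and lut_extract() approximates the round-to-nearest reduction done by the kernel's drm_color_lut_extract() helper.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustration only: reduce a 16-bit colour component to 'bits' bits with
 * round-to-nearest, approximating the kernel's drm_color_lut_extract().
 */
static uint32_t lut_extract(uint16_t user_input, int bits)
{
	uint32_t val = user_input;
	uint32_t max = 0xffff >> (16 - bits);

	val += 1u << (16 - bits - 1);
	if (val > 0xffff)
		val = 0xffff;
	val >>= 16 - bits;

	return val > max ? max : val;
}

/* Pack one absolute R/G/B entry into the 10-bit LUT word layout used above. */
static uint32_t pack_gamma_10bit(uint16_t r, uint16_t g, uint16_t b)
{
	return (lut_extract(r, 10) << 20) |	/* DISP_GAMMA_LUT_10BIT_R */
	       (lut_extract(g, 10) << 10) |	/* DISP_GAMMA_LUT_10BIT_G */
	        lut_extract(b, 10);		/* DISP_GAMMA_LUT_10BIT_B */
}

int main(void)
{
	/* Mid-grey in all channels maps to 0x200 per 10-bit component. */
	printf("0x%08x\n", pack_gamma_10bit(0x8000, 0x8000, 0x8000));
	return 0;
}

In diff mode (lut_diff), odd-indexed entries store the delta from the previous entry rather than the absolute value before being packed the same way, and the 12-bit layout moves the blue component into DISP_GAMMA_LUT1, exactly as in the branches of mtk_gamma_set() above.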
+static int mtk_disp_gamma_bind(struct device *dev, struct device *master, + void *data) +{ + return 0; +} + +static void mtk_disp_gamma_unbind(struct device *dev, struct device *master, + void *data) +{ +} + +static const struct component_ops mtk_disp_gamma_component_ops = { + .bind = mtk_disp_gamma_bind, + .unbind = mtk_disp_gamma_unbind, +}; + +static int mtk_disp_gamma_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_disp_gamma *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), + "failed to get gamma clk\n"); + + priv->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->regs)) + return dev_err_probe(dev, PTR_ERR(priv->regs), + "failed to ioremap gamma\n"); + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0); + if (ret) + dev_dbg(dev, "get mediatek,gce-client-reg fail!\n"); +#endif + + priv->data = of_device_get_match_data(dev); + platform_set_drvdata(pdev, priv); + + ret = component_add(dev, &mtk_disp_gamma_component_ops); + if (ret) + return dev_err_probe(dev, ret, "Failed to add component\n"); + + return 0; +} + +static void mtk_disp_gamma_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &mtk_disp_gamma_component_ops); +} + +static const struct mtk_disp_gamma_data mt8173_gamma_driver_data = { + .has_dither = true, + .lut_bank_size = 512, + .lut_bits = 10, + .lut_size = 512, +}; + +static const struct mtk_disp_gamma_data mt8183_gamma_driver_data = { + .lut_bank_size = 512, + .lut_bits = 10, + .lut_diff = true, + .lut_size = 512, +}; + +static const struct mtk_disp_gamma_data mt8195_gamma_driver_data = { + .lut_bank_size = 256, + .lut_bits = 12, + .lut_diff = true, + .lut_size = 1024, +}; + +static const struct of_device_id mtk_disp_gamma_driver_dt_match[] = { + { .compatible = "mediatek,mt8173-disp-gamma", + .data = &mt8173_gamma_driver_data}, + { .compatible = "mediatek,mt8183-disp-gamma", + .data = &mt8183_gamma_driver_data}, + { .compatible = "mediatek,mt8195-disp-gamma", + .data = &mt8195_gamma_driver_data}, + {}, +}; +MODULE_DEVICE_TABLE(of, mtk_disp_gamma_driver_dt_match); + +struct platform_driver mtk_disp_gamma_driver = { + .probe = mtk_disp_gamma_probe, + .remove = mtk_disp_gamma_remove, + .driver = { + .name = "mediatek-disp-gamma", + .of_match_table = mtk_disp_gamma_driver_dt_match, + }, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c new file mode 100644 index 000000000000..b174dda091d3 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2021 MediaTek Inc. 
+ */ + +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/soc/mediatek/mtk-cmdq.h> + +#include "mtk_ddp_comp.h" +#include "mtk_drm_drv.h" +#include "mtk_disp_drv.h" + +#define DISP_REG_MERGE_CTRL 0x000 +#define MERGE_EN 1 +#define DISP_REG_MERGE_CFG_0 0x010 +#define DISP_REG_MERGE_CFG_1 0x014 +#define DISP_REG_MERGE_CFG_4 0x020 +#define DISP_REG_MERGE_CFG_10 0x038 +/* no swap */ +#define SWAP_MODE 0 +#define FLD_SWAP_MODE GENMASK(4, 0) +#define DISP_REG_MERGE_CFG_12 0x040 +#define CFG_10_10_1PI_2PO_BUF_MODE 6 +#define CFG_10_10_2PI_2PO_BUF_MODE 8 +#define CFG_11_10_1PI_2PO_MERGE 18 +#define FLD_CFG_MERGE_MODE GENMASK(4, 0) +#define DISP_REG_MERGE_CFG_24 0x070 +#define DISP_REG_MERGE_CFG_25 0x074 +#define DISP_REG_MERGE_CFG_26 0x078 +#define DISP_REG_MERGE_CFG_27 0x07c +#define DISP_REG_MERGE_CFG_36 0x0a0 +#define ULTRA_EN BIT(0) +#define PREULTRA_EN BIT(4) +#define DISP_REG_MERGE_CFG_37 0x0a4 +/* 0: Off, 1: SRAM0, 2: SRAM1, 3: SRAM0 + SRAM1 */ +#define BUFFER_MODE 3 +#define FLD_BUFFER_MODE GENMASK(1, 0) +/* + * For the ultra and preultra settings, 6us ~ 9us is experience value + * and the maximum frequency of mmsys clock is 594MHz. + */ +#define DISP_REG_MERGE_CFG_40 0x0b0 +/* 6 us, 594M pixel/sec */ +#define ULTRA_TH_LOW (6 * 594) +/* 8 us, 594M pixel/sec */ +#define ULTRA_TH_HIGH (8 * 594) +#define FLD_ULTRA_TH_LOW GENMASK(15, 0) +#define FLD_ULTRA_TH_HIGH GENMASK(31, 16) +#define DISP_REG_MERGE_CFG_41 0x0b4 +/* 8 us, 594M pixel/sec */ +#define PREULTRA_TH_LOW (8 * 594) +/* 9 us, 594M pixel/sec */ +#define PREULTRA_TH_HIGH (9 * 594) +#define FLD_PREULTRA_TH_LOW GENMASK(15, 0) +#define FLD_PREULTRA_TH_HIGH GENMASK(31, 16) + +#define DISP_REG_MERGE_MUTE_0 0xf00 + +struct mtk_disp_merge { + void __iomem *regs; + struct clk *clk; + struct clk *async_clk; + struct cmdq_client_reg cmdq_reg; + bool fifo_en; + bool mute_support; + struct reset_control *reset_ctl; +}; + +void mtk_merge_start(struct device *dev) +{ + mtk_merge_start_cmdq(dev, NULL); +} + +void mtk_merge_stop(struct device *dev) +{ + mtk_merge_stop_cmdq(dev, NULL); +} + +void mtk_merge_start_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_disp_merge *priv = dev_get_drvdata(dev); + + if (priv->mute_support) + mtk_ddp_write(cmdq_pkt, 0x0, &priv->cmdq_reg, priv->regs, + DISP_REG_MERGE_MUTE_0); + + mtk_ddp_write(cmdq_pkt, 1, &priv->cmdq_reg, priv->regs, + DISP_REG_MERGE_CTRL); +} + +void mtk_merge_stop_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_disp_merge *priv = dev_get_drvdata(dev); + + if (priv->mute_support) + mtk_ddp_write(cmdq_pkt, 0x1, &priv->cmdq_reg, priv->regs, + DISP_REG_MERGE_MUTE_0); + + mtk_ddp_write(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs, + DISP_REG_MERGE_CTRL); + + if (!cmdq_pkt && priv->async_clk) + reset_control_reset(priv->reset_ctl); +} + +static void mtk_merge_fifo_setting(struct mtk_disp_merge *priv, + struct cmdq_pkt *cmdq_pkt) +{ + mtk_ddp_write(cmdq_pkt, ULTRA_EN | PREULTRA_EN, + &priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_36); + + mtk_ddp_write_mask(cmdq_pkt, BUFFER_MODE, + &priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_37, + FLD_BUFFER_MODE); + + mtk_ddp_write_mask(cmdq_pkt, ULTRA_TH_LOW | ULTRA_TH_HIGH << 16, + &priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_40, + FLD_ULTRA_TH_LOW | FLD_ULTRA_TH_HIGH); + + mtk_ddp_write_mask(cmdq_pkt, PREULTRA_TH_LOW | PREULTRA_TH_HIGH << 16, + &priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CFG_41, + FLD_PREULTRA_TH_LOW | 
FLD_PREULTRA_TH_HIGH); +} + +void mtk_merge_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + mtk_merge_advance_config(dev, w, 0, h, vrefresh, bpc, cmdq_pkt); +} + +void mtk_merge_advance_config(struct device *dev, unsigned int l_w, unsigned int r_w, + unsigned int h, unsigned int vrefresh, unsigned int bpc, + struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_disp_merge *priv = dev_get_drvdata(dev); + unsigned int mode = CFG_10_10_1PI_2PO_BUF_MODE; + + if (!h || !l_w) { + dev_err(dev, "%s: input width(%d) or height(%d) is invalid\n", __func__, l_w, h); + return; + } + + if (priv->fifo_en) { + mtk_merge_fifo_setting(priv, cmdq_pkt); + mode = CFG_10_10_2PI_2PO_BUF_MODE; + } + + if (r_w) + mode = CFG_11_10_1PI_2PO_MERGE; + + mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs, + DISP_REG_MERGE_CFG_0); + mtk_ddp_write(cmdq_pkt, h << 16 | r_w, &priv->cmdq_reg, priv->regs, + DISP_REG_MERGE_CFG_1); + mtk_ddp_write(cmdq_pkt, h << 16 | (l_w + r_w), &priv->cmdq_reg, priv->regs, + DISP_REG_MERGE_CFG_4); + /* + * DISP_REG_MERGE_CFG_24 is merge SRAM0 w/h + * DISP_REG_MERGE_CFG_25 is merge SRAM1 w/h. + * If r_w > 0, the merge is in merge mode (input0 and input1 merge together), + * the input0 goes to SRAM0, and input1 goes to SRAM1. + * If r_w = 0, the merge is in buffer mode, the input goes through SRAM0 and + * then to SRAM1. Both SRAM0 and SRAM1 are set to the same size. + */ + mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs, + DISP_REG_MERGE_CFG_24); + if (r_w) + mtk_ddp_write(cmdq_pkt, h << 16 | r_w, &priv->cmdq_reg, priv->regs, + DISP_REG_MERGE_CFG_25); + else + mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs, + DISP_REG_MERGE_CFG_25); + + /* + * DISP_REG_MERGE_CFG_26 and DISP_REG_MERGE_CFG_27 is only used in LR merge. + * Only take effect when the merge is setting to merge mode. 
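+	 * ("Merge mode" here means r_w is non-zero, i.e. FLD_CFG_MERGE_MODE is
+	 * programmed with CFG_11_10_1PI_2PO_MERGE further below.)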
+ */ + mtk_ddp_write(cmdq_pkt, h << 16 | l_w, &priv->cmdq_reg, priv->regs, + DISP_REG_MERGE_CFG_26); + mtk_ddp_write(cmdq_pkt, h << 16 | r_w, &priv->cmdq_reg, priv->regs, + DISP_REG_MERGE_CFG_27); + + mtk_ddp_write_mask(cmdq_pkt, SWAP_MODE, &priv->cmdq_reg, priv->regs, + DISP_REG_MERGE_CFG_10, FLD_SWAP_MODE); + mtk_ddp_write_mask(cmdq_pkt, mode, &priv->cmdq_reg, priv->regs, + DISP_REG_MERGE_CFG_12, FLD_CFG_MERGE_MODE); +} + +int mtk_merge_clk_enable(struct device *dev) +{ + int ret = 0; + struct mtk_disp_merge *priv = dev_get_drvdata(dev); + + ret = clk_prepare_enable(priv->clk); + if (ret) { + dev_err(dev, "merge clk prepare enable failed\n"); + return ret; + } + + ret = clk_prepare_enable(priv->async_clk); + if (ret) { + /* should clean up the state of priv->clk */ + clk_disable_unprepare(priv->clk); + + dev_err(dev, "async clk prepare enable failed\n"); + return ret; + } + + return ret; +} + +void mtk_merge_clk_disable(struct device *dev) +{ + struct mtk_disp_merge *priv = dev_get_drvdata(dev); + + clk_disable_unprepare(priv->async_clk); + clk_disable_unprepare(priv->clk); +} + +enum drm_mode_status mtk_merge_mode_valid(struct device *dev, + const struct drm_display_mode *mode) +{ + struct mtk_disp_merge *priv = dev_get_drvdata(dev); + unsigned long rate; + + rate = clk_get_rate(priv->clk); + + /* Convert to KHz and round the number */ + rate = (rate + 500) / 1000; + + if (rate && mode->clock > rate) { + dev_dbg(dev, "invalid clock: %d (>%lu)\n", mode->clock, rate); + return MODE_CLOCK_HIGH; + } + + /* + * Measure the bandwidth requirement of hardware prefetch (per frame) + * + * let N = prefetch buffer size in lines + * (ex. N=3, then prefetch buffer size = 3 lines) + * + * prefetch size = htotal * N (pixels) + * time per line = 1 / fps / vtotal (seconds) + * duration = vbp * time per line + * = vbp / fps / vtotal + * + * data rate = prefetch size / duration + * = htotal * N / (vbp / fps / vtotal) + * = htotal * vtotal * fps * N / vbp + * = clk * N / vbp (pixels per second) + * + * Say 4K60 (CEA-861) is the maximum mode supported by the SoC + * data rate = 594000K * N / 72 = 8250 (standard) + * (remove K * N due to the same unit) + * + * For 2560x1440@144 (clk=583600K, vbp=17): + * data rate = 583600 / 17 ~= 34329 > 8250 (NG) + * + * For 2560x1440@120 (clk=497760K, vbp=77): + * data rate = 497760 / 77 ~= 6464 < 8250 (OK) + * + * A non-standard 4K60 timing (clk=521280K, vbp=54) + * data rate = 521280 / 54 ~= 9653 > 8250 (NG) + * + * Bandwidth requirement of hardware prefetch increases significantly + * when the VBP decreases (more than 4x in this example). + * + * The proposed formula is only one way to estimate whether our SoC + * supports the mode setting. The basic idea behind it is just to check + * if the data rate requirement is too high (directly proportional to + * pixel clock, inversely proportional to vbp). Please adjust the + * function if it doesn't fit your situation in the future. 
+ */ + rate = mode->clock / (mode->vtotal - mode->vsync_end); + + if (rate > 8250) { + dev_dbg(dev, "invalid rate: %lu (>8250): " DRM_MODE_FMT "\n", + rate, DRM_MODE_ARG(mode)); + return MODE_BAD; + } + + return MODE_OK; +} + +static int mtk_disp_merge_bind(struct device *dev, struct device *master, + void *data) +{ + return 0; +} + +static void mtk_disp_merge_unbind(struct device *dev, struct device *master, + void *data) +{ +} + +static const struct component_ops mtk_disp_merge_component_ops = { + .bind = mtk_disp_merge_bind, + .unbind = mtk_disp_merge_unbind, +}; + +static int mtk_disp_merge_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_disp_merge *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->regs)) + return dev_err_probe(dev, PTR_ERR(priv->regs), + "failed to ioremap merge\n"); + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), + "failed to get merge clk\n"); + + priv->async_clk = devm_clk_get_optional(dev, "merge_async"); + if (IS_ERR(priv->async_clk)) + return dev_err_probe(dev, PTR_ERR(priv->async_clk), + "failed to get merge async clock\n"); + + if (priv->async_clk) { + priv->reset_ctl = devm_reset_control_get_optional_exclusive(dev, NULL); + if (IS_ERR(priv->reset_ctl)) + return PTR_ERR(priv->reset_ctl); + } + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0); + if (ret) + dev_dbg(dev, "get mediatek,gce-client-reg fail!\n"); +#endif + + priv->fifo_en = of_property_read_bool(dev->of_node, + "mediatek,merge-fifo-en"); + + priv->mute_support = of_property_read_bool(dev->of_node, + "mediatek,merge-mute"); + platform_set_drvdata(pdev, priv); + + ret = component_add(dev, &mtk_disp_merge_component_ops); + if (ret != 0) + return dev_err_probe(dev, ret, "Failed to add component\n"); + + return 0; +} + +static void mtk_disp_merge_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &mtk_disp_merge_component_ops); +} + +static const struct of_device_id mtk_disp_merge_driver_dt_match[] = { + { .compatible = "mediatek,mt8195-disp-merge", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, mtk_disp_merge_driver_dt_match); + +struct platform_driver mtk_disp_merge_driver = { + .probe = mtk_disp_merge_probe, + .remove = mtk_disp_merge_remove, + .driver = { + .name = "mediatek-disp-merge", + .of_match_table = mtk_disp_merge_driver_dt_match, + }, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c new file mode 100644 index 000000000000..e0236353d499 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -0,0 +1,768 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015 MediaTek Inc. 
+ */ + +#include <drm/drm_blend.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_framebuffer.h> + +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/soc/mediatek/mtk-cmdq.h> + +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" +#include "mtk_disp_drv.h" +#include "mtk_drm_drv.h" + +#define DISP_REG_OVL_INTEN 0x0004 +#define OVL_FME_CPL_INT BIT(1) +#define DISP_REG_OVL_INTSTA 0x0008 +#define DISP_REG_OVL_EN 0x000c +#define DISP_REG_OVL_RST 0x0014 +#define DISP_REG_OVL_ROI_SIZE 0x0020 +#define DISP_REG_OVL_DATAPATH_CON 0x0024 +#define OVL_LAYER_SMI_ID_EN BIT(0) +#define OVL_BGCLR_SEL_IN BIT(2) +#define OVL_LAYER_AFBC_EN(n) BIT(4+n) +#define DISP_REG_OVL_ROI_BGCLR 0x0028 +#define DISP_REG_OVL_SRC_CON 0x002c +#define DISP_REG_OVL_CON(n) (0x0030 + 0x20 * (n)) +#define DISP_REG_OVL_SRC_SIZE(n) (0x0038 + 0x20 * (n)) +#define DISP_REG_OVL_OFFSET(n) (0x003c + 0x20 * (n)) +#define DISP_REG_OVL_PITCH_MSB(n) (0x0040 + 0x20 * (n)) +#define OVL_PITCH_MSB_2ND_SUBBUF BIT(16) +#define DISP_REG_OVL_PITCH(n) (0x0044 + 0x20 * (n)) +#define OVL_CONST_BLEND BIT(28) +#define DISP_REG_OVL_RDMA_CTRL(n) (0x00c0 + 0x20 * (n)) +#define DISP_REG_OVL_RDMA_GMC(n) (0x00c8 + 0x20 * (n)) +#define DISP_REG_OVL_ADDR_MT2701 0x0040 +#define DISP_REG_OVL_CLRFMT_EXT 0x02d0 +#define OVL_CON_CLRFMT_BIT_DEPTH_MASK(n) (GENMASK(1, 0) << (4 * (n))) +#define OVL_CON_CLRFMT_BIT_DEPTH(depth, n) ((depth) << (4 * (n))) +#define OVL_CON_CLRFMT_8_BIT (0) +#define OVL_CON_CLRFMT_10_BIT (1) +#define DISP_REG_OVL_ADDR_MT8173 0x0f40 +#define DISP_REG_OVL_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n)) +#define DISP_REG_OVL_HDR_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n) + 0x04) +#define DISP_REG_OVL_HDR_PITCH(ovl, n) ((ovl)->data->addr + 0x20 * (n) + 0x08) + +#define GMC_THRESHOLD_BITS 16 +#define GMC_THRESHOLD_HIGH ((1 << GMC_THRESHOLD_BITS) / 4) +#define GMC_THRESHOLD_LOW ((1 << GMC_THRESHOLD_BITS) / 8) + +#define OVL_CON_CLRFMT_MAN BIT(23) +#define OVL_CON_BYTE_SWAP BIT(24) + +/* OVL_CON_RGB_SWAP works only if OVL_CON_CLRFMT_MAN is enabled */ +#define OVL_CON_RGB_SWAP BIT(25) + +#define OVL_CON_CLRFMT_RGB (1 << 12) +#define OVL_CON_CLRFMT_ARGB8888 (2 << 12) +#define OVL_CON_CLRFMT_RGBA8888 (3 << 12) +#define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP) +#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP) +#define OVL_CON_CLRFMT_UYVY (4 << 12) +#define OVL_CON_CLRFMT_YUYV (5 << 12) +#define OVL_CON_MTX_YUV_TO_RGB (6 << 16) +#define OVL_CON_CLRFMT_PARGB8888 ((3 << 12) | OVL_CON_CLRFMT_MAN) +#define OVL_CON_CLRFMT_PABGR8888 (OVL_CON_CLRFMT_PARGB8888 | OVL_CON_RGB_SWAP) +#define OVL_CON_CLRFMT_PBGRA8888 (OVL_CON_CLRFMT_PARGB8888 | OVL_CON_BYTE_SWAP) +#define OVL_CON_CLRFMT_PRGBA8888 (OVL_CON_CLRFMT_PABGR8888 | OVL_CON_BYTE_SWAP) +#define OVL_CON_CLRFMT_RGB565(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \ + 0 : OVL_CON_CLRFMT_RGB) +#define OVL_CON_CLRFMT_RGB888(ovl) ((ovl)->data->fmt_rgb565_is_0 ? 
\ + OVL_CON_CLRFMT_RGB : 0) +#define OVL_CON_AEN BIT(8) +#define OVL_CON_ALPHA 0xff +#define OVL_CON_VIRT_FLIP BIT(9) +#define OVL_CON_HORZ_FLIP BIT(10) + +#define OVL_COLOR_ALPHA GENMASK(31, 24) + +static inline bool is_10bit_rgb(u32 fmt) +{ + switch (fmt) { + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_RGBX1010102: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_BGRX1010102: + case DRM_FORMAT_BGRA1010102: + return true; + } + return false; +} + +static const u32 mt8173_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB565, + DRM_FORMAT_UYVY, + DRM_FORMAT_YUYV, +}; + +static const u32 mt8195_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_BGRX1010102, + DRM_FORMAT_BGRA1010102, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_RGBX1010102, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB565, + DRM_FORMAT_UYVY, + DRM_FORMAT_YUYV, +}; + +struct mtk_disp_ovl_data { + unsigned int addr; + unsigned int gmc_bits; + unsigned int layer_nr; + bool fmt_rgb565_is_0; + bool smi_id_en; + bool supports_afbc; + const u32 blend_modes; + const u32 *formats; + size_t num_formats; + bool supports_clrfmt_ext; +}; + +/* + * struct mtk_disp_ovl - DISP_OVL driver structure + * @crtc: associated crtc to report vblank events to + * @data: platform data + */ +struct mtk_disp_ovl { + struct drm_crtc *crtc; + struct clk *clk; + void __iomem *regs; + struct cmdq_client_reg cmdq_reg; + const struct mtk_disp_ovl_data *data; + void (*vblank_cb)(void *data); + void *vblank_cb_data; +}; + +static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id) +{ + struct mtk_disp_ovl *priv = dev_id; + + /* Clear frame completion interrupt */ + writel(0x0, priv->regs + DISP_REG_OVL_INTSTA); + + if (!priv->vblank_cb) + return IRQ_NONE; + + priv->vblank_cb(priv->vblank_cb_data); + + return IRQ_HANDLED; +} + +void mtk_ovl_register_vblank_cb(struct device *dev, + void (*vblank_cb)(void *), + void *vblank_cb_data) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + ovl->vblank_cb = vblank_cb; + ovl->vblank_cb_data = vblank_cb_data; +} + +void mtk_ovl_unregister_vblank_cb(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + ovl->vblank_cb = NULL; + ovl->vblank_cb_data = NULL; +} + +void mtk_ovl_enable_vblank(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + writel(0x0, ovl->regs + DISP_REG_OVL_INTSTA); + writel_relaxed(OVL_FME_CPL_INT, ovl->regs + DISP_REG_OVL_INTEN); +} + +void mtk_ovl_disable_vblank(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_INTEN); +} + +u32 mtk_ovl_get_blend_modes(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + return ovl->data->blend_modes; +} + +const u32 *mtk_ovl_get_formats(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + return ovl->data->formats; +} + +size_t mtk_ovl_get_num_formats(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + return ovl->data->num_formats; +} + +bool 
mtk_ovl_is_afbc_supported(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + return ovl->data->supports_afbc; +} + +int mtk_ovl_clk_enable(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + return clk_prepare_enable(ovl->clk); +} + +void mtk_ovl_clk_disable(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + clk_disable_unprepare(ovl->clk); +} + +void mtk_ovl_start(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + if (ovl->data->smi_id_en) { + unsigned int reg; + + reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON); + reg = reg | OVL_LAYER_SMI_ID_EN; + writel_relaxed(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON); + } + writel_relaxed(0x1, ovl->regs + DISP_REG_OVL_EN); +} + +void mtk_ovl_stop(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_EN); + if (ovl->data->smi_id_en) { + unsigned int reg; + + reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON); + reg = reg & ~OVL_LAYER_SMI_ID_EN; + writel_relaxed(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON); + } +} + +static void mtk_ovl_set_afbc(struct mtk_disp_ovl *ovl, struct cmdq_pkt *cmdq_pkt, + int idx, bool enabled) +{ + mtk_ddp_write_mask(cmdq_pkt, enabled ? OVL_LAYER_AFBC_EN(idx) : 0, + &ovl->cmdq_reg, ovl->regs, + DISP_REG_OVL_DATAPATH_CON, OVL_LAYER_AFBC_EN(idx)); +} + +static void mtk_ovl_set_bit_depth(struct device *dev, int idx, u32 format, + struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + unsigned int bit_depth = OVL_CON_CLRFMT_8_BIT; + + if (!ovl->data->supports_clrfmt_ext) + return; + + if (is_10bit_rgb(format)) + bit_depth = OVL_CON_CLRFMT_10_BIT; + + mtk_ddp_write_mask(cmdq_pkt, OVL_CON_CLRFMT_BIT_DEPTH(bit_depth, idx), + &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_CLRFMT_EXT, + OVL_CON_CLRFMT_BIT_DEPTH_MASK(idx)); +} + +void mtk_ovl_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + if (w != 0 && h != 0) + mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, &ovl->cmdq_reg, ovl->regs, + DISP_REG_OVL_ROI_SIZE); + + /* + * The background color must be opaque black (ARGB), + * otherwise the alpha blending will have no effect + */ + mtk_ddp_write_relaxed(cmdq_pkt, OVL_COLOR_ALPHA, &ovl->cmdq_reg, + ovl->regs, DISP_REG_OVL_ROI_BGCLR); + + mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST); + mtk_ddp_write(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST); +} + +unsigned int mtk_ovl_layer_nr(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + return ovl->data->layer_nr; +} + +unsigned int mtk_ovl_supported_rotations(struct device *dev) +{ + return DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | + DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y; +} + +int mtk_ovl_layer_check(struct device *dev, unsigned int idx, + struct mtk_plane_state *mtk_state) +{ + struct drm_plane_state *state = &mtk_state->base; + + /* check if any unsupported rotation is set */ + if (state->rotation & ~mtk_ovl_supported_rotations(dev)) + return -EINVAL; + + /* + * TODO: Rotating/reflecting YUV buffers is not supported at this time. + * Only RGB[AX] variants are supported. + * Since DRM_MODE_ROTATE_0 means "no rotation", we should not + * reject layers with this property. 
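+	 * In practice this rejects a YUV framebuffer as soon as any
+	 * REFLECT_X, REFLECT_Y or ROTATE_180 bit is set on top of ROTATE_0.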
+ */ + if (state->fb->format->is_yuv && (state->rotation & ~DRM_MODE_ROTATE_0)) + return -EINVAL; + + return 0; +} + +void mtk_ovl_layer_on(struct device *dev, unsigned int idx, + struct cmdq_pkt *cmdq_pkt) +{ + unsigned int gmc_thrshd_l; + unsigned int gmc_thrshd_h; + unsigned int gmc_value; + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs, + DISP_REG_OVL_RDMA_CTRL(idx)); + gmc_thrshd_l = GMC_THRESHOLD_LOW >> + (GMC_THRESHOLD_BITS - ovl->data->gmc_bits); + gmc_thrshd_h = GMC_THRESHOLD_HIGH >> + (GMC_THRESHOLD_BITS - ovl->data->gmc_bits); + if (ovl->data->gmc_bits == 10) + gmc_value = gmc_thrshd_h | gmc_thrshd_h << 16; + else + gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 | + gmc_thrshd_h << 16 | gmc_thrshd_h << 24; + mtk_ddp_write(cmdq_pkt, gmc_value, + &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RDMA_GMC(idx)); + mtk_ddp_write_mask(cmdq_pkt, BIT(idx), &ovl->cmdq_reg, ovl->regs, + DISP_REG_OVL_SRC_CON, BIT(idx)); +} + +void mtk_ovl_layer_off(struct device *dev, unsigned int idx, + struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + mtk_ddp_write_mask(cmdq_pkt, 0, &ovl->cmdq_reg, ovl->regs, + DISP_REG_OVL_SRC_CON, BIT(idx)); + mtk_ddp_write(cmdq_pkt, 0, &ovl->cmdq_reg, ovl->regs, + DISP_REG_OVL_RDMA_CTRL(idx)); +} + +static unsigned int mtk_ovl_fmt_convert(struct mtk_disp_ovl *ovl, + struct mtk_plane_state *state) +{ + unsigned int fmt = state->pending.format; + unsigned int blend_mode = DRM_MODE_BLEND_COVERAGE; + + /* + * For the platforms where OVL_CON_CLRFMT_MAN is defined in the hardware data sheet + * and supports premultiplied color formats, such as OVL_CON_CLRFMT_PARGB8888. + * + * Check blend_modes in the driver data to see if premultiplied mode is supported. + * If not, use coverage mode instead to set it to the supported color formats. + * + * Current DRM assumption is that alpha is default premultiplied, so the bitmask of + * blend_modes must include BIT(DRM_MODE_BLEND_PREMULTI). Otherwise, mtk_plane_init() + * will get an error return from drm_plane_create_blend_mode_property() and + * state->base.pixel_blend_mode should not be used. + */ + if (ovl->data->blend_modes & BIT(DRM_MODE_BLEND_PREMULTI)) + blend_mode = state->base.pixel_blend_mode; + + switch (fmt) { + default: + case DRM_FORMAT_RGB565: + return OVL_CON_CLRFMT_RGB565(ovl); + case DRM_FORMAT_BGR565: + return OVL_CON_CLRFMT_RGB565(ovl) | OVL_CON_BYTE_SWAP; + case DRM_FORMAT_RGB888: + return OVL_CON_CLRFMT_RGB888(ovl); + case DRM_FORMAT_BGR888: + return OVL_CON_CLRFMT_RGB888(ovl) | OVL_CON_BYTE_SWAP; + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_RGBX1010102: + case DRM_FORMAT_RGBA1010102: + return blend_mode == DRM_MODE_BLEND_COVERAGE ? + OVL_CON_CLRFMT_RGBA8888 : + OVL_CON_CLRFMT_PRGBA8888; + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_BGRX1010102: + case DRM_FORMAT_BGRA1010102: + return blend_mode == DRM_MODE_BLEND_COVERAGE ? + OVL_CON_CLRFMT_BGRA8888 : + OVL_CON_CLRFMT_PBGRA8888; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + return blend_mode == DRM_MODE_BLEND_COVERAGE ? + OVL_CON_CLRFMT_ARGB8888 : + OVL_CON_CLRFMT_PARGB8888; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ABGR2101010: + return blend_mode == DRM_MODE_BLEND_COVERAGE ? 
+ OVL_CON_CLRFMT_ABGR8888 : + OVL_CON_CLRFMT_PABGR8888; + case DRM_FORMAT_UYVY: + return OVL_CON_CLRFMT_UYVY | OVL_CON_MTX_YUV_TO_RGB; + case DRM_FORMAT_YUYV: + return OVL_CON_CLRFMT_YUYV | OVL_CON_MTX_YUV_TO_RGB; + } +} + +static void mtk_ovl_afbc_layer_config(struct mtk_disp_ovl *ovl, + unsigned int idx, + struct mtk_plane_pending_state *pending, + struct cmdq_pkt *cmdq_pkt) +{ + unsigned int pitch_msb = pending->pitch >> 16; + unsigned int hdr_pitch = pending->hdr_pitch; + unsigned int hdr_addr = pending->hdr_addr; + + if (pending->modifier != DRM_FORMAT_MOD_LINEAR) { + mtk_ddp_write_relaxed(cmdq_pkt, hdr_addr, &ovl->cmdq_reg, ovl->regs, + DISP_REG_OVL_HDR_ADDR(ovl, idx)); + mtk_ddp_write_relaxed(cmdq_pkt, + OVL_PITCH_MSB_2ND_SUBBUF | pitch_msb, + &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx)); + mtk_ddp_write_relaxed(cmdq_pkt, hdr_pitch, &ovl->cmdq_reg, ovl->regs, + DISP_REG_OVL_HDR_PITCH(ovl, idx)); + } else { + mtk_ddp_write_relaxed(cmdq_pkt, pitch_msb, + &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx)); + } +} + +void mtk_ovl_layer_config(struct device *dev, unsigned int idx, + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + struct mtk_plane_pending_state *pending = &state->pending; + unsigned int addr = pending->addr; + unsigned int pitch_lsb = pending->pitch & GENMASK(15, 0); + unsigned int fmt = pending->format; + unsigned int rotation = pending->rotation; + unsigned int offset = (pending->y << 16) | pending->x; + unsigned int src_size = (pending->height << 16) | pending->width; + unsigned int blend_mode = state->base.pixel_blend_mode; + unsigned int ignore_pixel_alpha = 0; + unsigned int con; + + if (!pending->enable) { + mtk_ovl_layer_off(dev, idx, cmdq_pkt); + return; + } + + con = mtk_ovl_fmt_convert(ovl, state); + if (state->base.fb) { + con |= state->base.alpha & OVL_CON_ALPHA; + + /* + * For blend_modes supported SoCs, always enable alpha blending. + * For blend_modes unsupported SoCs, enable alpha blending when has_alpha is set. + */ + if (blend_mode || state->base.fb->format->has_alpha) + con |= OVL_CON_AEN; + + /* + * Although the alpha channel can be ignored, CONST_BLD must be enabled + * for XRGB format, otherwise OVL will still read the value from memory. + * For RGB888 related formats, whether CONST_BLD is enabled or not won't + * affect the result. Therefore we use !has_alpha as the condition. + */ + if (blend_mode == DRM_MODE_BLEND_PIXEL_NONE || !state->base.fb->format->has_alpha) + ignore_pixel_alpha = OVL_CONST_BLEND; + } + + /* + * Treat rotate 180 as flip x + flip y, and XOR the original rotation value + * to flip x + flip y to support both in the same time. 
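+	 * e.g. DRM_MODE_ROTATE_180 | DRM_MODE_REFLECT_X is handled as an
+	 * effective DRM_MODE_REFLECT_Y after the XOR below.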
+ */ + if (rotation & DRM_MODE_ROTATE_180) + rotation ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y; + + if (rotation & DRM_MODE_REFLECT_Y) { + con |= OVL_CON_VIRT_FLIP; + addr += (pending->height - 1) * pending->pitch; + } + + if (rotation & DRM_MODE_REFLECT_X) { + con |= OVL_CON_HORZ_FLIP; + addr += pending->pitch - 1; + } + + if (ovl->data->supports_afbc) + mtk_ovl_set_afbc(ovl, cmdq_pkt, idx, + pending->modifier != DRM_FORMAT_MOD_LINEAR); + + mtk_ddp_write_relaxed(cmdq_pkt, con, &ovl->cmdq_reg, ovl->regs, + DISP_REG_OVL_CON(idx)); + mtk_ddp_write_relaxed(cmdq_pkt, pitch_lsb | ignore_pixel_alpha, + &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH(idx)); + mtk_ddp_write_relaxed(cmdq_pkt, src_size, &ovl->cmdq_reg, ovl->regs, + DISP_REG_OVL_SRC_SIZE(idx)); + mtk_ddp_write_relaxed(cmdq_pkt, offset, &ovl->cmdq_reg, ovl->regs, + DISP_REG_OVL_OFFSET(idx)); + mtk_ddp_write_relaxed(cmdq_pkt, addr, &ovl->cmdq_reg, ovl->regs, + DISP_REG_OVL_ADDR(ovl, idx)); + + if (ovl->data->supports_afbc) + mtk_ovl_afbc_layer_config(ovl, idx, pending, cmdq_pkt); + + mtk_ovl_set_bit_depth(dev, idx, fmt, cmdq_pkt); + mtk_ovl_layer_on(dev, idx, cmdq_pkt); +} + +void mtk_ovl_bgclr_in_on(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + unsigned int reg; + + reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON); + reg = reg | OVL_BGCLR_SEL_IN; + writel(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON); +} + +void mtk_ovl_bgclr_in_off(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + unsigned int reg; + + reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON); + reg = reg & ~OVL_BGCLR_SEL_IN; + writel(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON); +} + +static int mtk_disp_ovl_bind(struct device *dev, struct device *master, + void *data) +{ + return 0; +} + +static void mtk_disp_ovl_unbind(struct device *dev, struct device *master, + void *data) +{ +} + +static const struct component_ops mtk_disp_ovl_component_ops = { + .bind = mtk_disp_ovl_bind, + .unbind = mtk_disp_ovl_unbind, +}; + +static int mtk_disp_ovl_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_disp_ovl *priv; + int irq; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), + "failed to get ovl clk\n"); + + priv->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->regs)) + return dev_err_probe(dev, PTR_ERR(priv->regs), + "failed to ioremap ovl\n"); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0); + if (ret) + dev_dbg(dev, "get mediatek,gce-client-reg fail!\n"); +#endif + + priv->data = of_device_get_match_data(dev); + platform_set_drvdata(pdev, priv); + + ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler, + IRQF_TRIGGER_NONE, dev_name(dev), priv); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to request irq %d\n", irq); + + pm_runtime_enable(dev); + + ret = component_add(dev, &mtk_disp_ovl_component_ops); + if (ret) { + pm_runtime_disable(dev); + return dev_err_probe(dev, ret, "Failed to add component\n"); + } + + return 0; +} + +static void mtk_disp_ovl_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &mtk_disp_ovl_component_ops); + pm_runtime_disable(&pdev->dev); +} + +static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = { + .addr = DISP_REG_OVL_ADDR_MT2701, + 
.gmc_bits = 8, + .layer_nr = 4, + .fmt_rgb565_is_0 = false, + .formats = mt8173_formats, + .num_formats = ARRAY_SIZE(mt8173_formats), +}; + +static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = { + .addr = DISP_REG_OVL_ADDR_MT8173, + .gmc_bits = 8, + .layer_nr = 4, + .fmt_rgb565_is_0 = true, + .formats = mt8173_formats, + .num_formats = ARRAY_SIZE(mt8173_formats), +}; + +static const struct mtk_disp_ovl_data mt8183_ovl_driver_data = { + .addr = DISP_REG_OVL_ADDR_MT8173, + .gmc_bits = 10, + .layer_nr = 4, + .fmt_rgb565_is_0 = true, + .formats = mt8173_formats, + .num_formats = ARRAY_SIZE(mt8173_formats), +}; + +static const struct mtk_disp_ovl_data mt8183_ovl_2l_driver_data = { + .addr = DISP_REG_OVL_ADDR_MT8173, + .gmc_bits = 10, + .layer_nr = 2, + .fmt_rgb565_is_0 = true, + .formats = mt8173_formats, + .num_formats = ARRAY_SIZE(mt8173_formats), +}; + +static const struct mtk_disp_ovl_data mt8192_ovl_driver_data = { + .addr = DISP_REG_OVL_ADDR_MT8173, + .gmc_bits = 10, + .layer_nr = 4, + .fmt_rgb565_is_0 = true, + .smi_id_en = true, + .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE) | + BIT(DRM_MODE_BLEND_PIXEL_NONE), + .formats = mt8173_formats, + .num_formats = ARRAY_SIZE(mt8173_formats), +}; + +static const struct mtk_disp_ovl_data mt8192_ovl_2l_driver_data = { + .addr = DISP_REG_OVL_ADDR_MT8173, + .gmc_bits = 10, + .layer_nr = 2, + .fmt_rgb565_is_0 = true, + .smi_id_en = true, + .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE) | + BIT(DRM_MODE_BLEND_PIXEL_NONE), + .formats = mt8173_formats, + .num_formats = ARRAY_SIZE(mt8173_formats), +}; + +static const struct mtk_disp_ovl_data mt8195_ovl_driver_data = { + .addr = DISP_REG_OVL_ADDR_MT8173, + .gmc_bits = 10, + .layer_nr = 4, + .fmt_rgb565_is_0 = true, + .smi_id_en = true, + .supports_afbc = true, + .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE) | + BIT(DRM_MODE_BLEND_PIXEL_NONE), + .formats = mt8195_formats, + .num_formats = ARRAY_SIZE(mt8195_formats), + .supports_clrfmt_ext = true, +}; + +static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = { + { .compatible = "mediatek,mt2701-disp-ovl", + .data = &mt2701_ovl_driver_data}, + { .compatible = "mediatek,mt8173-disp-ovl", + .data = &mt8173_ovl_driver_data}, + { .compatible = "mediatek,mt8183-disp-ovl", + .data = &mt8183_ovl_driver_data}, + { .compatible = "mediatek,mt8183-disp-ovl-2l", + .data = &mt8183_ovl_2l_driver_data}, + { .compatible = "mediatek,mt8192-disp-ovl", + .data = &mt8192_ovl_driver_data}, + { .compatible = "mediatek,mt8192-disp-ovl-2l", + .data = &mt8192_ovl_2l_driver_data}, + { .compatible = "mediatek,mt8195-disp-ovl", + .data = &mt8195_ovl_driver_data}, + {}, +}; +MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match); + +struct platform_driver mtk_disp_ovl_driver = { + .probe = mtk_disp_ovl_probe, + .remove = mtk_disp_ovl_remove, + .driver = { + .name = "mediatek-disp-ovl", + .of_match_table = mtk_disp_ovl_driver_dt_match, + }, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c new file mode 100644 index 000000000000..c0af3e3b51d5 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c @@ -0,0 +1,679 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 MediaTek Inc. 
+ */ + +#include <drm/drm_fourcc.h> +#include <drm/drm_of.h> +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/soc/mediatek/mtk-cmdq.h> +#include <linux/soc/mediatek/mtk-mmsys.h> +#include <linux/soc/mediatek/mtk-mutex.h> + +#include "mtk_ddp_comp.h" +#include "mtk_disp_drv.h" +#include "mtk_drm_drv.h" +#include "mtk_ethdr.h" + +#define MTK_OVL_ADAPTOR_RDMA_MAX_WIDTH 1920 +#define MTK_OVL_ADAPTOR_LAYER_NUM 4 + +enum mtk_ovl_adaptor_comp_type { + OVL_ADAPTOR_TYPE_ETHDR, + OVL_ADAPTOR_TYPE_MDP_RDMA, + OVL_ADAPTOR_TYPE_MERGE, + OVL_ADAPTOR_TYPE_PADDING, + OVL_ADAPTOR_TYPE_NUM, +}; + +enum mtk_ovl_adaptor_comp_id { + OVL_ADAPTOR_ETHDR0, + OVL_ADAPTOR_MDP_RDMA0, + OVL_ADAPTOR_MDP_RDMA1, + OVL_ADAPTOR_MDP_RDMA2, + OVL_ADAPTOR_MDP_RDMA3, + OVL_ADAPTOR_MDP_RDMA4, + OVL_ADAPTOR_MDP_RDMA5, + OVL_ADAPTOR_MDP_RDMA6, + OVL_ADAPTOR_MDP_RDMA7, + OVL_ADAPTOR_MERGE0, + OVL_ADAPTOR_MERGE1, + OVL_ADAPTOR_MERGE2, + OVL_ADAPTOR_MERGE3, + OVL_ADAPTOR_PADDING0, + OVL_ADAPTOR_PADDING1, + OVL_ADAPTOR_PADDING2, + OVL_ADAPTOR_PADDING3, + OVL_ADAPTOR_PADDING4, + OVL_ADAPTOR_PADDING5, + OVL_ADAPTOR_PADDING6, + OVL_ADAPTOR_PADDING7, + OVL_ADAPTOR_ID_MAX +}; + +struct ovl_adaptor_comp_match { + enum mtk_ovl_adaptor_comp_type type; + enum mtk_ddp_comp_id comp_id; + int alias_id; + const struct mtk_ddp_comp_funcs *funcs; +}; + +struct mtk_disp_ovl_adaptor { + struct device *ovl_adaptor_comp[OVL_ADAPTOR_ID_MAX]; + struct device *mmsys_dev; + bool children_bound; +}; + +static const char * const private_comp_stem[OVL_ADAPTOR_TYPE_NUM] = { + [OVL_ADAPTOR_TYPE_ETHDR] = "ethdr", + [OVL_ADAPTOR_TYPE_MDP_RDMA] = "vdo1-rdma", + [OVL_ADAPTOR_TYPE_MERGE] = "merge", + [OVL_ADAPTOR_TYPE_PADDING] = "padding", +}; + +static const struct mtk_ddp_comp_funcs ethdr = { + .clk_enable = mtk_ethdr_clk_enable, + .clk_disable = mtk_ethdr_clk_disable, + .start = mtk_ethdr_start, + .stop = mtk_ethdr_stop, +}; + +static const struct mtk_ddp_comp_funcs merge = { + .clk_enable = mtk_merge_clk_enable, + .clk_disable = mtk_merge_clk_disable, + .mode_valid = mtk_merge_mode_valid, +}; + +static const struct mtk_ddp_comp_funcs padding = { + .clk_enable = mtk_padding_clk_enable, + .clk_disable = mtk_padding_clk_disable, + .start = mtk_padding_start, + .stop = mtk_padding_stop, +}; + +static const struct mtk_ddp_comp_funcs rdma = { + .power_on = mtk_mdp_rdma_power_on, + .power_off = mtk_mdp_rdma_power_off, + .clk_enable = mtk_mdp_rdma_clk_enable, + .clk_disable = mtk_mdp_rdma_clk_disable, +}; + +static const struct ovl_adaptor_comp_match comp_matches[OVL_ADAPTOR_ID_MAX] = { + [OVL_ADAPTOR_ETHDR0] = { OVL_ADAPTOR_TYPE_ETHDR, DDP_COMPONENT_ETHDR_MIXER, 0, ðdr }, + [OVL_ADAPTOR_MDP_RDMA0] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA0, 0, &rdma }, + [OVL_ADAPTOR_MDP_RDMA1] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA1, 1, &rdma }, + [OVL_ADAPTOR_MDP_RDMA2] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA2, 2, &rdma }, + [OVL_ADAPTOR_MDP_RDMA3] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA3, 3, &rdma }, + [OVL_ADAPTOR_MDP_RDMA4] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA4, 4, &rdma }, + [OVL_ADAPTOR_MDP_RDMA5] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA5, 5, &rdma }, + [OVL_ADAPTOR_MDP_RDMA6] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA6, 6, &rdma }, + [OVL_ADAPTOR_MDP_RDMA7] = { 
OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA7, 7, &rdma }, + [OVL_ADAPTOR_MERGE0] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE1, 1, &merge }, + [OVL_ADAPTOR_MERGE1] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE2, 2, &merge }, + [OVL_ADAPTOR_MERGE2] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE3, 3, &merge }, + [OVL_ADAPTOR_MERGE3] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE4, 4, &merge }, + [OVL_ADAPTOR_PADDING0] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING0, 0, &padding }, + [OVL_ADAPTOR_PADDING1] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING1, 1, &padding }, + [OVL_ADAPTOR_PADDING2] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING2, 2, &padding }, + [OVL_ADAPTOR_PADDING3] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING3, 3, &padding }, + [OVL_ADAPTOR_PADDING4] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING4, 4, &padding }, + [OVL_ADAPTOR_PADDING5] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING5, 5, &padding }, + [OVL_ADAPTOR_PADDING6] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING6, 6, &padding }, + [OVL_ADAPTOR_PADDING7] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING7, 7, &padding }, +}; + +void mtk_ovl_adaptor_layer_config(struct device *dev, unsigned int idx, + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + struct mtk_plane_pending_state *pending = &state->pending; + struct mtk_mdp_rdma_cfg rdma_config = {0}; + struct device *rdma_l; + struct device *rdma_r; + struct device *merge; + struct device *ethdr; + const struct drm_format_info *fmt_info = drm_format_info(pending->format); + bool use_dual_pipe = false; + unsigned int align_width; + unsigned int l_w = 0; + unsigned int r_w = 0; + + dev_dbg(dev, "%s+ idx:%d, enable:%d, fmt:0x%x\n", __func__, idx, + pending->enable, pending->format); + dev_dbg(dev, "addr 0x%pad, fb w:%d, {%d,%d,%d,%d}\n", + &pending->addr, (pending->pitch / fmt_info->cpp[0]), + pending->x, pending->y, pending->width, pending->height); + + rdma_l = ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_MDP_RDMA0 + 2 * idx]; + rdma_r = ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_MDP_RDMA0 + 2 * idx + 1]; + merge = ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_MERGE0 + idx]; + ethdr = ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]; + + if (!pending->enable || !pending->width || !pending->height) { + mtk_merge_stop_cmdq(merge, cmdq_pkt); + mtk_mdp_rdma_stop(rdma_l, cmdq_pkt); + mtk_mdp_rdma_stop(rdma_r, cmdq_pkt); + mtk_ethdr_layer_config(ethdr, idx, state, cmdq_pkt); + return; + } + + /* ETHDR is in 1T2P domain, width needs to be 2 pixels align */ + align_width = ALIGN_DOWN(pending->width, 2); + + if (align_width > MTK_OVL_ADAPTOR_RDMA_MAX_WIDTH) + use_dual_pipe = true; + + if (use_dual_pipe) { + l_w = (align_width / 2) + ((pending->width / 2) % 2); + r_w = align_width - l_w; + } else { + l_w = align_width; + } + mtk_merge_advance_config(merge, l_w, r_w, pending->height, 0, 0, cmdq_pkt); + mtk_mmsys_merge_async_config(ovl_adaptor->mmsys_dev, idx, align_width / 2, + pending->height, cmdq_pkt); + + rdma_config.width = l_w; + rdma_config.height = pending->height; + rdma_config.addr0 = pending->addr; + rdma_config.pitch = pending->pitch; + rdma_config.fmt = pending->format; + rdma_config.color_encoding = pending->color_encoding; + mtk_mdp_rdma_config(rdma_l, &rdma_config, cmdq_pkt); + + if (use_dual_pipe) { + rdma_config.x_left = l_w; + rdma_config.width = r_w; + mtk_mdp_rdma_config(rdma_r, &rdma_config, cmdq_pkt); + } + + 
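+	/*
+	 * Worked example: a 3840 pixel wide plane exceeds
+	 * MTK_OVL_ADAPTOR_RDMA_MAX_WIDTH (1920), so use_dual_pipe is set and
+	 * the frame is split into l_w = 1920 for rdma_l and r_w = 1920 for
+	 * rdma_r, with rdma_r fetching from x_left = 1920.
+	 */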
mtk_merge_start_cmdq(merge, cmdq_pkt); + + mtk_mdp_rdma_start(rdma_l, cmdq_pkt); + if (use_dual_pipe) + mtk_mdp_rdma_start(rdma_r, cmdq_pkt); + else + mtk_mdp_rdma_stop(rdma_r, cmdq_pkt); + + mtk_ethdr_layer_config(ethdr, idx, state, cmdq_pkt); +} + +void mtk_ovl_adaptor_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + mtk_ethdr_config(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0], w, h, + vrefresh, bpc, cmdq_pkt); +} + +void mtk_ovl_adaptor_start(struct device *dev) +{ + int i; + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) { + if (!ovl_adaptor->ovl_adaptor_comp[i] || + !comp_matches[i].funcs->start) + continue; + + comp_matches[i].funcs->start(ovl_adaptor->ovl_adaptor_comp[i]); + } +} + +void mtk_ovl_adaptor_stop(struct device *dev) +{ + int i; + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) { + if (!ovl_adaptor->ovl_adaptor_comp[i] || + !comp_matches[i].funcs->stop) + continue; + + comp_matches[i].funcs->stop(ovl_adaptor->ovl_adaptor_comp[i]); + } +} + +/** + * power_off - Power off the devices in OVL adaptor + * @dev: Device to be powered off + * @num: Number of the devices to be powered off + * + * Calls the .power_off() ovl_adaptor component callback if it is present. + */ +static inline void power_off(struct device *dev, int num) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + int i; + + if (num > OVL_ADAPTOR_ID_MAX) + num = OVL_ADAPTOR_ID_MAX; + + for (i = num - 1; i >= 0; i--) { + if (!ovl_adaptor->ovl_adaptor_comp[i] || + !comp_matches[i].funcs->power_off) + continue; + + comp_matches[i].funcs->power_off(ovl_adaptor->ovl_adaptor_comp[i]); + } +} + +/** + * mtk_ovl_adaptor_power_on - Power on the devices in OVL adaptor + * @dev: Device to be powered on + * + * Different from OVL, OVL adaptor is a pseudo device so + * we didn't define it in the device tree, pm_runtime_resume_and_get() + * called by .atomic_enable() power on no device in OVL adaptor, + * we have to implement a function to do the job instead. + * + * Return: Zero for success or negative number for failure. 
+ */ +int mtk_ovl_adaptor_power_on(struct device *dev) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + int i, ret; + + for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) { + if (!ovl_adaptor->ovl_adaptor_comp[i] || + !comp_matches[i].funcs->power_on) + continue; + + ret = comp_matches[i].funcs->power_on(ovl_adaptor->ovl_adaptor_comp[i]); + if (ret < 0) { + dev_err(dev, "Failed to enable power domain %d, err %d\n", i, ret); + power_off(dev, i); + return ret; + } + } + return 0; +} + +void mtk_ovl_adaptor_power_off(struct device *dev) +{ + power_off(dev, OVL_ADAPTOR_ID_MAX); +} + +int mtk_ovl_adaptor_clk_enable(struct device *dev) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + struct device *comp; + int ret; + int i; + + for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) { + comp = ovl_adaptor->ovl_adaptor_comp[i]; + if (!comp || !comp_matches[i].funcs->clk_enable) + continue; + ret = comp_matches[i].funcs->clk_enable(comp); + if (ret) { + dev_err(dev, "Failed to enable clock %d, err %d\n", i, ret); + while (--i >= 0) + comp_matches[i].funcs->clk_disable(comp); + return ret; + } + } + return 0; +} + +void mtk_ovl_adaptor_clk_disable(struct device *dev) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + struct device *comp; + int i; + + for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) { + comp = ovl_adaptor->ovl_adaptor_comp[i]; + if (!comp || !comp_matches[i].funcs->clk_disable) + continue; + comp_matches[i].funcs->clk_disable(comp); + if (i < OVL_ADAPTOR_MERGE0) + pm_runtime_put(comp); + } +} + +enum drm_mode_status mtk_ovl_adaptor_mode_valid(struct device *dev, + const struct drm_display_mode *mode) + +{ + int i; + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) { + dev = ovl_adaptor->ovl_adaptor_comp[i]; + if (!dev || !comp_matches[i].funcs->mode_valid) + continue; + return comp_matches[i].funcs->mode_valid(dev, mode); + } + return MODE_OK; +} + +unsigned int mtk_ovl_adaptor_layer_nr(struct device *dev) +{ + return MTK_OVL_ADAPTOR_LAYER_NUM; +} + +struct device *mtk_ovl_adaptor_dma_dev_get(struct device *dev) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + return ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_MDP_RDMA0]; +} + +void mtk_ovl_adaptor_register_vblank_cb(struct device *dev, void (*vblank_cb)(void *), + void *vblank_cb_data) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + mtk_ethdr_register_vblank_cb(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0], + vblank_cb, vblank_cb_data); +} + +void mtk_ovl_adaptor_unregister_vblank_cb(struct device *dev) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + mtk_ethdr_unregister_vblank_cb(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]); +} + +void mtk_ovl_adaptor_enable_vblank(struct device *dev) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + mtk_ethdr_enable_vblank(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]); +} + +void mtk_ovl_adaptor_disable_vblank(struct device *dev) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + mtk_ethdr_disable_vblank(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]); +} + +u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + return mtk_ethdr_get_blend_modes(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]); +} + +const u32 *mtk_ovl_adaptor_get_formats(struct device *dev) +{ + struct mtk_disp_ovl_adaptor 
*ovl_adaptor = dev_get_drvdata(dev); + + return mtk_mdp_rdma_get_formats(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_MDP_RDMA0]); +} + +size_t mtk_ovl_adaptor_get_num_formats(struct device *dev) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + return mtk_mdp_rdma_get_num_formats(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_MDP_RDMA0]); +} + +void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex) +{ + int i; + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) { + if (!ovl_adaptor->ovl_adaptor_comp[i]) + continue; + mtk_mutex_add_comp(mutex, comp_matches[i].comp_id); + } +} + +void mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex) +{ + int i; + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) { + if (!ovl_adaptor->ovl_adaptor_comp[i]) + continue; + mtk_mutex_remove_comp(mutex, comp_matches[i].comp_id); + } +} + +void mtk_ovl_adaptor_connect(struct device *dev, struct device *mmsys_dev, unsigned int next) +{ + mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_ETHDR_MIXER, next); + mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1); + mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1); + mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2); + mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER); + mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER); + mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER); + mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER); +} + +void mtk_ovl_adaptor_disconnect(struct device *dev, struct device *mmsys_dev, unsigned int next) +{ + mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_ETHDR_MIXER, next); + mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1); + mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1); + mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2); + mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER); + mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER); + mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER); + mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER); +} + +static int ovl_adaptor_comp_get_id(struct device *dev, struct device_node *node, + enum mtk_ovl_adaptor_comp_type type) +{ + int alias_id = of_alias_get_id(node, private_comp_stem[type]); + int i; + + for (i = 0; i < ARRAY_SIZE(comp_matches); i++) + if (comp_matches[i].type == type && + comp_matches[i].alias_id == alias_id) + return i; + + dev_warn(dev, "Failed to get id. 
type: %d, alias: %d\n", type, alias_id); + return -EINVAL; +} + +static const struct of_device_id mtk_ovl_adaptor_comp_dt_ids[] = { + { .compatible = "mediatek,mt8188-disp-padding", .data = (void *)OVL_ADAPTOR_TYPE_PADDING }, + { .compatible = "mediatek,mt8195-disp-ethdr", .data = (void *)OVL_ADAPTOR_TYPE_ETHDR }, + { .compatible = "mediatek,mt8195-disp-merge", .data = (void *)OVL_ADAPTOR_TYPE_MERGE }, + { .compatible = "mediatek,mt8195-vdo1-rdma", .data = (void *)OVL_ADAPTOR_TYPE_MDP_RDMA }, + { /* sentinel */ } +}; + +static int ovl_adaptor_of_get_ddp_comp_type(struct device_node *node, + enum mtk_ovl_adaptor_comp_type *ctype) +{ + const struct of_device_id *of_id = of_match_node(mtk_ovl_adaptor_comp_dt_ids, node); + + if (!of_id) + return -EINVAL; + + *ctype = (enum mtk_ovl_adaptor_comp_type)((uintptr_t)of_id->data); + + return 0; +} + +bool mtk_ovl_adaptor_is_comp_present(struct device_node *node) +{ + enum mtk_ovl_adaptor_comp_type type; + int ret; + + ret = ovl_adaptor_of_get_ddp_comp_type(node, &type); + if (ret) + return false; + + if (type >= OVL_ADAPTOR_TYPE_NUM) + return false; + + /* + * In the context of mediatek-drm, ETHDR, MDP_RDMA and Padding are + * used exclusively by OVL Adaptor: if this component is not one of + * those, it's likely not an OVL Adaptor path. + */ + return type == OVL_ADAPTOR_TYPE_ETHDR || + type == OVL_ADAPTOR_TYPE_MDP_RDMA || + type == OVL_ADAPTOR_TYPE_PADDING; +} + +static void ovl_adaptor_put_device(void *_dev) +{ + struct device *dev = _dev; + + put_device(dev); +} + +static int ovl_adaptor_comp_init(struct device *dev, struct component_match **match) +{ + struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev); + struct device_node *parent; + struct platform_device *comp_pdev; + + parent = dev->parent->parent->of_node->parent; + + for_each_child_of_node_scoped(parent, node) { + enum mtk_ovl_adaptor_comp_type type; + int id, ret; + + ret = ovl_adaptor_of_get_ddp_comp_type(node, &type); + if (ret) + continue; + + if (!of_device_is_available(node)) { + dev_dbg(dev, "Skipping disabled component %pOF\n", + node); + continue; + } + + id = ovl_adaptor_comp_get_id(dev, node, type); + if (id < 0) { + dev_warn(dev, "Skipping unknown component %pOF\n", + node); + continue; + } + + comp_pdev = of_find_device_by_node(node); + if (!comp_pdev) + return -EPROBE_DEFER; + + ret = devm_add_action_or_reset(dev, ovl_adaptor_put_device, + &comp_pdev->dev); + if (ret) + return ret; + + priv->ovl_adaptor_comp[id] = &comp_pdev->dev; + + drm_of_component_match_add(dev, match, component_compare_of, node); + dev_dbg(dev, "Adding component match for %pOF\n", node); + } + + if (!*match) { + dev_err(dev, "No match device for ovl_adaptor\n"); + return -ENODEV; + } + + return 0; +} + +static int mtk_disp_ovl_adaptor_comp_bind(struct device *dev, struct device *master, + void *data) +{ + struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev); + + if (!priv->children_bound) + return -EPROBE_DEFER; + + return 0; +} + +static void mtk_disp_ovl_adaptor_comp_unbind(struct device *dev, struct device *master, + void *data) +{ +} + +static const struct component_ops mtk_disp_ovl_adaptor_comp_ops = { + .bind = mtk_disp_ovl_adaptor_comp_bind, + .unbind = mtk_disp_ovl_adaptor_comp_unbind, +}; + +static int mtk_disp_ovl_adaptor_master_bind(struct device *dev) +{ + struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev); + int ret; + + ret = component_bind_all(dev, priv->mmsys_dev); + if (ret) + return dev_err_probe(dev, ret, "component_bind_all failed!\n"); + + priv->children_bound = true; 
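+	/*
+	 * From this point on mtk_disp_ovl_adaptor_comp_bind() no longer
+	 * returns -EPROBE_DEFER, so the adaptor binds into the DRM device
+	 * only after every child component has been bound here.
+	 */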
+ return 0; +} + +static void mtk_disp_ovl_adaptor_master_unbind(struct device *dev) +{ + struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev); + + priv->children_bound = false; +} + +static const struct component_master_ops mtk_disp_ovl_adaptor_master_ops = { + .bind = mtk_disp_ovl_adaptor_master_bind, + .unbind = mtk_disp_ovl_adaptor_master_unbind, +}; + +static int mtk_disp_ovl_adaptor_probe(struct platform_device *pdev) +{ + struct mtk_disp_ovl_adaptor *priv; + struct device *dev = &pdev->dev; + struct component_match *match = NULL; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + ret = ovl_adaptor_comp_init(dev, &match); + if (ret < 0) + return ret; + + priv->mmsys_dev = pdev->dev.platform_data; + + component_master_add_with_match(dev, &mtk_disp_ovl_adaptor_master_ops, match); + + pm_runtime_enable(dev); + + ret = component_add(dev, &mtk_disp_ovl_adaptor_comp_ops); + if (ret != 0) { + pm_runtime_disable(dev); + return dev_err_probe(dev, ret, "Failed to add component\n"); + } + + return 0; +} + +static void mtk_disp_ovl_adaptor_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &mtk_disp_ovl_adaptor_master_ops); + pm_runtime_disable(&pdev->dev); +} + +struct platform_driver mtk_disp_ovl_adaptor_driver = { + .probe = mtk_disp_ovl_adaptor_probe, + .remove = mtk_disp_ovl_adaptor_remove, + .driver = { + .name = "mediatek-disp-ovl-adaptor", + }, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c new file mode 100644 index 000000000000..c9d41d75e7f2 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015 MediaTek Inc. 
+ */ + +#include <drm/drm_fourcc.h> + +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/soc/mediatek/mtk-cmdq.h> + +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" +#include "mtk_disp_drv.h" +#include "mtk_drm_drv.h" + +#define DISP_REG_RDMA_INT_ENABLE 0x0000 +#define DISP_REG_RDMA_INT_STATUS 0x0004 +#define RDMA_TARGET_LINE_INT BIT(5) +#define RDMA_FIFO_UNDERFLOW_INT BIT(4) +#define RDMA_EOF_ABNORMAL_INT BIT(3) +#define RDMA_FRAME_END_INT BIT(2) +#define RDMA_FRAME_START_INT BIT(1) +#define RDMA_REG_UPDATE_INT BIT(0) +#define DISP_REG_RDMA_GLOBAL_CON 0x0010 +#define RDMA_ENGINE_EN BIT(0) +#define RDMA_MODE_MEMORY BIT(1) +#define DISP_REG_RDMA_SIZE_CON_0 0x0014 +#define RDMA_MATRIX_ENABLE BIT(17) +#define RDMA_MATRIX_INT_MTX_SEL GENMASK(23, 20) +#define RDMA_MATRIX_INT_MTX_BT601_to_RGB (6 << 20) +#define DISP_REG_RDMA_SIZE_CON_1 0x0018 +#define DISP_REG_RDMA_TARGET_LINE 0x001c +#define DISP_RDMA_MEM_CON 0x0024 +#define MEM_MODE_INPUT_FORMAT_RGB565 (0x000 << 4) +#define MEM_MODE_INPUT_FORMAT_RGB888 (0x001 << 4) +#define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002 << 4) +#define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003 << 4) +#define MEM_MODE_INPUT_FORMAT_UYVY (0x004 << 4) +#define MEM_MODE_INPUT_FORMAT_YUYV (0x005 << 4) +#define MEM_MODE_INPUT_SWAP BIT(8) +#define DISP_RDMA_MEM_SRC_PITCH 0x002c +#define DISP_RDMA_MEM_GMC_SETTING_0 0x0030 +#define DISP_REG_RDMA_FIFO_CON 0x0040 +#define RDMA_FIFO_UNDERFLOW_EN BIT(31) +#define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) +#define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) +#define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) +#define DISP_RDMA_MEM_START_ADDR 0x0f00 + +#define RDMA_MEM_GMC 0x40402020 + +static const u32 mt8173_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB565, + DRM_FORMAT_UYVY, + DRM_FORMAT_YUYV, +}; + +struct mtk_disp_rdma_data { + unsigned int fifo_size; + const u32 *formats; + size_t num_formats; +}; + +/* + * struct mtk_disp_rdma - DISP_RDMA driver structure + * @data: local driver data + */ +struct mtk_disp_rdma { + struct clk *clk; + void __iomem *regs; + struct cmdq_client_reg cmdq_reg; + const struct mtk_disp_rdma_data *data; + void (*vblank_cb)(void *data); + void *vblank_cb_data; + u32 fifo_size; +}; + +static irqreturn_t mtk_disp_rdma_irq_handler(int irq, void *dev_id) +{ + struct mtk_disp_rdma *priv = dev_id; + + /* Clear frame completion interrupt */ + writel(0x0, priv->regs + DISP_REG_RDMA_INT_STATUS); + + if (!priv->vblank_cb) + return IRQ_NONE; + + priv->vblank_cb(priv->vblank_cb_data); + + return IRQ_HANDLED; +} + +static void rdma_update_bits(struct device *dev, unsigned int reg, + unsigned int mask, unsigned int val) +{ + struct mtk_disp_rdma *rdma = dev_get_drvdata(dev); + unsigned int tmp = readl(rdma->regs + reg); + + tmp = (tmp & ~mask) | (val & mask); + writel(tmp, rdma->regs + reg); +} + +void mtk_rdma_register_vblank_cb(struct device *dev, + void (*vblank_cb)(void *), + void *vblank_cb_data) +{ + struct mtk_disp_rdma *rdma = dev_get_drvdata(dev); + + rdma->vblank_cb = vblank_cb; + rdma->vblank_cb_data = vblank_cb_data; +} + +void mtk_rdma_unregister_vblank_cb(struct device *dev) +{ + struct mtk_disp_rdma *rdma = dev_get_drvdata(dev); + + rdma->vblank_cb = NULL; + rdma->vblank_cb_data = NULL; 
+} + +void mtk_rdma_enable_vblank(struct device *dev) +{ + rdma_update_bits(dev, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, + RDMA_FRAME_END_INT); +} + +void mtk_rdma_disable_vblank(struct device *dev) +{ + rdma_update_bits(dev, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, 0); +} + +const u32 *mtk_rdma_get_formats(struct device *dev) +{ + struct mtk_disp_rdma *rdma = dev_get_drvdata(dev); + + return rdma->data->formats; +} + +size_t mtk_rdma_get_num_formats(struct device *dev) +{ + struct mtk_disp_rdma *rdma = dev_get_drvdata(dev); + + return rdma->data->num_formats; +} + +int mtk_rdma_clk_enable(struct device *dev) +{ + struct mtk_disp_rdma *rdma = dev_get_drvdata(dev); + + return clk_prepare_enable(rdma->clk); +} + +void mtk_rdma_clk_disable(struct device *dev) +{ + struct mtk_disp_rdma *rdma = dev_get_drvdata(dev); + + clk_disable_unprepare(rdma->clk); +} + +void mtk_rdma_start(struct device *dev) +{ + rdma_update_bits(dev, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN, + RDMA_ENGINE_EN); +} + +void mtk_rdma_stop(struct device *dev) +{ + rdma_update_bits(dev, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN, 0); +} + +void mtk_rdma_config(struct device *dev, unsigned int width, + unsigned int height, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + unsigned int threshold; + unsigned int reg; + struct mtk_disp_rdma *rdma = dev_get_drvdata(dev); + u32 rdma_fifo_size; + + mtk_ddp_write_mask(cmdq_pkt, width, &rdma->cmdq_reg, rdma->regs, + DISP_REG_RDMA_SIZE_CON_0, 0xfff); + mtk_ddp_write_mask(cmdq_pkt, height, &rdma->cmdq_reg, rdma->regs, + DISP_REG_RDMA_SIZE_CON_1, 0xfffff); + + if (rdma->fifo_size) + rdma_fifo_size = rdma->fifo_size; + else + rdma_fifo_size = RDMA_FIFO_SIZE(rdma); + + /* + * Enable FIFO underflow since DSI and DPI can't be blocked. + * Keep the FIFO pseudo size reset default of 8 KiB. Set the + * output threshold to 70% of max fifo size to make sure the + * threhold will not overflow + */ + threshold = rdma_fifo_size * 7 / 10; + reg = RDMA_FIFO_UNDERFLOW_EN | + RDMA_FIFO_PSEUDO_SIZE(rdma_fifo_size) | + RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold); + mtk_ddp_write(cmdq_pkt, reg, &rdma->cmdq_reg, rdma->regs, DISP_REG_RDMA_FIFO_CON); +} + +static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma, + unsigned int fmt) +{ + /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX" + * is defined in mediatek HW data sheet. + * The alphabet order in XXX is no relation to data + * arrangement in memory. 
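+	 * e.g. DRM_FORMAT_XRGB8888 is programmed as
+	 * MEM_MODE_INPUT_FORMAT_RGBA8888 in the switch below.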
+ */ + switch (fmt) { + default: + case DRM_FORMAT_RGB565: + return MEM_MODE_INPUT_FORMAT_RGB565; + case DRM_FORMAT_BGR565: + return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP; + case DRM_FORMAT_RGB888: + return MEM_MODE_INPUT_FORMAT_RGB888; + case DRM_FORMAT_BGR888: + return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP; + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_RGBA8888: + return MEM_MODE_INPUT_FORMAT_ARGB8888; + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_BGRA8888: + return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + return MEM_MODE_INPUT_FORMAT_RGBA8888; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP; + case DRM_FORMAT_UYVY: + return MEM_MODE_INPUT_FORMAT_UYVY; + case DRM_FORMAT_YUYV: + return MEM_MODE_INPUT_FORMAT_YUYV; + } +} + +unsigned int mtk_rdma_layer_nr(struct device *dev) +{ + return 1; +} + +void mtk_rdma_layer_config(struct device *dev, unsigned int idx, + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_disp_rdma *rdma = dev_get_drvdata(dev); + struct mtk_plane_pending_state *pending = &state->pending; + unsigned int addr = pending->addr; + unsigned int pitch = pending->pitch & 0xffff; + unsigned int fmt = pending->format; + unsigned int con; + + con = rdma_fmt_convert(rdma, fmt); + mtk_ddp_write_relaxed(cmdq_pkt, con, &rdma->cmdq_reg, rdma->regs, DISP_RDMA_MEM_CON); + + if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) { + mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_ENABLE, &rdma->cmdq_reg, rdma->regs, + DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_ENABLE); + mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_INT_MTX_BT601_to_RGB, + &rdma->cmdq_reg, rdma->regs, DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_INT_MTX_SEL); + } else { + mtk_ddp_write_mask(cmdq_pkt, 0, &rdma->cmdq_reg, rdma->regs, + DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_ENABLE); + } + mtk_ddp_write_relaxed(cmdq_pkt, addr, &rdma->cmdq_reg, rdma->regs, + DISP_RDMA_MEM_START_ADDR); + mtk_ddp_write_relaxed(cmdq_pkt, pitch, &rdma->cmdq_reg, rdma->regs, + DISP_RDMA_MEM_SRC_PITCH); + mtk_ddp_write(cmdq_pkt, RDMA_MEM_GMC, &rdma->cmdq_reg, rdma->regs, + DISP_RDMA_MEM_GMC_SETTING_0); + mtk_ddp_write_mask(cmdq_pkt, RDMA_MODE_MEMORY, &rdma->cmdq_reg, rdma->regs, + DISP_REG_RDMA_GLOBAL_CON, RDMA_MODE_MEMORY); + +} + +static int mtk_disp_rdma_bind(struct device *dev, struct device *master, + void *data) +{ + return 0; + +} + +static void mtk_disp_rdma_unbind(struct device *dev, struct device *master, + void *data) +{ +} + +static const struct component_ops mtk_disp_rdma_component_ops = { + .bind = mtk_disp_rdma_bind, + .unbind = mtk_disp_rdma_unbind, +}; + +static int mtk_disp_rdma_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_disp_rdma *priv; + int irq; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), + "failed to get rdma clk\n"); + + priv->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->regs)) + return dev_err_probe(dev, PTR_ERR(priv->regs), + "failed to ioremap rdma\n"); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0); + if (ret) + dev_dbg(dev, "get mediatek,gce-client-reg fail!\n"); +#endif + + ret = 
of_property_read_u32(dev->of_node, + "mediatek,rdma-fifo-size", + &priv->fifo_size); + if (ret && (ret != -EINVAL)) + return dev_err_probe(dev, ret, "Failed to get rdma fifo size\n"); + + /* Disable and clear pending interrupts */ + writel(0x0, priv->regs + DISP_REG_RDMA_INT_ENABLE); + writel(0x0, priv->regs + DISP_REG_RDMA_INT_STATUS); + + ret = devm_request_irq(dev, irq, mtk_disp_rdma_irq_handler, + IRQF_TRIGGER_NONE, dev_name(dev), priv); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to request irq %d\n", irq); + + priv->data = of_device_get_match_data(dev); + + platform_set_drvdata(pdev, priv); + + pm_runtime_enable(dev); + + ret = component_add(dev, &mtk_disp_rdma_component_ops); + if (ret) { + pm_runtime_disable(dev); + return dev_err_probe(dev, ret, "Failed to add component\n"); + } + + return 0; +} + +static void mtk_disp_rdma_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &mtk_disp_rdma_component_ops); + + pm_runtime_disable(&pdev->dev); +} + +static const struct mtk_disp_rdma_data mt2701_rdma_driver_data = { + .fifo_size = SZ_4K, + .formats = mt8173_formats, + .num_formats = ARRAY_SIZE(mt8173_formats), +}; + +static const struct mtk_disp_rdma_data mt8173_rdma_driver_data = { + .fifo_size = SZ_8K, + .formats = mt8173_formats, + .num_formats = ARRAY_SIZE(mt8173_formats), +}; + +static const struct mtk_disp_rdma_data mt8183_rdma_driver_data = { + .fifo_size = 5 * SZ_1K, + .formats = mt8173_formats, + .num_formats = ARRAY_SIZE(mt8173_formats), +}; + +static const struct mtk_disp_rdma_data mt8195_rdma_driver_data = { + .fifo_size = 1920, + .formats = mt8173_formats, + .num_formats = ARRAY_SIZE(mt8173_formats), +}; + +static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = { + { .compatible = "mediatek,mt2701-disp-rdma", + .data = &mt2701_rdma_driver_data}, + { .compatible = "mediatek,mt8173-disp-rdma", + .data = &mt8173_rdma_driver_data}, + { .compatible = "mediatek,mt8183-disp-rdma", + .data = &mt8183_rdma_driver_data}, + { .compatible = "mediatek,mt8195-disp-rdma", + .data = &mt8195_rdma_driver_data}, + {}, +}; +MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match); + +struct platform_driver mtk_disp_rdma_driver = { + .probe = mtk_disp_rdma_probe, + .remove = mtk_disp_rdma_remove, + .driver = { + .name = "mediatek-disp-rdma", + .of_match_table = mtk_disp_rdma_driver_dt_match, + }, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c new file mode 100644 index 000000000000..b0b1e158600f --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -0,0 +1,2951 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019-2022 MediaTek Inc. 
+ * Copyright (c) 2022 BayLibre + */ + +#include <drm/display/drm_dp_aux_bus.h> +#include <drm/display/drm_dp.h> +#include <drm/display/drm_dp_helper.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_crtc.h> +#include <drm/drm_edid.h> +#include <drm/drm_of.h> +#include <drm/drm_panel.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> +#include <linux/arm-smccc.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/media-bus-format.h> +#include <linux/nvmem-consumer.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/soc/mediatek/mtk_sip_svc.h> +#include <sound/hdmi-codec.h> +#include <video/videomode.h> + +#include "mtk_dp_reg.h" + +#define MTK_DP_SIP_CONTROL_AARCH32 MTK_SIP_SMC_CMD(0x523) +#define MTK_DP_SIP_ATF_EDP_VIDEO_UNMUTE (BIT(0) | BIT(5)) +#define MTK_DP_SIP_ATF_VIDEO_UNMUTE BIT(5) + +#define MTK_DP_THREAD_CABLE_STATE_CHG BIT(0) +#define MTK_DP_THREAD_HPD_EVENT BIT(1) + +#define MTK_DP_4P1T 4 +#define MTK_DP_HDE 2 +#define MTK_DP_PIX_PER_ADDR 2 +#define MTK_DP_AUX_WAIT_REPLY_COUNT 20 +#define MTK_DP_TBC_BUF_READ_START_ADDR 0x8 +#define MTK_DP_TRAIN_VOLTAGE_LEVEL_RETRY 5 +#define MTK_DP_TRAIN_DOWNSCALE_RETRY 10 +#define MTK_DP_VERSION 0x11 +#define MTK_DP_SDP_AUI 0x4 + +enum { + MTK_DP_CAL_GLB_BIAS_TRIM = 0, + MTK_DP_CAL_CLKTX_IMPSE, + MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0, + MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1, + MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2, + MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3, + MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0, + MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1, + MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2, + MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3, + MTK_DP_CAL_MAX, +}; + +struct mtk_dp_train_info { + bool sink_ssc; + bool cable_plugged_in; + /* link_rate is in multiple of 0.27Gbps */ + int link_rate; + int lane_count; + unsigned int channel_eq_pattern; +}; + +struct mtk_dp_audio_cfg { + bool detect_monitor; + int sad_count; + int sample_rate; + int word_length_bits; + int channels; +}; + +struct mtk_dp_info { + enum dp_pixelformat format; + struct videomode vm; + struct mtk_dp_audio_cfg audio_cur_cfg; +}; + +struct mtk_dp_efuse_fmt { + unsigned short idx; + unsigned short shift; + unsigned short mask; + unsigned short min_val; + unsigned short max_val; + unsigned short default_val; +}; + +struct mtk_dp { + bool enabled; + bool need_debounce; + int irq; + u8 max_lanes; + u8 max_linkrate; + u8 rx_cap[DP_RECEIVER_CAP_SIZE]; + u32 cal_data[MTK_DP_CAL_MAX]; + u32 irq_thread_handle; + /* irq_thread_lock is used to protect irq_thread_handle */ + spinlock_t irq_thread_lock; + + struct device *dev; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct drm_connector *conn; + struct drm_device *drm_dev; + struct drm_dp_aux aux; + + const struct mtk_dp_data *data; + struct mtk_dp_info info; + struct mtk_dp_train_info train_info; + + struct platform_device *phy_dev; + struct phy *phy; + struct regmap *regs; + struct timer_list debounce_timer; + + /* For audio */ + bool audio_enable; + hdmi_codec_plugged_cb plugged_cb; + struct platform_device *audio_pdev; + + struct device *codec_dev; + /* protect the plugged_cb as it's used in both bridge ops and audio */ + struct mutex update_plugged_status_lock; +}; + +struct mtk_dp_data { + int bridge_type; + unsigned int smc_cmd; + const struct mtk_dp_efuse_fmt *efuse_fmt; + bool audio_supported; + 
bool audio_pkt_in_hblank_area; + u16 audio_m_div2_bit; +}; + +static const struct mtk_dp_efuse_fmt mt8188_dp_efuse_fmt[MTK_DP_CAL_MAX] = { + [MTK_DP_CAL_GLB_BIAS_TRIM] = { + .idx = 0, + .shift = 10, + .mask = 0x1f, + .min_val = 1, + .max_val = 0x1e, + .default_val = 0xf, + }, + [MTK_DP_CAL_CLKTX_IMPSE] = { + .idx = 0, + .shift = 15, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] = { + .idx = 1, + .shift = 0, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] = { + .idx = 1, + .shift = 8, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] = { + .idx = 1, + .shift = 16, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] = { + .idx = 1, + .shift = 24, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] = { + .idx = 1, + .shift = 4, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] = { + .idx = 1, + .shift = 12, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] = { + .idx = 1, + .shift = 20, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] = { + .idx = 1, + .shift = 28, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, +}; + +static const struct mtk_dp_efuse_fmt mt8195_edp_efuse_fmt[MTK_DP_CAL_MAX] = { + [MTK_DP_CAL_GLB_BIAS_TRIM] = { + .idx = 3, + .shift = 27, + .mask = 0x1f, + .min_val = 1, + .max_val = 0x1e, + .default_val = 0xf, + }, + [MTK_DP_CAL_CLKTX_IMPSE] = { + .idx = 0, + .shift = 9, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] = { + .idx = 2, + .shift = 28, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] = { + .idx = 2, + .shift = 20, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] = { + .idx = 2, + .shift = 12, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] = { + .idx = 2, + .shift = 4, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] = { + .idx = 2, + .shift = 24, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] = { + .idx = 2, + .shift = 16, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] = { + .idx = 2, + .shift = 8, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] = { + .idx = 2, + .shift = 0, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, +}; + +static const struct mtk_dp_efuse_fmt mt8195_dp_efuse_fmt[MTK_DP_CAL_MAX] = { + [MTK_DP_CAL_GLB_BIAS_TRIM] = { + .idx = 0, + .shift = 27, + .mask = 0x1f, + .min_val = 1, + .max_val = 0x1e, + .default_val = 0xf, + }, + [MTK_DP_CAL_CLKTX_IMPSE] = { + .idx = 0, + .shift = 13, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] = { + .idx = 1, + .shift = 28, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + 
[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] = { + .idx = 1, + .shift = 20, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] = { + .idx = 1, + .shift = 12, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] = { + .idx = 1, + .shift = 4, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] = { + .idx = 1, + .shift = 24, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] = { + .idx = 1, + .shift = 16, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] = { + .idx = 1, + .shift = 8, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] = { + .idx = 1, + .shift = 0, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, +}; + +static const struct regmap_config mtk_dp_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = SEC_OFFSET + 0x90, + .name = "mtk-dp-registers", +}; + +static struct mtk_dp *mtk_dp_from_bridge(struct drm_bridge *b) +{ + return container_of(b, struct mtk_dp, bridge); +} + +static u32 mtk_dp_read(struct mtk_dp *mtk_dp, u32 offset) +{ + u32 read_val; + int ret; + + ret = regmap_read(mtk_dp->regs, offset, &read_val); + if (ret) { + dev_err(mtk_dp->dev, "Failed to read register 0x%x: %d\n", + offset, ret); + return 0; + } + + return read_val; +} + +static int mtk_dp_write(struct mtk_dp *mtk_dp, u32 offset, u32 val) +{ + int ret = regmap_write(mtk_dp->regs, offset, val); + + if (ret) + dev_err(mtk_dp->dev, + "Failed to write register 0x%x with value 0x%x\n", + offset, val); + return ret; +} + +static int mtk_dp_update_bits(struct mtk_dp *mtk_dp, u32 offset, + u32 val, u32 mask) +{ + int ret = regmap_update_bits(mtk_dp->regs, offset, mask, val); + + if (ret) + dev_err(mtk_dp->dev, + "Failed to update register 0x%x with value 0x%x, mask 0x%x\n", + offset, val, mask); + return ret; +} + +static void mtk_dp_bulk_16bit_write(struct mtk_dp *mtk_dp, u32 offset, u8 *buf, + size_t length) +{ + int i; + + /* 2 bytes per register */ + for (i = 0; i < length; i += 2) { + u32 val = buf[i] | (i + 1 < length ? buf[i + 1] << 8 : 0); + + if (mtk_dp_write(mtk_dp, offset + i * 2, val)) + return; + } +} + +static void mtk_dp_msa_bypass_enable(struct mtk_dp *mtk_dp, bool enable) +{ + u32 mask = HTOTAL_SEL_DP_ENC0_P0 | VTOTAL_SEL_DP_ENC0_P0 | + HSTART_SEL_DP_ENC0_P0 | VSTART_SEL_DP_ENC0_P0 | + HWIDTH_SEL_DP_ENC0_P0 | VHEIGHT_SEL_DP_ENC0_P0 | + HSP_SEL_DP_ENC0_P0 | HSW_SEL_DP_ENC0_P0 | + VSP_SEL_DP_ENC0_P0 | VSW_SEL_DP_ENC0_P0; + + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3030, enable ? 
0 : mask, mask); +} + +static void mtk_dp_set_msa(struct mtk_dp *mtk_dp) +{ + struct drm_display_mode mode; + struct videomode *vm = &mtk_dp->info.vm; + + drm_display_mode_from_videomode(vm, &mode); + + /* horizontal */ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3010, + mode.htotal, HTOTAL_SW_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3018, + vm->hsync_len + vm->hback_porch, + HSTART_SW_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3028, + vm->hsync_len, HSW_SW_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3028, + 0, HSP_SW_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3020, + vm->hactive, HWIDTH_SW_DP_ENC0_P0_MASK); + + /* vertical */ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3014, + mode.vtotal, VTOTAL_SW_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_301C, + vm->vsync_len + vm->vback_porch, + VSTART_SW_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_302C, + vm->vsync_len, VSW_SW_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_302C, + 0, VSP_SW_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3024, + vm->vactive, VHEIGHT_SW_DP_ENC0_P0_MASK); + + /* horizontal */ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3064, + vm->hactive, HDE_NUM_LAST_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3154, + mode.htotal, PGEN_HTOTAL_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3158, + vm->hfront_porch, + PGEN_HSYNC_RISING_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_315C, + vm->hsync_len, + PGEN_HSYNC_PULSE_WIDTH_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3160, + vm->hback_porch + vm->hsync_len, + PGEN_HFDE_START_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3164, + vm->hactive, + PGEN_HFDE_ACTIVE_WIDTH_DP_ENC0_P0_MASK); + + /* vertical */ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3168, + mode.vtotal, + PGEN_VTOTAL_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_316C, + vm->vfront_porch, + PGEN_VSYNC_RISING_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3170, + vm->vsync_len, + PGEN_VSYNC_PULSE_WIDTH_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3174, + vm->vback_porch + vm->vsync_len, + PGEN_VFDE_START_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3178, + vm->vactive, + PGEN_VFDE_ACTIVE_WIDTH_DP_ENC0_P0_MASK); +} + +static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp, + enum dp_pixelformat color_format) +{ + u32 val; + u32 misc0_color; + + switch (color_format) { + case DP_PIXELFORMAT_YUV422: + val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_YCBCR422; + misc0_color = DP_COLOR_FORMAT_YCbCr422; + break; + case DP_PIXELFORMAT_RGB: + val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_RGB; + misc0_color = DP_COLOR_FORMAT_RGB; + break; + default: + drm_warn(mtk_dp->drm_dev, "Unsupported color format: %d\n", + color_format); + return -EINVAL; + } + + /* update MISC0 */ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034, + misc0_color, + DP_TEST_COLOR_FORMAT_MASK); + + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C, + val, PIXEL_ENCODE_FORMAT_DP_ENC0_P0_MASK); + return 0; +} + +static void mtk_dp_set_color_depth(struct mtk_dp *mtk_dp) +{ + /* Only support 8 bits currently */ + /* Update MISC0 */ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034, + DP_MSA_MISC_8_BPC, DP_TEST_BIT_DEPTH_MASK); + + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C, + VIDEO_COLOR_DEPTH_DP_ENC0_P0_8BIT, + VIDEO_COLOR_DEPTH_DP_ENC0_P0_MASK); +} + +static void mtk_dp_config_mn_mode(struct 
mtk_dp *mtk_dp) +{ + /* 0: hw mode, 1: sw mode */ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004, + 0, VIDEO_M_CODE_SEL_DP_ENC0_P0_MASK); +} + +static void mtk_dp_set_sram_read_start(struct mtk_dp *mtk_dp, u32 val) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C, + val, SRAM_START_READ_THRD_DP_ENC0_P0_MASK); +} + +static void mtk_dp_setup_encoder(struct mtk_dp *mtk_dp) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C, + VIDEO_MN_GEN_EN_DP_ENC0_P0, + VIDEO_MN_GEN_EN_DP_ENC0_P0); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3040, + SDP_DOWN_CNT_DP_ENC0_P0_VAL, + SDP_DOWN_CNT_INIT_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3364, + SDP_DOWN_CNT_IN_HBLANK_DP_ENC1_P0_VAL, + SDP_DOWN_CNT_INIT_IN_HBLANK_DP_ENC1_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3300, + VIDEO_AFIFO_RDY_SEL_DP_ENC1_P0_VAL << 8, + VIDEO_AFIFO_RDY_SEL_DP_ENC1_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3364, + FIFO_READ_START_POINT_DP_ENC1_P0_VAL << 12, + FIFO_READ_START_POINT_DP_ENC1_P0_MASK); + mtk_dp_write(mtk_dp, MTK_DP_ENC1_P0_3368, DP_ENC1_P0_3368_VAL); +} + +static void mtk_dp_pg_enable(struct mtk_dp *mtk_dp, bool enable) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3038, + enable ? VIDEO_SOURCE_SEL_DP_ENC0_P0_MASK : 0, + VIDEO_SOURCE_SEL_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_31B0, + PGEN_PATTERN_SEL_VAL << 4, PGEN_PATTERN_SEL_MASK); +} + +static void mtk_dp_audio_setup_channels(struct mtk_dp *mtk_dp, + struct mtk_dp_audio_cfg *cfg) +{ + u32 channel_enable_bits; + + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3324, + AUDIO_SOURCE_MUX_DP_ENC1_P0_DPRX, + AUDIO_SOURCE_MUX_DP_ENC1_P0_MASK); + + /* audio channel count change reset */ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_33F4, + DP_ENC_DUMMY_RW_1, DP_ENC_DUMMY_RW_1); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3304, + AU_PRTY_REGEN_DP_ENC1_P0_MASK | + AU_CH_STS_REGEN_DP_ENC1_P0_MASK | + AUDIO_SAMPLE_PRSENT_REGEN_DP_ENC1_P0_MASK, + AU_PRTY_REGEN_DP_ENC1_P0_MASK | + AU_CH_STS_REGEN_DP_ENC1_P0_MASK | + AUDIO_SAMPLE_PRSENT_REGEN_DP_ENC1_P0_MASK); + + switch (cfg->channels) { + case 2: + channel_enable_bits = AUDIO_2CH_SEL_DP_ENC0_P0_MASK | + AUDIO_2CH_EN_DP_ENC0_P0_MASK; + break; + case 8: + default: + channel_enable_bits = AUDIO_8CH_SEL_DP_ENC0_P0_MASK | + AUDIO_8CH_EN_DP_ENC0_P0_MASK; + break; + } + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3088, + channel_enable_bits | AU_EN_DP_ENC0_P0, + AUDIO_2CH_SEL_DP_ENC0_P0_MASK | + AUDIO_2CH_EN_DP_ENC0_P0_MASK | + AUDIO_8CH_SEL_DP_ENC0_P0_MASK | + AUDIO_8CH_EN_DP_ENC0_P0_MASK | + AU_EN_DP_ENC0_P0); + + /* audio channel count change reset */ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_33F4, 0, DP_ENC_DUMMY_RW_1); + + /* enable audio reset */ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_33F4, + DP_ENC_DUMMY_RW_1_AUDIO_RST_EN, + DP_ENC_DUMMY_RW_1_AUDIO_RST_EN); +} + +static void mtk_dp_audio_channel_status_set(struct mtk_dp *mtk_dp, + struct mtk_dp_audio_cfg *cfg) +{ + struct snd_aes_iec958 iec = { 0 }; + + switch (cfg->sample_rate) { + case 32000: + iec.status[3] = IEC958_AES3_CON_FS_32000; + break; + case 44100: + iec.status[3] = IEC958_AES3_CON_FS_44100; + break; + case 48000: + iec.status[3] = IEC958_AES3_CON_FS_48000; + break; + case 88200: + iec.status[3] = IEC958_AES3_CON_FS_88200; + break; + case 96000: + iec.status[3] = IEC958_AES3_CON_FS_96000; + break; + case 192000: + iec.status[3] = IEC958_AES3_CON_FS_192000; + break; + default: + iec.status[3] = IEC958_AES3_CON_FS_NOTID; + break; + } + + switch (cfg->word_length_bits) { + case 16: + iec.status[4] = 
IEC958_AES4_CON_WORDLEN_20_16; + break; + case 20: + iec.status[4] = IEC958_AES4_CON_WORDLEN_20_16 | + IEC958_AES4_CON_MAX_WORDLEN_24; + break; + case 24: + iec.status[4] = IEC958_AES4_CON_WORDLEN_24_20 | + IEC958_AES4_CON_MAX_WORDLEN_24; + break; + default: + iec.status[4] = IEC958_AES4_CON_WORDLEN_NOTID; + } + + /* IEC 60958 consumer channel status bits */ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_308C, + 0, CH_STATUS_0_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3090, + iec.status[3] << 8, CH_STATUS_1_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3094, + iec.status[4], CH_STATUS_2_DP_ENC0_P0_MASK); +} + +static void mtk_dp_audio_sdp_asp_set_channels(struct mtk_dp *mtk_dp, + int channels) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_312C, + (min(8, channels) - 1) << 8, + ASP_HB2_DP_ENC0_P0_MASK | ASP_HB3_DP_ENC0_P0_MASK); +} + +static void mtk_dp_audio_set_divider(struct mtk_dp *mtk_dp) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_30BC, + mtk_dp->data->audio_m_div2_bit, + AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MASK); +} + +static void mtk_dp_sdp_trigger_aui(struct mtk_dp *mtk_dp) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3280, + MTK_DP_SDP_AUI, SDP_PACKET_TYPE_DP_ENC1_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3280, + SDP_PACKET_W_DP_ENC1_P0, SDP_PACKET_W_DP_ENC1_P0); +} + +static void mtk_dp_sdp_set_data(struct mtk_dp *mtk_dp, u8 *data_bytes) +{ + mtk_dp_bulk_16bit_write(mtk_dp, MTK_DP_ENC1_P0_3200, + data_bytes, 0x10); +} + +static void mtk_dp_sdp_set_header_aui(struct mtk_dp *mtk_dp, + struct dp_sdp_header *header) +{ + u32 db_addr = MTK_DP_ENC0_P0_30D8 + (MTK_DP_SDP_AUI - 1) * 8; + + mtk_dp_bulk_16bit_write(mtk_dp, db_addr, (u8 *)header, 4); +} + +static void mtk_dp_disable_sdp_aui(struct mtk_dp *mtk_dp) +{ + /* Disable periodic send */ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_30A8 & 0xfffc, 0, + 0xff << ((MTK_DP_ENC0_P0_30A8 & 3) * 8)); +} + +static void mtk_dp_setup_sdp_aui(struct mtk_dp *mtk_dp, + struct dp_sdp *sdp) +{ + u32 shift; + + mtk_dp_sdp_set_data(mtk_dp, sdp->db); + mtk_dp_sdp_set_header_aui(mtk_dp, &sdp->sdp_header); + mtk_dp_disable_sdp_aui(mtk_dp); + + shift = (MTK_DP_ENC0_P0_30A8 & 3) * 8; + + mtk_dp_sdp_trigger_aui(mtk_dp); + /* Enable periodic sending */ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_30A8 & 0xfffc, + 0x05 << shift, 0xff << shift); +} + +static void mtk_dp_aux_irq_clear(struct mtk_dp *mtk_dp) +{ + mtk_dp_write(mtk_dp, MTK_DP_AUX_P0_3640, DP_AUX_P0_3640_VAL); +} + +static void mtk_dp_aux_set_cmd(struct mtk_dp *mtk_dp, u8 cmd, u32 addr) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3644, + cmd, MCU_REQUEST_COMMAND_AUX_TX_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3648, + addr, MCU_REQUEST_ADDRESS_LSB_AUX_TX_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_364C, + addr >> 16, MCU_REQUEST_ADDRESS_MSB_AUX_TX_P0_MASK); +} + +static void mtk_dp_aux_clear_fifo(struct mtk_dp *mtk_dp) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3650, + MCU_ACK_TRAN_COMPLETE_AUX_TX_P0, + MCU_ACK_TRAN_COMPLETE_AUX_TX_P0 | + PHY_FIFO_RST_AUX_TX_P0_MASK | + MCU_REQ_DATA_NUM_AUX_TX_P0_MASK); +} + +static void mtk_dp_aux_request_ready(struct mtk_dp *mtk_dp) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3630, + AUX_TX_REQUEST_READY_AUX_TX_P0, + AUX_TX_REQUEST_READY_AUX_TX_P0); +} + +static void mtk_dp_aux_fill_write_fifo(struct mtk_dp *mtk_dp, u8 *buf, + size_t length) +{ + mtk_dp_bulk_16bit_write(mtk_dp, MTK_DP_AUX_P0_3708, buf, length); +} + +static void mtk_dp_aux_read_rx_fifo(struct mtk_dp *mtk_dp, u8 *buf, + 
size_t length, int read_delay) +{ + int read_pos; + + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3620, + 0, AUX_RD_MODE_AUX_TX_P0_MASK); + + for (read_pos = 0; read_pos < length; read_pos++) { + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3620, + AUX_RX_FIFO_READ_PULSE_TX_P0, + AUX_RX_FIFO_READ_PULSE_TX_P0); + + /* Hardware needs time to update the data */ + usleep_range(read_delay, read_delay * 2); + buf[read_pos] = (u8)(mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3620) & + AUX_RX_FIFO_READ_DATA_AUX_TX_P0_MASK); + } +} + +static void mtk_dp_aux_set_length(struct mtk_dp *mtk_dp, size_t length) +{ + if (length > 0) { + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3650, + (length - 1) << 12, + MCU_REQ_DATA_NUM_AUX_TX_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_362C, + 0, + AUX_NO_LENGTH_AUX_TX_P0 | + AUX_TX_AUXTX_OV_EN_AUX_TX_P0_MASK | + AUX_RESERVED_RW_0_AUX_TX_P0_MASK); + } else { + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_362C, + AUX_NO_LENGTH_AUX_TX_P0, + AUX_NO_LENGTH_AUX_TX_P0 | + AUX_TX_AUXTX_OV_EN_AUX_TX_P0_MASK | + AUX_RESERVED_RW_0_AUX_TX_P0_MASK); + } +} + +static int mtk_dp_aux_wait_for_completion(struct mtk_dp *mtk_dp, bool is_read) +{ + int wait_reply = MTK_DP_AUX_WAIT_REPLY_COUNT; + + while (--wait_reply) { + u32 aux_irq_status; + + if (is_read) { + u32 fifo_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3618); + + if (fifo_status & + (AUX_RX_FIFO_WRITE_POINTER_AUX_TX_P0_MASK | + AUX_RX_FIFO_FULL_AUX_TX_P0_MASK)) { + return 0; + } + } + + aux_irq_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3640); + if (aux_irq_status & AUX_RX_AUX_RECV_COMPLETE_IRQ_AUX_TX_P0) + return 0; + + if (aux_irq_status & AUX_400US_TIMEOUT_IRQ_AUX_TX_P0) + return -ETIMEDOUT; + + /* Give the hardware a chance to reach completion before retrying */ + usleep_range(100, 500); + } + + return -ETIMEDOUT; +} + +static int mtk_dp_aux_do_transfer(struct mtk_dp *mtk_dp, bool is_read, u8 cmd, + u32 addr, u8 *buf, size_t length, u8 *reply_cmd) +{ + int ret; + + if (is_read && (length > DP_AUX_MAX_PAYLOAD_BYTES || + (cmd == DP_AUX_NATIVE_READ && !length))) + return -EINVAL; + + if (!is_read) + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3704, + AUX_TX_FIFO_NEW_MODE_EN_AUX_TX_P0, + AUX_TX_FIFO_NEW_MODE_EN_AUX_TX_P0); + + /* We need to clear fifo and irq before sending commands to the sink device. */ + mtk_dp_aux_clear_fifo(mtk_dp); + mtk_dp_aux_irq_clear(mtk_dp); + + mtk_dp_aux_set_cmd(mtk_dp, cmd, addr); + mtk_dp_aux_set_length(mtk_dp, length); + + if (!is_read) { + if (length) + mtk_dp_aux_fill_write_fifo(mtk_dp, buf, length); + + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3704, + AUX_TX_FIFO_WDATA_NEW_MODE_T_AUX_TX_P0_MASK, + AUX_TX_FIFO_WDATA_NEW_MODE_T_AUX_TX_P0_MASK); + } + + mtk_dp_aux_request_ready(mtk_dp); + + /* Wait for feedback from sink device. 
*/ + ret = mtk_dp_aux_wait_for_completion(mtk_dp, is_read); + + *reply_cmd = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3624) & + AUX_RX_REPLY_COMMAND_AUX_TX_P0_MASK; + + if (ret) { + u32 phy_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3628) & + AUX_RX_PHY_STATE_AUX_TX_P0_MASK; + if (phy_status != AUX_RX_PHY_STATE_AUX_TX_P0_RX_IDLE) { + dev_err(mtk_dp->dev, + "AUX Rx Aux hang, need SW reset\n"); + return -EIO; + } + + return -ETIMEDOUT; + } + + if (!length) { + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_362C, + 0, + AUX_NO_LENGTH_AUX_TX_P0 | + AUX_TX_AUXTX_OV_EN_AUX_TX_P0_MASK | + AUX_RESERVED_RW_0_AUX_TX_P0_MASK); + } else if (is_read) { + int read_delay; + + if (cmd == (DP_AUX_I2C_READ | DP_AUX_I2C_MOT) || + cmd == DP_AUX_I2C_READ) + read_delay = 500; + else + read_delay = 100; + + mtk_dp_aux_read_rx_fifo(mtk_dp, buf, length, read_delay); + } + + return 0; +} + +static void mtk_dp_set_swing_pre_emphasis(struct mtk_dp *mtk_dp, int lane_num, + int swing_val, int preemphasis) +{ + u32 lane_shift = lane_num * DP_TX1_VOLT_SWING_SHIFT; + + dev_dbg(mtk_dp->dev, + "link training: swing_val = 0x%x, pre-emphasis = 0x%x\n", + swing_val, preemphasis); + + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_SWING_EMP, + swing_val << (DP_TX0_VOLT_SWING_SHIFT + lane_shift), + DP_TX0_VOLT_SWING_MASK << lane_shift); + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_SWING_EMP, + preemphasis << (DP_TX0_PRE_EMPH_SHIFT + lane_shift), + DP_TX0_PRE_EMPH_MASK << lane_shift); +} + +static void mtk_dp_reset_swing_pre_emphasis(struct mtk_dp *mtk_dp) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_SWING_EMP, + 0, + DP_TX0_VOLT_SWING_MASK | + DP_TX1_VOLT_SWING_MASK | + DP_TX2_VOLT_SWING_MASK | + DP_TX3_VOLT_SWING_MASK | + DP_TX0_PRE_EMPH_MASK | + DP_TX1_PRE_EMPH_MASK | + DP_TX2_PRE_EMPH_MASK | + DP_TX3_PRE_EMPH_MASK); +} + +static u32 mtk_dp_swirq_get_clear(struct mtk_dp *mtk_dp) +{ + u32 irq_status = mtk_dp_read(mtk_dp, MTK_DP_TRANS_P0_35D0) & + SW_IRQ_FINAL_STATUS_DP_TRANS_P0_MASK; + + if (irq_status) { + mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_35C8, + irq_status, SW_IRQ_CLR_DP_TRANS_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_35C8, + 0, SW_IRQ_CLR_DP_TRANS_P0_MASK); + } + + return irq_status; +} + +static u32 mtk_dp_hwirq_get_clear(struct mtk_dp *mtk_dp) +{ + u32 irq_status = (mtk_dp_read(mtk_dp, MTK_DP_TRANS_P0_3418) & + IRQ_STATUS_DP_TRANS_P0_MASK) >> 12; + + if (irq_status) { + mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3418, + irq_status, IRQ_CLR_DP_TRANS_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3418, + 0, IRQ_CLR_DP_TRANS_P0_MASK); + } + + return irq_status; +} + +static void mtk_dp_hwirq_enable(struct mtk_dp *mtk_dp, bool enable) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3418, + enable ? 
0 : + IRQ_MASK_DP_TRANS_P0_DISC_IRQ | + IRQ_MASK_DP_TRANS_P0_CONN_IRQ | + IRQ_MASK_DP_TRANS_P0_INT_IRQ, + IRQ_MASK_DP_TRANS_P0_MASK); +} + +static void mtk_dp_initialize_settings(struct mtk_dp *mtk_dp) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_342C, + XTAL_FREQ_DP_TRANS_P0_DEFAULT, + XTAL_FREQ_DP_TRANS_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3540, + FEC_CLOCK_EN_MODE_DP_TRANS_P0, + FEC_CLOCK_EN_MODE_DP_TRANS_P0); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_31EC, + AUDIO_CH_SRC_SEL_DP_ENC0_P0, + AUDIO_CH_SRC_SEL_DP_ENC0_P0); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_304C, + 0, SDP_VSYNC_RISING_MASK_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_IRQ_MASK, + IRQ_MASK_AUX_TOP_IRQ, IRQ_MASK_AUX_TOP_IRQ); +} + +static void mtk_dp_initialize_hpd_detect_settings(struct mtk_dp *mtk_dp) +{ + u32 val; + /* Debounce threshold */ + mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3410, + 8, HPD_DEB_THD_DP_TRANS_P0_MASK); + + val = (HPD_INT_THD_DP_TRANS_P0_LOWER_500US | + HPD_INT_THD_DP_TRANS_P0_UPPER_1100US) << 4; + mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3410, + val, HPD_INT_THD_DP_TRANS_P0_MASK); + + /* + * Connect threshold 1.5ms + 5 x 0.1ms = 2ms + * Disconnect threshold 1.5ms + 5 x 0.1ms = 2ms + */ + val = (5 << 8) | (5 << 12); + mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3410, + val, + HPD_DISC_THD_DP_TRANS_P0_MASK | + HPD_CONN_THD_DP_TRANS_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3430, + HPD_INT_THD_ECO_DP_TRANS_P0_HIGH_BOUND_EXT, + HPD_INT_THD_ECO_DP_TRANS_P0_MASK); +} + +static void mtk_dp_initialize_aux_settings(struct mtk_dp *mtk_dp) +{ + /* modify timeout threshold = 0x1595 */ + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_360C, + AUX_TIMEOUT_THR_AUX_TX_P0_VAL, + AUX_TIMEOUT_THR_AUX_TX_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3658, + 0, AUX_TX_OV_EN_AUX_TX_P0_MASK); + /* 25 for 26M */ + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3634, + AUX_TX_OVER_SAMPLE_RATE_FOR_26M << 8, + AUX_TX_OVER_SAMPLE_RATE_AUX_TX_P0_MASK); + /* 13 for 26M */ + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3614, + AUX_RX_UI_CNT_THR_AUX_FOR_26M, + AUX_RX_UI_CNT_THR_AUX_TX_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_37C8, + MTK_ATOP_EN_AUX_TX_P0, + MTK_ATOP_EN_AUX_TX_P0); + + /* Set complete reply mode for AUX */ + mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3690, + RX_REPLY_COMPLETE_MODE_AUX_TX_P0, + RX_REPLY_COMPLETE_MODE_AUX_TX_P0); +} + +static void mtk_dp_initialize_digital_settings(struct mtk_dp *mtk_dp) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_304C, + 0, VBID_VIDEO_MUTE_DP_ENC0_P0_MASK); + + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3368, + BS2BS_MODE_DP_ENC1_P0_VAL << 12, + BS2BS_MODE_DP_ENC1_P0_MASK); + + /* dp tx encoder reset all sw */ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004, + DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0, + DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0); + + /* Wait for sw reset to complete */ + usleep_range(1000, 5000); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004, + 0, DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0); +} + +static void mtk_dp_digital_sw_reset(struct mtk_dp *mtk_dp) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_340C, + DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0, + DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0); + + /* Wait for sw reset to complete */ + usleep_range(1000, 5000); + mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_340C, + 0, DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0); +} + +static void mtk_dp_sdp_path_reset(struct mtk_dp *mtk_dp) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004, + SDP_RESET_SW_DP_ENC0_P0, + 
SDP_RESET_SW_DP_ENC0_P0); + + /* Wait for sdp path reset to complete */ + usleep_range(1000, 5000); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004, + 0, SDP_RESET_SW_DP_ENC0_P0); +} + +static void mtk_dp_set_lanes(struct mtk_dp *mtk_dp, int lanes) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_35F0, + lanes == 0 ? 0 : DP_TRANS_DUMMY_RW_0, + DP_TRANS_DUMMY_RW_0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3000, + lanes, LANE_NUM_DP_ENC0_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_34A4, + lanes << 2, LANE_NUM_DP_TRANS_P0_MASK); +} + +static void mtk_dp_get_calibration_data(struct mtk_dp *mtk_dp) +{ + const struct mtk_dp_efuse_fmt *fmt; + struct device *dev = mtk_dp->dev; + struct nvmem_cell *cell; + u32 *cal_data = mtk_dp->cal_data; + u32 *buf; + int i; + size_t len; + + cell = nvmem_cell_get(dev, "dp_calibration_data"); + if (IS_ERR(cell)) { + dev_warn(dev, "Failed to get nvmem cell dp_calibration_data\n"); + goto use_default_val; + } + + buf = (u32 *)nvmem_cell_read(cell, &len); + nvmem_cell_put(cell); + + if (IS_ERR(buf)) { + dev_warn(dev, "Failed to read nvmem_cell_read\n"); + goto use_default_val; + } + + /* The cell length is in bytes. Convert it to be compatible with u32 buffer. */ + len /= sizeof(u32); + + for (i = 0; i < MTK_DP_CAL_MAX; i++) { + fmt = &mtk_dp->data->efuse_fmt[i]; + + if (fmt->idx >= len) { + dev_warn(mtk_dp->dev, + "Out-of-bound efuse data access, fmt idx = %d, buf len = %zu\n", + fmt->idx, len); + kfree(buf); + goto use_default_val; + } + + cal_data[i] = (buf[fmt->idx] >> fmt->shift) & fmt->mask; + + if (cal_data[i] < fmt->min_val || cal_data[i] > fmt->max_val) { + dev_warn(mtk_dp->dev, "Invalid efuse data, idx = %d\n", i); + kfree(buf); + goto use_default_val; + } + } + kfree(buf); + + return; + +use_default_val: + dev_warn(mtk_dp->dev, "Use default calibration data\n"); + for (i = 0; i < MTK_DP_CAL_MAX; i++) + cal_data[i] = mtk_dp->data->efuse_fmt[i].default_val; +} + +static void mtk_dp_set_calibration_data(struct mtk_dp *mtk_dp) +{ + u32 *cal_data = mtk_dp->cal_data; + + mtk_dp_update_bits(mtk_dp, DP_PHY_GLB_DPAUX_TX, + cal_data[MTK_DP_CAL_CLKTX_IMPSE] << 20, + RG_CKM_PT0_CKTX_IMPSEL); + mtk_dp_update_bits(mtk_dp, DP_PHY_GLB_BIAS_GEN_00, + cal_data[MTK_DP_CAL_GLB_BIAS_TRIM] << 16, + RG_XTP_GLB_BIAS_INTR_CTRL); + mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_0, + cal_data[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] << 12, + RG_XTP_LN0_TX_IMPSEL_PMOS); + mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_0, + cal_data[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] << 16, + RG_XTP_LN0_TX_IMPSEL_NMOS); + mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_1, + cal_data[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] << 12, + RG_XTP_LN1_TX_IMPSEL_PMOS); + mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_1, + cal_data[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] << 16, + RG_XTP_LN1_TX_IMPSEL_NMOS); + mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_2, + cal_data[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] << 12, + RG_XTP_LN2_TX_IMPSEL_PMOS); + mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_2, + cal_data[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] << 16, + RG_XTP_LN2_TX_IMPSEL_NMOS); + mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_3, + cal_data[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] << 12, + RG_XTP_LN3_TX_IMPSEL_PMOS); + mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_3, + cal_data[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] << 16, + RG_XTP_LN3_TX_IMPSEL_NMOS); +} + +static int mtk_dp_phy_configure(struct mtk_dp *mtk_dp, + u32 link_rate, int lane_count) +{ + int ret; + union phy_configure_opts phy_opts = { + .dp = { + .link_rate = drm_dp_bw_code_to_link_rate(link_rate) / 100, + .set_rate = 1, + 
.lanes = lane_count, + .set_lanes = 1, + .ssc = mtk_dp->train_info.sink_ssc, + } + }; + + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE, DP_PWR_STATE_BANDGAP, + DP_PWR_STATE_MASK); + + ret = phy_configure(mtk_dp->phy, &phy_opts); + if (ret) + return ret; + + mtk_dp_set_calibration_data(mtk_dp); + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE, + DP_PWR_STATE_BANDGAP_TPLL_LANE, DP_PWR_STATE_MASK); + + return 0; +} + +static void mtk_dp_set_idle_pattern(struct mtk_dp *mtk_dp, bool enable) +{ + u32 val = POST_MISC_DATA_LANE0_OV_DP_TRANS_P0_MASK | + POST_MISC_DATA_LANE1_OV_DP_TRANS_P0_MASK | + POST_MISC_DATA_LANE2_OV_DP_TRANS_P0_MASK | + POST_MISC_DATA_LANE3_OV_DP_TRANS_P0_MASK; + + mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3580, + enable ? val : 0, val); +} + +static void mtk_dp_train_set_pattern(struct mtk_dp *mtk_dp, int pattern) +{ + /* TPS1 */ + if (pattern == 1) + mtk_dp_set_idle_pattern(mtk_dp, false); + + mtk_dp_update_bits(mtk_dp, + MTK_DP_TRANS_P0_3400, + pattern ? BIT(pattern - 1) << 12 : 0, + PATTERN1_EN_DP_TRANS_P0_MASK | + PATTERN2_EN_DP_TRANS_P0_MASK | + PATTERN3_EN_DP_TRANS_P0_MASK | + PATTERN4_EN_DP_TRANS_P0_MASK); +} + +static void mtk_dp_set_enhanced_frame_mode(struct mtk_dp *mtk_dp) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3000, + ENHANCED_FRAME_EN_DP_ENC0_P0, + ENHANCED_FRAME_EN_DP_ENC0_P0); +} + +static void mtk_dp_training_set_scramble(struct mtk_dp *mtk_dp, bool enable) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3404, + enable ? DP_SCR_EN_DP_TRANS_P0_MASK : 0, + DP_SCR_EN_DP_TRANS_P0_MASK); +} + +static void mtk_dp_video_mute(struct mtk_dp *mtk_dp, bool enable) +{ + struct arm_smccc_res res; + u32 val = VIDEO_MUTE_SEL_DP_ENC0_P0 | + (enable ? VIDEO_MUTE_SW_DP_ENC0_P0 : 0); + + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3000, + val, + VIDEO_MUTE_SEL_DP_ENC0_P0 | + VIDEO_MUTE_SW_DP_ENC0_P0); + + arm_smccc_smc(MTK_DP_SIP_CONTROL_AARCH32, + mtk_dp->data->smc_cmd, enable, + 0, 0, 0, 0, 0, &res); + + dev_dbg(mtk_dp->dev, "smc cmd: 0x%x, p1: %s, ret: 0x%lx-0x%lx\n", + mtk_dp->data->smc_cmd, enable ? 
"enable" : "disable", res.a0, res.a1); +} + +static void mtk_dp_audio_mute(struct mtk_dp *mtk_dp, bool mute) +{ + u32 val[3]; + + if (mute) { + val[0] = VBID_AUDIO_MUTE_FLAG_SW_DP_ENC0_P0 | + VBID_AUDIO_MUTE_FLAG_SEL_DP_ENC0_P0; + val[1] = 0; + val[2] = 0; + } else { + val[0] = 0; + val[1] = AU_EN_DP_ENC0_P0; + /* Send one every two frames */ + val[2] = 0x0F; + } + + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3030, + val[0], + VBID_AUDIO_MUTE_FLAG_SW_DP_ENC0_P0 | + VBID_AUDIO_MUTE_FLAG_SEL_DP_ENC0_P0); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3088, + val[1], AU_EN_DP_ENC0_P0); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_30A4, + val[2], AU_TS_CFG_DP_ENC0_P0_MASK); +} + +static void mtk_dp_aux_panel_poweron(struct mtk_dp *mtk_dp, bool pwron) +{ + if (pwron) { + /* power on aux */ + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE, + DP_PWR_STATE_BANDGAP_TPLL_LANE, + DP_PWR_STATE_MASK); + + /* power on panel */ + drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); + usleep_range(2000, 5000); + } else { + /* power off panel */ + drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3); + usleep_range(2000, 3000); + + /* power off aux */ + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE, + DP_PWR_STATE_BANDGAP_TPLL, + DP_PWR_STATE_MASK); + } +} + +static void mtk_dp_power_enable(struct mtk_dp *mtk_dp) +{ + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_RESET_AND_PROBE, + 0, SW_RST_B_PHYD); + + /* Wait for power enable */ + usleep_range(10, 200); + + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_RESET_AND_PROBE, + SW_RST_B_PHYD, SW_RST_B_PHYD); + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE, + DP_PWR_STATE_BANDGAP_TPLL, DP_PWR_STATE_MASK); + mtk_dp_write(mtk_dp, MTK_DP_1040, + RG_DPAUX_RX_VALID_DEGLITCH_EN | RG_XTP_GLB_CKDET_EN | + RG_DPAUX_RX_EN); + mtk_dp_update_bits(mtk_dp, MTK_DP_0034, 0, DA_CKM_CKTX0_EN_FORCE_EN); +} + +static void mtk_dp_power_disable(struct mtk_dp *mtk_dp) +{ + mtk_dp_write(mtk_dp, MTK_DP_TOP_PWR_STATE, 0); + + mtk_dp_update_bits(mtk_dp, MTK_DP_0034, + DA_CKM_CKTX0_EN_FORCE_EN, DA_CKM_CKTX0_EN_FORCE_EN); + + /* Disable RX */ + mtk_dp_write(mtk_dp, MTK_DP_1040, 0); + mtk_dp_write(mtk_dp, MTK_DP_TOP_MEM_PD, + 0x550 | FUSE_SEL | MEM_ISO_EN); +} + +static void mtk_dp_initialize_priv_data(struct mtk_dp *mtk_dp) +{ + bool plugged_in = (mtk_dp->bridge.type == DRM_MODE_CONNECTOR_eDP); + + mtk_dp->train_info.link_rate = DP_LINK_BW_5_4; + mtk_dp->train_info.lane_count = mtk_dp->max_lanes; + mtk_dp->train_info.cable_plugged_in = plugged_in; + + mtk_dp->info.format = DP_PIXELFORMAT_RGB; + memset(&mtk_dp->info.vm, 0, sizeof(struct videomode)); + mtk_dp->audio_enable = false; +} + +static void mtk_dp_sdp_set_down_cnt_init(struct mtk_dp *mtk_dp, + u32 sram_read_start) +{ + u32 sdp_down_cnt_init = 0; + struct drm_display_mode mode; + struct videomode *vm = &mtk_dp->info.vm; + + drm_display_mode_from_videomode(vm, &mode); + + if (mode.clock > 0) + sdp_down_cnt_init = sram_read_start * + mtk_dp->train_info.link_rate * 2700 * 8 / + (mode.clock * 4); + + switch (mtk_dp->train_info.lane_count) { + case 1: + sdp_down_cnt_init = max_t(u32, sdp_down_cnt_init, 0x1A); + break; + case 2: + /* case for LowResolution && High Audio Sample Rate */ + sdp_down_cnt_init = max_t(u32, sdp_down_cnt_init, 0x10); + sdp_down_cnt_init += mode.vtotal <= 525 ? 
4 : 0; + break; + case 4: + default: + sdp_down_cnt_init = max_t(u32, sdp_down_cnt_init, 6); + break; + } + + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3040, + sdp_down_cnt_init, + SDP_DOWN_CNT_INIT_DP_ENC0_P0_MASK); +} + +static void mtk_dp_sdp_set_down_cnt_init_in_hblank(struct mtk_dp *mtk_dp) +{ + int pix_clk_mhz; + u32 dc_offset; + u32 spd_down_cnt_init = 0; + struct drm_display_mode mode; + struct videomode *vm = &mtk_dp->info.vm; + + drm_display_mode_from_videomode(vm, &mode); + + pix_clk_mhz = mtk_dp->info.format == DP_PIXELFORMAT_YUV420 ? + mode.clock / 2000 : mode.clock / 1000; + + switch (mtk_dp->train_info.lane_count) { + case 1: + spd_down_cnt_init = 0x20; + break; + case 2: + dc_offset = (mode.vtotal <= 525) ? 0x14 : 0x00; + spd_down_cnt_init = 0x18 + dc_offset; + break; + case 4: + default: + dc_offset = (mode.vtotal <= 525) ? 0x08 : 0x00; + if (pix_clk_mhz > mtk_dp->train_info.link_rate * 27) + spd_down_cnt_init = 0x8; + else + spd_down_cnt_init = 0x10 + dc_offset; + break; + } + + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3364, spd_down_cnt_init, + SDP_DOWN_CNT_INIT_IN_HBLANK_DP_ENC1_P0_MASK); +} + +static void mtk_dp_audio_sample_arrange_disable(struct mtk_dp *mtk_dp) +{ + /* arrange audio packets into the Hblanking and Vblanking area */ + if (!mtk_dp->data->audio_pkt_in_hblank_area) + return; + + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3374, 0, + SDP_ASP_INSERT_IN_HBLANK_DP_ENC1_P0_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3374, 0, + SDP_DOWN_ASP_CNT_INIT_DP_ENC1_P0_MASK); +} + +static void mtk_dp_setup_tu(struct mtk_dp *mtk_dp) +{ + u32 sram_read_start = min_t(u32, MTK_DP_TBC_BUF_READ_START_ADDR, + mtk_dp->info.vm.hactive / + mtk_dp->train_info.lane_count / + MTK_DP_4P1T / MTK_DP_HDE / + MTK_DP_PIX_PER_ADDR); + mtk_dp_set_sram_read_start(mtk_dp, sram_read_start); + mtk_dp_setup_encoder(mtk_dp); + mtk_dp_audio_sample_arrange_disable(mtk_dp); + mtk_dp_sdp_set_down_cnt_init_in_hblank(mtk_dp); + mtk_dp_sdp_set_down_cnt_init(mtk_dp, sram_read_start); +} + +static void mtk_dp_set_tx_out(struct mtk_dp *mtk_dp) +{ + mtk_dp_setup_tu(mtk_dp); +} + +static void mtk_dp_train_update_swing_pre(struct mtk_dp *mtk_dp, int lanes, + u8 dpcd_adjust_req[2]) +{ + int lane; + + for (lane = 0; lane < lanes; ++lane) { + u8 val; + u8 swing; + u8 preemphasis; + int index = lane / 2; + int shift = lane % 2 ? 
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : 0; + + swing = (dpcd_adjust_req[index] >> shift) & + DP_ADJUST_VOLTAGE_SWING_LANE0_MASK; + preemphasis = ((dpcd_adjust_req[index] >> shift) & + DP_ADJUST_PRE_EMPHASIS_LANE0_MASK) >> + DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT; + val = swing << DP_TRAIN_VOLTAGE_SWING_SHIFT | + preemphasis << DP_TRAIN_PRE_EMPHASIS_SHIFT; + + if (swing == DP_TRAIN_VOLTAGE_SWING_LEVEL_3) + val |= DP_TRAIN_MAX_SWING_REACHED; + if (preemphasis == 3) + val |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + mtk_dp_set_swing_pre_emphasis(mtk_dp, lane, swing, preemphasis); + drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_LANE0_SET + lane, + val); + } +} + +static void mtk_dp_pattern(struct mtk_dp *mtk_dp, bool is_tps1) +{ + int pattern; + unsigned int aux_offset; + + if (is_tps1) { + pattern = 1; + aux_offset = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1; + } else { + aux_offset = mtk_dp->train_info.channel_eq_pattern; + + switch (mtk_dp->train_info.channel_eq_pattern) { + case DP_TRAINING_PATTERN_4: + pattern = 4; + break; + case DP_TRAINING_PATTERN_3: + pattern = 3; + aux_offset |= DP_LINK_SCRAMBLING_DISABLE; + break; + case DP_TRAINING_PATTERN_2: + default: + pattern = 2; + aux_offset |= DP_LINK_SCRAMBLING_DISABLE; + break; + } + } + + mtk_dp_train_set_pattern(mtk_dp, pattern); + drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_PATTERN_SET, aux_offset); +} + +static int mtk_dp_train_setting(struct mtk_dp *mtk_dp, u8 target_link_rate, + u8 target_lane_count) +{ + int ret; + + drm_dp_dpcd_writeb(&mtk_dp->aux, DP_LINK_BW_SET, target_link_rate); + drm_dp_dpcd_writeb(&mtk_dp->aux, DP_LANE_COUNT_SET, + target_lane_count | DP_LANE_COUNT_ENHANCED_FRAME_EN); + + if (mtk_dp->train_info.sink_ssc) + drm_dp_dpcd_writeb(&mtk_dp->aux, DP_DOWNSPREAD_CTRL, + DP_SPREAD_AMP_0_5); + + mtk_dp_set_lanes(mtk_dp, target_lane_count / 2); + ret = mtk_dp_phy_configure(mtk_dp, target_link_rate, target_lane_count); + if (ret) + return ret; + + dev_dbg(mtk_dp->dev, + "Link train target_link_rate = 0x%x, target_lane_count = 0x%x\n", + target_link_rate, target_lane_count); + + return 0; +} + +static int mtk_dp_train_cr(struct mtk_dp *mtk_dp, u8 target_lane_count) +{ + u8 lane_adjust[2] = {}; + u8 link_status[DP_LINK_STATUS_SIZE] = {}; + u8 prev_lane_adjust = 0xff; + int train_retries = 0; + int voltage_retries = 0; + + mtk_dp_pattern(mtk_dp, true); + + /* In DP spec 1.4, the retry count of CR is defined as 10. */ + do { + train_retries++; + if (!mtk_dp->train_info.cable_plugged_in) { + mtk_dp_train_set_pattern(mtk_dp, 0); + return -ENODEV; + } + + drm_dp_dpcd_read(&mtk_dp->aux, DP_ADJUST_REQUEST_LANE0_1, + lane_adjust, sizeof(lane_adjust)); + mtk_dp_train_update_swing_pre(mtk_dp, target_lane_count, + lane_adjust); + + drm_dp_link_train_clock_recovery_delay(&mtk_dp->aux, + mtk_dp->rx_cap); + + /* check link status from sink device */ + drm_dp_dpcd_read_link_status(&mtk_dp->aux, link_status); + if (drm_dp_clock_recovery_ok(link_status, + target_lane_count)) { + dev_dbg(mtk_dp->dev, "Link train CR pass\n"); + return 0; + } + + /* + * In DP spec 1.4, if current voltage level is the same + * with previous voltage level, we need to retry 5 times. + */ + if (prev_lane_adjust == link_status[4]) { + voltage_retries++; + /* + * Condition of CR fail: + * 1. Failed to pass CR using the same voltage + * level over five times. + * 2. Failed to pass CR when the current voltage + * level is the same with previous voltage + * level and reach max voltage level (3). 
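+			 * In either case we stop retrying here and let the
+			 * caller restart training with reduced link settings.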
+ */ + if (voltage_retries > MTK_DP_TRAIN_VOLTAGE_LEVEL_RETRY || + (prev_lane_adjust & DP_ADJUST_VOLTAGE_SWING_LANE0_MASK) == 3) { + dev_dbg(mtk_dp->dev, "Link train CR fail\n"); + break; + } + } else { + /* + * If the voltage level is changed, we need to + * re-calculate this retry count. + */ + voltage_retries = 0; + } + prev_lane_adjust = link_status[4]; + } while (train_retries < MTK_DP_TRAIN_DOWNSCALE_RETRY); + + /* Failed to train CR, and disable pattern. */ + drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); + mtk_dp_train_set_pattern(mtk_dp, 0); + + return -ETIMEDOUT; +} + +static int mtk_dp_train_eq(struct mtk_dp *mtk_dp, u8 target_lane_count) +{ + u8 lane_adjust[2] = {}; + u8 link_status[DP_LINK_STATUS_SIZE] = {}; + int train_retries = 0; + + mtk_dp_pattern(mtk_dp, false); + + do { + train_retries++; + if (!mtk_dp->train_info.cable_plugged_in) { + mtk_dp_train_set_pattern(mtk_dp, 0); + return -ENODEV; + } + + drm_dp_dpcd_read(&mtk_dp->aux, DP_ADJUST_REQUEST_LANE0_1, + lane_adjust, sizeof(lane_adjust)); + mtk_dp_train_update_swing_pre(mtk_dp, target_lane_count, + lane_adjust); + + drm_dp_link_train_channel_eq_delay(&mtk_dp->aux, + mtk_dp->rx_cap); + + /* check link status from sink device */ + drm_dp_dpcd_read_link_status(&mtk_dp->aux, link_status); + if (drm_dp_channel_eq_ok(link_status, target_lane_count)) { + dev_dbg(mtk_dp->dev, "Link train EQ pass\n"); + + /* Training done, and disable pattern. */ + drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); + mtk_dp_train_set_pattern(mtk_dp, 0); + return 0; + } + dev_dbg(mtk_dp->dev, "Link train EQ fail\n"); + } while (train_retries < MTK_DP_TRAIN_DOWNSCALE_RETRY); + + /* Failed to train EQ, and disable pattern. */ + drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); + mtk_dp_train_set_pattern(mtk_dp, 0); + + return -ETIMEDOUT; +} + +static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp) +{ + u8 val; + ssize_t ret; + + /* + * If we're eDP and capabilities were already parsed we can skip + * reading again because eDP panels aren't hotpluggable hence the + * caps and training information won't ever change in a boot life + */ + if (mtk_dp->bridge.type == DRM_MODE_CONNECTOR_eDP && + mtk_dp->rx_cap[DP_MAX_LINK_RATE] && + mtk_dp->train_info.sink_ssc) + return 0; + + ret = drm_dp_read_dpcd_caps(&mtk_dp->aux, mtk_dp->rx_cap); + if (ret < 0) + return ret; + + if (drm_dp_tps4_supported(mtk_dp->rx_cap)) + mtk_dp->train_info.channel_eq_pattern = DP_TRAINING_PATTERN_4; + else if (drm_dp_tps3_supported(mtk_dp->rx_cap)) + mtk_dp->train_info.channel_eq_pattern = DP_TRAINING_PATTERN_3; + else + mtk_dp->train_info.channel_eq_pattern = DP_TRAINING_PATTERN_2; + + mtk_dp->train_info.sink_ssc = drm_dp_max_downspread(mtk_dp->rx_cap); + + ret = drm_dp_dpcd_readb(&mtk_dp->aux, DP_MSTM_CAP, &val); + if (ret < 1) { + dev_err(mtk_dp->dev, "Read mstm cap failed: %zd\n", ret); + return ret == 0 ? -EIO : ret; + } + + if (val & DP_MST_CAP) { + /* Clear DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 */ + ret = drm_dp_dpcd_readb(&mtk_dp->aux, + DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0, + &val); + if (ret < 1) { + dev_err(mtk_dp->dev, "Read irq vector failed: %zd\n", ret); + return ret == 0 ? 
-EIO : ret;
+		}
+
+		if (val) {
+			ret = drm_dp_dpcd_writeb(&mtk_dp->aux,
+						 DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
+						 val);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+static bool mtk_dp_edid_parse_audio_capabilities(struct mtk_dp *mtk_dp,
+						 struct mtk_dp_audio_cfg *cfg)
+{
+	if (!mtk_dp->data->audio_supported)
+		return false;
+
+	if (mtk_dp->info.audio_cur_cfg.sad_count <= 0) {
+		drm_info(mtk_dp->drm_dev, "No SADs found\n");
+		return false;
+	}
+
+	return true;
+}
+
+static void mtk_dp_train_change_mode(struct mtk_dp *mtk_dp)
+{
+	phy_reset(mtk_dp->phy);
+	mtk_dp_reset_swing_pre_emphasis(mtk_dp);
+}
+
+static int mtk_dp_training(struct mtk_dp *mtk_dp)
+{
+	int ret;
+	u8 lane_count, link_rate, train_limit, max_link_rate;
+
+	link_rate = min_t(u8, mtk_dp->max_linkrate,
+			  mtk_dp->rx_cap[DP_MAX_LINK_RATE]);
+	max_link_rate = link_rate;
+	lane_count = min_t(u8, mtk_dp->max_lanes,
+			   drm_dp_max_lane_count(mtk_dp->rx_cap));
+
+	/*
+	 * TPS are generated by the hardware pattern generator, so we need
+	 * to disable the hardware scrambling setting before using the TPS
+	 * pattern generator.
+	 */
+	mtk_dp_training_set_scramble(mtk_dp, false);
+
+	for (train_limit = 6; train_limit > 0; train_limit--) {
+		mtk_dp_train_change_mode(mtk_dp);
+
+		ret = mtk_dp_train_setting(mtk_dp, link_rate, lane_count);
+		if (ret)
+			return ret;
+
+		ret = mtk_dp_train_cr(mtk_dp, lane_count);
+		if (ret == -ENODEV) {
+			return ret;
+		} else if (ret) {
+			/* reduce link rate */
+			switch (link_rate) {
+			case DP_LINK_BW_1_62:
+				lane_count = lane_count / 2;
+				link_rate = max_link_rate;
+				if (lane_count == 0)
+					return -EIO;
+				break;
+			case DP_LINK_BW_2_7:
+				link_rate = DP_LINK_BW_1_62;
+				break;
+			case DP_LINK_BW_5_4:
+				link_rate = DP_LINK_BW_2_7;
+				break;
+			case DP_LINK_BW_8_1:
+				link_rate = DP_LINK_BW_5_4;
+				break;
+			default:
+				return -EINVAL;
+			}
+			continue;
+		}
+
+		ret = mtk_dp_train_eq(mtk_dp, lane_count);
+		if (ret == -ENODEV) {
+			return ret;
+		} else if (ret) {
+			/* reduce lane count */
+			if (lane_count == 0)
+				return -EIO;
+			lane_count /= 2;
+			continue;
+		}
+
+		/* If we reach this point, training is done. */
+		break;
+	}
+
+	if (train_limit == 0)
+		return -ETIMEDOUT;
+
+	mtk_dp->train_info.link_rate = link_rate;
+	mtk_dp->train_info.lane_count = lane_count;
+
+	/*
+	 * After training is done, we need to output a normal stream instead
+	 * of TPS, so we need to re-enable scrambling.
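+	 * Enhanced frame mode is also enabled on the encoder right after.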
+ */ + mtk_dp_training_set_scramble(mtk_dp, true); + mtk_dp_set_enhanced_frame_mode(mtk_dp); + + return 0; +} + +static void mtk_dp_video_enable(struct mtk_dp *mtk_dp, bool enable) +{ + /* the mute sequence is different between enable and disable */ + if (enable) { + mtk_dp_msa_bypass_enable(mtk_dp, false); + mtk_dp_pg_enable(mtk_dp, false); + mtk_dp_set_tx_out(mtk_dp); + mtk_dp_video_mute(mtk_dp, false); + } else { + mtk_dp_video_mute(mtk_dp, true); + mtk_dp_pg_enable(mtk_dp, true); + mtk_dp_msa_bypass_enable(mtk_dp, true); + } +} + +static void mtk_dp_audio_sdp_setup(struct mtk_dp *mtk_dp, + struct mtk_dp_audio_cfg *cfg) +{ + struct dp_sdp sdp; + struct hdmi_audio_infoframe frame; + + hdmi_audio_infoframe_init(&frame); + frame.coding_type = HDMI_AUDIO_CODING_TYPE_PCM; + frame.channels = cfg->channels; + frame.sample_frequency = cfg->sample_rate; + + switch (cfg->word_length_bits) { + case 16: + frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; + break; + case 20: + frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_20; + break; + case 24: + default: + frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_24; + break; + } + + hdmi_audio_infoframe_pack_for_dp(&frame, &sdp, MTK_DP_VERSION); + + mtk_dp_audio_sdp_asp_set_channels(mtk_dp, cfg->channels); + mtk_dp_setup_sdp_aui(mtk_dp, &sdp); +} + +static void mtk_dp_audio_setup(struct mtk_dp *mtk_dp, + struct mtk_dp_audio_cfg *cfg) +{ + mtk_dp_audio_sdp_setup(mtk_dp, cfg); + mtk_dp_audio_channel_status_set(mtk_dp, cfg); + + mtk_dp_audio_setup_channels(mtk_dp, cfg); + mtk_dp_audio_set_divider(mtk_dp); +} + +static int mtk_dp_video_config(struct mtk_dp *mtk_dp) +{ + mtk_dp_config_mn_mode(mtk_dp); + mtk_dp_set_msa(mtk_dp); + mtk_dp_set_color_depth(mtk_dp); + return mtk_dp_set_color_format(mtk_dp, mtk_dp->info.format); +} + +static void mtk_dp_init_port(struct mtk_dp *mtk_dp) +{ + mtk_dp_set_idle_pattern(mtk_dp, true); + mtk_dp_initialize_priv_data(mtk_dp); + + mtk_dp_initialize_settings(mtk_dp); + mtk_dp_initialize_aux_settings(mtk_dp); + mtk_dp_initialize_digital_settings(mtk_dp); + mtk_dp_initialize_hpd_detect_settings(mtk_dp); + + mtk_dp_digital_sw_reset(mtk_dp); +} + +static irqreturn_t mtk_dp_hpd_event_thread(int hpd, void *dev) +{ + struct mtk_dp *mtk_dp = dev; + unsigned long flags; + u32 status; + + if (mtk_dp->need_debounce && mtk_dp->train_info.cable_plugged_in) + msleep(100); + + spin_lock_irqsave(&mtk_dp->irq_thread_lock, flags); + status = mtk_dp->irq_thread_handle; + mtk_dp->irq_thread_handle = 0; + spin_unlock_irqrestore(&mtk_dp->irq_thread_lock, flags); + + if (status & MTK_DP_THREAD_CABLE_STATE_CHG) { + if (mtk_dp->bridge.dev) + drm_helper_hpd_irq_event(mtk_dp->bridge.dev); + + if (!mtk_dp->train_info.cable_plugged_in) { + mtk_dp_disable_sdp_aui(mtk_dp); + memset(&mtk_dp->info.audio_cur_cfg, 0, + sizeof(mtk_dp->info.audio_cur_cfg)); + + mtk_dp->need_debounce = false; + mod_timer(&mtk_dp->debounce_timer, + jiffies + msecs_to_jiffies(100) - 1); + } + } + + if (status & MTK_DP_THREAD_HPD_EVENT) + dev_dbg(mtk_dp->dev, "Receive IRQ from sink devices\n"); + + return IRQ_HANDLED; +} + +static irqreturn_t mtk_dp_hpd_event(int hpd, void *dev) +{ + struct mtk_dp *mtk_dp = dev; + bool cable_sta_chg = false; + unsigned long flags; + u32 irq_status = mtk_dp_swirq_get_clear(mtk_dp) | + mtk_dp_hwirq_get_clear(mtk_dp); + + if (!irq_status) + return IRQ_HANDLED; + + spin_lock_irqsave(&mtk_dp->irq_thread_lock, flags); + + if (irq_status & MTK_DP_HPD_INTERRUPT) + mtk_dp->irq_thread_handle |= MTK_DP_THREAD_HPD_EVENT; + + /* Cable state is changed. 
*/ + if (irq_status != MTK_DP_HPD_INTERRUPT) { + mtk_dp->irq_thread_handle |= MTK_DP_THREAD_CABLE_STATE_CHG; + cable_sta_chg = true; + } + + spin_unlock_irqrestore(&mtk_dp->irq_thread_lock, flags); + + if (cable_sta_chg) { + if (!!(mtk_dp_read(mtk_dp, MTK_DP_TRANS_P0_3414) & + HPD_DB_DP_TRANS_P0_MASK)) + mtk_dp->train_info.cable_plugged_in = true; + else + mtk_dp->train_info.cable_plugged_in = false; + } + + return IRQ_WAKE_THREAD; +} + +static int mtk_dp_wait_hpd_asserted(struct drm_dp_aux *mtk_aux, unsigned long wait_us) +{ + struct mtk_dp *mtk_dp = container_of(mtk_aux, struct mtk_dp, aux); + u32 val; + int ret; + + ret = regmap_read_poll_timeout(mtk_dp->regs, MTK_DP_TRANS_P0_3414, + val, !!(val & HPD_DB_DP_TRANS_P0_MASK), + wait_us / 100, wait_us); + if (ret) { + mtk_dp->train_info.cable_plugged_in = false; + return ret; + } + + mtk_dp->train_info.cable_plugged_in = true; + + ret = mtk_dp_parse_capabilities(mtk_dp); + if (ret) { + dev_err(mtk_dp->dev, "Can't parse capabilities: %d\n", ret); + return ret; + } + + return 0; +} + +static int mtk_dp_dt_parse(struct mtk_dp *mtk_dp, + struct platform_device *pdev) +{ + struct device_node *endpoint; + struct device *dev = &pdev->dev; + int ret; + void __iomem *base; + u32 linkrate; + int len; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + mtk_dp->regs = devm_regmap_init_mmio(dev, base, &mtk_dp_regmap_config); + if (IS_ERR(mtk_dp->regs)) + return PTR_ERR(mtk_dp->regs); + + endpoint = of_graph_get_endpoint_by_regs(pdev->dev.of_node, 1, -1); + len = of_property_count_elems_of_size(endpoint, + "data-lanes", sizeof(u32)); + of_node_put(endpoint); + if (len < 0 || len > 4 || len == 3) { + dev_err(dev, "invalid data lane size: %d\n", len); + return -EINVAL; + } + + mtk_dp->max_lanes = len; + + ret = device_property_read_u32(dev, "max-linkrate-mhz", &linkrate); + if (ret) { + dev_err(dev, "failed to read max linkrate: %d\n", ret); + return ret; + } + + mtk_dp->max_linkrate = drm_dp_link_rate_to_bw_code(linkrate * 100); + + return 0; +} + +static void mtk_dp_update_plugged_status(struct mtk_dp *mtk_dp) +{ + if (!mtk_dp->data->audio_supported || !mtk_dp->audio_enable) + return; + + mutex_lock(&mtk_dp->update_plugged_status_lock); + if (mtk_dp->plugged_cb && mtk_dp->codec_dev) + mtk_dp->plugged_cb(mtk_dp->codec_dev, + mtk_dp->enabled & + mtk_dp->info.audio_cur_cfg.detect_monitor); + mutex_unlock(&mtk_dp->update_plugged_status_lock); +} + +static enum drm_connector_status +mtk_dp_bdg_detect(struct drm_bridge *bridge, struct drm_connector *connector) +{ + struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge); + enum drm_connector_status ret = connector_status_disconnected; + bool enabled = mtk_dp->enabled; + + if (!mtk_dp->train_info.cable_plugged_in) + return ret; + + if (!enabled) + mtk_dp_aux_panel_poweron(mtk_dp, true); + + /* + * Some dongles still source HPD when they do not connect to any + * sink device. To avoid this, we need to read the sink count + * to make sure we do connect to sink devices. After this detect + * function, we just need to check the HPD connection to check + * whether we connect to a sink device. 
+ */ + + if (drm_dp_read_sink_count(&mtk_dp->aux) > 0) + ret = connector_status_connected; + + if (!enabled) + mtk_dp_aux_panel_poweron(mtk_dp, false); + + return ret; +} + +static const struct drm_edid *mtk_dp_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge); + bool enabled = mtk_dp->enabled; + const struct drm_edid *drm_edid; + struct mtk_dp_audio_cfg *audio_caps = &mtk_dp->info.audio_cur_cfg; + + if (!enabled) { + drm_atomic_bridge_chain_pre_enable(bridge, connector->state->state); + mtk_dp_aux_panel_poweron(mtk_dp, true); + } + + drm_edid = drm_edid_read_ddc(connector, &mtk_dp->aux.ddc); + + /* + * Parse capability here to let atomic_get_input_bus_fmts and + * mode_valid use the capability to calculate sink bitrates. + */ + if (mtk_dp_parse_capabilities(mtk_dp)) { + drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n"); + drm_edid_free(drm_edid); + drm_edid = NULL; + } + + if (drm_edid) { + /* + * FIXME: get rid of drm_edid_raw() + */ + const struct edid *edid = drm_edid_raw(drm_edid); + struct cea_sad *sads; + int ret; + + ret = drm_edid_to_sad(edid, &sads); + /* Ignore any errors */ + if (ret < 0) + ret = 0; + if (ret) + kfree(sads); + audio_caps->sad_count = ret; + + /* + * FIXME: This should use connector->display_info.has_audio from + * a path that has read the EDID and called + * drm_edid_connector_update(). + */ + audio_caps->detect_monitor = drm_detect_monitor_audio(edid); + } + + if (!enabled) { + mtk_dp_aux_panel_poweron(mtk_dp, false); + drm_atomic_bridge_chain_post_disable(bridge, connector->state->state); + } + + return drm_edid; +} + +static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux, + struct drm_dp_aux_msg *msg) +{ + struct mtk_dp *mtk_dp = container_of(mtk_aux, struct mtk_dp, aux); + bool is_read; + u8 request; + size_t accessed_bytes = 0; + int ret; + + if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP && + !mtk_dp->train_info.cable_plugged_in) { + ret = -EIO; + goto err; + } + + switch (msg->request) { + case DP_AUX_I2C_MOT: + case DP_AUX_I2C_WRITE: + case DP_AUX_NATIVE_WRITE: + case DP_AUX_I2C_WRITE_STATUS_UPDATE: + case DP_AUX_I2C_WRITE_STATUS_UPDATE | DP_AUX_I2C_MOT: + request = msg->request & ~DP_AUX_I2C_WRITE_STATUS_UPDATE; + is_read = false; + break; + case DP_AUX_I2C_READ: + case DP_AUX_NATIVE_READ: + case DP_AUX_I2C_READ | DP_AUX_I2C_MOT: + request = msg->request; + is_read = true; + break; + default: + dev_err(mtk_dp->dev, "invalid aux cmd = %d\n", + msg->request); + ret = -EINVAL; + goto err; + } + + do { + size_t to_access = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES, + msg->size - accessed_bytes); + + ret = mtk_dp_aux_do_transfer(mtk_dp, is_read, request, + msg->address + accessed_bytes, + msg->buffer + accessed_bytes, + to_access, &msg->reply); + + if (ret) { + dev_info(mtk_dp->dev, + "Failed to do AUX transfer: %d\n", ret); + goto err; + } + accessed_bytes += to_access; + } while (accessed_bytes < msg->size); + + return msg->size; +err: + msg->reply = DP_AUX_NATIVE_REPLY_NACK | DP_AUX_I2C_REPLY_NACK; + return ret; +} + +static int mtk_dp_poweron(struct mtk_dp *mtk_dp) +{ + int ret; + + ret = phy_init(mtk_dp->phy); + if (ret) { + dev_err(mtk_dp->dev, "Failed to initialize phy: %d\n", ret); + return ret; + } + + mtk_dp_init_port(mtk_dp); + mtk_dp_power_enable(mtk_dp); + + return 0; +} + +static void mtk_dp_poweroff(struct mtk_dp *mtk_dp) +{ + mtk_dp_power_disable(mtk_dp); + phy_exit(mtk_dp->phy); +} + +static int mtk_dp_bridge_attach(struct drm_bridge *bridge, + struct 
drm_encoder *encoder, + enum drm_bridge_attach_flags flags) +{ + struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge); + int ret; + + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { + dev_err(mtk_dp->dev, "Driver does not provide a connector!"); + return -EINVAL; + } + + mtk_dp->aux.drm_dev = bridge->dev; + ret = drm_dp_aux_register(&mtk_dp->aux); + if (ret) { + dev_err(mtk_dp->dev, + "failed to register DP AUX channel: %d\n", ret); + return ret; + } + + ret = mtk_dp_poweron(mtk_dp); + if (ret) + goto err_aux_register; + + if (mtk_dp->next_bridge) { + ret = drm_bridge_attach(encoder, mtk_dp->next_bridge, + &mtk_dp->bridge, flags); + if (ret) { + drm_warn(mtk_dp->drm_dev, + "Failed to attach external bridge: %d\n", ret); + goto err_bridge_attach; + } + } + + mtk_dp->drm_dev = bridge->dev; + + if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP) { + irq_clear_status_flags(mtk_dp->irq, IRQ_NOAUTOEN); + enable_irq(mtk_dp->irq); + mtk_dp_hwirq_enable(mtk_dp, true); + } + + return 0; + +err_bridge_attach: + mtk_dp_poweroff(mtk_dp); +err_aux_register: + drm_dp_aux_unregister(&mtk_dp->aux); + return ret; +} + +static void mtk_dp_bridge_detach(struct drm_bridge *bridge) +{ + struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge); + + if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP) { + mtk_dp_hwirq_enable(mtk_dp, false); + disable_irq(mtk_dp->irq); + } + mtk_dp->drm_dev = NULL; + mtk_dp_poweroff(mtk_dp); + drm_dp_aux_unregister(&mtk_dp->aux); +} + +static void mtk_dp_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge); + int ret; + + mtk_dp->conn = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + if (!mtk_dp->conn) { + drm_err(mtk_dp->drm_dev, + "Can't enable bridge as connector is missing\n"); + return; + } + + mtk_dp_aux_panel_poweron(mtk_dp, true); + + /* Training */ + ret = mtk_dp_training(mtk_dp); + if (ret) { + drm_err(mtk_dp->drm_dev, "Training failed, %d\n", ret); + goto power_off_aux; + } + + ret = mtk_dp_video_config(mtk_dp); + if (ret) + goto power_off_aux; + + mtk_dp_video_enable(mtk_dp, true); + + mtk_dp->audio_enable = + mtk_dp_edid_parse_audio_capabilities(mtk_dp, + &mtk_dp->info.audio_cur_cfg); + if (mtk_dp->audio_enable) { + mtk_dp_audio_setup(mtk_dp, &mtk_dp->info.audio_cur_cfg); + mtk_dp_audio_mute(mtk_dp, false); + } else { + memset(&mtk_dp->info.audio_cur_cfg, 0, + sizeof(mtk_dp->info.audio_cur_cfg)); + } + + mtk_dp->enabled = true; + mtk_dp_update_plugged_status(mtk_dp); + + return; +power_off_aux: + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE, + DP_PWR_STATE_BANDGAP_TPLL, + DP_PWR_STATE_MASK); +} + +static void mtk_dp_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge); + + mtk_dp->enabled = false; + mtk_dp_update_plugged_status(mtk_dp); + mtk_dp_video_enable(mtk_dp, false); + mtk_dp_audio_mute(mtk_dp, true); + + if (mtk_dp->train_info.cable_plugged_in) { + drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3); + usleep_range(2000, 3000); + } + + /* power off aux */ + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE, + DP_PWR_STATE_BANDGAP_TPLL, + DP_PWR_STATE_MASK); + + /* SDP path reset sw*/ + mtk_dp_sdp_path_reset(mtk_dp); + + /* Ensure the sink is muted */ + msleep(20); +} + +static enum drm_mode_status +mtk_dp_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct mtk_dp *mtk_dp = 
mtk_dp_from_bridge(bridge); + u32 bpp = info->color_formats & DRM_COLOR_FORMAT_YCBCR422 ? 16 : 24; + u32 lane_count_min = mtk_dp->train_info.lane_count; + u32 rate = drm_dp_bw_code_to_link_rate(mtk_dp->train_info.link_rate) * + lane_count_min; + + /* + *FEC overhead is approximately 2.4% from DP 1.4a spec 2.2.1.4.2. + *The down-spread amplitude shall either be disabled (0.0%) or up + *to 0.5% from 1.4a 3.5.2.6. Add up to approximately 3% total overhead. + * + *Because rate is already divided by 10, + *mode->clock does not need to be multiplied by 10 + */ + if ((rate * 97 / 100) < (mode->clock * bpp / 8)) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static u32 *mtk_dp_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + unsigned int *num_output_fmts) +{ + u32 *output_fmts; + + *num_output_fmts = 0; + output_fmts = kmalloc(sizeof(*output_fmts), GFP_KERNEL); + if (!output_fmts) + return NULL; + *num_output_fmts = 1; + output_fmts[0] = MEDIA_BUS_FMT_FIXED; + return output_fmts; +} + +static const u32 mt8195_input_fmts[] = { + MEDIA_BUS_FMT_RGB888_1X24, + MEDIA_BUS_FMT_YUV8_1X24, + MEDIA_BUS_FMT_YUYV8_1X16, +}; + +static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + u32 *input_fmts; + struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge); + struct drm_display_mode *mode = &crtc_state->adjusted_mode; + struct drm_display_info *display_info = + &conn_state->connector->display_info; + u32 lane_count_min = mtk_dp->train_info.lane_count; + u32 rate = drm_dp_bw_code_to_link_rate(mtk_dp->train_info.link_rate) * + lane_count_min; + + *num_input_fmts = 0; + + /* + * If the linkrate is smaller than datarate of RGB888, larger than + * datarate of YUV422 and sink device supports YUV422, we output YUV422 + * format. Use this condition, we can support more resolution. 
+ */ + if (((rate * 97 / 100) < (mode->clock * 24 / 8)) && + ((rate * 97 / 100) > (mode->clock * 16 / 8)) && + (display_info->color_formats & DRM_COLOR_FORMAT_YCBCR422)) { + input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL); + if (!input_fmts) + return NULL; + *num_input_fmts = 1; + input_fmts[0] = MEDIA_BUS_FMT_YUYV8_1X16; + } else { + input_fmts = kcalloc(ARRAY_SIZE(mt8195_input_fmts), + sizeof(*input_fmts), + GFP_KERNEL); + if (!input_fmts) + return NULL; + + *num_input_fmts = ARRAY_SIZE(mt8195_input_fmts); + memcpy(input_fmts, mt8195_input_fmts, sizeof(mt8195_input_fmts)); + } + + return input_fmts; +} + +static int mtk_dp_bridge_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge); + struct drm_crtc *crtc = conn_state->crtc; + unsigned int input_bus_format; + + input_bus_format = bridge_state->input_bus_cfg.format; + + dev_dbg(mtk_dp->dev, "input format 0x%04x, output format 0x%04x\n", + bridge_state->input_bus_cfg.format, + bridge_state->output_bus_cfg.format); + + if (input_bus_format == MEDIA_BUS_FMT_YUYV8_1X16) + mtk_dp->info.format = DP_PIXELFORMAT_YUV422; + else + mtk_dp->info.format = DP_PIXELFORMAT_RGB; + + if (!crtc) { + drm_err(mtk_dp->drm_dev, + "Can't enable bridge as connector state doesn't have a crtc\n"); + return -EINVAL; + } + + drm_display_mode_to_videomode(&crtc_state->adjusted_mode, &mtk_dp->info.vm); + + return 0; +} + +static const struct drm_bridge_funcs mtk_dp_bridge_funcs = { + .atomic_check = mtk_dp_bridge_atomic_check, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_get_output_bus_fmts = mtk_dp_bridge_atomic_get_output_bus_fmts, + .atomic_get_input_bus_fmts = mtk_dp_bridge_atomic_get_input_bus_fmts, + .atomic_reset = drm_atomic_helper_bridge_reset, + .attach = mtk_dp_bridge_attach, + .detach = mtk_dp_bridge_detach, + .atomic_enable = mtk_dp_bridge_atomic_enable, + .atomic_disable = mtk_dp_bridge_atomic_disable, + .mode_valid = mtk_dp_bridge_mode_valid, + .edid_read = mtk_dp_edid_read, + .detect = mtk_dp_bdg_detect, +}; + +static void mtk_dp_debounce_timer(struct timer_list *t) +{ + struct mtk_dp *mtk_dp = timer_container_of(mtk_dp, t, debounce_timer); + + mtk_dp->need_debounce = true; +} + +/* + * HDMI audio codec callbacks + */ +static int mtk_dp_audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct mtk_dp *mtk_dp = dev_get_drvdata(dev); + + if (!mtk_dp->enabled) { + dev_err(mtk_dp->dev, "%s, DP is not ready!\n", __func__); + return -ENODEV; + } + + mtk_dp->info.audio_cur_cfg.channels = params->cea.channels; + mtk_dp->info.audio_cur_cfg.sample_rate = params->sample_rate; + + mtk_dp_audio_setup(mtk_dp, &mtk_dp->info.audio_cur_cfg); + + return 0; +} + +static int mtk_dp_audio_startup(struct device *dev, void *data) +{ + struct mtk_dp *mtk_dp = dev_get_drvdata(dev); + + mtk_dp_audio_mute(mtk_dp, false); + + return 0; +} + +static void mtk_dp_audio_shutdown(struct device *dev, void *data) +{ + struct mtk_dp *mtk_dp = dev_get_drvdata(dev); + + mtk_dp_audio_mute(mtk_dp, true); +} + +static int mtk_dp_audio_get_eld(struct device *dev, void *data, uint8_t *buf, + size_t len) +{ + struct mtk_dp *mtk_dp = dev_get_drvdata(dev); + + if (mtk_dp->enabled) + memcpy(buf, mtk_dp->conn->eld, len); + else + memset(buf, 0, len); + + 
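A quick way to follow the bandwidth comparison used by mtk_dp_bridge_mode_valid() and the RGB888-vs-YUV422 choice in mtk_dp_bridge_atomic_get_input_bus_fmts() above is to plug in numbers. The stand-alone sketch below does exactly that; the link configuration (2-lane HBR) and the pixel clock are illustrative values, not taken from the driver.

#include <stdbool.h>
#include <stdio.h>

/* Same 97/100 margin as the driver: roughly 3% for FEC and down-spread overhead. */
static bool link_fits(unsigned long long rate_khz_per_lane, unsigned int lanes,
		      unsigned long long pixel_clock_khz, unsigned int bpp)
{
	unsigned long long avail = rate_khz_per_lane * lanes * 97 / 100;
	unsigned long long needed = pixel_clock_khz * bpp / 8;

	return avail >= needed;
}

int main(void)
{
	unsigned long long rate = 270000;  /* HBR: 2.7 Gb/s per lane, already divided by 10 */
	unsigned int lanes = 2;
	unsigned long long clock = 241500; /* roughly 2560x1440@60 pixel clock, in kHz */

	/* RGB888 (24 bpp) does not fit this link, YUV422 (16 bpp) does */
	printf("RGB888 fits: %d\n", link_fits(rate, lanes, clock, 24));
	printf("YUV422 fits: %d\n", link_fits(rate, lanes, clock, 16));
	return 0;
}

With these numbers the available payload is 523,800 kB/s against 724,500 kB/s needed for RGB888 and 483,000 kB/s for YUV422, which is why the driver falls back to MEDIA_BUS_FMT_YUYV8_1X16 in that situation.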
return 0; +} + +static int mtk_dp_audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct mtk_dp *mtk_dp = data; + + mutex_lock(&mtk_dp->update_plugged_status_lock); + mtk_dp->plugged_cb = fn; + mtk_dp->codec_dev = codec_dev; + mutex_unlock(&mtk_dp->update_plugged_status_lock); + + mtk_dp_update_plugged_status(mtk_dp); + + return 0; +} + +static const struct hdmi_codec_ops mtk_dp_audio_codec_ops = { + .hw_params = mtk_dp_audio_hw_params, + .audio_startup = mtk_dp_audio_startup, + .audio_shutdown = mtk_dp_audio_shutdown, + .get_eld = mtk_dp_audio_get_eld, + .hook_plugged_cb = mtk_dp_audio_hook_plugged_cb, +}; + +static int mtk_dp_register_audio_driver(struct device *dev) +{ + struct mtk_dp *mtk_dp = dev_get_drvdata(dev); + struct hdmi_codec_pdata codec_data = { + .ops = &mtk_dp_audio_codec_ops, + .max_i2s_channels = 8, + .i2s = 1, + .data = mtk_dp, + .no_capture_mute = 1, + }; + + mtk_dp->audio_pdev = platform_device_register_data(dev, + HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, + sizeof(codec_data)); + return PTR_ERR_OR_ZERO(mtk_dp->audio_pdev); +} + +static int mtk_dp_register_phy(struct mtk_dp *mtk_dp) +{ + struct device *dev = mtk_dp->dev; + + mtk_dp->phy_dev = platform_device_register_data(dev, "mediatek-dp-phy", + PLATFORM_DEVID_AUTO, + &mtk_dp->regs, + sizeof(struct regmap *)); + if (IS_ERR(mtk_dp->phy_dev)) + return dev_err_probe(dev, PTR_ERR(mtk_dp->phy_dev), + "Failed to create device mediatek-dp-phy\n"); + + mtk_dp_get_calibration_data(mtk_dp); + + mtk_dp->phy = devm_phy_get(&mtk_dp->phy_dev->dev, "dp"); + if (IS_ERR(mtk_dp->phy)) { + platform_device_unregister(mtk_dp->phy_dev); + return dev_err_probe(dev, PTR_ERR(mtk_dp->phy), "Failed to get phy\n"); + } + + return 0; +} + +static int mtk_dp_edp_link_panel(struct drm_dp_aux *mtk_aux) +{ + struct mtk_dp *mtk_dp = container_of(mtk_aux, struct mtk_dp, aux); + struct device *dev = mtk_aux->dev; + int ret; + + mtk_dp->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0); + + /* Power off the DP and AUX: either detection is done, or no panel present */ + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE, + DP_PWR_STATE_BANDGAP_TPLL, + DP_PWR_STATE_MASK); + mtk_dp_power_disable(mtk_dp); + + if (IS_ERR(mtk_dp->next_bridge)) { + ret = PTR_ERR(mtk_dp->next_bridge); + mtk_dp->next_bridge = NULL; + return ret; + } + + /* For eDP, we add the bridge only if the panel was found */ + ret = devm_drm_bridge_add(dev, &mtk_dp->bridge); + if (ret) + return ret; + + return 0; +} + +static int mtk_dp_probe(struct platform_device *pdev) +{ + struct mtk_dp *mtk_dp; + struct device *dev = &pdev->dev; + int ret; + + mtk_dp = devm_drm_bridge_alloc(dev, struct mtk_dp, bridge, + &mtk_dp_bridge_funcs); + if (IS_ERR(mtk_dp)) + return PTR_ERR(mtk_dp); + + mtk_dp->dev = dev; + mtk_dp->data = (struct mtk_dp_data *)of_device_get_match_data(dev); + + ret = mtk_dp_dt_parse(mtk_dp, pdev); + if (ret) + return dev_err_probe(dev, ret, "Failed to parse dt\n"); + + /* + * Request the interrupt and install service routine only if we are + * on full DisplayPort. + * For eDP, polling the HPD instead is more convenient because we + * don't expect any (un)plug events during runtime, hence we can + * avoid some locking. 
+ */ + if (mtk_dp->data->bridge_type != DRM_MODE_CONNECTOR_eDP) { + mtk_dp->irq = platform_get_irq(pdev, 0); + if (mtk_dp->irq < 0) + return dev_err_probe(dev, mtk_dp->irq, + "failed to request dp irq resource\n"); + + spin_lock_init(&mtk_dp->irq_thread_lock); + + irq_set_status_flags(mtk_dp->irq, IRQ_NOAUTOEN); + ret = devm_request_threaded_irq(dev, mtk_dp->irq, mtk_dp_hpd_event, + mtk_dp_hpd_event_thread, + IRQ_TYPE_LEVEL_HIGH, dev_name(dev), + mtk_dp); + if (ret) + return dev_err_probe(dev, ret, + "failed to request mediatek dptx irq\n"); + + mtk_dp->need_debounce = true; + timer_setup(&mtk_dp->debounce_timer, mtk_dp_debounce_timer, 0); + } + + mtk_dp->aux.name = "aux_mtk_dp"; + mtk_dp->aux.dev = dev; + mtk_dp->aux.transfer = mtk_dp_aux_transfer; + mtk_dp->aux.wait_hpd_asserted = mtk_dp_wait_hpd_asserted; + drm_dp_aux_init(&mtk_dp->aux); + + platform_set_drvdata(pdev, mtk_dp); + + if (mtk_dp->data->audio_supported) { + mutex_init(&mtk_dp->update_plugged_status_lock); + + ret = mtk_dp_register_audio_driver(dev); + if (ret) + return dev_err_probe(dev, ret, + "Failed to register audio driver\n"); + } + + ret = mtk_dp_register_phy(mtk_dp); + if (ret) + return ret; + + mtk_dp->bridge.of_node = dev->of_node; + mtk_dp->bridge.type = mtk_dp->data->bridge_type; + + if (mtk_dp->bridge.type == DRM_MODE_CONNECTOR_eDP) { + /* + * Set the data lanes to idle in case the bootloader didn't + * properly close the eDP port to avoid stalls and then + * reinitialize, reset and power on the AUX block. + */ + mtk_dp_set_idle_pattern(mtk_dp, true); + mtk_dp_initialize_aux_settings(mtk_dp); + mtk_dp_power_enable(mtk_dp); + + /* Disable HW interrupts: we don't need any for eDP */ + mtk_dp_hwirq_enable(mtk_dp, false); + + /* + * Power on the AUX to allow reading the EDID from aux-bus: + * please note that it is necessary to call power off in the + * .done_probing() callback (mtk_dp_edp_link_panel), as only + * there we can safely assume that we finished reading EDID. 
+ */ + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE, + DP_PWR_STATE_BANDGAP_TPLL_LANE, + DP_PWR_STATE_MASK); + + ret = devm_of_dp_aux_populate_bus(&mtk_dp->aux, mtk_dp_edp_link_panel); + if (ret) { + /* -ENODEV this means that the panel is not on the aux-bus */ + if (ret == -ENODEV) { + ret = mtk_dp_edp_link_panel(&mtk_dp->aux); + if (ret) + return ret; + } else { + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE, + DP_PWR_STATE_BANDGAP_TPLL, + DP_PWR_STATE_MASK); + mtk_dp_power_disable(mtk_dp); + return ret; + } + } + } else { + mtk_dp->bridge.ops = DRM_BRIDGE_OP_DETECT | + DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD; + ret = devm_drm_bridge_add(dev, &mtk_dp->bridge); + if (ret) + return dev_err_probe(dev, ret, "Failed to add bridge\n"); + } + + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + + return 0; +} + +static void mtk_dp_remove(struct platform_device *pdev) +{ + struct mtk_dp *mtk_dp = platform_get_drvdata(pdev); + + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + if (mtk_dp->data->bridge_type != DRM_MODE_CONNECTOR_eDP) + timer_delete_sync(&mtk_dp->debounce_timer); + platform_device_unregister(mtk_dp->phy_dev); + if (mtk_dp->audio_pdev) + platform_device_unregister(mtk_dp->audio_pdev); +} + +#ifdef CONFIG_PM_SLEEP +static int mtk_dp_suspend(struct device *dev) +{ + struct mtk_dp *mtk_dp = dev_get_drvdata(dev); + + mtk_dp_power_disable(mtk_dp); + if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP) + mtk_dp_hwirq_enable(mtk_dp, false); + pm_runtime_put_sync(dev); + + return 0; +} + +static int mtk_dp_resume(struct device *dev) +{ + struct mtk_dp *mtk_dp = dev_get_drvdata(dev); + + pm_runtime_get_sync(dev); + mtk_dp_init_port(mtk_dp); + if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP) + mtk_dp_hwirq_enable(mtk_dp, true); + mtk_dp_power_enable(mtk_dp); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(mtk_dp_pm_ops, mtk_dp_suspend, mtk_dp_resume); + +static const struct mtk_dp_data mt8188_dp_data = { + .bridge_type = DRM_MODE_CONNECTOR_DisplayPort, + .smc_cmd = MTK_DP_SIP_ATF_VIDEO_UNMUTE, + .efuse_fmt = mt8188_dp_efuse_fmt, + .audio_supported = true, + .audio_pkt_in_hblank_area = true, + .audio_m_div2_bit = MT8188_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2, +}; + +static const struct mtk_dp_data mt8195_edp_data = { + .bridge_type = DRM_MODE_CONNECTOR_eDP, + .smc_cmd = MTK_DP_SIP_ATF_EDP_VIDEO_UNMUTE, + .efuse_fmt = mt8195_edp_efuse_fmt, + .audio_supported = false, + .audio_m_div2_bit = MT8195_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2, +}; + +static const struct mtk_dp_data mt8195_dp_data = { + .bridge_type = DRM_MODE_CONNECTOR_DisplayPort, + .smc_cmd = MTK_DP_SIP_ATF_VIDEO_UNMUTE, + .efuse_fmt = mt8195_dp_efuse_fmt, + .audio_supported = true, + .audio_m_div2_bit = MT8195_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2, +}; + +static const struct of_device_id mtk_dp_of_match[] = { + { + .compatible = "mediatek,mt8188-edp-tx", + .data = &mt8195_edp_data, + }, + { + .compatible = "mediatek,mt8188-dp-tx", + .data = &mt8188_dp_data, + }, + { + .compatible = "mediatek,mt8195-edp-tx", + .data = &mt8195_edp_data, + }, + { + .compatible = "mediatek,mt8195-dp-tx", + .data = &mt8195_dp_data, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, mtk_dp_of_match); + +static struct platform_driver mtk_dp_driver = { + .probe = mtk_dp_probe, + .remove = mtk_dp_remove, + .driver = { + .name = "mediatek-drm-dp", + .of_match_table = mtk_dp_of_match, + .pm = &mtk_dp_pm_ops, + }, +}; + +module_platform_driver(mtk_dp_driver); + +MODULE_AUTHOR("Jitao Shi <jitao.shi@mediatek.com>"); 
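For reference, the rate/lane fallback implemented by mtk_dp_training() earlier in this file steps down the link rate first and only then halves the lane count. The stand-alone sketch below prints the resulting sequence of attempts when clock recovery keeps failing; the six-try limit mirrors the function above, while the starting configuration (HBR2, 4 lanes) is just an example.

#include <stdio.h>

int main(void)
{
	static const unsigned int rates[] = { 1620, 2700, 5400, 8100 }; /* Mb/s per lane */
	unsigned int max_rate = 5400, rate = max_rate, lanes = 4;
	int train_limit;

	for (train_limit = 6; train_limit > 0; train_limit--) {
		printf("attempt: %u Mb/s x %u lane(s)\n", rate, lanes);

		/* Pretend clock recovery failed: walk down the link rates first. */
		if (rate == rates[0]) {
			/* Lowest rate reached: halve the lanes, restart at max rate. */
			lanes /= 2;
			rate = max_rate;
			if (!lanes)
				break;
		} else if (rate == rates[1]) {
			rate = rates[0];
		} else if (rate == rates[2]) {
			rate = rates[1];
		} else {
			rate = rates[2];
		}
	}
	return 0;
}

The printed order is 5400x4, 2700x4, 1620x4, 5400x2, 2700x2, 1620x2, after which the driver would give up with -ETIMEDOUT.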
+MODULE_AUTHOR("Markus Schneider-Pargmann <msp@baylibre.com>"); +MODULE_AUTHOR("Bo-Chen Chen <rex-bc.chen@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek DisplayPort Driver"); +MODULE_LICENSE("GPL"); +MODULE_SOFTDEP("pre: phy_mtk_dp"); diff --git a/drivers/gpu/drm/mediatek/mtk_dp_reg.h b/drivers/gpu/drm/mediatek/mtk_dp_reg.h new file mode 100644 index 000000000000..8ad7a9cc259e --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_dp_reg.h @@ -0,0 +1,362 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019-2022 MediaTek Inc. + * Copyright (c) 2022 BayLibre + */ +#ifndef _MTK_DP_REG_H_ +#define _MTK_DP_REG_H_ + +#define SEC_OFFSET 0x4000 + +#define MTK_DP_HPD_DISCONNECT BIT(1) +#define MTK_DP_HPD_CONNECT BIT(2) +#define MTK_DP_HPD_INTERRUPT BIT(3) + +/* offset: 0x0 */ +#define DP_PHY_GLB_BIAS_GEN_00 0x0 +#define RG_XTP_GLB_BIAS_INTR_CTRL GENMASK(20, 16) +#define DP_PHY_GLB_DPAUX_TX 0x8 +#define RG_CKM_PT0_CKTX_IMPSEL GENMASK(23, 20) +#define MTK_DP_0034 0x34 +#define DA_XTP_GLB_CKDET_EN_FORCE_VAL BIT(15) +#define DA_XTP_GLB_CKDET_EN_FORCE_EN BIT(14) +#define DA_CKM_INTCKTX_EN_FORCE_VAL BIT(13) +#define DA_CKM_INTCKTX_EN_FORCE_EN BIT(12) +#define DA_CKM_CKTX0_EN_FORCE_VAL BIT(11) +#define DA_CKM_CKTX0_EN_FORCE_EN BIT(10) +#define DA_CKM_XTAL_CK_FORCE_VAL BIT(9) +#define DA_CKM_XTAL_CK_FORCE_EN BIT(8) +#define DA_CKM_BIAS_LPF_EN_FORCE_VAL BIT(7) +#define DA_CKM_BIAS_LPF_EN_FORCE_EN BIT(6) +#define DA_CKM_BIAS_EN_FORCE_VAL BIT(5) +#define DA_CKM_BIAS_EN_FORCE_EN BIT(4) +#define DA_XTP_GLB_AVD10_ON_FORCE_VAL BIT(3) +#define DA_XTP_GLB_AVD10_ON_FORCE BIT(2) +#define DA_XTP_GLB_LDO_EN_FORCE_VAL BIT(1) +#define DA_XTP_GLB_LDO_EN_FORCE_EN BIT(0) +#define DP_PHY_LANE_TX_0 0x104 +#define RG_XTP_LN0_TX_IMPSEL_PMOS GENMASK(15, 12) +#define RG_XTP_LN0_TX_IMPSEL_NMOS GENMASK(19, 16) +#define DP_PHY_LANE_TX_1 0x204 +#define RG_XTP_LN1_TX_IMPSEL_PMOS GENMASK(15, 12) +#define RG_XTP_LN1_TX_IMPSEL_NMOS GENMASK(19, 16) +#define DP_PHY_LANE_TX_2 0x304 +#define RG_XTP_LN2_TX_IMPSEL_PMOS GENMASK(15, 12) +#define RG_XTP_LN2_TX_IMPSEL_NMOS GENMASK(19, 16) +#define DP_PHY_LANE_TX_3 0x404 +#define RG_XTP_LN3_TX_IMPSEL_PMOS GENMASK(15, 12) +#define RG_XTP_LN3_TX_IMPSEL_NMOS GENMASK(19, 16) +#define MTK_DP_1040 0x1040 +#define RG_DPAUX_RX_VALID_DEGLITCH_EN BIT(2) +#define RG_XTP_GLB_CKDET_EN BIT(1) +#define RG_DPAUX_RX_EN BIT(0) + +/* offset: TOP_OFFSET (0x2000) */ +#define MTK_DP_TOP_PWR_STATE 0x2000 +#define DP_PWR_STATE_MASK GENMASK(1, 0) +#define DP_PWR_STATE_BANDGAP BIT(0) +#define DP_PWR_STATE_BANDGAP_TPLL BIT(1) +#define DP_PWR_STATE_BANDGAP_TPLL_LANE GENMASK(1, 0) +#define MTK_DP_TOP_SWING_EMP 0x2004 +#define DP_TX0_VOLT_SWING_MASK GENMASK(1, 0) +#define DP_TX0_VOLT_SWING_SHIFT 0 +#define DP_TX0_PRE_EMPH_MASK GENMASK(3, 2) +#define DP_TX0_PRE_EMPH_SHIFT 2 +#define DP_TX1_VOLT_SWING_MASK GENMASK(9, 8) +#define DP_TX1_VOLT_SWING_SHIFT 8 +#define DP_TX1_PRE_EMPH_MASK GENMASK(11, 10) +#define DP_TX2_VOLT_SWING_MASK GENMASK(17, 16) +#define DP_TX2_PRE_EMPH_MASK GENMASK(19, 18) +#define DP_TX3_VOLT_SWING_MASK GENMASK(25, 24) +#define DP_TX3_PRE_EMPH_MASK GENMASK(27, 26) +#define MTK_DP_TOP_RESET_AND_PROBE 0x2020 +#define SW_RST_B_PHYD BIT(4) +#define MTK_DP_TOP_IRQ_MASK 0x202c +#define IRQ_MASK_AUX_TOP_IRQ BIT(2) +#define MTK_DP_TOP_MEM_PD 0x2038 +#define MEM_ISO_EN BIT(0) +#define FUSE_SEL BIT(2) + +/* offset: ENC0_OFFSET (0x3000) */ +#define MTK_DP_ENC0_P0_3000 0x3000 +#define LANE_NUM_DP_ENC0_P0_MASK GENMASK(1, 0) +#define VIDEO_MUTE_SW_DP_ENC0_P0 BIT(2) +#define VIDEO_MUTE_SEL_DP_ENC0_P0 BIT(3) 
+#define ENHANCED_FRAME_EN_DP_ENC0_P0 BIT(4) +#define MTK_DP_ENC0_P0_3004 0x3004 +#define VIDEO_M_CODE_SEL_DP_ENC0_P0_MASK BIT(8) +#define DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0 BIT(9) +#define SDP_RESET_SW_DP_ENC0_P0 BIT(13) +#define MTK_DP_ENC0_P0_3010 0x3010 +#define HTOTAL_SW_DP_ENC0_P0_MASK GENMASK(15, 0) +#define MTK_DP_ENC0_P0_3014 0x3014 +#define VTOTAL_SW_DP_ENC0_P0_MASK GENMASK(15, 0) +#define MTK_DP_ENC0_P0_3018 0x3018 +#define HSTART_SW_DP_ENC0_P0_MASK GENMASK(15, 0) +#define MTK_DP_ENC0_P0_301C 0x301c +#define VSTART_SW_DP_ENC0_P0_MASK GENMASK(15, 0) +#define MTK_DP_ENC0_P0_3020 0x3020 +#define HWIDTH_SW_DP_ENC0_P0_MASK GENMASK(15, 0) +#define MTK_DP_ENC0_P0_3024 0x3024 +#define VHEIGHT_SW_DP_ENC0_P0_MASK GENMASK(15, 0) +#define MTK_DP_ENC0_P0_3028 0x3028 +#define HSW_SW_DP_ENC0_P0_MASK GENMASK(14, 0) +#define HSP_SW_DP_ENC0_P0_MASK BIT(15) +#define MTK_DP_ENC0_P0_302C 0x302c +#define VSW_SW_DP_ENC0_P0_MASK GENMASK(14, 0) +#define VSP_SW_DP_ENC0_P0_MASK BIT(15) +#define MTK_DP_ENC0_P0_3030 0x3030 +#define HTOTAL_SEL_DP_ENC0_P0 BIT(0) +#define VTOTAL_SEL_DP_ENC0_P0 BIT(1) +#define HSTART_SEL_DP_ENC0_P0 BIT(2) +#define VSTART_SEL_DP_ENC0_P0 BIT(3) +#define HWIDTH_SEL_DP_ENC0_P0 BIT(4) +#define VHEIGHT_SEL_DP_ENC0_P0 BIT(5) +#define HSP_SEL_DP_ENC0_P0 BIT(6) +#define HSW_SEL_DP_ENC0_P0 BIT(7) +#define VSP_SEL_DP_ENC0_P0 BIT(8) +#define VSW_SEL_DP_ENC0_P0 BIT(9) +#define VBID_AUDIO_MUTE_FLAG_SW_DP_ENC0_P0 BIT(11) +#define VBID_AUDIO_MUTE_FLAG_SEL_DP_ENC0_P0 BIT(12) +#define MTK_DP_ENC0_P0_3034 0x3034 +#define MTK_DP_ENC0_P0_3038 0x3038 +#define VIDEO_SOURCE_SEL_DP_ENC0_P0_MASK BIT(11) +#define MTK_DP_ENC0_P0_303C 0x303c +#define SRAM_START_READ_THRD_DP_ENC0_P0_MASK GENMASK(5, 0) +#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_MASK GENMASK(10, 8) +#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_16BIT (0 << 8) +#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_12BIT (1 << 8) +#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_10BIT (2 << 8) +#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_8BIT (3 << 8) +#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_6BIT (4 << 8) +#define PIXEL_ENCODE_FORMAT_DP_ENC0_P0_MASK GENMASK(14, 12) +#define PIXEL_ENCODE_FORMAT_DP_ENC0_P0_RGB (0 << 12) +#define PIXEL_ENCODE_FORMAT_DP_ENC0_P0_YCBCR422 (1 << 12) +#define PIXEL_ENCODE_FORMAT_DP_ENC0_P0_YCBCR420 (2 << 12) +#define VIDEO_MN_GEN_EN_DP_ENC0_P0 BIT(15) +#define MTK_DP_ENC0_P0_3040 0x3040 +#define SDP_DOWN_CNT_DP_ENC0_P0_VAL 0x20 +#define SDP_DOWN_CNT_INIT_DP_ENC0_P0_MASK GENMASK(11, 0) +#define MTK_DP_ENC0_P0_304C 0x304c +#define VBID_VIDEO_MUTE_DP_ENC0_P0_MASK BIT(2) +#define SDP_VSYNC_RISING_MASK_DP_ENC0_P0_MASK BIT(8) +#define MTK_DP_ENC0_P0_3064 0x3064 +#define HDE_NUM_LAST_DP_ENC0_P0_MASK GENMASK(15, 0) +#define MTK_DP_ENC0_P0_3088 0x3088 +#define AU_EN_DP_ENC0_P0 BIT(6) +#define AUDIO_8CH_EN_DP_ENC0_P0_MASK BIT(7) +#define AUDIO_8CH_SEL_DP_ENC0_P0_MASK BIT(8) +#define AUDIO_2CH_EN_DP_ENC0_P0_MASK BIT(14) +#define AUDIO_2CH_SEL_DP_ENC0_P0_MASK BIT(15) +#define MTK_DP_ENC0_P0_308C 0x308c +#define CH_STATUS_0_DP_ENC0_P0_MASK GENMASK(15, 0) +#define MTK_DP_ENC0_P0_3090 0x3090 +#define CH_STATUS_1_DP_ENC0_P0_MASK GENMASK(15, 0) +#define MTK_DP_ENC0_P0_3094 0x3094 +#define CH_STATUS_2_DP_ENC0_P0_MASK GENMASK(7, 0) +#define MTK_DP_ENC0_P0_30A4 0x30a4 +#define AU_TS_CFG_DP_ENC0_P0_MASK GENMASK(7, 0) +#define MTK_DP_ENC0_P0_30A8 0x30a8 +#define MTK_DP_ENC0_P0_30BC 0x30bc +#define ISRC_CONT_DP_ENC0_P0 BIT(0) +#define AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MASK GENMASK(10, 8) +#define MT8195_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MUL_2 (1 << 8) +#define 
MT8195_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MUL_4 (2 << 8) +#define MT8195_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MUL_8 (3 << 8) +#define MT8195_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2 (5 << 8) +#define MT8195_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_4 (6 << 8) +#define MT8195_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_8 (7 << 8) +#define MT8188_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MUL_2 (1 << 8) +#define MT8188_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MUL_4 (2 << 8) +#define MT8188_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MUL_8 (3 << 8) +#define MT8188_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2 (4 << 8) +#define MT8188_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_4 (5 << 8) +#define MT8188_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_8 (7 << 8) +#define MTK_DP_ENC0_P0_30D8 0x30d8 +#define MTK_DP_ENC0_P0_312C 0x312c +#define ASP_HB2_DP_ENC0_P0_MASK GENMASK(7, 0) +#define ASP_HB3_DP_ENC0_P0_MASK GENMASK(15, 8) +#define MTK_DP_ENC0_P0_3154 0x3154 +#define PGEN_HTOTAL_DP_ENC0_P0_MASK GENMASK(13, 0) +#define MTK_DP_ENC0_P0_3158 0x3158 +#define PGEN_HSYNC_RISING_DP_ENC0_P0_MASK GENMASK(13, 0) +#define MTK_DP_ENC0_P0_315C 0x315c +#define PGEN_HSYNC_PULSE_WIDTH_DP_ENC0_P0_MASK GENMASK(13, 0) +#define MTK_DP_ENC0_P0_3160 0x3160 +#define PGEN_HFDE_START_DP_ENC0_P0_MASK GENMASK(13, 0) +#define MTK_DP_ENC0_P0_3164 0x3164 +#define PGEN_HFDE_ACTIVE_WIDTH_DP_ENC0_P0_MASK GENMASK(13, 0) +#define MTK_DP_ENC0_P0_3168 0x3168 +#define PGEN_VTOTAL_DP_ENC0_P0_MASK GENMASK(12, 0) +#define MTK_DP_ENC0_P0_316C 0x316c +#define PGEN_VSYNC_RISING_DP_ENC0_P0_MASK GENMASK(12, 0) +#define MTK_DP_ENC0_P0_3170 0x3170 +#define PGEN_VSYNC_PULSE_WIDTH_DP_ENC0_P0_MASK GENMASK(12, 0) +#define MTK_DP_ENC0_P0_3174 0x3174 +#define PGEN_VFDE_START_DP_ENC0_P0_MASK GENMASK(12, 0) +#define MTK_DP_ENC0_P0_3178 0x3178 +#define PGEN_VFDE_ACTIVE_WIDTH_DP_ENC0_P0_MASK GENMASK(12, 0) +#define MTK_DP_ENC0_P0_31B0 0x31b0 +#define PGEN_PATTERN_SEL_VAL 4 +#define PGEN_PATTERN_SEL_MASK GENMASK(6, 4) +#define MTK_DP_ENC0_P0_31EC 0x31ec +#define AUDIO_CH_SRC_SEL_DP_ENC0_P0 BIT(4) +#define ISRC1_HB3_DP_ENC0_P0_MASK GENMASK(15, 8) + +/* offset: ENC1_OFFSET (0x3200) */ +#define MTK_DP_ENC1_P0_3200 0x3200 +#define MTK_DP_ENC1_P0_3280 0x3280 +#define SDP_PACKET_TYPE_DP_ENC1_P0_MASK GENMASK(4, 0) +#define SDP_PACKET_W_DP_ENC1_P0 BIT(5) +#define SDP_PACKET_W_DP_ENC1_P0_MASK BIT(5) +#define MTK_DP_ENC1_P0_3300 0x3300 +#define VIDEO_AFIFO_RDY_SEL_DP_ENC1_P0_VAL 2 +#define VIDEO_AFIFO_RDY_SEL_DP_ENC1_P0_MASK GENMASK(9, 8) +#define MTK_DP_ENC1_P0_3304 0x3304 +#define AU_PRTY_REGEN_DP_ENC1_P0_MASK BIT(8) +#define AU_CH_STS_REGEN_DP_ENC1_P0_MASK BIT(9) +#define AUDIO_SAMPLE_PRSENT_REGEN_DP_ENC1_P0_MASK BIT(12) +#define MTK_DP_ENC1_P0_3324 0x3324 +#define AUDIO_SOURCE_MUX_DP_ENC1_P0_MASK GENMASK(9, 8) +#define AUDIO_SOURCE_MUX_DP_ENC1_P0_DPRX 0 +#define MTK_DP_ENC1_P0_3364 0x3364 +#define SDP_DOWN_CNT_IN_HBLANK_DP_ENC1_P0_VAL 0x20 +#define SDP_DOWN_CNT_INIT_IN_HBLANK_DP_ENC1_P0_MASK GENMASK(11, 0) +#define FIFO_READ_START_POINT_DP_ENC1_P0_VAL 4 +#define FIFO_READ_START_POINT_DP_ENC1_P0_MASK GENMASK(15, 12) +#define MTK_DP_ENC1_P0_3368 0x3368 +#define VIDEO_SRAM_FIFO_CNT_RESET_SEL_DP_ENC1_P0 BIT(0) +#define VIDEO_STABLE_CNT_THRD_DP_ENC1_P0 BIT(4) +#define SDP_DP13_EN_DP_ENC1_P0 BIT(8) +#define BS2BS_MODE_DP_ENC1_P0 BIT(12) +#define BS2BS_MODE_DP_ENC1_P0_MASK GENMASK(13, 12) +#define BS2BS_MODE_DP_ENC1_P0_VAL 1 +#define DP_ENC1_P0_3368_VAL (VIDEO_SRAM_FIFO_CNT_RESET_SEL_DP_ENC1_P0 | \ + VIDEO_STABLE_CNT_THRD_DP_ENC1_P0 | \ + SDP_DP13_EN_DP_ENC1_P0 | \ + BS2BS_MODE_DP_ENC1_P0) + 
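The defines in this header pair a register offset with GENMASK()-style field masks and pre-shifted values; for example VIDEO_COLOR_DEPTH_DP_ENC0_P0_8BIT is (3 << 8), which sits inside VIDEO_COLOR_DEPTH_DP_ENC0_P0_MASK, GENMASK(10, 8). The driver applies them with a read-modify-write update helper. A minimal user-space sketch of that pattern, using a local GENMASK replacement and a fake register (SKETCH_GENMASK, update_bits and fake_reg are names made up here), is:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's GENMASK() and a pre-shifted value macro. */
#define SKETCH_GENMASK(h, l)	(((1u << ((h) - (l) + 1)) - 1) << (l))
#define COLOR_DEPTH_MASK	SKETCH_GENMASK(10, 8)	/* like GENMASK(10, 8) */
#define COLOR_DEPTH_8BIT	(3u << 8)		/* like the _8BIT value above */

/* Read-modify-write, the same pattern as the driver's update-bits helpers. */
static void update_bits(uint32_t *reg, uint32_t val, uint32_t mask)
{
	*reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t fake_reg = 0xffffffff;	/* stand-in for the MMIO register value */

	update_bits(&fake_reg, COLOR_DEPTH_8BIT, COLOR_DEPTH_MASK);
	printf("reg = 0x%08x (bits 10:8 = %u)\n", fake_reg, (fake_reg >> 8) & 0x7);
	return 0;
}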
+#define MTK_DP_ENC1_P0_3374 0x3374 +#define SDP_ASP_INSERT_IN_HBLANK_DP_ENC1_P0_MASK BIT(12) +#define SDP_DOWN_ASP_CNT_INIT_DP_ENC1_P0_MASK GENMASK(11, 0) + +#define MTK_DP_ENC1_P0_33F4 0x33f4 +#define DP_ENC_DUMMY_RW_1_AUDIO_RST_EN BIT(0) +#define DP_ENC_DUMMY_RW_1 BIT(9) + +/* offset: TRANS_OFFSET (0x3400) */ +#define MTK_DP_TRANS_P0_3400 0x3400 +#define PATTERN1_EN_DP_TRANS_P0_MASK BIT(12) +#define PATTERN2_EN_DP_TRANS_P0_MASK BIT(13) +#define PATTERN3_EN_DP_TRANS_P0_MASK BIT(14) +#define PATTERN4_EN_DP_TRANS_P0_MASK BIT(15) +#define MTK_DP_TRANS_P0_3404 0x3404 +#define DP_SCR_EN_DP_TRANS_P0_MASK BIT(0) +#define MTK_DP_TRANS_P0_340C 0x340c +#define DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0 BIT(13) +#define MTK_DP_TRANS_P0_3410 0x3410 +#define HPD_DEB_THD_DP_TRANS_P0_MASK GENMASK(3, 0) +#define HPD_INT_THD_DP_TRANS_P0_MASK GENMASK(7, 4) +#define HPD_INT_THD_DP_TRANS_P0_LOWER_500US (2 << 4) +#define HPD_INT_THD_DP_TRANS_P0_UPPER_1100US (2 << 6) +#define HPD_DISC_THD_DP_TRANS_P0_MASK GENMASK(11, 8) +#define HPD_CONN_THD_DP_TRANS_P0_MASK GENMASK(15, 12) +#define MTK_DP_TRANS_P0_3414 0x3414 +#define HPD_DB_DP_TRANS_P0_MASK BIT(2) +#define MTK_DP_TRANS_P0_3418 0x3418 +#define IRQ_CLR_DP_TRANS_P0_MASK GENMASK(3, 0) +#define IRQ_MASK_DP_TRANS_P0_MASK GENMASK(7, 4) +#define IRQ_MASK_DP_TRANS_P0_DISC_IRQ (BIT(1) << 4) +#define IRQ_MASK_DP_TRANS_P0_CONN_IRQ (BIT(2) << 4) +#define IRQ_MASK_DP_TRANS_P0_INT_IRQ (BIT(3) << 4) +#define IRQ_STATUS_DP_TRANS_P0_MASK GENMASK(15, 12) +#define MTK_DP_TRANS_P0_342C 0x342c +#define XTAL_FREQ_DP_TRANS_P0_DEFAULT (BIT(0) | BIT(3) | BIT(5) | BIT(6)) +#define XTAL_FREQ_DP_TRANS_P0_MASK GENMASK(7, 0) +#define MTK_DP_TRANS_P0_3430 0x3430 +#define HPD_INT_THD_ECO_DP_TRANS_P0_MASK GENMASK(1, 0) +#define HPD_INT_THD_ECO_DP_TRANS_P0_HIGH_BOUND_EXT BIT(1) +#define MTK_DP_TRANS_P0_34A4 0x34a4 +#define LANE_NUM_DP_TRANS_P0_MASK GENMASK(3, 2) +#define MTK_DP_TRANS_P0_3540 0x3540 +#define FEC_EN_DP_TRANS_P0_MASK BIT(0) +#define FEC_CLOCK_EN_MODE_DP_TRANS_P0 BIT(3) +#define MTK_DP_TRANS_P0_3580 0x3580 +#define POST_MISC_DATA_LANE0_OV_DP_TRANS_P0_MASK BIT(8) +#define POST_MISC_DATA_LANE1_OV_DP_TRANS_P0_MASK BIT(9) +#define POST_MISC_DATA_LANE2_OV_DP_TRANS_P0_MASK BIT(10) +#define POST_MISC_DATA_LANE3_OV_DP_TRANS_P0_MASK BIT(11) +#define MTK_DP_TRANS_P0_35C8 0x35c8 +#define SW_IRQ_CLR_DP_TRANS_P0_MASK GENMASK(15, 0) +#define SW_IRQ_STATUS_DP_TRANS_P0_MASK GENMASK(15, 0) +#define MTK_DP_TRANS_P0_35D0 0x35d0 +#define SW_IRQ_FINAL_STATUS_DP_TRANS_P0_MASK GENMASK(15, 0) +#define MTK_DP_TRANS_P0_35F0 0x35f0 +#define DP_TRANS_DUMMY_RW_0 BIT(3) +#define DP_TRANS_DUMMY_RW_0_MASK GENMASK(3, 2) + +/* offset: AUX_OFFSET (0x3600) */ +#define MTK_DP_AUX_P0_360C 0x360c +#define AUX_TIMEOUT_THR_AUX_TX_P0_MASK GENMASK(12, 0) +#define AUX_TIMEOUT_THR_AUX_TX_P0_VAL 0x1595 +#define MTK_DP_AUX_P0_3614 0x3614 +#define AUX_RX_UI_CNT_THR_AUX_TX_P0_MASK GENMASK(6, 0) +#define AUX_RX_UI_CNT_THR_AUX_FOR_26M 13 +#define MTK_DP_AUX_P0_3618 0x3618 +#define AUX_RX_FIFO_FULL_AUX_TX_P0_MASK BIT(9) +#define AUX_RX_FIFO_WRITE_POINTER_AUX_TX_P0_MASK GENMASK(3, 0) +#define MTK_DP_AUX_P0_3620 0x3620 +#define AUX_RD_MODE_AUX_TX_P0_MASK BIT(9) +#define AUX_RX_FIFO_READ_PULSE_TX_P0 BIT(8) +#define AUX_RX_FIFO_READ_DATA_AUX_TX_P0_MASK GENMASK(7, 0) +#define MTK_DP_AUX_P0_3624 0x3624 +#define AUX_RX_REPLY_COMMAND_AUX_TX_P0_MASK GENMASK(3, 0) +#define MTK_DP_AUX_P0_3628 0x3628 +#define AUX_RX_PHY_STATE_AUX_TX_P0_MASK GENMASK(9, 0) +#define AUX_RX_PHY_STATE_AUX_TX_P0_RX_IDLE BIT(0) +#define MTK_DP_AUX_P0_362C 0x362c 
+#define AUX_NO_LENGTH_AUX_TX_P0 BIT(0) +#define AUX_TX_AUXTX_OV_EN_AUX_TX_P0_MASK BIT(1) +#define AUX_RESERVED_RW_0_AUX_TX_P0_MASK GENMASK(15, 2) +#define MTK_DP_AUX_P0_3630 0x3630 +#define AUX_TX_REQUEST_READY_AUX_TX_P0 BIT(3) +#define MTK_DP_AUX_P0_3634 0x3634 +#define AUX_TX_OVER_SAMPLE_RATE_AUX_TX_P0_MASK GENMASK(15, 8) +#define AUX_TX_OVER_SAMPLE_RATE_FOR_26M 25 +#define MTK_DP_AUX_P0_3640 0x3640 +#define AUX_RX_AUX_RECV_COMPLETE_IRQ_AUX_TX_P0 BIT(6) +#define AUX_RX_EDID_RECV_COMPLETE_IRQ_AUX_TX_P0 BIT(5) +#define AUX_RX_MCCS_RECV_COMPLETE_IRQ_AUX_TX_P0 BIT(4) +#define AUX_RX_CMD_RECV_IRQ_AUX_TX_P0 BIT(3) +#define AUX_RX_ADDR_RECV_IRQ_AUX_TX_P0 BIT(2) +#define AUX_RX_DATA_RECV_IRQ_AUX_TX_P0 BIT(1) +#define AUX_400US_TIMEOUT_IRQ_AUX_TX_P0 BIT(0) +#define DP_AUX_P0_3640_VAL (AUX_400US_TIMEOUT_IRQ_AUX_TX_P0 | \ + AUX_RX_DATA_RECV_IRQ_AUX_TX_P0 | \ + AUX_RX_ADDR_RECV_IRQ_AUX_TX_P0 | \ + AUX_RX_CMD_RECV_IRQ_AUX_TX_P0 | \ + AUX_RX_MCCS_RECV_COMPLETE_IRQ_AUX_TX_P0 | \ + AUX_RX_EDID_RECV_COMPLETE_IRQ_AUX_TX_P0 | \ + AUX_RX_AUX_RECV_COMPLETE_IRQ_AUX_TX_P0) +#define MTK_DP_AUX_P0_3644 0x3644 +#define MCU_REQUEST_COMMAND_AUX_TX_P0_MASK GENMASK(3, 0) +#define MTK_DP_AUX_P0_3648 0x3648 +#define MCU_REQUEST_ADDRESS_LSB_AUX_TX_P0_MASK GENMASK(15, 0) +#define MTK_DP_AUX_P0_364C 0x364c +#define MCU_REQUEST_ADDRESS_MSB_AUX_TX_P0_MASK GENMASK(3, 0) +#define MTK_DP_AUX_P0_3650 0x3650 +#define MCU_REQ_DATA_NUM_AUX_TX_P0_MASK GENMASK(15, 12) +#define PHY_FIFO_RST_AUX_TX_P0_MASK BIT(9) +#define MCU_ACK_TRAN_COMPLETE_AUX_TX_P0 BIT(8) +#define MTK_DP_AUX_P0_3658 0x3658 +#define AUX_TX_OV_EN_AUX_TX_P0_MASK BIT(0) +#define MTK_DP_AUX_P0_3690 0x3690 +#define RX_REPLY_COMPLETE_MODE_AUX_TX_P0 BIT(8) +#define MTK_DP_AUX_P0_3704 0x3704 +#define AUX_TX_FIFO_WDATA_NEW_MODE_T_AUX_TX_P0_MASK BIT(1) +#define AUX_TX_FIFO_NEW_MODE_EN_AUX_TX_P0 BIT(2) +#define MTK_DP_AUX_P0_3708 0x3708 +#define MTK_DP_AUX_P0_37C8 0x37c8 +#define MTK_ATOP_EN_AUX_TX_P0 BIT(0) + +#endif /*_MTK_DP_REG_H_*/ diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c new file mode 100644 index 000000000000..61cab32e213a --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -0,0 +1,1363 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014 MediaTek Inc. 
+ * Author: Jie Qiu <jie.qiu@mediatek.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/debugfs.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/media-bus-format.h> +#include <linux/of.h> +#include <linux/of_graph.h> +#include <linux/pinctrl/consumer.h> +#include <linux/platform_device.h> +#include <linux/soc/mediatek/mtk-mmsys.h> +#include <linux/types.h> + +#include <video/videomode.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_crtc.h> +#include <drm/drm_edid.h> +#include <drm/drm_of.h> +#include <drm/drm_simple_kms_helper.h> + +#include "mtk_ddp_comp.h" +#include "mtk_disp_drv.h" +#include "mtk_dpi_regs.h" +#include "mtk_drm_drv.h" + +enum mtk_dpi_out_bit_num { + MTK_DPI_OUT_BIT_NUM_8BITS, + MTK_DPI_OUT_BIT_NUM_10BITS, + MTK_DPI_OUT_BIT_NUM_12BITS, + MTK_DPI_OUT_BIT_NUM_16BITS +}; + +enum mtk_dpi_out_yc_map { + MTK_DPI_OUT_YC_MAP_RGB, + MTK_DPI_OUT_YC_MAP_CYCY, + MTK_DPI_OUT_YC_MAP_YCYC, + MTK_DPI_OUT_YC_MAP_CY, + MTK_DPI_OUT_YC_MAP_YC +}; + +enum mtk_dpi_out_channel_swap { + MTK_DPI_OUT_CHANNEL_SWAP_RGB, + MTK_DPI_OUT_CHANNEL_SWAP_GBR, + MTK_DPI_OUT_CHANNEL_SWAP_BRG, + MTK_DPI_OUT_CHANNEL_SWAP_RBG, + MTK_DPI_OUT_CHANNEL_SWAP_GRB, + MTK_DPI_OUT_CHANNEL_SWAP_BGR +}; + +enum mtk_dpi_out_color_format { + MTK_DPI_COLOR_FORMAT_RGB, + MTK_DPI_COLOR_FORMAT_YCBCR_422, + MTK_DPI_COLOR_FORMAT_YCBCR_444 +}; + +struct mtk_dpi { + struct drm_encoder encoder; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct drm_connector *connector; + void __iomem *regs; + struct device *dev; + struct device *mmsys_dev; + struct clk *engine_clk; + struct clk *pixel_clk; + struct clk *tvd_clk; + int irq; + struct drm_display_mode mode; + const struct mtk_dpi_conf *conf; + enum mtk_dpi_out_color_format color_format; + enum mtk_dpi_out_yc_map yc_map; + enum mtk_dpi_out_bit_num bit_num; + enum mtk_dpi_out_channel_swap channel_swap; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_gpio; + struct pinctrl_state *pins_dpi; + u32 output_fmt; + int refcount; +}; + +static inline struct mtk_dpi *bridge_to_dpi(struct drm_bridge *b) +{ + return container_of(b, struct mtk_dpi, bridge); +} + +enum mtk_dpi_polarity { + MTK_DPI_POLARITY_RISING, + MTK_DPI_POLARITY_FALLING, +}; + +struct mtk_dpi_polarities { + enum mtk_dpi_polarity de_pol; + enum mtk_dpi_polarity ck_pol; + enum mtk_dpi_polarity hsync_pol; + enum mtk_dpi_polarity vsync_pol; +}; + +struct mtk_dpi_sync_param { + u32 sync_width; + u32 front_porch; + u32 back_porch; + bool shift_half_line; +}; + +struct mtk_dpi_yc_limit { + u16 y_top; + u16 y_bottom; + u16 c_top; + u16 c_bottom; +}; + +struct mtk_dpi_factor { + u32 clock; + u8 factor; +}; + +/** + * struct mtk_dpi_conf - Configuration of mediatek dpi. + * @dpi_factor: SoC-specific pixel clock PLL factor values. + * @num_dpi_factor: Number of pixel clock PLL factor values. + * @reg_h_fre_con: Register address of frequency control. + * @max_clock_khz: Max clock frequency supported for this SoCs in khz units. + * @edge_sel_en: Enable of edge selection. + * @output_fmts: Array of supported output formats. + * @num_output_fmts: Quantity of supported output formats. + * @is_ck_de_pol: Support CK/DE polarity. + * @swap_input_support: Support input swap function. + * @support_direct_pin: IP supports direct connection to dpi panels. + * @dimension_mask: Mask used for HWIDTH, HPORCH, VSYNC_WIDTH and VSYNC_PORCH + * (no shift). 
+ * @hvsize_mask: Mask of HSIZE and VSIZE mask (no shift). + * @channel_swap_shift: Shift value of channel swap. + * @yuv422_en_bit: Enable bit of yuv422. + * @csc_enable_bit: Enable bit of CSC. + * @input_2p_en_bit: Enable bit for input two pixel per round feature. + * If present, implies that the feature must be enabled. + * @pixels_per_iter: Quantity of transferred pixels per iteration. + * @edge_cfg_in_mmsys: If the edge configuration for DPI's output needs to be set in MMSYS. + * @clocked_by_hdmi: HDMI IP outputs clock to dpi_pixel_clk input clock, needed + * for DPI registers access. + * @output_1pixel: Enable outputting one pixel per round; if the input is two pixel per + * round, the DPI hardware will internally transform it to 1T1P. + */ +struct mtk_dpi_conf { + const struct mtk_dpi_factor *dpi_factor; + const u8 num_dpi_factor; + u32 reg_h_fre_con; + u32 max_clock_khz; + bool edge_sel_en; + const u32 *output_fmts; + u32 num_output_fmts; + bool is_ck_de_pol; + bool swap_input_support; + bool support_direct_pin; + u32 dimension_mask; + u32 hvsize_mask; + u32 channel_swap_shift; + u32 yuv422_en_bit; + u32 csc_enable_bit; + u32 input_2p_en_bit; + u32 pixels_per_iter; + bool edge_cfg_in_mmsys; + bool clocked_by_hdmi; + bool output_1pixel; +}; + +static void mtk_dpi_mask(struct mtk_dpi *dpi, u32 offset, u32 val, u32 mask) +{ + u32 tmp = readl(dpi->regs + offset) & ~mask; + + tmp |= (val & mask); + writel(tmp, dpi->regs + offset); +} + +static void mtk_dpi_test_pattern_en(struct mtk_dpi *dpi, u8 type, bool enable) +{ + u32 val; + + if (enable) + val = FIELD_PREP(DPI_PAT_SEL, type) | DPI_PAT_EN; + else + val = 0; + + mtk_dpi_mask(dpi, DPI_PATTERN0, val, DPI_PAT_SEL | DPI_PAT_EN); +} + +static void mtk_dpi_sw_reset(struct mtk_dpi *dpi, bool reset) +{ + mtk_dpi_mask(dpi, DPI_RET, reset ? 
RST : 0, RST); +} + +static void mtk_dpi_enable(struct mtk_dpi *dpi) +{ + mtk_dpi_mask(dpi, DPI_EN, EN, EN); +} + +static void mtk_dpi_disable(struct mtk_dpi *dpi) +{ + mtk_dpi_mask(dpi, DPI_EN, 0, EN); +} + +static void mtk_dpi_config_hsync(struct mtk_dpi *dpi, + struct mtk_dpi_sync_param *sync) +{ + mtk_dpi_mask(dpi, DPI_TGEN_HWIDTH, sync->sync_width << HPW, + dpi->conf->dimension_mask << HPW); + mtk_dpi_mask(dpi, DPI_TGEN_HPORCH, sync->back_porch << HBP, + dpi->conf->dimension_mask << HBP); + mtk_dpi_mask(dpi, DPI_TGEN_HPORCH, sync->front_porch << HFP, + dpi->conf->dimension_mask << HFP); +} + +static void mtk_dpi_config_vsync(struct mtk_dpi *dpi, + struct mtk_dpi_sync_param *sync, + u32 width_addr, u32 porch_addr) +{ + mtk_dpi_mask(dpi, width_addr, + sync->shift_half_line << VSYNC_HALF_LINE_SHIFT, + VSYNC_HALF_LINE_MASK); + mtk_dpi_mask(dpi, width_addr, + sync->sync_width << VSYNC_WIDTH_SHIFT, + dpi->conf->dimension_mask << VSYNC_WIDTH_SHIFT); + mtk_dpi_mask(dpi, porch_addr, + sync->back_porch << VSYNC_BACK_PORCH_SHIFT, + dpi->conf->dimension_mask << VSYNC_BACK_PORCH_SHIFT); + mtk_dpi_mask(dpi, porch_addr, + sync->front_porch << VSYNC_FRONT_PORCH_SHIFT, + dpi->conf->dimension_mask << VSYNC_FRONT_PORCH_SHIFT); +} + +static void mtk_dpi_config_vsync_lodd(struct mtk_dpi *dpi, + struct mtk_dpi_sync_param *sync) +{ + mtk_dpi_config_vsync(dpi, sync, DPI_TGEN_VWIDTH, DPI_TGEN_VPORCH); +} + +static void mtk_dpi_config_vsync_leven(struct mtk_dpi *dpi, + struct mtk_dpi_sync_param *sync) +{ + mtk_dpi_config_vsync(dpi, sync, DPI_TGEN_VWIDTH_LEVEN, + DPI_TGEN_VPORCH_LEVEN); +} + +static void mtk_dpi_config_vsync_rodd(struct mtk_dpi *dpi, + struct mtk_dpi_sync_param *sync) +{ + mtk_dpi_config_vsync(dpi, sync, DPI_TGEN_VWIDTH_RODD, + DPI_TGEN_VPORCH_RODD); +} + +static void mtk_dpi_config_vsync_reven(struct mtk_dpi *dpi, + struct mtk_dpi_sync_param *sync) +{ + mtk_dpi_config_vsync(dpi, sync, DPI_TGEN_VWIDTH_REVEN, + DPI_TGEN_VPORCH_REVEN); +} + +static void mtk_dpi_config_pol(struct mtk_dpi *dpi, + struct mtk_dpi_polarities *dpi_pol) +{ + unsigned int pol; + unsigned int mask; + + mask = HSYNC_POL | VSYNC_POL; + pol = (dpi_pol->hsync_pol == MTK_DPI_POLARITY_RISING ? 0 : HSYNC_POL) | + (dpi_pol->vsync_pol == MTK_DPI_POLARITY_RISING ? 0 : VSYNC_POL); + if (dpi->conf->is_ck_de_pol) { + mask |= CK_POL | DE_POL; + pol |= (dpi_pol->ck_pol == MTK_DPI_POLARITY_RISING ? + 0 : CK_POL) | + (dpi_pol->de_pol == MTK_DPI_POLARITY_RISING ? + 0 : DE_POL); + } + + mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, pol, mask); +} + +static void mtk_dpi_config_3d(struct mtk_dpi *dpi, bool en_3d) +{ + mtk_dpi_mask(dpi, DPI_CON, en_3d ? TDFP_EN : 0, TDFP_EN); +} + +static void mtk_dpi_config_interface(struct mtk_dpi *dpi, bool inter) +{ + mtk_dpi_mask(dpi, DPI_CON, inter ? 
INTL_EN : 0, INTL_EN); +} + +static void mtk_dpi_config_fb_size(struct mtk_dpi *dpi, u32 width, u32 height) +{ + mtk_dpi_mask(dpi, DPI_SIZE, width << HSIZE, + dpi->conf->hvsize_mask << HSIZE); + mtk_dpi_mask(dpi, DPI_SIZE, height << VSIZE, + dpi->conf->hvsize_mask << VSIZE); +} + +static void mtk_dpi_config_channel_limit(struct mtk_dpi *dpi) +{ + struct mtk_dpi_yc_limit limit; + + if (drm_default_rgb_quant_range(&dpi->mode) == + HDMI_QUANTIZATION_RANGE_LIMITED) { + limit.y_bottom = 0x10; + limit.y_top = 0xfe0; + limit.c_bottom = 0x10; + limit.c_top = 0xfe0; + } else { + limit.y_bottom = 0; + limit.y_top = 0xfff; + limit.c_bottom = 0; + limit.c_top = 0xfff; + } + + mtk_dpi_mask(dpi, DPI_Y_LIMIT, limit.y_bottom << Y_LIMINT_BOT, + Y_LIMINT_BOT_MASK); + mtk_dpi_mask(dpi, DPI_Y_LIMIT, limit.y_top << Y_LIMINT_TOP, + Y_LIMINT_TOP_MASK); + mtk_dpi_mask(dpi, DPI_C_LIMIT, limit.c_bottom << C_LIMIT_BOT, + C_LIMIT_BOT_MASK); + mtk_dpi_mask(dpi, DPI_C_LIMIT, limit.c_top << C_LIMIT_TOP, + C_LIMIT_TOP_MASK); +} + +static void mtk_dpi_config_bit_num(struct mtk_dpi *dpi, + enum mtk_dpi_out_bit_num num) +{ + u32 val; + + switch (num) { + case MTK_DPI_OUT_BIT_NUM_8BITS: + val = OUT_BIT_8; + break; + case MTK_DPI_OUT_BIT_NUM_10BITS: + val = OUT_BIT_10; + break; + case MTK_DPI_OUT_BIT_NUM_12BITS: + val = OUT_BIT_12; + break; + case MTK_DPI_OUT_BIT_NUM_16BITS: + val = OUT_BIT_16; + break; + default: + val = OUT_BIT_8; + break; + } + mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, val << OUT_BIT, + OUT_BIT_MASK); +} + +static void mtk_dpi_config_yc_map(struct mtk_dpi *dpi, + enum mtk_dpi_out_yc_map map) +{ + u32 val; + + switch (map) { + case MTK_DPI_OUT_YC_MAP_RGB: + val = YC_MAP_RGB; + break; + case MTK_DPI_OUT_YC_MAP_CYCY: + val = YC_MAP_CYCY; + break; + case MTK_DPI_OUT_YC_MAP_YCYC: + val = YC_MAP_YCYC; + break; + case MTK_DPI_OUT_YC_MAP_CY: + val = YC_MAP_CY; + break; + case MTK_DPI_OUT_YC_MAP_YC: + val = YC_MAP_YC; + break; + default: + val = YC_MAP_RGB; + break; + } + + mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, val << YC_MAP, YC_MAP_MASK); +} + +static void mtk_dpi_config_channel_swap(struct mtk_dpi *dpi, + enum mtk_dpi_out_channel_swap swap) +{ + u32 val; + + switch (swap) { + case MTK_DPI_OUT_CHANNEL_SWAP_RGB: + val = SWAP_RGB; + break; + case MTK_DPI_OUT_CHANNEL_SWAP_GBR: + val = SWAP_GBR; + break; + case MTK_DPI_OUT_CHANNEL_SWAP_BRG: + val = SWAP_BRG; + break; + case MTK_DPI_OUT_CHANNEL_SWAP_RBG: + val = SWAP_RBG; + break; + case MTK_DPI_OUT_CHANNEL_SWAP_GRB: + val = SWAP_GRB; + break; + case MTK_DPI_OUT_CHANNEL_SWAP_BGR: + val = SWAP_BGR; + break; + default: + val = SWAP_RGB; + break; + } + + mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, + val << dpi->conf->channel_swap_shift, + CH_SWAP_MASK << dpi->conf->channel_swap_shift); +} + +static void mtk_dpi_config_yuv422_enable(struct mtk_dpi *dpi, bool enable) +{ + mtk_dpi_mask(dpi, DPI_CON, enable ? dpi->conf->yuv422_en_bit : 0, + dpi->conf->yuv422_en_bit); +} + +static void mtk_dpi_config_csc_enable(struct mtk_dpi *dpi, bool enable) +{ + mtk_dpi_mask(dpi, DPI_CON, enable ? dpi->conf->csc_enable_bit : 0, + dpi->conf->csc_enable_bit); +} + +static void mtk_dpi_config_swap_input(struct mtk_dpi *dpi, bool enable) +{ + mtk_dpi_mask(dpi, DPI_CON, enable ? 
IN_RB_SWAP : 0, IN_RB_SWAP); +} + +static void mtk_dpi_config_2n_h_fre(struct mtk_dpi *dpi) +{ + if (dpi->conf->reg_h_fre_con) + mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N); +} + +static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi) +{ + if (dpi->conf->edge_sel_en && dpi->conf->reg_h_fre_con) + mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, 0, EDGE_SEL_EN); +} + +static void mtk_dpi_config_color_format(struct mtk_dpi *dpi, + enum mtk_dpi_out_color_format format) +{ + mtk_dpi_config_channel_swap(dpi, dpi->channel_swap); + + switch (format) { + case MTK_DPI_COLOR_FORMAT_YCBCR_444: + mtk_dpi_config_yuv422_enable(dpi, false); + mtk_dpi_config_csc_enable(dpi, true); + if (dpi->conf->swap_input_support) + mtk_dpi_config_swap_input(dpi, false); + break; + case MTK_DPI_COLOR_FORMAT_YCBCR_422: + mtk_dpi_config_yuv422_enable(dpi, true); + mtk_dpi_config_csc_enable(dpi, true); + + /* + * If height is smaller than 720, we need to use RGB_TO_BT601 + * to transfer to yuv422. Otherwise, we use RGB_TO_JPEG. + */ + mtk_dpi_mask(dpi, DPI_MATRIX_SET, dpi->mode.hdisplay <= 720 ? + MATRIX_SEL_RGB_TO_BT601 : MATRIX_SEL_RGB_TO_JPEG, + INT_MATRIX_SEL_MASK); + break; + default: + case MTK_DPI_COLOR_FORMAT_RGB: + mtk_dpi_config_yuv422_enable(dpi, false); + mtk_dpi_config_csc_enable(dpi, false); + if (dpi->conf->swap_input_support) + mtk_dpi_config_swap_input(dpi, false); + break; + } +} + +static void mtk_dpi_dual_edge(struct mtk_dpi *dpi) +{ + if ((dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_LE) || + (dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_BE)) { + mtk_dpi_mask(dpi, DPI_DDR_SETTING, DDR_EN | DDR_4PHASE, + DDR_EN | DDR_4PHASE); + mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, + dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_LE ? + EDGE_SEL : 0, EDGE_SEL); + if (dpi->conf->edge_cfg_in_mmsys) + mtk_mmsys_ddp_dpi_fmt_config(dpi->mmsys_dev, MTK_DPI_RGB888_DDR_CON); + } else { + mtk_dpi_mask(dpi, DPI_DDR_SETTING, DDR_EN | DDR_4PHASE, 0); + if (dpi->conf->edge_cfg_in_mmsys) + mtk_mmsys_ddp_dpi_fmt_config(dpi->mmsys_dev, MTK_DPI_RGB888_SDR_CON); + } +} + +static void mtk_dpi_power_off(struct mtk_dpi *dpi) +{ + if (WARN_ON(dpi->refcount == 0)) + return; + + if (--dpi->refcount != 0) + return; + + mtk_dpi_disable(dpi); + clk_disable_unprepare(dpi->pixel_clk); + clk_disable_unprepare(dpi->tvd_clk); + clk_disable_unprepare(dpi->engine_clk); +} + +static int mtk_dpi_power_on(struct mtk_dpi *dpi) +{ + int ret; + + if (++dpi->refcount != 1) + return 0; + + ret = clk_prepare_enable(dpi->engine_clk); + if (ret) { + dev_err(dpi->dev, "Failed to enable engine clock: %d\n", ret); + goto err_refcount; + } + + ret = clk_prepare_enable(dpi->tvd_clk); + if (ret) { + dev_err(dpi->dev, "Failed to enable tvd pll: %d\n", ret); + goto err_engine; + } + + ret = clk_prepare_enable(dpi->pixel_clk); + if (ret) { + dev_err(dpi->dev, "Failed to enable pixel clock: %d\n", ret); + goto err_pixel; + } + + return 0; + +err_pixel: + clk_disable_unprepare(dpi->tvd_clk); +err_engine: + clk_disable_unprepare(dpi->engine_clk); +err_refcount: + dpi->refcount--; + return ret; +} + +static unsigned int mtk_dpi_calculate_factor(struct mtk_dpi *dpi, int mode_clk) +{ + const struct mtk_dpi_factor *dpi_factor = dpi->conf->dpi_factor; + int i; + + for (i = 0; i < dpi->conf->num_dpi_factor; i++) { + if (mode_clk <= dpi_factor[i].clock) + return dpi_factor[i].factor; + } + + /* If no match try the lowest possible factor */ + return dpi_factor[dpi->conf->num_dpi_factor - 1].factor; +} + +static void mtk_dpi_set_pixel_clk(struct mtk_dpi *dpi, 
struct videomode *vm, int mode_clk) +{ + unsigned long pll_rate; + unsigned int factor; + + /* let pll_rate can fix the valid range of tvdpll (1G~2GHz) */ + factor = mtk_dpi_calculate_factor(dpi, mode_clk); + pll_rate = vm->pixelclock * factor; + + dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n", + pll_rate, vm->pixelclock); + + clk_set_rate(dpi->tvd_clk, pll_rate); + pll_rate = clk_get_rate(dpi->tvd_clk); + + /* + * Depending on the IP version, we may output a different amount of + * pixels for each iteration: divide the clock by this number and + * adjust the display porches accordingly. + */ + vm->pixelclock = pll_rate / factor; + vm->pixelclock /= dpi->conf->pixels_per_iter; + + if ((dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_LE) || + (dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_BE)) + clk_set_rate(dpi->pixel_clk, vm->pixelclock * 2); + else + clk_set_rate(dpi->pixel_clk, vm->pixelclock); + + vm->pixelclock = clk_get_rate(dpi->pixel_clk); + + dev_dbg(dpi->dev, "Got PLL %lu Hz, pixel clock %lu Hz\n", + pll_rate, vm->pixelclock); +} + +static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi, + struct drm_display_mode *mode) +{ + struct mtk_dpi_polarities dpi_pol; + struct mtk_dpi_sync_param hsync; + struct mtk_dpi_sync_param vsync_lodd = { 0 }; + struct mtk_dpi_sync_param vsync_leven = { 0 }; + struct mtk_dpi_sync_param vsync_rodd = { 0 }; + struct mtk_dpi_sync_param vsync_reven = { 0 }; + struct videomode vm = { 0 }; + + drm_display_mode_to_videomode(mode, &vm); + + if (!dpi->conf->clocked_by_hdmi) + mtk_dpi_set_pixel_clk(dpi, &vm, mode->clock); + + dpi_pol.ck_pol = MTK_DPI_POLARITY_FALLING; + dpi_pol.de_pol = MTK_DPI_POLARITY_RISING; + dpi_pol.hsync_pol = vm.flags & DISPLAY_FLAGS_HSYNC_HIGH ? + MTK_DPI_POLARITY_FALLING : MTK_DPI_POLARITY_RISING; + dpi_pol.vsync_pol = vm.flags & DISPLAY_FLAGS_VSYNC_HIGH ? + MTK_DPI_POLARITY_FALLING : MTK_DPI_POLARITY_RISING; + + /* + * Depending on the IP version, we may output a different amount of + * pixels for each iteration: divide the clock by this number and + * adjust the display porches accordingly. 
+ */ + hsync.sync_width = vm.hsync_len / dpi->conf->pixels_per_iter; + hsync.back_porch = vm.hback_porch / dpi->conf->pixels_per_iter; + hsync.front_porch = vm.hfront_porch / dpi->conf->pixels_per_iter; + + hsync.shift_half_line = false; + vsync_lodd.sync_width = vm.vsync_len; + vsync_lodd.back_porch = vm.vback_porch; + vsync_lodd.front_porch = vm.vfront_porch; + vsync_lodd.shift_half_line = false; + + if (vm.flags & DISPLAY_FLAGS_INTERLACED && + mode->flags & DRM_MODE_FLAG_3D_MASK) { + vsync_leven = vsync_lodd; + vsync_rodd = vsync_lodd; + vsync_reven = vsync_lodd; + vsync_leven.shift_half_line = true; + vsync_reven.shift_half_line = true; + } else if (vm.flags & DISPLAY_FLAGS_INTERLACED && + !(mode->flags & DRM_MODE_FLAG_3D_MASK)) { + vsync_leven = vsync_lodd; + vsync_leven.shift_half_line = true; + } else if (!(vm.flags & DISPLAY_FLAGS_INTERLACED) && + mode->flags & DRM_MODE_FLAG_3D_MASK) { + vsync_rodd = vsync_lodd; + } + mtk_dpi_sw_reset(dpi, true); + mtk_dpi_config_pol(dpi, &dpi_pol); + + mtk_dpi_config_hsync(dpi, &hsync); + mtk_dpi_config_vsync_lodd(dpi, &vsync_lodd); + mtk_dpi_config_vsync_rodd(dpi, &vsync_rodd); + mtk_dpi_config_vsync_leven(dpi, &vsync_leven); + mtk_dpi_config_vsync_reven(dpi, &vsync_reven); + + mtk_dpi_config_3d(dpi, !!(mode->flags & DRM_MODE_FLAG_3D_MASK)); + mtk_dpi_config_interface(dpi, !!(vm.flags & + DISPLAY_FLAGS_INTERLACED)); + if (vm.flags & DISPLAY_FLAGS_INTERLACED) + mtk_dpi_config_fb_size(dpi, vm.hactive, vm.vactive >> 1); + else + mtk_dpi_config_fb_size(dpi, vm.hactive, vm.vactive); + + mtk_dpi_config_channel_limit(dpi); + mtk_dpi_config_bit_num(dpi, dpi->bit_num); + mtk_dpi_config_channel_swap(dpi, dpi->channel_swap); + mtk_dpi_config_color_format(dpi, dpi->color_format); + if (dpi->conf->support_direct_pin) { + mtk_dpi_config_yc_map(dpi, dpi->yc_map); + mtk_dpi_config_2n_h_fre(dpi); + + /* DPI can connect to either an external bridge or the internal HDMI encoder */ + if (dpi->conf->output_1pixel) + mtk_dpi_mask(dpi, DPI_CON, DPI_OUTPUT_1T1P_EN, DPI_OUTPUT_1T1P_EN); + else + mtk_dpi_dual_edge(dpi); + + mtk_dpi_config_disable_edge(dpi); + } + if (dpi->conf->input_2p_en_bit) { + mtk_dpi_mask(dpi, DPI_CON, dpi->conf->input_2p_en_bit, + dpi->conf->input_2p_en_bit); + } + mtk_dpi_sw_reset(dpi, false); + + return 0; +} + +static u32 *mtk_dpi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + unsigned int *num_output_fmts) +{ + struct mtk_dpi *dpi = bridge_to_dpi(bridge); + u32 *output_fmts; + + *num_output_fmts = 0; + + if (!dpi->conf->output_fmts) { + dev_err(dpi->dev, "output_fmts should not be null\n"); + return NULL; + } + + output_fmts = kcalloc(dpi->conf->num_output_fmts, sizeof(*output_fmts), + GFP_KERNEL); + if (!output_fmts) + return NULL; + + *num_output_fmts = dpi->conf->num_output_fmts; + + memcpy(output_fmts, dpi->conf->output_fmts, + sizeof(*output_fmts) * dpi->conf->num_output_fmts); + + return output_fmts; +} + +static u32 *mtk_dpi_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + u32 *input_fmts; + + *num_input_fmts = 0; + + input_fmts = kcalloc(1, sizeof(*input_fmts), + GFP_KERNEL); + if (!input_fmts) + return NULL; + + *num_input_fmts = 1; + input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; + + return input_fmts; +} + +static 
unsigned int mtk_dpi_bus_fmt_bit_num(unsigned int out_bus_format) +{ + switch (out_bus_format) { + default: + case MEDIA_BUS_FMT_RGB888_1X24: + case MEDIA_BUS_FMT_BGR888_1X24: + case MEDIA_BUS_FMT_RGB888_2X12_LE: + case MEDIA_BUS_FMT_RGB888_2X12_BE: + case MEDIA_BUS_FMT_YUYV8_1X16: + case MEDIA_BUS_FMT_YUV8_1X24: + return MTK_DPI_OUT_BIT_NUM_8BITS; + case MEDIA_BUS_FMT_RGB101010_1X30: + case MEDIA_BUS_FMT_YUYV10_1X20: + case MEDIA_BUS_FMT_YUV10_1X30: + return MTK_DPI_OUT_BIT_NUM_10BITS; + case MEDIA_BUS_FMT_YUYV12_1X24: + return MTK_DPI_OUT_BIT_NUM_12BITS; + } +} + +static unsigned int mtk_dpi_bus_fmt_channel_swap(unsigned int out_bus_format) +{ + switch (out_bus_format) { + default: + case MEDIA_BUS_FMT_RGB888_1X24: + case MEDIA_BUS_FMT_RGB888_2X12_LE: + case MEDIA_BUS_FMT_RGB888_2X12_BE: + case MEDIA_BUS_FMT_RGB101010_1X30: + case MEDIA_BUS_FMT_YUYV8_1X16: + case MEDIA_BUS_FMT_YUYV10_1X20: + case MEDIA_BUS_FMT_YUYV12_1X24: + return MTK_DPI_OUT_CHANNEL_SWAP_RGB; + case MEDIA_BUS_FMT_BGR888_1X24: + case MEDIA_BUS_FMT_YUV8_1X24: + case MEDIA_BUS_FMT_YUV10_1X30: + return MTK_DPI_OUT_CHANNEL_SWAP_BGR; + } +} + +static unsigned int mtk_dpi_bus_fmt_color_format(unsigned int out_bus_format) +{ + switch (out_bus_format) { + default: + case MEDIA_BUS_FMT_RGB888_1X24: + case MEDIA_BUS_FMT_BGR888_1X24: + case MEDIA_BUS_FMT_RGB888_2X12_LE: + case MEDIA_BUS_FMT_RGB888_2X12_BE: + case MEDIA_BUS_FMT_RGB101010_1X30: + return MTK_DPI_COLOR_FORMAT_RGB; + case MEDIA_BUS_FMT_YUYV8_1X16: + case MEDIA_BUS_FMT_YUYV10_1X20: + case MEDIA_BUS_FMT_YUYV12_1X24: + return MTK_DPI_COLOR_FORMAT_YCBCR_422; + case MEDIA_BUS_FMT_YUV8_1X24: + case MEDIA_BUS_FMT_YUV10_1X30: + return MTK_DPI_COLOR_FORMAT_YCBCR_444; + } +} + +static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct mtk_dpi *dpi = bridge_to_dpi(bridge); + unsigned int out_bus_format; + + out_bus_format = bridge_state->output_bus_cfg.format; + + if (out_bus_format == MEDIA_BUS_FMT_FIXED) + if (dpi->conf->num_output_fmts) + out_bus_format = dpi->conf->output_fmts[0]; + + dev_dbg(dpi->dev, "input format 0x%04x, output format 0x%04x\n", + bridge_state->input_bus_cfg.format, + bridge_state->output_bus_cfg.format); + + dpi->output_fmt = out_bus_format; + dpi->bit_num = mtk_dpi_bus_fmt_bit_num(out_bus_format); + dpi->channel_swap = mtk_dpi_bus_fmt_channel_swap(out_bus_format); + dpi->yc_map = MTK_DPI_OUT_YC_MAP_RGB; + dpi->color_format = mtk_dpi_bus_fmt_color_format(out_bus_format); + + return 0; +} + +static int mtk_dpi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, + enum drm_bridge_attach_flags flags) +{ + struct mtk_dpi *dpi = bridge_to_dpi(bridge); + int ret; + + dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 1, -1); + if (IS_ERR(dpi->next_bridge)) { + ret = PTR_ERR(dpi->next_bridge); + if (ret == -EPROBE_DEFER) + return ret; + + /* Old devicetree has only one endpoint */ + dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 0, 0); + if (IS_ERR(dpi->next_bridge)) + return dev_err_probe(dpi->dev, PTR_ERR(dpi->next_bridge), + "Failed to get bridge\n"); + } + + return drm_bridge_attach(encoder, dpi->next_bridge, + &dpi->bridge, flags); +} + +static void mtk_dpi_bridge_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + struct mtk_dpi *dpi = bridge_to_dpi(bridge); + + 
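/* + * Cache the adjusted mode: it is consumed later by the .enable + * callback, which calls mtk_dpi_set_display_mode() after power on. + */ +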
drm_mode_copy(&dpi->mode, adjusted_mode); +} + +static void mtk_dpi_bridge_disable(struct drm_bridge *bridge) +{ + struct mtk_dpi *dpi = bridge_to_dpi(bridge); + + mtk_dpi_power_off(dpi); + + if (dpi->pinctrl && dpi->pins_gpio) + pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio); +} + +static void mtk_dpi_bridge_enable(struct drm_bridge *bridge) +{ + struct mtk_dpi *dpi = bridge_to_dpi(bridge); + + if (dpi->pinctrl && dpi->pins_dpi) + pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi); + + mtk_dpi_power_on(dpi); + mtk_dpi_set_display_mode(dpi, &dpi->mode); + mtk_dpi_enable(dpi); +} + +static enum drm_mode_status +mtk_dpi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct mtk_dpi *dpi = bridge_to_dpi(bridge); + + if (mode->clock > dpi->conf->max_clock_khz) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static int mtk_dpi_debug_tp_show(struct seq_file *m, void *arg) +{ + struct mtk_dpi *dpi = m->private; + bool en; + u32 val; + + if (!dpi) + return -EINVAL; + + val = readl(dpi->regs + DPI_PATTERN0); + en = val & DPI_PAT_EN; + val = FIELD_GET(DPI_PAT_SEL, val); + + seq_printf(m, "DPI Test Pattern: %s\n", en ? "Enabled" : "Disabled"); + + if (en) { + seq_printf(m, "Internal pattern %d: ", val); + switch (val) { + case 0: + seq_puts(m, "256 Vertical Gray\n"); + break; + case 1: + seq_puts(m, "1024 Vertical Gray\n"); + break; + case 2: + seq_puts(m, "256 Horizontal Gray\n"); + break; + case 3: + seq_puts(m, "1024 Horizontal Gray\n"); + break; + case 4: + seq_puts(m, "Vertical Color bars\n"); + break; + case 6: + seq_puts(m, "Frame border\n"); + break; + case 7: + seq_puts(m, "Dot moire\n"); + break; + default: + seq_puts(m, "Invalid selection\n"); + break; + } + } + + return 0; +} + +static ssize_t mtk_dpi_debug_tp_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + u32 en, type; + char buf[6]; + + if (!m || !m->private || *offp || len > sizeof(buf) - 1) + return -EINVAL; + + memset(buf, 0, sizeof(buf)); + if (copy_from_user(buf, ubuf, len)) + return -EFAULT; + + if (sscanf(buf, "%u %u", &en, &type) != 2) + return -EINVAL; + + if (en < 0 || en > 1 || type < 0 || type > 7) + return -EINVAL; + + mtk_dpi_test_pattern_en((struct mtk_dpi *)m->private, type, en); + return len; +} + +static int mtk_dpi_debug_tp_open(struct inode *inode, struct file *file) +{ + return single_open(file, mtk_dpi_debug_tp_show, inode->i_private); +} + +static const struct file_operations mtk_dpi_debug_tp_fops = { + .owner = THIS_MODULE, + .open = mtk_dpi_debug_tp_open, + .read = seq_read, + .write = mtk_dpi_debug_tp_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static void mtk_dpi_debugfs_init(struct drm_bridge *bridge, struct dentry *root) +{ + struct mtk_dpi *dpi = bridge_to_dpi(bridge); + + debugfs_create_file("dpi_test_pattern", 0640, root, dpi, &mtk_dpi_debug_tp_fops); +} + +static const struct drm_bridge_funcs mtk_dpi_bridge_funcs = { + .attach = mtk_dpi_bridge_attach, + .mode_set = mtk_dpi_bridge_mode_set, + .mode_valid = mtk_dpi_bridge_mode_valid, + .disable = mtk_dpi_bridge_disable, + .enable = mtk_dpi_bridge_enable, + .atomic_check = mtk_dpi_bridge_atomic_check, + .atomic_get_output_bus_fmts = mtk_dpi_bridge_atomic_get_output_bus_fmts, + .atomic_get_input_bus_fmts = mtk_dpi_bridge_atomic_get_input_bus_fmts, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = 
drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .debugfs_init = mtk_dpi_debugfs_init, +}; + +void mtk_dpi_start(struct device *dev) +{ + struct mtk_dpi *dpi = dev_get_drvdata(dev); + + if (!dpi->conf->clocked_by_hdmi) + mtk_dpi_power_on(dpi); +} + +void mtk_dpi_stop(struct device *dev) +{ + struct mtk_dpi *dpi = dev_get_drvdata(dev); + + if (!dpi->conf->clocked_by_hdmi) + mtk_dpi_power_off(dpi); +} + +unsigned int mtk_dpi_encoder_index(struct device *dev) +{ + struct mtk_dpi *dpi = dev_get_drvdata(dev); + unsigned int encoder_index = drm_encoder_index(&dpi->encoder); + + dev_dbg(dev, "encoder index:%d\n", encoder_index); + return encoder_index; +} + +static int mtk_dpi_bind(struct device *dev, struct device *master, void *data) +{ + struct mtk_dpi *dpi = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + struct mtk_drm_private *priv = drm_dev->dev_private; + int ret; + + dpi->mmsys_dev = priv->mmsys_dev; + ret = drm_simple_encoder_init(drm_dev, &dpi->encoder, + DRM_MODE_ENCODER_TMDS); + if (ret) { + dev_err(dev, "Failed to initialize decoder: %d\n", ret); + return ret; + } + + ret = mtk_find_possible_crtcs(drm_dev, dpi->dev); + if (ret < 0) + goto err_cleanup; + dpi->encoder.possible_crtcs = ret; + + ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) + goto err_cleanup; + + dpi->connector = drm_bridge_connector_init(drm_dev, &dpi->encoder); + if (IS_ERR(dpi->connector)) { + dev_err(dev, "Unable to create bridge connector\n"); + ret = PTR_ERR(dpi->connector); + goto err_cleanup; + } + drm_connector_attach_encoder(dpi->connector, &dpi->encoder); + + return 0; + +err_cleanup: + drm_encoder_cleanup(&dpi->encoder); + return ret; +} + +static void mtk_dpi_unbind(struct device *dev, struct device *master, + void *data) +{ + struct mtk_dpi *dpi = dev_get_drvdata(dev); + + drm_encoder_cleanup(&dpi->encoder); +} + +static const struct component_ops mtk_dpi_component_ops = { + .bind = mtk_dpi_bind, + .unbind = mtk_dpi_unbind, +}; + +static const u32 mt8173_output_fmts[] = { + MEDIA_BUS_FMT_RGB888_1X24, +}; + +static const u32 mt8183_output_fmts[] = { + MEDIA_BUS_FMT_RGB888_2X12_LE, + MEDIA_BUS_FMT_RGB888_2X12_BE, +}; + +static const u32 mt8195_dpi_output_fmts[] = { + MEDIA_BUS_FMT_RGB888_1X24, + MEDIA_BUS_FMT_RGB888_2X12_LE, + MEDIA_BUS_FMT_RGB888_2X12_BE, + MEDIA_BUS_FMT_RGB101010_1X30, + MEDIA_BUS_FMT_YUYV8_1X16, + MEDIA_BUS_FMT_YUYV10_1X20, + MEDIA_BUS_FMT_YUYV12_1X24, + MEDIA_BUS_FMT_BGR888_1X24, + MEDIA_BUS_FMT_YUV8_1X24, + MEDIA_BUS_FMT_YUV10_1X30, +}; + +static const u32 mt8195_dp_intf_output_fmts[] = { + MEDIA_BUS_FMT_RGB888_1X24, + MEDIA_BUS_FMT_RGB888_2X12_LE, + MEDIA_BUS_FMT_RGB888_2X12_BE, + MEDIA_BUS_FMT_RGB101010_1X30, + MEDIA_BUS_FMT_YUYV8_1X16, + MEDIA_BUS_FMT_YUYV10_1X20, + MEDIA_BUS_FMT_BGR888_1X24, + MEDIA_BUS_FMT_YUV8_1X24, + MEDIA_BUS_FMT_YUV10_1X30, +}; + +static const struct mtk_dpi_factor dpi_factor_mt2701[] = { + { 64000, 4 }, { 128000, 2 }, { U32_MAX, 1 } +}; + +static const struct mtk_dpi_factor dpi_factor_mt8173[] = { + { 27000, 48 }, { 84000, 24 }, { 167000, 12 }, { U32_MAX, 6 } +}; + +static const struct mtk_dpi_factor dpi_factor_mt8183[] = { + { 27000, 8 }, { 167000, 4 }, { U32_MAX, 2 } +}; + +static const struct mtk_dpi_factor dpi_factor_mt8195_dp_intf[] = { + { 70000 - 1, 4 }, { 200000 - 1, 2 }, { U32_MAX, 1 } +}; + +static const struct mtk_dpi_conf mt8173_conf = { + .dpi_factor = dpi_factor_mt8173, + .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8173), + 
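/* + * Offset of the register holding H_FRE_2N/EDGE_SEL: the 2N and + * edge-select configuration helpers are no-ops when this is unset. + */ +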
.reg_h_fre_con = 0xe0, + .max_clock_khz = 300000, + .output_fmts = mt8173_output_fmts, + .num_output_fmts = ARRAY_SIZE(mt8173_output_fmts), + .pixels_per_iter = 1, + .is_ck_de_pol = true, + .swap_input_support = true, + .support_direct_pin = true, + .dimension_mask = HPW_MASK, + .hvsize_mask = HSIZE_MASK, + .channel_swap_shift = CH_SWAP, + .yuv422_en_bit = YUV422_EN, + .csc_enable_bit = CSC_ENABLE, +}; + +static const struct mtk_dpi_conf mt2701_conf = { + .dpi_factor = dpi_factor_mt2701, + .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt2701), + .reg_h_fre_con = 0xb0, + .edge_sel_en = true, + .max_clock_khz = 150000, + .output_fmts = mt8173_output_fmts, + .num_output_fmts = ARRAY_SIZE(mt8173_output_fmts), + .pixels_per_iter = 1, + .is_ck_de_pol = true, + .swap_input_support = true, + .support_direct_pin = true, + .dimension_mask = HPW_MASK, + .hvsize_mask = HSIZE_MASK, + .channel_swap_shift = CH_SWAP, + .yuv422_en_bit = YUV422_EN, + .csc_enable_bit = CSC_ENABLE, +}; + +static const struct mtk_dpi_conf mt8183_conf = { + .dpi_factor = dpi_factor_mt8183, + .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8183), + .reg_h_fre_con = 0xe0, + .max_clock_khz = 100000, + .output_fmts = mt8183_output_fmts, + .num_output_fmts = ARRAY_SIZE(mt8183_output_fmts), + .pixels_per_iter = 1, + .is_ck_de_pol = true, + .swap_input_support = true, + .support_direct_pin = true, + .dimension_mask = HPW_MASK, + .hvsize_mask = HSIZE_MASK, + .channel_swap_shift = CH_SWAP, + .yuv422_en_bit = YUV422_EN, + .csc_enable_bit = CSC_ENABLE, +}; + +static const struct mtk_dpi_conf mt8186_conf = { + .dpi_factor = dpi_factor_mt8183, + .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8183), + .reg_h_fre_con = 0xe0, + .max_clock_khz = 150000, + .output_fmts = mt8183_output_fmts, + .num_output_fmts = ARRAY_SIZE(mt8183_output_fmts), + .edge_cfg_in_mmsys = true, + .pixels_per_iter = 1, + .is_ck_de_pol = true, + .swap_input_support = true, + .support_direct_pin = true, + .dimension_mask = HPW_MASK, + .hvsize_mask = HSIZE_MASK, + .channel_swap_shift = CH_SWAP, + .yuv422_en_bit = YUV422_EN, + .csc_enable_bit = CSC_ENABLE, +}; + +static const struct mtk_dpi_conf mt8192_conf = { + .dpi_factor = dpi_factor_mt8183, + .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8183), + .reg_h_fre_con = 0xe0, + .max_clock_khz = 150000, + .output_fmts = mt8183_output_fmts, + .num_output_fmts = ARRAY_SIZE(mt8183_output_fmts), + .pixels_per_iter = 1, + .is_ck_de_pol = true, + .swap_input_support = true, + .support_direct_pin = true, + .dimension_mask = HPW_MASK, + .hvsize_mask = HSIZE_MASK, + .channel_swap_shift = CH_SWAP, + .yuv422_en_bit = YUV422_EN, + .csc_enable_bit = CSC_ENABLE, +}; + +static const struct mtk_dpi_conf mt8195_conf = { + .max_clock_khz = 594000, + .output_fmts = mt8195_dpi_output_fmts, + .num_output_fmts = ARRAY_SIZE(mt8195_dpi_output_fmts), + .pixels_per_iter = 1, + .is_ck_de_pol = true, + .swap_input_support = true, + .support_direct_pin = true, + .dimension_mask = HPW_MASK, + .hvsize_mask = HSIZE_MASK, + .channel_swap_shift = CH_SWAP, + .yuv422_en_bit = YUV422_EN, + .csc_enable_bit = CSC_ENABLE, + .input_2p_en_bit = DPI_INPUT_2P_EN, + .clocked_by_hdmi = true, + .output_1pixel = true, +}; + +static const struct mtk_dpi_conf mt8195_dpintf_conf = { + .dpi_factor = dpi_factor_mt8195_dp_intf, + .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8195_dp_intf), + .max_clock_khz = 600000, + .output_fmts = mt8195_dp_intf_output_fmts, + .num_output_fmts = ARRAY_SIZE(mt8195_dp_intf_output_fmts), + .pixels_per_iter = 4, + .dimension_mask = DPINTF_HPW_MASK, + .hvsize_mask = 
DPINTF_HSIZE_MASK, + .channel_swap_shift = DPINTF_CH_SWAP, + .yuv422_en_bit = DPINTF_YUV422_EN, + .csc_enable_bit = DPINTF_CSC_ENABLE, + .input_2p_en_bit = DPINTF_INPUT_2P_EN, +}; + +static int mtk_dpi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_dpi *dpi; + int ret; + + dpi = devm_drm_bridge_alloc(dev, struct mtk_dpi, bridge, + &mtk_dpi_bridge_funcs); + if (IS_ERR(dpi)) + return PTR_ERR(dpi); + + dpi->dev = dev; + dpi->conf = (struct mtk_dpi_conf *)of_device_get_match_data(dev); + dpi->output_fmt = MEDIA_BUS_FMT_RGB888_1X24; + + dpi->pinctrl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(dpi->pinctrl)) { + dpi->pinctrl = NULL; + dev_dbg(&pdev->dev, "Cannot find pinctrl!\n"); + } + if (dpi->pinctrl) { + dpi->pins_gpio = pinctrl_lookup_state(dpi->pinctrl, "sleep"); + if (IS_ERR(dpi->pins_gpio)) { + dpi->pins_gpio = NULL; + dev_dbg(&pdev->dev, "Cannot find pinctrl idle!\n"); + } + if (dpi->pins_gpio) + pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio); + + dpi->pins_dpi = pinctrl_lookup_state(dpi->pinctrl, "default"); + if (IS_ERR(dpi->pins_dpi)) { + dpi->pins_dpi = NULL; + dev_dbg(&pdev->dev, "Cannot find pinctrl active!\n"); + } + } + dpi->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(dpi->regs)) + return dev_err_probe(dev, PTR_ERR(dpi->regs), + "Failed to ioremap mem resource\n"); + + dpi->engine_clk = devm_clk_get(dev, "engine"); + if (IS_ERR(dpi->engine_clk)) + return dev_err_probe(dev, PTR_ERR(dpi->engine_clk), + "Failed to get engine clock\n"); + + dpi->pixel_clk = devm_clk_get(dev, "pixel"); + if (IS_ERR(dpi->pixel_clk)) + return dev_err_probe(dev, PTR_ERR(dpi->pixel_clk), + "Failed to get pixel clock\n"); + + dpi->tvd_clk = devm_clk_get(dev, "pll"); + if (IS_ERR(dpi->tvd_clk)) + return dev_err_probe(dev, PTR_ERR(dpi->tvd_clk), + "Failed to get tvdpll clock\n"); + + dpi->irq = platform_get_irq(pdev, 0); + if (dpi->irq < 0) + return dpi->irq; + + platform_set_drvdata(pdev, dpi); + + dpi->bridge.of_node = dev->of_node; + dpi->bridge.type = DRM_MODE_CONNECTOR_DPI; + + ret = devm_drm_bridge_add(dev, &dpi->bridge); + if (ret) + return ret; + + ret = component_add(dev, &mtk_dpi_component_ops); + if (ret) + return dev_err_probe(dev, ret, "Failed to add component.\n"); + + return 0; +} + +static void mtk_dpi_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &mtk_dpi_component_ops); +} + +static const struct of_device_id mtk_dpi_of_ids[] = { + { .compatible = "mediatek,mt2701-dpi", .data = &mt2701_conf }, + { .compatible = "mediatek,mt8173-dpi", .data = &mt8173_conf }, + { .compatible = "mediatek,mt8183-dpi", .data = &mt8183_conf }, + { .compatible = "mediatek,mt8186-dpi", .data = &mt8186_conf }, + { .compatible = "mediatek,mt8188-dp-intf", .data = &mt8195_dpintf_conf }, + { .compatible = "mediatek,mt8192-dpi", .data = &mt8192_conf }, + { .compatible = "mediatek,mt8195-dp-intf", .data = &mt8195_dpintf_conf }, + { .compatible = "mediatek,mt8195-dpi", .data = &mt8195_conf }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, mtk_dpi_of_ids); + +struct platform_driver mtk_dpi_driver = { + .probe = mtk_dpi_probe, + .remove = mtk_dpi_remove, + .driver = { + .name = "mediatek-dpi", + .of_match_table = mtk_dpi_of_ids, + }, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_dpi_regs.h b/drivers/gpu/drm/mediatek/mtk_dpi_regs.h new file mode 100644 index 000000000000..23eeefce8fd2 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_dpi_regs.h @@ -0,0 +1,247 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014 
MediaTek Inc. + * Author: Jie Qiu <jie.qiu@mediatek.com> + */ +#ifndef __MTK_DPI_REGS_H +#define __MTK_DPI_REGS_H + +#define DPI_EN 0x00 +#define EN BIT(0) + +#define DPI_RET 0x04 +#define RST BIT(0) + +#define DPI_INTEN 0x08 +#define INT_VSYNC_EN BIT(0) +#define INT_VDE_EN BIT(1) +#define INT_UNDERFLOW_EN BIT(2) + +#define DPI_INTSTA 0x0C +#define INT_VSYNC_STA BIT(0) +#define INT_VDE_STA BIT(1) +#define INT_UNDERFLOW_STA BIT(2) + +#define DPI_CON 0x10 +#define BG_ENABLE BIT(0) +#define IN_RB_SWAP BIT(1) +#define INTL_EN BIT(2) +#define TDFP_EN BIT(3) +#define CLPF_EN BIT(4) +#define YUV422_EN BIT(5) +#define CSC_ENABLE BIT(6) +#define R601_SEL BIT(7) +#define EMBSYNC_EN BIT(8) +#define VS_LODD_EN BIT(16) +#define VS_LEVEN_EN BIT(17) +#define VS_RODD_EN BIT(18) +#define VS_REVEN BIT(19) +#define FAKE_DE_LODD BIT(20) +#define FAKE_DE_LEVEN BIT(21) +#define FAKE_DE_RODD BIT(22) +#define FAKE_DE_REVEN BIT(23) + +/* DPI_CON: DPI instances */ +#define DPI_OUTPUT_1T1P_EN BIT(24) +#define DPI_INPUT_2P_EN BIT(25) +/* DPI_CON: DPINTF instances */ +#define DPINTF_YUV422_EN BIT(24) +#define DPINTF_CSC_ENABLE BIT(26) +#define DPINTF_INPUT_2P_EN BIT(29) + +#define DPI_OUTPUT_SETTING 0x14 +#define CH_SWAP 0 +#define DPINTF_CH_SWAP 1 +#define CH_SWAP_MASK (0x7 << 0) +#define SWAP_RGB 0x00 +#define SWAP_GBR 0x01 +#define SWAP_BRG 0x02 +#define SWAP_RBG 0x03 +#define SWAP_GRB 0x04 +#define SWAP_BGR 0x05 +#define BIT_SWAP BIT(3) +#define B_MASK BIT(4) +#define G_MASK BIT(5) +#define R_MASK BIT(6) +#define DE_MASK BIT(8) +#define HS_MASK BIT(9) +#define VS_MASK BIT(10) +#define DE_POL BIT(12) +#define HSYNC_POL BIT(13) +#define VSYNC_POL BIT(14) +#define CK_POL BIT(15) +#define OEN_OFF BIT(16) +#define EDGE_SEL BIT(17) +#define OUT_BIT 18 +#define OUT_BIT_MASK (0x3 << 18) +#define OUT_BIT_8 0x00 +#define OUT_BIT_10 0x01 +#define OUT_BIT_12 0x02 +#define OUT_BIT_16 0x03 +#define YC_MAP 20 +#define YC_MAP_MASK (0x7 << 20) +#define YC_MAP_RGB 0x00 +#define YC_MAP_CYCY 0x04 +#define YC_MAP_YCYC 0x05 +#define YC_MAP_CY 0x06 +#define YC_MAP_YC 0x07 + +#define DPI_SIZE 0x18 +#define HSIZE 0 +#define HSIZE_MASK (0x1FFF << 0) +#define DPINTF_HSIZE_MASK (0xFFFF << 0) +#define VSIZE 16 +#define VSIZE_MASK (0x1FFF << 16) +#define DPINTF_VSIZE_MASK (0xFFFF << 16) + +#define DPI_DDR_SETTING 0x1C +#define DDR_EN BIT(0) +#define DDDR_SEL BIT(1) +#define DDR_4PHASE BIT(2) +#define DDR_WIDTH (0x3 << 4) +#define DDR_PAD_MODE (0x1 << 8) + +#define DPI_TGEN_HWIDTH 0x20 +#define HPW 0 +#define HPW_MASK (0xFFF << 0) +#define DPINTF_HPW_MASK (0xFFFF << 0) + +#define DPI_TGEN_HPORCH 0x24 +#define HBP 0 +#define HBP_MASK (0xFFF << 0) +#define DPINTF_HBP_MASK (0xFFFF << 0) +#define HFP 16 +#define HFP_MASK (0xFFF << 16) +#define DPINTF_HFP_MASK (0xFFFF << 16) + +#define DPI_TGEN_VWIDTH 0x28 +#define DPI_TGEN_VPORCH 0x2C + +#define VSYNC_WIDTH_SHIFT 0 +#define VSYNC_WIDTH_MASK (0xFFF << 0) +#define DPINTF_VSYNC_WIDTH_MASK (0xFFFF << 0) +#define VSYNC_HALF_LINE_SHIFT 16 +#define VSYNC_HALF_LINE_MASK BIT(16) +#define VSYNC_BACK_PORCH_SHIFT 0 +#define VSYNC_BACK_PORCH_MASK (0xFFF << 0) +#define DPINTF_VSYNC_BACK_PORCH_MASK (0xFFFF << 0) +#define VSYNC_FRONT_PORCH_SHIFT 16 +#define VSYNC_FRONT_PORCH_MASK (0xFFF << 16) +#define DPINTF_VSYNC_FRONT_PORCH_MASK (0xFFFF << 16) + +#define DPI_BG_HCNTL 0x30 +#define BG_RIGHT (0x1FFF << 0) +#define BG_LEFT (0x1FFF << 16) + +#define DPI_BG_VCNTL 0x34 +#define BG_BOT (0x1FFF << 0) +#define BG_TOP (0x1FFF << 16) + +#define DPI_BG_COLOR 0x38 +#define BG_B (0xF << 0) +#define BG_G (0xF << 8) +#define 
BG_R (0xF << 16) + +#define DPI_FIFO_CTL 0x3C +#define FIFO_VALID_SET (0x1F << 0) +#define FIFO_RST_SEL (0x1 << 8) + +#define DPI_STATUS 0x40 +#define VCOUNTER (0x1FFF << 0) +#define DPI_BUSY BIT(16) +#define OUTEN BIT(17) +#define FIELD BIT(20) +#define TDLR BIT(21) + +#define DPI_TMODE 0x44 +#define DPI_OEN_ON BIT(0) + +#define DPI_CHECKSUM 0x48 +#define DPI_CHECKSUM_MASK (0xFFFFFF << 0) +#define DPI_CHECKSUM_READY BIT(30) +#define DPI_CHECKSUM_EN BIT(31) + +#define DPI_DUMMY 0x50 +#define DPI_DUMMY_MASK (0xFFFFFFFF << 0) + +#define DPI_TGEN_VWIDTH_LEVEN 0x68 +#define DPI_TGEN_VPORCH_LEVEN 0x6C +#define DPI_TGEN_VWIDTH_RODD 0x70 +#define DPI_TGEN_VPORCH_RODD 0x74 +#define DPI_TGEN_VWIDTH_REVEN 0x78 +#define DPI_TGEN_VPORCH_REVEN 0x7C + +#define DPI_ESAV_VTIMING_LODD 0x80 +#define ESAV_VOFST_LODD (0xFFF << 0) +#define ESAV_VWID_LODD (0xFFF << 16) + +#define DPI_ESAV_VTIMING_LEVEN 0x84 +#define ESAV_VOFST_LEVEN (0xFFF << 0) +#define ESAV_VWID_LEVEN (0xFFF << 16) + +#define DPI_ESAV_VTIMING_RODD 0x88 +#define ESAV_VOFST_RODD (0xFFF << 0) +#define ESAV_VWID_RODD (0xFFF << 16) + +#define DPI_ESAV_VTIMING_REVEN 0x8C +#define ESAV_VOFST_REVEN (0xFFF << 0) +#define ESAV_VWID_REVEN (0xFFF << 16) + +#define DPI_ESAV_FTIMING 0x90 +#define ESAV_FOFST_ODD (0xFFF << 0) +#define ESAV_FOFST_EVEN (0xFFF << 16) + +#define DPI_CLPF_SETTING 0x94 +#define CLPF_TYPE (0x3 << 0) +#define ROUND_EN BIT(4) + +#define DPI_Y_LIMIT 0x98 +#define Y_LIMINT_BOT 0 +#define Y_LIMINT_BOT_MASK (0xFFF << 0) +#define Y_LIMINT_TOP 16 +#define Y_LIMINT_TOP_MASK (0xFFF << 16) + +#define DPI_C_LIMIT 0x9C +#define C_LIMIT_BOT 0 +#define C_LIMIT_BOT_MASK (0xFFF << 0) +#define C_LIMIT_TOP 16 +#define C_LIMIT_TOP_MASK (0xFFF << 16) + +#define DPI_YUV422_SETTING 0xA0 +#define UV_SWAP BIT(0) +#define CR_DELSEL BIT(4) +#define CB_DELSEL BIT(5) +#define Y_DELSEL BIT(6) +#define DE_DELSEL BIT(7) + +#define DPI_EMBSYNC_SETTING 0xA4 +#define EMBSYNC_R_CR_EN BIT(0) +#define EMPSYNC_G_Y_EN BIT(1) +#define EMPSYNC_B_CB_EN BIT(2) +#define ESAV_F_INV BIT(4) +#define ESAV_V_INV BIT(5) +#define ESAV_H_INV BIT(6) +#define ESAV_CODE_MAN BIT(8) +#define VS_OUT_SEL (0x7 << 12) + +#define DPI_ESAV_CODE_SET0 0xA8 +#define ESAV_CODE0 (0xFFF << 0) +#define ESAV_CODE1 (0xFFF << 16) + +#define DPI_ESAV_CODE_SET1 0xAC +#define ESAV_CODE2 (0xFFF << 0) +#define ESAV_CODE3_MSB BIT(16) + +#define EDGE_SEL_EN BIT(5) +#define H_FRE_2N BIT(25) + +#define DPI_MATRIX_SET 0xB4 +#define INT_MATRIX_SEL_MASK GENMASK(4, 0) +#define MATRIX_SEL_RGB_TO_JPEG 0 +#define MATRIX_SEL_RGB_TO_BT601 2 + +#define DPI_PATTERN0 0xf00 +#define DPI_PAT_EN BIT(0) +#define DPI_PAT_SEL GENMASK(6, 4) + +#endif /* __MTK_DPI_REGS_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c new file mode 100644 index 000000000000..a94c51a83261 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -0,0 +1,1315 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015 MediaTek Inc. 
+ * Author: YT SHEN <yt.shen@mediatek.com> + */ + +#include <linux/component.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/dma-mapping.h> + +#include <drm/clients/drm_client_setup.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_fbdev_dma.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_gem.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_ioctl.h> +#include <drm/drm_of.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" +#include "mtk_disp_drv.h" +#include "mtk_drm_drv.h" +#include "mtk_gem.h" + +#define DRIVER_NAME "mediatek" +#define DRIVER_DESC "Mediatek SoC DRM" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 + +static const struct drm_mode_config_helper_funcs mtk_drm_mode_config_helpers = { + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, +}; + +static struct drm_framebuffer * +mtk_drm_mode_fb_create(struct drm_device *dev, + struct drm_file *file, + const struct drm_format_info *info, + const struct drm_mode_fb_cmd2 *cmd) +{ + if (info->num_planes != 1) + return ERR_PTR(-EINVAL); + + return drm_gem_fb_create(dev, file, info, cmd); +} + +static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = { + .fb_create = mtk_drm_mode_fb_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static const unsigned int mt2701_mtk_ddp_main[] = { + DDP_COMPONENT_OVL0, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_BLS, + DDP_COMPONENT_DSI0, +}; + +static const unsigned int mt2701_mtk_ddp_ext[] = { + DDP_COMPONENT_RDMA1, + DDP_COMPONENT_DPI0, +}; + +static const unsigned int mt7623_mtk_ddp_main[] = { + DDP_COMPONENT_OVL0, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_BLS, + DDP_COMPONENT_DPI0, +}; + +static const unsigned int mt7623_mtk_ddp_ext[] = { + DDP_COMPONENT_RDMA1, + DDP_COMPONENT_DSI0, +}; + +static const unsigned int mt2712_mtk_ddp_main[] = { + DDP_COMPONENT_OVL0, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_AAL0, + DDP_COMPONENT_OD0, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_DPI0, + DDP_COMPONENT_PWM0, +}; + +static const unsigned int mt2712_mtk_ddp_ext[] = { + DDP_COMPONENT_OVL1, + DDP_COMPONENT_COLOR1, + DDP_COMPONENT_AAL1, + DDP_COMPONENT_OD1, + DDP_COMPONENT_RDMA1, + DDP_COMPONENT_DPI1, + DDP_COMPONENT_PWM1, +}; + +static const unsigned int mt2712_mtk_ddp_third[] = { + DDP_COMPONENT_RDMA2, + DDP_COMPONENT_DSI3, + DDP_COMPONENT_PWM2, +}; + +static unsigned int mt8167_mtk_ddp_main[] = { + DDP_COMPONENT_OVL0, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_CCORR, + DDP_COMPONENT_AAL0, + DDP_COMPONENT_GAMMA, + DDP_COMPONENT_DITHER0, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_DSI0, +}; + +static const unsigned int mt8173_mtk_ddp_main[] = { + DDP_COMPONENT_OVL0, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_AAL0, + DDP_COMPONENT_OD0, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_UFOE, + DDP_COMPONENT_DSI0, + DDP_COMPONENT_PWM0, +}; + +static const unsigned int mt8173_mtk_ddp_ext[] = { + DDP_COMPONENT_OVL1, + DDP_COMPONENT_COLOR1, + DDP_COMPONENT_GAMMA, + DDP_COMPONENT_RDMA1, + DDP_COMPONENT_DPI0, +}; + +static const unsigned int mt8183_mtk_ddp_main[] = { + DDP_COMPONENT_OVL0, + DDP_COMPONENT_OVL_2L0, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_CCORR, + DDP_COMPONENT_AAL0, + DDP_COMPONENT_GAMMA, + DDP_COMPONENT_DITHER0, + 
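/* DSI0 is the display interface that terminates the mt8183 main path */ +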
DDP_COMPONENT_DSI0, +}; + +static const unsigned int mt8183_mtk_ddp_ext[] = { + DDP_COMPONENT_OVL_2L1, + DDP_COMPONENT_RDMA1, + DDP_COMPONENT_DPI0, +}; + +static const unsigned int mt8186_mtk_ddp_main[] = { + DDP_COMPONENT_OVL0, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_CCORR, + DDP_COMPONENT_AAL0, + DDP_COMPONENT_GAMMA, + DDP_COMPONENT_POSTMASK0, + DDP_COMPONENT_DITHER0, + DDP_COMPONENT_DSI0, +}; + +static const unsigned int mt8186_mtk_ddp_ext[] = { + DDP_COMPONENT_OVL_2L0, + DDP_COMPONENT_RDMA1, + DDP_COMPONENT_DPI0, +}; + +static const unsigned int mt8188_mtk_ddp_main[] = { + DDP_COMPONENT_OVL0, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_CCORR, + DDP_COMPONENT_AAL0, + DDP_COMPONENT_GAMMA, + DDP_COMPONENT_POSTMASK0, + DDP_COMPONENT_DITHER0, +}; + +static const struct mtk_drm_route mt8188_mtk_ddp_main_routes[] = { + {0, DDP_COMPONENT_DP_INTF0}, + {0, DDP_COMPONENT_DSI0}, +}; + +static const unsigned int mt8192_mtk_ddp_main[] = { + DDP_COMPONENT_OVL0, + DDP_COMPONENT_OVL_2L0, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_CCORR, + DDP_COMPONENT_AAL0, + DDP_COMPONENT_GAMMA, + DDP_COMPONENT_POSTMASK0, + DDP_COMPONENT_DITHER0, + DDP_COMPONENT_DSI0, +}; + +static const unsigned int mt8192_mtk_ddp_ext[] = { + DDP_COMPONENT_OVL_2L2, + DDP_COMPONENT_RDMA4, + DDP_COMPONENT_DPI0, +}; + +static const unsigned int mt8195_mtk_ddp_main[] = { + DDP_COMPONENT_OVL0, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_CCORR, + DDP_COMPONENT_AAL0, + DDP_COMPONENT_GAMMA, + DDP_COMPONENT_DITHER0, + DDP_COMPONENT_DSC0, + DDP_COMPONENT_MERGE0, + DDP_COMPONENT_DP_INTF0, +}; + +static const unsigned int mt8195_mtk_ddp_ext[] = { + DDP_COMPONENT_DRM_OVL_ADAPTOR, + DDP_COMPONENT_MERGE5, + DDP_COMPONENT_DP_INTF1, +}; + +static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = { + .main_path = mt2701_mtk_ddp_main, + .main_len = ARRAY_SIZE(mt2701_mtk_ddp_main), + .ext_path = mt2701_mtk_ddp_ext, + .ext_len = ARRAY_SIZE(mt2701_mtk_ddp_ext), + .shadow_register = true, + .mmsys_dev_num = 1, +}; + +static const struct mtk_mmsys_driver_data mt7623_mmsys_driver_data = { + .main_path = mt7623_mtk_ddp_main, + .main_len = ARRAY_SIZE(mt7623_mtk_ddp_main), + .ext_path = mt7623_mtk_ddp_ext, + .ext_len = ARRAY_SIZE(mt7623_mtk_ddp_ext), + .shadow_register = true, + .mmsys_dev_num = 1, +}; + +static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = { + .main_path = mt2712_mtk_ddp_main, + .main_len = ARRAY_SIZE(mt2712_mtk_ddp_main), + .ext_path = mt2712_mtk_ddp_ext, + .ext_len = ARRAY_SIZE(mt2712_mtk_ddp_ext), + .third_path = mt2712_mtk_ddp_third, + .third_len = ARRAY_SIZE(mt2712_mtk_ddp_third), + .mmsys_dev_num = 1, +}; + +static const struct mtk_mmsys_driver_data mt8167_mmsys_driver_data = { + .main_path = mt8167_mtk_ddp_main, + .main_len = ARRAY_SIZE(mt8167_mtk_ddp_main), + .mmsys_dev_num = 1, +}; + +static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = { + .main_path = mt8173_mtk_ddp_main, + .main_len = ARRAY_SIZE(mt8173_mtk_ddp_main), + .ext_path = mt8173_mtk_ddp_ext, + .ext_len = ARRAY_SIZE(mt8173_mtk_ddp_ext), + .mmsys_dev_num = 1, +}; + +static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = { + .main_path = mt8183_mtk_ddp_main, + .main_len = ARRAY_SIZE(mt8183_mtk_ddp_main), + .ext_path = mt8183_mtk_ddp_ext, + .ext_len = ARRAY_SIZE(mt8183_mtk_ddp_ext), + .mmsys_dev_num = 1, +}; + +static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = { + .main_path = mt8186_mtk_ddp_main, + .main_len = 
ARRAY_SIZE(mt8186_mtk_ddp_main), + .ext_path = mt8186_mtk_ddp_ext, + .ext_len = ARRAY_SIZE(mt8186_mtk_ddp_ext), + .mmsys_dev_num = 1, +}; + +static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = { + .main_path = mt8188_mtk_ddp_main, + .main_len = ARRAY_SIZE(mt8188_mtk_ddp_main), + .conn_routes = mt8188_mtk_ddp_main_routes, + .num_conn_routes = ARRAY_SIZE(mt8188_mtk_ddp_main_routes), + .mmsys_dev_num = 2, + .max_width = 8191, + .min_width = 1, + .min_height = 1, +}; + +static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = { + .main_path = mt8192_mtk_ddp_main, + .main_len = ARRAY_SIZE(mt8192_mtk_ddp_main), + .ext_path = mt8192_mtk_ddp_ext, + .ext_len = ARRAY_SIZE(mt8192_mtk_ddp_ext), + .mmsys_dev_num = 1, +}; + +static const struct mtk_mmsys_driver_data mt8195_vdosys0_driver_data = { + .main_path = mt8195_mtk_ddp_main, + .main_len = ARRAY_SIZE(mt8195_mtk_ddp_main), + .mmsys_dev_num = 2, + .max_width = 8191, + .min_width = 1, + .min_height = 1, +}; + +static const struct mtk_mmsys_driver_data mt8195_vdosys1_driver_data = { + .ext_path = mt8195_mtk_ddp_ext, + .ext_len = ARRAY_SIZE(mt8195_mtk_ddp_ext), + .mmsys_id = 1, + .mmsys_dev_num = 2, + .max_width = 8191, + .min_width = 2, /* 2-pixel align when ethdr is bypassed */ + .min_height = 1, +}; + +static const struct mtk_mmsys_driver_data mt8365_mmsys_driver_data = { + .mmsys_dev_num = 1, +}; + +static const struct of_device_id mtk_drm_of_ids[] = { + { .compatible = "mediatek,mt2701-mmsys", + .data = &mt2701_mmsys_driver_data}, + { .compatible = "mediatek,mt7623-mmsys", + .data = &mt7623_mmsys_driver_data}, + { .compatible = "mediatek,mt2712-mmsys", + .data = &mt2712_mmsys_driver_data}, + { .compatible = "mediatek,mt8167-mmsys", + .data = &mt8167_mmsys_driver_data}, + { .compatible = "mediatek,mt8173-mmsys", + .data = &mt8173_mmsys_driver_data}, + { .compatible = "mediatek,mt8183-mmsys", + .data = &mt8183_mmsys_driver_data}, + { .compatible = "mediatek,mt8186-mmsys", + .data = &mt8186_mmsys_driver_data}, + { .compatible = "mediatek,mt8188-vdosys0", + .data = &mt8188_vdosys0_driver_data}, + { .compatible = "mediatek,mt8188-vdosys1", + .data = &mt8195_vdosys1_driver_data}, + { .compatible = "mediatek,mt8192-mmsys", + .data = &mt8192_mmsys_driver_data}, + { .compatible = "mediatek,mt8195-mmsys", + .data = &mt8195_vdosys0_driver_data}, + { .compatible = "mediatek,mt8195-vdosys0", + .data = &mt8195_vdosys0_driver_data}, + { .compatible = "mediatek,mt8195-vdosys1", + .data = &mt8195_vdosys1_driver_data}, + { .compatible = "mediatek,mt8365-mmsys", + .data = &mt8365_mmsys_driver_data}, + { } +}; +MODULE_DEVICE_TABLE(of, mtk_drm_of_ids); + +static int mtk_drm_match(struct device *dev, const void *data) +{ + if (!strncmp(dev_name(dev), "mediatek-drm", sizeof("mediatek-drm") - 1)) + return true; + return false; +} + +static bool mtk_drm_get_all_drm_priv(struct device *dev) +{ + struct mtk_drm_private *drm_priv = dev_get_drvdata(dev); + struct mtk_drm_private *all_drm_priv[MAX_CRTC]; + struct mtk_drm_private *temp_drm_priv; + struct device_node *phandle = dev->parent->of_node; + const struct of_device_id *of_id; + struct device_node *node; + struct device *drm_dev; + unsigned int cnt = 0; + int i, j; + + for_each_child_of_node(phandle->parent, node) { + struct platform_device *pdev; + + of_id = of_match_node(mtk_drm_of_ids, node); + if (!of_id) + continue; + + pdev = of_find_device_by_node(node); + if (!pdev) + continue; + + drm_dev = device_find_child(&pdev->dev, NULL, mtk_drm_match); + put_device(&pdev->dev); + if 
(!drm_dev) + continue; + + temp_drm_priv = dev_get_drvdata(drm_dev); + put_device(drm_dev); + if (!temp_drm_priv) + continue; + + if (temp_drm_priv->data->main_len) + all_drm_priv[CRTC_MAIN] = temp_drm_priv; + else if (temp_drm_priv->data->ext_len) + all_drm_priv[CRTC_EXT] = temp_drm_priv; + else if (temp_drm_priv->data->third_len) + all_drm_priv[CRTC_THIRD] = temp_drm_priv; + + if (temp_drm_priv->mtk_drm_bound) + cnt++; + + if (cnt == MAX_CRTC) { + of_node_put(node); + break; + } + } + + if (drm_priv->data->mmsys_dev_num == cnt) { + for (i = 0; i < cnt; i++) + for (j = 0; j < cnt; j++) + all_drm_priv[j]->all_drm_private[i] = all_drm_priv[i]; + + return true; + } + + return false; +} + +static bool mtk_drm_find_mmsys_comp(struct mtk_drm_private *private, int comp_id) +{ + const struct mtk_mmsys_driver_data *drv_data = private->data; + int i; + + if (drv_data->main_path) + for (i = 0; i < drv_data->main_len; i++) + if (drv_data->main_path[i] == comp_id) + return true; + + if (drv_data->ext_path) + for (i = 0; i < drv_data->ext_len; i++) + if (drv_data->ext_path[i] == comp_id) + return true; + + if (drv_data->third_path) + for (i = 0; i < drv_data->third_len; i++) + if (drv_data->third_path[i] == comp_id) + return true; + + if (drv_data->num_conn_routes) + for (i = 0; i < drv_data->num_conn_routes; i++) + if (drv_data->conn_routes[i].route_ddp == comp_id) + return true; + + return false; +} + +static int mtk_drm_kms_init(struct drm_device *drm) +{ + struct mtk_drm_private *private = drm->dev_private; + struct mtk_drm_private *priv_n; + struct device *dma_dev = NULL; + struct drm_crtc *crtc; + int ret, i, j; + + if (drm_firmware_drivers_only()) + return -ENODEV; + + ret = drmm_mode_config_init(drm); + if (ret) + return ret; + + drm->mode_config.min_width = 64; + drm->mode_config.min_height = 64; + + /* + * set max width and height as default value(4096x4096). + * this value would be used to check framebuffer size limitation + * at drm_mode_addfb(). + */ + drm->mode_config.max_width = 4096; + drm->mode_config.max_height = 4096; + drm->mode_config.funcs = &mtk_drm_mode_config_funcs; + drm->mode_config.helper_private = &mtk_drm_mode_config_helpers; + + for (i = 0; i < private->data->mmsys_dev_num; i++) { + drm->dev_private = private->all_drm_private[i]; + ret = component_bind_all(private->all_drm_private[i]->dev, drm); + if (ret) { + while (--i >= 0) + component_unbind_all(private->all_drm_private[i]->dev, drm); + return ret; + } + } + + /* + * Ensure internal panels are at the top of the connector list before + * crtc creation. + */ + drm_helper_move_panel_connectors_to_head(drm); + + /* + * 1. We currently support two fixed data streams, each optional, + * and each statically assigned to a crtc: + * OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0 ... + * 2. For multi mmsys architecture, crtc path data are located in + * different drm private data structures. Loop through crtc index to + * create crtc from the main path and then ext_path and finally the + * third path. 
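+ * + * Per-SoC max_width/min_width/min_height limits from each mmsys instance, + * when set, override the generic mode_config defaults chosen above.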
+ */ + for (i = 0; i < MAX_CRTC; i++) { + for (j = 0; j < private->data->mmsys_dev_num; j++) { + priv_n = private->all_drm_private[j]; + + if (priv_n->data->max_width) + drm->mode_config.max_width = priv_n->data->max_width; + + if (priv_n->data->min_width) + drm->mode_config.min_width = priv_n->data->min_width; + + if (priv_n->data->min_height) + drm->mode_config.min_height = priv_n->data->min_height; + + if (i == CRTC_MAIN && priv_n->data->main_len) { + ret = mtk_crtc_create(drm, priv_n->data->main_path, + priv_n->data->main_len, j, + priv_n->data->conn_routes, + priv_n->data->num_conn_routes); + if (ret) + goto err_component_unbind; + + continue; + } else if (i == CRTC_EXT && priv_n->data->ext_len) { + ret = mtk_crtc_create(drm, priv_n->data->ext_path, + priv_n->data->ext_len, j, NULL, 0); + if (ret) + goto err_component_unbind; + + continue; + } else if (i == CRTC_THIRD && priv_n->data->third_len) { + ret = mtk_crtc_create(drm, priv_n->data->third_path, + priv_n->data->third_len, j, NULL, 0); + if (ret) + goto err_component_unbind; + + continue; + } + } + } + + /* IGT will check if the cursor size is configured */ + drm->mode_config.cursor_width = 512; + drm->mode_config.cursor_height = 512; + + /* Use OVL device for all DMA memory allocations */ + crtc = drm_crtc_from_index(drm, 0); + if (crtc) + dma_dev = mtk_crtc_dma_dev_get(crtc); + if (!dma_dev) { + ret = -ENODEV; + dev_err(drm->dev, "Need at least one OVL device\n"); + goto err_component_unbind; + } + + for (i = 0; i < private->data->mmsys_dev_num; i++) + private->all_drm_private[i]->dma_dev = dma_dev; + + /* + * Configure the DMA segment size to make sure we get contiguous IOVA + * when importing PRIME buffers. + */ + dma_set_max_seg_size(dma_dev, UINT_MAX); + + ret = drm_vblank_init(drm, MAX_CRTC); + if (ret < 0) + goto err_component_unbind; + + drm_kms_helper_poll_init(drm); + drm_mode_config_reset(drm); + + return 0; + +err_component_unbind: + for (i = 0; i < private->data->mmsys_dev_num; i++) + component_unbind_all(private->all_drm_private[i]->dev, drm); + + return ret; +} + +static void mtk_drm_kms_deinit(struct drm_device *drm) +{ + drm_kms_helper_poll_fini(drm); + drm_atomic_helper_shutdown(drm); + + component_unbind_all(drm->dev, drm); +} + +DEFINE_DRM_GEM_FOPS(mtk_drm_fops); + +/* + * We need to override this because the device used to import the memory is + * not dev->dev, as drm_gem_prime_import() expects. 
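+ * private->dma_dev is the device selected in mtk_drm_kms_init() for all + * DMA memory allocations (the OVL device of the first CRTC).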
+ */ +static struct drm_gem_object *mtk_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct mtk_drm_private *private = dev->dev_private; + + return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev); +} + +static const struct drm_driver mtk_drm_driver = { + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, + + .dumb_create = mtk_gem_dumb_create, + DRM_FBDEV_DMA_DRIVER_OPS, + + .gem_prime_import = mtk_gem_prime_import, + .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, + .fops = &mtk_drm_fops, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, +}; + +static int compare_dev(struct device *dev, void *data) +{ + return dev == (struct device *)data; +} + +static int mtk_drm_bind(struct device *dev) +{ + struct mtk_drm_private *private = dev_get_drvdata(dev); + struct platform_device *pdev; + struct drm_device *drm; + int ret, i; + + pdev = of_find_device_by_node(private->mutex_node); + if (!pdev) { + dev_err(dev, "Waiting for disp-mutex device %pOF\n", + private->mutex_node); + of_node_put(private->mutex_node); + return -EPROBE_DEFER; + } + + private->mutex_dev = &pdev->dev; + private->mtk_drm_bound = true; + private->dev = dev; + + if (!mtk_drm_get_all_drm_priv(dev)) + return 0; + + drm = drm_dev_alloc(&mtk_drm_driver, dev); + if (IS_ERR(drm)) { + ret = PTR_ERR(drm); + goto err_put_dev; + } + + private->drm_master = true; + drm->dev_private = private; + for (i = 0; i < private->data->mmsys_dev_num; i++) + private->all_drm_private[i]->drm = drm; + + ret = mtk_drm_kms_init(drm); + if (ret < 0) + goto err_free; + + ret = drm_dev_register(drm, 0); + if (ret < 0) + goto err_deinit; + + drm_client_setup(drm, NULL); + + return 0; + +err_deinit: + mtk_drm_kms_deinit(drm); +err_free: + private->drm = NULL; + drm_dev_put(drm); + for (i = 0; i < private->data->mmsys_dev_num; i++) + private->all_drm_private[i]->drm = NULL; +err_put_dev: + put_device(private->mutex_dev); + return ret; +} + +static void mtk_drm_unbind(struct device *dev) +{ + struct mtk_drm_private *private = dev_get_drvdata(dev); + + /* for multi mmsys dev, unregister drm dev in mmsys master */ + if (private->drm_master) { + drm_dev_unregister(private->drm); + mtk_drm_kms_deinit(private->drm); + drm_dev_put(private->drm); + put_device(private->mutex_dev); + } + private->mtk_drm_bound = false; + private->drm_master = false; + private->drm = NULL; +} + +static const struct component_master_ops mtk_drm_ops = { + .bind = mtk_drm_bind, + .unbind = mtk_drm_unbind, +}; + +static const struct of_device_id mtk_ddp_comp_dt_ids[] = { + { .compatible = "mediatek,mt8167-disp-aal", + .data = (void *)MTK_DISP_AAL}, + { .compatible = "mediatek,mt8173-disp-aal", + .data = (void *)MTK_DISP_AAL}, + { .compatible = "mediatek,mt8183-disp-aal", + .data = (void *)MTK_DISP_AAL}, + { .compatible = "mediatek,mt8192-disp-aal", + .data = (void *)MTK_DISP_AAL}, + { .compatible = "mediatek,mt8167-disp-ccorr", + .data = (void *)MTK_DISP_CCORR }, + { .compatible = "mediatek,mt8183-disp-ccorr", + .data = (void *)MTK_DISP_CCORR }, + { .compatible = "mediatek,mt8192-disp-ccorr", + .data = (void *)MTK_DISP_CCORR }, + { .compatible = "mediatek,mt2701-disp-color", + .data = (void *)MTK_DISP_COLOR }, + { .compatible = "mediatek,mt8167-disp-color", + .data = (void *)MTK_DISP_COLOR }, + { .compatible = "mediatek,mt8173-disp-color", + .data = (void *)MTK_DISP_COLOR }, + { .compatible = "mediatek,mt8167-disp-dither", + .data = (void *)MTK_DISP_DITHER }, + { .compatible = 
"mediatek,mt8183-disp-dither", + .data = (void *)MTK_DISP_DITHER }, + { .compatible = "mediatek,mt8195-disp-dsc", + .data = (void *)MTK_DISP_DSC }, + { .compatible = "mediatek,mt8167-disp-gamma", + .data = (void *)MTK_DISP_GAMMA, }, + { .compatible = "mediatek,mt8173-disp-gamma", + .data = (void *)MTK_DISP_GAMMA, }, + { .compatible = "mediatek,mt8183-disp-gamma", + .data = (void *)MTK_DISP_GAMMA, }, + { .compatible = "mediatek,mt8195-disp-gamma", + .data = (void *)MTK_DISP_GAMMA, }, + { .compatible = "mediatek,mt8195-disp-merge", + .data = (void *)MTK_DISP_MERGE }, + { .compatible = "mediatek,mt2701-disp-mutex", + .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt2712-disp-mutex", + .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt8167-disp-mutex", + .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt8173-disp-mutex", + .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt8183-disp-mutex", + .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt8186-disp-mutex", + .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt8188-disp-mutex", + .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt8192-disp-mutex", + .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt8195-disp-mutex", + .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt8365-disp-mutex", + .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt8173-disp-od", + .data = (void *)MTK_DISP_OD }, + { .compatible = "mediatek,mt2701-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8167-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8173-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8183-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8192-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8195-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8183-disp-ovl-2l", + .data = (void *)MTK_DISP_OVL_2L }, + { .compatible = "mediatek,mt8192-disp-ovl-2l", + .data = (void *)MTK_DISP_OVL_2L }, + { .compatible = "mediatek,mt8192-disp-postmask", + .data = (void *)MTK_DISP_POSTMASK }, + { .compatible = "mediatek,mt2701-disp-pwm", + .data = (void *)MTK_DISP_BLS }, + { .compatible = "mediatek,mt8167-disp-pwm", + .data = (void *)MTK_DISP_PWM }, + { .compatible = "mediatek,mt8173-disp-pwm", + .data = (void *)MTK_DISP_PWM }, + { .compatible = "mediatek,mt2701-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8167-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8173-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8183-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8195-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8173-disp-ufoe", + .data = (void *)MTK_DISP_UFOE }, + { .compatible = "mediatek,mt8173-disp-wdma", + .data = (void *)MTK_DISP_WDMA }, + { .compatible = "mediatek,mt2701-dpi", + .data = (void *)MTK_DPI }, + { .compatible = "mediatek,mt8167-dsi", + .data = (void *)MTK_DSI }, + { .compatible = "mediatek,mt8173-dpi", + .data = (void *)MTK_DPI }, + { .compatible = "mediatek,mt8183-dpi", + .data = (void *)MTK_DPI }, + { .compatible = "mediatek,mt8186-dpi", + .data = (void *)MTK_DPI }, + { .compatible = "mediatek,mt8188-dp-intf", + .data = (void *)MTK_DP_INTF }, + { .compatible = "mediatek,mt8192-dpi", + .data = (void *)MTK_DPI }, + { 
.compatible = "mediatek,mt8195-dp-intf", + .data = (void *)MTK_DP_INTF }, + { .compatible = "mediatek,mt8195-dpi", + .data = (void *)MTK_DPI }, + { .compatible = "mediatek,mt2701-dsi", + .data = (void *)MTK_DSI }, + { .compatible = "mediatek,mt8173-dsi", + .data = (void *)MTK_DSI }, + { .compatible = "mediatek,mt8183-dsi", + .data = (void *)MTK_DSI }, + { .compatible = "mediatek,mt8186-dsi", + .data = (void *)MTK_DSI }, + { .compatible = "mediatek,mt8188-dsi", + .data = (void *)MTK_DSI }, + { } +}; + +static int mtk_drm_of_get_ddp_comp_type(struct device_node *node, enum mtk_ddp_comp_type *ctype) +{ + const struct of_device_id *of_id = of_match_node(mtk_ddp_comp_dt_ids, node); + + if (!of_id) + return -EINVAL; + + *ctype = (enum mtk_ddp_comp_type)((uintptr_t)of_id->data); + + return 0; +} + +static int mtk_drm_of_get_ddp_ep_cid(struct device_node *node, + int output_port, enum mtk_crtc_path crtc_path, + struct device_node **next, unsigned int *cid) +{ + struct device_node *ep_dev_node, *ep_out; + enum mtk_ddp_comp_type comp_type; + int ret; + + ep_out = of_graph_get_endpoint_by_regs(node, output_port, crtc_path); + if (!ep_out) + return -ENOENT; + + ep_dev_node = of_graph_get_remote_port_parent(ep_out); + of_node_put(ep_out); + if (!ep_dev_node) + return -EINVAL; + + /* + * Pass the next node pointer regardless of failures in the later code + * so that if this function is called in a loop it will walk through all + * of the subsequent endpoints anyway. + */ + *next = ep_dev_node; + + if (!of_device_is_available(ep_dev_node)) + return -ENODEV; + + ret = mtk_drm_of_get_ddp_comp_type(ep_dev_node, &comp_type); + if (ret) { + if (mtk_ovl_adaptor_is_comp_present(ep_dev_node)) { + *cid = (unsigned int)DDP_COMPONENT_DRM_OVL_ADAPTOR; + return 0; + } + return ret; + } + + ret = mtk_ddp_comp_get_id(ep_dev_node, comp_type); + if (ret < 0) + return ret; + + /* All ok! Pass the Component ID to the caller. */ + *cid = (unsigned int)ret; + + return 0; +} + +/** + * mtk_drm_of_ddp_path_build_one - Build a Display HW Pipeline for a CRTC Path + * @dev: The mediatek-drm device + * @cpath: CRTC Path relative to a VDO or MMSYS + * @out_path: Pointer to an array that will contain the new pipeline + * @out_path_len: Number of entries in the pipeline array + * + * MediaTek SoCs can use different DDP hardware pipelines (or paths) depending + * on the board-specific desired display configuration; this function walks + * through all of the output endpoints starting from a VDO or MMSYS hardware + * instance and builds the right pipeline as specified in device trees. 
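+ * + * The walk starts at output port 0 of the VDO/MMSYS node, with the endpoint + * number selecting the CRTC path, and then follows each remote component's + * output port 1 until a display interface (DSI, DPI or DP_INTF) terminates + * the pipeline.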
+ * + * Return: + * * %0 - Display HW Pipeline successfully built and validated + * * %-ENOENT - Display pipeline was not specified in device tree + * * %-EINVAL - Display pipeline built but validation failed + * * %-ENOMEM - Failure to allocate pipeline array to pass to the caller + */ +static int mtk_drm_of_ddp_path_build_one(struct device *dev, enum mtk_crtc_path cpath, + const unsigned int **out_path, + unsigned int *out_path_len) +{ + struct device_node *next = NULL, *prev, *vdo = dev->parent->of_node; + unsigned int temp_path[DDP_COMPONENT_DRM_ID_MAX] = { 0 }; + unsigned int *final_ddp_path; + unsigned short int idx = 0; + bool ovl_adaptor_comp_added = false; + int ret; + + /* Get the first entry for the temp_path array */ + ret = mtk_drm_of_get_ddp_ep_cid(vdo, 0, cpath, &next, &temp_path[idx]); + if (ret) { + if (next && temp_path[idx] == DDP_COMPONENT_DRM_OVL_ADAPTOR) { + dev_dbg(dev, "Adding OVL Adaptor for %pOF\n", next); + ovl_adaptor_comp_added = true; + } else { + if (next) + dev_err(dev, "Invalid component %pOF\n", next); + else + dev_err(dev, "Cannot find first endpoint for path %d\n", cpath); + + return ret; + } + } + idx++; + + /* + * Walk through port outputs until we reach the last valid mediatek-drm component. + * To be valid, this must end with an "invalid" component that is a display node. + */ + do { + prev = next; + ret = mtk_drm_of_get_ddp_ep_cid(next, 1, cpath, &next, &temp_path[idx]); + of_node_put(prev); + if (ret) { + of_node_put(next); + break; + } + + /* + * If this is an OVL adaptor exclusive component and one of those + * was already added, don't add another instance of the generic + * DDP_COMPONENT_OVL_ADAPTOR, as this is used only to decide whether + * to probe that component master driver of which only one instance + * is needed and possible. + */ + if (temp_path[idx] == DDP_COMPONENT_DRM_OVL_ADAPTOR) { + if (!ovl_adaptor_comp_added) + ovl_adaptor_comp_added = true; + else + idx--; + } + } while (++idx < DDP_COMPONENT_DRM_ID_MAX); + + /* + * The device component might not be enabled: in that case, don't + * check the last entry and just report that the device is missing. + */ + if (ret == -ENODEV) + return ret; + + /* If the last entry is not a final display output, the configuration is wrong */ + switch (temp_path[idx - 1]) { + case DDP_COMPONENT_DP_INTF0: + case DDP_COMPONENT_DP_INTF1: + case DDP_COMPONENT_DPI0: + case DDP_COMPONENT_DPI1: + case DDP_COMPONENT_DSI0: + case DDP_COMPONENT_DSI1: + case DDP_COMPONENT_DSI2: + case DDP_COMPONENT_DSI3: + break; + default: + dev_err(dev, "Invalid display hw pipeline. Last component: %d (ret=%d)\n", + temp_path[idx - 1], ret); + return -EINVAL; + } + + final_ddp_path = devm_kmemdup(dev, temp_path, idx * sizeof(temp_path[0]), GFP_KERNEL); + if (!final_ddp_path) + return -ENOMEM; + + dev_dbg(dev, "Display HW Pipeline built with %d components.\n", idx); + + /* Pipeline built! 
*/ + *out_path = final_ddp_path; + *out_path_len = idx; + + return 0; +} + +static int mtk_drm_of_ddp_path_build(struct device *dev, struct device_node *node, + struct mtk_mmsys_driver_data *data) +{ + struct device_node *ep_node; + struct of_endpoint of_ep; + bool output_present[MAX_CRTC] = { false }; + int ret; + + for_each_endpoint_of_node(node, ep_node) { + ret = of_graph_parse_endpoint(ep_node, &of_ep); + if (ret) { + dev_err_probe(dev, ret, "Cannot parse endpoint\n"); + break; + } + + if (of_ep.id >= MAX_CRTC) { + ret = dev_err_probe(dev, -EINVAL, + "Invalid endpoint%u number\n", of_ep.port); + break; + } + + output_present[of_ep.id] = true; + } + + if (ret) { + of_node_put(ep_node); + return ret; + } + + if (output_present[CRTC_MAIN]) { + ret = mtk_drm_of_ddp_path_build_one(dev, CRTC_MAIN, + &data->main_path, &data->main_len); + if (ret && ret != -ENODEV) + return ret; + } + + if (output_present[CRTC_EXT]) { + ret = mtk_drm_of_ddp_path_build_one(dev, CRTC_EXT, + &data->ext_path, &data->ext_len); + if (ret && ret != -ENODEV) + return ret; + } + + if (output_present[CRTC_THIRD]) { + ret = mtk_drm_of_ddp_path_build_one(dev, CRTC_THIRD, + &data->third_path, &data->third_len); + if (ret && ret != -ENODEV) + return ret; + } + + return 0; +} + +static int mtk_drm_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *phandle = dev->parent->of_node; + const struct of_device_id *of_id; + struct mtk_drm_private *private; + struct mtk_mmsys_driver_data *mtk_drm_data; + struct device_node *node; + struct component_match *match = NULL; + struct platform_device *ovl_adaptor; + int ret; + int i; + + private = devm_kzalloc(dev, sizeof(*private), GFP_KERNEL); + if (!private) + return -ENOMEM; + + private->mmsys_dev = dev->parent; + if (!private->mmsys_dev) { + dev_err(dev, "Failed to get MMSYS device\n"); + return -ENODEV; + } + + of_id = of_match_node(mtk_drm_of_ids, phandle); + if (!of_id) + return -ENODEV; + + mtk_drm_data = (struct mtk_mmsys_driver_data *)of_id->data; + if (!mtk_drm_data) + return -EINVAL; + + /* Try to build the display pipeline from devicetree graphs */ + if (of_graph_is_present(phandle)) { + dev_dbg(dev, "Building display pipeline for MMSYS %u\n", + mtk_drm_data->mmsys_id); + private->data = devm_kmemdup(dev, mtk_drm_data, + sizeof(*mtk_drm_data), GFP_KERNEL); + if (!private->data) + return -ENOMEM; + + ret = mtk_drm_of_ddp_path_build(dev, phandle, private->data); + if (ret) + return ret; + } else { + /* No devicetree graphs support: go with hardcoded paths if present */ + dev_dbg(dev, "Using hardcoded paths for MMSYS %u\n", mtk_drm_data->mmsys_id); + private->data = mtk_drm_data; + } + + private->all_drm_private = devm_kmalloc_array(dev, private->data->mmsys_dev_num, + sizeof(*private->all_drm_private), + GFP_KERNEL); + if (!private->all_drm_private) + return -ENOMEM; + + /* Bringup ovl_adaptor */ + if (mtk_drm_find_mmsys_comp(private, DDP_COMPONENT_DRM_OVL_ADAPTOR)) { + ovl_adaptor = platform_device_register_data(dev, "mediatek-disp-ovl-adaptor", + PLATFORM_DEVID_AUTO, + (void *)private->mmsys_dev, + sizeof(*private->mmsys_dev)); + private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR].dev = &ovl_adaptor->dev; + mtk_ddp_comp_init(dev, NULL, &private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR], + DDP_COMPONENT_DRM_OVL_ADAPTOR); + component_match_add(dev, &match, compare_dev, &ovl_adaptor->dev); + } + + /* Iterate over sibling DISP function blocks */ + for_each_child_of_node(phandle->parent, node) { + enum mtk_ddp_comp_type comp_type; + int 
comp_id; + + ret = mtk_drm_of_get_ddp_comp_type(node, &comp_type); + if (ret) + continue; + + if (!of_device_is_available(node)) { + dev_dbg(dev, "Skipping disabled component %pOF\n", + node); + continue; + } + + if (comp_type == MTK_DISP_MUTEX) { + int id; + + id = of_alias_get_id(node, "mutex"); + if (id < 0 || id == private->data->mmsys_id) { + private->mutex_node = of_node_get(node); + dev_dbg(dev, "get mutex for mmsys %d", private->data->mmsys_id); + } + continue; + } + + comp_id = mtk_ddp_comp_get_id(node, comp_type); + if (comp_id < 0) { + dev_warn(dev, "Skipping unknown component %pOF\n", + node); + continue; + } + + if (!mtk_drm_find_mmsys_comp(private, comp_id)) + continue; + + private->comp_node[comp_id] = of_node_get(node); + + /* + * Currently only the AAL, CCORR, COLOR, GAMMA, MERGE, OVL, RDMA, DSI, and DPI + * blocks have separate component platform drivers and initialize their own + * DDP component structure. The others are initialized here. + */ + if (comp_type == MTK_DISP_AAL || + comp_type == MTK_DISP_CCORR || + comp_type == MTK_DISP_COLOR || + comp_type == MTK_DISP_GAMMA || + comp_type == MTK_DISP_MERGE || + comp_type == MTK_DISP_OVL || + comp_type == MTK_DISP_OVL_2L || + comp_type == MTK_DISP_OVL_ADAPTOR || + comp_type == MTK_DISP_RDMA || + comp_type == MTK_DP_INTF || + comp_type == MTK_DPI || + comp_type == MTK_DSI) { + dev_info(dev, "Adding component match for %pOF\n", + node); + drm_of_component_match_add(dev, &match, component_compare_of, + node); + } + + ret = mtk_ddp_comp_init(dev, node, &private->ddp_comp[comp_id], comp_id); + if (ret) { + of_node_put(node); + goto err_node; + } + } + + if (!private->mutex_node) { + dev_err(dev, "Failed to find disp-mutex node\n"); + ret = -ENODEV; + goto err_node; + } + + pm_runtime_enable(dev); + + platform_set_drvdata(pdev, private); + + ret = component_master_add_with_match(dev, &mtk_drm_ops, match); + if (ret) + goto err_pm; + + return 0; + +err_pm: + pm_runtime_disable(dev); +err_node: + of_node_put(private->mutex_node); + for (i = 0; i < DDP_COMPONENT_DRM_ID_MAX; i++) + of_node_put(private->comp_node[i]); + return ret; +} + +static void mtk_drm_remove(struct platform_device *pdev) +{ + struct mtk_drm_private *private = platform_get_drvdata(pdev); + int i; + + component_master_del(&pdev->dev, &mtk_drm_ops); + pm_runtime_disable(&pdev->dev); + of_node_put(private->mutex_node); + for (i = 0; i < DDP_COMPONENT_DRM_ID_MAX; i++) + of_node_put(private->comp_node[i]); +} + +static void mtk_drm_shutdown(struct platform_device *pdev) +{ + struct mtk_drm_private *private = platform_get_drvdata(pdev); + + drm_atomic_helper_shutdown(private->drm); +} + +static int mtk_drm_sys_prepare(struct device *dev) +{ + struct mtk_drm_private *private = dev_get_drvdata(dev); + struct drm_device *drm = private->drm; + + if (private->drm_master) + return drm_mode_config_helper_suspend(drm); + else + return 0; +} + +static void mtk_drm_sys_complete(struct device *dev) +{ + struct mtk_drm_private *private = dev_get_drvdata(dev); + struct drm_device *drm = private->drm; + int ret = 0; + + if (private->drm_master) + ret = drm_mode_config_helper_resume(drm); + if (ret) + dev_err(dev, "Failed to resume\n"); +} + +static const struct dev_pm_ops mtk_drm_pm_ops = { + .prepare = mtk_drm_sys_prepare, + .complete = mtk_drm_sys_complete, +}; + +static struct platform_driver mtk_drm_platform_driver = { + .probe = mtk_drm_probe, + .remove = mtk_drm_remove, + .shutdown = mtk_drm_shutdown, + .driver = { + .name = "mediatek-drm", + .pm = &mtk_drm_pm_ops, + }, +}; + 
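/*
 * Editorial sketch, not part of this patch: when the MMSYS node carries no
 * OF graph, probe falls back to the per-SoC tables referenced by struct
 * mtk_mmsys_driver_data (main_path/ext_path/third_path). Such a table has
 * the same shape as the array mtk_drm_of_ddp_path_build_one() assembles at
 * runtime; the component selection below is only illustrative:
 *
 *	static const unsigned int example_main_path[] = {
 *		DDP_COMPONENT_OVL0,
 *		DDP_COMPONENT_RDMA0,
 *		DDP_COMPONENT_COLOR0,
 *		DDP_COMPONENT_DSI0,
 *	};
 *
 * with the matching length field set to ARRAY_SIZE(example_main_path).
 */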
+static struct platform_driver * const mtk_drm_drivers[] = { + &mtk_disp_aal_driver, + &mtk_disp_ccorr_driver, + &mtk_disp_color_driver, + &mtk_disp_gamma_driver, + &mtk_disp_merge_driver, + &mtk_disp_ovl_adaptor_driver, + &mtk_disp_ovl_driver, + &mtk_disp_rdma_driver, + &mtk_dpi_driver, + &mtk_drm_platform_driver, + &mtk_dsi_driver, + &mtk_ethdr_driver, + &mtk_mdp_rdma_driver, + &mtk_padding_driver, +}; + +static int __init mtk_drm_init(void) +{ + return platform_register_drivers(mtk_drm_drivers, + ARRAY_SIZE(mtk_drm_drivers)); +} + +static void __exit mtk_drm_exit(void) +{ + platform_unregister_drivers(mtk_drm_drivers, + ARRAY_SIZE(mtk_drm_drivers)); +} + +module_init(mtk_drm_init); +module_exit(mtk_drm_exit); + +MODULE_AUTHOR("YT SHEN <yt.shen@mediatek.com>"); +MODULE_DESCRIPTION("Mediatek SoC DRM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h new file mode 100644 index 000000000000..675cdc90a440 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015 MediaTek Inc. + */ + +#ifndef MTK_DRM_DRV_H +#define MTK_DRM_DRV_H + +#include <linux/io.h> +#include "mtk_ddp_comp.h" + +#define MAX_CONNECTOR 2 +#define DDP_COMPONENT_DRM_OVL_ADAPTOR (DDP_COMPONENT_ID_MAX + 1) +#define DDP_COMPONENT_DRM_ID_MAX (DDP_COMPONENT_DRM_OVL_ADAPTOR + 1) + +enum mtk_crtc_path { + CRTC_MAIN, + CRTC_EXT, + CRTC_THIRD, + MAX_CRTC, +}; + +struct device; +struct device_node; +struct drm_crtc; +struct drm_device; +struct drm_fb_helper; +struct drm_property; +struct regmap; + +struct mtk_drm_route { + const unsigned int crtc_id; + const unsigned int route_ddp; +}; + +struct mtk_mmsys_driver_data { + const unsigned int *main_path; + unsigned int main_len; + const unsigned int *ext_path; + unsigned int ext_len; + const unsigned int *third_path; + unsigned int third_len; + const struct mtk_drm_route *conn_routes; + unsigned int num_conn_routes; + + bool shadow_register; + unsigned int mmsys_id; + unsigned int mmsys_dev_num; + + u16 max_width; + u16 min_width; + u16 min_height; +}; + +struct mtk_drm_private { + struct drm_device *drm; + struct device *dma_dev; + bool mtk_drm_bound; + bool drm_master; + struct device *dev; + struct device_node *mutex_node; + struct device *mutex_dev; + struct device *mmsys_dev; + struct device_node *comp_node[DDP_COMPONENT_DRM_ID_MAX]; + struct mtk_ddp_comp ddp_comp[DDP_COMPONENT_DRM_ID_MAX]; + struct mtk_mmsys_driver_data *data; + struct drm_atomic_state *suspend_state; + unsigned int mbox_index; + struct mtk_drm_private **all_drm_private; +}; + +extern struct platform_driver mtk_disp_aal_driver; +extern struct platform_driver mtk_disp_ccorr_driver; +extern struct platform_driver mtk_disp_color_driver; +extern struct platform_driver mtk_disp_gamma_driver; +extern struct platform_driver mtk_disp_merge_driver; +extern struct platform_driver mtk_disp_ovl_adaptor_driver; +extern struct platform_driver mtk_disp_ovl_driver; +extern struct platform_driver mtk_disp_rdma_driver; +extern struct platform_driver mtk_dpi_driver; +extern struct platform_driver mtk_dsi_driver; +extern struct platform_driver mtk_ethdr_driver; +extern struct platform_driver mtk_mdp_rdma_driver; +extern struct platform_driver mtk_padding_driver; +#endif /* MTK_DRM_DRV_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c new file mode 100644 index 000000000000..0e2bcd5f67b7 --- /dev/null +++ 
b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -0,0 +1,1325 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015 MediaTek Inc. + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/iopoll.h> +#include <linux/irq.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/units.h> + +#include <video/mipi_display.h> +#include <video/videomode.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_of.h> +#include <drm/drm_panel.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> + +#include "mtk_ddp_comp.h" +#include "mtk_disp_drv.h" +#include "mtk_drm_drv.h" + +#define DSI_START 0x00 + +#define DSI_INTEN 0x08 + +#define DSI_INTSTA 0x0c +#define LPRX_RD_RDY_INT_FLAG BIT(0) +#define CMD_DONE_INT_FLAG BIT(1) +#define TE_RDY_INT_FLAG BIT(2) +#define VM_DONE_INT_FLAG BIT(3) +#define EXT_TE_RDY_INT_FLAG BIT(4) +#define DSI_BUSY BIT(31) + +#define DSI_CON_CTRL 0x10 +#define DSI_RESET BIT(0) +#define DSI_EN BIT(1) +#define DPHY_RESET BIT(2) + +#define DSI_MODE_CTRL 0x14 +#define MODE (3) +#define CMD_MODE 0 +#define SYNC_PULSE_MODE 1 +#define SYNC_EVENT_MODE 2 +#define BURST_MODE 3 +#define FRM_MODE BIT(16) +#define MIX_MODE BIT(17) + +#define DSI_TXRX_CTRL 0x18 +#define VC_NUM BIT(1) +#define LANE_NUM GENMASK(5, 2) +#define DIS_EOT BIT(6) +#define NULL_EN BIT(7) +#define TE_FREERUN BIT(8) +#define EXT_TE_EN BIT(9) +#define EXT_TE_EDGE BIT(10) +#define MAX_RTN_SIZE GENMASK(15, 12) +#define HSTX_CKLP_EN BIT(16) + +#define DSI_PSCTRL 0x1c +#define DSI_PS_WC GENMASK(13, 0) +#define DSI_PS_SEL GENMASK(17, 16) +#define PACKED_PS_16BIT_RGB565 0 +#define PACKED_PS_18BIT_RGB666 1 +#define LOOSELY_PS_24BIT_RGB666 2 +#define PACKED_PS_24BIT_RGB888 3 + +#define DSI_VSA_NL 0x20 +#define DSI_VBP_NL 0x24 +#define DSI_VFP_NL 0x28 +#define DSI_VACT_NL 0x2C +#define VACT_NL GENMASK(14, 0) +#define DSI_SIZE_CON 0x38 +#define DSI_HEIGHT GENMASK(30, 16) +#define DSI_WIDTH GENMASK(14, 0) +#define DSI_HSA_WC 0x50 +#define DSI_HBP_WC 0x54 +#define DSI_HFP_WC 0x58 +#define HFP_HS_VB_PS_WC GENMASK(30, 16) +#define HFP_HS_EN BIT(31) + +#define DSI_CMDQ_SIZE 0x60 +#define CMDQ_SIZE 0x3f +#define CMDQ_SIZE_SEL BIT(15) + +#define DSI_HSTX_CKL_WC 0x64 +#define HSTX_CKL_WC GENMASK(15, 2) + +#define DSI_RX_DATA0 0x74 +#define DSI_RX_DATA1 0x78 +#define DSI_RX_DATA2 0x7c +#define DSI_RX_DATA3 0x80 + +#define DSI_RACK 0x84 +#define RACK BIT(0) + +#define DSI_PHY_LCCON 0x104 +#define LC_HS_TX_EN BIT(0) +#define LC_ULPM_EN BIT(1) +#define LC_WAKEUP_EN BIT(2) + +#define DSI_PHY_LD0CON 0x108 +#define LD0_HS_TX_EN BIT(0) +#define LD0_ULPM_EN BIT(1) +#define LD0_WAKEUP_EN BIT(2) + +#define DSI_PHY_TIMECON0 0x110 +#define LPX GENMASK(7, 0) +#define HS_PREP GENMASK(15, 8) +#define HS_ZERO GENMASK(23, 16) +#define HS_TRAIL GENMASK(31, 24) + +#define DSI_PHY_TIMECON1 0x114 +#define TA_GO GENMASK(7, 0) +#define TA_SURE GENMASK(15, 8) +#define TA_GET GENMASK(23, 16) +#define DA_HS_EXIT GENMASK(31, 24) + +#define DSI_PHY_TIMECON2 0x118 +#define CONT_DET GENMASK(7, 0) +#define DA_HS_SYNC GENMASK(15, 8) +#define CLK_ZERO GENMASK(23, 16) +#define CLK_TRAIL GENMASK(31, 24) + +#define DSI_PHY_TIMECON3 0x11c +#define CLK_HS_PREP GENMASK(7, 0) +#define CLK_HS_POST GENMASK(15, 8) +#define CLK_HS_EXIT GENMASK(23, 16) + +/* 
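SoC-dependent (driver_data->reg_vm_cmd_off) register: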
DSI_VM_CMD_CON */ +#define VM_CMD_EN BIT(0) +#define TS_VFP_EN BIT(5) + +/* DSI_SHADOW_DEBUG */ +#define FORCE_COMMIT BIT(0) +#define BYPASS_SHADOW BIT(1) + +/* CMDQ related bits */ +#define CONFIG GENMASK(7, 0) +#define SHORT_PACKET 0 +#define LONG_PACKET 2 +#define BTA BIT(2) +#define DATA_ID GENMASK(15, 8) +#define DATA_0 GENMASK(23, 16) +#define DATA_1 GENMASK(31, 24) + +#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0)) + +#define MTK_DSI_HOST_IS_READ(type) \ + ((type == MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM) || \ + (type == MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM) || \ + (type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \ + (type == MIPI_DSI_DCS_READ)) + +struct mtk_phy_timing { + u32 lpx; + u32 da_hs_prepare; + u32 da_hs_zero; + u32 da_hs_trail; + + u32 ta_go; + u32 ta_sure; + u32 ta_get; + u32 da_hs_exit; + + u32 clk_hs_zero; + u32 clk_hs_trail; + + u32 clk_hs_prepare; + u32 clk_hs_post; + u32 clk_hs_exit; +}; + +struct phy; + +struct mtk_dsi_driver_data { + const u32 reg_cmdq_off; + const u32 reg_vm_cmd_off; + const u32 reg_shadow_dbg_off; + bool has_shadow_ctl; + bool has_size_ctl; + bool cmdq_long_packet_ctl; + bool support_per_frame_lp; +}; + +struct mtk_dsi { + struct device *dev; + struct mipi_dsi_host host; + struct drm_encoder encoder; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct drm_connector *connector; + struct phy *phy; + + void __iomem *regs; + + struct clk *engine_clk; + struct clk *digital_clk; + struct clk *hs_clk; + + u32 data_rate; + + unsigned long mode_flags; + enum mipi_dsi_pixel_format format; + unsigned int lanes; + struct videomode vm; + struct mtk_phy_timing phy_timing; + int refcount; + bool enabled; + bool lanes_ready; + u32 irq_data; + wait_queue_head_t irq_wait_queue; + const struct mtk_dsi_driver_data *driver_data; +}; + +static inline struct mtk_dsi *bridge_to_dsi(struct drm_bridge *b) +{ + return container_of(b, struct mtk_dsi, bridge); +} + +static inline struct mtk_dsi *host_to_dsi(struct mipi_dsi_host *h) +{ + return container_of(h, struct mtk_dsi, host); +} + +static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data) +{ + u32 temp = readl(dsi->regs + offset); + + writel((temp & ~mask) | (data & mask), dsi->regs + offset); +} + +static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi) +{ + u32 timcon0, timcon1, timcon2, timcon3; + u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, HZ_PER_MHZ); + struct mtk_phy_timing *timing = &dsi->phy_timing; + + timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1; + timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000; + timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 - + timing->da_hs_prepare; + timing->da_hs_trail = timing->da_hs_prepare + 1; + + timing->ta_go = 4 * timing->lpx - 2; + timing->ta_sure = timing->lpx + 2; + timing->ta_get = 4 * timing->lpx; + timing->da_hs_exit = 2 * timing->lpx + 1; + + timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000); + timing->clk_hs_post = timing->clk_hs_prepare + 8; + timing->clk_hs_trail = timing->clk_hs_prepare; + timing->clk_hs_zero = timing->clk_hs_trail * 4; + timing->clk_hs_exit = 2 * timing->clk_hs_trail; + + timcon0 = FIELD_PREP(LPX, timing->lpx) | + FIELD_PREP(HS_PREP, timing->da_hs_prepare) | + FIELD_PREP(HS_ZERO, timing->da_hs_zero) | + FIELD_PREP(HS_TRAIL, timing->da_hs_trail); + + timcon1 = FIELD_PREP(TA_GO, timing->ta_go) | + FIELD_PREP(TA_SURE, timing->ta_sure) | + FIELD_PREP(TA_GET, timing->ta_get) | + FIELD_PREP(DA_HS_EXIT, timing->da_hs_exit); + + timcon2 = 
FIELD_PREP(DA_HS_SYNC, 1) | + FIELD_PREP(CLK_ZERO, timing->clk_hs_zero) | + FIELD_PREP(CLK_TRAIL, timing->clk_hs_trail); + + timcon3 = FIELD_PREP(CLK_HS_PREP, timing->clk_hs_prepare) | + FIELD_PREP(CLK_HS_POST, timing->clk_hs_post) | + FIELD_PREP(CLK_HS_EXIT, timing->clk_hs_exit); + + writel(timcon0, dsi->regs + DSI_PHY_TIMECON0); + writel(timcon1, dsi->regs + DSI_PHY_TIMECON1); + writel(timcon2, dsi->regs + DSI_PHY_TIMECON2); + writel(timcon3, dsi->regs + DSI_PHY_TIMECON3); +} + +static void mtk_dsi_enable(struct mtk_dsi *dsi) +{ + mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, DSI_EN); +} + +static void mtk_dsi_disable(struct mtk_dsi *dsi) +{ + mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0); +} + +static void mtk_dsi_reset_engine(struct mtk_dsi *dsi) +{ + mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET); + mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0); +} + +static void mtk_dsi_reset_dphy(struct mtk_dsi *dsi) +{ + mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, DPHY_RESET); + mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, 0); +} + +static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi) +{ + mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0); + mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0); +} + +static void mtk_dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi) +{ + mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0); + mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, LC_WAKEUP_EN); + mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, 0); +} + +static void mtk_dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi) +{ + mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_HS_TX_EN, 0); + mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0); +} + +static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi) +{ + mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0); + mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, LD0_WAKEUP_EN); + mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, 0); +} + +static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi) +{ + return readl(dsi->regs + DSI_PHY_LCCON) & LC_HS_TX_EN; +} + +static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter) +{ + if (enter && !mtk_dsi_clk_hs_state(dsi)) + mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, LC_HS_TX_EN); + else if (!enter && mtk_dsi_clk_hs_state(dsi)) + mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0); +} + +static void mtk_dsi_set_mode(struct mtk_dsi *dsi) +{ + u32 vid_mode = CMD_MODE; + + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) + vid_mode = BURST_MODE; + else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) + vid_mode = SYNC_PULSE_MODE; + else + vid_mode = SYNC_EVENT_MODE; + } + + writel(vid_mode, dsi->regs + DSI_MODE_CTRL); +} + +static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi) +{ + mtk_dsi_mask(dsi, dsi->driver_data->reg_vm_cmd_off, VM_CMD_EN, VM_CMD_EN); + mtk_dsi_mask(dsi, dsi->driver_data->reg_vm_cmd_off, TS_VFP_EN, TS_VFP_EN); +} + +static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi) +{ + u32 regval, tmp_reg = 0; + u8 i; + + /* Number of DSI lanes (max 4 lanes), each bit enables one DSI lane. 
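For example, two lanes give 0b0011 and four lanes 0b1111; the resulting value is then placed into the LANE_NUM field.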
*/ + for (i = 0; i < dsi->lanes; i++) + tmp_reg |= BIT(i); + + regval = FIELD_PREP(LANE_NUM, tmp_reg); + + if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) + regval |= HSTX_CKLP_EN; + + if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET) + regval |= DIS_EOT; + + writel(regval, dsi->regs + DSI_TXRX_CTRL); +} + +static void mtk_dsi_ps_control(struct mtk_dsi *dsi, bool config_vact) +{ + u32 dsi_buf_bpp, ps_val, ps_wc, vact_nl; + + if (dsi->format == MIPI_DSI_FMT_RGB565) + dsi_buf_bpp = 2; + else + dsi_buf_bpp = 3; + + /* Word count */ + ps_wc = FIELD_PREP(DSI_PS_WC, dsi->vm.hactive * dsi_buf_bpp); + ps_val = ps_wc; + + /* Pixel Stream type */ + switch (dsi->format) { + default: + fallthrough; + case MIPI_DSI_FMT_RGB888: + ps_val |= FIELD_PREP(DSI_PS_SEL, PACKED_PS_24BIT_RGB888); + break; + case MIPI_DSI_FMT_RGB666: + ps_val |= FIELD_PREP(DSI_PS_SEL, LOOSELY_PS_24BIT_RGB666); + break; + case MIPI_DSI_FMT_RGB666_PACKED: + ps_val |= FIELD_PREP(DSI_PS_SEL, PACKED_PS_18BIT_RGB666); + break; + case MIPI_DSI_FMT_RGB565: + ps_val |= FIELD_PREP(DSI_PS_SEL, PACKED_PS_16BIT_RGB565); + break; + } + + if (config_vact) { + vact_nl = FIELD_PREP(VACT_NL, dsi->vm.vactive); + writel(vact_nl, dsi->regs + DSI_VACT_NL); + writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC); + } + writel(ps_val, dsi->regs + DSI_PSCTRL); +} + +static void mtk_dsi_config_vdo_timing_per_frame_lp(struct mtk_dsi *dsi) +{ + u32 horizontal_sync_active_byte; + u32 horizontal_backporch_byte; + u32 horizontal_frontporch_byte; + u32 hfp_byte_adjust, v_active_adjust; + u32 cklp_wc_min_adjust, cklp_wc_max_adjust; + u32 dsi_tmp_buf_bpp; + unsigned int da_hs_trail; + unsigned int ps_wc, hs_vb_ps_wc; + u32 v_active_roundup, hstx_cklp_wc; + u32 hstx_cklp_wc_max, hstx_cklp_wc_min; + struct videomode *vm = &dsi->vm; + + if (dsi->format == MIPI_DSI_FMT_RGB565) + dsi_tmp_buf_bpp = 2; + else + dsi_tmp_buf_bpp = 3; + + da_hs_trail = dsi->phy_timing.da_hs_trail; + ps_wc = vm->hactive * dsi_tmp_buf_bpp; + + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) { + horizontal_sync_active_byte = + vm->hsync_len * dsi_tmp_buf_bpp - 10; + horizontal_backporch_byte = + vm->hback_porch * dsi_tmp_buf_bpp - 10; + hfp_byte_adjust = 12; + v_active_adjust = 32 + horizontal_sync_active_byte; + cklp_wc_min_adjust = 12 + 2 + 4 + horizontal_sync_active_byte; + cklp_wc_max_adjust = 20 + 6 + 4 + horizontal_sync_active_byte; + } else { + horizontal_sync_active_byte = vm->hsync_len * dsi_tmp_buf_bpp - 4; + horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) * + dsi_tmp_buf_bpp - 10; + cklp_wc_min_adjust = 4; + cklp_wc_max_adjust = 12 + 4 + 4; + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { + hfp_byte_adjust = 18; + v_active_adjust = 28; + } else { + hfp_byte_adjust = 12; + v_active_adjust = 22; + } + } + horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp - hfp_byte_adjust; + v_active_roundup = (v_active_adjust + horizontal_backporch_byte + ps_wc + + horizontal_frontporch_byte) % dsi->lanes; + if (v_active_roundup) + horizontal_backporch_byte += dsi->lanes - v_active_roundup; + hstx_cklp_wc_min = (DIV_ROUND_UP(cklp_wc_min_adjust, dsi->lanes) + da_hs_trail + 1) + * dsi->lanes / 6 - 1; + hstx_cklp_wc_max = (DIV_ROUND_UP((cklp_wc_max_adjust + horizontal_backporch_byte + + ps_wc), dsi->lanes) + da_hs_trail + 1) * dsi->lanes / 6 - 1; + + hstx_cklp_wc = FIELD_PREP(HSTX_CKL_WC, (hstx_cklp_wc_min + hstx_cklp_wc_max) / 2); + writel(hstx_cklp_wc, dsi->regs + DSI_HSTX_CKL_WC); + + hs_vb_ps_wc = ps_wc - (dsi->phy_timing.lpx + dsi->phy_timing.da_hs_exit + + 
dsi->phy_timing.da_hs_prepare + dsi->phy_timing.da_hs_zero + 2) * dsi->lanes; + horizontal_frontporch_byte |= FIELD_PREP(HFP_HS_EN, 1) | + FIELD_PREP(HFP_HS_VB_PS_WC, hs_vb_ps_wc); + + writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC); + writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC); + writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC); +} + +static void mtk_dsi_config_vdo_timing_per_line_lp(struct mtk_dsi *dsi) +{ + u32 horizontal_sync_active_byte; + u32 horizontal_backporch_byte; + u32 horizontal_frontporch_byte; + u32 horizontal_front_back_byte; + u32 data_phy_cycles_byte; + u32 dsi_tmp_buf_bpp, data_phy_cycles; + u32 delta; + struct mtk_phy_timing *timing = &dsi->phy_timing; + struct videomode *vm = &dsi->vm; + + if (dsi->format == MIPI_DSI_FMT_RGB565) + dsi_tmp_buf_bpp = 2; + else + dsi_tmp_buf_bpp = 3; + + horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10); + + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) + horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp - 10; + else + horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) * + dsi_tmp_buf_bpp - 10; + + data_phy_cycles = timing->lpx + timing->da_hs_prepare + + timing->da_hs_zero + timing->da_hs_exit + 3; + + delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12; + delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 0 : 2; + + horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp; + horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte; + data_phy_cycles_byte = data_phy_cycles * dsi->lanes + delta; + + if (horizontal_front_back_byte > data_phy_cycles_byte) { + horizontal_frontporch_byte -= data_phy_cycles_byte * + horizontal_frontporch_byte / + horizontal_front_back_byte; + + horizontal_backporch_byte -= data_phy_cycles_byte * + horizontal_backporch_byte / + horizontal_front_back_byte; + } else { + DRM_WARN("HFP + HBP less than d-phy, FPS will under 60Hz\n"); + } + + if ((dsi->mode_flags & MIPI_DSI_HS_PKT_END_ALIGNED) && + (dsi->lanes == 4)) { + horizontal_sync_active_byte = + roundup(horizontal_sync_active_byte, dsi->lanes) - 2; + horizontal_frontporch_byte = + roundup(horizontal_frontporch_byte, dsi->lanes) - 2; + horizontal_backporch_byte = + roundup(horizontal_backporch_byte, dsi->lanes) - 2; + horizontal_backporch_byte -= + (vm->hactive * dsi_tmp_buf_bpp + 2) % dsi->lanes; + } + + writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC); + writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC); + writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC); +} + +static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) +{ + struct videomode *vm = &dsi->vm; + + writel(vm->vsync_len, dsi->regs + DSI_VSA_NL); + writel(vm->vback_porch, dsi->regs + DSI_VBP_NL); + writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL); + writel(vm->vactive, dsi->regs + DSI_VACT_NL); + + if (dsi->driver_data->has_size_ctl) + writel(FIELD_PREP(DSI_HEIGHT, vm->vactive) | + FIELD_PREP(DSI_WIDTH, vm->hactive), + dsi->regs + DSI_SIZE_CON); + + if (dsi->driver_data->support_per_frame_lp) + mtk_dsi_config_vdo_timing_per_frame_lp(dsi); + else + mtk_dsi_config_vdo_timing_per_line_lp(dsi); + + mtk_dsi_ps_control(dsi, false); +} + +static void mtk_dsi_start(struct mtk_dsi *dsi) +{ + writel(0, dsi->regs + DSI_START); + writel(1, dsi->regs + DSI_START); +} + +static void mtk_dsi_stop(struct mtk_dsi *dsi) +{ + writel(0, dsi->regs + DSI_START); +} + +static void mtk_dsi_set_cmd_mode(struct mtk_dsi *dsi) +{ + writel(CMD_MODE, dsi->regs + 
DSI_MODE_CTRL); +} + +static void mtk_dsi_set_interrupt_enable(struct mtk_dsi *dsi) +{ + u32 inten = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG; + + writel(inten, dsi->regs + DSI_INTEN); +} + +static void mtk_dsi_irq_data_set(struct mtk_dsi *dsi, u32 irq_bit) +{ + dsi->irq_data |= irq_bit; +} + +static void mtk_dsi_irq_data_clear(struct mtk_dsi *dsi, u32 irq_bit) +{ + dsi->irq_data &= ~irq_bit; +} + +static s32 mtk_dsi_wait_for_irq_done(struct mtk_dsi *dsi, u32 irq_flag, + unsigned int timeout) +{ + s32 ret = 0; + unsigned long jiffies = msecs_to_jiffies(timeout); + + ret = wait_event_interruptible_timeout(dsi->irq_wait_queue, + dsi->irq_data & irq_flag, + jiffies); + if (ret == 0) { + DRM_WARN("Wait DSI IRQ(0x%08x) Timeout\n", irq_flag); + + mtk_dsi_enable(dsi); + mtk_dsi_reset_engine(dsi); + } + + return ret; +} + +static irqreturn_t mtk_dsi_irq(int irq, void *dev_id) +{ + struct mtk_dsi *dsi = dev_id; + u32 status, tmp; + u32 flag = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG; + + status = readl(dsi->regs + DSI_INTSTA) & flag; + + if (status) { + do { + mtk_dsi_mask(dsi, DSI_RACK, RACK, RACK); + tmp = readl(dsi->regs + DSI_INTSTA); + } while (tmp & DSI_BUSY); + + mtk_dsi_mask(dsi, DSI_INTSTA, status, 0); + mtk_dsi_irq_data_set(dsi, status); + wake_up_interruptible(&dsi->irq_wait_queue); + } + + return IRQ_HANDLED; +} + +static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t) +{ + mtk_dsi_irq_data_clear(dsi, irq_flag); + mtk_dsi_set_cmd_mode(dsi); + + if (!mtk_dsi_wait_for_irq_done(dsi, irq_flag, t)) { + DRM_ERROR("failed to switch cmd mode\n"); + return -ETIME; + } else { + return 0; + } +} + +static int mtk_dsi_poweron(struct mtk_dsi *dsi) +{ + struct device *dev = dsi->host.dev; + int ret; + u32 bit_per_pixel; + + if (++dsi->refcount != 1) + return 0; + + ret = mipi_dsi_pixel_format_to_bpp(dsi->format); + if (ret < 0) { + dev_err(dev, "Unknown MIPI DSI format %d\n", dsi->format); + return ret; + } + bit_per_pixel = ret; + + dsi->data_rate = DIV_ROUND_UP_ULL(dsi->vm.pixelclock * bit_per_pixel, + dsi->lanes); + + ret = clk_set_rate(dsi->hs_clk, dsi->data_rate); + if (ret < 0) { + dev_err(dev, "Failed to set data rate: %d\n", ret); + goto err_refcount; + } + + phy_power_on(dsi->phy); + + ret = clk_prepare_enable(dsi->engine_clk); + if (ret < 0) { + dev_err(dev, "Failed to enable engine clock: %d\n", ret); + goto err_phy_power_off; + } + + ret = clk_prepare_enable(dsi->digital_clk); + if (ret < 0) { + dev_err(dev, "Failed to enable digital clock: %d\n", ret); + goto err_disable_engine_clk; + } + + mtk_dsi_enable(dsi); + + if (dsi->driver_data->has_shadow_ctl) + writel(FORCE_COMMIT | BYPASS_SHADOW, + dsi->regs + dsi->driver_data->reg_shadow_dbg_off); + + mtk_dsi_reset_engine(dsi); + mtk_dsi_phy_timconfig(dsi); + + mtk_dsi_ps_control(dsi, true); + mtk_dsi_set_vm_cmd(dsi); + mtk_dsi_config_vdo_timing(dsi); + mtk_dsi_set_interrupt_enable(dsi); + + return 0; +err_disable_engine_clk: + clk_disable_unprepare(dsi->engine_clk); +err_phy_power_off: + phy_power_off(dsi->phy); +err_refcount: + dsi->refcount--; + return ret; +} + +static void mtk_dsi_poweroff(struct mtk_dsi *dsi) +{ + if (WARN_ON(dsi->refcount == 0)) + return; + + if (--dsi->refcount != 0) + return; + + /* + * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since + * mtk_dsi_stop() should be called after mtk_crtc_atomic_disable(), + * which needs irq for vblank, and mtk_dsi_stop() will disable irq. 
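That is why mtk_dsi_stop() is called here in the poweroff path instead of in the atomic_disable hook.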
+ * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(), + * after dsi is fully set. + */ + mtk_dsi_stop(dsi); + + mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500); + mtk_dsi_reset_engine(dsi); + mtk_dsi_lane0_ulp_mode_enter(dsi); + mtk_dsi_clk_ulp_mode_enter(dsi); + /* set the lane number as 0 to pull down mipi */ + writel(0, dsi->regs + DSI_TXRX_CTRL); + + mtk_dsi_disable(dsi); + + clk_disable_unprepare(dsi->engine_clk); + clk_disable_unprepare(dsi->digital_clk); + + phy_power_off(dsi->phy); + + dsi->lanes_ready = false; +} + +static void mtk_dsi_lane_ready(struct mtk_dsi *dsi) +{ + if (!dsi->lanes_ready) { + dsi->lanes_ready = true; + mtk_dsi_rxtx_control(dsi); + usleep_range(30, 100); + mtk_dsi_reset_dphy(dsi); + mtk_dsi_clk_ulp_mode_leave(dsi); + mtk_dsi_lane0_ulp_mode_leave(dsi); + mtk_dsi_clk_hs_mode(dsi, 0); + usleep_range(1000, 3000); + /* The reaction time after pulling up the mipi signal for dsi_rx */ + } +} + +static void mtk_output_dsi_enable(struct mtk_dsi *dsi) +{ + if (dsi->enabled) + return; + + mtk_dsi_lane_ready(dsi); + mtk_dsi_set_mode(dsi); + mtk_dsi_clk_hs_mode(dsi, 1); + + mtk_dsi_start(dsi); + + dsi->enabled = true; +} + +static void mtk_output_dsi_disable(struct mtk_dsi *dsi) +{ + if (!dsi->enabled) + return; + + dsi->enabled = false; +} + +static int mtk_dsi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, + enum drm_bridge_attach_flags flags) +{ + struct mtk_dsi *dsi = bridge_to_dsi(bridge); + + /* Attach the panel or bridge to the dsi bridge */ + return drm_bridge_attach(encoder, dsi->next_bridge, + &dsi->bridge, flags); +} + +static void mtk_dsi_bridge_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted) +{ + struct mtk_dsi *dsi = bridge_to_dsi(bridge); + + drm_display_mode_to_videomode(adjusted, &dsi->vm); +} + +static void mtk_dsi_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_dsi *dsi = bridge_to_dsi(bridge); + + mtk_output_dsi_disable(dsi); +} + +static void mtk_dsi_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_dsi *dsi = bridge_to_dsi(bridge); + + if (dsi->refcount == 0) + return; + + mtk_output_dsi_enable(dsi); +} + +static void mtk_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_dsi *dsi = bridge_to_dsi(bridge); + int ret; + + ret = mtk_dsi_poweron(dsi); + if (ret < 0) + DRM_ERROR("failed to power on dsi\n"); +} + +static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_dsi *dsi = bridge_to_dsi(bridge); + + mtk_dsi_poweroff(dsi); +} + +static enum drm_mode_status +mtk_dsi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct mtk_dsi *dsi = bridge_to_dsi(bridge); + int bpp; + + bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); + if (bpp < 0) + return MODE_ERROR; + + if (mode->clock * bpp / dsi->lanes > 1500000) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = { + .attach = mtk_dsi_bridge_attach, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_disable = mtk_dsi_bridge_atomic_disable, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_enable = mtk_dsi_bridge_atomic_enable, + .atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable, + 
.atomic_post_disable = mtk_dsi_bridge_atomic_post_disable, + .atomic_reset = drm_atomic_helper_bridge_reset, + .mode_valid = mtk_dsi_bridge_mode_valid, + .mode_set = mtk_dsi_bridge_mode_set, +}; + +void mtk_dsi_ddp_start(struct device *dev) +{ + struct mtk_dsi *dsi = dev_get_drvdata(dev); + + mtk_dsi_poweron(dsi); +} + +void mtk_dsi_ddp_stop(struct device *dev) +{ + struct mtk_dsi *dsi = dev_get_drvdata(dev); + + mtk_dsi_poweroff(dsi); +} + +static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi) +{ + int ret; + + ret = drm_simple_encoder_init(drm, &dsi->encoder, + DRM_MODE_ENCODER_DSI); + if (ret) { + DRM_ERROR("Failed to encoder init to drm\n"); + return ret; + } + + ret = mtk_find_possible_crtcs(drm, dsi->host.dev); + if (ret < 0) + goto err_cleanup_encoder; + dsi->encoder.possible_crtcs = ret; + + ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) + goto err_cleanup_encoder; + + dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder); + if (IS_ERR(dsi->connector)) { + DRM_ERROR("Unable to create bridge connector\n"); + ret = PTR_ERR(dsi->connector); + goto err_cleanup_encoder; + } + drm_connector_attach_encoder(dsi->connector, &dsi->encoder); + + return 0; + +err_cleanup_encoder: + drm_encoder_cleanup(&dsi->encoder); + return ret; +} + +unsigned int mtk_dsi_encoder_index(struct device *dev) +{ + struct mtk_dsi *dsi = dev_get_drvdata(dev); + unsigned int encoder_index = drm_encoder_index(&dsi->encoder); + + dev_dbg(dev, "encoder index:%d\n", encoder_index); + return encoder_index; +} + +static int mtk_dsi_bind(struct device *dev, struct device *master, void *data) +{ + int ret; + struct drm_device *drm = data; + struct mtk_dsi *dsi = dev_get_drvdata(dev); + + ret = mtk_dsi_encoder_init(drm, dsi); + if (ret) + return ret; + + return device_reset_optional(dev); +} + +static void mtk_dsi_unbind(struct device *dev, struct device *master, + void *data) +{ + struct mtk_dsi *dsi = dev_get_drvdata(dev); + + drm_encoder_cleanup(&dsi->encoder); +} + +static const struct component_ops mtk_dsi_component_ops = { + .bind = mtk_dsi_bind, + .unbind = mtk_dsi_unbind, +}; + +static int mtk_dsi_host_attach(struct mipi_dsi_host *host, + struct mipi_dsi_device *device) +{ + struct mtk_dsi *dsi = host_to_dsi(host); + struct device *dev = host->dev; + int ret; + + dsi->lanes = device->lanes; + dsi->format = device->format; + dsi->mode_flags = device->mode_flags; + dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0); + if (IS_ERR(dsi->next_bridge)) { + ret = PTR_ERR(dsi->next_bridge); + if (ret == -EPROBE_DEFER) + return ret; + + /* Old devicetree has only one endpoint */ + dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0); + if (IS_ERR(dsi->next_bridge)) + return PTR_ERR(dsi->next_bridge); + } + + /* + * set flag to request the DSI host bridge be pre-enabled before device bridge + * in the chain, so the DSI host is ready when the device bridge is pre-enabled + */ + dsi->next_bridge->pre_enable_prev_first = true; + + drm_bridge_add(&dsi->bridge); + + ret = component_add(host->dev, &mtk_dsi_component_ops); + if (ret) { + DRM_ERROR("failed to add dsi_host component: %d\n", ret); + drm_bridge_remove(&dsi->bridge); + return ret; + } + + return 0; +} + +static int mtk_dsi_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *device) +{ + struct mtk_dsi *dsi = host_to_dsi(host); + + component_del(host->dev, &mtk_dsi_component_ops); + drm_bridge_remove(&dsi->bridge); + return 0; +} + +static 
void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi) +{ + int ret; + u32 val; + + ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY), + 4, 2000000); + if (ret) { + DRM_WARN("polling dsi wait not busy timeout!\n"); + + mtk_dsi_enable(dsi); + mtk_dsi_reset_engine(dsi); + } +} + +static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data) +{ + switch (type) { + case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: + case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: + return 1; + case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE: + case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE: + return 2; + case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE: + case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE: + return read_data[1] + read_data[2] * 16; + case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT: + DRM_INFO("type is 0x02, try again\n"); + break; + default: + DRM_INFO("type(0x%x) not recognized\n", type); + break; + } + + return 0; +} + +static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg) +{ + const char *tx_buf = msg->tx_buf; + u8 config, cmdq_size, cmdq_off, type = msg->type; + u32 reg_val, cmdq_mask, i; + u32 reg_cmdq_off = dsi->driver_data->reg_cmdq_off; + + if (MTK_DSI_HOST_IS_READ(type)) + config = BTA; + else + config = (msg->tx_len > 2) ? LONG_PACKET : SHORT_PACKET; + + if (msg->tx_len > 2) { + cmdq_size = 1 + (msg->tx_len + 3) / 4; + cmdq_off = 4; + cmdq_mask = CONFIG | DATA_ID | DATA_0 | DATA_1; + reg_val = (msg->tx_len << 16) | (type << 8) | config; + } else { + cmdq_size = 1; + cmdq_off = 2; + cmdq_mask = CONFIG | DATA_ID; + reg_val = (type << 8) | config; + } + + for (i = 0; i < msg->tx_len; i++) + mtk_dsi_mask(dsi, (reg_cmdq_off + cmdq_off + i) & (~0x3U), + (0xffUL << (((i + cmdq_off) & 3U) * 8U)), + tx_buf[i] << (((i + cmdq_off) & 3U) * 8U)); + + mtk_dsi_mask(dsi, reg_cmdq_off, cmdq_mask, reg_val); + mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size); + if (dsi->driver_data->cmdq_long_packet_ctl) { + /* Disable setting cmdq_size automatically for long packets */ + mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE_SEL, CMDQ_SIZE_SEL); + } +} + +static ssize_t mtk_dsi_host_send_cmd(struct mtk_dsi *dsi, + const struct mipi_dsi_msg *msg, u8 flag) +{ + mtk_dsi_wait_for_idle(dsi); + mtk_dsi_irq_data_clear(dsi, flag); + mtk_dsi_cmdq(dsi, msg); + mtk_dsi_start(dsi); + + if (!mtk_dsi_wait_for_irq_done(dsi, flag, 2000)) + return -ETIME; + else + return 0; +} + +static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + struct mtk_dsi *dsi = host_to_dsi(host); + ssize_t recv_cnt; + u8 read_data[16]; + void *src_addr; + u8 irq_flag = CMD_DONE_INT_FLAG; + u32 dsi_mode; + int ret, i; + + dsi_mode = readl(dsi->regs + DSI_MODE_CTRL); + if (dsi_mode & MODE) { + mtk_dsi_stop(dsi); + ret = mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500); + if (ret) + goto restore_dsi_mode; + } + + if (MTK_DSI_HOST_IS_READ(msg->type)) + irq_flag |= LPRX_RD_RDY_INT_FLAG; + + mtk_dsi_lane_ready(dsi); + + ret = mtk_dsi_host_send_cmd(dsi, msg, irq_flag); + if (ret) + goto restore_dsi_mode; + + if (!MTK_DSI_HOST_IS_READ(msg->type)) { + recv_cnt = 0; + goto restore_dsi_mode; + } + + if (!msg->rx_buf) { + DRM_ERROR("dsi receive buffer size may be NULL\n"); + ret = -EINVAL; + goto restore_dsi_mode; + } + + for (i = 0; i < 16; i++) + *(read_data + i) = readb(dsi->regs + DSI_RX_DATA0 + i); + + recv_cnt = mtk_dsi_recv_cnt(read_data[0], read_data); + + if (recv_cnt > 2) + src_addr = &read_data[4]; + else + src_addr = &read_data[1]; + + if (recv_cnt > 10) + recv_cnt = 10; + + 
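/* Never hand back more than the caller's rx_buf can hold */ +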
if (recv_cnt > msg->rx_len) + recv_cnt = msg->rx_len; + + if (recv_cnt) + memcpy(msg->rx_buf, src_addr, recv_cnt); + + DRM_INFO("dsi get %zd byte data from the panel address(0x%x)\n", + recv_cnt, *((u8 *)(msg->tx_buf))); + +restore_dsi_mode: + if (dsi_mode & MODE) { + mtk_dsi_set_mode(dsi); + mtk_dsi_start(dsi); + } + + return ret < 0 ? ret : recv_cnt; +} + +static const struct mipi_dsi_host_ops mtk_dsi_ops = { + .attach = mtk_dsi_host_attach, + .detach = mtk_dsi_host_detach, + .transfer = mtk_dsi_host_transfer, +}; + +static int mtk_dsi_probe(struct platform_device *pdev) +{ + struct mtk_dsi *dsi; + struct device *dev = &pdev->dev; + int irq_num; + int ret; + + dsi = devm_drm_bridge_alloc(dev, struct mtk_dsi, bridge, + &mtk_dsi_bridge_funcs); + if (IS_ERR(dsi)) + return PTR_ERR(dsi); + + dsi->driver_data = of_device_get_match_data(dev); + + dsi->engine_clk = devm_clk_get(dev, "engine"); + if (IS_ERR(dsi->engine_clk)) + return dev_err_probe(dev, PTR_ERR(dsi->engine_clk), + "Failed to get engine clock\n"); + + + dsi->digital_clk = devm_clk_get(dev, "digital"); + if (IS_ERR(dsi->digital_clk)) + return dev_err_probe(dev, PTR_ERR(dsi->digital_clk), + "Failed to get digital clock\n"); + + dsi->hs_clk = devm_clk_get(dev, "hs"); + if (IS_ERR(dsi->hs_clk)) + return dev_err_probe(dev, PTR_ERR(dsi->hs_clk), "Failed to get hs clock\n"); + + dsi->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(dsi->regs)) + return dev_err_probe(dev, PTR_ERR(dsi->regs), "Failed to ioremap memory\n"); + + dsi->phy = devm_phy_get(dev, "dphy"); + if (IS_ERR(dsi->phy)) + return dev_err_probe(dev, PTR_ERR(dsi->phy), "Failed to get MIPI-DPHY\n"); + + irq_num = platform_get_irq(pdev, 0); + if (irq_num < 0) + return irq_num; + + dsi->host.ops = &mtk_dsi_ops; + dsi->host.dev = dev; + ret = mipi_dsi_host_register(&dsi->host); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to register DSI host\n"); + + ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq, + IRQF_TRIGGER_NONE, dev_name(&pdev->dev), dsi); + if (ret) { + mipi_dsi_host_unregister(&dsi->host); + return dev_err_probe(&pdev->dev, ret, "Failed to request DSI irq\n"); + } + + init_waitqueue_head(&dsi->irq_wait_queue); + + platform_set_drvdata(pdev, dsi); + + dsi->bridge.of_node = dev->of_node; + dsi->bridge.type = DRM_MODE_CONNECTOR_DSI; + + return 0; +} + +static void mtk_dsi_remove(struct platform_device *pdev) +{ + struct mtk_dsi *dsi = platform_get_drvdata(pdev); + + mtk_output_dsi_disable(dsi); + mipi_dsi_host_unregister(&dsi->host); +} + +static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = { + .reg_cmdq_off = 0x200, + .reg_vm_cmd_off = 0x130, + .reg_shadow_dbg_off = 0x190 +}; + +static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = { + .reg_cmdq_off = 0x180, + .reg_vm_cmd_off = 0x130, + .reg_shadow_dbg_off = 0x190 +}; + +static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = { + .reg_cmdq_off = 0x200, + .reg_vm_cmd_off = 0x130, + .reg_shadow_dbg_off = 0x190, + .has_shadow_ctl = true, + .has_size_ctl = true, +}; + +static const struct mtk_dsi_driver_data mt8186_dsi_driver_data = { + .reg_cmdq_off = 0xd00, + .reg_vm_cmd_off = 0x200, + .reg_shadow_dbg_off = 0xc00, + .has_shadow_ctl = true, + .has_size_ctl = true, +}; + +static const struct mtk_dsi_driver_data mt8188_dsi_driver_data = { + .reg_cmdq_off = 0xd00, + .reg_vm_cmd_off = 0x200, + .reg_shadow_dbg_off = 0xc00, + .has_shadow_ctl = true, + .has_size_ctl = true, + .cmdq_long_packet_ctl = true, + .support_per_frame_lp = true, +}; + +static const struct 
of_device_id mtk_dsi_of_match[] = { + { .compatible = "mediatek,mt2701-dsi", .data = &mt2701_dsi_driver_data }, + { .compatible = "mediatek,mt8173-dsi", .data = &mt8173_dsi_driver_data }, + { .compatible = "mediatek,mt8183-dsi", .data = &mt8183_dsi_driver_data }, + { .compatible = "mediatek,mt8186-dsi", .data = &mt8186_dsi_driver_data }, + { .compatible = "mediatek,mt8188-dsi", .data = &mt8188_dsi_driver_data }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mtk_dsi_of_match); + +struct platform_driver mtk_dsi_driver = { + .probe = mtk_dsi_probe, + .remove = mtk_dsi_remove, + .driver = { + .name = "mtk-dsi", + .of_match_table = mtk_dsi_of_match, + }, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c new file mode 100644 index 000000000000..96832d0cca37 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c @@ -0,0 +1,396 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 MediaTek Inc. + */ + +#include <drm/drm_blend.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_framebuffer.h> +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/soc/mediatek/mtk-cmdq.h> +#include <linux/soc/mediatek/mtk-mmsys.h> + +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" +#include "mtk_drm_drv.h" +#include "mtk_ethdr.h" + +#define MIX_INTEN 0x4 +#define MIX_FME_CPL_INTEN BIT(1) +#define MIX_INTSTA 0x8 +#define MIX_EN 0xc +#define MIX_RST 0x14 +#define MIX_ROI_SIZE 0x18 +#define MIX_DATAPATH_CON 0x1c +#define OUTPUT_NO_RND BIT(3) +#define SOURCE_RGB_SEL BIT(7) +#define BACKGROUND_RELAY (4 << 9) +#define MIX_ROI_BGCLR 0x20 +#define BGCLR_BLACK 0xff000000 +#define MIX_SRC_CON 0x24 +#define MIX_SRC_L0_EN BIT(0) +#define MIX_L_SRC_CON(n) (0x28 + 0x18 * (n)) +#define NON_PREMULTI_SOURCE (2 << 12) +#define PREMULTI_SOURCE (3 << 12) +#define MIX_L_SRC_SIZE(n) (0x30 + 0x18 * (n)) +#define MIX_L_SRC_OFFSET(n) (0x34 + 0x18 * (n)) +#define MIX_FUNC_DCM0 0x120 +#define MIX_FUNC_DCM1 0x124 +#define MIX_FUNC_DCM_ENABLE 0xffffffff + +#define HDR_VDO_FE_0804_HDR_DM_FE 0x804 +#define HDR_VDO_FE_0804_BYPASS_ALL 0xfd +#define HDR_GFX_FE_0204_GFX_HDR_FE 0x204 +#define HDR_GFX_FE_0204_BYPASS_ALL 0xfd +#define HDR_VDO_BE_0204_VDO_DM_BE 0x204 +#define HDR_VDO_BE_0204_BYPASS_ALL 0x7e + +#define MIXER_INX_MODE_BYPASS 0 +#define MIXER_INX_MODE_EVEN_EXTEND 1 +#define MIXER_ALPHA_AEN BIT(8) +#define MIXER_ALPHA 0xff +#define ETHDR_CLK_NUM 13 + +enum mtk_ethdr_comp_id { + ETHDR_MIXER, + ETHDR_VDO_FE0, + ETHDR_VDO_FE1, + ETHDR_GFX_FE0, + ETHDR_GFX_FE1, + ETHDR_VDO_BE, + ETHDR_ADL_DS, + ETHDR_ID_MAX +}; + +struct mtk_ethdr_comp { + struct device *dev; + void __iomem *regs; + struct cmdq_client_reg cmdq_base; +}; + +struct mtk_ethdr { + struct mtk_ethdr_comp ethdr_comp[ETHDR_ID_MAX]; + struct clk_bulk_data ethdr_clk[ETHDR_CLK_NUM]; + struct device *mmsys_dev; + void (*vblank_cb)(void *data); + void *vblank_cb_data; + int irq; + struct reset_control *reset_ctl; +}; + +static const char * const ethdr_clk_str[] = { + "ethdr_top", + "mixer", + "vdo_fe0", + "vdo_fe1", + "gfx_fe0", + "gfx_fe1", + "vdo_be", + "adl_ds", + "vdo_fe0_async", + "vdo_fe1_async", + "gfx_fe0_async", + "gfx_fe1_async", + "vdo_be_async", +}; + +void mtk_ethdr_register_vblank_cb(struct device *dev, + void (*vblank_cb)(void *), + void *vblank_cb_data) +{ + struct mtk_ethdr *priv = dev_get_drvdata(dev); + + priv->vblank_cb = vblank_cb; + priv->vblank_cb_data = vblank_cb_data; +} 
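/*
 * Editorial note, not part of this patch: the callback registered above is
 * invoked from mtk_ethdr_irq_handler() on every mixer frame-complete
 * interrupt, which is how the CRTC layer receives vblank events when ETHDR
 * terminates the pipeline. A consumer is expected to pair the calls roughly
 * as follows (callback and cookie names are hypothetical):
 *
 *	mtk_ethdr_register_vblank_cb(ethdr_dev, my_vblank_cb, my_crtc);
 *	mtk_ethdr_enable_vblank(ethdr_dev);
 *	...
 *	mtk_ethdr_disable_vblank(ethdr_dev);
 *	mtk_ethdr_unregister_vblank_cb(ethdr_dev);
 */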
+ +void mtk_ethdr_unregister_vblank_cb(struct device *dev) +{ + struct mtk_ethdr *priv = dev_get_drvdata(dev); + + priv->vblank_cb = NULL; + priv->vblank_cb_data = NULL; +} + +void mtk_ethdr_enable_vblank(struct device *dev) +{ + struct mtk_ethdr *priv = dev_get_drvdata(dev); + + writel(MIX_FME_CPL_INTEN, priv->ethdr_comp[ETHDR_MIXER].regs + MIX_INTEN); +} + +void mtk_ethdr_disable_vblank(struct device *dev) +{ + struct mtk_ethdr *priv = dev_get_drvdata(dev); + + writel(0x0, priv->ethdr_comp[ETHDR_MIXER].regs + MIX_INTEN); +} + +static irqreturn_t mtk_ethdr_irq_handler(int irq, void *dev_id) +{ + struct mtk_ethdr *priv = dev_id; + + writel(0x0, priv->ethdr_comp[ETHDR_MIXER].regs + MIX_INTSTA); + + if (!priv->vblank_cb) + return IRQ_NONE; + + priv->vblank_cb(priv->vblank_cb_data); + + return IRQ_HANDLED; +} + +u32 mtk_ethdr_get_blend_modes(struct device *dev) +{ + return BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE) | + BIT(DRM_MODE_BLEND_PIXEL_NONE); +} + +void mtk_ethdr_layer_config(struct device *dev, unsigned int idx, + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_ethdr *priv = dev_get_drvdata(dev); + struct mtk_ethdr_comp *mixer = &priv->ethdr_comp[ETHDR_MIXER]; + struct mtk_plane_pending_state *pending = &state->pending; + unsigned int offset = (pending->x & 1) << 31 | pending->y << 16 | pending->x; + unsigned int align_width = ALIGN_DOWN(pending->width, 2); + unsigned int alpha_con = 0; + bool replace_src_a = false; + + dev_dbg(dev, "%s+ idx:%d", __func__, idx); + + if (idx >= 4) + return; + + if (!pending->enable || !pending->width || !pending->height) { + /* + * instead of disabling layer with MIX_SRC_CON directly + * set the size to 0 to avoid screen shift due to mixer + * mode switch (hardware behavior) + */ + mtk_ddp_write(cmdq_pkt, 0, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_SIZE(idx)); + return; + } + + if (state->base.fb) { + alpha_con |= MIXER_ALPHA_AEN; + alpha_con |= state->base.alpha & MIXER_ALPHA; + } + + if (state->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) + alpha_con |= PREMULTI_SOURCE; + else + alpha_con |= NON_PREMULTI_SOURCE; + + if ((state->base.fb && !state->base.fb->format->has_alpha) || + state->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE) { + /* + * Mixer doesn't support CONST_BLD mode, + * use a trick to make the output equivalent + */ + replace_src_a = true; + } + + mtk_mmsys_mixer_in_config(priv->mmsys_dev, idx + 1, replace_src_a, + MIXER_ALPHA, + pending->x & 1 ? 
MIXER_INX_MODE_EVEN_EXTEND : + MIXER_INX_MODE_BYPASS, align_width / 2 - 1, cmdq_pkt); + + mtk_ddp_write(cmdq_pkt, pending->height << 16 | align_width, &mixer->cmdq_base, + mixer->regs, MIX_L_SRC_SIZE(idx)); + mtk_ddp_write(cmdq_pkt, offset, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_OFFSET(idx)); + mtk_ddp_write(cmdq_pkt, alpha_con, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_CON(idx)); + mtk_ddp_write_mask(cmdq_pkt, BIT(idx), &mixer->cmdq_base, mixer->regs, MIX_SRC_CON, + BIT(idx)); +} + +void mtk_ethdr_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_ethdr *priv = dev_get_drvdata(dev); + struct mtk_ethdr_comp *vdo_fe0 = &priv->ethdr_comp[ETHDR_VDO_FE0]; + struct mtk_ethdr_comp *vdo_fe1 = &priv->ethdr_comp[ETHDR_VDO_FE1]; + struct mtk_ethdr_comp *gfx_fe0 = &priv->ethdr_comp[ETHDR_GFX_FE0]; + struct mtk_ethdr_comp *gfx_fe1 = &priv->ethdr_comp[ETHDR_GFX_FE1]; + struct mtk_ethdr_comp *vdo_be = &priv->ethdr_comp[ETHDR_VDO_BE]; + struct mtk_ethdr_comp *mixer = &priv->ethdr_comp[ETHDR_MIXER]; + + dev_dbg(dev, "%s-w:%d, h:%d\n", __func__, w, h); + + mtk_ddp_write(cmdq_pkt, HDR_VDO_FE_0804_BYPASS_ALL, &vdo_fe0->cmdq_base, + vdo_fe0->regs, HDR_VDO_FE_0804_HDR_DM_FE); + + mtk_ddp_write(cmdq_pkt, HDR_VDO_FE_0804_BYPASS_ALL, &vdo_fe1->cmdq_base, + vdo_fe1->regs, HDR_VDO_FE_0804_HDR_DM_FE); + + mtk_ddp_write(cmdq_pkt, HDR_GFX_FE_0204_BYPASS_ALL, &gfx_fe0->cmdq_base, + gfx_fe0->regs, HDR_GFX_FE_0204_GFX_HDR_FE); + + mtk_ddp_write(cmdq_pkt, HDR_GFX_FE_0204_BYPASS_ALL, &gfx_fe1->cmdq_base, + gfx_fe1->regs, HDR_GFX_FE_0204_GFX_HDR_FE); + + mtk_ddp_write(cmdq_pkt, HDR_VDO_BE_0204_BYPASS_ALL, &vdo_be->cmdq_base, + vdo_be->regs, HDR_VDO_BE_0204_VDO_DM_BE); + + mtk_ddp_write(cmdq_pkt, MIX_FUNC_DCM_ENABLE, &mixer->cmdq_base, mixer->regs, MIX_FUNC_DCM0); + mtk_ddp_write(cmdq_pkt, MIX_FUNC_DCM_ENABLE, &mixer->cmdq_base, mixer->regs, MIX_FUNC_DCM1); + mtk_ddp_write(cmdq_pkt, h << 16 | w, &mixer->cmdq_base, mixer->regs, MIX_ROI_SIZE); + mtk_ddp_write(cmdq_pkt, BGCLR_BLACK, &mixer->cmdq_base, mixer->regs, MIX_ROI_BGCLR); + mtk_ddp_write(cmdq_pkt, NON_PREMULTI_SOURCE, &mixer->cmdq_base, mixer->regs, + MIX_L_SRC_CON(0)); + mtk_ddp_write(cmdq_pkt, NON_PREMULTI_SOURCE, &mixer->cmdq_base, mixer->regs, + MIX_L_SRC_CON(1)); + mtk_ddp_write(cmdq_pkt, NON_PREMULTI_SOURCE, &mixer->cmdq_base, mixer->regs, + MIX_L_SRC_CON(2)); + mtk_ddp_write(cmdq_pkt, NON_PREMULTI_SOURCE, &mixer->cmdq_base, mixer->regs, + MIX_L_SRC_CON(3)); + mtk_ddp_write(cmdq_pkt, 0x0, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_SIZE(0)); + mtk_ddp_write(cmdq_pkt, OUTPUT_NO_RND | SOURCE_RGB_SEL | BACKGROUND_RELAY, + &mixer->cmdq_base, mixer->regs, MIX_DATAPATH_CON); + mtk_ddp_write_mask(cmdq_pkt, MIX_SRC_L0_EN, &mixer->cmdq_base, mixer->regs, + MIX_SRC_CON, MIX_SRC_L0_EN); + + mtk_mmsys_hdr_config(priv->mmsys_dev, w / 2, h, cmdq_pkt); + mtk_mmsys_mixer_in_channel_swap(priv->mmsys_dev, 4, 0, cmdq_pkt); +} + +void mtk_ethdr_start(struct device *dev) +{ + struct mtk_ethdr *priv = dev_get_drvdata(dev); + struct mtk_ethdr_comp *mixer = &priv->ethdr_comp[ETHDR_MIXER]; + + writel(1, mixer->regs + MIX_EN); +} + +void mtk_ethdr_stop(struct device *dev) +{ + struct mtk_ethdr *priv = dev_get_drvdata(dev); + struct mtk_ethdr_comp *mixer = &priv->ethdr_comp[ETHDR_MIXER]; + + writel(0, mixer->regs + MIX_EN); + writel(1, mixer->regs + MIX_RST); + reset_control_reset(priv->reset_ctl); + writel(0, mixer->regs + MIX_RST); +} + +int mtk_ethdr_clk_enable(struct device *dev) +{ + int 
ret; + struct mtk_ethdr *priv = dev_get_drvdata(dev); + + ret = clk_bulk_prepare_enable(ETHDR_CLK_NUM, priv->ethdr_clk); + if (ret) + dev_err(dev, + "ethdr_clk prepare enable failed\n"); + return ret; +} + +void mtk_ethdr_clk_disable(struct device *dev) +{ + struct mtk_ethdr *priv = dev_get_drvdata(dev); + + clk_bulk_disable_unprepare(ETHDR_CLK_NUM, priv->ethdr_clk); +} + +static int mtk_ethdr_bind(struct device *dev, struct device *master, + void *data) +{ + struct mtk_ethdr *priv = dev_get_drvdata(dev); + + priv->mmsys_dev = data; + return 0; +} + +static void mtk_ethdr_unbind(struct device *dev, struct device *master, void *data) +{ +} + +static const struct component_ops mtk_ethdr_component_ops = { + .bind = mtk_ethdr_bind, + .unbind = mtk_ethdr_unbind, +}; + +static int mtk_ethdr_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_ethdr *priv; + int ret; + int i; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + for (i = 0; i < ETHDR_ID_MAX; i++) { + priv->ethdr_comp[i].dev = dev; + priv->ethdr_comp[i].regs = of_iomap(dev->of_node, i); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + ret = cmdq_dev_get_client_reg(dev, + &priv->ethdr_comp[i].cmdq_base, i); + if (ret) + dev_dbg(dev, "get mediatek,gce-client-reg fail!\n"); +#endif + dev_dbg(dev, "[DRM]regs:0x%p, node:%d\n", priv->ethdr_comp[i].regs, i); + } + + for (i = 0; i < ETHDR_CLK_NUM; i++) + priv->ethdr_clk[i].id = ethdr_clk_str[i]; + ret = devm_clk_bulk_get_optional(dev, ETHDR_CLK_NUM, priv->ethdr_clk); + if (ret) + return ret; + + priv->irq = platform_get_irq(pdev, 0); + if (priv->irq < 0) + priv->irq = 0; + + if (priv->irq) { + ret = devm_request_irq(dev, priv->irq, mtk_ethdr_irq_handler, + IRQF_TRIGGER_NONE, dev_name(dev), priv); + if (ret < 0) + return dev_err_probe(dev, ret, + "Failed to request irq %d\n", + priv->irq); + } + + priv->reset_ctl = devm_reset_control_array_get_optional_exclusive(dev); + if (IS_ERR(priv->reset_ctl)) + return dev_err_probe(dev, PTR_ERR(priv->reset_ctl), + "cannot get ethdr reset control\n"); + + platform_set_drvdata(pdev, priv); + + ret = component_add(dev, &mtk_ethdr_component_ops); + if (ret) + return dev_err_probe(dev, ret, "Failed to add component\n"); + + return 0; +} + +static void mtk_ethdr_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &mtk_ethdr_component_ops); +} + +static const struct of_device_id mtk_ethdr_driver_dt_match[] = { + { .compatible = "mediatek,mt8195-disp-ethdr"}, + {}, +}; + +MODULE_DEVICE_TABLE(of, mtk_ethdr_driver_dt_match); + +struct platform_driver mtk_ethdr_driver = { + .probe = mtk_ethdr_probe, + .remove = mtk_ethdr_remove, + .driver = { + .name = "mediatek-disp-ethdr", + .of_match_table = mtk_ethdr_driver_dt_match, + }, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.h b/drivers/gpu/drm/mediatek/mtk_ethdr.h new file mode 100644 index 000000000000..a72aeee46829 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_ethdr.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 MediaTek Inc. 
+ */ + +#ifndef __MTK_ETHDR_H__ +#define __MTK_ETHDR_H__ + +void mtk_ethdr_start(struct device *dev); +void mtk_ethdr_stop(struct device *dev); +int mtk_ethdr_clk_enable(struct device *dev); +void mtk_ethdr_clk_disable(struct device *dev); +void mtk_ethdr_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt); +u32 mtk_ethdr_get_blend_modes(struct device *dev); +void mtk_ethdr_layer_config(struct device *dev, unsigned int idx, + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt); +void mtk_ethdr_register_vblank_cb(struct device *dev, + void (*vblank_cb)(void *), + void *vblank_cb_data); +void mtk_ethdr_unregister_vblank_cb(struct device *dev); +void mtk_ethdr_enable_vblank(struct device *dev); +void mtk_ethdr_disable_vblank(struct device *dev); +#endif diff --git a/drivers/gpu/drm/mediatek/mtk_gem.c b/drivers/gpu/drm/mediatek/mtk_gem.c new file mode 100644 index 000000000000..024cc7e9036c --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_gem.c @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015 MediaTek Inc. + */ + +#include <linux/dma-buf.h> +#include <linux/vmalloc.h> + +#include <drm/drm.h> +#include <drm/drm_device.h> +#include <drm/drm_gem.h> +#include <drm/drm_gem_dma_helper.h> +#include <drm/drm_prime.h> +#include <drm/drm_print.h> + +#include "mtk_drm_drv.h" +#include "mtk_gem.h" + +static int mtk_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); + +static const struct vm_operations_struct vm_ops = { + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct drm_gem_object_funcs mtk_gem_object_funcs = { + .free = mtk_gem_free_object, + .get_sg_table = mtk_gem_prime_get_sg_table, + .vmap = mtk_gem_prime_vmap, + .vunmap = mtk_gem_prime_vunmap, + .mmap = mtk_gem_object_mmap, + .vm_ops = &vm_ops, +}; + +static struct mtk_gem_obj *mtk_gem_init(struct drm_device *dev, + unsigned long size) +{ + struct mtk_gem_obj *mtk_gem_obj; + int ret; + + size = round_up(size, PAGE_SIZE); + + if (size == 0) + return ERR_PTR(-EINVAL); + + mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL); + if (!mtk_gem_obj) + return ERR_PTR(-ENOMEM); + + mtk_gem_obj->base.funcs = &mtk_gem_object_funcs; + + ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size); + if (ret < 0) { + DRM_ERROR("failed to initialize gem object\n"); + kfree(mtk_gem_obj); + return ERR_PTR(ret); + } + + return mtk_gem_obj; +} + +struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev, + size_t size, bool alloc_kmap) +{ + struct mtk_drm_private *priv = dev->dev_private; + struct mtk_gem_obj *mtk_gem; + struct drm_gem_object *obj; + int ret; + + mtk_gem = mtk_gem_init(dev, size); + if (IS_ERR(mtk_gem)) + return ERR_CAST(mtk_gem); + + obj = &mtk_gem->base; + + mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE; + + if (!alloc_kmap) + mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; + + mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size, + &mtk_gem->dma_addr, GFP_KERNEL, + mtk_gem->dma_attrs); + if (!mtk_gem->cookie) { + DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size); + ret = -ENOMEM; + goto err_gem_free; + } + + if (alloc_kmap) + mtk_gem->kvaddr = mtk_gem->cookie; + + DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n", + mtk_gem->cookie, &mtk_gem->dma_addr, + size); + + return mtk_gem; + +err_gem_free: + drm_gem_object_release(obj); + kfree(mtk_gem); + return ERR_PTR(ret); +} + +void mtk_gem_free_object(struct drm_gem_object *obj) +{ + struct 
mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); + struct mtk_drm_private *priv = obj->dev->dev_private; + + if (mtk_gem->sg) + drm_prime_gem_destroy(obj, mtk_gem->sg); + else + dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie, + mtk_gem->dma_addr, mtk_gem->dma_attrs); + + /* release file pointer to gem object. */ + drm_gem_object_release(obj); + + kfree(mtk_gem); +} + +int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct mtk_gem_obj *mtk_gem; + int ret; + + args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + + /* + * Multiply 2 variables of different types, + * for example: args->size = args->spacing * args->height; + * may cause coverity issue with unintentional overflow. + */ + args->size = args->pitch; + args->size *= args->height; + + mtk_gem = mtk_gem_create(dev, args->size, false); + if (IS_ERR(mtk_gem)) + return PTR_ERR(mtk_gem); + + /* + * allocate a id of idr table where the obj is registered + * and handle has the id what user can see. + */ + ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle); + if (ret) + goto err_handle_create; + + /* drop reference from allocate - handle holds it now. */ + drm_gem_object_put(&mtk_gem->base); + + return 0; + +err_handle_create: + mtk_gem_free_object(&mtk_gem->base); + return ret; +} + +static int mtk_gem_object_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma) + +{ + int ret; + struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); + struct mtk_drm_private *priv = obj->dev->dev_private; + + /* + * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the + * whole buffer from the start. + */ + vma->vm_pgoff = 0; + + /* + * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear + * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). + */ + vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP); + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + + ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie, + mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs); + + return ret; +} + +/* + * Allocate a sg_table for this GEM object. + * Note: Both the table's contents, and the sg_table itself must be freed by + * the caller. + * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error. 
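+ * A typical caller releases it with sg_free_table() followed by kfree() of
+ * the table itself once it is done, as mtk_gem_prime_vmap() below does.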
+ */ +struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); + struct mtk_drm_private *priv = obj->dev->dev_private; + struct sg_table *sgt; + int ret; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return ERR_PTR(-ENOMEM); + + ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie, + mtk_gem->dma_addr, obj->size, + mtk_gem->dma_attrs); + if (ret) { + DRM_ERROR("failed to allocate sgt, %d\n", ret); + kfree(sgt); + return ERR_PTR(ret); + } + + return sgt; +} + +struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, struct sg_table *sg) +{ + struct mtk_gem_obj *mtk_gem; + + /* check if the entries in the sg_table are contiguous */ + if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) { + DRM_ERROR("sg_table is not contiguous"); + return ERR_PTR(-EINVAL); + } + + mtk_gem = mtk_gem_init(dev, attach->dmabuf->size); + if (IS_ERR(mtk_gem)) + return ERR_CAST(mtk_gem); + + mtk_gem->dma_addr = sg_dma_address(sg->sgl); + mtk_gem->sg = sg; + + return &mtk_gem->base; +} + +int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) +{ + struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); + struct sg_table *sgt = NULL; + unsigned int npages; + + if (mtk_gem->kvaddr) + goto out; + + sgt = mtk_gem_prime_get_sg_table(obj); + if (IS_ERR(sgt)) + return PTR_ERR(sgt); + + npages = obj->size >> PAGE_SHIFT; + mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL); + if (!mtk_gem->pages) { + sg_free_table(sgt); + kfree(sgt); + return -ENOMEM; + } + + drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages); + + mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); + if (!mtk_gem->kvaddr) { + sg_free_table(sgt); + kfree(sgt); + kfree(mtk_gem->pages); + return -ENOMEM; + } + sg_free_table(sgt); + kfree(sgt); + +out: + iosys_map_set_vaddr(map, mtk_gem->kvaddr); + + return 0; +} + +void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map) +{ + struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); + void *vaddr = map->vaddr; + + if (!mtk_gem->pages) + return; + + vunmap(vaddr); + mtk_gem->kvaddr = NULL; + kfree(mtk_gem->pages); +} diff --git a/drivers/gpu/drm/mediatek/mtk_gem.h b/drivers/gpu/drm/mediatek/mtk_gem.h new file mode 100644 index 000000000000..66e5f154f698 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_gem.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015 MediaTek Inc. + */ + +#ifndef _MTK_GEM_H_ +#define _MTK_GEM_H_ + +#include <drm/drm_gem.h> + +/* + * mtk drm buffer structure. + * + * @base: a gem object. + * - a new handle to this gem object would be created + * by drm_gem_handle_create(). + * @cookie: the return value of dma_alloc_attrs(), keep it for dma_free_attrs() + * @kvaddr: kernel virtual address of gem buffer. + * @dma_addr: dma address of gem buffer. + * @dma_attrs: dma attributes of gem buffer. + * + * P.S. this object would be transferred to user as kms_bo.handle so + * user can access the buffer through kms_bo.handle. 
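+ *
+ * @sg: sg_table of an imported PRIME buffer, NULL for buffers allocated by
+ *      this driver (checked in mtk_gem_free_object() to pick the release path).
+ * @pages: page array backing the kernel mapping created by mtk_gem_prime_vmap().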
+ */ +struct mtk_gem_obj { + struct drm_gem_object base; + void *cookie; + void *kvaddr; + dma_addr_t dma_addr; + unsigned long dma_attrs; + struct sg_table *sg; + struct page **pages; +}; + +#define to_mtk_gem_obj(x) container_of(x, struct mtk_gem_obj, base) + +void mtk_gem_free_object(struct drm_gem_object *gem); +struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev, size_t size, + bool alloc_kmap); +int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, + struct drm_mode_create_dumb *args); +struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, struct sg_table *sg); +int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); +void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map); + +#endif diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c new file mode 100644 index 000000000000..0face4dcaa36 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -0,0 +1,1249 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Jie Qiu <jie.qiu@mediatek.com> + */ + +#include <linux/arm-smccc.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/hdmi.h> +#include <linux/i2c.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of_platform.h> +#include <linux/of.h> +#include <linux/of_graph.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <sound/hdmi-codec.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_crtc.h> +#include <drm/drm_edid.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> + +#include "mtk_cec.h" +#include "mtk_hdmi_common.h" +#include "mtk_hdmi_regs.h" + +#define NCTS_BYTES 7 + +enum mtk_hdmi_clk_id { + MTK_HDMI_CLK_HDMI_PIXEL, + MTK_HDMI_CLK_HDMI_PLL, + MTK_HDMI_CLK_AUD_BCLK, + MTK_HDMI_CLK_AUD_SPDIF, + MTK_HDMI_CLK_COUNT +}; + +static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black) +{ + regmap_update_bits(hdmi->regs, VIDEO_CFG_4, + VIDEO_SOURCE_SEL, black ? GEN_RGB : NORMAL_PATH); +} + +static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable) +{ + struct arm_smccc_res res; + + /* + * MT8173 HDMI hardware has an output control bit to enable/disable HDMI + * output. This bit can only be controlled in ARM supervisor mode. + * The ARM trusted firmware provides an API for the HDMI driver to set + * this control bit to enable HDMI output in supervisor mode. + */ + if (hdmi->conf && hdmi->conf->tz_disabled) + regmap_update_bits(hdmi->sys_regmap, + hdmi->sys_offset + HDMI_SYS_CFG20, + 0x80008005, enable ? 0x80000005 : 0x8000); + else + arm_smccc_smc(MTK_SIP_SET_AUTHORIZED_SECURE_REG, 0x14000904, + 0x80000000, 0, 0, 0, 0, 0, &res); + + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20, + HDMI_PCLK_FREE_RUN, enable ? HDMI_PCLK_FREE_RUN : 0); + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C, + HDMI_ON | ANLG_ON, enable ? (HDMI_ON | ANLG_ON) : 0); +} + +static void mtk_hdmi_hw_1p4_version_enable(struct mtk_hdmi *hdmi, bool enable) +{ + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20, + HDMI2P0_EN, enable ? 
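+			   /* clearing HDMI2P0_EN selects the HDMI 1.4 datapath */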
0 : HDMI2P0_EN); +} + +static void mtk_hdmi_hw_aud_mute(struct mtk_hdmi *hdmi) +{ + regmap_set_bits(hdmi->regs, GRL_AUDIO_CFG, AUDIO_ZERO); +} + +static void mtk_hdmi_hw_aud_unmute(struct mtk_hdmi *hdmi) +{ + regmap_clear_bits(hdmi->regs, GRL_AUDIO_CFG, AUDIO_ZERO); +} + +static void mtk_hdmi_hw_reset(struct mtk_hdmi *hdmi) +{ + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C, + HDMI_RST, HDMI_RST); + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C, + HDMI_RST, 0); + regmap_clear_bits(hdmi->regs, GRL_CFG3, CFG3_CONTROL_PACKET_DELAY); + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C, + ANLG_ON, ANLG_ON); +} + +static void mtk_hdmi_hw_enable_notice(struct mtk_hdmi *hdmi, bool enable_notice) +{ + regmap_update_bits(hdmi->regs, GRL_CFG2, CFG2_NOTICE_EN, + enable_notice ? CFG2_NOTICE_EN : 0); +} + +static void mtk_hdmi_hw_write_int_mask(struct mtk_hdmi *hdmi, u32 int_mask) +{ + regmap_write(hdmi->regs, GRL_INT_MASK, int_mask); +} + +static void mtk_hdmi_hw_enable_dvi_mode(struct mtk_hdmi *hdmi, bool enable) +{ + regmap_update_bits(hdmi->regs, GRL_CFG1, CFG1_DVI, enable ? CFG1_DVI : 0); +} + +static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer, + u8 len) +{ + u32 ctrl_reg = GRL_CTRL; + int i; + u8 *frame_data; + enum hdmi_infoframe_type frame_type; + u8 frame_ver; + u8 frame_len; + u8 checksum; + int ctrl_frame_en = 0; + + frame_type = *buffer++; + frame_ver = *buffer++; + frame_len = *buffer++; + checksum = *buffer++; + frame_data = buffer; + + dev_dbg(hdmi->dev, + "frame_type:0x%x,frame_ver:0x%x,frame_len:0x%x,checksum:0x%x\n", + frame_type, frame_ver, frame_len, checksum); + + switch (frame_type) { + case HDMI_INFOFRAME_TYPE_AVI: + ctrl_frame_en = CTRL_AVI_EN; + ctrl_reg = GRL_CTRL; + break; + case HDMI_INFOFRAME_TYPE_SPD: + ctrl_frame_en = CTRL_SPD_EN; + ctrl_reg = GRL_CTRL; + break; + case HDMI_INFOFRAME_TYPE_AUDIO: + ctrl_frame_en = CTRL_AUDIO_EN; + ctrl_reg = GRL_CTRL; + break; + case HDMI_INFOFRAME_TYPE_VENDOR: + ctrl_frame_en = VS_EN; + ctrl_reg = GRL_ACP_ISRC_CTRL; + break; + default: + dev_err(hdmi->dev, "Unknown infoframe type %d\n", frame_type); + return; + } + regmap_clear_bits(hdmi->regs, ctrl_reg, ctrl_frame_en); + regmap_write(hdmi->regs, GRL_INFOFRM_TYPE, frame_type); + regmap_write(hdmi->regs, GRL_INFOFRM_VER, frame_ver); + regmap_write(hdmi->regs, GRL_INFOFRM_LNG, frame_len); + + regmap_write(hdmi->regs, GRL_IFM_PORT, checksum); + for (i = 0; i < frame_len; i++) + regmap_write(hdmi->regs, GRL_IFM_PORT, frame_data[i]); + + regmap_set_bits(hdmi->regs, ctrl_reg, ctrl_frame_en); +} + +static void mtk_hdmi_hw_send_aud_packet(struct mtk_hdmi *hdmi, bool enable) +{ + regmap_update_bits(hdmi->regs, GRL_SHIFT_R2, + AUDIO_PACKET_OFF, enable ? 
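+			   /* AUDIO_PACKET_OFF is a disable bit: clear it to send audio packets */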
0 : AUDIO_PACKET_OFF); +} + +static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi) +{ + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20, + HDMI_OUT_FIFO_EN | MHL_MODE_ON, 0); + usleep_range(2000, 4000); + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20, + HDMI_OUT_FIFO_EN | MHL_MODE_ON, HDMI_OUT_FIFO_EN); +} + +static void mtk_hdmi_hw_set_deep_color_mode(struct mtk_hdmi *hdmi) +{ + regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20, + DEEP_COLOR_MODE_MASK | DEEP_COLOR_EN, + COLOR_8BIT_MODE); +} + +static void mtk_hdmi_hw_send_av_mute(struct mtk_hdmi *hdmi) +{ + regmap_clear_bits(hdmi->regs, GRL_CFG4, CTRL_AVMUTE); + usleep_range(2000, 4000); + regmap_set_bits(hdmi->regs, GRL_CFG4, CTRL_AVMUTE); +} + +static void mtk_hdmi_hw_send_av_unmute(struct mtk_hdmi *hdmi) +{ + regmap_update_bits(hdmi->regs, GRL_CFG4, CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET, + CFG4_AV_UNMUTE_EN); + usleep_range(2000, 4000); + regmap_update_bits(hdmi->regs, GRL_CFG4, CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET, + CFG4_AV_UNMUTE_SET); +} + +static void mtk_hdmi_hw_ncts_enable(struct mtk_hdmi *hdmi, bool on) +{ + regmap_update_bits(hdmi->regs, GRL_CTS_CTRL, CTS_CTRL_SOFT, + on ? 0 : CTS_CTRL_SOFT); +} + +static void mtk_hdmi_hw_ncts_auto_write_enable(struct mtk_hdmi *hdmi, + bool enable) +{ + regmap_update_bits(hdmi->regs, GRL_CTS_CTRL, NCTS_WRI_ANYTIME, + enable ? NCTS_WRI_ANYTIME : 0); +} + +static void mtk_hdmi_hw_msic_setting(struct mtk_hdmi *hdmi, + struct drm_display_mode *mode) +{ + regmap_clear_bits(hdmi->regs, GRL_CFG4, CFG4_MHL_MODE); + + if (mode->flags & DRM_MODE_FLAG_INTERLACE && + mode->clock == 74250 && + mode->vdisplay == 1080) + regmap_clear_bits(hdmi->regs, GRL_CFG2, CFG2_MHL_DE_SEL); + else + regmap_set_bits(hdmi->regs, GRL_CFG2, CFG2_MHL_DE_SEL); +} + +static void mtk_hdmi_hw_aud_set_channel_swap(struct mtk_hdmi *hdmi, + enum hdmi_aud_channel_swap_type swap) +{ + u8 swap_bit; + + switch (swap) { + case HDMI_AUD_SWAP_LR: + swap_bit = LR_SWAP; + break; + case HDMI_AUD_SWAP_LFE_CC: + swap_bit = LFE_CC_SWAP; + break; + case HDMI_AUD_SWAP_LSRS: + swap_bit = LSRS_SWAP; + break; + case HDMI_AUD_SWAP_RLS_RRS: + swap_bit = RLS_RRS_SWAP; + break; + case HDMI_AUD_SWAP_LR_STATUS: + swap_bit = LR_STATUS_SWAP; + break; + default: + swap_bit = LFE_CC_SWAP; + break; + } + regmap_update_bits(hdmi->regs, GRL_CH_SWAP, 0xff, swap_bit); +} + +static void mtk_hdmi_hw_aud_set_bit_num(struct mtk_hdmi *hdmi, + enum hdmi_audio_sample_size bit_num) +{ + u32 val; + + switch (bit_num) { + case HDMI_AUDIO_SAMPLE_SIZE_16: + val = AOUT_16BIT; + break; + case HDMI_AUDIO_SAMPLE_SIZE_20: + val = AOUT_20BIT; + break; + case HDMI_AUDIO_SAMPLE_SIZE_24: + case HDMI_AUDIO_SAMPLE_SIZE_STREAM: + val = AOUT_24BIT; + break; + } + + regmap_update_bits(hdmi->regs, GRL_AOUT_CFG, AOUT_BNUM_SEL_MASK, val); +} + +static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi, + enum hdmi_aud_i2s_fmt i2s_fmt) +{ + u32 val; + + regmap_read(hdmi->regs, GRL_CFG0, &val); + val &= ~(CFG0_W_LENGTH_MASK | CFG0_I2S_MODE_MASK); + + switch (i2s_fmt) { + case HDMI_I2S_MODE_RJT_24BIT: + val |= CFG0_I2S_MODE_RTJ | CFG0_W_LENGTH_24BIT; + break; + case HDMI_I2S_MODE_RJT_16BIT: + val |= CFG0_I2S_MODE_RTJ | CFG0_W_LENGTH_16BIT; + break; + case HDMI_I2S_MODE_LJT_24BIT: + default: + val |= CFG0_I2S_MODE_LTJ | CFG0_W_LENGTH_24BIT; + break; + case HDMI_I2S_MODE_LJT_16BIT: + val |= CFG0_I2S_MODE_LTJ | CFG0_W_LENGTH_16BIT; + break; + case HDMI_I2S_MODE_I2S_24BIT: + val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_24BIT; 
+ break; + case HDMI_I2S_MODE_I2S_16BIT: + val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_16BIT; + break; + } + regmap_write(hdmi->regs, GRL_CFG0, val); +} + +static void mtk_hdmi_hw_audio_config(struct mtk_hdmi *hdmi, bool dst) +{ + const u8 mask = HIGH_BIT_RATE | DST_NORMAL_DOUBLE | SACD_DST | DSD_SEL; + u8 val; + + /* Disable high bitrate, set DST packet normal/double */ + regmap_clear_bits(hdmi->regs, GRL_AOUT_CFG, HIGH_BIT_RATE_PACKET_ALIGN); + + if (dst) + val = DST_NORMAL_DOUBLE | SACD_DST; + else + val = 0; + + regmap_update_bits(hdmi->regs, GRL_AUDIO_CFG, mask, val); +} + +static void mtk_hdmi_hw_aud_set_i2s_chan_num(struct mtk_hdmi *hdmi, + enum hdmi_aud_channel_type channel_type, + u8 channel_count) +{ + unsigned int ch_switch; + u8 i2s_uv; + + ch_switch = CH_SWITCH(7, 7) | CH_SWITCH(6, 6) | + CH_SWITCH(5, 5) | CH_SWITCH(4, 4) | + CH_SWITCH(3, 3) | CH_SWITCH(1, 2) | + CH_SWITCH(2, 1) | CH_SWITCH(0, 0); + + if (channel_count == 2) { + i2s_uv = I2S_UV_CH_EN(0); + } else if (channel_count == 3 || channel_count == 4) { + if (channel_count == 4 && + (channel_type == HDMI_AUD_CHAN_TYPE_3_0_LRS || + channel_type == HDMI_AUD_CHAN_TYPE_4_0)) + i2s_uv = I2S_UV_CH_EN(2) | I2S_UV_CH_EN(0); + else + i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2); + } else if (channel_count == 6 || channel_count == 5) { + if (channel_count == 6 && + channel_type != HDMI_AUD_CHAN_TYPE_5_1 && + channel_type != HDMI_AUD_CHAN_TYPE_4_1_CLRS) { + i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2) | + I2S_UV_CH_EN(1) | I2S_UV_CH_EN(0); + } else { + i2s_uv = I2S_UV_CH_EN(2) | I2S_UV_CH_EN(1) | + I2S_UV_CH_EN(0); + } + } else if (channel_count == 8 || channel_count == 7) { + i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2) | + I2S_UV_CH_EN(1) | I2S_UV_CH_EN(0); + } else { + i2s_uv = I2S_UV_CH_EN(0); + } + + regmap_write(hdmi->regs, GRL_CH_SW0, ch_switch & 0xff); + regmap_write(hdmi->regs, GRL_CH_SW1, (ch_switch >> 8) & 0xff); + regmap_write(hdmi->regs, GRL_CH_SW2, (ch_switch >> 16) & 0xff); + regmap_write(hdmi->regs, GRL_I2S_UV, i2s_uv); +} + +static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi, + enum hdmi_aud_input_type input_type) +{ + u32 val; + + regmap_read(hdmi->regs, GRL_CFG1, &val); + if (input_type == HDMI_AUD_INPUT_I2S && + (val & CFG1_SPDIF) == CFG1_SPDIF) { + val &= ~CFG1_SPDIF; + } else if (input_type == HDMI_AUD_INPUT_SPDIF && + (val & CFG1_SPDIF) == 0) { + val |= CFG1_SPDIF; + } + regmap_write(hdmi->regs, GRL_CFG1, val); +} + +static void mtk_hdmi_hw_aud_set_channel_status(struct mtk_hdmi *hdmi, + u8 *channel_status) +{ + int i; + + for (i = 0; i < 5; i++) { + regmap_write(hdmi->regs, GRL_I2S_C_STA0 + i * 4, channel_status[i]); + regmap_write(hdmi->regs, GRL_L_STATUS_0 + i * 4, channel_status[i]); + regmap_write(hdmi->regs, GRL_R_STATUS_0 + i * 4, channel_status[i]); + } + for (; i < 24; i++) { + regmap_write(hdmi->regs, GRL_L_STATUS_0 + i * 4, 0); + regmap_write(hdmi->regs, GRL_R_STATUS_0 + i * 4, 0); + } +} + +static void mtk_hdmi_hw_aud_src_reenable(struct mtk_hdmi *hdmi) +{ + u32 val; + + regmap_read(hdmi->regs, GRL_MIX_CTRL, &val); + if (val & MIX_CTRL_SRC_EN) { + val &= ~MIX_CTRL_SRC_EN; + regmap_write(hdmi->regs, GRL_MIX_CTRL, val); + usleep_range(255, 512); + val |= MIX_CTRL_SRC_EN; + regmap_write(hdmi->regs, GRL_MIX_CTRL, val); + } +} + +static void mtk_hdmi_hw_aud_src_disable(struct mtk_hdmi *hdmi) +{ + u32 val; + + regmap_read(hdmi->regs, GRL_MIX_CTRL, &val); + val &= ~MIX_CTRL_SRC_EN; + regmap_write(hdmi->regs, GRL_MIX_CTRL, val); + regmap_write(hdmi->regs, GRL_SHIFT_L1, 0x00); +} + +static void 
mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi, + enum hdmi_aud_mclk mclk) +{ + u32 val; + + regmap_read(hdmi->regs, GRL_CFG5, &val); + val &= CFG5_CD_RATIO_MASK; + + switch (mclk) { + case HDMI_AUD_MCLK_128FS: + val |= CFG5_FS128; + break; + case HDMI_AUD_MCLK_256FS: + val |= CFG5_FS256; + break; + case HDMI_AUD_MCLK_384FS: + val |= CFG5_FS384; + break; + case HDMI_AUD_MCLK_512FS: + val |= CFG5_FS512; + break; + case HDMI_AUD_MCLK_768FS: + val |= CFG5_FS768; + break; + default: + val |= CFG5_FS256; + break; + } + regmap_write(hdmi->regs, GRL_CFG5, val); +} + +static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n, + unsigned int cts) +{ + unsigned char val[NCTS_BYTES]; + int i; + + regmap_write(hdmi->regs, GRL_NCTS, 0); + regmap_write(hdmi->regs, GRL_NCTS, 0); + regmap_write(hdmi->regs, GRL_NCTS, 0); + memset(val, 0, sizeof(val)); + + val[0] = (cts >> 24) & 0xff; + val[1] = (cts >> 16) & 0xff; + val[2] = (cts >> 8) & 0xff; + val[3] = cts & 0xff; + + val[4] = (n >> 16) & 0xff; + val[5] = (n >> 8) & 0xff; + val[6] = n & 0xff; + + for (i = 0; i < NCTS_BYTES; i++) + regmap_write(hdmi->regs, GRL_NCTS, val[i]); +} + +static void mtk_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, + unsigned int sample_rate, + unsigned int clock) +{ + unsigned int n, cts; + + mtk_hdmi_get_ncts(sample_rate, clock, &n, &cts); + + dev_dbg(hdmi->dev, "%s: sample_rate=%u, clock=%d, cts=%u, n=%u\n", + __func__, sample_rate, clock, n, cts); + + regmap_update_bits(hdmi->regs, DUMMY_304, AUDIO_I2S_NCTS_SEL, AUDIO_I2S_NCTS_SEL_64); + do_hdmi_hw_aud_set_ncts(hdmi, n, cts); +} + +static u8 mtk_hdmi_aud_get_chnl_count(enum hdmi_aud_channel_type channel_type) +{ + switch (channel_type) { + case HDMI_AUD_CHAN_TYPE_1_0: + case HDMI_AUD_CHAN_TYPE_1_1: + case HDMI_AUD_CHAN_TYPE_2_0: + return 2; + case HDMI_AUD_CHAN_TYPE_2_1: + case HDMI_AUD_CHAN_TYPE_3_0: + return 3; + case HDMI_AUD_CHAN_TYPE_3_1: + case HDMI_AUD_CHAN_TYPE_4_0: + case HDMI_AUD_CHAN_TYPE_3_0_LRS: + return 4; + case HDMI_AUD_CHAN_TYPE_4_1: + case HDMI_AUD_CHAN_TYPE_5_0: + case HDMI_AUD_CHAN_TYPE_3_1_LRS: + case HDMI_AUD_CHAN_TYPE_4_0_CLRS: + return 5; + case HDMI_AUD_CHAN_TYPE_5_1: + case HDMI_AUD_CHAN_TYPE_6_0: + case HDMI_AUD_CHAN_TYPE_4_1_CLRS: + case HDMI_AUD_CHAN_TYPE_6_0_CS: + case HDMI_AUD_CHAN_TYPE_6_0_CH: + case HDMI_AUD_CHAN_TYPE_6_0_OH: + case HDMI_AUD_CHAN_TYPE_6_0_CHR: + return 6; + case HDMI_AUD_CHAN_TYPE_6_1: + case HDMI_AUD_CHAN_TYPE_6_1_CS: + case HDMI_AUD_CHAN_TYPE_6_1_CH: + case HDMI_AUD_CHAN_TYPE_6_1_OH: + case HDMI_AUD_CHAN_TYPE_6_1_CHR: + case HDMI_AUD_CHAN_TYPE_7_0: + case HDMI_AUD_CHAN_TYPE_7_0_LH_RH: + case HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR: + case HDMI_AUD_CHAN_TYPE_7_0_LC_RC: + case HDMI_AUD_CHAN_TYPE_7_0_LW_RW: + case HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD: + case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS: + case HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS: + case HDMI_AUD_CHAN_TYPE_7_0_CS_CH: + case HDMI_AUD_CHAN_TYPE_7_0_CS_OH: + case HDMI_AUD_CHAN_TYPE_7_0_CS_CHR: + case HDMI_AUD_CHAN_TYPE_7_0_CH_OH: + case HDMI_AUD_CHAN_TYPE_7_0_CH_CHR: + case HDMI_AUD_CHAN_TYPE_7_0_OH_CHR: + case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR: + case HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS: + return 7; + case HDMI_AUD_CHAN_TYPE_7_1: + case HDMI_AUD_CHAN_TYPE_7_1_LH_RH: + case HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR: + case HDMI_AUD_CHAN_TYPE_7_1_LC_RC: + case HDMI_AUD_CHAN_TYPE_7_1_LW_RW: + case HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD: + case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS: + case HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS: + case HDMI_AUD_CHAN_TYPE_7_1_CS_CH: + case HDMI_AUD_CHAN_TYPE_7_1_CS_OH: + case 
HDMI_AUD_CHAN_TYPE_7_1_CS_CHR: + case HDMI_AUD_CHAN_TYPE_7_1_CH_OH: + case HDMI_AUD_CHAN_TYPE_7_1_CH_CHR: + case HDMI_AUD_CHAN_TYPE_7_1_OH_CHR: + case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR: + return 8; + default: + return 2; + } +} + +static int mtk_hdmi_video_change_vpll(struct mtk_hdmi *hdmi, u32 clock) +{ + unsigned long rate; + int ret; + + /* The DPI driver already should have set TVDPLL to the correct rate */ + ret = clk_set_rate(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL], clock); + if (ret) { + dev_err(hdmi->dev, "Failed to set PLL to %u Hz: %d\n", clock, + ret); + return ret; + } + + rate = clk_get_rate(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]); + + if (DIV_ROUND_CLOSEST(rate, 1000) != DIV_ROUND_CLOSEST(clock, 1000)) + dev_warn(hdmi->dev, "Want PLL %u Hz, got %lu Hz\n", clock, + rate); + else + dev_dbg(hdmi->dev, "Want PLL %u Hz, got %lu Hz\n", clock, rate); + + mtk_hdmi_hw_config_sys(hdmi); + mtk_hdmi_hw_set_deep_color_mode(hdmi); + return 0; +} + +static void mtk_hdmi_video_set_display_mode(struct mtk_hdmi *hdmi, + struct drm_display_mode *mode) +{ + mtk_hdmi_hw_reset(hdmi); + mtk_hdmi_hw_enable_notice(hdmi, true); + mtk_hdmi_hw_write_int_mask(hdmi, 0xff); + mtk_hdmi_hw_enable_dvi_mode(hdmi, hdmi->dvi_mode); + mtk_hdmi_hw_ncts_auto_write_enable(hdmi, true); + + mtk_hdmi_hw_msic_setting(hdmi, mode); +} + + +static void mtk_hdmi_aud_set_input(struct mtk_hdmi *hdmi) +{ + enum hdmi_aud_channel_type chan_type; + u8 chan_count; + bool dst; + + mtk_hdmi_hw_aud_set_channel_swap(hdmi, HDMI_AUD_SWAP_LFE_CC); + regmap_set_bits(hdmi->regs, GRL_MIX_CTRL, MIX_CTRL_FLAT); + + if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF && + hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST) { + mtk_hdmi_hw_aud_set_bit_num(hdmi, HDMI_AUDIO_SAMPLE_SIZE_24); + } else if (hdmi->aud_param.aud_i2s_fmt == HDMI_I2S_MODE_LJT_24BIT) { + hdmi->aud_param.aud_i2s_fmt = HDMI_I2S_MODE_LJT_16BIT; + } + + mtk_hdmi_hw_aud_set_i2s_fmt(hdmi, hdmi->aud_param.aud_i2s_fmt); + mtk_hdmi_hw_aud_set_bit_num(hdmi, HDMI_AUDIO_SAMPLE_SIZE_24); + + dst = ((hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF) && + (hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST)); + mtk_hdmi_hw_audio_config(hdmi, dst); + + if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF) + chan_type = HDMI_AUD_CHAN_TYPE_2_0; + else + chan_type = hdmi->aud_param.aud_input_chan_type; + chan_count = mtk_hdmi_aud_get_chnl_count(chan_type); + mtk_hdmi_hw_aud_set_i2s_chan_num(hdmi, chan_type, chan_count); + mtk_hdmi_hw_aud_set_input_type(hdmi, hdmi->aud_param.aud_input_type); +} + +static int mtk_hdmi_aud_set_src(struct mtk_hdmi *hdmi, + struct drm_display_mode *display_mode) +{ + unsigned int sample_rate = hdmi->aud_param.codec_params.sample_rate; + + mtk_hdmi_hw_ncts_enable(hdmi, false); + mtk_hdmi_hw_aud_src_disable(hdmi); + regmap_clear_bits(hdmi->regs, GRL_CFG2, CFG2_ACLK_INV); + + if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_I2S) { + switch (sample_rate) { + case 32000: + case 44100: + case 48000: + case 88200: + case 96000: + break; + default: + return -EINVAL; + } + mtk_hdmi_hw_aud_set_mclk(hdmi, hdmi->aud_param.aud_mclk); + } else { + switch (sample_rate) { + case 32000: + case 44100: + case 48000: + break; + default: + return -EINVAL; + } + mtk_hdmi_hw_aud_set_mclk(hdmi, HDMI_AUD_MCLK_128FS); + } + + mtk_hdmi_hw_aud_set_ncts(hdmi, sample_rate, display_mode->clock); + + mtk_hdmi_hw_aud_src_reenable(hdmi); + return 0; +} + +static int mtk_hdmi_aud_output_config(struct mtk_hdmi *hdmi, + struct drm_display_mode *display_mode) +{ + 
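+	/*
+	 * Reconfigure audio with the output muted: mute and stop audio
+	 * packets, program the input format, audio source and channel
+	 * status, then re-enable N/CTS generation and the packets before
+	 * unmuting.
+	 *
+	 * N/CTS follows the HDMI spec recommendations, e.g. a 48 kHz
+	 * stream on a 74.176 MHz TMDS clock uses N = 11648, giving
+	 * CTS = DIV_ROUND_CLOSEST(74175824 * 11648, 128 * 48000) = 140625.
+	 */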
mtk_hdmi_hw_aud_mute(hdmi); + mtk_hdmi_hw_send_aud_packet(hdmi, false); + + mtk_hdmi_aud_set_input(hdmi); + mtk_hdmi_aud_set_src(hdmi, display_mode); + mtk_hdmi_hw_aud_set_channel_status(hdmi, + hdmi->aud_param.codec_params.iec.status); + + usleep_range(50, 100); + + mtk_hdmi_hw_ncts_enable(hdmi, true); + mtk_hdmi_hw_send_aud_packet(hdmi, true); + mtk_hdmi_hw_aud_unmute(hdmi); + return 0; +} + +static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi, + struct drm_display_mode *mode) +{ + struct hdmi_avi_infoframe frame; + u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; + ssize_t err; + + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, + hdmi->curr_conn, mode); + if (err < 0) { + dev_err(hdmi->dev, + "Failed to get AVI infoframe from mode: %zd\n", err); + return err; + } + + err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (err < 0) { + dev_err(hdmi->dev, "Failed to pack AVI infoframe: %zd\n", err); + return err; + } + + mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer)); + return 0; +} + +static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi) +{ + struct drm_bridge *bridge = &hdmi->bridge; + struct hdmi_spd_infoframe frame; + u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_SPD_INFOFRAME_SIZE]; + ssize_t err; + + err = hdmi_spd_infoframe_init(&frame, bridge->vendor, bridge->product); + if (err < 0) { + dev_err(hdmi->dev, "Failed to initialize SPD infoframe: %zd\n", + err); + return err; + } + + err = hdmi_spd_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (err < 0) { + dev_err(hdmi->dev, "Failed to pack SDP infoframe: %zd\n", err); + return err; + } + + mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer)); + return 0; +} + +static int mtk_hdmi_setup_audio_infoframe(struct mtk_hdmi *hdmi) +{ + struct hdmi_audio_infoframe frame; + u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE]; + ssize_t err; + + err = hdmi_audio_infoframe_init(&frame); + if (err < 0) { + dev_err(hdmi->dev, "Failed to setup audio infoframe: %zd\n", + err); + return err; + } + + frame.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM; + frame.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM; + frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM; + frame.channels = mtk_hdmi_aud_get_chnl_count( + hdmi->aud_param.aud_input_chan_type); + + err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (err < 0) { + dev_err(hdmi->dev, "Failed to pack audio infoframe: %zd\n", + err); + return err; + } + + mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer)); + return 0; +} + +static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi, + struct drm_display_mode *mode) +{ + struct hdmi_vendor_infoframe frame; + u8 buffer[10]; + ssize_t err; + + err = drm_hdmi_vendor_infoframe_from_display_mode(&frame, + hdmi->curr_conn, mode); + if (err) { + dev_err(hdmi->dev, + "Failed to get vendor infoframe from mode: %zd\n", err); + return err; + } + + err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (err < 0) { + dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n", + err); + return err; + } + + mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer)); + return 0; +} + +static void mtk_hdmi_audio_enable(struct mtk_hdmi *hdmi) +{ + mtk_hdmi_hw_send_aud_packet(hdmi, true); + hdmi->audio_enable = true; +} + +static void mtk_hdmi_audio_disable(struct mtk_hdmi *hdmi) +{ + mtk_hdmi_hw_send_aud_packet(hdmi, false); + hdmi->audio_enable = false; +} + +static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi 
*hdmi, + struct drm_display_mode *mode) +{ + int ret; + + mtk_hdmi_hw_vid_black(hdmi, true); + mtk_hdmi_hw_aud_mute(hdmi); + mtk_hdmi_hw_send_av_mute(hdmi); + phy_power_off(hdmi->phy); + + ret = mtk_hdmi_video_change_vpll(hdmi, + mode->clock * 1000); + if (ret) { + dev_err(hdmi->dev, "Failed to set vpll: %d\n", ret); + return ret; + } + mtk_hdmi_video_set_display_mode(hdmi, mode); + + phy_power_on(hdmi->phy); + mtk_hdmi_aud_output_config(hdmi, mode); + + mtk_hdmi_hw_vid_black(hdmi, false); + mtk_hdmi_hw_aud_unmute(hdmi); + mtk_hdmi_hw_send_av_unmute(hdmi); + + return 0; +} + +static const char * const mtk_hdmi_clk_names[MTK_HDMI_CLK_COUNT] = { + [MTK_HDMI_CLK_HDMI_PIXEL] = "pixel", + [MTK_HDMI_CLK_HDMI_PLL] = "pll", + [MTK_HDMI_CLK_AUD_BCLK] = "bclk", + [MTK_HDMI_CLK_AUD_SPDIF] = "spdif", +}; + +static int mtk_hdmi_clk_enable_audio(struct mtk_hdmi *hdmi) +{ + int ret; + + ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]); + if (ret) + return ret; + + ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]); + if (ret) { + clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]); + return ret; + } + + return 0; +} + +static void mtk_hdmi_clk_disable_audio(struct mtk_hdmi *hdmi) +{ + clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]); + clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]); +} + +static enum drm_connector_status +mtk_hdmi_update_plugged_status(struct mtk_hdmi *hdmi) +{ + bool connected; + + mutex_lock(&hdmi->update_plugged_status_lock); + connected = mtk_cec_hpd_high(hdmi->cec_dev); + if (hdmi->plugged_cb && hdmi->codec_dev) + hdmi->plugged_cb(hdmi->codec_dev, connected); + mutex_unlock(&hdmi->update_plugged_status_lock); + + return connected ? + connector_status_connected : connector_status_disconnected; +} + +static enum drm_connector_status mtk_hdmi_detect(struct mtk_hdmi *hdmi) +{ + return mtk_hdmi_update_plugged_status(hdmi); +} + +static enum drm_mode_status +mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n", + mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode), + !!(mode->flags & DRM_MODE_FLAG_INTERLACE), mode->clock * 1000); + + if (hdmi->conf) { + if (hdmi->conf->cea_modes_only && !drm_match_cea_mode(mode)) + return MODE_BAD; + + if (hdmi->conf->max_mode_clock && + mode->clock > hdmi->conf->max_mode_clock) + return MODE_CLOCK_HIGH; + } + + if (mode->clock < 27000) + return MODE_CLOCK_LOW; + if (mode->clock > 297000) + return MODE_CLOCK_HIGH; + + return drm_mode_validate_size(mode, 0x1fff, 0x1fff); +} + +static void mtk_hdmi_hpd_event(bool hpd, struct device *dev) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev) { + static enum drm_connector_status status; + + status = mtk_hdmi_detect(hdmi); + drm_helper_hpd_irq_event(hdmi->bridge.encoder->dev); + drm_bridge_hpd_notify(&hdmi->bridge, status); + } +} + +/* + * Bridge callbacks + */ + +static enum drm_connector_status +mtk_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + return mtk_hdmi_detect(hdmi); +} + +static const struct drm_edid *mtk_hdmi_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + const struct drm_edid *drm_edid; + + if 
(!hdmi->ddc_adpt) + return NULL; + drm_edid = drm_edid_read_ddc(connector, hdmi->ddc_adpt); + if (drm_edid) { + /* + * FIXME: This should use !connector->display_info.has_audio (or + * !connector->display_info.is_hdmi) from a path that has read + * the EDID and called drm_edid_connector_update(). + */ + const struct edid *edid = drm_edid_raw(drm_edid); + + hdmi->dvi_mode = !drm_detect_monitor_audio(edid); + } + + return drm_edid; +} + +static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, + enum drm_bridge_attach_flags flags) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + int ret; + + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { + DRM_ERROR("%s: The flag DRM_BRIDGE_ATTACH_NO_CONNECTOR must be supplied\n", + __func__); + return -EINVAL; + } + + if (hdmi->next_bridge) { + ret = drm_bridge_attach(encoder, hdmi->next_bridge, + bridge, flags); + if (ret) + return ret; + } + + mtk_cec_set_hpd_event(hdmi->cec_dev, mtk_hdmi_hpd_event, hdmi->dev); + + return 0; +} + +static void mtk_hdmi_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + if (!hdmi->enabled) + return; + + phy_power_off(hdmi->phy); + clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]); + clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]); + + hdmi->curr_conn = NULL; + + hdmi->enabled = false; +} + +static void mtk_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + if (!hdmi->powered) + return; + + mtk_hdmi_hw_1p4_version_enable(hdmi, true); + mtk_hdmi_hw_make_reg_writable(hdmi, false); + + hdmi->powered = false; +} + +static void mtk_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + mtk_hdmi_hw_make_reg_writable(hdmi, true); + mtk_hdmi_hw_1p4_version_enable(hdmi, true); + + hdmi->powered = true; +} + +static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi, + struct drm_display_mode *mode) +{ + mtk_hdmi_setup_audio_infoframe(hdmi); + mtk_hdmi_setup_avi_infoframe(hdmi, mode); + mtk_hdmi_setup_spd_infoframe(hdmi); + if (mode->flags & DRM_MODE_FLAG_3D_MASK) + mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode); +} + +static void mtk_hdmi_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + /* Retrieve the connector through the atomic state. 
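+	 * The pointer is only valid while the bridge stays enabled; it is
+	 * cleared again in mtk_hdmi_bridge_atomic_disable().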
*/ + hdmi->curr_conn = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + + mtk_hdmi_output_set_display_mode(hdmi, &hdmi->mode); + clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]); + clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]); + phy_power_on(hdmi->phy); + mtk_hdmi_send_infoframe(hdmi, &hdmi->mode); + + hdmi->enabled = true; +} + +static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = { + .mode_valid = mtk_hdmi_bridge_mode_valid, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .attach = mtk_hdmi_bridge_attach, + .mode_fixup = mtk_hdmi_bridge_mode_fixup, + .atomic_disable = mtk_hdmi_bridge_atomic_disable, + .atomic_post_disable = mtk_hdmi_bridge_atomic_post_disable, + .mode_set = mtk_hdmi_bridge_mode_set, + .atomic_pre_enable = mtk_hdmi_bridge_atomic_pre_enable, + .atomic_enable = mtk_hdmi_bridge_atomic_enable, + .detect = mtk_hdmi_bridge_detect, + .edid_read = mtk_hdmi_bridge_edid_read, +}; + +/* + * HDMI audio codec callbacks + */ + +static int mtk_hdmi_audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + if (!hdmi->audio_enable) { + dev_err(hdmi->dev, "hdmi audio is in disable state!\n"); + return -EINVAL; + } + + mtk_hdmi_audio_params(hdmi, daifmt, params); + mtk_hdmi_aud_output_config(hdmi, &hdmi->mode); + + return 0; +} + +static int mtk_hdmi_audio_startup(struct device *dev, void *data) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + mtk_hdmi_audio_enable(hdmi); + + return 0; +} + +static void mtk_hdmi_audio_shutdown(struct device *dev, void *data) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + mtk_hdmi_audio_disable(hdmi); +} + +static int +mtk_hdmi_audio_mute(struct device *dev, void *data, + bool enable, int direction) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + if (enable) + mtk_hdmi_hw_aud_mute(hdmi); + else + mtk_hdmi_hw_aud_unmute(hdmi); + + return 0; +} + +static int mtk_hdmi_audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct mtk_hdmi *hdmi = data; + + mtk_hdmi_audio_set_plugged_cb(hdmi, fn, codec_dev); + mtk_hdmi_update_plugged_status(hdmi); + + return 0; +} + +static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = { + .hw_params = mtk_hdmi_audio_hw_params, + .audio_startup = mtk_hdmi_audio_startup, + .audio_shutdown = mtk_hdmi_audio_shutdown, + .mute_stream = mtk_hdmi_audio_mute, + .get_eld = mtk_hdmi_audio_get_eld, + .hook_plugged_cb = mtk_hdmi_audio_hook_plugged_cb, +}; + +static int mtk_hdmi_probe(struct platform_device *pdev) +{ + struct mtk_hdmi *hdmi; + int ret; + + hdmi = mtk_hdmi_common_probe(pdev); + if (IS_ERR(hdmi)) + return PTR_ERR(hdmi); + + if (!hdmi->cec_dev) + return dev_err_probe(hdmi->dev, -ENODEV, "CEC is required by HDMIv1\n"); + + ret = mtk_hdmi_clk_enable_audio(hdmi); + if (ret) + return dev_err_probe(hdmi->dev, ret, + "Failed to enable audio clocks\n"); + + return 0; +} + +static void mtk_hdmi_remove(struct platform_device *pdev) +{ + struct mtk_hdmi *hdmi = platform_get_drvdata(pdev); + + mtk_hdmi_clk_disable_audio(hdmi); +} + +static __maybe_unused int mtk_hdmi_suspend(struct device *dev) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + mtk_hdmi_clk_disable_audio(hdmi); + + return 0; +} + +static __maybe_unused int mtk_hdmi_resume(struct device 
*dev) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + return mtk_hdmi_clk_enable_audio(hdmi); +} + +static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops, mtk_hdmi_suspend, mtk_hdmi_resume); + +static const struct mtk_hdmi_ver_conf mtk_hdmi_v1_ver_conf = { + .bridge_funcs = &mtk_hdmi_bridge_funcs, + .codec_ops = &mtk_hdmi_audio_codec_ops, + .mtk_hdmi_clock_names = mtk_hdmi_clk_names, + .num_clocks = ARRAY_SIZE(mtk_hdmi_clk_names) +}; + +static const struct mtk_hdmi_conf mtk_hdmi_conf_mt2701 = { + .tz_disabled = true, + .ver_conf = &mtk_hdmi_v1_ver_conf +}; + +static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8167 = { + .cea_modes_only = true, + .max_mode_clock = 148500, + .ver_conf = &mtk_hdmi_v1_ver_conf +}; + +static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8173 = { + .ver_conf = &mtk_hdmi_v1_ver_conf +}; + +static const struct of_device_id mtk_hdmi_of_ids[] = { + { .compatible = "mediatek,mt2701-hdmi", .data = &mtk_hdmi_conf_mt2701 }, + { .compatible = "mediatek,mt8167-hdmi", .data = &mtk_hdmi_conf_mt8167 }, + { .compatible = "mediatek,mt8173-hdmi", .data = &mtk_hdmi_conf_mt8173 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mtk_hdmi_of_ids); + +static struct platform_driver mtk_hdmi_driver = { + .probe = mtk_hdmi_probe, + .remove = mtk_hdmi_remove, + .driver = { + .name = "mediatek-drm-hdmi", + .of_match_table = mtk_hdmi_of_ids, + .pm = &mtk_hdmi_pm_ops, + }, +}; +module_platform_driver(mtk_hdmi_driver); + +MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek HDMI Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS("DRM_MTK_HDMI_V1"); +MODULE_IMPORT_NS("DRM_MTK_HDMI"); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_common.c b/drivers/gpu/drm/mediatek/mtk_hdmi_common.c new file mode 100644 index 000000000000..e78eb0876f16 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_common.c @@ -0,0 +1,456 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014 MediaTek Inc. + * Copyright (c) 2024 Collabora Ltd. 
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#include <drm/drm_modes.h> +#include <linux/device.h> +#include <linux/hdmi.h> +#include <linux/i2c.h> +#include <linux/math.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/mfd/syscon.h> +#include <sound/hdmi-codec.h> + +#include "mtk_hdmi_common.h" + +struct hdmi_acr_n { + unsigned int clock; + unsigned int n[3]; +}; + +/* Recommended N values from HDMI specification, tables 7-1 to 7-3 */ +static const struct hdmi_acr_n hdmi_rec_n_table[] = { + /* Clock, N: 32kHz 44.1kHz 48kHz */ + { 25175, { 4576, 7007, 6864 } }, + { 74176, { 11648, 17836, 11648 } }, + { 148352, { 11648, 8918, 5824 } }, + { 296703, { 5824, 4459, 5824 } }, + { 297000, { 3072, 4704, 5120 } }, + { 0, { 4096, 6272, 6144 } }, /* all other TMDS clocks */ +}; + +/** + * hdmi_recommended_n() - Return N value recommended by HDMI specification + * @freq: audio sample rate in Hz + * @clock: rounded TMDS clock in kHz + */ +static unsigned int hdmi_recommended_n(unsigned int freq, unsigned int clock) +{ + const struct hdmi_acr_n *recommended; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(hdmi_rec_n_table) - 1; i++) { + if (clock == hdmi_rec_n_table[i].clock) + break; + } + recommended = hdmi_rec_n_table + i; + + switch (freq) { + case 32000: + return recommended->n[0]; + case 44100: + return recommended->n[1]; + case 48000: + return recommended->n[2]; + case 88200: + return recommended->n[1] * 2; + case 96000: + return recommended->n[2] * 2; + case 176400: + return recommended->n[1] * 4; + case 192000: + return recommended->n[2] * 4; + default: + return (128 * freq) / 1000; + } +} + +static unsigned int hdmi_mode_clock_to_hz(unsigned int clock) +{ + switch (clock) { + case 25175: + return 25174825; /* 25.2/1.001 MHz */ + case 74176: + return 74175824; /* 74.25/1.001 MHz */ + case 148352: + return 148351648; /* 148.5/1.001 MHz */ + case 296703: + return 296703297; /* 297/1.001 MHz */ + default: + return clock * 1000; + } +} + +static unsigned int hdmi_expected_cts(unsigned int audio_sample_rate, + unsigned int tmds_clock, unsigned int n) +{ + return DIV_ROUND_CLOSEST_ULL((u64)hdmi_mode_clock_to_hz(tmds_clock) * n, + 128 * audio_sample_rate); +} + +void mtk_hdmi_get_ncts(unsigned int sample_rate, unsigned int clock, + unsigned int *n, unsigned int *cts) +{ + *n = hdmi_recommended_n(sample_rate, clock); + *cts = hdmi_expected_cts(sample_rate, clock, *n); +} +EXPORT_SYMBOL_NS_GPL(mtk_hdmi_get_ncts, "DRM_MTK_HDMI"); + +int mtk_hdmi_audio_params(struct mtk_hdmi *hdmi, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct hdmi_audio_param aud_params = { 0 }; + unsigned int chan = params->cea.channels; + + dev_dbg(hdmi->dev, "%s: %u Hz, %d bit, %d channels\n", __func__, + params->sample_rate, params->sample_width, chan); + + if (!hdmi->bridge.encoder) + return -ENODEV; + + switch (chan) { + case 2: + aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0; + break; + case 4: + aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0; + break; + case 6: + aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1; + break; + case 8: + aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1; + break; + default: + dev_err(hdmi->dev, "channel[%d] not supported!\n", chan); + return -EINVAL; + } + + switch (params->sample_rate) { + case 32000: + case 44100: + case 48000: + case 88200: + case 96000: + case 176400: + case 192000: + break; + default: + dev_err(hdmi->dev, "rate[%d] 
not supported!\n", + params->sample_rate); + return -EINVAL; + } + + switch (daifmt->fmt) { + case HDMI_I2S: + aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; + aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; + aud_params.aud_input_type = HDMI_AUD_INPUT_I2S; + aud_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT; + aud_params.aud_mclk = HDMI_AUD_MCLK_128FS; + break; + case HDMI_SPDIF: + aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; + aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; + aud_params.aud_input_type = HDMI_AUD_INPUT_SPDIF; + break; + default: + dev_err(hdmi->dev, "%s: Invalid DAI format %d\n", __func__, + daifmt->fmt); + return -EINVAL; + } + memcpy(&aud_params.codec_params, params, sizeof(aud_params.codec_params)); + memcpy(&hdmi->aud_param, &aud_params, sizeof(aud_params)); + + dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n", + aud_params.aud_codec, aud_params.aud_input_type, + aud_params.aud_input_chan_type, aud_params.codec_params.sample_rate); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(mtk_hdmi_audio_params, "DRM_MTK_HDMI"); + +int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + if (hdmi->enabled) + memcpy(buf, hdmi->curr_conn->eld, min(sizeof(hdmi->curr_conn->eld), len)); + else + memset(buf, 0, len); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(mtk_hdmi_audio_get_eld, "DRM_MTK_HDMI"); + +void mtk_hdmi_audio_set_plugged_cb(struct mtk_hdmi *hdmi, hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + mutex_lock(&hdmi->update_plugged_status_lock); + hdmi->plugged_cb = fn; + hdmi->codec_dev = codec_dev; + mutex_unlock(&hdmi->update_plugged_status_lock); +} +EXPORT_SYMBOL_NS_GPL(mtk_hdmi_audio_set_plugged_cb, "DRM_MTK_HDMI"); + +static int mtk_hdmi_get_all_clk(struct mtk_hdmi *hdmi, struct device_node *np, + const char * const *clock_names, size_t num_clocks) +{ + int i; + + for (i = 0; i < num_clocks; i++) { + hdmi->clk[i] = of_clk_get_by_name(np, clock_names[i]); + + if (IS_ERR(hdmi->clk[i])) + return PTR_ERR(hdmi->clk[i]); + } + + return 0; +} + +bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} +EXPORT_SYMBOL_NS_GPL(mtk_hdmi_bridge_mode_fixup, "DRM_MTK_HDMI"); + +void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + dev_dbg(hdmi->dev, "cur info: name:%s, hdisplay:%d\n", + adjusted_mode->name, adjusted_mode->hdisplay); + dev_dbg(hdmi->dev, "hsync_start:%d,hsync_end:%d, htotal:%d", + adjusted_mode->hsync_start, adjusted_mode->hsync_end, + adjusted_mode->htotal); + dev_dbg(hdmi->dev, "hskew:%d, vdisplay:%d\n", + adjusted_mode->hskew, adjusted_mode->vdisplay); + dev_dbg(hdmi->dev, "vsync_start:%d, vsync_end:%d, vtotal:%d", + adjusted_mode->vsync_start, adjusted_mode->vsync_end, + adjusted_mode->vtotal); + dev_dbg(hdmi->dev, "vscan:%d, flag:%d\n", + adjusted_mode->vscan, adjusted_mode->flags); + + drm_mode_copy(&hdmi->mode, adjusted_mode); +} +EXPORT_SYMBOL_NS_GPL(mtk_hdmi_bridge_mode_set, "DRM_MTK_HDMI"); + +static void mtk_hdmi_put_device(void *_dev) +{ + struct device *dev = _dev; + + put_device(dev); +} + +static int mtk_hdmi_get_cec_dev(struct mtk_hdmi *hdmi, struct device *dev, struct device_node *np) +{ + struct platform_device *cec_pdev; + struct device_node *cec_np; + int ret; + + /* The CEC module 
handles HDMI hotplug detection */ + cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec"); + if (!cec_np) + return dev_err_probe(dev, -EOPNOTSUPP, "Failed to find CEC node\n"); + + cec_pdev = of_find_device_by_node(cec_np); + if (!cec_pdev) { + dev_err(hdmi->dev, "Waiting for CEC device %pOF\n", cec_np); + of_node_put(cec_np); + return -EPROBE_DEFER; + } + of_node_put(cec_np); + + ret = devm_add_action_or_reset(dev, mtk_hdmi_put_device, &cec_pdev->dev); + if (ret) + return ret; + + /* + * The mediatek,syscon-hdmi property contains a phandle link to the + * MMSYS_CONFIG device and the register offset of the HDMI_SYS_CFG + * registers it contains. + */ + hdmi->sys_regmap = syscon_regmap_lookup_by_phandle_args(np, "mediatek,syscon-hdmi", + 1, &hdmi->sys_offset); + if (IS_ERR(hdmi->sys_regmap)) + return dev_err_probe(dev, PTR_ERR(hdmi->sys_regmap), + "Failed to get system configuration registers\n"); + + hdmi->cec_dev = &cec_pdev->dev; + return 0; +} + +static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, struct platform_device *pdev, + const char * const *clk_names, size_t num_clocks) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct device_node *remote, *i2c_np; + int ret; + + ret = mtk_hdmi_get_all_clk(hdmi, np, clk_names, num_clocks); + if (ret) + return dev_err_probe(dev, ret, "Failed to get clocks\n"); + + hdmi->irq = platform_get_irq(pdev, 0); + if (!hdmi->irq) + return hdmi->irq; + + hdmi->regs = device_node_to_regmap(dev->of_node); + if (IS_ERR(hdmi->regs)) + return PTR_ERR(hdmi->regs); + + remote = of_graph_get_remote_node(np, 1, 0); + if (!remote) + return -EINVAL; + + if (!of_device_is_compatible(remote, "hdmi-connector")) { + hdmi->next_bridge = of_drm_find_bridge(remote); + if (!hdmi->next_bridge) { + dev_err(dev, "Waiting for external bridge\n"); + of_node_put(remote); + return -EPROBE_DEFER; + } + } + + i2c_np = of_parse_phandle(remote, "ddc-i2c-bus", 0); + of_node_put(remote); + if (!i2c_np) + return dev_err_probe(dev, -EINVAL, "No ddc-i2c-bus in connector\n"); + + hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np); + of_node_put(i2c_np); + if (!hdmi->ddc_adpt) + return dev_err_probe(dev, -EPROBE_DEFER, "Failed to get ddc i2c adapter by node\n"); + + ret = devm_add_action_or_reset(dev, mtk_hdmi_put_device, &hdmi->ddc_adpt->dev); + if (ret) + return ret; + + ret = mtk_hdmi_get_cec_dev(hdmi, dev, np); + if (ret == -EOPNOTSUPP) + dev_info(dev, "CEC support unavailable: node not found\n"); + else if (ret) + return ret; + + return 0; +} + +static void mtk_hdmi_unregister_audio_driver(void *data) +{ + platform_device_unregister(data); +} + +static int mtk_hdmi_register_audio_driver(struct device *dev) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + struct hdmi_audio_param *aud_param = &hdmi->aud_param; + struct hdmi_codec_pdata codec_data = { + .ops = hdmi->conf->ver_conf->codec_ops, + .max_i2s_channels = 2, + .i2s = 1, + .data = hdmi, + .no_capture_mute = 1, + }; + int ret; + + aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; + aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; + aud_param->aud_input_type = HDMI_AUD_INPUT_I2S; + aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT; + aud_param->aud_mclk = HDMI_AUD_MCLK_128FS; + aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0; + + hdmi->audio_pdev = platform_device_register_data(dev, + HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, + sizeof(codec_data)); + if (IS_ERR(hdmi->audio_pdev)) + return PTR_ERR(hdmi->audio_pdev); + + ret = 
devm_add_action_or_reset(dev, mtk_hdmi_unregister_audio_driver, + hdmi->audio_pdev); + if (ret) + return ret; + + return 0; +} + +struct mtk_hdmi *mtk_hdmi_common_probe(struct platform_device *pdev) +{ + const struct mtk_hdmi_ver_conf *ver_conf; + const struct mtk_hdmi_conf *hdmi_conf; + struct device *dev = &pdev->dev; + struct mtk_hdmi *hdmi; + int ret; + + hdmi_conf = of_device_get_match_data(dev); + if (!hdmi_conf) + return ERR_PTR(-ENODEV); + + ver_conf = hdmi_conf->ver_conf; + + hdmi = devm_drm_bridge_alloc(dev, struct mtk_hdmi, bridge, + ver_conf->bridge_funcs); + if (IS_ERR(hdmi)) + return hdmi; + + hdmi->dev = dev; + hdmi->conf = hdmi_conf; + + hdmi->clk = devm_kcalloc(dev, ver_conf->num_clocks, sizeof(*hdmi->clk), GFP_KERNEL); + if (!hdmi->clk) + return ERR_PTR(-ENOMEM); + + ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev, ver_conf->mtk_hdmi_clock_names, + ver_conf->num_clocks); + if (ret) + return ERR_PTR(ret); + + hdmi->phy = devm_phy_get(dev, "hdmi"); + if (IS_ERR(hdmi->phy)) + return dev_err_cast_probe(dev, hdmi->phy, "Failed to get HDMI PHY\n"); + + mutex_init(&hdmi->update_plugged_status_lock); + platform_set_drvdata(pdev, hdmi); + + ret = mtk_hdmi_register_audio_driver(dev); + if (ret) + return dev_err_ptr_probe(dev, ret, "Cannot register HDMI Audio driver\n"); + + hdmi->bridge.of_node = pdev->dev.of_node; + hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID + | DRM_BRIDGE_OP_HPD; + + if (ver_conf->bridge_funcs->hdmi_write_infoframe && + ver_conf->bridge_funcs->hdmi_clear_infoframe) + hdmi->bridge.ops |= DRM_BRIDGE_OP_HDMI; + + hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + hdmi->bridge.ddc = hdmi->ddc_adpt; + hdmi->bridge.vendor = "MediaTek"; + hdmi->bridge.product = "On-Chip HDMI"; + hdmi->bridge.interlace_allowed = ver_conf->interlace_allowed; + + ret = devm_drm_bridge_add(dev, &hdmi->bridge); + if (ret) + return dev_err_ptr_probe(dev, ret, "Failed to add bridge\n"); + + return hdmi; +} +EXPORT_SYMBOL_NS_GPL(mtk_hdmi_common_probe, "DRM_MTK_HDMI"); + +MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>"); +MODULE_DESCRIPTION("MediaTek HDMI Common Library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_common.h b/drivers/gpu/drm/mediatek/mtk_hdmi_common.h new file mode 100644 index 000000000000..de5e064585f8 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_common.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021 MediaTek Inc. + * Copyright (c) 2024 Collabora Ltd. 
+ */ + +#ifndef _MTK_HDMI_COMMON_H +#define _MTK_HDMI_COMMON_H + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_edid.h> +#include <drm/drm_print.h> + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/hdmi.h> +#include <linux/i2c.h> +#include <linux/mfd/syscon.h> +#include <linux/mutex.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> + +#include <sound/hdmi-codec.h> + +enum hdmi_aud_input_type { + HDMI_AUD_INPUT_I2S = 0, + HDMI_AUD_INPUT_SPDIF, +}; + +enum hdmi_aud_i2s_fmt { + HDMI_I2S_MODE_RJT_24BIT = 0, + HDMI_I2S_MODE_RJT_16BIT, + HDMI_I2S_MODE_LJT_24BIT, + HDMI_I2S_MODE_LJT_16BIT, + HDMI_I2S_MODE_I2S_24BIT, + HDMI_I2S_MODE_I2S_16BIT +}; + +enum hdmi_aud_mclk { + HDMI_AUD_MCLK_128FS, + HDMI_AUD_MCLK_192FS, + HDMI_AUD_MCLK_256FS, + HDMI_AUD_MCLK_384FS, + HDMI_AUD_MCLK_512FS, + HDMI_AUD_MCLK_768FS, + HDMI_AUD_MCLK_1152FS, +}; + +enum hdmi_aud_channel_type { + HDMI_AUD_CHAN_TYPE_1_0 = 0, + HDMI_AUD_CHAN_TYPE_1_1, + HDMI_AUD_CHAN_TYPE_2_0, + HDMI_AUD_CHAN_TYPE_2_1, + HDMI_AUD_CHAN_TYPE_3_0, + HDMI_AUD_CHAN_TYPE_3_1, + HDMI_AUD_CHAN_TYPE_4_0, + HDMI_AUD_CHAN_TYPE_4_1, + HDMI_AUD_CHAN_TYPE_5_0, + HDMI_AUD_CHAN_TYPE_5_1, + HDMI_AUD_CHAN_TYPE_6_0, + HDMI_AUD_CHAN_TYPE_6_1, + HDMI_AUD_CHAN_TYPE_7_0, + HDMI_AUD_CHAN_TYPE_7_1, + HDMI_AUD_CHAN_TYPE_3_0_LRS, + HDMI_AUD_CHAN_TYPE_3_1_LRS, + HDMI_AUD_CHAN_TYPE_4_0_CLRS, + HDMI_AUD_CHAN_TYPE_4_1_CLRS, + HDMI_AUD_CHAN_TYPE_6_1_CS, + HDMI_AUD_CHAN_TYPE_6_1_CH, + HDMI_AUD_CHAN_TYPE_6_1_OH, + HDMI_AUD_CHAN_TYPE_6_1_CHR, + HDMI_AUD_CHAN_TYPE_7_1_LH_RH, + HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR, + HDMI_AUD_CHAN_TYPE_7_1_LC_RC, + HDMI_AUD_CHAN_TYPE_7_1_LW_RW, + HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD, + HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS, + HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS, + HDMI_AUD_CHAN_TYPE_7_1_CS_CH, + HDMI_AUD_CHAN_TYPE_7_1_CS_OH, + HDMI_AUD_CHAN_TYPE_7_1_CS_CHR, + HDMI_AUD_CHAN_TYPE_7_1_CH_OH, + HDMI_AUD_CHAN_TYPE_7_1_CH_CHR, + HDMI_AUD_CHAN_TYPE_7_1_OH_CHR, + HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR, + HDMI_AUD_CHAN_TYPE_6_0_CS, + HDMI_AUD_CHAN_TYPE_6_0_CH, + HDMI_AUD_CHAN_TYPE_6_0_OH, + HDMI_AUD_CHAN_TYPE_6_0_CHR, + HDMI_AUD_CHAN_TYPE_7_0_LH_RH, + HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR, + HDMI_AUD_CHAN_TYPE_7_0_LC_RC, + HDMI_AUD_CHAN_TYPE_7_0_LW_RW, + HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD, + HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS, + HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS, + HDMI_AUD_CHAN_TYPE_7_0_CS_CH, + HDMI_AUD_CHAN_TYPE_7_0_CS_OH, + HDMI_AUD_CHAN_TYPE_7_0_CS_CHR, + HDMI_AUD_CHAN_TYPE_7_0_CH_OH, + HDMI_AUD_CHAN_TYPE_7_0_CH_CHR, + HDMI_AUD_CHAN_TYPE_7_0_OH_CHR, + HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR, + HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS, + HDMI_AUD_CHAN_TYPE_UNKNOWN = 0xFF +}; + +enum hdmi_aud_channel_swap_type { + HDMI_AUD_SWAP_LR, + HDMI_AUD_SWAP_LFE_CC, + HDMI_AUD_SWAP_LSRS, + HDMI_AUD_SWAP_RLS_RRS, + HDMI_AUD_SWAP_LR_STATUS, +}; + +struct hdmi_audio_param { + enum hdmi_audio_coding_type aud_codec; + enum hdmi_audio_sample_size aud_sample_size; + enum hdmi_aud_input_type aud_input_type; + enum hdmi_aud_i2s_fmt aud_i2s_fmt; + enum hdmi_aud_mclk aud_mclk; + enum hdmi_aud_channel_type aud_input_chan_type; + struct hdmi_codec_params codec_params; +}; + +enum hdmi_hpd_state { + HDMI_PLUG_OUT = 0, + HDMI_PLUG_IN_AND_SINK_POWER_ON, + HDMI_PLUG_IN_ONLY, +}; + +struct mtk_hdmi_ver_conf { + const struct drm_bridge_funcs *bridge_funcs; + const struct hdmi_codec_ops *codec_ops; + const char * const *mtk_hdmi_clock_names; + int num_clocks; + bool interlace_allowed; +}; + +struct 
mtk_hdmi_conf { + const struct mtk_hdmi_ver_conf *ver_conf; + bool tz_disabled; + bool cea_modes_only; + unsigned long max_mode_clock; + u32 reg_hdmi_tx_cfg; +}; + +struct mtk_hdmi { + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct drm_connector *curr_conn;/* current connector (only valid when 'enabled') */ + struct device *dev; + const struct mtk_hdmi_conf *conf; + struct phy *phy; + struct device *cec_dev; + struct i2c_adapter *ddc_adpt; + struct clk **clk; + struct drm_display_mode mode; + bool dvi_mode; + struct regmap *sys_regmap; + unsigned int sys_offset; + struct regmap *regs; + struct platform_device *audio_pdev; + struct hdmi_audio_param aud_param; + bool audio_enable; + bool powered; + bool enabled; + unsigned int irq; + enum hdmi_hpd_state hpd; + hdmi_codec_plugged_cb plugged_cb; + struct device *codec_dev; + struct mutex update_plugged_status_lock; +}; + +static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b) +{ + return container_of(b, struct mtk_hdmi, bridge); +} + + +int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len); +void mtk_hdmi_audio_set_plugged_cb(struct mtk_hdmi *hdmi, hdmi_codec_plugged_cb fn, + struct device *codec_dev); +int mtk_hdmi_audio_params(struct mtk_hdmi *hdmi, struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params); +void mtk_hdmi_get_ncts(unsigned int sample_rate, unsigned int clock, + unsigned int *n, unsigned int *cts); +bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode); +struct mtk_hdmi *mtk_hdmi_common_probe(struct platform_device *pdev); +#endif /* _MTK_HDMI_COMMON_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c new file mode 100644 index 000000000000..6358e1af69b4 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c @@ -0,0 +1,343 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014 MediaTek Inc. 
+ * Author: Jie Qiu <jie.qiu@mediatek.com> + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/time.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> + +#include "mtk_drm_drv.h" + +#define SIF1_CLOK (288) +#define DDC_DDCMCTL0 (0x0) +#define DDCM_ODRAIN BIT(31) +#define DDCM_CLK_DIV_OFFSET (16) +#define DDCM_CLK_DIV_MASK (0xfff << 16) +#define DDCM_CS_STATUS BIT(4) +#define DDCM_SCL_STATE BIT(3) +#define DDCM_SDA_STATE BIT(2) +#define DDCM_SM0EN BIT(1) +#define DDCM_SCL_STRECH BIT(0) +#define DDC_DDCMCTL1 (0x4) +#define DDCM_ACK_OFFSET (16) +#define DDCM_ACK_MASK (0xff << 16) +#define DDCM_PGLEN_OFFSET (8) +#define DDCM_PGLEN_MASK (0x7 << 8) +#define DDCM_SIF_MODE_OFFSET (4) +#define DDCM_SIF_MODE_MASK (0x7 << 4) +#define DDCM_START (0x1) +#define DDCM_WRITE_DATA (0x2) +#define DDCM_STOP (0x3) +#define DDCM_READ_DATA_NO_ACK (0x4) +#define DDCM_READ_DATA_ACK (0x5) +#define DDCM_TRI BIT(0) +#define DDC_DDCMD0 (0x8) +#define DDCM_DATA3 (0xff << 24) +#define DDCM_DATA2 (0xff << 16) +#define DDCM_DATA1 (0xff << 8) +#define DDCM_DATA0 (0xff << 0) +#define DDC_DDCMD1 (0xc) +#define DDCM_DATA7 (0xff << 24) +#define DDCM_DATA6 (0xff << 16) +#define DDCM_DATA5 (0xff << 8) +#define DDCM_DATA4 (0xff << 0) + +struct mtk_hdmi_ddc { + struct i2c_adapter adap; + struct clk *clk; + void __iomem *regs; +}; + +static inline void sif_set_bit(struct mtk_hdmi_ddc *ddc, unsigned int offset, + unsigned int val) +{ + writel(readl(ddc->regs + offset) | val, ddc->regs + offset); +} + +static inline void sif_clr_bit(struct mtk_hdmi_ddc *ddc, unsigned int offset, + unsigned int val) +{ + writel(readl(ddc->regs + offset) & ~val, ddc->regs + offset); +} + +static inline bool sif_bit_is_set(struct mtk_hdmi_ddc *ddc, unsigned int offset, + unsigned int val) +{ + return (readl(ddc->regs + offset) & val) == val; +} + +static inline void sif_write_mask(struct mtk_hdmi_ddc *ddc, unsigned int offset, + unsigned int mask, unsigned int shift, + unsigned int val) +{ + unsigned int tmp; + + tmp = readl(ddc->regs + offset); + tmp &= ~mask; + tmp |= (val << shift) & mask; + writel(tmp, ddc->regs + offset); +} + +static inline unsigned int sif_read_mask(struct mtk_hdmi_ddc *ddc, + unsigned int offset, unsigned int mask, + unsigned int shift) +{ + return (readl(ddc->regs + offset) & mask) >> shift; +} + +static void ddcm_trigger_mode(struct mtk_hdmi_ddc *ddc, int mode) +{ + u32 val; + + sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_SIF_MODE_MASK, + DDCM_SIF_MODE_OFFSET, mode); + sif_set_bit(ddc, DDC_DDCMCTL1, DDCM_TRI); + readl_poll_timeout(ddc->regs + DDC_DDCMCTL1, val, + (val & DDCM_TRI) != DDCM_TRI, 4, 20000); +} + +static int mtk_hdmi_ddc_read_msg(struct mtk_hdmi_ddc *ddc, struct i2c_msg *msg) +{ + struct device *dev = ddc->adap.dev.parent; + u32 remain_count, ack_count, ack_final, read_count, temp_count; + u32 index = 0; + u32 ack; + int i; + + ddcm_trigger_mode(ddc, DDCM_START); + sif_write_mask(ddc, DDC_DDCMD0, 0xff, 0, (msg->addr << 1) | 0x01); + sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, DDCM_PGLEN_OFFSET, + 0x00); + ddcm_trigger_mode(ddc, DDCM_WRITE_DATA); + ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, DDCM_ACK_OFFSET); + dev_dbg(dev, "ack = 0x%x\n", ack); + if (ack != 0x01) { + dev_err(dev, "i2c ack err!\n"); + return 
-ENXIO; + } + + remain_count = msg->len; + ack_count = (msg->len - 1) / 8; + ack_final = 0; + + while (remain_count > 0) { + if (ack_count > 0) { + read_count = 8; + ack_final = 0; + ack_count--; + } else { + read_count = remain_count; + ack_final = 1; + } + + sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, + DDCM_PGLEN_OFFSET, read_count - 1); + ddcm_trigger_mode(ddc, (ack_final == 1) ? + DDCM_READ_DATA_NO_ACK : + DDCM_READ_DATA_ACK); + + ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, + DDCM_ACK_OFFSET); + temp_count = 0; + while (((ack & (1 << temp_count)) != 0) && (temp_count < 8)) + temp_count++; + if (((ack_final == 1) && (temp_count != (read_count - 1))) || + ((ack_final == 0) && (temp_count != read_count))) { + dev_err(dev, "Address NACK! ACK(0x%x)\n", ack); + break; + } + + for (i = read_count; i >= 1; i--) { + int shift; + int offset; + + if (i > 4) { + offset = DDC_DDCMD1; + shift = (i - 5) * 8; + } else { + offset = DDC_DDCMD0; + shift = (i - 1) * 8; + } + + msg->buf[index + i - 1] = sif_read_mask(ddc, offset, + 0xff << shift, + shift); + } + + remain_count -= read_count; + index += read_count; + } + + return 0; +} + +static int mtk_hdmi_ddc_write_msg(struct mtk_hdmi_ddc *ddc, struct i2c_msg *msg) +{ + struct device *dev = ddc->adap.dev.parent; + u32 ack; + + ddcm_trigger_mode(ddc, DDCM_START); + sif_write_mask(ddc, DDC_DDCMD0, DDCM_DATA0, 0, msg->addr << 1); + sif_write_mask(ddc, DDC_DDCMD0, DDCM_DATA1, 8, msg->buf[0]); + sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, DDCM_PGLEN_OFFSET, + 0x1); + ddcm_trigger_mode(ddc, DDCM_WRITE_DATA); + + ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, DDCM_ACK_OFFSET); + dev_dbg(dev, "ack = %d\n", ack); + + if (ack != 0x03) { + dev_err(dev, "i2c ack err!\n"); + return -EIO; + } + + return 0; +} + +static int mtk_hdmi_ddc_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, int num) +{ + struct mtk_hdmi_ddc *ddc = adapter->algo_data; + struct device *dev = adapter->dev.parent; + int ret; + int i; + + if (!ddc) { + dev_err(dev, "invalid arguments\n"); + return -EINVAL; + } + + sif_set_bit(ddc, DDC_DDCMCTL0, DDCM_SCL_STRECH); + sif_set_bit(ddc, DDC_DDCMCTL0, DDCM_SM0EN); + sif_clr_bit(ddc, DDC_DDCMCTL0, DDCM_ODRAIN); + + if (sif_bit_is_set(ddc, DDC_DDCMCTL1, DDCM_TRI)) { + dev_err(dev, "ddc line is busy!\n"); + return -EBUSY; + } + + sif_write_mask(ddc, DDC_DDCMCTL0, DDCM_CLK_DIV_MASK, + DDCM_CLK_DIV_OFFSET, SIF1_CLOK); + + for (i = 0; i < num; i++) { + struct i2c_msg *msg = &msgs[i]; + + dev_dbg(dev, "i2c msg, adr:0x%x, flags:%d, len :0x%x\n", + msg->addr, msg->flags, msg->len); + + if (msg->flags & I2C_M_RD) + ret = mtk_hdmi_ddc_read_msg(ddc, msg); + else + ret = mtk_hdmi_ddc_write_msg(ddc, msg); + if (ret < 0) + goto xfer_end; + } + + ddcm_trigger_mode(ddc, DDCM_STOP); + + return i; + +xfer_end: + ddcm_trigger_mode(ddc, DDCM_STOP); + dev_err(dev, "ddc failed!\n"); + return ret; +} + +static u32 mtk_hdmi_ddc_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm mtk_hdmi_ddc_algorithm = { + .master_xfer = mtk_hdmi_ddc_xfer, + .functionality = mtk_hdmi_ddc_func, +}; + +static int mtk_hdmi_ddc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_hdmi_ddc *ddc; + struct resource *mem; + int ret; + + ddc = devm_kzalloc(dev, sizeof(struct mtk_hdmi_ddc), GFP_KERNEL); + if (!ddc) + return -ENOMEM; + + ddc->clk = devm_clk_get(dev, "ddc-i2c"); + if (IS_ERR(ddc->clk)) + return dev_err_probe(dev, PTR_ERR(ddc->clk), + "get ddc_clk 
failed\n"); + + ddc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem); + if (IS_ERR(ddc->regs)) + return PTR_ERR(ddc->regs); + + ret = clk_prepare_enable(ddc->clk); + if (ret) + return dev_err_probe(dev, ret, "enable ddc clk failed!\n"); + + strscpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name)); + ddc->adap.owner = THIS_MODULE; + ddc->adap.algo = &mtk_hdmi_ddc_algorithm; + ddc->adap.retries = 3; + ddc->adap.dev.of_node = dev->of_node; + ddc->adap.algo_data = ddc; + ddc->adap.dev.parent = &pdev->dev; + + ret = i2c_add_adapter(&ddc->adap); + if (ret < 0) { + clk_disable_unprepare(ddc->clk); + return dev_err_probe(dev, ret, "failed to add bus to i2c core\n"); + } + + platform_set_drvdata(pdev, ddc); + + dev_dbg(dev, "ddc->adap: %p\n", &ddc->adap); + dev_dbg(dev, "ddc->clk: %p\n", ddc->clk); + dev_dbg(dev, "physical adr: %pa, end: %pa\n", &mem->start, + &mem->end); + + return 0; +} + +static void mtk_hdmi_ddc_remove(struct platform_device *pdev) +{ + struct mtk_hdmi_ddc *ddc = platform_get_drvdata(pdev); + + i2c_del_adapter(&ddc->adap); + clk_disable_unprepare(ddc->clk); +} + +static const struct of_device_id mtk_hdmi_ddc_match[] = { + { .compatible = "mediatek,mt8173-hdmi-ddc", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mtk_hdmi_ddc_match); + +struct platform_driver mtk_hdmi_ddc_driver = { + .probe = mtk_hdmi_ddc_probe, + .remove = mtk_hdmi_ddc_remove, + .driver = { + .name = "mediatek-hdmi-ddc", + .of_match_table = mtk_hdmi_ddc_match, + }, +}; +module_platform_driver(mtk_hdmi_ddc_driver); + +MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek HDMI DDC Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c new file mode 100644 index 000000000000..b844e2c10f28 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c @@ -0,0 +1,396 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MediaTek HDMI v2 Display Data Channel Driver + * + * Copyright (c) 2021 MediaTek Inc. + * Copyright (c) 2021 BayLibre, SAS + * Copyright (c) 2024 Collabora Ltd. 
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/types.h> + +#include <drm/drm_edid.h> + +#include "mtk_hdmi_common.h" +#include "mtk_hdmi_regs_v2.h" + +#define DDC2_DLY_CNT 572 /* BIM=208M/(v*4) = 90Khz */ +#define DDC2_DLY_CNT_EDID 832 /* BIM=208M/(v*4) = 62.5Khz */ +#define SI2C_ADDR_READ 0xf4 +#define SCDC_I2C_SLAVE_ADDRESS 0x54 + +struct mtk_hdmi_ddc { + struct device *dev; + struct regmap *regs; + struct clk *clk; + struct i2c_adapter adap; +}; + +static int mtk_ddc_check_and_rise_low_bus(struct mtk_hdmi_ddc *ddc) +{ + u32 val; + + regmap_read(ddc->regs, HDCP2X_DDCM_STATUS, &val); + if (val & DDC_I2C_BUS_LOW) { + regmap_update_bits(ddc->regs, DDC_CTRL, DDC_CTRL_CMD, + FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_CLOCK_SCL)); + usleep_range(250, 300); + } + + if (val & DDC_I2C_NO_ACK) { + u32 ddc_ctrl, hpd_ddc_ctrl, hpd_ddc_status; + + regmap_read(ddc->regs, DDC_CTRL, &ddc_ctrl); + regmap_read(ddc->regs, HPD_DDC_CTRL, &hpd_ddc_ctrl); + regmap_read(ddc->regs, HPD_DDC_STATUS, &hpd_ddc_status); + } + + if (val & DDC_I2C_NO_ACK) + return -EIO; + + return 0; +} + +static int mtk_ddc_wr_one(struct mtk_hdmi_ddc *ddc, u16 addr_id, + u16 offset_id, u8 *wr_data) +{ + u32 val; + int ret; + + /* If down, rise bus for write operation */ + mtk_ddc_check_and_rise_low_bus(ddc); + + regmap_update_bits(ddc->regs, HPD_DDC_CTRL, HPD_DDC_DELAY_CNT, + FIELD_PREP(HPD_DDC_DELAY_CNT, DDC2_DLY_CNT)); + + if (wr_data) { + regmap_write(ddc->regs, SI2C_CTRL, + FIELD_PREP(SI2C_ADDR, SI2C_ADDR_READ) | + FIELD_PREP(SI2C_WDATA, *wr_data) | + SI2C_WR); + } + + regmap_write(ddc->regs, DDC_CTRL, + FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_SEQ_WRITE) | + FIELD_PREP(DDC_CTRL_DIN_CNT, wr_data == NULL ? 
0 : 1) | + FIELD_PREP(DDC_CTRL_OFFSET, offset_id) | + FIELD_PREP(DDC_CTRL_ADDR, addr_id)); + usleep_range(1000, 1250); + + ret = regmap_read_poll_timeout(ddc->regs, HPD_DDC_STATUS, val, + !(val & DDC_I2C_IN_PROG), 500, 1000); + if (ret) { + dev_err(ddc->dev, "DDC I2C write timeout\n"); + return ret; + } + + /* The I2C bus might be down after WR operation: rise it again */ + ret = mtk_ddc_check_and_rise_low_bus(ddc); + if (ret) { + dev_err(ddc->dev, "Error during write operation: No ACK\n"); + return ret; + } + + return 0; +} + +static int mtk_ddcm_read_hdmi(struct mtk_hdmi_ddc *ddc, u16 uc_dev, + u8 addr, u8 *puc_value, u16 data_cnt) +{ + u16 dly_cnt, i, uc_idx; + u32 rem, temp_length, uc_read_count, val; + u64 loop_counter; + int ret; + + mtk_ddc_check_and_rise_low_bus(ddc); + + regmap_update_bits(ddc->regs, DDC_CTRL, DDC_CTRL_CMD, + FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_CLEAR_FIFO)); + + if (data_cnt >= 16) { + temp_length = 16; + loop_counter = data_cnt; + + rem = do_div(loop_counter, temp_length); + if (rem) + loop_counter++; + } else { + temp_length = data_cnt; + loop_counter = 1; + } + + if (uc_dev >= DDC_ADDR) + dly_cnt = DDC2_DLY_CNT_EDID; + else + dly_cnt = DDC2_DLY_CNT; + + regmap_update_bits(ddc->regs, HPD_DDC_CTRL, HPD_DDC_DELAY_CNT, + FIELD_PREP(HPD_DDC_DELAY_CNT, dly_cnt)); + + for (i = 0; i < loop_counter; i++) { + rem = data_cnt % 16; + + if (i > 0 && i == (loop_counter - 1) && rem) + temp_length = rem; + + /* 0x51 - 0x53: Flow control */ + if (uc_dev > DDC_ADDR && uc_dev <= 0x53) { + regmap_update_bits(ddc->regs, SCDC_CTRL, SCDC_DDC_SEGMENT, + FIELD_PREP(SCDC_DDC_SEGMENT, uc_dev - DDC_ADDR)); + + regmap_write(ddc->regs, DDC_CTRL, + FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_ENH_READ_NOACK) | + FIELD_PREP(DDC_CTRL_DIN_CNT, temp_length) | + FIELD_PREP(DDC_CTRL_OFFSET, addr + i * temp_length) | + FIELD_PREP(DDC_CTRL_ADDR, DDC_ADDR)); + } else { + u16 offset; + + if (addr != 0x43) + offset = i * 16; + else + offset = 0; + + regmap_write(ddc->regs, DDC_CTRL, + FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_SEQ_READ_NOACK) | + FIELD_PREP(DDC_CTRL_DIN_CNT, temp_length) | + FIELD_PREP(DDC_CTRL_OFFSET, addr + offset) | + FIELD_PREP(DDC_CTRL_ADDR, uc_dev)); + } + usleep_range(5000, 5500); + + ret = regmap_read_poll_timeout(ddc->regs, HPD_DDC_STATUS, val, + !(val & DDC_I2C_IN_PROG), 1000, + 500 * (temp_length + 5)); + if (ret) { + dev_err(ddc->dev, "Timeout waiting for DDC I2C\n"); + return ret; + } + + ret = mtk_ddc_check_and_rise_low_bus(ddc); + if (ret) { + dev_err(ddc->dev, "Error during read operation: No ACK\n"); + return ret; + } + + for (uc_idx = 0; uc_idx < temp_length; uc_idx++) { + unsigned int read_idx = i * 16 + uc_idx; + + regmap_write(ddc->regs, SI2C_CTRL, + FIELD_PREP(SI2C_ADDR, SI2C_ADDR_READ) | + SI2C_RD); + + regmap_read(ddc->regs, HPD_DDC_STATUS, &val); + puc_value[read_idx] = FIELD_GET(DDC_DATA_OUT, val); + + regmap_write(ddc->regs, SI2C_CTRL, + FIELD_PREP(SI2C_ADDR, SI2C_ADDR_READ) | + SI2C_CONFIRM_READ); + + /* + * If HDMI IP gets reset during EDID read, DDC read + * operation will fail and its delay counter will be + * reset to 400. 
+ */ + regmap_read(ddc->regs, HPD_DDC_CTRL, &val); + if (FIELD_GET(HPD_DDC_DELAY_CNT, val) < DDC2_DLY_CNT) + return 0; + + uc_read_count = read_idx + 1; + } + } + if (uc_read_count > U8_MAX) + dev_warn(ddc->dev, "Invalid read data count %u\n", uc_read_count); + + return uc_read_count; +} + +static int mtk_hdmi_fg_ddc_data_read(struct mtk_hdmi_ddc *ddc, u16 b_dev, + u8 data_addr, u16 data_cnt, u8 *pr_data) +{ + int read_data_cnt; + u16 req_data_cnt; + + if (!data_cnt) { + dev_err(ddc->dev, "Invalid DDCM read request\n"); + return -EINVAL; + } + + req_data_cnt = U8_MAX - data_addr + 1; + if (req_data_cnt > data_cnt) + req_data_cnt = data_cnt; + + regmap_set_bits(ddc->regs, HDCP2X_POL_CTRL, HDCP2X_DIS_POLL_EN); + + read_data_cnt = mtk_ddcm_read_hdmi(ddc, b_dev, data_addr, pr_data, req_data_cnt); + + if (read_data_cnt < 0) + return read_data_cnt; + else if (read_data_cnt != req_data_cnt) + return -EINVAL; + + return 0; +} + +static int mtk_hdmi_ddc_fg_data_write(struct mtk_hdmi_ddc *ddc, u16 b_dev, + u8 data_addr, u16 data_cnt, u8 *pr_data) +{ + int i, ret; + + regmap_set_bits(ddc->regs, HDCP2X_POL_CTRL, HDCP2X_DIS_POLL_EN); + /* + * In case there is no payload data, just do a single write for the + * address only + */ + if (data_cnt == 0) + return mtk_ddc_wr_one(ddc, b_dev, data_addr, NULL); + + i = 0; + do { + ret = mtk_ddc_wr_one(ddc, b_dev, data_addr + i, pr_data + i); + if (ret) + return ret; + } while (++i < data_cnt); + + return 0; +} + +static int mtk_hdmi_ddc_v2_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) +{ + struct mtk_hdmi_ddc *ddc; + u8 offset = 0; + int i, ret; + + ddc = adapter->algo_data; + + for (i = 0; i < num; i++) { + struct i2c_msg *msg = &msgs[i]; + + if (!msg->buf) { + dev_err(ddc->dev, "No message buffer\n"); + return -EINVAL; + } + + if (msg->flags & I2C_M_RD) { + /* + * The underlying DDC hardware always issues a write request + * that assigns the read offset as part of the read operation, + * therefore, use the `offset` value assigned in the previous + * write request from drm_edid + */ + ret = mtk_hdmi_fg_ddc_data_read(ddc, msg->addr, offset, + msg->len, &msg->buf[0]); + if (ret) + return ret; + } else { + /* + * The HW needs the data offset, found in buf[0], in the + * DDC_CTRL register, and each byte of data, starting at + * buf[1], goes in the SI2C_WDATA register. + */ + ret = mtk_hdmi_ddc_fg_data_write(ddc, msg->addr, msg->buf[0], + msg->len - 1, &msg->buf[1]); + if (ret) + return ret; + + /* + * Store the offset value requested by drm_edid or by + * scdc to use in subsequent read requests. + */ + if ((msg->addr == DDC_ADDR || msg->addr == SCDC_I2C_SLAVE_ADDRESS) && + msg->len == 1) { + offset = msg->buf[0]; + } + } + } + + return i; +} + +static u32 mtk_hdmi_ddc_v2_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm mtk_hdmi_ddc_v2_algorithm = { + .master_xfer = mtk_hdmi_ddc_v2_xfer, + .functionality = mtk_hdmi_ddc_v2_func, +}; + +static int mtk_hdmi_ddc_v2_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_hdmi_ddc *ddc; + int ret; + + ddc = devm_kzalloc(dev, sizeof(*ddc), GFP_KERNEL); + if (!ddc) + return -ENOMEM; + + ddc->dev = dev; + ddc->regs = device_node_to_regmap(dev->parent->of_node); + if (IS_ERR_OR_NULL(ddc->regs)) + return dev_err_probe(dev, + IS_ERR(ddc->regs) ? 
PTR_ERR(ddc->regs) : -EINVAL, + "Cannot get regmap\n"); + + ddc->clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(ddc->clk)) + return dev_err_probe(dev, PTR_ERR(ddc->clk), "Cannot get DDC clock\n"); + + strscpy(ddc->adap.name, "mediatek-hdmi-ddc-v2", sizeof(ddc->adap.name)); + ddc->adap.owner = THIS_MODULE; + ddc->adap.algo = &mtk_hdmi_ddc_v2_algorithm; + ddc->adap.retries = 3; + ddc->adap.dev.of_node = dev->of_node; + ddc->adap.algo_data = ddc; + ddc->adap.dev.parent = &pdev->dev; + + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Cannot enable Runtime PM\n"); + + pm_runtime_get_sync(dev); + + ret = devm_i2c_add_adapter(dev, &ddc->adap); + if (ret < 0) + return dev_err_probe(dev, ret, "Cannot add DDC I2C adapter\n"); + + platform_set_drvdata(pdev, ddc); + return 0; +} + +static const struct of_device_id mtk_hdmi_ddc_v2_match[] = { + { .compatible = "mediatek,mt8195-hdmi-ddc" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mtk_hdmi_ddc_v2_match); + +struct platform_driver mtk_hdmi_ddc_v2_driver = { + .probe = mtk_hdmi_ddc_v2_probe, + .driver = { + .name = "mediatek-hdmi-ddc-v2", + .of_match_table = mtk_hdmi_ddc_v2_match, + }, +}; +module_platform_driver(mtk_hdmi_ddc_v2_driver); + +MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>"); +MODULE_AUTHOR("Can Zeng <can.zeng@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek HDMIv2 DDC Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h b/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h new file mode 100644 index 000000000000..2050ba45b23a --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014 MediaTek Inc. 
+ * Author: Jie Qiu <jie.qiu@mediatek.com> + */ +#ifndef _MTK_HDMI_REGS_H +#define _MTK_HDMI_REGS_H + +#define GRL_INT_MASK 0x18 +#define GRL_IFM_PORT 0x188 +#define GRL_CH_SWAP 0x198 +#define LR_SWAP BIT(0) +#define LFE_CC_SWAP BIT(1) +#define LSRS_SWAP BIT(2) +#define RLS_RRS_SWAP BIT(3) +#define LR_STATUS_SWAP BIT(4) +#define GRL_I2S_C_STA0 0x140 +#define GRL_I2S_C_STA1 0x144 +#define GRL_I2S_C_STA2 0x148 +#define GRL_I2S_C_STA3 0x14C +#define GRL_I2S_C_STA4 0x150 +#define GRL_I2S_UV 0x154 +#define I2S_UV_V BIT(0) +#define I2S_UV_U BIT(1) +#define I2S_UV_CH_EN_MASK 0x3c +#define I2S_UV_CH_EN(x) BIT((x) + 2) +#define I2S_UV_TMDS_DEBUG BIT(6) +#define I2S_UV_NORMAL_INFO_INV BIT(7) +#define GRL_ACP_ISRC_CTRL 0x158 +#define VS_EN BIT(0) +#define ACP_EN BIT(1) +#define ISRC1_EN BIT(2) +#define ISRC2_EN BIT(3) +#define GAMUT_EN BIT(4) +#define GRL_CTS_CTRL 0x160 +#define CTS_CTRL_SOFT BIT(0) +#define GRL_INT 0x14 +#define INT_MDI BIT(0) +#define INT_HDCP BIT(1) +#define INT_FIFO_O BIT(2) +#define INT_FIFO_U BIT(3) +#define INT_IFM_ERR BIT(4) +#define INT_INF_DONE BIT(5) +#define INT_NCTS_DONE BIT(6) +#define INT_CTRL_PKT_DONE BIT(7) +#define GRL_INT_MASK 0x18 +#define GRL_CTRL 0x1C +#define CTRL_GEN_EN BIT(2) +#define CTRL_SPD_EN BIT(3) +#define CTRL_MPEG_EN BIT(4) +#define CTRL_AUDIO_EN BIT(5) +#define CTRL_AVI_EN BIT(6) +#define CTRL_AVMUTE BIT(7) +#define GRL_STATUS 0x20 +#define STATUS_HTPLG BIT(0) +#define STATUS_PORD BIT(1) +#define GRL_DIVN 0x170 +#define NCTS_WRI_ANYTIME BIT(6) +#define GRL_AUDIO_CFG 0x17C +#define AUDIO_ZERO BIT(0) +#define HIGH_BIT_RATE BIT(1) +#define SACD_DST BIT(2) +#define DST_NORMAL_DOUBLE BIT(3) +#define DSD_INV BIT(4) +#define LR_INV BIT(5) +#define LR_MIX BIT(6) +#define DSD_SEL BIT(7) +#define GRL_NCTS 0x184 +#define GRL_CH_SW0 0x18C +#define GRL_CH_SW1 0x190 +#define GRL_CH_SW2 0x194 +#define CH_SWITCH(from, to) ((from) << ((to) * 3)) +#define GRL_INFOFRM_VER 0x19C +#define GRL_INFOFRM_TYPE 0x1A0 +#define GRL_INFOFRM_LNG 0x1A4 +#define GRL_MIX_CTRL 0x1B4 +#define MIX_CTRL_SRC_EN BIT(0) +#define BYPASS_VOLUME BIT(1) +#define MIX_CTRL_FLAT BIT(7) +#define GRL_AOUT_CFG 0x1C4 +#define AOUT_BNUM_SEL_MASK 0x03 +#define AOUT_24BIT 0x00 +#define AOUT_20BIT 0x02 +#define AOUT_16BIT 0x03 +#define AOUT_FIFO_ADAP_CTRL BIT(6) +#define AOUT_BURST_PREAMBLE_EN BIT(7) +#define HIGH_BIT_RATE_PACKET_ALIGN (AOUT_BURST_PREAMBLE_EN | \ + AOUT_FIFO_ADAP_CTRL) +#define GRL_SHIFT_L1 0x1C0 +#define GRL_SHIFT_R2 0x1B0 +#define AUDIO_PACKET_OFF BIT(6) +#define GRL_CFG0 0x24 +#define CFG0_I2S_MODE_MASK 0x3 +#define CFG0_I2S_MODE_RTJ 0x1 +#define CFG0_I2S_MODE_LTJ 0x0 +#define CFG0_I2S_MODE_I2S 0x2 +#define CFG0_W_LENGTH_MASK 0x30 +#define CFG0_W_LENGTH_24BIT 0x00 +#define CFG0_W_LENGTH_16BIT 0x10 +#define GRL_CFG1 0x28 +#define CFG1_EDG_SEL BIT(0) +#define CFG1_SPDIF BIT(1) +#define CFG1_DVI BIT(2) +#define CFG1_HDCP_DEBUG BIT(3) +#define GRL_CFG2 0x2c +#define CFG2_MHL_DE_SEL BIT(3) +#define CFG2_MHL_FAKE_DE_SEL BIT(4) +#define CFG2_MHL_DATA_REMAP BIT(5) +#define CFG2_NOTICE_EN BIT(6) +#define CFG2_ACLK_INV BIT(7) +#define GRL_CFG3 0x30 +#define CFG3_AES_KEY_INDEX_MASK 0x3f +#define CFG3_CONTROL_PACKET_DELAY BIT(6) +#define CFG3_KSV_LOAD_START BIT(7) +#define GRL_CFG4 0x34 +#define CFG4_AES_KEY_LOAD BIT(4) +#define CFG4_AV_UNMUTE_EN BIT(5) +#define CFG4_AV_UNMUTE_SET BIT(6) +#define CFG4_MHL_MODE BIT(7) +#define GRL_CFG5 0x38 +#define CFG5_CD_RATIO_MASK 0x8F +#define CFG5_FS128 (0x1 << 4) +#define CFG5_FS256 (0x2 << 4) +#define CFG5_FS384 (0x3 << 4) +#define CFG5_FS512 (0x4 << 4) 
+#define CFG5_FS768 (0x6 << 4) +#define DUMMY_304 0x304 +#define CHMO_SEL (0x3 << 2) +#define CHM1_SEL (0x3 << 4) +#define CHM2_SEL (0x3 << 6) +#define AUDIO_I2S_NCTS_SEL BIT(1) +#define AUDIO_I2S_NCTS_SEL_64 (1 << 1) +#define AUDIO_I2S_NCTS_SEL_128 (0 << 1) +#define NEW_GCP_CTRL BIT(0) +#define NEW_GCP_CTRL_MERGE BIT(0) +#define GRL_L_STATUS_0 0x200 +#define GRL_L_STATUS_1 0x204 +#define GRL_L_STATUS_2 0x208 +#define GRL_L_STATUS_3 0x20c +#define GRL_L_STATUS_4 0x210 +#define GRL_L_STATUS_5 0x214 +#define GRL_L_STATUS_6 0x218 +#define GRL_L_STATUS_7 0x21c +#define GRL_L_STATUS_8 0x220 +#define GRL_L_STATUS_9 0x224 +#define GRL_L_STATUS_10 0x228 +#define GRL_L_STATUS_11 0x22c +#define GRL_L_STATUS_12 0x230 +#define GRL_L_STATUS_13 0x234 +#define GRL_L_STATUS_14 0x238 +#define GRL_L_STATUS_15 0x23c +#define GRL_L_STATUS_16 0x240 +#define GRL_L_STATUS_17 0x244 +#define GRL_L_STATUS_18 0x248 +#define GRL_L_STATUS_19 0x24c +#define GRL_L_STATUS_20 0x250 +#define GRL_L_STATUS_21 0x254 +#define GRL_L_STATUS_22 0x258 +#define GRL_L_STATUS_23 0x25c +#define GRL_R_STATUS_0 0x260 +#define GRL_R_STATUS_1 0x264 +#define GRL_R_STATUS_2 0x268 +#define GRL_R_STATUS_3 0x26c +#define GRL_R_STATUS_4 0x270 +#define GRL_R_STATUS_5 0x274 +#define GRL_R_STATUS_6 0x278 +#define GRL_R_STATUS_7 0x27c +#define GRL_R_STATUS_8 0x280 +#define GRL_R_STATUS_9 0x284 +#define GRL_R_STATUS_10 0x288 +#define GRL_R_STATUS_11 0x28c +#define GRL_R_STATUS_12 0x290 +#define GRL_R_STATUS_13 0x294 +#define GRL_R_STATUS_14 0x298 +#define GRL_R_STATUS_15 0x29c +#define GRL_R_STATUS_16 0x2a0 +#define GRL_R_STATUS_17 0x2a4 +#define GRL_R_STATUS_18 0x2a8 +#define GRL_R_STATUS_19 0x2ac +#define GRL_R_STATUS_20 0x2b0 +#define GRL_R_STATUS_21 0x2b4 +#define GRL_R_STATUS_22 0x2b8 +#define GRL_R_STATUS_23 0x2bc +#define GRL_ABIST_CTRL0 0x2D4 +#define GRL_ABIST_CTRL1 0x2D8 +#define ABIST_EN BIT(7) +#define ABIST_DATA_FMT (0x7 << 0) +#define VIDEO_CFG_0 0x380 +#define VIDEO_CFG_1 0x384 +#define VIDEO_CFG_2 0x388 +#define VIDEO_CFG_3 0x38c +#define VIDEO_CFG_4 0x390 +#define VIDEO_SOURCE_SEL BIT(7) +#define NORMAL_PATH (1 << 7) +#define GEN_RGB (0 << 7) + +#define HDMI_SYS_CFG1C 0x000 +#define HDMI_ON BIT(0) +#define HDMI_RST BIT(1) +#define ANLG_ON BIT(2) +#define CFG10_DVI BIT(3) +#define HDMI_TST BIT(3) +#define SYS_KEYMASK1 (0xff << 8) +#define SYS_KEYMASK2 (0xff << 16) +#define AUD_OUTSYNC_EN BIT(24) +#define AUD_OUTSYNC_PRE_EN BIT(25) +#define I2CM_ON BIT(26) +#define E2PROM_TYPE_8BIT BIT(27) +#define MCM_E2PROM_ON BIT(28) +#define EXT_E2PROM_ON BIT(29) +#define HTPLG_PIN_SEL_OFF BIT(30) +#define AES_EFUSE_ENABLE BIT(31) +#define HDMI_SYS_CFG20 0x004 +#define DEEP_COLOR_MODE_MASK (3 << 1) +#define COLOR_8BIT_MODE (0 << 1) +#define COLOR_10BIT_MODE (1 << 1) +#define COLOR_12BIT_MODE (2 << 1) +#define COLOR_16BIT_MODE (3 << 1) +#define DEEP_COLOR_EN BIT(0) +#define HDMI_AUDIO_TEST_SEL BIT(8) +#define HDMI2P0_EN BIT(11) +#define HDMI_OUT_FIFO_EN BIT(16) +#define HDMI_OUT_FIFO_CLK_INV BIT(17) +#define MHL_MODE_ON BIT(28) +#define MHL_PP_MODE BIT(29) +#define MHL_SYNC_AUTO_EN BIT(30) +#define HDMI_PCLK_FREE_RUN BIT(31) + +#define MTK_SIP_SET_AUTHORIZED_SECURE_REG 0x82000001 +#endif diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_regs_v2.h b/drivers/gpu/drm/mediatek/mtk_hdmi_regs_v2.h new file mode 100644 index 000000000000..521b35c7e14d --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_regs_v2.h @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021 MediaTek Inc. 
+ * Copyright (c) 2021 BayLibre, SAS + * Copyright (c) 2024 Collabora Ltd. + * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#ifndef _MTK_HDMI_REGS_H +#define _MTK_HDMI_REGS_H + +/* HDMI_TOP Config */ +#define TOP_CFG00 0x000 +#define HDMI2_ON BIT(2) +#define HDMI_MODE_HDMI BIT(3) +#define SCR_ON BIT(4) +#define TMDS_PACK_MODE GENMASK(9, 8) +#define TMDS_PACK_MODE_8BPP 0 +#define TMDS_PACK_MODE_10BPP 1 +#define TMDS_PACK_MODE_12BPP 2 +#define TMDS_PACK_MODE_16BPP 3 +#define DEEPCOLOR_PKT_EN BIT(12) +#define HDMI_ABIST_VIDEO_FORMAT GENMASK(21, 16) +#define HDMI_ABIST_ENABLE BIT(31) +#define TOP_CFG01 0x004 +#define CP_SET_MUTE_EN BIT(0) +#define CP_CLR_MUTE_EN BIT(1) +#define NULL_PKT_EN BIT(2) +#define NULL_PKT_VSYNC_HIGH_EN BIT(3) + +/* HDMI_TOP Audio: Channel Mapping */ +#define TOP_AUD_MAP 0x00c +#define SD0_MAP GENMASK(2, 0) +#define SD1_MAP GENMASK(6, 4) +#define SD2_MAP GENMASK(10, 8) +#define SD3_MAP GENMASK(14, 12) +#define SD4_MAP GENMASK(18, 16) +#define SD5_MAP GENMASK(22, 20) +#define SD6_MAP GENMASK(26, 24) +#define SD7_MAP GENMASK(30, 28) + +/* Auxiliary Video Information (AVI) Infoframe */ +#define TOP_AVI_HEADER 0x024 +#define TOP_AVI_PKT00 0x028 +#define TOP_AVI_PKT01 0x02C +#define TOP_AVI_PKT02 0x030 +#define TOP_AVI_PKT03 0x034 +#define TOP_AVI_PKT04 0x038 +#define TOP_AVI_PKT05 0x03C + +/* Audio Interface Infoframe */ +#define TOP_AIF_HEADER 0x040 +#define TOP_AIF_PKT00 0x044 +#define TOP_AIF_PKT01 0x048 +#define TOP_AIF_PKT02 0x04c +#define TOP_AIF_PKT03 0x050 + +/* Audio SPDIF Infoframe */ +#define TOP_SPDIF_HEADER 0x054 +#define TOP_SPDIF_PKT00 0x058 +#define TOP_SPDIF_PKT01 0x05c +#define TOP_SPDIF_PKT02 0x060 +#define TOP_SPDIF_PKT03 0x064 +#define TOP_SPDIF_PKT04 0x068 +#define TOP_SPDIF_PKT05 0x06c +#define TOP_SPDIF_PKT06 0x070 +#define TOP_SPDIF_PKT07 0x074 + +/* Infoframes Configuration */ +#define TOP_INFO_EN 0x01c +#define AVI_EN BIT(0) +#define SPD_EN BIT(1) +#define AUD_EN BIT(2) +#define CP_EN BIT(5) +#define VSIF_EN BIT(11) +#define AVI_EN_WR BIT(16) +#define SPD_EN_WR BIT(17) +#define AUD_EN_WR BIT(18) +#define CP_EN_WR BIT(21) +#define VSIF_EN_WR BIT(27) +#define TOP_INFO_RPT 0x020 +#define AVI_RPT_EN BIT(0) +#define SPD_RPT_EN BIT(1) +#define AUD_RPT_EN BIT(2) +#define CP_RPT_EN BIT(5) +#define VSIF_RPT_EN BIT(11) + +/* Vendor Specific Infoframe */ +#define TOP_VSIF_HEADER 0x174 +#define TOP_VSIF_PKT00 0x178 +#define TOP_VSIF_PKT01 0x17c +#define TOP_VSIF_PKT02 0x180 +#define TOP_VSIF_PKT03 0x184 +#define TOP_VSIF_PKT04 0x188 +#define TOP_VSIF_PKT05 0x18c +#define TOP_VSIF_PKT06 0x190 +#define TOP_VSIF_PKT07 0x194 + +/* HDMI_TOP Misc */ +#define TOP_MISC_CTLR 0x1a4 +#define DEEP_COLOR_ADD BIT(4) + +/* Hardware interrupts */ +#define TOP_INT_STA00 0x1a8 +#define TOP_INT_ENABLE00 0x1b0 +#define HTPLG_R_INT BIT(0) +#define HTPLG_F_INT BIT(1) +#define PORD_R_INT BIT(2) +#define PORD_F_INT BIT(3) +#define HDMI_VSYNC_INT BIT(4) +#define HDMI_AUDIO_INT BIT(5) +#define HDCP2X_RX_REAUTH_REQ_DDCM_INT BIT(25) +#define TOP_INT_ENABLE01 0x1b4 +#define TOP_INT_CLR00 0x1b8 +#define TOP_INT_CLR01 0x1bc + + +/* Video Mute */ +#define TOP_VMUTE_CFG1 0x1c8 +#define REG_VMUTE_EN BIT(16) + +/* HDMI Audio IP */ +#define AIP_CTRL 0x400 +#define CTS_SW_SEL BIT(0) +#define CTS_REQ_EN BIT(1) +#define MCLK_EN BIT(2) +#define NO_MCLK_CTSGEN_SEL BIT(3) +#define AUD_IN_EN BIT(8) +#define AUD_SEL_OWRT BIT(9) +#define SPDIF_EN BIT(13) +#define HBRA_ON BIT(14) +#define DSD_EN BIT(15) +#define I2S_EN GENMASK(19, 16) +#define HBR_FROM_SPDIF BIT(20) 
+#define CTS_CAL_N4 BIT(23) +#define SPDIF_INTERNAL_MODULE BIT(24) +#define AIP_N_VAL 0x404 +#define AIP_CTS_SVAL 0x408 +#define AIP_SPDIF_CTRL 0x40c +#define WR_1UI_LOCK BIT(0) +#define FS_OVERRIDE_WRITE BIT(1) +#define WR_2UI_LOCK BIT(2) +#define MAX_1UI_WRITE GENMASK(15, 8) +#define MAX_2UI_SPDIF_WRITE GENMASK(23, 16) +#define MAX_2UI_I2S_HI_WRITE GENMASK(23, 20) +#define MAX_2UI_I2S_LFE_CC_SWAP BIT(1) +#define MAX_2UI_I2S_LO_WRITE GENMASK(19, 16) +#define AUD_ERR_THRESH GENMASK(29, 24) +#define I2S2DSD_EN BIT(30) +#define AIP_I2S_CTRL 0x410 +#define FIFO0_MAP GENMASK(1, 0) +#define FIFO1_MAP GENMASK(3, 2) +#define FIFO2_MAP GENMASK(5, 4) +#define FIFO3_MAP GENMASK(7, 6) +#define I2S_1ST_BIT_NOSHIFT BIT(8) +#define I2S_DATA_DIR_LSB BIT(9) +#define JUSTIFY_RIGHT BIT(10) +#define WS_HIGH BIT(11) +#define VBIT_COMPRESSED BIT(12) +#define CBIT_ORDER_SAME BIT(13) +#define SCK_EDGE_RISE BIT(14) +#define AIP_I2S_CHST0 0x414 +#define AIP_I2S_CHST1 0x418 +#define AIP_TXCTRL 0x424 +#define RST4AUDIO BIT(0) +#define RST4AUDIO_FIFO BIT(1) +#define RST4AUDIO_ACR BIT(2) +#define AUD_LAYOUT_1 BIT(4) +#define AUD_MUTE_FIFO_EN BIT(5) +#define AUD_PACKET_DROP BIT(6) +#define DSD_MUTE_EN BIT(7) +#define AIP_TPI_CTRL 0x428 +#define TPI_AUDIO_LOOKUP_EN BIT(2) + +/* Video downsampling configuration */ +#define VID_DOWNSAMPLE_CONFIG 0x8d0 +#define C444_C422_CONFIG_ENABLE BIT(0) +#define C422_C420_CONFIG_ENABLE BIT(4) +#define C422_C420_CONFIG_BYPASS BIT(5) +#define C422_C420_CONFIG_OUT_CB_OR_CR BIT(6) +#define VID_OUT_FORMAT 0x8fc +#define OUTPUT_FORMAT_DEMUX_420_ENABLE BIT(10) + +/* HDCP registers */ +#define HDCP_TOP_CTRL 0xc00 +#define HDCP2X_CTRL_0 0xc20 +#define HDCP2X_EN BIT(0) +#define HDCP2X_ENCRYPT_EN BIT(7) +#define HDCP2X_HPD_OVR BIT(10) +#define HDCP2X_HPD_SW BIT(11) +#define HDCP2X_POL_CTRL 0xc54 +#define HDCP2X_DIS_POLL_EN BIT(16) +#define HDCP1X_CTRL 0xcd0 +#define HDCP1X_ENC_EN BIT(6) + +/* HDMI DDC registers */ +#define HPD_DDC_CTRL 0xc08 +#define HPD_DDC_DELAY_CNT GENMASK(31, 16) +#define HPD_DDC_HPD_DBNC_EN BIT(2) +#define HPD_DDC_PORD_DBNC_EN BIT(3) +#define DDC_CTRL 0xc10 +#define DDC_CTRL_ADDR GENMASK(7, 1) +#define DDC_CTRL_OFFSET GENMASK(15, 8) +#define DDC_CTRL_DIN_CNT GENMASK(25, 16) +#define DDC_CTRL_CMD GENMASK(31, 28) +#define SCDC_CTRL 0xc18 +#define SCDC_DDC_SEGMENT GENMASK(15, 8) +#define HPD_DDC_STATUS 0xc60 +#define HPD_STATE GENMASK(1, 0) +#define HPD_STATE_CONNECTED 2 +#define HPD_PIN_STA BIT(4) +#define PORD_PIN_STA BIT(5) +#define DDC_I2C_IN_PROG BIT(13) +#define DDC_DATA_OUT GENMASK(23, 16) +#define SI2C_CTRL 0xcac +#define SI2C_WR BIT(0) +#define SI2C_RD BIT(1) +#define SI2C_CONFIRM_READ BIT(2) +#define SI2C_WDATA GENMASK(15, 8) +#define SI2C_ADDR GENMASK(23, 16) + +/* HDCP DDC registers */ +#define HDCP2X_DDCM_STATUS 0xc68 +#define DDC_I2C_NO_ACK BIT(10) +#define DDC_I2C_BUS_LOW BIT(11) + +/* HDMI TX registers */ +#define HDMITX_CONFIG_MT8188 0xea0 +#define HDMITX_CONFIG_MT8195 0x900 +#define HDMI_YUV420_MODE BIT(10) +#define HDMITX_SW_HPD BIT(29) +#define HDMITX_SW_RSTB BIT(31) + +/** + * enum mtk_hdmi_ddc_v2_cmds - DDC_CMD register commands + * @DDC_CMD_READ_NOACK: Current address read with no ACK on last byte + * @DDC_CMD_READ: Current address read with ACK on last byte + * @DDC_CMD_SEQ_READ_NOACK: Sequential read with no ACK on last byte + * @DDC_CMD_SEQ_READ: Sequential read with ACK on last byte + * @DDC_CMD_ENH_READ_NOACK: Enhanced read with no ACK on last byte + * @DDC_CMD_ENH_READ: Enhanced read with ACK on last byte + * @DDC_CMD_SEQ_WRITE_NOACK: Sequential 
write ignoring ACK on last byte + * @DDC_CMD_SEQ_WRITE: Sequential write requiring ACK on last byte + * @DDC_CMD_RSVD: Reserved for future use + * @DDC_CMD_CLEAR_FIFO: Clear DDC I2C FIFO + * @DDC_CMD_CLOCK_SCL: Start clocking DDC I2C SCL + * @DDC_CMD_ABORT_XFER: Abort DDC I2C transaction + */ +enum mtk_hdmi_ddc_v2_cmds { + DDC_CMD_READ_NOACK = 0x0, + DDC_CMD_READ, + DDC_CMD_SEQ_READ_NOACK, + DDC_CMD_SEQ_READ, + DDC_CMD_ENH_READ_NOACK, + DDC_CMD_ENH_READ, + DDC_CMD_SEQ_WRITE_NOACK, + DDC_CMD_SEQ_WRITE = 0x07, + DDC_CMD_CLEAR_FIFO = 0x09, + DDC_CMD_CLOCK_SCL = 0x0a, + DDC_CMD_ABORT_XFER = 0x0f +}; + +#endif /* _MTK_HDMI_REGS_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_v2.c b/drivers/gpu/drm/mediatek/mtk_hdmi_v2.c new file mode 100644 index 000000000000..c272e1e74b7d --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_v2.c @@ -0,0 +1,1521 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MediaTek HDMI v2 IP driver + * + * Copyright (c) 2022 MediaTek Inc. + * Copyright (c) 2022 BayLibre, SAS + * Copyright (c) 2024 Collabora Ltd. + * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/suspend.h> +#include <linux/units.h> +#include <linux/phy/phy.h> + +#include <drm/display/drm_hdmi_helper.h> +#include <drm/display/drm_hdmi_state_helper.h> +#include <drm/display/drm_scdc_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_edid.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> + +#include "mtk_hdmi_common.h" +#include "mtk_hdmi_regs_v2.h" + +#define MTK_HDMI_V2_CLOCK_MIN 27000 +#define MTK_HDMI_V2_CLOCK_MAX 594000 + +#define HPD_PORD_HWIRQS (HTPLG_R_INT | HTPLG_F_INT | PORD_F_INT | PORD_R_INT) + +enum mtk_hdmi_v2_clk_id { + MTK_HDMI_V2_CLK_HDCP_SEL, + MTK_HDMI_V2_CLK_HDCP_24M_SEL, + MTK_HDMI_V2_CLK_VPP_SPLIT_HDMI, + MTK_HDMI_V2_CLK_HDMI_APB_SEL, + MTK_HDMI_V2_CLK_COUNT, +}; + +const char *const mtk_hdmi_v2_clk_names[MTK_HDMI_V2_CLK_COUNT] = { + [MTK_HDMI_V2_CLK_HDMI_APB_SEL] = "bus", + [MTK_HDMI_V2_CLK_HDCP_SEL] = "hdcp", + [MTK_HDMI_V2_CLK_HDCP_24M_SEL] = "hdcp24m", + [MTK_HDMI_V2_CLK_VPP_SPLIT_HDMI] = "hdmi-split", +}; + +static inline void mtk_hdmi_v2_hwirq_disable(struct mtk_hdmi *hdmi) +{ + regmap_write(hdmi->regs, TOP_INT_ENABLE00, 0); + regmap_write(hdmi->regs, TOP_INT_ENABLE01, 0); +} + +static inline void mtk_hdmi_v2_enable_hpd_pord_irq(struct mtk_hdmi *hdmi, bool enable) +{ + if (enable) + regmap_set_bits(hdmi->regs, TOP_INT_ENABLE00, HPD_PORD_HWIRQS); + else + regmap_clear_bits(hdmi->regs, TOP_INT_ENABLE00, HPD_PORD_HWIRQS); +} + +static inline void mtk_hdmi_v2_set_sw_hpd(struct mtk_hdmi *hdmi, bool enable) +{ + if (enable) { + regmap_set_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMITX_SW_HPD); + regmap_set_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_OVR); + regmap_set_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_SW); + } else { + regmap_clear_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_OVR); + regmap_clear_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_SW); + regmap_clear_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMITX_SW_HPD); + } +} + +static inline void mtk_hdmi_v2_enable_scrambling(struct mtk_hdmi *hdmi, bool enable) +{ + struct drm_scdc 
*scdc = &hdmi->curr_conn->display_info.hdmi.scdc; + + if (enable) + regmap_set_bits(hdmi->regs, TOP_CFG00, SCR_ON | HDMI2_ON); + else + regmap_clear_bits(hdmi->regs, TOP_CFG00, SCR_ON | HDMI2_ON); + + if (scdc->supported) { + if (scdc->scrambling.supported) + drm_scdc_set_scrambling(hdmi->curr_conn, enable); + drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, enable); + } +} + +static void mtk_hdmi_v2_hw_vid_mute(struct mtk_hdmi *hdmi, bool enable) +{ + /* If enabled, sends a black image */ + if (enable) + regmap_set_bits(hdmi->regs, TOP_VMUTE_CFG1, REG_VMUTE_EN); + else + regmap_clear_bits(hdmi->regs, TOP_VMUTE_CFG1, REG_VMUTE_EN); +} + +static void mtk_hdmi_v2_hw_aud_mute(struct mtk_hdmi *hdmi, bool enable) +{ + u32 aip, val; + + if (!enable) { + regmap_clear_bits(hdmi->regs, AIP_TXCTRL, AUD_MUTE_FIFO_EN); + return; + } + + regmap_read(hdmi->regs, AIP_CTRL, &aip); + + val = AUD_MUTE_FIFO_EN; + if (aip & DSD_EN) + val |= DSD_MUTE_EN; + + regmap_update_bits(hdmi->regs, AIP_TXCTRL, val, val); +} + +static void mtk_hdmi_v2_hw_reset(struct mtk_hdmi *hdmi) +{ + regmap_clear_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMITX_SW_RSTB); + udelay(5); + regmap_set_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMITX_SW_RSTB); +} + +static inline u32 mtk_hdmi_v2_format_hw_packet(const u8 *buffer, u8 len) +{ + unsigned short i; + u32 val = 0; + + for (i = 0; i < len; i++) + val |= buffer[i] << (i * 8); + + return val; +} + +static void mtk_hdmi_v2_hw_write_audio_infoframe(struct mtk_hdmi *hdmi, const u8 *buffer) +{ + regmap_clear_bits(hdmi->regs, TOP_INFO_EN, AUD_EN | AUD_EN_WR); + regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, AUD_RPT_EN); + + regmap_write(hdmi->regs, TOP_AIF_HEADER, mtk_hdmi_v2_format_hw_packet(&buffer[0], 3)); + regmap_write(hdmi->regs, TOP_AIF_PKT00, mtk_hdmi_v2_format_hw_packet(&buffer[3], 3)); + regmap_write(hdmi->regs, TOP_AIF_PKT01, mtk_hdmi_v2_format_hw_packet(&buffer[7], 2)); + regmap_write(hdmi->regs, TOP_AIF_PKT02, 0); + regmap_write(hdmi->regs, TOP_AIF_PKT03, 0); + + regmap_set_bits(hdmi->regs, TOP_INFO_RPT, AUD_RPT_EN); + regmap_set_bits(hdmi->regs, TOP_INFO_EN, AUD_EN | AUD_EN_WR); +} + +static void mtk_hdmi_v2_hw_write_avi_infoframe(struct mtk_hdmi *hdmi, const u8 *buffer) +{ + regmap_clear_bits(hdmi->regs, TOP_INFO_EN, AVI_EN_WR | AVI_EN); + regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, AVI_RPT_EN); + + regmap_write(hdmi->regs, TOP_AVI_HEADER, mtk_hdmi_v2_format_hw_packet(&buffer[0], 3)); + regmap_write(hdmi->regs, TOP_AVI_PKT00, mtk_hdmi_v2_format_hw_packet(&buffer[3], 4)); + regmap_write(hdmi->regs, TOP_AVI_PKT01, mtk_hdmi_v2_format_hw_packet(&buffer[7], 3)); + regmap_write(hdmi->regs, TOP_AVI_PKT02, mtk_hdmi_v2_format_hw_packet(&buffer[10], 4)); + regmap_write(hdmi->regs, TOP_AVI_PKT03, mtk_hdmi_v2_format_hw_packet(&buffer[14], 3)); + regmap_write(hdmi->regs, TOP_AVI_PKT04, 0); + regmap_write(hdmi->regs, TOP_AVI_PKT05, 0); + + regmap_set_bits(hdmi->regs, TOP_INFO_RPT, AVI_RPT_EN); + regmap_set_bits(hdmi->regs, TOP_INFO_EN, AVI_EN_WR | AVI_EN); +} + +static void mtk_hdmi_v2_hw_write_spd_infoframe(struct mtk_hdmi *hdmi, const u8 *buffer) +{ + regmap_clear_bits(hdmi->regs, TOP_INFO_EN, SPD_EN_WR | SPD_EN); + regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, SPD_RPT_EN); + + regmap_write(hdmi->regs, TOP_SPDIF_HEADER, mtk_hdmi_v2_format_hw_packet(&buffer[0], 3)); + regmap_write(hdmi->regs, TOP_SPDIF_PKT00, mtk_hdmi_v2_format_hw_packet(&buffer[3], 4)); + regmap_write(hdmi->regs, TOP_SPDIF_PKT01, mtk_hdmi_v2_format_hw_packet(&buffer[7], 3)); + regmap_write(hdmi->regs, 
TOP_SPDIF_PKT02, mtk_hdmi_v2_format_hw_packet(&buffer[10], 4)); + regmap_write(hdmi->regs, TOP_SPDIF_PKT03, mtk_hdmi_v2_format_hw_packet(&buffer[14], 3)); + regmap_write(hdmi->regs, TOP_SPDIF_PKT04, mtk_hdmi_v2_format_hw_packet(&buffer[17], 4)); + regmap_write(hdmi->regs, TOP_SPDIF_PKT05, mtk_hdmi_v2_format_hw_packet(&buffer[21], 3)); + regmap_write(hdmi->regs, TOP_SPDIF_PKT06, mtk_hdmi_v2_format_hw_packet(&buffer[24], 4)); + regmap_write(hdmi->regs, TOP_SPDIF_PKT07, buffer[28]); + + regmap_set_bits(hdmi->regs, TOP_INFO_EN, SPD_EN_WR | SPD_EN); + regmap_set_bits(hdmi->regs, TOP_INFO_RPT, SPD_RPT_EN); +} + +static void mtk_hdmi_v2_hw_write_vendor_infoframe(struct mtk_hdmi *hdmi, const u8 *buffer) +{ + regmap_clear_bits(hdmi->regs, TOP_INFO_EN, VSIF_EN_WR | VSIF_EN); + regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, VSIF_RPT_EN); + + regmap_write(hdmi->regs, TOP_VSIF_HEADER, mtk_hdmi_v2_format_hw_packet(&buffer[0], 3)); + regmap_write(hdmi->regs, TOP_VSIF_PKT00, mtk_hdmi_v2_format_hw_packet(&buffer[3], 4)); + regmap_write(hdmi->regs, TOP_VSIF_PKT01, mtk_hdmi_v2_format_hw_packet(&buffer[7], 2)); + regmap_write(hdmi->regs, TOP_VSIF_PKT02, 0); + regmap_write(hdmi->regs, TOP_VSIF_PKT03, 0); + regmap_write(hdmi->regs, TOP_VSIF_PKT04, 0); + regmap_write(hdmi->regs, TOP_VSIF_PKT05, 0); + regmap_write(hdmi->regs, TOP_VSIF_PKT06, 0); + regmap_write(hdmi->regs, TOP_VSIF_PKT07, 0); + + regmap_set_bits(hdmi->regs, TOP_INFO_EN, VSIF_EN_WR | VSIF_EN); + regmap_set_bits(hdmi->regs, TOP_INFO_RPT, VSIF_RPT_EN); +} + +static void mtk_hdmi_yuv420_downsampling(struct mtk_hdmi *hdmi, bool enable) +{ + u32 val; + + regmap_read(hdmi->regs, VID_DOWNSAMPLE_CONFIG, &val); + + if (enable) { + regmap_set_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMI_YUV420_MODE); + + val |= C444_C422_CONFIG_ENABLE | C422_C420_CONFIG_ENABLE; + val |= C422_C420_CONFIG_OUT_CB_OR_CR; + val &= ~C422_C420_CONFIG_BYPASS; + regmap_write(hdmi->regs, VID_DOWNSAMPLE_CONFIG, val); + + regmap_set_bits(hdmi->regs, VID_OUT_FORMAT, OUTPUT_FORMAT_DEMUX_420_ENABLE); + } else { + regmap_clear_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMI_YUV420_MODE); + + val &= ~(C444_C422_CONFIG_ENABLE | C422_C420_CONFIG_ENABLE); + val &= ~C422_C420_CONFIG_OUT_CB_OR_CR; + val |= C422_C420_CONFIG_BYPASS; + regmap_write(hdmi->regs, VID_DOWNSAMPLE_CONFIG, val); + + regmap_clear_bits(hdmi->regs, VID_OUT_FORMAT, OUTPUT_FORMAT_DEMUX_420_ENABLE); + } +} + +static int mtk_hdmi_v2_setup_audio_infoframe(struct mtk_hdmi *hdmi) +{ + struct hdmi_codec_params *params = &hdmi->aud_param.codec_params; + struct hdmi_audio_infoframe frame; + u8 buffer[14]; + ssize_t ret; + + memcpy(&frame, ¶ms->cea, sizeof(frame)); + + ret = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (ret < 0) + return ret; + + mtk_hdmi_v2_hw_write_audio_infoframe(hdmi, buffer); + + return 0; +} + +static inline void mtk_hdmi_v2_hw_gcp_avmute(struct mtk_hdmi *hdmi, bool mute) +{ + u32 val; + + regmap_read(hdmi->regs, TOP_CFG01, &val); + val &= ~(CP_CLR_MUTE_EN | CP_SET_MUTE_EN); + + if (mute) { + val |= CP_SET_MUTE_EN; + val &= ~CP_CLR_MUTE_EN; + } else { + val |= CP_CLR_MUTE_EN; + val &= ~CP_SET_MUTE_EN; + } + regmap_write(hdmi->regs, TOP_CFG01, val); + + regmap_set_bits(hdmi->regs, TOP_INFO_RPT, CP_RPT_EN); + regmap_set_bits(hdmi->regs, TOP_INFO_EN, CP_EN | CP_EN_WR); +} + +static void mtk_hdmi_v2_hw_ncts_enable(struct mtk_hdmi *hdmi, bool enable) +{ + if (enable) + regmap_set_bits(hdmi->regs, AIP_CTRL, CTS_SW_SEL); + else + regmap_clear_bits(hdmi->regs, AIP_CTRL, CTS_SW_SEL); +} + +static 
void mtk_hdmi_v2_hw_aud_set_channel_status(struct mtk_hdmi *hdmi) +{ + u8 *ch_status = hdmi->aud_param.codec_params.iec.status; + + /* Only the first 5 to 7 bytes of Channel Status contain useful information */ + regmap_write(hdmi->regs, AIP_I2S_CHST0, mtk_hdmi_v2_format_hw_packet(&ch_status[0], 4)); + regmap_write(hdmi->regs, AIP_I2S_CHST1, mtk_hdmi_v2_format_hw_packet(&ch_status[4], 3)); +} + +static void mtk_hdmi_v2_hw_aud_set_ncts(struct mtk_hdmi *hdmi, + unsigned int sample_rate, + unsigned int clock) +{ + unsigned int n, cts; + + mtk_hdmi_get_ncts(sample_rate, clock, &n, &cts); + + regmap_write(hdmi->regs, AIP_N_VAL, n); + regmap_write(hdmi->regs, AIP_CTS_SVAL, cts); +} + +static void mtk_hdmi_v2_hw_aud_enable(struct mtk_hdmi *hdmi, bool enable) +{ + if (enable) + regmap_clear_bits(hdmi->regs, AIP_TXCTRL, AUD_PACKET_DROP); + else + regmap_set_bits(hdmi->regs, AIP_TXCTRL, AUD_PACKET_DROP); +} + +static u32 mtk_hdmi_v2_aud_output_channel_map(u8 sd0, u8 sd1, u8 sd2, u8 sd3, + u8 sd4, u8 sd5, u8 sd6, u8 sd7) +{ + u32 val; + + /* + * Each of the Output Channels (0-7) can be mapped to get their input + * from any of the available Input Channels (0-7): this function + * takes input channel numbers and formats a value that must then + * be written to the TOP_AUD_MAP hardware register by the caller. + */ + val = FIELD_PREP(SD0_MAP, sd0) | FIELD_PREP(SD1_MAP, sd1); + val |= FIELD_PREP(SD2_MAP, sd2) | FIELD_PREP(SD3_MAP, sd3); + val |= FIELD_PREP(SD4_MAP, sd4) | FIELD_PREP(SD5_MAP, sd5); + val |= FIELD_PREP(SD6_MAP, sd6) | FIELD_PREP(SD7_MAP, sd7); + + return val; +} + +static void mtk_hdmi_audio_dsd_config(struct mtk_hdmi *hdmi, + unsigned char chnum, bool dsd_bypass) +{ + u32 channel_map; + + regmap_update_bits(hdmi->regs, AIP_CTRL, SPDIF_EN | DSD_EN | HBRA_ON, DSD_EN); + regmap_set_bits(hdmi->regs, AIP_TXCTRL, DSD_MUTE_EN); + + if (dsd_bypass) + channel_map = mtk_hdmi_v2_aud_output_channel_map(0, 2, 4, 6, 1, 3, 5, 7); + else + channel_map = mtk_hdmi_v2_aud_output_channel_map(0, 5, 1, 0, 3, 2, 4, 0); + + regmap_write(hdmi->regs, TOP_AUD_MAP, channel_map); + regmap_clear_bits(hdmi->regs, AIP_SPDIF_CTRL, I2S2DSD_EN); +} + +static inline void mtk_hdmi_v2_hw_i2s_fifo_map(struct mtk_hdmi *hdmi, u32 fifo_mapping) +{ + regmap_update_bits(hdmi->regs, AIP_I2S_CTRL, + FIFO0_MAP | FIFO1_MAP | FIFO2_MAP | FIFO3_MAP, fifo_mapping); +} + +static inline void mtk_hdmi_v2_hw_i2s_ch_number(struct mtk_hdmi *hdmi, u8 chnum) +{ + regmap_update_bits(hdmi->regs, AIP_CTRL, I2S_EN, FIELD_PREP(I2S_EN, chnum)); +} + +static void mtk_hdmi_v2_hw_i2s_ch_mapping(struct mtk_hdmi *hdmi, u8 chnum, u8 mapping) +{ + u32 fifo_map; + u8 bdata; + + switch (chnum) { + default: + case 2: + bdata = 0x1; + break; + case 3: + bdata = 0x3; + break; + case 6: + if (mapping == 0x0e) { + bdata = 0xf; + break; + } + fallthrough; + case 5: + bdata = 0x7; + break; + case 7: + case 8: + bdata = 0xf; + break; + } + + /* Assign default FIFO mapping: SD0 to FIFO0, SD1 to FIFO1, etc. 
*/ + fifo_map = FIELD_PREP(FIFO0_MAP, 0) | FIELD_PREP(FIFO1_MAP, 1); + fifo_map |= FIELD_PREP(FIFO2_MAP, 2) | FIELD_PREP(FIFO3_MAP, 3); + mtk_hdmi_v2_hw_i2s_fifo_map(hdmi, fifo_map); + mtk_hdmi_v2_hw_i2s_ch_number(hdmi, bdata); + + /* + * Set HDMI Audio packet layout indicator: + * Layout 0 is for two channels + * Layout 1 is for up to eight channels + */ + if (chnum == 2) + regmap_set_bits(hdmi->regs, AIP_TXCTRL, AUD_LAYOUT_1); + else + regmap_clear_bits(hdmi->regs, AIP_TXCTRL, AUD_LAYOUT_1); +} + +static void mtk_hdmi_i2s_data_fmt(struct mtk_hdmi *hdmi, unsigned char fmt) +{ + u32 val; + + regmap_read(hdmi->regs, AIP_I2S_CTRL, &val); + val &= ~(WS_HIGH | I2S_1ST_BIT_NOSHIFT | JUSTIFY_RIGHT); + + switch (fmt) { + case HDMI_I2S_MODE_RJT_24BIT: + case HDMI_I2S_MODE_RJT_16BIT: + val |= (WS_HIGH | I2S_1ST_BIT_NOSHIFT | JUSTIFY_RIGHT); + break; + case HDMI_I2S_MODE_LJT_24BIT: + case HDMI_I2S_MODE_LJT_16BIT: + val |= (WS_HIGH | I2S_1ST_BIT_NOSHIFT); + break; + case HDMI_I2S_MODE_I2S_24BIT: + case HDMI_I2S_MODE_I2S_16BIT: + default: + break; + } + + regmap_write(hdmi->regs, AIP_I2S_CTRL, val); +} + +static inline void mtk_hdmi_i2s_sck_edge_rise(struct mtk_hdmi *hdmi, bool rise) +{ + if (rise) + regmap_set_bits(hdmi->regs, AIP_I2S_CTRL, SCK_EDGE_RISE); + else + regmap_clear_bits(hdmi->regs, AIP_I2S_CTRL, SCK_EDGE_RISE); +} + +static inline void mtk_hdmi_i2s_cbit_order(struct mtk_hdmi *hdmi, unsigned int cbit) +{ + regmap_update_bits(hdmi->regs, AIP_I2S_CTRL, CBIT_ORDER_SAME, cbit); +} + +static inline void mtk_hdmi_i2s_vbit(struct mtk_hdmi *hdmi, unsigned int vbit) +{ + /* V bit: 0 for PCM, 1 for Compressed data */ + regmap_update_bits(hdmi->regs, AIP_I2S_CTRL, VBIT_COMPRESSED, vbit); +} + +static inline void mtk_hdmi_i2s_data_direction(struct mtk_hdmi *hdmi, unsigned int is_lsb) +{ + regmap_update_bits(hdmi->regs, AIP_I2S_CTRL, I2S_DATA_DIR_LSB, is_lsb); +} + +static inline void mtk_hdmi_v2_hw_audio_type(struct mtk_hdmi *hdmi, unsigned int spdif_i2s) +{ + regmap_update_bits(hdmi->regs, AIP_CTRL, SPDIF_EN, FIELD_PREP(SPDIF_EN, spdif_i2s)); +} + +static u8 mtk_hdmi_v2_get_i2s_ch_mapping(struct mtk_hdmi *hdmi, u8 channel_type) +{ + switch (channel_type) { + case HDMI_AUD_CHAN_TYPE_1_1: + case HDMI_AUD_CHAN_TYPE_2_1: + return 0x01; + case HDMI_AUD_CHAN_TYPE_3_0: + return 0x02; + case HDMI_AUD_CHAN_TYPE_3_1: + return 0x03; + case HDMI_AUD_CHAN_TYPE_3_0_LRS: + case HDMI_AUD_CHAN_TYPE_4_0: + return 0x08; + case HDMI_AUD_CHAN_TYPE_5_1: + return 0x0b; + case HDMI_AUD_CHAN_TYPE_4_1_CLRS: + case HDMI_AUD_CHAN_TYPE_6_0: + case HDMI_AUD_CHAN_TYPE_6_0_CS: + case HDMI_AUD_CHAN_TYPE_6_0_CH: + case HDMI_AUD_CHAN_TYPE_6_0_OH: + case HDMI_AUD_CHAN_TYPE_6_0_CHR: + return 0x0e; + case HDMI_AUD_CHAN_TYPE_1_0: + case HDMI_AUD_CHAN_TYPE_2_0: + case HDMI_AUD_CHAN_TYPE_3_1_LRS: + case HDMI_AUD_CHAN_TYPE_4_1: + case HDMI_AUD_CHAN_TYPE_5_0: + case HDMI_AUD_CHAN_TYPE_4_0_CLRS: + case HDMI_AUD_CHAN_TYPE_6_1: + case HDMI_AUD_CHAN_TYPE_6_1_CS: + case HDMI_AUD_CHAN_TYPE_6_1_CH: + case HDMI_AUD_CHAN_TYPE_6_1_OH: + case HDMI_AUD_CHAN_TYPE_6_1_CHR: + case HDMI_AUD_CHAN_TYPE_7_0: + case HDMI_AUD_CHAN_TYPE_7_0_LH_RH: + case HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR: + case HDMI_AUD_CHAN_TYPE_7_0_LC_RC: + case HDMI_AUD_CHAN_TYPE_7_0_LW_RW: + case HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD: + case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS: + case HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS: + case HDMI_AUD_CHAN_TYPE_7_0_CS_CH: + case HDMI_AUD_CHAN_TYPE_7_0_CS_OH: + case HDMI_AUD_CHAN_TYPE_7_0_CS_CHR: + case HDMI_AUD_CHAN_TYPE_7_0_CH_OH: + case HDMI_AUD_CHAN_TYPE_7_0_CH_CHR: + case 
HDMI_AUD_CHAN_TYPE_7_0_OH_CHR: + case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR: + case HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS: + case HDMI_AUD_CHAN_TYPE_7_1: + case HDMI_AUD_CHAN_TYPE_7_1_LH_RH: + case HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR: + case HDMI_AUD_CHAN_TYPE_7_1_LC_RC: + case HDMI_AUD_CHAN_TYPE_7_1_LW_RW: + case HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD: + case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS: + case HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS: + case HDMI_AUD_CHAN_TYPE_7_1_CS_CH: + case HDMI_AUD_CHAN_TYPE_7_1_CS_OH: + case HDMI_AUD_CHAN_TYPE_7_1_CS_CHR: + case HDMI_AUD_CHAN_TYPE_7_1_CH_OH: + case HDMI_AUD_CHAN_TYPE_7_1_CH_CHR: + case HDMI_AUD_CHAN_TYPE_7_1_OH_CHR: + case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR: + default: + return 0; + } + + return 0; +} + +static inline void mtk_hdmi_v2_hw_i2s_ch_swap(struct mtk_hdmi *hdmi) +{ + regmap_update_bits(hdmi->regs, AIP_SPDIF_CTRL, MAX_2UI_I2S_HI_WRITE, + FIELD_PREP(MAX_2UI_I2S_HI_WRITE, MAX_2UI_I2S_LFE_CC_SWAP)); +} + +static void mtk_hdmi_hbr_config(struct mtk_hdmi *hdmi, bool dsd_bypass) +{ + const u32 hbr_mask = SPDIF_EN | DSD_EN | HBRA_ON; + + if (dsd_bypass) { + regmap_update_bits(hdmi->regs, AIP_CTRL, hbr_mask, HBRA_ON); + regmap_set_bits(hdmi->regs, AIP_CTRL, I2S_EN); + } else { + regmap_update_bits(hdmi->regs, AIP_CTRL, hbr_mask, SPDIF_EN); + regmap_set_bits(hdmi->regs, AIP_CTRL, SPDIF_INTERNAL_MODULE); + regmap_set_bits(hdmi->regs, AIP_CTRL, HBR_FROM_SPDIF); + regmap_set_bits(hdmi->regs, AIP_CTRL, CTS_CAL_N4); + } +} + +static inline void mtk_hdmi_v2_hw_spdif_config(struct mtk_hdmi *hdmi) +{ + regmap_clear_bits(hdmi->regs, AIP_SPDIF_CTRL, WR_1UI_LOCK); + regmap_clear_bits(hdmi->regs, AIP_SPDIF_CTRL, FS_OVERRIDE_WRITE); + regmap_clear_bits(hdmi->regs, AIP_SPDIF_CTRL, WR_2UI_LOCK); + + regmap_update_bits(hdmi->regs, AIP_SPDIF_CTRL, MAX_1UI_WRITE, + FIELD_PREP(MAX_1UI_WRITE, 4)); + regmap_update_bits(hdmi->regs, AIP_SPDIF_CTRL, MAX_2UI_SPDIF_WRITE, + FIELD_PREP(MAX_2UI_SPDIF_WRITE, 9)); + regmap_update_bits(hdmi->regs, AIP_SPDIF_CTRL, AUD_ERR_THRESH, + FIELD_PREP(AUD_ERR_THRESH, 4)); + + regmap_set_bits(hdmi->regs, AIP_SPDIF_CTRL, I2S2DSD_EN); +} + +static void mtk_hdmi_v2_aud_set_input(struct mtk_hdmi *hdmi) +{ + struct hdmi_audio_param *aud_param = &hdmi->aud_param; + struct hdmi_codec_params *codec_params = &aud_param->codec_params; + u8 i2s_ch_map; + u32 out_ch_map; + + /* Write the default output channel map. 
CH0 maps to SD0, CH1 maps to SD1, etc */ + out_ch_map = mtk_hdmi_v2_aud_output_channel_map(0, 1, 2, 3, 4, 5, 6, 7); + regmap_write(hdmi->regs, TOP_AUD_MAP, out_ch_map); + + regmap_update_bits(hdmi->regs, AIP_SPDIF_CTRL, MAX_2UI_I2S_HI_WRITE, 0); + regmap_clear_bits(hdmi->regs, AIP_CTRL, + SPDIF_EN | DSD_EN | HBRA_ON | CTS_CAL_N4 | + HBR_FROM_SPDIF | SPDIF_INTERNAL_MODULE); + regmap_clear_bits(hdmi->regs, AIP_TXCTRL, DSD_MUTE_EN | AUD_LAYOUT_1); + + if (aud_param->aud_input_type == HDMI_AUD_INPUT_I2S) { + switch (aud_param->aud_codec) { + case HDMI_AUDIO_CODING_TYPE_DTS_HD: + case HDMI_AUDIO_CODING_TYPE_MLP: + mtk_hdmi_i2s_data_fmt(hdmi, aud_param->aud_i2s_fmt); + mtk_hdmi_hbr_config(hdmi, true); + break; + case HDMI_AUDIO_CODING_TYPE_DSD: + mtk_hdmi_audio_dsd_config(hdmi, codec_params->channels, 0); + mtk_hdmi_v2_hw_i2s_ch_mapping(hdmi, codec_params->channels, 1); + break; + default: + mtk_hdmi_i2s_data_fmt(hdmi, aud_param->aud_i2s_fmt); + mtk_hdmi_i2s_sck_edge_rise(hdmi, true); + mtk_hdmi_i2s_cbit_order(hdmi, CBIT_ORDER_SAME); + mtk_hdmi_i2s_vbit(hdmi, 0); /* PCM data */ + mtk_hdmi_i2s_data_direction(hdmi, 0); /* MSB first */ + mtk_hdmi_v2_hw_audio_type(hdmi, HDMI_AUD_INPUT_I2S); + i2s_ch_map = mtk_hdmi_v2_get_i2s_ch_mapping(hdmi, + aud_param->aud_input_chan_type); + mtk_hdmi_v2_hw_i2s_ch_mapping(hdmi, codec_params->channels, i2s_ch_map); + mtk_hdmi_v2_hw_i2s_ch_swap(hdmi); + } + } else { + if (codec_params->sample_rate == 768000 && + (aud_param->aud_codec == HDMI_AUDIO_CODING_TYPE_DTS_HD || + aud_param->aud_codec == HDMI_AUDIO_CODING_TYPE_MLP)) { + mtk_hdmi_hbr_config(hdmi, false); + } else { + mtk_hdmi_v2_hw_spdif_config(hdmi); + mtk_hdmi_v2_hw_i2s_ch_mapping(hdmi, 2, 0); + } + } +} + +static inline void mtk_hdmi_v2_hw_audio_input_enable(struct mtk_hdmi *hdmi, bool ena) +{ + if (ena) + regmap_set_bits(hdmi->regs, AIP_CTRL, AUD_IN_EN); + else + regmap_clear_bits(hdmi->regs, AIP_CTRL, AUD_IN_EN); +} + +static void mtk_hdmi_v2_aip_ctrl_init(struct mtk_hdmi *hdmi) +{ + regmap_set_bits(hdmi->regs, AIP_CTRL, + AUD_SEL_OWRT | NO_MCLK_CTSGEN_SEL | MCLK_EN | CTS_REQ_EN); + regmap_clear_bits(hdmi->regs, AIP_TPI_CTRL, TPI_AUDIO_LOOKUP_EN); +} + +static void mtk_hdmi_v2_audio_reset(struct mtk_hdmi *hdmi, bool reset) +{ + const u32 arst_bits = RST4AUDIO | RST4AUDIO_FIFO | RST4AUDIO_ACR; + + if (reset) + regmap_set_bits(hdmi->regs, AIP_TXCTRL, arst_bits); + else + regmap_clear_bits(hdmi->regs, AIP_TXCTRL, arst_bits); +} + +static void mtk_hdmi_v2_aud_output_config(struct mtk_hdmi *hdmi, + struct drm_display_mode *display_mode) +{ + /* Shut down and reset the HDMI Audio HW to avoid glitching */ + mtk_hdmi_v2_hw_aud_mute(hdmi, true); + mtk_hdmi_v2_hw_aud_enable(hdmi, false); + mtk_hdmi_v2_audio_reset(hdmi, true); + + /* Configure the main hardware params and get out of reset */ + mtk_hdmi_v2_aip_ctrl_init(hdmi); + mtk_hdmi_v2_aud_set_input(hdmi); + mtk_hdmi_v2_hw_aud_set_channel_status(hdmi); + mtk_hdmi_v2_setup_audio_infoframe(hdmi); + mtk_hdmi_v2_hw_audio_input_enable(hdmi, true); + mtk_hdmi_v2_audio_reset(hdmi, false); + + /* Ignore N/CTS packet transmission requests and configure */ + mtk_hdmi_v2_hw_ncts_enable(hdmi, false); + mtk_hdmi_v2_hw_aud_set_ncts(hdmi, hdmi->aud_param.codec_params.sample_rate, + display_mode->clock); + + /* Wait for the HW to apply settings */ + usleep_range(25, 50); + + /* Hardware is fully configured: enable TX of N/CTS pkts and unmute */ + mtk_hdmi_v2_hw_ncts_enable(hdmi, true); + mtk_hdmi_v2_hw_aud_enable(hdmi, true); + mtk_hdmi_v2_hw_aud_mute(hdmi, false); +} + +static 
void mtk_hdmi_v2_change_video_resolution(struct mtk_hdmi *hdmi, + struct drm_connector_state *conn_state) +{ + mtk_hdmi_v2_hw_reset(hdmi); + mtk_hdmi_v2_set_sw_hpd(hdmi, true); + udelay(2); + + regmap_write(hdmi->regs, HDCP_TOP_CTRL, 0); + + /* + * Enable HDCP reauthentication interrupt: the HW uses this internally + * for the HPD state machine even if HDCP encryption is not enabled. + */ + regmap_set_bits(hdmi->regs, TOP_INT_ENABLE00, HDCP2X_RX_REAUTH_REQ_DDCM_INT); + + /* Enable hotplug and pord interrupts */ + mtk_hdmi_v2_enable_hpd_pord_irq(hdmi, true); + + /* Force enabling HDCP HPD */ + regmap_set_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_OVR); + regmap_set_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_SW); + + /* Set 8 bits per pixel */ + regmap_update_bits(hdmi->regs, TOP_CFG00, TMDS_PACK_MODE, + FIELD_PREP(TMDS_PACK_MODE, TMDS_PACK_MODE_8BPP)); + /* Disable generating deepcolor packets */ + regmap_clear_bits(hdmi->regs, TOP_CFG00, DEEPCOLOR_PKT_EN); + /* Disable adding deepcolor information to the general packet */ + regmap_clear_bits(hdmi->regs, TOP_MISC_CTLR, DEEP_COLOR_ADD); + + if (hdmi->curr_conn->display_info.is_hdmi) + regmap_set_bits(hdmi->regs, TOP_CFG00, HDMI_MODE_HDMI); + else + regmap_clear_bits(hdmi->regs, TOP_CFG00, HDMI_MODE_HDMI); + + udelay(5); + mtk_hdmi_v2_hw_vid_mute(hdmi, true); + mtk_hdmi_v2_hw_aud_mute(hdmi, true); + mtk_hdmi_v2_hw_gcp_avmute(hdmi, false); + + regmap_update_bits(hdmi->regs, TOP_CFG01, + NULL_PKT_VSYNC_HIGH_EN | NULL_PKT_EN, NULL_PKT_VSYNC_HIGH_EN); + usleep_range(100, 150); + + /* Enable scrambling if tmds clock is 340MHz or more */ + mtk_hdmi_v2_enable_scrambling(hdmi, hdmi->mode.clock >= 340 * KILO); + + switch (conn_state->hdmi.output_format) { + default: + case HDMI_COLORSPACE_RGB: + case HDMI_COLORSPACE_YUV444: + /* Disable YUV420 downsampling for RGB and YUV444 */ + mtk_hdmi_yuv420_downsampling(hdmi, false); + break; + case HDMI_COLORSPACE_YUV422: + /* + * YUV420 downsampling is special and needs a bit of setup + * so we disable everything there before doing anything else. + * + * YUV422 downsampling instead just needs one bit to be set. 
+ */ + mtk_hdmi_yuv420_downsampling(hdmi, false); + regmap_set_bits(hdmi->regs, VID_DOWNSAMPLE_CONFIG, + C444_C422_CONFIG_ENABLE); + break; + case HDMI_COLORSPACE_YUV420: + mtk_hdmi_yuv420_downsampling(hdmi, true); + break; + }; +} + +static void mtk_hdmi_v2_output_set_display_mode(struct mtk_hdmi *hdmi, + struct drm_connector_state *conn_state, + struct drm_display_mode *mode) +{ + union phy_configure_opts opts = { + .dp = { .link_rate = hdmi->mode.clock * KILO } + }; + int ret; + + ret = phy_configure(hdmi->phy, &opts); + if (ret) + dev_err(hdmi->dev, "Setting clock=%d failed: %d", mode->clock, ret); + + mtk_hdmi_v2_change_video_resolution(hdmi, conn_state); + mtk_hdmi_v2_aud_output_config(hdmi, mode); +} + +static int mtk_hdmi_v2_clk_enable(struct mtk_hdmi *hdmi) +{ + int ret; + + ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_SEL]); + if (ret) + return ret; + + ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_24M_SEL]); + if (ret) + goto disable_hdcp_clk; + + ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_V2_CLK_HDMI_APB_SEL]); + if (ret) + goto disable_hdcp_24m_clk; + + ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_V2_CLK_VPP_SPLIT_HDMI]); + if (ret) + goto disable_bus_clk; + + return 0; + +disable_bus_clk: + clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDMI_APB_SEL]); +disable_hdcp_24m_clk: + clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_24M_SEL]); +disable_hdcp_clk: + clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_SEL]); + + return ret; +} + +static void mtk_hdmi_v2_clk_disable(struct mtk_hdmi *hdmi) +{ + clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_VPP_SPLIT_HDMI]); + clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDMI_APB_SEL]); + clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_24M_SEL]); + clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_SEL]); +} + +static enum hdmi_hpd_state mtk_hdmi_v2_hpd_pord_status(struct mtk_hdmi *hdmi) +{ + u8 hpd_pin_sta, pord_pin_sta; + u32 hpd_status; + + regmap_read(hdmi->regs, HPD_DDC_STATUS, &hpd_status); + hpd_pin_sta = FIELD_GET(HPD_PIN_STA, hpd_status); + pord_pin_sta = FIELD_GET(PORD_PIN_STA, hpd_status); + + /* + * Inform that the cable is plugged in (hpd_pin_sta) so that the + * sink can be powered on by switching the 5V VBUS as required by + * the HDMI spec for reading EDID and for HDMI Audio registers to + * be accessible. + * + * PORD detection succeeds only when the cable is plugged in and + * the sink is powered on: reaching that state means that the + * communication with the sink can be started. + * + * Please note that when the cable is plugged out the HPD pin will + * be the first one to fall, while PORD may still be in rise state + * for a few more milliseconds, so we decide HDMI_PLUG_OUT without + * checking PORD at all (we check only HPD falling for that). 
+ */ + if (hpd_pin_sta && pord_pin_sta) + return HDMI_PLUG_IN_AND_SINK_POWER_ON; + else if (hpd_pin_sta) + return HDMI_PLUG_IN_ONLY; + else + return HDMI_PLUG_OUT; +} + +static irqreturn_t mtk_hdmi_v2_isr(int irq, void *arg) +{ + struct mtk_hdmi *hdmi = arg; + unsigned int irq_sta; + int ret = IRQ_HANDLED; + + regmap_read(hdmi->regs, TOP_INT_STA00, &irq_sta); + + /* Handle Hotplug Detection interrupts */ + if (irq_sta & HPD_PORD_HWIRQS) { + /* + * Disable the HPD/PORD IRQs now and until thread done to + * avoid interrupt storm that could happen with bad cables + */ + mtk_hdmi_v2_enable_hpd_pord_irq(hdmi, false); + ret = IRQ_WAKE_THREAD; + + /* Clear HPD/PORD irqs to avoid unwanted retriggering */ + regmap_write(hdmi->regs, TOP_INT_CLR00, HPD_PORD_HWIRQS); + regmap_write(hdmi->regs, TOP_INT_CLR00, 0); + } + + return ret; +} + +static irqreturn_t __mtk_hdmi_v2_isr_thread(struct mtk_hdmi *hdmi) +{ + enum hdmi_hpd_state hpd; + + hpd = mtk_hdmi_v2_hpd_pord_status(hdmi); + if (hpd != hdmi->hpd) { + struct drm_encoder *encoder = hdmi->bridge.encoder; + + hdmi->hpd = hpd; + + if (encoder && encoder->dev) + drm_helper_hpd_irq_event(hdmi->bridge.encoder->dev); + } + + mtk_hdmi_v2_enable_hpd_pord_irq(hdmi, true); + return IRQ_HANDLED; +} + +static irqreturn_t mtk_hdmi_v2_isr_thread(int irq, void *arg) +{ + struct mtk_hdmi *hdmi = arg; + + /* + * Debounce HDMI monitor HPD status. + * Empirical testing shows that 30ms is enough wait + */ + msleep(30); + + return __mtk_hdmi_v2_isr_thread(hdmi); +} + +static int mtk_hdmi_v2_enable(struct mtk_hdmi *hdmi) +{ + bool was_active = pm_runtime_active(hdmi->dev); + int ret; + + ret = pm_runtime_resume_and_get(hdmi->dev); + if (ret) { + dev_err(hdmi->dev, "Cannot resume HDMI\n"); + return ret; + } + + ret = mtk_hdmi_v2_clk_enable(hdmi); + if (ret) { + pm_runtime_put(hdmi->dev); + return ret; + } + + if (!was_active) { + mtk_hdmi_v2_hw_reset(hdmi); + mtk_hdmi_v2_set_sw_hpd(hdmi, true); + } + + return 0; +} + +static void mtk_hdmi_v2_disable(struct mtk_hdmi *hdmi) +{ + mtk_hdmi_v2_clk_disable(hdmi); + pm_runtime_put_sync(hdmi->dev); +} + +/* + * Bridge callbacks + */ + +static int mtk_hdmi_v2_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, + enum drm_bridge_attach_flags flags) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + int ret; + + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { + DRM_ERROR("The flag DRM_BRIDGE_ATTACH_NO_CONNECTOR must be supplied\n"); + return -EINVAL; + } + if (hdmi->next_bridge) { + ret = drm_bridge_attach(encoder, hdmi->next_bridge, bridge, flags); + if (ret) + return ret; + } + + ret = mtk_hdmi_v2_enable(hdmi); + if (ret) + return ret; + + /* Enable Hotplug and Pord pins internal debouncing */ + regmap_set_bits(hdmi->regs, HPD_DDC_CTRL, + HPD_DDC_HPD_DBNC_EN | HPD_DDC_PORD_DBNC_EN); + + irq_clear_status_flags(hdmi->irq, IRQ_NOAUTOEN); + enable_irq(hdmi->irq); + + /* + * Check if any HDMI monitor was connected before probing this driver + * and/or attaching the bridge, without debouncing: if so, we want to + * notify the DRM so that we start outputting an image ASAP. + * Note that calling the ISR thread function will also perform a HW + * registers write that enables both the HPD and Pord interrupts. 
+ */ + __mtk_hdmi_v2_isr_thread(hdmi); + + mtk_hdmi_v2_disable(hdmi); + + return 0; +} + +static void mtk_hdmi_v2_bridge_detach(struct drm_bridge *bridge) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + WARN_ON(pm_runtime_active(hdmi->dev)); + + /* The controller is already powered off, just disable irq here */ + disable_irq(hdmi->irq); +} + +static void mtk_hdmi_v2_handle_plugged_change(struct mtk_hdmi *hdmi, bool plugged) +{ + mutex_lock(&hdmi->update_plugged_status_lock); + if (hdmi->plugged_cb && hdmi->codec_dev) + hdmi->plugged_cb(hdmi->codec_dev, plugged); + mutex_unlock(&hdmi->update_plugged_status_lock); +} + +static void mtk_hdmi_v2_bridge_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + struct drm_connector_state *conn_state; + union phy_configure_opts opts = { + .dp = { .link_rate = hdmi->mode.clock * KILO } + }; + int ret; + + /* Power on the controller before trying to write to registers */ + ret = mtk_hdmi_v2_enable(hdmi); + if (WARN_ON(ret)) + return; + + /* Retrieve the connector through the atomic state */ + hdmi->curr_conn = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + + conn_state = drm_atomic_get_new_connector_state(state, hdmi->curr_conn); + if (WARN_ON(!conn_state)) + return; + + /* + * Preconfigure the HDMI controller and the HDMI PHY at pre_enable + * stage to make sure that this IP is ready and clocked before the + * mtk_dpi gets powered on and before it enables the output. + */ + mtk_hdmi_v2_output_set_display_mode(hdmi, conn_state, &hdmi->mode); + + /* Reconfigure phy clock link with appropriate rate */ + phy_configure(hdmi->phy, &opts); + + /* Power on the PHY here to make sure that DPI_HDMI is clocked */ + phy_power_on(hdmi->phy); + + hdmi->powered = true; +} + +static void mtk_hdmi_v2_bridge_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + int ret; + + if (WARN_ON(!hdmi->powered)) + return; + + ret = drm_atomic_helper_connector_hdmi_update_infoframes(hdmi->curr_conn, state); + if (ret) + dev_err(hdmi->dev, "Could not update infoframes: %d\n", ret); + + mtk_hdmi_v2_hw_vid_mute(hdmi, false); + + /* signal the connect event to audio codec */ + mtk_hdmi_v2_handle_plugged_change(hdmi, true); + + hdmi->enabled = true; +} + +static void mtk_hdmi_v2_bridge_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + if (!hdmi->enabled) + return; + + mtk_hdmi_v2_hw_gcp_avmute(hdmi, true); + msleep(50); + mtk_hdmi_v2_hw_vid_mute(hdmi, true); + mtk_hdmi_v2_hw_aud_mute(hdmi, true); + msleep(50); + + hdmi->enabled = false; +} + +static void mtk_hdmi_v2_bridge_post_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + if (!hdmi->powered) + return; + + phy_power_off(hdmi->phy); + hdmi->powered = false; + + /* signal the disconnect event to audio codec */ + mtk_hdmi_v2_handle_plugged_change(hdmi, false); + + /* Power off */ + mtk_hdmi_v2_disable(hdmi); +} + +static enum drm_connector_status mtk_hdmi_v2_bridge_detect(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + return hdmi->hpd != HDMI_PLUG_OUT ? 
+ connector_status_connected : connector_status_disconnected; +} + +static const struct drm_edid *mtk_hdmi_v2_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + return drm_edid_read(connector); +} + +static void mtk_hdmi_v2_hpd_enable(struct drm_bridge *bridge) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + int ret; + + ret = mtk_hdmi_v2_enable(hdmi); + if (ret) { + dev_err(hdmi->dev, "Cannot power on controller for HPD: %d\n", ret); + return; + } + + mtk_hdmi_v2_enable_hpd_pord_irq(hdmi, true); +} + +static void mtk_hdmi_v2_hpd_disable(struct drm_bridge *bridge) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + mtk_hdmi_v2_enable_hpd_pord_irq(hdmi, false); + mtk_hdmi_v2_disable(hdmi); +} + +static int mtk_hdmi_v2_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge, + const struct drm_display_mode *mode, + unsigned long long tmds_rate) +{ + if (mode->clock < MTK_HDMI_V2_CLOCK_MIN) + return MODE_CLOCK_LOW; + else if (mode->clock > MTK_HDMI_V2_CLOCK_MAX) + return MODE_CLOCK_HIGH; + else + return MODE_OK; +} + +static int mtk_hdmi_v2_hdmi_clear_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + switch (type) { + case HDMI_INFOFRAME_TYPE_AUDIO: + regmap_clear_bits(hdmi->regs, TOP_INFO_EN, AUD_EN_WR | AUD_EN); + regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, AUD_RPT_EN); + break; + case HDMI_INFOFRAME_TYPE_AVI: + regmap_clear_bits(hdmi->regs, TOP_INFO_EN, AVI_EN_WR | AVI_EN); + regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, AVI_RPT_EN); + break; + case HDMI_INFOFRAME_TYPE_SPD: + regmap_clear_bits(hdmi->regs, TOP_INFO_EN, SPD_EN_WR | SPD_EN); + regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, SPD_RPT_EN); + break; + case HDMI_INFOFRAME_TYPE_VENDOR: + regmap_clear_bits(hdmi->regs, TOP_INFO_EN, VSIF_EN_WR | VSIF_EN); + regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, VSIF_RPT_EN); + break; + case HDMI_INFOFRAME_TYPE_DRM: + default: + break; + }; + + return 0; +} + +static int mtk_hdmi_v2_hdmi_write_infoframe(struct drm_bridge *bridge, + enum hdmi_infoframe_type type, + const u8 *buffer, size_t len) +{ + struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); + + switch (type) { + case HDMI_INFOFRAME_TYPE_AUDIO: + mtk_hdmi_v2_hw_write_audio_infoframe(hdmi, buffer); + break; + case HDMI_INFOFRAME_TYPE_AVI: + mtk_hdmi_v2_hw_write_avi_infoframe(hdmi, buffer); + break; + case HDMI_INFOFRAME_TYPE_SPD: + mtk_hdmi_v2_hw_write_spd_infoframe(hdmi, buffer); + break; + case HDMI_INFOFRAME_TYPE_VENDOR: + mtk_hdmi_v2_hw_write_vendor_infoframe(hdmi, buffer); + break; + case HDMI_INFOFRAME_TYPE_DRM: + default: + dev_err(hdmi->dev, "Unsupported HDMI infoframe type %u\n", type); + break; + }; + + return 0; +} + +static int mtk_hdmi_v2_set_abist(struct mtk_hdmi *hdmi, bool enable) +{ + struct drm_display_mode *mode = &hdmi->mode; + int abist_format = -EINVAL; + bool interlaced; + + if (!enable) { + regmap_clear_bits(hdmi->regs, TOP_CFG00, HDMI_ABIST_ENABLE); + return 0; + } + + if (!mode->hdisplay || !mode->vdisplay) + return -EINVAL; + + interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; + + switch (mode->hdisplay) { + case 720: + if (mode->vdisplay == 480) + abist_format = 2; + else if (mode->vdisplay == 576) + abist_format = 11; + break; + case 1280: + if (mode->vdisplay == 720) + abist_format = 3; + break; + case 1440: + if (mode->vdisplay == 480) + abist_format = interlaced ? 5 : 9; + else if (mode->vdisplay == 576) + abist_format = interlaced ? 
14 : 18; + break; + case 1920: + if (mode->vdisplay == 1080) + abist_format = interlaced ? 4 : 10; + break; + case 3840: + if (mode->vdisplay == 2160) + abist_format = 25; + break; + case 4096: + if (mode->vdisplay == 2160) + abist_format = 26; + break; + default: + break; + } + if (abist_format < 0) + return abist_format; + + regmap_update_bits(hdmi->regs, TOP_CFG00, HDMI_ABIST_VIDEO_FORMAT, + FIELD_PREP(HDMI_ABIST_VIDEO_FORMAT, abist_format)); + regmap_set_bits(hdmi->regs, TOP_CFG00, HDMI_ABIST_ENABLE); + return 0; +} + +static int mtk_hdmi_v2_debug_abist_show(struct seq_file *m, void *arg) +{ + struct mtk_hdmi *hdmi = m->private; + bool en; + u32 val; + int ret; + + if (!hdmi) + return -EINVAL; + + ret = regmap_read(hdmi->regs, TOP_CFG00, &val); + if (ret) + return ret; + + en = FIELD_GET(HDMI_ABIST_ENABLE, val); + + seq_printf(m, "HDMI Automated Built-In Self Test: %s\n", + en ? "Enabled" : "Disabled"); + + return 0; +} + +static ssize_t mtk_hdmi_v2_debug_abist_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + int ret; + u32 en; + + if (!m || !m->private || *offp) + return -EINVAL; + + ret = kstrtouint_from_user(ubuf, len, 0, &en); + if (ret) + return ret; + + if (en < 0 || en > 1) + return -EINVAL; + + mtk_hdmi_v2_set_abist((struct mtk_hdmi *)m->private, en); + return len; +} + +static int mtk_hdmi_v2_debug_abist_open(struct inode *inode, struct file *file) +{ + return single_open(file, mtk_hdmi_v2_debug_abist_show, inode->i_private); +} + +static const struct file_operations mtk_hdmi_debug_abist_fops = { + .owner = THIS_MODULE, + .open = mtk_hdmi_v2_debug_abist_open, + .read = seq_read, + .write = mtk_hdmi_v2_debug_abist_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static void mtk_hdmi_v2_debugfs_init(struct drm_bridge *bridge, struct dentry *root) +{ + struct mtk_hdmi *dpi = hdmi_ctx_from_bridge(bridge); + + debugfs_create_file("hdmi_abist", 0640, root, dpi, &mtk_hdmi_debug_abist_fops); +} + +static const struct drm_bridge_funcs mtk_v2_hdmi_bridge_funcs = { + .attach = mtk_hdmi_v2_bridge_attach, + .detach = mtk_hdmi_v2_bridge_detach, + .mode_fixup = mtk_hdmi_bridge_mode_fixup, + .mode_set = mtk_hdmi_bridge_mode_set, + .atomic_pre_enable = mtk_hdmi_v2_bridge_pre_enable, + .atomic_enable = mtk_hdmi_v2_bridge_enable, + .atomic_disable = mtk_hdmi_v2_bridge_disable, + .atomic_post_disable = mtk_hdmi_v2_bridge_post_disable, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .detect = mtk_hdmi_v2_bridge_detect, + .edid_read = mtk_hdmi_v2_bridge_edid_read, + .hpd_enable = mtk_hdmi_v2_hpd_enable, + .hpd_disable = mtk_hdmi_v2_hpd_disable, + .hdmi_tmds_char_rate_valid = mtk_hdmi_v2_hdmi_tmds_char_rate_valid, + .hdmi_clear_infoframe = mtk_hdmi_v2_hdmi_clear_infoframe, + .hdmi_write_infoframe = mtk_hdmi_v2_hdmi_write_infoframe, + .debugfs_init = mtk_hdmi_v2_debugfs_init, +}; + +/* + * HDMI audio codec callbacks + */ +static int mtk_hdmi_v2_audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + bool plugged; + + if (!hdmi) + return -ENODEV; + + mtk_hdmi_audio_set_plugged_cb(hdmi, fn, codec_dev); + plugged = (hdmi->hpd == HDMI_PLUG_IN_AND_SINK_POWER_ON); + mtk_hdmi_v2_handle_plugged_change(hdmi, plugged); + + return 0; +} + +static int 
mtk_hdmi_v2_audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + if (hdmi->audio_enable) { + mtk_hdmi_audio_params(hdmi, daifmt, params); + mtk_hdmi_v2_aud_output_config(hdmi, &hdmi->mode); + } + return 0; +} + +static int mtk_hdmi_v2_audio_startup(struct device *dev, void *data) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + mtk_hdmi_v2_hw_aud_enable(hdmi, true); + hdmi->audio_enable = true; + + return 0; +} + +static void mtk_hdmi_v2_audio_shutdown(struct device *dev, void *data) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + hdmi->audio_enable = false; + mtk_hdmi_v2_hw_aud_enable(hdmi, false); +} + +static int mtk_hdmi_v2_audio_mute(struct device *dev, void *data, bool enable, int dir) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + mtk_hdmi_v2_hw_aud_mute(hdmi, enable); + + return 0; +} + +static const struct hdmi_codec_ops mtk_hdmi_v2_audio_codec_ops = { + .hw_params = mtk_hdmi_v2_audio_hw_params, + .audio_startup = mtk_hdmi_v2_audio_startup, + .audio_shutdown = mtk_hdmi_v2_audio_shutdown, + .mute_stream = mtk_hdmi_v2_audio_mute, + .get_eld = mtk_hdmi_audio_get_eld, + .hook_plugged_cb = mtk_hdmi_v2_audio_hook_plugged_cb, +}; + +static __maybe_unused int mtk_hdmi_v2_suspend(struct device *dev) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + mtk_hdmi_v2_disable(hdmi); + + return 0; +} + +static __maybe_unused int mtk_hdmi_v2_resume(struct device *dev) +{ + struct mtk_hdmi *hdmi = dev_get_drvdata(dev); + + return mtk_hdmi_v2_enable(hdmi); +} + +static SIMPLE_DEV_PM_OPS(mtk_hdmi_v2_pm_ops, mtk_hdmi_v2_suspend, mtk_hdmi_v2_resume); + +static const struct mtk_hdmi_ver_conf mtk_hdmi_conf_v2 = { + .bridge_funcs = &mtk_v2_hdmi_bridge_funcs, + .codec_ops = &mtk_hdmi_v2_audio_codec_ops, + .mtk_hdmi_clock_names = mtk_hdmi_v2_clk_names, + .num_clocks = MTK_HDMI_V2_CLK_COUNT, + .interlace_allowed = true, +}; + +static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8188 = { + .ver_conf = &mtk_hdmi_conf_v2, + .reg_hdmi_tx_cfg = HDMITX_CONFIG_MT8188 +}; + +static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8195 = { + .ver_conf = &mtk_hdmi_conf_v2, + .reg_hdmi_tx_cfg = HDMITX_CONFIG_MT8195 +}; + +static int mtk_hdmi_v2_probe(struct platform_device *pdev) +{ + struct mtk_hdmi *hdmi; + int ret; + + /* Populate HDMI sub-devices if present */ + ret = devm_of_platform_populate(&pdev->dev); + if (ret) + return ret; + + hdmi = mtk_hdmi_common_probe(pdev); + if (IS_ERR(hdmi)) + return PTR_ERR(hdmi); + + hdmi->hpd = HDMI_PLUG_OUT; + + /* Disable all HW interrupts at probe stage */ + mtk_hdmi_v2_hwirq_disable(hdmi); + + /* + * In case bootloader leaves HDMI enabled before booting, make + * sure that any interrupt that was left is cleared by setting + * all bits in the INT_CLR registers for all 32+19 interrupts. + */ + regmap_write(hdmi->regs, TOP_INT_CLR00, GENMASK(31, 0)); + regmap_write(hdmi->regs, TOP_INT_CLR01, GENMASK(18, 0)); + + /* Restore interrupt clearing registers to zero */ + regmap_write(hdmi->regs, TOP_INT_CLR00, 0); + regmap_write(hdmi->regs, TOP_INT_CLR01, 0); + + /* + * Install the ISR but keep it disabled: as the interrupts are + * being set up in the .bridge_attach() callback which will + * enable both the right HW IRQs and the ISR. 
+ */ + irq_set_status_flags(hdmi->irq, IRQ_NOAUTOEN); + ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq, mtk_hdmi_v2_isr, + mtk_hdmi_v2_isr_thread, + IRQ_TYPE_LEVEL_HIGH, + dev_name(&pdev->dev), hdmi); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Cannot request IRQ\n"); + + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Cannot enable Runtime PM\n"); + + return 0; +} + +static void mtk_hdmi_v2_remove(struct platform_device *pdev) +{ + struct mtk_hdmi *hdmi = platform_get_drvdata(pdev); + + i2c_put_adapter(hdmi->ddc_adpt); +} + +static const struct of_device_id mtk_drm_hdmi_v2_of_ids[] = { + { .compatible = "mediatek,mt8188-hdmi-tx", .data = &mtk_hdmi_conf_mt8188 }, + { .compatible = "mediatek,mt8195-hdmi-tx", .data = &mtk_hdmi_conf_mt8195 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mtk_drm_hdmi_v2_of_ids); + +static struct platform_driver mtk_hdmi_v2_driver = { + .probe = mtk_hdmi_v2_probe, + .remove = mtk_hdmi_v2_remove, + .driver = { + .name = "mediatek-drm-hdmi-v2", + .of_match_table = mtk_drm_hdmi_v2_of_ids, + .pm = &mtk_hdmi_v2_pm_ops, + }, +}; +module_platform_driver(mtk_hdmi_v2_driver); + +MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>>"); +MODULE_DESCRIPTION("MediaTek HDMIv2 Driver"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS("DRM_MTK_HDMI"); diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c new file mode 100644 index 000000000000..7982788ae9df --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c @@ -0,0 +1,347 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 MediaTek Inc. + */ + +#include <drm/drm_fourcc.h> +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/mod_devicetable.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/soc/mediatek/mtk-cmdq.h> + +#include "mtk_disp_drv.h" +#include "mtk_drm_drv.h" +#include "mtk_mdp_rdma.h" + +#define MDP_RDMA_EN 0x000 +#define FLD_ROT_ENABLE BIT(0) +#define MDP_RDMA_RESET 0x008 +#define MDP_RDMA_CON 0x020 +#define FLD_OUTPUT_10B BIT(5) +#define FLD_SIMPLE_MODE BIT(4) +#define MDP_RDMA_GMCIF_CON 0x028 +#define FLD_COMMAND_DIV BIT(0) +#define FLD_EXT_PREULTRA_EN BIT(3) +#define FLD_RD_REQ_TYPE GENMASK(7, 4) +#define VAL_RD_REQ_TYPE_BURST_8_ACCESS 7 +#define FLD_ULTRA_EN GENMASK(13, 12) +#define VAL_ULTRA_EN_ENABLE 1 +#define FLD_PRE_ULTRA_EN GENMASK(17, 16) +#define VAL_PRE_ULTRA_EN_ENABLE 1 +#define FLD_EXT_ULTRA_EN BIT(18) +#define MDP_RDMA_SRC_CON 0x030 +#define FLD_OUTPUT_ARGB BIT(25) +#define FLD_BIT_NUMBER GENMASK(19, 18) +#define FLD_SWAP BIT(14) +#define FLD_UNIFORM_CONFIG BIT(17) +#define RDMA_INPUT_10BIT BIT(18) +#define FLD_SRC_FORMAT GENMASK(3, 0) +#define MDP_RDMA_COMP_CON 0x038 +#define FLD_AFBC_EN BIT(22) +#define FLD_AFBC_YUV_TRANSFORM BIT(21) +#define FLD_UFBDC_EN BIT(12) +#define MDP_RDMA_MF_BKGD_SIZE_IN_BYTE 0x060 +#define FLD_MF_BKGD_WB GENMASK(22, 0) +#define MDP_RDMA_MF_SRC_SIZE 0x070 +#define FLD_MF_SRC_H GENMASK(30, 16) +#define FLD_MF_SRC_W GENMASK(14, 0) +#define MDP_RDMA_MF_CLIP_SIZE 0x078 +#define FLD_MF_CLIP_H GENMASK(30, 16) +#define FLD_MF_CLIP_W GENMASK(14, 0) +#define MDP_RDMA_SRC_OFFSET_0 0x118 +#define FLD_SRC_OFFSET_0 GENMASK(31, 0) +#define MDP_RDMA_TRANSFORM_0 0x200 +#define FLD_INT_MATRIX_SEL GENMASK(27, 23) +#define FLD_TRANS_EN BIT(16) +#define MDP_RDMA_SRC_BASE_0 0xf00 +#define FLD_SRC_BASE_0 GENMASK(31, 0) + +#define RDMA_CSC_FULL709_TO_RGB 5 +#define 
RDMA_CSC_BT601_TO_RGB 6 + +static const u32 formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB565, + DRM_FORMAT_UYVY, + DRM_FORMAT_YUYV, +}; + +enum rdma_format { + RDMA_INPUT_FORMAT_RGB565 = 0, + RDMA_INPUT_FORMAT_RGB888 = 1, + RDMA_INPUT_FORMAT_RGBA8888 = 2, + RDMA_INPUT_FORMAT_ARGB8888 = 3, + RDMA_INPUT_FORMAT_UYVY = 4, + RDMA_INPUT_FORMAT_YUY2 = 5, + RDMA_INPUT_FORMAT_Y8 = 7, + RDMA_INPUT_FORMAT_YV12 = 8, + RDMA_INPUT_FORMAT_UYVY_3PL = 9, + RDMA_INPUT_FORMAT_NV12 = 12, + RDMA_INPUT_FORMAT_UYVY_2PL = 13, + RDMA_INPUT_FORMAT_Y410 = 14 +}; + +struct mtk_mdp_rdma { + void __iomem *regs; + struct clk *clk; + struct cmdq_client_reg cmdq_reg; +}; + +static unsigned int rdma_fmt_convert(unsigned int fmt) +{ + switch (fmt) { + default: + case DRM_FORMAT_RGB565: + return RDMA_INPUT_FORMAT_RGB565; + case DRM_FORMAT_BGR565: + return RDMA_INPUT_FORMAT_RGB565 | FLD_SWAP; + case DRM_FORMAT_RGB888: + return RDMA_INPUT_FORMAT_RGB888; + case DRM_FORMAT_BGR888: + return RDMA_INPUT_FORMAT_RGB888 | FLD_SWAP; + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_RGBA8888: + return RDMA_INPUT_FORMAT_ARGB8888; + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_BGRA8888: + return RDMA_INPUT_FORMAT_ARGB8888 | FLD_SWAP; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + return RDMA_INPUT_FORMAT_RGBA8888; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + return RDMA_INPUT_FORMAT_RGBA8888 | FLD_SWAP; + case DRM_FORMAT_ABGR2101010: + return RDMA_INPUT_FORMAT_RGBA8888 | FLD_SWAP | RDMA_INPUT_10BIT; + case DRM_FORMAT_ARGB2101010: + return RDMA_INPUT_FORMAT_RGBA8888 | RDMA_INPUT_10BIT; + case DRM_FORMAT_RGBA1010102: + return RDMA_INPUT_FORMAT_ARGB8888 | FLD_SWAP | RDMA_INPUT_10BIT; + case DRM_FORMAT_BGRA1010102: + return RDMA_INPUT_FORMAT_ARGB8888 | RDMA_INPUT_10BIT; + case DRM_FORMAT_UYVY: + return RDMA_INPUT_FORMAT_UYVY; + case DRM_FORMAT_YUYV: + return RDMA_INPUT_FORMAT_YUY2; + } +} + +static unsigned int rdma_color_convert(unsigned int color_encoding) +{ + switch (color_encoding) { + default: + case DRM_COLOR_YCBCR_BT709: + return RDMA_CSC_FULL709_TO_RGB; + case DRM_COLOR_YCBCR_BT601: + return RDMA_CSC_BT601_TO_RGB; + } +} + +static void mtk_mdp_rdma_fifo_config(struct device *dev, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_mdp_rdma *priv = dev_get_drvdata(dev); + + mtk_ddp_write_mask(cmdq_pkt, FLD_EXT_ULTRA_EN | VAL_PRE_ULTRA_EN_ENABLE << 16 | + VAL_ULTRA_EN_ENABLE << 12 | VAL_RD_REQ_TYPE_BURST_8_ACCESS << 4 | + FLD_EXT_PREULTRA_EN | FLD_COMMAND_DIV, &priv->cmdq_reg, + priv->regs, MDP_RDMA_GMCIF_CON, FLD_EXT_ULTRA_EN | + FLD_PRE_ULTRA_EN | FLD_ULTRA_EN | FLD_RD_REQ_TYPE | + FLD_EXT_PREULTRA_EN | FLD_COMMAND_DIV); +} + +void mtk_mdp_rdma_start(struct device *dev, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_mdp_rdma *priv = dev_get_drvdata(dev); + + mtk_ddp_write_mask(cmdq_pkt, FLD_ROT_ENABLE, &priv->cmdq_reg, + priv->regs, MDP_RDMA_EN, FLD_ROT_ENABLE); +} + +void mtk_mdp_rdma_stop(struct device *dev, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_mdp_rdma *priv = dev_get_drvdata(dev); + + mtk_ddp_write_mask(cmdq_pkt, 0, &priv->cmdq_reg, + priv->regs, MDP_RDMA_EN, FLD_ROT_ENABLE); + mtk_ddp_write(cmdq_pkt, 1, &priv->cmdq_reg, priv->regs, MDP_RDMA_RESET); + mtk_ddp_write(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs, MDP_RDMA_RESET); +} + +void mtk_mdp_rdma_config(struct device *dev, struct mtk_mdp_rdma_cfg *cfg, + struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_mdp_rdma 
*priv = dev_get_drvdata(dev); + const struct drm_format_info *fmt_info = drm_format_info(cfg->fmt); + bool csc_enable = fmt_info->is_yuv ? true : false; + unsigned int src_pitch_y = cfg->pitch; + unsigned int offset_y = 0; + + mtk_mdp_rdma_fifo_config(dev, cmdq_pkt); + + mtk_ddp_write_mask(cmdq_pkt, FLD_UNIFORM_CONFIG, &priv->cmdq_reg, priv->regs, + MDP_RDMA_SRC_CON, FLD_UNIFORM_CONFIG); + mtk_ddp_write_mask(cmdq_pkt, rdma_fmt_convert(cfg->fmt), &priv->cmdq_reg, priv->regs, + MDP_RDMA_SRC_CON, FLD_SWAP | FLD_SRC_FORMAT | FLD_BIT_NUMBER); + + if (!csc_enable && fmt_info->has_alpha) + mtk_ddp_write_mask(cmdq_pkt, FLD_OUTPUT_ARGB, &priv->cmdq_reg, + priv->regs, MDP_RDMA_SRC_CON, FLD_OUTPUT_ARGB); + else + mtk_ddp_write_mask(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs, + MDP_RDMA_SRC_CON, FLD_OUTPUT_ARGB); + + mtk_ddp_write_mask(cmdq_pkt, cfg->addr0, &priv->cmdq_reg, priv->regs, + MDP_RDMA_SRC_BASE_0, FLD_SRC_BASE_0); + + mtk_ddp_write_mask(cmdq_pkt, src_pitch_y, &priv->cmdq_reg, priv->regs, + MDP_RDMA_MF_BKGD_SIZE_IN_BYTE, FLD_MF_BKGD_WB); + + mtk_ddp_write_mask(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs, MDP_RDMA_COMP_CON, + FLD_AFBC_YUV_TRANSFORM | FLD_UFBDC_EN | FLD_AFBC_EN); + mtk_ddp_write_mask(cmdq_pkt, FLD_OUTPUT_10B, &priv->cmdq_reg, priv->regs, + MDP_RDMA_CON, FLD_OUTPUT_10B); + mtk_ddp_write_mask(cmdq_pkt, FLD_SIMPLE_MODE, &priv->cmdq_reg, priv->regs, + MDP_RDMA_CON, FLD_SIMPLE_MODE); + if (csc_enable) + mtk_ddp_write_mask(cmdq_pkt, rdma_color_convert(cfg->color_encoding) << 23, + &priv->cmdq_reg, priv->regs, MDP_RDMA_TRANSFORM_0, + FLD_INT_MATRIX_SEL); + mtk_ddp_write_mask(cmdq_pkt, csc_enable << 16, &priv->cmdq_reg, priv->regs, + MDP_RDMA_TRANSFORM_0, FLD_TRANS_EN); + + offset_y = cfg->x_left * fmt_info->cpp[0] + cfg->y_top * src_pitch_y; + + mtk_ddp_write_mask(cmdq_pkt, offset_y, &priv->cmdq_reg, priv->regs, + MDP_RDMA_SRC_OFFSET_0, FLD_SRC_OFFSET_0); + mtk_ddp_write_mask(cmdq_pkt, cfg->width, &priv->cmdq_reg, priv->regs, + MDP_RDMA_MF_SRC_SIZE, FLD_MF_SRC_W); + mtk_ddp_write_mask(cmdq_pkt, cfg->height << 16, &priv->cmdq_reg, priv->regs, + MDP_RDMA_MF_SRC_SIZE, FLD_MF_SRC_H); + mtk_ddp_write_mask(cmdq_pkt, cfg->width, &priv->cmdq_reg, priv->regs, + MDP_RDMA_MF_CLIP_SIZE, FLD_MF_CLIP_W); + mtk_ddp_write_mask(cmdq_pkt, cfg->height << 16, &priv->cmdq_reg, priv->regs, + MDP_RDMA_MF_CLIP_SIZE, FLD_MF_CLIP_H); +} + +const u32 *mtk_mdp_rdma_get_formats(struct device *dev) +{ + return formats; +} + +size_t mtk_mdp_rdma_get_num_formats(struct device *dev) +{ + return ARRAY_SIZE(formats); +} + +int mtk_mdp_rdma_power_on(struct device *dev) +{ + int ret = pm_runtime_resume_and_get(dev); + + if (ret < 0) { + dev_err(dev, "Failed to power on: %d\n", ret); + return ret; + } + return 0; +} + +void mtk_mdp_rdma_power_off(struct device *dev) +{ + pm_runtime_put(dev); +} + +int mtk_mdp_rdma_clk_enable(struct device *dev) +{ + struct mtk_mdp_rdma *rdma = dev_get_drvdata(dev); + + return clk_prepare_enable(rdma->clk); +} + +void mtk_mdp_rdma_clk_disable(struct device *dev) +{ + struct mtk_mdp_rdma *rdma = dev_get_drvdata(dev); + + clk_disable_unprepare(rdma->clk); +} + +static int mtk_mdp_rdma_bind(struct device *dev, struct device *master, + void *data) +{ + return 0; +} + +static void mtk_mdp_rdma_unbind(struct device *dev, struct device *master, + void *data) +{ +} + +static const struct component_ops mtk_mdp_rdma_component_ops = { + .bind = mtk_mdp_rdma_bind, + .unbind = mtk_mdp_rdma_unbind, +}; + +static int mtk_mdp_rdma_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + 
struct mtk_mdp_rdma *priv; + int ret = 0; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->regs)) + return dev_err_probe(dev, PTR_ERR(priv->regs), + "failed to ioremap rdma\n"); + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), + "failed to get rdma clk\n"); + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0); + if (ret) + dev_dbg(dev, "get mediatek,gce-client-reg fail!\n"); +#endif + platform_set_drvdata(pdev, priv); + + pm_runtime_enable(dev); + + ret = component_add(dev, &mtk_mdp_rdma_component_ops); + if (ret != 0) { + pm_runtime_disable(dev); + return dev_err_probe(dev, ret, "Failed to add component\n"); + } + return 0; +} + +static void mtk_mdp_rdma_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &mtk_mdp_rdma_component_ops); + pm_runtime_disable(&pdev->dev); +} + +static const struct of_device_id mtk_mdp_rdma_driver_dt_match[] = { + { .compatible = "mediatek,mt8195-vdo1-rdma", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mtk_mdp_rdma_driver_dt_match); + +struct platform_driver mtk_mdp_rdma_driver = { + .probe = mtk_mdp_rdma_probe, + .remove = mtk_mdp_rdma_remove, + .driver = { + .name = "mediatek-mdp-rdma", + .of_match_table = mtk_mdp_rdma_driver_dt_match, + }, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.h b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.h new file mode 100644 index 000000000000..9943ee3aac31 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 MediaTek Inc. + */ + +#ifndef __MTK_MDP_RDMA_H__ +#define __MTK_MDP_RDMA_H__ + +struct mtk_mdp_rdma_cfg { + unsigned int pitch; + unsigned int addr0; + unsigned int width; + unsigned int height; + unsigned int x_left; + unsigned int y_top; + int fmt; + int color_encoding; +}; + +#endif // __MTK_MDP_RDMA_H__ diff --git a/drivers/gpu/drm/mediatek/mtk_padding.c b/drivers/gpu/drm/mediatek/mtk_padding.c new file mode 100644 index 000000000000..b4e3e5a3428b --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_padding.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023 MediaTek Inc. + */ + +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/soc/mediatek/mtk-cmdq.h> + +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" +#include "mtk_disp_drv.h" + +#define PADDING_CONTROL_REG 0x00 +#define PADDING_BYPASS BIT(0) +#define PADDING_ENABLE BIT(1) +#define PADDING_PIC_SIZE_REG 0x04 +#define PADDING_H_REG 0x08 /* horizontal */ +#define PADDING_V_REG 0x0c /* vertical */ +#define PADDING_COLOR_REG 0x10 + +/** + * struct mtk_padding - Basic information of the Padding + * @clk: Clock of the module + * @reg: Virtual address of the Padding for CPU to access + * @cmdq_reg: CMDQ setting of the Padding + * + * Every Padding should have different clock source, register base, and + * CMDQ settings, we stored these differences all together. 
+ */ +struct mtk_padding { + struct clk *clk; + void __iomem *reg; + struct cmdq_client_reg cmdq_reg; +}; + +int mtk_padding_clk_enable(struct device *dev) +{ + struct mtk_padding *padding = dev_get_drvdata(dev); + + return clk_prepare_enable(padding->clk); +} + +void mtk_padding_clk_disable(struct device *dev) +{ + struct mtk_padding *padding = dev_get_drvdata(dev); + + clk_disable_unprepare(padding->clk); +} + +void mtk_padding_start(struct device *dev) +{ + struct mtk_padding *padding = dev_get_drvdata(dev); + + writel(PADDING_ENABLE | PADDING_BYPASS, + padding->reg + PADDING_CONTROL_REG); + + /* + * Notice that even the padding is in bypass mode, + * all the settings must be cleared to 0 or + * undefined behaviors could happen + */ + writel(0, padding->reg + PADDING_PIC_SIZE_REG); + writel(0, padding->reg + PADDING_H_REG); + writel(0, padding->reg + PADDING_V_REG); + writel(0, padding->reg + PADDING_COLOR_REG); +} + +void mtk_padding_stop(struct device *dev) +{ + struct mtk_padding *padding = dev_get_drvdata(dev); + + writel(0, padding->reg + PADDING_CONTROL_REG); +} + +static int mtk_padding_bind(struct device *dev, struct device *master, void *data) +{ + return 0; +} + +static void mtk_padding_unbind(struct device *dev, struct device *master, void *data) +{ +} + +static const struct component_ops mtk_padding_component_ops = { + .bind = mtk_padding_bind, + .unbind = mtk_padding_unbind, +}; + +static int mtk_padding_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_padding *priv; + struct resource *res; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), + "failed to get clk\n"); + + priv->reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(priv->reg)) + return dev_err_probe(dev, PTR_ERR(priv->reg), + "failed to do ioremap\n"); + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0); + if (ret) + return dev_err_probe(dev, ret, "failed to get gce client reg\n"); +#endif + + platform_set_drvdata(pdev, priv); + + ret = devm_pm_runtime_enable(dev); + if (ret) + return ret; + + ret = component_add(dev, &mtk_padding_component_ops); + if (ret) { + pm_runtime_disable(dev); + return dev_err_probe(dev, ret, "failed to add component\n"); + } + + return 0; +} + +static void mtk_padding_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &mtk_padding_component_ops); +} + +static const struct of_device_id mtk_padding_driver_dt_match[] = { + { .compatible = "mediatek,mt8188-disp-padding" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mtk_padding_driver_dt_match); + +struct platform_driver mtk_padding_driver = { + .probe = mtk_padding_probe, + .remove = mtk_padding_remove, + .driver = { + .name = "mediatek-disp-padding", + .of_match_table = mtk_padding_driver_dt_match, + }, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c new file mode 100644 index 000000000000..5043e0377270 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_plane.c @@ -0,0 +1,367 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015 MediaTek Inc. 
+ * Author: CK Hu <ck.hu@mediatek.com> + */ + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_atomic_uapi.h> +#include <drm/drm_blend.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_print.h> +#include <linux/align.h> + +#include "mtk_crtc.h" +#include "mtk_ddp_comp.h" +#include "mtk_drm_drv.h" +#include "mtk_gem.h" +#include "mtk_plane.h" + +static const u64 modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID, +}; + +static void mtk_plane_reset(struct drm_plane *plane) +{ + struct mtk_plane_state *state; + + if (plane->state) { + __drm_atomic_helper_plane_destroy_state(plane->state); + + state = to_mtk_plane_state(plane->state); + memset(state, 0, sizeof(*state)); + } else { + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return; + } + + __drm_atomic_helper_plane_reset(plane, &state->base); + + state->base.plane = plane; + state->pending.format = DRM_FORMAT_RGB565; + state->pending.modifier = DRM_FORMAT_MOD_LINEAR; +} + +static struct drm_plane_state *mtk_plane_duplicate_state(struct drm_plane *plane) +{ + struct mtk_plane_state *old_state = to_mtk_plane_state(plane->state); + struct mtk_plane_state *state; + + state = kmalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_plane_duplicate_state(plane, &state->base); + + WARN_ON(state->base.plane != plane); + + state->pending = old_state->pending; + + return &state->base; +} + +static bool mtk_plane_format_mod_supported(struct drm_plane *plane, + uint32_t format, + uint64_t modifier) +{ + return modifier == DRM_FORMAT_MOD_LINEAR; +} + +static void mtk_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + __drm_atomic_helper_plane_destroy_state(state); + kfree(to_mtk_plane_state(state)); +} + +static int mtk_plane_atomic_async_check(struct drm_plane *plane, + struct drm_atomic_state *state, bool flip) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_crtc_state *crtc_state; + int ret; + + if (plane != new_plane_state->crtc->cursor) + return -EINVAL; + + if (!plane->state) + return -EINVAL; + + if (!plane->state->fb) + return -EINVAL; + + ret = mtk_crtc_plane_check(new_plane_state->crtc, plane, + to_mtk_plane_state(new_plane_state)); + if (ret) + return ret; + + crtc_state = drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); + + return drm_atomic_helper_check_plane_state(plane->state, crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + true, true); +} + +static void mtk_plane_update_new_state(struct drm_plane_state *new_state, + struct mtk_plane_state *mtk_plane_state) +{ + struct drm_framebuffer *fb = new_state->fb; + struct drm_gem_object *gem; + struct mtk_gem_obj *mtk_gem; + unsigned int pitch, format; + u64 modifier; + dma_addr_t addr; + dma_addr_t hdr_addr = 0; + unsigned int hdr_pitch = 0; + int offset; + + gem = fb->obj[0]; + mtk_gem = to_mtk_gem_obj(gem); + addr = mtk_gem->dma_addr; + pitch = fb->pitches[0]; + format = fb->format->format; + modifier = fb->modifier; + + if (modifier == DRM_FORMAT_MOD_LINEAR) { + /* + * Using dma_addr_t variable to calculate with multiplier of different types, + * for example: addr += (new_state->src.x1 >> 16) * fb->format->cpp[0]; + * may cause coverity issue with unintentional overflow. 
+ */ + offset = (new_state->src.x1 >> 16) * fb->format->cpp[0]; + addr += offset; + offset = (new_state->src.y1 >> 16) * pitch; + addr += offset; + } else { + int width_in_blocks = ALIGN(fb->width, AFBC_DATA_BLOCK_WIDTH) + / AFBC_DATA_BLOCK_WIDTH; + int height_in_blocks = ALIGN(fb->height, AFBC_DATA_BLOCK_HEIGHT) + / AFBC_DATA_BLOCK_HEIGHT; + int x_offset_in_blocks = (new_state->src.x1 >> 16) / AFBC_DATA_BLOCK_WIDTH; + int y_offset_in_blocks = (new_state->src.y1 >> 16) / AFBC_DATA_BLOCK_HEIGHT; + int hdr_size, hdr_offset; + + hdr_pitch = width_in_blocks * AFBC_HEADER_BLOCK_SIZE; + pitch = width_in_blocks * AFBC_DATA_BLOCK_WIDTH * + AFBC_DATA_BLOCK_HEIGHT * fb->format->cpp[0]; + + hdr_size = ALIGN(hdr_pitch * height_in_blocks, AFBC_HEADER_ALIGNMENT); + hdr_offset = hdr_pitch * y_offset_in_blocks + + AFBC_HEADER_BLOCK_SIZE * x_offset_in_blocks; + + /* + * Using dma_addr_t variable to calculate with multiplier of different types, + * for example: addr += hdr_pitch * y_offset_in_blocks; + * may cause coverity issue with unintentional overflow. + */ + hdr_addr = addr + hdr_offset; + + /* The data plane is offset by 1 additional block. */ + offset = pitch * y_offset_in_blocks + + AFBC_DATA_BLOCK_WIDTH * AFBC_DATA_BLOCK_HEIGHT * + fb->format->cpp[0] * (x_offset_in_blocks + 1); + + /* + * Using dma_addr_t variable to calculate with multiplier of different types, + * for example: addr += pitch * y_offset_in_blocks; + * may cause coverity issue with unintentional overflow. + */ + addr = addr + hdr_size + offset; + } + + mtk_plane_state->pending.enable = true; + mtk_plane_state->pending.pitch = pitch; + mtk_plane_state->pending.hdr_pitch = hdr_pitch; + mtk_plane_state->pending.format = format; + mtk_plane_state->pending.modifier = modifier; + mtk_plane_state->pending.addr = addr; + mtk_plane_state->pending.hdr_addr = hdr_addr; + mtk_plane_state->pending.x = new_state->dst.x1; + mtk_plane_state->pending.y = new_state->dst.y1; + mtk_plane_state->pending.width = drm_rect_width(&new_state->dst); + mtk_plane_state->pending.height = drm_rect_height(&new_state->dst); + mtk_plane_state->pending.rotation = new_state->rotation; + mtk_plane_state->pending.color_encoding = new_state->color_encoding; +} + +static void mtk_plane_atomic_async_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct mtk_plane_state *new_plane_state = to_mtk_plane_state(plane->state); + + plane->state->crtc_x = new_state->crtc_x; + plane->state->crtc_y = new_state->crtc_y; + plane->state->crtc_h = new_state->crtc_h; + plane->state->crtc_w = new_state->crtc_w; + plane->state->src_x = new_state->src_x; + plane->state->src_y = new_state->src_y; + plane->state->src_h = new_state->src_h; + plane->state->src_w = new_state->src_w; + plane->state->dst.x1 = new_state->dst.x1; + plane->state->dst.y1 = new_state->dst.y1; + + mtk_plane_update_new_state(new_state, new_plane_state); + swap(plane->state->fb, new_state->fb); + wmb(); /* Make sure the above parameters are set before update */ + new_plane_state->pending.async_dirty = true; + mtk_crtc_async_update(new_state->crtc, plane, state); +} + +static const struct drm_plane_funcs mtk_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .reset = mtk_plane_reset, + .atomic_duplicate_state = mtk_plane_duplicate_state, + .atomic_destroy_state = mtk_plane_destroy_state, + .format_mod_supported = 
mtk_plane_format_mod_supported, +}; + +static int mtk_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_framebuffer *fb = new_plane_state->fb; + struct drm_crtc_state *crtc_state; + int ret; + + if (!fb) + return 0; + + if (WARN_ON(!new_plane_state->crtc)) + return 0; + + ret = mtk_crtc_plane_check(new_plane_state->crtc, plane, + to_mtk_plane_state(new_plane_state)); + if (ret) + return ret; + + crtc_state = drm_atomic_get_crtc_state(state, + new_plane_state->crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + return drm_atomic_helper_check_plane_state(new_plane_state, + crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + true, true); +} + +static void mtk_plane_atomic_disable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state); + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + + mtk_plane_state->pending.enable = false; + wmb(); /* Make sure the above parameter is set before update */ + mtk_plane_state->pending.dirty = true; + + if (old_state && old_state->crtc) + mtk_crtc_plane_disable(old_state->crtc, plane); +} + +static void mtk_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state); + + if (!new_state->crtc || WARN_ON(!new_state->fb)) + return; + + if (!new_state->visible) { + mtk_plane_atomic_disable(plane, state); + return; + } + + mtk_plane_update_new_state(new_state, mtk_plane_state); + wmb(); /* Make sure the above parameters are set before update */ + mtk_plane_state->pending.dirty = true; +} + +static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { + .atomic_check = mtk_plane_atomic_check, + .atomic_update = mtk_plane_atomic_update, + .atomic_disable = mtk_plane_atomic_disable, + .atomic_async_update = mtk_plane_atomic_async_update, + .atomic_async_check = mtk_plane_atomic_async_check, +}; + +int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, + unsigned long possible_crtcs, enum drm_plane_type type, + unsigned int supported_rotations, const u32 blend_modes, + const u32 *formats, size_t num_formats, + bool supports_afbc, unsigned int plane_idx) +{ + int err; + + if (!formats || !num_formats) { + DRM_ERROR("no formats for plane\n"); + return -EINVAL; + } + + err = drm_universal_plane_init(dev, plane, possible_crtcs, + &mtk_plane_funcs, formats, + num_formats, + supports_afbc ? modifiers : NULL, + type, NULL); + if (err) { + DRM_ERROR("failed to initialize plane\n"); + return err; + } + + /* + * The hardware does not support repositioning planes by muxing: their + * Z-position is infact fixed and the only way to change the actual + * order is to swap the contents of the entire register set of one + * overlay with another, which may be more expensive than desired. + * + * With no repositioning, the caller of this function guarantees that + * the plane_idx is correct. This means that, for example, the PRIMARY + * plane fed to this function will always have plane_idx zero. 
+ */ + err = drm_plane_create_zpos_immutable_property(plane, plane_idx); + if (err) { + DRM_ERROR("Failed to create zpos property for plane %u\n", plane_idx); + return err; + } + + if (supported_rotations) { + err = drm_plane_create_rotation_property(plane, + DRM_MODE_ROTATE_0, + supported_rotations); + if (err) + DRM_INFO("Create rotation property failed\n"); + } + + err = drm_plane_create_alpha_property(plane); + if (err) + DRM_ERROR("failed to create property: alpha\n"); + + if (blend_modes) { + err = drm_plane_create_blend_mode_property(plane, blend_modes); + if (err) + DRM_ERROR("failed to create property: blend_mode\n"); + } + + drm_plane_helper_add(plane, &mtk_plane_helper_funcs); + + return 0; +} diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h new file mode 100644 index 000000000000..95c5fa5295d8 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_plane.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015 MediaTek Inc. + * Author: CK Hu <ck.hu@mediatek.com> + */ + +#ifndef _MTK_PLANE_H_ +#define _MTK_PLANE_H_ + +#include <drm/drm_crtc.h> +#include <linux/types.h> + +#define AFBC_DATA_BLOCK_WIDTH 32 +#define AFBC_DATA_BLOCK_HEIGHT 8 +#define AFBC_HEADER_BLOCK_SIZE 16 +#define AFBC_HEADER_ALIGNMENT 1024 + +struct mtk_plane_pending_state { + bool config; + bool enable; + dma_addr_t addr; + dma_addr_t hdr_addr; + unsigned int pitch; + unsigned int hdr_pitch; + unsigned int format; + unsigned long long modifier; + unsigned int x; + unsigned int y; + unsigned int width; + unsigned int height; + unsigned int rotation; + bool dirty; + bool async_dirty; + bool async_config; + enum drm_color_encoding color_encoding; +}; + +struct mtk_plane_state { + struct drm_plane_state base; + struct mtk_plane_pending_state pending; +}; + +static inline struct mtk_plane_state * +to_mtk_plane_state(struct drm_plane_state *state) +{ + return container_of(state, struct mtk_plane_state, base); +} + +int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, + unsigned long possible_crtcs, enum drm_plane_type type, + unsigned int supported_rotations, const u32 blend_modes, + const u32 *formats, size_t num_formats, + bool supports_afbc, unsigned int plane_idx); +#endif |
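For reference, the sketch below illustrates how a CRTC implementation could register its planes through the mtk_plane_init() helper declared in mtk_plane.h above. It is an illustration of the call signature only, not code from this patch set: the helper name, the format list, the premultiplied-alpha blend mask, the ROTATE_0-only rotation mask and the "no AFBC" flag are assumptions picked for the example.

#include <linux/bits.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>

#include "mtk_plane.h"

/* Example-only format list; a real display component reports its own. */
static const u32 example_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGB565,
};

static int example_create_planes(struct drm_device *drm,
				 struct drm_plane *planes,
				 unsigned int num_planes,
				 unsigned long crtc_mask)
{
	unsigned int i;
	int ret;

	for (i = 0; i < num_planes; i++) {
		/* Plane 0 is the PRIMARY plane, all others are OVERLAYs */
		enum drm_plane_type type = i ? DRM_PLANE_TYPE_OVERLAY :
					       DRM_PLANE_TYPE_PRIMARY;

		ret = mtk_plane_init(drm, &planes[i], crtc_mask, type,
				     DRM_MODE_ROTATE_0,
				     BIT(DRM_MODE_BLEND_PREMULTI),
				     example_formats,
				     ARRAY_SIZE(example_formats),
				     false /* supports_afbc */,
				     i /* plane_idx */);
		if (ret)
			return ret;
	}

	return 0;
}

Because mtk_plane_init() creates an immutable zpos property equal to plane_idx, the loop index doubles as the fixed Z-order of each plane, matching the comment in mtk_plane.c about the hardware not supporting plane repositioning.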

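One further illustrative cross-check, this time of the AFBC address arithmetic in mtk_plane_update_new_state() further up: the hypothetical helper below redoes the same layout math using the AFBC_* constants from mtk_plane.h, and its comment works the numbers for an assumed 1920x1080 ARGB8888 framebuffer scanned out from source offset (0, 0). The function name and the framebuffer geometry are made up for the example; the formulas and constants are the ones used by the driver.

#include <linux/align.h>
#include <linux/types.h>

#include "mtk_plane.h"

/*
 * Hypothetical helper mirroring the AFBC layout math of
 * mtk_plane_update_new_state(); it exists for illustration only.
 *
 * Worked example for width = 1920, height = 1080, cpp = 4, src (0, 0):
 *   width_in_blocks  = ALIGN(1920, 32) / 32 = 60
 *   height_in_blocks = ALIGN(1080, 8)  / 8  = 135
 *   hdr_pitch = 60 * 16                     = 960 bytes
 *   hdr_size  = ALIGN(960 * 135, 1024)      = 130048 bytes
 *   data addr = base + hdr_size + one extra 32x8x4-byte block (1024 bytes)
 *             = base + 130048 + 1024        = base + 131072
 */
static dma_addr_t example_afbc_data_addr(dma_addr_t fb_base,
					 unsigned int width,
					 unsigned int height,
					 unsigned int cpp)
{
	unsigned int width_in_blocks = ALIGN(width, AFBC_DATA_BLOCK_WIDTH) /
				       AFBC_DATA_BLOCK_WIDTH;
	unsigned int height_in_blocks = ALIGN(height, AFBC_DATA_BLOCK_HEIGHT) /
					AFBC_DATA_BLOCK_HEIGHT;
	unsigned int hdr_pitch = width_in_blocks * AFBC_HEADER_BLOCK_SIZE;
	unsigned int hdr_size = ALIGN(hdr_pitch * height_in_blocks,
				      AFBC_HEADER_ALIGNMENT);
	unsigned int block_bytes = AFBC_DATA_BLOCK_WIDTH *
				   AFBC_DATA_BLOCK_HEIGHT * cpp;

	/*
	 * With a (0, 0) source offset the data plane follows the headers,
	 * shifted by one extra block, exactly like in the driver.
	 */
	return fb_base + hdr_size + block_bytes;
}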