Diffstat (limited to 'drivers/gpu/drm/imx')
-rw-r--r--  drivers/gpu/drm/imx/Kconfig  6
-rw-r--r--  drivers/gpu/drm/imx/Makefile  6
-rw-r--r--  drivers/gpu/drm/imx/dc/Kconfig  13
-rw-r--r--  drivers/gpu/drm/imx/dc/Makefile  7
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-cf.c  172
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-crtc.c  555
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-de.c  177
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-de.h  59
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-drv.c  293
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-drv.h  102
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-ed.c  288
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-fg.c  376
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-fl.c  185
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-fu.c  258
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-fu.h  129
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-fw.c  222
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-ic.c  282
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-kms.c  143
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-kms.h  131
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-lb.c  325
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-pe.c  158
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-pe.h  101
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-plane.c  224
-rw-r--r--  drivers/gpu/drm/imx/dc/dc-tc.c  141
-rw-r--r--  drivers/gpu/drm/imx/dcss/Kconfig  13
-rw-r--r--  drivers/gpu/drm/imx/dcss/Makefile  6
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-blkctl.c  61
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-crtc.c  222
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-ctxld.c  416
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-dev.c  321
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-dev.h  178
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-dpr.c  547
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-drv.c  128
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-dtg.c  391
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-kms.c  183
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-kms.h  45
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-plane.c  425
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-scaler.c  840
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-ss.c  174
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/Kconfig  49
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/Makefile  11
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c  280
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/imx-drm-core.c  377
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/imx-drm.h  31
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/imx-ldb.c  646
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/imx-tve.c  679
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c  453
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c  957
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/ipuv3-plane.h  50
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/parallel-display.c  288
-rw-r--r--  drivers/gpu/drm/imx/lcdc/Kconfig  10
-rw-r--r--  drivers/gpu/drm/imx/lcdc/Makefile  1
-rw-r--r--  drivers/gpu/drm/imx/lcdc/imx-lcdc.c  537
53 files changed, 12672 insertions, 0 deletions
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
new file mode 100644
index 000000000000..3e8c6edbc17c
--- /dev/null
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+source "drivers/gpu/drm/imx/dc/Kconfig"
+source "drivers/gpu/drm/imx/dcss/Kconfig"
+source "drivers/gpu/drm/imx/ipuv3/Kconfig"
+source "drivers/gpu/drm/imx/lcdc/Kconfig"
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile
new file mode 100644
index 000000000000..c7b317640d71
--- /dev/null
+++ b/drivers/gpu/drm/imx/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_DRM_IMX8_DC) += dc/
+obj-$(CONFIG_DRM_IMX_DCSS) += dcss/
+obj-$(CONFIG_DRM_IMX) += ipuv3/
+obj-$(CONFIG_DRM_IMX_LCDC) += lcdc/
diff --git a/drivers/gpu/drm/imx/dc/Kconfig b/drivers/gpu/drm/imx/dc/Kconfig
new file mode 100644
index 000000000000..415993207f2e
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/Kconfig
@@ -0,0 +1,13 @@
+config DRM_IMX8_DC
+ tristate "Freescale i.MX8 Display Controller Graphics"
+ depends on DRM && COMMON_CLK && OF && (ARCH_MXC || COMPILE_TEST)
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_DMA_HELPER
+ select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
+ select GENERIC_IRQ_CHIP
+ select REGMAP
+ select REGMAP_MMIO
+ help
+ Enable Freescale i.MX8 Display Controller (DC) graphics support
diff --git a/drivers/gpu/drm/imx/dc/Makefile b/drivers/gpu/drm/imx/dc/Makefile
new file mode 100644
index 000000000000..b9d33c074984
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+imx8-dc-drm-objs := dc-cf.o dc-crtc.o dc-de.o dc-drv.o dc-ed.o dc-fg.o dc-fl.o \
+ dc-fu.o dc-fw.o dc-ic.o dc-kms.o dc-lb.o dc-pe.o \
+ dc-plane.o dc-tc.o
+
+obj-$(CONFIG_DRM_IMX8_DC) += imx8-dc-drm.o
diff --git a/drivers/gpu/drm/imx/dc/dc-cf.c b/drivers/gpu/drm/imx/dc/dc-cf.c
new file mode 100644
index 000000000000..2f077161e912
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-cf.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/component.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+#define STATICCONTROL 0x8
+
+#define FRAMEDIMENSIONS 0xc
+#define HEIGHT(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define WIDTH(x) FIELD_PREP(GENMASK(13, 0), ((x) - 1))
+
+#define CONSTANTCOLOR 0x10
+#define BLUE(x) FIELD_PREP(GENMASK(15, 8), (x))
+
+static const struct dc_subdev_info dc_cf_info[] = {
+ { .reg_start = 0x56180960, .id = 0, },
+ { .reg_start = 0x561809e0, .id = 1, },
+ { .reg_start = 0x561809a0, .id = 4, },
+ { .reg_start = 0x56180a20, .id = 5, },
+};
+
+static const struct regmap_range dc_cf_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, CONSTANTCOLOR),
+};
+
+static const struct regmap_access_table dc_cf_regmap_access_table = {
+ .yes_ranges = dc_cf_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_cf_regmap_ranges),
+};
+
+static const struct regmap_config dc_cf_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_cf_regmap_access_table,
+ .rd_table = &dc_cf_regmap_access_table,
+ .max_register = CONSTANTCOLOR,
+};
+
+static inline void dc_cf_enable_shden(struct dc_cf *cf)
+{
+ regmap_write(cf->reg_cfg, STATICCONTROL, SHDEN);
+}
+
+enum dc_link_id dc_cf_get_link_id(struct dc_cf *cf)
+{
+ return cf->link;
+}
+
+void dc_cf_framedimensions(struct dc_cf *cf, unsigned int w,
+ unsigned int h)
+{
+ regmap_write(cf->reg_cfg, FRAMEDIMENSIONS, WIDTH(w) | HEIGHT(h));
+}
+
+void dc_cf_constantcolor_black(struct dc_cf *cf)
+{
+ regmap_write(cf->reg_cfg, CONSTANTCOLOR, 0);
+}
+
+void dc_cf_constantcolor_blue(struct dc_cf *cf)
+{
+ regmap_write(cf->reg_cfg, CONSTANTCOLOR, BLUE(0xff));
+}
+
+void dc_cf_init(struct dc_cf *cf)
+{
+ dc_cf_enable_shden(cf);
+}
+
+static int dc_cf_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_cfg;
+ struct dc_cf *cf;
+ int id;
+
+ cf = devm_kzalloc(dev, sizeof(*cf), GFP_KERNEL);
+ if (!cf)
+ return -ENOMEM;
+
+ res_pec = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ cf->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_cf_cfg_regmap_config);
+ if (IS_ERR(cf->reg_cfg))
+ return PTR_ERR(cf->reg_cfg);
+
+ id = dc_subdev_get_id(dc_cf_info, ARRAY_SIZE(dc_cf_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ switch (id) {
+ case 0:
+ cf->link = LINK_ID_CONSTFRAME0;
+ dc_drm->cf_cont[0] = cf;
+ break;
+ case 1:
+ cf->link = LINK_ID_CONSTFRAME1;
+ dc_drm->cf_cont[1] = cf;
+ break;
+ case 4:
+ cf->link = LINK_ID_CONSTFRAME4;
+ dc_drm->cf_safe[0] = cf;
+ break;
+ case 5:
+ cf->link = LINK_ID_CONSTFRAME5;
+ dc_drm->cf_safe[1] = cf;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct component_ops dc_cf_ops = {
+ .bind = dc_cf_bind,
+};
+
+static int dc_cf_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_cf_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_cf_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_cf_ops);
+}
+
+static const struct of_device_id dc_cf_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-constframe" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_cf_dt_ids);
+
+struct platform_driver dc_cf_driver = {
+ .probe = dc_cf_probe,
+ .remove = dc_cf_remove,
+ .driver = {
+ .name = "imx8-dc-constframe",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_cf_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-crtc.c b/drivers/gpu/drm/imx/dc/dc-crtc.c
new file mode 100644
index 000000000000..31d3a982deaf
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-crtc.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/completion.h>
+#include <linux/container_of.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+#include "dc-kms.h"
+#include "dc-pe.h"
+
+#define dc_crtc_dbg(crtc, fmt, ...) \
+do { \
+ struct drm_crtc *_crtc = (crtc); \
+ drm_dbg_kms(_crtc->dev, "[CRTC:%d:%s] " fmt, \
+ _crtc->base.id, _crtc->name, ##__VA_ARGS__); \
+} while (0)
+
+#define dc_crtc_err(crtc, fmt, ...) \
+do { \
+ struct drm_crtc *_crtc = (crtc); \
+ drm_err(_crtc->dev, "[CRTC:%d:%s] " fmt, \
+ _crtc->base.id, _crtc->name, ##__VA_ARGS__); \
+} while (0)
+
+#define DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(c) \
+do { \
+ unsigned long ret; \
+ ret = wait_for_completion_timeout(&dc_crtc->c, HZ); \
+ if (ret == 0) \
+ dc_crtc_err(crtc, "%s: wait for " #c " timeout\n", \
+ __func__); \
+} while (0)
+
+#define DC_CRTC_CHECK_FRAMEGEN_FIFO(fg) \
+do { \
+ struct dc_fg *_fg = (fg); \
+ if (dc_fg_secondary_requests_to_read_empty_fifo(_fg)) { \
+ dc_fg_secondary_clear_channel_status(_fg); \
+ dc_crtc_err(crtc, "%s: FrameGen FIFO empty\n", \
+ __func__); \
+ } \
+} while (0)
+
+#define DC_CRTC_WAIT_FOR_FRAMEGEN_SECONDARY_SYNCUP(fg) \
+do { \
+ if (dc_fg_wait_for_secondary_syncup(fg)) \
+ dc_crtc_err(crtc, \
+ "%s: FrameGen secondary channel isn't synced up\n",\
+ __func__); \
+} while (0)
+
+static inline struct dc_crtc *to_dc_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct dc_crtc, base);
+}
+
+static u32 dc_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ return dc_fg_get_frame_index(dc_crtc->fg);
+}
+
+static int dc_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ enable_irq(dc_crtc->irq_dec_framecomplete);
+
+ return 0;
+}
+
+static void dc_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ /* nosync due to atomic context */
+ disable_irq_nosync(dc_crtc->irq_dec_framecomplete);
+}
+
+static const struct drm_crtc_funcs dc_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .get_vblank_counter = dc_crtc_get_vblank_counter,
+ .enable_vblank = dc_crtc_enable_vblank,
+ .disable_vblank = dc_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+};
+
+static void dc_crtc_queue_state_event(struct drm_crtc_state *crtc_state)
+{
+ struct drm_crtc *crtc = crtc_state->crtc;
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc_state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc));
+ WARN_ON(dc_crtc->event);
+ dc_crtc->event = crtc_state->event;
+ crtc_state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static inline enum drm_mode_status
+dc_crtc_check_clock(struct dc_crtc *dc_crtc, int clk_khz)
+{
+ return dc_fg_check_clock(dc_crtc->fg, clk_khz);
+}
+
+static enum drm_mode_status
+dc_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ enum drm_mode_status status;
+
+ status = dc_crtc_check_clock(dc_crtc, mode->clock);
+ if (status != MODE_OK)
+ return status;
+
+ if (mode->crtc_clock > DC_FRAMEGEN_MAX_CLOCK_KHZ)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static int
+dc_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_display_mode *adj = &new_crtc_state->adjusted_mode;
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ enum drm_mode_status status;
+
+ status = dc_crtc_check_clock(dc_crtc, adj->clock);
+ if (status != MODE_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void
+dc_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct dc_drm_device *dc_drm = to_dc_drm_device(crtc->dev);
+ int idx, ret;
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) ||
+ !new_crtc_state->active)
+ return;
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ return;
+
+ /* request pixel engine power-on when CRTC starts to be active */
+ ret = pm_runtime_resume_and_get(dc_drm->pe->dev);
+ if (ret)
+ dc_crtc_err(crtc, "failed to get DC pixel engine RPM: %d\n",
+ ret);
+
+ drm_dev_exit(idx);
+}
+
+static void
+dc_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *old_crtc_state =
+ drm_atomic_get_old_crtc_state(state, crtc);
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ int idx;
+
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state) ||
+ (!old_crtc_state->active && !new_crtc_state->active))
+ return;
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ goto out;
+
+ enable_irq(dc_crtc->irq_ed_cont_shdload);
+
+ /* flush plane update out to display */
+ dc_ed_pec_sync_trigger(dc_crtc->ed_cont);
+
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(ed_cont_shdload_done);
+
+ disable_irq(dc_crtc->irq_ed_cont_shdload);
+
+ DC_CRTC_CHECK_FRAMEGEN_FIFO(dc_crtc->fg);
+
+ drm_dev_exit(idx);
+
+out:
+ dc_crtc_queue_state_event(new_crtc_state);
+}
+
+static void
+dc_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_display_mode *adj = &new_crtc_state->adjusted_mode;
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ enum dc_link_id cf_link;
+ int idx, ret;
+
+ dc_crtc_dbg(crtc, "mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(adj));
+
+ drm_crtc_vblank_on(crtc);
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ goto out;
+
+ /* request display engine power-on when CRTC is enabled */
+ ret = pm_runtime_resume_and_get(dc_crtc->de->dev);
+ if (ret < 0)
+ dc_crtc_err(crtc, "failed to get DC display engine RPM: %d\n",
+ ret);
+
+ enable_irq(dc_crtc->irq_dec_shdload);
+ enable_irq(dc_crtc->irq_ed_cont_shdload);
+ enable_irq(dc_crtc->irq_ed_safe_shdload);
+
+ dc_fg_cfg_videomode(dc_crtc->fg, adj);
+
+ dc_cf_framedimensions(dc_crtc->cf_cont,
+ adj->crtc_hdisplay, adj->crtc_vdisplay);
+ dc_cf_framedimensions(dc_crtc->cf_safe,
+ adj->crtc_hdisplay, adj->crtc_vdisplay);
+
+ /* constframe in safety stream shows blue frame */
+ dc_cf_constantcolor_blue(dc_crtc->cf_safe);
+ cf_link = dc_cf_get_link_id(dc_crtc->cf_safe);
+ dc_ed_pec_src_sel(dc_crtc->ed_safe, cf_link);
+
+ /* show CRTC background if no plane is enabled */
+ if (new_crtc_state->plane_mask == 0) {
+ /* constframe in content stream shows black frame */
+ dc_cf_constantcolor_black(dc_crtc->cf_cont);
+
+ cf_link = dc_cf_get_link_id(dc_crtc->cf_cont);
+ dc_ed_pec_src_sel(dc_crtc->ed_cont, cf_link);
+ }
+
+ dc_fg_enable_clock(dc_crtc->fg);
+ dc_ed_pec_sync_trigger(dc_crtc->ed_cont);
+ dc_ed_pec_sync_trigger(dc_crtc->ed_safe);
+ dc_fg_shdtokgen(dc_crtc->fg);
+ dc_fg_enable(dc_crtc->fg);
+
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(ed_safe_shdload_done);
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(ed_cont_shdload_done);
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(dec_shdload_done);
+
+ disable_irq(dc_crtc->irq_ed_safe_shdload);
+ disable_irq(dc_crtc->irq_ed_cont_shdload);
+ disable_irq(dc_crtc->irq_dec_shdload);
+
+ DC_CRTC_WAIT_FOR_FRAMEGEN_SECONDARY_SYNCUP(dc_crtc->fg);
+
+ DC_CRTC_CHECK_FRAMEGEN_FIFO(dc_crtc->fg);
+
+ drm_dev_exit(idx);
+
+out:
+ dc_crtc_queue_state_event(new_crtc_state);
+}
+
+static void
+dc_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct dc_drm_device *dc_drm = to_dc_drm_device(crtc->dev);
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ int idx, ret;
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ goto out;
+
+ enable_irq(dc_crtc->irq_dec_seqcomplete);
+ dc_fg_disable(dc_crtc->fg);
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(dec_seqcomplete_done);
+ disable_irq(dc_crtc->irq_dec_seqcomplete);
+
+ dc_fg_disable_clock(dc_crtc->fg);
+
+ /* request pixel engine power-off as plane is off too */
+ ret = pm_runtime_put(dc_drm->pe->dev);
+ if (ret)
+ dc_crtc_err(crtc, "failed to put DC pixel engine RPM: %d\n",
+ ret);
+
+ /* request display engine power-off when CRTC is disabled */
+ ret = pm_runtime_put(dc_crtc->de->dev);
+ if (ret < 0)
+ dc_crtc_err(crtc, "failed to put DC display engine RPM: %d\n",
+ ret);
+
+ drm_dev_exit(idx);
+
+out:
+ drm_crtc_vblank_off(crtc);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (new_crtc_state->event && !new_crtc_state->active) {
+ drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
+ new_crtc_state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static bool dc_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ int vdisplay = mode->crtc_vdisplay;
+ int vtotal = mode->crtc_vtotal;
+ bool reliable;
+ int line;
+ int idx;
+
+ if (stime)
+ *stime = ktime_get();
+
+ if (!drm_dev_enter(crtc->dev, &idx)) {
+ reliable = false;
+ *vpos = 0;
+ *hpos = 0;
+ goto out;
+ }
+
+ /* line index starts with 0 for the first active output line */
+ line = dc_fg_get_line_index(dc_crtc->fg);
+
+ if (line < vdisplay)
+ /* active scanout area - positive */
+ *vpos = line + 1;
+ else
+ /* inside vblank - negative */
+ *vpos = line - (vtotal - 1);
+
+ *hpos = 0;
+
+ reliable = true;
+
+ drm_dev_exit(idx);
+out:
+ if (etime)
+ *etime = ktime_get();
+
+ return reliable;
+}
+
+static const struct drm_crtc_helper_funcs dc_helper_funcs = {
+ .mode_valid = dc_crtc_mode_valid,
+ .atomic_check = dc_crtc_atomic_check,
+ .atomic_begin = dc_crtc_atomic_begin,
+ .atomic_flush = dc_crtc_atomic_flush,
+ .atomic_enable = dc_crtc_atomic_enable,
+ .atomic_disable = dc_crtc_atomic_disable,
+ .get_scanout_position = dc_crtc_get_scanout_position,
+};
+
+static irqreturn_t dc_crtc_irq_handler_dec_framecomplete(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+ struct drm_crtc *crtc = &dc_crtc->base;
+ unsigned long flags;
+
+ drm_crtc_handle_vblank(crtc);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (dc_crtc->event) {
+ drm_crtc_send_vblank_event(crtc, dc_crtc->event);
+ dc_crtc->event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+dc_crtc_irq_handler_dec_seqcomplete_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->dec_seqcomplete_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dc_crtc_irq_handler_dec_shdload_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->dec_shdload_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+dc_crtc_irq_handler_ed_cont_shdload_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->ed_cont_shdload_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+dc_crtc_irq_handler_ed_safe_shdload_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->ed_safe_shdload_done);
+
+ return IRQ_HANDLED;
+}
+
+static int dc_crtc_request_irqs(struct drm_device *drm, struct dc_crtc *dc_crtc)
+{
+ struct {
+ struct device *dev;
+ unsigned int irq;
+ irqreturn_t (*irq_handler)(int irq, void *dev_id);
+ } irqs[DC_CRTC_IRQS] = {
+ {
+ dc_crtc->de->dev,
+ dc_crtc->irq_dec_framecomplete,
+ dc_crtc_irq_handler_dec_framecomplete,
+ }, {
+ dc_crtc->de->dev,
+ dc_crtc->irq_dec_seqcomplete,
+ dc_crtc_irq_handler_dec_seqcomplete_done,
+ }, {
+ dc_crtc->de->dev,
+ dc_crtc->irq_dec_shdload,
+ dc_crtc_irq_handler_dec_shdload_done,
+ }, {
+ dc_crtc->ed_cont->dev,
+ dc_crtc->irq_ed_cont_shdload,
+ dc_crtc_irq_handler_ed_cont_shdload_done,
+ }, {
+ dc_crtc->ed_safe->dev,
+ dc_crtc->irq_ed_safe_shdload,
+ dc_crtc_irq_handler_ed_safe_shdload_done,
+ },
+ };
+ int i, ret;
+
+ for (i = 0; i < DC_CRTC_IRQS; i++) {
+ struct dc_crtc_irq *irq = &dc_crtc->irqs[i];
+
+ ret = devm_request_irq(irqs[i].dev, irqs[i].irq,
+ irqs[i].irq_handler, IRQF_NO_AUTOEN,
+ dev_name(irqs[i].dev), dc_crtc);
+ if (ret) {
+ dev_err(irqs[i].dev, "failed to request irq(%u): %d\n",
+ irqs[i].irq, ret);
+ return ret;
+ }
+
+ irq->dc_crtc = dc_crtc;
+ irq->irq = irqs[i].irq;
+ }
+
+ return 0;
+}
+
+int dc_crtc_init(struct dc_drm_device *dc_drm, int crtc_index)
+{
+ struct dc_crtc *dc_crtc = &dc_drm->dc_crtc[crtc_index];
+ struct drm_device *drm = &dc_drm->base;
+ struct dc_de *de = dc_drm->de[crtc_index];
+ struct dc_pe *pe = dc_drm->pe;
+ struct dc_plane *dc_primary;
+ int ret;
+
+ dc_crtc->de = de;
+
+ init_completion(&dc_crtc->dec_seqcomplete_done);
+ init_completion(&dc_crtc->dec_shdload_done);
+ init_completion(&dc_crtc->ed_cont_shdload_done);
+ init_completion(&dc_crtc->ed_safe_shdload_done);
+
+ dc_crtc->cf_cont = pe->cf_cont[crtc_index];
+ dc_crtc->cf_safe = pe->cf_safe[crtc_index];
+ dc_crtc->ed_cont = pe->ed_cont[crtc_index];
+ dc_crtc->ed_safe = pe->ed_safe[crtc_index];
+ dc_crtc->fg = de->fg;
+
+ dc_crtc->irq_dec_framecomplete = de->irq_framecomplete;
+ dc_crtc->irq_dec_seqcomplete = de->irq_seqcomplete;
+ dc_crtc->irq_dec_shdload = de->irq_shdload;
+ dc_crtc->irq_ed_safe_shdload = dc_crtc->ed_safe->irq_shdload;
+ dc_crtc->irq_ed_cont_shdload = dc_crtc->ed_cont->irq_shdload;
+
+ dc_primary = &dc_drm->dc_primary[crtc_index];
+ ret = dc_plane_init(dc_drm, dc_primary);
+ if (ret) {
+ dev_err(de->dev, "failed to initialize primary plane: %d\n",
+ ret);
+ return ret;
+ }
+
+ drm_crtc_helper_add(&dc_crtc->base, &dc_helper_funcs);
+
+ ret = drm_crtc_init_with_planes(drm, &dc_crtc->base, &dc_primary->base,
+ NULL, &dc_crtc_funcs, NULL);
+ if (ret)
+ dev_err(de->dev, "failed to add CRTC: %d\n", ret);
+
+ return ret;
+}
+
+int dc_crtc_post_init(struct dc_drm_device *dc_drm, int crtc_index)
+{
+ struct dc_crtc *dc_crtc = &dc_drm->dc_crtc[crtc_index];
+ struct drm_device *drm = &dc_drm->base;
+
+ return dc_crtc_request_irqs(drm, dc_crtc);
+}
diff --git a/drivers/gpu/drm/imx/dc/dc-de.c b/drivers/gpu/drm/imx/dc/dc-de.c
new file mode 100644
index 000000000000..5a3125596fdf
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-de.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+
+#define POLARITYCTRL 0xc
+#define POLEN_HIGH BIT(2)
+
+static const struct dc_subdev_info dc_de_info[] = {
+ { .reg_start = 0x5618b400, .id = 0, },
+ { .reg_start = 0x5618b420, .id = 1, },
+};
+
+static const struct regmap_range dc_de_regmap_ranges[] = {
+ regmap_reg_range(POLARITYCTRL, POLARITYCTRL),
+};
+
+static const struct regmap_access_table dc_de_regmap_access_table = {
+ .yes_ranges = dc_de_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_de_regmap_ranges),
+};
+
+static const struct regmap_config dc_de_top_regmap_config = {
+ .name = "top",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_de_regmap_access_table,
+ .rd_table = &dc_de_regmap_access_table,
+ .max_register = POLARITYCTRL,
+};
+
+static inline void dc_dec_init(struct dc_de *de)
+{
+ regmap_write_bits(de->reg_top, POLARITYCTRL, POLEN_HIGH, POLEN_HIGH);
+}
+
+static int dc_de_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_top;
+ void __iomem *base_top;
+ struct dc_de *de;
+ int ret, id;
+
+ de = devm_kzalloc(dev, sizeof(*de), GFP_KERNEL);
+ if (!de)
+ return -ENOMEM;
+
+ base_top = devm_platform_get_and_ioremap_resource(pdev, 0, &res_top);
+ if (IS_ERR(base_top))
+ return PTR_ERR(base_top);
+
+ de->reg_top = devm_regmap_init_mmio(dev, base_top,
+ &dc_de_top_regmap_config);
+ if (IS_ERR(de->reg_top))
+ return PTR_ERR(de->reg_top);
+
+ de->irq_shdload = platform_get_irq_byname(pdev, "shdload");
+ if (de->irq_shdload < 0)
+ return de->irq_shdload;
+
+ de->irq_framecomplete = platform_get_irq_byname(pdev, "framecomplete");
+ if (de->irq_framecomplete < 0)
+ return de->irq_framecomplete;
+
+ de->irq_seqcomplete = platform_get_irq_byname(pdev, "seqcomplete");
+ if (de->irq_seqcomplete < 0)
+ return de->irq_seqcomplete;
+
+ de->dev = dev;
+
+ dev_set_drvdata(dev, de);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ id = dc_subdev_get_id(dc_de_info, ARRAY_SIZE(dc_de_info), res_top);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ dc_drm->de[id] = de;
+
+ return 0;
+}
+
+/*
+ * It's possible to get the child device pointers from the child component
+ * bind callbacks, but it depends on the component helper behavior to bind
+ * the display engine component first. To avoid that dependency, fetch the
+ * pointers from dc_drm in this post-bind step instead.
+ */
+void dc_de_post_bind(struct dc_drm_device *dc_drm)
+{
+ struct dc_de *de;
+ int i;
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ de = dc_drm->de[i];
+ de->fg = dc_drm->fg[i];
+ de->tc = dc_drm->tc[i];
+ }
+}
+
+static const struct component_ops dc_de_ops = {
+ .bind = dc_de_bind,
+};
+
+static int dc_de_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = component_add(&pdev->dev, &dc_de_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_de_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_de_ops);
+}
+
+static int dc_de_runtime_resume(struct device *dev)
+{
+ struct dc_de *de = dev_get_drvdata(dev);
+
+ dc_dec_init(de);
+ dc_fg_init(de->fg);
+ dc_tc_init(de->tc);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dc_de_pm_ops = {
+ RUNTIME_PM_OPS(NULL, dc_de_runtime_resume, NULL)
+};
+
+static const struct of_device_id dc_de_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-display-engine" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_de_dt_ids);
+
+struct platform_driver dc_de_driver = {
+ .probe = dc_de_probe,
+ .remove = dc_de_remove,
+ .driver = {
+ .name = "imx8-dc-display-engine",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_de_dt_ids,
+ .pm = pm_sleep_ptr(&dc_de_pm_ops),
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-de.h b/drivers/gpu/drm/imx/dc/dc-de.h
new file mode 100644
index 000000000000..211f3fcc1a9a
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-de.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_DISPLAY_ENGINE_H__
+#define __DC_DISPLAY_ENGINE_H__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <drm/drm_modes.h>
+
+#define DC_DISPLAYS 2
+
+#define DC_FRAMEGEN_MAX_FRAME_INDEX 0x3ffff
+#define DC_FRAMEGEN_MAX_CLOCK_KHZ 300000
+
+struct dc_fg {
+ struct device *dev;
+ struct regmap *reg;
+ struct clk *clk_disp;
+};
+
+struct dc_tc {
+ struct device *dev;
+ struct regmap *reg;
+};
+
+struct dc_de {
+ struct device *dev;
+ struct regmap *reg_top;
+ struct dc_fg *fg;
+ struct dc_tc *tc;
+ int irq_shdload;
+ int irq_framecomplete;
+ int irq_seqcomplete;
+};
+
+/* Frame Generator Unit */
+void dc_fg_cfg_videomode(struct dc_fg *fg, struct drm_display_mode *m);
+void dc_fg_enable(struct dc_fg *fg);
+void dc_fg_disable(struct dc_fg *fg);
+void dc_fg_shdtokgen(struct dc_fg *fg);
+u32 dc_fg_get_frame_index(struct dc_fg *fg);
+u32 dc_fg_get_line_index(struct dc_fg *fg);
+bool dc_fg_wait_for_frame_index_moving(struct dc_fg *fg);
+bool dc_fg_secondary_requests_to_read_empty_fifo(struct dc_fg *fg);
+void dc_fg_secondary_clear_channel_status(struct dc_fg *fg);
+int dc_fg_wait_for_secondary_syncup(struct dc_fg *fg);
+void dc_fg_enable_clock(struct dc_fg *fg);
+void dc_fg_disable_clock(struct dc_fg *fg);
+enum drm_mode_status dc_fg_check_clock(struct dc_fg *fg, int clk_khz);
+void dc_fg_init(struct dc_fg *fg);
+
+/* Timing Controller Unit */
+void dc_tc_init(struct dc_tc *tc);
+
+#endif /* __DC_DISPLAY_ENGINE_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-drv.c b/drivers/gpu/drm/imx/dc/dc-drv.c
new file mode 100644
index 000000000000..04f021d2d6cf
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-drv.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_of.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+struct dc_priv {
+ struct drm_device *drm;
+ struct clk *clk_cfg;
+};
+
+DEFINE_DRM_GEM_DMA_FOPS(dc_drm_driver_fops);
+
+static struct drm_driver dc_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+ DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
+ .fops = &dc_drm_driver_fops,
+ .name = "imx8-dc",
+ .desc = "i.MX8 DC DRM graphics",
+ .major = 1,
+ .minor = 0,
+ .patchlevel = 0,
+};
+
+static void
+dc_add_components(struct device *dev, struct component_match **matchptr)
+{
+ struct device_node *child, *grandchild;
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ /* The interrupt controller is not a component. */
+ if (of_device_is_compatible(child, "fsl,imx8qxp-dc-intc"))
+ continue;
+
+ drm_of_component_match_add(dev, matchptr, component_compare_of,
+ child);
+
+ for_each_available_child_of_node(child, grandchild)
+ drm_of_component_match_add(dev, matchptr,
+ component_compare_of,
+ grandchild);
+ }
+}
+
+static int dc_drm_component_bind_all(struct dc_drm_device *dc_drm)
+{
+ struct drm_device *drm = &dc_drm->base;
+ int ret;
+
+ ret = component_bind_all(drm->dev, dc_drm);
+ if (ret)
+ return ret;
+
+ dc_de_post_bind(dc_drm);
+ dc_pe_post_bind(dc_drm);
+
+ return 0;
+}
+
+static void dc_drm_component_unbind_all(void *ptr)
+{
+ struct dc_drm_device *dc_drm = ptr;
+ struct drm_device *drm = &dc_drm->base;
+
+ component_unbind_all(drm->dev, dc_drm);
+}
+
+static int dc_drm_bind(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+ struct dc_drm_device *dc_drm;
+ struct drm_device *drm;
+ int ret;
+
+ dc_drm = devm_drm_dev_alloc(dev, &dc_drm_driver, struct dc_drm_device,
+ base);
+ if (IS_ERR(dc_drm))
+ return PTR_ERR(dc_drm);
+
+ drm = &dc_drm->base;
+
+ ret = dc_drm_component_bind_all(dc_drm);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, dc_drm_component_unbind_all,
+ dc_drm);
+ if (ret)
+ return ret;
+
+ ret = dc_kms_init(dc_drm);
+ if (ret)
+ return ret;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret) {
+ dev_err(dev, "failed to register drm device: %d\n", ret);
+ goto err;
+ }
+
+ drm_client_setup_with_fourcc(drm, DRM_FORMAT_XRGB8888);
+
+ priv->drm = drm;
+
+ return 0;
+
+err:
+ dc_kms_uninit(dc_drm);
+
+ return ret;
+}
+
+static void dc_drm_unbind(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+ struct dc_drm_device *dc_drm = to_dc_drm_device(priv->drm);
+ struct drm_device *drm = &dc_drm->base;
+
+ priv->drm = NULL;
+ drm_dev_unplug(drm);
+ dc_kms_uninit(dc_drm);
+ drm_atomic_helper_shutdown(drm);
+}
+
+static const struct component_master_ops dc_drm_ops = {
+ .bind = dc_drm_bind,
+ .unbind = dc_drm_unbind,
+};
+
+static int dc_probe(struct platform_device *pdev)
+{
+ struct component_match *match = NULL;
+ struct dc_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk_cfg = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk_cfg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk_cfg),
+ "failed to get cfg clock\n");
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret)
+ return ret;
+
+ dc_add_components(&pdev->dev, &match);
+
+ ret = component_master_add_with_match(&pdev->dev, &dc_drm_ops, match);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component master\n");
+
+ return 0;
+}
+
+static void dc_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &dc_drm_ops);
+}
+
+static int dc_runtime_suspend(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clk_cfg);
+
+ return 0;
+}
+
+static int dc_runtime_resume(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk_cfg);
+ if (ret)
+ dev_err(dev, "failed to enable cfg clock: %d\n", ret);
+
+ return ret;
+}
+
+static int dc_suspend(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_suspend(priv->drm);
+}
+
+static int dc_resume(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_resume(priv->drm);
+}
+
+static void dc_shutdown(struct platform_device *pdev)
+{
+ struct dc_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ drm_atomic_helper_shutdown(priv->drm);
+}
+
+static const struct dev_pm_ops dc_pm_ops = {
+ RUNTIME_PM_OPS(dc_runtime_suspend, dc_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(dc_suspend, dc_resume)
+};
+
+static const struct of_device_id dc_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_dt_ids);
+
+static struct platform_driver dc_driver = {
+ .probe = dc_probe,
+ .remove = dc_remove,
+ .shutdown = dc_shutdown,
+ .driver = {
+ .name = "imx8-dc",
+ .of_match_table = dc_dt_ids,
+ .pm = pm_sleep_ptr(&dc_pm_ops),
+ },
+};
+
+static struct platform_driver * const dc_drivers[] = {
+ &dc_cf_driver,
+ &dc_de_driver,
+ &dc_ed_driver,
+ &dc_fg_driver,
+ &dc_fl_driver,
+ &dc_fw_driver,
+ &dc_ic_driver,
+ &dc_lb_driver,
+ &dc_pe_driver,
+ &dc_tc_driver,
+ &dc_driver,
+};
+
+static int __init dc_drm_init(void)
+{
+ return platform_register_drivers(dc_drivers, ARRAY_SIZE(dc_drivers));
+}
+
+static void __exit dc_drm_exit(void)
+{
+ platform_unregister_drivers(dc_drivers, ARRAY_SIZE(dc_drivers));
+}
+
+module_init(dc_drm_init);
+module_exit(dc_drm_exit);
+
+MODULE_DESCRIPTION("i.MX8 Display Controller DRM Driver");
+MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/imx/dc/dc-drv.h b/drivers/gpu/drm/imx/dc/dc-drv.h
new file mode 100644
index 000000000000..eb61b8c76269
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-drv.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_DRV_H__
+#define __DC_DRV_H__
+
+#include <linux/container_of.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+
+#include "dc-de.h"
+#include "dc-kms.h"
+#include "dc-pe.h"
+
+/**
+ * struct dc_drm_device - DC specific drm_device
+ */
+struct dc_drm_device {
+ /** @base: base drm_device structure */
+ struct drm_device base;
+ /** @dc_crtc: DC specific CRTC list */
+ struct dc_crtc dc_crtc[DC_DISPLAYS];
+ /** @dc_primary: DC specific primary plane list */
+ struct dc_plane dc_primary[DC_DISPLAYS];
+ /** @encoder: encoder list */
+ struct drm_encoder encoder[DC_DISPLAYS];
+ /** @cf_safe: constframe list (safety stream) */
+ struct dc_cf *cf_safe[DC_DISPLAYS];
+ /** @cf_cont: constframe list (content stream) */
+ struct dc_cf *cf_cont[DC_DISPLAYS];
+ /** @de: display engine list */
+ struct dc_de *de[DC_DISPLAYS];
+ /** @ed_safe: extdst list (safety stream) */
+ struct dc_ed *ed_safe[DC_DISPLAYS];
+ /** @ed_cont: extdst list (content stream) */
+ struct dc_ed *ed_cont[DC_DISPLAYS];
+ /** @fg: framegen list */
+ struct dc_fg *fg[DC_DISPLAYS];
+ /** @fu_disp: fetchunit list (used by display engine) */
+ struct dc_fu *fu_disp[DC_DISP_FU_CNT];
+ /** @lb: layerblend list */
+ struct dc_lb *lb[DC_LB_CNT];
+ /** @pe: pixel engine */
+ struct dc_pe *pe;
+ /** @tc: tcon list */
+ struct dc_tc *tc[DC_DISPLAYS];
+};
+
+struct dc_subdev_info {
+ resource_size_t reg_start;
+ int id;
+};
+
+static inline struct dc_drm_device *to_dc_drm_device(struct drm_device *drm)
+{
+ return container_of(drm, struct dc_drm_device, base);
+}
+
+int dc_crtc_init(struct dc_drm_device *dc_drm, int crtc_index);
+int dc_crtc_post_init(struct dc_drm_device *dc_drm, int crtc_index);
+
+int dc_kms_init(struct dc_drm_device *dc_drm);
+void dc_kms_uninit(struct dc_drm_device *dc_drm);
+
+int dc_plane_init(struct dc_drm_device *dc_drm, struct dc_plane *dc_plane);
+
+extern struct platform_driver dc_cf_driver;
+extern struct platform_driver dc_de_driver;
+extern struct platform_driver dc_ed_driver;
+extern struct platform_driver dc_fg_driver;
+extern struct platform_driver dc_fl_driver;
+extern struct platform_driver dc_fw_driver;
+extern struct platform_driver dc_ic_driver;
+extern struct platform_driver dc_lb_driver;
+extern struct platform_driver dc_pe_driver;
+extern struct platform_driver dc_tc_driver;
+
+static inline int dc_subdev_get_id(const struct dc_subdev_info *info,
+ int info_cnt, struct resource *res)
+{
+ int i;
+
+ if (!res)
+ return -EINVAL;
+
+ for (i = 0; i < info_cnt; i++)
+ if (info[i].reg_start == res->start)
+ return info[i].id;
+
+ return -EINVAL;
+}
+
+void dc_de_post_bind(struct dc_drm_device *dc_drm);
+void dc_pe_post_bind(struct dc_drm_device *dc_drm);
+
+#endif /* __DC_DRV_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-ed.c b/drivers/gpu/drm/imx/dc/dc-ed.c
new file mode 100644
index 000000000000..d42f33d6f3fc
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-ed.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+#define PIXENGCFG_STATIC 0x8
+#define DIV_MASK GENMASK(23, 16)
+#define DIV(x) FIELD_PREP(DIV_MASK, (x))
+#define DIV_RESET 0x80
+#define SYNC_MODE BIT(8)
+#define SINGLE 0
+#define POWERDOWN BIT(4)
+
+#define PIXENGCFG_DYNAMIC 0xc
+
+#define PIXENGCFG_TRIGGER 0x14
+#define SYNC_TRIGGER BIT(0)
+
+#define STATICCONTROL 0x8
+#define PERFCOUNTMODE BIT(12)
+#define KICK_MODE BIT(8)
+#define EXTERNAL BIT(8)
+
+#define CONTROL 0xc
+#define GAMMAAPPLYENABLE BIT(0)
+
+static const struct dc_subdev_info dc_ed_info[] = {
+ { .reg_start = 0x56180980, .id = 0, },
+ { .reg_start = 0x56180a00, .id = 1, },
+ { .reg_start = 0x561809c0, .id = 4, },
+ { .reg_start = 0x56180a40, .id = 5, },
+};
+
+static const struct regmap_range dc_ed_pec_regmap_write_ranges[] = {
+ regmap_reg_range(PIXENGCFG_STATIC, PIXENGCFG_STATIC),
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+ regmap_reg_range(PIXENGCFG_TRIGGER, PIXENGCFG_TRIGGER),
+};
+
+static const struct regmap_access_table dc_ed_pec_regmap_write_table = {
+ .yes_ranges = dc_ed_pec_regmap_write_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_pec_regmap_write_ranges),
+};
+
+static const struct regmap_range dc_ed_pec_regmap_read_ranges[] = {
+ regmap_reg_range(PIXENGCFG_STATIC, PIXENGCFG_STATIC),
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+};
+
+static const struct regmap_access_table dc_ed_pec_regmap_read_table = {
+ .yes_ranges = dc_ed_pec_regmap_read_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_pec_regmap_read_ranges),
+};
+
+static const struct regmap_range dc_ed_pec_regmap_volatile_ranges[] = {
+ regmap_reg_range(PIXENGCFG_TRIGGER, PIXENGCFG_TRIGGER),
+};
+
+static const struct regmap_access_table dc_ed_pec_regmap_volatile_table = {
+ .yes_ranges = dc_ed_pec_regmap_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_pec_regmap_volatile_ranges),
+};
+
+static const struct regmap_config dc_ed_pec_regmap_config = {
+ .name = "pec",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_ed_pec_regmap_write_table,
+ .rd_table = &dc_ed_pec_regmap_read_table,
+ .volatile_table = &dc_ed_pec_regmap_volatile_table,
+ .max_register = PIXENGCFG_TRIGGER,
+};
+
+static const struct regmap_range dc_ed_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, STATICCONTROL),
+ regmap_reg_range(CONTROL, CONTROL),
+};
+
+static const struct regmap_access_table dc_ed_regmap_access_table = {
+ .yes_ranges = dc_ed_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_regmap_ranges),
+};
+
+static const struct regmap_config dc_ed_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_ed_regmap_access_table,
+ .rd_table = &dc_ed_regmap_access_table,
+ .max_register = CONTROL,
+};
+
+static const enum dc_link_id src_sels[] = {
+ LINK_ID_NONE,
+ LINK_ID_CONSTFRAME0,
+ LINK_ID_CONSTFRAME1,
+ LINK_ID_CONSTFRAME4,
+ LINK_ID_CONSTFRAME5,
+ LINK_ID_LAYERBLEND3,
+ LINK_ID_LAYERBLEND2,
+ LINK_ID_LAYERBLEND1,
+ LINK_ID_LAYERBLEND0,
+};
+
+static inline void dc_ed_pec_enable_shden(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, SHDEN, SHDEN);
+}
+
+static inline void dc_ed_pec_poweron(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, POWERDOWN, 0);
+}
+
+static inline void dc_ed_pec_sync_mode_single(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, SYNC_MODE, SINGLE);
+}
+
+static inline void dc_ed_pec_div_reset(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, DIV_MASK,
+ DIV(DIV_RESET));
+}
+
+void dc_ed_pec_src_sel(struct dc_ed *ed, enum dc_link_id src)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(src_sels); i++) {
+ if (src_sels[i] == src) {
+ regmap_write(ed->reg_pec, PIXENGCFG_DYNAMIC, src);
+ return;
+ }
+ }
+}
+
+void dc_ed_pec_sync_trigger(struct dc_ed *ed)
+{
+ regmap_write(ed->reg_pec, PIXENGCFG_TRIGGER, SYNC_TRIGGER);
+}
+
+static inline void dc_ed_enable_shden(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, STATICCONTROL, SHDEN, SHDEN);
+}
+
+static inline void dc_ed_kick_mode_external(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, STATICCONTROL, KICK_MODE, EXTERNAL);
+}
+
+static inline void dc_ed_disable_perfcountmode(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, STATICCONTROL, PERFCOUNTMODE, 0);
+}
+
+static inline void dc_ed_disable_gamma_apply(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, CONTROL, GAMMAAPPLYENABLE, 0);
+}
+
+void dc_ed_init(struct dc_ed *ed)
+{
+ dc_ed_pec_src_sel(ed, LINK_ID_NONE);
+ dc_ed_pec_enable_shden(ed);
+ dc_ed_pec_poweron(ed);
+ dc_ed_pec_sync_mode_single(ed);
+ dc_ed_pec_div_reset(ed);
+ dc_ed_enable_shden(ed);
+ dc_ed_disable_perfcountmode(ed);
+ dc_ed_kick_mode_external(ed);
+ dc_ed_disable_gamma_apply(ed);
+}
+
+static int dc_ed_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_pec;
+ void __iomem *base_cfg;
+ struct dc_ed *ed;
+ int id;
+
+ ed = devm_kzalloc(dev, sizeof(*ed), GFP_KERNEL);
+ if (!ed)
+ return -ENOMEM;
+
+ base_pec = devm_platform_get_and_ioremap_resource(pdev, 0, &res_pec);
+ if (IS_ERR(base_pec))
+ return PTR_ERR(base_pec);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ ed->reg_pec = devm_regmap_init_mmio(dev, base_pec,
+ &dc_ed_pec_regmap_config);
+ if (IS_ERR(ed->reg_pec))
+ return PTR_ERR(ed->reg_pec);
+
+ ed->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_ed_cfg_regmap_config);
+ if (IS_ERR(ed->reg_cfg))
+ return PTR_ERR(ed->reg_cfg);
+
+ ed->irq_shdload = platform_get_irq_byname(pdev, "shdload");
+ if (ed->irq_shdload < 0)
+ return ed->irq_shdload;
+
+ ed->dev = dev;
+
+ id = dc_subdev_get_id(dc_ed_info, ARRAY_SIZE(dc_ed_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ switch (id) {
+ case 0:
+ dc_drm->ed_cont[0] = ed;
+ break;
+ case 1:
+ dc_drm->ed_cont[1] = ed;
+ break;
+ case 4:
+ dc_drm->ed_safe[0] = ed;
+ break;
+ case 5:
+ dc_drm->ed_safe[1] = ed;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct component_ops dc_ed_ops = {
+ .bind = dc_ed_bind,
+};
+
+static int dc_ed_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_ed_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_ed_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_ed_ops);
+}
+
+static const struct of_device_id dc_ed_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-extdst" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_ed_dt_ids);
+
+struct platform_driver dc_ed_driver = {
+ .probe = dc_ed_probe,
+ .remove = dc_ed_remove,
+ .driver = {
+ .name = "imx8-dc-extdst",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_ed_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-fg.c b/drivers/gpu/drm/imx/dc/dc-fg.c
new file mode 100644
index 000000000000..28f372be9247
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fg.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/units.h>
+
+#include <drm/drm_modes.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+
+#define FGSTCTRL 0x8
+#define FGSYNCMODE_MASK GENMASK(2, 1)
+#define FGSYNCMODE(x) FIELD_PREP(FGSYNCMODE_MASK, (x))
+#define SHDEN BIT(0)
+
+#define HTCFG1 0xc
+#define HTOTAL(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define HACT(x) FIELD_PREP(GENMASK(13, 0), (x))
+
+#define HTCFG2 0x10
+#define HSEN BIT(31)
+#define HSBP(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define HSYNC(x) FIELD_PREP(GENMASK(13, 0), ((x) - 1))
+
+#define VTCFG1 0x14
+#define VTOTAL(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define VACT(x) FIELD_PREP(GENMASK(13, 0), (x))
+
+#define VTCFG2 0x18
+#define VSEN BIT(31)
+#define VSBP(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define VSYNC(x) FIELD_PREP(GENMASK(13, 0), ((x) - 1))
+
+#define PKICKCONFIG 0x2c
+#define SKICKCONFIG 0x30
+#define EN BIT(31)
+#define ROW(x) FIELD_PREP(GENMASK(29, 16), (x))
+#define COL(x) FIELD_PREP(GENMASK(13, 0), (x))
+
+#define PACFG 0x54
+#define SACFG 0x58
+#define STARTY(x) FIELD_PREP(GENMASK(29, 16), ((x) + 1))
+#define STARTX(x) FIELD_PREP(GENMASK(13, 0), ((x) + 1))
+
+#define FGINCTRL 0x5c
+#define FGINCTRLPANIC 0x60
+#define ENSECALPHA BIT(4)
+#define ENPRIMALPHA BIT(3)
+#define FGDM_MASK GENMASK(2, 0)
+
+#define FGCCR 0x64
+#define CCGREEN(x) FIELD_PREP(GENMASK(19, 10), (x))
+
+#define FGENABLE 0x68
+#define FGEN BIT(0)
+
+#define FGSLR 0x6c
+#define SHDTOKGEN BIT(0)
+
+#define FGTIMESTAMP 0x74
+#define FRAMEINDEX(x) FIELD_GET(GENMASK(31, 14), (x))
+#define LINEINDEX(x) FIELD_GET(GENMASK(13, 0), (x))
+
+#define FGCHSTAT 0x78
+#define SECSYNCSTAT BIT(24)
+#define SFIFOEMPTY BIT(16)
+
+#define FGCHSTATCLR 0x7c
+#define CLRSECSTAT BIT(16)
+
+enum dc_fg_syncmode {
+ FG_SYNCMODE_OFF, /* No side-by-side synchronization. */
+};
+
+enum dc_fg_dm {
+ FG_DM_CONSTCOL = 0x1, /* Constant Color Background is shown. */
+ FG_DM_SEC_ON_TOP = 0x5, /* Both inputs overlaid with secondary on top. */
+};
+
+static const struct dc_subdev_info dc_fg_info[] = {
+ { .reg_start = 0x5618b800, .id = 0, },
+ { .reg_start = 0x5618d400, .id = 1, },
+};
+
+static const struct regmap_range dc_fg_regmap_write_ranges[] = {
+ regmap_reg_range(FGSTCTRL, VTCFG2),
+ regmap_reg_range(PKICKCONFIG, SKICKCONFIG),
+ regmap_reg_range(PACFG, FGSLR),
+ regmap_reg_range(FGCHSTATCLR, FGCHSTATCLR),
+};
+
+static const struct regmap_range dc_fg_regmap_read_ranges[] = {
+ regmap_reg_range(FGSTCTRL, VTCFG2),
+ regmap_reg_range(PKICKCONFIG, SKICKCONFIG),
+ regmap_reg_range(PACFG, FGENABLE),
+ regmap_reg_range(FGTIMESTAMP, FGCHSTAT),
+};
+
+static const struct regmap_access_table dc_fg_regmap_write_table = {
+ .yes_ranges = dc_fg_regmap_write_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fg_regmap_write_ranges),
+};
+
+static const struct regmap_access_table dc_fg_regmap_read_table = {
+ .yes_ranges = dc_fg_regmap_read_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fg_regmap_read_ranges),
+};
+
+static const struct regmap_config dc_fg_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fg_regmap_write_table,
+ .rd_table = &dc_fg_regmap_read_table,
+ .max_register = FGCHSTATCLR,
+};
+
+static inline void dc_fg_enable_shden(struct dc_fg *fg)
+{
+ regmap_write_bits(fg->reg, FGSTCTRL, SHDEN, SHDEN);
+}
+
+static inline void dc_fg_syncmode(struct dc_fg *fg, enum dc_fg_syncmode mode)
+{
+ regmap_write_bits(fg->reg, FGSTCTRL, FGSYNCMODE_MASK, FGSYNCMODE(mode));
+}
+
+void dc_fg_cfg_videomode(struct dc_fg *fg, struct drm_display_mode *m)
+{
+ u32 hact, htotal, hsync, hsbp;
+ u32 vact, vtotal, vsync, vsbp;
+ u32 kick_row, kick_col;
+ int ret;
+
+ hact = m->crtc_hdisplay;
+ htotal = m->crtc_htotal;
+ hsync = m->crtc_hsync_end - m->crtc_hsync_start;
+ hsbp = m->crtc_htotal - m->crtc_hsync_start;
+
+ vact = m->crtc_vdisplay;
+ vtotal = m->crtc_vtotal;
+ vsync = m->crtc_vsync_end - m->crtc_vsync_start;
+ vsbp = m->crtc_vtotal - m->crtc_vsync_start;
+
+ /* video mode */
+ regmap_write(fg->reg, HTCFG1, HACT(hact) | HTOTAL(htotal));
+ regmap_write(fg->reg, HTCFG2, HSYNC(hsync) | HSBP(hsbp) | HSEN);
+ regmap_write(fg->reg, VTCFG1, VACT(vact) | VTOTAL(vtotal));
+ regmap_write(fg->reg, VTCFG2, VSYNC(vsync) | VSBP(vsbp) | VSEN);
+
+ kick_col = hact + 1;
+ kick_row = vact;
+
+ /* pkickconfig */
+ regmap_write(fg->reg, PKICKCONFIG, COL(kick_col) | ROW(kick_row) | EN);
+
+ /* skickconfig */
+ regmap_write(fg->reg, SKICKCONFIG, COL(kick_col) | ROW(kick_row) | EN);
+
+ /* primary and secondary area position configuration */
+ regmap_write(fg->reg, PACFG, STARTX(0) | STARTY(0));
+ regmap_write(fg->reg, SACFG, STARTX(0) | STARTY(0));
+
+ /* alpha */
+ regmap_write_bits(fg->reg, FGINCTRL, ENPRIMALPHA | ENSECALPHA, 0);
+ regmap_write_bits(fg->reg, FGINCTRLPANIC, ENPRIMALPHA | ENSECALPHA, 0);
+
+ /* constant color is green (used in panic mode) */
+ regmap_write(fg->reg, FGCCR, CCGREEN(0x3ff));
+
+ ret = clk_set_rate(fg->clk_disp, m->clock * HZ_PER_KHZ);
+ if (ret < 0)
+ dev_err(fg->dev, "failed to set display clock rate: %d\n", ret);
+}
+
+static inline void dc_fg_displaymode(struct dc_fg *fg, enum dc_fg_dm mode)
+{
+ regmap_write_bits(fg->reg, FGINCTRL, FGDM_MASK, mode);
+}
+
+static inline void dc_fg_panic_displaymode(struct dc_fg *fg, enum dc_fg_dm mode)
+{
+ regmap_write_bits(fg->reg, FGINCTRLPANIC, FGDM_MASK, mode);
+}
+
+void dc_fg_enable(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGENABLE, FGEN);
+}
+
+void dc_fg_disable(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGENABLE, 0);
+}
+
+void dc_fg_shdtokgen(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGSLR, SHDTOKGEN);
+}
+
+u32 dc_fg_get_frame_index(struct dc_fg *fg)
+{
+ u32 val;
+
+ regmap_read(fg->reg, FGTIMESTAMP, &val);
+
+ return FRAMEINDEX(val);
+}
+
+u32 dc_fg_get_line_index(struct dc_fg *fg)
+{
+ u32 val;
+
+ regmap_read(fg->reg, FGTIMESTAMP, &val);
+
+ return LINEINDEX(val);
+}
+
+bool dc_fg_wait_for_frame_index_moving(struct dc_fg *fg)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ u32 frame_index, last_frame_index;
+
+ frame_index = dc_fg_get_frame_index(fg);
+ do {
+ last_frame_index = frame_index;
+ frame_index = dc_fg_get_frame_index(fg);
+ } while (last_frame_index == frame_index &&
+ time_before(jiffies, timeout));
+
+ return last_frame_index != frame_index;
+}
+
+bool dc_fg_secondary_requests_to_read_empty_fifo(struct dc_fg *fg)
+{
+ u32 val;
+
+ regmap_read(fg->reg, FGCHSTAT, &val);
+
+ return !!(val & SFIFOEMPTY);
+}
+
+void dc_fg_secondary_clear_channel_status(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGCHSTATCLR, CLRSECSTAT);
+}
+
+int dc_fg_wait_for_secondary_syncup(struct dc_fg *fg)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout(fg->reg, FGCHSTAT, val,
+ val & SECSYNCSTAT, 5, 100000);
+}
+
+void dc_fg_enable_clock(struct dc_fg *fg)
+{
+ int ret;
+
+ ret = clk_prepare_enable(fg->clk_disp);
+ if (ret)
+ dev_err(fg->dev, "failed to enable display clock: %d\n", ret);
+}
+
+void dc_fg_disable_clock(struct dc_fg *fg)
+{
+ clk_disable_unprepare(fg->clk_disp);
+}
+
+enum drm_mode_status dc_fg_check_clock(struct dc_fg *fg, int clk_khz)
+{
+ unsigned long rounded_rate;
+
+ rounded_rate = clk_round_rate(fg->clk_disp, clk_khz * HZ_PER_KHZ);
+
+ if (rounded_rate != clk_khz * HZ_PER_KHZ)
+ return MODE_NOCLOCK;
+
+ return MODE_OK;
+}
+
+void dc_fg_init(struct dc_fg *fg)
+{
+ dc_fg_enable_shden(fg);
+ dc_fg_syncmode(fg, FG_SYNCMODE_OFF);
+ dc_fg_displaymode(fg, FG_DM_SEC_ON_TOP);
+ dc_fg_panic_displaymode(fg, FG_DM_CONSTCOL);
+}
+
+static int dc_fg_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res;
+ void __iomem *base;
+ struct dc_fg *fg;
+ int id;
+
+ fg = devm_kzalloc(dev, sizeof(*fg), GFP_KERNEL);
+ if (!fg)
+ return -ENOMEM;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ fg->reg = devm_regmap_init_mmio(dev, base, &dc_fg_regmap_config);
+ if (IS_ERR(fg->reg))
+ return PTR_ERR(fg->reg);
+
+ fg->clk_disp = devm_clk_get(dev, NULL);
+ if (IS_ERR(fg->clk_disp))
+ return dev_err_probe(dev, PTR_ERR(fg->clk_disp),
+ "failed to get display clock\n");
+
+ id = dc_subdev_get_id(dc_fg_info, ARRAY_SIZE(dc_fg_info), res);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ fg->dev = dev;
+ dc_drm->fg[id] = fg;
+
+ return 0;
+}
+
+static const struct component_ops dc_fg_ops = {
+ .bind = dc_fg_bind,
+};
+
+static int dc_fg_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_fg_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_fg_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_fg_ops);
+}
+
+static const struct of_device_id dc_fg_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-framegen" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_fg_dt_ids);
+
+struct platform_driver dc_fg_driver = {
+ .probe = dc_fg_probe,
+ .remove = dc_fg_remove,
+ .driver = {
+ .name = "imx8-dc-framegen",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_fg_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-fl.c b/drivers/gpu/drm/imx/dc/dc-fl.c
new file mode 100644
index 000000000000..3ce24c72aa13
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fl.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+
+#define BASEADDRESS(x) (0x10 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERATTRIBUTES(x) (0x14 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERDIMENSION(x) (0x18 + FRAC_OFFSET * (x))
+#define COLORCOMPONENTBITS(x) (0x1c + FRAC_OFFSET * (x))
+#define COLORCOMPONENTSHIFT(x) (0x20 + FRAC_OFFSET * (x))
+#define LAYEROFFSET(x) (0x24 + FRAC_OFFSET * (x))
+#define CLIPWINDOWOFFSET(x) (0x28 + FRAC_OFFSET * (x))
+#define CLIPWINDOWDIMENSIONS(x) (0x2c + FRAC_OFFSET * (x))
+#define CONSTANTCOLOR(x) (0x30 + FRAC_OFFSET * (x))
+#define LAYERPROPERTY(x) (0x34 + FRAC_OFFSET * (x))
+#define FRAMEDIMENSIONS 0x150
+
+struct dc_fl {
+ struct dc_fu fu;
+};
+
+static const struct dc_subdev_info dc_fl_info[] = {
+ { .reg_start = 0x56180ac0, .id = 0, },
+};
+
+static const struct regmap_range dc_fl_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, FRAMEDIMENSIONS),
+};
+
+static const struct regmap_access_table dc_fl_regmap_access_table = {
+ .yes_ranges = dc_fl_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fl_regmap_ranges),
+};
+
+static const struct regmap_config dc_fl_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fl_regmap_access_table,
+ .rd_table = &dc_fl_regmap_access_table,
+ .max_register = FRAMEDIMENSIONS,
+};
+
+static void dc_fl_set_fmt(struct dc_fu *fu, enum dc_fu_frac frac,
+ const struct drm_format_info *format)
+{
+ u32 bits = 0, shifts = 0;
+
+ dc_fu_set_src_bpp(fu, frac, format->cpp[0] * 8);
+
+ regmap_write_bits(fu->reg_cfg, LAYERPROPERTY(frac),
+ YUVCONVERSIONMODE_MASK,
+ YUVCONVERSIONMODE(YUVCONVERSIONMODE_OFF));
+
+ dc_fu_get_pixel_format_bits(fu, format->format, &bits);
+ dc_fu_get_pixel_format_shifts(fu, format->format, &shifts);
+
+ regmap_write(fu->reg_cfg, COLORCOMPONENTBITS(frac), bits);
+ regmap_write(fu->reg_cfg, COLORCOMPONENTSHIFT(frac), shifts);
+}
+
+static void dc_fl_set_framedimensions(struct dc_fu *fu, int w, int h)
+{
+ regmap_write(fu->reg_cfg, FRAMEDIMENSIONS,
+ FRAMEWIDTH(w) | FRAMEHEIGHT(h));
+}
+
+static void dc_fl_init(struct dc_fu *fu)
+{
+ dc_fu_common_hw_init(fu);
+ dc_fu_shdldreq_sticky(fu, 0xff);
+}
+
+static void dc_fl_set_ops(struct dc_fu *fu)
+{
+ memcpy(&fu->ops, &dc_fu_common_ops, sizeof(dc_fu_common_ops));
+ fu->ops.init = dc_fl_init;
+ fu->ops.set_fmt = dc_fl_set_fmt;
+ fu->ops.set_framedimensions = dc_fl_set_framedimensions;
+}
+
+static int dc_fl_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_cfg;
+ struct dc_fl *fl;
+ struct dc_fu *fu;
+ int i, id;
+
+ fl = devm_kzalloc(dev, sizeof(*fl), GFP_KERNEL);
+ if (!fl)
+ return -ENOMEM;
+
+ fu = &fl->fu;
+
+ res_pec = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ fu->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_fl_cfg_regmap_config);
+ if (IS_ERR(fu->reg_cfg))
+ return PTR_ERR(fu->reg_cfg);
+
+ id = dc_subdev_get_id(dc_fl_info, ARRAY_SIZE(dc_fl_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ fu->link_id = LINK_ID_FETCHLAYER0;
+ fu->id = DC_FETCHUNIT_FL0;
+ for (i = 0; i < DC_FETCHUNIT_FRAC_NUM; i++) {
+ fu->reg_baseaddr[i] = BASEADDRESS(i);
+ fu->reg_sourcebufferattributes[i] = SOURCEBUFFERATTRIBUTES(i);
+ fu->reg_sourcebufferdimension[i] = SOURCEBUFFERDIMENSION(i);
+ fu->reg_layeroffset[i] = LAYEROFFSET(i);
+ fu->reg_clipwindowoffset[i] = CLIPWINDOWOFFSET(i);
+ fu->reg_clipwindowdimensions[i] = CLIPWINDOWDIMENSIONS(i);
+ fu->reg_constantcolor[i] = CONSTANTCOLOR(i);
+ fu->reg_layerproperty[i] = LAYERPROPERTY(i);
+ }
+ snprintf(fu->name, sizeof(fu->name), "FetchLayer%d", id);
+
+ dc_fl_set_ops(fu);
+
+ dc_drm->fu_disp[fu->id] = fu;
+
+ return 0;
+}
+
+static const struct component_ops dc_fl_ops = {
+ .bind = dc_fl_bind,
+};
+
+static int dc_fl_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_fl_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_fl_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_fl_ops);
+}
+
+static const struct of_device_id dc_fl_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-fetchlayer" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_fl_dt_ids);
+
+struct platform_driver dc_fl_driver = {
+ .probe = dc_fl_probe,
+ .remove = dc_fl_remove,
+ .driver = {
+ .name = "imx8-dc-fetchlayer",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_fl_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-fu.c b/drivers/gpu/drm/imx/dc/dc-fu.c
new file mode 100644
index 000000000000..1d8f74babef8
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fu.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/math.h>
+
+#include "dc-fu.h"
+#include "dc-pe.h"
+
+/* STATICCONTROL */
+#define SHDLDREQSTICKY_MASK GENMASK(31, 24)
+#define SHDLDREQSTICKY(x) FIELD_PREP(SHDLDREQSTICKY_MASK, (x))
+#define BASEADDRESSAUTOUPDATE_MASK GENMASK(23, 16)
+#define BASEADDRESSAUTOUPDATE(x) FIELD_PREP(BASEADDRESSAUTOUPDATE_MASK, (x))
+
+/* BURSTBUFFERMANAGEMENT */
+#define LINEMODE_MASK BIT(31)
+#define SETBURSTLENGTH_MASK GENMASK(12, 8)
+#define SETBURSTLENGTH(x) FIELD_PREP(SETBURSTLENGTH_MASK, (x))
+#define SETNUMBUFFERS_MASK GENMASK(7, 0)
+#define SETNUMBUFFERS(x) FIELD_PREP(SETNUMBUFFERS_MASK, (x))
+
+/* SOURCEBUFFERATTRIBUTES */
+#define BITSPERPIXEL_MASK GENMASK(21, 16)
+#define BITSPERPIXEL(x) FIELD_PREP(BITSPERPIXEL_MASK, (x))
+#define STRIDE_MASK GENMASK(15, 0)
+#define STRIDE(x) FIELD_PREP(STRIDE_MASK, (x) - 1)
+
+/* SOURCEBUFFERDIMENSION */
+#define LINECOUNT(x) FIELD_PREP(GENMASK(29, 16), (x))
+#define LINEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
+
+/* LAYEROFFSET */
+#define LAYERYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x))
+#define LAYERXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
+
+/* CLIPWINDOWOFFSET */
+#define CLIPWINDOWYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x))
+#define CLIPWINDOWXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
+
+/* CLIPWINDOWDIMENSIONS */
+#define CLIPWINDOWHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x) - 1)
+#define CLIPWINDOWWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x) - 1)
+
+enum dc_linemode {
+ /*
+ * Mandatory setting for operation in the Display Controller.
+ * Works also for Blit Engine with marginal performance impact.
+ */
+ LINEMODE_DISPLAY = 0,
+};
+
+struct dc_fu_pixel_format {
+ u32 pixel_format;
+ u32 bits;
+ u32 shifts;
+};
+
+static const struct dc_fu_pixel_format pixel_formats[] = {
+ {
+ DRM_FORMAT_XRGB8888,
+ R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0),
+ R_SHIFT(16) | G_SHIFT(8) | B_SHIFT(0) | A_SHIFT(0),
+ },
+};
+
+void dc_fu_get_pixel_format_bits(struct dc_fu *fu, u32 format, u32 *bits)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ if (pixel_formats[i].pixel_format == format) {
+ *bits = pixel_formats[i].bits;
+ return;
+ }
+ }
+}
+
+void
+dc_fu_get_pixel_format_shifts(struct dc_fu *fu, u32 format, u32 *shifts)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ if (pixel_formats[i].pixel_format == format) {
+ *shifts = pixel_formats[i].shifts;
+ return;
+ }
+ }
+}
+
+static inline void dc_fu_enable_shden(struct dc_fu *fu)
+{
+ regmap_write_bits(fu->reg_cfg, STATICCONTROL, SHDEN, SHDEN);
+}
+
+static inline void dc_fu_baddr_autoupdate(struct dc_fu *fu, u8 layer_mask)
+{
+ regmap_write_bits(fu->reg_cfg, STATICCONTROL,
+ BASEADDRESSAUTOUPDATE_MASK,
+ BASEADDRESSAUTOUPDATE(layer_mask));
+}
+
+void dc_fu_shdldreq_sticky(struct dc_fu *fu, u8 layer_mask)
+{
+ regmap_write_bits(fu->reg_cfg, STATICCONTROL, SHDLDREQSTICKY_MASK,
+ SHDLDREQSTICKY(layer_mask));
+}
+
+static inline void dc_fu_set_linemode(struct dc_fu *fu, enum dc_linemode mode)
+{
+ regmap_write_bits(fu->reg_cfg, BURSTBUFFERMANAGEMENT, LINEMODE_MASK,
+ mode);
+}
+
+static inline void dc_fu_set_numbuffers(struct dc_fu *fu, unsigned int num)
+{
+ regmap_write_bits(fu->reg_cfg, BURSTBUFFERMANAGEMENT,
+ SETNUMBUFFERS_MASK, SETNUMBUFFERS(num));
+}
+
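+/*
+ * Derive the DMA burst length from the base address alignment: take the
+ * largest power-of-two size the address is aligned to, clamp it to the
+ * 8..128 byte range and program it in units of 8 bytes.
+ */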
+static void dc_fu_set_burstlength(struct dc_fu *fu, dma_addr_t baddr)
+{
+ unsigned int burst_size, burst_length;
+
+ burst_size = 1 << __ffs(baddr);
+ burst_size = round_up(burst_size, 8);
+ burst_size = min(burst_size, 128U);
+ burst_length = burst_size / 8;
+
+ regmap_write_bits(fu->reg_cfg, BURSTBUFFERMANAGEMENT,
+ SETBURSTLENGTH_MASK, SETBURSTLENGTH(burst_length));
+}
+
+static void dc_fu_set_baseaddress(struct dc_fu *fu, enum dc_fu_frac frac,
+ dma_addr_t baddr)
+{
+ regmap_write(fu->reg_cfg, fu->reg_baseaddr[frac], baddr);
+}
+
+void dc_fu_set_src_bpp(struct dc_fu *fu, enum dc_fu_frac frac, unsigned int bpp)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_sourcebufferattributes[frac],
+ BITSPERPIXEL_MASK, BITSPERPIXEL(bpp));
+}
+
+static void dc_fu_set_src_stride(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int stride)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_sourcebufferattributes[frac],
+ STRIDE_MASK, STRIDE(stride));
+}
+
+static void dc_fu_set_src_buf_dimensions(struct dc_fu *fu, enum dc_fu_frac frac,
+ int w, int h)
+{
+ regmap_write(fu->reg_cfg, fu->reg_sourcebufferdimension[frac],
+ LINEWIDTH(w) | LINECOUNT(h));
+}
+
+static inline void dc_fu_layeroffset(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int x, unsigned int y)
+{
+ regmap_write(fu->reg_cfg, fu->reg_layeroffset[frac],
+ LAYERXOFFSET(x) | LAYERYOFFSET(y));
+}
+
+static inline void dc_fu_clipoffset(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int x, unsigned int y)
+{
+ regmap_write(fu->reg_cfg, fu->reg_clipwindowoffset[frac],
+ CLIPWINDOWXOFFSET(x) | CLIPWINDOWYOFFSET(y));
+}
+
+static inline void dc_fu_clipdimensions(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int w, unsigned int h)
+{
+ regmap_write(fu->reg_cfg, fu->reg_clipwindowdimensions[frac],
+ CLIPWINDOWWIDTH(w) | CLIPWINDOWHEIGHT(h));
+}
+
+static inline void
+dc_fu_set_pixel_blend_mode(struct dc_fu *fu, enum dc_fu_frac frac)
+{
+ regmap_write(fu->reg_cfg, fu->reg_layerproperty[frac], 0);
+ regmap_write(fu->reg_cfg, fu->reg_constantcolor[frac], 0);
+}
+
+static void dc_fu_enable_src_buf(struct dc_fu *fu, enum dc_fu_frac frac)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_layerproperty[frac],
+ SOURCEBUFFERENABLE, SOURCEBUFFERENABLE);
+}
+
+static void dc_fu_disable_src_buf(struct dc_fu *fu, enum dc_fu_frac frac)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_layerproperty[frac],
+ SOURCEBUFFERENABLE, 0);
+
+ if (fu->lb) {
+ dc_lb_pec_clken(fu->lb, CLKEN_DISABLE);
+ dc_lb_mode(fu->lb, LB_NEUTRAL);
+ }
+}
+
+static void dc_fu_set_layerblend(struct dc_fu *fu, struct dc_lb *lb)
+{
+ fu->lb = lb;
+}
+
+static enum dc_link_id dc_fu_get_link_id(struct dc_fu *fu)
+{
+ return fu->link_id;
+}
+
+static const char *dc_fu_get_name(struct dc_fu *fu)
+{
+ return fu->name;
+}
+
+const struct dc_fu_ops dc_fu_common_ops = {
+ .set_burstlength = dc_fu_set_burstlength,
+ .set_baseaddress = dc_fu_set_baseaddress,
+ .set_src_stride = dc_fu_set_src_stride,
+ .set_src_buf_dimensions = dc_fu_set_src_buf_dimensions,
+ .enable_src_buf = dc_fu_enable_src_buf,
+ .disable_src_buf = dc_fu_disable_src_buf,
+ .set_layerblend = dc_fu_set_layerblend,
+ .get_link_id = dc_fu_get_link_id,
+ .get_name = dc_fu_get_name,
+};
+
+const struct dc_fu_ops *dc_fu_get_ops(struct dc_fu *fu)
+{
+ return &fu->ops;
+}
+
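+/*
+ * Bring the fetchunit to a default state: base address auto-update off,
+ * shadow loading enabled, display line mode, 16 burst buffers, and every
+ * fractional sub-unit parked at offset 0 with a 1x1 clip window, default
+ * pixel blend mode and its source buffer disabled.
+ */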
+void dc_fu_common_hw_init(struct dc_fu *fu)
+{
+ enum dc_fu_frac i;
+
+ dc_fu_baddr_autoupdate(fu, 0x0);
+ dc_fu_enable_shden(fu);
+ dc_fu_set_linemode(fu, LINEMODE_DISPLAY);
+ dc_fu_set_numbuffers(fu, 16);
+
+ for (i = DC_FETCHUNIT_FRAC0; i < DC_FETCHUNIT_FRAC_NUM; i++) {
+ dc_fu_layeroffset(fu, i, 0, 0);
+ dc_fu_clipoffset(fu, i, 0, 0);
+ dc_fu_clipdimensions(fu, i, 1, 1);
+ dc_fu_disable_src_buf(fu, i);
+ dc_fu_set_pixel_blend_mode(fu, i);
+ }
+}
diff --git a/drivers/gpu/drm/imx/dc/dc-fu.h b/drivers/gpu/drm/imx/dc/dc-fu.h
new file mode 100644
index 000000000000..f678de3ca8c0
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fu.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_FETCHUNIT_H__
+#define __DC_FETCHUNIT_H__
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "dc-pe.h"
+
+#define FRAC_OFFSET 0x28
+
+#define STATICCONTROL 0x8
+#define BURSTBUFFERMANAGEMENT 0xc
+
+/* COLORCOMPONENTBITS */
+#define R_BITS(x) FIELD_PREP_CONST(GENMASK(27, 24), (x))
+#define G_BITS(x) FIELD_PREP_CONST(GENMASK(19, 16), (x))
+#define B_BITS(x) FIELD_PREP_CONST(GENMASK(11, 8), (x))
+#define A_BITS(x) FIELD_PREP_CONST(GENMASK(3, 0), (x))
+
+/* COLORCOMPONENTSHIFT */
+#define R_SHIFT(x) FIELD_PREP_CONST(GENMASK(28, 24), (x))
+#define G_SHIFT(x) FIELD_PREP_CONST(GENMASK(20, 16), (x))
+#define B_SHIFT(x) FIELD_PREP_CONST(GENMASK(12, 8), (x))
+#define A_SHIFT(x) FIELD_PREP_CONST(GENMASK(4, 0), (x))
+
+/* LAYERPROPERTY */
+#define SOURCEBUFFERENABLE BIT(31)
+#define YUVCONVERSIONMODE_MASK GENMASK(18, 17)
+#define YUVCONVERSIONMODE(x) FIELD_PREP(YUVCONVERSIONMODE_MASK, (x))
+
+/* FRAMEDIMENSIONS */
+#define FRAMEHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x))
+#define FRAMEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
+
+/* CONTROL */
+#define INPUTSELECT_MASK GENMASK(4, 3)
+#define INPUTSELECT(x) FIELD_PREP(INPUTSELECT_MASK, (x))
+#define RASTERMODE_MASK GENMASK(2, 0)
+#define RASTERMODE(x) FIELD_PREP(RASTERMODE_MASK, (x))
+
+enum dc_yuvconversionmode {
+ YUVCONVERSIONMODE_OFF,
+};
+
+enum dc_inputselect {
+ INPUTSELECT_INACTIVE,
+};
+
+enum dc_rastermode {
+ RASTERMODE_NORMAL,
+};
+
+enum {
+ DC_FETCHUNIT_FL0,
+ DC_FETCHUNIT_FW2,
+};
+
+enum dc_fu_frac {
+ DC_FETCHUNIT_FRAC0,
+ DC_FETCHUNIT_FRAC1,
+ DC_FETCHUNIT_FRAC2,
+ DC_FETCHUNIT_FRAC3,
+ DC_FETCHUNIT_FRAC4,
+ DC_FETCHUNIT_FRAC5,
+ DC_FETCHUNIT_FRAC6,
+ DC_FETCHUNIT_FRAC7,
+ DC_FETCHUNIT_FRAC_NUM
+};
+
+struct dc_fu;
+struct dc_lb;
+
+struct dc_fu_ops {
+ void (*init)(struct dc_fu *fu);
+ void (*set_burstlength)(struct dc_fu *fu, dma_addr_t baddr);
+ void (*set_baseaddress)(struct dc_fu *fu, enum dc_fu_frac frac,
+ dma_addr_t baddr);
+ void (*set_src_stride)(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int stride);
+ void (*set_src_buf_dimensions)(struct dc_fu *fu, enum dc_fu_frac frac,
+ int w, int h);
+ void (*set_fmt)(struct dc_fu *fu, enum dc_fu_frac frac,
+ const struct drm_format_info *format);
+ void (*enable_src_buf)(struct dc_fu *fu, enum dc_fu_frac frac);
+ void (*disable_src_buf)(struct dc_fu *fu, enum dc_fu_frac frac);
+ void (*set_framedimensions)(struct dc_fu *fu, int w, int h);
+ void (*set_layerblend)(struct dc_fu *fu, struct dc_lb *lb);
+ enum dc_link_id (*get_link_id)(struct dc_fu *fu);
+ const char *(*get_name)(struct dc_fu *fu);
+};
+
+struct dc_fu {
+ struct regmap *reg_pec;
+ struct regmap *reg_cfg;
+ char name[21];
+ u32 reg_baseaddr[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_sourcebufferattributes[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_sourcebufferdimension[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_layeroffset[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_clipwindowoffset[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_clipwindowdimensions[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_constantcolor[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_layerproperty[DC_FETCHUNIT_FRAC_NUM];
+ unsigned int id;
+ enum dc_link_id link_id;
+ struct dc_fu_ops ops;
+ struct dc_lb *lb;
+};
+
+extern const struct dc_fu_ops dc_fu_common_ops;
+
+void dc_fu_get_pixel_format_bits(struct dc_fu *fu, u32 format, u32 *bits);
+void dc_fu_get_pixel_format_shifts(struct dc_fu *fu, u32 format, u32 *shifts);
+void dc_fu_shdldreq_sticky(struct dc_fu *fu, u8 layer_mask);
+void dc_fu_set_src_bpp(struct dc_fu *fu, enum dc_fu_frac frac, unsigned int bpp);
+void dc_fu_common_hw_init(struct dc_fu *fu);
+
+const struct dc_fu_ops *dc_fu_get_ops(struct dc_fu *fu);
+
+#endif /* __DC_FETCHUNIT_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-fw.c b/drivers/gpu/drm/imx/dc/dc-fw.c
new file mode 100644
index 000000000000..acb2d4d9e2ec
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fw.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+
+#define PIXENGCFG_DYNAMIC 0x8
+
+#define BASEADDRESS(x) (0x10 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERATTRIBUTES(x) (0x14 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERDIMENSION(x) (0x18 + FRAC_OFFSET * (x))
+#define COLORCOMPONENTBITS(x) (0x1c + FRAC_OFFSET * (x))
+#define COLORCOMPONENTSHIFT(x) (0x20 + FRAC_OFFSET * (x))
+#define LAYEROFFSET(x) (0x24 + FRAC_OFFSET * (x))
+#define CLIPWINDOWOFFSET(x) (0x28 + FRAC_OFFSET * (x))
+#define CLIPWINDOWDIMENSIONS(x) (0x2c + FRAC_OFFSET * (x))
+#define CONSTANTCOLOR(x) (0x30 + FRAC_OFFSET * (x))
+#define LAYERPROPERTY(x) (0x34 + FRAC_OFFSET * (x))
+#define FRAMEDIMENSIONS 0x150
+#define CONTROL 0x170
+
+struct dc_fw {
+ struct dc_fu fu;
+};
+
+static const struct dc_subdev_info dc_fw_info[] = {
+ { .reg_start = 0x56180a60, .id = 2, },
+};
+
+static const struct regmap_range dc_fw_pec_regmap_access_ranges[] = {
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+};
+
+static const struct regmap_access_table dc_fw_pec_regmap_access_table = {
+ .yes_ranges = dc_fw_pec_regmap_access_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fw_pec_regmap_access_ranges),
+};
+
+static const struct regmap_config dc_fw_pec_regmap_config = {
+ .name = "pec",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fw_pec_regmap_access_table,
+ .rd_table = &dc_fw_pec_regmap_access_table,
+ .max_register = PIXENGCFG_DYNAMIC,
+};
+
+static const struct regmap_range dc_fw_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, FRAMEDIMENSIONS),
+ regmap_reg_range(CONTROL, CONTROL),
+};
+
+static const struct regmap_access_table dc_fw_regmap_access_table = {
+ .yes_ranges = dc_fw_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fw_regmap_ranges),
+};
+
+static const struct regmap_config dc_fw_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fw_regmap_access_table,
+ .rd_table = &dc_fw_regmap_access_table,
+ .max_register = CONTROL,
+};
+
+static void dc_fw_set_fmt(struct dc_fu *fu, enum dc_fu_frac frac,
+ const struct drm_format_info *format)
+{
+ u32 bits = 0, shifts = 0;
+
+ dc_fu_set_src_bpp(fu, frac, format->cpp[0] * 8);
+
+ regmap_write_bits(fu->reg_cfg, CONTROL, INPUTSELECT_MASK,
+ INPUTSELECT(INPUTSELECT_INACTIVE));
+ regmap_write_bits(fu->reg_cfg, CONTROL, RASTERMODE_MASK,
+ RASTERMODE(RASTERMODE_NORMAL));
+
+ regmap_write_bits(fu->reg_cfg, LAYERPROPERTY(frac),
+ YUVCONVERSIONMODE_MASK,
+ YUVCONVERSIONMODE(YUVCONVERSIONMODE_OFF));
+
+ dc_fu_get_pixel_format_bits(fu, format->format, &bits);
+ dc_fu_get_pixel_format_shifts(fu, format->format, &shifts);
+
+ regmap_write(fu->reg_cfg, COLORCOMPONENTBITS(frac), bits);
+ regmap_write(fu->reg_cfg, COLORCOMPONENTSHIFT(frac), shifts);
+}
+
+static void dc_fw_set_framedimensions(struct dc_fu *fu, int w, int h)
+{
+ regmap_write(fu->reg_cfg, FRAMEDIMENSIONS,
+ FRAMEWIDTH(w) | FRAMEHEIGHT(h));
+}
+
+static void dc_fw_init(struct dc_fu *fu)
+{
+ regmap_write(fu->reg_pec, PIXENGCFG_DYNAMIC, LINK_ID_NONE);
+ dc_fu_common_hw_init(fu);
+ dc_fu_shdldreq_sticky(fu, 0xff);
+}
+
+static void dc_fw_set_ops(struct dc_fu *fu)
+{
+ memcpy(&fu->ops, &dc_fu_common_ops, sizeof(dc_fu_common_ops));
+ fu->ops.init = dc_fw_init;
+ fu->ops.set_fmt = dc_fw_set_fmt;
+ fu->ops.set_framedimensions = dc_fw_set_framedimensions;
+}
+
+static int dc_fw_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_pec;
+ void __iomem *base_cfg;
+ struct dc_fw *fw;
+ struct dc_fu *fu;
+ int i, id;
+
+ fw = devm_kzalloc(dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
+
+ fu = &fw->fu;
+
+ base_pec = devm_platform_get_and_ioremap_resource(pdev, 0, &res_pec);
+ if (IS_ERR(base_pec))
+ return PTR_ERR(base_pec);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ fu->reg_pec = devm_regmap_init_mmio(dev, base_pec,
+ &dc_fw_pec_regmap_config);
+ if (IS_ERR(fu->reg_pec))
+ return PTR_ERR(fu->reg_pec);
+
+ fu->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_fw_cfg_regmap_config);
+ if (IS_ERR(fu->reg_cfg))
+ return PTR_ERR(fu->reg_cfg);
+
+ id = dc_subdev_get_id(dc_fw_info, ARRAY_SIZE(dc_fw_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ fu->link_id = LINK_ID_FETCHWARP2;
+ fu->id = DC_FETCHUNIT_FW2;
+ for (i = 0; i < DC_FETCHUNIT_FRAC_NUM; i++) {
+ fu->reg_baseaddr[i] = BASEADDRESS(i);
+ fu->reg_sourcebufferattributes[i] = SOURCEBUFFERATTRIBUTES(i);
+ fu->reg_sourcebufferdimension[i] = SOURCEBUFFERDIMENSION(i);
+ fu->reg_layeroffset[i] = LAYEROFFSET(i);
+ fu->reg_clipwindowoffset[i] = CLIPWINDOWOFFSET(i);
+ fu->reg_clipwindowdimensions[i] = CLIPWINDOWDIMENSIONS(i);
+ fu->reg_constantcolor[i] = CONSTANTCOLOR(i);
+ fu->reg_layerproperty[i] = LAYERPROPERTY(i);
+ }
+ snprintf(fu->name, sizeof(fu->name), "FetchWarp%d", id);
+
+ dc_fw_set_ops(fu);
+
+ dc_drm->fu_disp[fu->id] = fu;
+
+ return 0;
+}
+
+static const struct component_ops dc_fw_ops = {
+ .bind = dc_fw_bind,
+};
+
+static int dc_fw_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_fw_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_fw_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_fw_ops);
+}
+
+static const struct of_device_id dc_fw_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-fetchwarp" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_fw_dt_ids);
+
+struct platform_driver dc_fw_driver = {
+ .probe = dc_fw_probe,
+ .remove = dc_fw_remove,
+ .driver = {
+ .name = "imx8-dc-fetchwarp",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_fw_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-ic.c b/drivers/gpu/drm/imx/dc/dc-ic.c
new file mode 100644
index 000000000000..a270ae4030cd
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-ic.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#define USERINTERRUPTMASK(n) (0x8 + 4 * (n))
+#define INTERRUPTENABLE(n) (0x10 + 4 * (n))
+#define INTERRUPTPRESET(n) (0x18 + 4 * (n))
+#define INTERRUPTCLEAR(n) (0x20 + 4 * (n))
+#define INTERRUPTSTATUS(n) (0x28 + 4 * (n))
+#define USERINTERRUPTENABLE(n) (0x40 + 4 * (n))
+#define USERINTERRUPTPRESET(n) (0x48 + 4 * (n))
+#define USERINTERRUPTCLEAR(n) (0x50 + 4 * (n))
+#define USERINTERRUPTSTATUS(n) (0x58 + 4 * (n))
+
+#define IRQ_COUNT 49
+#define IRQ_RESERVED 35
+#define REG_NUM 2
+
+struct dc_ic_data {
+ struct regmap *regs;
+ struct clk *clk_axi;
+ int irq[IRQ_COUNT];
+ struct irq_domain *domain;
+};
+
+struct dc_ic_entry {
+ struct dc_ic_data *data;
+ int irq;
+};
+
+static const struct regmap_range dc_ic_regmap_write_ranges[] = {
+ regmap_reg_range(USERINTERRUPTMASK(0), INTERRUPTCLEAR(1)),
+ regmap_reg_range(USERINTERRUPTENABLE(0), USERINTERRUPTCLEAR(1)),
+};
+
+static const struct regmap_access_table dc_ic_regmap_write_table = {
+ .yes_ranges = dc_ic_regmap_write_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ic_regmap_write_ranges),
+};
+
+static const struct regmap_range dc_ic_regmap_read_ranges[] = {
+ regmap_reg_range(USERINTERRUPTMASK(0), INTERRUPTENABLE(1)),
+ regmap_reg_range(INTERRUPTSTATUS(0), INTERRUPTSTATUS(1)),
+ regmap_reg_range(USERINTERRUPTENABLE(0), USERINTERRUPTENABLE(1)),
+ regmap_reg_range(USERINTERRUPTSTATUS(0), USERINTERRUPTSTATUS(1)),
+};
+
+static const struct regmap_access_table dc_ic_regmap_read_table = {
+ .yes_ranges = dc_ic_regmap_read_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ic_regmap_read_ranges),
+};
+
+static const struct regmap_range dc_ic_regmap_volatile_ranges[] = {
+ regmap_reg_range(INTERRUPTPRESET(0), INTERRUPTCLEAR(1)),
+ regmap_reg_range(USERINTERRUPTPRESET(0), USERINTERRUPTCLEAR(1)),
+};
+
+static const struct regmap_access_table dc_ic_regmap_volatile_table = {
+ .yes_ranges = dc_ic_regmap_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ic_regmap_volatile_ranges),
+};
+
+static const struct regmap_config dc_ic_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_ic_regmap_write_table,
+ .rd_table = &dc_ic_regmap_read_table,
+ .volatile_table = &dc_ic_regmap_volatile_table,
+ .max_register = USERINTERRUPTSTATUS(1),
+};
+
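+/*
+ * Chained handler for one parent interrupt line: check the enabled user
+ * interrupt status bit that belongs to this line and, if set, forward it
+ * to the corresponding virq in the IRQ domain.
+ */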
+static void dc_ic_irq_handler(struct irq_desc *desc)
+{
+ struct dc_ic_entry *entry = irq_desc_get_handler_data(desc);
+ struct dc_ic_data *data = entry->data;
+ unsigned int status, enable;
+ unsigned int virq;
+
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+ regmap_read(data->regs, USERINTERRUPTSTATUS(entry->irq / 32), &status);
+ regmap_read(data->regs, USERINTERRUPTENABLE(entry->irq / 32), &enable);
+
+ status &= enable;
+
+ if (status & BIT(entry->irq % 32)) {
+ virq = irq_find_mapping(data->domain, entry->irq);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
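+/*
+ * Bits for IRQ lines that the generic chips must not handle: in the
+ * second register, bit 3 is the reserved IRQ 35 and bits 17..31
+ * correspond to the nonexistent IRQs 49..63.
+ */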
+static const unsigned long unused_irq[REG_NUM] = {0x00000000, 0xfffe0008};
+
+static int dc_ic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct irq_chip_generic *gc;
+ struct dc_ic_entry *entry;
+ struct irq_chip_type *ct;
+ struct dc_ic_data *data;
+ void __iomem *base;
+ int i, ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ entry = devm_kcalloc(dev, IRQ_COUNT, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ dev_err(dev, "failed to initialize reg\n");
+ return PTR_ERR(base);
+ }
+
+ data->regs = devm_regmap_init_mmio(dev, base, &dc_ic_regmap_config);
+ if (IS_ERR(data->regs))
+ return PTR_ERR(data->regs);
+
+ data->clk_axi = devm_clk_get(dev, NULL);
+ if (IS_ERR(data->clk_axi))
+ return dev_err_probe(dev, PTR_ERR(data->clk_axi),
+ "failed to get AXI clock\n");
+
+ for (i = 0; i < IRQ_COUNT; i++) {
+ /* skip the reserved IRQ */
+ if (i == IRQ_RESERVED)
+ continue;
+
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ dev_set_drvdata(dev, data);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to get runtime PM sync: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < REG_NUM; i++) {
+ /* mask and clear all interrupts */
+ regmap_write(data->regs, USERINTERRUPTENABLE(i), 0x0);
+ regmap_write(data->regs, INTERRUPTENABLE(i), 0x0);
+ regmap_write(data->regs, USERINTERRUPTCLEAR(i), 0xffffffff);
+ regmap_write(data->regs, INTERRUPTCLEAR(i), 0xffffffff);
+
+ /* set all interrupts to user mode */
+ regmap_write(data->regs, USERINTERRUPTMASK(i), 0xffffffff);
+ }
+
+ data->domain = irq_domain_add_linear(dev->of_node, IRQ_COUNT,
+ &irq_generic_chip_ops, data);
+ if (!data->domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ pm_runtime_put(dev);
+ return -ENOMEM;
+ }
+ irq_domain_set_pm_device(data->domain, dev);
+
+ ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, "DC",
+ handle_level_irq, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed to alloc generic IRQ chips: %d\n", ret);
+ irq_domain_remove(data->domain);
+ pm_runtime_put(dev);
+ return ret;
+ }
+
+ for (i = 0; i < IRQ_COUNT; i += 32) {
+ gc = irq_get_domain_generic_chip(data->domain, i);
+ gc->reg_base = base;
+ gc->unused = unused_irq[i / 32];
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->regs.ack = USERINTERRUPTCLEAR(i / 32);
+ ct->regs.mask = USERINTERRUPTENABLE(i / 32);
+ }
+
+ for (i = 0; i < IRQ_COUNT; i++) {
+ /* skip the reserved IRQ */
+ if (i == IRQ_RESERVED)
+ continue;
+
+ data->irq[i] = irq_of_parse_and_map(dev->of_node, i);
+
+ entry[i].data = data;
+ entry[i].irq = i;
+
+ irq_set_chained_handler_and_data(data->irq[i],
+ dc_ic_irq_handler, &entry[i]);
+ }
+
+ return 0;
+}
+
+static void dc_ic_remove(struct platform_device *pdev)
+{
+ struct dc_ic_data *data = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ for (i = 0; i < IRQ_COUNT; i++) {
+ if (i == IRQ_RESERVED)
+ continue;
+
+ irq_set_chained_handler_and_data(data->irq[i], NULL, NULL);
+ }
+
+ irq_domain_remove(data->domain);
+
+ pm_runtime_put_sync(&pdev->dev);
+}
+
+static int dc_ic_runtime_suspend(struct device *dev)
+{
+ struct dc_ic_data *data = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(data->clk_axi);
+
+ return 0;
+}
+
+static int dc_ic_runtime_resume(struct device *dev)
+{
+ struct dc_ic_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(data->clk_axi);
+ if (ret)
+ dev_err(dev, "failed to enable AXI clock: %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops dc_ic_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ RUNTIME_PM_OPS(dc_ic_runtime_suspend, dc_ic_runtime_resume, NULL)
+};
+
+static const struct of_device_id dc_ic_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-intc", },
+ { /* sentinel */ }
+};
+
+struct platform_driver dc_ic_driver = {
+ .probe = dc_ic_probe,
+ .remove = dc_ic_remove,
+ .driver = {
+ .name = "imx8-dc-intc",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_ic_dt_ids,
+ .pm = pm_sleep_ptr(&dc_ic_pm_ops),
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-kms.c b/drivers/gpu/drm/imx/dc/dc-kms.c
new file mode 100644
index 000000000000..2b18aa37a4a8
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-kms.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/of.h>
+#include <linux/of_graph.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mode_config.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+#include "dc-kms.h"
+
+static const struct drm_mode_config_funcs dc_drm_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
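+/*
+ * Create a simple encoder per CRTC and attach the bridge found via the
+ * OF graph of the display engine's timing controller, together with a
+ * bridge connector. A missing bridge (-ENODEV) is not an error; it just
+ * means this display output is not wired up.
+ */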
+static int dc_kms_init_encoder_per_crtc(struct dc_drm_device *dc_drm,
+ int crtc_index)
+{
+ struct dc_crtc *dc_crtc = &dc_drm->dc_crtc[crtc_index];
+ struct drm_device *drm = &dc_drm->base;
+ struct drm_crtc *crtc = &dc_crtc->base;
+ struct drm_connector *connector;
+ struct device *dev = drm->dev;
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+ int ret;
+
+ bridge = devm_drm_of_get_bridge(dev, dc_crtc->de->tc->dev->of_node,
+ 0, 0);
+ if (IS_ERR(bridge)) {
+ ret = PTR_ERR(bridge);
+ if (ret == -ENODEV)
+ return 0;
+
+ return dev_err_probe(dev, ret,
+ "failed to find bridge for CRTC%u\n",
+ crtc->index);
+ }
+
+ encoder = &dc_drm->encoder[crtc_index];
+ ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
+ if (ret) {
+ dev_err(dev, "failed to initialize encoder for CRTC%u: %d\n",
+ crtc->index, ret);
+ return ret;
+ }
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = drm_bridge_attach(encoder, bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ dev_err(dev,
+ "failed to attach bridge to encoder for CRTC%u: %d\n",
+ crtc->index, ret);
+ return ret;
+ }
+
+ connector = drm_bridge_connector_init(drm, encoder);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ dev_err(dev, "failed to init bridge connector for CRTC%u: %d\n",
+ crtc->index, ret);
+ return ret;
+ }
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ dev_err(dev,
+ "failed to attach encoder to connector for CRTC%u: %d\n",
+ crtc->index, ret);
+
+ return ret;
+}
+
+int dc_kms_init(struct dc_drm_device *dc_drm)
+{
+ struct drm_device *drm = &dc_drm->base;
+ int ret, i;
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
+ drm->mode_config.min_width = 60;
+ drm->mode_config.min_height = 60;
+ drm->mode_config.max_width = 8192;
+ drm->mode_config.max_height = 8192;
+ drm->mode_config.funcs = &dc_drm_mode_config_funcs;
+
+ drm->vblank_disable_immediate = true;
+ drm->max_vblank_count = DC_FRAMEGEN_MAX_FRAME_INDEX;
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ ret = dc_crtc_init(dc_drm, i);
+ if (ret)
+ return ret;
+
+ ret = dc_kms_init_encoder_per_crtc(dc_drm, i);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ ret = dc_crtc_post_init(dc_drm, i);
+ if (ret)
+ return ret;
+ }
+
+ ret = drm_vblank_init(drm, DC_DISPLAYS);
+ if (ret) {
+ dev_err(drm->dev, "failed to init vblank support: %d\n", ret);
+ return ret;
+ }
+
+ drm_mode_config_reset(drm);
+
+ drm_kms_helper_poll_init(drm);
+
+ return 0;
+}
+
+void dc_kms_uninit(struct dc_drm_device *dc_drm)
+{
+ drm_kms_helper_poll_fini(&dc_drm->base);
+}
diff --git a/drivers/gpu/drm/imx/dc/dc-kms.h b/drivers/gpu/drm/imx/dc/dc-kms.h
new file mode 100644
index 000000000000..cd7860eff986
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-kms.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_KMS_H__
+#define __DC_KMS_H__
+
+#include <linux/completion.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_vblank.h>
+
+#include "dc-de.h"
+#include "dc-fu.h"
+#include "dc-pe.h"
+
+#define DC_CRTC_IRQS 5
+
+struct dc_crtc_irq {
+ struct dc_crtc *dc_crtc;
+ unsigned int irq;
+};
+
+/**
+ * struct dc_crtc - DC specific drm_crtc
+ *
+ * Each display controller contains one content stream and one safety stream.
+ * In general, the two streams have the same functionality. One stream is
+ * overlaid on the other by @fg. This driver generates a black constant color
+ * from the content stream as the background color, builds plane(s) on the
+ * content stream by using layerblend(s), and always generates a constant
+ * color from the safety stream. Note that, thanks to the decoupled timing,
+ * the safety stream keeps showing its constant color properly even if the
+ * content stream hangs completely due to a malfunction of this driver.
+ */
+struct dc_crtc {
+ /** @base: base drm_crtc structure */
+ struct drm_crtc base;
+ /** @de: display engine */
+ struct dc_de *de;
+ /** @cf_cont: content stream constframe */
+ struct dc_cf *cf_cont;
+ /** @cf_safe: safety stream constframe */
+ struct dc_cf *cf_safe;
+ /** @ed_cont: content stream extdst */
+ struct dc_ed *ed_cont;
+ /** @ed_safe: safety stream extdst */
+ struct dc_ed *ed_safe;
+ /** @fg: framegen */
+ struct dc_fg *fg;
+ /**
+ * @irq_dec_framecomplete:
+ *
+ * display engine configuration frame complete interrupt
+ */
+ unsigned int irq_dec_framecomplete;
+ /**
+ * @irq_dec_seqcomplete:
+ *
+ * display engine configuration sequence complete interrupt
+ */
+ unsigned int irq_dec_seqcomplete;
+ /**
+ * @irq_dec_shdload:
+ *
+ * display engine configuration shadow load interrupt
+ */
+ unsigned int irq_dec_shdload;
+ /**
+ * @irq_ed_cont_shdload:
+ *
+ * content stream extdst shadow load interrupt
+ */
+ unsigned int irq_ed_cont_shdload;
+ /**
+ * @irq_ed_safe_shdload:
+ *
+ * safety stream extdst shadow load interrupt
+ */
+ unsigned int irq_ed_safe_shdload;
+ /**
+ * @dec_seqcomplete_done:
+ *
+ * display engine configuration sequence completion
+ */
+ struct completion dec_seqcomplete_done;
+ /**
+ * @dec_shdload_done:
+ *
+ * display engine configuration shadow load completion
+ */
+ struct completion dec_shdload_done;
+ /**
+ * @ed_cont_shdload_done:
+ *
+ * content stream extdst shadow load completion
+ */
+ struct completion ed_cont_shdload_done;
+ /**
+ * @ed_safe_shdload_done:
+ *
+ * safety stream extdst shadow load completion
+ */
+ struct completion ed_safe_shdload_done;
+ /** @event: cached pending vblank event */
+ struct drm_pending_vblank_event *event;
+ /** @irqs: interrupt list */
+ struct dc_crtc_irq irqs[DC_CRTC_IRQS];
+};
+
+/**
+ * struct dc_plane - DC specific drm_plane
+ *
+ * Build a plane on content stream with a fetchunit and a layerblend.
+ */
+struct dc_plane {
+ /** @base: base drm_plane structure */
+ struct drm_plane base;
+ /** @fu: fetchunit */
+ struct dc_fu *fu;
+ /** @cf: content stream constframe */
+ struct dc_cf *cf;
+ /** @lb: layerblend */
+ struct dc_lb *lb;
+ /** @ed: content stream extdst */
+ struct dc_ed *ed;
+};
+
+#endif /* __DC_KMS_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-lb.c b/drivers/gpu/drm/imx/dc/dc-lb.c
new file mode 100644
index 000000000000..ca1d714c8d6e
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-lb.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_blend.h>
+
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+#define PIXENGCFG_DYNAMIC 0x8
+#define PIXENGCFG_DYNAMIC_SEC_SEL_MASK GENMASK(13, 8)
+#define PIXENGCFG_DYNAMIC_SEC_SEL(x) \
+ FIELD_PREP(PIXENGCFG_DYNAMIC_SEC_SEL_MASK, (x))
+#define PIXENGCFG_DYNAMIC_PRIM_SEL_MASK GENMASK(5, 0)
+#define PIXENGCFG_DYNAMIC_PRIM_SEL(x) \
+ FIELD_PREP(PIXENGCFG_DYNAMIC_PRIM_SEL_MASK, (x))
+
+#define STATICCONTROL 0x8
+#define SHDTOKSEL_MASK GENMASK(4, 3)
+#define SHDTOKSEL(x) FIELD_PREP(SHDTOKSEL_MASK, (x))
+#define SHDLDSEL_MASK GENMASK(2, 1)
+#define SHDLDSEL(x) FIELD_PREP(SHDLDSEL_MASK, (x))
+
+#define CONTROL 0xc
+#define CTRL_MODE_MASK BIT(0)
+#define CTRL_MODE(x) FIELD_PREP(CTRL_MODE_MASK, (x))
+
+#define BLENDCONTROL 0x10
+#define ALPHA_MASK GENMASK(23, 16)
+#define ALPHA(x) FIELD_PREP(ALPHA_MASK, (x))
+#define SEC_A_BLD_FUNC_MASK GENMASK(14, 12)
+#define SEC_A_BLD_FUNC(x) \
+ FIELD_PREP(SEC_A_BLD_FUNC_MASK, (x))
+#define PRIM_A_BLD_FUNC_MASK GENMASK(10, 8)
+#define PRIM_A_BLD_FUNC(x) \
+ FIELD_PREP(PRIM_A_BLD_FUNC_MASK, (x))
+#define SEC_C_BLD_FUNC_MASK GENMASK(6, 4)
+#define SEC_C_BLD_FUNC(x) \
+ FIELD_PREP(SEC_C_BLD_FUNC_MASK, (x))
+#define PRIM_C_BLD_FUNC_MASK GENMASK(2, 0)
+#define PRIM_C_BLD_FUNC(x) \
+ FIELD_PREP(PRIM_C_BLD_FUNC_MASK, (x))
+
+#define POSITION 0x14
+#define YPOS_MASK GENMASK(31, 16)
+#define YPOS(x) FIELD_PREP(YPOS_MASK, (x))
+#define XPOS_MASK GENMASK(15, 0)
+#define XPOS(x) FIELD_PREP(XPOS_MASK, (x))
+
+enum dc_lb_blend_func {
+ DC_LAYERBLEND_BLEND_ZERO,
+ DC_LAYERBLEND_BLEND_ONE,
+ DC_LAYERBLEND_BLEND_PRIM_ALPHA,
+ DC_LAYERBLEND_BLEND_ONE_MINUS_PRIM_ALPHA,
+ DC_LAYERBLEND_BLEND_SEC_ALPHA,
+ DC_LAYERBLEND_BLEND_ONE_MINUS_SEC_ALPHA,
+ DC_LAYERBLEND_BLEND_CONST_ALPHA,
+ DC_LAYERBLEND_BLEND_ONE_MINUS_CONST_ALPHA,
+};
+
+enum dc_lb_shadow_sel {
+ BOTH = 0x2,
+};
+
+static const struct dc_subdev_info dc_lb_info[] = {
+ { .reg_start = 0x56180ba0, .id = 0, },
+ { .reg_start = 0x56180bc0, .id = 1, },
+ { .reg_start = 0x56180be0, .id = 2, },
+ { .reg_start = 0x56180c00, .id = 3, },
+};
+
+static const struct regmap_range dc_lb_pec_regmap_access_ranges[] = {
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+};
+
+static const struct regmap_access_table dc_lb_pec_regmap_access_table = {
+ .yes_ranges = dc_lb_pec_regmap_access_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_lb_pec_regmap_access_ranges),
+};
+
+static const struct regmap_config dc_lb_pec_regmap_config = {
+ .name = "pec",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_lb_pec_regmap_access_table,
+ .rd_table = &dc_lb_pec_regmap_access_table,
+ .max_register = PIXENGCFG_DYNAMIC,
+};
+
+static const struct regmap_range dc_lb_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, POSITION),
+};
+
+static const struct regmap_access_table dc_lb_regmap_access_table = {
+ .yes_ranges = dc_lb_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_lb_regmap_ranges),
+};
+
+static const struct regmap_config dc_lb_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_lb_regmap_access_table,
+ .rd_table = &dc_lb_regmap_access_table,
+ .max_register = POSITION,
+};
+
+static const enum dc_link_id prim_sels[] = {
+ /* common options */
+ LINK_ID_NONE,
+ LINK_ID_CONSTFRAME0,
+ LINK_ID_CONSTFRAME1,
+ LINK_ID_CONSTFRAME4,
+ LINK_ID_CONSTFRAME5,
+ /*
+ * special options:
+ * layerblend(n) has n special options,
+ * from layerblend0 to layerblend(n - 1), e.g.,
+ * layerblend3 has 3 special options -
+ * layerblend0/1/2.
+ */
+ LINK_ID_LAYERBLEND0,
+ LINK_ID_LAYERBLEND1,
+ LINK_ID_LAYERBLEND2,
+ LINK_ID_LAYERBLEND3,
+};
+
+static const enum dc_link_id sec_sels[] = {
+ LINK_ID_NONE,
+ LINK_ID_FETCHWARP2,
+ LINK_ID_FETCHLAYER0,
+};
+
+enum dc_link_id dc_lb_get_link_id(struct dc_lb *lb)
+{
+ return lb->link;
+}
+
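+/*
+ * Only the common options plus this LayerBlend's own special options
+ * (LayerBlend0 .. LayerBlend(id - 1)) are valid primary inputs; e.g.,
+ * LayerBlend1 may cascade from LayerBlend0 but not from LayerBlend2/3.
+ */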
+void dc_lb_pec_dynamic_prim_sel(struct dc_lb *lb, enum dc_link_id prim)
+{
+ int fixed_sels_num = ARRAY_SIZE(prim_sels) - 4;
+ int i;
+
+ for (i = 0; i < fixed_sels_num + lb->id; i++) {
+ if (prim_sels[i] == prim) {
+ regmap_write_bits(lb->reg_pec, PIXENGCFG_DYNAMIC,
+ PIXENGCFG_DYNAMIC_PRIM_SEL_MASK,
+ PIXENGCFG_DYNAMIC_PRIM_SEL(prim));
+ return;
+ }
+ }
+
+ dev_warn(lb->dev, "invalid primary input selection:%d\n", prim);
+}
+
+void dc_lb_pec_dynamic_sec_sel(struct dc_lb *lb, enum dc_link_id sec)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sec_sels); i++) {
+ if (sec_sels[i] == sec) {
+ regmap_write_bits(lb->reg_pec, PIXENGCFG_DYNAMIC,
+ PIXENGCFG_DYNAMIC_SEC_SEL_MASK,
+ PIXENGCFG_DYNAMIC_SEC_SEL(sec));
+ return;
+ }
+ }
+
+ dev_warn(lb->dev, "invalid secondary input selection:%d\n", sec);
+}
+
+void dc_lb_pec_clken(struct dc_lb *lb, enum dc_pec_clken clken)
+{
+ regmap_write_bits(lb->reg_pec, PIXENGCFG_DYNAMIC, CLKEN_MASK,
+ CLKEN(clken));
+}
+
+static inline void dc_lb_enable_shden(struct dc_lb *lb)
+{
+ regmap_write_bits(lb->reg_cfg, STATICCONTROL, SHDEN, SHDEN);
+}
+
+static inline void dc_lb_shdtoksel(struct dc_lb *lb, enum dc_lb_shadow_sel sel)
+{
+ regmap_write_bits(lb->reg_cfg, STATICCONTROL, SHDTOKSEL_MASK,
+ SHDTOKSEL(sel));
+}
+
+static inline void dc_lb_shdldsel(struct dc_lb *lb, enum dc_lb_shadow_sel sel)
+{
+ regmap_write_bits(lb->reg_cfg, STATICCONTROL, SHDLDSEL_MASK,
+ SHDLDSEL(sel));
+}
+
+void dc_lb_mode(struct dc_lb *lb, enum dc_lb_mode mode)
+{
+ regmap_write_bits(lb->reg_cfg, CONTROL, CTRL_MODE_MASK, mode);
+}
+
+static inline void dc_lb_blendcontrol(struct dc_lb *lb)
+{
+ u32 val = PRIM_A_BLD_FUNC(DC_LAYERBLEND_BLEND_ZERO) |
+ SEC_A_BLD_FUNC(DC_LAYERBLEND_BLEND_ZERO) |
+ PRIM_C_BLD_FUNC(DC_LAYERBLEND_BLEND_ZERO) |
+ SEC_C_BLD_FUNC(DC_LAYERBLEND_BLEND_CONST_ALPHA) |
+ ALPHA(DRM_BLEND_ALPHA_OPAQUE >> 8);
+
+ regmap_write(lb->reg_cfg, BLENDCONTROL, val);
+}
+
+void dc_lb_position(struct dc_lb *lb, int x, int y)
+{
+ regmap_write(lb->reg_cfg, POSITION, XPOS(x) | YPOS(y));
+}
+
+int dc_lb_get_id(struct dc_lb *lb)
+{
+ return lb->id;
+}
+
+void dc_lb_init(struct dc_lb *lb)
+{
+ dc_lb_pec_dynamic_prim_sel(lb, LINK_ID_NONE);
+ dc_lb_pec_dynamic_sec_sel(lb, LINK_ID_NONE);
+ dc_lb_pec_clken(lb, CLKEN_DISABLE);
+ dc_lb_shdldsel(lb, BOTH);
+ dc_lb_shdtoksel(lb, BOTH);
+ dc_lb_blendcontrol(lb);
+ dc_lb_enable_shden(lb);
+}
+
+static int dc_lb_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_pec;
+ void __iomem *base_cfg;
+ struct dc_lb *lb;
+
+ lb = devm_kzalloc(dev, sizeof(*lb), GFP_KERNEL);
+ if (!lb)
+ return -ENOMEM;
+
+ base_pec = devm_platform_get_and_ioremap_resource(pdev, 0, &res_pec);
+ if (IS_ERR(base_pec))
+ return PTR_ERR(base_pec);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ lb->reg_pec = devm_regmap_init_mmio(dev, base_pec,
+ &dc_lb_pec_regmap_config);
+ if (IS_ERR(lb->reg_pec))
+ return PTR_ERR(lb->reg_pec);
+
+ lb->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_lb_cfg_regmap_config);
+ if (IS_ERR(lb->reg_cfg))
+ return PTR_ERR(lb->reg_cfg);
+
+ lb->id = dc_subdev_get_id(dc_lb_info, ARRAY_SIZE(dc_lb_info), res_pec);
+ if (lb->id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", lb->id);
+ return lb->id;
+ }
+
+ lb->dev = dev;
+ lb->link = LINK_ID_LAYERBLEND0 + lb->id;
+
+ dc_drm->lb[lb->id] = lb;
+
+ return 0;
+}
+
+static const struct component_ops dc_lb_ops = {
+ .bind = dc_lb_bind,
+};
+
+static int dc_lb_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_lb_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_lb_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_lb_ops);
+}
+
+static const struct of_device_id dc_lb_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-layerblend" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_lb_dt_ids);
+
+struct platform_driver dc_lb_driver = {
+ .probe = dc_lb_probe,
+ .remove = dc_lb_remove,
+ .driver = {
+ .name = "imx8-dc-layerblend",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_lb_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-pe.c b/drivers/gpu/drm/imx/dc/dc-pe.c
new file mode 100644
index 000000000000..6676c22f3f45
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-pe.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+#include "dc-pe.h"
+
+static int dc_pe_bind(struct device *dev, struct device *master, void *data)
+{
+ struct dc_drm_device *dc_drm = data;
+ struct dc_pe *pe;
+ int ret;
+
+ pe = devm_kzalloc(dev, sizeof(*pe), GFP_KERNEL);
+ if (!pe)
+ return -ENOMEM;
+
+ pe->clk_axi = devm_clk_get(dev, NULL);
+ if (IS_ERR(pe->clk_axi))
+ return dev_err_probe(dev, PTR_ERR(pe->clk_axi),
+ "failed to get AXI clock\n");
+
+ pe->dev = dev;
+
+ dev_set_drvdata(dev, pe);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ dc_drm->pe = pe;
+
+ return 0;
+}
+
+/*
+ * The child device pointers could be collected in the child components'
+ * bind callbacks, but that would rely on the component helper binding the
+ * pixel engine component first. To avoid that ordering dependency, collect
+ * the pointers from dc_drm after all components have been bound.
+ */
+void dc_pe_post_bind(struct dc_drm_device *dc_drm)
+{
+ struct dc_pe *pe = dc_drm->pe;
+ int i;
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ pe->cf_safe[i] = dc_drm->cf_safe[i];
+ pe->cf_cont[i] = dc_drm->cf_cont[i];
+ pe->ed_safe[i] = dc_drm->ed_safe[i];
+ pe->ed_cont[i] = dc_drm->ed_cont[i];
+ }
+
+ for (i = 0; i < DC_DISP_FU_CNT; i++)
+ pe->fu_disp[i] = dc_drm->fu_disp[i];
+
+ for (i = 0; i < DC_LB_CNT; i++)
+ pe->lb[i] = dc_drm->lb[i];
+}
+
+static const struct component_ops dc_pe_ops = {
+ .bind = dc_pe_bind,
+};
+
+static int dc_pe_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = component_add(&pdev->dev, &dc_pe_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_pe_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_pe_ops);
+}
+
+static int dc_pe_runtime_suspend(struct device *dev)
+{
+ struct dc_pe *pe = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(pe->clk_axi);
+
+ return 0;
+}
+
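+/*
+ * Once the AXI clock is running again, bring every pixel engine subunit
+ * (constframes, extdsts, fetchunits and layerblends) back to its default
+ * configuration.
+ */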
+static int dc_pe_runtime_resume(struct device *dev)
+{
+ struct dc_pe *pe = dev_get_drvdata(dev);
+ int i, ret;
+
+ ret = clk_prepare_enable(pe->clk_axi);
+ if (ret) {
+ dev_err(dev, "failed to enable AXI clock: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pe->cf_safe); i++)
+ dc_cf_init(pe->cf_safe[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->cf_cont); i++)
+ dc_cf_init(pe->cf_cont[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->ed_safe); i++)
+ dc_ed_init(pe->ed_safe[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->ed_cont); i++)
+ dc_ed_init(pe->ed_cont[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->fu_disp); i++)
+ pe->fu_disp[i]->ops.init(pe->fu_disp[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->lb); i++)
+ dc_lb_init(pe->lb[i]);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dc_pe_pm_ops = {
+ RUNTIME_PM_OPS(dc_pe_runtime_suspend, dc_pe_runtime_resume, NULL)
+};
+
+static const struct of_device_id dc_pe_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-pixel-engine", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_pe_dt_ids);
+
+struct platform_driver dc_pe_driver = {
+ .probe = dc_pe_probe,
+ .remove = dc_pe_remove,
+ .driver = {
+ .name = "imx8-dc-pixel-engine",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_pe_dt_ids,
+ .pm = pm_sleep_ptr(&dc_pe_pm_ops),
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-pe.h b/drivers/gpu/drm/imx/dc/dc-pe.h
new file mode 100644
index 000000000000..f5e01a6eb9e9
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-pe.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_PIXEL_ENGINE_H__
+#define __DC_PIXEL_ENGINE_H__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+#include "dc-de.h"
+
+#define SHDEN BIT(0)
+
+#define CLKEN_MASK_SHIFT 24
+#define CLKEN_MASK (0x3 << CLKEN_MASK_SHIFT)
+#define CLKEN(n) ((n) << CLKEN_MASK_SHIFT)
+
+#define DC_DISP_FU_CNT 2
+#define DC_LB_CNT 4
+
+enum dc_link_id {
+ LINK_ID_NONE = 0x00,
+ LINK_ID_CONSTFRAME0 = 0x0c,
+ LINK_ID_CONSTFRAME4 = 0x0e,
+ LINK_ID_CONSTFRAME1 = 0x10,
+ LINK_ID_CONSTFRAME5 = 0x12,
+ LINK_ID_FETCHWARP2 = 0x14,
+ LINK_ID_FETCHLAYER0 = 0x1a,
+ LINK_ID_LAYERBLEND0 = 0x21,
+ LINK_ID_LAYERBLEND1 = 0x22,
+ LINK_ID_LAYERBLEND2 = 0x23,
+ LINK_ID_LAYERBLEND3 = 0x24,
+};
+
+enum dc_lb_mode {
+ LB_NEUTRAL, /* Output is same as primary input. */
+ LB_BLEND,
+};
+
+enum dc_pec_clken {
+ CLKEN_DISABLE,
+ CLKEN_AUTOMATIC,
+};
+
+struct dc_cf {
+ struct regmap *reg_cfg;
+ enum dc_link_id link;
+};
+
+struct dc_ed {
+ struct device *dev;
+ struct regmap *reg_pec;
+ struct regmap *reg_cfg;
+ int irq_shdload;
+};
+
+struct dc_lb {
+ struct device *dev;
+ struct regmap *reg_pec;
+ struct regmap *reg_cfg;
+ int id;
+ enum dc_link_id link;
+};
+
+struct dc_pe {
+ struct device *dev;
+ struct clk *clk_axi;
+ struct dc_cf *cf_safe[DC_DISPLAYS];
+ struct dc_cf *cf_cont[DC_DISPLAYS];
+ struct dc_ed *ed_safe[DC_DISPLAYS];
+ struct dc_ed *ed_cont[DC_DISPLAYS];
+ struct dc_fu *fu_disp[DC_DISP_FU_CNT];
+ struct dc_lb *lb[DC_LB_CNT];
+};
+
+/* Constant Frame Unit */
+enum dc_link_id dc_cf_get_link_id(struct dc_cf *cf);
+void dc_cf_framedimensions(struct dc_cf *cf, unsigned int w, unsigned int h);
+void dc_cf_constantcolor_black(struct dc_cf *cf);
+void dc_cf_constantcolor_blue(struct dc_cf *cf);
+void dc_cf_init(struct dc_cf *cf);
+
+/* External Destination Unit */
+void dc_ed_pec_src_sel(struct dc_ed *ed, enum dc_link_id src);
+void dc_ed_pec_sync_trigger(struct dc_ed *ed);
+void dc_ed_init(struct dc_ed *ed);
+
+/* Layer Blend Unit */
+enum dc_link_id dc_lb_get_link_id(struct dc_lb *lb);
+void dc_lb_pec_dynamic_prim_sel(struct dc_lb *lb, enum dc_link_id prim);
+void dc_lb_pec_dynamic_sec_sel(struct dc_lb *lb, enum dc_link_id sec);
+void dc_lb_pec_clken(struct dc_lb *lb, enum dc_pec_clken clken);
+void dc_lb_mode(struct dc_lb *lb, enum dc_lb_mode mode);
+void dc_lb_position(struct dc_lb *lb, int x, int y);
+int dc_lb_get_id(struct dc_lb *lb);
+void dc_lb_init(struct dc_lb *lb);
+
+#endif /* __DC_PIXEL_ENGINE_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-plane.c b/drivers/gpu/drm/imx/dc/dc-plane.c
new file mode 100644
index 000000000000..e40d5d66c5c1
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-plane.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/container_of.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+#include "dc-kms.h"
+
+#define DC_PLANE_MAX_PITCH 0x10000
+#define DC_PLANE_MAX_PIX_CNT 8192
+
+#define dc_plane_dbg(plane, fmt, ...) \
+do { \
+ struct drm_plane *_plane = (plane); \
+ drm_dbg_kms(_plane->dev, "[PLANE:%d:%s] " fmt, \
+ _plane->base.id, _plane->name, ##__VA_ARGS__); \
+} while (0)
+
+static const uint32_t dc_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static const struct drm_plane_funcs dc_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static inline struct dc_plane *to_dc_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct dc_plane, base);
+}
+
+static int dc_plane_check_max_source_resolution(struct drm_plane_state *state)
+{
+ int src_h = drm_rect_height(&state->src) >> 16;
+ int src_w = drm_rect_width(&state->src) >> 16;
+
+ if (src_w > DC_PLANE_MAX_PIX_CNT || src_h > DC_PLANE_MAX_PIX_CNT) {
+ dc_plane_dbg(state->plane, "invalid source resolution\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dc_plane_check_fb(struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ dma_addr_t baseaddr = drm_fb_dma_get_gem_addr(fb, state, 0);
+
+ /* base address alignment */
+ if (baseaddr & 0x3) {
+ dc_plane_dbg(state->plane, "fb bad baddr alignment\n");
+ return -EINVAL;
+ }
+
+ /* pitches[0] range */
+ if (fb->pitches[0] > DC_PLANE_MAX_PITCH) {
+ dc_plane_dbg(state->plane, "fb pitches[0] is out of range\n");
+ return -EINVAL;
+ }
+
+ /* pitches[0] alignment */
+ if (fb->pitches[0] & 0x3) {
+ dc_plane_dbg(state->plane, "fb bad pitches[0] alignment\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dc_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ /* ok to disable */
+ if (!plane_state->fb)
+ return 0;
+
+ if (!plane_state->crtc) {
+ dc_plane_dbg(plane, "no CRTC in plane state\n");
+ return -EINVAL;
+ }
+
+ crtc_state =
+ drm_atomic_get_new_crtc_state(state, plane_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, false);
+ if (ret) {
+ dc_plane_dbg(plane, "failed to check plane state: %d\n", ret);
+ return ret;
+ }
+
+ ret = dc_plane_check_max_source_resolution(plane_state);
+ if (ret)
+ return ret;
+
+ return dc_plane_check_fb(plane_state);
+}
+
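+/*
+ * Program the fetchunit with the new framebuffer, chain the content
+ * stream's ConstFrame (primary) and the fetchunit (secondary) into the
+ * LayerBlend, and finally point the content ExtDst at the LayerBlend so
+ * the plane shows up on top of the background color.
+ */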
+static void
+dc_plane_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct dc_plane *dplane = to_dc_plane(plane);
+ struct drm_framebuffer *fb = new_state->fb;
+ const struct dc_fu_ops *fu_ops;
+ struct dc_lb *lb = dplane->lb;
+ struct dc_fu *fu = dplane->fu;
+ dma_addr_t baseaddr;
+ int src_w, src_h;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ return;
+
+ src_w = drm_rect_width(&new_state->src) >> 16;
+ src_h = drm_rect_height(&new_state->src) >> 16;
+
+ baseaddr = drm_fb_dma_get_gem_addr(fb, new_state, 0);
+
+ fu_ops = dc_fu_get_ops(dplane->fu);
+
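+	/*
+	 * Program the fetchunit with the new framebuffer: stride, source
+	 * buffer dimensions, pixel format and base address, then enable
+	 * its source buffer for scanout.
+	 */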
+ fu_ops->set_layerblend(fu, lb);
+ fu_ops->set_burstlength(fu, baseaddr);
+ fu_ops->set_src_stride(fu, DC_FETCHUNIT_FRAC0, fb->pitches[0]);
+ fu_ops->set_src_buf_dimensions(fu, DC_FETCHUNIT_FRAC0, src_w, src_h);
+ fu_ops->set_fmt(fu, DC_FETCHUNIT_FRAC0, fb->format);
+ fu_ops->set_framedimensions(fu, src_w, src_h);
+ fu_ops->set_baseaddress(fu, DC_FETCHUNIT_FRAC0, baseaddr);
+ fu_ops->enable_src_buf(fu, DC_FETCHUNIT_FRAC0);
+
+ dc_plane_dbg(plane, "uses %s\n", fu_ops->get_name(fu));
+
+ dc_lb_pec_dynamic_prim_sel(lb, dc_cf_get_link_id(dplane->cf));
+ dc_lb_pec_dynamic_sec_sel(lb, fu_ops->get_link_id(fu));
+ dc_lb_mode(lb, LB_BLEND);
+ dc_lb_position(lb, new_state->dst.x1, new_state->dst.y1);
+ dc_lb_pec_clken(lb, CLKEN_AUTOMATIC);
+
+ dc_plane_dbg(plane, "uses LayerBlend%d\n", dc_lb_get_id(lb));
+
+ /* set ExtDst's source to LayerBlend */
+ dc_ed_pec_src_sel(dplane->ed, dc_lb_get_link_id(lb));
+
+ drm_dev_exit(idx);
+}
+
+static void dc_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct dc_plane *dplane = to_dc_plane(plane);
+ const struct dc_fu_ops *fu_ops;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ return;
+
+ /* disable fetchunit in shadow */
+ fu_ops = dc_fu_get_ops(dplane->fu);
+ fu_ops->disable_src_buf(dplane->fu, DC_FETCHUNIT_FRAC0);
+
+ /* set ExtDst's source to ConstFrame */
+ dc_ed_pec_src_sel(dplane->ed, dc_cf_get_link_id(dplane->cf));
+
+ drm_dev_exit(idx);
+}
+
+static const struct drm_plane_helper_funcs dc_plane_helper_funcs = {
+ .atomic_check = dc_plane_atomic_check,
+ .atomic_update = dc_plane_atomic_update,
+ .atomic_disable = dc_plane_atomic_disable,
+};
+
+int dc_plane_init(struct dc_drm_device *dc_drm, struct dc_plane *dc_plane)
+{
+ struct drm_plane *plane = &dc_plane->base;
+ int ret;
+
+ ret = drm_universal_plane_init(&dc_drm->base, plane, 0, &dc_plane_funcs,
+ dc_plane_formats,
+ ARRAY_SIZE(dc_plane_formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+
+ drm_plane_helper_add(plane, &dc_plane_helper_funcs);
+
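+	/* Each plane uses the pipeline units that share its index. */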
+ dc_plane->fu = dc_drm->pe->fu_disp[plane->index];
+ dc_plane->cf = dc_drm->pe->cf_cont[plane->index];
+ dc_plane->lb = dc_drm->pe->lb[plane->index];
+ dc_plane->ed = dc_drm->pe->ed_cont[plane->index];
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/imx/dc/dc-tc.c b/drivers/gpu/drm/imx/dc/dc-tc.c
new file mode 100644
index 000000000000..0bfd381b2cea
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-tc.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dc-drv.h"
+#include "dc-de.h"
+
+#define TCON_CTRL 0x410
+#define CTRL_RST_VAL 0x01401408
+
+/* red: MAPBIT 29-20, green: MAPBIT 19-10, blue: MAPBIT 9-0 */
+#define MAPBIT3_0 0x418
+#define MAPBIT7_4 0x41c
+#define MAPBIT11_8 0x420
+#define MAPBIT15_12 0x424
+#define MAPBIT19_16 0x428
+#define MAPBIT23_20 0x42c
+#define MAPBIT27_24 0x430
+#define MAPBIT31_28 0x434
+
+static const struct dc_subdev_info dc_tc_info[] = {
+ { .reg_start = 0x5618c800, .id = 0, },
+ { .reg_start = 0x5618e400, .id = 1, },
+};
+
+static const struct regmap_range dc_tc_regmap_ranges[] = {
+ regmap_reg_range(TCON_CTRL, TCON_CTRL),
+ regmap_reg_range(MAPBIT3_0, MAPBIT31_28),
+};
+
+static const struct regmap_access_table dc_tc_regmap_access_table = {
+ .yes_ranges = dc_tc_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_tc_regmap_ranges),
+};
+
+static const struct regmap_config dc_tc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_tc_regmap_access_table,
+ .rd_table = &dc_tc_regmap_access_table,
+ .max_register = MAPBIT31_28,
+};
+
+/*
+ * The pixels that reach the TCON are always in 30-bit BGR format.
+ * The first bridge always receives pixels in 30-bit RGB format.
+ * So, map the format to MEDIA_BUS_FMT_RGB101010_1X30.
+ */
+static const u32 dc_tc_mapbit[] = {
+ 0x17161514, 0x1b1a1918, 0x0b0a1d1c, 0x0f0e0d0c,
+ 0x13121110, 0x03020100, 0x07060504, 0x00000908,
+};
+
+void dc_tc_init(struct dc_tc *tc)
+{
+ /* reset TCON_CTRL to POR default so that TCON works in bypass mode */
+ regmap_write(tc->reg, TCON_CTRL, CTRL_RST_VAL);
+
+ /* set format */
+ regmap_bulk_write(tc->reg, MAPBIT3_0, dc_tc_mapbit,
+ ARRAY_SIZE(dc_tc_mapbit));
+}
+
+static int dc_tc_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res;
+ void __iomem *base;
+ struct dc_tc *tc;
+ int id;
+
+ tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
+ if (!tc)
+ return -ENOMEM;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ tc->reg = devm_regmap_init_mmio(dev, base, &dc_tc_regmap_config);
+ if (IS_ERR(tc->reg))
+ return PTR_ERR(tc->reg);
+
+ id = dc_subdev_get_id(dc_tc_info, ARRAY_SIZE(dc_tc_info), res);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ tc->dev = dev;
+ dc_drm->tc[id] = tc;
+
+ return 0;
+}
+
+static const struct component_ops dc_tc_ops = {
+ .bind = dc_tc_bind,
+};
+
+static int dc_tc_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_tc_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_tc_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_tc_ops);
+}
+
+static const struct of_device_id dc_tc_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-tcon" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_tc_dt_ids);
+
+struct platform_driver dc_tc_driver = {
+ .probe = dc_tc_probe,
+ .remove = dc_tc_remove,
+ .driver = {
+ .name = "imx8-dc-tcon",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_tc_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig
new file mode 100644
index 000000000000..e014ed3ae66c
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/Kconfig
@@ -0,0 +1,13 @@
+config DRM_IMX_DCSS
+ tristate "i.MX8MQ DCSS"
+ select IMX_IRQSTEER
+ select DRM_CLIENT_SELECTION
+ select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_GEM_DMA_HELPER
+ select VIDEOMODE_HELPERS
+ depends on DRM && ((ARCH_MXC && ARM64) || COMPILE_TEST)
+ help
+	  Choose this if you have an NXP i.MX8MQ based system and want to use
+	  the Display Controller Subsystem. This option enables DCSS support.
diff --git a/drivers/gpu/drm/imx/dcss/Makefile b/drivers/gpu/drm/imx/dcss/Makefile
new file mode 100644
index 000000000000..8c7c8da42792
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/Makefile
@@ -0,0 +1,6 @@
+imx-dcss-objs := dcss-drv.o dcss-dev.o dcss-blkctl.o dcss-ctxld.o dcss-dtg.o \
+ dcss-ss.o dcss-dpr.o dcss-scaler.o dcss-kms.o dcss-crtc.o \
+ dcss-plane.o
+
+obj-$(CONFIG_DRM_IMX_DCSS) += imx-dcss.o
+
diff --git a/drivers/gpu/drm/imx/dcss/dcss-blkctl.c b/drivers/gpu/drm/imx/dcss/dcss-blkctl.c
new file mode 100644
index 000000000000..803e3fcdb50f
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-blkctl.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "dcss-dev.h"
+
+#define DCSS_BLKCTL_RESET_CTRL 0x00
+#define B_CLK_RESETN BIT(0)
+#define APB_CLK_RESETN BIT(1)
+#define P_CLK_RESETN BIT(2)
+#define RTR_CLK_RESETN BIT(4)
+#define DCSS_BLKCTL_CONTROL0 0x10
+#define HDMI_MIPI_CLK_SEL BIT(0)
+#define DISPMIX_REFCLK_SEL_POS 4
+#define DISPMIX_REFCLK_SEL_MASK GENMASK(5, 4)
+#define DISPMIX_PIXCLK_SEL BIT(8)
+#define HDMI_SRC_SECURE_EN BIT(16)
+
+struct dcss_blkctl {
+ struct dcss_dev *dcss;
+ void __iomem *base_reg;
+};
+
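+/*
+ * Select the pixel clock routing for HDMI or MIPI output and de-assert the
+ * bus, APB, pixel and RTRAM clock resets.
+ */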
+void dcss_blkctl_cfg(struct dcss_blkctl *blkctl)
+{
+ if (blkctl->dcss->hdmi_output)
+ dcss_writel(0, blkctl->base_reg + DCSS_BLKCTL_CONTROL0);
+ else
+ dcss_writel(DISPMIX_PIXCLK_SEL,
+ blkctl->base_reg + DCSS_BLKCTL_CONTROL0);
+
+ dcss_set(B_CLK_RESETN | APB_CLK_RESETN | P_CLK_RESETN | RTR_CLK_RESETN,
+ blkctl->base_reg + DCSS_BLKCTL_RESET_CTRL);
+}
+
+int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base)
+{
+ struct dcss_blkctl *blkctl;
+
+ blkctl = devm_kzalloc(dcss->dev, sizeof(*blkctl), GFP_KERNEL);
+ if (!blkctl)
+ return -ENOMEM;
+
+ blkctl->base_reg = devm_ioremap(dcss->dev, blkctl_base, SZ_4K);
+ if (!blkctl->base_reg) {
+ dev_err(dcss->dev, "unable to remap BLK CTRL base\n");
+ return -ENOMEM;
+ }
+
+ dcss->blkctl = blkctl;
+ blkctl->dcss = dcss;
+
+ dcss_blkctl_cfg(blkctl);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/imx/dcss/dcss-crtc.c b/drivers/gpu/drm/imx/dcss/dcss-crtc.c
new file mode 100644
index 000000000000..af91e45b5d13
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-crtc.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_vblank.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "dcss-dev.h"
+#include "dcss-kms.h"
+
+static int dcss_enable_vblank(struct drm_crtc *crtc)
+{
+ struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
+ base);
+ struct dcss_dev *dcss = crtc->dev->dev_private;
+
+ dcss_dtg_vblank_irq_enable(dcss->dtg, true);
+
+ dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, true);
+
+ enable_irq(dcss_crtc->irq);
+
+ return 0;
+}
+
+static void dcss_disable_vblank(struct drm_crtc *crtc)
+{
+ struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
+ base);
+ struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
+
+ disable_irq_nosync(dcss_crtc->irq);
+
+ dcss_dtg_vblank_irq_enable(dcss->dtg, false);
+
+ if (dcss_crtc->disable_ctxld_kick_irq)
+ dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, false);
+}
+
+static const struct drm_crtc_funcs dcss_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = dcss_enable_vblank,
+ .disable_vblank = dcss_disable_vblank,
+};
+
+static void dcss_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ drm_crtc_vblank_on(crtc);
+}
+
+static void dcss_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
+ base);
+ struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc));
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
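+	/*
+	 * Arm the context loader; the queued register updates are flushed to
+	 * the hardware from the DTG kick interrupt.
+	 */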
+ if (dcss_dtg_is_enabled(dcss->dtg))
+ dcss_ctxld_enable(dcss->ctxld);
+}
+
+static void dcss_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
+ struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
+ base);
+ struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct drm_display_mode *old_mode = &old_crtc_state->adjusted_mode;
+ struct videomode vm;
+
+ drm_display_mode_to_videomode(mode, &vm);
+
+ pm_runtime_get_sync(dcss->dev);
+
+ vm.pixelclock = mode->crtc_clock * 1000;
+
+ dcss_ss_subsam_set(dcss->ss);
+ dcss_dtg_css_set(dcss->dtg);
+
+ if (!drm_mode_equal(mode, old_mode) || !old_crtc_state->active) {
+ dcss_dtg_sync_set(dcss->dtg, &vm);
+ dcss_ss_sync_set(dcss->ss, &vm,
+ mode->flags & DRM_MODE_FLAG_PHSYNC,
+ mode->flags & DRM_MODE_FLAG_PVSYNC);
+ }
+
+ dcss_enable_dtg_and_ss(dcss);
+
+ dcss_ctxld_enable(dcss->ctxld);
+
+ /* Allow CTXLD kick interrupt to be disabled when VBLANK is disabled. */
+ dcss_crtc->disable_ctxld_kick_irq = true;
+}
+
+static void dcss_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
+ struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
+ base);
+ struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct drm_display_mode *old_mode = &old_crtc_state->adjusted_mode;
+
+ drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, true);
+
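+	/*
+	 * Queue the DTG and SS shutoff; it runs from the CTXLD completion
+	 * interrupt, which also signals disable_completion.
+	 */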
+ reinit_completion(&dcss->disable_completion);
+
+ dcss_disable_dtg_and_ss(dcss);
+
+ dcss_ctxld_enable(dcss->ctxld);
+
+ if (!drm_mode_equal(mode, old_mode) || !crtc->state->active)
+ if (!wait_for_completion_timeout(&dcss->disable_completion,
+ msecs_to_jiffies(100)))
+ dev_err(dcss->dev, "Shutting off DTG timed out.\n");
+
+ /*
+	 * Do not shut off the CTXLD kick interrupt when shutting VBLANK off;
+	 * it will still be needed to commit the last changes before suspend.
+ */
+ dcss_crtc->disable_ctxld_kick_irq = false;
+
+ drm_crtc_vblank_off(crtc);
+
+ pm_runtime_mark_last_busy(dcss->dev);
+ pm_runtime_put_autosuspend(dcss->dev);
+}
+
+static const struct drm_crtc_helper_funcs dcss_helper_funcs = {
+ .atomic_begin = dcss_crtc_atomic_begin,
+ .atomic_flush = dcss_crtc_atomic_flush,
+ .atomic_enable = dcss_crtc_atomic_enable,
+ .atomic_disable = dcss_crtc_atomic_disable,
+};
+
+static irqreturn_t dcss_crtc_irq_handler(int irq, void *dev_id)
+{
+ struct dcss_crtc *dcss_crtc = dev_id;
+ struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
+
+ if (!dcss_dtg_vblank_irq_valid(dcss->dtg))
+ return IRQ_NONE;
+
+ if (dcss_ctxld_is_flushed(dcss->ctxld))
+ drm_crtc_handle_vblank(&dcss_crtc->base);
+
+ dcss_dtg_vblank_irq_clear(dcss->dtg);
+
+ return IRQ_HANDLED;
+}
+
+int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm)
+{
+ struct dcss_dev *dcss = drm->dev_private;
+ struct platform_device *pdev = to_platform_device(dcss->dev);
+ int ret;
+
+ crtc->plane[0] = dcss_plane_init(drm, drm_crtc_mask(&crtc->base),
+ DRM_PLANE_TYPE_PRIMARY, 0);
+ if (IS_ERR(crtc->plane[0]))
+ return PTR_ERR(crtc->plane[0]);
+
+ crtc->base.port = dcss->of_port;
+
+ drm_crtc_helper_add(&crtc->base, &dcss_helper_funcs);
+ ret = drm_crtc_init_with_planes(drm, &crtc->base, &crtc->plane[0]->base,
+ NULL, &dcss_crtc_funcs, NULL);
+ if (ret) {
+ dev_err(dcss->dev, "failed to init crtc\n");
+ return ret;
+ }
+
+ crtc->irq = platform_get_irq_byname(pdev, "vblank");
+ if (crtc->irq < 0)
+ return crtc->irq;
+
+ ret = request_irq(crtc->irq, dcss_crtc_irq_handler, IRQF_NO_AUTOEN,
+ "dcss_drm", crtc);
+ if (ret) {
+ dev_err(dcss->dev, "irq request failed with %d.\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void dcss_crtc_deinit(struct dcss_crtc *crtc, struct drm_device *drm)
+{
+ free_irq(crtc->irq, crtc);
+}
diff --git a/drivers/gpu/drm/imx/dcss/dcss-ctxld.c b/drivers/gpu/drm/imx/dcss/dcss-ctxld.c
new file mode 100644
index 000000000000..e41d5c2a3ea4
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-ctxld.c
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dcss-dev.h"
+
+#define DCSS_CTXLD_CONTROL_STATUS 0x0
+#define CTXLD_ENABLE BIT(0)
+#define ARB_SEL BIT(1)
+#define RD_ERR_EN BIT(2)
+#define DB_COMP_EN BIT(3)
+#define SB_HP_COMP_EN BIT(4)
+#define SB_LP_COMP_EN BIT(5)
+#define DB_PEND_SB_REC_EN BIT(6)
+#define SB_PEND_DISP_ACTIVE_EN BIT(7)
+#define AHB_ERR_EN BIT(8)
+#define RD_ERR BIT(16)
+#define DB_COMP BIT(17)
+#define SB_HP_COMP BIT(18)
+#define SB_LP_COMP BIT(19)
+#define DB_PEND_SB_REC BIT(20)
+#define SB_PEND_DISP_ACTIVE BIT(21)
+#define AHB_ERR BIT(22)
+#define DCSS_CTXLD_DB_BASE_ADDR 0x10
+#define DCSS_CTXLD_DB_COUNT 0x14
+#define DCSS_CTXLD_SB_BASE_ADDR 0x18
+#define DCSS_CTXLD_SB_COUNT 0x1C
+#define SB_HP_COUNT_POS 0
+#define SB_HP_COUNT_MASK 0xffff
+#define SB_LP_COUNT_POS 16
+#define SB_LP_COUNT_MASK 0xffff0000
+#define DCSS_AHB_ERR_ADDR 0x20
+
+#define CTXLD_IRQ_COMPLETION (DB_COMP | SB_HP_COMP | SB_LP_COMP)
+#define CTXLD_IRQ_ERROR (RD_ERR | DB_PEND_SB_REC | AHB_ERR)
+
+/* The following sizes are in context loader entries, 8 bytes each. */
+#define CTXLD_DB_CTX_ENTRIES 1024 /* max 65536 */
+#define CTXLD_SB_LP_CTX_ENTRIES 10240 /* max 65536 */
+#define CTXLD_SB_HP_CTX_ENTRIES 20000 /* max 65536 */
+#define CTXLD_SB_CTX_ENTRIES (CTXLD_SB_LP_CTX_ENTRIES + \
+ CTXLD_SB_HP_CTX_ENTRIES)
+
+/* Sizes, in entries, of the DB, SB_HP and SB_LP context regions. */
+static u16 dcss_ctxld_ctx_size[3] = {
+ CTXLD_DB_CTX_ENTRIES,
+ CTXLD_SB_HP_CTX_ENTRIES,
+ CTXLD_SB_LP_CTX_ENTRIES
+};
+
+/* this represents an entry in the context loader map */
+struct dcss_ctxld_item {
+ u32 val;
+ u32 ofs;
+};
+
+#define CTX_ITEM_SIZE sizeof(struct dcss_ctxld_item)
+
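+/*
+ * Two context sets are double-buffered: while the hardware loads one set,
+ * the driver fills the other. current_ctx selects the set being filled.
+ */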
+struct dcss_ctxld {
+ struct device *dev;
+ void __iomem *ctxld_reg;
+ int irq;
+ bool irq_en;
+
+ struct dcss_ctxld_item *db[2];
+ struct dcss_ctxld_item *sb_hp[2];
+ struct dcss_ctxld_item *sb_lp[2];
+
+ dma_addr_t db_paddr[2];
+ dma_addr_t sb_paddr[2];
+
+ u16 ctx_size[2][3]; /* holds the sizes of DB, SB_HP and SB_LP ctx */
+ u8 current_ctx;
+
+ bool in_use;
+ bool armed;
+
+	spinlock_t lock; /* protects concurrent access to private data */
+};
+
+static irqreturn_t dcss_ctxld_irq_handler(int irq, void *data)
+{
+ struct dcss_ctxld *ctxld = data;
+ struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);
+ u32 irq_status;
+
+ irq_status = dcss_readl(ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
+
+ if (irq_status & CTXLD_IRQ_COMPLETION &&
+ !(irq_status & CTXLD_ENABLE) && ctxld->in_use) {
+ ctxld->in_use = false;
+
+ if (dcss && dcss->disable_callback)
+ dcss->disable_callback(dcss);
+ } else if (irq_status & CTXLD_IRQ_ERROR) {
+ /*
+ * Except for throwing an error message and clearing the status
+ * register, there's not much we can do here.
+ */
+ dev_err(ctxld->dev, "ctxld: error encountered: %08x\n",
+ irq_status);
+ dev_err(ctxld->dev, "ctxld: db=%d, sb_hp=%d, sb_lp=%d\n",
+ ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_DB],
+ ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_HP],
+ ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_LP]);
+ }
+
+ dcss_clr(irq_status & (CTXLD_IRQ_ERROR | CTXLD_IRQ_COMPLETION),
+ ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static int dcss_ctxld_irq_config(struct dcss_ctxld *ctxld,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ ctxld->irq = platform_get_irq_byname(pdev, "ctxld");
+ if (ctxld->irq < 0)
+ return ctxld->irq;
+
+ ret = request_irq(ctxld->irq, dcss_ctxld_irq_handler,
+ 0, "dcss_ctxld", ctxld);
+ if (ret) {
+ dev_err(ctxld->dev, "ctxld: irq request failed.\n");
+ return ret;
+ }
+
+ ctxld->irq_en = true;
+
+ return 0;
+}
+
+static void dcss_ctxld_hw_cfg(struct dcss_ctxld *ctxld)
+{
+ dcss_writel(RD_ERR_EN | SB_HP_COMP_EN |
+ DB_PEND_SB_REC_EN | AHB_ERR_EN | RD_ERR | AHB_ERR,
+ ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
+}
+
+static void dcss_ctxld_free_ctx(struct dcss_ctxld *ctxld)
+{
+ struct dcss_ctxld_item *ctx;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ if (ctxld->db[i]) {
+ dma_free_coherent(ctxld->dev,
+ CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
+ ctxld->db[i], ctxld->db_paddr[i]);
+ ctxld->db[i] = NULL;
+ ctxld->db_paddr[i] = 0;
+ }
+
+ if (ctxld->sb_hp[i]) {
+ dma_free_coherent(ctxld->dev,
+ CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
+ ctxld->sb_hp[i], ctxld->sb_paddr[i]);
+ ctxld->sb_hp[i] = NULL;
+ ctxld->sb_paddr[i] = 0;
+ }
+ }
+}
+
+static int dcss_ctxld_alloc_ctx(struct dcss_ctxld *ctxld)
+{
+ struct dcss_ctxld_item *ctx;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ ctx = dma_alloc_coherent(ctxld->dev,
+ CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
+ &ctxld->db_paddr[i], GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctxld->db[i] = ctx;
+
+ ctx = dma_alloc_coherent(ctxld->dev,
+ CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
+ &ctxld->sb_paddr[i], GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctxld->sb_hp[i] = ctx;
+ ctxld->sb_lp[i] = ctx + CTXLD_SB_HP_CTX_ENTRIES;
+ }
+
+ return 0;
+}
+
+int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base)
+{
+ struct dcss_ctxld *ctxld;
+ int ret;
+
+ ctxld = devm_kzalloc(dcss->dev, sizeof(*ctxld), GFP_KERNEL);
+ if (!ctxld)
+ return -ENOMEM;
+
+ dcss->ctxld = ctxld;
+ ctxld->dev = dcss->dev;
+
+ spin_lock_init(&ctxld->lock);
+
+ ret = dcss_ctxld_alloc_ctx(ctxld);
+ if (ret) {
+ dev_err(dcss->dev, "ctxld: cannot allocate context memory.\n");
+ goto err;
+ }
+
+ ctxld->ctxld_reg = devm_ioremap(dcss->dev, ctxld_base, SZ_4K);
+ if (!ctxld->ctxld_reg) {
+ dev_err(dcss->dev, "ctxld: unable to remap ctxld base\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = dcss_ctxld_irq_config(ctxld, to_platform_device(dcss->dev));
+ if (ret)
+ goto err;
+
+ dcss_ctxld_hw_cfg(ctxld);
+
+ return 0;
+
+err:
+ dcss_ctxld_free_ctx(ctxld);
+
+ return ret;
+}
+
+void dcss_ctxld_exit(struct dcss_ctxld *ctxld)
+{
+ free_irq(ctxld->irq, ctxld);
+
+ dcss_ctxld_free_ctx(ctxld);
+}
+
+static int dcss_ctxld_enable_locked(struct dcss_ctxld *ctxld)
+{
+ int curr_ctx = ctxld->current_ctx;
+ u32 db_base, sb_base, sb_count;
+ u32 sb_hp_cnt, sb_lp_cnt, db_cnt;
+ struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);
+
+ if (!dcss)
+ return 0;
+
+ dcss_dpr_write_sysctrl(dcss->dpr);
+
+ dcss_scaler_write_sclctrl(dcss->scaler);
+
+ sb_hp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_HP];
+ sb_lp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_LP];
+ db_cnt = ctxld->ctx_size[curr_ctx][CTX_DB];
+
+ /* make sure SB_LP context area comes after SB_HP */
+ if (sb_lp_cnt &&
+ ctxld->sb_lp[curr_ctx] != ctxld->sb_hp[curr_ctx] + sb_hp_cnt) {
+ struct dcss_ctxld_item *sb_lp_adjusted;
+
+ sb_lp_adjusted = ctxld->sb_hp[curr_ctx] + sb_hp_cnt;
+
+ memcpy(sb_lp_adjusted, ctxld->sb_lp[curr_ctx],
+ sb_lp_cnt * CTX_ITEM_SIZE);
+ }
+
+ db_base = db_cnt ? ctxld->db_paddr[curr_ctx] : 0;
+
+ dcss_writel(db_base, ctxld->ctxld_reg + DCSS_CTXLD_DB_BASE_ADDR);
+ dcss_writel(db_cnt, ctxld->ctxld_reg + DCSS_CTXLD_DB_COUNT);
+
+ if (sb_hp_cnt)
+ sb_count = ((sb_hp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK) |
+ ((sb_lp_cnt << SB_LP_COUNT_POS) & SB_LP_COUNT_MASK);
+ else
+ sb_count = (sb_lp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK;
+
+ sb_base = sb_count ? ctxld->sb_paddr[curr_ctx] : 0;
+
+ dcss_writel(sb_base, ctxld->ctxld_reg + DCSS_CTXLD_SB_BASE_ADDR);
+ dcss_writel(sb_count, ctxld->ctxld_reg + DCSS_CTXLD_SB_COUNT);
+
+ /* enable the context loader */
+ dcss_set(CTXLD_ENABLE, ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
+
+ ctxld->in_use = true;
+
+ /*
+ * Toggle the current context to the alternate one so that any updates
+ * in the modules' settings take place there.
+ */
+ ctxld->current_ctx ^= 1;
+
+ ctxld->ctx_size[ctxld->current_ctx][CTX_DB] = 0;
+ ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] = 0;
+ ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] = 0;
+
+ return 0;
+}
+
+int dcss_ctxld_enable(struct dcss_ctxld *ctxld)
+{
+ spin_lock_irq(&ctxld->lock);
+ ctxld->armed = true;
+ spin_unlock_irq(&ctxld->lock);
+
+ return 0;
+}
+
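+/*
+ * Start an armed context load unless one is already in progress. This is
+ * normally called from the DTG line interrupt.
+ */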
+void dcss_ctxld_kick(struct dcss_ctxld *ctxld)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctxld->lock, flags);
+ if (ctxld->armed && !ctxld->in_use) {
+ ctxld->armed = false;
+ dcss_ctxld_enable_locked(ctxld);
+ }
+ spin_unlock_irqrestore(&ctxld->lock, flags);
+}
+
+void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctxld, u32 ctx_id, u32 val,
+ u32 reg_ofs)
+{
+ int curr_ctx = ctxld->current_ctx;
+ struct dcss_ctxld_item *ctx[] = {
+ [CTX_DB] = ctxld->db[curr_ctx],
+ [CTX_SB_HP] = ctxld->sb_hp[curr_ctx],
+ [CTX_SB_LP] = ctxld->sb_lp[curr_ctx]
+ };
+ int item_idx = ctxld->ctx_size[curr_ctx][ctx_id];
+
+ if (item_idx + 1 > dcss_ctxld_ctx_size[ctx_id]) {
+ WARN_ON(1);
+ return;
+ }
+
+ ctx[ctx_id][item_idx].val = val;
+ ctx[ctx_id][item_idx].ofs = reg_ofs;
+ ctxld->ctx_size[curr_ctx][ctx_id] += 1;
+}
+
+void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id,
+ u32 val, u32 reg_ofs)
+{
+ spin_lock_irq(&ctxld->lock);
+ dcss_ctxld_write_irqsafe(ctxld, ctx_id, val, reg_ofs);
+ spin_unlock_irq(&ctxld->lock);
+}
+
+bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld)
+{
+ return ctxld->ctx_size[ctxld->current_ctx][CTX_DB] == 0 &&
+ ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] == 0 &&
+ ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] == 0;
+}
+
+int dcss_ctxld_resume(struct dcss_ctxld *ctxld)
+{
+ dcss_ctxld_hw_cfg(ctxld);
+
+ if (!ctxld->irq_en) {
+ enable_irq(ctxld->irq);
+ ctxld->irq_en = true;
+ }
+
+ return 0;
+}
+
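+/* Make sure any pending context has been loaded before suspending. */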
+int dcss_ctxld_suspend(struct dcss_ctxld *ctxld)
+{
+ int ret = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(500);
+
+ if (!dcss_ctxld_is_flushed(ctxld)) {
+ dcss_ctxld_kick(ctxld);
+
+ while (!time_after(jiffies, timeout) && ctxld->in_use)
+ msleep(20);
+
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ }
+
+ spin_lock_irq(&ctxld->lock);
+
+ if (ctxld->irq_en) {
+ disable_irq_nosync(ctxld->irq);
+ ctxld->irq_en = false;
+ }
+
+ /* reset context region and sizes */
+ ctxld->current_ctx = 0;
+ ctxld->ctx_size[0][CTX_DB] = 0;
+ ctxld->ctx_size[0][CTX_SB_HP] = 0;
+ ctxld->ctx_size[0][CTX_SB_LP] = 0;
+
+ spin_unlock_irq(&ctxld->lock);
+
+ return ret;
+}
+
+void dcss_ctxld_assert_locked(struct dcss_ctxld *ctxld)
+{
+ lockdep_assert_held(&ctxld->lock);
+}
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c
new file mode 100644
index 000000000000..7fd0c4c14205
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_modeset_helper.h>
+
+#include "dcss-dev.h"
+#include "dcss-kms.h"
+
+static void dcss_clocks_enable(struct dcss_dev *dcss)
+{
+ clk_prepare_enable(dcss->axi_clk);
+ clk_prepare_enable(dcss->apb_clk);
+ clk_prepare_enable(dcss->rtrm_clk);
+ clk_prepare_enable(dcss->dtrc_clk);
+ clk_prepare_enable(dcss->pix_clk);
+}
+
+static void dcss_clocks_disable(struct dcss_dev *dcss)
+{
+ clk_disable_unprepare(dcss->pix_clk);
+ clk_disable_unprepare(dcss->dtrc_clk);
+ clk_disable_unprepare(dcss->rtrm_clk);
+ clk_disable_unprepare(dcss->apb_clk);
+ clk_disable_unprepare(dcss->axi_clk);
+}
+
+static void dcss_disable_dtg_and_ss_cb(void *data)
+{
+ struct dcss_dev *dcss = data;
+
+ dcss->disable_callback = NULL;
+
+ dcss_ss_shutoff(dcss->ss);
+ dcss_dtg_shutoff(dcss->dtg);
+
+ complete(&dcss->disable_completion);
+}
+
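+/*
+ * The DTG and SS are not shut off here directly; the registered callback
+ * runs from the CTXLD completion interrupt once the last context has loaded.
+ */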
+void dcss_disable_dtg_and_ss(struct dcss_dev *dcss)
+{
+ dcss->disable_callback = dcss_disable_dtg_and_ss_cb;
+}
+
+void dcss_enable_dtg_and_ss(struct dcss_dev *dcss)
+{
+ if (dcss->disable_callback)
+ dcss->disable_callback = NULL;
+
+ dcss_dtg_enable(dcss->dtg);
+ dcss_ss_enable(dcss->ss);
+}
+
+static int dcss_submodules_init(struct dcss_dev *dcss)
+{
+ int ret = 0;
+ u32 base_addr = dcss->start_addr;
+ const struct dcss_type_data *devtype = dcss->devtype;
+
+ dcss_clocks_enable(dcss);
+
+ ret = dcss_blkctl_init(dcss, base_addr + devtype->blkctl_ofs);
+ if (ret)
+ return ret;
+
+ ret = dcss_ctxld_init(dcss, base_addr + devtype->ctxld_ofs);
+ if (ret)
+ goto ctxld_err;
+
+ ret = dcss_dtg_init(dcss, base_addr + devtype->dtg_ofs);
+ if (ret)
+ goto dtg_err;
+
+ ret = dcss_ss_init(dcss, base_addr + devtype->ss_ofs);
+ if (ret)
+ goto ss_err;
+
+ ret = dcss_dpr_init(dcss, base_addr + devtype->dpr_ofs);
+ if (ret)
+ goto dpr_err;
+
+ ret = dcss_scaler_init(dcss, base_addr + devtype->scaler_ofs);
+ if (ret)
+ goto scaler_err;
+
+ dcss_clocks_disable(dcss);
+
+ return 0;
+
+scaler_err:
+ dcss_dpr_exit(dcss->dpr);
+
+dpr_err:
+ dcss_ss_exit(dcss->ss);
+
+ss_err:
+ dcss_dtg_exit(dcss->dtg);
+
+dtg_err:
+ dcss_ctxld_exit(dcss->ctxld);
+
+ctxld_err:
+ dcss_clocks_disable(dcss);
+
+ return ret;
+}
+
+static void dcss_submodules_stop(struct dcss_dev *dcss)
+{
+ dcss_clocks_enable(dcss);
+ dcss_scaler_exit(dcss->scaler);
+ dcss_dpr_exit(dcss->dpr);
+ dcss_ss_exit(dcss->ss);
+ dcss_dtg_exit(dcss->dtg);
+ dcss_ctxld_exit(dcss->ctxld);
+ dcss_clocks_disable(dcss);
+}
+
+static int dcss_clks_init(struct dcss_dev *dcss)
+{
+ int i;
+ struct {
+ const char *id;
+ struct clk **clk;
+ } clks[] = {
+ {"apb", &dcss->apb_clk},
+ {"axi", &dcss->axi_clk},
+ {"pix", &dcss->pix_clk},
+ {"rtrm", &dcss->rtrm_clk},
+ {"dtrc", &dcss->dtrc_clk},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ *clks[i].clk = devm_clk_get(dcss->dev, clks[i].id);
+ if (IS_ERR(*clks[i].clk)) {
+ dev_err(dcss->dev, "failed to get %s clock\n",
+ clks[i].id);
+ return PTR_ERR(*clks[i].clk);
+ }
+ }
+
+ return 0;
+}
+
+static void dcss_clks_release(struct dcss_dev *dcss)
+{
+ devm_clk_put(dcss->dev, dcss->dtrc_clk);
+ devm_clk_put(dcss->dev, dcss->rtrm_clk);
+ devm_clk_put(dcss->dev, dcss->pix_clk);
+ devm_clk_put(dcss->dev, dcss->axi_clk);
+ devm_clk_put(dcss->dev, dcss->apb_clk);
+}
+
+struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+ struct resource *res;
+ struct dcss_dev *dcss;
+ const struct dcss_type_data *devtype;
+
+ devtype = of_device_get_match_data(dev);
+ if (!devtype) {
+ dev_err(dev, "no device match found\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "cannot get memory resource\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!devm_request_mem_region(dev, res->start, resource_size(res), "dcss")) {
+ dev_err(dev, "cannot request memory region\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ dcss = devm_kzalloc(dev, sizeof(*dcss), GFP_KERNEL);
+ if (!dcss)
+ return ERR_PTR(-ENOMEM);
+
+ dcss->dev = dev;
+ dcss->devtype = devtype;
+ dcss->hdmi_output = hdmi_output;
+
+ ret = dcss_clks_init(dcss);
+ if (ret) {
+ dev_err(dev, "clocks initialization failed\n");
+ return ERR_PTR(ret);
+ }
+
+ dcss->of_port = of_graph_get_port_by_id(dev->of_node, 0);
+ if (!dcss->of_port) {
+ dev_err(dev, "no port@0 node in %pOF\n", dev->of_node);
+ ret = -ENODEV;
+ goto clks_err;
+ }
+
+ dcss->start_addr = res->start;
+
+ ret = dcss_submodules_init(dcss);
+ if (ret) {
+ of_node_put(dcss->of_port);
+ dev_err(dev, "submodules initialization failed\n");
+ goto clks_err;
+ }
+
+ init_completion(&dcss->disable_completion);
+
+ pm_runtime_set_autosuspend_delay(dev, 100);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_allow(dev);
+ pm_runtime_enable(dev);
+
+ return dcss;
+
+clks_err:
+ dcss_clks_release(dcss);
+
+ return ERR_PTR(ret);
+}
+
+void dcss_dev_destroy(struct dcss_dev *dcss)
+{
+ if (!pm_runtime_suspended(dcss->dev)) {
+ dcss_ctxld_suspend(dcss->ctxld);
+ dcss_clocks_disable(dcss);
+ }
+
+ of_node_put(dcss->of_port);
+
+ pm_runtime_disable(dcss->dev);
+
+ dcss_submodules_stop(dcss);
+
+ dcss_clks_release(dcss);
+}
+
+static int dcss_dev_suspend(struct device *dev)
+{
+ struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
+ struct drm_device *ddev = dcss_drv_dev_to_drm(dev);
+ int ret;
+
+ drm_mode_config_helper_suspend(ddev);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ ret = dcss_ctxld_suspend(dcss->ctxld);
+ if (ret)
+ return ret;
+
+ dcss_clocks_disable(dcss);
+
+ return 0;
+}
+
+static int dcss_dev_resume(struct device *dev)
+{
+ struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
+ struct drm_device *ddev = dcss_drv_dev_to_drm(dev);
+
+ if (pm_runtime_suspended(dev)) {
+ drm_mode_config_helper_resume(ddev);
+ return 0;
+ }
+
+ dcss_clocks_enable(dcss);
+
+ dcss_blkctl_cfg(dcss->blkctl);
+
+ dcss_ctxld_resume(dcss->ctxld);
+
+ drm_mode_config_helper_resume(ddev);
+
+ return 0;
+}
+
+static int dcss_dev_runtime_suspend(struct device *dev)
+{
+ struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
+ int ret;
+
+ ret = dcss_ctxld_suspend(dcss->ctxld);
+ if (ret)
+ return ret;
+
+ dcss_clocks_disable(dcss);
+
+ return 0;
+}
+
+static int dcss_dev_runtime_resume(struct device *dev)
+{
+ struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
+
+ dcss_clocks_enable(dcss);
+
+ dcss_blkctl_cfg(dcss->blkctl);
+
+ dcss_ctxld_resume(dcss->ctxld);
+
+ return 0;
+}
+
+EXPORT_GPL_DEV_PM_OPS(dcss_dev_pm_ops) = {
+ RUNTIME_PM_OPS(dcss_dev_runtime_suspend, dcss_dev_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(dcss_dev_suspend, dcss_dev_resume)
+};
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.h b/drivers/gpu/drm/imx/dcss/dcss-dev.h
new file mode 100644
index 000000000000..b032e873d227
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 NXP.
+ */
+
+#ifndef __DCSS_PRV_H__
+#define __DCSS_PRV_H__
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <video/videomode.h>
+
+#define SET 0x04
+#define CLR 0x08
+#define TGL 0x0C
+
+#define dcss_writel(v, c) writel((v), (c))
+#define dcss_readl(c) readl(c)
+#define dcss_set(v, c) writel((v), (c) + SET)
+#define dcss_clr(v, c) writel((v), (c) + CLR)
+#define dcss_toggle(v, c) writel((v), (c) + TGL)
+
+static inline void dcss_update(u32 v, u32 m, void __iomem *c)
+{
+ writel((readl(c) & ~(m)) | (v), (c));
+}
+
+#define DCSS_DBG_REG(reg) {.name = #reg, .ofs = reg}
+
+enum {
+ DCSS_IMX8MQ = 0,
+};
+
+struct dcss_type_data {
+ const char *name;
+ u32 blkctl_ofs;
+ u32 ctxld_ofs;
+ u32 rdsrc_ofs;
+ u32 wrscl_ofs;
+ u32 dtg_ofs;
+ u32 scaler_ofs;
+ u32 ss_ofs;
+ u32 dpr_ofs;
+ u32 dtrc_ofs;
+ u32 dec400d_ofs;
+ u32 hdr10_ofs;
+};
+
+struct dcss_debug_reg {
+ char *name;
+ u32 ofs;
+};
+
+enum dcss_ctxld_ctx_type {
+ CTX_DB,
+ CTX_SB_HP, /* high-priority */
+ CTX_SB_LP, /* low-priority */
+};
+
+struct dcss_dev {
+ struct device *dev;
+ const struct dcss_type_data *devtype;
+ struct device_node *of_port;
+
+ u32 start_addr;
+
+ struct dcss_blkctl *blkctl;
+ struct dcss_ctxld *ctxld;
+ struct dcss_dpr *dpr;
+ struct dcss_dtg *dtg;
+ struct dcss_ss *ss;
+ struct dcss_hdr10 *hdr10;
+ struct dcss_scaler *scaler;
+ struct dcss_dtrc *dtrc;
+ struct dcss_dec400d *dec400d;
+ struct dcss_wrscl *wrscl;
+ struct dcss_rdsrc *rdsrc;
+
+ struct clk *apb_clk;
+ struct clk *axi_clk;
+ struct clk *pix_clk;
+ struct clk *rtrm_clk;
+ struct clk *dtrc_clk;
+ struct clk *pll_src_clk;
+ struct clk *pll_phy_ref_clk;
+
+ bool hdmi_output;
+
+ void (*disable_callback)(void *data);
+ struct completion disable_completion;
+};
+
+struct dcss_dev *dcss_drv_dev_to_dcss(struct device *dev);
+struct drm_device *dcss_drv_dev_to_drm(struct device *dev);
+struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output);
+void dcss_dev_destroy(struct dcss_dev *dcss);
+void dcss_enable_dtg_and_ss(struct dcss_dev *dcss);
+void dcss_disable_dtg_and_ss(struct dcss_dev *dcss);
+
+extern const struct dev_pm_ops dcss_dev_pm_ops;
+
+/* BLKCTL */
+int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base);
+void dcss_blkctl_cfg(struct dcss_blkctl *blkctl);
+
+/* CTXLD */
+int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base);
+void dcss_ctxld_exit(struct dcss_ctxld *ctxld);
+void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id,
+ u32 val, u32 reg_idx);
+int dcss_ctxld_resume(struct dcss_ctxld *dcss_ctxld);
+int dcss_ctxld_suspend(struct dcss_ctxld *dcss_ctxld);
+void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctlxd, u32 ctx_id, u32 val,
+ u32 reg_ofs);
+void dcss_ctxld_kick(struct dcss_ctxld *ctxld);
+bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld);
+int dcss_ctxld_enable(struct dcss_ctxld *ctxld);
+void dcss_ctxld_register_completion(struct dcss_ctxld *ctxld,
+ struct completion *dis_completion);
+void dcss_ctxld_assert_locked(struct dcss_ctxld *ctxld);
+
+/* DPR */
+int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base);
+void dcss_dpr_exit(struct dcss_dpr *dpr);
+void dcss_dpr_write_sysctrl(struct dcss_dpr *dpr);
+void dcss_dpr_set_res(struct dcss_dpr *dpr, int ch_num, u32 xres, u32 yres);
+void dcss_dpr_addr_set(struct dcss_dpr *dpr, int ch_num, u32 luma_base_addr,
+ u32 chroma_base_addr, u16 pitch);
+void dcss_dpr_enable(struct dcss_dpr *dpr, int ch_num, bool en);
+void dcss_dpr_format_set(struct dcss_dpr *dpr, int ch_num,
+ const struct drm_format_info *format, u64 modifier);
+void dcss_dpr_set_rotation(struct dcss_dpr *dpr, int ch_num, u32 rotation);
+
+/* DTG */
+int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base);
+void dcss_dtg_exit(struct dcss_dtg *dtg);
+bool dcss_dtg_vblank_irq_valid(struct dcss_dtg *dtg);
+void dcss_dtg_vblank_irq_enable(struct dcss_dtg *dtg, bool en);
+void dcss_dtg_vblank_irq_clear(struct dcss_dtg *dtg);
+void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm);
+void dcss_dtg_css_set(struct dcss_dtg *dtg);
+void dcss_dtg_enable(struct dcss_dtg *dtg);
+void dcss_dtg_shutoff(struct dcss_dtg *dtg);
+bool dcss_dtg_is_enabled(struct dcss_dtg *dtg);
+void dcss_dtg_ctxld_kick_irq_enable(struct dcss_dtg *dtg, bool en);
+bool dcss_dtg_global_alpha_changed(struct dcss_dtg *dtg, int ch_num, int alpha);
+void dcss_dtg_plane_alpha_set(struct dcss_dtg *dtg, int ch_num,
+ const struct drm_format_info *format, int alpha);
+void dcss_dtg_plane_pos_set(struct dcss_dtg *dtg, int ch_num,
+ int px, int py, int pw, int ph);
+void dcss_dtg_ch_enable(struct dcss_dtg *dtg, int ch_num, bool en);
+
+/* SUBSAM */
+int dcss_ss_init(struct dcss_dev *dcss, unsigned long subsam_base);
+void dcss_ss_exit(struct dcss_ss *ss);
+void dcss_ss_enable(struct dcss_ss *ss);
+void dcss_ss_shutoff(struct dcss_ss *ss);
+void dcss_ss_subsam_set(struct dcss_ss *ss);
+void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm,
+ bool phsync, bool pvsync);
+
+/* SCALER */
+int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base);
+void dcss_scaler_exit(struct dcss_scaler *scl);
+void dcss_scaler_set_filter(struct dcss_scaler *scl, int ch_num,
+ enum drm_scaling_filter scaling_filter);
+void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num,
+ const struct drm_format_info *format,
+ int src_xres, int src_yres, int dst_xres, int dst_yres,
+ u32 vrefresh_hz);
+void dcss_scaler_ch_enable(struct dcss_scaler *scl, int ch_num, bool en);
+int dcss_scaler_get_min_max_ratios(struct dcss_scaler *scl, int ch_num,
+ int *min, int *max);
+void dcss_scaler_write_sclctrl(struct dcss_scaler *scl);
+
+#endif /* __DCSS_PRV_H__ */
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dpr.c b/drivers/gpu/drm/imx/dcss/dcss-dpr.c
new file mode 100644
index 000000000000..072eb209249f
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-dpr.c
@@ -0,0 +1,547 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include "dcss-dev.h"
+
+#define DCSS_DPR_SYSTEM_CTRL0 0x000
+#define RUN_EN BIT(0)
+#define SOFT_RESET BIT(1)
+#define REPEAT_EN BIT(2)
+#define SHADOW_LOAD_EN BIT(3)
+#define SW_SHADOW_LOAD_SEL BIT(4)
+#define BCMD2AXI_MSTR_ID_CTRL BIT(16)
+#define DCSS_DPR_IRQ_MASK 0x020
+#define DCSS_DPR_IRQ_MASK_STATUS 0x030
+#define DCSS_DPR_IRQ_NONMASK_STATUS 0x040
+#define IRQ_DPR_CTRL_DONE BIT(0)
+#define IRQ_DPR_RUN BIT(1)
+#define IRQ_DPR_SHADOW_LOADED BIT(2)
+#define IRQ_AXI_READ_ERR BIT(3)
+#define DPR2RTR_YRGB_FIFO_OVFL BIT(4)
+#define DPR2RTR_UV_FIFO_OVFL BIT(5)
+#define DPR2RTR_FIFO_LD_BUF_RDY_YRGB_ERR BIT(6)
+#define DPR2RTR_FIFO_LD_BUF_RDY_UV_ERR BIT(7)
+#define DCSS_DPR_MODE_CTRL0 0x050
+#define RTR_3BUF_EN BIT(0)
+#define RTR_4LINE_BUF_EN BIT(1)
+#define TILE_TYPE_POS 2
+#define TILE_TYPE_MASK GENMASK(4, 2)
+#define YUV_EN BIT(6)
+#define COMP_2PLANE_EN BIT(7)
+#define PIX_SIZE_POS 8
+#define PIX_SIZE_MASK GENMASK(9, 8)
+#define PIX_LUMA_UV_SWAP BIT(10)
+#define PIX_UV_SWAP BIT(11)
+#define B_COMP_SEL_POS 12
+#define B_COMP_SEL_MASK GENMASK(13, 12)
+#define G_COMP_SEL_POS 14
+#define G_COMP_SEL_MASK GENMASK(15, 14)
+#define R_COMP_SEL_POS 16
+#define R_COMP_SEL_MASK GENMASK(17, 16)
+#define A_COMP_SEL_POS 18
+#define A_COMP_SEL_MASK GENMASK(19, 18)
+#define DCSS_DPR_FRAME_CTRL0 0x070
+#define HFLIP_EN BIT(0)
+#define VFLIP_EN BIT(1)
+#define ROT_ENC_POS 2
+#define ROT_ENC_MASK GENMASK(3, 2)
+#define ROT_FLIP_ORDER_EN BIT(4)
+#define PITCH_POS 16
+#define PITCH_MASK GENMASK(31, 16)
+#define DCSS_DPR_FRAME_1P_CTRL0 0x090
+#define DCSS_DPR_FRAME_1P_PIX_X_CTRL 0x0A0
+#define DCSS_DPR_FRAME_1P_PIX_Y_CTRL 0x0B0
+#define DCSS_DPR_FRAME_1P_BASE_ADDR 0x0C0
+#define DCSS_DPR_FRAME_2P_CTRL0 0x0E0
+#define DCSS_DPR_FRAME_2P_PIX_X_CTRL 0x0F0
+#define DCSS_DPR_FRAME_2P_PIX_Y_CTRL 0x100
+#define DCSS_DPR_FRAME_2P_BASE_ADDR 0x110
+#define DCSS_DPR_STATUS_CTRL0 0x130
+#define STATUS_MUX_SEL_MASK GENMASK(2, 0)
+#define STATUS_SRC_SEL_POS 16
+#define STATUS_SRC_SEL_MASK GENMASK(18, 16)
+#define DCSS_DPR_STATUS_CTRL1 0x140
+#define DCSS_DPR_RTRAM_CTRL0 0x200
+#define NUM_ROWS_ACTIVE BIT(0)
+#define THRES_HIGH_POS 1
+#define THRES_HIGH_MASK GENMASK(3, 1)
+#define THRES_LOW_POS 4
+#define THRES_LOW_MASK GENMASK(6, 4)
+#define ABORT_SEL BIT(7)
+
+enum dcss_tile_type {
+ TILE_LINEAR = 0,
+ TILE_GPU_STANDARD,
+ TILE_GPU_SUPER,
+ TILE_VPU_YUV420,
+ TILE_VPU_VP9,
+};
+
+enum dcss_pix_size {
+ PIX_SIZE_8,
+ PIX_SIZE_16,
+ PIX_SIZE_32,
+};
+
+struct dcss_dpr_ch {
+ struct dcss_dpr *dpr;
+ void __iomem *base_reg;
+ u32 base_ofs;
+
+ struct drm_format_info format;
+ enum dcss_pix_size pix_size;
+ enum dcss_tile_type tile;
+ bool rtram_4line_en;
+ bool rtram_3buf_en;
+
+ u32 frame_ctrl;
+ u32 mode_ctrl;
+ u32 sys_ctrl;
+ u32 rtram_ctrl;
+
+ bool sys_ctrl_chgd;
+
+ int ch_num;
+ int irq;
+};
+
+struct dcss_dpr {
+ struct device *dev;
+ struct dcss_ctxld *ctxld;
+ u32 ctx_id;
+
+ struct dcss_dpr_ch ch[3];
+};
+
+static void dcss_dpr_write(struct dcss_dpr_ch *ch, u32 val, u32 ofs)
+{
+ struct dcss_dpr *dpr = ch->dpr;
+
+ dcss_ctxld_write(dpr->ctxld, dpr->ctx_id, val, ch->base_ofs + ofs);
+}
+
+static int dcss_dpr_ch_init_all(struct dcss_dpr *dpr, unsigned long dpr_base)
+{
+ struct dcss_dpr_ch *ch;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ ch = &dpr->ch[i];
+
+ ch->base_ofs = dpr_base + i * 0x1000;
+
+ ch->base_reg = devm_ioremap(dpr->dev, ch->base_ofs, SZ_4K);
+ if (!ch->base_reg) {
+ dev_err(dpr->dev, "dpr: unable to remap ch %d base\n",
+ i);
+ return -ENOMEM;
+ }
+
+ ch->dpr = dpr;
+ ch->ch_num = i;
+
+ dcss_writel(0xff, ch->base_reg + DCSS_DPR_IRQ_MASK);
+ }
+
+ return 0;
+}
+
+int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base)
+{
+ struct dcss_dpr *dpr;
+
+ dpr = devm_kzalloc(dcss->dev, sizeof(*dpr), GFP_KERNEL);
+ if (!dpr)
+ return -ENOMEM;
+
+ dcss->dpr = dpr;
+ dpr->dev = dcss->dev;
+ dpr->ctxld = dcss->ctxld;
+ dpr->ctx_id = CTX_SB_HP;
+
+ if (dcss_dpr_ch_init_all(dpr, dpr_base))
+ return -ENOMEM;
+
+ return 0;
+}
+
+void dcss_dpr_exit(struct dcss_dpr *dpr)
+{
+ int ch_no;
+
+ /* stop DPR on all channels */
+ for (ch_no = 0; ch_no < 3; ch_no++) {
+ struct dcss_dpr_ch *ch = &dpr->ch[ch_no];
+
+ dcss_writel(0, ch->base_reg + DCSS_DPR_SYSTEM_CTRL0);
+ }
+}
+
+static u32 dcss_dpr_x_pix_wide_adjust(struct dcss_dpr_ch *ch, u32 pix_wide,
+ u32 pix_format)
+{
+ u8 pix_in_64byte_map[3][5] = {
+ /* LIN, GPU_STD, GPU_SUP, VPU_YUV420, VPU_VP9 */
+ { 64, 8, 8, 8, 16}, /* PIX_SIZE_8 */
+ { 32, 8, 8, 8, 8}, /* PIX_SIZE_16 */
+ { 16, 4, 4, 8, 8}, /* PIX_SIZE_32 */
+ };
+ u32 offset;
+ u32 div_64byte_mod, pix_in_64byte;
+
+ pix_in_64byte = pix_in_64byte_map[ch->pix_size][ch->tile];
+
+ div_64byte_mod = pix_wide % pix_in_64byte;
+ offset = (div_64byte_mod == 0) ? 0 : (pix_in_64byte - div_64byte_mod);
+
+ return pix_wide + offset;
+}
+
+static u32 dcss_dpr_y_pix_high_adjust(struct dcss_dpr_ch *ch, u32 pix_high,
+ u32 pix_format)
+{
+ u8 num_rows_buf = ch->rtram_4line_en ? 4 : 8;
+ u32 offset, pix_y_mod;
+
+ pix_y_mod = pix_high % num_rows_buf;
+ offset = pix_y_mod ? (num_rows_buf - pix_y_mod) : 0;
+
+ return pix_high + offset;
+}
+
+void dcss_dpr_set_res(struct dcss_dpr *dpr, int ch_num, u32 xres, u32 yres)
+{
+ struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
+ u32 pix_format = ch->format.format;
+ u32 gap = DCSS_DPR_FRAME_2P_BASE_ADDR - DCSS_DPR_FRAME_1P_BASE_ADDR;
+ int plane, max_planes = 1;
+ u32 pix_x_wide, pix_y_high;
+
+ if (pix_format == DRM_FORMAT_NV12 ||
+ pix_format == DRM_FORMAT_NV21)
+ max_planes = 2;
+
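+	/* For NV12/NV21, the second plane holds half-height chroma. */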
+ for (plane = 0; plane < max_planes; plane++) {
+ yres = plane == 1 ? yres >> 1 : yres;
+
+ pix_x_wide = dcss_dpr_x_pix_wide_adjust(ch, xres, pix_format);
+ pix_y_high = dcss_dpr_y_pix_high_adjust(ch, yres, pix_format);
+
+ dcss_dpr_write(ch, pix_x_wide,
+ DCSS_DPR_FRAME_1P_PIX_X_CTRL + plane * gap);
+ dcss_dpr_write(ch, pix_y_high,
+ DCSS_DPR_FRAME_1P_PIX_Y_CTRL + plane * gap);
+
+ dcss_dpr_write(ch, 2, DCSS_DPR_FRAME_1P_CTRL0 + plane * gap);
+ }
+}
+
+void dcss_dpr_addr_set(struct dcss_dpr *dpr, int ch_num, u32 luma_base_addr,
+ u32 chroma_base_addr, u16 pitch)
+{
+ struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
+
+ dcss_dpr_write(ch, luma_base_addr, DCSS_DPR_FRAME_1P_BASE_ADDR);
+
+ dcss_dpr_write(ch, chroma_base_addr, DCSS_DPR_FRAME_2P_BASE_ADDR);
+
+ ch->frame_ctrl &= ~PITCH_MASK;
+ ch->frame_ctrl |= (((u32)pitch << PITCH_POS) & PITCH_MASK);
+}
+
+static void dcss_dpr_argb_comp_sel(struct dcss_dpr_ch *ch, int a_sel, int r_sel,
+ int g_sel, int b_sel)
+{
+ u32 sel;
+
+ sel = ((a_sel << A_COMP_SEL_POS) & A_COMP_SEL_MASK) |
+ ((r_sel << R_COMP_SEL_POS) & R_COMP_SEL_MASK) |
+ ((g_sel << G_COMP_SEL_POS) & G_COMP_SEL_MASK) |
+ ((b_sel << B_COMP_SEL_POS) & B_COMP_SEL_MASK);
+
+ ch->mode_ctrl &= ~(A_COMP_SEL_MASK | R_COMP_SEL_MASK |
+ G_COMP_SEL_MASK | B_COMP_SEL_MASK);
+ ch->mode_ctrl |= sel;
+}
+
+static void dcss_dpr_pix_size_set(struct dcss_dpr_ch *ch,
+ const struct drm_format_info *format)
+{
+ u32 val;
+
+ switch (format->format) {
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ val = PIX_SIZE_8;
+ break;
+
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ val = PIX_SIZE_16;
+ break;
+
+ default:
+ val = PIX_SIZE_32;
+ break;
+ }
+
+ ch->pix_size = val;
+
+ ch->mode_ctrl &= ~PIX_SIZE_MASK;
+ ch->mode_ctrl |= ((val << PIX_SIZE_POS) & PIX_SIZE_MASK);
+}
+
+static void dcss_dpr_uv_swap(struct dcss_dpr_ch *ch, bool swap)
+{
+ ch->mode_ctrl &= ~PIX_UV_SWAP;
+ ch->mode_ctrl |= (swap ? PIX_UV_SWAP : 0);
+}
+
+static void dcss_dpr_y_uv_swap(struct dcss_dpr_ch *ch, bool swap)
+{
+ ch->mode_ctrl &= ~PIX_LUMA_UV_SWAP;
+ ch->mode_ctrl |= (swap ? PIX_LUMA_UV_SWAP : 0);
+}
+
+static void dcss_dpr_2plane_en(struct dcss_dpr_ch *ch, bool en)
+{
+ ch->mode_ctrl &= ~COMP_2PLANE_EN;
+ ch->mode_ctrl |= (en ? COMP_2PLANE_EN : 0);
+}
+
+static void dcss_dpr_yuv_en(struct dcss_dpr_ch *ch, bool en)
+{
+ ch->mode_ctrl &= ~YUV_EN;
+ ch->mode_ctrl |= (en ? YUV_EN : 0);
+}
+
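+/*
+ * SYSTEM_CTRL0 itself is not written here; dcss_dpr_write_sysctrl() pushes
+ * it from the CTXLD kick path when the value has changed.
+ */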
+void dcss_dpr_enable(struct dcss_dpr *dpr, int ch_num, bool en)
+{
+ struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
+ u32 sys_ctrl;
+
+ sys_ctrl = (en ? REPEAT_EN | RUN_EN : 0);
+
+ if (en) {
+ dcss_dpr_write(ch, ch->mode_ctrl, DCSS_DPR_MODE_CTRL0);
+ dcss_dpr_write(ch, ch->frame_ctrl, DCSS_DPR_FRAME_CTRL0);
+ dcss_dpr_write(ch, ch->rtram_ctrl, DCSS_DPR_RTRAM_CTRL0);
+ }
+
+ if (ch->sys_ctrl != sys_ctrl)
+ ch->sys_ctrl_chgd = true;
+
+ ch->sys_ctrl = sys_ctrl;
+}
+
+struct rgb_comp_sel {
+ u32 drm_format;
+ int a_sel;
+ int r_sel;
+ int g_sel;
+ int b_sel;
+};
+
+static struct rgb_comp_sel comp_sel_map[] = {
+ {DRM_FORMAT_ARGB8888, 3, 2, 1, 0},
+ {DRM_FORMAT_XRGB8888, 3, 2, 1, 0},
+ {DRM_FORMAT_ABGR8888, 3, 0, 1, 2},
+ {DRM_FORMAT_XBGR8888, 3, 0, 1, 2},
+ {DRM_FORMAT_RGBA8888, 0, 3, 2, 1},
+ {DRM_FORMAT_RGBX8888, 0, 3, 2, 1},
+ {DRM_FORMAT_BGRA8888, 0, 1, 2, 3},
+ {DRM_FORMAT_BGRX8888, 0, 1, 2, 3},
+};
+
+static int to_comp_sel(u32 pix_fmt, int *a_sel, int *r_sel, int *g_sel,
+ int *b_sel)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(comp_sel_map); i++) {
+ if (comp_sel_map[i].drm_format == pix_fmt) {
+ *a_sel = comp_sel_map[i].a_sel;
+ *r_sel = comp_sel_map[i].r_sel;
+ *g_sel = comp_sel_map[i].g_sel;
+ *b_sel = comp_sel_map[i].b_sel;
+
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static void dcss_dpr_rtram_set(struct dcss_dpr_ch *ch, u32 pix_format)
+{
+ u32 val, mask;
+
+ switch (pix_format) {
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV12:
+ ch->rtram_3buf_en = true;
+ ch->rtram_4line_en = false;
+ break;
+
+ default:
+ ch->rtram_3buf_en = true;
+ ch->rtram_4line_en = true;
+ break;
+ }
+
+ val = (ch->rtram_4line_en ? RTR_4LINE_BUF_EN : 0);
+ val |= (ch->rtram_3buf_en ? RTR_3BUF_EN : 0);
+ mask = RTR_4LINE_BUF_EN | RTR_3BUF_EN;
+
+ ch->mode_ctrl &= ~mask;
+ ch->mode_ctrl |= (val & mask);
+
+ val = (ch->rtram_4line_en ? 0 : NUM_ROWS_ACTIVE);
+ val |= (3 << THRES_LOW_POS) & THRES_LOW_MASK;
+ val |= (4 << THRES_HIGH_POS) & THRES_HIGH_MASK;
+ mask = THRES_LOW_MASK | THRES_HIGH_MASK | NUM_ROWS_ACTIVE;
+
+ ch->rtram_ctrl &= ~mask;
+ ch->rtram_ctrl |= (val & mask);
+}
+
+static void dcss_dpr_setup_components(struct dcss_dpr_ch *ch,
+ const struct drm_format_info *format)
+{
+ int a_sel, r_sel, g_sel, b_sel;
+ bool uv_swap, y_uv_swap;
+
+ switch (format->format) {
+ case DRM_FORMAT_YVYU:
+ uv_swap = true;
+ y_uv_swap = true;
+ break;
+
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV21:
+ uv_swap = true;
+ y_uv_swap = false;
+ break;
+
+ case DRM_FORMAT_YUYV:
+ uv_swap = false;
+ y_uv_swap = true;
+ break;
+
+ default:
+ uv_swap = false;
+ y_uv_swap = false;
+ break;
+ }
+
+ dcss_dpr_uv_swap(ch, uv_swap);
+
+ dcss_dpr_y_uv_swap(ch, y_uv_swap);
+
+ if (!format->is_yuv) {
+ if (!to_comp_sel(format->format, &a_sel, &r_sel,
+ &g_sel, &b_sel)) {
+ dcss_dpr_argb_comp_sel(ch, a_sel, r_sel, g_sel, b_sel);
+ } else {
+ dcss_dpr_argb_comp_sel(ch, 3, 2, 1, 0);
+ }
+ } else {
+ dcss_dpr_argb_comp_sel(ch, 0, 0, 0, 0);
+ }
+}
+
+static void dcss_dpr_tile_set(struct dcss_dpr_ch *ch, uint64_t modifier)
+{
+ switch (ch->ch_num) {
+ case 0:
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ ch->tile = TILE_LINEAR;
+ break;
+ case DRM_FORMAT_MOD_VIVANTE_TILED:
+ ch->tile = TILE_GPU_STANDARD;
+ break;
+ case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED:
+ ch->tile = TILE_GPU_SUPER;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ break;
+ case 1:
+ case 2:
+ ch->tile = TILE_LINEAR;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ ch->mode_ctrl &= ~TILE_TYPE_MASK;
+ ch->mode_ctrl |= ((ch->tile << TILE_TYPE_POS) & TILE_TYPE_MASK);
+}
+
+void dcss_dpr_format_set(struct dcss_dpr *dpr, int ch_num,
+ const struct drm_format_info *format, u64 modifier)
+{
+ struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
+
+ ch->format = *format;
+
+ dcss_dpr_yuv_en(ch, format->is_yuv);
+
+ dcss_dpr_pix_size_set(ch, format);
+
+ dcss_dpr_setup_components(ch, format);
+
+ dcss_dpr_2plane_en(ch, format->num_planes == 2);
+
+ dcss_dpr_rtram_set(ch, format->format);
+
+ dcss_dpr_tile_set(ch, modifier);
+}
+
+/* This function will be called from interrupt context. */
+void dcss_dpr_write_sysctrl(struct dcss_dpr *dpr)
+{
+ int chnum;
+
+ dcss_ctxld_assert_locked(dpr->ctxld);
+
+ for (chnum = 0; chnum < 3; chnum++) {
+ struct dcss_dpr_ch *ch = &dpr->ch[chnum];
+
+ if (ch->sys_ctrl_chgd) {
+ dcss_ctxld_write_irqsafe(dpr->ctxld, dpr->ctx_id,
+ ch->sys_ctrl,
+ ch->base_ofs +
+ DCSS_DPR_SYSTEM_CTRL0);
+ ch->sys_ctrl_chgd = false;
+ }
+ }
+}
+
+void dcss_dpr_set_rotation(struct dcss_dpr *dpr, int ch_num, u32 rotation)
+{
+ struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
+
+ ch->frame_ctrl &= ~(HFLIP_EN | VFLIP_EN | ROT_ENC_MASK);
+
+ ch->frame_ctrl |= rotation & DRM_MODE_REFLECT_X ? HFLIP_EN : 0;
+ ch->frame_ctrl |= rotation & DRM_MODE_REFLECT_Y ? VFLIP_EN : 0;
+
+ if (rotation & DRM_MODE_ROTATE_90)
+ ch->frame_ctrl |= 1 << ROT_ENC_POS;
+ else if (rotation & DRM_MODE_ROTATE_180)
+ ch->frame_ctrl |= 2 << ROT_ENC_POS;
+ else if (rotation & DRM_MODE_ROTATE_270)
+ ch->frame_ctrl |= 3 << ROT_ENC_POS;
+}
diff --git a/drivers/gpu/drm/imx/dcss/dcss-drv.c b/drivers/gpu/drm/imx/dcss/dcss-drv.c
new file mode 100644
index 000000000000..19b027cc1dc4
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-drv.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <drm/drm_module.h>
+#include <drm/drm_of.h>
+
+#include "dcss-dev.h"
+#include "dcss-kms.h"
+
+struct dcss_drv {
+ struct dcss_dev *dcss;
+ struct dcss_kms_dev *kms;
+};
+
+struct dcss_dev *dcss_drv_dev_to_dcss(struct device *dev)
+{
+ struct dcss_drv *mdrv = dev_get_drvdata(dev);
+
+ return mdrv ? mdrv->dcss : NULL;
+}
+
+struct drm_device *dcss_drv_dev_to_drm(struct device *dev)
+{
+ struct dcss_drv *mdrv = dev_get_drvdata(dev);
+
+ return mdrv ? &mdrv->kms->base : NULL;
+}
+
+static int dcss_drv_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *remote;
+ struct dcss_drv *mdrv;
+ int err = 0;
+ bool hdmi_output = true;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ remote = of_graph_get_remote_node(dev->of_node, 0, 0);
+ if (!remote)
+ return -ENODEV;
+
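+	/* Assume HDMI output unless the remote node is the NWL MIPI DSI bridge. */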
+ hdmi_output = !of_device_is_compatible(remote, "fsl,imx8mq-nwl-dsi");
+
+ of_node_put(remote);
+
+ mdrv = devm_kzalloc(dev, sizeof(*mdrv), GFP_KERNEL);
+ if (!mdrv)
+ return -ENOMEM;
+
+ mdrv->dcss = dcss_dev_create(dev, hdmi_output);
+ if (IS_ERR(mdrv->dcss))
+ return PTR_ERR(mdrv->dcss);
+
+ dev_set_drvdata(dev, mdrv);
+
+ mdrv->kms = dcss_kms_attach(mdrv->dcss);
+ if (IS_ERR(mdrv->kms)) {
+ err = PTR_ERR(mdrv->kms);
+ dev_err_probe(dev, err, "Failed to initialize KMS\n");
+ goto dcss_shutoff;
+ }
+
+ return 0;
+
+dcss_shutoff:
+ dcss_dev_destroy(mdrv->dcss);
+
+ return err;
+}
+
+static void dcss_drv_platform_remove(struct platform_device *pdev)
+{
+ struct dcss_drv *mdrv = dev_get_drvdata(&pdev->dev);
+
+ dcss_kms_detach(mdrv->kms);
+ dcss_dev_destroy(mdrv->dcss);
+}
+
+static void dcss_drv_platform_shutdown(struct platform_device *pdev)
+{
+ struct dcss_drv *mdrv = dev_get_drvdata(&pdev->dev);
+
+ dcss_kms_shutdown(mdrv->kms);
+}
+
+static struct dcss_type_data dcss_types[] = {
+ [DCSS_IMX8MQ] = {
+ .name = "DCSS_IMX8MQ",
+ .blkctl_ofs = 0x2F000,
+ .ctxld_ofs = 0x23000,
+ .dtg_ofs = 0x20000,
+ .scaler_ofs = 0x1C000,
+ .ss_ofs = 0x1B000,
+ .dpr_ofs = 0x18000,
+ },
+};
+
+static const struct of_device_id dcss_of_match[] = {
+ { .compatible = "nxp,imx8mq-dcss", .data = &dcss_types[DCSS_IMX8MQ], },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, dcss_of_match);
+
+static struct platform_driver dcss_platform_driver = {
+ .probe = dcss_drv_platform_probe,
+ .remove = dcss_drv_platform_remove,
+ .shutdown = dcss_drv_platform_shutdown,
+ .driver = {
+ .name = "imx-dcss",
+ .of_match_table = dcss_of_match,
+ .pm = pm_ptr(&dcss_dev_pm_ops),
+ },
+};
+
+drm_module_platform_driver(dcss_platform_driver);
+
+MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@nxp.com>");
+MODULE_DESCRIPTION("DCSS driver for i.MX8MQ");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dtg.c b/drivers/gpu/drm/imx/dcss/dcss-dtg.c
new file mode 100644
index 000000000000..6bbfd9aa27ac
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-dtg.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dcss-dev.h"
+
+#define DCSS_DTG_TC_CONTROL_STATUS 0x00
+#define CH3_EN BIT(0)
+#define CH2_EN BIT(1)
+#define CH1_EN BIT(2)
+#define OVL_DATA_MODE BIT(3)
+#define BLENDER_VIDEO_ALPHA_SEL BIT(7)
+#define DTG_START BIT(8)
+#define DBY_MODE_EN BIT(9)
+#define CH1_ALPHA_SEL BIT(10)
+#define CSS_PIX_COMP_SWAP_POS 12
+#define CSS_PIX_COMP_SWAP_MASK GENMASK(14, 12)
+#define DEFAULT_FG_ALPHA_POS 24
+#define DEFAULT_FG_ALPHA_MASK GENMASK(31, 24)
+#define DCSS_DTG_TC_DTG 0x04
+#define DCSS_DTG_TC_DISP_TOP 0x08
+#define DCSS_DTG_TC_DISP_BOT 0x0C
+#define DCSS_DTG_TC_CH1_TOP 0x10
+#define DCSS_DTG_TC_CH1_BOT 0x14
+#define DCSS_DTG_TC_CH2_TOP 0x18
+#define DCSS_DTG_TC_CH2_BOT 0x1C
+#define DCSS_DTG_TC_CH3_TOP 0x20
+#define DCSS_DTG_TC_CH3_BOT 0x24
+#define TC_X_POS 0
+#define TC_X_MASK GENMASK(12, 0)
+#define TC_Y_POS 16
+#define TC_Y_MASK GENMASK(28, 16)
+#define DCSS_DTG_TC_CTXLD 0x28
+#define TC_CTXLD_DB_Y_POS 0
+#define TC_CTXLD_DB_Y_MASK GENMASK(12, 0)
+#define TC_CTXLD_SB_Y_POS 16
+#define TC_CTXLD_SB_Y_MASK GENMASK(28, 16)
+#define DCSS_DTG_TC_CH1_BKRND 0x2C
+#define DCSS_DTG_TC_CH2_BKRND 0x30
+#define BKRND_R_Y_COMP_POS 20
+#define BKRND_R_Y_COMP_MASK GENMASK(29, 20)
+#define BKRND_G_U_COMP_POS 10
+#define BKRND_G_U_COMP_MASK GENMASK(19, 10)
+#define BKRND_B_V_COMP_POS 0
+#define BKRND_B_V_COMP_MASK GENMASK(9, 0)
+#define DCSS_DTG_BLENDER_DBY_RANGEINV 0x38
+#define DCSS_DTG_BLENDER_DBY_RANGEMIN 0x3C
+#define DCSS_DTG_BLENDER_DBY_BDP 0x40
+#define DCSS_DTG_BLENDER_BKRND_I 0x44
+#define DCSS_DTG_BLENDER_BKRND_P 0x48
+#define DCSS_DTG_BLENDER_BKRND_T 0x4C
+#define DCSS_DTG_LINE0_INT 0x50
+#define DCSS_DTG_LINE1_INT 0x54
+#define DCSS_DTG_BG_ALPHA_DEFAULT 0x58
+#define DCSS_DTG_INT_STATUS 0x5C
+#define DCSS_DTG_INT_CONTROL 0x60
+#define DCSS_DTG_TC_CH3_BKRND 0x64
+#define DCSS_DTG_INT_MASK 0x68
+#define LINE0_IRQ BIT(0)
+#define LINE1_IRQ BIT(1)
+#define LINE2_IRQ BIT(2)
+#define LINE3_IRQ BIT(3)
+#define DCSS_DTG_LINE2_INT 0x6C
+#define DCSS_DTG_LINE3_INT 0x70
+#define DCSS_DTG_DBY_OL 0x74
+#define DCSS_DTG_DBY_BL 0x78
+#define DCSS_DTG_DBY_EL 0x7C
+
+struct dcss_dtg {
+ struct device *dev;
+ struct dcss_ctxld *ctxld;
+ void __iomem *base_reg;
+ u32 base_ofs;
+
+ u32 ctx_id;
+
+ bool in_use;
+
+ u32 dis_ulc_x;
+ u32 dis_ulc_y;
+
+ u32 control_status;
+ u32 alpha;
+ u32 alpha_cfg;
+
+ int ctxld_kick_irq;
+ bool ctxld_kick_irq_en;
+};
+
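+/*
+ * Before the DTG is started, write the value directly to the register so it
+ * takes effect immediately; in either case, also queue it in the context
+ * loader so the next context load programs the same value.
+ */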
+static void dcss_dtg_write(struct dcss_dtg *dtg, u32 val, u32 ofs)
+{
+ if (!dtg->in_use)
+ dcss_writel(val, dtg->base_reg + ofs);
+
+ dcss_ctxld_write(dtg->ctxld, dtg->ctx_id,
+ val, dtg->base_ofs + ofs);
+}
+
+static irqreturn_t dcss_dtg_irq_handler(int irq, void *data)
+{
+ struct dcss_dtg *dtg = data;
+ u32 status;
+
+ status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);
+
+ if (!(status & LINE0_IRQ))
+ return IRQ_NONE;
+
+ dcss_ctxld_kick(dtg->ctxld);
+
+ dcss_writel(status & LINE0_IRQ, dtg->base_reg + DCSS_DTG_INT_CONTROL);
+
+ return IRQ_HANDLED;
+}
+
+static int dcss_dtg_irq_config(struct dcss_dtg *dtg,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ dtg->ctxld_kick_irq = platform_get_irq_byname(pdev, "ctxld_kick");
+ if (dtg->ctxld_kick_irq < 0)
+ return dtg->ctxld_kick_irq;
+
+ dcss_update(0, LINE0_IRQ | LINE1_IRQ,
+ dtg->base_reg + DCSS_DTG_INT_MASK);
+
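+ /*
+ * Request the kick IRQ disabled (IRQF_NO_AUTOEN); it is only enabled on
+ * demand via dcss_dtg_ctxld_kick_irq_enable().
+ */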
+ ret = request_irq(dtg->ctxld_kick_irq, dcss_dtg_irq_handler,
+ IRQF_NO_AUTOEN, "dcss_ctxld_kick", dtg);
+ if (ret) {
+ dev_err(dtg->dev, "dtg: irq request failed.\n");
+ return ret;
+ }
+
+ dtg->ctxld_kick_irq_en = false;
+
+ return 0;
+}
+
+int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base)
+{
+ int ret = 0;
+ struct dcss_dtg *dtg;
+
+ dtg = devm_kzalloc(dcss->dev, sizeof(*dtg), GFP_KERNEL);
+ if (!dtg)
+ return -ENOMEM;
+
+ dcss->dtg = dtg;
+ dtg->dev = dcss->dev;
+ dtg->ctxld = dcss->ctxld;
+
+ dtg->base_reg = devm_ioremap(dtg->dev, dtg_base, SZ_4K);
+ if (!dtg->base_reg) {
+ dev_err(dtg->dev, "dtg: unable to remap dtg base\n");
+ return -ENOMEM;
+ }
+
+ dtg->base_ofs = dtg_base;
+ dtg->ctx_id = CTX_DB;
+
+ dtg->alpha = 255;
+
+ dtg->control_status |= OVL_DATA_MODE | BLENDER_VIDEO_ALPHA_SEL |
+ ((dtg->alpha << DEFAULT_FG_ALPHA_POS) & DEFAULT_FG_ALPHA_MASK);
+
+ ret = dcss_dtg_irq_config(dtg, to_platform_device(dtg->dev));
+
+ return ret;
+}
+
+void dcss_dtg_exit(struct dcss_dtg *dtg)
+{
+ free_irq(dtg->ctxld_kick_irq, dtg);
+}
+
+void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm)
+{
+ struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dtg->dev);
+ u16 dtg_lrc_x, dtg_lrc_y;
+ u16 dis_ulc_x, dis_ulc_y;
+ u16 dis_lrc_x, dis_lrc_y;
+ u32 sb_ctxld_trig, db_ctxld_trig;
+ u32 pixclock = vm->pixelclock;
+ u32 actual_clk;
+
+ dtg_lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
+ vm->hactive - 1;
+ dtg_lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +
+ vm->vactive - 1;
+ dis_ulc_x = vm->hsync_len + vm->hback_porch - 1;
+ dis_ulc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch - 1;
+ dis_lrc_x = vm->hsync_len + vm->hback_porch + vm->hactive - 1;
+ dis_lrc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch +
+ vm->vactive - 1;
+
+ clk_disable_unprepare(dcss->pix_clk);
+ clk_set_rate(dcss->pix_clk, vm->pixelclock);
+ clk_prepare_enable(dcss->pix_clk);
+
+ actual_clk = clk_get_rate(dcss->pix_clk);
+ if (pixclock != actual_clk) {
+ dev_info(dtg->dev,
+ "Pixel clock set to %u kHz instead of %u kHz.\n",
+ (actual_clk / 1000), (pixclock / 1000));
+ }
+
+ dcss_dtg_write(dtg, ((dtg_lrc_y << TC_Y_POS) | dtg_lrc_x),
+ DCSS_DTG_TC_DTG);
+ dcss_dtg_write(dtg, ((dis_ulc_y << TC_Y_POS) | dis_ulc_x),
+ DCSS_DTG_TC_DISP_TOP);
+ dcss_dtg_write(dtg, ((dis_lrc_y << TC_Y_POS) | dis_lrc_x),
+ DCSS_DTG_TC_DISP_BOT);
+
+ dtg->dis_ulc_x = dis_ulc_x;
+ dtg->dis_ulc_y = dis_ulc_y;
+
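+ /*
+ * Single-buffered context loads trigger at the top of the frame, while
+ * double-buffered ones trigger at 99% of the total frame height.
+ */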
+ sb_ctxld_trig = ((0 * dis_lrc_y / 100) << TC_CTXLD_SB_Y_POS) &
+ TC_CTXLD_SB_Y_MASK;
+ db_ctxld_trig = ((99 * dis_lrc_y / 100) << TC_CTXLD_DB_Y_POS) &
+ TC_CTXLD_DB_Y_MASK;
+
+ dcss_dtg_write(dtg, sb_ctxld_trig | db_ctxld_trig, DCSS_DTG_TC_CTXLD);
+
+ /* vblank trigger */
+ dcss_dtg_write(dtg, 0, DCSS_DTG_LINE1_INT);
+
+ /* CTXLD trigger */
+ dcss_dtg_write(dtg, ((90 * dis_lrc_y) / 100) << 16, DCSS_DTG_LINE0_INT);
+}
+
+void dcss_dtg_plane_pos_set(struct dcss_dtg *dtg, int ch_num,
+ int px, int py, int pw, int ph)
+{
+ u16 p_ulc_x, p_ulc_y;
+ u16 p_lrc_x, p_lrc_y;
+
+ p_ulc_x = dtg->dis_ulc_x + px;
+ p_ulc_y = dtg->dis_ulc_y + py;
+ p_lrc_x = p_ulc_x + pw;
+ p_lrc_y = p_ulc_y + ph;
+
+ if (!px && !py && !pw && !ph) {
+ dcss_dtg_write(dtg, 0, DCSS_DTG_TC_CH1_TOP + 0x8 * ch_num);
+ dcss_dtg_write(dtg, 0, DCSS_DTG_TC_CH1_BOT + 0x8 * ch_num);
+ } else {
+ dcss_dtg_write(dtg, ((p_ulc_y << TC_Y_POS) | p_ulc_x),
+ DCSS_DTG_TC_CH1_TOP + 0x8 * ch_num);
+ dcss_dtg_write(dtg, ((p_lrc_y << TC_Y_POS) | p_lrc_x),
+ DCSS_DTG_TC_CH1_BOT + 0x8 * ch_num);
+ }
+}
+
+bool dcss_dtg_global_alpha_changed(struct dcss_dtg *dtg, int ch_num, int alpha)
+{
+ if (ch_num)
+ return false;
+
+ return alpha != dtg->alpha;
+}
+
+void dcss_dtg_plane_alpha_set(struct dcss_dtg *dtg, int ch_num,
+ const struct drm_format_info *format, int alpha)
+{
+ /* we care about alpha only when channel 0 is concerned */
+ if (ch_num)
+ return;
+
+ /*
+ * Use global alpha if pixel format does not have alpha channel or the
+ * user explicitly chose to use global alpha (i.e. alpha is not OPAQUE).
+ */
+ if (!format->has_alpha || alpha != 255)
+ dtg->alpha_cfg = (alpha << DEFAULT_FG_ALPHA_POS) & DEFAULT_FG_ALPHA_MASK;
+ else /* use per-pixel alpha otherwise */
+ dtg->alpha_cfg = CH1_ALPHA_SEL;
+
+ dtg->alpha = alpha;
+}
+
+void dcss_dtg_css_set(struct dcss_dtg *dtg)
+{
+ dtg->control_status |=
+ (0x5 << CSS_PIX_COMP_SWAP_POS) & CSS_PIX_COMP_SWAP_MASK;
+}
+
+void dcss_dtg_enable(struct dcss_dtg *dtg)
+{
+ dtg->control_status |= DTG_START;
+
+ dtg->control_status &= ~(CH1_ALPHA_SEL | DEFAULT_FG_ALPHA_MASK);
+ dtg->control_status |= dtg->alpha_cfg;
+
+ dcss_dtg_write(dtg, dtg->control_status, DCSS_DTG_TC_CONTROL_STATUS);
+
+ dtg->in_use = true;
+}
+
+void dcss_dtg_shutoff(struct dcss_dtg *dtg)
+{
+ dtg->control_status &= ~DTG_START;
+
+ dcss_writel(dtg->control_status,
+ dtg->base_reg + DCSS_DTG_TC_CONTROL_STATUS);
+
+ dtg->in_use = false;
+}
+
+bool dcss_dtg_is_enabled(struct dcss_dtg *dtg)
+{
+ return dtg->in_use;
+}
+
+void dcss_dtg_ch_enable(struct dcss_dtg *dtg, int ch_num, bool en)
+{
+ u32 ch_en_map[] = {CH1_EN, CH2_EN, CH3_EN};
+ u32 control_status;
+
+ control_status = dtg->control_status & ~ch_en_map[ch_num];
+ control_status |= en ? ch_en_map[ch_num] : 0;
+
+ control_status &= ~(CH1_ALPHA_SEL | DEFAULT_FG_ALPHA_MASK);
+ control_status |= dtg->alpha_cfg;
+
+ if (dtg->control_status != control_status)
+ dcss_dtg_write(dtg, control_status, DCSS_DTG_TC_CONTROL_STATUS);
+
+ dtg->control_status = control_status;
+}
+
+void dcss_dtg_vblank_irq_enable(struct dcss_dtg *dtg, bool en)
+{
+ u32 status;
+ u32 mask = en ? LINE1_IRQ : 0;
+
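+ /* when enabling, acknowledge any stale LINE1 status before unmasking */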
+ if (en) {
+ status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);
+ dcss_writel(status & LINE1_IRQ,
+ dtg->base_reg + DCSS_DTG_INT_CONTROL);
+ }
+
+ dcss_update(mask, LINE1_IRQ, dtg->base_reg + DCSS_DTG_INT_MASK);
+}
+
+void dcss_dtg_ctxld_kick_irq_enable(struct dcss_dtg *dtg, bool en)
+{
+ u32 status;
+ u32 mask = en ? LINE0_IRQ : 0;
+
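+ /*
+ * When enabling, acknowledge any stale LINE0 status, then enable the kick
+ * IRQ and unmask it, but only if it is not already enabled.
+ */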
+ if (en) {
+ status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);
+
+ if (!dtg->ctxld_kick_irq_en) {
+ dcss_writel(status & LINE0_IRQ,
+ dtg->base_reg + DCSS_DTG_INT_CONTROL);
+ enable_irq(dtg->ctxld_kick_irq);
+ dtg->ctxld_kick_irq_en = true;
+ dcss_update(mask, LINE0_IRQ,
+ dtg->base_reg + DCSS_DTG_INT_MASK);
+ }
+
+ return;
+ }
+
+ if (!dtg->ctxld_kick_irq_en)
+ return;
+
+ disable_irq_nosync(dtg->ctxld_kick_irq);
+ dtg->ctxld_kick_irq_en = false;
+
+ dcss_update(mask, LINE0_IRQ, dtg->base_reg + DCSS_DTG_INT_MASK);
+}
+
+void dcss_dtg_vblank_irq_clear(struct dcss_dtg *dtg)
+{
+ dcss_update(LINE1_IRQ, LINE1_IRQ, dtg->base_reg + DCSS_DTG_INT_CONTROL);
+}
+
+bool dcss_dtg_vblank_irq_valid(struct dcss_dtg *dtg)
+{
+ return !!(dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS) & LINE1_IRQ);
+}
+
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
new file mode 100644
index 000000000000..3633e8f3aff6
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "dcss-dev.h"
+#include "dcss-kms.h"
+
+DEFINE_DRM_GEM_DMA_FOPS(dcss_cma_fops);
+
+static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static const struct drm_driver dcss_kms_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+ DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
+ .fops = &dcss_cma_fops,
+ .name = "imx-dcss",
+ .desc = "i.MX8MQ Display Subsystem",
+ .major = 1,
+ .minor = 0,
+ .patchlevel = 0,
+};
+
+static const struct drm_mode_config_helper_funcs dcss_mode_config_helpers = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
+static void dcss_kms_mode_config_init(struct dcss_kms_dev *kms)
+{
+ struct drm_mode_config *config = &kms->base.mode_config;
+
+ drm_mode_config_init(&kms->base);
+
+ config->min_width = 1;
+ config->min_height = 1;
+ config->max_width = 4096;
+ config->max_height = 4096;
+ config->normalize_zpos = true;
+
+ config->funcs = &dcss_drm_mode_config_funcs;
+ config->helper_private = &dcss_mode_config_helpers;
+}
+
+static const struct drm_encoder_funcs dcss_kms_simple_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int dcss_kms_bridge_connector_init(struct dcss_kms_dev *kms)
+{
+ struct drm_device *ddev = &kms->base;
+ struct drm_encoder *encoder = &kms->encoder;
+ struct drm_crtc *crtc = (struct drm_crtc *)&kms->crtc;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ int ret;
+
+ ret = drm_of_find_panel_or_bridge(ddev->dev->of_node, 0, 0,
+ &panel, &bridge);
+ if (ret)
+ return ret;
+
+ if (!bridge) {
+ dev_err(ddev->dev, "No bridge found %d.\n", ret);
+ return -ENODEV;
+ }
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = drm_encoder_init(&kms->base, encoder,
+ &dcss_kms_simple_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret) {
+ dev_err(ddev->dev, "Failed initializing encoder %d.\n", ret);
+ return ret;
+ }
+
+ ret = drm_bridge_attach(encoder, bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret < 0)
+ return ret;
+
+ kms->connector = drm_bridge_connector_init(ddev, encoder);
+ if (IS_ERR(kms->connector)) {
+ dev_err(ddev->dev, "Unable to create bridge connector.\n");
+ return PTR_ERR(kms->connector);
+ }
+
+ drm_connector_attach_encoder(kms->connector, encoder);
+
+ return 0;
+}
+
+struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss)
+{
+ struct dcss_kms_dev *kms;
+ struct drm_device *drm;
+ struct dcss_crtc *crtc;
+ int ret;
+
+ kms = devm_drm_dev_alloc(dcss->dev, &dcss_kms_driver,
+ struct dcss_kms_dev, base);
+ if (IS_ERR(kms))
+ return kms;
+
+ drm = &kms->base;
+ crtc = &kms->crtc;
+
+ drm->dev_private = dcss;
+
+ dcss_kms_mode_config_init(kms);
+
+ ret = drm_vblank_init(drm, 1);
+ if (ret)
+ goto cleanup_mode_config;
+
+ ret = dcss_kms_bridge_connector_init(kms);
+ if (ret)
+ goto cleanup_mode_config;
+
+ ret = dcss_crtc_init(crtc, drm);
+ if (ret)
+ goto cleanup_mode_config;
+
+ drm_mode_config_reset(drm);
+
+ drm_kms_helper_poll_init(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto cleanup_crtc;
+
+ drm_client_setup(drm, NULL);
+
+ return kms;
+
+cleanup_crtc:
+ drm_kms_helper_poll_fini(drm);
+ dcss_crtc_deinit(crtc, drm);
+
+cleanup_mode_config:
+ drm_mode_config_cleanup(drm);
+ drm->dev_private = NULL;
+
+ return ERR_PTR(ret);
+}
+
+void dcss_kms_detach(struct dcss_kms_dev *kms)
+{
+ struct drm_device *drm = &kms->base;
+
+ drm_dev_unregister(drm);
+ drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
+ drm_crtc_vblank_off(&kms->crtc.base);
+ drm_mode_config_cleanup(drm);
+ dcss_crtc_deinit(&kms->crtc, drm);
+ drm->dev_private = NULL;
+}
+
+void dcss_kms_shutdown(struct dcss_kms_dev *kms)
+{
+ struct drm_device *drm = &kms->base;
+
+ drm_atomic_helper_shutdown(drm);
+}
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.h b/drivers/gpu/drm/imx/dcss/dcss-kms.h
new file mode 100644
index 000000000000..62521c1fd6d2
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 NXP.
+ */
+
+#ifndef _DCSS_KMS_H_
+#define _DCSS_KMS_H_
+
+#include <drm/drm_encoder.h>
+
+struct dcss_plane {
+ struct drm_plane base;
+
+ int ch_num;
+};
+
+struct dcss_crtc {
+ struct drm_crtc base;
+ struct drm_crtc_state *state;
+
+ struct dcss_plane *plane[3];
+
+ int irq;
+
+ bool disable_ctxld_kick_irq;
+};
+
+struct dcss_kms_dev {
+ struct drm_device base;
+ struct dcss_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector *connector;
+};
+
+struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss);
+void dcss_kms_detach(struct dcss_kms_dev *kms);
+void dcss_kms_shutdown(struct dcss_kms_dev *kms);
+int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm);
+void dcss_crtc_deinit(struct dcss_crtc *crtc, struct drm_device *drm);
+struct dcss_plane *dcss_plane_init(struct drm_device *drm,
+ unsigned int possible_crtcs,
+ enum drm_plane_type type,
+ unsigned int zpos);
+
+#endif
diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c
new file mode 100644
index 000000000000..0b99b407ac0a
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
+
+#include "dcss-dev.h"
+#include "dcss-kms.h"
+
+static const u32 dcss_common_formats[] = {
+ /* RGB */
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_RGBX1010102,
+ DRM_FORMAT_BGRX1010102,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_RGBA1010102,
+ DRM_FORMAT_BGRA1010102,
+};
+
+static const u64 dcss_video_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
+static const u64 dcss_graphics_format_modifiers[] = {
+ DRM_FORMAT_MOD_VIVANTE_TILED,
+ DRM_FORMAT_MOD_VIVANTE_SUPER_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
+static inline struct dcss_plane *to_dcss_plane(struct drm_plane *p)
+{
+ return container_of(p, struct dcss_plane, base);
+}
+
+static inline bool dcss_plane_fb_is_linear(const struct drm_framebuffer *fb)
+{
+ return ((fb->flags & DRM_MODE_FB_MODIFIERS) == 0) ||
+ ((fb->flags & DRM_MODE_FB_MODIFIERS) != 0 &&
+ fb->modifier == DRM_FORMAT_MOD_LINEAR);
+}
+
+static void dcss_plane_destroy(struct drm_plane *plane)
+{
+ struct dcss_plane *dcss_plane = container_of(plane, struct dcss_plane,
+ base);
+
+ drm_plane_cleanup(plane);
+ kfree(dcss_plane);
+}
+
+static bool dcss_plane_format_mod_supported(struct drm_plane *plane,
+ u32 format,
+ u64 modifier)
+{
+ switch (plane->type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB2101010:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == DRM_FORMAT_MOD_VIVANTE_TILED ||
+ modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED;
+ default:
+ return modifier == DRM_FORMAT_MOD_LINEAR;
+ }
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ return modifier == DRM_FORMAT_MOD_LINEAR;
+ default:
+ return false;
+ }
+}
+
+static const struct drm_plane_funcs dcss_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = dcss_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .format_mod_supported = dcss_plane_format_mod_supported,
+};
+
+static bool dcss_plane_can_rotate(const struct drm_format_info *format,
+ bool mod_present, u64 modifier,
+ unsigned int rotation)
+{
+ bool linear_format = !mod_present || modifier == DRM_FORMAT_MOD_LINEAR;
+ u32 supported_rotation = DRM_MODE_ROTATE_0;
+
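+ /*
+ * Linear RGB and linear NV12/NV21 support 0/180 degree rotation plus
+ * reflections, Vivante (super)tiled RGB supports all rotations, and all
+ * other combinations only allow DRM_MODE_ROTATE_0.
+ */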
+ if (!format->is_yuv && linear_format)
+ supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_MASK;
+ else if (!format->is_yuv &&
+ (modifier == DRM_FORMAT_MOD_VIVANTE_TILED ||
+ modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED))
+ supported_rotation = DRM_MODE_ROTATE_MASK |
+ DRM_MODE_REFLECT_MASK;
+ else if (format->is_yuv && linear_format &&
+ (format->format == DRM_FORMAT_NV12 ||
+ format->format == DRM_FORMAT_NV21))
+ supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_MASK;
+
+ return !!(rotation & supported_rotation);
+}
+
+static bool dcss_plane_is_source_size_allowed(u16 src_w, u16 src_h, u32 pix_fmt)
+{
+ if (src_w < 64 &&
+ (pix_fmt == DRM_FORMAT_NV12 || pix_fmt == DRM_FORMAT_NV21))
+ return false;
+ else if (src_w < 32 &&
+ (pix_fmt == DRM_FORMAT_UYVY || pix_fmt == DRM_FORMAT_VYUY ||
+ pix_fmt == DRM_FORMAT_YUYV || pix_fmt == DRM_FORMAT_YVYU))
+ return false;
+
+ return src_w >= 16 && src_h >= 8;
+}
+
+static int dcss_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct dcss_plane *dcss_plane = to_dcss_plane(plane);
+ struct dcss_dev *dcss = plane->dev->dev_private;
+ struct drm_framebuffer *fb = new_plane_state->fb;
+ bool is_primary_plane = plane->type == DRM_PLANE_TYPE_PRIMARY;
+ struct drm_gem_dma_object *dma_obj;
+ struct drm_crtc_state *crtc_state;
+ int hdisplay, vdisplay;
+ int min, max;
+ int ret;
+
+ if (!fb || !new_plane_state->crtc)
+ return 0;
+
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+ WARN_ON(!dma_obj);
+
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
+
+ hdisplay = crtc_state->adjusted_mode.hdisplay;
+ vdisplay = crtc_state->adjusted_mode.vdisplay;
+
+ if (!dcss_plane_is_source_size_allowed(new_plane_state->src_w >> 16,
+ new_plane_state->src_h >> 16,
+ fb->format->format)) {
+ DRM_DEBUG_KMS("Source plane size is not allowed!\n");
+ return -EINVAL;
+ }
+
+ dcss_scaler_get_min_max_ratios(dcss->scaler, dcss_plane->ch_num,
+ &min, &max);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ min, max, !is_primary_plane,
+ false);
+ if (ret)
+ return ret;
+
+ if (!new_plane_state->visible)
+ return 0;
+
+ if (!dcss_plane_can_rotate(fb->format,
+ !!(fb->flags & DRM_MODE_FB_MODIFIERS),
+ fb->modifier,
+ new_plane_state->rotation)) {
+ DRM_DEBUG_KMS("requested rotation is not allowed!\n");
+ return -EINVAL;
+ }
+
+ if ((new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0 ||
+ new_plane_state->crtc_x + new_plane_state->crtc_w > hdisplay ||
+ new_plane_state->crtc_y + new_plane_state->crtc_h > vdisplay) &&
+ !dcss_plane_fb_is_linear(fb)) {
+ DRM_DEBUG_KMS("requested cropping operation is not allowed!\n");
+ return -EINVAL;
+ }
+
+ if ((fb->flags & DRM_MODE_FB_MODIFIERS) &&
+ !plane->funcs->format_mod_supported(plane,
+ fb->format->format,
+ fb->modifier)) {
+ DRM_DEBUG_KMS("Invalid modifier: %llx", fb->modifier);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dcss_plane_atomic_set_base(struct dcss_plane *dcss_plane)
+{
+ struct drm_plane *plane = &dcss_plane->base;
+ struct drm_plane_state *state = plane->state;
+ struct dcss_dev *dcss = plane->dev->dev_private;
+ struct drm_framebuffer *fb = state->fb;
+ const struct drm_format_info *format = fb->format;
+ struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+ unsigned long p1_ba = 0, p2_ba = 0;
+
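+ /*
+ * state->src coordinates are in 16.16 fixed point: ">> 16" yields whole
+ * pixels, ">> 17" whole units in the 2x subsampled chroma dimensions.
+ */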
+ if (!format->is_yuv ||
+ format->format == DRM_FORMAT_NV12 ||
+ format->format == DRM_FORMAT_NV21)
+ p1_ba = dma_obj->dma_addr + fb->offsets[0] +
+ fb->pitches[0] * (state->src.y1 >> 16) +
+ format->char_per_block[0] * (state->src.x1 >> 16);
+ else if (format->format == DRM_FORMAT_UYVY ||
+ format->format == DRM_FORMAT_VYUY ||
+ format->format == DRM_FORMAT_YUYV ||
+ format->format == DRM_FORMAT_YVYU)
+ p1_ba = dma_obj->dma_addr + fb->offsets[0] +
+ fb->pitches[0] * (state->src.y1 >> 16) +
+ 2 * format->char_per_block[0] * (state->src.x1 >> 17);
+
+ if (format->format == DRM_FORMAT_NV12 ||
+ format->format == DRM_FORMAT_NV21)
+ p2_ba = dma_obj->dma_addr + fb->offsets[1] +
+ (((fb->pitches[1] >> 1) * (state->src.y1 >> 17) +
+ (state->src.x1 >> 17)) << 1);
+
+ dcss_dpr_addr_set(dcss->dpr, dcss_plane->ch_num, p1_ba, p2_ba,
+ fb->pitches[0]);
+}
+
+static bool dcss_plane_needs_setup(struct drm_plane_state *state,
+ struct drm_plane_state *old_state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *old_fb = old_state->fb;
+
+ return state->crtc_x != old_state->crtc_x ||
+ state->crtc_y != old_state->crtc_y ||
+ state->crtc_w != old_state->crtc_w ||
+ state->crtc_h != old_state->crtc_h ||
+ state->src_x != old_state->src_x ||
+ state->src_y != old_state->src_y ||
+ state->src_w != old_state->src_w ||
+ state->src_h != old_state->src_h ||
+ fb->format->format != old_fb->format->format ||
+ fb->modifier != old_fb->modifier ||
+ state->rotation != old_state->rotation ||
+ state->scaling_filter != old_state->scaling_filter;
+}
+
+static void dcss_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct dcss_plane *dcss_plane = to_dcss_plane(plane);
+ struct dcss_dev *dcss = plane->dev->dev_private;
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_crtc_state *crtc_state;
+ bool modifiers_present;
+ u32 src_w, src_h, dst_w, dst_h;
+ struct drm_rect src, dst;
+ bool enable = true;
+ bool is_rotation_90_or_270;
+
+ if (!fb || !new_state->crtc || !new_state->visible)
+ return;
+
+ crtc_state = new_state->crtc->state;
+ modifiers_present = !!(fb->flags & DRM_MODE_FB_MODIFIERS);
+
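+ /* fast path: only the scanout address changed, just update the DPR base */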
+ if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state) &&
+ !dcss_plane_needs_setup(new_state, old_state)) {
+ dcss_plane_atomic_set_base(dcss_plane);
+ return;
+ }
+
+ src = plane->state->src;
+ dst = plane->state->dst;
+
+ /*
+ * The width and height after clipping.
+ */
+ src_w = drm_rect_width(&src) >> 16;
+ src_h = drm_rect_height(&src) >> 16;
+ dst_w = drm_rect_width(&dst);
+ dst_h = drm_rect_height(&dst);
+
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
+ modifiers_present && fb->modifier == DRM_FORMAT_MOD_LINEAR)
+ modifiers_present = false;
+
+ dcss_dpr_format_set(dcss->dpr, dcss_plane->ch_num,
+ new_state->fb->format,
+ modifiers_present ? fb->modifier :
+ DRM_FORMAT_MOD_LINEAR);
+ dcss_dpr_set_res(dcss->dpr, dcss_plane->ch_num, src_w, src_h);
+ dcss_dpr_set_rotation(dcss->dpr, dcss_plane->ch_num,
+ new_state->rotation);
+
+ dcss_plane_atomic_set_base(dcss_plane);
+
+ is_rotation_90_or_270 = new_state->rotation & (DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_270);
+
+ dcss_scaler_set_filter(dcss->scaler, dcss_plane->ch_num,
+ new_state->scaling_filter);
+
+ dcss_scaler_setup(dcss->scaler, dcss_plane->ch_num,
+ new_state->fb->format,
+ is_rotation_90_or_270 ? src_h : src_w,
+ is_rotation_90_or_270 ? src_w : src_h,
+ dst_w, dst_h,
+ drm_mode_vrefresh(&crtc_state->mode));
+
+ dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num,
+ dst.x1, dst.y1, dst_w, dst_h);
+ dcss_dtg_plane_alpha_set(dcss->dtg, dcss_plane->ch_num,
+ fb->format, new_state->alpha >> 8);
+
+ if (!dcss_plane->ch_num && (new_state->alpha >> 8) == 0)
+ enable = false;
+
+ dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, enable);
+ dcss_scaler_ch_enable(dcss->scaler, dcss_plane->ch_num, enable);
+
+ if (!enable)
+ dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num,
+ 0, 0, 0, 0);
+
+ dcss_dtg_ch_enable(dcss->dtg, dcss_plane->ch_num, enable);
+}
+
+static void dcss_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct dcss_plane *dcss_plane = to_dcss_plane(plane);
+ struct dcss_dev *dcss = plane->dev->dev_private;
+
+ dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, false);
+ dcss_scaler_ch_enable(dcss->scaler, dcss_plane->ch_num, false);
+ dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num, 0, 0, 0, 0);
+ dcss_dtg_ch_enable(dcss->dtg, dcss_plane->ch_num, false);
+}
+
+static const struct drm_plane_helper_funcs dcss_plane_helper_funcs = {
+ .atomic_check = dcss_plane_atomic_check,
+ .atomic_update = dcss_plane_atomic_update,
+ .atomic_disable = dcss_plane_atomic_disable,
+};
+
+struct dcss_plane *dcss_plane_init(struct drm_device *drm,
+ unsigned int possible_crtcs,
+ enum drm_plane_type type,
+ unsigned int zpos)
+{
+ struct dcss_plane *dcss_plane;
+ const u64 *format_modifiers = dcss_video_format_modifiers;
+ int ret;
+
+ if (zpos > 2)
+ return ERR_PTR(-EINVAL);
+
+ dcss_plane = kzalloc(sizeof(*dcss_plane), GFP_KERNEL);
+ if (!dcss_plane) {
+ DRM_ERROR("failed to allocate plane\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ format_modifiers = dcss_graphics_format_modifiers;
+
+ ret = drm_universal_plane_init(drm, &dcss_plane->base, possible_crtcs,
+ &dcss_plane_funcs, dcss_common_formats,
+ ARRAY_SIZE(dcss_common_formats),
+ format_modifiers, type, NULL);
+ if (ret) {
+ DRM_ERROR("failed to initialize plane\n");
+ kfree(dcss_plane);
+ return ERR_PTR(ret);
+ }
+
+ drm_plane_helper_add(&dcss_plane->base, &dcss_plane_helper_funcs);
+
+ ret = drm_plane_create_zpos_immutable_property(&dcss_plane->base, zpos);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_plane_create_scaling_filter_property(&dcss_plane->base,
+ BIT(DRM_SCALING_FILTER_DEFAULT) |
+ BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
+
+ drm_plane_create_rotation_property(&dcss_plane->base,
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 |
+ DRM_MODE_ROTATE_270 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+
+ dcss_plane->ch_num = zpos;
+
+ return dcss_plane;
+}
diff --git a/drivers/gpu/drm/imx/dcss/dcss-scaler.c b/drivers/gpu/drm/imx/dcss/dcss-scaler.c
new file mode 100644
index 000000000000..32c3f46b21da
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-scaler.c
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ *
+ * Scaling algorithms were contributed by Dzung Hoang <dzung.hoang@nxp.com>
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include "dcss-dev.h"
+
+#define DCSS_SCALER_CTRL 0x00
+#define SCALER_EN BIT(0)
+#define REPEAT_EN BIT(4)
+#define SCALE2MEM_EN BIT(8)
+#define MEM2OFIFO_EN BIT(12)
+#define DCSS_SCALER_OFIFO_CTRL 0x04
+#define OFIFO_LOW_THRES_POS 0
+#define OFIFO_LOW_THRES_MASK GENMASK(9, 0)
+#define OFIFO_HIGH_THRES_POS 16
+#define OFIFO_HIGH_THRES_MASK GENMASK(25, 16)
+#define UNDERRUN_DETECT_CLR BIT(26)
+#define LOW_THRES_DETECT_CLR BIT(27)
+#define HIGH_THRES_DETECT_CLR BIT(28)
+#define UNDERRUN_DETECT_EN BIT(29)
+#define LOW_THRES_DETECT_EN BIT(30)
+#define HIGH_THRES_DETECT_EN BIT(31)
+#define DCSS_SCALER_SDATA_CTRL 0x08
+#define YUV_EN BIT(0)
+#define RTRAM_8LINES BIT(1)
+#define Y_UV_BYTE_SWAP BIT(4)
+#define A2R10G10B10_FORMAT_POS 8
+#define A2R10G10B10_FORMAT_MASK GENMASK(11, 8)
+#define DCSS_SCALER_BIT_DEPTH 0x0C
+#define LUM_BIT_DEPTH_POS 0
+#define LUM_BIT_DEPTH_MASK GENMASK(1, 0)
+#define CHR_BIT_DEPTH_POS 4
+#define CHR_BIT_DEPTH_MASK GENMASK(5, 4)
+#define DCSS_SCALER_SRC_FORMAT 0x10
+#define DCSS_SCALER_DST_FORMAT 0x14
+#define FORMAT_MASK GENMASK(1, 0)
+#define DCSS_SCALER_SRC_LUM_RES 0x18
+#define DCSS_SCALER_SRC_CHR_RES 0x1C
+#define DCSS_SCALER_DST_LUM_RES 0x20
+#define DCSS_SCALER_DST_CHR_RES 0x24
+#define WIDTH_POS 0
+#define WIDTH_MASK GENMASK(11, 0)
+#define HEIGHT_POS 16
+#define HEIGHT_MASK GENMASK(27, 16)
+#define DCSS_SCALER_V_LUM_START 0x48
+#define V_START_MASK GENMASK(15, 0)
+#define DCSS_SCALER_V_LUM_INC 0x4C
+#define V_INC_MASK GENMASK(15, 0)
+#define DCSS_SCALER_H_LUM_START 0x50
+#define H_START_MASK GENMASK(18, 0)
+#define DCSS_SCALER_H_LUM_INC 0x54
+#define H_INC_MASK GENMASK(15, 0)
+#define DCSS_SCALER_V_CHR_START 0x58
+#define DCSS_SCALER_V_CHR_INC 0x5C
+#define DCSS_SCALER_H_CHR_START 0x60
+#define DCSS_SCALER_H_CHR_INC 0x64
+#define DCSS_SCALER_COEF_VLUM 0x80
+#define DCSS_SCALER_COEF_HLUM 0x140
+#define DCSS_SCALER_COEF_VCHR 0x200
+#define DCSS_SCALER_COEF_HCHR 0x300
+
+struct dcss_scaler_ch {
+ void __iomem *base_reg;
+ u32 base_ofs;
+ struct dcss_scaler *scl;
+
+ u32 sdata_ctrl;
+ u32 scaler_ctrl;
+
+ bool scaler_ctrl_chgd;
+
+ u32 c_vstart;
+ u32 c_hstart;
+
+ bool use_nn_interpolation;
+};
+
+struct dcss_scaler {
+ struct device *dev;
+
+ struct dcss_ctxld *ctxld;
+ u32 ctx_id;
+
+ struct dcss_scaler_ch ch[3];
+};
+
+/* scaler coefficients generator */
+#define PSC_FRAC_BITS 30
+#define PSC_FRAC_SCALE BIT(PSC_FRAC_BITS)
+#define PSC_BITS_FOR_PHASE 4
+#define PSC_NUM_PHASES 16
+#define PSC_STORED_PHASES (PSC_NUM_PHASES / 2 + 1)
+#define PSC_NUM_TAPS 7
+#define PSC_NUM_TAPS_RGBA 5
+#define PSC_COEFF_PRECISION 10
+#define PSC_PHASE_FRACTION_BITS 13
+#define PSC_PHASE_MASK (PSC_NUM_PHASES - 1)
+#define PSC_Q_FRACTION 19
+#define PSC_Q_ROUND_OFFSET (1 << (PSC_Q_FRACTION - 1))
+
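+/*
+ * The coefficient generator below works in signed fixed point with
+ * PSC_Q_FRACTION (19) fractional bits; mult_q() and div_q() round to nearest.
+ */
+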
+/**
+ * mult_q() - Performs fixed-point multiplication.
+ * @A: multiplier
+ * @B: multiplicand
+ */
+static int mult_q(int A, int B)
+{
+ int result;
+ s64 temp;
+
+ temp = (int64_t)A * (int64_t)B;
+ temp += PSC_Q_ROUND_OFFSET;
+ result = (int)(temp >> PSC_Q_FRACTION);
+ return result;
+}
+
+/**
+ * div_q() - Performs fixed-point division.
+ * @A: dividend
+ * @B: divisor
+ */
+static int div_q(int A, int B)
+{
+ int result;
+ s64 temp;
+
+ temp = (int64_t)A << PSC_Q_FRACTION;
+ if ((temp >= 0 && B >= 0) || (temp < 0 && B < 0))
+ temp += B / 2;
+ else
+ temp -= B / 2;
+
+ result = div_s64(temp, B);
+ return result;
+}
+
+/**
+ * exp_approx_q() - Compute approximation to exp(x) function using Taylor
+ * series.
+ * @x: fixed-point argument of exp function
+ */
+static int exp_approx_q(int x)
+{
+ int sum = 1 << PSC_Q_FRACTION;
+ int term = 1 << PSC_Q_FRACTION;
+
+ term = mult_q(term, div_q(x, 1 << PSC_Q_FRACTION));
+ sum += term;
+ term = mult_q(term, div_q(x, 2 << PSC_Q_FRACTION));
+ sum += term;
+ term = mult_q(term, div_q(x, 3 << PSC_Q_FRACTION));
+ sum += term;
+ term = mult_q(term, div_q(x, 4 << PSC_Q_FRACTION));
+ sum += term;
+
+ return sum;
+}
+
+/**
+ * dcss_scaler_gaussian_filter() - Generate gaussian prototype filter.
+ * @fc_q: fixed-point cutoff frequency normalized to range [0, 1]
+ * @use_5_taps: indicates whether to use 5 taps or 7 taps
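+ * @phase0_identity: force the phase 0 filter to be an identity (pass-through) filter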
+ * @coef: output filter coefficients
+ */
+static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps,
+ bool phase0_identity,
+ int coef[][PSC_NUM_TAPS])
+{
+ int sigma_q, g0_q, g1_q, g2_q;
+ int tap_cnt1, tap_cnt2, tap_idx, phase_cnt;
+ int mid;
+ int phase;
+ int i;
+ int taps;
+
+ if (use_5_taps)
+ for (phase = 0; phase < PSC_STORED_PHASES; phase++) {
+ coef[phase][0] = 0;
+ coef[phase][PSC_NUM_TAPS - 1] = 0;
+ }
+
+ /* seed coefficient scanner */
+ taps = use_5_taps ? PSC_NUM_TAPS_RGBA : PSC_NUM_TAPS;
+ mid = (PSC_NUM_PHASES * taps) / 2 - 1;
+ phase_cnt = (PSC_NUM_PHASES * (PSC_NUM_TAPS + 1)) / 2;
+ tap_cnt1 = (PSC_NUM_PHASES * PSC_NUM_TAPS) / 2;
+ tap_cnt2 = (PSC_NUM_PHASES * PSC_NUM_TAPS) / 2;
+
+ /* seed gaussian filter generator */
+ sigma_q = div_q(PSC_Q_ROUND_OFFSET, fc_q);
+ g0_q = 1 << PSC_Q_FRACTION;
+ g1_q = exp_approx_q(div_q(-PSC_Q_ROUND_OFFSET,
+ mult_q(sigma_q, sigma_q)));
+ g2_q = mult_q(g1_q, g1_q);
+ coef[phase_cnt & PSC_PHASE_MASK][tap_cnt1 >> PSC_BITS_FOR_PHASE] = g0_q;
+
+ for (i = 0; i < mid; i++) {
+ phase_cnt++;
+ tap_cnt1--;
+ tap_cnt2++;
+
+ g0_q = mult_q(g0_q, g1_q);
+ g1_q = mult_q(g1_q, g2_q);
+
+ if ((phase_cnt & PSC_PHASE_MASK) <= 8) {
+ tap_idx = tap_cnt1 >> PSC_BITS_FOR_PHASE;
+ coef[phase_cnt & PSC_PHASE_MASK][tap_idx] = g0_q;
+ }
+ if (((-phase_cnt) & PSC_PHASE_MASK) <= 8) {
+ tap_idx = tap_cnt2 >> PSC_BITS_FOR_PHASE;
+ coef[(-phase_cnt) & PSC_PHASE_MASK][tap_idx] = g0_q;
+ }
+ }
+
+ phase_cnt++;
+ tap_cnt1--;
+ coef[phase_cnt & PSC_PHASE_MASK][tap_cnt1 >> PSC_BITS_FOR_PHASE] = 0;
+
+ /* override phase 0 with identity filter if specified */
+ if (phase0_identity)
+ for (i = 0; i < PSC_NUM_TAPS; i++)
+ coef[0][i] = i == (PSC_NUM_TAPS >> 1) ?
+ (1 << PSC_COEFF_PRECISION) : 0;
+
+ /* normalize coef */
+ for (phase = 0; phase < PSC_STORED_PHASES; phase++) {
+ int sum = 0;
+ s64 ll_temp;
+
+ for (i = 0; i < PSC_NUM_TAPS; i++)
+ sum += coef[phase][i];
+ for (i = 0; i < PSC_NUM_TAPS; i++) {
+ ll_temp = coef[phase][i];
+ ll_temp <<= PSC_COEFF_PRECISION;
+ ll_temp += sum >> 1;
+ ll_temp = div_s64(ll_temp, sum);
+ coef[phase][i] = (int)ll_temp;
+ }
+ }
+}
+
+static void dcss_scaler_nearest_neighbor_filter(bool use_5_taps,
+ int coef[][PSC_NUM_TAPS])
+{
+ int i, j;
+
+ for (i = 0; i < PSC_STORED_PHASES; i++)
+ for (j = 0; j < PSC_NUM_TAPS; j++)
+ coef[i][j] = j == PSC_NUM_TAPS >> 1 ?
+ (1 << PSC_COEFF_PRECISION) : 0;
+}
+
+/**
+ * dcss_scaler_filter_design() - Compute filter coefficients using
+ * Gaussian filter.
+ * @src_length: length of input
+ * @dst_length: length of output
+ * @use_5_taps: 0 for 7 taps per phase, 1 for 5 taps
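+ * @phase0_identity: force the phase 0 filter to be an identity (pass-through) filter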
+ * @coef: output coefficients
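+ * @nn_interpolation: use nearest-neighbor coefficients instead of the Gaussian filter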
+ */
+static void dcss_scaler_filter_design(int src_length, int dst_length,
+ bool use_5_taps, bool phase0_identity,
+ int coef[][PSC_NUM_TAPS],
+ bool nn_interpolation)
+{
+ int fc_q;
+
+ /* compute cutoff frequency */
+ if (dst_length >= src_length)
+ fc_q = div_q(1, PSC_NUM_PHASES);
+ else
+ fc_q = div_q(dst_length, src_length * PSC_NUM_PHASES);
+
+ if (nn_interpolation)
+ dcss_scaler_nearest_neighbor_filter(use_5_taps, coef);
+ else
+ /* compute gaussian filter coefficients */
+ dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef);
+}
+
+static void dcss_scaler_write(struct dcss_scaler_ch *ch, u32 val, u32 ofs)
+{
+ struct dcss_scaler *scl = ch->scl;
+
+ dcss_ctxld_write(scl->ctxld, scl->ctx_id, val, ch->base_ofs + ofs);
+}
+
+static int dcss_scaler_ch_init_all(struct dcss_scaler *scl,
+ unsigned long scaler_base)
+{
+ struct dcss_scaler_ch *ch;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ ch = &scl->ch[i];
+
+ ch->base_ofs = scaler_base + i * 0x400;
+
+ ch->base_reg = devm_ioremap(scl->dev, ch->base_ofs, SZ_4K);
+ if (!ch->base_reg) {
+ dev_err(scl->dev, "scaler: unable to remap ch base\n");
+ return -ENOMEM;
+ }
+
+ ch->scl = scl;
+ }
+
+ return 0;
+}
+
+int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base)
+{
+ struct dcss_scaler *scaler;
+
+ scaler = devm_kzalloc(dcss->dev, sizeof(*scaler), GFP_KERNEL);
+ if (!scaler)
+ return -ENOMEM;
+
+ dcss->scaler = scaler;
+ scaler->dev = dcss->dev;
+ scaler->ctxld = dcss->ctxld;
+ scaler->ctx_id = CTX_SB_HP;
+
+ if (dcss_scaler_ch_init_all(scaler, scaler_base))
+ return -ENOMEM;
+
+ return 0;
+}
+
+void dcss_scaler_exit(struct dcss_scaler *scl)
+{
+ int ch_no;
+
+ for (ch_no = 0; ch_no < 3; ch_no++) {
+ struct dcss_scaler_ch *ch = &scl->ch[ch_no];
+
+ dcss_writel(0, ch->base_reg + DCSS_SCALER_CTRL);
+ }
+}
+
+void dcss_scaler_ch_enable(struct dcss_scaler *scl, int ch_num, bool en)
+{
+ struct dcss_scaler_ch *ch = &scl->ch[ch_num];
+ u32 scaler_ctrl;
+
+ scaler_ctrl = en ? SCALER_EN | REPEAT_EN : 0;
+
+ if (en)
+ dcss_scaler_write(ch, ch->sdata_ctrl, DCSS_SCALER_SDATA_CTRL);
+
+ if (ch->scaler_ctrl != scaler_ctrl)
+ ch->scaler_ctrl_chgd = true;
+
+ ch->scaler_ctrl = scaler_ctrl;
+}
+
+static void dcss_scaler_yuv_enable(struct dcss_scaler_ch *ch, bool en)
+{
+ ch->sdata_ctrl &= ~YUV_EN;
+ ch->sdata_ctrl |= en ? YUV_EN : 0;
+}
+
+static void dcss_scaler_rtr_8lines_enable(struct dcss_scaler_ch *ch, bool en)
+{
+ ch->sdata_ctrl &= ~RTRAM_8LINES;
+ ch->sdata_ctrl |= en ? RTRAM_8LINES : 0;
+}
+
+static void dcss_scaler_bit_depth_set(struct dcss_scaler_ch *ch, int depth)
+{
+ u32 val;
+
+ val = depth == 30 ? 2 : 0;
+
+ dcss_scaler_write(ch,
+ ((val << CHR_BIT_DEPTH_POS) & CHR_BIT_DEPTH_MASK) |
+ ((val << LUM_BIT_DEPTH_POS) & LUM_BIT_DEPTH_MASK),
+ DCSS_SCALER_BIT_DEPTH);
+}
+
+enum buffer_format {
+ BUF_FMT_YUV420,
+ BUF_FMT_YUV422,
+ BUF_FMT_ARGB8888_YUV444,
+};
+
+enum chroma_location {
+ PSC_LOC_HORZ_0_VERT_1_OVER_4 = 0,
+ PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4 = 1,
+ PSC_LOC_HORZ_0_VERT_0 = 2,
+ PSC_LOC_HORZ_1_OVER_4_VERT_0 = 3,
+ PSC_LOC_HORZ_0_VERT_1_OVER_2 = 4,
+ PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2 = 5
+};
+
+static void dcss_scaler_format_set(struct dcss_scaler_ch *ch,
+ enum buffer_format src_fmt,
+ enum buffer_format dst_fmt)
+{
+ dcss_scaler_write(ch, src_fmt, DCSS_SCALER_SRC_FORMAT);
+ dcss_scaler_write(ch, dst_fmt, DCSS_SCALER_DST_FORMAT);
+}
+
+static void dcss_scaler_res_set(struct dcss_scaler_ch *ch,
+ int src_xres, int src_yres,
+ int dst_xres, int dst_yres,
+ u32 pix_format, enum buffer_format dst_format)
+{
+ u32 lsrc_xres, lsrc_yres, csrc_xres, csrc_yres;
+ u32 ldst_xres, ldst_yres, cdst_xres, cdst_yres;
+ bool src_is_444 = true;
+
+ lsrc_xres = src_xres;
+ csrc_xres = src_xres;
+ lsrc_yres = src_yres;
+ csrc_yres = src_yres;
+ ldst_xres = dst_xres;
+ cdst_xres = dst_xres;
+ ldst_yres = dst_yres;
+ cdst_yres = dst_yres;
+
+ if (pix_format == DRM_FORMAT_UYVY || pix_format == DRM_FORMAT_VYUY ||
+ pix_format == DRM_FORMAT_YUYV || pix_format == DRM_FORMAT_YVYU) {
+ csrc_xres >>= 1;
+ src_is_444 = false;
+ } else if (pix_format == DRM_FORMAT_NV12 ||
+ pix_format == DRM_FORMAT_NV21) {
+ csrc_xres >>= 1;
+ csrc_yres >>= 1;
+ src_is_444 = false;
+ }
+
+ if (dst_format == BUF_FMT_YUV422)
+ cdst_xres >>= 1;
+
+ /* for 4:4:4 to 4:2:2 conversion, source height should be 1 less */
+ if (src_is_444 && dst_format == BUF_FMT_YUV422) {
+ lsrc_yres--;
+ csrc_yres--;
+ }
+
+ dcss_scaler_write(ch, (((lsrc_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
+ (((lsrc_xres - 1) << WIDTH_POS) & WIDTH_MASK),
+ DCSS_SCALER_SRC_LUM_RES);
+ dcss_scaler_write(ch, (((csrc_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
+ (((csrc_xres - 1) << WIDTH_POS) & WIDTH_MASK),
+ DCSS_SCALER_SRC_CHR_RES);
+ dcss_scaler_write(ch, (((ldst_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
+ (((ldst_xres - 1) << WIDTH_POS) & WIDTH_MASK),
+ DCSS_SCALER_DST_LUM_RES);
+ dcss_scaler_write(ch, (((cdst_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
+ (((cdst_xres - 1) << WIDTH_POS) & WIDTH_MASK),
+ DCSS_SCALER_DST_CHR_RES);
+}
+
+#define downscale_fp(factor, fp_pos) ((factor) << (fp_pos))
+#define upscale_fp(factor, fp_pos) ((1 << (fp_pos)) / (factor))
+
+struct dcss_scaler_factors {
+ int downscale;
+ int upscale;
+};
+
+static const struct dcss_scaler_factors dcss_scaler_factors[] = {
+ {3, 8}, {5, 8}, {5, 8},
+};
+
+static void dcss_scaler_fractions_set(struct dcss_scaler_ch *ch,
+ int src_xres, int src_yres,
+ int dst_xres, int dst_yres,
+ u32 src_format, u32 dst_format,
+ enum chroma_location src_chroma_loc)
+{
+ int src_c_xres, src_c_yres, dst_c_xres, dst_c_yres;
+ u32 l_vinc, l_hinc, c_vinc, c_hinc;
+ u32 c_vstart, c_hstart;
+
+ src_c_xres = src_xres;
+ src_c_yres = src_yres;
+ dst_c_xres = dst_xres;
+ dst_c_yres = dst_yres;
+
+ c_vstart = 0;
+ c_hstart = 0;
+
+ /* adjustments for source chroma location */
+ if (src_format == BUF_FMT_YUV420) {
+ /* vertical input chroma position adjustment */
+ switch (src_chroma_loc) {
+ case PSC_LOC_HORZ_0_VERT_1_OVER_4:
+ case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4:
+ /*
+ * move chroma up to first luma line
+ * (1/4 chroma input line spacing)
+ */
+ c_vstart -= (1 << (PSC_PHASE_FRACTION_BITS - 2));
+ break;
+ case PSC_LOC_HORZ_0_VERT_1_OVER_2:
+ case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2:
+ /*
+ * move chroma up to first luma line
+ * (1/2 chroma input line spacing)
+ */
+ c_vstart -= (1 << (PSC_PHASE_FRACTION_BITS - 1));
+ break;
+ default:
+ break;
+ }
+ /* horizontal input chroma position adjustment */
+ switch (src_chroma_loc) {
+ case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4:
+ case PSC_LOC_HORZ_1_OVER_4_VERT_0:
+ case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2:
+ /* move chroma left 1/4 chroma input sample spacing */
+ c_hstart -= (1 << (PSC_PHASE_FRACTION_BITS - 2));
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* adjustments to chroma resolution */
+ if (src_format == BUF_FMT_YUV420) {
+ src_c_xres >>= 1;
+ src_c_yres >>= 1;
+ } else if (src_format == BUF_FMT_YUV422) {
+ src_c_xres >>= 1;
+ }
+
+ if (dst_format == BUF_FMT_YUV422)
+ dst_c_xres >>= 1;
+
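+ /*
+ * The scan increments are the src/dst ratio in fixed point with 13
+ * fractional bits, rounded to nearest.
+ */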
+ l_vinc = ((src_yres << 13) + (dst_yres >> 1)) / dst_yres;
+ c_vinc = ((src_c_yres << 13) + (dst_c_yres >> 1)) / dst_c_yres;
+ l_hinc = ((src_xres << 13) + (dst_xres >> 1)) / dst_xres;
+ c_hinc = ((src_c_xres << 13) + (dst_c_xres >> 1)) / dst_c_xres;
+
+ /* save chroma start phase */
+ ch->c_vstart = c_vstart;
+ ch->c_hstart = c_hstart;
+
+ dcss_scaler_write(ch, 0, DCSS_SCALER_V_LUM_START);
+ dcss_scaler_write(ch, l_vinc, DCSS_SCALER_V_LUM_INC);
+
+ dcss_scaler_write(ch, 0, DCSS_SCALER_H_LUM_START);
+ dcss_scaler_write(ch, l_hinc, DCSS_SCALER_H_LUM_INC);
+
+ dcss_scaler_write(ch, c_vstart, DCSS_SCALER_V_CHR_START);
+ dcss_scaler_write(ch, c_vinc, DCSS_SCALER_V_CHR_INC);
+
+ dcss_scaler_write(ch, c_hstart, DCSS_SCALER_H_CHR_START);
+ dcss_scaler_write(ch, c_hinc, DCSS_SCALER_H_CHR_INC);
+}
+
+int dcss_scaler_get_min_max_ratios(struct dcss_scaler *scl, int ch_num,
+ int *min, int *max)
+{
+ *min = upscale_fp(dcss_scaler_factors[ch_num].upscale, 16);
+ *max = downscale_fp(dcss_scaler_factors[ch_num].downscale, 16);
+
+ return 0;
+}
+
+static void dcss_scaler_program_5_coef_set(struct dcss_scaler_ch *ch,
+ int base_addr,
+ int coef[][PSC_NUM_TAPS])
+{
+ int i, phase;
+
+ for (i = 0; i < PSC_STORED_PHASES; i++) {
+ dcss_scaler_write(ch, ((coef[i][1] & 0xfff) << 16 |
+ (coef[i][2] & 0xfff) << 4 |
+ (coef[i][3] & 0xf00) >> 8),
+ base_addr + i * sizeof(u32));
+ dcss_scaler_write(ch, ((coef[i][3] & 0x0ff) << 20 |
+ (coef[i][4] & 0xfff) << 8 |
+ (coef[i][5] & 0xff0) >> 4),
+ base_addr + 0x40 + i * sizeof(u32));
+ dcss_scaler_write(ch, ((coef[i][5] & 0x00f) << 24),
+ base_addr + 0x80 + i * sizeof(u32));
+ }
+
+ /* reverse both phase and tap orderings */
+ for (phase = (PSC_NUM_PHASES >> 1) - 1;
+ i < PSC_NUM_PHASES; i++, phase--) {
+ dcss_scaler_write(ch, ((coef[phase][5] & 0xfff) << 16 |
+ (coef[phase][4] & 0xfff) << 4 |
+ (coef[phase][3] & 0xf00) >> 8),
+ base_addr + i * sizeof(u32));
+ dcss_scaler_write(ch, ((coef[phase][3] & 0x0ff) << 20 |
+ (coef[phase][2] & 0xfff) << 8 |
+ (coef[phase][1] & 0xff0) >> 4),
+ base_addr + 0x40 + i * sizeof(u32));
+ dcss_scaler_write(ch, ((coef[phase][1] & 0x00f) << 24),
+ base_addr + 0x80 + i * sizeof(u32));
+ }
+}
+
+static void dcss_scaler_program_7_coef_set(struct dcss_scaler_ch *ch,
+ int base_addr,
+ int coef[][PSC_NUM_TAPS])
+{
+ int i, phase;
+
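+ /*
+ * The seven 12-bit coefficients of each phase are packed back-to-back
+ * across three register banks.
+ */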
+ for (i = 0; i < PSC_STORED_PHASES; i++) {
+ dcss_scaler_write(ch, ((coef[i][0] & 0xfff) << 16 |
+ (coef[i][1] & 0xfff) << 4 |
+ (coef[i][2] & 0xf00) >> 8),
+ base_addr + i * sizeof(u32));
+ dcss_scaler_write(ch, ((coef[i][2] & 0x0ff) << 20 |
+ (coef[i][3] & 0xfff) << 8 |
+ (coef[i][4] & 0xff0) >> 4),
+ base_addr + 0x40 + i * sizeof(u32));
+ dcss_scaler_write(ch, ((coef[i][4] & 0x00f) << 24 |
+ (coef[i][5] & 0xfff) << 12 |
+ (coef[i][6] & 0xfff)),
+ base_addr + 0x80 + i * sizeof(u32));
+ }
+
+ /* reverse both phase and tap orderings */
+ for (phase = (PSC_NUM_PHASES >> 1) - 1;
+ i < PSC_NUM_PHASES; i++, phase--) {
+ dcss_scaler_write(ch, ((coef[phase][6] & 0xfff) << 16 |
+ (coef[phase][5] & 0xfff) << 4 |
+ (coef[phase][4] & 0xf00) >> 8),
+ base_addr + i * sizeof(u32));
+ dcss_scaler_write(ch, ((coef[phase][4] & 0x0ff) << 20 |
+ (coef[phase][3] & 0xfff) << 8 |
+ (coef[phase][2] & 0xff0) >> 4),
+ base_addr + 0x40 + i * sizeof(u32));
+ dcss_scaler_write(ch, ((coef[phase][2] & 0x00f) << 24 |
+ (coef[phase][1] & 0xfff) << 12 |
+ (coef[phase][0] & 0xfff)),
+ base_addr + 0x80 + i * sizeof(u32));
+ }
+}
+
+static void dcss_scaler_yuv_coef_set(struct dcss_scaler_ch *ch,
+ enum buffer_format src_format,
+ enum buffer_format dst_format,
+ bool use_5_taps,
+ int src_xres, int src_yres, int dst_xres,
+ int dst_yres)
+{
+ int coef[PSC_STORED_PHASES][PSC_NUM_TAPS];
+ bool program_5_taps = use_5_taps ||
+ (dst_format == BUF_FMT_YUV422 &&
+ src_format == BUF_FMT_ARGB8888_YUV444);
+
+ /* horizontal luma */
+ dcss_scaler_filter_design(src_xres, dst_xres, false,
+ src_xres == dst_xres, coef,
+ ch->use_nn_interpolation);
+ dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef);
+
+ /* vertical luma */
+ dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps,
+ src_yres == dst_yres, coef,
+ ch->use_nn_interpolation);
+
+ if (program_5_taps)
+ dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
+ else
+ dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
+
+ /* adjust chroma resolution */
+ if (src_format != BUF_FMT_ARGB8888_YUV444)
+ src_xres >>= 1;
+ if (src_format == BUF_FMT_YUV420)
+ src_yres >>= 1;
+ if (dst_format != BUF_FMT_ARGB8888_YUV444)
+ dst_xres >>= 1;
+ if (dst_format == BUF_FMT_YUV420) /* should not happen */
+ dst_yres >>= 1;
+
+ /* horizontal chroma */
+ dcss_scaler_filter_design(src_xres, dst_xres, false,
+ (src_xres == dst_xres) && (ch->c_hstart == 0),
+ coef, ch->use_nn_interpolation);
+
+ dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HCHR, coef);
+
+ /* vertical chroma */
+ dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps,
+ (src_yres == dst_yres) && (ch->c_vstart == 0),
+ coef, ch->use_nn_interpolation);
+ if (program_5_taps)
+ dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef);
+ else
+ dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef);
+}
+
+static void dcss_scaler_rgb_coef_set(struct dcss_scaler_ch *ch,
+ int src_xres, int src_yres, int dst_xres,
+ int dst_yres)
+{
+ int coef[PSC_STORED_PHASES][PSC_NUM_TAPS];
+
+ /* horizontal RGB */
+ dcss_scaler_filter_design(src_xres, dst_xres, false,
+ src_xres == dst_xres, coef,
+ ch->use_nn_interpolation);
+ dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef);
+
+ /* vertical RGB */
+ dcss_scaler_filter_design(src_yres, dst_yres, false,
+ src_yres == dst_yres, coef,
+ ch->use_nn_interpolation);
+ dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
+}
+
+static void dcss_scaler_set_rgb10_order(struct dcss_scaler_ch *ch,
+ const struct drm_format_info *format)
+{
+ u32 a2r10g10b10_format;
+
+ if (format->is_yuv)
+ return;
+
+ ch->sdata_ctrl &= ~A2R10G10B10_FORMAT_MASK;
+
+ if (format->depth != 30)
+ return;
+
+ switch (format->format) {
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_XRGB2101010:
+ a2r10g10b10_format = 0;
+ break;
+
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XBGR2101010:
+ a2r10g10b10_format = 5;
+ break;
+
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_RGBX1010102:
+ a2r10g10b10_format = 6;
+ break;
+
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_BGRX1010102:
+ a2r10g10b10_format = 11;
+ break;
+
+ default:
+ a2r10g10b10_format = 0;
+ break;
+ }
+
+ ch->sdata_ctrl |= a2r10g10b10_format << A2R10G10B10_FORMAT_POS;
+}
+
+void dcss_scaler_set_filter(struct dcss_scaler *scl, int ch_num,
+ enum drm_scaling_filter scaling_filter)
+{
+ struct dcss_scaler_ch *ch = &scl->ch[ch_num];
+
+ ch->use_nn_interpolation = scaling_filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR;
+}
+
+void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num,
+ const struct drm_format_info *format,
+ int src_xres, int src_yres, int dst_xres, int dst_yres,
+ u32 vrefresh_hz)
+{
+ struct dcss_scaler_ch *ch = &scl->ch[ch_num];
+ unsigned int pixel_depth = 0;
+ bool rtr_8line_en = false;
+ bool use_5_taps = false;
+ enum buffer_format src_format = BUF_FMT_ARGB8888_YUV444;
+ enum buffer_format dst_format = BUF_FMT_ARGB8888_YUV444;
+ u32 pix_format = format->format;
+
+ if (format->is_yuv) {
+ dcss_scaler_yuv_enable(ch, true);
+
+ if (pix_format == DRM_FORMAT_NV12 ||
+ pix_format == DRM_FORMAT_NV21) {
+ rtr_8line_en = true;
+ src_format = BUF_FMT_YUV420;
+ } else if (pix_format == DRM_FORMAT_UYVY ||
+ pix_format == DRM_FORMAT_VYUY ||
+ pix_format == DRM_FORMAT_YUYV ||
+ pix_format == DRM_FORMAT_YVYU) {
+ src_format = BUF_FMT_YUV422;
+ }
+
+ use_5_taps = !rtr_8line_en;
+ } else {
+ dcss_scaler_yuv_enable(ch, false);
+
+ pixel_depth = format->depth;
+ }
+
+ dcss_scaler_fractions_set(ch, src_xres, src_yres, dst_xres,
+ dst_yres, src_format, dst_format,
+ PSC_LOC_HORZ_0_VERT_1_OVER_4);
+
+ if (format->is_yuv)
+ dcss_scaler_yuv_coef_set(ch, src_format, dst_format,
+ use_5_taps, src_xres, src_yres,
+ dst_xres, dst_yres);
+ else
+ dcss_scaler_rgb_coef_set(ch, src_xres, src_yres,
+ dst_xres, dst_yres);
+
+ dcss_scaler_rtr_8lines_enable(ch, rtr_8line_en);
+ dcss_scaler_bit_depth_set(ch, pixel_depth);
+ dcss_scaler_set_rgb10_order(ch, format);
+ dcss_scaler_format_set(ch, src_format, dst_format);
+ dcss_scaler_res_set(ch, src_xres, src_yres, dst_xres, dst_yres,
+ pix_format, dst_format);
+}
+
+/* This function will be called from interrupt context. */
+void dcss_scaler_write_sclctrl(struct dcss_scaler *scl)
+{
+ int chnum;
+
+ dcss_ctxld_assert_locked(scl->ctxld);
+
+ for (chnum = 0; chnum < 3; chnum++) {
+ struct dcss_scaler_ch *ch = &scl->ch[chnum];
+
+ if (ch->scaler_ctrl_chgd) {
+ dcss_ctxld_write_irqsafe(scl->ctxld, scl->ctx_id,
+ ch->scaler_ctrl,
+ ch->base_ofs +
+ DCSS_SCALER_CTRL);
+ ch->scaler_ctrl_chgd = false;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/imx/dcss/dcss-ss.c b/drivers/gpu/drm/imx/dcss/dcss-ss.c
new file mode 100644
index 000000000000..0df81866fb7b
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-ss.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include "dcss-dev.h"
+
+#define DCSS_SS_SYS_CTRL 0x00
+#define RUN_EN BIT(0)
+#define DCSS_SS_DISPLAY 0x10
+#define LRC_X_POS 0
+#define LRC_X_MASK GENMASK(12, 0)
+#define LRC_Y_POS 16
+#define LRC_Y_MASK GENMASK(28, 16)
+#define DCSS_SS_HSYNC 0x20
+#define DCSS_SS_VSYNC 0x30
+#define SYNC_START_POS 0
+#define SYNC_START_MASK GENMASK(12, 0)
+#define SYNC_END_POS 16
+#define SYNC_END_MASK GENMASK(28, 16)
+#define SYNC_POL BIT(31)
+#define DCSS_SS_DE_ULC 0x40
+#define ULC_X_POS 0
+#define ULC_X_MASK GENMASK(12, 0)
+#define ULC_Y_POS 16
+#define ULC_Y_MASK GENMASK(28, 16)
+#define ULC_POL BIT(31)
+#define DCSS_SS_DE_LRC 0x50
+#define DCSS_SS_MODE 0x60
+#define PIPE_MODE_POS 0
+#define PIPE_MODE_MASK GENMASK(1, 0)
+#define DCSS_SS_COEFF 0x70
+#define HORIZ_A_POS 0
+#define HORIZ_A_MASK GENMASK(3, 0)
+#define HORIZ_B_POS 4
+#define HORIZ_B_MASK GENMASK(7, 4)
+#define HORIZ_C_POS 8
+#define HORIZ_C_MASK GENMASK(11, 8)
+#define HORIZ_H_NORM_POS 12
+#define HORIZ_H_NORM_MASK GENMASK(14, 12)
+#define VERT_A_POS 16
+#define VERT_A_MASK GENMASK(19, 16)
+#define VERT_B_POS 20
+#define VERT_B_MASK GENMASK(23, 20)
+#define VERT_C_POS 24
+#define VERT_C_MASK GENMASK(27, 24)
+#define VERT_H_NORM_POS 28
+#define VERT_H_NORM_MASK GENMASK(30, 28)
+#define DCSS_SS_CLIP_CB 0x80
+#define DCSS_SS_CLIP_CR 0x90
+#define CLIP_MIN_POS 0
+#define CLIP_MIN_MASK GENMASK(9, 0)
+#define CLIP_MAX_POS 0
+#define CLIP_MAX_MASK GENMASK(23, 16)
+#define DCSS_SS_INTER_MODE 0xA0
+#define INT_EN BIT(0)
+#define VSYNC_SHIFT BIT(1)
+
+struct dcss_ss {
+ struct device *dev;
+ void __iomem *base_reg;
+ u32 base_ofs;
+
+ struct dcss_ctxld *ctxld;
+ u32 ctx_id;
+
+ bool in_use;
+};
+
+static void dcss_ss_write(struct dcss_ss *ss, u32 val, u32 ofs)
+{
+ if (!ss->in_use)
+ dcss_writel(val, ss->base_reg + ofs);
+
+ dcss_ctxld_write(ss->ctxld, ss->ctx_id, val,
+ ss->base_ofs + ofs);
+}
+
+int dcss_ss_init(struct dcss_dev *dcss, unsigned long ss_base)
+{
+ struct dcss_ss *ss;
+
+ ss = devm_kzalloc(dcss->dev, sizeof(*ss), GFP_KERNEL);
+ if (!ss)
+ return -ENOMEM;
+
+ dcss->ss = ss;
+ ss->dev = dcss->dev;
+ ss->ctxld = dcss->ctxld;
+
+ ss->base_reg = devm_ioremap(ss->dev, ss_base, SZ_4K);
+ if (!ss->base_reg) {
+ dev_err(ss->dev, "ss: unable to remap ss base\n");
+ return -ENOMEM;
+ }
+
+ ss->base_ofs = ss_base;
+ ss->ctx_id = CTX_SB_HP;
+
+ return 0;
+}
+
+void dcss_ss_exit(struct dcss_ss *ss)
+{
+ /* stop SS */
+ dcss_writel(0, ss->base_reg + DCSS_SS_SYS_CTRL);
+}
+
+void dcss_ss_subsam_set(struct dcss_ss *ss)
+{
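+ /* default 4:2:2 subsampling coefficients, pipe mode and Cb/Cr clip limits */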
+ dcss_ss_write(ss, 0x41614161, DCSS_SS_COEFF);
+ dcss_ss_write(ss, 0, DCSS_SS_MODE);
+ dcss_ss_write(ss, 0x03ff0000, DCSS_SS_CLIP_CB);
+ dcss_ss_write(ss, 0x03ff0000, DCSS_SS_CLIP_CR);
+}
+
+void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm,
+ bool phsync, bool pvsync)
+{
+ u16 lrc_x, lrc_y;
+ u16 hsync_start, hsync_end;
+ u16 vsync_start, vsync_end;
+ u16 de_ulc_x, de_ulc_y;
+ u16 de_lrc_x, de_lrc_y;
+
+ lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
+ vm->hactive - 1;
+ lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +
+ vm->vactive - 1;
+
+ dcss_ss_write(ss, (lrc_y << LRC_Y_POS) | lrc_x, DCSS_SS_DISPLAY);
+
+ hsync_start = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
+ vm->hactive - 1;
+ hsync_end = vm->hsync_len - 1;
+
+ dcss_ss_write(ss, (phsync ? SYNC_POL : 0) |
+ ((u32)hsync_end << SYNC_END_POS) | hsync_start,
+ DCSS_SS_HSYNC);
+
+ vsync_start = vm->vfront_porch - 1;
+ vsync_end = vm->vfront_porch + vm->vsync_len - 1;
+
+ dcss_ss_write(ss, (pvsync ? SYNC_POL : 0) |
+ ((u32)vsync_end << SYNC_END_POS) | vsync_start,
+ DCSS_SS_VSYNC);
+
+ de_ulc_x = vm->hsync_len + vm->hback_porch - 1;
+ de_ulc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch;
+
+ dcss_ss_write(ss, SYNC_POL | ((u32)de_ulc_y << ULC_Y_POS) | de_ulc_x,
+ DCSS_SS_DE_ULC);
+
+ de_lrc_x = vm->hsync_len + vm->hback_porch + vm->hactive - 1;
+ de_lrc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch +
+ vm->vactive - 1;
+
+ dcss_ss_write(ss, (de_lrc_y << LRC_Y_POS) | de_lrc_x, DCSS_SS_DE_LRC);
+}
+
+void dcss_ss_enable(struct dcss_ss *ss)
+{
+ dcss_ss_write(ss, RUN_EN, DCSS_SS_SYS_CTRL);
+ ss->in_use = true;
+}
+
+void dcss_ss_shutoff(struct dcss_ss *ss)
+{
+ dcss_writel(0, ss->base_reg + DCSS_SS_SYS_CTRL);
+ ss->in_use = false;
+}
diff --git a/drivers/gpu/drm/imx/ipuv3/Kconfig b/drivers/gpu/drm/imx/ipuv3/Kconfig
new file mode 100644
index 000000000000..acaf25089001
--- /dev/null
+++ b/drivers/gpu/drm/imx/ipuv3/Kconfig
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DRM_IMX
+ tristate "DRM Support for Freescale i.MX"
+ select DRM_CLIENT_SELECTION
+ select DRM_KMS_HELPER
+ select VIDEOMODE_HELPERS
+ select DRM_GEM_DMA_HELPER
+ depends on DRM && (ARCH_MXC || COMPILE_TEST)
+ depends on IMX_IPUV3_CORE
+ help
+ Enable i.MX graphics support.
+
+config DRM_IMX_PARALLEL_DISPLAY
+ tristate "Support for parallel displays"
+ depends on DRM_IMX
+ select DRM_BRIDGE
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_IMX_LEGACY_BRIDGE
+ select DRM_PANEL_BRIDGE
+ select VIDEOMODE_HELPERS
+
+config DRM_IMX_TVE
+ tristate "Support for TV and VGA displays"
+ depends on DRM_IMX
+ depends on COMMON_CLK
+ select REGMAP_MMIO
+ help
+ Choose this to enable the internal Television Encoder (TVe)
+ found on i.MX53 processors.
+
+config DRM_IMX_LDB
+ tristate "Support for LVDS displays"
+ depends on DRM_IMX
+ depends on COMMON_CLK
+ select MFD_SYSCON
+ select DRM_BRIDGE
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_PANEL_BRIDGE
+ select DRM_IMX_LEGACY_BRIDGE
+ help
+ Choose this to enable the internal LVDS Display Bridge (LDB)
+ found on i.MX53 and i.MX6 processors.
+
+config DRM_IMX_HDMI
+ tristate "Freescale i.MX DRM HDMI"
+ select DRM_DW_HDMI
+ depends on DRM_IMX && OF
+ help
+ Choose this if you want to use HDMI on i.MX6.
diff --git a/drivers/gpu/drm/imx/ipuv3/Makefile b/drivers/gpu/drm/imx/ipuv3/Makefile
new file mode 100644
index 000000000000..21cdcc2faabc
--- /dev/null
+++ b/drivers/gpu/drm/imx/ipuv3/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+imxdrm-objs := imx-drm-core.o ipuv3-crtc.o ipuv3-plane.o
+
+obj-$(CONFIG_DRM_IMX) += imxdrm.o
+
+obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
+obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
+obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
+
+obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o
diff --git a/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c b/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c
new file mode 100644
index 000000000000..07e5f96202d4
--- /dev/null
+++ b/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
+ *
+ * derived from imx-hdmi.c (now renamed to bridge/dw_hdmi.c)
+ */
+
+#include <linux/component.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <video/imx-ipu-v3.h>
+
+#include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "imx-drm.h"
+
+struct imx_hdmi;
+
+struct imx_hdmi_encoder {
+ struct drm_encoder encoder;
+ struct imx_hdmi *hdmi;
+};
+
+struct imx_hdmi {
+ struct device *dev;
+ struct drm_bridge *bridge;
+ struct dw_hdmi *hdmi;
+ struct regmap *regmap;
+};
+
+static inline struct imx_hdmi *enc_to_imx_hdmi(struct drm_encoder *e)
+{
+ return container_of(e, struct imx_hdmi_encoder, encoder)->hdmi;
+}
+
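+/*
+ * MPLL configuration, selected by the maximum pixel clock of each entry.
+ * The three sub-entries per row correspond to 8, 10 and 12 bits per
+ * component, matching the current-control table below.
+ */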
+static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = {
+ {
+ 45250000, {
+ { 0x01e0, 0x0000 },
+ { 0x21e1, 0x0000 },
+ { 0x41e2, 0x0000 }
+ },
+ }, {
+ 92500000, {
+ { 0x0140, 0x0005 },
+ { 0x2141, 0x0005 },
+ { 0x4142, 0x0005 },
+ },
+ }, {
+ 148500000, {
+ { 0x00a0, 0x000a },
+ { 0x20a1, 0x000a },
+ { 0x40a2, 0x000a },
+ },
+ }, {
+ 216000000, {
+ { 0x00a0, 0x000a },
+ { 0x2001, 0x000f },
+ { 0x4002, 0x000f },
+ },
+ }, {
+ ~0UL, {
+ { 0x0000, 0x0000 },
+ { 0x0000, 0x0000 },
+ { 0x0000, 0x0000 },
+ },
+ }
+};
+
+static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = {
+ /* pixelclk bpp8 bpp10 bpp12 */
+ {
+ 54000000, { 0x091c, 0x091c, 0x06dc },
+ }, {
+ 58400000, { 0x091c, 0x06dc, 0x06dc },
+ }, {
+ 72000000, { 0x06dc, 0x06dc, 0x091c },
+ }, {
+ 74250000, { 0x06dc, 0x0b5c, 0x091c },
+ }, {
+ 118800000, { 0x091c, 0x091c, 0x06dc },
+ }, {
+ 216000000, { 0x06dc, 0x0b5c, 0x091c },
+ }, {
+ ~0UL, { 0x0000, 0x0000, 0x0000 },
+ },
+};
+
+/*
+ * Resistance term 133Ohm Cfg
+ * PREEMP config 0.00
+ * TX/CK level 10
+ */
+static const struct dw_hdmi_phy_config imx_phy_config[] = {
+	/* pixelclk   symbol   term   vlev */
+ { 216000000, 0x800d, 0x0005, 0x01ad},
+ { ~0UL, 0x0000, 0x0000, 0x0000}
+};
+
+static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder)
+{
+ struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder);
+ int mux = drm_of_encoder_active_port_id(hdmi->dev->of_node, encoder);
+
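+	/*
+	 * Route the IPU DI selected by the encoder's active port to the HDMI
+	 * block via the HDMI mux control field in IOMUXC GPR3.
+	 */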
+ regmap_update_bits(hdmi->regmap, IOMUXC_GPR3,
+ IMX6Q_GPR3_HDMI_MUX_CTL_MASK,
+ mux << IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT);
+}
+
+static int dw_hdmi_imx_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+
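+	/* the DI feeds the HDMI TX a fixed RGB888 bus and uses DI sync pins 2/3 */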
+ imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ imx_crtc_state->di_hsync_pin = 2;
+ imx_crtc_state->di_vsync_pin = 3;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
+ .enable = dw_hdmi_imx_encoder_enable,
+ .atomic_check = dw_hdmi_imx_atomic_check,
+};
+
+static enum drm_mode_status
+imx6q_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ if (mode->clock < 13500)
+ return MODE_CLOCK_LOW;
+ /* FIXME: Hardware is capable of 266MHz, but setup data is missing. */
+ if (mode->clock > 216000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static enum drm_mode_status
+imx6dl_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ if (mode->clock < 13500)
+ return MODE_CLOCK_LOW;
+ /* FIXME: Hardware is capable of 270MHz, but setup data is missing. */
+ if (mode->clock > 216000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = {
+ .mpll_cfg = imx_mpll_cfg,
+ .cur_ctr = imx_cur_ctr,
+ .phy_config = imx_phy_config,
+ .mode_valid = imx6q_hdmi_mode_valid,
+};
+
+static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = {
+ .mpll_cfg = imx_mpll_cfg,
+ .cur_ctr = imx_cur_ctr,
+ .phy_config = imx_phy_config,
+ .mode_valid = imx6dl_hdmi_mode_valid,
+};
+
+static const struct of_device_id dw_hdmi_imx_dt_ids[] = {
+ { .compatible = "fsl,imx6q-hdmi",
+ .data = &imx6q_hdmi_drv_data
+ }, {
+ .compatible = "fsl,imx6dl-hdmi",
+ .data = &imx6dl_hdmi_drv_data
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_hdmi_imx_dt_ids);
+
+static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = data;
+ struct imx_hdmi_encoder *hdmi_encoder;
+ struct drm_encoder *encoder;
+ int ret;
+
+ hdmi_encoder = drmm_simple_encoder_alloc(drm, struct imx_hdmi_encoder,
+ encoder, DRM_MODE_ENCODER_TMDS);
+ if (IS_ERR(hdmi_encoder))
+ return PTR_ERR(hdmi_encoder);
+
+ hdmi_encoder->hdmi = dev_get_drvdata(dev);
+ encoder = &hdmi_encoder->encoder;
+
+ ret = imx_drm_encoder_parse_of(drm, encoder, dev->of_node);
+ if (ret)
+ return ret;
+
+ drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs);
+
+ return drm_bridge_attach(encoder, hdmi_encoder->hdmi->bridge, NULL, 0);
+}
+
+static const struct component_ops dw_hdmi_imx_ops = {
+ .bind = dw_hdmi_imx_bind,
+};
+
+static int dw_hdmi_imx_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match = of_match_node(dw_hdmi_imx_dt_ids, np);
+ struct imx_hdmi *hdmi;
+ int ret;
+
+ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, hdmi);
+ hdmi->dev = &pdev->dev;
+
+ hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
+ if (IS_ERR(hdmi->regmap)) {
+ dev_err(hdmi->dev, "Unable to get gpr\n");
+ return PTR_ERR(hdmi->regmap);
+ }
+
+ hdmi->hdmi = dw_hdmi_probe(pdev, match->data);
+ if (IS_ERR(hdmi->hdmi))
+ return PTR_ERR(hdmi->hdmi);
+
+ hdmi->bridge = of_drm_find_bridge(np);
+ if (!hdmi->bridge) {
+ dev_err(hdmi->dev, "Unable to find bridge\n");
+ dw_hdmi_remove(hdmi->hdmi);
+ return -ENODEV;
+ }
+
+ ret = component_add(&pdev->dev, &dw_hdmi_imx_ops);
+ if (ret)
+ dw_hdmi_remove(hdmi->hdmi);
+
+ return ret;
+}
+
+static void dw_hdmi_imx_remove(struct platform_device *pdev)
+{
+ struct imx_hdmi *hdmi = platform_get_drvdata(pdev);
+
+ component_del(&pdev->dev, &dw_hdmi_imx_ops);
+ dw_hdmi_remove(hdmi->hdmi);
+}
+
+static struct platform_driver dw_hdmi_imx_platform_driver = {
+ .probe = dw_hdmi_imx_probe,
+ .remove = dw_hdmi_imx_remove,
+ .driver = {
+ .name = "dwhdmi-imx",
+ .of_match_table = dw_hdmi_imx_dt_ids,
+ },
+};
+
+module_platform_driver(dw_hdmi_imx_platform_driver);
+
+MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com>");
+MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
+MODULE_DESCRIPTION("IMX6 Specific DW-HDMI Driver Extension");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
new file mode 100644
index 000000000000..eddb471119c6
--- /dev/null
+++ b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Freescale i.MX drm driver
+ *
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix
+ */
+
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <video/imx-ipu-v3.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
+#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "imx-drm.h"
+#include "ipuv3-plane.h"
+
+#define MAX_CRTC 4
+
+static int legacyfb_depth = 16;
+module_param(legacyfb_depth, int, 0444);
+
+DEFINE_DRM_GEM_DMA_FOPS(imx_drm_driver_fops);
+
+static int imx_drm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret)
+ return ret;
+
+ /*
+ * Check modeset again in case crtc_state->mode_changed is
+ * updated in plane's ->atomic_check callback.
+ */
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+	/* Assign PRG/PRE channels and check if all constraints are satisfied. */
+ ret = ipu_planes_assign_pre(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = imx_drm_atomic_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ bool plane_disabling = false;
+ int i;
+
+ drm_atomic_helper_commit_modeset_disables(dev, state);
+
+ drm_atomic_helper_commit_planes(dev, state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY |
+ DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET);
+
+ drm_atomic_helper_commit_modeset_enables(dev, state);
+
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ if (drm_atomic_plane_disabling(old_plane_state, new_plane_state))
+ plane_disabling = true;
+ }
+
+ /*
+ * The flip done wait is only strictly required by imx-drm if a deferred
+	 * plane disable is in-flight. Since the core requires blocking commits
+	 * to wait for the flip, it is done here unconditionally. This keeps the
+ * workitem around a bit longer than required for the majority of
+ * non-blocking commits, but we accept that for the sake of simplicity.
+ */
+ drm_atomic_helper_wait_for_flip_done(dev, state);
+
+ if (plane_disabling) {
+ for_each_old_plane_in_state(state, plane, old_plane_state, i)
+ ipu_plane_disable_deferred(plane);
+
+ }
+
+ drm_atomic_helper_commit_hw_done(state);
+}
+
+static const struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
+ .atomic_commit_tail = imx_drm_atomic_commit_tail,
+};
+
+
+int imx_drm_encoder_parse_of(struct drm_device *drm,
+ struct drm_encoder *encoder, struct device_node *np)
+{
+ uint32_t crtc_mask = drm_of_find_possible_crtcs(drm, np);
+
+ /*
+ * If we failed to find the CRTC(s) which this encoder is
+ * supposed to be connected to, it's because the CRTC has
+ * not been registered yet. Defer probing, and hope that
+ * the required CRTC is added later.
+ */
+ if (crtc_mask == 0)
+ return -EPROBE_DEFER;
+
+ encoder->possible_crtcs = crtc_mask;
+
+ /* FIXME: cloning support not clear, disable it all for now */
+ encoder->possible_clones = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_drm_encoder_parse_of);
+
+static const struct drm_ioctl_desc imx_drm_ioctls[] = {
+ /* none so far */
+};
+
+static int imx_drm_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ u32 fourcc;
+ u64 pitch_align;
+ int ret;
+
+ /*
+ * Hardware requires the framebuffer width to be aligned to
+ * multiples of 8. The mode-setting code handles this, but
+ * the buffer pitch has to be aligned as well. Set the pitch
+	 * alignment accordingly, so that each scanline fits into
+ * the allocated buffer.
+ */
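+	/*
+	 * For example, a 32 bpp format needs 32 bytes for 8 pixels, so the
+	 * pitch computed below gets rounded up to a multiple of 32 bytes.
+	 */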
+ fourcc = drm_driver_color_mode_format(drm, args->bpp);
+ if (fourcc != DRM_FORMAT_INVALID) {
+ const struct drm_format_info *info = drm_format_info(fourcc);
+
+ if (!info)
+ return -EINVAL;
+ pitch_align = drm_format_info_min_pitch(info, 0, 8);
+ } else {
+ pitch_align = DIV_ROUND_UP(args->bpp, SZ_8) * 8;
+ }
+ if (!pitch_align || pitch_align > U32_MAX)
+ return -EINVAL;
+ ret = drm_mode_size_dumb(drm, args, pitch_align, 0);
+ if (ret)
+ return ret;
+
+ return drm_gem_dma_dumb_create(file_priv, drm, args);
+}
+
+static const struct drm_driver imx_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(imx_drm_dumb_create),
+ DRM_FBDEV_DMA_DRIVER_OPS,
+ .ioctls = imx_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(imx_drm_ioctls),
+ .fops = &imx_drm_driver_fops,
+ .name = "imx-drm",
+ .desc = "i.MX DRM graphics",
+ .major = 1,
+ .minor = 0,
+ .patchlevel = 0,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ struct device_node *np = data;
+
+ /* Special case for DI, dev->of_node may not be set yet */
+ if (strcmp(dev->driver->name, "imx-ipuv3-crtc") == 0) {
+ struct ipu_client_platformdata *pdata = dev->platform_data;
+
+ return pdata->of_node == np;
+ }
+
+ /* Special case for LDB, one device for two channels */
+ if (of_node_name_eq(np, "lvds-channel")) {
+ np = of_get_parent(np);
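+		/* only the node pointer is compared below, so drop the reference right away */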
+ of_node_put(np);
+ }
+
+ return dev->of_node == np;
+}
+
+static int imx_drm_bind(struct device *dev)
+{
+ struct drm_device *drm;
+ int ret;
+
+ drm = drm_dev_alloc(&imx_drm_driver, dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ /*
+	 * Set the maximum width and height to a default value of 4096x4096.
+	 * These values are used to check the framebuffer size limit in
+	 * drm_mode_addfb().
+ */
+ drm->mode_config.min_width = 1;
+ drm->mode_config.min_height = 1;
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+ drm->mode_config.funcs = &imx_drm_mode_config_funcs;
+ drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
+ drm->mode_config.normalize_zpos = true;
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ goto err_kms;
+
+ ret = drm_vblank_init(drm, MAX_CRTC);
+ if (ret)
+ goto err_kms;
+
+ dev_set_drvdata(dev, drm);
+
+ /* Now try and bind all our sub-components */
+ ret = component_bind_all(dev, drm);
+ if (ret)
+ goto err_kms;
+
+ drm_mode_config_reset(drm);
+
+ /*
+	 * All components are now initialised, so set up the fb helper.
+ * The fb helper takes copies of key hardware information, so the
+ * crtcs/connectors/encoders must not change after this point.
+ */
+ if (legacyfb_depth != 16 && legacyfb_depth != 32) {
+ dev_warn(dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
+ legacyfb_depth = 16;
+ }
+
+ drm_kms_helper_poll_init(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_poll_fini;
+
+ drm_client_setup_with_color_mode(drm, legacyfb_depth);
+
+ return 0;
+
+err_poll_fini:
+ drm_kms_helper_poll_fini(drm);
+ component_unbind_all(drm->dev, drm);
+err_kms:
+ dev_set_drvdata(dev, NULL);
+ drm_dev_put(drm);
+
+ return ret;
+}
+
+static void imx_drm_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+
+ drm_dev_unregister(drm);
+
+ drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ component_unbind_all(drm->dev, drm);
+
+ drm_dev_put(drm);
+
+ dev_set_drvdata(dev, NULL);
+}
+
+static const struct component_master_ops imx_drm_ops = {
+ .bind = imx_drm_bind,
+ .unbind = imx_drm_unbind,
+};
+
+static int imx_drm_platform_probe(struct platform_device *pdev)
+{
+ int ret = drm_of_component_probe(&pdev->dev, compare_of, &imx_drm_ops);
+
+ if (!ret)
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+
+ return ret;
+}
+
+static void imx_drm_platform_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &imx_drm_ops);
+}
+
+static void imx_drm_platform_shutdown(struct platform_device *pdev)
+{
+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int imx_drm_suspend(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_suspend(drm_dev);
+}
+
+static int imx_drm_resume(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_resume(drm_dev);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(imx_drm_pm_ops, imx_drm_suspend, imx_drm_resume);
+
+static const struct of_device_id imx_drm_dt_ids[] = {
+ { .compatible = "fsl,imx-display-subsystem", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, imx_drm_dt_ids);
+
+static struct platform_driver imx_drm_pdrv = {
+ .probe = imx_drm_platform_probe,
+ .remove = imx_drm_platform_remove,
+ .shutdown = imx_drm_platform_shutdown,
+ .driver = {
+ .name = "imx-drm",
+ .pm = &imx_drm_pm_ops,
+ .of_match_table = imx_drm_dt_ids,
+ },
+};
+
+static struct platform_driver * const drivers[] = {
+ &imx_drm_pdrv,
+ &ipu_drm_driver,
+};
+
+static int __init imx_drm_init(void)
+{
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
+ return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_init(imx_drm_init);
+
+static void __exit imx_drm_exit(void)
+{
+ platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_exit(imx_drm_exit);
+
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("i.MX drm driver core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm.h b/drivers/gpu/drm/imx/ipuv3/imx-drm.h
new file mode 100644
index 000000000000..0c85bf83ffbf
--- /dev/null
+++ b/drivers/gpu/drm/imx/ipuv3/imx-drm.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _IMX_DRM_H_
+#define _IMX_DRM_H_
+
+struct device_node;
+struct drm_connector;
+struct drm_device;
+struct drm_encoder;
+
+struct imx_crtc_state {
+ struct drm_crtc_state base;
+ u32 bus_format;
+ u32 bus_flags;
+ int di_hsync_pin;
+ int di_vsync_pin;
+};
+
+static inline struct imx_crtc_state *to_imx_crtc_state(struct drm_crtc_state *s)
+{
+ return container_of(s, struct imx_crtc_state, base);
+}
+
+extern struct platform_driver ipu_drm_driver;
+
+int imx_drm_encoder_parse_of(struct drm_device *drm,
+ struct drm_encoder *encoder, struct device_node *np);
+
+int ipu_planes_assign_pre(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+#endif /* _IMX_DRM_H_ */
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
new file mode 100644
index 000000000000..626d410d9150
--- /dev/null
+++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
@@ -0,0 +1,646 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * i.MX drm driver - LVDS display bridge
+ *
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/i2c.h>
+#include <linux/media-bus-format.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/videodev2.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/bridge/imx.h>
+
+#include "imx-drm.h"
+
+#define DRIVER_NAME "imx-ldb"
+
+#define LDB_CH0_MODE_EN_TO_DI0 (1 << 0)
+#define LDB_CH0_MODE_EN_TO_DI1 (3 << 0)
+#define LDB_CH0_MODE_EN_MASK (3 << 0)
+#define LDB_CH1_MODE_EN_TO_DI0 (1 << 2)
+#define LDB_CH1_MODE_EN_TO_DI1 (3 << 2)
+#define LDB_CH1_MODE_EN_MASK (3 << 2)
+#define LDB_SPLIT_MODE_EN (1 << 4)
+#define LDB_DATA_WIDTH_CH0_24 (1 << 5)
+#define LDB_BIT_MAP_CH0_JEIDA (1 << 6)
+#define LDB_DATA_WIDTH_CH1_24 (1 << 7)
+#define LDB_BIT_MAP_CH1_JEIDA (1 << 8)
+#define LDB_DI0_VS_POL_ACT_LOW (1 << 9)
+#define LDB_DI1_VS_POL_ACT_LOW (1 << 10)
+#define LDB_BGREF_RMODE_INT (1 << 15)
+
+struct imx_ldb_channel;
+
+struct imx_ldb_encoder {
+ struct drm_encoder encoder;
+ struct imx_ldb_channel *channel;
+};
+
+struct imx_ldb;
+
+struct imx_ldb_channel {
+ struct imx_ldb *ldb;
+
+ struct drm_bridge *bridge;
+
+ struct device_node *child;
+ int chno;
+ u32 bus_format;
+};
+
+static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e)
+{
+ return container_of(e, struct imx_ldb_encoder, encoder)->channel;
+}
+
+struct bus_mux {
+ int reg;
+ int shift;
+ int mask;
+};
+
+struct imx_ldb {
+ struct regmap *regmap;
+ struct device *dev;
+ struct imx_ldb_channel channel[2];
+ struct clk *clk[2]; /* our own clock */
+ struct clk *clk_sel[4]; /* parent of display clock */
+ struct clk *clk_parent[4]; /* original parent of clk_sel */
+ struct clk *clk_pll[2]; /* upstream clock we can adjust */
+ u32 ldb_ctrl;
+ const struct bus_mux *lvds_mux;
+};
+
+static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch,
+ u32 bus_format)
+{
+ struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ if (imx_ldb_ch->chno == 0 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
+ if (imx_ldb_ch->chno == 1 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ if (imx_ldb_ch->chno == 0 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
+ LDB_BIT_MAP_CH0_JEIDA;
+ if (imx_ldb_ch->chno == 1 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
+ LDB_BIT_MAP_CH1_JEIDA;
+ break;
+ }
+}
+
+static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
+ unsigned long serial_clk, unsigned long di_clk)
+{
+ int ret;
+
+ dev_dbg(ldb->dev, "%s: now: %ld want: %ld\n", __func__,
+ clk_get_rate(ldb->clk_pll[chno]), serial_clk);
+ clk_set_rate(ldb->clk_pll[chno], serial_clk);
+
+ dev_dbg(ldb->dev, "%s after: %ld\n", __func__,
+ clk_get_rate(ldb->clk_pll[chno]));
+
+ dev_dbg(ldb->dev, "%s: now: %ld want: %ld\n", __func__,
+ clk_get_rate(ldb->clk[chno]),
+ (long int)di_clk);
+ clk_set_rate(ldb->clk[chno], di_clk);
+
+ dev_dbg(ldb->dev, "%s after: %ld\n", __func__,
+ clk_get_rate(ldb->clk[chno]));
+
+ /* set display clock mux to LDB input clock */
+ ret = clk_set_parent(ldb->clk_sel[mux], ldb->clk[chno]);
+ if (ret)
+ dev_err(ldb->dev,
+ "unable to set di%d parent clock to ldb_di%d\n", mux,
+ chno);
+}
+
+static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
+{
+ struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+ struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+ int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
+
+ if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+ dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+ return;
+ }
+
+ if (dual) {
+ clk_set_parent(ldb->clk_sel[mux], ldb->clk[0]);
+ clk_set_parent(ldb->clk_sel[mux], ldb->clk[1]);
+
+ clk_prepare_enable(ldb->clk[0]);
+ clk_prepare_enable(ldb->clk[1]);
+ } else {
+ clk_set_parent(ldb->clk_sel[mux], ldb->clk[imx_ldb_ch->chno]);
+ }
+
+ if (imx_ldb_ch == &ldb->channel[0] || dual) {
+ ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
+ if (mux == 0 || ldb->lvds_mux)
+ ldb->ldb_ctrl |= LDB_CH0_MODE_EN_TO_DI0;
+ else if (mux == 1)
+ ldb->ldb_ctrl |= LDB_CH0_MODE_EN_TO_DI1;
+ }
+ if (imx_ldb_ch == &ldb->channel[1] || dual) {
+ ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK;
+ if (mux == 1 || ldb->lvds_mux)
+ ldb->ldb_ctrl |= LDB_CH1_MODE_EN_TO_DI1;
+ else if (mux == 0)
+ ldb->ldb_ctrl |= LDB_CH1_MODE_EN_TO_DI0;
+ }
+
+ if (ldb->lvds_mux) {
+ const struct bus_mux *lvds_mux = NULL;
+
+ if (imx_ldb_ch == &ldb->channel[0])
+ lvds_mux = &ldb->lvds_mux[0];
+ else if (imx_ldb_ch == &ldb->channel[1])
+ lvds_mux = &ldb->lvds_mux[1];
+
+ regmap_update_bits(ldb->regmap, lvds_mux->reg, lvds_mux->mask,
+ mux << lvds_mux->shift);
+ }
+
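+	/* commit the accumulated channel routing and mode bits to IOMUXC GPR2 */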
+ regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl);
+}
+
+static void
+imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *connector_state)
+{
+ struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+ unsigned long serial_clk;
+ unsigned long di_clk = mode->clock * 1000;
+ int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
+ u32 bus_format = imx_ldb_ch->bus_format;
+
+ if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+ dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+ return;
+ }
+
+ if (mode->clock > 170000) {
+ dev_warn(ldb->dev,
+ "%s: mode exceeds 170 MHz pixel clock\n", __func__);
+ }
+ if (mode->clock > 85000 && !dual) {
+ dev_warn(ldb->dev,
+ "%s: mode exceeds 85 MHz pixel clock\n", __func__);
+ }
+
+ if (!IS_ALIGNED(mode->hdisplay, 8)) {
+ dev_warn(ldb->dev,
+			 "%s: hdisplay is not aligned to 8 pixels\n", __func__);
+ }
+
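+	/*
+	 * The LVDS serial clock runs at 7 times the pixel clock per channel.
+	 * In dual-channel (split) mode each channel carries every other
+	 * pixel, so 3.5 times the pixel clock is sufficient.
+	 */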
+ if (dual) {
+ serial_clk = 3500UL * mode->clock;
+ imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk);
+ imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
+ } else {
+ serial_clk = 7000UL * mode->clock;
+ imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk,
+ di_clk);
+ }
+
+ /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */
+ if (imx_ldb_ch == &ldb->channel[0] || dual) {
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ldb->ldb_ctrl |= LDB_DI0_VS_POL_ACT_LOW;
+ else if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ ldb->ldb_ctrl &= ~LDB_DI0_VS_POL_ACT_LOW;
+ }
+ if (imx_ldb_ch == &ldb->channel[1] || dual) {
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ldb->ldb_ctrl |= LDB_DI1_VS_POL_ACT_LOW;
+ else if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ ldb->ldb_ctrl &= ~LDB_DI1_VS_POL_ACT_LOW;
+ }
+
+ if (!bus_format) {
+ struct drm_connector *connector = connector_state->connector;
+ struct drm_display_info *di = &connector->display_info;
+
+ if (di->num_bus_formats)
+ bus_format = di->bus_formats[0];
+ }
+ imx_ldb_ch_set_bus_format(imx_ldb_ch, bus_format);
+}
+
+static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
+{
+ struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+ struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+ int mux, ret;
+
+ if (imx_ldb_ch == &ldb->channel[0] || dual)
+ ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
+ if (imx_ldb_ch == &ldb->channel[1] || dual)
+ ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK;
+
+ regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl);
+
+ if (dual) {
+ clk_disable_unprepare(ldb->clk[0]);
+ clk_disable_unprepare(ldb->clk[1]);
+ }
+
+ if (ldb->lvds_mux) {
+ const struct bus_mux *lvds_mux = NULL;
+
+ if (imx_ldb_ch == &ldb->channel[0])
+ lvds_mux = &ldb->lvds_mux[0];
+ else if (imx_ldb_ch == &ldb->channel[1])
+ lvds_mux = &ldb->lvds_mux[1];
+
+ regmap_read(ldb->regmap, lvds_mux->reg, &mux);
+ mux &= lvds_mux->mask;
+ mux >>= lvds_mux->shift;
+ } else {
+ mux = (imx_ldb_ch == &ldb->channel[0]) ? 0 : 1;
+ }
+
+ /* set display clock mux back to original input clock */
+ ret = clk_set_parent(ldb->clk_sel[mux], ldb->clk_parent[mux]);
+ if (ret)
+ dev_err(ldb->dev,
+ "unable to set di%d parent clock to original parent\n",
+ mux);
+}
+
+static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+ struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ u32 bus_format = imx_ldb_ch->bus_format;
+
+	/* Bus format description in DT overrides connector display info. */
+	if (!bus_format && di->num_bus_formats)
+		bus_format = di->bus_formats[0];
+
+ imx_crtc_state->bus_flags = di->bus_flags;
+
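+	/* map the LVDS link format to the parallel bus format the IPU DI must emit */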
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ imx_crtc_state->di_hsync_pin = 2;
+ imx_crtc_state->di_vsync_pin = 3;
+
+ return 0;
+}
+
+
+static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
+ .atomic_mode_set = imx_ldb_encoder_atomic_mode_set,
+ .enable = imx_ldb_encoder_enable,
+ .disable = imx_ldb_encoder_disable,
+ .atomic_check = imx_ldb_encoder_atomic_check,
+};
+
+static int imx_ldb_get_clk(struct imx_ldb *ldb, int chno)
+{
+ char clkname[16];
+
+ snprintf(clkname, sizeof(clkname), "di%d", chno);
+ ldb->clk[chno] = devm_clk_get(ldb->dev, clkname);
+ if (IS_ERR(ldb->clk[chno]))
+ return PTR_ERR(ldb->clk[chno]);
+
+ snprintf(clkname, sizeof(clkname), "di%d_pll", chno);
+ ldb->clk_pll[chno] = devm_clk_get(ldb->dev, clkname);
+
+ return PTR_ERR_OR_ZERO(ldb->clk_pll[chno]);
+}
+
+static int imx_ldb_register(struct drm_device *drm,
+ struct imx_ldb_channel *imx_ldb_ch)
+{
+ struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ struct imx_ldb_encoder *ldb_encoder;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ int ret;
+
+ ldb_encoder = drmm_simple_encoder_alloc(drm, struct imx_ldb_encoder,
+ encoder, DRM_MODE_ENCODER_LVDS);
+ if (IS_ERR(ldb_encoder))
+ return PTR_ERR(ldb_encoder);
+
+ ldb_encoder->channel = imx_ldb_ch;
+ encoder = &ldb_encoder->encoder;
+
+ ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child);
+ if (ret)
+ return ret;
+
+ ret = imx_ldb_get_clk(ldb, imx_ldb_ch->chno);
+ if (ret)
+ return ret;
+
+ if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
+ ret = imx_ldb_get_clk(ldb, 1);
+ if (ret)
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
+
+ ret = drm_bridge_attach(encoder, imx_ldb_ch->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ return ret;
+
+ connector = drm_bridge_connector_init(drm, encoder);
+ if (IS_ERR(connector))
+ return PTR_ERR(connector);
+
+ drm_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+struct imx_ldb_bit_mapping {
+ u32 bus_format;
+ u32 datawidth;
+ const char * const mapping;
+};
+
+static const struct imx_ldb_bit_mapping imx_ldb_bit_mappings[] = {
+ { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, 18, "spwg" },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 24, "spwg" },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, 24, "jeida" },
+};
+
+static u32 of_get_bus_format(struct device *dev, struct device_node *np)
+{
+ const char *bm;
+ u32 datawidth = 0;
+ int ret, i;
+
+ ret = of_property_read_string(np, "fsl,data-mapping", &bm);
+ if (ret < 0)
+ return ret;
+
+ of_property_read_u32(np, "fsl,data-width", &datawidth);
+
+ for (i = 0; i < ARRAY_SIZE(imx_ldb_bit_mappings); i++) {
+ if (!strcasecmp(bm, imx_ldb_bit_mappings[i].mapping) &&
+ datawidth == imx_ldb_bit_mappings[i].datawidth)
+ return imx_ldb_bit_mappings[i].bus_format;
+ }
+
+ dev_err(dev, "invalid data mapping: %d-bit \"%s\"\n", datawidth, bm);
+
+ return -ENOENT;
+}
+
+static struct bus_mux imx6q_lvds_mux[2] = {
+ {
+ .reg = IOMUXC_GPR3,
+ .shift = 6,
+ .mask = IMX6Q_GPR3_LVDS0_MUX_CTL_MASK,
+ }, {
+ .reg = IOMUXC_GPR3,
+ .shift = 8,
+ .mask = IMX6Q_GPR3_LVDS1_MUX_CTL_MASK,
+ }
+};
+
+/*
+ * For a device declaring compatible = "fsl,imx6q-ldb", "fsl,imx53-ldb",
+ * of_match_device will walk through this list and take the first entry
+ * matching any of its compatible values. Therefore, the more generic
+ * entries (in this case fsl,imx53-ldb) need to be ordered last.
+ */
+static const struct of_device_id imx_ldb_dt_ids[] = {
+ { .compatible = "fsl,imx6q-ldb", .data = imx6q_lvds_mux, },
+ { .compatible = "fsl,imx53-ldb", .data = NULL, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx_ldb_dt_ids);
+
+static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm = data;
+ struct imx_ldb *imx_ldb = dev_get_drvdata(dev);
+ int ret;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct imx_ldb_channel *channel = &imx_ldb->channel[i];
+
+ if (!channel->ldb)
+ continue;
+
+ ret = imx_ldb_register(drm, channel);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct component_ops imx_ldb_ops = {
+ .bind = imx_ldb_bind,
+};
+
+static int imx_ldb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ struct imx_ldb *imx_ldb;
+ int dual;
+ int ret;
+ int i;
+
+ imx_ldb = devm_kzalloc(dev, sizeof(*imx_ldb), GFP_KERNEL);
+ if (!imx_ldb)
+ return -ENOMEM;
+
+ imx_ldb->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
+ if (IS_ERR(imx_ldb->regmap)) {
+ dev_err(dev, "failed to get parent regmap\n");
+ return PTR_ERR(imx_ldb->regmap);
+ }
+
+ /* disable LDB by resetting the control register to POR default */
+ regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
+
+ imx_ldb->dev = dev;
+ imx_ldb->lvds_mux = device_get_match_data(dev);
+
+ dual = of_property_read_bool(np, "fsl,dual-channel");
+ if (dual)
+ imx_ldb->ldb_ctrl |= LDB_SPLIT_MODE_EN;
+
+ /*
+ * There are three different possible clock mux configurations:
+ * i.MX53: ipu1_di0_sel, ipu1_di1_sel
+ * i.MX6q: ipu1_di0_sel, ipu1_di1_sel, ipu2_di0_sel, ipu2_di1_sel
+ * i.MX6dl: ipu1_di0_sel, ipu1_di1_sel, lcdif_sel
+ * Map them all to di0_sel...di3_sel.
+ */
+ for (i = 0; i < 4; i++) {
+ char clkname[16];
+
+ snprintf(clkname, sizeof(clkname), "di%d_sel", i);
+ imx_ldb->clk_sel[i] = devm_clk_get(imx_ldb->dev, clkname);
+ if (IS_ERR(imx_ldb->clk_sel[i])) {
+ ret = PTR_ERR(imx_ldb->clk_sel[i]);
+ imx_ldb->clk_sel[i] = NULL;
+ break;
+ }
+
+ imx_ldb->clk_parent[i] = clk_get_parent(imx_ldb->clk_sel[i]);
+ }
+ if (i == 0)
+ return ret;
+
+ for_each_child_of_node(np, child) {
+ struct imx_ldb_channel *channel;
+ int bus_format;
+
+ ret = of_property_read_u32(child, "reg", &i);
+ if (ret || i < 0 || i > 1) {
+ ret = -EINVAL;
+ goto free_child;
+ }
+
+ if (!of_device_is_available(child))
+ continue;
+
+ if (dual && i > 0) {
+ dev_warn(dev, "dual-channel mode, ignoring second output\n");
+ continue;
+ }
+
+ channel = &imx_ldb->channel[i];
+ channel->ldb = imx_ldb;
+ channel->chno = i;
+
+ /*
+ * The output port is port@4 with an external 4-port mux or
+ * port@2 with the internal 2-port mux.
+ */
+ channel->bridge = devm_drm_of_get_bridge(dev, child,
+ imx_ldb->lvds_mux ? 4 : 2, 0);
+ if (IS_ERR(channel->bridge)) {
+ ret = PTR_ERR(channel->bridge);
+ if (ret != -ENODEV)
+ goto free_child;
+ channel->bridge = NULL;
+ }
+
+ bus_format = of_get_bus_format(dev, child);
+ /*
+ * If no bus format was specified in the device tree,
+ * we can still get it from the connected panel later.
+ */
+ if (bus_format == -EINVAL && channel->bridge)
+ bus_format = 0;
+ if (bus_format < 0) {
+ dev_err(dev, "could not determine data mapping: %d\n",
+ bus_format);
+ ret = bus_format;
+ goto free_child;
+ }
+ channel->bus_format = bus_format;
+
+ /*
+		 * The legacy bridge doesn't handle bus_format, so create it
+		 * only after the bus_format property has been checked.
+ */
+ if (!channel->bridge) {
+ channel->bridge = devm_imx_drm_legacy_bridge(dev, child,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (IS_ERR(channel->bridge)) {
+ ret = PTR_ERR(channel->bridge);
+ goto free_child;
+ }
+ }
+
+ channel->child = child;
+ }
+
+ platform_set_drvdata(pdev, imx_ldb);
+
+ return component_add(&pdev->dev, &imx_ldb_ops);
+
+free_child:
+ of_node_put(child);
+ return ret;
+}
+
+static void imx_ldb_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &imx_ldb_ops);
+}
+
+static struct platform_driver imx_ldb_driver = {
+ .probe = imx_ldb_probe,
+ .remove = imx_ldb_remove,
+ .driver = {
+ .of_match_table = imx_ldb_dt_ids,
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(imx_ldb_driver);
+
+MODULE_DESCRIPTION("i.MX LVDS driver");
+MODULE_AUTHOR("Sascha Hauer, Pengutronix");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
new file mode 100644
index 000000000000..c5c6e070cc06
--- /dev/null
+++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
@@ -0,0 +1,679 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * i.MX drm driver - Television Encoder (TVEv2)
+ *
+ * Copyright (C) 2013 Philipp Zabel, Pengutronix
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/videodev2.h>
+
+#include <video/imx-ipu-v3.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "imx-drm.h"
+
+#define TVE_COM_CONF_REG 0x00
+#define TVE_TVDAC0_CONT_REG 0x28
+#define TVE_TVDAC1_CONT_REG 0x2c
+#define TVE_TVDAC2_CONT_REG 0x30
+#define TVE_CD_CONT_REG 0x34
+#define TVE_INT_CONT_REG 0x64
+#define TVE_STAT_REG 0x68
+#define TVE_TST_MODE_REG 0x6c
+#define TVE_MV_CONT_REG 0xdc
+
+/* TVE_COM_CONF_REG */
+#define TVE_SYNC_CH_2_EN BIT(22)
+#define TVE_SYNC_CH_1_EN BIT(21)
+#define TVE_SYNC_CH_0_EN BIT(20)
+#define TVE_TV_OUT_MODE_MASK (0x7 << 12)
+#define TVE_TV_OUT_DISABLE (0x0 << 12)
+#define TVE_TV_OUT_CVBS_0 (0x1 << 12)
+#define TVE_TV_OUT_CVBS_2 (0x2 << 12)
+#define TVE_TV_OUT_CVBS_0_2 (0x3 << 12)
+#define TVE_TV_OUT_SVIDEO_0_1 (0x4 << 12)
+#define TVE_TV_OUT_SVIDEO_0_1_CVBS2_2 (0x5 << 12)
+#define TVE_TV_OUT_YPBPR (0x6 << 12)
+#define TVE_TV_OUT_RGB (0x7 << 12)
+#define TVE_TV_STAND_MASK (0xf << 8)
+#define TVE_TV_STAND_HD_1080P30 (0xc << 8)
+#define TVE_P2I_CONV_EN BIT(7)
+#define TVE_INP_VIDEO_FORM BIT(6)
+#define TVE_INP_YCBCR_422 (0x0 << 6)
+#define TVE_INP_YCBCR_444 (0x1 << 6)
+#define TVE_DATA_SOURCE_MASK (0x3 << 4)
+#define TVE_DATA_SOURCE_BUS1 (0x0 << 4)
+#define TVE_DATA_SOURCE_BUS2 (0x1 << 4)
+#define TVE_DATA_SOURCE_EXT (0x2 << 4)
+#define TVE_DATA_SOURCE_TESTGEN (0x3 << 4)
+#define TVE_IPU_CLK_EN_OFS 3
+#define TVE_IPU_CLK_EN BIT(3)
+#define TVE_DAC_SAMP_RATE_OFS 1
+#define TVE_DAC_SAMP_RATE_WIDTH 2
+#define TVE_DAC_SAMP_RATE_MASK (0x3 << 1)
+#define TVE_DAC_FULL_RATE (0x0 << 1)
+#define TVE_DAC_DIV2_RATE (0x1 << 1)
+#define TVE_DAC_DIV4_RATE (0x2 << 1)
+#define TVE_EN BIT(0)
+
+/* TVE_TVDACx_CONT_REG */
+#define TVE_TVDAC_GAIN_MASK (0x3f << 0)
+
+/* TVE_CD_CONT_REG */
+#define TVE_CD_CH_2_SM_EN BIT(22)
+#define TVE_CD_CH_1_SM_EN BIT(21)
+#define TVE_CD_CH_0_SM_EN BIT(20)
+#define TVE_CD_CH_2_LM_EN BIT(18)
+#define TVE_CD_CH_1_LM_EN BIT(17)
+#define TVE_CD_CH_0_LM_EN BIT(16)
+#define TVE_CD_CH_2_REF_LVL BIT(10)
+#define TVE_CD_CH_1_REF_LVL BIT(9)
+#define TVE_CD_CH_0_REF_LVL BIT(8)
+#define TVE_CD_EN BIT(0)
+
+/* TVE_INT_CONT_REG */
+#define TVE_FRAME_END_IEN BIT(13)
+#define TVE_CD_MON_END_IEN BIT(2)
+#define TVE_CD_SM_IEN BIT(1)
+#define TVE_CD_LM_IEN BIT(0)
+
+/* TVE_TST_MODE_REG */
+#define TVE_TVDAC_TEST_MODE_MASK (0x7 << 0)
+
+#define IMX_TVE_DAC_VOLTAGE 2750000
+
+enum {
+ TVE_MODE_TVOUT,
+ TVE_MODE_VGA,
+};
+
+struct imx_tve_encoder {
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+ struct imx_tve *tve;
+};
+
+struct imx_tve {
+ struct device *dev;
+ int mode;
+ int di_hsync_pin;
+ int di_vsync_pin;
+
+ struct regmap *regmap;
+ struct regulator *dac_reg;
+ struct i2c_adapter *ddc;
+ struct clk *clk;
+ struct clk *di_sel_clk;
+ struct clk_hw clk_hw_di;
+ struct clk *di_clk;
+};
+
+static inline struct imx_tve *con_to_tve(struct drm_connector *c)
+{
+ return container_of(c, struct imx_tve_encoder, connector)->tve;
+}
+
+static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
+{
+ return container_of(e, struct imx_tve_encoder, encoder)->tve;
+}
+
+static void tve_enable(struct imx_tve *tve)
+{
+ clk_prepare_enable(tve->clk);
+ regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, TVE_EN);
+
+ /* clear interrupt status register */
+ regmap_write(tve->regmap, TVE_STAT_REG, 0xffffffff);
+
+ /* cable detection irq disabled in VGA mode, enabled in TVOUT mode */
+ if (tve->mode == TVE_MODE_VGA)
+ regmap_write(tve->regmap, TVE_INT_CONT_REG, 0);
+ else
+ regmap_write(tve->regmap, TVE_INT_CONT_REG,
+ TVE_CD_SM_IEN |
+ TVE_CD_LM_IEN |
+ TVE_CD_MON_END_IEN);
+}
+
+static void tve_disable(struct imx_tve *tve)
+{
+ regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, 0);
+ clk_disable_unprepare(tve->clk);
+}
+
+static int tve_setup_tvout(struct imx_tve *tve)
+{
+ return -ENOTSUPP;
+}
+
+static int tve_setup_vga(struct imx_tve *tve)
+{
+ unsigned int mask;
+ unsigned int val;
+ int ret;
+
+ /* set gain to (1 + 10/128) to provide 0.7V peak-to-peak amplitude */
+ ret = regmap_update_bits(tve->regmap, TVE_TVDAC0_CONT_REG,
+ TVE_TVDAC_GAIN_MASK, 0x0a);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(tve->regmap, TVE_TVDAC1_CONT_REG,
+ TVE_TVDAC_GAIN_MASK, 0x0a);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(tve->regmap, TVE_TVDAC2_CONT_REG,
+ TVE_TVDAC_GAIN_MASK, 0x0a);
+ if (ret)
+ return ret;
+
+ /* set configuration register */
+ mask = TVE_DATA_SOURCE_MASK | TVE_INP_VIDEO_FORM;
+ val = TVE_DATA_SOURCE_BUS2 | TVE_INP_YCBCR_444;
+ mask |= TVE_TV_STAND_MASK | TVE_P2I_CONV_EN;
+ val |= TVE_TV_STAND_HD_1080P30 | 0;
+ mask |= TVE_TV_OUT_MODE_MASK | TVE_SYNC_CH_0_EN;
+ val |= TVE_TV_OUT_RGB | TVE_SYNC_CH_0_EN;
+ ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, mask, val);
+ if (ret)
+ return ret;
+
+ /* set test mode (as documented) */
+ return regmap_update_bits(tve->regmap, TVE_TST_MODE_REG,
+ TVE_TVDAC_TEST_MODE_MASK, 1);
+}
+
+static int imx_tve_connector_get_modes(struct drm_connector *connector)
+{
+ struct imx_tve *tve = con_to_tve(connector);
+ const struct drm_edid *drm_edid;
+ int ret;
+
+ if (!tve->ddc)
+ return 0;
+
+ drm_edid = drm_edid_read_ddc(connector, tve->ddc);
+ drm_edid_connector_update(connector, drm_edid);
+ ret = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
+
+ return ret;
+}
+
+static enum drm_mode_status
+imx_tve_connector_mode_valid(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ struct imx_tve *tve = con_to_tve(connector);
+ unsigned long rate;
+
+ /* pixel clock with 2x oversampling */
+ rate = clk_round_rate(tve->clk, 2000UL * mode->clock) / 2000;
+ if (rate == mode->clock)
+ return MODE_OK;
+
+ /* pixel clock without oversampling */
+ rate = clk_round_rate(tve->clk, 1000UL * mode->clock) / 1000;
+ if (rate == mode->clock)
+ return MODE_OK;
+
+ dev_warn(tve->dev, "ignoring mode %dx%d\n",
+ mode->hdisplay, mode->vdisplay);
+
+ return MODE_BAD;
+}
+
+static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *orig_mode,
+ struct drm_display_mode *mode)
+{
+ struct imx_tve *tve = enc_to_tve(encoder);
+ unsigned long rounded_rate;
+ unsigned long rate;
+ int div = 1;
+ int ret;
+
+ /*
+ * FIXME
+ * we should try 4k * mode->clock first,
+ * and enable 4x oversampling for lower resolutions
+ */
+ rate = 2000UL * mode->clock;
+ clk_set_rate(tve->clk, rate);
+ rounded_rate = clk_get_rate(tve->clk);
+ if (rounded_rate >= rate)
+ div = 2;
+ clk_set_rate(tve->di_clk, rounded_rate / div);
+
+ ret = clk_set_parent(tve->di_sel_clk, tve->di_clk);
+ if (ret < 0) {
+ dev_err(tve->dev, "failed to set di_sel parent to tve_di: %d\n",
+ ret);
+ }
+
+ regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
+ TVE_IPU_CLK_EN, TVE_IPU_CLK_EN);
+
+ if (tve->mode == TVE_MODE_VGA)
+ ret = tve_setup_vga(tve);
+ else
+ ret = tve_setup_tvout(tve);
+ if (ret)
+ dev_err(tve->dev, "failed to set configuration: %d\n", ret);
+}
+
+static void imx_tve_encoder_enable(struct drm_encoder *encoder)
+{
+ struct imx_tve *tve = enc_to_tve(encoder);
+
+ tve_enable(tve);
+}
+
+static void imx_tve_encoder_disable(struct drm_encoder *encoder)
+{
+ struct imx_tve *tve = enc_to_tve(encoder);
+
+ tve_disable(tve);
+}
+
+static int imx_tve_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+ struct imx_tve *tve = enc_to_tve(encoder);
+
+ imx_crtc_state->bus_format = MEDIA_BUS_FMT_GBR888_1X24;
+ imx_crtc_state->di_hsync_pin = tve->di_hsync_pin;
+ imx_crtc_state->di_vsync_pin = tve->di_vsync_pin;
+
+ return 0;
+}
+
+static void imx_tve_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs imx_tve_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = imx_tve_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
+ .get_modes = imx_tve_connector_get_modes,
+ .mode_valid = imx_tve_connector_mode_valid,
+};
+
+static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
+ .mode_set = imx_tve_encoder_mode_set,
+ .enable = imx_tve_encoder_enable,
+ .disable = imx_tve_encoder_disable,
+ .atomic_check = imx_tve_atomic_check,
+};
+
+static irqreturn_t imx_tve_irq_handler(int irq, void *data)
+{
+ struct imx_tve *tve = data;
+ unsigned int val;
+
+ regmap_read(tve->regmap, TVE_STAT_REG, &val);
+
+ /* clear interrupt status register */
+ regmap_write(tve->regmap, TVE_STAT_REG, 0xffffffff);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned long clk_tve_di_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct imx_tve *tve = container_of(hw, struct imx_tve, clk_hw_di);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(tve->regmap, TVE_COM_CONF_REG, &val);
+ if (ret < 0)
+ return 0;
+
+ switch (val & TVE_DAC_SAMP_RATE_MASK) {
+ case TVE_DAC_DIV4_RATE:
+ return parent_rate / 4;
+ case TVE_DAC_DIV2_RATE:
+ return parent_rate / 2;
+ case TVE_DAC_FULL_RATE:
+ default:
+ return parent_rate;
+ }
+
+ return 0;
+}
+
+static int clk_tve_di_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned long div;
+
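+	/* the TVE DI clock can only divide its parent rate by 1, 2 or 4 */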
+ div = req->best_parent_rate / req->rate;
+ if (div >= 4)
+ req->rate = req->best_parent_rate / 4;
+ else if (div >= 2)
+ req->rate = req->best_parent_rate / 2;
+ else
+ req->rate = req->best_parent_rate;
+
+ return 0;
+}
+
+static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct imx_tve *tve = container_of(hw, struct imx_tve, clk_hw_di);
+ unsigned long div;
+ u32 val;
+ int ret;
+
+ div = parent_rate / rate;
+ if (div >= 4)
+ val = TVE_DAC_DIV4_RATE;
+ else if (div >= 2)
+ val = TVE_DAC_DIV2_RATE;
+ else
+ val = TVE_DAC_FULL_RATE;
+
+ ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
+ TVE_DAC_SAMP_RATE_MASK, val);
+
+ if (ret < 0) {
+ dev_err(tve->dev, "failed to set divider: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct clk_ops clk_tve_di_ops = {
+ .determine_rate = clk_tve_di_determine_rate,
+ .set_rate = clk_tve_di_set_rate,
+ .recalc_rate = clk_tve_di_recalc_rate,
+};
+
+static int tve_clk_init(struct imx_tve *tve, void __iomem *base)
+{
+ const char *tve_di_parent[1];
+ struct clk_init_data init = {
+ .name = "tve_di",
+ .ops = &clk_tve_di_ops,
+ .num_parents = 1,
+ .flags = 0,
+ };
+
+ tve_di_parent[0] = __clk_get_name(tve->clk);
+ init.parent_names = (const char **)&tve_di_parent;
+
+ tve->clk_hw_di.init = &init;
+ tve->di_clk = devm_clk_register(tve->dev, &tve->clk_hw_di);
+ if (IS_ERR(tve->di_clk)) {
+ dev_err(tve->dev, "failed to register TVE output clock: %ld\n",
+ PTR_ERR(tve->di_clk));
+ return PTR_ERR(tve->di_clk);
+ }
+
+ return 0;
+}
+
+static void imx_tve_disable_regulator(void *data)
+{
+ struct imx_tve *tve = data;
+
+ regulator_disable(tve->dac_reg);
+}
+
+static bool imx_tve_readable_reg(struct device *dev, unsigned int reg)
+{
+ return (reg % 4 == 0) && (reg <= 0xdc);
+}
+
+static struct regmap_config tve_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+
+ .readable_reg = imx_tve_readable_reg,
+
+ .fast_io = true,
+
+ .max_register = 0xdc,
+};
+
+static const char * const imx_tve_modes[] = {
+ [TVE_MODE_TVOUT] = "tvout",
+ [TVE_MODE_VGA] = "vga",
+};
+
+static int of_get_tve_mode(struct device_node *np)
+{
+ const char *bm;
+ int ret, i;
+
+ ret = of_property_read_string(np, "fsl,tve-mode", &bm);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(imx_tve_modes); i++)
+ if (!strcasecmp(bm, imx_tve_modes[i]))
+ return i;
+
+ return -EINVAL;
+}
+
+static int imx_tve_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm = data;
+ struct imx_tve *tve = dev_get_drvdata(dev);
+ struct imx_tve_encoder *tvee;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int encoder_type;
+ int ret;
+
+ encoder_type = tve->mode == TVE_MODE_VGA ?
+ DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
+
+ tvee = drmm_simple_encoder_alloc(drm, struct imx_tve_encoder, encoder,
+ encoder_type);
+ if (IS_ERR(tvee))
+ return PTR_ERR(tvee);
+
+ tvee->tve = tve;
+ encoder = &tvee->encoder;
+ connector = &tvee->connector;
+
+ ret = imx_drm_encoder_parse_of(drm, encoder, tve->dev->of_node);
+ if (ret)
+ return ret;
+
+ drm_encoder_helper_add(encoder, &imx_tve_encoder_helper_funcs);
+
+ drm_connector_helper_add(connector, &imx_tve_connector_helper_funcs);
+ ret = drm_connector_init_with_ddc(drm, connector,
+ &imx_tve_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA, tve->ddc);
+ if (ret)
+ return ret;
+
+ return drm_connector_attach_encoder(connector, encoder);
+}
+
+static const struct component_ops imx_tve_ops = {
+ .bind = imx_tve_bind,
+};
+
+static int imx_tve_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *ddc_node;
+ struct imx_tve *tve;
+ void __iomem *base;
+ unsigned int val;
+ int irq;
+ int ret;
+
+ tve = devm_kzalloc(dev, sizeof(*tve), GFP_KERNEL);
+ if (!tve)
+ return -ENOMEM;
+
+ tve->dev = dev;
+
+ ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
+ if (ddc_node) {
+ tve->ddc = of_find_i2c_adapter_by_node(ddc_node);
+ of_node_put(ddc_node);
+ }
+
+ tve->mode = of_get_tve_mode(np);
+ if (tve->mode != TVE_MODE_VGA) {
+		dev_err(dev, "only VGA mode is currently supported\n");
+ return -EINVAL;
+ }
+
+ if (tve->mode == TVE_MODE_VGA) {
+ ret = of_property_read_u32(np, "fsl,hsync-pin",
+ &tve->di_hsync_pin);
+
+ if (ret < 0) {
+ dev_err(dev, "failed to get hsync pin\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "fsl,vsync-pin",
+ &tve->di_vsync_pin);
+
+ if (ret < 0) {
+ dev_err(dev, "failed to get vsync pin\n");
+ return ret;
+ }
+ }
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ tve_regmap_config.lock_arg = tve;
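+	/* the regmap layer gates the "tve" clock around MMIO accesses */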
+ tve->regmap = devm_regmap_init_mmio_clk(dev, "tve", base,
+ &tve_regmap_config);
+ if (IS_ERR(tve->regmap)) {
+ dev_err(dev, "failed to init regmap: %ld\n",
+ PTR_ERR(tve->regmap));
+ return PTR_ERR(tve->regmap);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ imx_tve_irq_handler, IRQF_ONESHOT,
+ "imx-tve", tve);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq: %d\n", ret);
+ return ret;
+ }
+
+ tve->dac_reg = devm_regulator_get(dev, "dac");
+ if (!IS_ERR(tve->dac_reg)) {
+ if (regulator_get_voltage(tve->dac_reg) != IMX_TVE_DAC_VOLTAGE)
+ dev_warn(dev, "dac voltage is not %d uV\n", IMX_TVE_DAC_VOLTAGE);
+ ret = regulator_enable(tve->dac_reg);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, imx_tve_disable_regulator, tve);
+ if (ret)
+ return ret;
+ }
+
+ tve->clk = devm_clk_get(dev, "tve");
+ if (IS_ERR(tve->clk)) {
+ dev_err(dev, "failed to get high speed tve clock: %ld\n",
+ PTR_ERR(tve->clk));
+ return PTR_ERR(tve->clk);
+ }
+
+ /* this is the IPU DI clock input selector, can be parented to tve_di */
+ tve->di_sel_clk = devm_clk_get(dev, "di_sel");
+ if (IS_ERR(tve->di_sel_clk)) {
+ dev_err(dev, "failed to get ipu di mux clock: %ld\n",
+ PTR_ERR(tve->di_sel_clk));
+ return PTR_ERR(tve->di_sel_clk);
+ }
+
+ ret = tve_clk_init(tve, base);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(tve->regmap, TVE_COM_CONF_REG, &val);
+ if (ret < 0) {
+ dev_err(dev, "failed to read configuration register: %d\n",
+ ret);
+ return ret;
+ }
+ if (val != 0x00100000) {
+ dev_err(dev, "configuration register default value indicates this is not a TVEv2\n");
+ return -ENODEV;
+ }
+
+ /* disable cable detection for VGA mode */
+ ret = regmap_write(tve->regmap, TVE_CD_CONT_REG, 0);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, tve);
+
+ return component_add(dev, &imx_tve_ops);
+}
+
+static void imx_tve_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &imx_tve_ops);
+}
+
+static const struct of_device_id imx_tve_dt_ids[] = {
+ { .compatible = "fsl,imx53-tve", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_tve_dt_ids);
+
+static struct platform_driver imx_tve_driver = {
+ .probe = imx_tve_probe,
+ .remove = imx_tve_remove,
+ .driver = {
+ .of_match_table = imx_tve_dt_ids,
+ .name = "imx-tve",
+ },
+};
+
+module_platform_driver(imx_tve_driver);
+
+MODULE_DESCRIPTION("i.MX Television Encoder driver");
+MODULE_AUTHOR("Philipp Zabel, Pengutronix");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
new file mode 100644
index 000000000000..cf7b02b2d52c
--- /dev/null
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * i.MX IPUv3 Graphics driver
+ *
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <video/imx-ipu-v3.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "imx-drm.h"
+#include "ipuv3-plane.h"
+
+#define DRIVER_DESC "i.MX IPUv3 Graphics"
+
+struct ipu_crtc {
+ struct device *dev;
+ struct drm_crtc base;
+
+ /* plane[0] is the full plane, plane[1] is the partial plane */
+ struct ipu_plane *plane[2];
+
+ struct ipu_dc *dc;
+ struct ipu_di *di;
+ int irq;
+ struct drm_pending_vblank_event *event;
+};
+
+static inline struct ipu_crtc *to_ipu_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct ipu_crtc, base);
+}
+
+static void ipu_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+ struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
+
+ ipu_prg_enable(ipu);
+ ipu_dc_enable(ipu);
+ ipu_dc_enable_channel(ipu_crtc->dc);
+ ipu_di_enable(ipu_crtc->di);
+}
+
+static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ bool disable_partial = false;
+ bool disable_full = false;
+ struct drm_plane *plane;
+
+ drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
+ if (plane == &ipu_crtc->plane[0]->base)
+ disable_full = true;
+ if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
+ disable_partial = true;
+ }
+
+ if (disable_partial)
+ ipu_plane_disable(ipu_crtc->plane[1], true);
+ if (disable_full)
+ ipu_plane_disable(ipu_crtc->plane[0], true);
+}
+
+static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
+ struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+ struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
+
+ ipu_dc_disable_channel(ipu_crtc->dc);
+ ipu_di_disable(ipu_crtc->di);
+ /*
+	 * Planes must be disabled before the DC clock is removed, as otherwise
+	 * the attached IDMACs will be left in an undefined state, possibly
+	 * hanging the IPU or even the system.
+ */
+ ipu_crtc_disable_planes(ipu_crtc, old_crtc_state);
+ ipu_dc_disable(ipu);
+ ipu_prg_disable(ipu);
+
+ drm_crtc_vblank_off(crtc);
+
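+ /* Deliver any pending event now that the CRTC is turned off */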
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event && !crtc->state->active) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static void imx_drm_crtc_reset(struct drm_crtc *crtc)
+{
+ struct imx_crtc_state *state;
+
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+ kfree(to_imx_crtc_state(crtc->state));
+ crtc->state = NULL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
+}
+
+static struct drm_crtc_state *imx_drm_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct imx_crtc_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+ WARN_ON(state->base.crtc != crtc);
+ state->base.crtc = crtc;
+
+ return &state->base;
+}
+
+static void imx_drm_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ __drm_atomic_helper_crtc_destroy_state(state);
+ kfree(to_imx_crtc_state(state));
+}
+
+static int ipu_enable_vblank(struct drm_crtc *crtc)
+{
+ struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+
+ enable_irq(ipu_crtc->irq);
+
+ return 0;
+}
+
+static void ipu_disable_vblank(struct drm_crtc *crtc)
+{
+ struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+
+ disable_irq_nosync(ipu_crtc->irq);
+}
+
+static const struct drm_crtc_funcs ipu_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = imx_drm_crtc_reset,
+ .atomic_duplicate_state = imx_drm_crtc_duplicate_state,
+ .atomic_destroy_state = imx_drm_crtc_destroy_state,
+ .enable_vblank = ipu_enable_vblank,
+ .disable_vblank = ipu_disable_vblank,
+};
+
+static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
+{
+ struct ipu_crtc *ipu_crtc = dev_id;
+ struct drm_crtc *crtc = &ipu_crtc->base;
+ unsigned long flags;
+ int i;
+
+ drm_crtc_handle_vblank(crtc);
+
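+ /*
+ * Only deliver the pending event once no plane still has an atomic
+ * update in flight.
+ */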
+ if (ipu_crtc->event) {
+ for (i = 0; i < ARRAY_SIZE(ipu_crtc->plane); i++) {
+ struct ipu_plane *plane = ipu_crtc->plane[i];
+
+ if (!plane)
+ continue;
+
+ if (ipu_plane_atomic_update_pending(&plane->base))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ipu_crtc->plane)) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, ipu_crtc->event);
+ ipu_crtc->event = NULL;
+ drm_crtc_vblank_put(crtc);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+ struct videomode vm;
+ int ret;
+
+ drm_display_mode_to_videomode(adjusted_mode, &vm);
+
+ ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm);
+ if (ret)
+ return false;
+
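+ /* Reject modes that end up with zero-length sync pulses */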
+ if ((vm.vsync_len == 0) || (vm.hsync_len == 0))
+ return false;
+
+ drm_display_mode_from_videomode(&vm, adjusted_mode);
+
+ return true;
+}
+
+static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ u32 primary_plane_mask = drm_plane_mask(crtc->primary);
+
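+ /* An active CRTC requires its primary plane to be enabled */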
+ if (crtc_state->active && (primary_plane_mask & crtc_state->plane_mask) == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ drm_crtc_vblank_on(crtc);
+}
+
+static void ipu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+
+ WARN_ON(drm_crtc_vblank_get(crtc));
+ ipu_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+ struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state);
+ struct ipu_di_signal_cfg sig_cfg = {};
+ unsigned long encoder_types = 0;
+
+ dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__,
+ mode->hdisplay);
+ dev_dbg(ipu_crtc->dev, "%s: mode->vdisplay: %d\n", __func__,
+ mode->vdisplay);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc)
+ encoder_types |= BIT(encoder->encoder_type);
+ }
+
+ dev_dbg(ipu_crtc->dev, "%s: attached to encoder types 0x%lx\n",
+ __func__, encoder_types);
+
+ /*
+ * If we have a DAC or LDB encoder, the IPU DI clock must be the same
+ * as the LDB DI clock. For the TVDAC, derive the IPU DI clock from
+ * the 27 MHz TVE_DI clock, but allow it to be divided.
+ */
+ if (encoder_types & (BIT(DRM_MODE_ENCODER_DAC) |
+ BIT(DRM_MODE_ENCODER_LVDS)))
+ sig_cfg.clkflags = IPU_DI_CLKMODE_SYNC | IPU_DI_CLKMODE_EXT;
+ else if (encoder_types & BIT(DRM_MODE_ENCODER_TVDAC))
+ sig_cfg.clkflags = IPU_DI_CLKMODE_EXT;
+ else
+ sig_cfg.clkflags = 0;
+
+ sig_cfg.enable_pol = !(imx_crtc_state->bus_flags & DRM_BUS_FLAG_DE_LOW);
+ /* Default to driving pixel data on negative clock edges */
+ sig_cfg.clk_pol = !!(imx_crtc_state->bus_flags &
+ DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE);
+ sig_cfg.bus_format = imx_crtc_state->bus_format;
+ sig_cfg.v_to_h_sync = 0;
+ sig_cfg.hsync_pin = imx_crtc_state->di_hsync_pin;
+ sig_cfg.vsync_pin = imx_crtc_state->di_vsync_pin;
+
+ drm_display_mode_to_videomode(mode, &sig_cfg.mode);
+ if (!IS_ALIGNED(sig_cfg.mode.hactive, 8)) {
+ unsigned int new_hactive = ALIGN(sig_cfg.mode.hactive, 8);
+
+ dev_warn(ipu_crtc->dev, "8-pixel align hactive %d -> %d\n",
+ sig_cfg.mode.hactive, new_hactive);
+
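+ /* Take the extra pixels from the front porch so htotal stays the same */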
+ sig_cfg.mode.hfront_porch -= new_hactive - sig_cfg.mode.hactive;
+ sig_cfg.mode.hactive = new_hactive;
+ }
+
+ ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di,
+ mode->flags & DRM_MODE_FLAG_INTERLACE,
+ imx_crtc_state->bus_format, sig_cfg.mode.hactive);
+ ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg);
+}
+
+static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
+ .mode_fixup = ipu_crtc_mode_fixup,
+ .mode_set_nofb = ipu_crtc_mode_set_nofb,
+ .atomic_check = ipu_crtc_atomic_check,
+ .atomic_begin = ipu_crtc_atomic_begin,
+ .atomic_flush = ipu_crtc_atomic_flush,
+ .atomic_disable = ipu_crtc_atomic_disable,
+ .atomic_enable = ipu_crtc_atomic_enable,
+};
+
+static void ipu_put_resources(struct drm_device *dev, void *ptr)
+{
+ struct ipu_crtc *ipu_crtc = ptr;
+
+ if (!IS_ERR_OR_NULL(ipu_crtc->dc))
+ ipu_dc_put(ipu_crtc->dc);
+ if (!IS_ERR_OR_NULL(ipu_crtc->di))
+ ipu_di_put(ipu_crtc->di);
+}
+
+static int ipu_get_resources(struct drm_device *dev, struct ipu_crtc *ipu_crtc,
+ struct ipu_client_platformdata *pdata)
+{
+ struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
+ int ret;
+
+ ipu_crtc->dc = ipu_dc_get(ipu, pdata->dc);
+ if (IS_ERR(ipu_crtc->dc))
+ return PTR_ERR(ipu_crtc->dc);
+
+ ret = drmm_add_action_or_reset(dev, ipu_put_resources, ipu_crtc);
+ if (ret)
+ return ret;
+
+ ipu_crtc->di = ipu_di_get(ipu, pdata->di);
+ if (IS_ERR(ipu_crtc->di))
+ return PTR_ERR(ipu_crtc->di);
+
+ return 0;
+}
+
+static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
+{
+ struct ipu_client_platformdata *pdata = dev->platform_data;
+ struct ipu_soc *ipu = dev_get_drvdata(dev->parent);
+ struct drm_device *drm = data;
+ struct ipu_plane *primary_plane;
+ struct ipu_crtc *ipu_crtc;
+ struct drm_crtc *crtc;
+ int dp = -EINVAL;
+ int ret;
+
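+ /* With a DP, the primary plane scans out through the background flow */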
+ if (pdata->dp >= 0)
+ dp = IPU_DP_FLOW_SYNC_BG;
+ primary_plane = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
+ DRM_PLANE_TYPE_PRIMARY);
+ if (IS_ERR(primary_plane))
+ return PTR_ERR(primary_plane);
+
+ ipu_crtc = drmm_crtc_alloc_with_planes(drm, struct ipu_crtc, base,
+ &primary_plane->base, NULL,
+ &ipu_crtc_funcs, NULL);
+ if (IS_ERR(ipu_crtc))
+ return PTR_ERR(ipu_crtc);
+
+ ipu_crtc->dev = dev;
+ ipu_crtc->plane[0] = primary_plane;
+
+ crtc = &ipu_crtc->base;
+ crtc->port = pdata->of_node;
+ drm_crtc_helper_add(crtc, &ipu_helper_funcs);
+
+ ret = ipu_get_resources(drm, ipu_crtc, pdata);
+ if (ret) {
+ dev_err(ipu_crtc->dev, "getting resources failed with %d.\n",
+ ret);
+ return ret;
+ }
+
+ /* If this crtc is using the DP, add an overlay plane */
+ if (pdata->dp >= 0 && pdata->dma[1] > 0) {
+ ipu_crtc->plane[1] = ipu_plane_init(drm, ipu, pdata->dma[1],
+ IPU_DP_FLOW_SYNC_FG,
+ drm_crtc_mask(&ipu_crtc->base),
+ DRM_PLANE_TYPE_OVERLAY);
+ if (IS_ERR(ipu_crtc->plane[1]))
+ ipu_crtc->plane[1] = NULL;
+ }
+
+ ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
+ ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler,
+ IRQF_NO_AUTOEN, "imx_drm", ipu_crtc);
+ if (ret < 0) {
+ dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct component_ops ipu_crtc_ops = {
+ .bind = ipu_drm_bind,
+};
+
+static int ipu_drm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ if (!dev->platform_data)
+ return -EINVAL;
+
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ return component_add(dev, &ipu_crtc_ops);
+}
+
+static void ipu_drm_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &ipu_crtc_ops);
+}
+
+struct platform_driver ipu_drm_driver = {
+ .driver = {
+ .name = "imx-ipuv3-crtc",
+ },
+ .probe = ipu_drm_probe,
+ .remove = ipu_drm_remove,
+};
diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
new file mode 100644
index 000000000000..db50eccea0ca
--- /dev/null
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
@@ -0,0 +1,957 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * i.MX IPUv3 DP Overlay Planes
+ *
+ * Copyright (C) 2013 Philipp Zabel, Pengutronix
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+
+#include <video/imx-ipu-v3.h>
+
+#include "imx-drm.h"
+#include "ipuv3-plane.h"
+
+struct ipu_plane_state {
+ struct drm_plane_state base;
+ bool use_pre;
+};
+
+static inline struct ipu_plane_state *
+to_ipu_plane_state(struct drm_plane_state *p)
+{
+ return container_of(p, struct ipu_plane_state, base);
+}
+
+static unsigned int ipu_src_rect_width(const struct drm_plane_state *state)
+{
+ return ALIGN(drm_rect_width(&state->src) >> 16, 8);
+}
+
+static inline struct ipu_plane *to_ipu_plane(struct drm_plane *p)
+{
+ return container_of(p, struct ipu_plane, base);
+}
+
+static const uint32_t ipu_plane_all_formats[] = {
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YVU422,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_YVU444,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB565_A8,
+ DRM_FORMAT_BGR565_A8,
+ DRM_FORMAT_RGB888_A8,
+ DRM_FORMAT_BGR888_A8,
+ DRM_FORMAT_RGBX8888_A8,
+ DRM_FORMAT_BGRX8888_A8,
+};
+
+static const uint32_t ipu_plane_rgb_formats[] = {
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB565_A8,
+ DRM_FORMAT_BGR565_A8,
+ DRM_FORMAT_RGB888_A8,
+ DRM_FORMAT_BGR888_A8,
+ DRM_FORMAT_RGBX8888_A8,
+ DRM_FORMAT_BGRX8888_A8,
+};
+
+static const uint64_t ipu_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static const uint64_t pre_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_VIVANTE_TILED,
+ DRM_FORMAT_MOD_VIVANTE_SUPER_TILED,
+ DRM_FORMAT_MOD_INVALID
+};
+
+int ipu_plane_irq(struct ipu_plane *ipu_plane)
+{
+ return ipu_idmac_channel_irq(ipu_plane->ipu, ipu_plane->ipu_ch,
+ IPU_IRQ_EOF);
+}
+
+static inline unsigned long
+drm_plane_state_to_eba(struct drm_plane_state *state, int plane)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_dma_object *dma_obj;
+ int x = state->src.x1 >> 16;
+ int y = state->src.y1 >> 16;
+
+ dma_obj = drm_fb_dma_get_gem_obj(fb, plane);
+ BUG_ON(!dma_obj);
+
+ return dma_obj->dma_addr + fb->offsets[plane] + fb->pitches[plane] * y +
+ fb->format->cpp[plane] * x;
+}
+
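+/*
+ * UBO/VBO are the U/V plane base offsets relative to the start address
+ * (EBA), hence the subtraction of eba in the helpers below.
+ */
+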
+static inline unsigned long
+drm_plane_state_to_ubo(struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_dma_object *dma_obj;
+ unsigned long eba = drm_plane_state_to_eba(state, 0);
+ int x = state->src.x1 >> 16;
+ int y = state->src.y1 >> 16;
+
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 1);
+ BUG_ON(!dma_obj);
+
+ x /= fb->format->hsub;
+ y /= fb->format->vsub;
+
+ return dma_obj->dma_addr + fb->offsets[1] + fb->pitches[1] * y +
+ fb->format->cpp[1] * x - eba;
+}
+
+static inline unsigned long
+drm_plane_state_to_vbo(struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_dma_object *dma_obj;
+ unsigned long eba = drm_plane_state_to_eba(state, 0);
+ int x = state->src.x1 >> 16;
+ int y = state->src.y1 >> 16;
+
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 2);
+ BUG_ON(!dma_obj);
+
+ x /= fb->format->hsub;
+ y /= fb->format->vsub;
+
+ return dma_obj->dma_addr + fb->offsets[2] + fb->pitches[2] * y +
+ fb->format->cpp[2] * x - eba;
+}
+
+static void ipu_plane_put_resources(struct drm_device *dev, void *ptr)
+{
+ struct ipu_plane *ipu_plane = ptr;
+
+ if (!IS_ERR_OR_NULL(ipu_plane->dp))
+ ipu_dp_put(ipu_plane->dp);
+ if (!IS_ERR_OR_NULL(ipu_plane->dmfc))
+ ipu_dmfc_put(ipu_plane->dmfc);
+ if (!IS_ERR_OR_NULL(ipu_plane->ipu_ch))
+ ipu_idmac_put(ipu_plane->ipu_ch);
+ if (!IS_ERR_OR_NULL(ipu_plane->alpha_ch))
+ ipu_idmac_put(ipu_plane->alpha_ch);
+}
+
+static int ipu_plane_get_resources(struct drm_device *dev,
+ struct ipu_plane *ipu_plane)
+{
+ int ret;
+ int alpha_ch;
+
+ ipu_plane->ipu_ch = ipu_idmac_get(ipu_plane->ipu, ipu_plane->dma);
+ if (IS_ERR(ipu_plane->ipu_ch)) {
+ ret = PTR_ERR(ipu_plane->ipu_ch);
+ DRM_ERROR("failed to get idmac channel: %d\n", ret);
+ return ret;
+ }
+
+ ret = drmm_add_action_or_reset(dev, ipu_plane_put_resources, ipu_plane);
+ if (ret)
+ return ret;
+
+ alpha_ch = ipu_channel_alpha_channel(ipu_plane->dma);
+ if (alpha_ch >= 0) {
+ ipu_plane->alpha_ch = ipu_idmac_get(ipu_plane->ipu, alpha_ch);
+ if (IS_ERR(ipu_plane->alpha_ch)) {
+ ret = PTR_ERR(ipu_plane->alpha_ch);
+ DRM_ERROR("failed to get alpha idmac channel %d: %d\n",
+ alpha_ch, ret);
+ return ret;
+ }
+ }
+
+ ipu_plane->dmfc = ipu_dmfc_get(ipu_plane->ipu, ipu_plane->dma);
+ if (IS_ERR(ipu_plane->dmfc)) {
+ ret = PTR_ERR(ipu_plane->dmfc);
+ DRM_ERROR("failed to get dmfc: ret %d\n", ret);
+ return ret;
+ }
+
+ if (ipu_plane->dp_flow >= 0) {
+ ipu_plane->dp = ipu_dp_get(ipu_plane->ipu, ipu_plane->dp_flow);
+ if (IS_ERR(ipu_plane->dp)) {
+ ret = PTR_ERR(ipu_plane->dp);
+ DRM_ERROR("failed to get dp flow: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static bool ipu_plane_separate_alpha(struct ipu_plane *ipu_plane)
+{
+ switch (ipu_plane->base.state->fb->format->format) {
+ case DRM_FORMAT_RGB565_A8:
+ case DRM_FORMAT_BGR565_A8:
+ case DRM_FORMAT_RGB888_A8:
+ case DRM_FORMAT_BGR888_A8:
+ case DRM_FORMAT_RGBX8888_A8:
+ case DRM_FORMAT_BGRX8888_A8:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void ipu_plane_enable(struct ipu_plane *ipu_plane)
+{
+ if (ipu_plane->dp)
+ ipu_dp_enable(ipu_plane->ipu);
+ ipu_dmfc_enable_channel(ipu_plane->dmfc);
+ ipu_idmac_enable_channel(ipu_plane->ipu_ch);
+ if (ipu_plane_separate_alpha(ipu_plane))
+ ipu_idmac_enable_channel(ipu_plane->alpha_ch);
+ if (ipu_plane->dp)
+ ipu_dp_enable_channel(ipu_plane->dp);
+}
+
+void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ ret = ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
+ if (ret == -ETIMEDOUT) {
+ DRM_ERROR("[PLANE:%d] IDMAC timeout\n",
+ ipu_plane->base.base.id);
+ }
+
+ if (ipu_plane->dp && disable_dp_channel)
+ ipu_dp_disable_channel(ipu_plane->dp, false);
+ ipu_idmac_disable_channel(ipu_plane->ipu_ch);
+ if (ipu_plane->alpha_ch)
+ ipu_idmac_disable_channel(ipu_plane->alpha_ch);
+ ipu_dmfc_disable_channel(ipu_plane->dmfc);
+ if (ipu_plane->dp)
+ ipu_dp_disable(ipu_plane->ipu);
+ if (ipu_prg_present(ipu_plane->ipu))
+ ipu_prg_channel_disable(ipu_plane->ipu_ch);
+}
+
+void ipu_plane_disable_deferred(struct drm_plane *plane)
+{
+ struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+
+ if (ipu_plane->disabling) {
+ ipu_plane->disabling = false;
+ ipu_plane_disable(ipu_plane, false);
+ }
+}
+
+static void ipu_plane_state_reset(struct drm_plane *plane)
+{
+ struct ipu_plane_state *ipu_state;
+
+ if (plane->state) {
+ ipu_state = to_ipu_plane_state(plane->state);
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+ kfree(ipu_state);
+ plane->state = NULL;
+ }
+
+ ipu_state = kzalloc(sizeof(*ipu_state), GFP_KERNEL);
+
+ if (ipu_state)
+ __drm_atomic_helper_plane_reset(plane, &ipu_state->base);
+}
+
+static struct drm_plane_state *
+ipu_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct ipu_plane_state *state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
+
+ return &state->base;
+}
+
+static void ipu_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct ipu_plane_state *ipu_state = to_ipu_plane_state(state);
+
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(ipu_state);
+}
+
+static bool ipu_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format, uint64_t modifier)
+{
+ struct ipu_soc *ipu = to_ipu_plane(plane)->ipu;
+
+ /* linear is supported for all planes and formats */
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ /*
+ * Without a PRG the possible modifiers list only includes the linear
+ * modifier, so we always take the early return from this function and
+ * only end up here if the PRG is present.
+ */
+ return ipu_prg_format_supported(ipu, format, modifier);
+}
+
+static const struct drm_plane_funcs ipu_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = ipu_plane_state_reset,
+ .atomic_duplicate_state = ipu_plane_duplicate_state,
+ .atomic_destroy_state = ipu_plane_destroy_state,
+ .format_mod_supported = ipu_plane_format_mod_supported,
+};
+
+static int ipu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_crtc_state *crtc_state;
+ struct device *dev = plane->dev->dev;
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_framebuffer *old_fb = old_state->fb;
+ unsigned long eba, ubo, vbo, old_ubo, old_vbo, alpha_eba;
+ bool can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
+ int ret;
+
+ /* Ok to disable */
+ if (!fb)
+ return 0;
+
+ if (WARN_ON(!new_state->crtc))
+ return -EINVAL;
+
+ crtc_state =
+ drm_atomic_get_new_crtc_state(state, new_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ can_position, true);
+ if (ret)
+ return ret;
+
+ /* nothing to check when disabling or disabled */
+ if (!crtc_state->enable)
+ return 0;
+
+ switch (plane->type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ /* full plane minimum width is 13 pixels */
+ if (drm_rect_width(&new_state->dst) < 13)
+ return -EINVAL;
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ break;
+ default:
+ dev_warn(dev, "Unsupported plane type %d\n", plane->type);
+ return -EINVAL;
+ }
+
+ if (drm_rect_height(&new_state->dst) < 2)
+ return -EINVAL;
+
+ /*
+ * We support resizing active plane or changing its format by
+ * forcing CRTC mode change in plane's ->atomic_check callback
+ * and disabling all affected active planes in CRTC's ->atomic_disable
+ * callback. The planes will be reenabled in plane's ->atomic_update
+ * callback.
+ */
+ if (old_fb &&
+ (drm_rect_width(&new_state->dst) != drm_rect_width(&old_state->dst) ||
+ drm_rect_height(&new_state->dst) != drm_rect_height(&old_state->dst) ||
+ fb->format != old_fb->format))
+ crtc_state->mode_changed = true;
+
+ eba = drm_plane_state_to_eba(new_state, 0);
+
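+ /* The buffer start address must be 8-byte aligned */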
+ if (eba & 0x7)
+ return -EINVAL;
+
+ if (fb->pitches[0] < 1 || fb->pitches[0] > 16384)
+ return -EINVAL;
+
+ if (old_fb && fb->pitches[0] != old_fb->pitches[0])
+ crtc_state->mode_changed = true;
+
+ if (ALIGN(fb->width, 8) * fb->format->cpp[0] >
+ fb->pitches[0] + fb->offsets[0]) {
+ dev_warn(dev, "pitch is not big enough for 8-pixel alignment\n");
+ return -EINVAL;
+ }
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ /*
+ * Multiplanar formats have to meet the following restrictions:
+ * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
+ * - EBA, UBO and VBO are a multiple of 8
+ * - UBO and VBO are unsigned and not larger than 0xfffff8
+ * - Only EBA may be changed while scanout is active
+ * - The strides of U and V planes must be identical.
+ */
+ vbo = drm_plane_state_to_vbo(new_state);
+
+ if (vbo & 0x7 || vbo > 0xfffff8)
+ return -EINVAL;
+
+ if (old_fb && (fb->format == old_fb->format)) {
+ old_vbo = drm_plane_state_to_vbo(old_state);
+ if (vbo != old_vbo)
+ crtc_state->mode_changed = true;
+ }
+
+ if (fb->pitches[1] != fb->pitches[2])
+ return -EINVAL;
+
+ fallthrough;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ ubo = drm_plane_state_to_ubo(new_state);
+
+ if (ubo & 0x7 || ubo > 0xfffff8)
+ return -EINVAL;
+
+ if (old_fb && (fb->format == old_fb->format)) {
+ old_ubo = drm_plane_state_to_ubo(old_state);
+ if (ubo != old_ubo)
+ crtc_state->mode_changed = true;
+ }
+
+ if (fb->pitches[1] < 1 || fb->pitches[1] > 16384)
+ return -EINVAL;
+
+ if (old_fb && old_fb->pitches[1] != fb->pitches[1])
+ crtc_state->mode_changed = true;
+
+ /*
+ * The x/y offsets must be even in case of horizontal/vertical
+ * chroma subsampling.
+ */
+ if (((new_state->src.x1 >> 16) & (fb->format->hsub - 1)) ||
+ ((new_state->src.y1 >> 16) & (fb->format->vsub - 1)))
+ return -EINVAL;
+ break;
+ case DRM_FORMAT_RGB565_A8:
+ case DRM_FORMAT_BGR565_A8:
+ case DRM_FORMAT_RGB888_A8:
+ case DRM_FORMAT_BGR888_A8:
+ case DRM_FORMAT_RGBX8888_A8:
+ case DRM_FORMAT_BGRX8888_A8:
+ alpha_eba = drm_plane_state_to_eba(new_state, 1);
+ if (alpha_eba & 0x7)
+ return -EINVAL;
+
+ if (fb->pitches[1] < 1 || fb->pitches[1] > 16384)
+ return -EINVAL;
+
+ if (old_fb && old_fb->pitches[1] != fb->pitches[1])
+ crtc_state->mode_changed = true;
+ break;
+ }
+
+ return 0;
+}
+
+static void ipu_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+
+ if (ipu_plane->dp)
+ ipu_dp_disable_channel(ipu_plane->dp, true);
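+ /* The remaining teardown is deferred to ipu_plane_disable_deferred() */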
+ ipu_plane->disabling = true;
+}
+
+static int ipu_chan_assign_axi_id(int ipu_chan)
+{
+ switch (ipu_chan) {
+ case IPUV3_CHANNEL_MEM_BG_SYNC:
+ return 1;
+ case IPUV3_CHANNEL_MEM_FG_SYNC:
+ return 2;
+ case IPUV3_CHANNEL_MEM_DC_SYNC:
+ return 3;
+ default:
+ return 0;
+ }
+}
+
+static void ipu_calculate_bursts(u32 width, u32 cpp, u32 stride,
+ u8 *burstsize, u8 *num_bursts)
+{
+ const unsigned int width_bytes = width * cpp;
+ unsigned int npb, bursts;
+
+ /* Maximum number of pixels per burst without overshooting stride */
+ for (npb = 64 / cpp; npb > 0; --npb) {
+ if (round_up(width_bytes, npb * cpp) <= stride)
+ break;
+ }
+ *burstsize = npb;
+
+ /* Maximum number of consecutive bursts without overshooting stride */
+ for (bursts = 8; bursts > 1; bursts /= 2) {
+ if (round_up(width_bytes, npb * cpp * bursts) <= stride)
+ break;
+ }
+ *num_bursts = bursts;
+}
+
+static void ipu_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct ipu_plane_state *ipu_state = to_ipu_plane_state(new_state);
+ struct drm_crtc_state *crtc_state = new_state->crtc->state;
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_rect *dst = &new_state->dst;
+ unsigned long eba, ubo, vbo;
+ unsigned long alpha_eba = 0;
+ enum ipu_color_space ics;
+ unsigned int axi_id = 0;
+ const struct drm_format_info *info;
+ u8 burstsize, num_bursts;
+ u32 width, height;
+ int active;
+
+ if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_FG)
+ ipu_dp_set_window_pos(ipu_plane->dp, dst->x1, dst->y1);
+
+ switch (ipu_plane->dp_flow) {
+ case IPU_DP_FLOW_SYNC_BG:
+ if (new_state->normalized_zpos == 1) {
+ ipu_dp_set_global_alpha(ipu_plane->dp,
+ !fb->format->has_alpha, 0xff,
+ true);
+ } else {
+ ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
+ }
+ break;
+ case IPU_DP_FLOW_SYNC_FG:
+ if (new_state->normalized_zpos == 1) {
+ ipu_dp_set_global_alpha(ipu_plane->dp,
+ !fb->format->has_alpha, 0xff,
+ false);
+ }
+ break;
+ }
+
+ if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_BG)
+ width = ipu_src_rect_width(new_state);
+ else
+ width = drm_rect_width(&new_state->src) >> 16;
+ height = drm_rect_height(&new_state->src) >> 16;
+
+ eba = drm_plane_state_to_eba(new_state, 0);
+
+ /*
+ * Configure the PRG channel and the attached PRE; this changes the
+ * EBA to an internal SRAM location.
+ */
+ if (ipu_state->use_pre) {
+ axi_id = ipu_chan_assign_axi_id(ipu_plane->dma);
+ ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, width,
+ height, fb->pitches[0],
+ fb->format->format, fb->modifier,
+ &eba);
+ }
+
+ if (!old_state->fb ||
+ old_state->fb->format->format != fb->format->format ||
+ old_state->color_encoding != new_state->color_encoding ||
+ old_state->color_range != new_state->color_range) {
+ ics = ipu_drm_fourcc_to_colorspace(fb->format->format);
+ switch (ipu_plane->dp_flow) {
+ case IPU_DP_FLOW_SYNC_BG:
+ ipu_dp_setup_channel(ipu_plane->dp, new_state->color_encoding,
+ new_state->color_range, ics,
+ IPUV3_COLORSPACE_RGB);
+ break;
+ case IPU_DP_FLOW_SYNC_FG:
+ ipu_dp_setup_channel(ipu_plane->dp, new_state->color_encoding,
+ new_state->color_range, ics,
+ IPUV3_COLORSPACE_UNKNOWN);
+ break;
+ }
+ }
+
+ if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state)) {
+ /* nothing to do if PRE is used */
+ if (ipu_state->use_pre)
+ return;
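+ /* Flip the inactive double buffer to the new framebuffer address */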
+ active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
+ ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
+ if (ipu_plane_separate_alpha(ipu_plane)) {
+ active = ipu_idmac_get_current_buffer(ipu_plane->alpha_ch);
+ ipu_cpmem_set_buffer(ipu_plane->alpha_ch, !active,
+ alpha_eba);
+ ipu_idmac_select_buffer(ipu_plane->alpha_ch, !active);
+ }
+ return;
+ }
+
+ ics = ipu_drm_fourcc_to_colorspace(fb->format->format);
+ switch (ipu_plane->dp_flow) {
+ case IPU_DP_FLOW_SYNC_BG:
+ ipu_dp_setup_channel(ipu_plane->dp, DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_LIMITED_RANGE, ics,
+ IPUV3_COLORSPACE_RGB);
+ break;
+ case IPU_DP_FLOW_SYNC_FG:
+ ipu_dp_setup_channel(ipu_plane->dp, DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_LIMITED_RANGE, ics,
+ IPUV3_COLORSPACE_UNKNOWN);
+ break;
+ }
+
+ ipu_dmfc_config_wait4eot(ipu_plane->dmfc, width);
+
+ info = drm_format_info(fb->format->format);
+ ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0],
+ &burstsize, &num_bursts);
+
+ ipu_cpmem_zero(ipu_plane->ipu_ch);
+ ipu_cpmem_set_resolution(ipu_plane->ipu_ch, width, height);
+ ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->format->format);
+ ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, burstsize);
+ ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
+ ipu_idmac_enable_watermark(ipu_plane->ipu_ch, true);
+ ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
+ ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]);
+ ipu_cpmem_set_axi_id(ipu_plane->ipu_ch, axi_id);
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ ubo = drm_plane_state_to_ubo(new_state);
+ vbo = drm_plane_state_to_vbo(new_state);
+ if (fb->format->format == DRM_FORMAT_YVU420 ||
+ fb->format->format == DRM_FORMAT_YVU422 ||
+ fb->format->format == DRM_FORMAT_YVU444)
+ swap(ubo, vbo);
+
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ fb->pitches[1], ubo, vbo);
+
+ dev_dbg(ipu_plane->base.dev->dev,
+ "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
+ new_state->src.x1 >> 16, new_state->src.y1 >> 16);
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ ubo = drm_plane_state_to_ubo(new_state);
+
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ fb->pitches[1], ubo, ubo);
+
+ dev_dbg(ipu_plane->base.dev->dev,
+ "phy = %lu %lu, x = %d, y = %d", eba, ubo,
+ new_state->src.x1 >> 16, new_state->src.y1 >> 16);
+ break;
+ case DRM_FORMAT_RGB565_A8:
+ case DRM_FORMAT_BGR565_A8:
+ case DRM_FORMAT_RGB888_A8:
+ case DRM_FORMAT_BGR888_A8:
+ case DRM_FORMAT_RGBX8888_A8:
+ case DRM_FORMAT_BGRX8888_A8:
+ alpha_eba = drm_plane_state_to_eba(new_state, 1);
+ num_bursts = 0;
+
+ dev_dbg(ipu_plane->base.dev->dev, "phys = %lu %lu, x = %d, y = %d",
+ eba, alpha_eba, new_state->src.x1 >> 16,
+ new_state->src.y1 >> 16);
+
+ ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, 16);
+
+ ipu_cpmem_zero(ipu_plane->alpha_ch);
+ ipu_cpmem_set_resolution(ipu_plane->alpha_ch, width, height);
+ ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8);
+ ipu_cpmem_set_high_priority(ipu_plane->alpha_ch);
+ ipu_idmac_set_double_buffer(ipu_plane->alpha_ch, 1);
+ ipu_cpmem_set_stride(ipu_plane->alpha_ch, fb->pitches[1]);
+ ipu_cpmem_set_burstsize(ipu_plane->alpha_ch, 16);
+ ipu_cpmem_set_buffer(ipu_plane->alpha_ch, 0, alpha_eba);
+ ipu_cpmem_set_buffer(ipu_plane->alpha_ch, 1, alpha_eba);
+ break;
+ default:
+ dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d",
+ eba, new_state->src.x1 >> 16, new_state->src.y1 >> 16);
+ break;
+ }
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
+ ipu_idmac_lock_enable(ipu_plane->ipu_ch, num_bursts);
+ ipu_plane_enable(ipu_plane);
+}
+
+static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
+ .atomic_check = ipu_plane_atomic_check,
+ .atomic_disable = ipu_plane_atomic_disable,
+ .atomic_update = ipu_plane_atomic_update,
+};
+
+static const struct drm_plane_helper_funcs ipu_primary_plane_helper_funcs = {
+ .atomic_check = ipu_plane_atomic_check,
+ .atomic_disable = ipu_plane_atomic_disable,
+ .atomic_update = ipu_plane_atomic_update,
+ .get_scanout_buffer = drm_fb_dma_get_scanout_buffer,
+};
+
+bool ipu_plane_atomic_update_pending(struct drm_plane *plane)
+{
+ struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct ipu_plane_state *ipu_state = to_ipu_plane_state(state);
+
+ /* disabled crtcs must not block the update */
+ if (!state->crtc)
+ return false;
+
+ if (ipu_state->use_pre)
+ return ipu_prg_channel_configure_pending(ipu_plane->ipu_ch);
+
+ /*
+ * Pretend no update is pending in the non-PRE/PRG case. For this to
+ * happen, an atomic update would have to be deferred until after the
+ * start of the next frame and simultaneously interrupt latency would
+ * have to be high enough to let the atomic update finish and issue an
+ * event before the previous end of frame interrupt handler can be
+ * executed.
+ */
+ return false;
+}
+
+int ipu_planes_assign_pre(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *old_crtc_state, *crtc_state;
+ struct drm_plane_state *plane_state;
+ struct ipu_plane_state *ipu_state;
+ struct ipu_plane *ipu_plane;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ int available_pres = ipu_prg_max_active_channels();
+ int ret, i;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * We are going over the planes in 2 passes: first we assign PREs to
+ * planes with a tiling modifier, which need the PREs to resolve into
+ * linear. Any failure to assign a PRE there is fatal. In the second
+ * pass we try to assign PREs to linear FBs, to improve memory access
+ * patterns for them. Failure at this point is non-fatal, as we can
+ * scan out linear FBs without a PRE.
+ */
+ for_each_new_plane_in_state(state, plane, plane_state, i) {
+ ipu_state = to_ipu_plane_state(plane_state);
+ ipu_plane = to_ipu_plane(plane);
+
+ if (!plane_state->fb) {
+ ipu_state->use_pre = false;
+ continue;
+ }
+
+ if (!(plane_state->fb->flags & DRM_MODE_FB_MODIFIERS) ||
+ plane_state->fb->modifier == DRM_FORMAT_MOD_LINEAR)
+ continue;
+
+ if (!ipu_prg_present(ipu_plane->ipu) || !available_pres)
+ return -EINVAL;
+
+ if (!ipu_prg_format_supported(ipu_plane->ipu,
+ plane_state->fb->format->format,
+ plane_state->fb->modifier))
+ return -EINVAL;
+
+ ipu_state->use_pre = true;
+ available_pres--;
+ }
+
+ for_each_new_plane_in_state(state, plane, plane_state, i) {
+ ipu_state = to_ipu_plane_state(plane_state);
+ ipu_plane = to_ipu_plane(plane);
+
+ if (!plane_state->fb) {
+ ipu_state->use_pre = false;
+ continue;
+ }
+
+ if ((plane_state->fb->flags & DRM_MODE_FB_MODIFIERS) &&
+ plane_state->fb->modifier != DRM_FORMAT_MOD_LINEAR)
+ continue;
+
+ /* make sure that modifier is initialized */
+ plane_state->fb->modifier = DRM_FORMAT_MOD_LINEAR;
+
+ if (ipu_prg_present(ipu_plane->ipu) && available_pres &&
+ ipu_prg_format_supported(ipu_plane->ipu,
+ plane_state->fb->format->format,
+ plane_state->fb->modifier)) {
+ ipu_state->use_pre = true;
+ available_pres--;
+ } else {
+ ipu_state->use_pre = false;
+ }
+ }
+
+ return 0;
+}
+
+struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
+ int dma, int dp, unsigned int possible_crtcs,
+ enum drm_plane_type type)
+{
+ struct ipu_plane *ipu_plane;
+ const uint64_t *modifiers = ipu_format_modifiers;
+ unsigned int zpos = (type == DRM_PLANE_TYPE_PRIMARY) ? 0 : 1;
+ unsigned int format_count;
+ const uint32_t *formats;
+ int ret;
+
+ DRM_DEBUG_KMS("channel %d, dp flow %d, possible_crtcs=0x%x\n",
+ dma, dp, possible_crtcs);
+
+ if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG) {
+ formats = ipu_plane_all_formats;
+ format_count = ARRAY_SIZE(ipu_plane_all_formats);
+ } else {
+ formats = ipu_plane_rgb_formats;
+ format_count = ARRAY_SIZE(ipu_plane_rgb_formats);
+ }
+
+ if (ipu_prg_present(ipu))
+ modifiers = pre_format_modifiers;
+
+ ipu_plane = drmm_universal_plane_alloc(dev, struct ipu_plane, base,
+ possible_crtcs, &ipu_plane_funcs,
+ formats, format_count, modifiers,
+ type, NULL);
+ if (IS_ERR(ipu_plane)) {
+ DRM_ERROR("failed to allocate and initialize %s plane\n",
+ zpos ? "overlay" : "primary");
+ return ipu_plane;
+ }
+
+ ipu_plane->ipu = ipu;
+ ipu_plane->dma = dma;
+ ipu_plane->dp_flow = dp;
+
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_helper_add(&ipu_plane->base, &ipu_primary_plane_helper_funcs);
+ else
+ drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
+
+ if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG)
+ ret = drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0,
+ 1);
+ else
+ ret = drm_plane_create_zpos_immutable_property(&ipu_plane->base,
+ 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_plane_create_color_properties(&ipu_plane->base,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
+ DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = ipu_plane_get_resources(dev, ipu_plane);
+ if (ret) {
+ DRM_ERROR("failed to get %s plane resources: %pe\n",
+ zpos ? "overlay" : "primary", &ret);
+ return ERR_PTR(ret);
+ }
+
+ return ipu_plane;
+}
diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.h
new file mode 100644
index 000000000000..6d544e6ce63f
--- /dev/null
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __IPUV3_PLANE_H__
+#define __IPUV3_PLANE_H__
+
+#include <drm/drm_crtc.h> /* drm_plane */
+
+struct drm_plane;
+struct drm_device;
+struct ipu_soc;
+struct drm_crtc;
+struct drm_framebuffer;
+
+struct ipuv3_channel;
+struct dmfc_channel;
+struct ipu_dp;
+
+struct ipu_plane {
+ struct drm_plane base;
+
+ struct ipu_soc *ipu;
+ struct ipuv3_channel *ipu_ch;
+ struct ipuv3_channel *alpha_ch;
+ struct dmfc_channel *dmfc;
+ struct ipu_dp *dp;
+
+ int dma;
+ int dp_flow;
+
+ bool disabling;
+};
+
+struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
+ int dma, int dp, unsigned int possible_crtcs,
+ enum drm_plane_type type);
+
+/* Init IDMAC, DMFC, DP */
+int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w,
+ uint32_t src_h, bool interlaced);
+
+int ipu_plane_irq(struct ipu_plane *plane);
+
+void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel);
+void ipu_plane_disable_deferred(struct drm_plane *plane);
+bool ipu_plane_atomic_update_pending(struct drm_plane *plane);
+
+#endif
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
new file mode 100644
index 000000000000..6fbf505d2801
--- /dev/null
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * i.MX drm driver - parallel display implementation
+ *
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ */
+
+#include <linux/component.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/bridge/imx.h>
+
+#include "imx-drm.h"
+
+struct imx_parallel_display_encoder {
+ struct drm_encoder encoder;
+};
+
+struct imx_parallel_display {
+ struct device *dev;
+ u32 bus_format;
+ struct drm_bridge *next_bridge;
+ struct drm_bridge bridge;
+};
+
+static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
+{
+ return container_of(b, struct imx_parallel_display, bridge);
+}
+
+static const u32 imx_pd_bus_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_BGR888_1X24,
+ MEDIA_BUS_FMT_GBR888_1X24,
+ MEDIA_BUS_FMT_RGB666_1X18,
+ MEDIA_BUS_FMT_RGB666_1X24_CPADHI,
+ MEDIA_BUS_FMT_RGB565_1X16,
+};
+
+static u32 *
+imx_pd_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+ u32 *output_fmts;
+
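+ /* No fixed format and no display_info formats: report all supported formats */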
+ if (!imxpd->bus_format && !di->num_bus_formats) {
+ *num_output_fmts = ARRAY_SIZE(imx_pd_bus_fmts);
+ return kmemdup(imx_pd_bus_fmts, sizeof(imx_pd_bus_fmts),
+ GFP_KERNEL);
+ }
+
+ *num_output_fmts = 1;
+ output_fmts = kmalloc(sizeof(*output_fmts), GFP_KERNEL);
+ if (!output_fmts)
+ return NULL;
+
+ if (!imxpd->bus_format && di->num_bus_formats)
+ output_fmts[0] = di->bus_formats[0];
+ else
+ output_fmts[0] = imxpd->bus_format;
+
+ return output_fmts;
+}
+
+static bool imx_pd_format_supported(u32 output_fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(imx_pd_bus_fmts); i++) {
+ if (imx_pd_bus_fmts[i] == output_fmt)
+ return true;
+ }
+
+ return false;
+}
+
+static u32 *
+imx_pd_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+ u32 *input_fmts;
+
+ /*
+ * If the next bridge does not support bus format negotiation, let's
+ * use the static bus format definition (imxpd->bus_format) if it's
+ * specified, RGB888 when it's not.
+ */
+ if (output_fmt == MEDIA_BUS_FMT_FIXED)
+ output_fmt = imxpd->bus_format ? : MEDIA_BUS_FMT_RGB888_1X24;
+
+ /* Now make sure the requested output format is supported. */
+ if ((imxpd->bus_format && imxpd->bus_format != output_fmt) ||
+ !imx_pd_format_supported(output_fmt)) {
+ *num_input_fmts = 0;
+ return NULL;
+ }
+
+ *num_input_fmts = 1;
+ input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = output_fmt;
+ return input_fmts;
+}
+
+static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct drm_bridge_state *next_bridge_state = NULL;
+ u32 bus_flags, bus_fmt;
+
+ struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge);
+
+ if (next_bridge)
+ next_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ next_bridge);
+
+ if (next_bridge_state)
+ bus_flags = next_bridge_state->input_bus_cfg.flags;
+ else
+ bus_flags = di->bus_flags;
+
+ bus_fmt = bridge_state->input_bus_cfg.format;
+ if (!imx_pd_format_supported(bus_fmt))
+ return -EINVAL;
+
+ bridge_state->output_bus_cfg.flags = bus_flags;
+ bridge_state->input_bus_cfg.flags = bus_flags;
+ imx_crtc_state->bus_flags = bus_flags;
+ imx_crtc_state->bus_format = bridge_state->input_bus_cfg.format;
+ imx_crtc_state->di_hsync_pin = 2;
+ imx_crtc_state->di_vsync_pin = 3;
+
+ return 0;
+}
+
+static int imx_pd_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
+{
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+
+ return drm_bridge_attach(encoder, imxpd->next_bridge, bridge, flags);
+}
+
+static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
+ .attach = imx_pd_bridge_attach,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_check = imx_pd_bridge_atomic_check,
+ .atomic_get_input_bus_fmts = imx_pd_bridge_atomic_get_input_bus_fmts,
+ .atomic_get_output_bus_fmts = imx_pd_bridge_atomic_get_output_bus_fmts,
+};
+
+static int imx_pd_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm = data;
+ struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
+ struct imx_parallel_display_encoder *imxpd_encoder;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+ int ret;
+
+ imxpd_encoder = drmm_simple_encoder_alloc(drm, struct imx_parallel_display_encoder,
+ encoder, DRM_MODE_ENCODER_NONE);
+ if (IS_ERR(imxpd_encoder))
+ return PTR_ERR(imxpd_encoder);
+
+ encoder = &imxpd_encoder->encoder;
+ bridge = &imxpd->bridge;
+
+ ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
+ if (ret)
+ return ret;
+
+ drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+
+ connector = drm_bridge_connector_init(drm, encoder);
+ if (IS_ERR(connector))
+ return PTR_ERR(connector);
+
+ drm_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+static const struct component_ops imx_pd_ops = {
+ .bind = imx_pd_bind,
+};
+
+static int imx_pd_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct imx_parallel_display *imxpd;
+ int ret;
+ u32 bus_format = 0;
+ const char *fmt;
+
+ imxpd = devm_drm_bridge_alloc(dev, struct imx_parallel_display, bridge,
+ &imx_pd_bridge_funcs);
+ if (IS_ERR(imxpd))
+ return PTR_ERR(imxpd);
+
+ /* port@1 is the output port */
+ imxpd->next_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
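+ /* No bridge found on port@1: fall back to the legacy bridge */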
+ if (imxpd->next_bridge == ERR_PTR(-ENODEV))
+ imxpd->next_bridge = devm_imx_drm_legacy_bridge(dev, np, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(imxpd->next_bridge)) {
+ ret = PTR_ERR(imxpd->next_bridge);
+ return ret;
+ }
+
+ ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
+ if (!ret) {
+ if (!strcmp(fmt, "rgb24"))
+ bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ else if (!strcmp(fmt, "rgb565"))
+ bus_format = MEDIA_BUS_FMT_RGB565_1X16;
+ else if (!strcmp(fmt, "bgr666"))
+ bus_format = MEDIA_BUS_FMT_RGB666_1X18;
+ else if (!strcmp(fmt, "lvds666"))
+ bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
+ }
+ imxpd->bus_format = bus_format;
+
+ imxpd->dev = dev;
+
+ platform_set_drvdata(pdev, imxpd);
+
+ devm_drm_bridge_add(dev, &imxpd->bridge);
+
+ return component_add(dev, &imx_pd_ops);
+}
+
+static void imx_pd_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &imx_pd_ops);
+}
+
+static const struct of_device_id imx_pd_dt_ids[] = {
+ { .compatible = "fsl,imx-parallel-display", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_pd_dt_ids);
+
+static struct platform_driver imx_pd_driver = {
+ .probe = imx_pd_probe,
+ .remove = imx_pd_remove,
+ .driver = {
+ .of_match_table = imx_pd_dt_ids,
+ .name = "imx-parallel-display",
+ },
+};
+
+module_platform_driver(imx_pd_driver);
+
+MODULE_DESCRIPTION("i.MX parallel display driver");
+MODULE_AUTHOR("Sascha Hauer, Pengutronix");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/imx/lcdc/Kconfig b/drivers/gpu/drm/imx/lcdc/Kconfig
new file mode 100644
index 000000000000..75869489b0e6
--- /dev/null
+++ b/drivers/gpu/drm/imx/lcdc/Kconfig
@@ -0,0 +1,10 @@
+config DRM_IMX_LCDC
+ tristate "Freescale i.MX LCDC displays"
+ depends on DRM && (ARCH_MXC || COMPILE_TEST)
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_DMA_HELPER
+ select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
+ help
+ Found on i.MX1, i.MX21, i.MX25 and i.MX27.
diff --git a/drivers/gpu/drm/imx/lcdc/Makefile b/drivers/gpu/drm/imx/lcdc/Makefile
new file mode 100644
index 000000000000..e84daa432c2e
--- /dev/null
+++ b/drivers/gpu/drm/imx/lcdc/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_DRM_IMX_LCDC) += imx-lcdc.o
diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
new file mode 100644
index 000000000000..e200b40f30fe
--- /dev/null
+++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
@@ -0,0 +1,537 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: 2020 Marian Cichy <M.Cichy@pengutronix.de>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define IMX21LCDC_LSSAR 0x0000 /* LCDC Screen Start Address Register */
+#define IMX21LCDC_LSR 0x0004 /* LCDC Size Register */
+#define IMX21LCDC_LVPWR 0x0008 /* LCDC Virtual Page Width Register */
+#define IMX21LCDC_LCPR 0x000C /* LCDC Cursor Position Register */
+#define IMX21LCDC_LCWHB 0x0010 /* LCDC Cursor Width Height and Blink Register*/
+#define IMX21LCDC_LCCMR 0x0014 /* LCDC Color Cursor Mapping Register */
+#define IMX21LCDC_LPCR 0x0018 /* LCDC Panel Configuration Register */
+#define IMX21LCDC_LHCR 0x001C /* LCDC Horizontal Configuration Register */
+#define IMX21LCDC_LVCR 0x0020 /* LCDC Vertical Configuration Register */
+#define IMX21LCDC_LPOR 0x0024 /* LCDC Panning Offset Register */
+#define IMX21LCDC_LSCR 0x0028 /* LCDC Sharp Configuration Register */
+#define IMX21LCDC_LPCCR 0x002C /* LCDC PWM Contrast Control Register */
+#define IMX21LCDC_LDCR 0x0030 /* LCDC DMA Control Register */
+#define IMX21LCDC_LRMCR 0x0034 /* LCDC Refresh Mode Control Register */
+#define IMX21LCDC_LICR 0x0038 /* LCDC Interrupt Configuration Register */
+#define IMX21LCDC_LIER 0x003C /* LCDC Interrupt Enable Register */
+#define IMX21LCDC_LISR 0x0040 /* LCDC Interrupt Status Register */
+#define IMX21LCDC_LGWSAR 0x0050 /* LCDC Graphic Window Start Address Register */
+#define IMX21LCDC_LGWSR 0x0054 /* LCDC Graph Window Size Register */
+#define IMX21LCDC_LGWVPWR 0x0058 /* LCDC Graphic Window Virtual Page Width Register */
+#define IMX21LCDC_LGWPOR 0x005C /* LCDC Graphic Window Panning Offset Register */
+#define IMX21LCDC_LGWPR 0x0060 /* LCDC Graphic Window Position Register */
+#define IMX21LCDC_LGWCR 0x0064 /* LCDC Graphic Window Control Register */
+#define IMX21LCDC_LGWDCR 0x0068 /* LCDC Graphic Window DMA Control Register */
+#define IMX21LCDC_LAUSCR 0x0080 /* LCDC AUS Mode Control Register */
+#define IMX21LCDC_LAUSCCR 0x0084 /* LCDC AUS Mode Cursor Control Register */
+#define IMX21LCDC_BGLUT 0x0800 /* Background Lookup Table */
+#define IMX21LCDC_GWLUT 0x0C00 /* Graphic Window Lookup Table */
+
+#define IMX21LCDC_LCPR_CC0 BIT(30) /* Cursor Control Bit 0 */
+#define IMX21LCDC_LCPR_CC1 BIT(31) /* Cursor Control Bit 1 */
+
+/* Values HSYNC, VSYNC and Framesize Register */
+#define IMX21LCDC_LHCR_HWIDTH GENMASK(31, 26)
+#define IMX21LCDC_LHCR_HFPORCH GENMASK(15, 8) /* H_WAIT_1 in the i.MX25 Reference manual */
+#define IMX21LCDC_LHCR_HBPORCH GENMASK(7, 0) /* H_WAIT_2 in the i.MX25 Reference manual */
+
+#define IMX21LCDC_LVCR_VWIDTH GENMASK(31, 26)
+#define IMX21LCDC_LVCR_VFPORCH GENMASK(15, 8) /* V_WAIT_1 in the i.MX25 Reference manual */
+#define IMX21LCDC_LVCR_VBPORCH GENMASK(7, 0) /* V_WAIT_2 in the i.MX25 Reference manual */
+
+#define IMX21LCDC_LSR_XMAX GENMASK(25, 20)
+#define IMX21LCDC_LSR_YMAX GENMASK(9, 0)
+
+/* Values for LPCR Register */
+#define IMX21LCDC_LPCR_PCD GENMASK(5, 0)
+#define IMX21LCDC_LPCR_SHARP BIT(6)
+#define IMX21LCDC_LPCR_SCLKSEL BIT(7)
+#define IMX21LCDC_LPCR_ACD GENMASK(14, 8)
+#define IMX21LCDC_LPCR_ACDSEL BIT(15)
+#define IMX21LCDC_LPCR_REV_VS BIT(16)
+#define IMX21LCDC_LPCR_SWAP_SEL BIT(17)
+#define IMX21LCDC_LPCR_END_SEL BIT(18)
+#define IMX21LCDC_LPCR_SCLKIDLE BIT(19)
+#define IMX21LCDC_LPCR_OEPOL BIT(20)
+#define IMX21LCDC_LPCR_CLKPOL BIT(21)
+#define IMX21LCDC_LPCR_LPPOL BIT(22)
+#define IMX21LCDC_LPCR_FLMPOL BIT(23)
+#define IMX21LCDC_LPCR_PIXPOL BIT(24)
+#define IMX21LCDC_LPCR_BPIX GENMASK(27, 25)
+#define IMX21LCDC_LPCR_PBSIZ GENMASK(29, 28)
+#define IMX21LCDC_LPCR_COLOR BIT(30)
+#define IMX21LCDC_LPCR_TFT BIT(31)
+
+#define INTR_EOF BIT(1) /* VBLANK Interrupt Bit */
+
+#define BPP_RGB565 0x05
+#define BPP_XRGB8888 0x07
+
+#define LCDC_MIN_XRES 64
+#define LCDC_MIN_YRES 64
+
+#define LCDC_MAX_XRES 1024
+#define LCDC_MAX_YRES 1024
+
+struct imx_lcdc {
+ struct drm_device drm;
+ struct drm_simple_display_pipe pipe;
+ struct drm_connector *connector;
+ void __iomem *base;
+
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+ struct clk *clk_per;
+};
+
+static const u32 imx_lcdc_formats[] = {
+ DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
+};
+
+static inline struct imx_lcdc *imx_lcdc_from_drmdev(struct drm_device *drm)
+{
+ return container_of(drm, struct imx_lcdc, drm);
+}
+
+static unsigned int imx_lcdc_get_format(unsigned int drm_format)
+{
+ switch (drm_format) {
+ default:
+ DRM_WARN("Format not supported - fallback to XRGB8888\n");
+ fallthrough;
+
+ case DRM_FORMAT_XRGB8888:
+ return BPP_XRGB8888;
+
+ case DRM_FORMAT_RGB565:
+ return BPP_RGB565;
+ }
+}
+
+static void imx_lcdc_update_hw_registers(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state,
+ bool mode_set)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_plane_state *new_state = pipe->plane.state;
+ struct drm_framebuffer *fb = new_state->fb;
+ struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(pipe->crtc.dev);
+ u32 lpcr, lvcr, lhcr;
+ u32 framesize;
+ dma_addr_t addr;
+
+ addr = drm_fb_dma_get_gem_addr(fb, new_state, 0);
+ /* The LSSAR register specifies the LCD screen start address (SSA). */
+ writel(addr, lcdc->base + IMX21LCDC_LSSAR);
+
+ if (!mode_set)
+ return;
+
+ /* Disable PER clock to make register write possible */
+ if (old_state && old_state->crtc && old_state->crtc->enabled)
+ clk_disable_unprepare(lcdc->clk_per);
+
+ /* Framesize; XMAX is programmed in units of 16 pixels */
+ framesize = FIELD_PREP(IMX21LCDC_LSR_XMAX, crtc->mode.hdisplay >> 4) |
+ FIELD_PREP(IMX21LCDC_LSR_YMAX, crtc->mode.vdisplay);
+ writel(framesize, lcdc->base + IMX21LCDC_LSR);
+
+ /* HSYNC */
+ lhcr = FIELD_PREP(IMX21LCDC_LHCR_HFPORCH, crtc->mode.hsync_start - crtc->mode.hdisplay - 1) |
+ FIELD_PREP(IMX21LCDC_LHCR_HWIDTH, crtc->mode.hsync_end - crtc->mode.hsync_start - 1) |
+ FIELD_PREP(IMX21LCDC_LHCR_HBPORCH, crtc->mode.htotal - crtc->mode.hsync_end - 3);
+ writel(lhcr, lcdc->base + IMX21LCDC_LHCR);
+
+ /* VSYNC */
+ lvcr = FIELD_PREP(IMX21LCDC_LVCR_VFPORCH, crtc->mode.vsync_start - crtc->mode.vdisplay) |
+ FIELD_PREP(IMX21LCDC_LVCR_VWIDTH, crtc->mode.vsync_end - crtc->mode.vsync_start) |
+ FIELD_PREP(IMX21LCDC_LVCR_VBPORCH, crtc->mode.vtotal - crtc->mode.vsync_end);
+ writel(lvcr, lcdc->base + IMX21LCDC_LVCR);
+
+ lpcr = readl(lcdc->base + IMX21LCDC_LPCR);
+ lpcr &= ~IMX21LCDC_LPCR_BPIX;
+ lpcr |= FIELD_PREP(IMX21LCDC_LPCR_BPIX, imx_lcdc_get_format(fb->format->format));
+ writel(lpcr, lcdc->base + IMX21LCDC_LPCR);
+
+ /* Virtual Page Width */
+ writel(new_state->fb->pitches[0] / 4, lcdc->base + IMX21LCDC_LVPWR);
+
+ /* Enable PER clock */
+ if (new_state->crtc->enabled)
+ clk_prepare_enable(lcdc->clk_per);
+}
+
+static void imx_lcdc_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ int ret;
+ int clk_div;
+ int bpp;
+ struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(pipe->crtc.dev);
+ struct drm_display_mode *mode = &pipe->crtc.mode;
+ struct drm_display_info *disp_info = &lcdc->connector->display_info;
+ const int hsync_pol = (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : 1;
+ const int vsync_pol = (mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : 1;
+ const int data_enable_pol =
+ (disp_info->bus_flags & DRM_BUS_FLAG_DE_HIGH) ? 0 : 1;
+ const int clk_pol =
+ (disp_info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE) ? 0 : 1;
+
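+ /* The PCD field is programmed as pixel clock divider - 1 */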
+ clk_div = DIV_ROUND_CLOSEST_ULL(clk_get_rate(lcdc->clk_per),
+ mode->clock * 1000);
+ bpp = imx_lcdc_get_format(plane_state->fb->format->format);
+
+ writel(FIELD_PREP(IMX21LCDC_LPCR_PCD, clk_div - 1) |
+ FIELD_PREP(IMX21LCDC_LPCR_LPPOL, hsync_pol) |
+ FIELD_PREP(IMX21LCDC_LPCR_FLMPOL, vsync_pol) |
+ FIELD_PREP(IMX21LCDC_LPCR_OEPOL, data_enable_pol) |
+ FIELD_PREP(IMX21LCDC_LPCR_TFT, 1) |
+ FIELD_PREP(IMX21LCDC_LPCR_COLOR, 1) |
+ FIELD_PREP(IMX21LCDC_LPCR_PBSIZ, 3) |
+ FIELD_PREP(IMX21LCDC_LPCR_BPIX, bpp) |
+ FIELD_PREP(IMX21LCDC_LPCR_SCLKSEL, 1) |
+ FIELD_PREP(IMX21LCDC_LPCR_PIXPOL, 0) |
+ FIELD_PREP(IMX21LCDC_LPCR_CLKPOL, clk_pol),
+ lcdc->base + IMX21LCDC_LPCR);
+
+ /* 0px panning offset */
+ writel(0x00000000, lcdc->base + IMX21LCDC_LPOR);
+
+ /* disable hardware cursor */
+ writel(readl(lcdc->base + IMX21LCDC_LCPR) & ~(IMX21LCDC_LCPR_CC0 | IMX21LCDC_LCPR_CC1),
+ lcdc->base + IMX21LCDC_LCPR);
+
+ ret = clk_prepare_enable(lcdc->clk_ipg);
+ if (ret) {
+ dev_err(pipe->crtc.dev->dev, "Cannot enable ipg clock: %pe\n", ERR_PTR(ret));
+ return;
+ }
+ ret = clk_prepare_enable(lcdc->clk_ahb);
+ if (ret) {
+ dev_err(pipe->crtc.dev->dev, "Cannot enable ahb clock: %pe\n", ERR_PTR(ret));
+
+ clk_disable_unprepare(lcdc->clk_ipg);
+
+ return;
+ }
+
+ imx_lcdc_update_hw_registers(pipe, NULL, true);
+
+ /* Enable VBLANK Interrupt */
+ writel(INTR_EOF, lcdc->base + IMX21LCDC_LIER);
+}
+
+static void imx_lcdc_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(pipe->crtc.dev);
+ struct drm_crtc *crtc = &lcdc->pipe.crtc;
+ struct drm_pending_vblank_event *event;
+
+ clk_disable_unprepare(lcdc->clk_ahb);
+ clk_disable_unprepare(lcdc->clk_ipg);
+
+ if (pipe->crtc.enabled)
+ clk_disable_unprepare(lcdc->clk_per);
+
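+	/* Complete any pending event; no further vblank interrupts will be delivered */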
+ spin_lock_irq(&lcdc->drm.event_lock);
+ event = crtc->state->event;
+ if (event) {
+ crtc->state->event = NULL;
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+ spin_unlock_irq(&lcdc->drm.event_lock);
+
+ /* Disable VBLANK Interrupt */
+ writel(0, lcdc->base + IMX21LCDC_LIER);
+}
+
+static int imx_lcdc_pipe_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *mode = &crtc_state->mode;
+ const struct drm_display_mode *old_mode = &pipe->crtc.state->mode;
+
+ if (mode->hdisplay < LCDC_MIN_XRES || mode->hdisplay > LCDC_MAX_XRES ||
+ mode->vdisplay < LCDC_MIN_YRES || mode->vdisplay > LCDC_MAX_YRES ||
+	    mode->hdisplay % 16) { /* must be a multiple of 16 */
+ drm_err(pipe->crtc.dev, "unsupported display mode (%u x %u)\n",
+ mode->hdisplay, mode->vdisplay);
+ return -EINVAL;
+ }
+
+ crtc_state->mode_changed =
+ old_mode->hdisplay != mode->hdisplay ||
+ old_mode->vdisplay != mode->vdisplay;
+
+ return 0;
+}
+
+static void imx_lcdc_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_pending_vblank_event *event = crtc->state->event;
+ struct drm_plane_state *new_state = pipe->plane.state;
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_framebuffer *old_fb = old_state->fb;
+ struct drm_crtc *old_crtc = old_state->crtc;
+ bool mode_changed = false;
+
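+	/* A framebuffer format change or a new CRTC requires a full mode register update */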
+ if (old_fb && old_fb->format != fb->format)
+ mode_changed = true;
+ else if (old_crtc != crtc)
+ mode_changed = true;
+
+ imx_lcdc_update_hw_registers(pipe, old_state, mode_changed);
+
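+	/* Deliver the flip event on the next vblank if possible, otherwise send it immediately */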
+ if (event) {
+ crtc->state->event = NULL;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+
+ if (crtc->state->active && drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+}
+
+static const struct drm_simple_display_pipe_funcs imx_lcdc_pipe_funcs = {
+ .enable = imx_lcdc_pipe_enable,
+ .disable = imx_lcdc_pipe_disable,
+ .check = imx_lcdc_pipe_check,
+ .update = imx_lcdc_pipe_update,
+};
+
+static const struct drm_mode_config_funcs imx_lcdc_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static const struct drm_mode_config_helper_funcs imx_lcdc_mode_config_helpers = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
+DEFINE_DRM_GEM_DMA_FOPS(imx_lcdc_drm_fops);
+
+static struct drm_driver imx_lcdc_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &imx_lcdc_drm_fops,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
+ DRM_FBDEV_DMA_DRIVER_OPS,
+ .name = "imx-lcdc",
+ .desc = "i.MX LCDC driver",
+};
+
+static const struct of_device_id imx_lcdc_of_dev_id[] = {
+ {
+ .compatible = "fsl,imx21-lcdc",
+ },
+ {
+ .compatible = "fsl,imx25-lcdc",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_lcdc_of_dev_id);
+
+static irqreturn_t imx_lcdc_irq_handler(int irq, void *arg)
+{
+ struct imx_lcdc *lcdc = arg;
+ struct drm_crtc *crtc = &lcdc->pipe.crtc;
+ unsigned int status;
+
+ status = readl(lcdc->base + IMX21LCDC_LISR);
+
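+	/* The end-of-frame (EOF) interrupt is reported as the vblank event */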
+ if (status & INTR_EOF) {
+ drm_crtc_handle_vblank(crtc);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int imx_lcdc_probe(struct platform_device *pdev)
+{
+ struct imx_lcdc *lcdc;
+ struct drm_device *drm;
+ struct drm_bridge *bridge;
+ int irq;
+ int ret;
+ struct device *dev = &pdev->dev;
+
+ lcdc = devm_drm_dev_alloc(dev, &imx_lcdc_drm_driver,
+ struct imx_lcdc, drm);
+ if (IS_ERR(lcdc))
+ return PTR_ERR(lcdc);
+
+ drm = &lcdc->drm;
+
+ lcdc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(lcdc->base))
+ return dev_err_probe(dev, PTR_ERR(lcdc->base), "Cannot get IO memory\n");
+
+ bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ if (IS_ERR(bridge))
+ return dev_err_probe(dev, PTR_ERR(bridge), "Failed to find bridge\n");
+
+ /* Get Clocks */
+ lcdc->clk_ipg = devm_clk_get(dev, "ipg");
+ if (IS_ERR(lcdc->clk_ipg))
+ return dev_err_probe(dev, PTR_ERR(lcdc->clk_ipg), "Failed to get %s clk\n", "ipg");
+
+ lcdc->clk_ahb = devm_clk_get(dev, "ahb");
+ if (IS_ERR(lcdc->clk_ahb))
+ return dev_err_probe(dev, PTR_ERR(lcdc->clk_ahb), "Failed to get %s clk\n", "ahb");
+
+ lcdc->clk_per = devm_clk_get(dev, "per");
+ if (IS_ERR(lcdc->clk_per))
+ return dev_err_probe(dev, PTR_ERR(lcdc->clk_per), "Failed to get %s clk\n", "per");
+
+ ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot set DMA Mask\n");
+
+ /* Modeset init */
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot initialize mode configuration structure\n");
+
+ /* CRTC, Plane, Encoder */
+ ret = drm_simple_display_pipe_init(drm, &lcdc->pipe,
+ &imx_lcdc_pipe_funcs,
+ imx_lcdc_formats,
+ ARRAY_SIZE(imx_lcdc_formats), NULL, NULL);
+ if (ret < 0)
+ return dev_err_probe(drm->dev, ret, "Cannot setup simple display pipe\n");
+
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret < 0)
+ return dev_err_probe(drm->dev, ret, "Failed to initialize vblank\n");
+
+ ret = drm_bridge_attach(&lcdc->pipe.encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ return dev_err_probe(drm->dev, ret, "Cannot attach bridge\n");
+
+ lcdc->connector = drm_bridge_connector_init(drm, &lcdc->pipe.encoder);
+ if (IS_ERR(lcdc->connector))
+ return dev_err_probe(drm->dev, PTR_ERR(lcdc->connector), "Cannot init bridge connector\n");
+
+ drm_connector_attach_encoder(lcdc->connector, &lcdc->pipe.encoder);
+
+	/*
+	 * The LCDC controller does not have an enable bit; it starts
+	 * running as soon as its clocks are enabled. If the clocks are
+	 * already enabled while the controller has not yet been
+	 * programmed with proper register values (for example when they
+	 * were left enabled by the bootloader), the controller ends up
+	 * in an undefined state.
+	 * To avoid this, enable and then disable the LCDC IPG, PER and
+	 * AHB clocks to force a kind of 'reset' of the LCDC block.
+	 */
+
+ ret = clk_prepare_enable(lcdc->clk_ipg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot enable ipg clock\n");
+ clk_disable_unprepare(lcdc->clk_ipg);
+
+ ret = clk_prepare_enable(lcdc->clk_per);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot enable per clock\n");
+ clk_disable_unprepare(lcdc->clk_per);
+
+ ret = clk_prepare_enable(lcdc->clk_ahb);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot enable ahb clock\n");
+ clk_disable_unprepare(lcdc->clk_ahb);
+
+ drm->mode_config.min_width = LCDC_MIN_XRES;
+ drm->mode_config.max_width = LCDC_MAX_XRES;
+ drm->mode_config.min_height = LCDC_MIN_YRES;
+ drm->mode_config.max_height = LCDC_MAX_YRES;
+ drm->mode_config.preferred_depth = 16;
+ drm->mode_config.funcs = &imx_lcdc_mode_config_funcs;
+ drm->mode_config.helper_private = &imx_lcdc_mode_config_helpers;
+
+ drm_mode_config_reset(drm);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+ ret = devm_request_irq(dev, irq, imx_lcdc_irq_handler, 0, "imx-lcdc", lcdc);
+ if (ret < 0)
+ return dev_err_probe(drm->dev, ret, "Failed to install IRQ handler\n");
+
+ platform_set_drvdata(pdev, drm);
+
+ ret = drm_dev_register(&lcdc->drm, 0);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot register device\n");
+
+ drm_client_setup(drm, NULL);
+
+ return 0;
+}
+
+static void imx_lcdc_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ drm_dev_unregister(drm);
+ drm_atomic_helper_shutdown(drm);
+}
+
+static void imx_lcdc_shutdown(struct platform_device *pdev)
+{
+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+}
+
+static struct platform_driver imx_lcdc_driver = {
+ .driver = {
+ .name = "imx-lcdc",
+ .of_match_table = imx_lcdc_of_dev_id,
+ },
+ .probe = imx_lcdc_probe,
+ .remove = imx_lcdc_remove,
+ .shutdown = imx_lcdc_shutdown,
+};
+module_platform_driver(imx_lcdc_driver);
+
+MODULE_AUTHOR("Marian Cichy <M.Cichy@pengutronix.de>");
+MODULE_DESCRIPTION("Freescale i.MX LCDC driver");
+MODULE_LICENSE("GPL");