Diffstat (limited to 'drivers/gpu/drm/msm/dsi')
 drivers/gpu/drm/msm/dsi/dsi.c                   |  269
 drivers/gpu/drm/msm/dsi/dsi.h                   |  155
 drivers/gpu/drm/msm/dsi/dsi_cfg.c               |  351
 drivers/gpu/drm/msm/dsi/dsi_cfg.h               |   74
 drivers/gpu/drm/msm/dsi/dsi_host.c              | 2648
 drivers/gpu/drm/msm/dsi/dsi_manager.c           |  620
 drivers/gpu/drm/msm/dsi/phy/dsi_phy.c           |  866
 drivers/gpu/drm/msm/dsi/phy/dsi_phy.h           |  139
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c      | 1036
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c      | 1121
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c      |  147
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c      |  956
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c |  648
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c       | 1506
14 files changed, 10536 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c new file mode 100644 index 000000000000..d8bb40ef820e --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + */ + +#include "dsi.h" + +bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi) +{ + unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host); + + return !(host_flags & MIPI_DSI_MODE_VIDEO); +} + +struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi) +{ + return msm_dsi_host_get_dsc_config(msm_dsi->host); +} + +bool msm_dsi_wide_bus_enabled(struct msm_dsi *msm_dsi) +{ + return msm_dsi_host_is_wide_bus_enabled(msm_dsi->host); +} + +static int dsi_get_phy(struct msm_dsi *msm_dsi) +{ + struct platform_device *pdev = msm_dsi->pdev; + struct platform_device *phy_pdev; + struct device_node *phy_node; + + phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0); + if (!phy_node) { + DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n"); + return -ENXIO; + } + + phy_pdev = of_find_device_by_node(phy_node); + if (phy_pdev) { + msm_dsi->phy = platform_get_drvdata(phy_pdev); + msm_dsi->phy_dev = &phy_pdev->dev; + } + + of_node_put(phy_node); + + if (!phy_pdev) { + DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__); + return -EPROBE_DEFER; + } + if (!msm_dsi->phy) { + put_device(&phy_pdev->dev); + DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__); + return -EPROBE_DEFER; + } + + return 0; +} + +static void dsi_destroy(struct msm_dsi *msm_dsi) +{ + if (!msm_dsi) + return; + + msm_dsi_manager_unregister(msm_dsi); + + if (msm_dsi->phy_dev) { + put_device(msm_dsi->phy_dev); + msm_dsi->phy = NULL; + msm_dsi->phy_dev = NULL; + } + + if (msm_dsi->host) { + msm_dsi_host_destroy(msm_dsi->host); + msm_dsi->host = NULL; + } + + platform_set_drvdata(msm_dsi->pdev, NULL); +} + +static struct msm_dsi *dsi_init(struct platform_device *pdev) +{ + struct msm_dsi *msm_dsi; + int ret; + + if (!pdev) + return ERR_PTR(-ENXIO); + + msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL); + if (!msm_dsi) + return ERR_PTR(-ENOMEM); + DBG("dsi probed=%p", msm_dsi); + + msm_dsi->id = -1; + msm_dsi->pdev = pdev; + platform_set_drvdata(pdev, msm_dsi); + + /* Init dsi host */ + ret = msm_dsi_host_init(msm_dsi); + if (ret) + goto destroy_dsi; + + /* GET dsi PHY */ + ret = dsi_get_phy(msm_dsi); + if (ret) + goto destroy_dsi; + + /* Register to dsi manager */ + ret = msm_dsi_manager_register(msm_dsi); + if (ret) + goto destroy_dsi; + + return msm_dsi; + +destroy_dsi: + dsi_destroy(msm_dsi); + return ERR_PTR(ret); +} + +static int dsi_bind(struct device *dev, struct device *master, void *data) +{ + struct msm_drm_private *priv = dev_get_drvdata(master); + struct msm_dsi *msm_dsi = dev_get_drvdata(dev); + + /* + * Next bridge doesn't exist for the secondary DSI host in a bonded + * pair. 
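	 * (For illustration: with dsi0 as the bonded master and dsi1 as the
	 * slave, only dsi0 looks up the bridge below; dsi1 drives half of
	 * the panel but gets no bridge, encoder or connector of its own.)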
+	 */
+	if (!msm_dsi_is_bonded_dsi(msm_dsi) ||
+	    msm_dsi_is_master_dsi(msm_dsi)) {
+		struct drm_bridge *ext_bridge;
+
+		ext_bridge = devm_drm_of_get_bridge(&msm_dsi->pdev->dev,
+						    msm_dsi->pdev->dev.of_node, 1, 0);
+		if (IS_ERR(ext_bridge))
+			return PTR_ERR(ext_bridge);
+
+		msm_dsi->next_bridge = ext_bridge;
+	}
+
+	priv->kms->dsi[msm_dsi->id] = msm_dsi;
+
+	return 0;
+}
+
+static void dsi_unbind(struct device *dev, struct device *master,
+		       void *data)
+{
+	struct msm_drm_private *priv = dev_get_drvdata(master);
+	struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
+
+	msm_dsi_tx_buf_free(msm_dsi->host);
+	priv->kms->dsi[msm_dsi->id] = NULL;
+}
+
+static const struct component_ops dsi_ops = {
+	.bind = dsi_bind,
+	.unbind = dsi_unbind,
+};
+
+int dsi_dev_attach(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &dsi_ops);
+}
+
+void dsi_dev_detach(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &dsi_ops);
+}
+
+static int dsi_dev_probe(struct platform_device *pdev)
+{
+	struct msm_dsi *msm_dsi;
+
+	DBG("");
+	msm_dsi = dsi_init(pdev);
+	if (IS_ERR(msm_dsi)) {
+		/* Don't fail the bind if the dsi port is not connected */
+		if (PTR_ERR(msm_dsi) == -ENODEV)
+			return 0;
+		else
+			return PTR_ERR(msm_dsi);
+	}
+
+	return 0;
+}
+
+static void dsi_dev_remove(struct platform_device *pdev)
+{
+	struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
+
+	DBG("");
+	dsi_destroy(msm_dsi);
+}
+
+static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,mdss-dsi-ctrl" },
+
+	/* Deprecated, don't use */
+	{ .compatible = "qcom,dsi-ctrl-6g-qcm2290" },
+	{}
+};
+
+static const struct dev_pm_ops dsi_pm_ops = {
+	SET_RUNTIME_PM_OPS(msm_dsi_runtime_suspend, msm_dsi_runtime_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+};
+
+static struct platform_driver dsi_driver = {
+	.probe = dsi_dev_probe,
+	.remove = dsi_dev_remove,
+	.driver = {
+		.name = "msm_dsi",
+		.of_match_table = dt_match,
+		.pm = &dsi_pm_ops,
+	},
+};
+
+void __init msm_dsi_register(void)
+{
+	DBG("");
+	msm_dsi_phy_driver_register();
+	platform_driver_register(&dsi_driver);
+}
+
+void __exit msm_dsi_unregister(void)
+{
+	DBG("");
+	msm_dsi_phy_driver_unregister();
+	platform_driver_unregister(&dsi_driver);
+}
+
+int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
+			 struct drm_encoder *encoder)
+{
+	int ret;
+
+	msm_dsi->dev = dev;
+
+	ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
+	if (ret) {
+		DRM_DEV_ERROR(dev->dev, "failed to modeset init host: %d\n", ret);
+		return ret;
+	}
+
+	if (msm_dsi_is_bonded_dsi(msm_dsi) &&
+	    !msm_dsi_is_master_dsi(msm_dsi)) {
+		/*
+		 * Do not return an error here; just skip creating the
+		 * encoder/connector for the slave-DSI.
+		 */
+		return 0;
+	}
+
+	ret = msm_dsi_manager_connector_init(msm_dsi, encoder);
+	if (ret) {
+		DRM_DEV_ERROR(dev->dev,
+			      "failed to create dsi connector: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi)
+{
+	msm_dsi_host_snapshot(disp_state, msm_dsi->host);
+	msm_dsi_phy_snapshot(disp_state, msm_dsi->phy);
+}
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
new file mode 100644
index 000000000000..93c028a122f3
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */ + +#ifndef __DSI_CONNECTOR_H__ +#define __DSI_CONNECTOR_H__ + +#include <linux/of_platform.h> +#include <linux/platform_device.h> + +#include <drm/drm_bridge.h> +#include <drm/drm_crtc.h> +#include <drm/drm_mipi_dsi.h> + +#include "msm_drv.h" +#include "disp/msm_disp_snapshot.h" + +#define DSI_0 0 +#define DSI_1 1 +#define DSI_MAX 2 + +struct msm_dsi_phy_shared_timings; +struct msm_dsi_phy_clk_request; + +enum msm_dsi_phy_usecase { + MSM_DSI_PHY_STANDALONE, + MSM_DSI_PHY_MASTER, + MSM_DSI_PHY_SLAVE, +}; + +#define DSI_BUS_CLK_MAX 4 + +struct msm_dsi { + struct drm_device *dev; + struct platform_device *pdev; + + struct mipi_dsi_host *host; + struct msm_dsi_phy *phy; + const char *te_source; + + struct drm_bridge *next_bridge; + + struct device *phy_dev; + bool phy_enabled; + + int id; +}; + +/* dsi manager */ +int msm_dsi_manager_connector_init(struct msm_dsi *msm_dsi, + struct drm_encoder *encoder); +int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); +bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len); +int msm_dsi_manager_register(struct msm_dsi *msm_dsi); +void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); +void msm_dsi_manager_tpg_enable(void); + +/* dsi host */ +struct msm_dsi_host; +int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg); +void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg); +int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg); +int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg); +void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, + u32 dma_base, u32 len); +int msm_dsi_host_enable(struct mipi_dsi_host *host); +int msm_dsi_host_disable(struct mipi_dsi_host *host); +void msm_dsi_host_enable_irq(struct mipi_dsi_host *host); +void msm_dsi_host_disable_irq(struct mipi_dsi_host *host); +int msm_dsi_host_power_on(struct mipi_dsi_host *host, + struct msm_dsi_phy_shared_timings *phy_shared_timings, + bool is_bonded_dsi, struct msm_dsi_phy *phy); +int msm_dsi_host_power_off(struct mipi_dsi_host *host); +int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, + const struct drm_display_mode *mode); +enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host, + const struct drm_display_mode *mode); +unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host); +int msm_dsi_host_register(struct mipi_dsi_host *host); +void msm_dsi_host_unregister(struct mipi_dsi_host *host); +void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host, + struct msm_dsi_phy *src_phy); +int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, + struct msm_dsi_phy *src_phy); +void msm_dsi_host_reset_phy(struct mipi_dsi_host *host); +void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, + struct msm_dsi_phy_clk_request *clk_req, + bool is_bonded_dsi); +void msm_dsi_host_destroy(struct mipi_dsi_host *host); +int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, + struct drm_device *dev); +int msm_dsi_host_init(struct msm_dsi *msm_dsi); +int msm_dsi_runtime_suspend(struct device *dev); +int msm_dsi_runtime_resume(struct device *dev); +int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host); +int dsi_link_clk_set_rate_6g_v2_9(struct msm_dsi_host *msm_host); +int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host); +int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host); +int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host); +void 
dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host); +void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host); +unsigned long dsi_byte_clk_get_rate(struct mipi_dsi_host *host, bool is_bonded_dsi, + const struct drm_display_mode *mode); +int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size); +int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size); +void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host); +void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host); +void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host); +void msm_dsi_tx_buf_free(struct mipi_dsi_host *mipi_host); +int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova); +int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova); +int dsi_clk_init_v2(struct msm_dsi_host *msm_host); +int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host); +int dsi_clk_init_6g_v2_9(struct msm_dsi_host *msm_host); +int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi); +int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi); +void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host); +void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host); +struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host); +bool msm_dsi_host_is_wide_bus_enabled(struct mipi_dsi_host *host); + +/* dsi phy */ +struct msm_dsi_phy; +struct msm_dsi_phy_shared_timings { + u32 clk_post; + u32 clk_pre; + bool clk_pre_inc_by_2; + bool byte_intf_clk_div_2; +}; + +struct msm_dsi_phy_clk_request { + unsigned long bitclk_rate; + unsigned long escclk_rate; +}; + +void msm_dsi_phy_driver_register(void); +void msm_dsi_phy_driver_unregister(void); +int msm_dsi_phy_enable(struct msm_dsi_phy *phy, + struct msm_dsi_phy_clk_request *clk_req, + struct msm_dsi_phy_shared_timings *shared_timings); +void msm_dsi_phy_disable(struct msm_dsi_phy *phy); +void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, + enum msm_dsi_phy_usecase uc); +void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy); +int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy); +void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy); +bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable); + +#endif /* __DSI_CONNECTOR_H__ */ + diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c new file mode 100644 index 000000000000..fed8e9b67011 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. 
+ */ + +#include "dsi_cfg.h" + +static const char * const dsi_v2_bus_clk_names[] = { + "core_mmss", "iface", "bus", +}; + +static const struct regulator_bulk_data apq8064_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */ + { .supply = "avdd", .init_load_uA = 10000 }, /* 3.0 V */ + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ +}; + +static const struct msm_dsi_config apq8064_dsi_cfg = { + .io_offset = 0, + .regulator_data = apq8064_dsi_regulators, + .num_regulators = ARRAY_SIZE(apq8064_dsi_regulators), + .bus_clk_names = dsi_v2_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names), + .io_start = { + { 0x4700000, 0x5800000 }, + }, +}; + +static const char * const dsi_6g_bus_clk_names[] = { + "mdp_core", "iface", "bus", "core_mmss", +}; + +static const struct regulator_bulk_data msm8974_apq8084_regulators[] = { + { .supply = "vdd", .init_load_uA = 150000 }, /* 3.0 V */ + { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */ + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ +}; + +static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = msm8974_apq8084_regulators, + .num_regulators = ARRAY_SIZE(msm8974_apq8084_regulators), + .bus_clk_names = dsi_6g_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names), + .io_start = { + { 0xfd922800, 0xfd922b00 }, + }, +}; + +static const char * const dsi_v1_3_1_clk_names[] = { + "mdp_core", "iface", "bus", +}; + +static const struct regulator_bulk_data dsi_v1_3_1_regulators[] = { + { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */ + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ +}; + +static const struct msm_dsi_config msm8916_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = dsi_v1_3_1_regulators, + .num_regulators = ARRAY_SIZE(dsi_v1_3_1_regulators), + .bus_clk_names = dsi_v1_3_1_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_v1_3_1_clk_names), + .io_start = { + { 0x1a98000 }, + }, +}; + +static const struct msm_dsi_config msm8976_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = dsi_v1_3_1_regulators, + .num_regulators = ARRAY_SIZE(dsi_v1_3_1_regulators), + .bus_clk_names = dsi_v1_3_1_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_v1_3_1_clk_names), + .io_start = { + { 0x1a94000, 0x1a96000 }, + }, +}; + +static const struct regulator_bulk_data msm8994_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 100000 }, /* 1.25 V */ + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ + { .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */ + { .supply = "vdd", .init_load_uA = 100000 }, /* 1.8 V */ + { .supply = "lab_reg", .init_load_uA = -1 }, + { .supply = "ibb_reg", .init_load_uA = -1 }, +}; + +static const struct msm_dsi_config msm8994_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = msm8994_dsi_regulators, + .num_regulators = ARRAY_SIZE(msm8994_dsi_regulators), + .bus_clk_names = dsi_6g_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names), + .io_start = { + { 0xfd998000, 0xfd9a0000 }, + }, +}; + +static const struct regulator_bulk_data msm8996_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 18160 }, /* 1.25 V */ + { .supply = "vcca", .init_load_uA = 17000 }, /* 0.925 V */ + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ +}; + +static const struct msm_dsi_config msm8996_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = msm8996_dsi_regulators, + .num_regulators = 
ARRAY_SIZE(msm8996_dsi_regulators), + .bus_clk_names = dsi_6g_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names), + .io_start = { + { 0x994000, 0x996000 }, + }, +}; + +static const char * const dsi_msm8998_bus_clk_names[] = { + "iface", "bus", "core", +}; + +static const struct regulator_bulk_data msm8998_dsi_regulators[] = { + { .supply = "vdd", .init_load_uA = 367000 }, /* 0.9 V */ + { .supply = "vdda", .init_load_uA = 62800 }, /* 1.2 V */ +}; + +static const struct msm_dsi_config msm8998_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = msm8998_dsi_regulators, + .num_regulators = ARRAY_SIZE(msm8998_dsi_regulators), + .bus_clk_names = dsi_msm8998_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_msm8998_bus_clk_names), + .io_start = { + { 0xc994000, 0xc996000 }, + }, +}; + +static const char * const dsi_sdm660_bus_clk_names[] = { + "iface", "bus", "core", "core_mmss", +}; + +static const struct regulator_bulk_data sdm660_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 12560 }, /* 1.2 V */ +}; + +static const struct msm_dsi_config sdm660_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = sdm660_dsi_regulators, + .num_regulators = ARRAY_SIZE(sdm660_dsi_regulators), + .bus_clk_names = dsi_sdm660_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_sdm660_bus_clk_names), + .io_start = { + { 0xc994000, 0xc996000 }, + }, +}; + +static const char * const dsi_v2_4_clk_names[] = { + "iface", "bus", +}; + +static const struct regulator_bulk_data dsi_v2_4_regulators[] = { + { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */ + { .supply = "refgen" }, +}; + +static const struct msm_dsi_config sdm845_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = dsi_v2_4_regulators, + .num_regulators = ARRAY_SIZE(dsi_v2_4_regulators), + .bus_clk_names = dsi_v2_4_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names), + .io_start = { + { 0xae94000, 0xae96000 }, /* SDM845 / SDM670 */ + { 0x5e94000 }, /* QCM2290 / SM6115 / SM6125 / SM6375 */ + }, +}; + +static const struct regulator_bulk_data sm8550_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 16800 }, /* 1.2 V */ +}; + +static const struct msm_dsi_config sm8550_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = sm8550_dsi_regulators, + .num_regulators = ARRAY_SIZE(sm8550_dsi_regulators), + .bus_clk_names = dsi_v2_4_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names), + .io_start = { + { 0xae94000, 0xae96000 }, + }, +}; + +static const struct regulator_bulk_data sm8650_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 16600 }, /* 1.2 V */ +}; + +static const struct msm_dsi_config sm8650_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = sm8650_dsi_regulators, + .num_regulators = ARRAY_SIZE(sm8650_dsi_regulators), + .bus_clk_names = dsi_v2_4_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names), + .io_start = { + { 0xae94000, 0xae96000 }, + }, +}; + +static const struct regulator_bulk_data sc7280_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 8350 }, /* 1.2 V */ + { .supply = "refgen" }, +}; + +static const struct msm_dsi_config sc7280_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = sc7280_dsi_regulators, + .num_regulators = ARRAY_SIZE(sc7280_dsi_regulators), + .bus_clk_names = dsi_v2_4_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names), + .io_start = { + { 0xae94000, 0xae96000 }, + }, +}; + +static const struct regulator_bulk_data sa8775p_dsi_regulators[] = { + { .supply = "vdda", 
.init_load_uA = 8300 }, /* 1.2 V */ + { .supply = "refgen" }, +}; + +static const struct msm_dsi_config sa8775p_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = sa8775p_dsi_regulators, + .num_regulators = ARRAY_SIZE(sa8775p_dsi_regulators), + .bus_clk_names = dsi_v2_4_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names), + .io_start = { + { 0xae94000, 0xae96000 }, + }, +}; + +static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = { + .link_clk_set_rate = dsi_link_clk_set_rate_v2, + .link_clk_enable = dsi_link_clk_enable_v2, + .link_clk_disable = dsi_link_clk_disable_v2, + .clk_init_ver = dsi_clk_init_v2, + .tx_buf_alloc = dsi_tx_buf_alloc_v2, + .tx_buf_get = dsi_tx_buf_get_v2, + .tx_buf_put = NULL, + .dma_base_get = dsi_dma_base_get_v2, + .calc_clk_rate = dsi_calc_clk_rate_v2, +}; + +static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = { + .link_clk_set_rate = dsi_link_clk_set_rate_6g, + .link_clk_enable = dsi_link_clk_enable_6g, + .link_clk_disable = dsi_link_clk_disable_6g, + .clk_init_ver = NULL, + .tx_buf_alloc = dsi_tx_buf_alloc_6g, + .tx_buf_get = dsi_tx_buf_get_6g, + .tx_buf_put = dsi_tx_buf_put_6g, + .dma_base_get = dsi_dma_base_get_6g, + .calc_clk_rate = dsi_calc_clk_rate_6g, +}; + +static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = { + .link_clk_set_rate = dsi_link_clk_set_rate_6g, + .link_clk_enable = dsi_link_clk_enable_6g, + .link_clk_disable = dsi_link_clk_disable_6g, + .clk_init_ver = dsi_clk_init_6g_v2, + .tx_buf_alloc = dsi_tx_buf_alloc_6g, + .tx_buf_get = dsi_tx_buf_get_6g, + .tx_buf_put = dsi_tx_buf_put_6g, + .dma_base_get = dsi_dma_base_get_6g, + .calc_clk_rate = dsi_calc_clk_rate_6g, +}; + +static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_9_host_ops = { + .link_clk_set_rate = dsi_link_clk_set_rate_6g_v2_9, + .link_clk_enable = dsi_link_clk_enable_6g, + .link_clk_disable = dsi_link_clk_disable_6g, + .clk_init_ver = dsi_clk_init_6g_v2_9, + .tx_buf_alloc = dsi_tx_buf_alloc_6g, + .tx_buf_get = dsi_tx_buf_get_6g, + .tx_buf_put = dsi_tx_buf_put_6g, + .dma_base_get = dsi_dma_base_get_6g, + .calc_clk_rate = dsi_calc_clk_rate_6g, +}; + +static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { + {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, + &apq8064_dsi_cfg, &msm_dsi_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0, + &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0_2, + &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1, + &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1, + &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2, + &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, + &msm8994_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, + &msm8916_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, + &msm8996_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_2, + &msm8976_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_1_0, + &sdm660_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0, + &msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, + &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, 
MSM_DSI_6G_VER_MINOR_V2_3_0, + &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_1, + &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_0, + &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1, + &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_0, + &sc7280_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_1, + &sa8775p_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_6_0, + &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_7_0, + &sm8550_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_8_0, + &sm8650_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_9_0, + &sm8650_dsi_cfg, &msm_dsi_6g_v2_9_host_ops}, +}; + +const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor) +{ + const struct msm_dsi_cfg_handler *cfg_hnd = NULL; + int i; + + for (i = ARRAY_SIZE(dsi_cfg_handlers) - 1; i >= 0; i--) { + if ((dsi_cfg_handlers[i].major == major) && + (dsi_cfg_handlers[i].minor == minor)) { + cfg_hnd = &dsi_cfg_handlers[i]; + break; + } + } + + return cfg_hnd; +} diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h new file mode 100644 index 000000000000..38f303f2ed04 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + */ + +#ifndef __MSM_DSI_CFG_H__ +#define __MSM_DSI_CFG_H__ + +#include "dsi.h" + +#define MSM_DSI_VER_MAJOR_V2 0x02 +#define MSM_DSI_VER_MAJOR_6G 0x03 +#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000 +#define MSM_DSI_6G_VER_MINOR_V1_0_2 0x10000002 +#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000 +#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001 +#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000 +#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 +#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 +#define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001 +#define MSM_DSI_6G_VER_MINOR_V1_4_2 0x10040002 +#define MSM_DSI_6G_VER_MINOR_V2_1_0 0x20010000 +#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000 +#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001 +#define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000 +#define MSM_DSI_6G_VER_MINOR_V2_3_1 0x20030001 +#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000 +#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001 +#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000 +#define MSM_DSI_6G_VER_MINOR_V2_5_1 0x20050001 +#define MSM_DSI_6G_VER_MINOR_V2_6_0 0x20060000 +#define MSM_DSI_6G_VER_MINOR_V2_7_0 0x20070000 +#define MSM_DSI_6G_VER_MINOR_V2_8_0 0x20080000 +#define MSM_DSI_6G_VER_MINOR_V2_9_0 0x20090000 + +#define MSM_DSI_V2_VER_MINOR_8064 0x0 + +#define DSI_6G_REG_SHIFT 4 + +/* Maximum number of configurations matched against the same hw revision */ +#define VARIANTS_MAX 2 + +struct msm_dsi_config { + u32 io_offset; + const struct regulator_bulk_data *regulator_data; + int num_regulators; + const char * const *bus_clk_names; + const int num_bus_clks; + const resource_size_t io_start[VARIANTS_MAX][DSI_MAX]; +}; + +struct msm_dsi_host_cfg_ops { + int (*link_clk_set_rate)(struct msm_dsi_host *msm_host); + int (*link_clk_enable)(struct msm_dsi_host *msm_host); + void (*link_clk_disable)(struct msm_dsi_host *msm_host); + int (*clk_init_ver)(struct msm_dsi_host *msm_host); + int 
(*tx_buf_alloc)(struct msm_dsi_host *msm_host, int size);
+	void *(*tx_buf_get)(struct msm_dsi_host *msm_host);
+	void (*tx_buf_put)(struct msm_dsi_host *msm_host);
+	int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova);
+	int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
+};
+
+struct msm_dsi_cfg_handler {
+	u32 major;
+	u32 minor;
+	const struct msm_dsi_config *cfg;
+	const struct msm_dsi_host_cfg_ops *ops;
+};
+
+const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor);
+
+#endif /* __MSM_DSI_CFG_H__ */
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
new file mode 100644
index 000000000000..e0de545d4077
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -0,0 +1,2648 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/display/drm_dsc_helper.h>
+#include <drm/drm_of.h>
+
+#include "dsi.h"
+#include "dsi.xml.h"
+#include "sfpb.xml.h"
+#include "dsi_cfg.h"
+#include "msm_dsc_helper.h"
+#include "msm_kms.h"
+#include "msm_gem.h"
+#include "phy/dsi_phy.h"
+
+#define DSI_RESET_TOGGLE_DELAY_MS 20
+
+static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc_config *dsc);
+
+static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
+{
+	u32 ver;
+
+	if (!major || !minor)
+		return -EINVAL;
+
+	/*
+	 * From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
+	 * makes all other registers 4-byte shifted down.
+	 *
+	 * In order to distinguish between DSI6G(v3) and beyond, and DSIv2 and
+	 * older, we read the DSI_VERSION register without any shift (offset
+	 * 0x1f0). In the case of DSIv2, this has to be a non-zero value.
In + * the case of DSI6G, this has to be zero (the offset points to a + * scratch register which we never touch) + */ + + ver = readl(base + REG_DSI_VERSION); + if (ver) { + /* older dsi host, there is no register shift */ + ver = FIELD(ver, DSI_VERSION_MAJOR); + if (ver <= MSM_DSI_VER_MAJOR_V2) { + /* old versions */ + *major = ver; + *minor = 0; + return 0; + } else { + return -EINVAL; + } + } else { + /* + * newer host, offset 0 has 6G_HW_VERSION, the rest of the + * registers are shifted down, read DSI_VERSION again with + * the shifted offset + */ + ver = readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION); + ver = FIELD(ver, DSI_VERSION_MAJOR); + if (ver == MSM_DSI_VER_MAJOR_6G) { + /* 6G version */ + *major = ver; + *minor = readl(base + REG_DSI_6G_HW_VERSION); + return 0; + } else { + return -EINVAL; + } + } +} + +#define DSI_ERR_STATE_ACK 0x0000 +#define DSI_ERR_STATE_TIMEOUT 0x0001 +#define DSI_ERR_STATE_DLN0_PHY 0x0002 +#define DSI_ERR_STATE_FIFO 0x0004 +#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008 +#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010 +#define DSI_ERR_STATE_PLL_UNLOCKED 0x0020 + +#define DSI_CLK_CTRL_ENABLE_CLKS \ + (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \ + DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \ + DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \ + DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK) + +struct msm_dsi_host { + struct mipi_dsi_host base; + + struct platform_device *pdev; + struct drm_device *dev; + + int id; + + void __iomem *ctrl_base; + phys_addr_t ctrl_size; + struct regulator_bulk_data *supplies; + + int num_bus_clks; + struct clk_bulk_data bus_clks[DSI_BUS_CLK_MAX]; + + struct clk *byte_clk; + struct clk *esc_clk; + struct clk *pixel_clk; + struct clk *byte_intf_clk; + + /* + * Clocks which needs to be properly parented between DISPCC and DSI PHY + * PLL: + */ + struct clk *byte_src_clk; + struct clk *pixel_src_clk; + struct clk *dsi_pll_byte_clk; + struct clk *dsi_pll_pixel_clk; + + unsigned long byte_clk_rate; + unsigned long byte_intf_clk_rate; + unsigned long pixel_clk_rate; + unsigned long esc_clk_rate; + + /* DSI v2 specific clocks */ + struct clk *src_clk; + + unsigned long src_clk_rate; + + const struct msm_dsi_cfg_handler *cfg_hnd; + + struct completion dma_comp; + struct completion video_comp; + struct mutex dev_mutex; + struct mutex cmd_mutex; + spinlock_t intr_lock; /* Protect interrupt ctrl register */ + + u32 err_work_state; + struct work_struct err_work; + struct workqueue_struct *workqueue; + + /* DSI 6G TX buffer*/ + struct drm_gem_object *tx_gem_obj; + struct drm_gpuvm *vm; + + /* DSI v2 TX buffer */ + void *tx_buf; + dma_addr_t tx_buf_paddr; + + int tx_size; + + u8 *rx_buf; + + struct regmap *sfpb; + + struct drm_display_mode *mode; + struct drm_dsc_config *dsc; + + /* connected device info */ + unsigned int channel; + unsigned int lanes; + enum mipi_dsi_pixel_format format; + unsigned long mode_flags; + + /* lane data parsed via DT */ + int dlane_swap; + int num_data_lanes; + + /* from phy DT */ + bool cphy_mode; + + u32 dma_cmd_ctrl_restore; + + bool registered; + bool power_on; + bool enabled; + int irq; +}; + +static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg) +{ + return readl(msm_host->ctrl_base + reg); +} + +static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data) +{ + writel(data, msm_host->ctrl_base + reg); +} + +static const struct msm_dsi_cfg_handler * +dsi_get_config(struct msm_dsi_host *msm_host) +{ + const struct msm_dsi_cfg_handler *cfg_hnd = NULL; + 
struct device *dev = &msm_host->pdev->dev; + struct clk *ahb_clk; + int ret; + u32 major = 0, minor = 0; + + ahb_clk = msm_clk_get(msm_host->pdev, "iface"); + if (IS_ERR(ahb_clk)) { + dev_err_probe(dev, PTR_ERR(ahb_clk), "%s: cannot get interface clock\n", + __func__); + goto exit; + } + + pm_runtime_get_sync(dev); + + ret = clk_prepare_enable(ahb_clk); + if (ret) { + dev_err_probe(dev, ret, "%s: unable to enable ahb_clk\n", __func__); + goto runtime_put; + } + + ret = dsi_get_version(msm_host->ctrl_base, &major, &minor); + if (ret) { + dev_err_probe(dev, ret, "%s: Invalid version\n", __func__); + goto disable_clks; + } + + cfg_hnd = msm_dsi_cfg_get(major, minor); + + DBG("%s: Version %x:%x\n", __func__, major, minor); + +disable_clks: + clk_disable_unprepare(ahb_clk); +runtime_put: + pm_runtime_put_sync(dev); +exit: + return cfg_hnd; +} + +static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host) +{ + return container_of(host, struct msm_dsi_host, base); +} + +int dsi_clk_init_v2(struct msm_dsi_host *msm_host) +{ + struct platform_device *pdev = msm_host->pdev; + int ret = 0; + + msm_host->src_clk = msm_clk_get(pdev, "src"); + + if (IS_ERR(msm_host->src_clk)) { + ret = PTR_ERR(msm_host->src_clk); + pr_err("%s: can't find src clock. ret=%d\n", + __func__, ret); + msm_host->src_clk = NULL; + return ret; + } + + return ret; +} + +int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host) +{ + struct platform_device *pdev = msm_host->pdev; + int ret = 0; + + msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf"); + if (IS_ERR(msm_host->byte_intf_clk)) { + ret = PTR_ERR(msm_host->byte_intf_clk); + pr_err("%s: can't find byte_intf clock. ret=%d\n", + __func__, ret); + } + + return ret; +} + +int dsi_clk_init_6g_v2_9(struct msm_dsi_host *msm_host) +{ + struct device *dev = &msm_host->pdev->dev; + int ret; + + ret = dsi_clk_init_6g_v2(msm_host); + if (ret) + return ret; + + msm_host->byte_src_clk = devm_clk_get(dev, "byte_src"); + if (IS_ERR(msm_host->byte_src_clk)) + return dev_err_probe(dev, PTR_ERR(msm_host->byte_src_clk), + "can't get byte_src clock\n"); + + msm_host->dsi_pll_byte_clk = devm_clk_get(dev, "dsi_pll_byte"); + if (IS_ERR(msm_host->dsi_pll_byte_clk)) + return dev_err_probe(dev, PTR_ERR(msm_host->dsi_pll_byte_clk), + "can't get dsi_pll_byte clock\n"); + + msm_host->pixel_src_clk = devm_clk_get(dev, "pixel_src"); + if (IS_ERR(msm_host->pixel_src_clk)) + return dev_err_probe(dev, PTR_ERR(msm_host->pixel_src_clk), + "can't get pixel_src clock\n"); + + msm_host->dsi_pll_pixel_clk = devm_clk_get(dev, "dsi_pll_pixel"); + if (IS_ERR(msm_host->dsi_pll_pixel_clk)) + return dev_err_probe(dev, PTR_ERR(msm_host->dsi_pll_pixel_clk), + "can't get dsi_pll_pixel clock\n"); + + return 0; +} + +static int dsi_clk_init(struct msm_dsi_host *msm_host) +{ + struct platform_device *pdev = msm_host->pdev; + const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; + const struct msm_dsi_config *cfg = cfg_hnd->cfg; + int i, ret = 0; + + /* get bus clocks */ + for (i = 0; i < cfg->num_bus_clks; i++) + msm_host->bus_clks[i].id = cfg->bus_clk_names[i]; + msm_host->num_bus_clks = cfg->num_bus_clks; + + ret = devm_clk_bulk_get(&pdev->dev, msm_host->num_bus_clks, msm_host->bus_clks); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "Unable to get clocks\n"); + + /* get link and source clocks */ + msm_host->byte_clk = msm_clk_get(pdev, "byte"); + if (IS_ERR(msm_host->byte_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->byte_clk), + "%s: can't find dsi_byte clock\n", + 
__func__); + + msm_host->pixel_clk = msm_clk_get(pdev, "pixel"); + if (IS_ERR(msm_host->pixel_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->pixel_clk), + "%s: can't find dsi_pixel clock\n", + __func__); + + msm_host->esc_clk = msm_clk_get(pdev, "core"); + if (IS_ERR(msm_host->esc_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->esc_clk), + "%s: can't find dsi_esc clock\n", + __func__); + + if (cfg_hnd->ops->clk_init_ver) + ret = cfg_hnd->ops->clk_init_ver(msm_host); + + return ret; +} + +int msm_dsi_runtime_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct msm_dsi *msm_dsi = platform_get_drvdata(pdev); + struct mipi_dsi_host *host = msm_dsi->host; + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + if (!msm_host->cfg_hnd) + return 0; + + clk_bulk_disable_unprepare(msm_host->num_bus_clks, msm_host->bus_clks); + + return 0; +} + +int msm_dsi_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct msm_dsi *msm_dsi = platform_get_drvdata(pdev); + struct mipi_dsi_host *host = msm_dsi->host; + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + if (!msm_host->cfg_hnd) + return 0; + + return clk_bulk_prepare_enable(msm_host->num_bus_clks, msm_host->bus_clks); +} + +int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host) +{ + int ret; + + DBG("Set clk rates: pclk=%lu, byteclk=%lu", + msm_host->pixel_clk_rate, msm_host->byte_clk_rate); + + ret = dev_pm_opp_set_rate(&msm_host->pdev->dev, + msm_host->byte_clk_rate); + if (ret) { + pr_err("%s: dev_pm_opp_set_rate failed %d\n", __func__, ret); + return ret; + } + + ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate); + if (ret) { + pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret); + return ret; + } + + if (msm_host->byte_intf_clk) { + ret = clk_set_rate(msm_host->byte_intf_clk, msm_host->byte_intf_clk_rate); + if (ret) { + pr_err("%s: Failed to set rate byte intf clk, %d\n", + __func__, ret); + return ret; + } + } + + return 0; +} + +int dsi_link_clk_set_rate_6g_v2_9(struct msm_dsi_host *msm_host) +{ + struct device *dev = &msm_host->pdev->dev; + int ret; + + /* + * DSI PHY PLLs have to be enabled to allow reparenting to them, so + * cannot use assigned-clock-parents. 
+ */ + ret = clk_set_parent(msm_host->byte_src_clk, msm_host->dsi_pll_byte_clk); + if (ret) + dev_err(dev, "Failed to parent byte_src -> dsi_pll_byte: %d\n", ret); + + ret = clk_set_parent(msm_host->pixel_src_clk, msm_host->dsi_pll_pixel_clk); + if (ret) + dev_err(dev, "Failed to parent pixel_src -> dsi_pll_pixel: %d\n", ret); + + return dsi_link_clk_set_rate_6g(msm_host); +} + +int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host) +{ + int ret; + + ret = clk_prepare_enable(msm_host->esc_clk); + if (ret) { + pr_err("%s: Failed to enable dsi esc clk\n", __func__); + goto error; + } + + ret = clk_prepare_enable(msm_host->byte_clk); + if (ret) { + pr_err("%s: Failed to enable dsi byte clk\n", __func__); + goto byte_clk_err; + } + + ret = clk_prepare_enable(msm_host->pixel_clk); + if (ret) { + pr_err("%s: Failed to enable dsi pixel clk\n", __func__); + goto pixel_clk_err; + } + + ret = clk_prepare_enable(msm_host->byte_intf_clk); + if (ret) { + pr_err("%s: Failed to enable byte intf clk\n", + __func__); + goto byte_intf_clk_err; + } + + return 0; + +byte_intf_clk_err: + clk_disable_unprepare(msm_host->pixel_clk); +pixel_clk_err: + clk_disable_unprepare(msm_host->byte_clk); +byte_clk_err: + clk_disable_unprepare(msm_host->esc_clk); +error: + return ret; +} + +int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host) +{ + int ret; + + DBG("Set clk rates: pclk=%lu, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu", + msm_host->pixel_clk_rate, msm_host->byte_clk_rate, + msm_host->esc_clk_rate, msm_host->src_clk_rate); + + ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate); + if (ret) { + pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret); + return ret; + } + + ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate); + if (ret) { + pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret); + return ret; + } + + ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate); + if (ret) { + pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret); + return ret; + } + + ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate); + if (ret) { + pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret); + return ret; + } + + return 0; +} + +int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host) +{ + int ret; + + ret = clk_prepare_enable(msm_host->byte_clk); + if (ret) { + pr_err("%s: Failed to enable dsi byte clk\n", __func__); + goto error; + } + + ret = clk_prepare_enable(msm_host->esc_clk); + if (ret) { + pr_err("%s: Failed to enable dsi esc clk\n", __func__); + goto esc_clk_err; + } + + ret = clk_prepare_enable(msm_host->src_clk); + if (ret) { + pr_err("%s: Failed to enable dsi src clk\n", __func__); + goto src_clk_err; + } + + ret = clk_prepare_enable(msm_host->pixel_clk); + if (ret) { + pr_err("%s: Failed to enable dsi pixel clk\n", __func__); + goto pixel_clk_err; + } + + return 0; + +pixel_clk_err: + clk_disable_unprepare(msm_host->src_clk); +src_clk_err: + clk_disable_unprepare(msm_host->esc_clk); +esc_clk_err: + clk_disable_unprepare(msm_host->byte_clk); +error: + return ret; +} + +void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host) +{ + /* Drop the performance state vote */ + dev_pm_opp_set_rate(&msm_host->pdev->dev, 0); + clk_disable_unprepare(msm_host->esc_clk); + clk_disable_unprepare(msm_host->pixel_clk); + clk_disable_unprepare(msm_host->byte_intf_clk); + clk_disable_unprepare(msm_host->byte_clk); +} + +void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host) +{ + clk_disable_unprepare(msm_host->pixel_clk); + 
clk_disable_unprepare(msm_host->src_clk); + clk_disable_unprepare(msm_host->esc_clk); + clk_disable_unprepare(msm_host->byte_clk); +} + +/** + * dsi_adjust_pclk_for_compression() - Adjust the pclk rate for compression case + * @mode: The selected mode for the DSI output + * @dsc: DRM DSC configuration for this DSI output + * + * Adjust the pclk rate by calculating a new hdisplay proportional to + * the compression ratio such that: + * new_hdisplay = old_hdisplay * compressed_bpp / uncompressed_bpp + * + * Porches do not need to be adjusted: + * - For VIDEO mode they are not compressed by DSC and are passed as is. + * - For CMD mode there are no actual porches. Instead these fields + * currently represent the overhead to the image data transfer. As such, they + * are calculated for the final mode parameters (after the compression) and + * are not to be adjusted too. + * + * FIXME: Reconsider this if/when CMD mode handling is rewritten to use + * transfer time and data overhead as a starting point of the calculations. + */ +static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mode *mode, + const struct drm_dsc_config *dsc) +{ + int new_hdisplay = DIV_ROUND_UP(mode->hdisplay * drm_dsc_get_bpp_int(dsc), + dsc->bits_per_component * 3); + + int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay; + + return mult_frac(mode->clock * 1000u, new_htotal, mode->htotal); +} + +static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode, + const struct drm_dsc_config *dsc, bool is_bonded_dsi) +{ + unsigned long pclk_rate; + + pclk_rate = mode->clock * 1000u; + + if (dsc) + pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc); + + /* + * For bonded DSI mode, the current DRM mode has the complete width of the + * panel. Since, the complete panel is driven by two DSI controllers, + * the clock rates have to be split between the two dsi controllers. + * Adjust the byte and pixel clock rates for each dsi host accordingly. 
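	 *
	 * For illustration (hypothetical numbers, not from this patch): a
	 * bonded 2160x1080 panel with mode->clock = 150000 gives pclk_rate =
	 * 150 MHz, i.e. 75 MHz per host after the division below; with 4
	 * lanes and RGB888 (bpp = 24), dsi_byte_clk_get_rate() then yields
	 * 75 MHz * 24 / (8 * 4) = 56.25 MHz for the byte clock.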
+	 */
+	if (is_bonded_dsi)
+		pclk_rate /= 2;
+
+	return pclk_rate;
+}
+
+unsigned long dsi_byte_clk_get_rate(struct mipi_dsi_host *host, bool is_bonded_dsi,
+				    const struct drm_display_mode *mode)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	u8 lanes = msm_host->lanes;
+	u32 bpp = mipi_dsi_pixel_format_to_bpp(msm_host->format);
+	unsigned long pclk_rate = dsi_get_pclk_rate(mode, msm_host->dsc, is_bonded_dsi);
+	unsigned long pclk_bpp;
+
+	if (lanes == 0) {
+		pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
+		lanes = 1;
+	}
+
+	/* CPHY "byte_clk" is in units of 16 bits */
+	if (msm_host->cphy_mode)
+		pclk_bpp = mult_frac(pclk_rate, bpp, 16 * lanes);
+	else
+		pclk_bpp = mult_frac(pclk_rate, bpp, 8 * lanes);
+
+	return pclk_bpp;
+}
+
+static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+{
+	msm_host->pixel_clk_rate = dsi_get_pclk_rate(msm_host->mode, msm_host->dsc, is_bonded_dsi);
+	msm_host->byte_clk_rate = dsi_byte_clk_get_rate(&msm_host->base, is_bonded_dsi,
+							msm_host->mode);
+
+	DBG("pclk=%lu, bclk=%lu", msm_host->pixel_clk_rate,
+	    msm_host->byte_clk_rate);
+}
+
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+{
+	if (!msm_host->mode) {
+		pr_err("%s: mode not set\n", __func__);
+		return -EINVAL;
+	}
+
+	dsi_calc_pclk(msm_host, is_bonded_dsi);
+	msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
+	return 0;
+}
+
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+{
+	u32 bpp = mipi_dsi_pixel_format_to_bpp(msm_host->format);
+	unsigned int esc_mhz, esc_div;
+	unsigned long byte_mhz;
+
+	dsi_calc_pclk(msm_host, is_bonded_dsi);
+
+	msm_host->src_clk_rate = mult_frac(msm_host->pixel_clk_rate, bpp, 8);
+
+	/*
+	 * The esc clock is the byte clock followed by a 4-bit divider;
+	 * we need to find an escape clock frequency that is within the
+	 * MIPI DSI spec range and within the maximum divider limit.
+	 * We iterate over escape clock frequencies from 20 MHz down to
+	 * 5 MHz and pick the first one that our divider can support.
+	 */
+
+	byte_mhz = msm_host->byte_clk_rate / 1000000;
+
+	for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
+		esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
+
+		/*
+		 * TODO: Ideally, we shouldn't know what sort of divider
+		 * is available in mmss_cc, we're just assuming that
+		 * it'll always be a 4 bit divider. Need to come up with
+		 * a better way here.
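		 *
		 * Worked example (hypothetical rate, not from this patch):
		 * byte_clk_rate = 112500000 gives byte_mhz = 112; the first
		 * iteration, esc_mhz = 20, yields esc_div =
		 * DIV_ROUND_UP(112, 20) = 6, which fits the assumed 4-bit
		 * divider, so esc_clk_rate becomes 112500000 / 6 = 18.75 MHz.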
+ */ + if (esc_div >= 1 && esc_div <= 16) + break; + } + + if (esc_mhz < 5) + return -EINVAL; + + msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div; + + DBG("esc=%lu, src=%lu", msm_host->esc_clk_rate, + msm_host->src_clk_rate); + + return 0; +} + +static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable) +{ + u32 intr; + unsigned long flags; + + spin_lock_irqsave(&msm_host->intr_lock, flags); + intr = dsi_read(msm_host, REG_DSI_INTR_CTRL); + + if (enable) + intr |= mask; + else + intr &= ~mask; + + DBG("intr=%x enable=%d", intr, enable); + + dsi_write(msm_host, REG_DSI_INTR_CTRL, intr); + spin_unlock_irqrestore(&msm_host->intr_lock, flags); +} + +static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags) +{ + if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST) + return BURST_MODE; + else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) + return NON_BURST_SYNCH_PULSE; + + return NON_BURST_SYNCH_EVENT; +} + +static inline enum dsi_vid_dst_format +dsi_get_vid_fmt(const enum mipi_dsi_pixel_format mipi_fmt) +{ + switch (mipi_fmt) { + case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888; + case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE; + case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666; + case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565; + default: return VID_DST_FORMAT_RGB888; + } +} + +static inline enum dsi_cmd_dst_format +dsi_get_cmd_fmt(const enum mipi_dsi_pixel_format mipi_fmt) +{ + switch (mipi_fmt) { + case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888; + case MIPI_DSI_FMT_RGB666_PACKED: + case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666; + case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565; + default: return CMD_DST_FORMAT_RGB888; + } +} + +static void dsi_ctrl_disable(struct msm_dsi_host *msm_host) +{ + dsi_write(msm_host, REG_DSI_CTRL, 0); +} + +bool msm_dsi_host_is_wide_bus_enabled(struct mipi_dsi_host *host) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + return msm_host->dsc && + (msm_host->cfg_hnd->major == MSM_DSI_VER_MAJOR_6G && + msm_host->cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V2_5_0); +} + +static void dsi_ctrl_enable(struct msm_dsi_host *msm_host, + struct msm_dsi_phy_shared_timings *phy_shared_timings, struct msm_dsi_phy *phy) +{ + u32 flags = msm_host->mode_flags; + enum mipi_dsi_pixel_format mipi_fmt = msm_host->format; + const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; + u32 data = 0, lane_ctrl = 0; + + if (flags & MIPI_DSI_MODE_VIDEO) { + if (flags & MIPI_DSI_MODE_VIDEO_HSE) + data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE; + if (flags & MIPI_DSI_MODE_VIDEO_NO_HFP) + data |= DSI_VID_CFG0_HFP_POWER_STOP; + if (flags & MIPI_DSI_MODE_VIDEO_NO_HBP) + data |= DSI_VID_CFG0_HBP_POWER_STOP; + if (flags & MIPI_DSI_MODE_VIDEO_NO_HSA) + data |= DSI_VID_CFG0_HSA_POWER_STOP; + /* Always set low power stop mode for BLLP + * to let command engine send packets + */ + data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP | + DSI_VID_CFG0_BLLP_POWER_STOP; + data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags)); + data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt)); + data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel); + if (msm_dsi_host_is_wide_bus_enabled(&msm_host->base)) + data |= DSI_VID_CFG0_DATABUS_WIDEN; + dsi_write(msm_host, REG_DSI_VID_CFG0, data); + + /* Do not swap RGB colors */ + data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB); + dsi_write(msm_host, REG_DSI_VID_CFG1, 0); + } else { + /* Do not swap RGB colors */ + data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB); + data |= 
DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt)); + dsi_write(msm_host, REG_DSI_CMD_CFG0, data); + + data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) | + DSI_CMD_CFG1_WR_MEM_CONTINUE( + MIPI_DCS_WRITE_MEMORY_CONTINUE); + /* Always insert DCS command */ + data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND; + dsi_write(msm_host, REG_DSI_CMD_CFG1, data); + + if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { + data = dsi_read(msm_host, REG_DSI_CMD_MODE_MDP_CTRL2); + + if (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_3) + data |= DSI_CMD_MODE_MDP_CTRL2_BURST_MODE; + + if (msm_dsi_host_is_wide_bus_enabled(&msm_host->base)) + data |= DSI_CMD_MODE_MDP_CTRL2_DATABUS_WIDEN; + + dsi_write(msm_host, REG_DSI_CMD_MODE_MDP_CTRL2, data); + } + } + + dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, + DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER | + DSI_CMD_DMA_CTRL_LOW_POWER); + + data = 0; + /* Always assume dedicated TE pin */ + data |= DSI_TRIG_CTRL_TE; + data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE); + data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW); + data |= DSI_TRIG_CTRL_STREAM(msm_host->channel); + if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) && + (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2)) + data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME; + dsi_write(msm_host, REG_DSI_TRIG_CTRL, data); + + data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) | + DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre); + dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data); + + if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) && + (cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) && + phy_shared_timings->clk_pre_inc_by_2) + dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND, + DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK); + + data = 0; + if (!(flags & MIPI_DSI_MODE_NO_EOT_PACKET)) + data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND; + dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data); + + /* allow only ack-err-status to generate interrupt */ + dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0); + + dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1); + + dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS); + + data = DSI_CTRL_CLK_EN; + + DBG("lane number=%d", msm_host->lanes); + data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0); + + dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL, + DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap)); + + if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) { + lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL); + + if (msm_dsi_phy_set_continuous_clock(phy, true)) + lane_ctrl &= ~DSI_LANE_CTRL_HS_REQ_SEL_PHY; + + dsi_write(msm_host, REG_DSI_LANE_CTRL, + lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST); + } + + data |= DSI_CTRL_ENABLE; + + dsi_write(msm_host, REG_DSI_CTRL, data); + + if (msm_host->cphy_mode) + dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0)); +} + +static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode) +{ + struct drm_dsc_config *dsc = msm_host->dsc; + u32 reg, reg_ctrl, reg_ctrl2; + u32 slice_per_intf, total_bytes_per_intf; + u32 pkt_per_line; + u32 eol_byte_num; + u32 bytes_per_pkt; + + /* first calculate dsc parameters and then program + * compress mode registers + */ + slice_per_intf = dsc->slice_count; + + total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf; + bytes_per_pkt = dsc->slice_chunk_size; /* * slice_per_pkt; */ + + eol_byte_num = total_bytes_per_intf % 3; + + /* + * Typically, pkt_per_line = slice_per_intf * slice_per_pkt. 
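	 * (For illustration, hypothetical values: slice_count = 2 and
	 * slice_chunk_size = 540 give total_bytes_per_intf = 1080,
	 * eol_byte_num = 1080 % 3 = 0 and, with slice_per_pkt = 1,
	 * pkt_per_line = 2, programmed below as 2 >> 1 = 1.)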
+ * + * Since the current driver only supports slice_per_pkt = 1, + * pkt_per_line will be equal to slice per intf for now. + */ + pkt_per_line = slice_per_intf; + + if (is_cmd_mode) /* packet data type */ + reg = DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(MIPI_DSI_DCS_LONG_WRITE); + else + reg = DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE(MIPI_DSI_COMPRESSED_PIXEL_STREAM); + + /* DSI_VIDEO_COMPRESSION_MODE & DSI_COMMAND_COMPRESSION_MODE + * registers have similar offsets, so for below common code use + * DSI_VIDEO_COMPRESSION_MODE_XXXX for setting bits + * + * pkt_per_line is log2 encoded, >>1 works for supported values (1,2,4) + */ + if (pkt_per_line > 4) + drm_warn_once(msm_host->dev, "pkt_per_line too big"); + reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE(pkt_per_line >> 1); + reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM(eol_byte_num); + reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_EN; + + if (is_cmd_mode) { + reg_ctrl = dsi_read(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL); + reg_ctrl2 = dsi_read(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2); + + reg_ctrl &= ~0xffff; + reg_ctrl |= reg; + + reg_ctrl2 &= ~DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK; + reg_ctrl2 |= DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH(dsc->slice_chunk_size); + + dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl); + dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2); + } else { + reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_WC(bytes_per_pkt); + dsi_write(msm_host, REG_DSI_VIDEO_COMPRESSION_MODE_CTRL, reg); + } +} + +static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi) +{ + struct drm_display_mode *mode = msm_host->mode; + u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */ + u32 h_total = mode->htotal; + u32 v_total = mode->vtotal; + u32 hs_end = mode->hsync_end - mode->hsync_start; + u32 vs_end = mode->vsync_end - mode->vsync_start; + u32 ha_start = h_total - mode->hsync_start; + u32 ha_end = ha_start + mode->hdisplay; + u32 va_start = v_total - mode->vsync_start; + u32 va_end = va_start + mode->vdisplay; + u32 hdisplay = mode->hdisplay; + u32 wc; + int ret; + bool wide_bus_enabled = msm_dsi_host_is_wide_bus_enabled(&msm_host->base); + + DBG(""); + + /* + * For bonded DSI mode, the current DRM mode has + * the complete width of the panel. Since, the complete + * panel is driven by two DSI controllers, the horizontal + * timings have to be split between the two dsi controllers. + * Adjust the DSI host timing values accordingly. + */ + if (is_bonded_dsi) { + h_total /= 2; + hs_end /= 2; + ha_start /= 2; + ha_end /= 2; + hdisplay /= 2; + } + + if (msm_host->dsc) { + struct drm_dsc_config *dsc = msm_host->dsc; + u32 bytes_per_pclk; + + /* update dsc params with timing params */ + if (!dsc || !mode->hdisplay || !mode->vdisplay) { + pr_err("DSI: invalid input: pic_width: %d pic_height: %d\n", + mode->hdisplay, mode->vdisplay); + return; + } + + dsc->pic_width = mode->hdisplay; + dsc->pic_height = mode->vdisplay; + DBG("Mode %dx%d\n", dsc->pic_width, dsc->pic_height); + + /* we do the calculations for dsc parameters here so that + * panel can use these parameters + */ + ret = dsi_populate_dsc_params(msm_host, dsc); + if (ret) + return; + + /* + * DPU sends 3 bytes per pclk cycle to DSI. If widebus is + * enabled, bus width is extended to 6 bytes. + * + * Calculate the number of pclks needed to transmit one line of + * the compressed data. + + * The back/font porch and pulse width are kept intact. 
+		 * For VIDEO mode they represent timing parameters rather than
+		 * actual data transfer, see the documentation for
+		 * dsi_adjust_pclk_for_compression(). For CMD mode they are
+		 * unused anyway.
+		 */
+		h_total -= hdisplay;
+		if (wide_bus_enabled && !(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
+			bytes_per_pclk = 6;
+		else
+			bytes_per_pclk = 3;
+
+		hdisplay = DIV_ROUND_UP(msm_dsc_get_bytes_per_line(msm_host->dsc), bytes_per_pclk);
+
+		h_total += hdisplay;
+		ha_end = ha_start + hdisplay;
+	}
+
+	if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
+		if (msm_host->dsc)
+			dsi_update_dsc_timing(msm_host, false);
+
+		dsi_write(msm_host, REG_DSI_ACTIVE_H,
+			  DSI_ACTIVE_H_START(ha_start) |
+			  DSI_ACTIVE_H_END(ha_end));
+		dsi_write(msm_host, REG_DSI_ACTIVE_V,
+			  DSI_ACTIVE_V_START(va_start) |
+			  DSI_ACTIVE_V_END(va_end));
+		dsi_write(msm_host, REG_DSI_TOTAL,
+			  DSI_TOTAL_H_TOTAL(h_total - 1) |
+			  DSI_TOTAL_V_TOTAL(v_total - 1));
+
+		dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
+			  DSI_ACTIVE_HSYNC_START(hs_start) |
+			  DSI_ACTIVE_HSYNC_END(hs_end));
+		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
+		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
+			  DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
+			  DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
+	} else {		/* command mode */
+		if (msm_host->dsc)
+			dsi_update_dsc_timing(msm_host, true);
+
+		/* image data and 1 byte write_memory_start cmd */
+		if (!msm_host->dsc)
+			wc = hdisplay * mipi_dsi_pixel_format_to_bpp(msm_host->format) / 8 + 1;
+		else
+			/*
+			 * When DSC is enabled, WC = slice_chunk_size * slice_per_pkt + 1.
+			 * Currently, the driver only supports the default value of slice_per_pkt = 1.
+			 *
+			 * TODO: Expand the mipi_dsi_device struct to hold slice_per_pkt info
+			 *       and adjust the DSC math to account for slice_per_pkt.
+			 */
+			wc = msm_host->dsc->slice_chunk_size + 1;
+
+		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_CTRL,
+			  DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(wc) |
+			  DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL(
+					msm_host->channel) |
+			  DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE(
+					MIPI_DSI_DCS_LONG_WRITE));
+
+		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_TOTAL,
+			  DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL(hdisplay) |
+			  DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL(mode->vdisplay));
+	}
+}
+
+static void dsi_sw_reset(struct msm_dsi_host *msm_host)
+{
+	u32 ctrl;
+
+	ctrl = dsi_read(msm_host, REG_DSI_CTRL);
+
+	if (ctrl & DSI_CTRL_ENABLE) {
+		dsi_write(msm_host, REG_DSI_CTRL, ctrl & ~DSI_CTRL_ENABLE);
+		/*
+		 * the dsi controller needs to be disabled before the
+		 * clocks are turned on
+		 */
+		wmb();
+	}
+
+	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+	wmb(); /* clocks need to be enabled before reset */
+
+	/* dsi controller can only be reset while clocks are running */
+	dsi_write(msm_host, REG_DSI_RESET, 1);
+	msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure the reset happens */
+	dsi_write(msm_host, REG_DSI_RESET, 0);
+	wmb(); /* controller out of reset */
+
+	if (ctrl & DSI_CTRL_ENABLE) {
+		dsi_write(msm_host, REG_DSI_CTRL, ctrl);
+		wmb(); /* make sure the dsi controller is enabled again */
+	}
+}
+
+static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
+			       bool video_mode, bool enable)
+{
+	u32 dsi_ctrl;
+
+	dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
+
+	if (!enable) {
+		dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
+			      DSI_CTRL_CMD_MODE_EN);
+		dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
+			      DSI_IRQ_MASK_VIDEO_DONE, 0);
+	} else {
+		if (video_mode) {
+			dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
+		} else {		/* command mode */
+			dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
+			dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
+		}
+		dsi_ctrl |= DSI_CTRL_ENABLE;
+	}
+
+	dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
+}
+
+static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
+{
+	u32 data;
+
+	data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
+
+	if (mode == 0)
+		data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
+	else
+		data |= DSI_CMD_DMA_CTRL_LOW_POWER;
+
+	dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
+}
+
+static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
+{
+	u32 ret = 0;
+	struct device *dev = &msm_host->pdev->dev;
+
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
+
+	reinit_completion(&msm_host->video_comp);
+
+	ret = wait_for_completion_timeout(&msm_host->video_comp,
+					  msecs_to_jiffies(70));
+
+	if (ret == 0)
+		DRM_DEV_ERROR(dev, "wait for video done timed out\n");
+
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
+}
+
+static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
+{
+	u32 data;
+
+	if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
+		return;
+
+	data = dsi_read(msm_host, REG_DSI_STATUS0);
+
+	/* if the video mode engine is not busy, it's because either
+	 * the timing engine was not turned on or the DSI controller
+	 * has already finished transmitting the video data, so there
+	 * is no need to wait in those cases
+	 */
+	if (!(data & DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY))
+		return;
+
+	if (msm_host->power_on && msm_host->enabled) {
+		dsi_wait4video_done(msm_host);
+		/* delay 2-4 ms to skip the BLLP */
+		usleep_range(2000, 4000);
+	}
+}
+
+int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
+{
+	struct drm_device *dev = msm_host->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	uint64_t iova;
+	u8 *data;
+
+	msm_host->vm = drm_gpuvm_get(priv->kms->vm);
+
+	data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
+				  msm_host->vm,
+				  &msm_host->tx_gem_obj, &iova);
+
+	if (IS_ERR(data)) {
+		msm_host->tx_gem_obj = NULL;
+		return PTR_ERR(data);
+	}
+
+	msm_gem_object_set_name(msm_host->tx_gem_obj, "tx_gem");
+
+	msm_host->tx_size = msm_host->tx_gem_obj->size;
+
+	return 0;
+}
+
+int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
+{
+	struct drm_device *dev = msm_host->dev;
+
+	msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
+					      &msm_host->tx_buf_paddr, GFP_KERNEL);
+	if (!msm_host->tx_buf)
+		return -ENOMEM;
+
+	msm_host->tx_size = size;
+
+	return 0;
+}
+
+void msm_dsi_tx_buf_free(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	struct drm_device *dev = msm_host->dev;
+
+	/*
+	 * This is possible if we're tearing down before we've had a chance to
+	 * fully initialize. A very real possibility if our probe is deferred,
+	 * in which case we'll hit msm_dsi_host_destroy() without having run
+	 * through the dsi_tx_buf_alloc().
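+	 *
+	 * Both allocation flavours are handled below: the GEM object used
+	 * by the 6G hosts (dsi_tx_buf_alloc_6g()) and the coherent DMA
+	 * buffer used by the older v2 hosts (dsi_tx_buf_alloc_v2()).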
+ */ + if (!dev) + return; + + if (msm_host->tx_gem_obj) { + msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->vm); + drm_gpuvm_put(msm_host->vm); + msm_host->tx_gem_obj = NULL; + msm_host->vm = NULL; + } + + if (msm_host->tx_buf) + dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf, + msm_host->tx_buf_paddr); +} + +void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host) +{ + return msm_gem_get_vaddr(msm_host->tx_gem_obj); +} + +void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host) +{ + return msm_host->tx_buf; +} + +void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host) +{ + msm_gem_put_vaddr(msm_host->tx_gem_obj); +} + +/* + * prepare cmd buffer to be txed + */ +static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host, + const struct mipi_dsi_msg *msg) +{ + const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; + struct mipi_dsi_packet packet; + int len; + int ret; + u8 *data; + + ret = mipi_dsi_create_packet(&packet, msg); + if (ret) { + pr_err("%s: create packet failed, %d\n", __func__, ret); + return ret; + } + len = (packet.size + 3) & (~0x3); + + if (len > msm_host->tx_size) { + pr_err("%s: packet size is too big\n", __func__); + return -EINVAL; + } + + data = cfg_hnd->ops->tx_buf_get(msm_host); + if (IS_ERR(data)) { + ret = PTR_ERR(data); + pr_err("%s: get vaddr failed, %d\n", __func__, ret); + return ret; + } + + /* MSM specific command format in memory */ + data[0] = packet.header[1]; + data[1] = packet.header[2]; + data[2] = packet.header[0]; + data[3] = BIT(7); /* Last packet */ + if (mipi_dsi_packet_format_is_long(msg->type)) + data[3] |= BIT(6); + if (msg->rx_buf && msg->rx_len) + data[3] |= BIT(5); + + /* Long packet */ + if (packet.payload && packet.payload_length) + memcpy(data + 4, packet.payload, packet.payload_length); + + /* Append 0xff to the end */ + if (packet.size < len) + memset(data + packet.size, 0xff, len - packet.size); + + if (cfg_hnd->ops->tx_buf_put) + cfg_hnd->ops->tx_buf_put(msm_host); + + return len; +} + +/* + * dsi_short_read1_resp: 1 parameter + */ +static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg) +{ + u8 *data = msg->rx_buf; + + if (data && (msg->rx_len >= 1)) { + *data = buf[1]; /* strip out dcs type */ + return 1; + } + + pr_err("%s: read data does not match with rx_buf len %zu\n", + __func__, msg->rx_len); + return -EINVAL; +} + +/* + * dsi_short_read2_resp: 2 parameter + */ +static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg) +{ + u8 *data = msg->rx_buf; + + if (data && (msg->rx_len >= 2)) { + data[0] = buf[1]; /* strip out dcs type */ + data[1] = buf[2]; + return 2; + } + + pr_err("%s: read data does not match with rx_buf len %zu\n", + __func__, msg->rx_len); + return -EINVAL; +} + +static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg) +{ + /* strip out 4 byte dcs header */ + if (msg->rx_buf && msg->rx_len) + memcpy(msg->rx_buf, buf + 4, msg->rx_len); + + return msg->rx_len; +} + +int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base) +{ + struct drm_device *dev = msm_host->dev; + struct msm_drm_private *priv = dev->dev_private; + + if (!dma_base) + return -EINVAL; + + return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj, + priv->kms->vm, dma_base); +} + +int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base) +{ + if (!dma_base) + return -EINVAL; + + *dma_base = msm_host->tx_buf_paddr; + return 0; +} + +static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len) +{ + const struct msm_dsi_cfg_handler *cfg_hnd = 
+							msm_host->cfg_hnd;
+	int ret;
+	uint64_t dma_base;
+	bool triggered;
+
+	ret = cfg_hnd->ops->dma_base_get(msm_host, &dma_base);
+	if (ret) {
+		pr_err("%s: failed to get iova: %d\n", __func__, ret);
+		return ret;
+	}
+
+	reinit_completion(&msm_host->dma_comp);
+
+	dsi_wait4video_eng_busy(msm_host);
+
+	triggered = msm_dsi_manager_cmd_xfer_trigger(
+						msm_host->id, dma_base, len);
+	if (triggered) {
+		ret = wait_for_completion_timeout(&msm_host->dma_comp,
+						  msecs_to_jiffies(200));
+		DBG("ret=%d", ret);
+		if (ret == 0)
+			ret = -ETIMEDOUT;
+		else
+			ret = len;
+	} else {
+		ret = len;
+	}
+
+	return ret;
+}
+
+static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
+			  u8 *buf, int rx_byte, int pkt_size)
+{
+	u32 *temp, data;
+	int i, j = 0, cnt;
+	u32 read_cnt;
+	u8 reg[16];
+	int repeated_bytes = 0;
+	int buf_offset = buf - msm_host->rx_buf;
+
+	temp = (u32 *)reg;
+	cnt = (rx_byte + 3) >> 2;
+	if (cnt > 4)
+		cnt = 4; /* 4 x 32 bits registers only */
+
+	if (rx_byte == 4)
+		read_cnt = 4;
+	else
+		read_cnt = pkt_size + 6;
+
+	/*
+	 * In case of multiple reads from the panel, after the first read
+	 * there is a possibility that some bytes of the payload repeat in
+	 * the RDBK_DATA registers, because every pass reads all the
+	 * parameters from the panel starting with the first byte. We need
+	 * to skip the repeated bytes and only append the new parameters to
+	 * the rx buffer.
+	 */
+	if (read_cnt > 16) {
+		int bytes_shifted;
+		/* Any data more than 16 bytes will be shifted out.
+		 * The temp read buffer should already contain these bytes.
+		 * The remaining bytes in the read buffer are the repeated bytes.
+		 */
+		bytes_shifted = read_cnt - 16;
+		repeated_bytes = buf_offset - bytes_shifted;
+	}
+
+	for (i = cnt - 1; i >= 0; i--) {
+		data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
+		*temp++ = ntohl(data); /* to host byte order */
+		DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
+	}
+
+	for (i = repeated_bytes; i < 16; i++)
+		buf[j++] = reg[i];
+
+	return j;
+}
+
+static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
+			   const struct mipi_dsi_msg *msg)
+{
+	int len, ret;
+	int bllp_len = msm_host->mode->hdisplay *
+			mipi_dsi_pixel_format_to_bpp(msm_host->format) / 8;
+
+	len = dsi_cmd_dma_add(msm_host, msg);
+	if (len < 0) {
+		pr_err("%s: failed to add cmd type = 0x%x\n",
+		       __func__, msg->type);
+		return len;
+	}
+
+	/*
+	 * for video mode, do not send cmds larger than
+	 * one pixel line, since they can only be transmitted
+	 * during the BLLP.
+	 *
+	 * TODO: if the command is sent in LP mode, the bit rate is only
+	 * half of the esc clk rate. In this case, if the video is already
+	 * actively streaming, we need to check more carefully whether the
+	 * command can fit into one BLLP.
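+	 *
+	 * Worked example (hypothetical mode): for a 1080 px wide RGB888
+	 * panel, bllp_len = 1080 * 24 / 8 = 3240 bytes, so any packet up
+	 * to that length passes the check below.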
+ */ + if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) { + pr_err("%s: cmd cannot fit into BLLP period, len=%d\n", + __func__, len); + return -EINVAL; + } + + ret = dsi_cmd_dma_tx(msm_host, len); + if (ret < 0) { + pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d, ret=%d\n", + __func__, msg->type, (*(u8 *)(msg->tx_buf)), len, ret); + return ret; + } else if (ret < len) { + pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, ret=%d len=%d\n", + __func__, msg->type, (*(u8 *)(msg->tx_buf)), ret, len); + return -EIO; + } + + return len; +} + +static void dsi_err_worker(struct work_struct *work) +{ + struct msm_dsi_host *msm_host = + container_of(work, struct msm_dsi_host, err_work); + u32 status = msm_host->err_work_state; + + pr_err_ratelimited("%s: status=%x\n", __func__, status); + if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW) + dsi_sw_reset(msm_host); + + /* It is safe to clear here because error irq is disabled. */ + msm_host->err_work_state = 0; + + /* enable dsi error interrupt */ + dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1); +} + +static void dsi_ack_err_status(struct msm_dsi_host *msm_host) +{ + u32 status; + + status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS); + + if (status) { + dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status); + /* Writing of an extra 0 needed to clear error bits */ + dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0); + msm_host->err_work_state |= DSI_ERR_STATE_ACK; + } +} + +static void dsi_timeout_status(struct msm_dsi_host *msm_host) +{ + u32 status; + + status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS); + + if (status) { + dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status); + msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT; + } +} + +static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host) +{ + u32 status; + + status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR); + + if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC | + DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC | + DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL | + DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 | + DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) { + dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status); + msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY; + } +} + +static void dsi_fifo_status(struct msm_dsi_host *msm_host) +{ + u32 status; + + status = dsi_read(msm_host, REG_DSI_FIFO_STATUS); + + /* fifo underflow, overflow */ + if (status) { + dsi_write(msm_host, REG_DSI_FIFO_STATUS, status); + msm_host->err_work_state |= DSI_ERR_STATE_FIFO; + if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW) + msm_host->err_work_state |= + DSI_ERR_STATE_MDP_FIFO_UNDERFLOW; + } +} + +static void dsi_status(struct msm_dsi_host *msm_host) +{ + u32 status; + + status = dsi_read(msm_host, REG_DSI_STATUS0); + + if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) { + dsi_write(msm_host, REG_DSI_STATUS0, status); + msm_host->err_work_state |= + DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION; + } +} + +static void dsi_clk_status(struct msm_dsi_host *msm_host) +{ + u32 status; + + status = dsi_read(msm_host, REG_DSI_CLK_STATUS); + + if (status & DSI_CLK_STATUS_PLL_UNLOCKED) { + dsi_write(msm_host, REG_DSI_CLK_STATUS, status); + msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED; + } +} + +static void dsi_error(struct msm_dsi_host *msm_host) +{ + /* disable dsi error interrupt */ + dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0); + + dsi_clk_status(msm_host); + dsi_fifo_status(msm_host); + dsi_ack_err_status(msm_host); + dsi_timeout_status(msm_host); + dsi_status(msm_host); + dsi_dln0_phy_err(msm_host); + + 
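+	/*
+	 * The collectors above only latch error bits into err_work_state;
+	 * heavier recovery (e.g. the SW reset on an MDP FIFO underflow) is
+	 * deferred to dsi_err_worker(), which also re-enables the error IRQ.
+	 */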
+	queue_work(msm_host->workqueue, &msm_host->err_work);
+}
+
+static irqreturn_t dsi_host_irq(int irq, void *ptr)
+{
+	struct msm_dsi_host *msm_host = ptr;
+	u32 isr;
+	unsigned long flags;
+
+	if (!msm_host->ctrl_base)
+		return IRQ_HANDLED;
+
+	spin_lock_irqsave(&msm_host->intr_lock, flags);
+	isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
+	dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
+	spin_unlock_irqrestore(&msm_host->intr_lock, flags);
+
+	DBG("isr=0x%x, id=%d", isr, msm_host->id);
+
+	if (isr & DSI_IRQ_ERROR)
+		dsi_error(msm_host);
+
+	if (isr & DSI_IRQ_VIDEO_DONE)
+		complete(&msm_host->video_comp);
+
+	if (isr & DSI_IRQ_CMD_DMA_DONE)
+		complete(&msm_host->dma_comp);
+
+	return IRQ_HANDLED;
+}
+
+static int dsi_host_attach(struct mipi_dsi_host *host,
+			   struct mipi_dsi_device *dsi)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	int ret;
+
+	if (dsi->lanes > msm_host->num_data_lanes)
+		return -EINVAL;
+
+	msm_host->channel = dsi->channel;
+	msm_host->lanes = dsi->lanes;
+	msm_host->format = dsi->format;
+	msm_host->mode_flags = dsi->mode_flags;
+	if (dsi->dsc)
+		msm_host->dsc = dsi->dsc;
+
+	ret = dsi_dev_attach(msm_host->pdev);
+	if (ret)
+		return ret;
+
+	DBG("id=%d", msm_host->id);
+
+	return 0;
+}
+
+static int dsi_host_detach(struct mipi_dsi_host *host,
+			   struct mipi_dsi_device *dsi)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	dsi_dev_detach(msm_host->pdev);
+
+	DBG("id=%d", msm_host->id);
+
+	return 0;
+}
+
+static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
+				 const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	int ret;
+
+	if (!msg || !msm_host->power_on)
+		return -EINVAL;
+
+	mutex_lock(&msm_host->cmd_mutex);
+	ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
+	mutex_unlock(&msm_host->cmd_mutex);
+
+	return ret;
+}
+
+static const struct mipi_dsi_host_ops dsi_host_ops = {
+	.attach = dsi_host_attach,
+	.detach = dsi_host_detach,
+	.transfer = dsi_host_transfer,
+};
+
+/*
+ * List of supported physical to logical lane mappings.
+ * For example, the 2nd entry represents the following mapping:
+ *
+ * "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
+ */
+static const int supported_data_lane_swaps[][4] = {
+	{ 0, 1, 2, 3 },
+	{ 3, 0, 1, 2 },
+	{ 2, 3, 0, 1 },
+	{ 1, 2, 3, 0 },
+	{ 0, 3, 2, 1 },
+	{ 1, 0, 3, 2 },
+	{ 2, 1, 0, 3 },
+	{ 3, 2, 1, 0 },
+};
+
+static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
+				    struct device_node *ep)
+{
+	struct device *dev = &msm_host->pdev->dev;
+	struct property *prop;
+	u32 lane_map[4];
+	int ret, i, len, num_lanes;
+
+	prop = of_find_property(ep, "data-lanes", &len);
+	if (!prop) {
+		DRM_DEV_DEBUG(dev,
+			      "failed to find data lane mapping, using default\n");
+		/* Set the number of data lanes to 4 by default. */
+		msm_host->num_data_lanes = 4;
+		return 0;
+	}
+
+	num_lanes = drm_of_get_data_lanes_count(ep, 1, 4);
+	if (num_lanes < 0) {
+		DRM_DEV_ERROR(dev, "bad number of data lanes\n");
+		return num_lanes;
+	}
+
+	msm_host->num_data_lanes = num_lanes;
+
+	ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
+					 num_lanes);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "failed to read lane data\n");
+		return ret;
+	}
+
+	/*
+	 * compare the DT-specified physical-logical lane mappings with the
+	 * ones supported by hardware
+	 */
+	for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) {
+		const int *swap = supported_data_lane_swaps[i];
+		int j;
+
+		/*
+		 * the data-lanes array we get from DT has a logical->physical
+		 * mapping.
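+		 * (e.g. a hypothetical "data-lanes = <3 0 1 2>" maps
+		 * logical lane 0 to physical lane 3, logical 1 to
+		 * physical 0, and so on.)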
+		 * The "data lane swap" register field represents
+		 * supported configurations in a physical->logical mapping.
+		 * Translate the DT mapping to what we understand and find a
+		 * configuration that works.
+		 */
+		for (j = 0; j < num_lanes; j++) {
+			if (lane_map[j] < 0 || lane_map[j] > 3)
+				DRM_DEV_ERROR(dev, "bad physical lane entry %u\n",
+					      lane_map[j]);
+
+			if (swap[lane_map[j]] != j)
+				break;
+		}
+
+		if (j == num_lanes) {
+			msm_host->dlane_swap = i;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc_config *dsc)
+{
+	int ret;
+
+	if (dsc->bits_per_pixel & 0xf) {
+		DRM_DEV_ERROR(&msm_host->pdev->dev, "DSI does not support fractional bits_per_pixel\n");
+		return -EINVAL;
+	}
+
+	switch (dsc->bits_per_component) {
+	case 8:
+	case 10:
+	case 12:
+		/*
+		 * Only 8, 10, and 12 bpc are supported for the DSC 1.1 block.
+		 * If additional bpc values need to be supported, update
+		 * this guard with the appropriate DSC version verification.
+		 */
+		break;
+	default:
+		DRM_DEV_ERROR(&msm_host->pdev->dev,
+			      "Unsupported bits_per_component value: %d\n",
+			      dsc->bits_per_component);
+		return -EOPNOTSUPP;
+	}
+
+	dsc->simple_422 = 0;
+	dsc->convert_rgb = 1;
+	dsc->vbr_enable = 0;
+
+	drm_dsc_set_const_params(dsc);
+	drm_dsc_set_rc_buf_thresh(dsc);
+
+	/* DPU supports only pre-SCR panels */
+	ret = drm_dsc_setup_rc_params(dsc, DRM_DSC_1_1_PRE_SCR);
+	if (ret) {
+		DRM_DEV_ERROR(&msm_host->pdev->dev, "could not find DSC RC parameters\n");
+		return ret;
+	}
+
+	dsc->initial_scale_value = drm_dsc_initial_scale_value(dsc);
+	dsc->line_buf_depth = dsc->bits_per_component + 1;
+
+	return drm_dsc_compute_rc_parameters(dsc);
+}
+
+static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
+{
+	struct msm_dsi *msm_dsi = platform_get_drvdata(msm_host->pdev);
+	struct device *dev = &msm_host->pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct device_node *endpoint;
+	const char *te_source;
+	int ret = 0;
+
+	/*
+	 * Get the endpoint of the output port of the DSI host. In our case,
+	 * this is mapped to the port with reg = 1. Don't return an error if
+	 * the remote endpoint isn't defined. It's possible that there is
+	 * nothing connected to the dsi output.
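+	 *
+	 * A typical (hypothetical) device tree layout for this port:
+	 *
+	 *   ports {
+	 *     port@1 {
+	 *       endpoint {
+	 *         remote-endpoint = <&panel_in>;
+	 *         data-lanes = <0 1 2 3>;
+	 *       };
+	 *     };
+	 *   };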
+ */ + endpoint = of_graph_get_endpoint_by_regs(np, 1, -1); + if (!endpoint) { + DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__); + return 0; + } + + ret = dsi_host_parse_lane_data(msm_host, endpoint); + if (ret) { + DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n", + __func__, ret); + ret = -EINVAL; + goto err; + } + + ret = of_property_read_string(endpoint, "qcom,te-source", &te_source); + if (ret && ret != -EINVAL) { + DRM_DEV_ERROR(dev, "%s: invalid TE source configuration %d\n", + __func__, ret); + goto err; + } + if (!ret) { + msm_dsi->te_source = devm_kstrdup(dev, te_source, GFP_KERNEL); + if (!msm_dsi->te_source) { + DRM_DEV_ERROR(dev, "%s: failed to allocate te_source\n", + __func__); + ret = -ENOMEM; + goto err; + } + } + ret = 0; + + if (of_property_present(np, "syscon-sfpb")) { + msm_host->sfpb = syscon_regmap_lookup_by_phandle(np, + "syscon-sfpb"); + if (IS_ERR(msm_host->sfpb)) { + DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n", + __func__); + ret = PTR_ERR(msm_host->sfpb); + } + } + +err: + of_node_put(endpoint); + + return ret; +} + +static int dsi_host_get_id(struct msm_dsi_host *msm_host) +{ + struct platform_device *pdev = msm_host->pdev; + const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg; + struct resource *res; + int i, j; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl"); + if (!res) + return -EINVAL; + + for (i = 0; i < VARIANTS_MAX; i++) + for (j = 0; j < DSI_MAX; j++) + if (cfg->io_start[i][j] == res->start) + return j; + + return -EINVAL; +} + +int msm_dsi_host_init(struct msm_dsi *msm_dsi) +{ + struct msm_dsi_host *msm_host = NULL; + struct platform_device *pdev = msm_dsi->pdev; + const struct msm_dsi_config *cfg; + int ret; + + msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL); + if (!msm_host) + return -ENOMEM; + + msm_host->pdev = pdev; + msm_dsi->host = &msm_host->base; + + ret = dsi_host_parse_dt(msm_host); + if (ret) + return dev_err_probe(&pdev->dev, ret, "%s: failed to parse dt\n", + __func__); + + msm_host->ctrl_base = msm_ioremap_size(pdev, "dsi_ctrl", &msm_host->ctrl_size); + if (IS_ERR(msm_host->ctrl_base)) + return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->ctrl_base), + "%s: unable to map Dsi ctrl base\n", __func__); + + pm_runtime_enable(&pdev->dev); + + msm_host->cfg_hnd = dsi_get_config(msm_host); + if (!msm_host->cfg_hnd) + return dev_err_probe(&pdev->dev, -EINVAL, + "%s: get config failed\n", __func__); + cfg = msm_host->cfg_hnd->cfg; + + msm_host->id = dsi_host_get_id(msm_host); + if (msm_host->id < 0) + return dev_err_probe(&pdev->dev, msm_host->id, + "%s: unable to identify DSI host index\n", + __func__); + + /* fixup base address by io offset */ + msm_host->ctrl_base += cfg->io_offset; + + ret = devm_regulator_bulk_get_const(&pdev->dev, cfg->num_regulators, + cfg->regulator_data, + &msm_host->supplies); + if (ret) + return ret; + + ret = dsi_clk_init(msm_host); + if (ret) + return dev_err_probe(&pdev->dev, ret, "%s: unable to initialize dsi clks\n", __func__); + + msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL); + if (!msm_host->rx_buf) + return -ENOMEM; + + ret = devm_pm_opp_set_clkname(&pdev->dev, "byte"); + if (ret) + return ret; + /* OPP table is optional */ + ret = devm_pm_opp_of_add_table(&pdev->dev); + if (ret && ret != -ENODEV) + return dev_err_probe(&pdev->dev, ret, "invalid OPP table in device tree\n"); + + msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (!msm_host->irq) + return dev_err_probe(&pdev->dev, -EINVAL, "failed to get 
irq\n"); + + /* do not autoenable, will be enabled later */ + ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq, + IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, + "dsi_isr", msm_host); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "failed to request IRQ%u\n", + msm_host->irq); + + init_completion(&msm_host->dma_comp); + init_completion(&msm_host->video_comp); + mutex_init(&msm_host->dev_mutex); + mutex_init(&msm_host->cmd_mutex); + spin_lock_init(&msm_host->intr_lock); + + /* setup workqueue */ + msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0); + if (!msm_host->workqueue) + return -ENOMEM; + + INIT_WORK(&msm_host->err_work, dsi_err_worker); + + msm_dsi->id = msm_host->id; + + DBG("Dsi Host %d initialized", msm_host->id); + return 0; +} + +void msm_dsi_host_destroy(struct mipi_dsi_host *host) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + DBG(""); + if (msm_host->workqueue) { + destroy_workqueue(msm_host->workqueue); + msm_host->workqueue = NULL; + } + + mutex_destroy(&msm_host->cmd_mutex); + mutex_destroy(&msm_host->dev_mutex); + + pm_runtime_disable(&msm_host->pdev->dev); +} + +int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, + struct drm_device *dev) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; + int ret; + + msm_host->dev = dev; + + ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K); + if (ret) { + pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret); + return ret; + } + + return 0; +} + +int msm_dsi_host_register(struct mipi_dsi_host *host) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + int ret; + + /* Register mipi dsi host */ + if (!msm_host->registered) { + host->dev = &msm_host->pdev->dev; + host->ops = &dsi_host_ops; + ret = mipi_dsi_host_register(host); + if (ret) + return ret; + + msm_host->registered = true; + } + + return 0; +} + +void msm_dsi_host_unregister(struct mipi_dsi_host *host) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + if (msm_host->registered) { + mipi_dsi_host_unregister(host); + host->dev = NULL; + host->ops = NULL; + msm_host->registered = false; + } +} + +int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; + + /* TODO: make sure dsi_cmd_mdp is idle. + * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME + * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed. + * How to handle the old versions? Wait for mdp cmd done? 
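+	 *
+	 * (For DSI 6G >= v1.2 hosts, dsi_ctrl_enable() above already sets
+	 * DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME.)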
+	 */
+
+	/*
+	 * the mdss interrupt is generated in the mdp core clock domain,
+	 * so the mdp clock needs to be enabled to receive the dsi interrupt
+	 */
+	pm_runtime_get_sync(&msm_host->pdev->dev);
+	cfg_hnd->ops->link_clk_set_rate(msm_host);
+	cfg_hnd->ops->link_clk_enable(msm_host);
+
+	/* TODO: vote for bus bandwidth */
+
+	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
+		dsi_set_tx_power_mode(0, msm_host);
+
+	msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
+	dsi_write(msm_host, REG_DSI_CTRL,
+		  msm_host->dma_cmd_ctrl_restore |
+		  DSI_CTRL_CMD_MODE_EN |
+		  DSI_CTRL_ENABLE);
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);
+
+	return 0;
+}
+
+void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
+			       const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
+	dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
+
+	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
+		dsi_set_tx_power_mode(1, msm_host);
+
+	/* TODO: unvote for bus bandwidth */
+
+	cfg_hnd->ops->link_clk_disable(msm_host);
+	pm_runtime_put(&msm_host->pdev->dev);
+}
+
+int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
+			const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	return dsi_cmds2buf_tx(msm_host, msg);
+}
+
+int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
+			const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+	int data_byte, rx_byte, dlen, end;
+	int short_response, diff, pkt_size, ret = 0;
+	char cmd;
+	int rlen = msg->rx_len;
+	u8 *buf;
+
+	if (rlen <= 2) {
+		short_response = 1;
+		pkt_size = rlen;
+		rx_byte = 4;
+	} else {
+		short_response = 0;
+		data_byte = 10;	/* first read */
+		if (rlen < data_byte)
+			pkt_size = rlen;
+		else
+			pkt_size = data_byte;
+		rx_byte = data_byte + 6; /* 4 header + 2 crc */
+	}
+
+	buf = msm_host->rx_buf;
+	end = 0;
+	while (!end) {
+		u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
+		struct mipi_dsi_msg max_pkt_size_msg = {
+			.channel = msg->channel,
+			.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
+			.tx_len = 2,
+			.tx_buf = tx,
+		};
+
+		DBG("rlen=%d pkt_size=%d rx_byte=%d",
+		    rlen, pkt_size, rx_byte);
+
+		ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
+		if (ret < 2) {
+			pr_err("%s: Set max pkt size failed, %d\n",
+			       __func__, ret);
+			return -EINVAL;
+		}
+
+		if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+		    (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
+			/* Clear the RDBK_DATA registers */
+			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
+				  DSI_RDBK_DATA_CTRL_CLR);
+			wmb(); /* make sure the RDBK registers are cleared */
+			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
+			wmb(); /* release cleared status before transfer */
+		}
+
+		ret = dsi_cmds2buf_tx(msm_host, msg);
+		if (ret < 0) {
+			pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
+			return ret;
+		} else if (ret < msg->tx_len) {
+			pr_err("%s: Read cmd Tx failed, too short: %d\n", __func__, ret);
+			return -ECOMM;
+		}
+
+		/*
+		 * Once the cmd_dma_done interrupt is received, the return
+		 * data from the client is ready and already stored in the
+		 * RDBK_DATA registers. Since the rx fifo is 16 bytes, the
+		 * dcs header is only kept in the first pass; after that the
+		 * dcs header is lost during the shift into the registers.
+		 */
+		dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
+
+		if (dlen <= 0)
+			return 0;
+
+		if (short_response)
+			break;
+
+		if (rlen <= data_byte) {
+			diff = data_byte - rlen;
+			end = 1;
+		} else {
+			diff = 0;
+			rlen -= data_byte;
+		}
+
+		if (!end) {
+			dlen -= 2; /* 2 crc */
+			dlen -= diff;
+			buf += dlen;	/* next start position */
+			data_byte = 14;	/* NOT first read */
+			if (rlen < data_byte)
+				pkt_size += rlen;
+			else
+				pkt_size += data_byte;
+			DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
+		}
+	}
+
+	/*
+	 * For a single long read, if the requested rlen < 10,
+	 * we need to shift the start position of the rx
+	 * data buffer to skip the bytes which are not
+	 * updated.
+	 */
+	if (pkt_size < 10 && !short_response)
+		buf = msm_host->rx_buf + (10 - rlen);
+	else
+		buf = msm_host->rx_buf;
+
+	cmd = buf[0];
+	switch (cmd) {
+	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+		pr_err("%s: rx ACK_ERR_PACKAGE\n", __func__);
+		ret = 0;
+		break;
+	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+		ret = dsi_short_read1_resp(buf, msg);
+		break;
+	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+		ret = dsi_short_read2_resp(buf, msg);
+		break;
+	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+		ret = dsi_long_read_resp(buf, msg);
+		break;
+	default:
+		pr_warn("%s: invalid response cmd\n", __func__);
+		ret = 0;
+	}
+
+	return ret;
+}
+
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
+				  u32 len)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
+	dsi_write(msm_host, REG_DSI_DMA_LEN, len);
+	dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
+
+	/* Make sure the trigger happens */
+	wmb();
+}
+
+void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host,
+			       struct msm_dsi_phy *src_phy)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	msm_host->cphy_mode = src_phy->cphy_mode;
+}
+
+void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	DBG("");
+	dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
+	/* Make sure the PHY is fully reset */
+	wmb();
+	udelay(1000);
+	dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
+	udelay(100);
+}
+
+void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
+				  struct msm_dsi_phy_clk_request *clk_req,
+				  bool is_bonded_dsi)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+	int ret;
+
+	ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_bonded_dsi);
+	if (ret) {
+		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+		return;
+	}
+
+	/* CPHY transmits 16 bits over 7 clock cycles;
+	 * "byte_clk" is in units of 16-bits (see dsi_calc_pclk),
+	 * so multiply by 7 to get the "bitclk rate"
+	 */
+	if (msm_host->cphy_mode)
+		clk_req->bitclk_rate = msm_host->byte_clk_rate * 7;
+	else
+		clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
+	clk_req->escclk_rate = msm_host->esc_clk_rate;
+}
+
+void msm_dsi_host_enable_irq(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	enable_irq(msm_host->irq);
+}
+
+void msm_dsi_host_disable_irq(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	disable_irq(msm_host->irq);
+}
+
+int msm_dsi_host_enable(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	dsi_op_mode_config(msm_host,
+			   !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
+
+	/* TODO: the clock should be turned off for command mode, and
+	 * only turned on before MDP START.
+	 * This part of the code should be enabled once the mdp driver
+	 * supports it.
+	 */
+	/* if (msm_panel->mode == MSM_DSI_CMD_MODE) {
+	 *	dsi_link_clk_disable(msm_host);
+	 *	pm_runtime_put(&msm_host->pdev->dev);
+	 * }
+	 */
+	msm_host->enabled = true;
+	return 0;
+}
+
+int msm_dsi_host_disable(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	msm_host->enabled = false;
+	dsi_op_mode_config(msm_host,
+			   !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
+
+	/* Since we have disabled INTF, the video engine won't stop on its
+	 * own and the cmd engine will stay blocked.
+	 * Reset the controller to disable the video engine so that we can
+	 * send off commands.
+	 */
+	dsi_sw_reset(msm_host);
+
+	return 0;
+}
+
+static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
+{
+	enum sfpb_ahb_arb_master_port_en en;
+
+	if (!msm_host->sfpb)
+		return;
+
+	en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;
+
+	regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
+			   SFPB_GPREG_MASTER_PORT_EN__MASK,
+			   SFPB_GPREG_MASTER_PORT_EN(en));
+}
+
+int msm_dsi_host_power_on(struct mipi_dsi_host *host,
+			  struct msm_dsi_phy_shared_timings *phy_shared_timings,
+			  bool is_bonded_dsi, struct msm_dsi_phy *phy)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+	int ret = 0;
+
+	mutex_lock(&msm_host->dev_mutex);
+	if (msm_host->power_on) {
+		DBG("dsi host already on");
+		goto unlock_ret;
+	}
+
+	msm_host->byte_intf_clk_rate = msm_host->byte_clk_rate;
+	if (phy_shared_timings->byte_intf_clk_div_2)
+		msm_host->byte_intf_clk_rate /= 2;
+
+	msm_dsi_sfpb_config(msm_host, true);
+
+	ret = regulator_bulk_enable(msm_host->cfg_hnd->cfg->num_regulators,
+				    msm_host->supplies);
+	if (ret) {
+		pr_err("%s: failed to enable vregs, ret=%d\n",
+		       __func__, ret);
+		goto unlock_ret;
+	}
+
+	pm_runtime_get_sync(&msm_host->pdev->dev);
+	ret = cfg_hnd->ops->link_clk_set_rate(msm_host);
+	if (!ret)
+		ret = cfg_hnd->ops->link_clk_enable(msm_host);
+	if (ret) {
+		pr_err("%s: failed to enable link clocks, ret=%d\n",
+		       __func__, ret);
+		goto fail_disable_reg;
+	}
+
+	ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
+	if (ret) {
+		pr_err("%s: failed to set pinctrl default state, %d\n",
+		       __func__, ret);
+		goto fail_disable_clk;
+	}
+
+	dsi_timing_setup(msm_host, is_bonded_dsi);
+	dsi_sw_reset(msm_host);
+	dsi_ctrl_enable(msm_host, phy_shared_timings, phy);
+
+	msm_host->power_on = true;
+	mutex_unlock(&msm_host->dev_mutex);
+
+	return 0;
+
+fail_disable_clk:
+	cfg_hnd->ops->link_clk_disable(msm_host);
+	pm_runtime_put(&msm_host->pdev->dev);
+fail_disable_reg:
+	regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators,
+			       msm_host->supplies);
+unlock_ret:
+	mutex_unlock(&msm_host->dev_mutex);
+	return ret;
+}
+
+int msm_dsi_host_power_off(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+
+	mutex_lock(&msm_host->dev_mutex);
+	if (!msm_host->power_on) {
+		DBG("dsi host already off");
+		goto unlock_ret;
+	}
+
+	dsi_ctrl_disable(msm_host);
+
+	pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
+
+	cfg_hnd->ops->link_clk_disable(msm_host);
+	pm_runtime_put(&msm_host->pdev->dev);
+
+	regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators,
+			       msm_host->supplies);
+
+	msm_dsi_sfpb_config(msm_host, false);
+
+	DBG("-");
+
+	msm_host->power_on = false;
+
+unlock_ret:
+	mutex_unlock(&msm_host->dev_mutex);
+	return 0;
+}
+
+int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
+				  const struct drm_display_mode *mode)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	if (msm_host->mode) {
+		drm_mode_destroy(msm_host->dev, msm_host->mode);
+		msm_host->mode = NULL;
+	}
+
+	msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
+	if (!msm_host->mode) {
+		pr_err("%s: cannot duplicate mode\n", __func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host,
+					    const struct drm_display_mode *mode)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	struct drm_dsc_config *dsc = msm_host->dsc;
+	int pic_width = mode->hdisplay;
+	int pic_height = mode->vdisplay;
+
+	if (!msm_host->dsc)
+		return MODE_OK;
+
+	if (pic_width % dsc->slice_width) {
+		pr_err("DSI: pic_width %d has to be a multiple of slice width %d\n",
+		       pic_width, dsc->slice_width);
+		return MODE_H_ILLEGAL;
+	}
+
+	if (pic_height % dsc->slice_height) {
+		pr_err("DSI: pic_height %d has to be a multiple of slice height %d\n",
+		       pic_height, dsc->slice_height);
+		return MODE_V_ILLEGAL;
+	}
+
+	return MODE_OK;
+}
+
+unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host)
+{
+	return to_msm_dsi_host(host)->mode_flags;
+}
+
+void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	pm_runtime_get_sync(&msm_host->pdev->dev);
+
+	msm_disp_snapshot_add_block(disp_state, msm_host->ctrl_size,
+				    msm_host->ctrl_base, "dsi%d_ctrl", msm_host->id);
+
+	pm_runtime_put_sync(&msm_host->pdev->dev);
+}
+
+static void msm_dsi_host_video_test_pattern_setup(struct msm_dsi_host *msm_host)
+{
+	u32 reg;
+
+	reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL);
+
+	dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, 0xff);
+	/* draw checkered rectangle pattern */
+	dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL,
+		  DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN);
+	/* use 24-bit RGB test pattern */
+	dsi_write(msm_host, REG_DSI_TPG_VIDEO_CONFIG,
DSI_TPG_VIDEO_CONFIG_BPP(VIDEO_CONFIG_24BPP) | + DSI_TPG_VIDEO_CONFIG_RGB); + + reg |= DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(VID_MDSS_GENERAL_PATTERN); + dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, reg); + + DBG("Video test pattern setup done\n"); +} + +static void msm_dsi_host_cmd_test_pattern_setup(struct msm_dsi_host *msm_host) +{ + u32 reg; + + reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL); + + /* initial value for test pattern */ + dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0, 0xff); + + reg |= DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(CMD_MDP_MDSS_GENERAL_PATTERN); + + dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, reg); + /* draw checkered rectangle pattern */ + dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL2, + DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN); + + DBG("Cmd test pattern setup done\n"); +} + +void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + bool is_video_mode = !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO); + u32 reg; + + if (is_video_mode) + msm_dsi_host_video_test_pattern_setup(msm_host); + else + msm_dsi_host_cmd_test_pattern_setup(msm_host); + + reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL); + /* enable the test pattern generator */ + dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, (reg | DSI_TEST_PATTERN_GEN_CTRL_EN)); + + /* for command mode need to trigger one frame from tpg */ + if (!is_video_mode) + dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER, + DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER); +} + +struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + return msm_host->dsc; +} diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c new file mode 100644 index 000000000000..ca400924d4ee --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -0,0 +1,620 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + */ + +#include "drm/drm_bridge_connector.h" + +#include "msm_kms.h" +#include "dsi.h" + +#define DSI_CLOCK_MASTER DSI_0 +#define DSI_CLOCK_SLAVE DSI_1 + +#define DSI_LEFT DSI_0 +#define DSI_RIGHT DSI_1 + +/* According to the current drm framework sequence, take the encoder of + * DSI_1 as master encoder + */ +#define DSI_ENCODER_MASTER DSI_1 +#define DSI_ENCODER_SLAVE DSI_0 + +struct msm_dsi_manager { + struct msm_dsi *dsi[DSI_MAX]; + + bool is_bonded_dsi; + bool is_sync_needed; + int master_dsi_link_id; +}; + +static struct msm_dsi_manager msm_dsim_glb; + +#define IS_BONDED_DSI() (msm_dsim_glb.is_bonded_dsi) +#define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed) +#define IS_MASTER_DSI_LINK(id) (msm_dsim_glb.master_dsi_link_id == id) + +static inline struct msm_dsi *dsi_mgr_get_dsi(int id) +{ + return msm_dsim_glb.dsi[id]; +} + +static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id) +{ + return msm_dsim_glb.dsi[(id + 1) % DSI_MAX]; +} + +static int dsi_mgr_parse_of(struct device_node *np, int id) +{ + struct msm_dsi_manager *msm_dsim = &msm_dsim_glb; + + /* We assume 2 dsi nodes have the same information of bonded dsi and + * sync-mode, and only one node specifies master in case of bonded mode. 
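+	 *
+	 * A minimal (hypothetical) bonded-mode DT sketch; both nodes carry
+	 * the mode/sync properties and only one is marked master:
+	 *
+	 *   &dsi0 { qcom,dual-dsi-mode; qcom,sync-dual-dsi; qcom,master-dsi; };
+	 *   &dsi1 { qcom,dual-dsi-mode; qcom,sync-dual-dsi; };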
+ */ + if (!msm_dsim->is_bonded_dsi) + msm_dsim->is_bonded_dsi = of_property_read_bool(np, "qcom,dual-dsi-mode"); + + if (msm_dsim->is_bonded_dsi) { + if (of_property_read_bool(np, "qcom,master-dsi")) + msm_dsim->master_dsi_link_id = id; + if (!msm_dsim->is_sync_needed) + msm_dsim->is_sync_needed = of_property_read_bool( + np, "qcom,sync-dual-dsi"); + } + + return 0; +} + +static int dsi_mgr_setup_components(int id) +{ + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); + struct msm_dsi *clk_master_dsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); + struct msm_dsi *clk_slave_dsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); + int ret; + + if (!IS_BONDED_DSI()) { + /* + * Set the usecase before calling msm_dsi_host_register(), which would + * already program the PLL source mux based on a default usecase. + */ + msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE); + msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); + + ret = msm_dsi_host_register(msm_dsi->host); + if (ret) + return ret; + } else if (other_dsi) { + struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ? + msm_dsi : other_dsi; + struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ? + other_dsi : msm_dsi; + + /* + * PLL0 is to drive both DSI link clocks in bonded DSI mode. + * + * Set the usecase before calling msm_dsi_host_register(), which would + * already program the PLL source mux based on a default usecase. + */ + msm_dsi_phy_set_usecase(clk_master_dsi->phy, + MSM_DSI_PHY_MASTER); + msm_dsi_phy_set_usecase(clk_slave_dsi->phy, + MSM_DSI_PHY_SLAVE); + msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); + msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy); + + /* Register slave host first, so that slave DSI device + * has a chance to probe, and do not block the master + * DSI device's probe. + * Also, do not check defer for the slave host, + * because only master DSI device adds the panel to global + * panel list. The panel's device is the master DSI device. + */ + ret = msm_dsi_host_register(slave_link_dsi->host); + if (ret) + return ret; + ret = msm_dsi_host_register(master_link_dsi->host); + if (ret) + return ret; + } + + return 0; +} + +static int enable_phy(struct msm_dsi *msm_dsi, + struct msm_dsi_phy_shared_timings *shared_timings) +{ + struct msm_dsi_phy_clk_request clk_req; + bool is_bonded_dsi = IS_BONDED_DSI(); + + msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_bonded_dsi); + + return msm_dsi_phy_enable(msm_dsi->phy, &clk_req, shared_timings); +} + +static int +dsi_mgr_phy_enable(int id, + struct msm_dsi_phy_shared_timings shared_timings[DSI_MAX]) +{ + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); + struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); + int ret; + + /* In case of bonded DSI, some registers in PHY1 have been programmed + * during PLL0 clock's set_rate. The PHY1 reset called by host1 here + * will silently reset those PHY1 registers. Therefore we need to reset + * and enable both PHYs before any PLL clock operation. 
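+	 *
+	 * Hence the fixed order below: reset both PHYs first, then enable
+	 * the clock-master PHY before the slave, rolling the master back
+	 * if the slave fails to enable.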
+ */ + if (IS_BONDED_DSI() && mdsi && sdsi) { + if (!mdsi->phy_enabled && !sdsi->phy_enabled) { + msm_dsi_host_reset_phy(mdsi->host); + msm_dsi_host_reset_phy(sdsi->host); + + ret = enable_phy(mdsi, + &shared_timings[DSI_CLOCK_MASTER]); + if (ret) + return ret; + ret = enable_phy(sdsi, + &shared_timings[DSI_CLOCK_SLAVE]); + if (ret) { + msm_dsi_phy_disable(mdsi->phy); + return ret; + } + } + } else { + msm_dsi_host_reset_phy(msm_dsi->host); + ret = enable_phy(msm_dsi, &shared_timings[id]); + if (ret) + return ret; + } + + msm_dsi->phy_enabled = true; + + return 0; +} + +static void dsi_mgr_phy_disable(int id) +{ + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); + struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); + + /* disable DSI phy + * In bonded dsi configuration, the phy should be disabled for the + * first controller only when the second controller is disabled. + */ + msm_dsi->phy_enabled = false; + if (IS_BONDED_DSI() && mdsi && sdsi) { + if (!mdsi->phy_enabled && !sdsi->phy_enabled) { + msm_dsi_phy_disable(sdsi->phy); + msm_dsi_phy_disable(mdsi->phy); + } + } else { + msm_dsi_phy_disable(msm_dsi->phy); + } +} + +struct dsi_bridge { + struct drm_bridge base; + int id; +}; + +#define to_dsi_bridge(x) container_of(x, struct dsi_bridge, base) + +static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge) +{ + struct dsi_bridge *dsi_bridge = to_dsi_bridge(bridge); + return dsi_bridge->id; +} + +static int dsi_mgr_bridge_power_on(struct drm_bridge *bridge) +{ + int id = dsi_mgr_bridge_get_id(bridge); + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); + struct mipi_dsi_host *host = msm_dsi->host; + struct msm_dsi_phy_shared_timings phy_shared_timings[DSI_MAX]; + bool is_bonded_dsi = IS_BONDED_DSI(); + int ret; + + DBG("id=%d", id); + + ret = dsi_mgr_phy_enable(id, phy_shared_timings); + if (ret) + goto phy_en_fail; + + ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_bonded_dsi, msm_dsi->phy); + if (ret) { + pr_err("%s: power on host %d failed, %d\n", __func__, id, ret); + goto host_on_fail; + } + + if (is_bonded_dsi && msm_dsi1) { + ret = msm_dsi_host_power_on(msm_dsi1->host, + &phy_shared_timings[DSI_1], is_bonded_dsi, msm_dsi1->phy); + if (ret) { + pr_err("%s: power on host1 failed, %d\n", + __func__, ret); + goto host1_on_fail; + } + } + + /* + * Enable before preparing the panel, disable after unpreparing, so + * that the panel can communicate over the DSI link. 
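+	 * The matching disables happen in dsi_mgr_bridge_power_off() and
+	 * dsi_mgr_bridge_post_disable() below.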
+	 */
+	msm_dsi_host_enable_irq(host);
+	if (is_bonded_dsi && msm_dsi1)
+		msm_dsi_host_enable_irq(msm_dsi1->host);
+
+	return 0;
+
+host1_on_fail:
+	msm_dsi_host_power_off(host);
+host_on_fail:
+	dsi_mgr_phy_disable(id);
+phy_en_fail:
+	return ret;
+}
+
+static void dsi_mgr_bridge_power_off(struct drm_bridge *bridge)
+{
+	int id = dsi_mgr_bridge_get_id(bridge);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+	struct mipi_dsi_host *host = msm_dsi->host;
+	bool is_bonded_dsi = IS_BONDED_DSI();
+
+	msm_dsi_host_disable_irq(host);
+	if (is_bonded_dsi && msm_dsi1) {
+		msm_dsi_host_disable_irq(msm_dsi1->host);
+		msm_dsi_host_power_off(msm_dsi1->host);
+	}
+	msm_dsi_host_power_off(host);
+	dsi_mgr_phy_disable(id);
+}
+
+static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
+{
+	int id = dsi_mgr_bridge_get_id(bridge);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+	struct mipi_dsi_host *host = msm_dsi->host;
+	bool is_bonded_dsi = IS_BONDED_DSI();
+	int ret;
+
+	DBG("id=%d", id);
+
+	/* Do nothing with the host if it is slave-DSI in case of bonded DSI */
+	if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
+		return;
+
+	ret = dsi_mgr_bridge_power_on(bridge);
+	if (ret) {
+		dev_err(&msm_dsi->pdev->dev, "Power on failed: %d\n", ret);
+		return;
+	}
+
+	ret = msm_dsi_host_enable(host);
+	if (ret) {
+		pr_err("%s: enable host %d failed, %d\n", __func__, id, ret);
+		goto host_en_fail;
+	}
+
+	if (is_bonded_dsi && msm_dsi1) {
+		ret = msm_dsi_host_enable(msm_dsi1->host);
+		if (ret) {
+			pr_err("%s: enable host1 failed, %d\n", __func__, ret);
+			goto host1_en_fail;
+		}
+	}
+
+	return;
+
+host1_en_fail:
+	msm_dsi_host_disable(host);
+host_en_fail:
+	dsi_mgr_bridge_power_off(bridge);
+}
+
+void msm_dsi_manager_tpg_enable(void)
+{
+	struct msm_dsi *m_dsi = dsi_mgr_get_dsi(DSI_0);
+	struct msm_dsi *s_dsi = dsi_mgr_get_dsi(DSI_1);
+
+	/* if bonded dsi, trigger the tpg on the master first, then the slave */
+	if (m_dsi) {
+		msm_dsi_host_test_pattern_en(m_dsi->host);
+		if (IS_BONDED_DSI() && s_dsi)
+			msm_dsi_host_test_pattern_en(s_dsi->host);
+	}
+}
+
+static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
+{
+	int id = dsi_mgr_bridge_get_id(bridge);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+	struct mipi_dsi_host *host = msm_dsi->host;
+	bool is_bonded_dsi = IS_BONDED_DSI();
+	int ret;
+
+	DBG("id=%d", id);
+
+	/*
+	 * Do nothing with the host if it is slave-DSI in case of bonded DSI.
+	 * It is safe to call dsi_mgr_phy_disable() here because a single PHY
+	 * won't be disabled until both PHYs request disable.
+ */ + if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) + goto disable_phy; + + ret = msm_dsi_host_disable(host); + if (ret) + pr_err("%s: host %d disable failed, %d\n", __func__, id, ret); + + if (is_bonded_dsi && msm_dsi1) { + ret = msm_dsi_host_disable(msm_dsi1->host); + if (ret) + pr_err("%s: host1 disable failed, %d\n", __func__, ret); + } + + msm_dsi_host_disable_irq(host); + if (is_bonded_dsi && msm_dsi1) + msm_dsi_host_disable_irq(msm_dsi1->host); + + /* Save PHY status if it is a clock source */ + msm_dsi_phy_pll_save_state(msm_dsi->phy); + + ret = msm_dsi_host_power_off(host); + if (ret) + pr_err("%s: host %d power off failed,%d\n", __func__, id, ret); + + if (is_bonded_dsi && msm_dsi1) { + ret = msm_dsi_host_power_off(msm_dsi1->host); + if (ret) + pr_err("%s: host1 power off failed, %d\n", + __func__, ret); + } + +disable_phy: + dsi_mgr_phy_disable(id); +} + +static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + int id = dsi_mgr_bridge_get_id(bridge); + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); + struct mipi_dsi_host *host = msm_dsi->host; + bool is_bonded_dsi = IS_BONDED_DSI(); + + DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode)); + + if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) + return; + + msm_dsi_host_set_display_mode(host, adjusted_mode); + if (is_bonded_dsi && other_dsi) + msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode); +} + +static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + int id = dsi_mgr_bridge_get_id(bridge); + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct mipi_dsi_host *host = msm_dsi->host; + struct platform_device *pdev = msm_dsi->pdev; + struct dev_pm_opp *opp; + unsigned long byte_clk_rate; + + byte_clk_rate = dsi_byte_clk_get_rate(host, IS_BONDED_DSI(), mode); + + opp = dev_pm_opp_find_freq_ceil(&pdev->dev, &byte_clk_rate); + if (!IS_ERR(opp)) { + dev_pm_opp_put(opp); + } else if (PTR_ERR(opp) == -ERANGE) { + /* + * An empty table is created by devm_pm_opp_set_clkname() even + * if there is none. Thus find_freq_ceil will still return + * -ERANGE in such case. 
+		 */
+		if (dev_pm_opp_get_opp_count(&pdev->dev) != 0)
+			return MODE_CLOCK_RANGE;
+	} else {
+		return MODE_ERROR;
+	}
+
+	return msm_dsi_host_check_dsc(host, mode);
+}
+
+static int dsi_mgr_bridge_attach(struct drm_bridge *bridge,
+				 struct drm_encoder *encoder,
+				 enum drm_bridge_attach_flags flags)
+{
+	int id = dsi_mgr_bridge_get_id(bridge);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+
+	return drm_bridge_attach(encoder, msm_dsi->next_bridge,
+				 bridge, flags);
+}
+
+static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
+	.attach = dsi_mgr_bridge_attach,
+	.pre_enable = dsi_mgr_bridge_pre_enable,
+	.post_disable = dsi_mgr_bridge_post_disable,
+	.mode_set = dsi_mgr_bridge_mode_set,
+	.mode_valid = dsi_mgr_bridge_mode_valid,
+};
+
+/* initialize bridge */
+int msm_dsi_manager_connector_init(struct msm_dsi *msm_dsi,
+				   struct drm_encoder *encoder)
+{
+	struct drm_device *dev = msm_dsi->dev;
+	struct drm_bridge *bridge;
+	struct dsi_bridge *dsi_bridge;
+	struct drm_connector *connector;
+	int ret;
+
+	dsi_bridge = devm_drm_bridge_alloc(msm_dsi->dev->dev, struct dsi_bridge, base,
+					   &dsi_mgr_bridge_funcs);
+	if (IS_ERR(dsi_bridge))
+		return PTR_ERR(dsi_bridge);
+
+	dsi_bridge->id = msm_dsi->id;
+
+	bridge = &dsi_bridge->base;
+
+	ret = devm_drm_bridge_add(msm_dsi->dev->dev, bridge);
+	if (ret)
+		return ret;
+
+	ret = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+	if (ret)
+		return ret;
+
+	connector = drm_bridge_connector_init(dev, encoder);
+	if (IS_ERR(connector)) {
+		DRM_ERROR("Unable to create bridge connector\n");
+		return PTR_ERR(connector);
+	}
+
+	ret = drm_connector_attach_encoder(connector, encoder);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
+	struct mipi_dsi_host *host = msm_dsi->host;
+	bool is_read = (msg->rx_buf && msg->rx_len);
+	bool need_sync = (IS_SYNC_NEEDED() && !is_read);
+	int ret;
+
+	if (!msg->tx_buf || !msg->tx_len)
+		return 0;
+
+	/*
+	 * In the bonded master case, the panel requires the same commands to
+	 * be sent to both DSI links. The host issues the command trigger to
+	 * both links when DSI_1 calls the cmd transfer function, no matter
+	 * whether that happens before or after the DSI_0 cmd transfer.
+	 */
+	if (need_sync && (id == DSI_0))
+		return is_read ? msg->rx_len : msg->tx_len;
+
+	if (need_sync && msm_dsi0) {
+		ret = msm_dsi_host_xfer_prepare(msm_dsi0->host, msg);
+		if (ret) {
+			pr_err("%s: failed to prepare non-trigger host, %d\n",
+			       __func__, ret);
+			return ret;
+		}
+	}
+	ret = msm_dsi_host_xfer_prepare(host, msg);
+	if (ret) {
+		pr_err("%s: failed to prepare host, %d\n", __func__, ret);
+		goto restore_host0;
+	}
+
+	ret = is_read ?
msm_dsi_host_cmd_rx(host, msg) : + msm_dsi_host_cmd_tx(host, msg); + + msm_dsi_host_xfer_restore(host, msg); + +restore_host0: + if (need_sync && msm_dsi0) + msm_dsi_host_xfer_restore(msm_dsi0->host, msg); + + return ret; +} + +bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len) +{ + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0); + struct mipi_dsi_host *host = msm_dsi->host; + + if (IS_SYNC_NEEDED() && (id == DSI_0)) + return false; + + if (IS_SYNC_NEEDED() && msm_dsi0) + msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, dma_base, len); + + msm_dsi_host_cmd_xfer_commit(host, dma_base, len); + + return true; +} + +int msm_dsi_manager_register(struct msm_dsi *msm_dsi) +{ + struct msm_dsi_manager *msm_dsim = &msm_dsim_glb; + int id = msm_dsi->id; + int ret; + + if (id >= DSI_MAX) { + pr_err("%s: invalid id %d\n", __func__, id); + return -EINVAL; + } + + if (msm_dsim->dsi[id]) { + pr_err("%s: dsi%d already registered\n", __func__, id); + return -EBUSY; + } + + msm_dsim->dsi[id] = msm_dsi; + + ret = dsi_mgr_parse_of(msm_dsi->pdev->dev.of_node, id); + if (ret) { + pr_err("%s: failed to parse OF DSI info\n", __func__); + goto fail; + } + + ret = dsi_mgr_setup_components(id); + if (ret) { + pr_err("%s: failed to register mipi dsi host for DSI %d: %d\n", + __func__, id, ret); + goto fail; + } + + return 0; + +fail: + msm_dsim->dsi[id] = NULL; + return ret; +} + +void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi) +{ + struct msm_dsi_manager *msm_dsim = &msm_dsim_glb; + + if (msm_dsi->host) + msm_dsi_host_unregister(msm_dsi->host); + + if (msm_dsi->id >= 0) + msm_dsim->dsi[msm_dsi->id] = NULL; +} + +bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi) +{ + return IS_BONDED_DSI(); +} + +bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi) +{ + return IS_MASTER_DSI_LINK(msm_dsi->id); +} + +const char *msm_dsi_get_te_source(struct msm_dsi *msm_dsi) +{ + return msm_dsi->te_source; +} diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c new file mode 100644 index 000000000000..4ea681130dba --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -0,0 +1,866 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + */ + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> +#include <linux/pm_clock.h> +#include <linux/pm_runtime.h> +#include <dt-bindings/phy/phy.h> + +#include "dsi_phy.h" + +#define S_DIV_ROUND_UP(n, d) \ + (((n) >= 0) ? 
(((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d))) + +static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent, + s32 min_result, bool even) +{ + s32 v; + + v = (tmax - tmin) * percent; + v = S_DIV_ROUND_UP(v, 100) + tmin; + if (even && (v & 0x1)) + return max_t(s32, min_result, v - 1); + else + return max_t(s32, min_result, v); +} + +static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing, + s32 ui, s32 coeff, s32 pcnt) +{ + s32 tmax, tmin, clk_z; + s32 temp; + + /* reset */ + temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui; + tmin = S_DIV_ROUND_UP(temp, ui) - 2; + if (tmin > 255) { + tmax = 511; + clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true); + } else { + tmax = 255; + clk_z = linear_inter(tmax, tmin, pcnt, 0, true); + } + + /* adjust */ + temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7; + timing->clk_zero = clk_z + 8 - temp; +} + +int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req) +{ + const unsigned long bit_rate = clk_req->bitclk_rate; + const unsigned long esc_rate = clk_req->escclk_rate; + s32 ui, lpx; + s32 tmax, tmin; + s32 pcnt0 = 10; + s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10; + s32 pcnt2 = 10; + s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40; + s32 coeff = 1000; /* Precision, should avoid overflow */ + s32 temp; + + if (!bit_rate || !esc_rate) + return -EINVAL; + + ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); + lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000); + + tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2; + tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2; + timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true); + + temp = lpx / ui; + if (temp & 0x1) + timing->hs_rqst = temp; + else + timing->hs_rqst = max_t(s32, 0, temp - 2); + + /* Calculate clk_zero after clk_prepare and hs_rqst */ + dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2); + + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = S_DIV_ROUND_UP(temp, ui) - 2; + tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2; + timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true); + + temp = 85 * coeff + 6 * ui; + tmax = S_DIV_ROUND_UP(temp, ui) - 2; + temp = 40 * coeff + 4 * ui; + tmin = S_DIV_ROUND_UP(temp, ui) - 2; + timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true); + + tmax = 255; + temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui; + temp = 145 * coeff + 10 * ui - temp; + tmin = S_DIV_ROUND_UP(temp, ui) - 2; + timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true); + + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = S_DIV_ROUND_UP(temp, ui) - 2; + temp = 60 * coeff + 4 * ui; + tmin = DIV_ROUND_UP(temp, ui) - 2; + timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true); + + tmax = 255; + tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2; + timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true); + + tmax = 63; + temp = ((timing->hs_exit >> 1) + 1) * 2 * ui; + temp = 60 * coeff + 52 * ui - 24 * ui - temp; + tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; + timing->shared_timings.clk_post = linear_inter(tmax, tmin, pcnt2, 0, + false); + tmax = 63; + temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui; + temp += ((timing->clk_zero >> 1) + 1) * 2 * ui; + temp += 8 * ui + lpx; + tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; + if (tmin > tmax) { + temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false); + timing->shared_timings.clk_pre = temp >> 1; + timing->shared_timings.clk_pre_inc_by_2 = true; + } else { + timing->shared_timings.clk_pre = + linear_inter(tmax, tmin, pcnt2, 
0, false); + timing->shared_timings.clk_pre_inc_by_2 = false; + } + + timing->ta_go = 3; + timing->ta_sure = 0; + timing->ta_get = 4; + + DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", + timing->shared_timings.clk_pre, timing->shared_timings.clk_post, + timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero, + timing->clk_trail, timing->clk_prepare, timing->hs_exit, + timing->hs_zero, timing->hs_prepare, timing->hs_trail, + timing->hs_rqst); + + return 0; +} + +int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req) +{ + const unsigned long bit_rate = clk_req->bitclk_rate; + const unsigned long esc_rate = clk_req->escclk_rate; + s32 ui, ui_x8; + s32 tmax, tmin; + s32 pcnt0 = 50; + s32 pcnt1 = 50; + s32 pcnt2 = 10; + s32 pcnt3 = 30; + s32 pcnt4 = 10; + s32 pcnt5 = 2; + s32 coeff = 1000; /* Precision, should avoid overflow */ + s32 hb_en, hb_en_ckln, pd_ckln, pd; + s32 val, val_ckln; + s32 temp; + + if (!bit_rate || !esc_rate) + return -EINVAL; + + timing->hs_halfbyte_en = 0; + hb_en = 0; + timing->hs_halfbyte_en_ckln = 0; + hb_en_ckln = 0; + timing->hs_prep_dly_ckln = (bit_rate > 100000000) ? 0 : 3; + pd_ckln = timing->hs_prep_dly_ckln; + timing->hs_prep_dly = (bit_rate > 120000000) ? 0 : 1; + pd = timing->hs_prep_dly; + + val = (hb_en << 2) + (pd << 1); + val_ckln = (hb_en_ckln << 2) + (pd_ckln << 1); + + ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); + ui_x8 = ui << 3; + + temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8); + tmin = max_t(s32, temp, 0); + temp = (95 * coeff - val_ckln * ui) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false); + + temp = 300 * coeff - ((timing->clk_prepare << 3) + val_ckln) * ui; + tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3; + tmax = (tmin > 255) ? 511 : 255; + timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8); + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp + 3 * ui) / ui_x8; + timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false); + + temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui - val * ui, ui_x8); + tmin = max_t(s32, temp, 0); + temp = (85 * coeff + 6 * ui - val * ui) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false); + + temp = 145 * coeff + 10 * ui - ((timing->hs_prepare << 3) + val) * ui; + tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3; + tmax = 255; + timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 4 * ui + 3 * ui, ui_x8); + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp + 3 * ui) / ui_x8; + timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false); + + temp = 50 * coeff + ((hb_en << 2) - 8) * ui; + timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8); + + tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1; + tmax = 255; + timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false); + + temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui; + timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8); + + temp = 60 * coeff + 52 * ui - 43 * ui; + tmin = DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 63; + timing->shared_timings.clk_post = + linear_inter(tmax, tmin, pcnt2, 0, false); + + temp = 8 * ui + ((timing->clk_prepare << 3) + val_ckln) * ui; + temp += (((timing->clk_zero + 3) << 3) + 11 - (pd_ckln << 1)) * ui; + temp += hb_en_ckln ? 
(((timing->hs_rqst_ckln << 3) + 4) * ui) : + (((timing->hs_rqst_ckln << 3) + 8) * ui); + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 63; + if (tmin > tmax) { + temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false); + timing->shared_timings.clk_pre = temp >> 1; + timing->shared_timings.clk_pre_inc_by_2 = 1; + } else { + timing->shared_timings.clk_pre = + linear_inter(tmax, tmin, pcnt2, 0, false); + timing->shared_timings.clk_pre_inc_by_2 = 0; + } + + timing->ta_go = 3; + timing->ta_sure = 0; + timing->ta_get = 4; + + DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", + timing->shared_timings.clk_pre, timing->shared_timings.clk_post, + timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero, + timing->clk_trail, timing->clk_prepare, timing->hs_exit, + timing->hs_zero, timing->hs_prepare, timing->hs_trail, + timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en, + timing->hs_halfbyte_en_ckln, timing->hs_prep_dly, + timing->hs_prep_dly_ckln); + + return 0; +} + +int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req) +{ + const unsigned long bit_rate = clk_req->bitclk_rate; + const unsigned long esc_rate = clk_req->escclk_rate; + s32 ui, ui_x8; + s32 tmax, tmin; + s32 pcnt0 = 50; + s32 pcnt1 = 50; + s32 pcnt2 = 10; + s32 pcnt3 = 30; + s32 pcnt4 = 10; + s32 pcnt5 = 2; + s32 coeff = 1000; /* Precision, should avoid overflow */ + s32 hb_en, hb_en_ckln; + s32 temp; + + if (!bit_rate || !esc_rate) + return -EINVAL; + + timing->hs_halfbyte_en = 0; + hb_en = 0; + timing->hs_halfbyte_en_ckln = 0; + hb_en_ckln = 0; + + ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); + ui_x8 = ui << 3; + + temp = S_DIV_ROUND_UP(38 * coeff, ui_x8); + tmin = max_t(s32, temp, 0); + temp = (95 * coeff) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false); + + temp = 300 * coeff - (timing->clk_prepare << 3) * ui; + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = (tmin > 255) ? 511 : 255; + timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8); + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp + 3 * ui) / ui_x8; + timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false); + + temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8); + tmin = max_t(s32, temp, 0); + temp = (85 * coeff + 6 * ui) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false); + + temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui; + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 255; + timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1; + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp / ui_x8) - 1; + timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false); + + temp = 50 * coeff + ((hb_en << 2) - 8) * ui; + timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8); + + tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1; + tmax = 255; + timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false); + + temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui; + timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8); + + temp = 60 * coeff + 52 * ui - 43 * ui; + tmin = DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 63; + timing->shared_timings.clk_post = + linear_inter(tmax, tmin, pcnt2, 0, false); + + temp = 8 * ui + (timing->clk_prepare << 3) * ui; + temp += (((timing->clk_zero + 3) << 3) + 11) * ui; + temp += hb_en_ckln ? 
(((timing->hs_rqst_ckln << 3) + 4) * ui) : + (((timing->hs_rqst_ckln << 3) + 8) * ui); + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 63; + if (tmin > tmax) { + temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false); + timing->shared_timings.clk_pre = temp >> 1; + timing->shared_timings.clk_pre_inc_by_2 = 1; + } else { + timing->shared_timings.clk_pre = + linear_inter(tmax, tmin, pcnt2, 0, false); + timing->shared_timings.clk_pre_inc_by_2 = 0; + } + + timing->shared_timings.byte_intf_clk_div_2 = true; + + timing->ta_go = 3; + timing->ta_sure = 0; + timing->ta_get = 4; + + DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", + timing->shared_timings.clk_pre, timing->shared_timings.clk_post, + timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero, + timing->clk_trail, timing->clk_prepare, timing->hs_exit, + timing->hs_zero, timing->hs_prepare, timing->hs_trail, + timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en, + timing->hs_halfbyte_en_ckln, timing->hs_prep_dly, + timing->hs_prep_dly_ckln); + + return 0; +} + +int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req) +{ + const unsigned long bit_rate = clk_req->bitclk_rate; + const unsigned long esc_rate = clk_req->escclk_rate; + s32 ui, ui_x8; + s32 tmax, tmin; + s32 pcnt_clk_prep = 50; + s32 pcnt_clk_zero = 2; + s32 pcnt_clk_trail = 30; + s32 pcnt_hs_prep = 50; + s32 pcnt_hs_zero = 10; + s32 pcnt_hs_trail = 30; + s32 pcnt_hs_exit = 10; + s32 coeff = 1000; /* Precision, should avoid overflow */ + s32 hb_en; + s32 temp; + + if (!bit_rate || !esc_rate) + return -EINVAL; + + hb_en = 0; + + ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); + ui_x8 = ui << 3; + + /* TODO: verify these calculations against latest downstream driver + * everything except clk_post/clk_pre uses calculations from v3 based + * on the downstream driver having the same calculations for v3 and v4 + */ + + temp = S_DIV_ROUND_UP(38 * coeff, ui_x8); + tmin = max_t(s32, temp, 0); + temp = (95 * coeff) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->clk_prepare = linear_inter(tmax, tmin, pcnt_clk_prep, 0, false); + + temp = 300 * coeff - (timing->clk_prepare << 3) * ui; + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = (tmin > 255) ? 
511 : 255; + timing->clk_zero = linear_inter(tmax, tmin, pcnt_clk_zero, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8); + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp + 3 * ui) / ui_x8; + timing->clk_trail = linear_inter(tmax, tmin, pcnt_clk_trail, 0, false); + + temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8); + tmin = max_t(s32, temp, 0); + temp = (85 * coeff + 6 * ui) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->hs_prepare = linear_inter(tmax, tmin, pcnt_hs_prep, 0, false); + + temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui; + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 255; + timing->hs_zero = linear_inter(tmax, tmin, pcnt_hs_zero, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1; + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp / ui_x8) - 1; + timing->hs_trail = linear_inter(tmax, tmin, pcnt_hs_trail, 0, false); + + temp = 50 * coeff + ((hb_en << 2) - 8) * ui; + timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8); + + tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1; + tmax = 255; + timing->hs_exit = linear_inter(tmax, tmin, pcnt_hs_exit, 0, false); + + /* recommended min + * = roundup((mipi_min_ns + t_hs_trail_ns)/(16*bit_clk_ns), 0) - 1 + */ + temp = 60 * coeff + 52 * ui + + (timing->hs_trail + 1) * ui_x8; + tmin = DIV_ROUND_UP(temp, 16 * ui) - 1; + tmax = 255; + timing->shared_timings.clk_post = linear_inter(tmax, tmin, 5, 0, false); + + /* recommended min + * val1 = (tlpx_ns + clk_prepare_ns + clk_zero_ns + hs_rqst_ns) + * val2 = (16 * bit_clk_ns) + * final = roundup(val1/val2, 0) - 1 + */ + temp = 52 * coeff + (timing->clk_prepare + timing->clk_zero + 1) * ui_x8 + 54 * coeff; + tmin = DIV_ROUND_UP(temp, 16 * ui) - 1; + tmax = 255; + timing->shared_timings.clk_pre = DIV_ROUND_UP((tmax - tmin) * 125, 10000) + tmin; + + timing->shared_timings.byte_intf_clk_div_2 = true; + + DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d", + timing->shared_timings.clk_pre, timing->shared_timings.clk_post, + timing->clk_zero, timing->clk_trail, timing->clk_prepare, timing->hs_exit, + timing->hs_zero, timing->hs_prepare, timing->hs_trail, timing->hs_rqst); + + return 0; +} + +int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req) +{ + const unsigned long bit_rate = clk_req->bitclk_rate; + const unsigned long esc_rate = clk_req->escclk_rate; + s32 ui, ui_x7; + s32 tmax, tmin; + s32 coeff = 1000; /* Precision, should avoid overflow */ + s32 temp; + + if (!bit_rate || !esc_rate) + return -EINVAL; + + ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); + ui_x7 = ui * 7; + + temp = S_DIV_ROUND_UP(38 * coeff, ui_x7); + tmin = max_t(s32, temp, 0); + temp = (95 * coeff) / ui_x7; + tmax = max_t(s32, temp, 0); + timing->clk_prepare = linear_inter(tmax, tmin, 50, 0, false); + + tmin = DIV_ROUND_UP(50 * coeff, ui_x7); + tmax = 255; + timing->hs_rqst = linear_inter(tmax, tmin, 1, 0, false); + + tmin = DIV_ROUND_UP(100 * coeff, ui_x7) - 1; + tmax = 255; + timing->hs_exit = linear_inter(tmax, tmin, 10, 0, false); + + tmin = 1; + tmax = 32; + timing->shared_timings.clk_post = linear_inter(tmax, tmin, 80, 0, false); + + tmin = min_t(s32, 64, S_DIV_ROUND_UP(262 * coeff, ui_x7) - 1); + tmax = 64; + timing->shared_timings.clk_pre = linear_inter(tmax, tmin, 20, 0, false); + + DBG("%d, %d, %d, %d, %d", + timing->shared_timings.clk_pre, timing->shared_timings.clk_post, + timing->clk_prepare, timing->hs_exit, timing->hs_rqst); + + return 0; +} + +static const struct of_device_id dsi_phy_dt_match[] = 
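+/*
+ * Match table: each entry maps a DT compatible string to its
+ * msm_dsi_phy_cfg. Entries are compiled in only when the matching
+ * Kconfig option (e.g. CONFIG_DRM_MSM_DSI_10NM_PHY) is enabled.
+ */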
{ +#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY + { .compatible = "qcom,dsi-phy-28nm-hpm", + .data = &dsi_phy_28nm_hpm_cfgs }, + { .compatible = "qcom,dsi-phy-28nm-hpm-fam-b", + .data = &dsi_phy_28nm_hpm_famb_cfgs }, + { .compatible = "qcom,dsi-phy-28nm-lp", + .data = &dsi_phy_28nm_lp_cfgs }, + { .compatible = "qcom,dsi-phy-28nm-8226", + .data = &dsi_phy_28nm_8226_cfgs }, + { .compatible = "qcom,dsi-phy-28nm-8937", + .data = &dsi_phy_28nm_8937_cfgs }, +#endif +#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY + { .compatible = "qcom,dsi-phy-20nm", + .data = &dsi_phy_20nm_cfgs }, +#endif +#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY + { .compatible = "qcom,dsi-phy-28nm-8960", + .data = &dsi_phy_28nm_8960_cfgs }, +#endif +#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY + { .compatible = "qcom,dsi-phy-14nm", + .data = &dsi_phy_14nm_cfgs }, + { .compatible = "qcom,dsi-phy-14nm-2290", + .data = &dsi_phy_14nm_2290_cfgs }, + { .compatible = "qcom,dsi-phy-14nm-660", + .data = &dsi_phy_14nm_660_cfgs }, + { .compatible = "qcom,dsi-phy-14nm-8953", + .data = &dsi_phy_14nm_8953_cfgs }, + { .compatible = "qcom,sm6125-dsi-phy-14nm", + .data = &dsi_phy_14nm_2290_cfgs }, + { .compatible = "qcom,sm6150-dsi-phy-14nm", + .data = &dsi_phy_14nm_6150_cfgs }, +#endif +#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY + { .compatible = "qcom,dsi-phy-10nm", + .data = &dsi_phy_10nm_cfgs }, + { .compatible = "qcom,dsi-phy-10nm-8998", + .data = &dsi_phy_10nm_8998_cfgs }, +#endif +#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY + { .compatible = "qcom,dsi-phy-7nm", + .data = &dsi_phy_7nm_cfgs }, + { .compatible = "qcom,dsi-phy-7nm-8150", + .data = &dsi_phy_7nm_8150_cfgs }, + { .compatible = "qcom,sa8775p-dsi-phy-5nm", + .data = &dsi_phy_5nm_8775p_cfgs }, + { .compatible = "qcom,sar2130p-dsi-phy-5nm", + .data = &dsi_phy_5nm_sar2130p_cfgs }, + { .compatible = "qcom,sc7280-dsi-phy-7nm", + .data = &dsi_phy_7nm_7280_cfgs }, + { .compatible = "qcom,sm6375-dsi-phy-7nm", + .data = &dsi_phy_7nm_6375_cfgs }, + { .compatible = "qcom,sm8350-dsi-phy-5nm", + .data = &dsi_phy_5nm_8350_cfgs }, + { .compatible = "qcom,sm8450-dsi-phy-5nm", + .data = &dsi_phy_5nm_8450_cfgs }, + { .compatible = "qcom,sm8550-dsi-phy-4nm", + .data = &dsi_phy_4nm_8550_cfgs }, + { .compatible = "qcom,sm8650-dsi-phy-4nm", + .data = &dsi_phy_4nm_8650_cfgs }, + { .compatible = "qcom,sm8750-dsi-phy-3nm", + .data = &dsi_phy_3nm_8750_cfgs }, +#endif + {} +}; + +/* + * Currently, we only support one SoC for each PHY type. When we have multiple + * SoCs for the same PHY, we can try to make the index searching a bit more + * clever. 
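+ * dsi_phy_get_id() below resolves the instance index by comparing the
+ * device's "dsi_phy" register base with cfg->io_start[]; e.g. for
+ * dsi_phy_10nm_cfgs, io_start = { 0xae94400, 0xae96400 }, so a PHY
+ * mapped at 0xae96400 resolves to id 1.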
+ */ +static int dsi_phy_get_id(struct msm_dsi_phy *phy) +{ + struct platform_device *pdev = phy->pdev; + const struct msm_dsi_phy_cfg *cfg = phy->cfg; + struct resource *res; + int i; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy"); + if (!res) + return -EINVAL; + + for (i = 0; i < cfg->num_dsi_phy; i++) { + if (cfg->io_start[i] == res->start) + return i; + } + + return -EINVAL; +} + +static int dsi_phy_driver_probe(struct platform_device *pdev) +{ + struct msm_dsi_phy *phy; + struct device *dev = &pdev->dev; + u32 phy_type; + int ret; + + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return -ENOMEM; + + phy->provided_clocks = devm_kzalloc(dev, + struct_size(phy->provided_clocks, hws, NUM_PROVIDED_CLKS), + GFP_KERNEL); + if (!phy->provided_clocks) + return -ENOMEM; + + phy->provided_clocks->num = NUM_PROVIDED_CLKS; + + phy->cfg = of_device_get_match_data(&pdev->dev); + if (!phy->cfg) + return -ENODEV; + + phy->pdev = pdev; + + phy->id = dsi_phy_get_id(phy); + if (phy->id < 0) + return dev_err_probe(dev, phy->id, + "Couldn't identify PHY index\n"); + + phy->regulator_ldo_mode = of_property_read_bool(dev->of_node, + "qcom,dsi-phy-regulator-ldo-mode"); + if (!of_property_read_u32(dev->of_node, "phy-type", &phy_type)) + phy->cphy_mode = (phy_type == PHY_TYPE_CPHY); + + phy->base = msm_ioremap_size(pdev, "dsi_phy", &phy->base_size); + if (IS_ERR(phy->base)) + return dev_err_probe(dev, PTR_ERR(phy->base), + "Failed to map phy base\n"); + + phy->pll_base = msm_ioremap_size(pdev, "dsi_pll", &phy->pll_size); + if (IS_ERR(phy->pll_base)) + return dev_err_probe(dev, PTR_ERR(phy->pll_base), + "Failed to map pll base\n"); + + if (phy->cfg->has_phy_lane) { + phy->lane_base = msm_ioremap_size(pdev, "dsi_phy_lane", &phy->lane_size); + if (IS_ERR(phy->lane_base)) + return dev_err_probe(dev, PTR_ERR(phy->lane_base), + "Failed to map phy lane base\n"); + } + + if (phy->cfg->has_phy_regulator) { + phy->reg_base = msm_ioremap_size(pdev, "dsi_phy_regulator", &phy->reg_size); + if (IS_ERR(phy->reg_base)) + return dev_err_probe(dev, PTR_ERR(phy->reg_base), + "Failed to map phy regulator base\n"); + } + + if (phy->cfg->ops.parse_dt_properties) { + ret = phy->cfg->ops.parse_dt_properties(phy); + if (ret) + return ret; + } + + ret = devm_regulator_bulk_get_const(dev, phy->cfg->num_regulators, + phy->cfg->regulator_data, + &phy->supplies); + if (ret) + return ret; + + platform_set_drvdata(pdev, phy); + + ret = devm_pm_runtime_enable(dev); + if (ret) + return ret; + + ret = devm_pm_clk_create(dev); + if (ret) + return ret; + + ret = pm_clk_add(dev, "iface"); + if (ret < 0) + return dev_err_probe(dev, ret, "Unable to get iface clk\n"); + + if (phy->cfg->ops.pll_init) { + ret = phy->cfg->ops.pll_init(phy); + if (ret) + return dev_err_probe(dev, ret, + "PLL init failed; need separate clk driver\n"); + } + + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, + phy->provided_clocks); + if (ret) + return dev_err_probe(dev, ret, + "Failed to register clk provider\n"); + + return 0; +} + +static const struct dev_pm_ops dsi_phy_pm_ops = { + SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL) +}; + +static struct platform_driver dsi_phy_platform_driver = { + .probe = dsi_phy_driver_probe, + .driver = { + .name = "msm_dsi_phy", + .of_match_table = dsi_phy_dt_match, + .pm = &dsi_phy_pm_ops, + }, +}; + +void __init msm_dsi_phy_driver_register(void) +{ + platform_driver_register(&dsi_phy_platform_driver); +} + +void __exit msm_dsi_phy_driver_unregister(void) +{ + 
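+	/* Tear-down counterpart of msm_dsi_phy_driver_register() above */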
platform_driver_unregister(&dsi_phy_platform_driver); +} + +int msm_dsi_phy_enable(struct msm_dsi_phy *phy, + struct msm_dsi_phy_clk_request *clk_req, + struct msm_dsi_phy_shared_timings *shared_timings) +{ + struct device *dev; + int ret; + + if (!phy || !phy->cfg->ops.enable) + return -EINVAL; + + dev = &phy->pdev->dev; + + ret = pm_runtime_resume_and_get(dev); + if (ret) { + DRM_DEV_ERROR(dev, "%s: resume failed, %d\n", + __func__, ret); + goto res_en_fail; + } + + ret = regulator_bulk_enable(phy->cfg->num_regulators, phy->supplies); + if (ret) { + DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n", + __func__, ret); + goto reg_en_fail; + } + + ret = phy->cfg->ops.enable(phy, clk_req); + if (ret) { + DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret); + goto phy_en_fail; + } + + memcpy(shared_timings, &phy->timing.shared_timings, + sizeof(*shared_timings)); + + /* + * Resetting DSI PHY silently changes its PLL registers to reset status, + * which will confuse clock driver and result in wrong output rate of + * link clocks. Restore PLL status if its PLL is being used as clock + * source. + */ + if (phy->usecase != MSM_DSI_PHY_SLAVE) { + ret = msm_dsi_phy_pll_restore_state(phy); + if (ret) { + DRM_DEV_ERROR(dev, "%s: failed to restore phy state, %d\n", + __func__, ret); + goto pll_restor_fail; + } + } + + return 0; + +pll_restor_fail: + if (phy->cfg->ops.disable) + phy->cfg->ops.disable(phy); +phy_en_fail: + regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies); +reg_en_fail: + pm_runtime_put(dev); +res_en_fail: + return ret; +} + +void msm_dsi_phy_disable(struct msm_dsi_phy *phy) +{ + if (!phy || !phy->cfg->ops.disable) + return; + + phy->cfg->ops.disable(phy); + + regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies); + pm_runtime_put(&phy->pdev->dev); +} + +void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, + enum msm_dsi_phy_usecase uc) +{ + if (phy) + phy->usecase = uc; +} + +/* Returns true if we have to clear DSI_LANE_CTRL.HS_REQ_SEL_PHY */ +bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable) +{ + if (!phy || !phy->cfg->ops.set_continuous_clock) + return false; + + return phy->cfg->ops.set_continuous_clock(phy, enable); +} + +void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy) +{ + if (phy->cfg->ops.save_pll_state) { + phy->cfg->ops.save_pll_state(phy); + phy->state_saved = true; + } +} + +int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy) +{ + int ret; + + if (phy->cfg->ops.restore_pll_state && phy->state_saved) { + ret = phy->cfg->ops.restore_pll_state(phy); + if (ret) + return ret; + + phy->state_saved = false; + } + + return 0; +} + +void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy) +{ + msm_disp_snapshot_add_block(disp_state, + phy->base_size, phy->base, + "dsi%d_phy", phy->id); + + /* Do not try accessing PLL registers if it is switched off */ + if (phy->pll_on) + msm_disp_snapshot_add_block(disp_state, + phy->pll_size, phy->pll_base, + "dsi%d_pll", phy->id); + + if (phy->lane_base) + msm_disp_snapshot_add_block(disp_state, + phy->lane_size, phy->lane_base, + "dsi%d_lane", phy->id); + + if (phy->reg_base) + msm_disp_snapshot_add_block(disp_state, + phy->reg_size, phy->reg_base, + "dsi%d_reg", phy->id); +} diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h new file mode 100644 index 000000000000..3cbf08231492 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0-only 
*/ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + */ + +#ifndef __DSI_PHY_H__ +#define __DSI_PHY_H__ + +#include <dt-bindings/clock/qcom,dsi-phy-28nm.h> +#include <linux/clk-provider.h> +#include <linux/delay.h> +#include <linux/regulator/consumer.h> + +#include "dsi.h" + +struct msm_dsi_phy_ops { + int (*pll_init)(struct msm_dsi_phy *phy); + int (*enable)(struct msm_dsi_phy *phy, + struct msm_dsi_phy_clk_request *clk_req); + void (*disable)(struct msm_dsi_phy *phy); + void (*save_pll_state)(struct msm_dsi_phy *phy); + int (*restore_pll_state)(struct msm_dsi_phy *phy); + bool (*set_continuous_clock)(struct msm_dsi_phy *phy, bool enable); + int (*parse_dt_properties)(struct msm_dsi_phy *phy); +}; + +struct msm_dsi_phy_cfg { + const struct regulator_bulk_data *regulator_data; + int num_regulators; + struct msm_dsi_phy_ops ops; + + unsigned long min_pll_rate; + unsigned long max_pll_rate; + + const resource_size_t io_start[DSI_MAX]; + const int num_dsi_phy; + const int quirks; + bool has_phy_regulator; + bool has_phy_lane; +}; + +extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8226_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8937_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_14nm_6150_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_14nm_2290_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_7nm_6375_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8350_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8775p_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_5nm_sar2130p_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_3nm_8750_cfgs; + +struct msm_dsi_dphy_timing { + u32 clk_zero; + u32 clk_trail; + u32 clk_prepare; + u32 hs_exit; + u32 hs_zero; + u32 hs_prepare; + u32 hs_trail; + u32 hs_rqst; + u32 ta_go; + u32 ta_sure; + u32 ta_get; + + struct msm_dsi_phy_shared_timings shared_timings; + + /* For PHY v2 only */ + u32 hs_rqst_ckln; + u32 hs_prep_dly; + u32 hs_prep_dly_ckln; + u8 hs_halfbyte_en; + u8 hs_halfbyte_en_ckln; +}; + +#define NUM_PROVIDED_CLKS (DSI_PIXEL_PLL_CLK + 1) + +#define DSI_LANE_MAX 5 + +struct msm_dsi_phy { + struct platform_device *pdev; + void __iomem *base; + void __iomem *pll_base; + void __iomem *reg_base; + void __iomem *lane_base; + phys_addr_t base_size; + phys_addr_t pll_size; + phys_addr_t reg_size; + phys_addr_t lane_size; + int id; + + struct regulator_bulk_data *supplies; + + struct msm_dsi_dphy_timing timing; + const struct msm_dsi_phy_cfg *cfg; + void *tuning_cfg; + + enum msm_dsi_phy_usecase usecase; + bool regulator_ldo_mode; + bool cphy_mode; + + struct clk_hw 
*vco_hw; + bool pll_on; + + struct clk_hw_onecell_data *provided_clocks; + + bool state_saved; +}; + +/* + * PHY internal functions + */ +int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req); +int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req); +int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req); +int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req); +int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req); + +#endif /* __DSI_PHY_H__ */ diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c new file mode 100644 index 000000000000..ec486ff02c9b --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c @@ -0,0 +1,1036 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * Copyright (c) 2018, The Linux Foundation + */ + +#include <dt-bindings/clock/qcom,dsi-phy-28nm.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/iopoll.h> + +#include "dsi_phy.h" +#include "dsi.xml.h" +#include "dsi_phy_10nm.xml.h" + +/* + * DSI PLL 10nm - clock diagram (eg: DSI0): + * + * dsi0_pll_out_div_clk dsi0_pll_bit_clk + * | | + * | | + * +---------+ | +----------+ | +----+ + * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk + * +---------+ | +----------+ | +----+ + * | | + * | | dsi0_pll_by_2_bit_clk + * | | | + * | | +----+ | |\ dsi0_pclk_mux + * | |--| /2 |--o--| \ | + * | | +----+ | \ | +---------+ + * | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk + * |------------------------------| / +---------+ + * | +-----+ | / + * -----------| /4? |--o----------|/ + * +-----+ | | + * | |dsiclk_sel + * | + * dsi0_pll_post_out_div_clk + */ + +#define VCO_REF_CLK_RATE 19200000 +#define FRAC_BITS 18 + +/* v3.0.0 10nm implementation that requires the old timings settings */ +#define DSI_PHY_10NM_QUIRK_OLD_TIMINGS BIT(0) + +struct dsi_pll_config { + bool enable_ssc; + bool ssc_center; + u32 ssc_freq; + u32 ssc_offset; + u32 ssc_adj_per; + + /* out */ + u32 pll_prop_gain_rate; + u32 decimal_div_start; + u32 frac_div_start; + u32 pll_clock_inverters; + u32 ssc_stepsize; + u32 ssc_div_per; +}; + +struct pll_10nm_cached_state { + unsigned long vco_rate; + u8 bit_clk_div; + u8 pix_clk_div; + u8 pll_out_div; + u8 pll_mux; +}; + +struct dsi_pll_10nm { + struct clk_hw clk_hw; + + struct msm_dsi_phy *phy; + + u64 vco_current_rate; + + /* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */ + spinlock_t postdiv_lock; + + struct pll_10nm_cached_state cached_state; + + struct dsi_pll_10nm *slave; +}; + +#define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, clk_hw) + +/** + * struct dsi_phy_10nm_tuning_cfg - Holds 10nm PHY tuning config parameters. + * @rescode_offset_top: Offset for pull-up legs rescode. + * @rescode_offset_bot: Offset for pull-down legs rescode. + * @vreg_ctrl: vreg ctrl to drive LDO level + */ +struct dsi_phy_10nm_tuning_cfg { + u8 rescode_offset_top[DSI_LANE_MAX]; + u8 rescode_offset_bot[DSI_LANE_MAX]; + u8 vreg_ctrl; +}; + +/* + * Global list of private DSI PLL struct pointers. 
We need this for bonded DSI + * mode, where the master PLL's clk_ops needs access the slave's private data + */ +static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX]; + +static void dsi_pll_setup_config(struct dsi_pll_config *config) +{ + config->ssc_freq = 31500; + config->ssc_offset = 5000; + config->ssc_adj_per = 2; + + config->enable_ssc = false; + config->ssc_center = false; +} + +static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll, struct dsi_pll_config *config) +{ + u64 fref = VCO_REF_CLK_RATE; + u64 pll_freq; + u64 divider; + u64 dec, dec_multiple; + u32 frac; + u64 multiplier; + + pll_freq = pll->vco_current_rate; + + divider = fref * 2; + + multiplier = 1 << FRAC_BITS; + dec_multiple = div_u64(pll_freq * multiplier, divider); + dec = div_u64_rem(dec_multiple, multiplier, &frac); + + if (pll_freq <= 1900000000UL) + config->pll_prop_gain_rate = 8; + else if (pll_freq <= 3000000000UL) + config->pll_prop_gain_rate = 10; + else + config->pll_prop_gain_rate = 12; + if (pll_freq < 1100000000UL) + config->pll_clock_inverters = 8; + else + config->pll_clock_inverters = 0; + + config->decimal_div_start = dec; + config->frac_div_start = frac; +} + +#define SSC_CENTER BIT(0) +#define SSC_EN BIT(1) + +static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll, struct dsi_pll_config *config) +{ + u32 ssc_per; + u32 ssc_mod; + u64 ssc_step_size; + u64 frac; + + if (!config->enable_ssc) { + DBG("SSC not enabled\n"); + return; + } + + ssc_per = DIV_ROUND_CLOSEST(VCO_REF_CLK_RATE, config->ssc_freq) / 2 - 1; + ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1); + ssc_per -= ssc_mod; + + frac = config->frac_div_start; + ssc_step_size = config->decimal_div_start; + ssc_step_size *= (1 << FRAC_BITS); + ssc_step_size += frac; + ssc_step_size *= config->ssc_offset; + ssc_step_size *= (config->ssc_adj_per + 1); + ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1)); + ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000); + + config->ssc_div_per = ssc_per; + config->ssc_stepsize = ssc_step_size; + + pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n", + config->decimal_div_start, frac, FRAC_BITS); + pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n", + ssc_per, (u32)ssc_step_size, config->ssc_adj_per); +} + +static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config) +{ + void __iomem *base = pll->phy->pll_base; + + if (config->enable_ssc) { + pr_debug("SSC is enabled\n"); + + writel(config->ssc_stepsize & 0xff, + base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1); + writel(config->ssc_stepsize >> 8, + base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1); + writel(config->ssc_div_per & 0xff, + base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1); + writel(config->ssc_div_per >> 8, + base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1); + writel(config->ssc_adj_per & 0xff, + base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1); + writel(config->ssc_adj_per >> 8, + base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1); + writel(SSC_EN | (config->ssc_center ? 
SSC_CENTER : 0), + base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL); + } +} + +static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll) +{ + void __iomem *base = pll->phy->pll_base; + + writel(0x80, base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE); + writel(0x03, base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO); + writel(0x00, base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE); + writel(0x00, base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER); + writel(0x4e, base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER); + writel(0x40, base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS); + writel(0xba, base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE); + writel(0x0c, base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE); + writel(0x00, base + REG_DSI_10nm_PHY_PLL_OUTDIV); + writel(0x00, base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE); + writel(0x08, base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO); + writel(0x08, base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1); + writel(0xc0, base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1); + writel(0xfa, base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1); + writel(0x4c, base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1); + writel(0x80, base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE); + writel(0x29, base + REG_DSI_10nm_PHY_PLL_PFILT); + writel(0x3f, base + REG_DSI_10nm_PHY_PLL_IFILT); +} + +static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config) +{ + void __iomem *base = pll->phy->pll_base; + + writel(0x12, base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE); + writel(config->decimal_div_start, + base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1); + writel(config->frac_div_start & 0xff, + base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1); + writel((config->frac_div_start & 0xff00) >> 8, + base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1); + writel((config->frac_div_start & 0x30000) >> 16, + base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1); + writel(64, base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1); + writel(0x06, base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY); + writel(0x10, base + REG_DSI_10nm_PHY_PLL_CMODE); + writel(config->pll_clock_inverters, base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS); +} + +static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw); + struct dsi_pll_config config; + + DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->phy->id, rate, + parent_rate); + + pll_10nm->vco_current_rate = rate; + + dsi_pll_setup_config(&config); + + dsi_pll_calc_dec_frac(pll_10nm, &config); + + dsi_pll_calc_ssc(pll_10nm, &config); + + dsi_pll_commit(pll_10nm, &config); + + dsi_pll_config_hzindep_reg(pll_10nm); + + dsi_pll_ssc_commit(pll_10nm, &config); + + /* flush, ensure all register writes are done*/ + wmb(); + + return 0; +} + +static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll) +{ + struct device *dev = &pll->phy->pdev->dev; + int rc; + u32 status = 0; + u32 const delay_us = 100; + u32 const timeout_us = 5000; + + rc = readl_poll_timeout_atomic(pll->phy->pll_base + + REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE, + status, + ((status & BIT(0)) > 0), + delay_us, + timeout_us); + if (rc) + DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n", + pll->phy->id, status); + + return rc; +} + +static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll) +{ + u32 data = readl(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0); + + writel(0, pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES); + writel(data & ~BIT(5), pll->phy->base + 
REG_DSI_10nm_PHY_CMN_CTRL_0); + ndelay(250); +} + +static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll) +{ + u32 data = readl(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0); + + writel(data | BIT(5), pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0); + writel(0xc0, pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES); + ndelay(250); +} + +static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll) +{ + u32 data; + + data = readl(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1); + writel(data & ~BIT(5), pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1); +} + +static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll) +{ + u32 data; + + data = readl(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1); + writel(data | BIT(5), pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1); +} + +static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw); + struct device *dev = &pll_10nm->phy->pdev->dev; + int rc; + + dsi_pll_enable_pll_bias(pll_10nm); + if (pll_10nm->slave) + dsi_pll_enable_pll_bias(pll_10nm->slave); + + rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0); + if (rc) { + DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc); + return rc; + } + + /* Start PLL */ + writel(0x01, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL); + + /* + * ensure all PLL configurations are written prior to checking + * for PLL lock. + */ + wmb(); + + /* Check for PLL lock */ + rc = dsi_pll_10nm_lock_status(pll_10nm); + if (rc) { + DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->phy->id); + goto error; + } + + pll_10nm->phy->pll_on = true; + + dsi_pll_enable_global_clk(pll_10nm); + if (pll_10nm->slave) + dsi_pll_enable_global_clk(pll_10nm->slave); + + writel(0x01, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL); + if (pll_10nm->slave) + writel(0x01, pll_10nm->slave->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL); + +error: + return rc; +} + +static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll) +{ + writel(0, pll->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL); + dsi_pll_disable_pll_bias(pll); +} + +static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw); + + /* + * To avoid any stray glitches while abruptly powering down the PLL + * make sure to gate the clock using the clock enable bit before + * powering down the PLL + */ + dsi_pll_disable_global_clk(pll_10nm); + writel(0, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL); + dsi_pll_disable_sub(pll_10nm); + if (pll_10nm->slave) { + dsi_pll_disable_global_clk(pll_10nm->slave); + dsi_pll_disable_sub(pll_10nm->slave); + } + /* flush, ensure all register writes are done */ + wmb(); + pll_10nm->phy->pll_on = false; +} + +static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw); + void __iomem *base = pll_10nm->phy->pll_base; + u64 ref_clk = VCO_REF_CLK_RATE; + u64 vco_rate = 0x0; + u64 multiplier; + u32 frac; + u32 dec; + u64 pll_freq, tmp64; + + dec = readl(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1); + dec &= 0xff; + + frac = readl(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1); + frac |= ((readl(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) & + 0xff) << 8); + frac |= ((readl(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) & + 0x3) << 16); + + /* + * TODO: + * 1. 
Assumes prescaler is disabled + */ + multiplier = 1 << FRAC_BITS; + pll_freq = dec * (ref_clk * 2); + tmp64 = (ref_clk * 2 * frac); + pll_freq += div_u64(tmp64, multiplier); + + vco_rate = pll_freq; + pll_10nm->vco_current_rate = vco_rate; + + DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x", + pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac); + + return (unsigned long)vco_rate; +} + +static int dsi_pll_10nm_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw); + + req->rate = clamp_t(unsigned long, req->rate, + pll_10nm->phy->cfg->min_pll_rate, pll_10nm->phy->cfg->max_pll_rate); + + return 0; +} + +static const struct clk_ops clk_ops_dsi_pll_10nm_vco = { + .determine_rate = dsi_pll_10nm_clk_determine_rate, + .set_rate = dsi_pll_10nm_vco_set_rate, + .recalc_rate = dsi_pll_10nm_vco_recalc_rate, + .prepare = dsi_pll_10nm_vco_prepare, + .unprepare = dsi_pll_10nm_vco_unprepare, +}; + +/* + * PLL Callbacks + */ + +static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw); + struct pll_10nm_cached_state *cached = &pll_10nm->cached_state; + void __iomem *phy_base = pll_10nm->phy->base; + u32 cmn_clk_cfg0, cmn_clk_cfg1; + + cached->pll_out_div = readl(pll_10nm->phy->pll_base + + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE); + cached->pll_out_div &= 0x3; + + cmn_clk_cfg0 = readl(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0); + cached->bit_clk_div = cmn_clk_cfg0 & 0xf; + cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4; + + cmn_clk_cfg1 = readl(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1); + cached->pll_mux = cmn_clk_cfg1 & 0x3; + + DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x", + pll_10nm->phy->id, cached->pll_out_div, cached->bit_clk_div, + cached->pix_clk_div, cached->pll_mux); +} + +static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw); + struct pll_10nm_cached_state *cached = &pll_10nm->cached_state; + void __iomem *phy_base = pll_10nm->phy->base; + u32 val; + int ret; + + val = readl(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE); + val &= ~0x3; + val |= cached->pll_out_div; + writel(val, pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE); + + writel(cached->bit_clk_div | (cached->pix_clk_div << 4), + phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0); + + val = readl(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1); + val &= ~0x3; + val |= cached->pll_mux; + writel(val, phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1); + + ret = dsi_pll_10nm_vco_set_rate(phy->vco_hw, + pll_10nm->vco_current_rate, + VCO_REF_CLK_RATE); + if (ret) { + DRM_DEV_ERROR(&pll_10nm->phy->pdev->dev, + "restore vco rate failed. 
ret=%d\n", ret); + return ret; + } + + DBG("DSI PLL%d", pll_10nm->phy->id); + + return 0; +} + +static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw); + void __iomem *base = phy->base; + u32 data = 0x0; /* internal PLL */ + + DBG("DSI PLL%d", pll_10nm->phy->id); + + switch (phy->usecase) { + case MSM_DSI_PHY_STANDALONE: + break; + case MSM_DSI_PHY_MASTER: + pll_10nm->slave = pll_10nm_list[(pll_10nm->phy->id + 1) % DSI_MAX]; + break; + case MSM_DSI_PHY_SLAVE: + data = 0x1; /* external PLL */ + break; + default: + return -EINVAL; + } + + /* set PLL src */ + writel(data << 2, base + REG_DSI_10nm_PHY_CMN_CLK_CFG1); + + return 0; +} + +/* + * The post dividers and mux clocks are created using the standard divider and + * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux + * state to follow the master PLL's divider/mux state. Therefore, we don't + * require special clock ops that also configure the slave PLL registers + */ +static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **provided_clocks) +{ + char clk_name[32]; + struct clk_init_data vco_init = { + .parent_data = &(const struct clk_parent_data) { + .fw_name = "ref", + }, + .num_parents = 1, + .name = clk_name, + .flags = CLK_IGNORE_UNUSED, + .ops = &clk_ops_dsi_pll_10nm_vco, + }; + struct device *dev = &pll_10nm->phy->pdev->dev; + struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit; + struct clk_hw *pll_post_out_div, *pclk_mux; + int ret; + + DBG("DSI%d", pll_10nm->phy->id); + + snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_10nm->phy->id); + pll_10nm->clk_hw.init = &vco_init; + + ret = devm_clk_hw_register(dev, &pll_10nm->clk_hw); + if (ret) + return ret; + + snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_10nm->phy->id); + + pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name, + &pll_10nm->clk_hw, CLK_SET_RATE_PARENT, + pll_10nm->phy->pll_base + + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, + 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL); + if (IS_ERR(pll_out_div)) { + ret = PTR_ERR(pll_out_div); + goto fail; + } + + snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_10nm->phy->id); + + /* BIT CLK: DIV_CTRL_3_0 */ + pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name, + pll_out_div, CLK_SET_RATE_PARENT, + pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0, + 0, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock); + if (IS_ERR(pll_bit)) { + ret = PTR_ERR(pll_bit); + goto fail; + } + + snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id); + + /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */ + hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name, + pll_bit, CLK_SET_RATE_PARENT, 1, 8); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto fail; + } + + provided_clocks[DSI_BYTE_PLL_CLK] = hw; + + snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id); + + pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev, + clk_name, pll_bit, 0, 1, 2); + if (IS_ERR(pll_by_2_bit)) { + ret = PTR_ERR(pll_by_2_bit); + goto fail; + } + + snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id); + + pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(dev, + clk_name, pll_out_div, 0, 1, 4); + if (IS_ERR(pll_post_out_div)) { + ret = PTR_ERR(pll_post_out_div); + goto fail; + } + + snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_10nm->phy->id); + + pclk_mux = 
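+	/*
+	 * dsi0_pclk_mux from the clock diagram above: a 2-bit field in
+	 * CMN_CLK_CFG1 selects the pixel clock parent from pll_bit,
+	 * pll_by_2_bit, pll_out_div and pll_post_out_div.
+	 */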
devm_clk_hw_register_mux_parent_hws(dev, clk_name, + ((const struct clk_hw *[]){ + pll_bit, + pll_by_2_bit, + pll_out_div, + pll_post_out_div, + }), 4, 0, pll_10nm->phy->base + + REG_DSI_10nm_PHY_CMN_CLK_CFG1, 0, 2, 0, NULL); + if (IS_ERR(pclk_mux)) { + ret = PTR_ERR(pclk_mux); + goto fail; + } + + snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id); + + /* PIX CLK DIV : DIV_CTRL_7_4*/ + hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name, pclk_mux, + 0, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0, + 4, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto fail; + } + + provided_clocks[DSI_PIXEL_PLL_CLK] = hw; + + return 0; + +fail: + + return ret; +} + +static int dsi_pll_10nm_init(struct msm_dsi_phy *phy) +{ + struct platform_device *pdev = phy->pdev; + struct dsi_pll_10nm *pll_10nm; + int ret; + + pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL); + if (!pll_10nm) + return -ENOMEM; + + DBG("DSI PLL%d", phy->id); + + pll_10nm_list[phy->id] = pll_10nm; + + spin_lock_init(&pll_10nm->postdiv_lock); + + pll_10nm->phy = phy; + + ret = pll_10nm_register(pll_10nm, phy->provided_clocks->hws); + if (ret) { + DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret); + return ret; + } + + phy->vco_hw = &pll_10nm->clk_hw; + + /* TODO: Remove this when we have proper display handover support */ + msm_dsi_phy_pll_save_state(phy); + + /* + * Store also proper vco_current_rate, because its value will be used in + * dsi_10nm_pll_restore_state(). + */ + if (!dsi_pll_10nm_vco_recalc_rate(&pll_10nm->clk_hw, VCO_REF_CLK_RATE)) + pll_10nm->vco_current_rate = pll_10nm->phy->cfg->min_pll_rate; + + return 0; +} + +static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy) +{ + void __iomem *base = phy->base; + u32 data = 0; + + data = readl(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL); + mb(); /* make sure read happened */ + + return (data & BIT(0)); +} + +static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable) +{ + void __iomem *lane_base = phy->lane_base; + int phy_lane_0 = 0; /* TODO: Support all lane swap configs */ + + /* + * LPRX and CDRX need to enabled only for physical data lane + * corresponding to the logical data lane 0 + */ + if (enable) + writel(0x3, lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0)); + else + writel(0, lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0)); +} + +static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy) +{ + int i; + u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 }; + void __iomem *lane_base = phy->lane_base; + struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg; + + if (phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS) + tx_dctrl[3] = 0x02; + + /* Strength ctrl settings */ + for (i = 0; i < 5; i++) { + writel(0x55, lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i)); + /* + * Disable LPRX and CDRX for all lanes. And later on, it will + * be only enabled for the physical data lane corresponding + * to the logical data lane 0 + */ + writel(0, lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i)); + writel(0x0, lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i)); + writel(0x88, lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i)); + } + + dsi_phy_hw_v3_0_config_lpcdrx(phy, true); + + /* other settings */ + for (i = 0; i < 5; i++) { + writel(0, lane_base + REG_DSI_10nm_PHY_LN_CFG0(i)); + writel(0, lane_base + REG_DSI_10nm_PHY_LN_CFG1(i)); + writel(0, lane_base + REG_DSI_10nm_PHY_LN_CFG2(i)); + writel(i == 4 ? 
0x80 : 0x0, lane_base + REG_DSI_10nm_PHY_LN_CFG3(i)); + + /* platform specific dsi phy drive strength adjustment */ + writel(tuning_cfg->rescode_offset_top[i], + lane_base + REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i)); + writel(tuning_cfg->rescode_offset_bot[i], + lane_base + REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i)); + + writel(tx_dctrl[i], + lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i)); + } + + if (!(phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)) { + /* Toggle BIT 0 to release freeze I/0 */ + writel(0x05, lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3)); + writel(0x04, lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3)); + } +} + +static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, + struct msm_dsi_phy_clk_request *clk_req) +{ + int ret; + u32 status; + u32 const delay_us = 5; + u32 const timeout_us = 1000; + struct msm_dsi_dphy_timing *timing = &phy->timing; + void __iomem *base = phy->base; + struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg; + u32 data; + + DBG(""); + + if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) { + DRM_DEV_ERROR(&phy->pdev->dev, + "%s: D-PHY timing calculation failed\n", __func__); + return -EINVAL; + } + + if (dsi_phy_hw_v3_0_is_pll_on(phy)) + pr_warn("PLL turned on before configuring PHY\n"); + + /* wait for REFGEN READY */ + ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS, + status, (status & BIT(0)), + delay_us, timeout_us); + if (ret) { + pr_err("Ref gen not ready. Aborting\n"); + return -EINVAL; + } + + /* de-assert digital and pll power down */ + data = BIT(6) | BIT(5); + writel(data, base + REG_DSI_10nm_PHY_CMN_CTRL_0); + + /* Assert PLL core reset */ + writel(0x00, base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL); + + /* turn off resync FIFO */ + writel(0x00, base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL); + + /* Select MS1 byte-clk */ + writel(0x10, base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL); + + /* Enable LDO with platform specific drive level/amplitude adjustment */ + writel(tuning_cfg->vreg_ctrl, base + REG_DSI_10nm_PHY_CMN_VREG_CTRL); + + /* Configure PHY lane swap (TODO: we need to calculate this) */ + writel(0x21, base + REG_DSI_10nm_PHY_CMN_LANE_CFG0); + writel(0x84, base + REG_DSI_10nm_PHY_CMN_LANE_CFG1); + + /* DSI PHY timings */ + writel(timing->hs_halfbyte_en, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0); + writel(timing->clk_zero, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1); + writel(timing->clk_prepare, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2); + writel(timing->clk_trail, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3); + writel(timing->hs_exit, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4); + writel(timing->hs_zero, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5); + writel(timing->hs_prepare, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6); + writel(timing->hs_trail, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7); + writel(timing->hs_rqst, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8); + writel(timing->ta_go | (timing->ta_sure << 3), base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9); + writel(timing->ta_get, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10); + writel(0x00, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11); + + /* Remove power down from all blocks */ + writel(0x7f, base + REG_DSI_10nm_PHY_CMN_CTRL_0); + + /* power up lanes */ + data = readl(base + REG_DSI_10nm_PHY_CMN_CTRL_0); + + /* TODO: only power up lanes that are used */ + data |= 0x1F; + writel(data, base + REG_DSI_10nm_PHY_CMN_CTRL_0); + writel(0x1F, base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0); + + /* Select full-rate mode */ + writel(0x40, base + REG_DSI_10nm_PHY_CMN_CTRL_2); + + ret = 
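+	/*
+	 * Program the PLL source for this usecase: a slave PHY in a bonded
+	 * pair takes its clocks from the master's PLL (the "external PLL"
+	 * setting in CMN_CLK_CFG1), see dsi_10nm_set_usecase() above.
+	 */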
dsi_10nm_set_usecase(phy); + if (ret) { + DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n", + __func__, ret); + return ret; + } + + /* DSI lane settings */ + dsi_phy_hw_v3_0_lane_settings(phy); + + DBG("DSI%d PHY enabled", phy->id); + + return 0; +} + +static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy) +{ + void __iomem *base = phy->base; + u32 data; + + DBG(""); + + if (dsi_phy_hw_v3_0_is_pll_on(phy)) + pr_warn("Turning OFF PHY while PLL is on\n"); + + dsi_phy_hw_v3_0_config_lpcdrx(phy, false); + data = readl(base + REG_DSI_10nm_PHY_CMN_CTRL_0); + + /* disable all lanes */ + data &= ~0x1F; + writel(data, base + REG_DSI_10nm_PHY_CMN_CTRL_0); + writel(0, base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0); + + /* Turn off all PHY blocks */ + writel(0x00, base + REG_DSI_10nm_PHY_CMN_CTRL_0); + /* make sure phy is turned off */ + wmb(); + + DBG("DSI%d PHY disabled", phy->id); +} + +static int dsi_10nm_phy_parse_dt(struct msm_dsi_phy *phy) +{ + struct device *dev = &phy->pdev->dev; + struct dsi_phy_10nm_tuning_cfg *tuning_cfg; + s8 offset_top[DSI_LANE_MAX] = { 0 }; /* No offset */ + s8 offset_bot[DSI_LANE_MAX] = { 0 }; /* No offset */ + u32 ldo_level = 400; /* 400mV */ + u8 level; + int ret, i; + + tuning_cfg = devm_kzalloc(dev, sizeof(*tuning_cfg), GFP_KERNEL); + if (!tuning_cfg) + return -ENOMEM; + + /* Drive strength adjustment parameters */ + ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-top", + offset_top, DSI_LANE_MAX); + if (ret && ret != -EINVAL) { + DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-top, %d\n", ret); + return ret; + } + + for (i = 0; i < DSI_LANE_MAX; i++) { + if (offset_top[i] < -32 || offset_top[i] > 31) { + DRM_DEV_ERROR(dev, + "qcom,phy-rescode-offset-top value %d is not in range [-32..31]\n", + offset_top[i]); + return -EINVAL; + } + tuning_cfg->rescode_offset_top[i] = 0x3f & offset_top[i]; + } + + ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-bot", + offset_bot, DSI_LANE_MAX); + if (ret && ret != -EINVAL) { + DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-bot, %d\n", ret); + return ret; + } + + for (i = 0; i < DSI_LANE_MAX; i++) { + if (offset_bot[i] < -32 || offset_bot[i] > 31) { + DRM_DEV_ERROR(dev, + "qcom,phy-rescode-offset-bot value %d is not in range [-32..31]\n", + offset_bot[i]); + return -EINVAL; + } + tuning_cfg->rescode_offset_bot[i] = 0x3f & offset_bot[i]; + } + + /* Drive level/amplitude adjustment parameters */ + ret = of_property_read_u32(dev->of_node, "qcom,phy-drive-ldo-level", &ldo_level); + if (ret && ret != -EINVAL) { + DRM_DEV_ERROR(dev, "failed to parse qcom,phy-drive-ldo-level, %d\n", ret); + return ret; + } + + switch (ldo_level) { + case 375: + level = 0; + break; + case 400: + level = 1; + break; + case 425: + level = 2; + break; + case 450: + level = 3; + break; + case 475: + level = 4; + break; + case 500: + level = 5; + break; + default: + DRM_DEV_ERROR(dev, "qcom,phy-drive-ldo-level %d is not supported\n", ldo_level); + return -EINVAL; + } + tuning_cfg->vreg_ctrl = 0x58 | (0x7 & level); + + phy->tuning_cfg = tuning_cfg; + + return 0; +} + +static const struct regulator_bulk_data dsi_phy_10nm_regulators[] = { + { .supply = "vdds", .init_load_uA = 36000 }, +}; + +const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = { + .has_phy_lane = true, + .regulator_data = dsi_phy_10nm_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators), + .ops = { + .enable = dsi_10nm_phy_enable, + .disable = dsi_10nm_phy_disable, + .pll_init = dsi_pll_10nm_init, + 
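/*
 * Example (sketch, not part of this patch): dsi_10nm_phy_parse_dt() above
 * packs each signed rescode offset into a 6-bit two's-complement register
 * field and maps the LDO drive level in mV onto a 3-bit code OR'ed into
 * 0x58. The same encoding, as hypothetical standalone helpers:
 *
 *	static u8 rescode_to_field(s8 offset)	// valid range [-32..31]
 *	{
 *		return 0x3f & (u8)offset;	// e.g. -2 -> 0x3e, 5 -> 0x05
 *	}
 *
 *	static u8 ldo_mv_to_vreg_ctrl(u32 mv)	// only 375..500 in 25 mV steps
 *	{
 *		return 0x58 | (((mv - 375) / 25) & 0x7);  // 400 mV -> 0x59
 *	}
 */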
.save_pll_state = dsi_10nm_pll_save_state, + .restore_pll_state = dsi_10nm_pll_restore_state, + .parse_dt_properties = dsi_10nm_phy_parse_dt, + }, + .min_pll_rate = 1000000000UL, + .max_pll_rate = 3500000000UL, + .io_start = { 0xae94400, 0xae96400 }, + .num_dsi_phy = 2, +}; + +const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = { + .has_phy_lane = true, + .regulator_data = dsi_phy_10nm_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators), + .ops = { + .enable = dsi_10nm_phy_enable, + .disable = dsi_10nm_phy_disable, + .pll_init = dsi_pll_10nm_init, + .save_pll_state = dsi_10nm_pll_save_state, + .restore_pll_state = dsi_10nm_pll_restore_state, + .parse_dt_properties = dsi_10nm_phy_parse_dt, + }, + .min_pll_rate = 1000000000UL, + .max_pll_rate = 3500000000UL, + .io_start = { 0xc994400, 0xc996400 }, + .num_dsi_phy = 2, + .quirks = DSI_PHY_10NM_QUIRK_OLD_TIMINGS, +}; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c new file mode 100644 index 000000000000..fdefcbd9c284 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c @@ -0,0 +1,1121 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + */ + +#include <dt-bindings/clock/qcom,dsi-phy-28nm.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/delay.h> + +#include "dsi_phy.h" +#include "dsi.xml.h" +#include "dsi_phy_14nm.xml.h" + +#define PHY_14NM_CKLN_IDX 4 + +/* + * DSI PLL 14nm - clock diagram (eg: DSI0): + * + * dsi0n1_postdiv_clk + * | + * | + * +----+ | +----+ + * dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte + * +----+ | +----+ + * | dsi0n1_postdivby2_clk + * | +----+ | + * o---| /2 |--o--|\ + * | +----+ | \ +----+ + * | | |--| n2 |-- dsi0pll + * o--------------| / +----+ + * |/ + */ + +#define POLL_MAX_READS 15 +#define POLL_TIMEOUT_US 1000 + +#define VCO_REF_CLK_RATE 19200000 +#define VCO_MIN_RATE 1300000000UL +#define VCO_MAX_RATE 2600000000UL + +struct dsi_pll_config { + u64 vco_current_rate; + + u32 ssc_en; /* SSC enable/disable */ + + /* fixed params */ + u32 plllock_cnt; + u32 ssc_center; + u32 ssc_adj_period; + u32 ssc_spread; + u32 ssc_freq; + + /* calculated */ + u32 dec_start; + u32 div_frac_start; + u32 ssc_period; + u32 ssc_step_size; + u32 plllock_cmp; + u32 pll_vco_div_ref; + u32 pll_vco_count; + u32 pll_kvco_div_ref; + u32 pll_kvco_count; +}; + +struct pll_14nm_cached_state { + unsigned long vco_rate; + u8 n2postdiv; + u8 n1postdiv; +}; + +struct dsi_pll_14nm { + struct clk_hw clk_hw; + + struct msm_dsi_phy *phy; + + /* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */ + spinlock_t postdiv_lock; + + struct pll_14nm_cached_state cached_state; + + struct dsi_pll_14nm *slave; +}; + +#define to_pll_14nm(x) container_of(x, struct dsi_pll_14nm, clk_hw) + +/* + * Private struct for N1/N2 post-divider clocks. These clocks are similar to + * the generic clk_divider class of clocks. The only difference is that it + * also sets the slave DSI PLL's post-dividers if in bonded DSI mode + */ +struct dsi_pll_14nm_postdiv { + struct clk_hw hw; + + /* divider params */ + u8 shift; + u8 width; + u8 flags; /* same flags as used by clk_divider struct */ + + struct dsi_pll_14nm *pll; +}; + +#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw) + +/* + * Global list of private DSI PLL struct pointers. 
We need this for bonded DSI + * mode, where the master PLL's clk_ops needs to access the slave's private data + */ +static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX]; + +static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm, + u32 nb_tries, u32 timeout_us) +{ + bool pll_locked = false, pll_ready = false; + void __iomem *base = pll_14nm->phy->pll_base; + u32 tries, val; + + tries = nb_tries; + while (tries--) { + val = readl(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); + pll_locked = !!(val & BIT(5)); + + if (pll_locked) + break; + + udelay(timeout_us); + } + + if (!pll_locked) + goto out; + + tries = nb_tries; + while (tries--) { + val = readl(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); + pll_ready = !!(val & BIT(0)); + + if (pll_ready) + break; + + udelay(timeout_us); + } + +out: + DBG("DSI PLL is %slocked, %sready", pll_locked ? "" : "*not* ", pll_ready ? "" : "*not* "); + + return pll_locked && pll_ready; +} + +static void dsi_pll_14nm_config_init(struct dsi_pll_config *pconf) +{ + /* fixed input */ + pconf->plllock_cnt = 1; + + /* + * SSC is enabled by default. We might need DT props for configuring + * some SSC params like PPM and center/down spread etc. + */ + pconf->ssc_en = 1; + pconf->ssc_center = 0; /* down spread by default */ + pconf->ssc_spread = 5; /* PPM / 1000 */ + pconf->ssc_freq = 31500; /* default recommended */ + pconf->ssc_adj_period = 37; +} + +#define CEIL(x, y) (((x) + ((y) - 1)) / (y)) + +static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf) +{ + u32 period, ssc_period; + u32 ref, rem; + u64 step_size; + + DBG("vco=%lld ref=%d", pconf->vco_current_rate, VCO_REF_CLK_RATE); + + ssc_period = pconf->ssc_freq / 500; + period = (u32)VCO_REF_CLK_RATE / 1000; + ssc_period = CEIL(period, ssc_period); + ssc_period -= 1; + pconf->ssc_period = ssc_period; + + DBG("ssc freq=%d spread=%d period=%d", pconf->ssc_freq, + pconf->ssc_spread, pconf->ssc_period); + + step_size = (u32)pconf->vco_current_rate; + ref = VCO_REF_CLK_RATE; + ref /= 1000; + step_size = div_u64(step_size, ref); + step_size <<= 20; + step_size = div_u64(step_size, 1000); + step_size *= pconf->ssc_spread; + step_size = div_u64(step_size, 1000); + step_size *= (pconf->ssc_adj_period + 1); + + rem = 0; + step_size = div_u64_rem(step_size, ssc_period + 1, &rem); + if (rem) + step_size++; + + DBG("step_size=%lld", step_size); + + step_size &= 0x0ffff; /* take lower 16 bits */ + + pconf->ssc_step_size = step_size; +} + +static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf) +{ + u64 multiplier = BIT(20); + u64 dec_start_multiple, dec_start, pll_comp_val; + u32 duration, div_frac_start; + u64 vco_clk_rate = pconf->vco_current_rate; + u64 fref = VCO_REF_CLK_RATE; + + DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref); + + dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref); + dec_start = div_u64_rem(dec_start_multiple, multiplier, &div_frac_start); + + pconf->dec_start = (u32)dec_start; + pconf->div_frac_start = div_frac_start; + + if (pconf->plllock_cnt == 0) + duration = 1024; + else if (pconf->plllock_cnt == 1) + duration = 256; + else if (pconf->plllock_cnt == 2) + duration = 128; + else + duration = 32; + + pll_comp_val = duration * dec_start_multiple; + pll_comp_val = div_u64(pll_comp_val, multiplier); + do_div(pll_comp_val, 10); + + pconf->plllock_cmp = (u32)pll_comp_val; +} + +static u32 pll_14nm_kvco_slop(u32 vrate) +{ + u32 slop = 0; + + if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL) + slop =
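/*
 * Worked example (not part of this patch): with the defaults set in
 * dsi_pll_14nm_config_init() (ssc_freq = 31500, ssc_spread = 5,
 * ssc_adj_period = 37) and the 19.2 MHz reference, pll_14nm_ssc_calc()
 * above yields
 *
 *	ssc_period = CEIL(19200, 31500 / 500) - 1 = CEIL(19200, 63) - 1 = 304
 *
 * and, assuming vco_current_rate = 1.5 GHz,
 *
 *	step_size = ((((1500000000 / 19200) << 20) / 1000) * 5 / 1000 * 38)
 *	            / (304 + 1), rounded up
 *	          = 51033
 *
 * of which only the low 16 bits are programmed into SSC_STEP_SIZE1/2.
 */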
600; + else if (vrate > 1800000000UL && vrate < 2300000000UL) + slop = 400; + else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE) + slop = 280; + + return slop; +} + +static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf) +{ + u64 vco_clk_rate = pconf->vco_current_rate; + u64 fref = VCO_REF_CLK_RATE; + u32 vco_measure_time = 5; + u32 kvco_measure_time = 5; + u64 data; + u32 cnt; + + data = fref * vco_measure_time; + do_div(data, 1000000); + data &= 0x03ff; /* 10 bits */ + data -= 2; + pconf->pll_vco_div_ref = data; + + data = div_u64(vco_clk_rate, 1000000); /* unit is MHz */ + data *= vco_measure_time; + do_div(data, 10); + pconf->pll_vco_count = data; + + data = fref * kvco_measure_time; + do_div(data, 1000000); + data &= 0x03ff; /* 10 bits */ + data -= 1; + pconf->pll_kvco_div_ref = data; + + cnt = pll_14nm_kvco_slop(vco_clk_rate); + cnt *= 2; + cnt /= 100; + cnt *= kvco_measure_time; + pconf->pll_kvco_count = cnt; +} + +static void pll_db_commit_ssc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf) +{ + void __iomem *base = pll->phy->pll_base; + u8 data; + + data = pconf->ssc_adj_period; + data &= 0x0ff; + writel(data, base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1); + data = (pconf->ssc_adj_period >> 8); + data &= 0x03; + writel(data, base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2); + + data = pconf->ssc_period; + data &= 0x0ff; + writel(data, base + REG_DSI_14nm_PHY_PLL_SSC_PER1); + data = (pconf->ssc_period >> 8); + data &= 0x0ff; + writel(data, base + REG_DSI_14nm_PHY_PLL_SSC_PER2); + + data = pconf->ssc_step_size; + data &= 0x0ff; + writel(data, base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1); + data = (pconf->ssc_step_size >> 8); + data &= 0x0ff; + writel(data, base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2); + + data = (pconf->ssc_center & 0x01); + data <<= 1; + data |= 0x01; /* enable */ + writel(data, base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER); + + wmb(); /* make sure register committed */ +} + +static void pll_db_commit_common(struct dsi_pll_14nm *pll, + struct dsi_pll_config *pconf) +{ + void __iomem *base = pll->phy->pll_base; + u8 data; + + /* configure the non-frequency-dependent pll registers */ + data = 0; + writel(data, base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET); + + writel(1, base + REG_DSI_14nm_PHY_PLL_TXCLK_EN); + + writel(48, base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL); + /* bandgap_timer */ + writel(4 << 3, base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2); + /* pll_wakeup_timer */ + writel(5, base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5); + + data = pconf->pll_vco_div_ref & 0xff; + writel(data, base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1); + data = (pconf->pll_vco_div_ref >> 8) & 0x3; + writel(data, base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2); + + data = pconf->pll_kvco_div_ref & 0xff; + writel(data, base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1); + data = (pconf->pll_kvco_div_ref >> 8) & 0x3; + writel(data, base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2); + + writel(16, base + REG_DSI_14nm_PHY_PLL_PLL_MISC1); + + writel(4, base + REG_DSI_14nm_PHY_PLL_IE_TRIM); + + writel(4, base + REG_DSI_14nm_PHY_PLL_IP_TRIM); + + writel(1 << 3 | 1, base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR); + + writel(0 << 3 | 0, base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET); + + writel(0 << 3 | 0, base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET); + + writel(4 << 3 | 4, base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET); + + writel(1 << 4 | 11, base + REG_DSI_14nm_PHY_PLL_PLL_LPF1); + + writel(7, base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM); + + writel(1 << 4 | 2, base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL); +} + +static
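/*
 * Worked example (not part of this patch): pll_14nm_dec_frac_calc() above
 * splits vco_clk_rate / fref into an integer part (dec_start) and a 20-bit
 * fixed-point fraction (div_frac_start). E.g. for vco_clk_rate = 1.8 GHz
 * and fref = 19.2 MHz:
 *
 *	1800000000 / 19200000 = 93.75
 *	dec_start      = 93
 *	div_frac_start = 0.75 * 2^20 = 786432
 *
 * A minimal standalone equivalent (hypothetical helper):
 *
 *	static void dec_frac(u64 vco, u64 fref, u32 *dec, u32 *frac)
 *	{
 *		u64 total = div_u64(vco << 20, fref);
 *
 *		*dec = (u32)(total >> 20);
 *		*frac = (u32)(total & (BIT(20) - 1));
 *	}
 */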
void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm) +{ + void __iomem *cmn_base = pll_14nm->phy->base; + + /* de-assert pll start and apply pll sw reset */ + + /* stop pll */ + writel(0, cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL); + + /* pll sw reset */ + writel(0x20, cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1); + udelay(10); + wmb(); /* make sure register committed */ + + writel(0, cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1); + wmb(); /* make sure register committed */ +} + +static void pll_db_commit_14nm(struct dsi_pll_14nm *pll, + struct dsi_pll_config *pconf) +{ + void __iomem *base = pll->phy->pll_base; + void __iomem *cmn_base = pll->phy->base; + u8 data; + + DBG("DSI%d PLL", pll->phy->id); + + writel(0x3c, cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL); + + pll_db_commit_common(pll, pconf); + + pll_14nm_software_reset(pll); + + /* Use the /2 path in Mux */ + writel(1, cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1); + + data = 0xff; /* data, clk, pll normal operation */ + writel(data, cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0); + + /* configure the frequency dependent pll registers */ + data = pconf->dec_start; + writel(data, base + REG_DSI_14nm_PHY_PLL_DEC_START); + + data = pconf->div_frac_start & 0xff; + writel(data, base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1); + data = (pconf->div_frac_start >> 8) & 0xff; + writel(data, base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2); + data = (pconf->div_frac_start >> 16) & 0xf; + writel(data, base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3); + + data = pconf->plllock_cmp & 0xff; + writel(data, base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1); + + data = (pconf->plllock_cmp >> 8) & 0xff; + writel(data, base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2); + + data = (pconf->plllock_cmp >> 16) & 0x3; + writel(data, base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3); + + data = pconf->plllock_cnt << 1 | 0 << 3; /* plllock_rng */ + writel(data, base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN); + + data = pconf->pll_vco_count & 0xff; + writel(data, base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1); + data = (pconf->pll_vco_count >> 8) & 0xff; + writel(data, base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2); + + data = pconf->pll_kvco_count & 0xff; + writel(data, base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1); + data = (pconf->pll_kvco_count >> 8) & 0x3; + writel(data, base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2); + + /* + * High nibble configures the post divider internal to the VCO. It's + * fixed to divide by 1 for now. + * + * 0: divided by 1 + * 1: divided by 2 + * 2: divided by 4 + * 3: divided by 8 + */ + writel(0 << 4 | 3, base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV); + + if (pconf->ssc_en) + pll_db_commit_ssc(pll, pconf); + + wmb(); /* make sure register committed */ +} + +/* + * VCO clock Callbacks + */ +static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw); + struct dsi_pll_config conf; + + DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->phy->id, rate, + parent_rate); + + dsi_pll_14nm_config_init(&conf); + conf.vco_current_rate = rate; + + pll_14nm_dec_frac_calc(pll_14nm, &conf); + + if (conf.ssc_en) + pll_14nm_ssc_calc(pll_14nm, &conf); + + pll_14nm_calc_vco_count(pll_14nm, &conf); + + /* commit the slave DSI PLL registers if we're master. Note that we + * don't lock the slave PLL.
We just ensure that the PLL/PHY registers + * of the master and slave are identical + */ + if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) { + struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave; + + pll_db_commit_14nm(pll_14nm_slave, &conf); + } + + pll_db_commit_14nm(pll_14nm, &conf); + + return 0; +} + +static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw); + void __iomem *base = pll_14nm->phy->pll_base; + u64 vco_rate, multiplier = BIT(20); + u32 div_frac_start; + u32 dec_start; + u64 ref_clk = parent_rate; + + dec_start = readl(base + REG_DSI_14nm_PHY_PLL_DEC_START); + dec_start &= 0x0ff; + + DBG("dec_start = %x", dec_start); + + div_frac_start = (readl(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3) + & 0xf) << 16; + div_frac_start |= (readl(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2) + & 0xff) << 8; + div_frac_start |= readl(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1) + & 0xff; + + DBG("div_frac_start = %x", div_frac_start); + + vco_rate = ref_clk * dec_start; + + vco_rate += ((ref_clk * div_frac_start) / multiplier); + + /* + * Recalculating the rate from dec_start and frac_start doesn't end up + * at the rate we originally set. Convert the freq to kHz, round it up + * and convert it back to Hz. + */ + vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000; + + DBG("returning vco rate = %lu", (unsigned long)vco_rate); + + return (unsigned long)vco_rate; +} + +static int dsi_pll_14nm_vco_prepare(struct clk_hw *hw) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw); + void __iomem *base = pll_14nm->phy->pll_base; + void __iomem *cmn_base = pll_14nm->phy->base; + bool locked; + + DBG(""); + + if (unlikely(pll_14nm->phy->pll_on)) + return 0; + + if (dsi_pll_14nm_vco_recalc_rate(hw, VCO_REF_CLK_RATE) == 0) + dsi_pll_14nm_vco_set_rate(hw, pll_14nm->phy->cfg->min_pll_rate, VCO_REF_CLK_RATE); + + writel(0x10, base + REG_DSI_14nm_PHY_PLL_VREF_CFG1); + writel(1, cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL); + + locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS, + POLL_TIMEOUT_US); + + if (unlikely(!locked)) { + DRM_DEV_ERROR(&pll_14nm->phy->pdev->dev, "DSI PLL lock failed\n"); + return -EINVAL; + } + + DBG("DSI PLL lock success"); + pll_14nm->phy->pll_on = true; + + return 0; +} + +static void dsi_pll_14nm_vco_unprepare(struct clk_hw *hw) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw); + void __iomem *cmn_base = pll_14nm->phy->base; + + DBG(""); + + if (unlikely(!pll_14nm->phy->pll_on)) + return; + + writel(0, cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL); + + pll_14nm->phy->pll_on = false; +} + +static int dsi_pll_14nm_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw); + + req->rate = clamp_t(unsigned long, req->rate, + pll_14nm->phy->cfg->min_pll_rate, pll_14nm->phy->cfg->max_pll_rate); + + return 0; +} + +static const struct clk_ops clk_ops_dsi_pll_14nm_vco = { + .determine_rate = dsi_pll_14nm_clk_determine_rate, + .set_rate = dsi_pll_14nm_vco_set_rate, + .recalc_rate = dsi_pll_14nm_vco_recalc_rate, + .prepare = dsi_pll_14nm_vco_prepare, + .unprepare = dsi_pll_14nm_vco_unprepare, +}; + +/* + * N1 and N2 post-divider clock callbacks + */ +#define div_mask(width) ((1 << (width)) - 1) +static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw); + struct dsi_pll_14nm *pll_14nm = postdiv->pll;
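/*
 * Note (not part of this patch): the N1/N2 post-divider fields are
 * CLK_DIVIDER_ONE_BASED, i.e. the register value is the divisor itself
 * (1..15) rather than divisor minus one, so the divider_recalc_rate()
 * call below reduces to roughly
 *
 *	u32 n = (readl(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift) & 0xf;
 *
 *	return n ? parent_rate / n : parent_rate;
 */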
void __iomem *base = pll_14nm->phy->base; + u8 shift = postdiv->shift; + u8 width = postdiv->width; + u32 val; + + DBG("DSI%d PLL parent rate=%lu", pll_14nm->phy->id, parent_rate); + + val = readl(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift; + val &= div_mask(width); + + return divider_recalc_rate(hw, parent_rate, val, NULL, + postdiv->flags, width); +} + +static int dsi_pll_14nm_postdiv_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw); + struct dsi_pll_14nm *pll_14nm = postdiv->pll; + + DBG("DSI%d PLL rate=%lu", pll_14nm->phy->id, req->rate); + + req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, + NULL, + postdiv->width, + postdiv->flags); + + return 0; +} + +static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw); + struct dsi_pll_14nm *pll_14nm = postdiv->pll; + void __iomem *base = pll_14nm->phy->base; + spinlock_t *lock = &pll_14nm->postdiv_lock; + u8 shift = postdiv->shift; + u8 width = postdiv->width; + unsigned int value; + unsigned long flags = 0; + u32 val; + + DBG("DSI%d PLL rate=%lu parent rate=%lu", pll_14nm->phy->id, rate, + parent_rate); + + value = divider_get_val(rate, parent_rate, NULL, postdiv->width, + postdiv->flags); + + spin_lock_irqsave(lock, flags); + + val = readl(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0); + val &= ~(div_mask(width) << shift); + + val |= value << shift; + writel(val, base + REG_DSI_14nm_PHY_CMN_CLK_CFG0); + + /* If we're master in bonded DSI mode, then the slave PLL's post-dividers + * follow the master's post-dividers + */ + if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) { + struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave; + void __iomem *slave_base = pll_14nm_slave->phy->base; + + writel(val, slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0); + } + + spin_unlock_irqrestore(lock, flags); + + return 0; +} + +static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = { + .recalc_rate = dsi_pll_14nm_postdiv_recalc_rate, + .determine_rate = dsi_pll_14nm_postdiv_determine_rate, + .set_rate = dsi_pll_14nm_postdiv_set_rate, +}; + +/* + * PLL Callbacks + */ + +static void dsi_14nm_pll_save_state(struct msm_dsi_phy *phy) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw); + struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state; + void __iomem *cmn_base = pll_14nm->phy->base; + u32 data; + + data = readl(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0); + + cached_state->n1postdiv = data & 0xf; + cached_state->n2postdiv = (data >> 4) & 0xf; + + DBG("DSI%d PLL save state %x %x", pll_14nm->phy->id, + cached_state->n1postdiv, cached_state->n2postdiv); + + cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw); +} + +static int dsi_14nm_pll_restore_state(struct msm_dsi_phy *phy) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw); + struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state; + void __iomem *cmn_base = pll_14nm->phy->base; + u32 data; + int ret; + + ret = dsi_pll_14nm_vco_set_rate(phy->vco_hw, + cached_state->vco_rate, 0); + if (ret) { + DRM_DEV_ERROR(&pll_14nm->phy->pdev->dev, + "restore vco rate failed. 
ret=%d\n", ret); + return ret; + } + + data = cached_state->n1postdiv | (cached_state->n2postdiv << 4); + + DBG("DSI%d PLL restore state %x %x", pll_14nm->phy->id, + cached_state->n1postdiv, cached_state->n2postdiv); + + writel(data, cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0); + + /* also restore post-dividers for slave DSI PLL */ + if (phy->usecase == MSM_DSI_PHY_MASTER) { + struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave; + void __iomem *slave_base = pll_14nm_slave->phy->base; + + writel(data, slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0); + } + + return 0; +} + +static int dsi_14nm_set_usecase(struct msm_dsi_phy *phy) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw); + void __iomem *base = phy->pll_base; + u32 clkbuflr_en, bandgap = 0; + + switch (phy->usecase) { + case MSM_DSI_PHY_STANDALONE: + clkbuflr_en = 0x1; + break; + case MSM_DSI_PHY_MASTER: + clkbuflr_en = 0x3; + pll_14nm->slave = pll_14nm_list[(pll_14nm->phy->id + 1) % DSI_MAX]; + break; + case MSM_DSI_PHY_SLAVE: + clkbuflr_en = 0x0; + bandgap = 0x3; + break; + default: + return -EINVAL; + } + + writel(clkbuflr_en, base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN); + if (bandgap) + writel(bandgap, base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP); + + return 0; +} + +static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm, + const char *name, + const struct clk_hw *parent_hw, + unsigned long flags, + u8 shift) +{ + struct dsi_pll_14nm_postdiv *pll_postdiv; + struct device *dev = &pll_14nm->phy->pdev->dev; + struct clk_init_data postdiv_init = { + .parent_hws = (const struct clk_hw *[]) { parent_hw }, + .num_parents = 1, + .name = name, + .flags = flags, + .ops = &clk_ops_dsi_pll_14nm_postdiv, + }; + int ret; + + pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL); + if (!pll_postdiv) + return ERR_PTR(-ENOMEM); + + pll_postdiv->pll = pll_14nm; + pll_postdiv->shift = shift; + /* both N1 and N2 postdividers are 4 bits wide */ + pll_postdiv->width = 4; + /* range of each divider is from 1 to 15 */ + pll_postdiv->flags = CLK_DIVIDER_ONE_BASED; + pll_postdiv->hw.init = &postdiv_init; + + ret = devm_clk_hw_register(dev, &pll_postdiv->hw); + if (ret) + return ERR_PTR(ret); + + return &pll_postdiv->hw; +} + +static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm, struct clk_hw **provided_clocks) +{ + char clk_name[32]; + struct clk_init_data vco_init = { + .parent_data = &(const struct clk_parent_data) { + .fw_name = "ref", + }, + .num_parents = 1, + .name = clk_name, + .flags = CLK_IGNORE_UNUSED, + .ops = &clk_ops_dsi_pll_14nm_vco, + }; + struct device *dev = &pll_14nm->phy->pdev->dev; + struct clk_hw *hw, *n1_postdiv, *n1_postdivby2; + int ret; + + DBG("DSI%d", pll_14nm->phy->id); + + snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_14nm->phy->id); + pll_14nm->clk_hw.init = &vco_init; + + ret = devm_clk_hw_register(dev, &pll_14nm->clk_hw); + if (ret) + return ret; + + snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdiv_clk", pll_14nm->phy->id); + + /* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */ + n1_postdiv = pll_14nm_postdiv_register(pll_14nm, clk_name, + &pll_14nm->clk_hw, CLK_SET_RATE_PARENT, 0); + if (IS_ERR(n1_postdiv)) + return PTR_ERR(n1_postdiv); + + snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_14nm->phy->id); + + /* DSI Byte clock = VCO_CLK / N1 / 8 */ + hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name, + n1_postdiv, CLK_SET_RATE_PARENT, 1, 8); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + provided_clocks[DSI_BYTE_PLL_CLK] = hw; + + 
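/*
 * Note (not part of this patch): per the clock diagram at the top of this
 * file, the two provided clocks derive from the VCO as
 *
 *	dsi0pllbyte = vco / N1 / 8
 *	dsi0pll     = vco / N1 / 2 / N2
 *
 * so, for example, a 2.0 GHz VCO with N1 = 2 and N2 = 5 gives a 125 MHz
 * byte clock and a 100 MHz pixel clock.
 */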
snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdivby2_clk", pll_14nm->phy->id); + + /* + * Skip the mux for now, force DSICLK_SEL to 1, Add a /2 divider + * on the way. Don't let it set parent. + */ + n1_postdivby2 = devm_clk_hw_register_fixed_factor_parent_hw(dev, + clk_name, n1_postdiv, 0, 1, 2); + if (IS_ERR(n1_postdivby2)) + return PTR_ERR(n1_postdivby2); + + snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_14nm->phy->id); + + /* DSI pixel clock = VCO_CLK / N1 / 2 / N2 + * This is the output of N2 post-divider, bits 4-7 in + * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent. + */ + hw = pll_14nm_postdiv_register(pll_14nm, clk_name, n1_postdivby2, + 0, 4); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + provided_clocks[DSI_PIXEL_PLL_CLK] = hw; + + return 0; +} + +static int dsi_pll_14nm_init(struct msm_dsi_phy *phy) +{ + struct platform_device *pdev = phy->pdev; + struct dsi_pll_14nm *pll_14nm; + int ret; + + if (!pdev) + return -ENODEV; + + pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL); + if (!pll_14nm) + return -ENOMEM; + + DBG("PLL%d", phy->id); + + pll_14nm_list[phy->id] = pll_14nm; + + spin_lock_init(&pll_14nm->postdiv_lock); + + pll_14nm->phy = phy; + + ret = pll_14nm_register(pll_14nm, phy->provided_clocks->hws); + if (ret) { + DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret); + return ret; + } + + phy->vco_hw = &pll_14nm->clk_hw; + + return 0; +} + +static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy, + struct msm_dsi_dphy_timing *timing, + int lane_idx) +{ + void __iomem *base = phy->lane_base; + bool clk_ln = (lane_idx == PHY_14NM_CKLN_IDX); + u32 zero = clk_ln ? timing->clk_zero : timing->hs_zero; + u32 prepare = clk_ln ? timing->clk_prepare : timing->hs_prepare; + u32 trail = clk_ln ? timing->clk_trail : timing->hs_trail; + u32 rqst = clk_ln ? timing->hs_rqst_ckln : timing->hs_rqst; + u32 prep_dly = clk_ln ? timing->hs_prep_dly_ckln : timing->hs_prep_dly; + u32 halfbyte_en = clk_ln ? timing->hs_halfbyte_en_ckln : + timing->hs_halfbyte_en; + + writel(DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(timing->hs_exit), + base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(lane_idx)); + writel(DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(zero), + base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(lane_idx)); + writel(DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(prepare), + base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(lane_idx)); + writel(DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(trail), + base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(lane_idx)); + writel(DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(rqst), + base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(lane_idx)); + writel(DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(prep_dly), + base + REG_DSI_14nm_PHY_LN_CFG0(lane_idx)); + writel(halfbyte_en ? 
DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN : 0, + base + REG_DSI_14nm_PHY_LN_CFG1(lane_idx)); + writel(DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(timing->ta_go) | + DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(timing->ta_sure), + base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(lane_idx)); + writel(DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(timing->ta_get), + base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(lane_idx)); + writel(DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(0xa0), + base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(lane_idx)); +} + +static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, + struct msm_dsi_phy_clk_request *clk_req) +{ + struct msm_dsi_dphy_timing *timing = &phy->timing; + u32 data; + int i; + int ret; + void __iomem *base = phy->base; + void __iomem *lane_base = phy->lane_base; + u32 glbl_test_ctrl; + + if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) { + DRM_DEV_ERROR(&phy->pdev->dev, + "%s: D-PHY timing calculation failed\n", + __func__); + return -EINVAL; + } + + data = 0x1c; + if (phy->usecase != MSM_DSI_PHY_STANDALONE) + data |= DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(32); + writel(data, base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL); + + writel(0x1, base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL); + + /* 4 data lanes + 1 clk lane configuration */ + for (i = 0; i < 5; i++) { + writel(0x1d, lane_base + REG_DSI_14nm_PHY_LN_VREG_CNTRL(i)); + + writel(0xff, lane_base + REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(i)); + writel(i == PHY_14NM_CKLN_IDX ? 0x00 : 0x06, + lane_base + REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(i)); + + writel(i == PHY_14NM_CKLN_IDX ? 0x8f : 0x0f, + lane_base + REG_DSI_14nm_PHY_LN_CFG3(i)); + writel(0x10, lane_base + REG_DSI_14nm_PHY_LN_CFG2(i)); + writel(0, lane_base + REG_DSI_14nm_PHY_LN_TEST_DATAPATH(i)); + writel(0x88, lane_base + REG_DSI_14nm_PHY_LN_TEST_STR(i)); + + dsi_14nm_dphy_set_timing(phy, timing, i); + } + + /* Make sure PLL is not started */ + writel(0x00, base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL); + + wmb(); /* make sure everything is written before reset and enable */ + + /* reset digital block */ + writel(0x80, base + REG_DSI_14nm_PHY_CMN_CTRL_1); + wmb(); /* ensure reset is asserted */ + udelay(100); + writel(0x00, base + REG_DSI_14nm_PHY_CMN_CTRL_1); + + glbl_test_ctrl = readl(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL); + if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE) + glbl_test_ctrl |= DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL; + else + glbl_test_ctrl &= ~DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL; + writel(glbl_test_ctrl, base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL); + ret = dsi_14nm_set_usecase(phy); + if (ret) { + DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n", + __func__, ret); + return ret; + } + + /* Remove power down from PLL and all lanes */ + writel(0xff, base + REG_DSI_14nm_PHY_CMN_CTRL_0); + + return 0; +} + +static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy) +{ + writel(0, phy->base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL); + writel(0, phy->base + REG_DSI_14nm_PHY_CMN_CTRL_0); + + /* ensure that the phy is completely disabled */ + wmb(); +} + +static const struct regulator_bulk_data dsi_phy_14nm_17mA_regulators[] = { + { .supply = "vcca", .init_load_uA = 17000 }, +}; + +static const struct regulator_bulk_data dsi_phy_14nm_73p4mA_regulators[] = { + { .supply = "vcca", .init_load_uA = 73400 }, +}; + +static const struct regulator_bulk_data dsi_phy_14nm_36mA_regulators[] = { + { .supply = "vdda", .init_load_uA = 36000 }, +}; + +const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = { + .has_phy_lane = true, + .regulator_data = 
dsi_phy_14nm_17mA_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators), + .ops = { + .enable = dsi_14nm_phy_enable, + .disable = dsi_14nm_phy_disable, + .pll_init = dsi_pll_14nm_init, + .save_pll_state = dsi_14nm_pll_save_state, + .restore_pll_state = dsi_14nm_pll_restore_state, + }, + .min_pll_rate = VCO_MIN_RATE, + .max_pll_rate = VCO_MAX_RATE, + .io_start = { 0x994400, 0x996400 }, + .num_dsi_phy = 2, +}; + +const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = { + .has_phy_lane = true, + .regulator_data = dsi_phy_14nm_73p4mA_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_14nm_73p4mA_regulators), + .ops = { + .enable = dsi_14nm_phy_enable, + .disable = dsi_14nm_phy_disable, + .pll_init = dsi_pll_14nm_init, + .save_pll_state = dsi_14nm_pll_save_state, + .restore_pll_state = dsi_14nm_pll_restore_state, + }, + .min_pll_rate = VCO_MIN_RATE, + .max_pll_rate = VCO_MAX_RATE, + .io_start = { 0xc994400, 0xc996400 }, + .num_dsi_phy = 2, +}; + +const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs = { + .has_phy_lane = true, + .regulator_data = dsi_phy_14nm_17mA_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators), + .ops = { + .enable = dsi_14nm_phy_enable, + .disable = dsi_14nm_phy_disable, + .pll_init = dsi_pll_14nm_init, + .save_pll_state = dsi_14nm_pll_save_state, + .restore_pll_state = dsi_14nm_pll_restore_state, + }, + .min_pll_rate = VCO_MIN_RATE, + .max_pll_rate = VCO_MAX_RATE, + .io_start = { 0x1a94400, 0x1a96400 }, + .num_dsi_phy = 2, +}; + +const struct msm_dsi_phy_cfg dsi_phy_14nm_2290_cfgs = { + .has_phy_lane = true, + .ops = { + .enable = dsi_14nm_phy_enable, + .disable = dsi_14nm_phy_disable, + .pll_init = dsi_pll_14nm_init, + .save_pll_state = dsi_14nm_pll_save_state, + .restore_pll_state = dsi_14nm_pll_restore_state, + }, + .min_pll_rate = VCO_MIN_RATE, + .max_pll_rate = VCO_MAX_RATE, + .io_start = { 0x5e94400 }, + .num_dsi_phy = 1, +}; + +const struct msm_dsi_phy_cfg dsi_phy_14nm_6150_cfgs = { + .has_phy_lane = true, + .regulator_data = dsi_phy_14nm_36mA_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_14nm_36mA_regulators), + .ops = { + .enable = dsi_14nm_phy_enable, + .disable = dsi_14nm_phy_disable, + .pll_init = dsi_pll_14nm_init, + .save_pll_state = dsi_14nm_pll_save_state, + .restore_pll_state = dsi_14nm_pll_restore_state, + }, + .min_pll_rate = VCO_MIN_RATE, + .max_pll_rate = VCO_MAX_RATE, + .io_start = { 0xae94400 }, + .num_dsi_phy = 1, +}; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c new file mode 100644 index 000000000000..cee34b76c3d2 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. 
+ */ + +#include "dsi_phy.h" +#include "dsi.xml.h" +#include "dsi_phy_20nm.xml.h" + +static void dsi_20nm_dphy_set_timing(struct msm_dsi_phy *phy, + struct msm_dsi_dphy_timing *timing) +{ + void __iomem *base = phy->base; + + writel(DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero), + base + REG_DSI_20nm_PHY_TIMING_CTRL_0); + writel(DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail), + base + REG_DSI_20nm_PHY_TIMING_CTRL_1); + writel(DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare), + base + REG_DSI_20nm_PHY_TIMING_CTRL_2); + if (timing->clk_zero & BIT(8)) + writel(DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8, + base + REG_DSI_20nm_PHY_TIMING_CTRL_3); + writel(DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit), + base + REG_DSI_20nm_PHY_TIMING_CTRL_4); + writel(DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero), + base + REG_DSI_20nm_PHY_TIMING_CTRL_5); + writel(DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare), + base + REG_DSI_20nm_PHY_TIMING_CTRL_6); + writel(DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail), + base + REG_DSI_20nm_PHY_TIMING_CTRL_7); + writel(DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst), + base + REG_DSI_20nm_PHY_TIMING_CTRL_8); + writel(DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) | + DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure), + base + REG_DSI_20nm_PHY_TIMING_CTRL_9); + writel(DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get), + base + REG_DSI_20nm_PHY_TIMING_CTRL_10); + writel(DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0), + base + REG_DSI_20nm_PHY_TIMING_CTRL_11); +} + +static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) +{ + void __iomem *base = phy->reg_base; + + if (!enable) { + writel(0, base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG); + return; + } + + if (phy->regulator_ldo_mode) { + writel(0x1d, phy->base + REG_DSI_20nm_PHY_LDO_CNTRL); + return; + } + + /* non LDO mode */ + writel(0x03, base + REG_DSI_20nm_PHY_REGULATOR_CTRL_1); + writel(0x03, base + REG_DSI_20nm_PHY_REGULATOR_CTRL_2); + writel(0x00, base + REG_DSI_20nm_PHY_REGULATOR_CTRL_3); + writel(0x20, base + REG_DSI_20nm_PHY_REGULATOR_CTRL_4); + writel(0x01, base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG); + writel(0x00, phy->base + REG_DSI_20nm_PHY_LDO_CNTRL); + writel(0x03, base + REG_DSI_20nm_PHY_REGULATOR_CTRL_0); +} + +static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, + struct msm_dsi_phy_clk_request *clk_req) +{ + struct msm_dsi_dphy_timing *timing = &phy->timing; + int i; + void __iomem *base = phy->base; + u32 cfg_4[4] = {0x20, 0x40, 0x20, 0x00}; + u32 val; + + DBG(""); + + if (msm_dsi_dphy_timing_calc(timing, clk_req)) { + DRM_DEV_ERROR(&phy->pdev->dev, + "%s: D-PHY timing calculation failed\n", __func__); + return -EINVAL; + } + + dsi_20nm_phy_regulator_ctrl(phy, true); + + writel(0xff, base + REG_DSI_20nm_PHY_STRENGTH_0); + + val = readl(base + REG_DSI_20nm_PHY_GLBL_TEST_CTRL); + if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_STANDALONE) + val |= DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL; + else + val &= ~DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL; + writel(val, base + REG_DSI_20nm_PHY_GLBL_TEST_CTRL); + + for (i = 0; i < 4; i++) { + writel((i >> 1) * 0x40, base + REG_DSI_20nm_PHY_LN_CFG_3(i)); + writel(0x01, base + REG_DSI_20nm_PHY_LN_TEST_STR_0(i)); + writel(0x46, base + REG_DSI_20nm_PHY_LN_TEST_STR_1(i)); + writel(0x02, base + REG_DSI_20nm_PHY_LN_CFG_0(i)); + writel(0xa0, base + REG_DSI_20nm_PHY_LN_CFG_1(i)); + writel(cfg_4[i], base + REG_DSI_20nm_PHY_LN_CFG_4(i)); + } + + writel(0x80, base + 
REG_DSI_20nm_PHY_LNCK_CFG_3); + writel(0x01, base + REG_DSI_20nm_PHY_LNCK_TEST_STR0); + writel(0x46, base + REG_DSI_20nm_PHY_LNCK_TEST_STR1); + writel(0x00, base + REG_DSI_20nm_PHY_LNCK_CFG_0); + writel(0xa0, base + REG_DSI_20nm_PHY_LNCK_CFG_1); + writel(0x00, base + REG_DSI_20nm_PHY_LNCK_CFG_2); + writel(0x00, base + REG_DSI_20nm_PHY_LNCK_CFG_4); + + dsi_20nm_dphy_set_timing(phy, timing); + + writel(0x00, base + REG_DSI_20nm_PHY_CTRL_1); + + writel(0x06, base + REG_DSI_20nm_PHY_STRENGTH_1); + + /* make sure everything is written before enable */ + wmb(); + writel(0x7f, base + REG_DSI_20nm_PHY_CTRL_0); + + return 0; +} + +static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy) +{ + writel(0, phy->base + REG_DSI_20nm_PHY_CTRL_0); + dsi_20nm_phy_regulator_ctrl(phy, false); +} + +static const struct regulator_bulk_data dsi_phy_20nm_regulators[] = { + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ + { .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */ +}; + +const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = { + .has_phy_regulator = true, + .regulator_data = dsi_phy_20nm_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_20nm_regulators), + .ops = { + .enable = dsi_20nm_phy_enable, + .disable = dsi_20nm_phy_disable, + }, + .io_start = { 0xfd998500, 0xfd9a0500 }, + .num_dsi_phy = 2, +}; + diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c new file mode 100644 index 000000000000..d00e415b9a99 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c @@ -0,0 +1,956 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + */ + +#include <dt-bindings/clock/qcom,dsi-phy-28nm.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> + +#include "dsi_phy.h" +#include "dsi.xml.h" +#include "dsi_phy_28nm.xml.h" + +/* + * DSI PLL 28nm - clock diagram (eg: DSI0): + * + * dsi0analog_postdiv_clk + * | dsi0indirect_path_div2_clk + * | | + * +------+ | +----+ | |\ dsi0byte_mux + * dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \ | + * | +------+ +----+ | m| | +----+ + * | | u|--o--| /4 |-- dsi0pllbyte + * | | x| +----+ + * o--------------------------| / + * | |/ + * | +------+ + * o----------| DIV3 |------------------------- dsi0pll + * +------+ + */ + +#define POLL_MAX_READS 10 +#define POLL_TIMEOUT_US 50 + +#define VCO_REF_CLK_RATE 19200000 +#define VCO_MIN_RATE 350000000 +#define VCO_MAX_RATE 750000000 + +/* v2.0.0 28nm LP implementation */ +#define DSI_PHY_28NM_QUIRK_PHY_LP BIT(0) +#define DSI_PHY_28NM_QUIRK_PHY_8226 BIT(1) + +#define LPFR_LUT_SIZE 10 +struct lpfr_cfg { + unsigned long vco_rate; + u32 resistance; +}; + +/* Loop filter resistance: */ +static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = { + { 479500000, 8 }, + { 480000000, 11 }, + { 575500000, 8 }, + { 576000000, 12 }, + { 610500000, 8 }, + { 659500000, 9 }, + { 671500000, 10 }, + { 672000000, 14 }, + { 708500000, 10 }, + { 750000000, 11 }, +}; + +struct pll_28nm_cached_state { + unsigned long vco_rate; + u8 postdiv3; + u8 postdiv1; + u8 byte_mux; +}; + +struct dsi_pll_28nm { + struct clk_hw clk_hw; + + struct msm_dsi_phy *phy; + + struct pll_28nm_cached_state cached_state; +}; + +#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, clk_hw) + +static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm, + u32 nb_tries, u32 timeout_us) +{ + bool pll_locked = false; + u32 val; + + while (nb_tries--) { + val = readl(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_STATUS); + pll_locked = !!(val & 
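/*
 * Example (sketch, not part of this patch): lpfr_lut[] above is scanned for
 * the first entry whose vco_rate is >= the requested rate, and that entry's
 * resistance is programmed into LPFR_CFG (see dsi_pll_28nm_clk_set_rate()
 * below). As a standalone helper the lookup would be:
 *
 *	static int lpfr_resistance(unsigned long vco_rate)
 *	{
 *		int i;
 *
 *		for (i = 0; i < LPFR_LUT_SIZE; i++)
 *			if (vco_rate <= lpfr_lut[i].vco_rate)
 *				return lpfr_lut[i].resistance;
 *
 *		return -EINVAL;	// above VCO_MAX_RATE
 *	}
 */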
DSI_28nm_PHY_PLL_STATUS_PLL_RDY); + + if (pll_locked) + break; + + udelay(timeout_us); + } + DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* "); + + return pll_locked; +} + +static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm) +{ + void __iomem *base = pll_28nm->phy->pll_base; + + /* + * Add HW recommended delays after toggling the software + * reset bit off and back on. + */ + writel(DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, base + REG_DSI_28nm_PHY_PLL_TEST_CFG); + udelay(1); + writel(0, base + REG_DSI_28nm_PHY_PLL_TEST_CFG); + udelay(1); +} + +/* + * Clock Callbacks + */ +static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw); + struct device *dev = &pll_28nm->phy->pdev->dev; + void __iomem *base = pll_28nm->phy->pll_base; + unsigned long div_fbx1000, gen_vco_clk; + u32 refclk_cfg, frac_n_mode, frac_n_value; + u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3; + u32 cal_cfg10, cal_cfg11; + u32 rem; + int i; + + VERB("rate=%lu, parent's=%lu", rate, parent_rate); + + /* Force postdiv2 to be div-4 */ + writel(3, base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG); + + /* Configure the Loop filter resistance */ + for (i = 0; i < LPFR_LUT_SIZE; i++) + if (rate <= lpfr_lut[i].vco_rate) + break; + if (i == LPFR_LUT_SIZE) { + DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n", + rate); + return -EINVAL; + } + writel(lpfr_lut[i].resistance, base + REG_DSI_28nm_PHY_PLL_LPFR_CFG); + + /* Loop filter capacitance values : c1 and c2 */ + writel(0x70, base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG); + writel(0x15, base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG); + + rem = rate % VCO_REF_CLK_RATE; + if (rem) { + refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR; + frac_n_mode = 1; + div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500); + gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500); + } else { + refclk_cfg = 0x0; + frac_n_mode = 0; + div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000); + gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000); + } + + DBG("refclk_cfg = %d", refclk_cfg); + + rem = div_fbx1000 % 1000; + frac_n_value = (rem << 16) / 1000; + + DBG("div_fb = %lu", div_fbx1000); + DBG("frac_n_value = %d", frac_n_value); + + DBG("Generated VCO Clock: %lu", gen_vco_clk); + rem = 0; + sdm_cfg1 = readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1); + sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK; + if (frac_n_mode) { + sdm_cfg0 = 0x0; + sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0); + sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET( + (u32)(((div_fbx1000 / 1000) & 0x3f) - 1)); + sdm_cfg3 = frac_n_value >> 8; + sdm_cfg2 = frac_n_value & 0xff; + } else { + sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP; + sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV( + (u32)(((div_fbx1000 / 1000) & 0x3f) - 1)); + sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0); + sdm_cfg2 = 0; + sdm_cfg3 = 0; + } + + DBG("sdm_cfg0=%d", sdm_cfg0); + DBG("sdm_cfg1=%d", sdm_cfg1); + DBG("sdm_cfg2=%d", sdm_cfg2); + DBG("sdm_cfg3=%d", sdm_cfg3); + + cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000)); + cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000); + DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11); + + writel(0x02, base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG); + writel(0x2b, base + REG_DSI_28nm_PHY_PLL_CAL_CFG3); + writel(0x06, base + REG_DSI_28nm_PHY_PLL_CAL_CFG4); + writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2); + + writel(sdm_cfg1, base + REG_DSI_28nm_PHY_PLL_SDM_CFG1); + 
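/*
 * Worked example (not part of this patch): a rate that is not an exact
 * multiple of the 19.2 MHz reference takes the frac-N path above. For
 * rate = 560 MHz:
 *
 *	div_fbx1000  = 560000000 / (19200000 / 500) = 14583
 *	gen_vco_clk  = 14583 * 38400 = 559987200
 *	frac_n_value = ((14583 % 1000) << 16) / 1000 = 38207
 *	DC_OFFSET    = (14583 / 1000) - 1 = 13
 */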
writel(DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2), + base + REG_DSI_28nm_PHY_PLL_SDM_CFG2); + writel(DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3), + base + REG_DSI_28nm_PHY_PLL_SDM_CFG3); + writel(0, base + REG_DSI_28nm_PHY_PLL_SDM_CFG4); + + /* Add hardware recommended delay for correct PLL configuration */ + if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP) + udelay(1000); + else + udelay(1); + + writel(refclk_cfg, base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG); + writel(0x00, base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG); + writel(0x31, base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG); + writel(sdm_cfg0, base + REG_DSI_28nm_PHY_PLL_SDM_CFG0); + writel(0x12, base + REG_DSI_28nm_PHY_PLL_CAL_CFG0); + writel(0x30, base + REG_DSI_28nm_PHY_PLL_CAL_CFG6); + writel(0x00, base + REG_DSI_28nm_PHY_PLL_CAL_CFG7); + writel(0x60, base + REG_DSI_28nm_PHY_PLL_CAL_CFG8); + writel(0x00, base + REG_DSI_28nm_PHY_PLL_CAL_CFG9); + writel(cal_cfg10 & 0xff, base + REG_DSI_28nm_PHY_PLL_CAL_CFG10); + writel(cal_cfg11 & 0xff, base + REG_DSI_28nm_PHY_PLL_CAL_CFG11); + writel(0x20, base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG); + + return 0; +} + +static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw); + + return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS, + POLL_TIMEOUT_US); +} + +static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw); + void __iomem *base = pll_28nm->phy->pll_base; + u32 sdm0, doubler, sdm_byp_div; + u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3; + u32 ref_clk = VCO_REF_CLK_RATE; + unsigned long vco_rate; + + VERB("parent_rate=%lu", parent_rate); + + /* Check to see if the ref clk doubler is enabled */ + doubler = readl(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) & + DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR; + ref_clk += (doubler * VCO_REF_CLK_RATE); + + /* see if it is integer mode or sdm mode */ + sdm0 = readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0); + if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) { + /* integer mode */ + sdm_byp_div = FIELD( + readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0), + DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1; + vco_rate = ref_clk * sdm_byp_div; + } else { + /* sdm mode */ + sdm_dc_off = FIELD( + readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1), + DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET); + DBG("sdm_dc_off = %d", sdm_dc_off); + sdm2 = FIELD(readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2), + DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0); + sdm3 = FIELD(readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3), + DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8); + sdm_freq_seed = (sdm3 << 8) | sdm2; + DBG("sdm_freq_seed = %d", sdm_freq_seed); + + vco_rate = (ref_clk * (sdm_dc_off + 1)) + + mult_frac(ref_clk, sdm_freq_seed, BIT(16)); + DBG("vco rate = %lu", vco_rate); + } + + DBG("returning vco rate = %lu", vco_rate); + + return vco_rate; +} + +static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm) +{ + struct device *dev = &pll_28nm->phy->pdev->dev; + void __iomem *base = pll_28nm->phy->pll_base; + u32 max_reads = 5, timeout_us = 100; + bool locked; + u32 val; + int i; + + DBG("id=%d", pll_28nm->phy->id); + + pll_28nm_software_reset(pll_28nm); + + /* + * PLL power up sequence. + * Add necessary delays recommended by hardware. 
+ */ + val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + udelay(1); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + udelay(200); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + udelay(500); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + udelay(600); + + for (i = 0; i < 2; i++) { + /* DSI Uniphy lock detect setting */ + writel(0x0c, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2); + udelay(100); + writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2); + + /* poll for PLL ready status */ + locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, + timeout_us); + if (locked) + break; + + pll_28nm_software_reset(pll_28nm); + + /* + * PLL power up sequence. + * Add necessary delays recommended by hardware. + */ + val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + udelay(1); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + udelay(200); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + udelay(250); + + val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + udelay(200); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + udelay(500); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + udelay(600); + } + + if (unlikely(!locked)) + DRM_DEV_ERROR(dev, "DSI PLL lock failed\n"); + else + DBG("DSI PLL Lock success"); + + return locked ? 0 : -EINVAL; +} + +static int dsi_pll_28nm_vco_prepare_hpm(struct clk_hw *hw) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw); + int i, ret; + + if (unlikely(pll_28nm->phy->pll_on)) + return 0; + + for (i = 0; i < 3; i++) { + ret = _dsi_pll_28nm_vco_prepare_hpm(pll_28nm); + if (!ret) { + pll_28nm->phy->pll_on = true; + return 0; + } + } + + return ret; +} + +static int dsi_pll_28nm_vco_prepare_8226(struct clk_hw *hw) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw); + struct device *dev = &pll_28nm->phy->pdev->dev; + void __iomem *base = pll_28nm->phy->pll_base; + u32 max_reads = 5, timeout_us = 100; + bool locked; + u32 val; + int i; + + DBG("id=%d", pll_28nm->phy->id); + + pll_28nm_software_reset(pll_28nm); + + /* + * PLL power up sequence. + * Add necessary delays recommended by hardware. + */ + writel(0x34, base + REG_DSI_28nm_PHY_PLL_CAL_CFG1); + + val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + udelay(200); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + udelay(200); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B; + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + udelay(600); + + for (i = 0; i < 7; i++) { + /* DSI Uniphy lock detect setting */ + writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2); + writel(0x0c, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2); + udelay(100); + writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2); + + /* poll for PLL ready status */ + locked = pll_28nm_poll_for_ready(pll_28nm, + max_reads, timeout_us); + if (locked) + break; + + pll_28nm_software_reset(pll_28nm); + + /* + * PLL power up sequence. + * Add necessary delays recommended by hardware. 
+ */ + writel(0x00, base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG); + udelay(50); + + val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B; + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + udelay(100); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B; + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + udelay(600); + } + + if (unlikely(!locked)) + DRM_DEV_ERROR(dev, "DSI PLL lock failed\n"); + else + DBG("DSI PLL Lock success"); + + return locked ? 0 : -EINVAL; +} + +static int dsi_pll_28nm_vco_prepare_lp(struct clk_hw *hw) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw); + struct device *dev = &pll_28nm->phy->pdev->dev; + void __iomem *base = pll_28nm->phy->pll_base; + bool locked; + u32 max_reads = 10, timeout_us = 50; + u32 val; + + DBG("id=%d", pll_28nm->phy->id); + + if (unlikely(pll_28nm->phy->pll_on)) + return 0; + + pll_28nm_software_reset(pll_28nm); + + /* + * PLL power up sequence. + * Add necessary delays recommended by hardware. + */ + writel(0x34, base + REG_DSI_28nm_PHY_PLL_CAL_CFG1); + ndelay(500); + + val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + ndelay(500); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + ndelay(500); + + val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B | + DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE; + writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + ndelay(500); + + /* DSI PLL toggle lock detect setting */ + writel(0x04, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2); + ndelay(500); + writel(0x05, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2); + udelay(512); + + locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us); + + if (unlikely(!locked)) { + DRM_DEV_ERROR(dev, "DSI PLL lock failed\n"); + return -EINVAL; + } + + DBG("DSI PLL lock success"); + pll_28nm->phy->pll_on = true; + + return 0; +} + +static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw); + + DBG("id=%d", pll_28nm->phy->id); + + if (unlikely(!pll_28nm->phy->pll_on)) + return; + + writel(0, pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_GLB_CFG); + + pll_28nm->phy->pll_on = false; +} + +static int dsi_pll_28nm_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw); + + req->rate = clamp_t(unsigned long, req->rate, + pll_28nm->phy->cfg->min_pll_rate, + pll_28nm->phy->cfg->max_pll_rate); + + return 0; +} + +static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = { + .determine_rate = dsi_pll_28nm_clk_determine_rate, + .set_rate = dsi_pll_28nm_clk_set_rate, + .recalc_rate = dsi_pll_28nm_clk_recalc_rate, + .prepare = dsi_pll_28nm_vco_prepare_hpm, + .unprepare = dsi_pll_28nm_vco_unprepare, + .is_enabled = dsi_pll_28nm_clk_is_enabled, +}; + +static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = { + .determine_rate = dsi_pll_28nm_clk_determine_rate, + .set_rate = dsi_pll_28nm_clk_set_rate, + .recalc_rate = dsi_pll_28nm_clk_recalc_rate, + .prepare = dsi_pll_28nm_vco_prepare_lp, + .unprepare = dsi_pll_28nm_vco_unprepare, + .is_enabled = dsi_pll_28nm_clk_is_enabled, +}; + +static const struct clk_ops clk_ops_dsi_pll_28nm_vco_8226 = { + .determine_rate = dsi_pll_28nm_clk_determine_rate, + .set_rate = dsi_pll_28nm_clk_set_rate, + .recalc_rate = dsi_pll_28nm_clk_recalc_rate, + .prepare = dsi_pll_28nm_vco_prepare_8226, + .unprepare = dsi_pll_28nm_vco_unprepare, + .is_enabled = 
dsi_pll_28nm_clk_is_enabled, +}; + +/* + * PLL Callbacks + */ + +static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw); + struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state; + void __iomem *base = pll_28nm->phy->pll_base; + + cached_state->postdiv3 = + readl(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG); + cached_state->postdiv1 = + readl(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG); + cached_state->byte_mux = readl(base + REG_DSI_28nm_PHY_PLL_VREG_CFG); + if (dsi_pll_28nm_clk_is_enabled(phy->vco_hw)) + cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw); + else + cached_state->vco_rate = 0; +} + +static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw); + struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state; + void __iomem *base = pll_28nm->phy->pll_base; + int ret; + + ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw, + cached_state->vco_rate, 0); + if (ret) { + DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev, + "restore vco rate failed. ret=%d\n", ret); + return ret; + } + + writel(cached_state->postdiv3, base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG); + writel(cached_state->postdiv1, base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG); + writel(cached_state->byte_mux, base + REG_DSI_28nm_PHY_PLL_VREG_CFG); + + return 0; +} + +static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks) +{ + char clk_name[32]; + struct clk_init_data vco_init = { + .parent_data = &(const struct clk_parent_data) { + .fw_name = "ref", .name = "xo", + }, + .num_parents = 1, + .name = clk_name, + .flags = CLK_IGNORE_UNUSED, + }; + struct device *dev = &pll_28nm->phy->pdev->dev; + struct clk_hw *hw, *analog_postdiv, *indirect_path_div2, *byte_mux; + int ret; + + DBG("%d", pll_28nm->phy->id); + + if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP) + vco_init.ops = &clk_ops_dsi_pll_28nm_vco_lp; + else if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_8226) + vco_init.ops = &clk_ops_dsi_pll_28nm_vco_8226; + else + vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm; + + snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id); + pll_28nm->clk_hw.init = &vco_init; + ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw); + if (ret) + return ret; + + snprintf(clk_name, sizeof(clk_name), "dsi%danalog_postdiv_clk", pll_28nm->phy->id); + analog_postdiv = devm_clk_hw_register_divider_parent_hw(dev, clk_name, + &pll_28nm->clk_hw, CLK_SET_RATE_PARENT, + pll_28nm->phy->pll_base + + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG, + 0, 4, 0, NULL); + if (IS_ERR(analog_postdiv)) + return PTR_ERR(analog_postdiv); + + snprintf(clk_name, sizeof(clk_name), "dsi%dindirect_path_div2_clk", pll_28nm->phy->id); + indirect_path_div2 = devm_clk_hw_register_fixed_factor_parent_hw(dev, + clk_name, analog_postdiv, CLK_SET_RATE_PARENT, 1, 2); + if (IS_ERR(indirect_path_div2)) + return PTR_ERR(indirect_path_div2); + + snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id); + hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name, + &pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base + + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG, + 0, 8, 0, NULL); + if (IS_ERR(hw)) + return PTR_ERR(hw); + provided_clocks[DSI_PIXEL_PLL_CLK] = hw; + + snprintf(clk_name, sizeof(clk_name), "dsi%dbyte_mux", pll_28nm->phy->id); + byte_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name, + ((const struct clk_hw *[]){ + &pll_28nm->clk_hw, + indirect_path_div2, + }), 2, 
CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base + + REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL); + if (IS_ERR(byte_mux)) + return PTR_ERR(byte_mux); + + snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id); + hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name, + byte_mux, CLK_SET_RATE_PARENT, 1, 4); + if (IS_ERR(hw)) + return PTR_ERR(hw); + provided_clocks[DSI_BYTE_PLL_CLK] = hw; + + return 0; +} + +static int dsi_pll_28nm_init(struct msm_dsi_phy *phy) +{ + struct platform_device *pdev = phy->pdev; + struct dsi_pll_28nm *pll_28nm; + int ret; + + if (!pdev) + return -ENODEV; + + pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL); + if (!pll_28nm) + return -ENOMEM; + + pll_28nm->phy = phy; + + ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws); + if (ret) { + DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret); + return ret; + } + + phy->vco_hw = &pll_28nm->clk_hw; + + return 0; +} + +static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy, + struct msm_dsi_dphy_timing *timing) +{ + void __iomem *base = phy->base; + + writel(DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero), + base + REG_DSI_28nm_PHY_TIMING_CTRL_0); + writel(DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail), + base + REG_DSI_28nm_PHY_TIMING_CTRL_1); + writel(DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare), + base + REG_DSI_28nm_PHY_TIMING_CTRL_2); + if (timing->clk_zero & BIT(8)) + writel(DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8, + base + REG_DSI_28nm_PHY_TIMING_CTRL_3); + writel(DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit), + base + REG_DSI_28nm_PHY_TIMING_CTRL_4); + writel(DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero), + base + REG_DSI_28nm_PHY_TIMING_CTRL_5); + writel(DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare), + base + REG_DSI_28nm_PHY_TIMING_CTRL_6); + writel(DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail), + base + REG_DSI_28nm_PHY_TIMING_CTRL_7); + writel(DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst), + base + REG_DSI_28nm_PHY_TIMING_CTRL_8); + writel(DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) | + DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure), + base + REG_DSI_28nm_PHY_TIMING_CTRL_9); + writel(DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get), + base + REG_DSI_28nm_PHY_TIMING_CTRL_10); + writel(DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0), + base + REG_DSI_28nm_PHY_TIMING_CTRL_11); +} + +static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy) +{ + void __iomem *base = phy->reg_base; + + writel(0x0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0); + writel(1, base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG); + writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5); + writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3); + writel(0x3, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2); + writel(0x9, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1); + writel(0x7, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0); + writel(0x20, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4); + writel(0x00, phy->base + REG_DSI_28nm_PHY_LDO_CNTRL); +} + +static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy) +{ + void __iomem *base = phy->reg_base; + + writel(0x0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0); + writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG); + writel(0x7, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5); + writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3); + writel(0x1, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2); + writel(0x1, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1); + writel(0x20, base + 
REG_DSI_28nm_PHY_REGULATOR_CTRL_4); + + if (phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP) + writel(0x05, phy->base + REG_DSI_28nm_PHY_LDO_CNTRL); + else + writel(0x0d, phy->base + REG_DSI_28nm_PHY_LDO_CNTRL); +} + +static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) +{ + if (!enable) { + writel(0, phy->reg_base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG); + return; + } + + if (phy->regulator_ldo_mode) + dsi_28nm_phy_regulator_enable_ldo(phy); + else + dsi_28nm_phy_regulator_enable_dcdc(phy); +} + +static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, + struct msm_dsi_phy_clk_request *clk_req) +{ + struct msm_dsi_dphy_timing *timing = &phy->timing; + int i; + void __iomem *base = phy->base; + u32 val; + + DBG(""); + + if (msm_dsi_dphy_timing_calc(timing, clk_req)) { + DRM_DEV_ERROR(&phy->pdev->dev, + "%s: D-PHY timing calculation failed\n", + __func__); + return -EINVAL; + } + + writel(0xff, base + REG_DSI_28nm_PHY_STRENGTH_0); + + dsi_28nm_phy_regulator_ctrl(phy, true); + + dsi_28nm_dphy_set_timing(phy, timing); + + writel(0x00, base + REG_DSI_28nm_PHY_CTRL_1); + writel(0x5f, base + REG_DSI_28nm_PHY_CTRL_0); + + writel(0x6, base + REG_DSI_28nm_PHY_STRENGTH_1); + + for (i = 0; i < 4; i++) { + writel(0, base + REG_DSI_28nm_PHY_LN_CFG_0(i)); + writel(0, base + REG_DSI_28nm_PHY_LN_CFG_1(i)); + writel(0, base + REG_DSI_28nm_PHY_LN_CFG_2(i)); + writel(0, base + REG_DSI_28nm_PHY_LN_CFG_3(i)); + writel(0, base + REG_DSI_28nm_PHY_LN_CFG_4(i)); + writel(0, base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i)); + writel(0, base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i)); + writel(0x1, base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i)); + writel(0x97, base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i)); + } + + writel(0, base + REG_DSI_28nm_PHY_LNCK_CFG_4); + writel(0xc0, base + REG_DSI_28nm_PHY_LNCK_CFG_1); + writel(0x1, base + REG_DSI_28nm_PHY_LNCK_TEST_STR0); + writel(0xbb, base + REG_DSI_28nm_PHY_LNCK_TEST_STR1); + + writel(0x5f, base + REG_DSI_28nm_PHY_CTRL_0); + + val = readl(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL); + if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE) + val &= ~DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL; + else + val |= DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL; + writel(val, base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL); + + return 0; +} + +static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy) +{ + writel(0, phy->base + REG_DSI_28nm_PHY_CTRL_0); + dsi_28nm_phy_regulator_ctrl(phy, false); + + /* + * Wait for the register writes to complete in order to + * ensure that the PHY is completely disabled + */ + wmb(); +} + +static const struct regulator_bulk_data dsi_phy_28nm_regulators[] = { + { .supply = "vddio", .init_load_uA = 100000 }, +}; + +const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = { + .has_phy_regulator = true, + .regulator_data = dsi_phy_28nm_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators), + .ops = { + .enable = dsi_28nm_phy_enable, + .disable = dsi_28nm_phy_disable, + .pll_init = dsi_pll_28nm_init, + .save_pll_state = dsi_28nm_pll_save_state, + .restore_pll_state = dsi_28nm_pll_restore_state, + }, + .min_pll_rate = VCO_MIN_RATE, + .max_pll_rate = VCO_MAX_RATE, + .io_start = { 0xfd922b00, 0xfd923100 }, + .num_dsi_phy = 2, +}; + +const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = { + .has_phy_regulator = true, + .regulator_data = dsi_phy_28nm_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators), + .ops = { + .enable = dsi_28nm_phy_enable, + .disable = dsi_28nm_phy_disable, + .pll_init = dsi_pll_28nm_init, + .save_pll_state = 
dsi_28nm_pll_save_state, + .restore_pll_state = dsi_28nm_pll_restore_state, + }, + .min_pll_rate = VCO_MIN_RATE, + .max_pll_rate = VCO_MAX_RATE, + .io_start = { 0x1a94400, 0x1a96400 }, + .num_dsi_phy = 2, +}; + +const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = { + .has_phy_regulator = true, + .regulator_data = dsi_phy_28nm_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators), + .ops = { + .enable = dsi_28nm_phy_enable, + .disable = dsi_28nm_phy_disable, + .pll_init = dsi_pll_28nm_init, + .save_pll_state = dsi_28nm_pll_save_state, + .restore_pll_state = dsi_28nm_pll_restore_state, + }, + .min_pll_rate = VCO_MIN_RATE, + .max_pll_rate = VCO_MAX_RATE, + .io_start = { 0x1a98500 }, + .num_dsi_phy = 1, + .quirks = DSI_PHY_28NM_QUIRK_PHY_LP, +}; + +const struct msm_dsi_phy_cfg dsi_phy_28nm_8226_cfgs = { + .has_phy_regulator = true, + .regulator_data = dsi_phy_28nm_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators), + .ops = { + .enable = dsi_28nm_phy_enable, + .disable = dsi_28nm_phy_disable, + .pll_init = dsi_pll_28nm_init, + .save_pll_state = dsi_28nm_pll_save_state, + .restore_pll_state = dsi_28nm_pll_restore_state, + }, + .min_pll_rate = VCO_MIN_RATE, + .max_pll_rate = VCO_MAX_RATE, + .io_start = { 0xfd922b00 }, + .num_dsi_phy = 1, + .quirks = DSI_PHY_28NM_QUIRK_PHY_8226, +}; + +const struct msm_dsi_phy_cfg dsi_phy_28nm_8937_cfgs = { + .has_phy_regulator = true, + .regulator_data = dsi_phy_28nm_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators), + .ops = { + .enable = dsi_28nm_phy_enable, + .disable = dsi_28nm_phy_disable, + .pll_init = dsi_pll_28nm_init, + .save_pll_state = dsi_28nm_pll_save_state, + .restore_pll_state = dsi_28nm_pll_restore_state, + }, + .min_pll_rate = VCO_MIN_RATE, + .max_pll_rate = VCO_MAX_RATE, + .io_start = { 0x1a94400, 0x1a96400 }, + .num_dsi_phy = 2, + .quirks = DSI_PHY_28NM_QUIRK_PHY_LP, +}; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c new file mode 100644 index 000000000000..8dcce9581dc3 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c @@ -0,0 +1,648 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. 
+ */ + +#include <dt-bindings/clock/qcom,dsi-phy-28nm.h> +#include <linux/clk-provider.h> +#include <linux/delay.h> + +#include "dsi_phy.h" +#include "dsi.xml.h" +#include "dsi_phy_28nm_8960.xml.h" + +/* + * DSI PLL 28nm (8960/A family) - clock diagram (e.g. DSI1): + * + * + * +------+ + * dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock) + * F * byte_clk | +------+ + * | bit clock divider (F / 8) + * | + * | +------+ + * o-----| DIV2 |---dsi1pllbyte---o---> To byte RCG + * | +------+ | (sets parent rate) + * | byte clock divider (F) | + * | | + * | o---> To esc RCG + * | (doesn't set parent rate) + * | + * | +------+ + * o-----| DIV3 |----dsi1pll------o---> To dsi RCG + * +------+ | (sets parent rate) + * dsi clock divider (F * magic) | + * | + * o---> To pixel RCG + * (doesn't set parent rate) + */ + +#define POLL_MAX_READS 8000 +#define POLL_TIMEOUT_US 1 + +#define VCO_REF_CLK_RATE 27000000 +#define VCO_MIN_RATE 600000000 +#define VCO_MAX_RATE 1200000000 + +#define VCO_PREF_DIV_RATIO 27 + +struct pll_28nm_cached_state { + unsigned long vco_rate; + u8 postdiv3; + u8 postdiv2; + u8 postdiv1; +}; + +struct clk_bytediv { + struct clk_hw hw; + void __iomem *reg; +}; + +struct dsi_pll_28nm { + struct clk_hw clk_hw; + + struct msm_dsi_phy *phy; + + struct pll_28nm_cached_state cached_state; +}; + +#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, clk_hw) + +static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm, + int nb_tries, int timeout_us) +{ + bool pll_locked = false; + u32 val; + + while (nb_tries--) { + val = readl(pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_RDY); + pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY); + + if (pll_locked) + break; + + udelay(timeout_us); + } + DBG("DSI PLL is %slocked", pll_locked ? 
"" : "*not* "); + + return pll_locked; +} + +/* + * Clock Callbacks + */ +static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw); + void __iomem *base = pll_28nm->phy->pll_base; + u32 val, temp, fb_divider; + + DBG("rate=%lu, parent's=%lu", rate, parent_rate); + + temp = rate / 10; + val = VCO_REF_CLK_RATE / 10; + fb_divider = (temp * VCO_PREF_DIV_RATIO) / val; + fb_divider = fb_divider / 2 - 1; + writel(fb_divider & 0xff, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1); + + val = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2); + + val |= (fb_divider >> 8) & 0x07; + + writel(val, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2); + + val = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3); + + val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f; + + writel(val, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3); + + writel(0xf, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6); + + val = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8); + val |= 0x7 << 4; + writel(val, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8); + + return 0; +} + +static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw); + + return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS, + POLL_TIMEOUT_US); +} + +static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw); + void __iomem *base = pll_28nm->phy->pll_base; + unsigned long vco_rate; + u32 status, fb_divider, temp, ref_divider; + + VERB("parent_rate=%lu", parent_rate); + + status = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0); + + if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) { + fb_divider = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1); + fb_divider &= 0xff; + temp = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07; + fb_divider = (temp << 8) | fb_divider; + fb_divider += 1; + + ref_divider = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3); + ref_divider &= 0x3f; + ref_divider += 1; + + /* multiply by 2 */ + vco_rate = (parent_rate / ref_divider) * fb_divider * 2; + } else { + vco_rate = 0; + } + + DBG("returning vco rate = %lu", vco_rate); + + return vco_rate; +} + +static int dsi_pll_28nm_vco_prepare(struct clk_hw *hw) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw); + struct device *dev = &pll_28nm->phy->pdev->dev; + void __iomem *base = pll_28nm->phy->pll_base; + bool locked; + unsigned int bit_div, byte_div; + int max_reads = 1000, timeout_us = 100; + u32 val; + + DBG("id=%d", pll_28nm->phy->id); + + if (unlikely(pll_28nm->phy->pll_on)) + return 0; + + /* + * before enabling the PLL, configure the bit clock divider since we + * don't expose it as a clock to the outside world + * 1: read back the byte clock divider that should already be set + * 2: divide by 8 to get bit clock divider + * 3: write it to POSTDIV1 + */ + val = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9); + byte_div = val + 1; + bit_div = byte_div / 8; + + val = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8); + val &= ~0xf; + val |= (bit_div - 1); + writel(val, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8); + + /* enable the PLL */ + writel(DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE, + base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0); + + locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us); + + if (unlikely(!locked)) { + DRM_DEV_ERROR(dev, "DSI PLL lock failed\n"); + return -EINVAL; + } + + DBG("DSI PLL lock success"); + pll_28nm->phy->pll_on = true; + + return 0; +} + +static void 
dsi_pll_28nm_vco_unprepare(struct clk_hw *hw) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw); + + DBG("id=%d", pll_28nm->phy->id); + + if (unlikely(!pll_28nm->phy->pll_on)) + return; + + writel(0x00, pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0); + + pll_28nm->phy->pll_on = false; +} + +static int dsi_pll_28nm_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw); + + req->rate = clamp_t(unsigned long, req->rate, + pll_28nm->phy->cfg->min_pll_rate, pll_28nm->phy->cfg->max_pll_rate); + + return 0; +} + +static const struct clk_ops clk_ops_dsi_pll_28nm_vco = { + .determine_rate = dsi_pll_28nm_clk_determine_rate, + .set_rate = dsi_pll_28nm_clk_set_rate, + .recalc_rate = dsi_pll_28nm_clk_recalc_rate, + .prepare = dsi_pll_28nm_vco_prepare, + .unprepare = dsi_pll_28nm_vco_unprepare, + .is_enabled = dsi_pll_28nm_clk_is_enabled, +}; + +/* + * Custom byte clock divider clk_ops + * + * This clock is the entry point to configuring the PLL. The user (dsi host) + * will set this clock's rate to the desired byte clock rate. The VCO + * frequency is a multiple of the byte clock rate. The multiplication factor + * (shown as F in the diagram above) is a function of the byte clock rate. + * + * This custom divider clock ensures that its parent (VCO) is set to the + * desired rate, and that the byte clock postdivider (POSTDIV2) is configured + * accordingly. + */ +#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw) + +static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_bytediv *bytediv = to_clk_bytediv(hw); + unsigned int div; + + div = readl(bytediv->reg) & 0xff; + + return parent_rate / (div + 1); +} + +/* find multiplication factor (wrt byte clock) at which the VCO should be set */ +static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate) +{ + unsigned long bit_mhz; + + /* convert to bit clock in MHz */ + bit_mhz = (byte_clk_rate * 8) / 1000000; + + if (bit_mhz < 125) + return 64; + else if (bit_mhz < 250) + return 32; + else if (bit_mhz < 600) + return 16; + else + return 8; +} + +static int clk_bytediv_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + unsigned long best_parent; + unsigned int factor; + + factor = get_vco_mul_factor(req->rate); + + best_parent = req->rate * factor; + req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent); + + req->rate = req->best_parent_rate / factor; + + return 0; +} + +static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_bytediv *bytediv = to_clk_bytediv(hw); + u32 val; + unsigned int factor; + + factor = get_vco_mul_factor(rate); + + val = readl(bytediv->reg); + val |= (factor - 1) & 0xff; + writel(val, bytediv->reg); + + return 0; +} + +/* Our special byte clock divider ops */ +static const struct clk_ops clk_bytediv_ops = { + .determine_rate = clk_bytediv_determine_rate, + .set_rate = clk_bytediv_set_rate, + .recalc_rate = clk_bytediv_recalc_rate, +}; + +/* + * PLL Callbacks + */ +static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw); + struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state; + void __iomem *base = pll_28nm->phy->pll_base; + + cached_state->postdiv3 = + readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10); + cached_state->postdiv2 = + readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9); + 
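/* POSTDIV1 (the bit clock divider) lives in the low nibble of CTRL_8 */ + 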
cached_state->postdiv1 = + readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8); + + cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw); +} + +static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy) +{ + struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw); + struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state; + void __iomem *base = pll_28nm->phy->pll_base; + int ret; + + ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw, + cached_state->vco_rate, 0); + if (ret) { + DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev, + "restore vco rate failed. ret=%d\n", ret); + return ret; + } + + writel(cached_state->postdiv3, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10); + writel(cached_state->postdiv2, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9); + writel(cached_state->postdiv1, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8); + + return 0; +} + +static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks) +{ + char clk_name[32]; + struct clk_init_data vco_init = { + .parent_data = &(const struct clk_parent_data) { + .fw_name = "ref", + }, + .num_parents = 1, + .flags = CLK_IGNORE_UNUSED, + .ops = &clk_ops_dsi_pll_28nm_vco, + }; + struct device *dev = &pll_28nm->phy->pdev->dev; + struct clk_hw *hw; + struct clk_bytediv *bytediv; + struct clk_init_data bytediv_init = { }; + int ret; + + DBG("%d", pll_28nm->phy->id); + + bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL); + if (!bytediv) + return -ENOMEM; + + snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id); + vco_init.name = clk_name; + + pll_28nm->clk_hw.init = &vco_init; + + ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw); + if (ret) + return ret; + + /* prepare and register bytediv */ + bytediv->hw.init = &bytediv_init; + bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9; + + snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id + 1); + + bytediv_init.name = clk_name; + bytediv_init.ops = &clk_bytediv_ops; + bytediv_init.flags = CLK_SET_RATE_PARENT; + bytediv_init.parent_hws = (const struct clk_hw*[]){ + &pll_28nm->clk_hw, + }; + bytediv_init.num_parents = 1; + + /* DIV2 */ + ret = devm_clk_hw_register(dev, &bytediv->hw); + if (ret) + return ret; + provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw; + + snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id + 1); + /* DIV3 */ + hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name, + &pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base + + REG_DSI_28nm_8960_PHY_PLL_CTRL_10, + 0, 8, 0, NULL); + if (IS_ERR(hw)) + return PTR_ERR(hw); + provided_clocks[DSI_PIXEL_PLL_CLK] = hw; + + return 0; +} + +static int dsi_pll_28nm_8960_init(struct msm_dsi_phy *phy) +{ + struct platform_device *pdev = phy->pdev; + struct dsi_pll_28nm *pll_28nm; + int ret; + + if (!pdev) + return -ENODEV; + + pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL); + if (!pll_28nm) + return -ENOMEM; + + pll_28nm->phy = phy; + + ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws); + if (ret) { + DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret); + return ret; + } + + phy->vco_hw = &pll_28nm->clk_hw; + + return 0; +} + +static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy, + struct msm_dsi_dphy_timing *timing) +{ + void __iomem *base = phy->base; + + writel(DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero), + base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0); + writel(DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail), + base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1); + 
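/* each TIMING_CTRL_n register latches one precalculated D-PHY timing value */ + 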
writel(DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare), + base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2); + writel(0, base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3); + writel(DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit), + base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4); + writel(DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero), + base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5); + writel(DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare), + base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6); + writel(DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail), + base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7); + writel(DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst), + base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8); + writel(DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) | + DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure), + base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9); + writel(DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get), + base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10); + writel(DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0), + base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11); +} + +static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy) +{ + void __iomem *base = phy->reg_base; + + writel(0x3, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0); + writel(1, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1); + writel(1, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2); + writel(0, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3); + writel(0x100, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4); +} + +static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy) +{ + void __iomem *base = phy->reg_base; + + writel(0x3, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0); + writel(0xa, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1); + writel(0x4, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2); + writel(0x0, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3); + writel(0x20, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4); +} + +static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy) +{ + void __iomem *base = phy->reg_base; + u32 status; + int i = 5000; + + writel(0x3, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG); + + writel(0x0, base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2); + writel(0x5a, base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1); + writel(0x10, base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3); + writel(0x1, base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4); + writel(0x1, base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0); + + writel(0x1, base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER); + usleep_range(5000, 6000); + writel(0x0, base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER); + + do { + status = readl(base + + REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS); + + if (!(status & DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY)) + break; + + udelay(1); + } while (--i > 0); +} + +static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy) +{ + void __iomem *base = phy->base; + int i; + + for (i = 0; i < 4; i++) { + writel(0x80, base + REG_DSI_28nm_8960_PHY_LN_CFG_0(i)); + writel(0x45, base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i)); + writel(0x00, base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i)); + writel(0x00, base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i)); + writel(0x01, base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i)); + writel(0x66, base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i)); + } + + writel(0x40, base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0); + writel(0x67, base + REG_DSI_28nm_8960_PHY_LNCK_CFG_1); + 
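/* the LNCK_* registers program the dedicated clock lane */ + 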
writel(0x0, base + REG_DSI_28nm_8960_PHY_LNCK_CFG_2); + writel(0x0, base + REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH); + writel(0x1, base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0); + writel(0x88, base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1); +} + +static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, + struct msm_dsi_phy_clk_request *clk_req) +{ + struct msm_dsi_dphy_timing *timing = &phy->timing; + void __iomem *base = phy->base; + + DBG(""); + + if (msm_dsi_dphy_timing_calc(timing, clk_req)) { + DRM_DEV_ERROR(&phy->pdev->dev, + "%s: D-PHY timing calculation failed\n", + __func__); + return -EINVAL; + } + + dsi_28nm_phy_regulator_init(phy); + + writel(0x04, base + REG_DSI_28nm_8960_PHY_LDO_CTRL); + + /* strength control */ + writel(0xff, base + REG_DSI_28nm_8960_PHY_STRENGTH_0); + writel(0x00, base + REG_DSI_28nm_8960_PHY_STRENGTH_1); + writel(0x06, base + REG_DSI_28nm_8960_PHY_STRENGTH_2); + + /* phy ctrl */ + writel(0x5f, base + REG_DSI_28nm_8960_PHY_CTRL_0); + writel(0x00, base + REG_DSI_28nm_8960_PHY_CTRL_1); + writel(0x00, base + REG_DSI_28nm_8960_PHY_CTRL_2); + writel(0x10, base + REG_DSI_28nm_8960_PHY_CTRL_3); + + dsi_28nm_phy_regulator_ctrl(phy); + + dsi_28nm_phy_calibration(phy); + + dsi_28nm_phy_lane_config(phy); + + writel(0x0f, base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4); + writel(0x03, base + REG_DSI_28nm_8960_PHY_BIST_CTRL_1); + writel(0x03, base + REG_DSI_28nm_8960_PHY_BIST_CTRL_0); + writel(0x0, base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4); + + dsi_28nm_dphy_set_timing(phy, timing); + + return 0; +} + +static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy) +{ + writel(0x0, phy->base + REG_DSI_28nm_8960_PHY_CTRL_0); + + /* + * Wait for the register writes to complete in order to + * ensure that the PHY is completely disabled + */ + wmb(); +} + +static const struct regulator_bulk_data dsi_phy_28nm_8960_regulators[] = { + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ +}; + +const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = { + .has_phy_regulator = true, + .regulator_data = dsi_phy_28nm_8960_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_28nm_8960_regulators), + .ops = { + .enable = dsi_28nm_phy_enable, + .disable = dsi_28nm_phy_disable, + .pll_init = dsi_pll_28nm_8960_init, + .save_pll_state = dsi_28nm_pll_save_state, + .restore_pll_state = dsi_28nm_pll_restore_state, + }, + .min_pll_rate = VCO_MIN_RATE, + .max_pll_rate = VCO_MAX_RATE, + .io_start = { 0x4700300, 0x5800300 }, + .num_dsi_phy = 2, +}; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c new file mode 100644 index 000000000000..c5e1d2016bcc --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -0,0 +1,1506 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * Copyright (c) 2018, The Linux Foundation + */ + +#include <dt-bindings/clock/qcom,dsi-phy-28nm.h> +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/iopoll.h> + +#include "dsi_phy.h" +#include "dsi.xml.h" +#include "dsi_phy_7nm.xml.h" + +/* + * DSI PLL 7nm - clock diagram (e.g. DSI0); TODO: update CPHY diagram + * + * dsi0_pll_out_div_clk dsi0_pll_bit_clk + * | | + * | | + * +---------+ | +----------+ | +----+ + * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk + * +---------+ | +----------+ | +----+ + * | | + * | | dsi0_pll_by_2_bit_clk + * | | | + * | | +----+ | |\ dsi0_pclk_mux + * | |--| /2 |--o--| \ | + * | | +----+ | \ | +---------+ + * | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk + 
* |------------------------------| / +---------+ + * | +-----+ | / + * -----------| /4? |--o----------|/ + * +-----+ | | + * | |dsiclk_sel + * | + * dsi0_pll_post_out_div_clk + */ + +#define VCO_REF_CLK_RATE 19200000 +#define FRAC_BITS 18 + +/* Hardware is pre V4.1 */ +#define DSI_PHY_7NM_QUIRK_PRE_V4_1 BIT(0) +/* Hardware is V4.1 */ +#define DSI_PHY_7NM_QUIRK_V4_1 BIT(1) +/* Hardware is V4.2 */ +#define DSI_PHY_7NM_QUIRK_V4_2 BIT(2) +/* Hardware is V4.3 */ +#define DSI_PHY_7NM_QUIRK_V4_3 BIT(3) +/* Hardware is V5.2 */ +#define DSI_PHY_7NM_QUIRK_V5_2 BIT(4) +/* Hardware is V7.0 */ +#define DSI_PHY_7NM_QUIRK_V7_0 BIT(5) + +struct dsi_pll_config { + bool enable_ssc; + bool ssc_center; + u32 ssc_freq; + u32 ssc_offset; + u32 ssc_adj_per; + + /* out */ + u32 decimal_div_start; + u32 frac_div_start; + u32 pll_clock_inverters; + u32 ssc_stepsize; + u32 ssc_div_per; +}; + +struct pll_7nm_cached_state { + unsigned long vco_rate; + u8 bit_clk_div; + u8 pix_clk_div; + u8 pll_out_div; + u8 pll_mux; +}; + +struct dsi_pll_7nm { + struct clk_hw clk_hw; + + struct msm_dsi_phy *phy; + + u64 vco_current_rate; + + /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */ + spinlock_t postdiv_lock; + + /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG1 register */ + spinlock_t pclk_mux_lock; + + /* + * protects REG_DSI_7nm_PHY_CMN_CTRL_0 register and pll_enable_cnt + * member + */ + spinlock_t pll_enable_lock; + int pll_enable_cnt; + + struct pll_7nm_cached_state cached_state; + + struct dsi_pll_7nm *slave; +}; + +#define to_pll_7nm(x) container_of(x, struct dsi_pll_7nm, clk_hw) + +/* + * Global list of private DSI PLL struct pointers. We need this for bonded DSI + * mode, where the master PLL's clk_ops needs to access the slave's private data + */ +static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX]; + +static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll); +static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll); + +static void dsi_pll_setup_config(struct dsi_pll_config *config) +{ + config->ssc_freq = 31500; + config->ssc_offset = 4800; + config->ssc_adj_per = 2; + + /* TODO: ssc enable */ + config->enable_ssc = false; + config->ssc_center = 0; +} + +static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config *config) +{ + u64 fref = VCO_REF_CLK_RATE; + u64 pll_freq; + u64 divider; + u64 dec, dec_multiple; + u32 frac; + u64 multiplier; + + pll_freq = pll->vco_current_rate; + + divider = fref * 2; + + multiplier = 1 << FRAC_BITS; + dec_multiple = div_u64(pll_freq * multiplier, divider); + dec = div_u64_rem(dec_multiple, multiplier, &frac); + + if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1) { + config->pll_clock_inverters = 0x28; + } else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) { + if (pll_freq < 163000000ULL) + config->pll_clock_inverters = 0xa0; + else if (pll_freq < 175000000ULL) + config->pll_clock_inverters = 0x20; + else if (pll_freq < 325000000ULL) + config->pll_clock_inverters = 0xa0; + else if (pll_freq < 350000000ULL) + config->pll_clock_inverters = 0x20; + else if (pll_freq < 650000000ULL) + config->pll_clock_inverters = 0xa0; + else if (pll_freq < 700000000ULL) + config->pll_clock_inverters = 0x20; + else if (pll_freq < 1300000000ULL) + config->pll_clock_inverters = 0xa0; + else if (pll_freq < 2500000000ULL) + config->pll_clock_inverters = 0x20; + else if (pll_freq < 4000000000ULL) + config->pll_clock_inverters = 0x00; + else + config->pll_clock_inverters = 0x40; + } else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) { + if (pll_freq <= 1300000000ULL) 
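/* V5.2: clock-inverter setting is banded by VCO rate, as for V7.0 above */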
+ config->pll_clock_inverters = 0xa0; + else if (pll_freq <= 2500000000ULL) + config->pll_clock_inverters = 0x20; + else if (pll_freq <= 4000000000ULL) + config->pll_clock_inverters = 0x00; + else + config->pll_clock_inverters = 0x40; + } else if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) { + if (pll_freq <= 1000000000ULL) + config->pll_clock_inverters = 0xa0; + else if (pll_freq <= 2500000000ULL) + config->pll_clock_inverters = 0x20; + else if (pll_freq <= 3020000000ULL) + config->pll_clock_inverters = 0x00; + else + config->pll_clock_inverters = 0x40; + } else { + /* 4.2, 4.3 */ + if (pll_freq <= 1000000000ULL) + config->pll_clock_inverters = 0xa0; + else if (pll_freq <= 2500000000ULL) + config->pll_clock_inverters = 0x20; + else if (pll_freq <= 3500000000ULL) + config->pll_clock_inverters = 0x00; + else + config->pll_clock_inverters = 0x40; + } + + config->decimal_div_start = dec; + config->frac_div_start = frac; +} + +#define SSC_CENTER BIT(0) +#define SSC_EN BIT(1) + +static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll, struct dsi_pll_config *config) +{ + u32 ssc_per; + u32 ssc_mod; + u64 ssc_step_size; + u64 frac; + + if (!config->enable_ssc) { + DBG("SSC not enabled\n"); + return; + } + + ssc_per = DIV_ROUND_CLOSEST(VCO_REF_CLK_RATE, config->ssc_freq) / 2 - 1; + ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1); + ssc_per -= ssc_mod; + + frac = config->frac_div_start; + ssc_step_size = config->decimal_div_start; + ssc_step_size *= (1 << FRAC_BITS); + ssc_step_size += frac; + ssc_step_size *= config->ssc_offset; + ssc_step_size *= (config->ssc_adj_per + 1); + ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1)); + ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000); + + config->ssc_div_per = ssc_per; + config->ssc_stepsize = ssc_step_size; + + pr_debug("SSC: Dec:%d, frac:%llu, frac_bits:%d\n", + config->decimal_div_start, frac, FRAC_BITS); + pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n", + ssc_per, (u32)ssc_step_size, config->ssc_adj_per); +} + +static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *config) +{ + void __iomem *base = pll->phy->pll_base; + + if (config->enable_ssc) { + pr_debug("SSC is enabled\n"); + + writel(config->ssc_stepsize & 0xff, + base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1); + writel(config->ssc_stepsize >> 8, + base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1); + writel(config->ssc_div_per & 0xff, + base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1); + writel(config->ssc_div_per >> 8, + base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1); + writel(config->ssc_adj_per & 0xff, + base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1); + writel(config->ssc_adj_per >> 8, + base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1); + writel(SSC_EN | (config->ssc_center ? 
SSC_CENTER : 0), + base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL); + } +} + +static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll) +{ + void __iomem *base = pll->phy->pll_base; + u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00; + + if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1)) + if (pll->vco_current_rate >= 3100000000ULL) + analog_controls_five_1 = 0x03; + + if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) { + if (pll->vco_current_rate < 1520000000ULL) + vco_config_1 = 0x08; + else if (pll->vco_current_rate < 2990000000ULL) + vco_config_1 = 0x01; + } + + if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_2) || + (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3)) { + if (pll->vco_current_rate < 1520000000ULL) + vco_config_1 = 0x08; + else if (pll->vco_current_rate >= 2990000000ULL) + vco_config_1 = 0x01; + } + + if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) || + (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) { + if (pll->vco_current_rate < 1557000000ULL) + vco_config_1 = 0x08; + else + vco_config_1 = 0x01; + } + + writel(analog_controls_five_1, base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1); + writel(vco_config_1, base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1); + writel(0x01, base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE); + writel(0x03, base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO); + writel(0x00, base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE); + writel(0x00, base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER); + writel(0x4e, base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER); + writel(0x40, base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS); + writel(0xba, base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE); + writel(0x0c, base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE); + writel(0x00, base + REG_DSI_7nm_PHY_PLL_OUTDIV); + writel(0x00, base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE); + writel(0x08, base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO); + writel(0x0a, base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1); + writel(0xc0, base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1); + writel(0x84, base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1); + writel(0x82, base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1); + writel(0x4c, base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1); + writel(0x80, base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE); + writel(0x29, base + REG_DSI_7nm_PHY_PLL_PFILT); + writel(0x2f, base + REG_DSI_7nm_PHY_PLL_PFILT); + writel(0x2a, base + REG_DSI_7nm_PHY_PLL_IFILT); + writel(!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1) ? 0x3f : 0x22, + base + REG_DSI_7nm_PHY_PLL_IFILT); + + if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1)) { + writel(0x22, base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE); + if (pll->slave) + writel(0x22, pll->slave->phy->pll_base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE); + } +} + +static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *config) +{ + void __iomem *base = pll->phy->pll_base; + + writel(0x12, base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE); + writel(config->decimal_div_start, + base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1); + writel(config->frac_div_start & 0xff, + base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1); + writel((config->frac_div_start & 0xff00) >> 8, + base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1); + writel((config->frac_div_start & 0x30000) >> 16, + base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1); + writel(0x40, base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1); + writel(0x06, base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY); + writel(pll->phy->cphy_mode ? 
0x00 : 0x10, + base + REG_DSI_7nm_PHY_PLL_CMODE_1); + writel(config->pll_clock_inverters, + base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1); +} + +static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw); + struct dsi_pll_config config; + + dsi_pll_enable_pll_bias(pll_7nm); + DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->phy->id, rate, + parent_rate); + + pll_7nm->vco_current_rate = rate; + + dsi_pll_setup_config(&config); + + dsi_pll_calc_dec_frac(pll_7nm, &config); + + dsi_pll_calc_ssc(pll_7nm, &config); + + dsi_pll_commit(pll_7nm, &config); + + dsi_pll_config_hzindep_reg(pll_7nm); + + dsi_pll_ssc_commit(pll_7nm, &config); + + dsi_pll_disable_pll_bias(pll_7nm); + /* flush, ensure all register writes are done*/ + wmb(); + + return 0; +} + +static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll) +{ + int rc; + u32 status = 0; + u32 const delay_us = 100; + u32 const timeout_us = 5000; + + rc = readl_poll_timeout_atomic(pll->phy->pll_base + + REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE, + status, + ((status & BIT(0)) > 0), + delay_us, + timeout_us); + if (rc) + pr_err("DSI PLL(%d) lock failed, status=0x%08x\n", + pll->phy->id, status); + + return rc; +} + +static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll) +{ + unsigned long flags; + u32 data; + + spin_lock_irqsave(&pll->pll_enable_lock, flags); + --pll->pll_enable_cnt; + if (pll->pll_enable_cnt < 0) { + spin_unlock_irqrestore(&pll->pll_enable_lock, flags); + DRM_DEV_ERROR_RATELIMITED(&pll->phy->pdev->dev, + "bug: imbalance in disabling PLL bias\n"); + return; + } else if (pll->pll_enable_cnt > 0) { + spin_unlock_irqrestore(&pll->pll_enable_lock, flags); + return; + } /* else: == 0 */ + + data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0); + data &= ~DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB; + writel(0, pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES); + writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0); + spin_unlock_irqrestore(&pll->pll_enable_lock, flags); + ndelay(250); +} + +static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll) +{ + unsigned long flags; + u32 data; + + spin_lock_irqsave(&pll->pll_enable_lock, flags); + pll->pll_enable_cnt++; + WARN_ON(pll->pll_enable_cnt == INT_MAX); + + data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0); + data |= DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB; + writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0); + + writel(0xc0, pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES); + spin_unlock_irqrestore(&pll->pll_enable_lock, flags); + ndelay(250); +} + +static void dsi_pll_cmn_clk_cfg0_write(struct dsi_pll_7nm *pll, u32 val) +{ + unsigned long flags; + + spin_lock_irqsave(&pll->postdiv_lock, flags); + writel(val, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0); + spin_unlock_irqrestore(&pll->postdiv_lock, flags); +} + +static void dsi_pll_cmn_clk_cfg1_update(struct dsi_pll_7nm *pll, u32 mask, + u32 val) +{ + unsigned long flags; + u32 data; + + spin_lock_irqsave(&pll->pclk_mux_lock, flags); + data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + data &= ~mask; + data |= val & mask; + + writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + spin_unlock_irqrestore(&pll->pclk_mux_lock, flags); +} + +static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll) +{ + dsi_pll_cmn_clk_cfg1_update(pll, DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN, 0); +} + +static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll) +{ + u32 cfg_1 = 
DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN | DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN_SEL; + + writel(0x04, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_3); + dsi_pll_cmn_clk_cfg1_update(pll, cfg_1, cfg_1); +} + +static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll) +{ + /* + * Reset the PHY digital domain. This would be needed when + * coming out of a CX or analog rail power collapse while + * ensuring that the pads maintain LP00 or LP11 state + */ + writel(BIT(0), pll->phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4); + wmb(); /* Ensure that the reset is asserted */ + writel(0, pll->phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4); + wmb(); /* Ensure that the reset is deasserted */ +} + +static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw); + int rc; + + dsi_pll_enable_pll_bias(pll_7nm); + if (pll_7nm->slave) + dsi_pll_enable_pll_bias(pll_7nm->slave); + + /* Start PLL */ + writel(BIT(0), pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL); + + /* + * ensure all PLL configurations are written prior to checking + * for PLL lock. + */ + wmb(); + + /* Check for PLL lock */ + rc = dsi_pll_7nm_lock_status(pll_7nm); + if (rc) { + pr_err("PLL(%d) lock failed\n", pll_7nm->phy->id); + goto error; + } + + pll_7nm->phy->pll_on = true; + + /* + * assert power on reset for PHY digital in case the PLL is + * enabled after CX or analog domain power collapse. This needs + * to be done before enabling the global clk. + */ + dsi_pll_phy_dig_reset(pll_7nm); + if (pll_7nm->slave) + dsi_pll_phy_dig_reset(pll_7nm->slave); + + dsi_pll_enable_global_clk(pll_7nm); + if (pll_7nm->slave) + dsi_pll_enable_global_clk(pll_7nm->slave); + + writel(0x1, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL); + if (pll_7nm->slave) + writel(0x1, pll_7nm->slave->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL); + +error: + return rc; +} + +static void dsi_pll_disable_sub(struct dsi_pll_7nm *pll) +{ + writel(0, pll->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL); + dsi_pll_disable_pll_bias(pll); +} + +static void dsi_pll_7nm_vco_unprepare(struct clk_hw *hw) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw); + + /* + * To avoid any stray glitches while abruptly powering down the PLL + * make sure to gate the clock using the clock enable bit before + * powering down the PLL + */ + dsi_pll_disable_global_clk(pll_7nm); + writel(0, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL); + dsi_pll_disable_sub(pll_7nm); + if (pll_7nm->slave) { + dsi_pll_disable_global_clk(pll_7nm->slave); + dsi_pll_disable_sub(pll_7nm->slave); + } + /* flush, ensure all register writes are done */ + wmb(); + pll_7nm->phy->pll_on = false; +} + +static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw); + void __iomem *base = pll_7nm->phy->pll_base; + u64 ref_clk = VCO_REF_CLK_RATE; + u64 vco_rate = 0x0; + u64 multiplier; + u32 frac; + u32 dec; + u64 pll_freq, tmp64; + + dsi_pll_enable_pll_bias(pll_7nm); + dec = readl(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1); + dec &= 0xff; + + frac = readl(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1); + frac |= ((readl(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) & + 0xff) << 8); + frac |= ((readl(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) & + 0x3) << 16); + + /* + * TODO: + * 1. 
Assumes prescaler is disabled + */ + multiplier = 1 << FRAC_BITS; + pll_freq = dec * (ref_clk * 2); + tmp64 = (ref_clk * 2 * frac); + pll_freq += div_u64(tmp64, multiplier); + + vco_rate = pll_freq; + pll_7nm->vco_current_rate = vco_rate; + + DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x", + pll_7nm->phy->id, (unsigned long)vco_rate, dec, frac); + + dsi_pll_disable_pll_bias(pll_7nm); + + return (unsigned long)vco_rate; +} + +static int dsi_pll_7nm_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw); + + req->rate = clamp_t(unsigned long, req->rate, + pll_7nm->phy->cfg->min_pll_rate, pll_7nm->phy->cfg->max_pll_rate); + + return 0; +} + +static const struct clk_ops clk_ops_dsi_pll_7nm_vco = { + .determine_rate = dsi_pll_7nm_clk_determine_rate, + .set_rate = dsi_pll_7nm_vco_set_rate, + .recalc_rate = dsi_pll_7nm_vco_recalc_rate, + .prepare = dsi_pll_7nm_vco_prepare, + .unprepare = dsi_pll_7nm_vco_unprepare, +}; + +/* + * PLL Callbacks + */ + +static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw); + struct pll_7nm_cached_state *cached = &pll_7nm->cached_state; + void __iomem *phy_base = pll_7nm->phy->base; + u32 cmn_clk_cfg0, cmn_clk_cfg1; + + dsi_pll_enable_pll_bias(pll_7nm); + cached->pll_out_div = readl(pll_7nm->phy->pll_base + + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE); + cached->pll_out_div &= 0x3; + + cmn_clk_cfg0 = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0); + cached->bit_clk_div = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK, cmn_clk_cfg0); + cached->pix_clk_div = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK, cmn_clk_cfg0); + + cmn_clk_cfg1 = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + cached->pll_mux = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK, cmn_clk_cfg1); + + dsi_pll_disable_pll_bias(pll_7nm); + DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x", + pll_7nm->phy->id, cached->pll_out_div, cached->bit_clk_div, + cached->pix_clk_div, cached->pll_mux); +} + +static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw); + struct pll_7nm_cached_state *cached = &pll_7nm->cached_state; + u32 val; + int ret; + + val = readl(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE); + val &= ~0x3; + val |= cached->pll_out_div; + writel(val, pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE); + + dsi_pll_cmn_clk_cfg0_write(pll_7nm, + DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(cached->bit_clk_div) | + DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(cached->pix_clk_div)); + dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK, + cached->pll_mux); + + ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw, + pll_7nm->vco_current_rate, + VCO_REF_CLK_RATE); + if (ret) { + DRM_DEV_ERROR(&pll_7nm->phy->pdev->dev, + "restore vco rate failed. 
ret=%d\n", ret); + return ret; + } + + DBG("DSI PLL%d", pll_7nm->phy->id); + + return 0; +} + +static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw); + void __iomem *base = phy->base; + u32 data = 0x0; /* internal PLL */ + + DBG("DSI PLL%d", pll_7nm->phy->id); + + switch (phy->usecase) { + case MSM_DSI_PHY_STANDALONE: + break; + case MSM_DSI_PHY_MASTER: + pll_7nm->slave = pll_7nm_list[(pll_7nm->phy->id + 1) % DSI_MAX]; + /* v7.0: Enable ATB_EN0 and alternate clock output to external phy */ + if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0) + writel(0x07, base + REG_DSI_7nm_PHY_CMN_CTRL_5); + break; + case MSM_DSI_PHY_SLAVE: + data = 0x1; /* external PLL */ + break; + default: + return -EINVAL; + } + + /* set PLL src */ + dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL__MASK, + DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL(data)); + + return 0; +} + +/* + * The post dividers and mux clocks are created using the standard divider and + * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux + * state to follow the master PLL's divider/mux state. Therefore, we don't + * require special clock ops that also configure the slave PLL registers + */ +static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provided_clocks) +{ + char clk_name[32]; + struct clk_init_data vco_init = { + .parent_data = &(const struct clk_parent_data) { + .fw_name = "ref", + }, + .num_parents = 1, + .name = clk_name, + .flags = CLK_IGNORE_UNUSED, + .ops = &clk_ops_dsi_pll_7nm_vco, + }; + struct device *dev = &pll_7nm->phy->pdev->dev; + struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit; + struct clk_hw *pll_post_out_div, *phy_pll_out_dsi_parent; + int ret; + + DBG("DSI%d", pll_7nm->phy->id); + + snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_7nm->phy->id); + pll_7nm->clk_hw.init = &vco_init; + + ret = devm_clk_hw_register(dev, &pll_7nm->clk_hw); + if (ret) + return ret; + + snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_7nm->phy->id); + + pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name, + &pll_7nm->clk_hw, CLK_SET_RATE_PARENT, + pll_7nm->phy->pll_base + + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, + 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL); + if (IS_ERR(pll_out_div)) { + ret = PTR_ERR(pll_out_div); + goto fail; + } + + snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_7nm->phy->id); + + /* BIT CLK: DIV_CTRL_3_0 */ + pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name, + pll_out_div, CLK_SET_RATE_PARENT, + pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0, + 0, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock); + if (IS_ERR(pll_bit)) { + ret = PTR_ERR(pll_bit); + goto fail; + } + + snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_7nm->phy->id); + + /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */ + hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name, + pll_bit, CLK_SET_RATE_PARENT, 1, + pll_7nm->phy->cphy_mode ? 
7 : 8); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto fail; + } + + provided_clocks[DSI_BYTE_PLL_CLK] = hw; + + snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id); + + pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev, + clk_name, pll_bit, 0, 1, 2); + if (IS_ERR(pll_by_2_bit)) { + ret = PTR_ERR(pll_by_2_bit); + goto fail; + } + + snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id); + + if (pll_7nm->phy->cphy_mode) + pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw( + dev, clk_name, pll_out_div, 0, 2, 7); + else + pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw( + dev, clk_name, pll_out_div, 0, 1, 4); + if (IS_ERR(pll_post_out_div)) { + ret = PTR_ERR(pll_post_out_div); + goto fail; + } + + /* in CPHY mode, pclk_mux will always have post_out_div as parent + * don't register a pclk_mux clock and just use post_out_div instead + */ + if (pll_7nm->phy->cphy_mode) { + dsi_pll_cmn_clk_cfg1_update(pll_7nm, + DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK, + DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL(3)); + phy_pll_out_dsi_parent = pll_post_out_div; + } else { + snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_7nm->phy->id); + + hw = devm_clk_hw_register_mux_parent_hws(dev, clk_name, + ((const struct clk_hw *[]){ + pll_bit, + pll_by_2_bit, + }), 2, 0, pll_7nm->phy->base + + REG_DSI_7nm_PHY_CMN_CLK_CFG1, + 0, 1, 0, &pll_7nm->pclk_mux_lock); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto fail; + } + + phy_pll_out_dsi_parent = hw; + } + + snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id); + + /* PIX CLK DIV : DIV_CTRL_7_4*/ + hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name, + phy_pll_out_dsi_parent, 0, + pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0, + 4, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto fail; + } + + provided_clocks[DSI_PIXEL_PLL_CLK] = hw; + + return 0; + +fail: + + return ret; +} + +static int dsi_pll_7nm_init(struct msm_dsi_phy *phy) +{ + struct platform_device *pdev = phy->pdev; + struct dsi_pll_7nm *pll_7nm; + int ret; + + pll_7nm = devm_kzalloc(&pdev->dev, sizeof(*pll_7nm), GFP_KERNEL); + if (!pll_7nm) + return -ENOMEM; + + DBG("DSI PLL%d", phy->id); + + pll_7nm_list[phy->id] = pll_7nm; + + spin_lock_init(&pll_7nm->postdiv_lock); + spin_lock_init(&pll_7nm->pclk_mux_lock); + spin_lock_init(&pll_7nm->pll_enable_lock); + + pll_7nm->phy = phy; + + ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws); + if (ret) { + DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret); + return ret; + } + + phy->vco_hw = &pll_7nm->clk_hw; + + /* TODO: Remove this when we have proper display handover support */ + msm_dsi_phy_pll_save_state(phy); + /* + * Store also proper vco_current_rate, because its value will be used in + * dsi_7nm_pll_restore_state(). 
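+ * If the PLL is not running at this point, recalc_rate() reads back 0 and + * the code below falls back to the minimum PLL rate as a safe default.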
+ if (!dsi_pll_7nm_vco_recalc_rate(&pll_7nm->clk_hw, VCO_REF_CLK_RATE))
+ pll_7nm->vco_current_rate = pll_7nm->phy->cfg->min_pll_rate;
+
+ return 0;
+}
+
+static int dsi_phy_hw_v4_0_is_pll_on(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->base;
+ u32 data = 0;
+
+ data = readl(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
+ mb(); /* make sure read happened */
+
+ return (data & BIT(0));
+}
+
+static void dsi_phy_hw_v4_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *lane_base = phy->lane_base;
+ int phy_lane_0 = 0; /* TODO: Support all lane swap configs */
+
+ /*
+ * LPRX and CDRX need to be enabled only for the physical data lane
+ * corresponding to the logical data lane 0.
+ */
+ if (enable)
+ writel(0x3, lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0));
+ else
+ writel(0, lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0));
+}
+
+static void dsi_phy_hw_v4_0_lane_settings(struct msm_dsi_phy *phy)
+{
+ int i;
+ const u8 tx_dctrl_0[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
+ const u8 tx_dctrl_1[] = { 0x40, 0x40, 0x40, 0x46, 0x41 };
+ const u8 *tx_dctrl = tx_dctrl_0;
+ void __iomem *lane_base = phy->lane_base;
+
+ if (!(phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1))
+ tx_dctrl = tx_dctrl_1;
+
+ /* Strength ctrl settings */
+ for (i = 0; i < 5; i++) {
+ /*
+ * Disable LPRX and CDRX for all lanes. Later on, they will
+ * be enabled only for the physical data lane corresponding
+ * to the logical data lane 0.
+ */
+ writel(0, lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(i));
+ writel(0x0, lane_base + REG_DSI_7nm_PHY_LN_PIN_SWAP(i));
+ }
+
+ dsi_phy_hw_v4_0_config_lpcdrx(phy, true);
+
+ /* other settings */
+ for (i = 0; i < 5; i++) {
+ writel(0x0, lane_base + REG_DSI_7nm_PHY_LN_CFG0(i));
+ writel(0x0, lane_base + REG_DSI_7nm_PHY_LN_CFG1(i));
+ writel(i == 4 ? 0x8a : 0xa, lane_base + REG_DSI_7nm_PHY_LN_CFG2(i));
+ writel(tx_dctrl[i], lane_base + REG_DSI_7nm_PHY_LN_TX_DCTRL(i));
+ }
+}
+
+static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ int ret;
+ u32 status;
+ u32 const delay_us = 5;
+ u32 const timeout_us = 1000;
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ void __iomem *base = phy->base;
+ bool less_than_1500_mhz;
+ u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0;
+ u32 glbl_pemph_ctrl_0;
+ u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
+ u32 glbl_rescode_top_ctrl, glbl_rescode_bot_ctrl;
+ u32 data;
+
+ DBG("");
+
+ if (phy->cphy_mode)
+ ret = msm_dsi_cphy_timing_calc_v4(timing, clk_req);
+ else
+ ret = msm_dsi_dphy_timing_calc_v4(timing, clk_req);
+ if (ret) {
+ DRM_DEV_ERROR(&phy->pdev->dev,
+ "%s: PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dsi_phy_hw_v4_0_is_pll_on(phy))
+ pr_warn("PLL turned on before configuring PHY\n");
+
+ /* Request for REFGEN READY */
+ if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) ||
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
+ writel(0x1, phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10);
+ udelay(500);
+ }
+
+ /* wait for REFGEN READY */
+ ret = readl_poll_timeout_atomic(base + REG_DSI_7nm_PHY_CMN_PHY_STATUS,
+ status, (status & BIT(0)),
+ delay_us, timeout_us);
+ if (ret) {
+ pr_err("Ref gen not ready. Aborting\n");
+ return -EINVAL;
+ }
+
+ /* TODO: CPHY enable path (this is for DPHY only) */
+
+ /* Alter PHY configurations if data rate is less than 1.5 GHz */
+ less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);
+
+ glbl_str_swi_cal_sel_ctrl = 0x00;
+ if (phy->cphy_mode) {
+ vreg_ctrl_0 = 0x51;
+ vreg_ctrl_1 = 0x55;
+ glbl_hstx_str_ctrl_0 = 0x00;
+ glbl_pemph_ctrl_0 = 0x11;
+ lane_ctrl0 = 0x17;
+ } else {
+ vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
+ vreg_ctrl_1 = 0x5c;
+ glbl_hstx_str_ctrl_0 = 0x88;
+ glbl_pemph_ctrl_0 = 0x00;
+ lane_ctrl0 = 0x1f;
+ }
+
+ if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
+ if (phy->cphy_mode) {
+ /* TODO: different for second phy */
+ vreg_ctrl_0 = 0x57;
+ vreg_ctrl_1 = 0x41;
+ glbl_rescode_top_ctrl = 0x3d;
+ glbl_rescode_bot_ctrl = 0x38;
+ } else {
+ vreg_ctrl_0 = 0x56;
+ vreg_ctrl_1 = 0x19;
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3c : 0x03;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x3c;
+ }
+ } else if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ if (phy->cphy_mode) {
+ vreg_ctrl_0 = 0x45;
+ vreg_ctrl_1 = 0x41;
+ glbl_rescode_top_ctrl = 0x00;
+ glbl_rescode_bot_ctrl = 0x00;
+ } else {
+ vreg_ctrl_0 = 0x44;
+ vreg_ctrl_1 = 0x19;
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3c : 0x03;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x3c;
+ }
+ } else if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3)) {
+ if (phy->cphy_mode) {
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x01;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x3b;
+ } else {
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x01;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x39;
+ }
+ } else if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_2) {
+ if (phy->cphy_mode) {
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x01;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x3b;
+ } else {
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3c : 0x00;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x39;
+ }
+ } else if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
+ if (phy->cphy_mode) {
+ glbl_hstx_str_ctrl_0 = 0x88;
+ glbl_rescode_top_ctrl = 0x00;
+ glbl_rescode_bot_ctrl = 0x3c;
+ } else {
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 : 0x3c;
+ }
+ } else {
+ if (phy->cphy_mode) {
+ glbl_str_swi_cal_sel_ctrl = 0x03;
+ glbl_hstx_str_ctrl_0 = 0x66;
+ } else {
+ vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
+ glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
+ glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
+ }
+ glbl_rescode_top_ctrl = 0x03;
+ glbl_rescode_bot_ctrl = 0x3c;
+ }
+
+ /* de-assert digital and pll power down */
+ data = DSI_7nm_PHY_CMN_CTRL_0_DIGTOP_PWRDN_B |
+ DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
+ writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+ /* Assert PLL core reset */
+ writel(0x00, base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
+
+ /* turn off resync FIFO */
+ writel(0x00, base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL);
+
+ /* program CMN_CTRL_4 for minor_ver 2 chipsets */
+ if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0) ||
+ (readl(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0) & (0xf0)) == 0x20)
+ writel(0x04, base + REG_DSI_7nm_PHY_CMN_CTRL_4);
+
+ /* Configure PHY lane swap (TODO: we need to calculate this) */
+ writel(0x21, base + REG_DSI_7nm_PHY_CMN_LANE_CFG0);
+ writel(0x84, base + REG_DSI_7nm_PHY_CMN_LANE_CFG1);
+
+ if (phy->cphy_mode)
+ writel(BIT(6), base + REG_DSI_7nm_PHY_CMN_GLBL_CTRL);
+
+ /* Enable LDO */
+ writel(vreg_ctrl_0, base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0);
+ writel(vreg_ctrl_1, base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1);
+
+ writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_3);
+ writel(glbl_str_swi_cal_sel_ctrl,
+ base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL);
+ writel(glbl_hstx_str_ctrl_0,
+ base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0);
+ writel(glbl_pemph_ctrl_0,
+ base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0);
+ if (phy->cphy_mode)
+ writel(0x01, base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1);
+ writel(glbl_rescode_top_ctrl,
+ base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL);
+ writel(glbl_rescode_bot_ctrl,
+ base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL);
+ writel(0x55, base + REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL);
+
+ /* Remove power down from all blocks */
+ writel(0x7f, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+ writel(lane_ctrl0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0);
+
+ /* Select full-rate mode */
+ if (!phy->cphy_mode)
+ writel(0x40, base + REG_DSI_7nm_PHY_CMN_CTRL_2);
+
+ ret = dsi_7nm_set_usecase(phy);
+ if (ret) {
+ DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* DSI PHY timings */
+ if (phy->cphy_mode) {
+ writel(0x00, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0);
+ writel(timing->hs_exit, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4);
+ writel(timing->shared_timings.clk_pre,
+ base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5);
+ writel(timing->clk_prepare, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6);
+ writel(timing->shared_timings.clk_post,
+ base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7);
+ writel(timing->hs_rqst, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8);
+ writel(0x02, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9);
+ writel(0x04, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10);
+ writel(0x00, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11);
+ } else {
+ writel(0x00, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0);
+ writel(timing->clk_zero, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1);
+ writel(timing->clk_prepare, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2);
+ writel(timing->clk_trail, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3);
+ writel(timing->hs_exit, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4);
+ writel(timing->hs_zero, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5);
+ writel(timing->hs_prepare, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6);
+ writel(timing->hs_trail, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7);
+ writel(timing->hs_rqst, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8);
+ writel(0x02, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9);
+ writel(0x04, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10);
+ writel(0x00, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11);
+ writel(timing->shared_timings.clk_pre,
+ base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12);
+ writel(timing->shared_timings.clk_post,
+ base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13);
+ }
+
+ /* DSI lane settings */
+ dsi_phy_hw_v4_0_lane_settings(phy);
+
+ DBG("DSI%d PHY enabled", phy->id);
+
+ return 0;
+}
+
+static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *base = phy->base;
+ u32 data;
+
+ data = readl(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1);
+ if (enable)
+ data |= BIT(5) | BIT(6);
+ else
+ data &= ~(BIT(5) | BIT(6));
+ writel(data, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1);
+
+ return enable;
+}
+
+static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->base;
+ u32 data;
+
+ DBG("");
+
+ if (dsi_phy_hw_v4_0_is_pll_on(phy))
+ pr_warn("Turning OFF PHY while PLL is on\n");
+
+ dsi_phy_hw_v4_0_config_lpcdrx(phy, false);
+
+ /* Turn off REFGEN Vote */
+ if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) ||
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
+ writel(0x0, base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10);
+ wmb();
+ /* Delay to ensure HW removes vote before PHY shut down */
+ udelay(2);
+ }
+
+ data = readl(base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+ /* disable all lanes */
+ data &= ~0x1F;
+ writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ writel(0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0);
+
+ /* Turn off all PHY blocks */
+ writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+ /* make sure phy is turned off */
+ wmb();
+
+ DBG("DSI%d PHY disabled", phy->id);
+}
+
+static const struct regulator_bulk_data dsi_phy_7nm_36mA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 36000 },
+};
+
+static const struct regulator_bulk_data dsi_phy_7nm_37750uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 37550 },
+};
+
+static const struct regulator_bulk_data dsi_phy_7nm_48000uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 48000 },
+};
+
+static const struct regulator_bulk_data dsi_phy_7nm_98000uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 98000 },
+};
+
+static const struct regulator_bulk_data dsi_phy_7nm_97800uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 97800 },
+};
+
+static const struct regulator_bulk_data dsi_phy_7nm_98400uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 98400 },
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_36mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_1,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_7nm_6375_cfgs = {
+ .has_phy_lane = true,
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000ULL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0x5e94400 },
+ .num_dsi_phy = 1,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_1,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_36mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 1000000000UL,
+ .max_pll_rate = 3500000000UL,
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_PRE_V4_1,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_37750uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_37750uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000ULL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae94400 },
+ .num_dsi_phy = 1,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_1,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_5nm_8350_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_37750uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_37750uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_97800uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_97800uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_3,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_5nm_8775p_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_48000uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_48000uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_5nm_sar2130p_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_97800uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_97800uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae95000, 0xae97000 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V5_2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_98400uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_98400uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae95000, 0xae97000 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V5_2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_98000uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_98000uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae95000, 0xae97000 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V5_2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_3nm_8750_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_98000uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_98000uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae95000, 0xae97000 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V7_0,
+};
