Diffstat (limited to 'drivers/gpu/drm/display')
-rw-r--r--  drivers/gpu/drm/display/Kconfig                        |   92
-rw-r--r--  drivers/gpu/drm/display/Makefile                       |   17
-rw-r--r--  drivers/gpu/drm/display/drm_bridge_connector.c         |  651
-rw-r--r--  drivers/gpu/drm/display/drm_dp_aux_bus.c               |    6
-rw-r--r--  drivers/gpu/drm/display/drm_dp_cec.c                   |   14
-rw-r--r--  drivers/gpu/drm/display/drm_dp_dual_mode_helper.c      |    8
-rw-r--r--  drivers/gpu/drm/display/drm_dp_helper.c                |  421
-rw-r--r--  drivers/gpu/drm/display/drm_dp_helper_internal.h       |    2
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c          |  437
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology_internal.h |    4
-rw-r--r--  drivers/gpu/drm/display/drm_dp_tunnel.c                | 1950
-rw-r--r--  drivers/gpu/drm/display/drm_dsc_helper.c               |   91
-rw-r--r--  drivers/gpu/drm/display/drm_hdmi_audio_helper.c        |  190
-rw-r--r--  drivers/gpu/drm/display/drm_hdmi_helper.c              |   61
-rw-r--r--  drivers/gpu/drm/display/drm_hdmi_state_helper.c        |  841
15 files changed, 4526 insertions(+), 259 deletions(-)
diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index 09712b88a5b8..8d22b7627d41 100644
--- a/drivers/gpu/drm/display/Kconfig
+++ b/drivers/gpu/drm/display/Kconfig
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: MIT
-config DRM_DP_AUX_BUS
+config DRM_DISPLAY_DP_AUX_BUS
tristate
depends on DRM
- depends on OF || COMPILE_TEST
+ depends on OF
config DRM_DISPLAY_HELPER
tristate
@@ -11,41 +11,87 @@ config DRM_DISPLAY_HELPER
help
DRM helpers for display adapters.
+if DRM_DISPLAY_HELPER
+
+config DRM_BRIDGE_CONNECTOR
+ bool
+ select DRM_DISPLAY_HDMI_AUDIO_HELPER
+ select DRM_DISPLAY_HDMI_STATE_HELPER
+ help
+ DRM connector implementation terminating DRM bridge chains.
+
+config DRM_DISPLAY_DP_AUX_CEC
+ bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
+ select DRM_DISPLAY_DP_HELPER
+ select CEC_CORE
+ help
+ Choose this option if you want to enable HDMI CEC support for
+ DisplayPort/USB-C to HDMI adapters.
+
+ Note: not all adapters support this feature, and even for those
+ that do support this they often do not hook up the CEC pin.
+
+config DRM_DISPLAY_DP_AUX_CHARDEV
+ bool "DRM DP AUX Interface"
+ select DRM_DISPLAY_DP_HELPER
+ help
+ Choose this option to enable a /dev/drm_dp_auxN node that allows to
+ read and write values to arbitrary DPCD registers on the DP aux
+ channel.
+
config DRM_DISPLAY_DP_HELPER
bool
- depends on DRM_DISPLAY_HELPER
help
DRM display helpers for DisplayPort.
-config DRM_DISPLAY_HDCP_HELPER
+config DRM_DISPLAY_DP_TUNNEL
+ bool
+ select DRM_DISPLAY_DP_HELPER
+ help
+ Enable support for DisplayPort tunnels. This allows drivers to use
+ DP tunnel features like the Bandwidth Allocation mode to maximize the
+ BW utilization for display streams on Thunderbolt links.
+
+config DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
+ bool "Enable debugging the DP tunnel state"
+ depends on REF_TRACKER
+ depends on DRM_DISPLAY_DP_TUNNEL
+ depends on DEBUG_KERNEL
+ depends on EXPERT
+ help
+ Enables debugging the DP tunnel manager's state, including the
+ consistency of all managed tunnels' reference counting and the state of
+ streams contained in tunnels.
+
+ If in doubt, say "N".
+
+config DRM_DISPLAY_DSC_HELPER
bool
depends on DRM_DISPLAY_HELPER
help
+ DRM display helpers for VESA DSC (used by DSI and DisplayPort).
+
+config DRM_DISPLAY_HDCP_HELPER
+ bool
+ help
DRM display helpers for HDCP.
-config DRM_DISPLAY_HDMI_HELPER
+config DRM_DISPLAY_HDMI_AUDIO_HELPER
bool
- depends on DRM_DISPLAY_HELPER
help
- DRM display helpers for HDMI.
+ DRM display helpers for HDMI Audio functionality (generic HDMI Codec
+ implementation).
-config DRM_DP_AUX_CHARDEV
- bool "DRM DP AUX Interface"
- depends on DRM && DRM_DISPLAY_HELPER
- select DRM_DISPLAY_DP_HELPER
+config DRM_DISPLAY_HDMI_HELPER
+ bool
help
- Choose this option to enable a /dev/drm_dp_auxN node that allows to
- read and write values to arbitrary DPCD registers on the DP aux
- channel.
+ DRM display helpers for HDMI.
-config DRM_DP_CEC
- bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
- depends on DRM && DRM_DISPLAY_HELPER
- select DRM_DISPLAY_DP_HELPER
- select CEC_CORE
+config DRM_DISPLAY_HDMI_STATE_HELPER
+ bool
+ select DRM_DISPLAY_HDMI_AUDIO_HELPER
+ select DRM_DISPLAY_HDMI_HELPER
help
- Choose this option if you want to enable HDMI CEC support for
- DisplayPort/USB-C to HDMI adapters.
+ DRM KMS state helpers for HDMI.
- Note: not all adapters support this feature, and even for those
- that do support this they often do not hook up the CEC pin.
+endif # DRM_DISPLAY_HELPER
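
Most of these symbols carry no prompt and are selected by their users, so drivers only need the renamed select statements. A minimal sketch of a hypothetical driver entry against the new names (DRM_FOO is illustrative, not part of this patch):

config DRM_FOO
	tristate "Foo DisplayPort driver"
	depends on DRM
	select DRM_DISPLAY_HELPER
	select DRM_DISPLAY_DP_HELPER
	select DRM_DISPLAY_DP_AUX_BUS
	select DRM_BRIDGE_CONNECTOR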
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index 17ac4a1006a8..b17879b957d5 100644
--- a/drivers/gpu/drm/display/Makefile
+++ b/drivers/gpu/drm/display/Makefile
@@ -1,18 +1,27 @@
# SPDX-License-Identifier: MIT
-obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o
+obj-$(CONFIG_DRM_DISPLAY_DP_AUX_BUS) += drm_dp_aux_bus.o
drm_display_helper-y := drm_display_helper_mod.o
+drm_display_helper-$(CONFIG_DRM_BRIDGE_CONNECTOR) += \
+ drm_bridge_connector.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
drm_dp_dual_mode_helper.o \
drm_dp_helper.o \
- drm_dp_mst_topology.o \
+ drm_dp_mst_topology.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \
+ drm_dp_tunnel.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_DSC_HELPER) += \
drm_dsc_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_AUDIO_HELPER) += \
+ drm_hdmi_audio_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
drm_hdmi_helper.o \
drm_scdc_helper.o
-drm_display_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
-drm_display_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_STATE_HELPER) += \
+ drm_hdmi_state_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_AUX_CEC) += drm_dp_cec.o
obj-$(CONFIG_DRM_DISPLAY_HELPER) += drm_display_helper.o
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
new file mode 100644
index 000000000000..56f977bbe62d
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * The DRM bridge connector helper object provides a DRM connector
+ * implementation that wraps a chain of &struct drm_bridge. The connector
+ * operations are fully implemented based on the operations of the bridges in
+ * the chain, and don't require any intervention from the display controller
+ * driver at runtime.
+ *
+ * To use the helper, display controller drivers create a bridge connector with
+ * a call to drm_bridge_connector_init(). This associates the newly created
+ * connector with the chain of bridges passed to the function and registers it
+ * with the DRM device. At that point the connector becomes fully usable, no
+ * further operation is needed.
+ *
+ * The DRM bridge connector operations are implemented based on the operations
+ * provided by the bridges in the chain. Each connector operation is delegated
+ * to the bridge closest to the connector (at the end of the chain) that
+ * provides the relevant functionality.
+ *
+ * To make use of this helper, all bridges in the chain shall report bridge
+ * operation flags (&drm_bridge->ops) and bridge output type
+ * (&drm_bridge->type), as well as the DRM_BRIDGE_ATTACH_NO_CONNECTOR attach
+ * flag (none of the bridges shall create a DRM connector directly).
+ */
+
+/**
+ * struct drm_bridge_connector - A connector backed by a chain of bridges
+ */
+struct drm_bridge_connector {
+ /**
+ * @base: The base DRM connector
+ */
+ struct drm_connector base;
+ /**
+ * @encoder:
+ *
+ * The encoder at the start of the bridges chain.
+ */
+ struct drm_encoder *encoder;
+ /**
+ * @bridge_edid:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * EDID read support, if any (see &DRM_BRIDGE_OP_EDID).
+ */
+ struct drm_bridge *bridge_edid;
+ /**
+ * @bridge_hpd:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * hot-plug detection notification, if any (see &DRM_BRIDGE_OP_HPD).
+ */
+ struct drm_bridge *bridge_hpd;
+ /**
+ * @bridge_detect:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * connector detection, if any (see &DRM_BRIDGE_OP_DETECT).
+ */
+ struct drm_bridge *bridge_detect;
+ /**
+ * @bridge_modes:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * connector modes detection, if any (see &DRM_BRIDGE_OP_MODES).
+ */
+ struct drm_bridge *bridge_modes;
+ /**
+ * @bridge_hdmi:
+ *
+ * The bridge in the chain that implements necessary support for the
+ * HDMI connector infrastructure, if any (see &DRM_BRIDGE_OP_HDMI).
+ */
+ struct drm_bridge *bridge_hdmi;
+};
+
+#define to_drm_bridge_connector(x) \
+ container_of(x, struct drm_bridge_connector, base)
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Hot-Plug Handling
+ */
+
+static void drm_bridge_connector_hpd_notify(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ /* Notify all bridges in the pipeline of hotplug events. */
+ drm_for_each_bridge_in_chain(bridge_connector->encoder, bridge) {
+ if (bridge->funcs->hpd_notify)
+ bridge->funcs->hpd_notify(bridge, status);
+ }
+}
+
+static void drm_bridge_connector_handle_hpd(struct drm_bridge_connector *drm_bridge_connector,
+ enum drm_connector_status status)
+{
+ struct drm_connector *connector = &drm_bridge_connector->base;
+ struct drm_device *dev = connector->dev;
+
+ mutex_lock(&dev->mode_config.mutex);
+ connector->status = status;
+ mutex_unlock(&dev->mode_config.mutex);
+
+ drm_bridge_connector_hpd_notify(connector, status);
+
+ drm_kms_helper_connector_hotplug_event(connector);
+}
+
+static void drm_bridge_connector_hpd_cb(void *cb_data,
+ enum drm_connector_status status)
+{
+ drm_bridge_connector_handle_hpd(cb_data, status);
+}
+
+static void drm_bridge_connector_oob_hotplug_event(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+
+ drm_bridge_connector_handle_hpd(bridge_connector, status);
+}
+
+static void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *hpd = bridge_connector->bridge_hpd;
+
+ if (hpd)
+ drm_bridge_hpd_enable(hpd, drm_bridge_connector_hpd_cb,
+ bridge_connector);
+}
+
+static void drm_bridge_connector_disable_hpd(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *hpd = bridge_connector->bridge_hpd;
+
+ if (hpd)
+ drm_bridge_hpd_disable(hpd);
+}
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Functions
+ */
+
+static enum drm_connector_status
+drm_bridge_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *detect = bridge_connector->bridge_detect;
+ struct drm_bridge *hdmi = bridge_connector->bridge_hdmi;
+ enum drm_connector_status status;
+
+ if (detect) {
+ status = detect->funcs->detect(detect);
+
+ if (hdmi)
+ drm_atomic_helper_connector_hdmi_hotplug(connector, status);
+
+ drm_bridge_connector_hpd_notify(connector, status);
+ } else {
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DPI:
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_DSI:
+ case DRM_MODE_CONNECTOR_eDP:
+ status = connector_status_connected;
+ break;
+ default:
+ status = connector_status_unknown;
+ break;
+ }
+ }
+
+ return status;
+}
+
+static void drm_bridge_connector_force(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *hdmi = bridge_connector->bridge_hdmi;
+
+ if (hdmi)
+ drm_atomic_helper_connector_hdmi_force(connector);
+}
+
+static void drm_bridge_connector_debugfs_init(struct drm_connector *connector,
+ struct dentry *root)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_encoder *encoder = bridge_connector->encoder;
+ struct drm_bridge *bridge;
+
+ list_for_each_entry(bridge, &encoder->bridge_chain, chain_node) {
+ if (bridge->funcs->debugfs_init)
+ bridge->funcs->debugfs_init(bridge, root);
+ }
+}
+
+static void drm_bridge_connector_reset(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+
+ drm_atomic_helper_connector_reset(connector);
+ if (bridge_connector->bridge_hdmi)
+ __drm_atomic_helper_connector_hdmi_reset(connector,
+ connector->state);
+}
+
+static const struct drm_connector_funcs drm_bridge_connector_funcs = {
+ .reset = drm_bridge_connector_reset,
+ .detect = drm_bridge_connector_detect,
+ .force = drm_bridge_connector_force,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .debugfs_init = drm_bridge_connector_debugfs_init,
+ .oob_hotplug_event = drm_bridge_connector_oob_hotplug_event,
+};
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Helper Functions
+ */
+
+static int drm_bridge_connector_get_modes_edid(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ enum drm_connector_status status;
+ const struct drm_edid *drm_edid;
+ int n;
+
+ status = drm_bridge_connector_detect(connector, false);
+ if (status != connector_status_connected)
+ goto no_edid;
+
+ drm_edid = drm_bridge_edid_read(bridge, connector);
+ if (!drm_edid_valid(drm_edid)) {
+ drm_edid_free(drm_edid);
+ goto no_edid;
+ }
+
+ drm_edid_connector_update(connector, drm_edid);
+ n = drm_edid_connector_add_modes(connector);
+
+ drm_edid_free(drm_edid);
+ return n;
+
+no_edid:
+ drm_edid_connector_update(connector, NULL);
+ return 0;
+}
+
+static int drm_bridge_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ /*
+ * If there is a HDMI bridge, EDID has been updated as a part of
+ * the .detect(). Just update the modes here.
+ */
+ bridge = bridge_connector->bridge_hdmi;
+ if (bridge)
+ return drm_edid_connector_add_modes(connector);
+
+ /*
+ * If display exposes EDID, then we parse that in the normal way to
+ * build table of supported modes.
+ */
+ bridge = bridge_connector->bridge_edid;
+ if (bridge)
+ return drm_bridge_connector_get_modes_edid(connector, bridge);
+
+ /*
+ * Otherwise if the display pipeline reports modes (e.g. with a fixed
+ * resolution panel or an analog TV output), query it.
+ */
+ bridge = bridge_connector->bridge_modes;
+ if (bridge)
+ return bridge->funcs->get_modes(bridge, connector);
+
+ /*
+ * We can't retrieve modes, which can happen for instance for a DVI or
+ * VGA output with the DDC bus unconnected. The KMS core will add the
+ * default modes.
+ */
+ return 0;
+}
+
+static enum drm_mode_status
+drm_bridge_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+
+ if (bridge_connector->bridge_hdmi)
+ return drm_hdmi_connector_mode_valid(connector, mode);
+
+ return MODE_OK;
+}
+
+static int drm_bridge_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+
+ if (bridge_connector->bridge_hdmi)
+ return drm_atomic_helper_connector_hdmi_check(connector, state);
+
+ return 0;
+}
+
+static const struct drm_connector_helper_funcs drm_bridge_connector_helper_funcs = {
+ .get_modes = drm_bridge_connector_get_modes,
+ .mode_valid = drm_bridge_connector_mode_valid,
+ .enable_hpd = drm_bridge_connector_enable_hpd,
+ .disable_hpd = drm_bridge_connector_disable_hpd,
+ .atomic_check = drm_bridge_connector_atomic_check,
+};
+
+static enum drm_mode_status
+drm_bridge_connector_tmds_char_rate_valid(const struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return MODE_ERROR;
+
+ if (bridge->funcs->hdmi_tmds_char_rate_valid)
+ return bridge->funcs->hdmi_tmds_char_rate_valid(bridge, mode, tmds_rate);
+ else
+ return MODE_OK;
+}
+
+static int drm_bridge_connector_clear_infoframe(struct drm_connector *connector,
+ enum hdmi_infoframe_type type)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return -EINVAL;
+
+ return bridge->funcs->hdmi_clear_infoframe(bridge, type);
+}
+
+static int drm_bridge_connector_write_infoframe(struct drm_connector *connector,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return -EINVAL;
+
+ return bridge->funcs->hdmi_write_infoframe(bridge, type, buffer, len);
+}
+
+static const struct drm_edid *
+drm_bridge_connector_read_edid(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_edid;
+ if (!bridge)
+ return NULL;
+
+ return drm_bridge_edid_read(bridge, connector);
+}
+
+static const struct drm_connector_hdmi_funcs drm_bridge_connector_hdmi_funcs = {
+ .tmds_char_rate_valid = drm_bridge_connector_tmds_char_rate_valid,
+ .clear_infoframe = drm_bridge_connector_clear_infoframe,
+ .write_infoframe = drm_bridge_connector_write_infoframe,
+ .read_edid = drm_bridge_connector_read_edid,
+};
+
+static int drm_bridge_connector_audio_startup(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return -EINVAL;
+
+ if (!bridge->funcs->hdmi_audio_startup)
+ return 0;
+
+ return bridge->funcs->hdmi_audio_startup(connector, bridge);
+}
+
+static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return -EINVAL;
+
+ return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms);
+}
+
+static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return;
+
+ bridge->funcs->hdmi_audio_shutdown(connector, bridge);
+}
+
+static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connector,
+ bool enable, int direction)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return -EINVAL;
+
+ if (bridge->funcs->hdmi_audio_mute_stream)
+ return bridge->funcs->hdmi_audio_mute_stream(connector, bridge,
+ enable, direction);
+ else
+ return -ENOTSUPP;
+}
+
+static const struct drm_connector_hdmi_audio_funcs drm_bridge_connector_hdmi_audio_funcs = {
+ .startup = drm_bridge_connector_audio_startup,
+ .prepare = drm_bridge_connector_audio_prepare,
+ .shutdown = drm_bridge_connector_audio_shutdown,
+ .mute_stream = drm_bridge_connector_audio_mute_stream,
+};
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Initialisation
+ */
+
+/**
+ * drm_bridge_connector_init - Initialise a connector for a chain of bridges
+ * @drm: the DRM device
+ * @encoder: the encoder where the bridge chain starts
+ *
+ * Allocate, initialise and register a &drm_bridge_connector with the @drm
+ * device. The connector is associated with a chain of bridges that starts at
+ * the @encoder. All bridges in the chain shall report bridge operation flags
+ * (&drm_bridge->ops) and bridge output type (&drm_bridge->type), and none of
+ * them may create a DRM connector directly.
+ *
+ * Returns a pointer to the new connector on success, or a negative error
+ * pointer otherwise.
+ */
+struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
+ struct drm_encoder *encoder)
+{
+ struct drm_bridge_connector *bridge_connector;
+ struct drm_connector *connector;
+ struct i2c_adapter *ddc = NULL;
+ struct drm_bridge *bridge, *panel_bridge = NULL;
+ unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB);
+ unsigned int max_bpc = 8;
+ int connector_type;
+ int ret;
+
+ bridge_connector = drmm_kzalloc(drm, sizeof(*bridge_connector), GFP_KERNEL);
+ if (!bridge_connector)
+ return ERR_PTR(-ENOMEM);
+
+ bridge_connector->encoder = encoder;
+
+ /*
+ * TODO: Handle doublescan_allowed and stereo_allowed.
+ */
+ connector = &bridge_connector->base;
+ connector->interlace_allowed = true;
+ connector->ycbcr_420_allowed = true;
+
+ /*
+ * Initialise connector status handling. First locate the furthest
+ * bridges in the pipeline that support HPD and output detection. Then
+ * initialise the connector polling mode, using HPD if available and
+ * falling back to polling if supported. If neither HPD nor output
+ * detection are available, we don't support hotplug detection at all.
+ */
+ connector_type = DRM_MODE_CONNECTOR_Unknown;
+ drm_for_each_bridge_in_chain(encoder, bridge) {
+ if (!bridge->interlace_allowed)
+ connector->interlace_allowed = false;
+ if (!bridge->ycbcr_420_allowed)
+ connector->ycbcr_420_allowed = false;
+
+ if (bridge->ops & DRM_BRIDGE_OP_EDID)
+ bridge_connector->bridge_edid = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_HPD)
+ bridge_connector->bridge_hpd = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_DETECT)
+ bridge_connector->bridge_detect = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_MODES)
+ bridge_connector->bridge_modes = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI) {
+ if (bridge_connector->bridge_hdmi)
+ return ERR_PTR(-EBUSY);
+ if (!bridge->funcs->hdmi_write_infoframe ||
+ !bridge->funcs->hdmi_clear_infoframe)
+ return ERR_PTR(-EINVAL);
+
+ bridge_connector->bridge_hdmi = bridge;
+
+ if (bridge->supported_formats)
+ supported_formats = bridge->supported_formats;
+ if (bridge->max_bpc)
+ max_bpc = bridge->max_bpc;
+ }
+
+ if (!drm_bridge_get_next_bridge(bridge))
+ connector_type = bridge->type;
+
+#ifdef CONFIG_OF
+ if (!drm_bridge_get_next_bridge(bridge) &&
+ bridge->of_node)
+ connector->fwnode = fwnode_handle_get(of_fwnode_handle(bridge->of_node));
+#endif
+
+ if (bridge->ddc)
+ ddc = bridge->ddc;
+
+ if (drm_bridge_is_panel(bridge))
+ panel_bridge = bridge;
+ }
+
+ if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+ return ERR_PTR(-EINVAL);
+
+ if (bridge_connector->bridge_hdmi) {
+ if (!connector->ycbcr_420_allowed)
+ supported_formats &= ~BIT(HDMI_COLORSPACE_YUV420);
+
+ bridge = bridge_connector->bridge_hdmi;
+
+ ret = drmm_connector_hdmi_init(drm, connector,
+ bridge_connector->bridge_hdmi->vendor,
+ bridge_connector->bridge_hdmi->product,
+ &drm_bridge_connector_funcs,
+ &drm_bridge_connector_hdmi_funcs,
+ connector_type, ddc,
+ supported_formats,
+ max_bpc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (bridge->hdmi_audio_max_i2s_playback_channels ||
+ bridge->hdmi_audio_spdif_playback) {
+ if (!bridge->funcs->hdmi_audio_prepare ||
+ !bridge->funcs->hdmi_audio_shutdown)
+ return ERR_PTR(-EINVAL);
+
+ ret = drm_connector_hdmi_audio_init(connector,
+ bridge->hdmi_audio_dev,
+ &drm_bridge_connector_hdmi_audio_funcs,
+ bridge->hdmi_audio_max_i2s_playback_channels,
+ bridge->hdmi_audio_spdif_playback,
+ bridge->hdmi_audio_dai_port);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ } else {
+ ret = drmm_connector_init(drm, connector,
+ &drm_bridge_connector_funcs,
+ connector_type, ddc);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
+
+ if (bridge_connector->bridge_hpd)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else if (bridge_connector->bridge_detect)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT
+ | DRM_CONNECTOR_POLL_DISCONNECT;
+
+ if (panel_bridge)
+ drm_panel_bridge_set_orientation(connector, panel_bridge);
+
+ return connector;
+}
+EXPORT_SYMBOL_GPL(drm_bridge_connector_init);
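
The DOC: overview above describes the intended usage: the display driver attaches its bridge chain with DRM_BRIDGE_ATTACH_NO_CONNECTOR and then wraps the whole chain in a single connector. A minimal sketch of that call sequence in a hypothetical driver (the foo_ names are illustrative, not part of this patch):

static int foo_output_init(struct drm_device *drm,
			   struct drm_encoder *encoder,
			   struct drm_bridge *next_bridge)
{
	struct drm_connector *connector;
	int ret;

	/* No bridge in the chain may create its own connector. */
	ret = drm_bridge_attach(encoder, next_bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;

	/* One connector terminating the whole chain. */
	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}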
diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c
index 8a165be1a821..ec7eac6b595f 100644
--- a/drivers/gpu/drm/display/drm_dp_aux_bus.c
+++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c
@@ -36,7 +36,7 @@ struct dp_aux_ep_device_with_data {
*
* Return: True if this driver matches this device; false otherwise.
*/
-static int dp_aux_ep_match(struct device *dev, struct device_driver *drv)
+static int dp_aux_ep_match(struct device *dev, const struct device_driver *drv)
{
return !!of_match_device(drv->of_match_table, dev);
}
@@ -127,7 +127,7 @@ static void dp_aux_ep_shutdown(struct device *dev)
aux_ep_drv->shutdown(to_dp_aux_ep_dev(dev));
}
-static struct bus_type dp_aux_bus_type = {
+static const struct bus_type dp_aux_bus_type = {
.name = "dp-aux",
.match = dp_aux_ep_match,
.probe = dp_aux_ep_probe,
@@ -292,7 +292,7 @@ int of_dp_aux_populate_bus(struct drm_dp_aux *aux,
aux_ep->dev.parent = aux->dev;
aux_ep->dev.bus = &dp_aux_bus_type;
aux_ep->dev.type = &dp_aux_device_type_type;
- aux_ep->dev.of_node = of_node_get(np);
+ device_set_node(&aux_ep->dev, of_fwnode_handle(of_node_get(np)));
dev_set_name(&aux_ep->dev, "aux-%s", dev_name(aux->dev));
ret = device_register(&aux_ep->dev);
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index 007ceb281d00..56a4965e518c 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -311,16 +311,6 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
if (!aux->transfer)
return;
-#ifndef CONFIG_MEDIA_CEC_RC
- /*
- * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
- * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
- *
- * Do this here as well to ensure the tests against cec_caps are
- * correct.
- */
- cec_caps &= ~CEC_CAP_RC;
-#endif
cancel_delayed_work_sync(&aux->cec.unregister_work);
mutex_lock(&aux->cec.lock);
@@ -337,7 +327,9 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
num_las = CEC_MAX_LOG_ADDRS;
if (aux->cec.adap) {
- if (aux->cec.adap->capabilities == cec_caps &&
+ /* Check if the adapter properties have changed */
+ if ((aux->cec.adap->capabilities & CEC_CAP_MONITOR_ALL) ==
+ (cec_caps & CEC_CAP_MONITOR_ALL) &&
aux->cec.adap->available_log_addrs == num_las) {
/* Unchanged, so just set the phys addr */
cec_s_phys_addr(aux->cec.adap, source_physical_address, false);
diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
index bd61e20770a5..c491e3203bf1 100644
--- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
@@ -52,7 +52,7 @@
* @adapter: I2C adapter for the DDC bus
* @offset: register offset
* @buffer: buffer for return data
- * @size: sizo of the buffer
+ * @size: size of the buffer
*
* Reads @size bytes from the DP dual mode adaptor registers
* starting at @offset.
@@ -116,7 +116,7 @@ EXPORT_SYMBOL(drm_dp_dual_mode_read);
* @adapter: I2C adapter for the DDC bus
* @offset: register offset
* @buffer: buffer for write data
- * @size: sizo of the buffer
+ * @size: size of the buffer
*
* Writes @size bytes to the DP dual mode adaptor registers
* starting at @offset.
@@ -160,11 +160,11 @@ EXPORT_SYMBOL(drm_dp_dual_mode_write);
static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
{
- static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] =
+ static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN + 1] =
"DP-HDMI ADAPTOR\x04";
return memcmp(hdmi_id, dp_dual_mode_hdmi_id,
- sizeof(dp_dual_mode_hdmi_id)) == 0;
+ DP_DUAL_MODE_HDMI_ID_LEN) == 0;
}
static bool is_type1_adaptor(uint8_t adaptor_id)
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index b1ca3a1100da..61c7c2c588c6 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -22,19 +22,21 @@
#include <linux/backlight.h>
#include <linux/delay.h>
+#include <linux/dynamic_debug.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string_helpers.h>
-#include <linux/dynamic_debug.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_panel.h>
@@ -533,6 +535,15 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
mutex_lock(&aux->hw_mutex);
/*
+ * If the device attached to the aux bus is powered down then there's
+ * no reason to attempt a transfer. Error out immediately.
+ */
+ if (aux->powered_down) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /*
* The specification doesn't give any recommendation on how often to
* retry native transactions. We used to retry 7 times like for
* aux i2c transactions but real world devices this wasn't
@@ -600,6 +611,29 @@ int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset)
EXPORT_SYMBOL(drm_dp_dpcd_probe);
/**
+ * drm_dp_dpcd_set_powered() - Set whether the DP device is powered
+ * @aux: DisplayPort AUX channel; for convenience it's OK to pass NULL here
+ * and the function will be a no-op.
+ * @powered: true if powered; false if not
+ *
+ * If the endpoint device on the DP AUX bus is known to be powered down
+ * then this function can be called to make future transfers fail immediately
+ * instead of needing to time out.
+ *
+ * If this function is never called then a device defaults to being powered.
+ */
+void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered)
+{
+ if (!aux)
+ return;
+
+ mutex_lock(&aux->hw_mutex);
+ aux->powered_down = !powered;
+ mutex_unlock(&aux->hw_mutex);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
+
+/**
* drm_dp_dpcd_read() - read a series of bytes from the DPCD
* @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to read
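@@

drm_dp_dpcd_set_powered() is meant to bracket the sink's power transitions so AUX users fail fast instead of timing out. A hedged sketch of how a hypothetical eDP panel driver could call it from its power hooks (foo_panel and its fields are illustrative):

static int foo_panel_prepare(struct drm_panel *panel)
{
	struct foo_panel *p = container_of(panel, struct foo_panel, base);
	int ret;

	ret = regulator_enable(p->supply);
	if (ret)
		return ret;

	/* AUX transfers may succeed again from this point on. */
	drm_dp_dpcd_set_powered(p->aux, true);

	return 0;
}

static int foo_panel_unprepare(struct drm_panel *panel)
{
	struct foo_panel *p = container_of(panel, struct foo_panel, base);

	/* Make further DPCD accesses fail with -EBUSY immediately. */
	drm_dp_dpcd_set_powered(p->aux, false);
	regulator_disable(p->supply);

	return 0;
}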
@@ -746,6 +780,128 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_dpcd_read_phy_link_status);
+static int read_payload_update_status(struct drm_dp_aux *aux)
+{
+ int ret;
+ u8 status;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ return status;
+}
+
+/**
+ * drm_dp_dpcd_write_payload() - Write Virtual Channel information to payload table
+ * @aux: DisplayPort AUX channel
+ * @vcpid: Virtual Channel Payload ID
+ * @start_time_slot: Starting time slot
+ * @time_slot_count: Time slot count
+ *
+ * Write the Virtual Channel payload allocation table, checking the payload
+ * update status and retrying as necessary.
+ *
+ * Returns:
+ * 0 on success, negative error otherwise
+ */
+int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux,
+ int vcpid, u8 start_time_slot, u8 time_slot_count)
+{
+ u8 payload_alloc[3], status;
+ int ret;
+ int retries = 0;
+
+ drm_dp_dpcd_writeb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ DP_PAYLOAD_TABLE_UPDATED);
+
+ payload_alloc[0] = vcpid;
+ payload_alloc[1] = start_time_slot;
+ payload_alloc[2] = time_slot_count;
+
+ ret = drm_dp_dpcd_write(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
+ if (ret != 3) {
+ drm_dbg_kms(aux->drm_dev, "failed to write payload allocation %d\n", ret);
+ goto fail;
+ }
+
+retry:
+ ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev, "failed to read payload table status %d\n", ret);
+ goto fail;
+ }
+
+ if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
+ retries++;
+ if (retries < 20) {
+ usleep_range(10000, 20000);
+ goto retry;
+ }
+ drm_dbg_kms(aux->drm_dev, "status not set after read payload table status %d\n",
+ status);
+ ret = -EINVAL;
+ goto fail;
+ }
+ ret = 0;
+fail:
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_dpcd_write_payload);
+
+/**
+ * drm_dp_dpcd_clear_payload() - Clear the entire VC Payload ID table
+ * @aux: DisplayPort AUX channel
+ *
+ * Clear the entire VC Payload ID table.
+ *
+ * Returns: 0 on success, negative error code on errors.
+ */
+int drm_dp_dpcd_clear_payload(struct drm_dp_aux *aux)
+{
+ return drm_dp_dpcd_write_payload(aux, 0, 0, 0x3f);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_clear_payload);
+
+/**
+ * drm_dp_dpcd_poll_act_handled() - Poll for ACT handled status
+ * @aux: DisplayPort AUX channel
+ * @timeout_ms: Timeout in ms
+ *
+ * Try waiting for the sink to finish updating its payload table by polling for
+ * the ACT handled bit of DP_PAYLOAD_TABLE_UPDATE_STATUS for up to @timeout_ms
+ * milliseconds, defaulting to 3000 ms if 0.
+ *
+ * Returns:
+ * 0 if the ACT was handled in time, negative error code on failure.
+ */
+int drm_dp_dpcd_poll_act_handled(struct drm_dp_aux *aux, int timeout_ms)
+{
+ int ret, status;
+
+ /* default to 3 seconds, this is arbitrary */
+ timeout_ms = timeout_ms ?: 3000;
+
+ ret = readx_poll_timeout(read_payload_update_status, aux, status,
+ status & DP_PAYLOAD_ACT_HANDLED || status < 0,
+ 200, timeout_ms * USEC_PER_MSEC);
+ if (ret < 0 && status >= 0) {
+ drm_err(aux->drm_dev, "Failed to get ACT after %d ms, last status: %02x\n",
+ timeout_ms, status);
+ return -EINVAL;
+ } else if (status < 0) {
+ /*
+ * Failure here isn't unexpected - the hub may have
+ * just been unplugged
+ */
+ drm_dbg_kms(aux->drm_dev, "Failed to read payload table status: %d\n", status);
+ return status;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dpcd_poll_act_handled);
+
static bool is_edid_digital_input_dp(const struct drm_edid *drm_edid)
{
/* FIXME: get rid of drm_edid_raw() */
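
Together these three helpers cover the DPCD side of the MST payload table programming sequence: write the allocation, then wait for the ACT. A hedged sketch of a driver using them for a single stream (foo_ names are illustrative):

static int foo_enable_stream_payload(struct drm_dp_aux *aux)
{
	int ret;

	/* Allocate time slots 1..63 to VC payload ID 1. */
	ret = drm_dp_dpcd_write_payload(aux, 1, 1, 63);
	if (ret < 0)
		return ret;

	/* Passing 0 selects the default 3000 ms timeout. */
	return drm_dp_dpcd_poll_act_handled(aux, 0);
}

static int foo_teardown_payloads(struct drm_dp_aux *aux)
{
	/* Same as drm_dp_dpcd_write_payload(aux, 0, 0, 0x3f). */
	return drm_dp_dpcd_clear_payload(aux);
}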
@@ -1858,6 +2014,9 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
struct drm_dp_aux_msg msg;
int err = 0;
+ if (aux->powered_down)
+ return -EBUSY;
+
dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES);
memset(&msg, 0, sizeof(msg));
@@ -2078,7 +2237,7 @@ EXPORT_SYMBOL(drm_dp_aux_init);
* drm_dp_aux_register() in &drm_connector_funcs.late_register, and likewise to
* call drm_dp_aux_unregister() in &drm_connector_funcs.early_unregister.
* Functions which don't follow this will likely Oops when
- * %CONFIG_DRM_DP_AUX_CHARDEV is enabled.
+ * %CONFIG_DRM_DISPLAY_DP_AUX_CHARDEV is enabled.
*
* For devices where the AUX channel is a device that exists independently of
* the &drm_device that uses it, such as SoCs and bridge devices, it is
@@ -2246,6 +2405,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) },
/* Synaptics DP1.4 MST hubs require DSC for some modes on which it applies HBLANK expansion. */
{ OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) },
+ /* MediaTek panels (at least in U3224KBA) require DSC for modes with a short HBLANK on UHBR links. */
+ { OUI(0x00, 0x0C, 0xE7), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) },
/* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */
{ OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) },
};
@@ -2290,6 +2451,31 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
#undef DEVICE_ID_ANY
#undef DEVICE_ID
+static int drm_dp_read_ident(struct drm_dp_aux *aux, unsigned int offset,
+ struct drm_dp_dpcd_ident *ident)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
+
+ return ret < 0 ? ret : 0;
+}
+
+static void drm_dp_dump_desc(struct drm_dp_aux *aux,
+ const char *device_name, const struct drm_dp_desc *desc)
+{
+ const struct drm_dp_dpcd_ident *ident = &desc->ident;
+
+ drm_dbg_kms(aux->drm_dev,
+ "%s: %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
+ aux->name, device_name,
+ (int)sizeof(ident->oui), ident->oui,
+ (int)strnlen(ident->device_id, sizeof(ident->device_id)), ident->device_id,
+ ident->hw_rev >> 4, ident->hw_rev & 0xf,
+ ident->sw_major_rev, ident->sw_minor_rev,
+ desc->quirks);
+}
+
/**
* drm_dp_read_desc - read sink/branch descriptor from DPCD
* @aux: DisplayPort AUX channel
@@ -2306,28 +2492,49 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
{
struct drm_dp_dpcd_ident *ident = &desc->ident;
unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI;
- int ret, dev_id_len;
+ int ret;
- ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
+ ret = drm_dp_read_ident(aux, offset, ident);
if (ret < 0)
return ret;
desc->quirks = drm_dp_get_quirks(ident, is_branch);
- dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id));
-
- drm_dbg_kms(aux->drm_dev,
- "%s: DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
- aux->name, is_branch ? "branch" : "sink",
- (int)sizeof(ident->oui), ident->oui, dev_id_len,
- ident->device_id, ident->hw_rev >> 4, ident->hw_rev & 0xf,
- ident->sw_major_rev, ident->sw_minor_rev, desc->quirks);
+ drm_dp_dump_desc(aux, is_branch ? "DP branch" : "DP sink", desc);
return 0;
}
EXPORT_SYMBOL(drm_dp_read_desc);
/**
+ * drm_dp_dump_lttpr_desc - read and dump the DPCD descriptor for an LTTPR PHY
+ * @aux: DisplayPort AUX channel
+ * @dp_phy: LTTPR PHY instance
+ *
+ * Read the DPCD LTTPR PHY descriptor for @dp_phy and print a debug message
+ * with its details to dmesg.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_dump_lttpr_desc(struct drm_dp_aux *aux, enum drm_dp_phy dp_phy)
+{
+ struct drm_dp_desc desc = {};
+ int ret;
+
+ if (drm_WARN_ON(aux->drm_dev, dp_phy < DP_PHY_LTTPR1 || dp_phy > DP_MAX_LTTPR_COUNT))
+ return -EINVAL;
+
+ ret = drm_dp_read_ident(aux, DP_OUI_PHY_REPEATER(dp_phy), &desc.ident);
+ if (ret < 0)
+ return ret;
+
+ drm_dp_dump_desc(aux, drm_dp_phy_name(dp_phy), &desc);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dump_lttpr_desc);
+
+/**
* drm_dp_dsc_sink_bpp_incr() - Get bits per pixel increment
* @dsc_dpcd: DSC capabilities from DPCD
*
@@ -2337,7 +2544,7 @@ u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
u8 bpp_increment_dpcd = dsc_dpcd[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT];
- switch (bpp_increment_dpcd) {
+ switch (bpp_increment_dpcd & DP_DSC_BITS_PER_PIXEL_MASK) {
case DP_DSC_BITS_PER_PIXEL_1_16:
return 16;
case DP_DSC_BITS_PER_PIXEL_1_8:
@@ -2897,25 +3104,156 @@ static const char *dp_content_type_get_name(enum dp_content_type content_type)
}
}
-void drm_dp_vsc_sdp_log(const char *level, struct device *dev,
- const struct drm_dp_vsc_sdp *vsc)
+void drm_dp_vsc_sdp_log(struct drm_printer *p, const struct drm_dp_vsc_sdp *vsc)
{
-#define DP_SDP_LOG(fmt, ...) dev_printk(level, dev, fmt, ##__VA_ARGS__)
- DP_SDP_LOG("DP SDP: %s, revision %u, length %u\n", "VSC",
+ drm_printf(p, "DP SDP: VSC, revision %u, length %u\n",
vsc->revision, vsc->length);
- DP_SDP_LOG(" pixelformat: %s\n",
+ drm_printf(p, " pixelformat: %s\n",
dp_pixelformat_get_name(vsc->pixelformat));
- DP_SDP_LOG(" colorimetry: %s\n",
+ drm_printf(p, " colorimetry: %s\n",
dp_colorimetry_get_name(vsc->pixelformat, vsc->colorimetry));
- DP_SDP_LOG(" bpc: %u\n", vsc->bpc);
- DP_SDP_LOG(" dynamic range: %s\n",
+ drm_printf(p, " bpc: %u\n", vsc->bpc);
+ drm_printf(p, " dynamic range: %s\n",
dp_dynamic_range_get_name(vsc->dynamic_range));
- DP_SDP_LOG(" content type: %s\n",
+ drm_printf(p, " content type: %s\n",
dp_content_type_get_name(vsc->content_type));
-#undef DP_SDP_LOG
}
EXPORT_SYMBOL(drm_dp_vsc_sdp_log);
+void drm_dp_as_sdp_log(struct drm_printer *p, const struct drm_dp_as_sdp *as_sdp)
+{
+ drm_printf(p, "DP SDP: AS_SDP, revision %u, length %u\n",
+ as_sdp->revision, as_sdp->length);
+ drm_printf(p, " vtotal: %d\n", as_sdp->vtotal);
+ drm_printf(p, " target_rr: %d\n", as_sdp->target_rr);
+ drm_printf(p, " duration_incr_ms: %d\n", as_sdp->duration_incr_ms);
+ drm_printf(p, " duration_decr_ms: %d\n", as_sdp->duration_decr_ms);
+ drm_printf(p, " operation_mode: %d\n", as_sdp->mode);
+}
+EXPORT_SYMBOL(drm_dp_as_sdp_log);
+
+/**
+ * drm_dp_as_sdp_supported() - check if adaptive sync sdp is supported
+ * @aux: DisplayPort AUX channel
+ * @dpcd: DisplayPort configuration data
+ *
+ * Returns true if adaptive sync sdp is supported, else returns false
+ */
+bool drm_dp_as_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ u8 rx_feature;
+
+ if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
+ return false;
+
+ if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
+ &rx_feature) != 1) {
+ drm_dbg_dp(aux->drm_dev,
+ "Failed to read DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1\n");
+ return false;
+ }
+
+ return (rx_feature & DP_ADAPTIVE_SYNC_SDP_SUPPORTED);
+}
+EXPORT_SYMBOL(drm_dp_as_sdp_supported);
+
+/**
+ * drm_dp_vsc_sdp_supported() - check if vsc sdp is supported
+ * @aux: DisplayPort AUX channel
+ * @dpcd: DisplayPort configuration data
+ *
+ * Returns true if vsc sdp is supported, else returns false
+ */
+bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ u8 rx_feature;
+
+ if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
+ return false;
+
+ if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) != 1) {
+ drm_dbg_dp(aux->drm_dev, "failed to read DP_DPRX_FEATURE_ENUMERATION_LIST\n");
+ return false;
+ }
+
+ return (rx_feature & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED);
+}
+EXPORT_SYMBOL(drm_dp_vsc_sdp_supported);
+
+/**
+ * drm_dp_vsc_sdp_pack() - pack a given vsc sdp into generic dp_sdp
+ * @vsc: vsc sdp initialized according to its purpose as defined in
+ * table 2-118 - table 2-120 in DP 1.4a specification
+ * @sdp: valid handle to the generic dp_sdp which will be packed
+ *
+ * Returns length of sdp on success and error code on failure
+ */
+ssize_t drm_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
+ struct dp_sdp *sdp)
+{
+ size_t length = sizeof(struct dp_sdp);
+
+ memset(sdp, 0, sizeof(struct dp_sdp));
+
+ /*
+ * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
+ * VSC SDP Header Bytes
+ */
+ sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
+ sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
+ sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
+ sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
+
+ if (vsc->revision == 0x6) {
+ sdp->db[0] = 1;
+ sdp->db[3] = 1;
+ }
+
+ /*
+ * Revision 0x5 and revision 0x7 supports Pixel Encoding/Colorimetry
+ * Format as per DP 1.4a spec and DP 2.0 respectively.
+ */
+ if (!(vsc->revision == 0x5 || vsc->revision == 0x7))
+ goto out;
+
+ /* VSC SDP Payload for DB16 through DB18 */
+ /* Pixel Encoding and Colorimetry Formats */
+ sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
+ sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */
+
+ switch (vsc->bpc) {
+ case 6:
+ /* 6bpc: 0x0 */
+ break;
+ case 8:
+ sdp->db[17] = 0x1; /* DB17[3:0] */
+ break;
+ case 10:
+ sdp->db[17] = 0x2;
+ break;
+ case 12:
+ sdp->db[17] = 0x3;
+ break;
+ case 16:
+ sdp->db[17] = 0x4;
+ break;
+ default:
+ WARN(1, "Missing case %d\n", vsc->bpc);
+ return -EINVAL;
+ }
+
+ /* Dynamic Range and Component Bit Depth */
+ if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
+ sdp->db[17] |= 0x80; /* DB17[7] */
+
+ /* Content Type */
+ sdp->db[18] = vsc->content_type & 0x7;
+
+out:
+ return length;
+}
+EXPORT_SYMBOL(drm_dp_vsc_sdp_pack);
+
/**
* drm_dp_get_pcon_max_frl_bw() - maximum frl supported by PCON
* @dpcd: DisplayPort configuration data
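@@

A hedged sketch of feeding drm_dp_vsc_sdp_pack(): build a revision 0x5 (Pixel Encoding/Colorimetry Format) VSC SDP for 10bpc RGB and hand the packed bytes to the hardware (foo_encoder and foo_hw_write_sdp() are hypothetical):

static void foo_setup_vsc_sdp(struct foo_encoder *enc)
{
	struct drm_dp_vsc_sdp vsc = {
		.sdp_type = DP_SDP_VSC,
		.revision = 0x5,
		.length = 0x13,
		.pixelformat = DP_PIXELFORMAT_RGB,
		.colorimetry = DP_COLORIMETRY_DEFAULT,
		.bpc = 10,		/* packed as DB17[3:0] = 0x2 */
		.dynamic_range = DP_DYNAMIC_RANGE_VESA,
		.content_type = DP_CONTENT_TYPE_GRAPHICS,
	};
	struct dp_sdp sdp;
	ssize_t len;

	len = drm_dp_vsc_sdp_pack(&vsc, &sdp);
	if (len < 0)
		return;

	foo_hw_write_sdp(enc, &sdp, len);
}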
@@ -3982,6 +4320,13 @@ int drm_dp_bw_overhead(int lane_count, int hactive,
u32 overhead = 1000000;
int symbol_cycles;
+ if (lane_count == 0 || hactive == 0 || bpp_x16 == 0) {
+ DRM_DEBUG_KMS("Invalid BW overhead params: lane_count %d, hactive %d, bpp_x16 " FXP_Q4_FMT "\n",
+ lane_count, hactive,
+ FXP_Q4_ARGS(bpp_x16));
+ return 0;
+ }
+
/*
* DP Standard v2.1 2.6.4.1
* SSC downspread and ref clock variation margin:
@@ -4058,3 +4403,33 @@ int drm_dp_bw_channel_coding_efficiency(bool is_uhbr)
return 800000;
}
EXPORT_SYMBOL(drm_dp_bw_channel_coding_efficiency);
+
+/**
+ * drm_dp_max_dprx_data_rate - Get the max data bandwidth of a DPRX sink
+ * @max_link_rate: max DPRX link rate in 10kbps units
+ * @max_lanes: max DPRX lane count
+ *
+ * Given a link rate and lanes, get the data bandwidth.
+ *
+ * Data bandwidth is the actual payload rate, which depends on the data
+ * bandwidth efficiency and the link rate.
+ *
+ * Note that protocol layers above the DPRX link level considered here can
+ * further limit the maximum data rate. Such layers are the MST topology (with
+ * limits on the link between the source and first branch device as well as on
+ * the whole MST path until the DPRX link) and (Thunderbolt) DP tunnels -
+ * which in turn can encapsulate an MST link with its own limit - with each
+ * SST or MST encapsulated tunnel sharing the BW of a tunnel group.
+ *
+ * Returns the maximum data rate in kBps units.
+ */
+int drm_dp_max_dprx_data_rate(int max_link_rate, int max_lanes)
+{
+ int ch_coding_efficiency =
+ drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate));
+
+ return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate * 10 * max_lanes,
+ ch_coding_efficiency),
+ 1000000 * 8);
+}
+EXPORT_SYMBOL(drm_dp_max_dprx_data_rate);
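
A worked example for the new helper, using the 800000 ppm 8b/10b coding efficiency visible above: an HBR3 x4 sink reports max_link_rate = 810000 (10 kbps units, i.e. 8.1 Gbps per lane) and max_lanes = 4, so drm_dp_max_dprx_data_rate(810000, 4) = 810000 * 10 * 4 * 800000 / 1000000 / 8 = 3240000 kBps, i.e. 25.92 Gbps of payload bandwidth.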
diff --git a/drivers/gpu/drm/display/drm_dp_helper_internal.h b/drivers/gpu/drm/display/drm_dp_helper_internal.h
index 8917fc3af9ec..737949a2820f 100644
--- a/drivers/gpu/drm/display/drm_dp_helper_internal.h
+++ b/drivers/gpu/drm/display/drm_dp_helper_internal.h
@@ -5,7 +5,7 @@
struct drm_dp_aux;
-#ifdef CONFIG_DRM_DP_AUX_CHARDEV
+#ifdef CONFIG_DRM_DISPLAY_DP_AUX_CHARDEV
int drm_dp_aux_dev_init(void);
void drm_dp_aux_dev_exit(void);
int drm_dp_aux_register_devnode(struct drm_dp_aux *aux);
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index f7c6b60629c2..6d09bef671da 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -29,7 +29,6 @@
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <linux/iopoll.h>
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
@@ -68,9 +67,6 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
-static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
- int id, u8 start_slot, u8 num_slots);
-
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
@@ -89,7 +85,7 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
- u8 *guid);
+ guid_t *guid);
static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
@@ -320,6 +316,9 @@ static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr
hdr->broadcast = (buf[idx] >> 7) & 0x1;
hdr->path_msg = (buf[idx] >> 6) & 0x1;
hdr->msg_len = buf[idx] & 0x3f;
+ if (hdr->msg_len < 1) /* min space for body CRC */
+ return false;
+
idx++;
hdr->somt = (buf[idx] >> 7) & 0x1;
hdr->eomt = (buf[idx] >> 6) & 0x1;
@@ -801,7 +800,7 @@ static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_
int idx = 1;
int i;
- memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
+ import_guid(&repmsg->u.link_addr.guid, &raw->msg[idx]);
idx += 16;
repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
idx++;
@@ -829,7 +828,7 @@ static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_
idx++;
if (idx > raw->curlen)
goto fail_len;
- memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
+ import_guid(&repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx]);
idx += 16;
if (idx > raw->curlen)
goto fail_len;
@@ -1029,7 +1028,7 @@ static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mg
msg->req_type = (raw->msg[0] & 0x7f);
if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
- memcpy(msg->u.nak.guid, &raw->msg[1], 16);
+ import_guid(&msg->u.nak.guid, &raw->msg[1]);
msg->u.nak.reason = raw->msg[17];
msg->u.nak.nak_data = raw->msg[18];
return false;
@@ -1078,7 +1077,7 @@ drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_
if (idx > raw->curlen)
goto fail_len;
- memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
+ import_guid(&msg->u.conn_stat.guid, &raw->msg[idx]);
idx += 16;
if (idx > raw->curlen)
goto fail_len;
@@ -1107,7 +1106,7 @@ static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst
if (idx > raw->curlen)
goto fail_len;
- memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
+ import_guid(&msg->u.resource_stat.guid, &raw->msg[idx]);
idx += 16;
if (idx > raw->curlen)
goto fail_len;
@@ -1306,7 +1305,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
}
out:
if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP,
+ DBG_PREFIX);
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
@@ -1593,10 +1593,11 @@ topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
}
static void
-__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
+__dump_topology_ref_history(struct drm_device *drm,
+ struct drm_dp_mst_topology_ref_history *history,
void *ptr, const char *type_str)
{
- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ struct drm_printer p = drm_dbg_printer(drm, DRM_UT_DP, DBG_PREFIX);
char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
int i;
@@ -1638,15 +1639,15 @@ out:
static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
- __dump_topology_ref_history(&mstb->topology_ref_history, mstb,
- "MSTB");
+ __dump_topology_ref_history(mstb->mgr->dev, &mstb->topology_ref_history,
+ mstb, "MSTB");
}
static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
- __dump_topology_ref_history(&port->topology_ref_history, port,
- "Port");
+ __dump_topology_ref_history(port->mgr->dev, &port->topology_ref_history,
+ port, "Port");
}
static __always_inline void
@@ -2172,20 +2173,24 @@ ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
offset, size, buffer);
}
-static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
+static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid)
{
int ret = 0;
- memcpy(mstb->guid, guid, 16);
+ guid_copy(&mstb->guid, guid);
+
+ if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) {
+ u8 buf[UUID_SIZE];
+
+ export_guid(buf, &mstb->guid);
- if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
if (mstb->port_parent) {
ret = drm_dp_send_dpcd_write(mstb->mgr,
mstb->port_parent,
- DP_GUID, 16, mstb->guid);
+ DP_GUID, sizeof(buf), buf);
} else {
ret = drm_dp_dpcd_write(mstb->mgr->aux,
- DP_GUID, mstb->guid, 16);
+ DP_GUID, buf, sizeof(buf));
}
}
@@ -2272,11 +2277,11 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
if (port->pdt != DP_PEER_DEVICE_NONE &&
drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
- port->port_num >= DP_MST_LOGICAL_PORT_0)
+ drm_dp_mst_port_is_logical(port))
port->cached_edid = drm_edid_read_ddc(port->connector,
&port->aux.ddc);
- drm_connector_register(port->connector);
+ drm_connector_dynamic_register(port->connector);
return;
error:
@@ -2337,7 +2342,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- int old_ddps = 0, ret;
+ int ret;
u8 new_pdt = DP_PEER_DEVICE_NONE;
bool new_mcs = 0;
bool created = false, send_link_addr = false, changed = false;
@@ -2370,7 +2375,6 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
*/
drm_modeset_lock(&mgr->base.lock, NULL);
- old_ddps = port->ddps;
changed = port->ddps != port_msg->ddps ||
(port->ddps &&
(port->ldps != port_msg->legacy_device_plug_status ||
@@ -2405,15 +2409,13 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
* Reprobe PBN caps on both hotplug, and when re-probing the link
* for our parent mstb
*/
- if (old_ddps != port->ddps || !created) {
- if (port->ddps && !port->input) {
- ret = drm_dp_send_enum_path_resources(mgr, mstb,
- port);
- if (ret == 1)
- changed = true;
- } else {
- port->full_pbn = 0;
- }
+ if (port->ddps && !port->input) {
+ ret = drm_dp_send_enum_path_resources(mgr, mstb,
+ port);
+ if (ret == 1)
+ changed = true;
+ } else {
+ port->full_pbn = 0;
}
ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
@@ -2568,9 +2570,9 @@ out:
return mstb;
}
-static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
- struct drm_dp_mst_branch *mstb,
- const uint8_t *guid)
+static struct drm_dp_mst_branch *
+get_mst_branch_device_by_guid_helper(struct drm_dp_mst_branch *mstb,
+ const guid_t *guid)
{
struct drm_dp_mst_branch *found_mstb;
struct drm_dp_mst_port *port;
@@ -2578,10 +2580,9 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
if (!mstb)
return NULL;
- if (memcmp(mstb->guid, guid, 16) == 0)
+ if (guid_equal(&mstb->guid, guid))
return mstb;
-
list_for_each_entry(port, &mstb->ports, next) {
found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
@@ -2594,7 +2595,7 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
static struct drm_dp_mst_branch *
drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
- const uint8_t *guid)
+ const guid_t *guid)
{
struct drm_dp_mst_branch *mstb;
int ret;
@@ -2690,18 +2691,18 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
drm_kms_helper_hotplug_event(dev);
}
-static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
- u8 *guid)
+static void drm_dp_mst_queue_probe_work(struct drm_dp_mst_topology_mgr *mgr)
{
- u64 salt;
+ queue_work(system_long_wq, &mgr->work);
+}
- if (memchr_inv(guid, 0, 16))
+static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
+ guid_t *guid)
+{
+ if (!guid_is_null(guid))
return true;
- salt = get_jiffies_64();
-
- memcpy(&guid[0], &salt, sizeof(u64));
- memcpy(&guid[8], &salt, sizeof(u64));
+ guid_gen(guid);
return false;
}
@@ -2824,7 +2825,9 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
if (ret) {
if (drm_debug_enabled(DRM_UT_DP)) {
- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ struct drm_printer p = drm_dbg_printer(mgr->dev,
+ DRM_UT_DP,
+ DBG_PREFIX);
drm_printf(&p, "sideband msg failed to send\n");
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
@@ -2869,7 +2872,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
if (drm_debug_enabled(DRM_UT_DP)) {
- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP,
+ DBG_PREFIX);
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
@@ -2924,7 +2928,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
/* FIXME: Actually do some real error handling here */
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
- if (ret <= 0) {
+ if (ret < 0) {
drm_err(mgr->dev, "Sending link address failed with %d\n", ret);
goto out;
}
@@ -2938,7 +2942,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports);
drm_dp_dump_link_address(mgr, reply);
- ret = drm_dp_check_mstb_guid(mstb, reply->guid);
+ ret = drm_dp_check_mstb_guid(mstb, &reply->guid);
if (ret) {
char buf[64];
@@ -2976,7 +2980,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
mutex_unlock(&mgr->lock);
out:
- if (ret <= 0)
+ if (ret < 0)
mstb->link_address_sent = false;
kfree(txmsg);
return ret < 0 ? ret : changed;
@@ -3259,7 +3263,7 @@ EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
static int drm_dp_create_payload_at_dfp(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_atomic_payload *payload)
{
- return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot,
+ return drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot,
payload->time_slots);
}
@@ -3290,7 +3294,7 @@ static void drm_dp_destroy_payload_at_remote_and_dfp(struct drm_dp_mst_topology_
}
if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_DFP)
- drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0);
+ drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot, 0);
}
/**
@@ -3416,7 +3420,6 @@ EXPORT_SYMBOL(drm_dp_remove_payload_part2);
/**
* drm_dp_add_payload_part2() - Execute payload update part 2
* @mgr: Manager to use.
- * @state: The global atomic state
* @payload: The payload to update
*
* If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this
@@ -3425,14 +3428,13 @@ EXPORT_SYMBOL(drm_dp_remove_payload_part2);
* Returns: 0 on success, negative error code on failure.
*/
int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_atomic_state *state,
struct drm_dp_mst_atomic_payload *payload)
{
int ret = 0;
/* Skip failed payloads */
if (payload->payload_allocation_status != DRM_DP_MST_PAYLOAD_ALLOCATION_DFP) {
- drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
+ drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
payload->port->connector->name);
return -EIO;
}
@@ -3570,8 +3572,7 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
}
/**
- * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
- * @mgr: The &drm_dp_mst_topology_mgr to use
+ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MTP link
* @link_rate: link rate in 10kbits/s units
* @link_lane_count: lane count
*
@@ -3582,17 +3583,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
*
* Returns the BW / timeslot value in 20.12 fixed point format.
*/
-fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
- int link_rate, int link_lane_count)
+fixed20_12 drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
{
int ch_coding_efficiency =
drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(link_rate));
fixed20_12 ret;
- if (link_rate == 0 || link_lane_count == 0)
- drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",
- link_rate, link_lane_count);
-
/* See DP v2.0 2.6.4.2, 2.7.6.3 VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
ret.full = DIV_ROUND_DOWN_ULL(mul_u32_u32(link_rate * link_lane_count,
ch_coding_efficiency),
@@ -3603,24 +3599,30 @@ fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
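+
+/*
+ * Illustrative use (not part of this patch): a driver can derive the number
+ * of time slots needed for a stream from the per-slot value returned above;
+ * "stream_pbn", "link_rate" and "lane_count" are assumed inputs.
+ *
+ * fixed20_12 pbn_per_slot = drm_dp_get_vc_payload_bw(link_rate, lane_count);
+ * int slots = DIV_ROUND_UP(dfixed_const(stream_pbn), pbn_per_slot.full);
+ */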
/**
- * drm_dp_read_mst_cap() - check whether or not a sink supports MST
+ * drm_dp_read_mst_cap() - Read the sink's MST mode capability
* @aux: The DP AUX channel to use
* @dpcd: A cached copy of the DPCD capabilities for this sink
*
- * Returns: %True if the sink supports MST, %false otherwise
+ * Returns: An &enum drm_dp_mst_mode indicating the MST mode capability
*/
-bool drm_dp_read_mst_cap(struct drm_dp_aux *aux,
- const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
u8 mstm_cap;
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
- return false;
+ return DRM_DP_SST;
if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
- return false;
+ return DRM_DP_SST;
+
+ if (mstm_cap & DP_MST_CAP)
+ return DRM_DP_MST;
- return mstm_cap & DP_MST_CAP;
+ if (mstm_cap & DP_SINGLE_STREAM_SIDEBAND_MSG)
+ return DRM_DP_SST_SIDEBAND_MSG;
+
+ return DRM_DP_SST;
}
EXPORT_SYMBOL(drm_dp_read_mst_cap);
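+
+/*
+ * Illustrative use (not part of this patch): a probing driver can branch on
+ * the returned mode; the surrounding code is hypothetical.
+ *
+ * switch (drm_dp_read_mst_cap(aux, dpcd)) {
+ * case DRM_DP_MST:
+ *	... enable the full MST topology manager ...
+ * case DRM_DP_SST_SIDEBAND_MSG:
+ *	... single stream, but sideband messaging is available ...
+ * default:
+ *	... plain SST ...
+ * }
+ */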
@@ -3674,9 +3676,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
/* Write reset payload */
- drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f);
+ drm_dp_dpcd_clear_payload(mgr->aux);
- queue_work(system_long_wq, &mgr->work);
+ drm_dp_mst_queue_probe_work(mgr);
ret = 0;
} else {
@@ -3688,8 +3690,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
ret = 0;
mgr->payload_id_table_cleared = false;
- memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
- memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
+ mgr->reset_rx_state = true;
}
out_unlock:
@@ -3715,6 +3716,33 @@ drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
}
/**
+ * drm_dp_mst_topology_queue_probe - Queue a topology probe
+ * @mgr: manager to probe
+ *
+ * Queue a work to probe the MST topology. Driver's should call this only to
+ * sync the topology's HW->SW state after the MST link's parameters have
+ * changed in a way the state could've become out-of-sync. This is the case
+ * for instance when the link rate between the source and first downstream
+ * branch device has switched between UHBR and non-UHBR rates. Except of those
+ * cases - for instance when a sink gets plugged/unplugged to a port - the SW
+ * state will get updated automatically via MST UP message notifications.
+ */
+void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->lock);
+
+ if (drm_WARN_ON(mgr->dev, !mgr->mst_state || !mgr->mst_primary))
+ goto out_unlock;
+
+ drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
+ drm_dp_mst_queue_probe_work(mgr);
+
+out_unlock:
+ mutex_unlock(&mgr->lock);
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe);
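+
+/*
+ * Illustrative call site (not part of this patch): resync the SW state after
+ * the MST primary link switched between UHBR and non-UHBR rates; the
+ * surrounding driver logic is hypothetical.
+ *
+ * if (drm_dp_is_uhbr_rate(new_rate) != drm_dp_is_uhbr_rate(old_rate))
+ *	drm_dp_mst_topology_queue_probe(mgr);
+ */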
+
+/**
* drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
* @mgr: manager to suspend
*
@@ -3761,8 +3789,9 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
bool sync)
{
+ u8 buf[UUID_SIZE];
+ guid_t guid;
int ret;
- u8 guid[16];
mutex_lock(&mgr->lock);
if (!mgr->mst_primary)
@@ -3783,13 +3812,15 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
}
/* Some hubs forget their guids after they resume */
- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
- if (ret != 16) {
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
- ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+ import_guid(&guid, buf);
+
+ ret = drm_dp_check_mstb_guid(mgr->mst_primary, &guid);
if (ret) {
drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n");
goto out_fail;
@@ -3800,7 +3831,7 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
* state of our in-memory topology back into sync with reality. So,
* restart the probing process as if we're probing a new hub
*/
- queue_work(system_long_wq, &mgr->work);
+ drm_dp_mst_queue_probe_work(mgr);
mutex_unlock(&mgr->lock);
if (sync) {
@@ -3817,6 +3848,11 @@ out_fail:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
+static void reset_msg_rx_state(struct drm_dp_sideband_msg_rx *msg)
+{
+ memset(msg, 0, sizeof(*msg));
+}
+
static bool
drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
struct drm_dp_mst_branch **mstb)
@@ -3895,6 +3931,34 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
return true;
}
+static int get_msg_request_type(u8 data)
+{
+ return data & 0x7f;
+}
+
+static bool verify_rx_request_type(struct drm_dp_mst_topology_mgr *mgr,
+ const struct drm_dp_sideband_msg_tx *txmsg,
+ const struct drm_dp_sideband_msg_rx *rxmsg)
+{
+ const struct drm_dp_sideband_msg_hdr *hdr = &rxmsg->initial_hdr;
+ const struct drm_dp_mst_branch *mstb = txmsg->dst;
+ int tx_req_type = get_msg_request_type(txmsg->msg[0]);
+ int rx_req_type = get_msg_request_type(rxmsg->msg[0]);
+ char rad_str[64];
+
+ if (tx_req_type == rx_req_type)
+ return true;
+
+ drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, rad_str, sizeof(rad_str));
+ drm_dbg_kms(mgr->dev,
+ "Got unexpected MST reply, mstb: %p seqno: %d lct: %d rad: %s rx_req_type: %s (%02x) != tx_req_type: %s (%02x)\n",
+ mstb, hdr->seqno, mstb->lct, rad_str,
+ drm_dp_mst_req_type_str(rx_req_type), rx_req_type,
+ drm_dp_mst_req_type_str(tx_req_type), tx_req_type);
+
+ return false;
+}
+
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
@@ -3910,9 +3974,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
/* find the message */
mutex_lock(&mgr->qlock);
+
txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
struct drm_dp_sideband_msg_tx, next);
- mutex_unlock(&mgr->qlock);
/* Were we actually expecting a response, and from this mstb? */
if (!txmsg || txmsg->dst != mstb) {
@@ -3921,6 +3985,15 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
hdr = &msg->initial_hdr;
drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n",
mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]);
+
+ mutex_unlock(&mgr->qlock);
+
+ goto out_clear_reply;
+ }
+
+ if (!verify_rx_request_type(mgr, txmsg, msg)) {
+ mutex_unlock(&mgr->qlock);
+
goto out_clear_reply;
}
@@ -3936,20 +4009,15 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
txmsg->reply.u.nak.nak_data);
}
- memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
- drm_dp_mst_topology_put_mstb(mstb);
-
- mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
list_del(&txmsg->next);
+
mutex_unlock(&mgr->qlock);
wake_up_all(&mgr->tx_waitq);
- return 0;
-
out_clear_reply:
- memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ reset_msg_rx_state(msg);
out:
if (mstb)
drm_dp_mst_topology_put_mstb(mstb);
@@ -3957,6 +4025,22 @@ out:
return 0;
}
+static bool primary_mstb_probing_is_done(struct drm_dp_mst_topology_mgr *mgr)
+{
+ bool probing_done = false;
+
+ mutex_lock(&mgr->lock);
+
+ if (mgr->mst_primary && drm_dp_mst_topology_try_get_mstb(mgr->mst_primary)) {
+ probing_done = mgr->mst_primary->link_address_sent;
+ drm_dp_mst_topology_put_mstb(mgr->mst_primary);
+ }
+
+ mutex_unlock(&mgr->lock);
+
+ return probing_done;
+}
+
static inline bool
drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_pending_up_req *up_req)
@@ -3967,12 +4051,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
bool hotplug = false, dowork = false;
if (hdr->broadcast) {
- const u8 *guid = NULL;
+ const guid_t *guid = NULL;
if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
- guid = msg->u.conn_stat.guid;
+ guid = &msg->u.conn_stat.guid;
else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
- guid = msg->u.resource_stat.guid;
+ guid = &msg->u.resource_stat.guid;
if (guid)
mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
@@ -3987,8 +4071,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
- dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
- hotplug = true;
+ if (!primary_mstb_probing_is_done(mgr)) {
+ drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.\n");
+ } else {
+ dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
+ hotplug = true;
+ }
}
drm_dp_mst_topology_put_mstb(mstb);
@@ -4031,16 +4119,20 @@ static void drm_dp_mst_up_req_work(struct work_struct *work)
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_pending_up_req *up_req;
+ struct drm_dp_mst_branch *mst_primary;
+ int ret = 0;
if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
- goto out;
+ goto out_clear_reply;
if (!mgr->up_req_recv.have_eomt)
return 0;
up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
- if (!up_req)
- return -ENOMEM;
+ if (!up_req) {
+ ret = -ENOMEM;
+ goto out_clear_reply;
+ }
INIT_LIST_HEAD(&up_req->next);
@@ -4051,12 +4143,23 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
up_req->msg.req_type);
kfree(up_req);
- goto out;
+ goto out_clear_reply;
}
- drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
+ mutex_lock(&mgr->lock);
+ mst_primary = mgr->mst_primary;
+ if (!mst_primary || !drm_dp_mst_topology_try_get_mstb(mst_primary)) {
+ mutex_unlock(&mgr->lock);
+ kfree(up_req);
+ goto out_clear_reply;
+ }
+ mutex_unlock(&mgr->lock);
+
+ drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type,
false);
+ drm_dp_mst_topology_put_mstb(mst_primary);
+
if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
const struct drm_dp_connection_status_notify *conn_stat =
&up_req->msg.u.conn_stat;
@@ -4082,10 +4185,20 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
list_add_tail(&up_req->next, &mgr->up_req_list);
mutex_unlock(&mgr->up_req_lock);
queue_work(system_long_wq, &mgr->up_req_work);
+out_clear_reply:
+ reset_msg_rx_state(&mgr->up_req_recv);
+ return ret;
+}
-out:
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
+static void update_msg_rx_state(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->lock);
+ if (mgr->reset_rx_state) {
+ mgr->reset_rx_state = false;
+ reset_msg_rx_state(&mgr->down_rep_recv);
+ reset_msg_rx_state(&mgr->up_req_recv);
+ }
+ mutex_unlock(&mgr->lock);
}
/**
@@ -4122,6 +4235,8 @@ int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u
*handled = true;
}
+ update_msg_rx_state(mgr);
+
if (esi[1] & DP_DOWN_REP_MSG_RDY) {
ret = drm_dp_mst_handle_down_rep(mgr);
*handled = true;
@@ -4208,7 +4323,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
case DP_PEER_DEVICE_SST_SINK:
ret = connector_status_connected;
/* for logical ports - cache the EDID */
- if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid)
+ if (drm_dp_mst_port_is_logical(port) && !port->cached_edid)
port->cached_edid = drm_edid_read_ddc(connector, &port->aux.ddc);
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
@@ -4630,61 +4745,6 @@ void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_
}
EXPORT_SYMBOL(drm_dp_mst_update_slots);
-static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
- int id, u8 start_slot, u8 num_slots)
-{
- u8 payload_alloc[3], status;
- int ret;
- int retries = 0;
-
- drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
- DP_PAYLOAD_TABLE_UPDATED);
-
- payload_alloc[0] = id;
- payload_alloc[1] = start_slot;
- payload_alloc[2] = num_slots;
-
- ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
- if (ret != 3) {
- drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret);
- goto fail;
- }
-
-retry:
- ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
- if (ret < 0) {
- drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret);
- goto fail;
- }
-
- if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
- retries++;
- if (retries < 20) {
- usleep_range(10000, 20000);
- goto retry;
- }
- drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n",
- status);
- ret = -EINVAL;
- goto fail;
- }
- ret = 0;
-fail:
- return ret;
-}
-
-static int do_get_act_status(struct drm_dp_aux *aux)
-{
- int ret;
- u8 status;
-
- ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
- if (ret < 0)
- return ret;
-
- return status;
-}
-
/**
* drm_dp_check_act_status() - Polls for ACT handled status.
* @mgr: manager to use
@@ -4702,28 +4762,9 @@ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
* There doesn't seem to be any recommended retry count or timeout in
* the MST specification. Since some hubs have been observed to take
* over 1 second to update their payload allocations under certain
- * conditions, we use a rather large timeout value.
+ * conditions, we use a rather large timeout value of 3 seconds.
*/
- const int timeout_ms = 3000;
- int ret, status;
-
- ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
- status & DP_PAYLOAD_ACT_HANDLED || status < 0,
- 200, timeout_ms * USEC_PER_MSEC);
- if (ret < 0 && status >= 0) {
- drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n",
- timeout_ms, status);
- return -EINVAL;
- } else if (status < 0) {
- /*
- * Failure here isn't unexpected - the hub may have
- * just been unplugged
- */
- drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status);
- return status;
- }
-
- return 0;
+ return drm_dp_dpcd_poll_act_handled(mgr->aux, 3000);
}
EXPORT_SYMBOL(drm_dp_check_act_status);
@@ -4943,7 +4984,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
for (i = 0x3; i < 0x8 && buf[i]; i++)
- seq_printf(m, "%c", buf[i]);
+ seq_putc(m, buf[i]);
seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
if (dump_dp_payload_table(mgr, buf))
@@ -5549,7 +5590,6 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_check_mgr);
* drm_dp_atomic_release_time_slots()
*
* Returns:
- *
* 0 if the new state is valid, negative error code otherwise.
*/
int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
@@ -5586,7 +5626,6 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
* topology object.
*
* RETURNS:
- *
* The MST topology state or error pointer.
*/
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
@@ -5606,7 +5645,6 @@ EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
* topology object.
*
* Returns:
- *
* The old MST topology state, or NULL if there's no topology state for this MST mgr
* in the global atomic state
*/
@@ -5631,7 +5669,6 @@ EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state);
* topology object.
*
* Returns:
- *
* The new MST topology state, or NULL if there's no topology state for this MST mgr
* in the global atomic state
*/
@@ -5972,7 +6009,7 @@ static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
return false;
/* Virtual DP Sink (Internal Display Panel) */
- if (port->port_num >= 8)
+ if (drm_dp_mst_port_is_logical(port))
return true;
/* DP-to-HDMI Protocol Converter */
@@ -6000,6 +6037,22 @@ static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
}
/**
+ * drm_dp_mst_aux_for_parent() - Get the AUX device for an MST port's parent
+ * @port: MST port whose parent's AUX device is returned
+ *
+ * Return the AUX device for @port's parent or NULL if port's parent is the
+ * root port.
+ */
+struct drm_dp_aux *drm_dp_mst_aux_for_parent(struct drm_dp_mst_port *port)
+{
+ if (!port->parent || !port->parent->port_parent)
+ return NULL;
+
+ return &port->parent->port_parent->aux;
+}
+EXPORT_SYMBOL(drm_dp_mst_aux_for_parent);
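+
+/*
+ * Illustrative use (not part of this patch): pick the AUX for talking to the
+ * device upstream of a port, falling back to the root AUX for ports of the
+ * primary branch device; variable names are assumptions.
+ *
+ * struct drm_dp_aux *aux = drm_dp_mst_aux_for_parent(port);
+ *
+ * if (!aux)
+ *	aux = port->mgr->aux;
+ */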
+
+/**
* drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
* @port: The port to check. A leaf of the MST tree with an attached display.
*
@@ -6021,6 +6074,7 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
struct drm_dp_aux *immediate_upstream_aux;
struct drm_dp_mst_port *fec_port;
struct drm_dp_desc desc = {};
+ u8 upstream_dsc;
u8 endpoint_fec;
u8 endpoint_dsc;
@@ -6047,8 +6101,6 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
/* DP-to-DP peer device */
if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
- u8 upstream_dsc;
-
if (drm_dp_dpcd_read(&port->aux,
DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
return NULL;
@@ -6094,6 +6146,13 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
+ if (drm_dp_dpcd_read(immediate_upstream_aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ return NULL;
+
+ if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
+ return NULL;
+
if (drm_dp_read_dpcd_caps(immediate_upstream_aux, dpcd_ext) < 0)
return NULL;
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology_internal.h b/drivers/gpu/drm/display/drm_dp_mst_topology_internal.h
index a785ccbfdd73..f41c34e26be2 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology_internal.h
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology_internal.h
@@ -10,7 +10,9 @@
#ifndef _DRM_DP_MST_HELPER_INTERNAL_H_
#define _DRM_DP_MST_HELPER_INTERNAL_H_
-#include <drm/display/drm_dp_mst_helper.h>
+struct drm_dp_sideband_msg_req_body;
+struct drm_dp_sideband_msg_tx;
+struct drm_printer;
void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
new file mode 100644
index 000000000000..90fe07a89260
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -0,0 +1,1950 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/ref_tracker.h>
+#include <linux/types.h>
+
+#include <drm/drm_atomic_state_helper.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_print.h>
+#include <drm/display/drm_dp.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dp_tunnel.h>
+
+#define to_group(__private_obj) \
+ container_of(__private_obj, struct drm_dp_tunnel_group, base)
+
+#define to_group_state(__private_state) \
+ container_of(__private_state, struct drm_dp_tunnel_group_state, base)
+
+#define is_dp_tunnel_private_obj(__obj) \
+ ((__obj)->funcs == &tunnel_group_funcs)
+
+#define for_each_new_group_in_state(__state, __new_group_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_private_objs; \
+ (__i)++) \
+ for_each_if ((__state)->private_objs[__i].ptr && \
+ is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
+ ((__new_group_state) = \
+ to_group_state((__state)->private_objs[__i].new_state), 1))
+
+#define for_each_old_group_in_state(__state, __old_group_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_private_objs; \
+ (__i)++) \
+ for_each_if ((__state)->private_objs[__i].ptr && \
+ is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
+ ((__old_group_state) = \
+ to_group_state((__state)->private_objs[__i].old_state), 1))
+
+#define for_each_tunnel_in_group(__group, __tunnel) \
+ list_for_each_entry(__tunnel, &(__group)->tunnels, node)
+
+#define for_each_tunnel_state(__group_state, __tunnel_state) \
+ list_for_each_entry(__tunnel_state, &(__group_state)->tunnel_states, node)
+
+#define for_each_tunnel_state_safe(__group_state, __tunnel_state, __tunnel_state_tmp) \
+ list_for_each_entry_safe(__tunnel_state, __tunnel_state_tmp, \
+ &(__group_state)->tunnel_states, node)
+
+#define kbytes_to_mbits(__kbytes) \
+ DIV_ROUND_UP((__kbytes) * 8, 1000)
+
+#define DPTUN_BW_ARG(__bw) ((__bw) < 0 ? (__bw) : kbytes_to_mbits(__bw))
+
+#define __tun_prn(__tunnel, __level, __type, __fmt, ...) \
+ drm_##__level##__type((__tunnel)->group->mgr->dev, \
+ "[DPTUN %s][%s] " __fmt, \
+ drm_dp_tunnel_name(__tunnel), \
+ (__tunnel)->aux->name, ## \
+ __VA_ARGS__)
+
+#define tun_dbg(__tunnel, __fmt, ...) \
+ __tun_prn(__tunnel, dbg, _kms, __fmt, ## __VA_ARGS__)
+
+#define tun_dbg_stat(__tunnel, __err, __fmt, ...) do { \
+ if (__err) \
+ __tun_prn(__tunnel, dbg, _kms, __fmt " (Failed, err: %pe)\n", \
+ ## __VA_ARGS__, ERR_PTR(__err)); \
+ else \
+ __tun_prn(__tunnel, dbg, _kms, __fmt " (Ok)\n", \
+ ## __VA_ARGS__); \
+} while (0)
+
+#define tun_dbg_atomic(__tunnel, __fmt, ...) \
+ __tun_prn(__tunnel, dbg, _atomic, __fmt, ## __VA_ARGS__)
+
+#define tun_grp_dbg(__group, __fmt, ...) \
+ drm_dbg_kms((__group)->mgr->dev, \
+ "[DPTUN %s] " __fmt, \
+ drm_dp_tunnel_group_name(__group), ## \
+ __VA_ARGS__)
+
+#define DP_TUNNELING_BASE DP_TUNNELING_OUI
+
+#define __DPTUN_REG_RANGE(__start, __size) \
+ GENMASK_ULL((__start) + (__size) - 1, (__start))
+
+#define DPTUN_REG_RANGE(__addr, __size) \
+ __DPTUN_REG_RANGE((__addr) - DP_TUNNELING_BASE, (__size))
+
+#define DPTUN_REG(__addr) DPTUN_REG_RANGE(__addr, 1)
+
+#define DPTUN_INFO_REG_MASK ( \
+ DPTUN_REG_RANGE(DP_TUNNELING_OUI, DP_TUNNELING_OUI_BYTES) | \
+ DPTUN_REG_RANGE(DP_TUNNELING_DEV_ID, DP_TUNNELING_DEV_ID_BYTES) | \
+ DPTUN_REG(DP_TUNNELING_HW_REV) | \
+ DPTUN_REG(DP_TUNNELING_SW_REV_MAJOR) | \
+ DPTUN_REG(DP_TUNNELING_SW_REV_MINOR) | \
+ DPTUN_REG(DP_TUNNELING_CAPABILITIES) | \
+ DPTUN_REG(DP_IN_ADAPTER_INFO) | \
+ DPTUN_REG(DP_USB4_DRIVER_ID) | \
+ DPTUN_REG(DP_USB4_DRIVER_BW_CAPABILITY) | \
+ DPTUN_REG(DP_IN_ADAPTER_TUNNEL_INFORMATION) | \
+ DPTUN_REG(DP_BW_GRANULARITY) | \
+ DPTUN_REG(DP_ESTIMATED_BW) | \
+ DPTUN_REG(DP_ALLOCATED_BW) | \
+ DPTUN_REG(DP_TUNNELING_MAX_LINK_RATE) | \
+ DPTUN_REG(DP_TUNNELING_MAX_LANE_COUNT) | \
+ DPTUN_REG(DP_DPTX_BW_ALLOCATION_MODE_CONTROL))
+
+static const DECLARE_BITMAP(dptun_info_regs, 64) = {
+ DPTUN_INFO_REG_MASK & -1UL,
+#if BITS_PER_LONG == 32
+ DPTUN_INFO_REG_MASK >> 32,
+#endif
+};
+
+struct drm_dp_tunnel_regs {
+ u8 buf[HWEIGHT64(DPTUN_INFO_REG_MASK)];
+};
+
+struct drm_dp_tunnel_group;
+
+struct drm_dp_tunnel {
+ struct drm_dp_tunnel_group *group;
+
+ struct list_head node;
+
+ struct kref kref;
+ struct ref_tracker *tracker;
+ struct drm_dp_aux *aux;
+ char name[8];
+
+ int bw_granularity;
+ int estimated_bw;
+ int allocated_bw;
+
+ int max_dprx_rate;
+ u8 max_dprx_lane_count;
+
+ u8 adapter_id;
+
+ bool bw_alloc_supported:1;
+ bool bw_alloc_enabled:1;
+ bool has_io_error:1;
+ bool destroyed:1;
+};
+
+struct drm_dp_tunnel_group_state;
+
+struct drm_dp_tunnel_state {
+ struct drm_dp_tunnel_group_state *group_state;
+
+ struct drm_dp_tunnel_ref tunnel_ref;
+
+ struct list_head node;
+
+ u32 stream_mask;
+ int *stream_bw;
+};
+
+struct drm_dp_tunnel_group_state {
+ struct drm_private_state base;
+
+ struct list_head tunnel_states;
+};
+
+struct drm_dp_tunnel_group {
+ struct drm_private_obj base;
+ struct drm_dp_tunnel_mgr *mgr;
+
+ struct list_head tunnels;
+
+ /* available BW including the allocated_bw of all tunnels in the group */
+ int available_bw;
+
+ u8 drv_group_id;
+ char name[8];
+
+ bool active:1;
+};
+
+struct drm_dp_tunnel_mgr {
+ struct drm_device *dev;
+
+ int group_count;
+ struct drm_dp_tunnel_group *groups;
+ wait_queue_head_t bw_req_queue;
+
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
+ struct ref_tracker_dir ref_tracker;
+#endif
+};
+
+/*
+ * The following helpers provide a way to read out the tunneling DPCD
+ * registers with a minimal amount of AUX transfers (1 transfer per contiguous
+ * range, as permitted by the 16 byte per transfer AUX limit), not accessing
+ * other registers to avoid any read side-effects.
+ */
+static int next_reg_area(int *offset)
+{
+ *offset = find_next_bit(dptun_info_regs, 64, *offset);
+
+ return find_next_zero_bit(dptun_info_regs, 64, *offset + 1) - *offset;
+}
+
+#define tunnel_reg_ptr(__regs, __address) ({ \
+ WARN_ON(!test_bit((__address) - DP_TUNNELING_BASE, dptun_info_regs)); \
+ &(__regs)->buf[bitmap_weight(dptun_info_regs, (__address) - DP_TUNNELING_BASE)]; \
+})
+
+static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *regs)
+{
+ int offset = 0;
+ int len;
+
+ while ((len = next_reg_area(&offset))) {
+ int address = DP_TUNNELING_BASE + offset;
+
+ if (drm_dp_dpcd_read(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
+ return -EIO;
+
+ offset += len;
+ }
+
+ return 0;
+}
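+
+/*
+ * Worked example for next_reg_area()/read_tunnel_regs() above (illustrative):
+ * assuming bits 0-4 and 7-8 are set in dptun_info_regs, the first call
+ * returns len 5 with *offset 0 (one 5 byte AUX read), the second returns
+ * len 2 with *offset 7 (one 2 byte read), after which no bits remain -
+ * i.e. one transfer per contiguous register range.
+ */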
+
+static u8 tunnel_reg(const struct drm_dp_tunnel_regs *regs, int address)
+{
+ return *tunnel_reg_ptr(regs, address);
+}
+
+static u8 tunnel_reg_drv_group_id(const struct drm_dp_tunnel_regs *regs)
+{
+ u8 drv_id = tunnel_reg(regs, DP_USB4_DRIVER_ID) & DP_USB4_DRIVER_ID_MASK;
+ u8 group_id = tunnel_reg(regs, DP_IN_ADAPTER_TUNNEL_INFORMATION) & DP_GROUP_ID_MASK;
+
+ if (!group_id)
+ return 0;
+
+ return (drv_id << DP_GROUP_ID_BITS) | group_id;
+}
+
+/* Return granularity in kB/s units */
+static int tunnel_reg_bw_granularity(const struct drm_dp_tunnel_regs *regs)
+{
+ int gr = tunnel_reg(regs, DP_BW_GRANULARITY) & DP_BW_GRANULARITY_MASK;
+
+ if (gr > 2)
+ return -1;
+
+ return (250000 << gr) / 8;
+}
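+
+/*
+ * For example (illustrative): gr == 0 encodes a 0.25 Gb/s granularity,
+ * (250000 << 0) / 8 == 31250 kB/s; gr == 2 encodes 1 Gb/s,
+ * (250000 << 2) / 8 == 125000 kB/s.
+ */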
+
+static int tunnel_reg_max_dprx_rate(const struct drm_dp_tunnel_regs *regs)
+{
+ u8 bw_code = tunnel_reg(regs, DP_TUNNELING_MAX_LINK_RATE);
+
+ return drm_dp_bw_code_to_link_rate(bw_code);
+}
+
+static int tunnel_reg_max_dprx_lane_count(const struct drm_dp_tunnel_regs *regs)
+{
+ return tunnel_reg(regs, DP_TUNNELING_MAX_LANE_COUNT) &
+ DP_TUNNELING_MAX_LANE_COUNT_MASK;
+}
+
+static bool tunnel_reg_bw_alloc_supported(const struct drm_dp_tunnel_regs *regs)
+{
+ u8 cap_mask = DP_TUNNELING_SUPPORT | DP_IN_BW_ALLOCATION_MODE_SUPPORT;
+
+ if ((tunnel_reg(regs, DP_TUNNELING_CAPABILITIES) & cap_mask) != cap_mask)
+ return false;
+
+ return tunnel_reg(regs, DP_USB4_DRIVER_BW_CAPABILITY) &
+ DP_USB4_DRIVER_BW_ALLOCATION_MODE_SUPPORT;
+}
+
+static bool tunnel_reg_bw_alloc_enabled(const struct drm_dp_tunnel_regs *regs)
+{
+ return tunnel_reg(regs, DP_DPTX_BW_ALLOCATION_MODE_CONTROL) &
+ DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE;
+}
+
+static u8 tunnel_group_drv_id(u8 drv_group_id)
+{
+ return drv_group_id >> DP_GROUP_ID_BITS;
+}
+
+static u8 tunnel_group_id(u8 drv_group_id)
+{
+ return drv_group_id & DP_GROUP_ID_MASK;
+}
+
+const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel)
+{
+ return tunnel->name;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_name);
+
+static const char *drm_dp_tunnel_group_name(const struct drm_dp_tunnel_group *group)
+{
+ return group->name;
+}
+
+static struct drm_dp_tunnel_group *
+lookup_or_alloc_group(struct drm_dp_tunnel_mgr *mgr, u8 drv_group_id)
+{
+ struct drm_dp_tunnel_group *group = NULL;
+ int i;
+
+ for (i = 0; i < mgr->group_count; i++) {
+ /*
+ * A tunnel group with 0 group ID shouldn't have more than one
+ * tunnels.
+ */
+ if (tunnel_group_id(drv_group_id) &&
+ mgr->groups[i].drv_group_id == drv_group_id)
+ return &mgr->groups[i];
+
+ if (!group && !mgr->groups[i].active)
+ group = &mgr->groups[i];
+ }
+
+ if (!group) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: Can't allocate more tunnel groups\n");
+ return NULL;
+ }
+
+ group->drv_group_id = drv_group_id;
+ group->active = true;
+
+ /*
+ * The group name format here and elsewhere: Driver-ID:Group-ID:*
+ * (* standing for all DP-Adapters/tunnels in the group).
+ */
+ snprintf(group->name, sizeof(group->name), "%d:%d:*",
+ tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),
+ tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1));
+
+ return group;
+}
+
+static void free_group(struct drm_dp_tunnel_group *group)
+{
+ struct drm_dp_tunnel_mgr *mgr = group->mgr;
+
+ if (drm_WARN_ON(mgr->dev, !list_empty(&group->tunnels)))
+ return;
+
+ group->drv_group_id = 0;
+ group->available_bw = -1;
+ group->active = false;
+}
+
+static struct drm_dp_tunnel *
+tunnel_get(struct drm_dp_tunnel *tunnel)
+{
+ kref_get(&tunnel->kref);
+
+ return tunnel;
+}
+
+static void free_tunnel(struct kref *kref)
+{
+ struct drm_dp_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
+ struct drm_dp_tunnel_group *group = tunnel->group;
+
+ list_del(&tunnel->node);
+ if (list_empty(&group->tunnels))
+ free_group(group);
+
+ kfree(tunnel);
+}
+
+static void tunnel_put(struct drm_dp_tunnel *tunnel)
+{
+ kref_put(&tunnel->kref, free_tunnel);
+}
+
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
+static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+ ref_tracker_alloc(&tunnel->group->mgr->ref_tracker,
+ tracker, GFP_KERNEL);
+}
+
+static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+ ref_tracker_free(&tunnel->group->mgr->ref_tracker,
+ tracker);
+}
+#else
+static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+}
+
+static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+}
+#endif
+
+/**
+ * drm_dp_tunnel_get - Get a reference for a DP tunnel
+ * @tunnel: Tunnel object
+ * @tracker: Debug tracker for the reference
+ *
+ * Get a reference for @tunnel, along with a debug tracker to help locating
+ * the source of a reference leak/double reference put etc. issue.
+ *
+ * The reference must be dropped after use calling drm_dp_tunnel_put()
+ * passing @tunnel and *@tracker returned from here.
+ *
+ * Returns @tunnel - as a convenience - along with *@tracker.
+ */
+struct drm_dp_tunnel *
+drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+ track_tunnel_ref(tunnel, tracker);
+
+ return tunnel_get(tunnel);
+}
+EXPORT_SYMBOL(drm_dp_tunnel_get);
+
+/**
+ * drm_dp_tunnel_put - Put a reference for a DP tunnel
+ * @tunnel: Tunnel object
+ * @tracker: Debug tracker for the reference
+ *
+ * Put a reference for @tunnel along with its debug *@tracker, which
+ * was obtained with drm_dp_tunnel_get().
+ */
+void drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel,
+ struct ref_tracker **tracker)
+{
+ untrack_tunnel_ref(tunnel, tracker);
+
+ tunnel_put(tunnel);
+}
+EXPORT_SYMBOL(drm_dp_tunnel_put);
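+
+/*
+ * Typical pairing (illustrative, not part of this patch):
+ *
+ * struct ref_tracker *tracker;
+ * struct drm_dp_tunnel *tunnel;
+ *
+ * tunnel = drm_dp_tunnel_get(detected_tunnel, &tracker);
+ * ... use tunnel ...
+ * drm_dp_tunnel_put(tunnel, &tracker);
+ */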
+
+static bool add_tunnel_to_group(struct drm_dp_tunnel_mgr *mgr,
+ u8 drv_group_id,
+ struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_group *group;
+
+ group = lookup_or_alloc_group(mgr, drv_group_id);
+ if (!group)
+ return false;
+
+ tunnel->group = group;
+ list_add(&tunnel->node, &group->tunnels);
+
+ return true;
+}
+
+static struct drm_dp_tunnel *
+create_tunnel(struct drm_dp_tunnel_mgr *mgr,
+ struct drm_dp_aux *aux,
+ const struct drm_dp_tunnel_regs *regs)
+{
+ u8 drv_group_id = tunnel_reg_drv_group_id(regs);
+ struct drm_dp_tunnel *tunnel;
+
+ tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
+ if (!tunnel)
+ return NULL;
+
+ INIT_LIST_HEAD(&tunnel->node);
+
+ kref_init(&tunnel->kref);
+
+ tunnel->aux = aux;
+
+ tunnel->adapter_id = tunnel_reg(regs, DP_IN_ADAPTER_INFO) & DP_IN_ADAPTER_NUMBER_MASK;
+
+ snprintf(tunnel->name, sizeof(tunnel->name), "%d:%d:%d",
+ tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),
+ tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1),
+ tunnel->adapter_id & ((1 << DP_IN_ADAPTER_NUMBER_BITS) - 1));
+
+ tunnel->bw_granularity = tunnel_reg_bw_granularity(regs);
+ tunnel->allocated_bw = tunnel_reg(regs, DP_ALLOCATED_BW) *
+ tunnel->bw_granularity;
+ /*
+ * An initial allocated BW of 0 indicates an undefined state: the
+ * actual allocation is determined by the TBT CM, usually following a
+ * legacy allocation policy (based on the max DPRX caps). From the
+ * driver's POV the state becomes defined only after the first
+ * allocation request.
+ */
+ if (!tunnel->allocated_bw)
+ tunnel->allocated_bw = -1;
+
+ tunnel->bw_alloc_supported = tunnel_reg_bw_alloc_supported(regs);
+ tunnel->bw_alloc_enabled = tunnel_reg_bw_alloc_enabled(regs);
+
+ if (!add_tunnel_to_group(mgr, drv_group_id, tunnel)) {
+ kfree(tunnel);
+
+ return NULL;
+ }
+
+ track_tunnel_ref(tunnel, &tunnel->tracker);
+
+ return tunnel;
+}
+
+static void destroy_tunnel(struct drm_dp_tunnel *tunnel)
+{
+ untrack_tunnel_ref(tunnel, &tunnel->tracker);
+ tunnel_put(tunnel);
+}
+
+/**
+ * drm_dp_tunnel_set_io_error - Set the IO error flag for a DP tunnel
+ * @tunnel: Tunnel object
+ *
+ * Set the IO error flag for @tunnel. Drivers can call this function upon
+ * detecting a failure that affects the tunnel functionality, for instance
+ * after a DP AUX transfer failure on the port @tunnel is connected to.
+ *
+ * This disables further management of @tunnel, including any related
+ * AUX accesses for tunneling DPCD registers, returning error to the
+ * initiators of these. The driver is supposed to drop this tunnel and -
+ * optionally - recreate it.
+ */
+void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel)
+{
+ tunnel->has_io_error = true;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_set_io_error);
+
+#define SKIP_DPRX_CAPS_CHECK BIT(0)
+#define ALLOW_ALLOCATED_BW_CHANGE BIT(1)
+static bool tunnel_regs_are_valid(struct drm_dp_tunnel_mgr *mgr,
+ const struct drm_dp_tunnel_regs *regs,
+ unsigned int flags)
+{
+ u8 drv_group_id = tunnel_reg_drv_group_id(regs);
+ bool check_dprx = !(flags & SKIP_DPRX_CAPS_CHECK);
+ bool ret = true;
+
+ if (!tunnel_reg_bw_alloc_supported(regs)) {
+ if (tunnel_group_id(drv_group_id)) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: A non-zero group ID is only allowed with BWA support\n");
+ ret = false;
+ }
+
+ if (tunnel_reg(regs, DP_ALLOCATED_BW)) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: BW is allocated without BWA support\n");
+ ret = false;
+ }
+
+ return ret;
+ }
+
+ if (!tunnel_group_id(drv_group_id)) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: BWA support requires a non-zero group ID\n");
+ ret = false;
+ }
+
+ if (check_dprx && hweight8(tunnel_reg_max_dprx_lane_count(regs)) != 1) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: Invalid DPRX lane count: %d\n",
+ tunnel_reg_max_dprx_lane_count(regs));
+
+ ret = false;
+ }
+
+ if (check_dprx && !tunnel_reg_max_dprx_rate(regs)) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: DPRX rate is 0\n");
+
+ ret = false;
+ }
+
+ if (tunnel_reg_bw_granularity(regs) < 0) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: Invalid BW granularity\n");
+
+ ret = false;
+ }
+
+ if (tunnel_reg(regs, DP_ALLOCATED_BW) > tunnel_reg(regs, DP_ESTIMATED_BW)) {
+ drm_dbg_kms(mgr->dev,
+ "DPTUN: Allocated BW %d > estimated BW %d Mb/s\n",
+ DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) *
+ tunnel_reg_bw_granularity(regs)),
+ DPTUN_BW_ARG(tunnel_reg(regs, DP_ESTIMATED_BW) *
+ tunnel_reg_bw_granularity(regs)));
+
+ ret = false;
+ }
+
+ return ret;
+}
+
+static int tunnel_allocated_bw(const struct drm_dp_tunnel *tunnel)
+{
+ return max(tunnel->allocated_bw, 0);
+}
+
+static bool tunnel_info_changes_are_valid(struct drm_dp_tunnel *tunnel,
+ const struct drm_dp_tunnel_regs *regs,
+ unsigned int flags)
+{
+ u8 new_drv_group_id = tunnel_reg_drv_group_id(regs);
+ bool ret = true;
+
+ if (tunnel->bw_alloc_supported != tunnel_reg_bw_alloc_supported(regs)) {
+ tun_dbg(tunnel,
+ "BW alloc support has changed %s -> %s\n",
+ str_yes_no(tunnel->bw_alloc_supported),
+ str_yes_no(tunnel_reg_bw_alloc_supported(regs)));
+
+ ret = false;
+ }
+
+ if (tunnel->group->drv_group_id != new_drv_group_id) {
+ tun_dbg(tunnel,
+ "Driver/group ID has changed %d:%d:* -> %d:%d:*\n",
+ tunnel_group_drv_id(tunnel->group->drv_group_id),
+ tunnel_group_id(tunnel->group->drv_group_id),
+ tunnel_group_drv_id(new_drv_group_id),
+ tunnel_group_id(new_drv_group_id));
+
+ ret = false;
+ }
+
+ if (!tunnel->bw_alloc_supported)
+ return ret;
+
+ if (tunnel->bw_granularity != tunnel_reg_bw_granularity(regs)) {
+ tun_dbg(tunnel,
+ "BW granularity has changed: %d -> %d Mb/s\n",
+ DPTUN_BW_ARG(tunnel->bw_granularity),
+ DPTUN_BW_ARG(tunnel_reg_bw_granularity(regs)));
+
+ ret = false;
+ }
+
+ /*
+ * On some devices at least the BW alloc mode enabled status is always
+ * reported as 0, so skip checking that here.
+ */
+
+ if (!(flags & ALLOW_ALLOCATED_BW_CHANGE) &&
+ tunnel_allocated_bw(tunnel) !=
+ tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity) {
+ tun_dbg(tunnel,
+ "Allocated BW has changed: %d -> %d Mb/s\n",
+ DPTUN_BW_ARG(tunnel->allocated_bw),
+ DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity));
+
+ ret = false;
+ }
+
+ return ret;
+}
+
+static int
+read_and_verify_tunnel_regs(struct drm_dp_tunnel *tunnel,
+ struct drm_dp_tunnel_regs *regs,
+ unsigned int flags)
+{
+ int err;
+
+ err = read_tunnel_regs(tunnel->aux, regs);
+ if (err < 0) {
+ drm_dp_tunnel_set_io_error(tunnel);
+
+ return err;
+ }
+
+ if (!tunnel_regs_are_valid(tunnel->group->mgr, regs, flags))
+ return -EINVAL;
+
+ if (!tunnel_info_changes_are_valid(tunnel, regs, flags))
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool update_dprx_caps(struct drm_dp_tunnel *tunnel, const struct drm_dp_tunnel_regs *regs)
+{
+ bool changed = false;
+
+ if (tunnel_reg_max_dprx_rate(regs) != tunnel->max_dprx_rate) {
+ tunnel->max_dprx_rate = tunnel_reg_max_dprx_rate(regs);
+ changed = true;
+ }
+
+ if (tunnel_reg_max_dprx_lane_count(regs) != tunnel->max_dprx_lane_count) {
+ tunnel->max_dprx_lane_count = tunnel_reg_max_dprx_lane_count(regs);
+ changed = true;
+ }
+
+ return changed;
+}
+
+static int dev_id_len(const u8 *dev_id, int max_len)
+{
+ while (max_len && dev_id[max_len - 1] == '\0')
+ max_len--;
+
+ return max_len;
+}
+
+static int get_max_dprx_bw(const struct drm_dp_tunnel *tunnel)
+{
+ int max_dprx_bw = drm_dp_max_dprx_data_rate(tunnel->max_dprx_rate,
+ tunnel->max_dprx_lane_count);
+
+ /*
+ * A BW request of roundup(max_dprx_bw, tunnel->bw_granularity) results in
+ * an allocation of max_dprx_bw. A BW request above this rounded-up
+ * value will fail.
+ */
+ return min(roundup(max_dprx_bw, tunnel->bw_granularity),
+ MAX_DP_REQUEST_BW * tunnel->bw_granularity);
+}
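+
+/*
+ * Worked example for the roundup above (illustrative): with a max DPRX BW of
+ * 100000 kB/s and a 31250 kB/s granularity, a request of
+ * roundup(100000, 31250) == 125000 kB/s still results in a 100000 kB/s
+ * allocation, while any request above that value fails.
+ */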
+
+static int get_max_tunnel_bw(const struct drm_dp_tunnel *tunnel)
+{
+ return min(get_max_dprx_bw(tunnel), tunnel->group->available_bw);
+}
+
+/**
+ * drm_dp_tunnel_detect - Detect DP tunnel on the link
+ * @mgr: Tunnel manager
+ * @aux: DP AUX on which the tunnel will be detected
+ *
+ * Detect if there is any DP tunnel on the link and add it to the tunnel
+ * group's tunnel list.
+ *
+ * Returns a pointer to a tunnel on success, or an ERR_PTR() error on
+ * failure.
+ */
+struct drm_dp_tunnel *
+drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
+ struct drm_dp_aux *aux)
+{
+ struct drm_dp_tunnel_regs regs;
+ struct drm_dp_tunnel *tunnel;
+ int err;
+
+ err = read_tunnel_regs(aux, &regs);
+ if (err)
+ return ERR_PTR(err);
+
+ if (!(tunnel_reg(&regs, DP_TUNNELING_CAPABILITIES) &
+ DP_TUNNELING_SUPPORT))
+ return ERR_PTR(-ENODEV);
+
+ /* The DPRX caps are valid only after enabling BW alloc mode. */
+ if (!tunnel_regs_are_valid(mgr, &regs, SKIP_DPRX_CAPS_CHECK))
+ return ERR_PTR(-EINVAL);
+
+ tunnel = create_tunnel(mgr, aux, &regs);
+ if (!tunnel)
+ return ERR_PTR(-ENOMEM);
+
+ tun_dbg(tunnel,
+ "OUI:%*phD DevID:%*pE Rev-HW:%d.%d SW:%d.%d PR-Sup:%s BWA-Sup:%s BWA-En:%s\n",
+ DP_TUNNELING_OUI_BYTES,
+ tunnel_reg_ptr(&regs, DP_TUNNELING_OUI),
+ dev_id_len(tunnel_reg_ptr(&regs, DP_TUNNELING_DEV_ID), DP_TUNNELING_DEV_ID_BYTES),
+ tunnel_reg_ptr(&regs, DP_TUNNELING_DEV_ID),
+ (tunnel_reg(&regs, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MAJOR_MASK) >>
+ DP_TUNNELING_HW_REV_MAJOR_SHIFT,
+ (tunnel_reg(&regs, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MINOR_MASK) >>
+ DP_TUNNELING_HW_REV_MINOR_SHIFT,
+ tunnel_reg(&regs, DP_TUNNELING_SW_REV_MAJOR),
+ tunnel_reg(&regs, DP_TUNNELING_SW_REV_MINOR),
+ str_yes_no(tunnel_reg(&regs, DP_TUNNELING_CAPABILITIES) &
+ DP_PANEL_REPLAY_OPTIMIZATION_SUPPORT),
+ str_yes_no(tunnel->bw_alloc_supported),
+ str_yes_no(tunnel->bw_alloc_enabled));
+
+ return tunnel;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_detect);
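+
+/*
+ * Illustrative detection flow (not part of this patch); error handling is
+ * trimmed and the calling context is hypothetical:
+ *
+ * tunnel = drm_dp_tunnel_detect(mgr, aux);
+ * if (!IS_ERR(tunnel) && drm_dp_tunnel_enable_bw_alloc(tunnel) == 0)
+ *	... manage the tunnel BW via drm_dp_tunnel_alloc_bw() ...
+ */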
+
+/**
+ * drm_dp_tunnel_destroy - Destroy tunnel object
+ * @tunnel: Tunnel object
+ *
+ * Remove the tunnel from the tunnel topology and destroy it.
+ *
+ * Returns 0 on success, -ENODEV if the tunnel has been destroyed already.
+ */
+int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel)
+{
+ if (!tunnel)
+ return 0;
+
+ if (drm_WARN_ON(tunnel->group->mgr->dev, tunnel->destroyed))
+ return -ENODEV;
+
+ tun_dbg(tunnel, "destroying\n");
+
+ tunnel->destroyed = true;
+ destroy_tunnel(tunnel);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_destroy);
+
+static int check_tunnel(const struct drm_dp_tunnel *tunnel)
+{
+ if (tunnel->destroyed)
+ return -ENODEV;
+
+ if (tunnel->has_io_error)
+ return -EIO;
+
+ return 0;
+}
+
+static int group_allocated_bw(struct drm_dp_tunnel_group *group)
+{
+ struct drm_dp_tunnel *tunnel;
+ int group_allocated_bw = 0;
+
+ for_each_tunnel_in_group(group, tunnel) {
+ if (check_tunnel(tunnel) == 0 &&
+ tunnel->bw_alloc_enabled)
+ group_allocated_bw += tunnel_allocated_bw(tunnel);
+ }
+
+ return group_allocated_bw;
+}
+
+/*
+ * The estimated BW reported by the TBT Connection Manager for each tunnel in
+ * a group includes the BW already allocated for the given tunnel and the
+ * unallocated BW which is free to be used by any tunnel in the group.
+ */
+static int group_free_bw(const struct drm_dp_tunnel *tunnel)
+{
+ return tunnel->estimated_bw - tunnel_allocated_bw(tunnel);
+}
+
+static int calc_group_available_bw(const struct drm_dp_tunnel *tunnel)
+{
+ return group_allocated_bw(tunnel->group) +
+ group_free_bw(tunnel);
+}
+
+static int update_group_available_bw(struct drm_dp_tunnel *tunnel,
+ const struct drm_dp_tunnel_regs *regs)
+{
+ struct drm_dp_tunnel *tunnel_iter;
+ int group_available_bw;
+ bool changed;
+
+ tunnel->estimated_bw = tunnel_reg(regs, DP_ESTIMATED_BW) * tunnel->bw_granularity;
+
+ if (calc_group_available_bw(tunnel) == tunnel->group->available_bw)
+ return 0;
+
+ for_each_tunnel_in_group(tunnel->group, tunnel_iter) {
+ int err;
+
+ if (tunnel_iter == tunnel)
+ continue;
+
+ if (check_tunnel(tunnel_iter) != 0 ||
+ !tunnel_iter->bw_alloc_enabled)
+ continue;
+
+ err = drm_dp_dpcd_probe(tunnel_iter->aux, DP_DPCD_REV);
+ if (err) {
+ tun_dbg(tunnel_iter,
+ "Probe failed, assume disconnected (err %pe)\n",
+ ERR_PTR(err));
+ drm_dp_tunnel_set_io_error(tunnel_iter);
+ }
+ }
+
+ group_available_bw = calc_group_available_bw(tunnel);
+
+ tun_dbg(tunnel, "Updated group available BW: %d->%d\n",
+ DPTUN_BW_ARG(tunnel->group->available_bw),
+ DPTUN_BW_ARG(group_available_bw));
+
+ changed = tunnel->group->available_bw != group_available_bw;
+
+ tunnel->group->available_bw = group_available_bw;
+
+ return changed ? 1 : 0;
+}
+
+static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
+{
+ u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | DP_UNMASK_BW_ALLOCATION_IRQ;
+ u8 val;
+
+ if (drm_dp_dpcd_readb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
+ goto out_err;
+
+ if (enable)
+ val |= mask;
+ else
+ val &= ~mask;
+
+ if (drm_dp_dpcd_writeb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
+ goto out_err;
+
+ tunnel->bw_alloc_enabled = enable;
+
+ return 0;
+
+out_err:
+ drm_dp_tunnel_set_io_error(tunnel);
+
+ return -EIO;
+}
+
+/**
+ * drm_dp_tunnel_enable_bw_alloc - Enable DP tunnel BW allocation mode
+ * @tunnel: Tunnel object
+ *
+ * Enable the DP tunnel BW allocation mode on @tunnel if it supports it.
+ *
+ * Returns 0 in case of success, negative error code otherwise.
+ */
+int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_regs regs;
+ int err;
+
+ err = check_tunnel(tunnel);
+ if (err)
+ return err;
+
+ if (!tunnel->bw_alloc_supported)
+ return -EOPNOTSUPP;
+
+ if (!tunnel_group_id(tunnel->group->drv_group_id))
+ return -EINVAL;
+
+ err = set_bw_alloc_mode(tunnel, true);
+ if (err)
+ goto out;
+
+ /*
+ * After a BWA disable/re-enable sequence the allocated BW can either
+ * stay at its last requested value or, for instance after system
+ * suspend/resume, TBT CM can reset back the allocation to the amount
+ * allocated in the legacy/non-BWA mode. Accordingly allow for the
+ * allocation to change wrt. the last SW state.
+ */
+ err = read_and_verify_tunnel_regs(tunnel, &regs,
+ ALLOW_ALLOCATED_BW_CHANGE);
+ if (err) {
+ set_bw_alloc_mode(tunnel, false);
+
+ goto out;
+ }
+
+ if (!tunnel->max_dprx_rate)
+ update_dprx_caps(tunnel, &regs);
+
+ if (tunnel->group->available_bw == -1) {
+ err = update_group_available_bw(tunnel, &regs);
+ if (err > 0)
+ err = 0;
+ }
+out:
+ tun_dbg_stat(tunnel, err,
+ "Enabling BW alloc mode: DPRX:%dx%d Group alloc:%d/%d Mb/s",
+ tunnel->max_dprx_rate / 100, tunnel->max_dprx_lane_count,
+ DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
+ DPTUN_BW_ARG(tunnel->group->available_bw));
+
+ return err;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_enable_bw_alloc);
+
+/**
+ * drm_dp_tunnel_disable_bw_alloc - Disable DP tunnel BW allocation mode
+ * @tunnel: Tunnel object
+ *
+ * Disable the DP tunnel BW allocation mode on @tunnel.
+ *
+ * Returns 0 in case of success, negative error code otherwise.
+ */
+int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel)
+{
+ int err;
+
+ err = check_tunnel(tunnel);
+ if (err)
+ return err;
+
+ tunnel->allocated_bw = -1;
+
+ err = set_bw_alloc_mode(tunnel, false);
+
+ tun_dbg_stat(tunnel, err, "Disabling BW alloc mode");
+
+ return err;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_disable_bw_alloc);
+
+/**
+ * drm_dp_tunnel_bw_alloc_is_enabled - Query the BW allocation mode enabled state
+ * @tunnel: Tunnel object
+ *
+ * Query if the BW allocation mode is enabled for @tunnel.
+ *
+ * Returns %true if the BW allocation mode is enabled for @tunnel.
+ */
+bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel)
+{
+ return tunnel && tunnel->bw_alloc_enabled;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_bw_alloc_is_enabled);
+
+static int clear_bw_req_state(struct drm_dp_aux *aux)
+{
+ u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;
+
+ if (drm_dp_dpcd_writeb(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
+ return -EIO;
+
+ return 0;
+}
+
+static int bw_req_complete(struct drm_dp_aux *aux, bool *status_changed)
+{
+ u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;
+ u8 status_change_mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
+ u8 val;
+ int err;
+
+ if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+ return -EIO;
+
+ *status_changed = val & status_change_mask;
+
+ val &= bw_req_mask;
+
+ if (!val)
+ return -EAGAIN;
+
+ err = clear_bw_req_state(aux);
+ if (err < 0)
+ return err;
+
+ return val == DP_BW_REQUEST_SUCCEEDED ? 0 : -ENOSPC;
+}
+
+static int allocate_tunnel_bw(struct drm_dp_tunnel *tunnel, int bw)
+{
+ struct drm_dp_tunnel_mgr *mgr = tunnel->group->mgr;
+ int request_bw = DIV_ROUND_UP(bw, tunnel->bw_granularity);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ long timeout;
+ int err;
+
+ if (bw < 0) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (request_bw * tunnel->bw_granularity == tunnel->allocated_bw)
+ return 0;
+
+ /* Atomic check should prevent the following. */
+ if (drm_WARN_ON(mgr->dev, request_bw > MAX_DP_REQUEST_BW)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = clear_bw_req_state(tunnel->aux);
+ if (err)
+ goto out;
+
+ if (drm_dp_dpcd_writeb(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ timeout = msecs_to_jiffies(3000);
+ add_wait_queue(&mgr->bw_req_queue, &wait);
+
+ for (;;) {
+ bool status_changed;
+
+ err = bw_req_complete(tunnel->aux, &status_changed);
+ if (err != -EAGAIN)
+ break;
+
+ if (status_changed) {
+ struct drm_dp_tunnel_regs regs;
+
+ err = read_and_verify_tunnel_regs(tunnel, &regs,
+ ALLOW_ALLOCATED_BW_CHANGE);
+ if (err)
+ break;
+ }
+
+ if (!timeout) {
+ err = -ETIMEDOUT;
+ break;
+ }
+
+ timeout = wait_woken(&wait, TASK_UNINTERRUPTIBLE, timeout);
+ };
+
+ remove_wait_queue(&mgr->bw_req_queue, &wait);
+
+ if (err)
+ goto out;
+
+ tunnel->allocated_bw = request_bw * tunnel->bw_granularity;
+
+out:
+ tun_dbg_stat(tunnel, err, "Allocating %d/%d Mb/s for tunnel: Group alloc:%d/%d Mb/s",
+ DPTUN_BW_ARG(request_bw * tunnel->bw_granularity),
+ DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)),
+ DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
+ DPTUN_BW_ARG(tunnel->group->available_bw));
+
+ if (err == -EIO)
+ drm_dp_tunnel_set_io_error(tunnel);
+
+ return err;
+}
+
+/**
+ * drm_dp_tunnel_alloc_bw - Allocate BW for a DP tunnel
+ * @tunnel: Tunnel object
+ * @bw: BW in kB/s units
+ *
+ * Allocate @bw kB/s for @tunnel. The allocated BW must be freed after use by
+ * calling this function for the same tunnel setting @bw to 0.
+ *
+ * Returns 0 in case of success, a negative error code otherwise.
+ */
+int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw)
+{
+ int err;
+
+ err = check_tunnel(tunnel);
+ if (err)
+ return err;
+
+ return allocate_tunnel_bw(tunnel, bw);
+}
+EXPORT_SYMBOL(drm_dp_tunnel_alloc_bw);
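+
+/*
+ * Illustrative usage (not part of this patch); "stream_bw_kBps" is an
+ * assumed, driver-computed value:
+ *
+ * err = drm_dp_tunnel_alloc_bw(tunnel, stream_bw_kBps);
+ * ... use the stream, then free the allocation ...
+ * err = drm_dp_tunnel_alloc_bw(tunnel, 0);
+ */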
+
+/**
+ * drm_dp_tunnel_get_allocated_bw - Get the BW allocated for a DP tunnel
+ * @tunnel: Tunnel object
+ *
+ * Get the current BW allocated for @tunnel. After the tunnel is created /
+ * resumed and the BW allocation mode is enabled for it, the allocation
+ * becomes determined only after the first allocation request by the driver
+ * calling drm_dp_tunnel_alloc_bw().
+ *
+ * Return the BW allocated for the tunnel, or -1 if the allocation is
+ * undetermined.
+ */
+int drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel)
+{
+ return tunnel->allocated_bw;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_get_allocated_bw);
+
+/*
+ * Return 0 if the status hasn't changed, 1 if the status has changed, a
+ * negative error code in case of an I/O failure.
+ */
+static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
+{
+ u8 mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
+ u8 val;
+
+ if (drm_dp_dpcd_readb(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
+ goto out_err;
+
+ val &= mask;
+
+ if (val) {
+ if (drm_dp_dpcd_writeb(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
+ goto out_err;
+
+ return 1;
+ }
+
+ if (!drm_dp_tunnel_bw_alloc_is_enabled(tunnel))
+ return 0;
+
+ /*
+ * Check for estimated BW changes explicitly to account for lost
+ * BW change notifications.
+ */
+ if (drm_dp_dpcd_readb(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
+ goto out_err;
+
+ if (val * tunnel->bw_granularity != tunnel->estimated_bw)
+ return 1;
+
+ return 0;
+
+out_err:
+ drm_dp_tunnel_set_io_error(tunnel);
+
+ return -EIO;
+}
+
+/**
+ * drm_dp_tunnel_update_state - Update DP tunnel SW state with the HW state
+ * @tunnel: Tunnel object
+ *
+ * Update the SW state of @tunnel with the HW state.
+ *
+ * Returns 0 if the state has not changed, 1 if it has changed and got updated
+ * successfully and a negative error code otherwise.
+ */
+int drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_regs regs;
+ bool changed = false;
+ int ret;
+
+ ret = check_tunnel(tunnel);
+ if (ret < 0)
+ return ret;
+
+ ret = check_and_clear_status_change(tunnel);
+ if (ret < 0)
+ goto out;
+
+ if (!ret)
+ return 0;
+
+ ret = read_and_verify_tunnel_regs(tunnel, &regs, 0);
+ if (ret)
+ goto out;
+
+ if (update_dprx_caps(tunnel, &regs))
+ changed = true;
+
+ ret = update_group_available_bw(tunnel, &regs);
+ if (ret == 1)
+ changed = true;
+
+out:
+ tun_dbg_stat(tunnel, ret < 0 ? ret : 0,
+ "State update: Changed:%s DPRX:%dx%d Tunnel alloc:%d/%d Group alloc:%d/%d Mb/s",
+ str_yes_no(changed),
+ tunnel->max_dprx_rate / 100, tunnel->max_dprx_lane_count,
+ DPTUN_BW_ARG(tunnel->allocated_bw),
+ DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)),
+ DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
+ DPTUN_BW_ARG(tunnel->group->available_bw));
+
+ if (ret < 0)
+ return ret;
+
+ if (changed)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_update_state);
+
+/*
+ * drm_dp_tunnel_handle_irq - Handle DP tunnel IRQs
+ *
+ * Handle any pending DP tunnel IRQs, waking up waiters for a completion
+ * event.
+ *
+ * Returns 1 if the state of the tunnel has changed which requires calling
+ * drm_dp_tunnel_update_state(), a negative error code in case of a failure,
+ * 0 otherwise.
+ */
+int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *aux)
+{
+ u8 val;
+
+ if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+ return -EIO;
+
+ if (val & (DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED))
+ wake_up_all(&mgr->bw_req_queue);
+
+ if (val & (DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED))
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_handle_irq);
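+
+/*
+ * Illustrative HPD IRQ flow (not part of this patch), tying this helper to
+ * drm_dp_tunnel_update_state(); the calling context is hypothetical:
+ *
+ * if (drm_dp_tunnel_handle_irq(mgr, aux) == 1 &&
+ *     drm_dp_tunnel_update_state(tunnel) == 1)
+ *	... recompute the BW limits of the affected streams ...
+ */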
+
+/**
+ * drm_dp_tunnel_max_dprx_rate - Query the maximum rate of the tunnel's DPRX
+ * @tunnel: Tunnel object
+ *
+ * The function is used to query the maximum link rate of the DPRX connected
+ * to @tunnel. Note that this rate will not be limited by the BW limit of the
+ * tunnel, as opposed to the standard and extended DP_MAX_LINK_RATE DPCD
+ * registers.
+ *
+ * Returns the maximum link rate in 10 kbit/s units.
+ */
+int drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel)
+{
+ return tunnel->max_dprx_rate;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_rate);
+
+/**
+ * drm_dp_tunnel_max_dprx_lane_count - Query the maximum lane count of the tunnel's DPRX
+ * @tunnel: Tunnel object
+ *
+ * The function is used to query the maximum lane count of the DPRX connected
+ * to @tunnel. Note that this lane count will not be limited by the BW limit of
+ * the tunnel, as opposed to the standard and extended DP_MAX_LANE_COUNT DPCD
+ * registers.
+ *
+ * Returns the maximum lane count.
+ */
+int drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel)
+{
+ return tunnel->max_dprx_lane_count;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_lane_count);
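+
+/*
+ * Illustrative sketch: the two DPRX queries above are typically combined to
+ * derive the maximum DPRX data rate, assuming the drm_dp_max_dprx_data_rate()
+ * helper added along with this series:
+ *
+ *	max_bw = drm_dp_max_dprx_data_rate(drm_dp_tunnel_max_dprx_rate(tunnel),
+ *					   drm_dp_tunnel_max_dprx_lane_count(tunnel));
+ */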
+
+/**
+ * drm_dp_tunnel_available_bw - Query the estimated total available BW of the tunnel
+ * @tunnel: Tunnel object
+ *
+ * This function is used to query the estimated total available BW of the
+ * tunnel. This includes the currently allocated and free BW for all the
+ * tunnels in @tunnel's group. The available BW is valid only after the BW
+ * allocation mode has been enabled for the tunnel and its state has been
+ * updated by calling drm_dp_tunnel_update_state().
+ *
+ * Returns the @tunnel group's estimated total available bandwidth in kB/s
+ * units, or -1 if the available BW isn't valid (the BW allocation mode is
+ * not enabled or the tunnel's state hasn't been updated).
+ */
+int drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel)
+{
+ return tunnel->group->available_bw;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_available_bw);
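+
+/*
+ * Illustrative sketch: a driver can reject a mode whose BW requirement
+ * exceeds the group's estimate, with required_bw being the driver's own
+ * computation in matching kB/s units:
+ *
+ *	int avail = drm_dp_tunnel_available_bw(tunnel);
+ *
+ *	if (avail >= 0 && required_bw > avail)
+ *		return MODE_CLOCK_HIGH;
+ */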
+
+static struct drm_dp_tunnel_group_state *
+drm_dp_tunnel_atomic_get_group_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel)
+{
+ return (struct drm_dp_tunnel_group_state *)
+ drm_atomic_get_private_obj_state(state,
+ &tunnel->group->base);
+}
+
+static struct drm_dp_tunnel_state *
+add_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
+ struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_state *tunnel_state;
+
+ tun_dbg_atomic(tunnel,
+ "Adding state for tunnel %p to group state %p\n",
+ tunnel, group_state);
+
+ tunnel_state = kzalloc(sizeof(*tunnel_state), GFP_KERNEL);
+ if (!tunnel_state)
+ return NULL;
+
+ tunnel_state->group_state = group_state;
+
+ drm_dp_tunnel_ref_get(tunnel, &tunnel_state->tunnel_ref);
+
+ INIT_LIST_HEAD(&tunnel_state->node);
+ list_add(&tunnel_state->node, &group_state->tunnel_states);
+
+ return tunnel_state;
+}
+
+static void free_tunnel_state(struct drm_dp_tunnel_state *tunnel_state)
+{
+ tun_dbg_atomic(tunnel_state->tunnel_ref.tunnel,
+ "Freeing state for tunnel %p\n",
+ tunnel_state->tunnel_ref.tunnel);
+
+ list_del(&tunnel_state->node);
+
+ kfree(tunnel_state->stream_bw);
+ drm_dp_tunnel_ref_put(&tunnel_state->tunnel_ref);
+
+ kfree(tunnel_state);
+}
+
+static void free_group_state(struct drm_dp_tunnel_group_state *group_state)
+{
+ struct drm_dp_tunnel_state *tunnel_state;
+ struct drm_dp_tunnel_state *tunnel_state_tmp;
+
+ for_each_tunnel_state_safe(group_state, tunnel_state, tunnel_state_tmp)
+ free_tunnel_state(tunnel_state);
+
+ kfree(group_state);
+}
+
+static struct drm_dp_tunnel_state *
+get_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
+ const struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_state *tunnel_state;
+
+ for_each_tunnel_state(group_state, tunnel_state)
+ if (tunnel_state->tunnel_ref.tunnel == tunnel)
+ return tunnel_state;
+
+ return NULL;
+}
+
+static struct drm_dp_tunnel_state *
+get_or_add_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
+ struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_state *tunnel_state;
+
+ tunnel_state = get_tunnel_state(group_state, tunnel);
+ if (tunnel_state)
+ return tunnel_state;
+
+ return add_tunnel_state(group_state, tunnel);
+}
+
+static struct drm_private_state *
+tunnel_group_duplicate_state(struct drm_private_obj *obj)
+{
+ struct drm_dp_tunnel_group_state *group_state;
+ struct drm_dp_tunnel_state *tunnel_state;
+
+ group_state = kzalloc(sizeof(*group_state), GFP_KERNEL);
+ if (!group_state)
+ return NULL;
+
+ INIT_LIST_HEAD(&group_state->tunnel_states);
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &group_state->base);
+
+ for_each_tunnel_state(to_group_state(obj->state), tunnel_state) {
+ struct drm_dp_tunnel_state *new_tunnel_state;
+
+ new_tunnel_state = get_or_add_tunnel_state(group_state,
+ tunnel_state->tunnel_ref.tunnel);
+ if (!new_tunnel_state)
+ goto out_free_state;
+
+ new_tunnel_state->stream_mask = tunnel_state->stream_mask;
+ new_tunnel_state->stream_bw = kmemdup(tunnel_state->stream_bw,
+ sizeof(*tunnel_state->stream_bw) *
+ hweight32(tunnel_state->stream_mask),
+ GFP_KERNEL);
+
+ if (!new_tunnel_state->stream_bw)
+ goto out_free_state;
+ }
+
+ return &group_state->base;
+
+out_free_state:
+ free_group_state(group_state);
+
+ return NULL;
+}
+
+static void tunnel_group_destroy_state(struct drm_private_obj *obj, struct drm_private_state *state)
+{
+ free_group_state(to_group_state(state));
+}
+
+static const struct drm_private_state_funcs tunnel_group_funcs = {
+ .atomic_duplicate_state = tunnel_group_duplicate_state,
+ .atomic_destroy_state = tunnel_group_destroy_state,
+};
+
+/**
+ * drm_dp_tunnel_atomic_get_state - get/allocate the new atomic state for a tunnel
+ * @state: Atomic state
+ * @tunnel: Tunnel to get the state for
+ *
+ * Get the new atomic state for @tunnel, duplicating it from the old tunnel
+ * state if not yet allocated.
+ *
+ * Return the state or an ERR_PTR() error on failure.
+ */
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state,
+ struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_group_state *group_state;
+ struct drm_dp_tunnel_state *tunnel_state;
+
+ group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
+ if (IS_ERR(group_state))
+ return ERR_CAST(group_state);
+
+ tunnel_state = get_or_add_tunnel_state(group_state, tunnel);
+ if (!tunnel_state)
+ return ERR_PTR(-ENOMEM);
+
+ return tunnel_state;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_state);
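+
+/*
+ * Illustrative sketch: from a driver's atomic_check() hook, with tunnel
+ * stored in driver-private connector data:
+ *
+ *	struct drm_dp_tunnel_state *tunnel_state;
+ *
+ *	tunnel_state = drm_dp_tunnel_atomic_get_state(state, tunnel);
+ *	if (IS_ERR(tunnel_state))
+ *		return PTR_ERR(tunnel_state);
+ */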
+
+/**
+ * drm_dp_tunnel_atomic_get_old_state - get the old atomic state for a tunnel
+ * @state: Atomic state
+ * @tunnel: Tunnel to get the state for
+ *
+ * Get the old atomic state for @tunnel.
+ *
+ * Return the old state or NULL if the tunnel's atomic state is not in @state.
+ */
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_old_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_group_state *old_group_state;
+ int i;
+
+ for_each_old_group_in_state(state, old_group_state, i)
+ if (to_group(old_group_state->base.obj) == tunnel->group)
+ return get_tunnel_state(old_group_state, tunnel);
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_old_state);
+
+/**
+ * drm_dp_tunnel_atomic_get_new_state - get the new atomic state for a tunnel
+ * @state: Atomic state
+ * @tunnel: Tunnel to get the state for
+ *
+ * Get the new atomic state for @tunnel.
+ *
+ * Return the new state or NULL if the tunnel's atomic state is not in @state.
+ */
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel)
+{
+ struct drm_dp_tunnel_group_state *new_group_state;
+ int i;
+
+ for_each_new_group_in_state(state, new_group_state, i)
+ if (to_group(new_group_state->base.obj) == tunnel->group)
+ return get_tunnel_state(new_group_state, tunnel);
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_new_state);
+
+static bool init_group(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_tunnel_group *group)
+{
+ struct drm_dp_tunnel_group_state *group_state;
+
+ group_state = kzalloc(sizeof(*group_state), GFP_KERNEL);
+ if (!group_state)
+ return false;
+
+ INIT_LIST_HEAD(&group_state->tunnel_states);
+
+ group->mgr = mgr;
+ group->available_bw = -1;
+ INIT_LIST_HEAD(&group->tunnels);
+
+ drm_atomic_private_obj_init(mgr->dev, &group->base, &group_state->base,
+ &tunnel_group_funcs);
+
+ return true;
+}
+
+static void cleanup_group(struct drm_dp_tunnel_group *group)
+{
+ drm_atomic_private_obj_fini(&group->base);
+}
+
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
+static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state)
+{
+ const struct drm_dp_tunnel_state *tunnel_state;
+ u32 stream_mask = 0;
+
+ for_each_tunnel_state(group_state, tunnel_state) {
+ drm_WARN(to_group(group_state->base.obj)->mgr->dev,
+ tunnel_state->stream_mask & stream_mask,
+ "[DPTUN %s]: conflicting stream IDs %x (IDs in other tunnels %x)\n",
+ tunnel_state->tunnel_ref.tunnel->name,
+ tunnel_state->stream_mask,
+ stream_mask);
+
+ stream_mask |= tunnel_state->stream_mask;
+ }
+}
+#else
+static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state)
+{
+}
+#endif
+
+static int stream_id_to_idx(u32 stream_mask, u8 stream_id)
+{
+ return hweight32(stream_mask & (BIT(stream_id) - 1));
+}
+
+static int resize_bw_array(struct drm_dp_tunnel_state *tunnel_state,
+ unsigned long old_mask, unsigned long new_mask)
+{
+ unsigned long move_mask = old_mask & new_mask;
+ int *new_bws = NULL;
+ int id;
+
+ WARN_ON(!new_mask);
+
+ if (old_mask == new_mask)
+ return 0;
+
+ new_bws = kcalloc(hweight32(new_mask), sizeof(*new_bws), GFP_KERNEL);
+ if (!new_bws)
+ return -ENOMEM;
+
+ for_each_set_bit(id, &move_mask, BITS_PER_TYPE(move_mask))
+ new_bws[stream_id_to_idx(new_mask, id)] =
+ tunnel_state->stream_bw[stream_id_to_idx(old_mask, id)];
+
+ kfree(tunnel_state->stream_bw);
+ tunnel_state->stream_bw = new_bws;
+ tunnel_state->stream_mask = new_mask;
+
+ return 0;
+}
+
+static int set_stream_bw(struct drm_dp_tunnel_state *tunnel_state,
+ u8 stream_id, int bw)
+{
+ int err;
+
+ err = resize_bw_array(tunnel_state,
+ tunnel_state->stream_mask,
+ tunnel_state->stream_mask | BIT(stream_id));
+ if (err)
+ return err;
+
+ tunnel_state->stream_bw[stream_id_to_idx(tunnel_state->stream_mask, stream_id)] = bw;
+
+ return 0;
+}
+
+static int clear_stream_bw(struct drm_dp_tunnel_state *tunnel_state,
+ u8 stream_id)
+{
+ if (!(tunnel_state->stream_mask & ~BIT(stream_id))) {
+ free_tunnel_state(tunnel_state);
+ return 0;
+ }
+
+ return resize_bw_array(tunnel_state,
+ tunnel_state->stream_mask,
+ tunnel_state->stream_mask & ~BIT(stream_id));
+}
+
+/**
+ * drm_dp_tunnel_atomic_set_stream_bw - Set the BW for a DP tunnel stream
+ * @state: Atomic state
+ * @tunnel: DP tunnel containing the stream
+ * @stream_id: Stream ID
+ * @bw: BW of the stream
+ *
+ * Set a DP tunnel stream's required BW in the atomic state.
+ *
+ * Returns 0 in case of success, a negative error code otherwise.
+ */
+int drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state,
+ struct drm_dp_tunnel *tunnel,
+ u8 stream_id, int bw)
+{
+ struct drm_dp_tunnel_group_state *new_group_state;
+ struct drm_dp_tunnel_state *tunnel_state;
+ int err;
+
+ if (drm_WARN_ON(tunnel->group->mgr->dev,
+			stream_id >= BITS_PER_TYPE(tunnel_state->stream_mask)))
+ return -EINVAL;
+
+ tun_dbg(tunnel,
+ "Setting %d Mb/s for stream %d\n",
+ DPTUN_BW_ARG(bw), stream_id);
+
+ new_group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
+ if (IS_ERR(new_group_state))
+ return PTR_ERR(new_group_state);
+
+ if (bw == 0) {
+ tunnel_state = get_tunnel_state(new_group_state, tunnel);
+ if (!tunnel_state)
+ return 0;
+
+ return clear_stream_bw(tunnel_state, stream_id);
+ }
+
+ tunnel_state = get_or_add_tunnel_state(new_group_state, tunnel);
+ if (drm_WARN_ON(state->dev, !tunnel_state))
+ return -EINVAL;
+
+ err = set_stream_bw(tunnel_state, stream_id, bw);
+ if (err)
+ return err;
+
+ check_unique_stream_ids(new_group_state);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_set_stream_bw);
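+
+/*
+ * Illustrative sketch: a driver that computed a stream's BW during
+ * atomic_check() records it using the CRTC index as the stream ID, with
+ * stream_bw being the driver's own, hypothetical computation:
+ *
+ *	ret = drm_dp_tunnel_atomic_set_stream_bw(state, tunnel,
+ *						 crtc->index, stream_bw);
+ *	if (ret)
+ *		return ret;
+ */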
+
+/**
+ * drm_dp_tunnel_atomic_get_required_bw - Get the BW required by a DP tunnel
+ * @tunnel_state: Atomic state of the queried tunnel
+ *
+ * Calculate the BW required by a tunnel by adding up the required BW of all
+ * the streams in the tunnel.
+ *
+ * Return the total BW required by the tunnel.
+ */
+int drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state)
+{
+ int tunnel_bw = 0;
+ int i;
+
+ if (!tunnel_state || !tunnel_state->stream_mask)
+ return 0;
+
+ for (i = 0; i < hweight32(tunnel_state->stream_mask); i++)
+ tunnel_bw += tunnel_state->stream_bw[i];
+
+ return tunnel_bw;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_required_bw);
+
+/**
+ * drm_dp_tunnel_atomic_get_group_streams_in_state - Get mask of stream IDs in a group
+ * @state: Atomic state
+ * @tunnel: Tunnel object
+ * @stream_mask: Mask of streams in @tunnel's group
+ *
+ * Get the mask of all the stream IDs in the tunnel group of @tunnel.
+ *
+ * Return 0 in case of success - with the stream IDs in @stream_mask - or a
+ * negative error code in case of failure.
+ */
+int drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel,
+ u32 *stream_mask)
+{
+ struct drm_dp_tunnel_group_state *group_state;
+ struct drm_dp_tunnel_state *tunnel_state;
+
+ group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
+ if (IS_ERR(group_state))
+ return PTR_ERR(group_state);
+
+ *stream_mask = 0;
+ for_each_tunnel_state(group_state, tunnel_state)
+ *stream_mask |= tunnel_state->stream_mask;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_group_streams_in_state);
+
+static int
+drm_dp_tunnel_atomic_check_group_bw(struct drm_dp_tunnel_group_state *new_group_state,
+ u32 *failed_stream_mask)
+{
+ struct drm_dp_tunnel_group *group = to_group(new_group_state->base.obj);
+ struct drm_dp_tunnel_state *new_tunnel_state;
+ u32 group_stream_mask = 0;
+ int group_bw = 0;
+
+ for_each_tunnel_state(new_group_state, new_tunnel_state) {
+ struct drm_dp_tunnel *tunnel = new_tunnel_state->tunnel_ref.tunnel;
+ int max_dprx_bw = get_max_dprx_bw(tunnel);
+ int tunnel_bw = drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state);
+
+ tun_dbg(tunnel,
+ "%sRequired %d/%d Mb/s total for tunnel.\n",
+ tunnel_bw > max_dprx_bw ? "Not enough BW: " : "",
+ DPTUN_BW_ARG(tunnel_bw),
+ DPTUN_BW_ARG(max_dprx_bw));
+
+ if (tunnel_bw > max_dprx_bw) {
+ *failed_stream_mask = new_tunnel_state->stream_mask;
+ return -ENOSPC;
+ }
+
+ group_bw += min(roundup(tunnel_bw, tunnel->bw_granularity),
+ max_dprx_bw);
+ group_stream_mask |= new_tunnel_state->stream_mask;
+ }
+
+ tun_grp_dbg(group,
+ "%sRequired %d/%d Mb/s total for tunnel group.\n",
+ group_bw > group->available_bw ? "Not enough BW: " : "",
+ DPTUN_BW_ARG(group_bw),
+ DPTUN_BW_ARG(group->available_bw));
+
+ if (group_bw > group->available_bw) {
+ *failed_stream_mask = group_stream_mask;
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_dp_tunnel_atomic_check_stream_bws - Check BW limit for all streams in state
+ * @state: Atomic state
+ * @failed_stream_mask: Mask of stream IDs with a BW limit failure
+ *
+ * Check the required BW of each DP tunnel in @state against both the DPRX BW
+ * limit of the tunnel and the BW limit of the tunnel group. Return a mask of
+ * stream IDs in @failed_stream_mask once a check fails. The mask will contain
+ * either all the streams in a tunnel (in case a DPRX BW limit check failed) or
+ * all the streams in a tunnel group (in case a group BW limit check failed).
+ *
+ * Return 0 if all the BW limit checks passed, -ENOSPC in case a BW limit
+ * check failed - with @failed_stream_mask containing the streams failing the
+ * check - or a negative error code otherwise.
+ */
+int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
+ u32 *failed_stream_mask)
+{
+ struct drm_dp_tunnel_group_state *new_group_state;
+ int i;
+
+ for_each_new_group_in_state(state, new_group_state, i) {
+ int ret;
+
+ ret = drm_dp_tunnel_atomic_check_group_bw(new_group_state,
+ failed_stream_mask);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_check_stream_bws);
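+
+/*
+ * Illustrative sketch: in a driver's global atomic check an -ENOSPC result
+ * is typically handled by reducing the BW of the streams in
+ * failed_stream_mask (for instance by enabling DSC) and re-running the
+ * check:
+ *
+ *	u32 failed_stream_mask;
+ *	int ret;
+ *
+ *	ret = drm_dp_tunnel_atomic_check_stream_bws(state, &failed_stream_mask);
+ *	if (ret == -ENOSPC)
+ *		... reduce the BW of the CRTCs driving the streams in
+ *		    failed_stream_mask and retry ...
+ */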
+
+static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)
+{
+ int i;
+
+ for (i = 0; i < mgr->group_count; i++) {
+ cleanup_group(&mgr->groups[i]);
+ drm_WARN_ON(mgr->dev, !list_empty(&mgr->groups[i].tunnels));
+ }
+
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
+ ref_tracker_dir_exit(&mgr->ref_tracker);
+#endif
+
+ kfree(mgr->groups);
+ kfree(mgr);
+}
+
+/**
+ * drm_dp_tunnel_mgr_create - Create a DP tunnel manager
+ * @dev: DRM device object
+ * @max_group_count: Maximum number of tunnel groups
+ *
+ * Creates a DP tunnel manager for @dev.
+ *
+ * Returns a pointer to the tunnel manager if created successfully or error
+ * pointer in case of failure.
+ */
+struct drm_dp_tunnel_mgr *
+drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
+{
+ struct drm_dp_tunnel_mgr *mgr;
+ int i;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr->dev = dev;
+ init_waitqueue_head(&mgr->bw_req_queue);
+
+ mgr->groups = kcalloc(max_group_count, sizeof(*mgr->groups), GFP_KERNEL);
+ if (!mgr->groups) {
+ kfree(mgr);
+
+ return ERR_PTR(-ENOMEM);
+ }
+
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
+ ref_tracker_dir_init(&mgr->ref_tracker, 16, "dptun");
+#endif
+
+ for (i = 0; i < max_group_count; i++) {
+ if (!init_group(mgr, &mgr->groups[i])) {
+ destroy_mgr(mgr);
+
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mgr->group_count++;
+ }
+
+ return mgr;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_mgr_create);
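+
+/*
+ * Illustrative sketch: the manager is created once at driver load time,
+ * sized for the maximum number of tunnel groups the platform can have (for
+ * instance one per Thunderbolt port), and destroyed again at unload:
+ *
+ *	mgr = drm_dp_tunnel_mgr_create(dev, max_group_count);
+ *	if (IS_ERR(mgr))
+ *		return PTR_ERR(mgr);
+ */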
+
+/**
+ * drm_dp_tunnel_mgr_destroy - Destroy DP tunnel manager
+ * @mgr: Tunnel manager object
+ *
+ * Destroy the tunnel manager.
+ */
+void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr)
+{
+ destroy_mgr(mgr);
+}
+EXPORT_SYMBOL(drm_dp_tunnel_mgr_destroy);
diff --git a/drivers/gpu/drm/display/drm_dsc_helper.c b/drivers/gpu/drm/display/drm_dsc_helper.c
index 4424380c6cb6..6900f4dac520 100644
--- a/drivers/gpu/drm/display/drm_dsc_helper.c
+++ b/drivers/gpu/drm/display/drm_dsc_helper.c
@@ -14,6 +14,7 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dsc_helper.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
/**
@@ -1472,3 +1473,93 @@ u32 drm_dsc_flatness_det_thresh(const struct drm_dsc_config *dsc)
return 2 << (dsc->bits_per_component - 8);
}
EXPORT_SYMBOL(drm_dsc_flatness_det_thresh);
+
+static void drm_dsc_dump_config_main_params(struct drm_printer *p, int indent,
+ const struct drm_dsc_config *cfg)
+{
+ drm_printf_indent(p, indent,
+ "dsc-cfg: version: %d.%d, picture: w=%d, h=%d, slice: count=%d, w=%d, h=%d, size=%d\n",
+ cfg->dsc_version_major, cfg->dsc_version_minor,
+ cfg->pic_width, cfg->pic_height,
+ cfg->slice_count, cfg->slice_width, cfg->slice_height, cfg->slice_chunk_size);
+ drm_printf_indent(p, indent,
+ "dsc-cfg: mode: block-pred=%s, vbr=%s, rgb=%s, simple-422=%s, native-422=%s, native-420=%s\n",
+ str_yes_no(cfg->block_pred_enable), str_yes_no(cfg->vbr_enable),
+ str_yes_no(cfg->convert_rgb),
+ str_yes_no(cfg->simple_422), str_yes_no(cfg->native_422), str_yes_no(cfg->native_420));
+ drm_printf_indent(p, indent,
+ "dsc-cfg: color-depth: uncompressed-bpc=%d, compressed-bpp=" FXP_Q4_FMT " line-buf-bpp=%d\n",
+ cfg->bits_per_component, FXP_Q4_ARGS(cfg->bits_per_pixel), cfg->line_buf_depth);
+ drm_printf_indent(p, indent,
+ "dsc-cfg: rc-model: size=%d, bits=%d, mux-word-size: %d, initial-delays: xmit=%d, dec=%d\n",
+ cfg->rc_model_size, cfg->rc_bits, cfg->mux_word_size,
+ cfg->initial_xmit_delay, cfg->initial_dec_delay);
+ drm_printf_indent(p, indent,
+ "dsc-cfg: offsets: initial=%d, final=%d, slice-bpg=%d\n",
+ cfg->initial_offset, cfg->final_offset, cfg->slice_bpg_offset);
+ drm_printf_indent(p, indent,
+ "dsc-cfg: line-bpg-offsets: first=%d, non-first=%d, second=%d, non-second=%d, second-adj=%d\n",
+ cfg->first_line_bpg_offset, cfg->nfl_bpg_offset,
+ cfg->second_line_bpg_offset, cfg->nsl_bpg_offset, cfg->second_line_offset_adj);
+ drm_printf_indent(p, indent,
+ "dsc-cfg: rc-tgt-offsets: low=%d, high=%d, rc-edge-factor: %d, rc-quant-incr-limits: [0]=%d, [1]=%d\n",
+ cfg->rc_tgt_offset_low, cfg->rc_tgt_offset_high,
+ cfg->rc_edge_factor, cfg->rc_quant_incr_limit0, cfg->rc_quant_incr_limit1);
+ drm_printf_indent(p, indent,
+ "dsc-cfg: initial-scale: %d, scale-intervals: increment=%d, decrement=%d\n",
+ cfg->initial_scale_value, cfg->scale_increment_interval, cfg->scale_decrement_interval);
+ drm_printf_indent(p, indent,
+ "dsc-cfg: flatness: min-qp=%d, max-qp=%d\n",
+ cfg->flatness_min_qp, cfg->flatness_max_qp);
+}
+
+static void drm_dsc_dump_config_rc_params(struct drm_printer *p, int indent,
+ const struct drm_dsc_config *cfg)
+{
+ const u16 *bt = cfg->rc_buf_thresh;
+ const struct drm_dsc_rc_range_parameters *rp = cfg->rc_range_params;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cfg->rc_buf_thresh) != 14);
+ BUILD_BUG_ON(ARRAY_SIZE(cfg->rc_range_params) != 15);
+
+ drm_printf_indent(p, indent,
+ "dsc-cfg: rc-level: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14\n");
+ drm_printf_indent(p, indent,
+ "dsc-cfg: rc-buf-thresh: %3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d\n",
+ bt[0], bt[1], bt[2], bt[3], bt[4], bt[5], bt[6], bt[7],
+ bt[8], bt[9], bt[10], bt[11], bt[12], bt[13]);
+ drm_printf_indent(p, indent,
+ "dsc-cfg: rc-min-qp: %3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d\n",
+ rp[0].range_min_qp, rp[1].range_min_qp, rp[2].range_min_qp, rp[3].range_min_qp,
+ rp[4].range_min_qp, rp[5].range_min_qp, rp[6].range_min_qp, rp[7].range_min_qp,
+ rp[8].range_min_qp, rp[9].range_min_qp, rp[10].range_min_qp, rp[11].range_min_qp,
+ rp[12].range_min_qp, rp[13].range_min_qp, rp[14].range_min_qp);
+ drm_printf_indent(p, indent,
+ "dsc-cfg: rc-max-qp: %3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d\n",
+ rp[0].range_max_qp, rp[1].range_max_qp, rp[2].range_max_qp, rp[3].range_max_qp,
+ rp[4].range_max_qp, rp[5].range_max_qp, rp[6].range_max_qp, rp[7].range_max_qp,
+ rp[8].range_max_qp, rp[9].range_max_qp, rp[10].range_max_qp, rp[11].range_max_qp,
+ rp[12].range_max_qp, rp[13].range_max_qp, rp[14].range_max_qp);
+ drm_printf_indent(p, indent,
+ "dsc-cfg: rc-bpg-offset: %3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d,%3d\n",
+ rp[0].range_bpg_offset, rp[1].range_bpg_offset, rp[2].range_bpg_offset, rp[3].range_bpg_offset,
+ rp[4].range_bpg_offset, rp[5].range_bpg_offset, rp[6].range_bpg_offset, rp[7].range_bpg_offset,
+ rp[8].range_bpg_offset, rp[9].range_bpg_offset, rp[10].range_bpg_offset, rp[11].range_bpg_offset,
+ rp[12].range_bpg_offset, rp[13].range_bpg_offset, rp[14].range_bpg_offset);
+}
+
+/**
+ * drm_dsc_dump_config - Dump the provided DSC configuration
+ * @p: The printer used for output
+ * @indent: Tab indentation level (max 5)
+ * @cfg: DSC configuration to print
+ *
+ * Print the provided DSC configuration in @cfg.
+ */
+void drm_dsc_dump_config(struct drm_printer *p, int indent,
+ const struct drm_dsc_config *cfg)
+{
+ drm_dsc_dump_config_main_params(p, indent, cfg);
+ drm_dsc_dump_config_rc_params(p, indent, cfg);
+}
+EXPORT_SYMBOL(drm_dsc_dump_config);
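+
+/*
+ * Illustrative sketch: dumping a driver's &struct drm_dsc_config from a
+ * debugfs show callback via the seq_file-backed printer in drm_print.h
+ * (dsc_cfg is a hypothetical driver field):
+ *
+ *	struct drm_printer p = drm_seq_file_printer(m);
+ *
+ *	drm_dsc_dump_config(&p, 1, &dsc_cfg);
+ */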
diff --git a/drivers/gpu/drm/display/drm_hdmi_audio_helper.c b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
new file mode 100644
index 000000000000..05afc9f0bdd6
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2024 Linaro Ltd
+ */
+
+#include <linux/mutex.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
+
+#include <sound/hdmi-codec.h>
+
+static int drm_connector_hdmi_audio_startup(struct device *dev, void *data)
+{
+ struct drm_connector *connector = data;
+ const struct drm_connector_hdmi_audio_funcs *funcs =
+ connector->hdmi_audio.funcs;
+
+ if (funcs->startup)
+ return funcs->startup(connector);
+
+ return 0;
+}
+
+static int drm_connector_hdmi_audio_prepare(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct drm_connector *connector = data;
+ const struct drm_connector_hdmi_audio_funcs *funcs =
+ connector->hdmi_audio.funcs;
+
+ return funcs->prepare(connector, fmt, hparms);
+}
+
+static void drm_connector_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct drm_connector *connector = data;
+ const struct drm_connector_hdmi_audio_funcs *funcs =
+ connector->hdmi_audio.funcs;
+
+ return funcs->shutdown(connector);
+}
+
+static int drm_connector_hdmi_audio_mute_stream(struct device *dev, void *data,
+ bool enable, int direction)
+{
+ struct drm_connector *connector = data;
+ const struct drm_connector_hdmi_audio_funcs *funcs =
+ connector->hdmi_audio.funcs;
+
+ if (funcs->mute_stream)
+ return funcs->mute_stream(connector, enable, direction);
+
+ return -ENOTSUPP;
+}
+
+static int drm_connector_hdmi_audio_get_dai_id(struct snd_soc_component *component,
+ struct device_node *endpoint,
+ void *data)
+{
+ struct drm_connector *connector = data;
+ struct of_endpoint of_ep;
+ int ret;
+
+ if (connector->hdmi_audio.dai_port < 0)
+ return -ENOTSUPP;
+
+ ret = of_graph_parse_endpoint(endpoint, &of_ep);
+ if (ret < 0)
+ return ret;
+
+ if (of_ep.port == connector->hdmi_audio.dai_port)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int drm_connector_hdmi_audio_get_eld(struct device *dev, void *data,
+ uint8_t *buf, size_t len)
+{
+ struct drm_connector *connector = data;
+
+ mutex_lock(&connector->eld_mutex);
+ memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+ mutex_unlock(&connector->eld_mutex);
+
+ return 0;
+}
+
+static int drm_connector_hdmi_audio_hook_plugged_cb(struct device *dev,
+ void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct drm_connector *connector = data;
+
+ mutex_lock(&connector->hdmi_audio.lock);
+
+ connector->hdmi_audio.plugged_cb = fn;
+ connector->hdmi_audio.plugged_cb_dev = codec_dev;
+
+ fn(codec_dev, connector->hdmi_audio.last_state);
+
+ mutex_unlock(&connector->hdmi_audio.lock);
+
+ return 0;
+}
+
+void drm_connector_hdmi_audio_plugged_notify(struct drm_connector *connector,
+ bool plugged)
+{
+ mutex_lock(&connector->hdmi_audio.lock);
+
+ connector->hdmi_audio.last_state = plugged;
+
+ if (connector->hdmi_audio.plugged_cb &&
+ connector->hdmi_audio.plugged_cb_dev)
+ connector->hdmi_audio.plugged_cb(connector->hdmi_audio.plugged_cb_dev,
+ connector->hdmi_audio.last_state);
+
+ mutex_unlock(&connector->hdmi_audio.lock);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_audio_plugged_notify);
+
+static const struct hdmi_codec_ops drm_connector_hdmi_audio_ops = {
+ .audio_startup = drm_connector_hdmi_audio_startup,
+ .prepare = drm_connector_hdmi_audio_prepare,
+ .audio_shutdown = drm_connector_hdmi_audio_shutdown,
+ .mute_stream = drm_connector_hdmi_audio_mute_stream,
+ .get_eld = drm_connector_hdmi_audio_get_eld,
+ .get_dai_id = drm_connector_hdmi_audio_get_dai_id,
+ .hook_plugged_cb = drm_connector_hdmi_audio_hook_plugged_cb,
+};
+
+/**
+ * drm_connector_hdmi_audio_init - Initialize HDMI Codec device for the DRM connector
+ * @connector: A pointer to the connector to allocate codec for
+ * @hdmi_codec_dev: device to be used as a parent for the HDMI Codec
+ * @funcs: callbacks for this HDMI Codec
+ * @max_i2s_playback_channels: maximum number of playback I2S channels
+ * @spdif_playback: set if HDMI codec has S/PDIF playback port
+ * @dai_port: sound DAI port, -1 if it is not enabled
+ *
+ * Create an HDMI codec device to be used with the specified connector.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_hdmi_audio_init(struct drm_connector *connector,
+ struct device *hdmi_codec_dev,
+ const struct drm_connector_hdmi_audio_funcs *funcs,
+ unsigned int max_i2s_playback_channels,
+ bool spdif_playback,
+ int dai_port)
+{
+ struct hdmi_codec_pdata codec_pdata = {
+ .ops = &drm_connector_hdmi_audio_ops,
+ .max_i2s_channels = max_i2s_playback_channels,
+ .i2s = !!max_i2s_playback_channels,
+ .spdif = spdif_playback,
+ .no_i2s_capture = true,
+ .no_spdif_capture = true,
+ .data = connector,
+ };
+ struct platform_device *pdev;
+
+ if (!funcs ||
+ !funcs->prepare ||
+ !funcs->shutdown)
+ return -EINVAL;
+
+ connector->hdmi_audio.funcs = funcs;
+ connector->hdmi_audio.dai_port = dai_port;
+
+ pdev = platform_device_register_data(hdmi_codec_dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_pdata, sizeof(codec_pdata));
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ connector->hdmi_audio.codec_pdev = pdev;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_hdmi_audio_init);
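+
+/*
+ * Illustrative sketch: a connector driver registers the codec at connector
+ * init time, my_audio_funcs being a hypothetical ops table that provides at
+ * least .prepare and .shutdown:
+ *
+ *	ret = drm_connector_hdmi_audio_init(connector, dev->dev,
+ *					    &my_audio_funcs, 8, false, 1);
+ *
+ * i.e. with at most 8 I2S playback channels, no S/PDIF playback and sound
+ * DAI port 1.
+ */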
diff --git a/drivers/gpu/drm/display/drm_hdmi_helper.c b/drivers/gpu/drm/display/drm_hdmi_helper.c
index faf5e9efa7d3..74dd4d01dd9b 100644
--- a/drivers/gpu/drm/display/drm_hdmi_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_helper.c
@@ -195,3 +195,64 @@ void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
frame->itc = conn_state->content_type != DRM_MODE_CONTENT_TYPE_NO_DATA;
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_content_type);
+
+/**
+ * drm_hdmi_compute_mode_clock() - Computes the TMDS Character Rate
+ * @mode: Display mode to compute the clock for
+ * @bpc: Bits per color component
+ * @fmt: Output Pixel Format used
+ *
+ * Returns the TMDS Character Rate for a given mode, bpc count and output format.
+ *
+ * RETURNS:
+ * The TMDS Character Rate, in Hertz, or 0 on error.
+ */
+unsigned long long
+drm_hdmi_compute_mode_clock(const struct drm_display_mode *mode,
+ unsigned int bpc, enum hdmi_colorspace fmt)
+{
+ unsigned long long clock = mode->clock * 1000ULL;
+ unsigned int vic = drm_match_cea_mode(mode);
+
+ /*
+ * CTA-861-G Spec, section 5.4 - Color Coding and Quantization
+ * mandates that VIC 1 always uses 8 bpc.
+ */
+ if (vic == 1 && bpc != 8)
+ return 0;
+
+ if (fmt == HDMI_COLORSPACE_YUV422) {
+ /*
+ * HDMI 1.0 Spec, section 6.5 - Pixel Encoding states that
+ * YUV422 sends 24 bits over three channels, with Cb and Cr
+ * components being sent on odd and even pixels, respectively.
+ *
+ * If fewer than 12 bpc are sent, data are left justified.
+ */
+ if (bpc > 12)
+ return 0;
+
+ /*
+ * HDMI 1.0 Spec, section 6.5 - Pixel Encoding
+ * specifies that YUV422 sends two 12-bits components over
+ * three TMDS channels per pixel clock, which is equivalent to
+ * three 8-bits components over three channels used by RGB as
+ * far as the clock rate goes.
+ */
+ bpc = 8;
+ }
+
+ /*
+ * HDMI 2.0 Spec, Section 7.1 - YCbCr 4:2:0 Pixel Encoding
+ * specifies that YUV420 encoding is carried at a TMDS Character Rate
+ * equal to half the pixel clock rate.
+ */
+ if (fmt == HDMI_COLORSPACE_YUV420)
+ clock = clock / 2;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ clock = clock * 2;
+
+ return DIV_ROUND_CLOSEST_ULL(clock * bpc, 8);
+}
+EXPORT_SYMBOL(drm_hdmi_compute_mode_clock);
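+
+/*
+ * Worked example: 1080p60 (VIC 16) has a 148500 kHz pixel clock. For 10 bpc
+ * RGB output this helper returns 148500000 * 10 / 8 = 185625000 Hz as the
+ * TMDS character rate.
+ */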
diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
new file mode 100644
index 000000000000..9b2ee2385634
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
@@ -0,0 +1,841 @@
+// SPDX-License-Identifier: MIT
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
+
+#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
+
+/**
+ * __drm_atomic_helper_connector_hdmi_reset() - Initializes all HDMI @drm_connector_state resources
+ * @connector: DRM connector
+ * @new_conn_state: connector state to reset
+ *
+ * Initializes all HDMI resources from a @drm_connector_state without
+ * actually allocating it. This is useful for HDMI drivers, in
+ * combination with __drm_atomic_helper_connector_reset() or
+ * drm_atomic_helper_connector_reset().
+ */
+void __drm_atomic_helper_connector_hdmi_reset(struct drm_connector *connector,
+ struct drm_connector_state *new_conn_state)
+{
+ unsigned int max_bpc = connector->max_bpc;
+
+ new_conn_state->max_bpc = max_bpc;
+ new_conn_state->max_requested_bpc = max_bpc;
+ new_conn_state->hdmi.broadcast_rgb = DRM_HDMI_BROADCAST_RGB_AUTO;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_hdmi_reset);
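+
+/*
+ * Illustrative sketch: a driver's &drm_connector_funcs.reset implementation
+ * (my_connector_reset is a hypothetical name) combining this helper with the
+ * core connector reset:
+ *
+ *	static void my_connector_reset(struct drm_connector *connector)
+ *	{
+ *		drm_atomic_helper_connector_reset(connector);
+ *		if (connector->state)
+ *			__drm_atomic_helper_connector_hdmi_reset(connector,
+ *								 connector->state);
+ *	}
+ */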
+
+static const struct drm_display_mode *
+connector_state_get_mode(const struct drm_connector_state *conn_state)
+{
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+
+ state = conn_state->state;
+ if (!state)
+ return NULL;
+
+ crtc = conn_state->crtc;
+ if (!crtc)
+ return NULL;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!crtc_state)
+ return NULL;
+
+ return &crtc_state->mode;
+}
+
+static bool hdmi_is_limited_range(const struct drm_connector *connector,
+ const struct drm_connector_state *conn_state)
+{
+ const struct drm_display_info *info = &connector->display_info;
+ const struct drm_display_mode *mode =
+ connector_state_get_mode(conn_state);
+
+ /*
+ * The Broadcast RGB property only applies to RGB format, and
+ * i915 just assumes limited range for YCbCr output, so let's
+ * just do the same.
+ */
+ if (conn_state->hdmi.output_format != HDMI_COLORSPACE_RGB)
+ return true;
+
+ if (conn_state->hdmi.broadcast_rgb == DRM_HDMI_BROADCAST_RGB_FULL)
+ return false;
+
+ if (conn_state->hdmi.broadcast_rgb == DRM_HDMI_BROADCAST_RGB_LIMITED)
+ return true;
+
+ if (!info->is_hdmi)
+ return false;
+
+ return drm_default_rgb_quant_range(mode) == HDMI_QUANTIZATION_RANGE_LIMITED;
+}
+
+static bool
+sink_supports_format_bpc(const struct drm_connector *connector,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode,
+ unsigned int format, unsigned int bpc)
+{
+ struct drm_device *dev = connector->dev;
+ u8 vic = drm_match_cea_mode(mode);
+
+ /*
+ * CTA-861-F, section 5.4 - Color Coding & Quantization states
+ * that the bpc must be 8, 10, 12 or 16 except for the default
+ * 640x480 VIC1 where the value must be 8.
+ *
+ * The definition of default here is ambiguous but the spec
+ * refers to VIC1 being the default timing in several occasions
+ * so our understanding is that for the default timing (ie,
+ * VIC1), the bpc must be 8.
+ */
+ if (vic == 1 && bpc != 8) {
+ drm_dbg_kms(dev, "VIC1 requires a bpc of 8, got %u\n", bpc);
+ return false;
+ }
+
+ if (!info->is_hdmi &&
+ (format != HDMI_COLORSPACE_RGB || bpc != 8)) {
+ drm_dbg_kms(dev, "DVI Monitors require an RGB output at 8 bpc\n");
+ return false;
+ }
+
+ if (!(connector->hdmi.supported_formats & BIT(format))) {
+ drm_dbg_kms(dev, "%s format unsupported by the connector.\n",
+ drm_hdmi_connector_get_output_format_name(format));
+ return false;
+ }
+
+ switch (format) {
+ case HDMI_COLORSPACE_RGB:
+ drm_dbg_kms(dev, "RGB Format, checking the constraints.\n");
+
+ /*
+ * In some cases, like when the EDID readout fails, or
+ * is not an HDMI compliant EDID for some reason, the
+ * color_formats field will be blank and not report any
+ * format supported. In such a case, assume that RGB is
+ * supported so we can keep things going and light up
+ * the display.
+ */
+ if (!(info->color_formats & DRM_COLOR_FORMAT_RGB444))
+ drm_warn(dev, "HDMI Sink doesn't support RGB, something's wrong.\n");
+
+ if (bpc == 10 && !(info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30)) {
+ drm_dbg_kms(dev, "10 BPC but sink doesn't support Deep Color 30.\n");
+ return false;
+ }
+
+ if (bpc == 12 && !(info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_36)) {
+ drm_dbg_kms(dev, "12 BPC but sink doesn't support Deep Color 36.\n");
+ return false;
+ }
+
+ drm_dbg_kms(dev, "RGB format supported in that configuration.\n");
+
+ return true;
+
+ case HDMI_COLORSPACE_YUV420:
+ /* TODO: YUV420 is unsupported at the moment. */
+ drm_dbg_kms(dev, "YUV420 format isn't supported yet.\n");
+ return false;
+
+ case HDMI_COLORSPACE_YUV422:
+ drm_dbg_kms(dev, "YUV422 format, checking the constraints.\n");
+
+ if (!(info->color_formats & DRM_COLOR_FORMAT_YCBCR422)) {
+ drm_dbg_kms(dev, "Sink doesn't support YUV422.\n");
+ return false;
+ }
+
+ if (bpc > 12) {
+ drm_dbg_kms(dev, "YUV422 only supports 12 bpc or lower.\n");
+ return false;
+ }
+
+ /*
+ * HDMI Spec 1.3 - Section 6.5 Pixel Encodings and Color Depth
+ * states that Deep Color is not relevant for YUV422 so we
+ * don't need to check the Deep Color bits in the EDIDs here.
+ */
+
+ drm_dbg_kms(dev, "YUV422 format supported in that configuration.\n");
+
+ return true;
+
+ case HDMI_COLORSPACE_YUV444:
+ drm_dbg_kms(dev, "YUV444 format, checking the constraints.\n");
+
+ if (!(info->color_formats & DRM_COLOR_FORMAT_YCBCR444)) {
+ drm_dbg_kms(dev, "Sink doesn't support YUV444.\n");
+ return false;
+ }
+
+ if (bpc == 10 && !(info->edid_hdmi_ycbcr444_dc_modes & DRM_EDID_HDMI_DC_30)) {
+ drm_dbg_kms(dev, "10 BPC but sink doesn't support Deep Color 30.\n");
+ return false;
+ }
+
+ if (bpc == 12 && !(info->edid_hdmi_ycbcr444_dc_modes & DRM_EDID_HDMI_DC_36)) {
+ drm_dbg_kms(dev, "12 BPC but sink doesn't support Deep Color 36.\n");
+ return false;
+ }
+
+ drm_dbg_kms(dev, "YUV444 format supported in that configuration.\n");
+
+ return true;
+ }
+
+ drm_dbg_kms(dev, "Unsupported pixel format.\n");
+ return false;
+}
+
+static enum drm_mode_status
+hdmi_clock_valid(const struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ unsigned long long clock)
+{
+ const struct drm_connector_hdmi_funcs *funcs = connector->hdmi.funcs;
+ const struct drm_display_info *info = &connector->display_info;
+
+ if (info->max_tmds_clock && clock > info->max_tmds_clock * 1000)
+ return MODE_CLOCK_HIGH;
+
+ if (funcs && funcs->tmds_char_rate_valid) {
+ enum drm_mode_status status;
+
+ status = funcs->tmds_char_rate_valid(connector, mode, clock);
+ if (status != MODE_OK)
+ return status;
+ }
+
+ return MODE_OK;
+}
+
+static int
+hdmi_compute_clock(const struct drm_connector *connector,
+ struct drm_connector_state *conn_state,
+ const struct drm_display_mode *mode,
+ unsigned int bpc, enum hdmi_colorspace fmt)
+{
+ enum drm_mode_status status;
+ unsigned long long clock;
+
+ clock = drm_hdmi_compute_mode_clock(mode, bpc, fmt);
+ if (!clock)
+ return -EINVAL;
+
+ status = hdmi_clock_valid(connector, mode, clock);
+ if (status != MODE_OK)
+ return -EINVAL;
+
+ conn_state->hdmi.tmds_char_rate = clock;
+
+ return 0;
+}
+
+static bool
+hdmi_try_format_bpc(const struct drm_connector *connector,
+ struct drm_connector_state *conn_state,
+ const struct drm_display_mode *mode,
+ unsigned int bpc, enum hdmi_colorspace fmt)
+{
+ const struct drm_display_info *info = &connector->display_info;
+ struct drm_device *dev = connector->dev;
+ int ret;
+
+ drm_dbg_kms(dev, "Trying %s output format\n",
+ drm_hdmi_connector_get_output_format_name(fmt));
+
+ if (!sink_supports_format_bpc(connector, info, mode, fmt, bpc)) {
+ drm_dbg_kms(dev, "%s output format not supported with %u bpc\n",
+ drm_hdmi_connector_get_output_format_name(fmt),
+ bpc);
+ return false;
+ }
+
+ ret = hdmi_compute_clock(connector, conn_state, mode, bpc, fmt);
+ if (ret) {
+ drm_dbg_kms(dev, "Couldn't compute clock for %s output format and %u bpc\n",
+ drm_hdmi_connector_get_output_format_name(fmt),
+ bpc);
+ return false;
+ }
+
+ drm_dbg_kms(dev, "%s output format supported with %u (TMDS char rate: %llu Hz)\n",
+ drm_hdmi_connector_get_output_format_name(fmt),
+ bpc, conn_state->hdmi.tmds_char_rate);
+
+ return true;
+}
+
+static int
+hdmi_compute_format(const struct drm_connector *connector,
+ struct drm_connector_state *conn_state,
+ const struct drm_display_mode *mode,
+ unsigned int bpc)
+{
+ struct drm_device *dev = connector->dev;
+
+ /*
+ * TODO: Add support for YCbCr420 output for HDMI 2.0 capable
+ * devices, for modes that only support YCbCr420.
+ */
+ if (hdmi_try_format_bpc(connector, conn_state, mode, bpc, HDMI_COLORSPACE_RGB)) {
+ conn_state->hdmi.output_format = HDMI_COLORSPACE_RGB;
+ return 0;
+ }
+
+ drm_dbg_kms(dev, "Failed. No Format Supported for that bpc count.\n");
+
+ return -EINVAL;
+}
+
+static int
+hdmi_compute_config(const struct drm_connector *connector,
+ struct drm_connector_state *conn_state,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ unsigned int max_bpc = clamp_t(unsigned int,
+ conn_state->max_bpc,
+ 8, connector->max_bpc);
+ unsigned int bpc;
+ int ret;
+
+ for (bpc = max_bpc; bpc >= 8; bpc -= 2) {
+ drm_dbg_kms(dev, "Trying with a %d bpc output\n", bpc);
+
+ ret = hdmi_compute_format(connector, conn_state, mode, bpc);
+ if (ret)
+ continue;
+
+ conn_state->hdmi.output_bpc = bpc;
+
+ drm_dbg_kms(dev,
+ "Mode %ux%u @ %uHz: Found configuration: bpc: %u, fmt: %s, clock: %llu\n",
+ mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode),
+ conn_state->hdmi.output_bpc,
+ drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format),
+ conn_state->hdmi.tmds_char_rate);
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int hdmi_generate_avi_infoframe(const struct drm_connector *connector,
+ struct drm_connector_state *conn_state)
+{
+ const struct drm_display_mode *mode =
+ connector_state_get_mode(conn_state);
+ struct drm_connector_hdmi_infoframe *infoframe =
+ &conn_state->hdmi.infoframes.avi;
+ struct hdmi_avi_infoframe *frame =
+ &infoframe->data.avi;
+ bool is_limited_range = conn_state->hdmi.is_limited_range;
+ enum hdmi_quantization_range rgb_quant_range =
+ is_limited_range ? HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL;
+ int ret;
+
+ infoframe->set = false;
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector, mode);
+ if (ret)
+ return ret;
+
+ frame->colorspace = conn_state->hdmi.output_format;
+
+ /*
+ * FIXME: drm_hdmi_avi_infoframe_quant_range() doesn't handle
+ * YUV formats at all at the moment, so if we ever support YUV
+ * formats this needs to be revised.
+ */
+ drm_hdmi_avi_infoframe_quant_range(frame, connector, mode, rgb_quant_range);
+ drm_hdmi_avi_infoframe_colorimetry(frame, conn_state);
+ drm_hdmi_avi_infoframe_bars(frame, conn_state);
+
+ infoframe->set = true;
+
+ return 0;
+}
+
+static int hdmi_generate_spd_infoframe(const struct drm_connector *connector,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_connector_hdmi_infoframe *infoframe =
+ &conn_state->hdmi.infoframes.spd;
+ struct hdmi_spd_infoframe *frame =
+ &infoframe->data.spd;
+ int ret;
+
+ infoframe->set = false;
+
+ ret = hdmi_spd_infoframe_init(frame,
+ connector->hdmi.vendor,
+ connector->hdmi.product);
+ if (ret)
+ return ret;
+
+ frame->sdi = HDMI_SPD_SDI_PC;
+
+ infoframe->set = true;
+
+ return 0;
+}
+
+static int hdmi_generate_hdr_infoframe(const struct drm_connector *connector,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_connector_hdmi_infoframe *infoframe =
+ &conn_state->hdmi.infoframes.hdr_drm;
+ struct hdmi_drm_infoframe *frame =
+ &infoframe->data.drm;
+ int ret;
+
+ infoframe->set = false;
+
+ if (connector->max_bpc < 10)
+ return 0;
+
+ if (!conn_state->hdr_output_metadata)
+ return 0;
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(frame, conn_state);
+ if (ret)
+ return ret;
+
+ infoframe->set = true;
+
+ return 0;
+}
+
+static int hdmi_generate_hdmi_vendor_infoframe(const struct drm_connector *connector,
+ struct drm_connector_state *conn_state)
+{
+ const struct drm_display_info *info = &connector->display_info;
+ const struct drm_display_mode *mode =
+ connector_state_get_mode(conn_state);
+ struct drm_connector_hdmi_infoframe *infoframe =
+ &conn_state->hdmi.infoframes.hdmi;
+ struct hdmi_vendor_infoframe *frame =
+ &infoframe->data.vendor.hdmi;
+ int ret;
+
+ infoframe->set = false;
+
+ if (!info->has_hdmi_infoframe)
+ return 0;
+
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(frame, connector, mode);
+ if (ret)
+ return ret;
+
+ infoframe->set = true;
+
+ return 0;
+}
+
+static int
+hdmi_generate_infoframes(const struct drm_connector *connector,
+ struct drm_connector_state *conn_state)
+{
+ const struct drm_display_info *info = &connector->display_info;
+ int ret;
+
+ if (!info->is_hdmi)
+ return 0;
+
+ ret = hdmi_generate_avi_infoframe(connector, conn_state);
+ if (ret)
+ return ret;
+
+ ret = hdmi_generate_spd_infoframe(connector, conn_state);
+ if (ret)
+ return ret;
+
+ /*
+ * Audio Infoframes will be generated by ALSA, and updated by
+ * drm_atomic_helper_connector_hdmi_update_audio_infoframe().
+ */
+
+ ret = hdmi_generate_hdr_infoframe(connector, conn_state);
+ if (ret)
+ return ret;
+
+ ret = hdmi_generate_hdmi_vendor_infoframe(connector, conn_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * drm_atomic_helper_connector_hdmi_check() - Helper to check HDMI connector atomic state
+ * @connector: DRM Connector
+ * @state: the DRM State object
+ *
+ * Provides a default connector state check handler for HDMI connectors.
+ * Checks that a desired connector update is valid, and updates various
+ * fields of derived state.
+ *
+ * RETURNS:
+ * Zero on success, or an errno code otherwise.
+ */
+int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct drm_connector_state *new_conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ const struct drm_display_mode *mode =
+ connector_state_get_mode(new_conn_state);
+ int ret;
+
+ if (!new_conn_state->crtc || !new_conn_state->best_encoder)
+ return 0;
+
+ new_conn_state->hdmi.is_limited_range = hdmi_is_limited_range(connector, new_conn_state);
+
+ ret = hdmi_compute_config(connector, new_conn_state, mode);
+ if (ret)
+ return ret;
+
+ ret = hdmi_generate_infoframes(connector, new_conn_state);
+ if (ret)
+ return ret;
+
+ if (old_conn_state->hdmi.broadcast_rgb != new_conn_state->hdmi.broadcast_rgb ||
+ old_conn_state->hdmi.output_bpc != new_conn_state->hdmi.output_bpc ||
+ old_conn_state->hdmi.output_format != new_conn_state->hdmi.output_format) {
+ struct drm_crtc *crtc = new_conn_state->crtc;
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ crtc_state->mode_changed = true;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_check);
+
+/**
+ * drm_hdmi_connector_mode_valid() - Check if mode is valid for HDMI connector
+ * @connector: DRM connector to validate the mode
+ * @mode: Display mode to validate
+ *
+ * Generic .mode_valid implementation for HDMI connectors.
+ *
+ * RETURNS:
+ * MODE_OK if the mode's TMDS character rate is achievable, or another
+ * &enum drm_mode_status value otherwise.
+ */
+enum drm_mode_status
+drm_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ unsigned long long clock;
+
+ clock = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB);
+ if (!clock)
+ return MODE_ERROR;
+
+ return hdmi_clock_valid(connector, mode, clock);
+}
+EXPORT_SYMBOL(drm_hdmi_connector_mode_valid);
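+
+/*
+ * Illustrative sketch: both helpers above plug directly into a hypothetical
+ * connector's helper funcs:
+ *
+ *	static const struct drm_connector_helper_funcs my_conn_helper_funcs = {
+ *		.atomic_check = drm_atomic_helper_connector_hdmi_check,
+ *		.mode_valid = drm_hdmi_connector_mode_valid,
+ *		.get_modes = drm_connector_helper_get_modes,
+ *	};
+ */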
+
+static int clear_device_infoframe(struct drm_connector *connector,
+ enum hdmi_infoframe_type type)
+{
+ const struct drm_connector_hdmi_funcs *funcs = connector->hdmi.funcs;
+ struct drm_device *dev = connector->dev;
+ int ret;
+
+ drm_dbg_kms(dev, "Clearing infoframe type 0x%x\n", type);
+
+ if (!funcs || !funcs->clear_infoframe) {
+ drm_dbg_kms(dev, "Function not implemented, bailing.\n");
+ return 0;
+ }
+
+ ret = funcs->clear_infoframe(connector, type);
+ if (ret) {
+ drm_dbg_kms(dev, "Call failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int clear_infoframe(struct drm_connector *connector,
+ struct drm_connector_hdmi_infoframe *old_frame)
+{
+ int ret;
+
+ ret = clear_device_infoframe(connector, old_frame->data.any.type);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int write_device_infoframe(struct drm_connector *connector,
+ union hdmi_infoframe *frame)
+{
+ const struct drm_connector_hdmi_funcs *funcs = connector->hdmi.funcs;
+ struct drm_device *dev = connector->dev;
+ u8 buffer[HDMI_INFOFRAME_SIZE(MAX)];
+ int ret;
+ int len;
+
+ drm_dbg_kms(dev, "Writing infoframe type %x\n", frame->any.type);
+
+ if (!funcs || !funcs->write_infoframe) {
+ drm_dbg_kms(dev, "Function not implemented, bailing.\n");
+ return -EINVAL;
+ }
+
+ len = hdmi_infoframe_pack(frame, buffer, sizeof(buffer));
+ if (len < 0)
+ return len;
+
+ ret = funcs->write_infoframe(connector, frame->any.type, buffer, len);
+ if (ret) {
+ drm_dbg_kms(dev, "Call failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int write_infoframe(struct drm_connector *connector,
+ struct drm_connector_hdmi_infoframe *new_frame)
+{
+ int ret;
+
+ ret = write_device_infoframe(connector, &new_frame->data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int write_or_clear_infoframe(struct drm_connector *connector,
+ struct drm_connector_hdmi_infoframe *old_frame,
+ struct drm_connector_hdmi_infoframe *new_frame)
+{
+ if (new_frame->set)
+ return write_infoframe(connector, new_frame);
+
+ if (old_frame->set && !new_frame->set)
+ return clear_infoframe(connector, old_frame);
+
+ return 0;
+}
+
+/**
+ * drm_atomic_helper_connector_hdmi_update_infoframes - Update the Infoframes
+ * @connector: A pointer to the HDMI connector
+ * @state: The HDMI connector state to generate the infoframe from
+ *
+ * This function is meant for HDMI connector drivers to write their
+ * infoframes. It will typically be called from an
+ * &drm_encoder_helper_funcs.atomic_enable implementation.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_atomic_helper_connector_hdmi_update_infoframes(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct drm_connector_state *new_conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_display_info *info = &connector->display_info;
+ int ret;
+
+ if (!info->is_hdmi)
+ return 0;
+
+ mutex_lock(&connector->hdmi.infoframes.lock);
+
+ ret = write_or_clear_infoframe(connector,
+ &old_conn_state->hdmi.infoframes.avi,
+ &new_conn_state->hdmi.infoframes.avi);
+ if (ret)
+ goto out;
+
+ if (connector->hdmi.infoframes.audio.set) {
+ ret = write_infoframe(connector,
+ &connector->hdmi.infoframes.audio);
+ if (ret)
+ goto out;
+ }
+
+ ret = write_or_clear_infoframe(connector,
+ &old_conn_state->hdmi.infoframes.hdr_drm,
+ &new_conn_state->hdmi.infoframes.hdr_drm);
+ if (ret)
+ goto out;
+
+ ret = write_or_clear_infoframe(connector,
+ &old_conn_state->hdmi.infoframes.spd,
+ &new_conn_state->hdmi.infoframes.spd);
+ if (ret)
+ goto out;
+
+ if (info->has_hdmi_infoframe) {
+ ret = write_or_clear_infoframe(connector,
+ &old_conn_state->hdmi.infoframes.hdmi,
+ &new_conn_state->hdmi.infoframes.hdmi);
+ if (ret)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&connector->hdmi.infoframes.lock);
+ return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_update_infoframes);
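+
+/*
+ * Illustrative sketch: a hypothetical encoder atomic_enable() implementation
+ * writing the infoframes once the hardware has been powered up:
+ *
+ *	static void my_encoder_atomic_enable(struct drm_encoder *encoder,
+ *					     struct drm_atomic_state *state)
+ *	{
+ *		... power up and configure the hardware ...
+ *
+ *		drm_atomic_helper_connector_hdmi_update_infoframes(connector,
+ *								   state);
+ *	}
+ */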
+
+/**
+ * drm_atomic_helper_connector_hdmi_update_audio_infoframe - Update the Audio Infoframe
+ * @connector: A pointer to the HDMI connector
+ * @frame: A pointer to the audio infoframe to write
+ *
+ * This function is meant for HDMI connector drivers to update their
+ * audio infoframe. It will typically be used in one of the ALSA hooks
+ * (most likely prepare).
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int
+drm_atomic_helper_connector_hdmi_update_audio_infoframe(struct drm_connector *connector,
+ struct hdmi_audio_infoframe *frame)
+{
+ struct drm_connector_hdmi_infoframe *infoframe =
+ &connector->hdmi.infoframes.audio;
+ struct drm_display_info *info = &connector->display_info;
+ int ret;
+
+ if (!info->is_hdmi)
+ return 0;
+
+ mutex_lock(&connector->hdmi.infoframes.lock);
+
+	memcpy(&infoframe->data, frame, sizeof(*frame));
+ infoframe->set = true;
+
+ ret = write_infoframe(connector, infoframe);
+
+ mutex_unlock(&connector->hdmi.infoframes.lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_update_audio_infoframe);
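+
+/*
+ * Illustrative sketch: a hypothetical &drm_connector_hdmi_audio_funcs.prepare
+ * hook forwarding the ALSA-provided infoframe:
+ *
+ *	static int my_audio_prepare(struct drm_connector *connector,
+ *				    struct hdmi_codec_daifmt *fmt,
+ *				    struct hdmi_codec_params *hparms)
+ *	{
+ *		return drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector,
+ *										&hparms->cea);
+ *	}
+ */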
+
+/**
+ * drm_atomic_helper_connector_hdmi_clear_audio_infoframe - Stop sending the Audio Infoframe
+ * @connector: A pointer to the HDMI connector
+ *
+ * This function is meant for HDMI connector drivers to stop sending their
+ * audio infoframe. It will typically be used in one of the ALSA hooks
+ * (most likely shutdown).
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int
+drm_atomic_helper_connector_hdmi_clear_audio_infoframe(struct drm_connector *connector)
+{
+ struct drm_connector_hdmi_infoframe *infoframe =
+ &connector->hdmi.infoframes.audio;
+ struct drm_display_info *info = &connector->display_info;
+ int ret;
+
+ if (!info->is_hdmi)
+ return 0;
+
+ mutex_lock(&connector->hdmi.infoframes.lock);
+
+ infoframe->set = false;
+
+ ret = clear_infoframe(connector, infoframe);
+
+ memset(&infoframe->data, 0, sizeof(infoframe->data));
+
+ mutex_unlock(&connector->hdmi.infoframes.lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_clear_audio_infoframe);
+
+static void
+drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ const struct drm_edid *drm_edid;
+
+ if (status == connector_status_disconnected) {
+		// TODO: also handle CEC and scrambler, HDMI sink disconnected.
+ drm_connector_hdmi_audio_plugged_notify(connector, false);
+ drm_edid_connector_update(connector, NULL);
+ return;
+ }
+
+ if (connector->hdmi.funcs->read_edid)
+ drm_edid = connector->hdmi.funcs->read_edid(connector);
+ else
+ drm_edid = drm_edid_read(connector);
+
+ drm_edid_connector_update(connector, drm_edid);
+
+ drm_edid_free(drm_edid);
+
+ if (status == connector_status_connected) {
+		// TODO: also handle CEC and scrambler, HDMI sink is now connected.
+ drm_connector_hdmi_audio_plugged_notify(connector, true);
+ }
+}
+
+/**
+ * drm_atomic_helper_connector_hdmi_hotplug - Handle the hotplug event for the HDMI connector
+ * @connector: A pointer to the HDMI connector
+ * @status: Connection status
+ *
+ * This function should be called as part of the .detect() or .detect_ctx()
+ * callbacks to update the HDMI-specific connector data.
+ */
+void drm_atomic_helper_connector_hdmi_hotplug(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ drm_atomic_helper_connector_hdmi_update(connector, status);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_hotplug);
+
+/**
+ * drm_atomic_helper_connector_hdmi_force - HDMI Connector implementation of the force callback
+ * @connector: A pointer to the HDMI connector
+ *
+ * This function implements the .force() callback for HDMI connectors. It can
+ * either be used directly as the callback or be called from within the
+ * driver's .force() implementation to keep the HDMI-specific connector data
+ * up to date.
+ */
+void drm_atomic_helper_connector_hdmi_force(struct drm_connector *connector)
+{
+ drm_atomic_helper_connector_hdmi_update(connector, connector->status);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_force);