Diffstat (limited to 'include/drm')
137 files changed, 11278 insertions, 3465 deletions
diff --git a/include/drm/Makefile b/include/drm/Makefile new file mode 100644 index 000000000000..48fae3f167c7 --- /dev/null +++ b/include/drm/Makefile @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0 + +# Ensure drm headers are self-contained and pass kernel-doc +hdrtest-files := \ + $(shell cd $(src) && find * -name '*.h' 2>/dev/null) + +always-$(CONFIG_DRM_HEADER_TEST) += \ + $(patsubst %.h,%.hdrtest, $(hdrtest-files)) + +# Include the header twice to detect missing include guard. +quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@) + cmd_hdrtest = \ + $(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \ + PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \ + touch $@ + +$(obj)/%.hdrtest: $(src)/%.h FORCE + $(call if_changed_dep,hdrtest) diff --git a/include/drm/amd/isp.h b/include/drm/amd/isp.h new file mode 100644 index 000000000000..ec868288abf2 --- /dev/null +++ b/include/drm/amd/isp.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2025 Advanced Micro Devices, Inc. All rights reserved. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
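The hdrtest rule in the new Makefile above compiles each header standalone, includes it twice to trip a missing or misspelled include guard, and runs kernel-doc over it. A minimal sketch of a header that passes all three checks (the name drm_example.h and its contents are hypothetical):

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __DRM_EXAMPLE_H__
#define __DRM_EXAMPLE_H__

#include <linux/types.h>	/* self-contained: pulls in u32 itself */

struct drm_device;		/* forward declaration instead of a heavy include */

/**
 * struct drm_example - example documented struct
 * @flags: kernel-doc for every member keeps "kernel-doc -none -Werror" quiet
 */
struct drm_example {
	u32 flags;
};

#endif /* __DRM_EXAMPLE_H__ */

Without the #ifndef/#define guard, the double -include in cmd_hdrtest would redefine struct drm_example and the syntax-only compile would fail.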
+ * + */ + +#ifndef __ISP_H__ +#define __ISP_H__ + +#include <linux/types.h> + +struct device; + +struct isp_platform_data { + void *adev; + u32 asic_type; + resource_size_t base_rmmio_size; +}; + +int isp_user_buffer_alloc(struct device *dev, void *dmabuf, + void **buf_obj, u64 *buf_addr); + +void isp_user_buffer_free(void *buf_obj); + +int isp_kernel_buffer_alloc(struct device *dev, u64 size, + void **buf_obj, u64 *gpu_addr, void **cpu_addr); + +void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr); + +#endif diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h index 90b69270f2fa..9be85b821aa6 100644 --- a/include/drm/amd_asic_type.h +++ b/include/drm/amd_asic_type.h @@ -22,6 +22,9 @@ #ifndef __AMD_ASIC_TYPE_H__ #define __AMD_ASIC_TYPE_H__ + +#include <linux/types.h> + /* * Supported ASIC types */ @@ -68,4 +71,9 @@ enum amd_asic_type { extern const char *amdgpu_asic_name[]; +struct amdgpu_asic_type_quirk { + unsigned short device; /* PCI device ID */ + u8 revision; /* revision ID */ + unsigned short type; /* real ASIC type */ +}; #endif /*__AMD_ASIC_TYPE_H__ */ diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h index b0dcc07334a1..cf17646c1310 100644 --- a/include/drm/bridge/analogix_dp.h +++ b/include/drm/bridge/analogix_dp.h @@ -10,16 +10,18 @@ #include <drm/drm_crtc.h> struct analogix_dp_device; +struct drm_dp_aux; enum analogix_dp_devtype { EXYNOS_DP, RK3288_DP, RK3399_EDP, + RK3588_EDP, }; static inline bool is_rockchip(enum analogix_dp_devtype type) { - return type == RK3288_DP || type == RK3399_EDP; + return type == RK3288_DP || type == RK3399_EDP || type == RK3588_EDP; } struct analogix_dp_plat_data { @@ -29,8 +31,7 @@ struct analogix_dp_plat_data { struct drm_connector *connector; bool skip_connector; - int (*power_on_start)(struct analogix_dp_plat_data *); - int (*power_on_end)(struct analogix_dp_plat_data *); + int (*power_on)(struct analogix_dp_plat_data *); int (*power_off)(struct analogix_dp_plat_data *); int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *, struct drm_connector *); @@ -45,9 +46,11 @@ struct analogix_dp_device * analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data); int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev); void analogix_dp_unbind(struct analogix_dp_device *dp); -void analogix_dp_remove(struct analogix_dp_device *dp); int analogix_dp_start_crc(struct drm_connector *connector); int analogix_dp_stop_crc(struct drm_connector *connector); +struct analogix_dp_plat_data *analogix_dp_aux_to_plat_data(struct drm_dp_aux *aux); +struct drm_dp_aux *analogix_dp_get_aux(struct analogix_dp_device *dp); + #endif /* _ANALOGIX_DP_H_ */ diff --git a/include/drm/bridge/aux-bridge.h b/include/drm/bridge/aux-bridge.h new file mode 100644 index 000000000000..c2f5a855512f --- /dev/null +++ b/include/drm/bridge/aux-bridge.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2023 Linaro Ltd. 
+ * + * Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org> + */ +#ifndef DRM_AUX_BRIDGE_H +#define DRM_AUX_BRIDGE_H + +#include <drm/drm_connector.h> + +struct auxiliary_device; + +#if IS_ENABLED(CONFIG_DRM_AUX_BRIDGE) +int drm_aux_bridge_register(struct device *parent); +#else +static inline int drm_aux_bridge_register(struct device *parent) +{ + return 0; +} +#endif + +#if IS_ENABLED(CONFIG_DRM_AUX_HPD_BRIDGE) +struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np); +int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev); +struct device *drm_dp_hpd_bridge_register(struct device *parent, + struct device_node *np); +void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status); +#else +static inline struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, + struct device_node *np) +{ + return NULL; +} + +static inline int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev) +{ + return 0; +} + +static inline struct device *drm_dp_hpd_bridge_register(struct device *parent, + struct device_node *np) +{ + return NULL; +} + +static inline void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status) +{ +} +#endif + +#endif diff --git a/include/drm/bridge/dw_dp.h b/include/drm/bridge/dw_dp.h new file mode 100644 index 000000000000..d05df49fd884 --- /dev/null +++ b/include/drm/bridge/dw_dp.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2025 Rockchip Electronics Co., Ltd. + */ + +#ifndef __DW_DP__ +#define __DW_DP__ + +#include <linux/device.h> + +struct drm_encoder; +struct dw_dp; + +struct dw_dp_plat_data { + u32 max_link_rate; +}; + +struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder, + const struct dw_dp_plat_data *plat_data); +#endif /* __DW_DP__ */ diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index f668e75fbabe..336f062e1f9d 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -143,9 +143,15 @@ struct dw_hdmi_plat_data { const struct drm_display_info *info, const struct drm_display_mode *mode); + /* + * priv_audio is specially used for additional audio device to get + * driver data through this dw_hdmi_plat_data. 
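A sketch of how the new priv_audio field is meant to be consumed, assuming a hypothetical vendor audio driver and using the dw_hdmi_to_plat_data() accessor added further down in this diff:

#include <linux/clk.h>
#include <drm/bridge/dw_hdmi.h>

struct my_hdmi_audio {			/* hypothetical glue-private state */
	struct clk *audio_clk;
};

static int my_hdmi_audio_startup(struct dw_hdmi *hdmi)
{
	const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(hdmi);
	struct my_hdmi_audio *audio = pdata->priv_audio;

	/* The glue driver stored its audio state in priv_audio at bind time. */
	return clk_prepare_enable(audio->audio_clk);
}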
+ */ + void *priv_audio; + /* Platform-specific audio enable/disable (optional) */ void (*enable_audio)(struct dw_hdmi *hdmi, int channel, - int width, int rate, int non_pcm); + int width, int rate, int non_pcm, int iec958); void (*disable_audio)(struct dw_hdmi *hdmi); /* Vendor PHY support */ @@ -179,6 +185,7 @@ void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense); int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn, struct device *codec_dev); void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm); +void dw_hdmi_set_sample_iec958(struct dw_hdmi *hdmi, unsigned int iec958); void dw_hdmi_set_sample_width(struct dw_hdmi *hdmi, unsigned int width); void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt); @@ -206,4 +213,8 @@ void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data, bool force, bool disabled, bool rxsense); void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data); +bool dw_hdmi_bus_fmt_is_420(struct dw_hdmi *hdmi); + +const struct dw_hdmi_plat_data *dw_hdmi_to_plat_data(struct dw_hdmi *hdmi); + #endif /* __IMX_HDMI_H__ */ diff --git a/include/drm/bridge/dw_hdmi_qp.h b/include/drm/bridge/dw_hdmi_qp.h new file mode 100644 index 000000000000..3f461f6b9bbf --- /dev/null +++ b/include/drm/bridge/dw_hdmi_qp.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021-2022 Rockchip Electronics Co., Ltd. + * Copyright (c) 2024 Collabora Ltd. + */ + +#ifndef __DW_HDMI_QP__ +#define __DW_HDMI_QP__ + +struct device; +struct drm_encoder; +struct dw_hdmi_qp; +struct platform_device; + +struct dw_hdmi_qp_phy_ops { + int (*init)(struct dw_hdmi_qp *hdmi, void *data); + void (*disable)(struct dw_hdmi_qp *hdmi, void *data); + enum drm_connector_status (*read_hpd)(struct dw_hdmi_qp *hdmi, void *data); + void (*setup_hpd)(struct dw_hdmi_qp *hdmi, void *data); +}; + +struct dw_hdmi_qp_plat_data { + const struct dw_hdmi_qp_phy_ops *phy_ops; + void *phy_data; + int main_irq; + int cec_irq; + unsigned long ref_clk_rate; + /* Supported output formats: bitmask of @hdmi_colorspace */ + unsigned int supported_formats; + /* Maximum bits per color channel: 8, 10 or 12 */ + unsigned int max_bpc; +}; + +struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, + struct drm_encoder *encoder, + const struct dw_hdmi_qp_plat_data *plat_data); +void dw_hdmi_qp_resume(struct device *dev, struct dw_hdmi_qp *hdmi); +#endif /* __DW_HDMI_QP__ */ diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h index 5286a53a1875..65d5e68065e3 100644 --- a/include/drm/bridge/dw_mipi_dsi.h +++ b/include/drm/bridge/dw_mipi_dsi.h @@ -11,6 +11,10 @@ #include <linux/types.h> +#include <drm/drm_atomic.h> +#include <drm/drm_bridge.h> +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> #include <drm/drm_modes.h> struct drm_display_mode; @@ -55,6 +59,17 @@ struct dw_mipi_dsi_plat_data { unsigned long mode_flags, u32 lanes, u32 format); + bool (*mode_fixup)(void *priv_data, const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + u32 *(*get_input_bus_fmts)(void *priv_data, + struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts); + const struct dw_mipi_dsi_phy_ops *phy_ops; const struct dw_mipi_dsi_host_ops *host_ops; @@ -68,5 +83,6 @@ void 
dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi); int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder); void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi); void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave); +struct drm_bridge *dw_mipi_dsi_get_bridge(struct dw_mipi_dsi *dsi); #endif /* __DW_MIPI_DSI__ */ diff --git a/include/drm/bridge/dw_mipi_dsi2.h b/include/drm/bridge/dw_mipi_dsi2.h new file mode 100644 index 000000000000..c18c49379247 --- /dev/null +++ b/include/drm/bridge/dw_mipi_dsi2.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024, Fuzhou Rockchip Electronics Co., Ltd + * + * Authors: Guochun Huang <hero.huang@rock-chips.com> + * Heiko Stuebner <heiko.stuebner@cherry.de> + */ + +#ifndef __DW_MIPI_DSI2__ +#define __DW_MIPI_DSI2__ + +#include <linux/regmap.h> +#include <linux/types.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_bridge.h> +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> +#include <drm/drm_modes.h> + +struct drm_display_mode; +struct drm_encoder; +struct dw_mipi_dsi2; +struct mipi_dsi_device; +struct platform_device; + +enum dw_mipi_dsi2_phy_type { + DW_MIPI_DSI2_DPHY, + DW_MIPI_DSI2_CPHY, +}; + +struct dw_mipi_dsi2_phy_iface { + int ppi_width; + enum dw_mipi_dsi2_phy_type phy_type; +}; + +struct dw_mipi_dsi2_phy_timing { + u32 data_hs2lp; + u32 data_lp2hs; +}; + +struct dw_mipi_dsi2_phy_ops { + int (*init)(void *priv_data); + void (*power_on)(void *priv_data); + void (*power_off)(void *priv_data); + void (*get_interface)(void *priv_data, struct dw_mipi_dsi2_phy_iface *iface); + int (*get_lane_mbps)(void *priv_data, + const struct drm_display_mode *mode, + unsigned long mode_flags, u32 lanes, u32 format, + unsigned int *lane_mbps); + int (*get_timing)(void *priv_data, unsigned int lane_mbps, + struct dw_mipi_dsi2_phy_timing *timing); + int (*get_esc_clk_rate)(void *priv_data, unsigned int *esc_clk_rate); +}; + +struct dw_mipi_dsi2_host_ops { + int (*attach)(void *priv_data, + struct mipi_dsi_device *dsi); + int (*detach)(void *priv_data, + struct mipi_dsi_device *dsi); +}; + +struct dw_mipi_dsi2_plat_data { + struct regmap *regmap; + unsigned int max_data_lanes; + + enum drm_mode_status (*mode_valid)(void *priv_data, + const struct drm_display_mode *mode, + unsigned long mode_flags, + u32 lanes, u32 format); + + bool (*mode_fixup)(void *priv_data, const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + u32 *(*get_input_bus_fmts)(void *priv_data, + struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts); + + const struct dw_mipi_dsi2_phy_ops *phy_ops; + const struct dw_mipi_dsi2_host_ops *host_ops; + + void *priv_data; +}; + +struct dw_mipi_dsi2 *dw_mipi_dsi2_probe(struct platform_device *pdev, + const struct dw_mipi_dsi2_plat_data *plat_data); +void dw_mipi_dsi2_remove(struct dw_mipi_dsi2 *dsi2); +int dw_mipi_dsi2_bind(struct dw_mipi_dsi2 *dsi2, struct drm_encoder *encoder); +void dw_mipi_dsi2_unbind(struct dw_mipi_dsi2 *dsi2); + +#endif /* __DW_MIPI_DSI2__ */ diff --git a/include/drm/bridge/imx.h b/include/drm/bridge/imx.h new file mode 100644 index 000000000000..b93f719fe0e7 --- /dev/null +++ b/include/drm/bridge/imx.h @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2012 Sascha Hauer, Pengutronix + */ + +#ifndef DRM_IMX_BRIDGE_H +#define DRM_IMX_BRIDGE_H + +struct device; 
+struct device_node; +struct drm_bridge; + +struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev, + struct device_node *np, + int type); + +#endif diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h new file mode 100644 index 000000000000..31d7ed589233 --- /dev/null +++ b/include/drm/bridge/samsung-dsim.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2022 Amarula Solutions(India) + * Author: Jagan Teki <jagan@amarulasolutions.com> + */ + +#ifndef __SAMSUNG_DSIM__ +#define __SAMSUNG_DSIM__ + +#include <linux/gpio/consumer.h> +#include <linux/regulator/consumer.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_of.h> + +struct platform_device; +struct samsung_dsim; + +#define DSIM_STATE_ENABLED BIT(0) +#define DSIM_STATE_INITIALIZED BIT(1) +#define DSIM_STATE_CMD_LPM BIT(2) +#define DSIM_STATE_VIDOUT_AVAILABLE BIT(3) + +enum samsung_dsim_type { + DSIM_TYPE_EXYNOS3250, + DSIM_TYPE_EXYNOS4210, + DSIM_TYPE_EXYNOS5410, + DSIM_TYPE_EXYNOS5422, + DSIM_TYPE_EXYNOS5433, + DSIM_TYPE_EXYNOS7870, + DSIM_TYPE_IMX8MM, + DSIM_TYPE_IMX8MP, + DSIM_TYPE_COUNT, +}; + +#define samsung_dsim_hw_is_exynos(hw) \ + ((hw) >= DSIM_TYPE_EXYNOS3250 && (hw) <= DSIM_TYPE_EXYNOS5433) + +struct samsung_dsim_transfer { + struct list_head list; + struct completion completed; + int result; + struct mipi_dsi_packet packet; + u16 flags; + u16 tx_done; + + u8 *rx_payload; + u16 rx_len; + u16 rx_done; +}; + +struct samsung_dsim_driver_data { + const unsigned int *reg_ofs; + unsigned int plltmr_reg; + unsigned int has_legacy_status_reg:1; + unsigned int has_freqband:1; + unsigned int has_clklane_stop:1; + unsigned int has_broken_fifoctrl_emptyhdr:1; + unsigned int has_sfrctrl:1; + struct clk_bulk_data *clk_data; + unsigned int num_clks; + unsigned int min_freq; + unsigned int max_freq; + unsigned int wait_for_hdr_fifo; + unsigned int wait_for_reset; + unsigned int num_bits_resol; + unsigned int video_mode_bit; + unsigned int pll_stable_bit; + unsigned int esc_clken_bit; + unsigned int byte_clken_bit; + unsigned int tx_req_hsclk_bit; + unsigned int lane_esc_clk_bit; + unsigned int lane_esc_data_offset; + unsigned int pll_p_offset; + unsigned int pll_m_offset; + unsigned int pll_s_offset; + unsigned int main_vsa_offset; + const unsigned int *reg_values; + unsigned int pll_fin_min; + unsigned int pll_fin_max; + u16 m_min; + u16 m_max; +}; + +struct samsung_dsim_host_ops { + int (*register_host)(struct samsung_dsim *dsim); + void (*unregister_host)(struct samsung_dsim *dsim); + int (*attach)(struct samsung_dsim *dsim, struct mipi_dsi_device *device); + void (*detach)(struct samsung_dsim *dsim, struct mipi_dsi_device *device); + irqreturn_t (*te_irq_handler)(struct samsung_dsim *dsim); +}; + +struct samsung_dsim_plat_data { + enum samsung_dsim_type hw_type; + const struct samsung_dsim_host_ops *host_ops; +}; + +struct samsung_dsim { + struct mipi_dsi_host dsi_host; + struct drm_bridge bridge; + struct drm_bridge *out_bridge; + struct device *dev; + struct drm_display_mode mode; + + void __iomem *reg_base; + struct phy *phy; + struct clk *pll_clk; + struct regulator_bulk_data supplies[2]; + int irq; + struct gpio_desc *te_gpio; + + u32 pll_clk_rate; + u32 burst_clk_rate; + u32 hs_clock; + u32 esc_clk_rate; + u32 lanes; + u32 mode_flags; + u32 format; + + bool swap_dn_dp_clk; + bool swap_dn_dp_data; + int state; + struct drm_property *brightness; + struct completion completed; + + spinlock_t 
transfer_lock; /* protects transfer_list */ + struct list_head transfer_list; + + const struct samsung_dsim_driver_data *driver_data; + const struct samsung_dsim_plat_data *plat_data; + + void *priv; +}; + +extern int samsung_dsim_probe(struct platform_device *pdev); +extern void samsung_dsim_remove(struct platform_device *pdev); +extern const struct dev_pm_ops samsung_dsim_pm_ops; + +#endif /* __SAMSUNG_DSIM__ */ diff --git a/include/drm/clients/drm_client_setup.h b/include/drm/clients/drm_client_setup.h new file mode 100644 index 000000000000..46aab3fb46be --- /dev/null +++ b/include/drm/clients/drm_client_setup.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef DRM_CLIENT_SETUP_H +#define DRM_CLIENT_SETUP_H + +#include <linux/types.h> + +struct drm_device; +struct drm_format_info; + +#if defined(CONFIG_DRM_CLIENT_SETUP) +void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format); +void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc); +void drm_client_setup_with_color_mode(struct drm_device *dev, unsigned int color_mode); +#else +static inline void drm_client_setup(struct drm_device *dev, + const struct drm_format_info *format) +{ } +static inline void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc) +{ } +static inline void drm_client_setup_with_color_mode(struct drm_device *dev, + unsigned int color_mode) +{ } +#endif + +#endif diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h index 632376c291db..e4eebabab975 100644 --- a/include/drm/display/drm_dp.h +++ b/include/drm/display/drm_dp.h @@ -115,6 +115,7 @@ #define DP_MAX_LANE_COUNT 0x002 # define DP_MAX_LANE_COUNT_MASK 0x1f +# define DP_POST_LT_ADJ_REQ_SUPPORTED (1 << 5) /* 1.3 */ # define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */ # define DP_ENHANCED_FRAME_CAP (1 << 7) @@ -148,6 +149,7 @@ #define DP_RECEIVE_PORT_0_CAP_0 0x008 # define DP_LOCAL_EDID_PRESENT (1 << 1) # define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2) +# define DP_HBLANK_EXPANSION_CAPABLE (1 << 3) #define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009 @@ -231,6 +233,8 @@ #define DP_RECEIVER_ALPM_CAP 0x02e /* eDP 1.4 */ # define DP_ALPM_CAP (1 << 0) +# define DP_ALPM_PM_STATE_2A_SUPPORT (1 << 1) /* eDP 1.5 */ +# define DP_ALPM_AUX_LESS_CAP (1 << 2) /* eDP 1.5 */ #define DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP 0x02f /* eDP 1.4 */ # define DP_AUX_FRAME_SYNC_CAP (1 << 0) @@ -254,6 +258,8 @@ # define DP_DSC_RC_BUF_BLK_SIZE_4 0x1 # define DP_DSC_RC_BUF_BLK_SIZE_16 0x2 # define DP_DSC_RC_BUF_BLK_SIZE_64 0x3 +# define DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT 3 /* DP 2.1a, in units of 2 MPixels/sec */ +# define DP_DSC_THROUGHPUT_MODE_0_DELTA_MASK (0x1f << DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT) #define DP_DSC_RC_BUF_SIZE 0x063 @@ -286,9 +292,8 @@ #define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */ # define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0) -# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8 -# define DP_DSC_MAX_BPP_DELTA_VERSION_MASK 0x06 -# define DP_DSC_MAX_BPP_DELTA_AVAILABILITY 0x08 +# define DP_DSC_MAX_BPP_DELTA_VERSION_MASK (0x3 << 5) /* eDP 1.5 & DP 2.0 */ +# define DP_DSC_MAX_BPP_DELTA_AVAILABILITY (1 << 7) /* eDP 1.5 & DP 2.0 */ #define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069 # define DP_DSC_RGB (1 << 0) @@ -357,6 +362,7 @@ # define DP_DSC_BITS_PER_PIXEL_1_4 0x2 # define DP_DSC_BITS_PER_PIXEL_1_2 0x3 # define DP_DSC_BITS_PER_PIXEL_1_1 0x4 +# define DP_DSC_BITS_PER_PIXEL_MASK 0x7 #define DP_PSR_SUPPORT 0x070 /* XXX 1.2? 
*/ # define DP_PSR_IS_SUPPORTED 1 @@ -544,6 +550,29 @@ /* DFP Capability Extension */ #define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */ +#define DP_PANEL_REPLAY_CAP_SUPPORT 0x0b0 /* DP 2.0 */ +# define DP_PANEL_REPLAY_SUPPORT (1 << 0) +# define DP_PANEL_REPLAY_SU_SUPPORT (1 << 1) +# define DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT (1 << 2) /* eDP 1.5 */ + +#define DP_PANEL_REPLAY_CAP_SIZE 7 + +#define DP_PANEL_REPLAY_CAP_CAPABILITY 0xb1 +# define DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_SHIFT 1 /* DP 2.1a */ +# define DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_MASK (3 << DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_SHIFT) +# define DP_DSC_DECODE_CAPABILITY_IN_PR_SUPPORTED 0x00 +# define DP_DSC_DECODE_CAPABILITY_IN_PR_FULL_FRAME_ONLY 0x01 +# define DP_DSC_DECODE_CAPABILITY_IN_PR_NOT_SUPPORTED 0x02 +# define DP_DSC_DECODE_CAPABILITY_IN_PR_RESERVED 0x03 +# define DP_PANEL_REPLAY_ASYNC_VIDEO_TIMING_NOT_SUPPORTED_IN_PR (1 << 3) +# define DP_PANEL_REPLAY_DSC_CRC_OF_MULTIPLE_SUS_SUPPORTED (1 << 4) +# define DP_PANEL_REPLAY_SU_GRANULARITY_REQUIRED (1 << 5) +# define DP_PANEL_REPLAY_SU_Y_GRANULARITY_EXTENDED_CAPABILITY_SUPPORTED (1 << 6) +# define DP_PANEL_REPLAY_LINK_OFF_SUPPORTED_IN_PR_AFTER_ADAPTIVE_SYNC_SDP (1 << 7) + +#define DP_PANEL_REPLAY_CAP_X_GRANULARITY 0xb2 +#define DP_PANEL_REPLAY_CAP_Y_GRANULARITY 0xb4 + /* Link Configuration */ #define DP_LINK_BW_SET 0x100 # define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */ @@ -557,6 +586,7 @@ #define DP_LANE_COUNT_SET 0x101 # define DP_LANE_COUNT_MASK 0x0f +# define DP_POST_LT_ADJ_REQ_GRANTED (1 << 5) /* 1.3 */ # define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) #define DP_TRAINING_PATTERN_SET 0x102 @@ -647,6 +677,9 @@ # define DP_LINK_QUAL_PATTERN_PRSBS31 0x38 # define DP_LINK_QUAL_PATTERN_CUSTOM 0x40 # define DP_LINK_QUAL_PATTERN_SQUARE 0x48 +# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DISABLED 0x49 +# define DP_LINK_QUAL_PATTERN_SQUARE_DEEMPHASIS_DISABLED 0x4a +# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED 0x4b #define DP_TRAINING_LANE0_1_SET2 0x10f #define DP_TRAINING_LANE2_3_SET2 0x110 @@ -670,7 +703,8 @@ #define DP_RECEIVER_ALPM_CONFIG 0x116 /* eDP 1.4 */ # define DP_ALPM_ENABLE (1 << 0) -# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1) +# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1) /* eDP 1.5 */ +# define DP_ALPM_MODE_AUX_LESS (1 << 2) /* eDP 1.5 */ #define DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF 0x117 /* eDP 1.4 */ # define DP_AUX_FRAME_SYNC_ENABLE (1 << 0) @@ -679,6 +713,9 @@ #define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */ # define DP_PWR_NOT_NEEDED (1 << 0) +#define DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_GRANT 0x119 /* 1.4a */ +# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_GRANTED (1 << 0) + #define DP_FEC_CONFIGURATION 0x120 /* 1.4 */ # define DP_FEC_READY (1 << 0) # define DP_FEC_ERR_COUNT_SEL_MASK (7 << 1) @@ -692,11 +729,15 @@ # define DP_FEC_LANE_2_SELECT (2 << 4) # define DP_FEC_LANE_3_SELECT (3 << 4) +#define DP_SDP_ERROR_DETECTION_CONFIGURATION 0x121 /* DP 2.0 E11 */ +#define DP_SDP_CRC16_128B132B_EN BIT(0) + #define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */ # define DP_AUX_FRAME_SYNC_VALID (1 << 0) #define DP_DSC_ENABLE 0x160 /* DP 1.4 */ # define DP_DECOMPRESSION_EN (1 << 0) +# define DP_DSC_PASSTHROUGH_EN (1 << 1) #define DP_DSC_CONFIGURATION 0x161 /* DP 2.0 */ #define DP_PSR_EN_CFG 0x170 /* XXX 1.2? 
*/ @@ -707,6 +748,7 @@ # define DP_PSR_SU_REGION_SCANLINE_CAPTURE BIT(4) /* eDP 1.4a */ # define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS BIT(5) /* eDP 1.4a */ # define DP_PSR_ENABLE_PSR2 BIT(6) /* eDP 1.4a */ +# define DP_PSR_ENABLE_SU_REGION_ET BIT(7) /* eDP 1.5 */ #define DP_ADAPTER_CTRL 0x1a0 # define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0) @@ -714,6 +756,23 @@ #define DP_BRANCH_DEVICE_CTRL 0x1a1 # define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0) +#define PANEL_REPLAY_CONFIG 0x1b0 /* DP 2.0 */ +# define DP_PANEL_REPLAY_ENABLE (1 << 0) +# define DP_PANEL_REPLAY_VSC_SDP_CRC_EN (1 << 1) /* eDP 1.5 */ +# define DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN (1 << 3) +# define DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN (1 << 4) +# define DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN (1 << 5) +# define DP_PANEL_REPLAY_SU_ENABLE (1 << 6) +# define DP_PANEL_REPLAY_ENABLE_SU_REGION_ET (1 << 7) /* DP 2.1 */ + +#define PANEL_REPLAY_CONFIG2 0x1b1 /* eDP 1.5 */ +# define DP_PANEL_REPLAY_SINK_REFRESH_RATE_UNLOCK_GRANTED (1 << 0) +# define DP_PANEL_REPLAY_CRC_VERIFICATION (1 << 1) +# define DP_PANEL_REPLAY_SU_Y_GRANULARITY_EXTENDED_EN (1 << 2) +# define DP_PANEL_REPLAY_SU_Y_GRANULARITY_EXTENDED_VAL_SEL_SHIFT 3 +# define DP_PANEL_REPLAY_SU_Y_GRANULARITY_EXTENDED_VAL_SEL_MASK (0xf << 3) +# define DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE (1 << 7) + #define DP_PAYLOAD_ALLOCATE_SET 0x1c0 #define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1 #define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2 @@ -745,6 +804,7 @@ #define DP_LANE_ALIGN_STATUS_UPDATED 0x204 #define DP_INTERLANE_ALIGN_DONE (1 << 0) +#define DP_POST_LT_ADJ_REQ_IN_PROGRESS (1 << 1) /* 1.3 */ #define DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE (1 << 2) /* 2.0 E11 */ #define DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE (1 << 3) /* 2.0 E11 */ #define DP_128B132B_LT_FAILED (1 << 4) /* 2.0 E11 */ @@ -957,6 +1017,7 @@ # define DP_EDP_14 0x03 # define DP_EDP_14a 0x04 /* eDP 1.4a */ # define DP_EDP_14b 0x05 /* eDP 1.4b */ +# define DP_EDP_15 0x06 /* eDP 1.5 */ #define DP_EDP_GENERAL_CAP_1 0x701 # define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0) @@ -980,6 +1041,8 @@ #define DP_EDP_GENERAL_CAP_2 0x703 # define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0) +# define DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE (1 << 4) +# define DP_EDP_SMOOTH_BRIGHTNESS_CAPABLE (1 << 6) /* eDP 2.0 */ #define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */ # define DP_EDP_X_REGION_CAP_MASK (0xf << 0) @@ -1005,6 +1068,7 @@ # define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE (1 << 4) # define DP_EDP_REGIONAL_BACKLIGHT_ENABLE (1 << 5) # define DP_EDP_UPDATE_REGION_BRIGHTNESS (1 << 6) /* eDP 1.4 */ +# define DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE (1 << 7) #define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722 #define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723 @@ -1029,6 +1093,7 @@ #define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732 #define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733 +#define DP_EDP_PANEL_TARGET_LUMINANCE_VALUE 0x734 #define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */ #define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */ @@ -1059,6 +1124,7 @@ # define STREAM_STATUS_CHANGED (1 << 2) # define HDMI_LINK_STATUS_CHANGED (1 << 3) # define CONNECTED_OFF_ENTRY_REQUESTED (1 << 4) +# define DP_TUNNELING_IRQ (1 << 5) #define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? 
*/ # define DP_PSR_LINK_CRC_ERROR (1 << 0) @@ -1100,6 +1166,18 @@ #define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */ #define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */ +#define DP_PANEL_REPLAY_ERROR_STATUS 0x2020 /* DP 2.1*/ +# define DP_PANEL_REPLAY_LINK_CRC_ERROR (1 << 0) +# define DP_PANEL_REPLAY_RFB_STORAGE_ERROR (1 << 1) +# define DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2) + +#define DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS 0x2022 /* DP 2.1 */ +# define DP_SINK_DEVICE_PANEL_REPLAY_STATUS_MASK (7 << 0) +# define DP_SINK_FRAME_LOCKED_SHIFT 3 +# define DP_SINK_FRAME_LOCKED_MASK (3 << 3) +# define DP_SINK_FRAME_LOCKED_STATUS_VALID_SHIFT 5 +# define DP_SINK_FRAME_LOCKED_STATUS_VALID_MASK (1 << 5) + /* Extended Receiver Capability: See DP_DPCD_REV for definitions */ #define DP_DP13_DPCD_REV 0x2200 @@ -1113,8 +1191,19 @@ # define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */ # define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */ +#define DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST 0x2211 /* 1.4a */ +# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_MASK 0xff +# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_1_MS 0x00 +# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_20_MS 0x01 +# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_40_MS 0x02 +# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_60_MS 0x03 +# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_80_MS 0x04 +# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_100_MS 0x05 + #define DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1 0x2214 /* 2.0 E11 */ # define DP_ADAPTIVE_SYNC_SDP_SUPPORTED (1 << 0) +# define DP_ADAPTIVE_SYNC_SDP_OPERATION_MODE GENMASK(1, 0) +# define DP_ADAPTIVE_SYNC_SDP_LENGTH GENMASK(5, 0) # define DP_AS_SDP_FIRST_HALF_LINE_OR_3840_PIXEL_CYCLE_WINDOW_NOT_SUPPORTED (1 << 1) # define DP_VSC_EXT_SDP_FRAMEWORK_VERSION_1_SUPPORTED (1 << 4) @@ -1348,6 +1437,66 @@ #define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494 #define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518 +/* DP-tunneling */ +#define DP_TUNNELING_OUI 0xe0000 +#define DP_TUNNELING_OUI_BYTES 3 + +#define DP_TUNNELING_DEV_ID 0xe0003 +#define DP_TUNNELING_DEV_ID_BYTES 6 + +#define DP_TUNNELING_HW_REV 0xe0009 +#define DP_TUNNELING_HW_REV_MAJOR_SHIFT 4 +#define DP_TUNNELING_HW_REV_MAJOR_MASK (0xf << DP_TUNNELING_HW_REV_MAJOR_SHIFT) +#define DP_TUNNELING_HW_REV_MINOR_SHIFT 0 +#define DP_TUNNELING_HW_REV_MINOR_MASK (0xf << DP_TUNNELING_HW_REV_MINOR_SHIFT) + +#define DP_TUNNELING_SW_REV_MAJOR 0xe000a +#define DP_TUNNELING_SW_REV_MINOR 0xe000b + +#define DP_TUNNELING_CAPABILITIES 0xe000d +#define DP_IN_BW_ALLOCATION_MODE_SUPPORT (1 << 7) +#define DP_PANEL_REPLAY_OPTIMIZATION_SUPPORT (1 << 6) +#define DP_TUNNELING_SUPPORT (1 << 0) + +#define DP_IN_ADAPTER_INFO 0xe000e +#define DP_IN_ADAPTER_NUMBER_BITS 7 +#define DP_IN_ADAPTER_NUMBER_MASK ((1 << DP_IN_ADAPTER_NUMBER_BITS) - 1) + +#define DP_USB4_DRIVER_ID 0xe000f +#define DP_USB4_DRIVER_ID_BITS 4 +#define DP_USB4_DRIVER_ID_MASK ((1 << DP_USB4_DRIVER_ID_BITS) - 1) + +#define DP_USB4_DRIVER_BW_CAPABILITY 0xe0020 +#define DP_USB4_DRIVER_BW_ALLOCATION_MODE_SUPPORT (1 << 7) + +#define DP_IN_ADAPTER_TUNNEL_INFORMATION 0xe0021 +#define DP_GROUP_ID_BITS 3 +#define DP_GROUP_ID_MASK ((1 << DP_GROUP_ID_BITS) - 1) + +#define DP_BW_GRANULARITY 0xe0022 +#define DP_BW_GRANULARITY_MASK 0x3 + +#define DP_ESTIMATED_BW 0xe0023 +#define DP_ALLOCATED_BW 0xe0024 + +#define DP_TUNNELING_STATUS 0xe0025 +#define DP_BW_ALLOCATION_CAPABILITY_CHANGED (1 << 3) +#define DP_ESTIMATED_BW_CHANGED (1 << 2) +#define DP_BW_REQUEST_SUCCEEDED (1 << 1) +#define 
DP_BW_REQUEST_FAILED (1 << 0) + +#define DP_TUNNELING_MAX_LINK_RATE 0xe0028 + +#define DP_TUNNELING_MAX_LANE_COUNT 0xe0029 +#define DP_TUNNELING_MAX_LANE_COUNT_MASK 0x1f + +#define DP_DPTX_BW_ALLOCATION_MODE_CONTROL 0xe0030 +#define DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE (1 << 7) +#define DP_UNMASK_BW_ALLOCATION_IRQ (1 << 6) + +#define DP_REQUEST_BW 0xe0031 +#define MAX_DP_REQUEST_BW 255 + /* LTTPR: Link Training (LT)-tunable PHY Repeaters */ #define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */ #define DP_MAX_LINK_RATE_PHY_REPEATER 0xf0001 /* 1.4a */ @@ -1356,6 +1505,8 @@ #define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */ #define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */ #define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 0xf0005 /* 1.4a */ +# define DP_EXTENDED_WAKE_TIMEOUT_REQUEST_MASK 0x7f +# define DP_EXTENDED_WAKE_TIMEOUT_GRANT (1 << 7) #define DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER 0xf0006 /* 2.0 */ # define DP_PHY_REPEATER_128B132B_SUPPORTED (1 << 0) /* See DP_128B132B_SUPPORTED_LINK_RATES for values */ @@ -1426,6 +1577,10 @@ enum drm_dp_phy { #define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */ #define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */ +#define DP_OUI_PHY_REPEATER1 0xf003d /* 1.3 */ +#define DP_OUI_PHY_REPEATER(dp_phy) \ + DP_LTTPR_REG(dp_phy, DP_OUI_PHY_REPEATER1) + #define __DP_FEC1_BASE 0xf0290 /* 1.4 */ #define __DP_FEC2_BASE 0xf0298 /* 1.4 */ #define DP_FEC_BASE(dp_phy) \ @@ -1532,9 +1687,10 @@ enum drm_dp_phy { #define DP_BRANCH_OUI_HEADER_SIZE 0xc #define DP_RECEIVER_CAP_SIZE 0xf -#define DP_DSC_RECEIVER_CAP_SIZE 0xf +#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */ +#define DP_DSC_BRANCH_CAP_SIZE 3 #define EDP_PSR_RECEIVER_CAP_SIZE 2 -#define EDP_DISPLAY_CTL_CAP_SIZE 3 +#define EDP_DISPLAY_CTL_CAP_SIZE 5 #define DP_LTTPR_COMMON_CAP_SIZE 8 #define DP_LTTPR_PHY_CAP_SIZE 3 @@ -1544,10 +1700,12 @@ enum drm_dp_phy { #define DP_SDP_AUDIO_COPYMANAGEMENT 0x05 /* DP 1.2 */ #define DP_SDP_ISRC 0x06 /* DP 1.2 */ #define DP_SDP_VSC 0x07 /* DP 1.2 */ +#define DP_SDP_ADAPTIVE_SYNC 0x22 /* DP 1.4 */ #define DP_SDP_CAMERA_GENERIC(i) (0x08 + (i)) /* 0-7, DP 1.3 */ #define DP_SDP_PPS 0x10 /* DP 1.4 */ #define DP_SDP_VSC_EXT_VESA 0x20 /* DP 1.4 */ #define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */ + /* 0x80+ CEA-861 infoframe types */ #define DP_SDP_AUDIO_INFOFRAME_HB2 0x1b @@ -1630,7 +1788,7 @@ enum dp_pixelformat { * * This enum is used to indicate DP VSC SDP Colorimetry formats. * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through - * DB18] and a name of enum member follows DRM_MODE_COLORIMETRY definition. + * DB18] and a name of enum member follows enum drm_colorimetry definition. 
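The DP-tunneling DPCD block above is read over the DP-IN adapter's AUX channel; a rough sketch of a capability check built only from these new registers (the function name is hypothetical):

static bool my_tunnel_bw_alloc_supported(struct drm_dp_aux *aux)
{
	u8 cap;

	/* One byte at 0xe000d; drm_dp_dpcd_readb() returns 1 on success. */
	if (drm_dp_dpcd_readb(aux, DP_TUNNELING_CAPABILITIES, &cap) != 1)
		return false;

	/* Bandwidth allocation mode only matters on an actual tunnel. */
	return (cap & DP_TUNNELING_SUPPORT) &&
	       (cap & DP_IN_BW_ALLOCATION_MODE_SUPPORT);
}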
* * @DP_COLORIMETRY_DEFAULT: sRGB (IEC 61966-2-1) or * ITU-R BT.601 colorimetry format @@ -1703,4 +1861,11 @@ enum dp_content_type { DP_CONTENT_TYPE_GAME = 0x04, }; +enum operation_mode { + DP_AS_SDP_AVT_DYNAMIC_VTOTAL = 0x00, + DP_AS_SDP_AVT_FIXED_VTOTAL = 0x01, + DP_AS_SDP_FAVT_TRR_NOT_REACHED = 0x02, + DP_AS_SDP_FAVT_TRR_REACHED = 0x03 +}; + #endif /* _DRM_DP_H_ */ diff --git a/include/drm/display/drm_dp_dual_mode_helper.h b/include/drm/display/drm_dp_dual_mode_helper.h index 7ee482265087..7ac6969db935 100644 --- a/include/drm/display/drm_dp_dual_mode_helper.h +++ b/include/drm/display/drm_dp_dual_mode_helper.h @@ -117,5 +117,5 @@ const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type); int drm_lspcon_get_mode(const struct drm_device *dev, struct i2c_adapter *adapter, enum drm_lspcon_mode *current_mode); int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter, - enum drm_lspcon_mode reqd_mode); + enum drm_lspcon_mode reqd_mode, int time_out); #endif diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h index ab55453f2d2c..df2f24b950e4 100644 --- a/include/drm/display/drm_dp_helper.h +++ b/include/drm/display/drm_dp_helper.h @@ -37,6 +37,7 @@ bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], int lane_count); bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE], int lane_count); +bool drm_dp_post_lt_adj_req_in_progress(const u8 link_status[DP_LINK_STATUS_SIZE]); u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], int lane); u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE], @@ -98,8 +99,41 @@ struct drm_dp_vsc_sdp { enum dp_content_type content_type; }; -void drm_dp_vsc_sdp_log(const char *level, struct device *dev, - const struct drm_dp_vsc_sdp *vsc); +/** + * struct drm_dp_as_sdp - drm DP Adaptive Sync SDP + * + * This structure represents a DP AS SDP of drm + * It is based on DP 2.1 spec [Table 2-126: Adaptive-Sync SDP Header Bytes] and + * [Table 2-127: Adaptive-Sync SDP Payload for DB0 through DB8] + * + * @sdp_type: Secondary-data packet type + * @revision: Revision Number + * @length: Number of valid data bytes + * @vtotal: Minimum Vertical Vtotal + * @target_rr: Target Refresh + * @duration_incr_ms: Successive frame duration increase + * @duration_decr_ms: Successive frame duration decrease + * @target_rr_divider: Target refresh rate divider + * @mode: Adaptive Sync Operation Mode + */ +struct drm_dp_as_sdp { + unsigned char sdp_type; + unsigned char revision; + unsigned char length; + int vtotal; + int target_rr; + int duration_incr_ms; + int duration_decr_ms; + bool target_rr_divider; + enum operation_mode mode; +}; + +void drm_dp_as_sdp_log(struct drm_printer *p, + const struct drm_dp_as_sdp *as_sdp); +void drm_dp_vsc_sdp_log(struct drm_printer *p, const struct drm_dp_vsc_sdp *vsc); + +bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +bool drm_dp_as_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]); int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]); @@ -123,6 +157,13 @@ drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) } static inline bool +drm_dp_post_lt_adj_req_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x13 && + (dpcd[DP_MAX_LANE_COUNT] & DP_POST_LT_ADJ_REQ_SUPPORTED); +} + +static inline bool drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { return 
dpcd[DP_DPCD_REV] >= 0x11 && @@ -164,11 +205,17 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) } /* DP/eDP DSC support */ +u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]); u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], bool is_edp); u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]); int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE], u8 dsc_bpc[3]); +int drm_dp_dsc_sink_max_slice_throughput(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], + int peak_pixel_rate, bool is_rgb_yuv444); +int drm_dp_dsc_branch_max_overall_throughput(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE], + bool is_rgb_yuv444); +int drm_dp_dsc_branch_max_line_width(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE]); static inline bool drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) @@ -181,9 +228,8 @@ static inline u16 drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) { return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] | - (dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] & - DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK << - DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT); + ((dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] & + DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK) << 8); } static inline u32 @@ -194,6 +240,19 @@ drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) DP_DSC_SLICE_WIDTH_MULTIPLIER; } +/** + * drm_dp_dsc_sink_supports_format() - check if sink supports DSC with given output format + * @dsc_dpcd : DSC-capability DPCDs of the sink + * @output_format: output_format which is to be checked + * + * Returns true if the sink supports DSC with the given output_format, false otherwise. + */ +static inline bool +drm_dp_dsc_sink_supports_format(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], u8 output_format) +{ + return dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & output_format; +} + /* Forward Error Correction Support on DP 1.4 */ static inline bool drm_dp_sink_supports_fec(const u8 fec_capable) @@ -208,6 +267,12 @@ drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) } static inline bool +drm_dp_128b132b_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B; +} + +static inline bool drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { return dpcd[DP_EDP_CONFIGURATION_CAP] & @@ -239,6 +304,19 @@ drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]) return !!(edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP); } +/** + * drm_dp_is_uhbr_rate - Determine if a link rate is UHBR + * @link_rate: link rate in 10kbits/s units + * + * Determine if the provided link rate is an UHBR rate. + * + * Returns: %True if @link_rate is an UHBR rate. + */ +static inline bool drm_dp_is_uhbr_rate(int link_rate) +{ + return link_rate >= 1000000; +} + /* * DisplayPort AUX channel */ @@ -260,8 +338,8 @@ struct drm_dp_aux_msg { }; struct cec_adapter; -struct edid; struct drm_connector; +struct drm_edid; /** * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX @@ -395,7 +473,18 @@ struct drm_dp_aux { * @wait_hpd_asserted: wait for HPD to be asserted * * This is mainly useful for eDP panels drivers to wait for an eDP - * panel to finish powering on. This is an optional function. + * panel to finish powering on. It is optional for DP AUX controllers + * to implement this function. 
It is required for DP AUX endpoints + * (panel drivers) to call this function after powering up but before + * doing AUX transfers unless the DP AUX endpoint driver knows that + * we're not using the AUX controller's HPD. One example of the panel + * driver not needing to call this is if HPD is hooked up to a GPIO + * that the panel driver can read directly. + * + * If a DP AUX controller does not implement this function then it + * may still support eDP panels that use the AUX controller's built-in + * HPD signal by implementing a long wait for HPD in the transfer() + * callback, though this is deprecated. * * This function will efficiently wait for the HPD signal to be * asserted. The `wait_us` parameter that is passed in says that we @@ -437,22 +526,98 @@ struct drm_dp_aux { * @is_remote: Is this AUX CH actually using sideband messaging. */ bool is_remote; + + /** + * @powered_down: If true then the remote endpoint is powered down. + */ + bool powered_down; + + /** + * @no_zero_sized: If the hw can't use zero sized transfers (NVIDIA) + */ + bool no_zero_sized; + + /** + * @dpcd_probe_disabled: If probing before a DPCD access is disabled. + */ + bool dpcd_probe_disabled; }; int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset); +void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered); +void drm_dp_dpcd_set_probe(struct drm_dp_aux *aux, bool enable); ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, void *buffer, size_t size); ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, void *buffer, size_t size); /** + * drm_dp_dpcd_read_data() - read a series of bytes from the DPCD + * @aux: DisplayPort AUX channel (SST or MST) + * @offset: address of the (first) register to read + * @buffer: buffer to store the register values + * @size: number of bytes in @buffer + * + * Returns zero (0) on success, or a negative error + * code on failure. -EIO is returned if the request was NAKed by the sink or + * if the retry count was exceeded. If not all bytes were transferred, this + * function returns -EPROTO. Errors from the underlying AUX channel transfer + * function, with the exception of -EBUSY (which causes the transaction to + * be retried), are propagated to the caller. + */ +static inline int drm_dp_dpcd_read_data(struct drm_dp_aux *aux, + unsigned int offset, + void *buffer, size_t size) +{ + int ret; + + ret = drm_dp_dpcd_read(aux, offset, buffer, size); + if (ret < 0) + return ret; + if (ret < size) + return -EPROTO; + + return 0; +} + +/** + * drm_dp_dpcd_write_data() - write a series of bytes to the DPCD + * @aux: DisplayPort AUX channel (SST or MST) + * @offset: address of the (first) register to write + * @buffer: buffer containing the values to write + * @size: number of bytes in @buffer + * + * Returns zero (0) on success, or a negative error + * code on failure. -EIO is returned if the request was NAKed by the sink or + * if the retry count was exceeded. If not all bytes were transferred, this + * function returns -EPROTO. Errors from the underlying AUX channel transfer + * function, with the exception of -EBUSY (which causes the transaction to + * be retried), are propagated to the caller. 
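In practice the new helper removes the short-read bookkeeping that every drm_dp_dpcd_read() caller had to repeat; a sketch reading the sink's DSC capability block:

u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
bool enable_dsc = false;	/* hypothetical driver flag */
int ret;

/* 0 on success; short reads become -EPROTO, NAKs -EIO. */
ret = drm_dp_dpcd_read_data(aux, DP_DSC_SUPPORT, dsc_dpcd, sizeof(dsc_dpcd));
if (ret)
	return ret;

if (drm_dp_sink_supports_dsc(dsc_dpcd))
	enable_dsc = true;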
+ */ +static inline int drm_dp_dpcd_write_data(struct drm_dp_aux *aux, + unsigned int offset, + void *buffer, size_t size) +{ + int ret; + + ret = drm_dp_dpcd_write(aux, offset, buffer, size); + if (ret < 0) + return ret; + if (ret < size) + return -EPROTO; + + return 0; +} + +/** * drm_dp_dpcd_readb() - read a single byte from the DPCD * @aux: DisplayPort AUX channel * @offset: address of the register to read * @valuep: location where the value of the register will be stored * * Returns the number of bytes transferred (1) on success, or a negative - * error code on failure. + * error code on failure. In most of the cases you should be using + * drm_dp_dpcd_read_byte() instead. */ static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux, unsigned int offset, u8 *valuep) @@ -467,7 +632,8 @@ static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux, * @value: value to write to the register * * Returns the number of bytes transferred (1) on success, or a negative - * error code on failure. + * error code on failure. In most of the cases you should be using + * drm_dp_dpcd_write_byte() instead. */ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux, unsigned int offset, u8 value) @@ -475,6 +641,34 @@ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux, return drm_dp_dpcd_write(aux, offset, &value, 1); } +/** + * drm_dp_dpcd_read_byte() - read a single byte from the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the register to read + * @valuep: location where the value of the register will be stored + * + * Returns zero (0) on success, or a negative error code on failure. + */ +static inline int drm_dp_dpcd_read_byte(struct drm_dp_aux *aux, + unsigned int offset, u8 *valuep) +{ + return drm_dp_dpcd_read_data(aux, offset, valuep, 1); +} + +/** + * drm_dp_dpcd_write_byte() - write a single byte to the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the register to write + * @value: value to write to the register + * + * Returns zero (0) on success, or a negative error code on failure. 
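The byte variants make one-register read-modify-write cycles straightforward; a sketch that turns on sink decompression via the DP_DSC_ENABLE register extended earlier in this diff:

u8 val;
int ret;

ret = drm_dp_dpcd_read_byte(aux, DP_DSC_ENABLE, &val);
if (ret)
	return ret;

val |= DP_DECOMPRESSION_EN;	/* or DP_DSC_PASSTHROUGH_EN on an MST branch */

return drm_dp_dpcd_write_byte(aux, DP_DSC_ENABLE, val);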
+ */ +static inline int drm_dp_dpcd_write_byte(struct drm_dp_aux *aux, + unsigned int offset, u8 value) +{ + return drm_dp_dpcd_write_data(aux, offset, &value, 1); +} + int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux, u8 dpcd[DP_RECEIVER_CAP_SIZE]); @@ -484,6 +678,13 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux, enum drm_dp_phy dp_phy, u8 link_status[DP_LINK_STATUS_SIZE]); +int drm_dp_link_power_up(struct drm_dp_aux *aux, unsigned char revision); +int drm_dp_link_power_down(struct drm_dp_aux *aux, unsigned char revision); + +int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux, + int vcpid, u8 start_time_slot, u8 time_slot_count); +int drm_dp_dpcd_clear_payload(struct drm_dp_aux *aux); +int drm_dp_dpcd_poll_act_handled(struct drm_dp_aux *aux, int timeout_ms); bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, u8 real_edid_checksum); @@ -495,18 +696,18 @@ bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], u8 type); bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid); + const struct drm_edid *drm_edid); int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4]); int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid); + const struct drm_edid *drm_edid); int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid); + const struct drm_edid *drm_edid); int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid); + const struct drm_edid *drm_edid); bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4]); bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], @@ -518,7 +719,7 @@ int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]); void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid, + const struct drm_edid *drm_edid, struct drm_dp_aux *aux); enum drm_mode_subconnector drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], @@ -543,9 +744,12 @@ int drm_dp_read_lttpr_phy_caps(struct drm_dp_aux *aux, u8 caps[DP_LTTPR_PHY_CAP_SIZE]); int drm_dp_lttpr_count(const u8 cap[DP_LTTPR_COMMON_CAP_SIZE]); int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]); +int drm_dp_lttpr_set_transparent_mode(struct drm_dp_aux *aux, bool enable); +int drm_dp_lttpr_init(struct drm_dp_aux *aux, int lttpr_count); int drm_dp_lttpr_max_lane_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]); bool drm_dp_lttpr_voltage_swing_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); bool drm_dp_lttpr_pre_emphasis_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); +void drm_dp_lttpr_wake_timeout_setup(struct drm_dp_aux *aux, bool transparent_mode); void drm_dp_remote_aux_init(struct drm_dp_aux *aux); void drm_dp_aux_init(struct drm_dp_aux *aux); @@ -576,6 +780,8 @@ struct drm_dp_desc { int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, bool is_branch); +int drm_dp_dump_lttpr_desc(struct drm_dp_aux *aux, enum drm_dp_phy dp_phy); + /** * enum drm_dp_quirk - Display Port sink/branch device specific quirks * @@ -620,6 +826,22 @@ enum drm_dp_quirk { * the DP_MAX_LINK_RATE register reporting a lower max multiplier. 
*/ DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS, + /** + * @DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC: + * + * The device applies HBLANK expansion for some modes, but this + * requires enabling DSC. + */ + DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC, + /** + * @DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT: + * + * The device doesn't support DSC decompression at the maximum DSC + * pixel throughput and compressed bpp it indicates via its DPCD DSC + * capabilities. The compressed bpp must be limited above a device + * specific DSC pixel throughput. + */ + DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT, }; /** @@ -643,6 +865,7 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk) * @lsb_reg_used: Do we also write values to the DP_EDP_BACKLIGHT_BRIGHTNESS_LSB register? * @aux_enable: Does the panel support the AUX enable cap? * @aux_set: Does the panel support setting the brightness through AUX? + * @luminance_set: Does the panel support setting the brightness through AUX using luminance values? * * This structure contains various data about an eDP backlight, which can be populated by using * drm_edp_backlight_init(). @@ -650,21 +873,23 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk) struct drm_edp_backlight_info { u8 pwmgen_bit_count; u8 pwm_freq_pre_divider; - u16 max; + u32 max; bool lsb_reg_used : 1; bool aux_enable : 1; bool aux_set : 1; + bool luminance_set : 1; }; int drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl, + u32 max_luminance, u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE], - u16 *current_level, u8 *current_mode); + u32 *current_level, u8 *current_mode, bool need_luminance); int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl, - u16 level); + u32 level); int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl, - u16 level); + u32 level); int drm_edp_backlight_disable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl); #if IS_ENABLED(CONFIG_DRM_KMS_HELPER) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ @@ -682,11 +907,12 @@ static inline int drm_panel_dp_aux_backlight(struct drm_panel *panel, #endif -#ifdef CONFIG_DRM_DP_CEC +#ifdef CONFIG_DRM_DISPLAY_DP_AUX_CEC void drm_dp_cec_irq(struct drm_dp_aux *aux); void drm_dp_cec_register_connector(struct drm_dp_aux *aux, struct drm_connector *connector); void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux); +void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address); void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid); void drm_dp_cec_unset_edid(struct drm_dp_aux *aux); #else @@ -704,6 +930,11 @@ static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux) { } +static inline void drm_dp_cec_attach(struct drm_dp_aux *aux, + u16 source_physical_address) +{ +} + static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid) { @@ -763,4 +994,20 @@ bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZ const u8 port_cap[4], u8 color_spc); int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc); +#define DRM_DP_BW_OVERHEAD_MST BIT(0) +#define DRM_DP_BW_OVERHEAD_UHBR BIT(1) +#define DRM_DP_BW_OVERHEAD_SSC_REF_CLK BIT(2) +#define DRM_DP_BW_OVERHEAD_FEC BIT(3) +#define DRM_DP_BW_OVERHEAD_DSC BIT(4) + +int drm_dp_bw_overhead(int lane_count, int hactive, + int dsc_slice_count, + int bpp_x16, unsigned long flags); +int 
drm_dp_bw_channel_coding_efficiency(bool is_uhbr); +int drm_dp_max_dprx_data_rate(int max_link_rate, int max_lanes); + +ssize_t drm_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, struct dp_sdp *sdp); +int drm_dp_link_symbol_cycles(int lane_count, int pixels, int dsc_slice_count, + int bpp_x16, int symbol_size, bool is_mst); + #endif /* _DRM_DP_HELPER_H_ */ diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index 41fd8352ab65..2cfe1d4bfc96 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -25,6 +25,7 @@ #include <linux/types.h> #include <drm/display/drm_dp_helper.h> #include <drm/drm_atomic.h> +#include <drm/drm_fixed.h> #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) #include <linux/stackdepot.h> @@ -46,6 +47,13 @@ struct drm_dp_mst_topology_ref_history { }; #endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */ +enum drm_dp_mst_payload_allocation { + DRM_DP_MST_PAYLOAD_ALLOCATION_NONE, + DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL, + DRM_DP_MST_PAYLOAD_ALLOCATION_DFP, + DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE, +}; + struct drm_dp_mst_branch; /** @@ -75,7 +83,6 @@ struct drm_dp_mst_branch; * @passthrough_aux: parent aux to which DSC pass-through requests should be * sent, only set if DSC pass-through is possible. * @parent: branch device parent of this port - * @vcpi: Virtual Channel Payload info for this port. * @connector: DRM connector this port is connected to. Protected by * &drm_dp_mst_topology_mgr.base.lock. * @mgr: topology manager this port lives under. @@ -138,12 +145,7 @@ struct drm_dp_mst_port { * @cached_edid: for DP logical ports - make tiling work by ensuring * that the EDID for all connectors is read immediately. */ - struct edid *cached_edid; - /** - * @has_audio: Tracks whether the sink connector to this port is - * audio-capable. - */ - bool has_audio; + const struct drm_edid *cached_edid; /** * @fec_capable: bool indicating if FEC can be supported up to that @@ -220,6 +222,13 @@ struct drm_dp_mst_branch { */ struct list_head destroy_next; + /** + * @rad: Relative Address of the MST branch. + * For &drm_dp_mst_topology_mgr.mst_primary, its rad[8] entries are all 0, + * unset and unused. For MST branches connected after mst_primary, + * in each element of rad[] the nibbles are ordered by the most + * significant 4 bits first and the least significant 4 bits second.
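Given that nibble ordering, a hop's port number can be recovered from rad[] as in this hypothetical helper, which mirrors how the topology code resolves a branch device from its RAD:

static int my_rad_port_num(const u8 rad[8], int hop)
{
	/* Even hops sit in the high nibble, odd hops in the low nibble. */
	unsigned int shift = (hop % 2) ? 0 : 4;

	return (rad[hop / 2] >> shift) & 0xf;
}

/* For a link count of 2, hop 0 is the high nibble of rad[0]. */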
+ */ u8 rad[8]; u8 lct; int num_ports; @@ -242,18 +251,18 @@ struct drm_dp_mst_branch { bool link_address_sent; /* global unique identifier to identify branch devices */ - u8 guid[16]; + guid_t guid; }; struct drm_dp_nak_reply { - u8 guid[16]; + guid_t guid; u8 reason; u8 nak_data; }; struct drm_dp_link_address_ack_reply { - u8 guid[16]; + guid_t guid; u8 nports; struct drm_dp_link_addr_reply_port { bool input_port; @@ -263,7 +272,7 @@ struct drm_dp_link_address_ack_reply { bool ddps; bool legacy_device_plug_status; u8 dpcd_revision; - u8 peer_guid[16]; + guid_t peer_guid; u8 num_sdp_streams; u8 num_sdp_stream_sinks; } ports[16]; @@ -346,7 +355,7 @@ struct drm_dp_allocate_payload_ack_reply { }; struct drm_dp_connection_status_notify { - u8 guid[16]; + guid_t guid; u8 port_number; bool legacy_device_plug_status; bool displayport_device_plug_status; @@ -423,7 +432,7 @@ struct drm_dp_query_payload { struct drm_dp_resource_status_notify { u8 port_number; - u8 guid[16]; + guid_t guid; u16 available_pbn; }; @@ -542,7 +551,7 @@ struct drm_dp_mst_atomic_payload { * drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the * previous MST states payload start slots have been copied over to the new state. Note * that a new start slot won't be assigned/removed from this payload until - * drm_dp_add_payload_part1()/drm_dp_remove_payload() have been called. + * drm_dp_add_payload_part1()/drm_dp_remove_payload_part2() have been called. * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to * get committed to hardware by calling drm_crtc_commit_wait() on each of the * &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps. @@ -569,6 +578,9 @@ struct drm_dp_mst_atomic_payload { /** @dsc_enabled: Whether or not this payload has DSC enabled */ bool dsc_enabled : 1; + /** @payload_allocation_status: The allocation status of this payload */ + enum drm_dp_mst_payload_allocation payload_allocation_status; + /** @next: The list node for this payload */ struct list_head next; }; @@ -612,7 +624,7 @@ struct drm_dp_mst_topology_state { * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this * out itself. */ - int pbn_div; + fixed20_12 pbn_div; }; #define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base) @@ -695,6 +707,13 @@ struct drm_dp_mst_topology_mgr { bool payload_id_table_cleared : 1; /** + * @reset_rx_state: The down request's reply and up request message + * receiver state must be reset, after the topology manager got + * removed. Protected by @lock. + */ + bool reset_rx_state : 1; + + /** * @payload_count: The number of currently active payloads in hardware. This value is only * intended to be used internally by MST helpers for payload tracking, and is only safe to * read/write from the atomic commit (not check) context. @@ -812,11 +831,35 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr); -bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]); -int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state); +/** + * enum drm_dp_mst_mode - sink's MST mode capability + */ +enum drm_dp_mst_mode { + /** + * @DRM_DP_SST: The sink does not support MST nor single stream sideband + * messaging. + */ + DRM_DP_SST, + /** + * @DRM_DP_MST: Sink supports MST, more than one stream and single + * stream sideband messaging. 
+ */ + DRM_DP_MST, + /** + * @DRM_DP_SST_SIDEBAND_MSG: Sink supports only one stream and single + * stream sideband messaging. + */ + DRM_DP_SST_SIDEBAND_MSG, +}; -int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled); +enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state); +int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, + const u8 *esi, + u8 *ack, + bool *handled); +void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr); int drm_dp_mst_detect_port(struct drm_connector *connector, @@ -824,12 +867,16 @@ drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); -struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); +const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); +struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); -int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, - int link_rate, int link_lane_count); +fixed20_12 drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count); -int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc); +int drm_dp_calc_pbn_mode(int clock, int bpp); void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap); @@ -837,17 +884,22 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_topology_state *mst_state, struct drm_dp_mst_atomic_payload *payload); int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr, - struct drm_atomic_state *state, struct drm_dp_mst_atomic_payload *payload); -void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_topology_state *mst_state, - struct drm_dp_mst_atomic_payload *payload); +void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_atomic_payload *payload); +void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + const struct drm_dp_mst_atomic_payload *old_payload, + struct drm_dp_mst_atomic_payload *new_payload); int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr); void drm_dp_mst_dump_topology(struct seq_file *m, struct drm_dp_mst_topology_mgr *mgr); +void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr); + void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr); int __must_check drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, @@ -867,11 +919,17 @@ struct drm_dp_mst_topology_state * drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr); struct drm_dp_mst_topology_state * +drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr); +struct drm_dp_mst_topology_state * drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr); struct drm_dp_mst_atomic_payload * drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state, struct drm_dp_mst_port *port); +bool 
drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + struct drm_dp_mst_port *parent); int __must_check drm_dp_atomic_find_time_slots(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr, @@ -893,6 +951,10 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, struct drm_dp_query_stream_enc_status_ack_reply *status); +int __must_check drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_port **failing_port); int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state); int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state, struct drm_dp_mst_topology_mgr *mgr); @@ -900,6 +962,13 @@ int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *n void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port); void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port); +static inline +bool drm_dp_mst_port_is_logical(struct drm_dp_mst_port *port) +{ + return port->port_num >= DP_MST_LOGICAL_PORT_0; +} + +struct drm_dp_aux *drm_dp_mst_aux_for_parent(struct drm_dp_mst_port *port); struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port); static inline struct drm_dp_mst_topology_state * diff --git a/include/drm/display/drm_dp_tunnel.h b/include/drm/display/drm_dp_tunnel.h new file mode 100644 index 000000000000..87212c847915 --- /dev/null +++ b/include/drm/display/drm_dp_tunnel.h @@ -0,0 +1,248 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __DRM_DP_TUNNEL_H__ +#define __DRM_DP_TUNNEL_H__ + +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/types.h> + +struct drm_dp_aux; + +struct drm_device; + +struct drm_atomic_state; +struct drm_dp_tunnel_mgr; +struct drm_dp_tunnel_state; + +struct ref_tracker; + +struct drm_dp_tunnel_ref { + struct drm_dp_tunnel *tunnel; + struct ref_tracker *tracker; +}; + +#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL + +struct drm_dp_tunnel * +drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker); + +void +drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker); + +static inline void drm_dp_tunnel_ref_get(struct drm_dp_tunnel *tunnel, + struct drm_dp_tunnel_ref *tunnel_ref) +{ + tunnel_ref->tunnel = drm_dp_tunnel_get(tunnel, &tunnel_ref->tracker); +} + +static inline void drm_dp_tunnel_ref_put(struct drm_dp_tunnel_ref *tunnel_ref) +{ + drm_dp_tunnel_put(tunnel_ref->tunnel, &tunnel_ref->tracker); + tunnel_ref->tunnel = NULL; +} + +struct drm_dp_tunnel * +drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr, + struct drm_dp_aux *aux); +int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel); + +int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel); +int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel); +bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel); +int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw); +int drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel); +int drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel); + +void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel); + +int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, + struct drm_dp_aux *aux); + +int drm_dp_tunnel_max_dprx_rate(const 
struct drm_dp_tunnel *tunnel); +int drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel); +int drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel); + +const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel); + +struct drm_dp_tunnel_state * +drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state, + struct drm_dp_tunnel *tunnel); + +struct drm_dp_tunnel_state * +drm_dp_tunnel_atomic_get_old_state(struct drm_atomic_state *state, + const struct drm_dp_tunnel *tunnel); + +struct drm_dp_tunnel_state * +drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state, + const struct drm_dp_tunnel *tunnel); + +int drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state, + struct drm_dp_tunnel *tunnel, + u8 stream_id, int bw); +int drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state, + const struct drm_dp_tunnel *tunnel, + u32 *stream_mask); + +int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state, + u32 *failed_stream_mask); + +int drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state); + +struct drm_dp_tunnel_mgr * +drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count); +void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr); + +#else + +static inline struct drm_dp_tunnel * +drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker) +{ + return NULL; +} + +static inline void +drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker) {} + +static inline void drm_dp_tunnel_ref_get(struct drm_dp_tunnel *tunnel, + struct drm_dp_tunnel_ref *tunnel_ref) {} + +static inline void drm_dp_tunnel_ref_put(struct drm_dp_tunnel_ref *tunnel_ref) {} + +static inline struct drm_dp_tunnel * +drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr, + struct drm_dp_aux *aux) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline int +drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel) +{ + return 0; +} + +static inline int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel) +{ + return -EOPNOTSUPP; +} + +static inline int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel) +{ + return -EOPNOTSUPP; +} + +static inline bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel) +{ + return false; +} + +static inline int +drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw) +{ + return -EOPNOTSUPP; +} + +static inline int +drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel) +{ + return -1; +} + +static inline int +drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel) +{ + return -EOPNOTSUPP; +} + +static inline void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel) {} + +static inline int +drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, + struct drm_dp_aux *aux) +{ + return -EOPNOTSUPP; +} + +static inline int +drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel) +{ + return 0; +} + +static inline int +drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel) +{ + return 0; +} + +static inline int +drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel) +{ + return -1; +} + +static inline const char * +drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel) +{ + return NULL; +} + +static inline struct drm_dp_tunnel_state * +drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state, + struct drm_dp_tunnel *tunnel) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline struct drm_dp_tunnel_state * 
+drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state, + const struct drm_dp_tunnel *tunnel) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline int +drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state, + struct drm_dp_tunnel *tunnel, + u8 stream_id, int bw) +{ + return -EOPNOTSUPP; +} + +static inline int +drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state, + const struct drm_dp_tunnel *tunnel, + u32 *stream_mask) +{ + return -EOPNOTSUPP; +} + +static inline int +drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state, + u32 *failed_stream_mask) +{ + return -EOPNOTSUPP; +} + +static inline int +drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state) +{ + return 0; +} + +static inline struct drm_dp_tunnel_mgr * +drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline +void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr) {} + +#endif /* CONFIG_DRM_DISPLAY_DP_TUNNEL */ + +#endif /* __DRM_DP_TUNNEL_H__ */ diff --git a/include/drm/display/drm_dsc.h b/include/drm/display/drm_dsc.h index bc90273d06a6..bbbe7438473d 100644 --- a/include/drm/display/drm_dsc.h +++ b/include/drm/display/drm_dsc.h @@ -40,9 +40,6 @@ #define DSC_PPS_RC_RANGE_MINQP_SHIFT 11 #define DSC_PPS_RC_RANGE_MAXQP_SHIFT 6 #define DSC_PPS_NATIVE_420_SHIFT 1 -#define DSC_1_2_MAX_LINEBUF_DEPTH_BITS 16 -#define DSC_1_2_MAX_LINEBUF_DEPTH_VAL 0 -#define DSC_1_1_MAX_LINEBUF_DEPTH_BITS 13 /** * struct drm_dsc_rc_range_parameters - DSC Rate Control range parameters diff --git a/include/drm/display/drm_dsc_helper.h b/include/drm/display/drm_dsc_helper.h index 8b41edbbabab..2c2b9033f60f 100644 --- a/include/drm/display/drm_dsc_helper.h +++ b/include/drm/display/drm_dsc_helper.h @@ -10,11 +10,27 @@ #include <drm/display/drm_dsc.h> +enum drm_dsc_params_type { + DRM_DSC_1_2_444, + DRM_DSC_1_1_PRE_SCR, /* legacy params from DSC 1.1 */ + DRM_DSC_1_2_422, + DRM_DSC_1_2_420, +}; + +struct drm_printer; + void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header); int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size); void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp, const struct drm_dsc_config *dsc_cfg); +void drm_dsc_set_const_params(struct drm_dsc_config *vdsc_cfg); +void drm_dsc_set_rc_buf_thresh(struct drm_dsc_config *vdsc_cfg); +int drm_dsc_setup_rc_params(struct drm_dsc_config *vdsc_cfg, enum drm_dsc_params_type type); int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg); +u8 drm_dsc_initial_scale_value(const struct drm_dsc_config *dsc); +u32 drm_dsc_flatness_det_thresh(const struct drm_dsc_config *dsc); +u32 drm_dsc_get_bpp_int(const struct drm_dsc_config *vdsc_cfg); +void drm_dsc_dump_config(struct drm_printer *p, int indent, const struct drm_dsc_config *cfg); #endif /* _DRM_DSC_HELPER_H_ */ diff --git a/include/drm/display/drm_hdmi_audio_helper.h b/include/drm/display/drm_hdmi_audio_helper.h new file mode 100644 index 000000000000..44d910bdc72d --- /dev/null +++ b/include/drm/display/drm_hdmi_audio_helper.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef DRM_DISPLAY_HDMI_AUDIO_HELPER_H_ +#define DRM_DISPLAY_HDMI_AUDIO_HELPER_H_ + +#include <linux/types.h> + +struct drm_connector; +struct drm_connector_hdmi_audio_funcs; + +struct device; + +int drm_connector_hdmi_audio_init(struct drm_connector *connector, + struct device *hdmi_codec_dev, + const struct drm_connector_hdmi_audio_funcs 
*funcs, + unsigned int max_i2s_playback_channels, + u64 i2s_formats, + bool spdif_playback, + int sound_dai_port); +void drm_connector_hdmi_audio_plugged_notify(struct drm_connector *connector, + bool plugged); + +#endif diff --git a/include/drm/display/drm_hdmi_cec_helper.h b/include/drm/display/drm_hdmi_cec_helper.h new file mode 100644 index 000000000000..fd8f4d2f02c1 --- /dev/null +++ b/include/drm/display/drm_hdmi_cec_helper.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef DRM_DISPLAY_HDMI_CEC_HELPER +#define DRM_DISPLAY_HDMI_CEC_HELPER + +#include <linux/types.h> + +struct drm_connector; + +struct cec_msg; +struct device; + +struct drm_connector_hdmi_cec_funcs { + /** + * @init: perform hardware-specific initialization before registering the CEC adapter + */ + int (*init)(struct drm_connector *connector); + + /** + * @uninit: perform hardware-specific teardown for the CEC adapter + */ + void (*uninit)(struct drm_connector *connector); + + /** + * @enable: enable or disable CEC adapter + */ + int (*enable)(struct drm_connector *connector, bool enable); + + /** + * @log_addr: set adapter's logical address, can be called multiple + * times if adapter supports several LAs + */ + int (*log_addr)(struct drm_connector *connector, u8 logical_addr); + + /** + * @transmit: start transmission of the specified CEC message + */ + int (*transmit)(struct drm_connector *connector, u8 attempts, + u32 signal_free_time, struct cec_msg *msg); +}; + +int drmm_connector_hdmi_cec_register(struct drm_connector *connector, + const struct drm_connector_hdmi_cec_funcs *funcs, + const char *name, + u8 available_las, + struct device *dev); + +void drm_connector_hdmi_cec_received_msg(struct drm_connector *connector, + struct cec_msg *msg); + +void drm_connector_hdmi_cec_transmit_done(struct drm_connector *connector, + u8 status, + u8 arb_lost_cnt, u8 nack_cnt, + u8 low_drive_cnt, u8 error_cnt); + +void drm_connector_hdmi_cec_transmit_attempt_done(struct drm_connector *connector, + u8 status); + +#if IS_ENABLED(CONFIG_DRM_DISPLAY_HDMI_CEC_NOTIFIER_HELPER) +int drmm_connector_hdmi_cec_notifier_register(struct drm_connector *connector, + const char *port_name, + struct device *dev); +#else +static inline int drmm_connector_hdmi_cec_notifier_register(struct drm_connector *connector, + const char *port_name, + struct device *dev) +{ + return 0; +} +#endif + +#endif diff --git a/include/drm/display/drm_hdmi_helper.h b/include/drm/display/drm_hdmi_helper.h index 76d234826e22..09145c9ee9fc 100644 --- a/include/drm/display/drm_hdmi_helper.h +++ b/include/drm/display/drm_hdmi_helper.h @@ -24,4 +24,14 @@ drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame, void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame, const struct drm_connector_state *conn_state); +unsigned long long +drm_hdmi_compute_mode_clock(const struct drm_display_mode *mode, + unsigned int bpc, enum hdmi_colorspace fmt); + +void +drm_hdmi_acr_get_n_cts(unsigned long long tmds_char_rate, + unsigned int sample_rate, + unsigned int *out_n, + unsigned int *out_cts); + #endif diff --git a/include/drm/display/drm_hdmi_state_helper.h b/include/drm/display/drm_hdmi_state_helper.h new file mode 100644 index 000000000000..2349c0d0f00f --- /dev/null +++ b/include/drm/display/drm_hdmi_state_helper.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef DRM_HDMI_STATE_HELPER_H_ +#define DRM_HDMI_STATE_HELPER_H_ + +struct drm_atomic_state; +struct drm_connector; +struct drm_connector_state; +struct 
drm_display_mode; +struct hdmi_audio_infoframe; + +enum drm_connector_status; + +void __drm_atomic_helper_connector_hdmi_reset(struct drm_connector *connector, + struct drm_connector_state *new_conn_state); + +int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector, + struct drm_atomic_state *state); + +int drm_atomic_helper_connector_hdmi_update_audio_infoframe(struct drm_connector *connector, + struct hdmi_audio_infoframe *frame); +int drm_atomic_helper_connector_hdmi_clear_audio_infoframe(struct drm_connector *connector); +int drm_atomic_helper_connector_hdmi_update_infoframes(struct drm_connector *connector, + struct drm_atomic_state *state); +void drm_atomic_helper_connector_hdmi_hotplug(struct drm_connector *connector, + enum drm_connector_status status); +void drm_atomic_helper_connector_hdmi_force(struct drm_connector *connector); + +enum drm_mode_status +drm_hdmi_connector_mode_valid(struct drm_connector *connector, + const struct drm_display_mode *mode); + +#endif // DRM_HDMI_STATE_HELPER_H_ diff --git a/include/drm/display/drm_scdc_helper.h b/include/drm/display/drm_scdc_helper.h index ded01fd948b4..34600476a1b9 100644 --- a/include/drm/display/drm_scdc_helper.h +++ b/include/drm/display/drm_scdc_helper.h @@ -28,6 +28,7 @@ #include <drm/display/drm_scdc.h> +struct drm_connector; struct i2c_adapter; ssize_t drm_scdc_read(struct i2c_adapter *adapter, u8 offset, void *buffer, @@ -71,9 +72,9 @@ static inline int drm_scdc_writeb(struct i2c_adapter *adapter, u8 offset, return drm_scdc_write(adapter, offset, &value, sizeof(value)); } -bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter); +bool drm_scdc_get_scrambling_status(struct drm_connector *connector); -bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable); -bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set); +bool drm_scdc_set_scrambling(struct drm_connector *connector, bool enable); +bool drm_scdc_set_high_tmds_clock_ratio(struct drm_connector *connector, bool set); #endif diff --git a/include/drm/drm_accel.h b/include/drm/drm_accel.h index d4955062c77e..20a665ec6f16 100644 --- a/include/drm/drm_accel.h +++ b/include/drm/drm_accel.h @@ -28,7 +28,8 @@ .poll = drm_poll,\ .read = drm_read,\ .llseek = noop_llseek, \ - .mmap = drm_gem_mmap + .mmap = drm_gem_mmap, \ + .fop_flags = FOP_UNSIGNED_OFFSET /** * DEFINE_DRM_ACCEL_FOPS() - macro to generate file operations for accelerators drivers @@ -51,14 +52,13 @@ #if IS_ENABLED(CONFIG_DRM_ACCEL) +extern struct xarray accel_minors_xa; + void accel_core_exit(void); int accel_core_init(void); -void accel_minor_remove(int index); -int accel_minor_alloc(void); -void accel_minor_replace(struct drm_minor *minor, int index); void accel_set_device_instance_params(struct device *kdev, int index); int accel_open(struct inode *inode, struct file *filp); -void accel_debugfs_init(struct drm_minor *minor, int minor_id); +void accel_debugfs_register(struct drm_device *dev); #else @@ -72,24 +72,11 @@ static inline int __init accel_core_init(void) return 0; } -static inline void accel_minor_remove(int index) -{ -} - -static inline int accel_minor_alloc(void) -{ - return -EOPNOTSUPP; -} - -static inline void accel_minor_replace(struct drm_minor *minor, int index) -{ -} - static inline void accel_set_device_instance_params(struct device *kdev, int index) { } -static inline void accel_debugfs_init(struct drm_minor *minor, int minor_id) +static inline void accel_debugfs_register(struct drm_device *dev) { } diff --git 
a/include/drm/drm_aperture.h b/include/drm/drm_aperture.h deleted file mode 100644 index 7096703c3949..000000000000 --- a/include/drm/drm_aperture.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: MIT */ - -#ifndef _DRM_APERTURE_H_ -#define _DRM_APERTURE_H_ - -#include <linux/types.h> - -struct drm_device; -struct drm_driver; -struct pci_dev; - -int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base, - resource_size_t size); - -int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size, - bool primary, const struct drm_driver *req_driver); - -int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, - const struct drm_driver *req_driver); - -/** - * drm_aperture_remove_framebuffers - remove all existing framebuffers - * @primary: also kick vga16fb if present - * @req_driver: requesting DRM driver - * - * This function removes all graphics device drivers. Use this function on systems - * that can have their framebuffer located anywhere in memory. - * - * Returns: - * 0 on success, or a negative errno code otherwise - */ -static inline int -drm_aperture_remove_framebuffers(bool primary, const struct drm_driver *req_driver) -{ - return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1, primary, - req_driver); -} - -#endif diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 92586ab55ef5..43783891d359 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -30,6 +30,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_util.h> +#include <drm/drm_colorop.h> /** * struct drm_crtc_commit - track modeset commits on a CRTC @@ -157,14 +158,51 @@ struct drm_crtc_commit { bool abort_completion; }; +struct __drm_colorops_state { + struct drm_colorop *ptr; + struct drm_colorop_state *state, *old_state, *new_state; +}; + struct __drm_planes_state { struct drm_plane *ptr; + + /** + * @state_to_destroy: + * + * Used to track the @drm_plane_state we will need to free when + * tearing down the associated &drm_atomic_state in + * &drm_mode_config_funcs.atomic_state_clear or + * drm_atomic_state_default_clear(). + * + * Before a commit, and the call to + * drm_atomic_helper_swap_state() in particular, it points to + * the same state as @new_state. After a commit, it points to + * the same state as @old_state. + */ + struct drm_plane_state *state_to_destroy; + + struct drm_plane_state *old_state, *new_state; - struct drm_plane_state *state, *old_state, *new_state; }; struct __drm_crtcs_state { struct drm_crtc *ptr; - struct drm_crtc_state *state, *old_state, *new_state; + + /** + * @state_to_destroy: + * + * Used to track the @drm_crtc_state we will need to free when + * tearing down the associated &drm_atomic_state in + * &drm_mode_config_funcs.atomic_state_clear or + * drm_atomic_state_default_clear(). + * + * Before a commit, and the call to + * drm_atomic_helper_swap_state() in particular, it points to + * the same state as @new_state. After a commit, it points to + * the same state as @old_state.
+ */ + struct drm_crtc_state *state_to_destroy; + + struct drm_crtc_state *old_state, *new_state; /** * @commit: @@ -182,7 +220,24 @@ struct __drm_crtcs_state { struct __drm_connnectors_state { struct drm_connector *ptr; - struct drm_connector_state *state, *old_state, *new_state; + + /** + * @state_to_destroy: + * + * Used to track the @drm_connector_state we will need to free + * when tearing down the associated &drm_atomic_state in + * &drm_mode_config_funcs.atomic_state_clear or + * drm_atomic_state_default_clear(). + * + * Before a commit, and the call to + * drm_atomic_helper_swap_state() in particular, it points to + * the same state as @new_state. After a commit, it points to + * the same state as @old_state. + */ + struct drm_connector_state *state_to_destroy; + + struct drm_connector_state *old_state, *new_state; + /** * @out_fence_ptr: * @@ -342,29 +397,80 @@ struct drm_private_state { struct __drm_private_objs_state { struct drm_private_obj *ptr; - struct drm_private_state *state, *old_state, *new_state; + + /** + * @state_to_destroy: + * + * Used to track the @drm_private_state we will need to free + * when tearing down the associated &drm_atomic_state in + * &drm_mode_config_funcs.atomic_state_clear or + * drm_atomic_state_default_clear(). + * + * Before a commit, and the call to + * drm_atomic_helper_swap_state() in particular, it points to + * the same state as @new_state. After a commit, it points to + * the same state as @old_state. + */ + struct drm_private_state *state_to_destroy; + + struct drm_private_state *old_state, *new_state; }; /** - * struct drm_atomic_state - the global state object for atomic updates - * @ref: count of all references to this state (will not be freed until zero) - * @dev: parent DRM device - * @async_update: hint for asynchronous plane update - * @planes: pointer to array of structures with per-plane data - * @crtcs: pointer to array of CRTC pointers - * @num_connector: size of the @connectors and @connector_states arrays - * @connectors: pointer to array of structures with per-connector data - * @num_private_objs: size of the @private_objs array - * @private_objs: pointer to array of private object pointers - * @acquire_ctx: acquire context for this atomic modeset state update + * struct drm_atomic_state - Atomic commit structure + * + * This structure is the kernel counterpart of @drm_mode_atomic and represents + * an atomic commit that transitions from an old to a new display state. It + * contains all the objects affected by the atomic commit and both the new + * state structures and pointers to the old state structures for + * these. * * States are added to an atomic update by calling drm_atomic_get_crtc_state(), * drm_atomic_get_plane_state(), drm_atomic_get_connector_state(), or for * private state structures, drm_atomic_get_private_obj_state(). + * + * NOTE: struct drm_atomic_state first started as a single collection of + * entity state pointers (drm_plane_state, drm_crtc_state, etc.). + * + * At atomic_check time, you could get the state about to be committed + * from drm_atomic_state, and the one currently running from the + * entity's state pointer (drm_crtc.state, for example). After the call + * to drm_atomic_helper_swap_state(), the entity's state pointer would + * contain the state previously checked, and the drm_atomic_state + * structure the old state.
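A minimal sketch of how the stable old/new split is consumed; the accessor names exist in this header, while the surrounding callback is hypothetical:

static int example_plane_check(struct drm_plane *plane,
			       struct drm_atomic_state *state)
{
	/* Both sides of the transition are explicit in the state. */
	struct drm_plane_state *old_ps =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_ps =
		drm_atomic_get_new_plane_state(state, plane);

	if (!old_ps->crtc && new_ps->crtc) {
		/* the plane is being enabled by this commit */
	}

	return 0;
}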
+ * + * Over time, and in order to avoid confusion, drm_atomic_state has + * grown to have both the old state (i.e., the state we replace) and the + * new state (i.e., the state we want to apply). Those names are stable + * during the commit process, which makes it easier to reason about. + * + * You can still find some traces of that evolution through some hooks + * or callbacks taking a drm_atomic_state parameter with names like + * "old_state". This doesn't necessarily mean that the previous + * drm_atomic_state is passed, but rather that this used to be the state + * collection we were replacing after drm_atomic_helper_swap_state(), + * but the variable name was never updated. + * + * Some atomic operations implementations followed a similar process. We + * first started to pass the entity state only. However, it was pretty + * cumbersome for drivers, and especially CRTCs, to retrieve the states + * of other components. Thus, we switched to passing the whole + * drm_atomic_state as a parameter to those operations. Similarly, the + * transition isn't complete yet, and one might still find atomic + * operations taking a drm_atomic_state pointer, or a component state + * pointer. The former is the preferred form. */ struct drm_atomic_state { + /** + * @ref: + * + * Count of all references to this update (will not be freed until zero). + */ struct kref ref; + /** + * @dev: Parent DRM Device. + */ struct drm_device *dev; /** @@ -372,8 +478,27 @@ struct drm_atomic_state { * * Allow full modeset. This is used by the ATOMIC IOCTL handler to * implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should - * never consult this flag, instead looking at the output of - * drm_atomic_crtc_needs_modeset(). + * generally not consult this flag, but instead look at the output of + * drm_atomic_crtc_needs_modeset(). The detailed rules are: + * + * - Drivers must not consult @allow_modeset in the atomic commit path. + * Use drm_atomic_crtc_needs_modeset() instead. + * + * - Drivers must consult @allow_modeset before adding unrelated struct + * drm_crtc_state to this commit by calling + * drm_atomic_get_crtc_state(). See also the warning in the + * documentation for that function. + * + * - Drivers must never change this flag, it is under the exclusive + * control of userspace. + * + * - Drivers may consult @allow_modeset in the atomic check path, if + * they have the choice between an optimal hardware configuration + * which requires a modeset, and a less optimal configuration which + * can be committed without a modeset. An example would be suboptimal + * scanout FIFO allocation resulting in increased idle power + * consumption. This allows userspace to avoid flickering and delays + * for the normal composition loop at reasonable cost. */ bool allow_modeset : 1; /** @@ -388,7 +513,12 @@ struct drm_atomic_state { * flag are not allowed. */ bool legacy_cursor_update : 1; + + /** + * @async_update: hint for asynchronous plane update + */ bool async_update : 1; + /** * @duplicated: * @@ -398,13 +528,86 @@ struct drm_atomic_state { * states. */ bool duplicated : 1; + + /** + * @checked: + * + * Indicates the state has been checked and thus must no longer + * be mutated. For internal use only, do not consult from drivers. + */ + bool checked : 1; + + /** + * @plane_color_pipeline: + * + * Indicates whether this atomic state originated with a client that + * set the DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE.
+ * + * Drivers and helper functions should use this to ignore legacy + * properties that are incompatible with the drm_plane COLOR_PIPELINE + * behavior, such as: + * + * - COLOR_RANGE + * - COLOR_ENCODING + * + * or any other driver-specific properties that might affect pixel + * values. + */ + bool plane_color_pipeline : 1; + + /** + * @colorops: + * + * Pointer to array of @drm_colorop and @drm_colorop_state part of this + * update. + */ + struct __drm_colorops_state *colorops; + + /** + * @planes: + * + * Pointer to array of @drm_plane and @drm_plane_state part of this + * update. + */ struct __drm_planes_state *planes; + + /** + * @crtcs: + * + * Pointer to array of @drm_crtc and @drm_crtc_state part of this + * update. + */ struct __drm_crtcs_state *crtcs; + + /** + * @num_connector: size of the @connectors array + */ int num_connector; + + /** + * @connectors: + * + * Pointer to array of @drm_connector and @drm_connector_state part of + * this update. + */ struct __drm_connnectors_state *connectors; + + /** + * @num_private_objs: size of the @private_objs array + */ int num_private_objs; + + /** + * @private_objs: + * + * Pointer to array of @drm_private_obj and @drm_private_obj_state part + * of this update. + */ struct __drm_private_objs_state *private_objs; + /** + * @acquire_ctx: acquire context for this atomic modeset state update + */ struct drm_modeset_acquire_ctx *acquire_ctx; /** @@ -412,7 +615,7 @@ struct drm_atomic_state { * * Used for signaling unbound planes/connectors. * When a connector or plane is not bound to any CRTC, it's still important - * to preserve linearity to prevent the atomic states from being freed to early. + * to preserve linearity to prevent the atomic states from being freed too early. * * This commit (if set) is not bound to any CRTC, but will be completed when * drm_atomic_helper_commit_hw_done() is called. @@ -501,6 +704,9 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, struct drm_plane_state * __must_check drm_atomic_get_plane_state(struct drm_atomic_state *state, struct drm_plane *plane); +struct drm_colorop_state * +drm_atomic_get_colorop_state(struct drm_atomic_state *state, + struct drm_colorop *colorop); struct drm_connector_state * __must_check drm_atomic_get_connector_state(struct drm_atomic_state *state, struct drm_connector *connector); @@ -527,24 +733,16 @@ drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state, struct drm_connector * drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state, struct drm_encoder *encoder); +struct drm_connector * +drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder, + struct drm_modeset_acquire_ctx *ctx); -/** - * drm_atomic_get_existing_crtc_state - get CRTC state, if it exists - * @state: global atomic state object - * @crtc: CRTC to grab - * - * This function returns the CRTC state for the given CRTC, or NULL - * if the CRTC is not part of the global atomic state. - * - * This function is deprecated, @drm_atomic_get_old_crtc_state or - * @drm_atomic_get_new_crtc_state should be used instead. 
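Following the @plane_color_pipeline note above, a hedged sketch of how a driver's plane check might gate the legacy color properties; both example_* helpers are hypothetical:

static int example_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	/*
	 * Clients that set DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE get the
	 * colorop path; legacy COLOR_RANGE/COLOR_ENCODING handling is
	 * skipped for them.
	 */
	if (state->plane_color_pipeline)
		return example_check_color_pipeline(plane, state);

	return example_check_legacy_color_props(plane, state);
}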
- */ -static inline struct drm_crtc_state * -drm_atomic_get_existing_crtc_state(const struct drm_atomic_state *state, - struct drm_crtc *crtc) -{ - return state->crtcs[drm_crtc_index(crtc)].state; -} +struct drm_crtc * +drm_atomic_get_old_crtc_for_encoder(struct drm_atomic_state *state, + struct drm_encoder *encoder); +struct drm_crtc * +drm_atomic_get_new_crtc_for_encoder(struct drm_atomic_state *state, + struct drm_encoder *encoder); /** * drm_atomic_get_old_crtc_state - get old CRTC state, if it exists @@ -576,24 +774,6 @@ drm_atomic_get_new_crtc_state(const struct drm_atomic_state *state, } /** - * drm_atomic_get_existing_plane_state - get plane state, if it exists - * @state: global atomic state object - * @plane: plane to grab - * - * This function returns the plane state for the given plane, or NULL - * if the plane is not part of the global atomic state. - * - * This function is deprecated, @drm_atomic_get_old_plane_state or - * @drm_atomic_get_new_plane_state should be used instead. - */ -static inline struct drm_plane_state * -drm_atomic_get_existing_plane_state(const struct drm_atomic_state *state, - struct drm_plane *plane) -{ - return state->planes[drm_plane_index(plane)].state; -} - -/** * drm_atomic_get_old_plane_state - get plane state, if it exists * @state: global atomic state object * @plane: plane to grab @@ -624,26 +804,33 @@ drm_atomic_get_new_plane_state(const struct drm_atomic_state *state, } /** - * drm_atomic_get_existing_connector_state - get connector state, if it exists + * drm_atomic_get_old_colorop_state - get colorop state, if it exists * @state: global atomic state object - * @connector: connector to grab + * @colorop: colorop to grab * - * This function returns the connector state for the given connector, - * or NULL if the connector is not part of the global atomic state. - * - * This function is deprecated, @drm_atomic_get_old_connector_state or - * @drm_atomic_get_new_connector_state should be used instead. + * This function returns the old colorop state for the given colorop, or + * NULL if the colorop is not part of the global atomic state. */ -static inline struct drm_connector_state * -drm_atomic_get_existing_connector_state(const struct drm_atomic_state *state, - struct drm_connector *connector) +static inline struct drm_colorop_state * +drm_atomic_get_old_colorop_state(struct drm_atomic_state *state, + struct drm_colorop *colorop) { - int index = drm_connector_index(connector); - - if (index >= state->num_connector) - return NULL; + return state->colorops[drm_colorop_index(colorop)].old_state; +} - return state->connectors[index].state; +/** + * drm_atomic_get_new_colorop_state - get colorop state, if it exists + * @state: global atomic state object + * @colorop: colorop to grab + * + * This function returns the new colorop state for the given colorop, or + * NULL if the colorop is not part of the global atomic state. + */ +static inline struct drm_colorop_state * +drm_atomic_get_new_colorop_state(struct drm_atomic_state *state, + struct drm_colorop *colorop) +{ + return state->colorops[drm_colorop_index(colorop)].new_state; } /** @@ -691,11 +878,11 @@ drm_atomic_get_new_connector_state(const struct drm_atomic_state *state, * @state: global atomic state object * @plane: plane to grab * - * This function returns the plane state for the given plane, either from - * @state, or if the plane isn't part of the atomic state update, from @plane. 
- * This is useful in atomic check callbacks, when drivers need to peek at, but - * not change, state of other planes, since it avoids threading an error code - * back up the call chain. + * This function returns the plane state for the given plane, either the + * new plane state from @state, or if the plane isn't part of the atomic + * state update, from @plane. This is useful in atomic check callbacks, + * when drivers need to peek at, but not change, state of other planes, + * since it avoids threading an error code back up the call chain. * * WARNING: * @@ -716,9 +903,15 @@ static inline const struct drm_plane_state * __drm_atomic_get_current_plane_state(const struct drm_atomic_state *state, struct drm_plane *plane) { - if (state->planes[drm_plane_index(plane)].state) - return state->planes[drm_plane_index(plane)].state; + struct drm_plane_state *plane_state; + plane_state = drm_atomic_get_new_plane_state(state, plane); + if (plane_state) + return plane_state; + + /* + * If the plane isn't part of the state, fallback to the currently active one. + */ return plane->state; } @@ -731,6 +924,9 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state, int __must_check drm_atomic_add_affected_planes(struct drm_atomic_state *state, struct drm_crtc *crtc); +int __must_check +drm_atomic_add_affected_colorops(struct drm_atomic_state *state, + struct drm_plane *plane); int __must_check drm_atomic_check_only(struct drm_atomic_state *state); int __must_check drm_atomic_commit(struct drm_atomic_state *state); @@ -871,6 +1067,49 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); (void)(new_crtc_state) /* Only to avoid unused-but-set-variable warning */, 1)) /** + * for_each_oldnew_colorop_in_state - iterate over all colorops in an atomic update + * @__state: &struct drm_atomic_state pointer + * @colorop: &struct drm_colorop iteration cursor + * @old_colorop_state: &struct drm_colorop_state iteration cursor for the old state + * @new_colorop_state: &struct drm_colorop_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all colorops in an atomic update, tracking both old and + * new state. This is useful in places where the state delta needs to be + * considered, for example in atomic check functions. + */ +#define for_each_oldnew_colorop_in_state(__state, colorop, old_colorop_state, \ + new_colorop_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_colorop; \ + (__i)++) \ + for_each_if ((__state)->colorops[__i].ptr && \ + ((colorop) = (__state)->colorops[__i].ptr, \ + (void)(colorop) /* Only to avoid unused-but-set-variable warning */, \ + (old_colorop_state) = (__state)->colorops[__i].old_state,\ + (new_colorop_state) = (__state)->colorops[__i].new_state, 1)) + +/** + * for_each_new_colorop_in_state - iterate over all colorops in an atomic update + * @__state: &struct drm_atomic_state pointer + * @colorop: &struct drm_colorop iteration cursor + * @new_colorop_state: &struct drm_colorop_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all colorops in an atomic update, tracking new state. This is + * useful in places where the state delta needs to be considered, for example in + * atomic check functions. 
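A short usage sketch for these iterators; "state" stands for a hypothetical &drm_atomic_state under check:

struct drm_colorop *colorop;
struct drm_colorop_state *old_cs, *new_cs;
int i;

/* Visit every colorop in the update, with both sides of the delta. */
for_each_oldnew_colorop_in_state(state, colorop, old_cs, new_cs, i) {
	/* compare old_cs against new_cs here */
}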
+ */ +#define for_each_new_colorop_in_state(__state, colorop, new_colorop_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_colorop; \ + (__i)++) \ + for_each_if ((__state)->colorops[__i].ptr && \ + ((colorop) = (__state)->colorops[__i].ptr, \ + (void)(colorop) /* Only to avoid unused-but-set-variable warning */, \ + (new_colorop_state) = (__state)->colorops[__i].new_state, 1)) + +/** * for_each_oldnew_plane_in_state - iterate over all planes in an atomic update * @__state: &struct drm_atomic_state pointer * @plane: &struct drm_plane iteration cursor @@ -1119,7 +1358,7 @@ struct drm_bridge_state { struct drm_bus_cfg input_bus_cfg; /** - * @output_bus_cfg: input bus configuration + * @output_bus_cfg: output bus configuration */ struct drm_bus_cfg output_bus_cfg; }; diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 33f982cd1a27..53382fe93537 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -49,9 +49,8 @@ struct drm_private_state; int drm_atomic_helper_check_modeset(struct drm_device *dev, struct drm_atomic_state *state); -int -drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder, - struct drm_connector_state *conn_state); +int drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector, + struct drm_atomic_state *state); int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, const struct drm_crtc_state *crtc_state, int min_scale, @@ -97,6 +96,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, int drm_atomic_helper_prepare_planes(struct drm_device *dev, struct drm_atomic_state *state); +void drm_atomic_helper_unprepare_planes(struct drm_device *dev, + struct drm_atomic_state *state); #define DRM_PLANE_COMMIT_ACTIVE_ONLY BIT(0) #define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET BIT(1) @@ -138,6 +139,8 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set, int drm_atomic_helper_disable_all(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); +int drm_atomic_helper_reset_crtc(struct drm_crtc *crtc, + struct drm_modeset_acquire_ctx *ctx); void drm_atomic_helper_shutdown(struct drm_device *dev); struct drm_atomic_state * drm_atomic_helper_duplicate_state(struct drm_device *dev, @@ -210,6 +213,32 @@ int drm_atomic_helper_page_flip_target( plane))) /** + * drm_atomic_plane_enabling - check whether a plane is being enabled + * @old_plane_state: old atomic plane state + * @new_plane_state: new atomic plane state + * + * Checks the atomic state of a plane to determine whether it's being enabled + * or not. This also WARNs if it detects an invalid state (both CRTC and FB + * need to either both be NULL or both be non-NULL). + * + * RETURNS: + * True if the plane is being enabled, false otherwise. + */ +static inline bool drm_atomic_plane_enabling(struct drm_plane_state *old_plane_state, + struct drm_plane_state *new_plane_state) +{ + /* + * When enabling a plane, CRTC and FB should always be set together. + * Anything else should be considered a bug in the atomic core, so we + * gently warn about it. 
+ */ + WARN_ON((!new_plane_state->crtc && new_plane_state->fb) || + (new_plane_state->crtc && !new_plane_state->fb)); + + return !old_plane_state->crtc && new_plane_state->crtc; +} + +/** * drm_atomic_plane_disabling - check whether a plane is being disabled * @old_plane_state: old atomic plane state * @new_plane_state: new atomic plane state diff --git a/include/drm/drm_atomic_uapi.h b/include/drm/drm_atomic_uapi.h index 4c6d39d7bdb2..436315523326 100644 --- a/include/drm/drm_atomic_uapi.h +++ b/include/drm/drm_atomic_uapi.h @@ -37,6 +37,7 @@ struct drm_crtc; struct drm_connector_state; struct dma_fence; struct drm_framebuffer; +struct drm_colorop; int __must_check drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, @@ -49,6 +50,8 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, struct drm_crtc *crtc); void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, struct drm_framebuffer *fb); +void drm_atomic_set_colorop_for_plane(struct drm_plane_state *plane_state, + struct drm_colorop *colorop); int __must_check drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, struct drm_crtc *crtc); diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h index ba248ca8866f..50131383ed81 100644 --- a/include/drm/drm_auth.h +++ b/include/drm/drm_auth.h @@ -33,24 +33,6 @@ #include <linux/wait.h> struct drm_file; -struct drm_hw_lock; - -/* - * Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for - * include ordering reasons. - * - * DO NOT USE. - */ -struct drm_lock_data { - struct drm_hw_lock *hw_lock; - struct drm_file *file_priv; - wait_queue_head_t lock_queue; - unsigned long lock_time; - spinlock_t spinlock; - uint32_t kernel_waiters; - uint32_t user_waiters; - int idle_has_lock; -}; /** * struct drm_master - drm master structure @@ -145,10 +127,6 @@ struct drm_master { * Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex. */ struct idr lessee_idr; - /* private: */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) - struct drm_lock_data lock; -#endif }; struct drm_master *drm_master_get(struct drm_master *master); diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 42f86327b40a..0ff7ab4aa868 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -23,6 +23,7 @@ #ifndef __DRM_BRIDGE_H__ #define __DRM_BRIDGE_H__ +#include <linux/cleanup.h> #include <linux/ctype.h> #include <linux/list.h> #include <linux/mutex.h> @@ -32,12 +33,18 @@ #include <drm/drm_mode_object.h> #include <drm/drm_modes.h> +struct cec_msg; +struct device_node; + struct drm_bridge; struct drm_bridge_timings; struct drm_connector; struct drm_display_info; +struct drm_minor; struct drm_panel; struct edid; +struct hdmi_codec_daifmt; +struct hdmi_codec_params; struct i2c_adapter; /** @@ -68,10 +75,20 @@ struct drm_bridge_funcs { * * Zero on success, error code on failure. */ - int (*attach)(struct drm_bridge *bridge, + int (*attach)(struct drm_bridge *bridge, struct drm_encoder *encoder, enum drm_bridge_attach_flags flags); /** + * @destroy: + * + * This callback is invoked when the bridge is about to be + * deallocated. + * + * The @destroy callback is optional. 
+ */ + void (*destroy)(struct drm_bridge *bridge); + + /** + * @detach: + * + * This callback is invoked whenever our bridge is being detached from a @@ -104,7 +121,7 @@ struct drm_bridge_funcs { * Since this function is both called from the check phase of an atomic * commit, and the mode validation in the probe paths it is not allowed * to look at anything else but the passed-in mode, and validate it - * against configuration-invariant hardward constraints. Any further + * against configuration-invariant hardware constraints. Any further * limits which depend upon the configuration can only be checked in * @mode_fixup. * @@ -159,17 +176,33 @@ struct drm_bridge_funcs { /** * @disable: * - * This callback should disable the bridge. It is called right before - * the preceding element in the display pipe is disabled. If the - * preceding element is a bridge this means it's called before that - * bridge's @disable vfunc. If the preceding element is a &drm_encoder - * it's called right before the &drm_encoder_helper_funcs.disable, - * &drm_encoder_helper_funcs.prepare or &drm_encoder_helper_funcs.dpms - * hook. + * The @disable callback should disable the bridge. * * The bridge can assume that the display pipe (i.e. clocks and timing * signals) feeding it is still running when this callback is called. * + * + * If the preceding element is a &drm_bridge, then this is called before + * that bridge is disabled via one of: + * + * - &drm_bridge_funcs.disable + * - &drm_bridge_funcs.atomic_disable + * + * If the preceding element of the bridge is a display controller, then + * this callback is called before the encoder is disabled via one of: + * + * - &drm_encoder_helper_funcs.atomic_disable + * - &drm_encoder_helper_funcs.prepare + * - &drm_encoder_helper_funcs.disable + * - &drm_encoder_helper_funcs.dpms + * + * and the CRTC is disabled via one of: + * + * - &drm_crtc_helper_funcs.prepare + * - &drm_crtc_helper_funcs.atomic_disable + * - &drm_crtc_helper_funcs.disable + * - &drm_crtc_helper_funcs.dpms. + * * The @disable callback is optional. * * NOTE: @@ -182,17 +215,34 @@ struct drm_bridge_funcs { /** * @post_disable: * - * This callback should disable the bridge. It is called right after the - * preceding element in the display pipe is disabled. If the preceding - * element is a bridge this means it's called after that bridge's - * @post_disable function. If the preceding element is a &drm_encoder - * it's called right after the encoder's - * &drm_encoder_helper_funcs.disable, &drm_encoder_helper_funcs.prepare - * or &drm_encoder_helper_funcs.dpms hook. - * * The bridge must assume that the display pipe (i.e. clocks and timing - * singals) feeding it is no longer running when this callback is - * called. + * signals) feeding this bridge is no longer running when the + * @post_disable is called. + * + * This callback should perform all the actions required by the hardware + * after it has stopped receiving signals from the preceding element.
+ * + * If the preceding element is a &drm_bridge, then this is called after + * that bridge is post-disabled (unless marked otherwise by the + * @pre_enable_prev_first flag) via one of: + * + * - &drm_bridge_funcs.post_disable + * - &drm_bridge_funcs.atomic_post_disable + * + * If the preceding element of the bridge is a display controller, then + * this callback is called after the encoder is disabled via one of: + * + * - &drm_encoder_helper_funcs.atomic_disable + * - &drm_encoder_helper_funcs.prepare + * - &drm_encoder_helper_funcs.disable + * - &drm_encoder_helper_funcs.dpms + * + * and the CRTC is disabled via one of: + * + * - &drm_crtc_helper_funcs.prepare + * - &drm_crtc_helper_funcs.atomic_disable + * - &drm_crtc_helper_funcs.disable + * - &drm_crtc_helper_funcs.dpms * * The @post_disable callback is optional. * @@ -235,18 +285,30 @@ struct drm_bridge_funcs { /** * @pre_enable: * - * This callback should enable the bridge. It is called right before - * the preceding element in the display pipe is enabled. If the - * preceding element is a bridge this means it's called before that - * bridge's @pre_enable function. If the preceding element is a - * &drm_encoder it's called right before the encoder's - * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or - * &drm_encoder_helper_funcs.dpms hook. - * * The display pipe (i.e. clocks and timing signals) feeding this bridge - * will not yet be running when this callback is called. The bridge must - * not enable the display link feeding the next bridge in the chain (if - * there is one) when this callback is called. + * will not yet be running when the @pre_enable is called. + * + * This callback should perform all the necessary actions to prepare the + * bridge to accept signals from the preceding element. + * + * If the preceding element is a &drm_bridge, then this is called before + * that bridge is pre-enabled (unless marked otherwise by + * @pre_enable_prev_first flag) via one of: + * + * - &drm_bridge_funcs.pre_enable + * - &drm_bridge_funcs.atomic_pre_enable + * + * If the preceding element of the bridge is a display controller, then + * this callback is called before the CRTC is enabled via one of: + * + * - &drm_crtc_helper_funcs.atomic_enable + * - &drm_crtc_helper_funcs.commit + * + * and the encoder is enabled via one of: + * + * - &drm_encoder_helper_funcs.atomic_enable + * - &drm_encoder_helper_funcs.enable + * - &drm_encoder_helper_funcs.commit * * The @pre_enable callback is optional. * @@ -260,19 +322,31 @@ struct drm_bridge_funcs { /** * @enable: * - * This callback should enable the bridge. It is called right after - * the preceding element in the display pipe is enabled. If the - * preceding element is a bridge this means it's called after that - * bridge's @enable function. If the preceding element is a - * &drm_encoder it's called right after the encoder's - * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or - * &drm_encoder_helper_funcs.dpms hook. + * The @enable callback should enable the bridge. * * The bridge can assume that the display pipe (i.e. clocks and timing * signals) feeding it is running when this callback is called. This * callback must enable the display link feeding the next bridge in the * chain if there is one. 
* + * If the preceding element is a &drm_bridge, then this is called after + * that bridge is enabled via one of: + * + * - &drm_bridge_funcs.enable + * - &drm_bridge_funcs.atomic_enable + * + * If the preceding element of the bridge is a display controller, then + * this callback is called after the CRTC is enabled via one of: + * + * - &drm_crtc_helper_funcs.atomic_enable + * - &drm_crtc_helper_funcs.commit + * + * and the encoder is enabled via one of: + * + * - &drm_encoder_helper_funcs.atomic_enable + * - &drm_encoder_helper_funcs.enable + * - drm_encoder_helper_funcs.commit + * * The @enable callback is optional. * * NOTE: @@ -285,78 +359,138 @@ struct drm_bridge_funcs { /** * @atomic_pre_enable: * - * This callback should enable the bridge. It is called right before - * the preceding element in the display pipe is enabled. If the - * preceding element is a bridge this means it's called before that - * bridge's @atomic_pre_enable or @pre_enable function. If the preceding - * element is a &drm_encoder it's called right before the encoder's - * &drm_encoder_helper_funcs.atomic_enable hook. - * * The display pipe (i.e. clocks and timing signals) feeding this bridge - * will not yet be running when this callback is called. The bridge must - * not enable the display link feeding the next bridge in the chain (if - * there is one) when this callback is called. + * will not yet be running when the @atomic_pre_enable is called. + * + * This callback should perform all the necessary actions to prepare the + * bridge to accept signals from the preceding element. + * + * If the preceding element is a &drm_bridge, then this is called before + * that bridge is pre-enabled (unless marked otherwise by + * @pre_enable_prev_first flag) via one of: + * + * - &drm_bridge_funcs.pre_enable + * - &drm_bridge_funcs.atomic_pre_enable + * + * If the preceding element of the bridge is a display controller, then + * this callback is called before the CRTC is enabled via one of: + * + * - &drm_crtc_helper_funcs.atomic_enable + * - &drm_crtc_helper_funcs.commit + * + * and the encoder is enabled via one of: + * + * - &drm_encoder_helper_funcs.atomic_enable + * - &drm_encoder_helper_funcs.enable + * - &drm_encoder_helper_funcs.commit * * The @atomic_pre_enable callback is optional. */ void (*atomic_pre_enable)(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state); + struct drm_atomic_state *state); /** * @atomic_enable: * - * This callback should enable the bridge. It is called right after - * the preceding element in the display pipe is enabled. If the - * preceding element is a bridge this means it's called after that - * bridge's @atomic_enable or @enable function. If the preceding element - * is a &drm_encoder it's called right after the encoder's - * &drm_encoder_helper_funcs.atomic_enable hook. + * The @atomic_enable callback should enable the bridge. * * The bridge can assume that the display pipe (i.e. clocks and timing * signals) feeding it is running when this callback is called. This * callback must enable the display link feeding the next bridge in the * chain if there is one. 
* + * If the preceding element is a &drm_bridge, then this is called after + * that bridge is enabled via one of: + * + * - &drm_bridge_funcs.enable + * - &drm_bridge_funcs.atomic_enable + * + * If the preceding element of the bridge is a display controller, then + * this callback is called after the CRTC is enabled via one of: + * + * - &drm_crtc_helper_funcs.atomic_enable + * - &drm_crtc_helper_funcs.commit + * + * and the encoder is enabled via one of: + * + * - &drm_encoder_helper_funcs.atomic_enable + * - &drm_encoder_helper_funcs.enable + * - drm_encoder_helper_funcs.commit + * * The @atomic_enable callback is optional. */ void (*atomic_enable)(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state); + struct drm_atomic_state *state); /** * @atomic_disable: * - * This callback should disable the bridge. It is called right before - * the preceding element in the display pipe is disabled. If the - * preceding element is a bridge this means it's called before that - * bridge's @atomic_disable or @disable vfunc. If the preceding element - * is a &drm_encoder it's called right before the - * &drm_encoder_helper_funcs.atomic_disable hook. + * The @atomic_disable callback should disable the bridge. * * The bridge can assume that the display pipe (i.e. clocks and timing * signals) feeding it is still running when this callback is called. * + * If the preceding element is a &drm_bridge, then this is called before + * that bridge is disabled via one of: + * + * - &drm_bridge_funcs.disable + * - &drm_bridge_funcs.atomic_disable + * + * If the preceding element of the bridge is a display controller, then + * this callback is called before the encoder is disabled via one of: + * + * - &drm_encoder_helper_funcs.atomic_disable + * - &drm_encoder_helper_funcs.prepare + * - &drm_encoder_helper_funcs.disable + * - &drm_encoder_helper_funcs.dpms + * + * and the CRTC is disabled via one of: + * + * - &drm_crtc_helper_funcs.prepare + * - &drm_crtc_helper_funcs.atomic_disable + * - &drm_crtc_helper_funcs.disable + * - &drm_crtc_helper_funcs.dpms. + * * The @atomic_disable callback is optional. */ void (*atomic_disable)(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state); + struct drm_atomic_state *state); /** * @atomic_post_disable: * - * This callback should disable the bridge. It is called right after the - * preceding element in the display pipe is disabled. If the preceding - * element is a bridge this means it's called after that bridge's - * @atomic_post_disable or @post_disable function. If the preceding - * element is a &drm_encoder it's called right after the encoder's - * &drm_encoder_helper_funcs.atomic_disable hook. - * * The bridge must assume that the display pipe (i.e. clocks and timing - * signals) feeding it is no longer running when this callback is - * called. + * signals) feeding this bridge is no longer running when the + * @atomic_post_disable is called. + * + * This callback should perform all the actions required by the hardware + * after it has stopped receiving signals from the preceding element. 
+ * + * If the preceding element is a &drm_bridge, then this is called after + * that bridge is post-disabled (unless marked otherwise by the + * @pre_enable_prev_first flag) via one of: + * + * - &drm_bridge_funcs.post_disable + * - &drm_bridge_funcs.atomic_post_disable + * + * If the preceding element of the bridge is a display controller, then + * this callback is called after the encoder is disabled via one of: + * + * - &drm_encoder_helper_funcs.atomic_disable + * - &drm_encoder_helper_funcs.prepare + * - &drm_encoder_helper_funcs.disable + * - &drm_encoder_helper_funcs.dpms + * + * and the CRTC is disabled via one of: + * + * - &drm_crtc_helper_funcs.prepare + * - &drm_crtc_helper_funcs.atomic_disable + * - &drm_crtc_helper_funcs.disable + * - &drm_crtc_helper_funcs.dpms * * The @atomic_post_disable callback is optional. */ void (*atomic_post_disable)(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state); + struct drm_atomic_state *state); /** * @atomic_duplicate_state: @@ -423,11 +557,11 @@ struct drm_bridge_funcs { * * The returned array must be allocated with kmalloc() and will be * freed by the caller. If the allocation fails, NULL should be - * returned. num_output_fmts must be set to the returned array size. + * returned. num_input_fmts must be set to the returned array size. * Formats listed in the returned array should be listed in decreasing * preference order (the core will try all formats until it finds one * that works). When the format is not supported NULL should be - * returned and num_output_fmts should be set to 0. + * returned and num_input_fmts should be set to 0. * * This method is called on all elements of the bridge chain as part of * the bus format negotiation process that happens in @@ -527,7 +661,8 @@ struct drm_bridge_funcs { * * drm_connector_status indicating the bridge output status. */ - enum drm_connector_status (*detect)(struct drm_bridge *bridge); + enum drm_connector_status (*detect)(struct drm_bridge *bridge, + struct drm_connector *connector); /** * @get_modes: @@ -538,7 +673,7 @@ struct drm_bridge_funcs { * The @get_modes callback is mostly intended to support non-probeable * displays such as many fixed panels. Bridges that support reading * EDID shall leave @get_modes unimplemented and implement the - * &drm_bridge_funcs->get_edid callback instead. + * &drm_bridge_funcs->edid_read callback instead. * * This callback is optional. Bridges that implement it shall set the * DRM_BRIDGE_OP_MODES flag in their &drm_bridge->ops. @@ -555,11 +690,11 @@ struct drm_bridge_funcs { struct drm_connector *connector); /** - * @get_edid: + * @edid_read: * - * Read and parse the EDID data of the connected display. + * Read the EDID data of the connected display. * - * The @get_edid callback is the preferred way of reporting mode + * The @edid_read callback is the preferred way of reporting mode * information for a display connected to the bridge output. Bridges * that support reading EDID shall implement this callback and leave * the @get_modes callback unimplemented. @@ -572,17 +707,18 @@ struct drm_bridge_funcs { * DRM_BRIDGE_OP_EDID flag in their &drm_bridge->ops. * * The connector parameter shall be used for the sole purpose of EDID - * retrieval and parsing, and shall not be stored internally by bridge - * drivers for future usage. + * retrieval, and shall not be stored internally by bridge drivers for + * future usage. * * RETURNS: * - * An edid structure newly allocated with kmalloc() (or similar) on - * success, or NULL otherwise. 
The caller is responsible for freeing - * the returned edid structure with kfree(). + * An edid structure newly allocated with drm_edid_alloc() or returned + * from drm_edid_read() family of functions on success, or NULL + * otherwise. The caller is responsible for freeing the returned edid + * structure with drm_edid_free(). */ - struct edid *(*get_edid)(struct drm_bridge *bridge, - struct drm_connector *connector); + const struct drm_edid *(*edid_read)(struct drm_bridge *bridge, + struct drm_connector *connector); /** * @hpd_notify: @@ -627,6 +763,228 @@ struct drm_bridge_funcs { void (*hpd_disable)(struct drm_bridge *bridge); /** + * @hdmi_tmds_char_rate_valid: + * + * Check whether a particular TMDS character rate is supported by the + * driver. + * + * This callback is optional and should only be implemented by the + * bridges that take part in the HDMI connector implementation. Bridges + * that implement it shall set the DRM_BRIDGE_OP_HDMI flag in their + * &drm_bridge->ops. + * + * Returns: + * + * Either &drm_mode_status.MODE_OK or one of the failure reasons + * in &enum drm_mode_status. + */ + enum drm_mode_status + (*hdmi_tmds_char_rate_valid)(const struct drm_bridge *bridge, + const struct drm_display_mode *mode, + unsigned long long tmds_rate); + + /** + * @hdmi_clear_infoframe: + * + * This callback clears the infoframes in the hardware during commit. + * It will be called multiple times, once for every disabled infoframe + * type. + * + * This callback is optional but it must be implemented by bridges that + * set the DRM_BRIDGE_OP_HDMI flag in their &drm_bridge->ops. + */ + int (*hdmi_clear_infoframe)(struct drm_bridge *bridge, + enum hdmi_infoframe_type type); + /** + * @hdmi_write_infoframe: + * + * Program the infoframe into the hardware. It will be called multiple + * times, once for every updated infoframe type. + * + * This callback is optional but it must be implemented by bridges that + * set the DRM_BRIDGE_OP_HDMI flag in their &drm_bridge->ops. + */ + int (*hdmi_write_infoframe)(struct drm_bridge *bridge, + enum hdmi_infoframe_type type, + const u8 *buffer, size_t len); + + /** + * @hdmi_audio_startup: + * + * Called when ASoC starts an audio stream setup. + * + * This callback is optional, it can be implemented by bridges that + * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*hdmi_audio_startup)(struct drm_bridge *bridge, + struct drm_connector *connector); + + /** + * @hdmi_audio_prepare: + * Configures HDMI-encoder for audio stream. Can be called multiple + * times for each setup. + * + * This callback is optional but it must be implemented by bridges that + * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*hdmi_audio_prepare)(struct drm_bridge *bridge, + struct drm_connector *connector, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms); + + /** + * @hdmi_audio_shutdown: + * + * Shut down the audio stream. + * + * This callback is optional but it must be implemented by bridges that + * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + void (*hdmi_audio_shutdown)(struct drm_bridge *bridge, + struct drm_connector *connector); + + /** + * @hdmi_audio_mute_stream: + * + * Mute/unmute HDMI audio stream. 
+ * + * This callback is optional, it can be implemented by bridges that + * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*hdmi_audio_mute_stream)(struct drm_bridge *bridge, + struct drm_connector *connector, + bool enable, int direction); + + /** + * @hdmi_cec_init: + * + * Initialize CEC part of the bridge. + * + * This callback is optional, it can be implemented by bridges that + * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their + * &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*hdmi_cec_init)(struct drm_bridge *bridge, + struct drm_connector *connector); + + /** + * @hdmi_cec_enable: + * + * Enable or disable the CEC adapter inside the bridge. + * + * This callback is optional, it can be implemented by bridges that + * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their + * &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*hdmi_cec_enable)(struct drm_bridge *bridge, bool enable); + + /** + * @hdmi_cec_log_addr: + * + * Set the logical address of the CEC adapter inside the bridge. + * + * This callback is optional, it can be implemented by bridges that + * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their + * &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*hdmi_cec_log_addr)(struct drm_bridge *bridge, u8 logical_addr); + + /** + * @hdmi_cec_transmit: + * + * Transmit the message using the CEC adapter inside the bridge. + * + * This callback is optional, it can be implemented by bridges that + * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their + * &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*hdmi_cec_transmit)(struct drm_bridge *bridge, u8 attempts, + u32 signal_free_time, struct cec_msg *msg); + + /** + * @dp_audio_startup: + * + * Called when ASoC starts a DisplayPort audio stream setup. + * + * This callback is optional, it can be implemented by bridges that + * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*dp_audio_startup)(struct drm_bridge *bridge, + struct drm_connector *connector); + + /** + * @dp_audio_prepare: + * Configures DisplayPort audio stream. Can be called multiple + * times for each setup. + * + * This callback is optional but it must be implemented by bridges that + * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*dp_audio_prepare)(struct drm_bridge *bridge, + struct drm_connector *connector, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms); + + /** + * @dp_audio_shutdown: + * + * Shut down the DisplayPort audio stream. + * + * This callback is optional but it must be implemented by bridges that + * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + void (*dp_audio_shutdown)(struct drm_bridge *bridge, + struct drm_connector *connector); + + /** + * @dp_audio_mute_stream: + * + * Mute/unmute DisplayPort audio stream. + * + * This callback is optional, it can be implemented by bridges that + * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops. 
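Taken together, the audio hooks documented above slot into a driver's &drm_bridge_funcs as sketched below; the foo_* handlers are hypothetical, while the drm_atomic_helper_bridge_* state helpers are the stock ones:

static const struct drm_bridge_funcs foo_bridge_funcs = {
	.atomic_duplicate_state	= drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_bridge_destroy_state,
	.atomic_reset		= drm_atomic_helper_bridge_reset,
	.atomic_pre_enable	= foo_bridge_atomic_pre_enable,
	.atomic_enable		= foo_bridge_atomic_enable,
	.atomic_disable		= foo_bridge_atomic_disable,
	.atomic_post_disable	= foo_bridge_atomic_post_disable,
	/* Mandatory once DRM_BRIDGE_OP_HDMI_AUDIO is set in bridge->ops: */
	.hdmi_audio_prepare	= foo_hdmi_audio_prepare,
	.hdmi_audio_shutdown	= foo_hdmi_audio_shutdown,
	/* Optional extras: */
	.hdmi_audio_startup	= foo_hdmi_audio_startup,
	.hdmi_audio_mute_stream	= foo_hdmi_audio_mute_stream,
};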
+ * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*dp_audio_mute_stream)(struct drm_bridge *bridge, + struct drm_connector *connector, + bool enable, int direction); + + /** * @debugfs_init: * * Allows bridges to create bridge-specific debugfs files. @@ -683,7 +1041,7 @@ enum drm_bridge_ops { /** * @DRM_BRIDGE_OP_EDID: The bridge can retrieve the EDID of the display * connected to its output. Bridges that set this flag shall implement - * the &drm_bridge_funcs->get_edid callback. + * the &drm_bridge_funcs->edid_read callback. */ DRM_BRIDGE_OP_EDID = BIT(1), /** @@ -701,6 +1059,52 @@ enum drm_bridge_ops { * this flag shall implement the &drm_bridge_funcs->get_modes callback. */ DRM_BRIDGE_OP_MODES = BIT(3), + /** + * @DRM_BRIDGE_OP_HDMI: The bridge provides HDMI connector operations, + * including infoframes support. Bridges that set this flag must + * implement the &drm_bridge_funcs->hdmi_write_infoframe callback. + * + * Note: currently there can be at most one bridge in a chain that sets + * this bit. This is to simplify corresponding glue code in connector + * drivers. + */ + DRM_BRIDGE_OP_HDMI = BIT(4), + /** + * @DRM_BRIDGE_OP_HDMI_AUDIO: The bridge provides HDMI audio operations. + * Bridges that set this flag must implement the + * &drm_bridge_funcs->hdmi_audio_prepare and + * &drm_bridge_funcs->hdmi_audio_shutdown callbacks. + * + * Note: currently there can be at most one bridge in a chain that sets + * this bit. This is to simplify corresponding glue code in connector + * drivers. Also, it is not possible to have a bridge in the chain that + * sets @DRM_BRIDGE_OP_DP_AUDIO if there is a bridge that sets this + * flag. + */ + DRM_BRIDGE_OP_HDMI_AUDIO = BIT(5), + /** + * @DRM_BRIDGE_OP_DP_AUDIO: The bridge provides DisplayPort audio operations. + * Bridges that set this flag must implement the + * &drm_bridge_funcs->dp_audio_prepare and + * &drm_bridge_funcs->dp_audio_shutdown callbacks. + * + * Note: currently there can be at most one bridge in a chain that sets + * this bit. This is to simplify corresponding glue code in connector + * drivers. Also, it is not possible to have a bridge in the chain that + * sets @DRM_BRIDGE_OP_HDMI_AUDIO if there is a bridge that sets this + * flag. + */ + DRM_BRIDGE_OP_DP_AUDIO = BIT(6), + /** + * @DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER: The bridge requires a CEC notifier + * to be present. + */ + DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER = BIT(7), + /** + * @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER: The bridge requires a CEC adapter + * to be present. + */ + DRM_BRIDGE_OP_HDMI_CEC_ADAPTER = BIT(8), }; /** @@ -715,10 +1119,8 @@ struct drm_bridge { struct drm_encoder *encoder; /** @chain_node: used to form a bridge chain */ struct list_head chain_node; -#ifdef CONFIG_OF /** @of_node: device node pointer to the bridge */ struct device_node *of_node; -#endif /** @list: to keep track of all added bridges */ struct list_head list; /** @@ -729,6 +1131,18 @@ struct drm_bridge { const struct drm_bridge_timings *timings; /** @funcs: control functions */ const struct drm_bridge_funcs *funcs; + + /** + * @container: Pointer to the private driver struct embedding this + * &struct drm_bridge. + */ + void *container; + + /** + * @refcount: reference count of users referencing this bridge.
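The @container pointer gives the core a way back to the driver structure embedding the &struct drm_bridge; drivers themselves keep using the usual container_of() upcast, as in this hypothetical sketch:

struct foo_bridge {
	struct drm_bridge bridge;	/* must be embedded, not pointed to */
	struct regmap *regmap;
};

static inline struct foo_bridge *to_foo_bridge(struct drm_bridge *bridge)
{
	return container_of(bridge, struct foo_bridge, bridge);
}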
+ */ + struct kref refcount; + /** @driver_private: pointer to the bridge driver's internal context */ void *driver_private; /** @ops: bitmask of operations supported by the bridge */ @@ -745,6 +1159,11 @@ struct drm_bridge { */ bool interlace_allowed; /** + * @ycbcr_420_allowed: Indicate that the bridge can handle YCbCr 420 + * output. + */ + bool ycbcr_420_allowed; + /** * @pre_enable_prev_first: The bridge requires that the prev * bridge @pre_enable function is called before its @pre_enable, * and conversely for post_disable. This is most frequently a @@ -753,9 +1172,89 @@ struct drm_bridge { */ bool pre_enable_prev_first; /** + * @support_hdcp: Indicate that the bridge supports HDCP. + */ + bool support_hdcp; + /** * @ddc: Associated I2C adapter for DDC access, if any. */ struct i2c_adapter *ddc; + + /** + * @vendor: Vendor of the product to be used for the SPD InfoFrame + * generation. This is required if @DRM_BRIDGE_OP_HDMI is set. + */ + const char *vendor; + + /** + * @product: Name of the product to be used for the SPD InfoFrame + * generation. This is required if @DRM_BRIDGE_OP_HDMI is set. + */ + const char *product; + + /** + * @supported_formats: Bitmask of @hdmi_colorspace listing supported + * output formats. This is only relevant if @DRM_BRIDGE_OP_HDMI is set. + */ + unsigned int supported_formats; + + /** + * @max_bpc: Maximum bits per char the HDMI bridge supports. Allowed + * values are 8, 10 and 12. This is only relevant if + * @DRM_BRIDGE_OP_HDMI is set. + */ + unsigned int max_bpc; + + /** + * @hdmi_cec_dev: device to be used as a containing device for CEC + * functions. + */ + struct device *hdmi_cec_dev; + + /** + * @hdmi_audio_dev: device to be used as a parent for the HDMI Codec if + * either of @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO is set. + */ + struct device *hdmi_audio_dev; + + /** + * @hdmi_audio_max_i2s_playback_channels: maximum number of playback + * I2S channels for the @DRM_BRIDGE_OP_HDMI_AUDIO or + * @DRM_BRIDGE_OP_DP_AUDIO. + */ + int hdmi_audio_max_i2s_playback_channels; + + /** + * @hdmi_audio_i2s_formats: supported I2S formats, optional. The + * default is to allow all formats supported by the corresponding I2S + * bus driver. This is only used for bridges setting + * @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO. + */ + u64 hdmi_audio_i2s_formats; + + /** + * @hdmi_audio_spdif_playback: set if this bridge has S/PDIF playback + * port for @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO. + */ + unsigned int hdmi_audio_spdif_playback : 1; + + /** + * @hdmi_audio_dai_port: sound DAI port for either of + * @DRM_BRIDGE_OP_HDMI_AUDIO and @DRM_BRIDGE_OP_DP_AUDIO, -1 if it is + * not used. + */ + int hdmi_audio_dai_port; + + /** + * @hdmi_cec_adapter_name: the name of the adapter to register + */ + const char *hdmi_cec_adapter_name; + + /** + * @hdmi_cec_available_las: number of logical addresses, CEC_MAX_LOG_ADDRS if unset + */ + u8 hdmi_cec_available_las; + /** private: */ /** * @hpd_mutex: Protects the @hpd_cb and @hpd_data fields. 
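A probe-time sketch of how the new HDMI-related fields might be filled in; all values and the foo_* context are illustrative assumptions, not defaults mandated by this patch:

static void foo_bridge_setup_hdmi(struct foo_bridge *foo, struct device *dev)
{
	struct drm_bridge *bridge = &foo->bridge;

	bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
		      DRM_BRIDGE_OP_HDMI | DRM_BRIDGE_OP_HDMI_AUDIO;

	/* Feeds the SPD InfoFrame generated by the HDMI connector glue. */
	bridge->vendor = "ACME";
	bridge->product = "Foo HDMI TX";

	bridge->supported_formats = BIT(HDMI_COLORSPACE_RGB) |
				    BIT(HDMI_COLORSPACE_YUV422);
	bridge->max_bpc = 10;

	/* HDMI codec wiring: one I2S DAI, up to 8 channels, no S/PDIF. */
	bridge->hdmi_audio_dev = dev;
	bridge->hdmi_audio_max_i2s_playback_channels = 8;
	bridge->hdmi_audio_dai_port = 0;
}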
@@ -779,6 +1278,33 @@ drm_priv_to_bridge(struct drm_private_obj *priv) return container_of(priv, struct drm_bridge, base); } +struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge); +void drm_bridge_put(struct drm_bridge *bridge); + +/* Cleanup action for use with __free() */ +DEFINE_FREE(drm_bridge_put, struct drm_bridge *, if (_T) drm_bridge_put(_T)) + +void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset, + const struct drm_bridge_funcs *funcs); + +/** + * devm_drm_bridge_alloc - Allocate and initialize a bridge + * @dev: struct device of the bridge device + * @type: the type of the struct which contains struct &drm_bridge + * @member: the name of the &drm_bridge within @type + * @funcs: callbacks for this bridge + * + * The reference count of the returned bridge is initialized to 1. This + * reference will be automatically dropped via devm (by calling + * drm_bridge_put()) when @dev is removed. + * + * Returns: + * Pointer to new bridge, or ERR_PTR on failure. + */ +#define devm_drm_bridge_alloc(dev, type, member, funcs) \ + ((type *)__devm_drm_bridge_alloc(dev, sizeof(type), \ + offsetof(type, member), funcs)) + void drm_bridge_add(struct drm_bridge *bridge); int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge); void drm_bridge_remove(struct drm_bridge *bridge); @@ -795,10 +1321,54 @@ static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np) } #endif +static inline bool drm_bridge_is_last(struct drm_bridge *bridge) +{ + return list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain); +} + +/** + * drm_bridge_get_current_state() - Get the current bridge state + * @bridge: bridge object + * + * This function must be called with the modeset lock held. + * + * RETURNS: + * + * The current bridge state, or NULL if there is none. + */ +static inline struct drm_bridge_state * +drm_bridge_get_current_state(struct drm_bridge *bridge) +{ + if (!bridge) + return NULL; + + /* + * Only atomic bridges will have bridge->base initialized by + * drm_atomic_private_obj_init(), so we need to make sure we're + * working with one before we try to use the lock. + */ + if (!bridge->funcs || !bridge->funcs->atomic_reset) + return NULL; + + drm_modeset_lock_assert_held(&bridge->base.lock); + + if (!bridge->base.state) + return NULL; + + return drm_priv_to_bridge_state(bridge->base.state); +} + /** * drm_bridge_get_next_bridge() - Get the next bridge in the chain * @bridge: bridge object * + * The caller is responsible for holding a reference to @bridge via + * drm_bridge_get() or equivalent. This function leaves the refcount of + * @bridge unmodified. + * + * The refcount of the returned bridge is incremented. Use drm_bridge_put() + * when done with it. + * * RETURNS: * the next bridge in the chain after @bridge, or NULL if @bridge is the last. */ @@ -808,13 +1378,20 @@ drm_bridge_get_next_bridge(struct drm_bridge *bridge) { if (list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain)) return NULL; - return list_next_entry(bridge, chain_node); + return drm_bridge_get(list_next_entry(bridge, chain_node)); } /** * drm_bridge_get_prev_bridge() - Get the previous bridge in the chain * @bridge: bridge object * + * The caller is responsible for holding a reference to @bridge via + * drm_bridge_get() or equivalent. This function leaves the refcount of + * @bridge unmodified. + * + * The refcount of the returned bridge is incremented. Use drm_bridge_put() + * when done with it.
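The DEFINE_FREE() hook above enables scope-based cleanup of bridge references, e.g. (foo_* hypothetical):

#include <linux/cleanup.h>
#include <drm/drm_bridge.h>

static bool foo_bridge_has_next(struct drm_bridge *bridge)
{
	/*
	 * The reference acquired by drm_bridge_get_next_bridge() is
	 * dropped automatically via drm_bridge_put() when 'next'
	 * leaves scope.
	 */
	struct drm_bridge *next __free(drm_bridge_put) =
		drm_bridge_get_next_bridge(bridge);

	return next != NULL;
}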
+ * * RETURNS: * the previous bridge in the chain, or NULL if @bridge is the first. */ @@ -824,13 +1401,16 @@ drm_bridge_get_prev_bridge(struct drm_bridge *bridge) if (list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain)) return NULL; - return list_prev_entry(bridge, chain_node); + return drm_bridge_get(list_prev_entry(bridge, chain_node)); } /** * drm_bridge_chain_get_first_bridge() - Get the first bridge in the chain * @encoder: encoder object * + * The refcount of the returned bridge is incremented. Use drm_bridge_put() + * when done with it. + * * RETURNS: * the first bridge in the chain, or NULL if @encoder has no bridge attached * to it. @@ -838,24 +1418,85 @@ drm_bridge_get_prev_bridge(struct drm_bridge *bridge) static inline struct drm_bridge * drm_bridge_chain_get_first_bridge(struct drm_encoder *encoder) { - return list_first_entry_or_null(&encoder->bridge_chain, - struct drm_bridge, chain_node); + return drm_bridge_get(list_first_entry_or_null(&encoder->bridge_chain, + struct drm_bridge, chain_node)); +} + +/** + * drm_bridge_chain_get_last_bridge() - Get the last bridge in the chain + * @encoder: encoder object + * + * The refcount of the returned bridge is incremented. Use drm_bridge_put() + * when done with it. + * + * RETURNS: + * the last bridge in the chain, or NULL if @encoder has no bridge attached + * to it. + */ +static inline struct drm_bridge * +drm_bridge_chain_get_last_bridge(struct drm_encoder *encoder) +{ + return drm_bridge_get(list_last_entry_or_null(&encoder->bridge_chain, + struct drm_bridge, chain_node)); } /** - * drm_for_each_bridge_in_chain() - Iterate over all bridges present in a chain + * drm_bridge_get_next_bridge_and_put - Get the next bridge in the chain + * and put the previous + * @bridge: bridge object + * + * Same as drm_bridge_get_next_bridge() but additionally puts the @bridge. + * + * RETURNS: + * the next bridge in the chain after @bridge, or NULL if @bridge is the last. + */ +static inline struct drm_bridge * +drm_bridge_get_next_bridge_and_put(struct drm_bridge *bridge) +{ + struct drm_bridge *next = drm_bridge_get_next_bridge(bridge); + + drm_bridge_put(bridge); + + return next; +} + +/** + * drm_for_each_bridge_in_chain_scoped - iterate over all bridges attached + * to an encoder * @encoder: the encoder to iterate bridges on * @bridge: a bridge pointer updated to point to the current bridge at each * iteration * * Iterate over all bridges present in the bridge chain attached to @encoder. + * + * Automatically gets/puts the bridge reference while iterating, and puts + * the reference even if returning or breaking in the middle of the loop. */ -#define drm_for_each_bridge_in_chain(encoder, bridge) \ - list_for_each_entry(bridge, &(encoder)->bridge_chain, chain_node) +#define drm_for_each_bridge_in_chain_scoped(encoder, bridge) \ + for (struct drm_bridge *bridge __free(drm_bridge_put) = \ + drm_bridge_chain_get_first_bridge(encoder); \ + bridge; \ + bridge = drm_bridge_get_next_bridge_and_put(bridge)) + +/** + * drm_for_each_bridge_in_chain_from - iterate over all bridges starting + * from the given bridge + * @first_bridge: the bridge to start from + * @bridge: a bridge pointer updated to point to the current bridge at each + * iteration + * + * Iterate over all bridges in the encoder chain starting from + * @first_bridge, included. + * + * Automatically gets/puts the bridge reference while iterating, and puts + * the reference even if returning or breaking in the middle of the loop. 
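Because the scoped iterator drops its reference even on an early return, handing a bridge out of the loop only requires taking one extra reference, as in this hypothetical sketch:

static struct drm_bridge *foo_find_hdmi_bridge(struct drm_encoder *encoder)
{
	drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
		if (bridge->ops & DRM_BRIDGE_OP_HDMI)
			return drm_bridge_get(bridge); /* caller's reference */
	}

	return NULL;	/* the loop already dropped its iteration references */
}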
+ */ +#define drm_for_each_bridge_in_chain_from(first_bridge, bridge) \ + for (struct drm_bridge *bridge __free(drm_bridge_put) = \ + drm_bridge_get(first_bridge); \ + bridge; \ + bridge = drm_bridge_get_next_bridge_and_put(bridge)) -bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); enum drm_mode_status drm_bridge_chain_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, @@ -884,11 +1525,12 @@ drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge, u32 output_fmt, unsigned int *num_input_fmts); -enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge); +enum drm_connector_status +drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector); int drm_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector); -struct edid *drm_bridge_get_edid(struct drm_bridge *bridge, - struct drm_connector *connector); +const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector); void drm_bridge_hpd_enable(struct drm_bridge *bridge, void (*cb)(void *data, enum drm_connector_status status), @@ -949,4 +1591,9 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm, } #endif +void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge); + +void drm_bridge_debugfs_params(struct dentry *root); +void drm_bridge_debugfs_encoder_params(struct dentry *root, struct drm_encoder *encoder); + #endif diff --git a/include/drm/drm_bridge_helper.h b/include/drm/drm_bridge_helper.h new file mode 100644 index 000000000000..6c35b479ec2a --- /dev/null +++ b/include/drm/drm_bridge_helper.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#ifndef __DRM_BRIDGE_HELPER_H_ +#define __DRM_BRIDGE_HELPER_H_ + +struct drm_bridge; +struct drm_modeset_acquire_ctx; + +int drm_bridge_helper_reset_crtc(struct drm_bridge *bridge, + struct drm_modeset_acquire_ctx *ctx); + +#endif // __DRM_BRIDGE_HELPER_H_ diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h index 572077ff8ae7..b909fa8f810a 100644 --- a/include/drm/drm_buddy.h +++ b/include/drm/drm_buddy.h @@ -10,20 +10,16 @@ #include <linux/list.h> #include <linux/slab.h> #include <linux/sched.h> +#include <linux/rbtree.h> -#include <drm/drm_print.h> +struct drm_printer; -#define range_overflows(start, size, max) ({ \ - typeof(start) start__ = (start); \ - typeof(size) size__ = (size); \ - typeof(max) max__ = (max); \ - (void)(&start__ == &size__); \ - (void)(&start__ == &max__); \ - start__ >= max__ || size__ > max__ - start__; \ -}) - -#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0) -#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1) +#define DRM_BUDDY_RANGE_ALLOCATION BIT(0) +#define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1) +#define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2) +#define DRM_BUDDY_CLEAR_ALLOCATION BIT(3) +#define DRM_BUDDY_CLEARED BIT(4) +#define DRM_BUDDY_TRIM_DISABLE BIT(5) struct drm_buddy_block { #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12) @@ -31,8 +27,9 @@ struct drm_buddy_block { #define DRM_BUDDY_ALLOCATED (1 << 10) #define DRM_BUDDY_FREE (2 << 10) #define DRM_BUDDY_SPLIT (3 << 10) +#define DRM_BUDDY_HEADER_CLEAR GENMASK_ULL(9, 9) /* Free to be used, if needed in the future */ -#define DRM_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6) +#define DRM_BUDDY_HEADER_UNUSED GENMASK_ULL(8, 6) #define DRM_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0) u64 header; @@ -48,12 +45,16 @@ struct drm_buddy_block { * a list, if so 
desired. As soon as the block is freed with * drm_buddy_free* ownership is given back to the mm. */ - struct list_head link; + union { + struct rb_node rb; + struct list_head link; + }; + struct list_head tmp_link; }; -/* Order-zero must be at least PAGE_SIZE */ -#define DRM_BUDDY_MAX_ORDER (63 - PAGE_SHIFT) +/* Order-zero must be at least SZ_4K */ +#define DRM_BUDDY_MAX_ORDER (63 - 12) /* * Binary Buddy System. @@ -63,7 +64,7 @@ struct drm_buddy_block { */ struct drm_buddy { /* Maintain a free list for each order. */ - struct list_head *free_list; + struct rb_root **free_trees; /* * Maintain explicit binary tree(s) to track the allocation of the @@ -81,14 +82,15 @@ struct drm_buddy { unsigned int n_roots; unsigned int max_order; - /* Must be at least PAGE_SIZE */ + /* Must be at least SZ_4K */ u64 chunk_size; u64 size; u64 avail; + u64 clear_avail; }; static inline u64 -drm_buddy_block_offset(struct drm_buddy_block *block) +drm_buddy_block_offset(const struct drm_buddy_block *block) { return block->header & DRM_BUDDY_HEADER_OFFSET; } @@ -112,6 +114,12 @@ drm_buddy_block_is_allocated(struct drm_buddy_block *block) } static inline bool +drm_buddy_block_is_clear(struct drm_buddy_block *block) +{ + return block->header & DRM_BUDDY_HEADER_CLEAR; +} + +static inline bool drm_buddy_block_is_free(struct drm_buddy_block *block) { return drm_buddy_block_state(block) == DRM_BUDDY_FREE; @@ -144,16 +152,20 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, unsigned long flags); int drm_buddy_block_trim(struct drm_buddy *mm, + u64 *start, u64 new_size, struct list_head *blocks); +void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear); + void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block); -void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects); +void drm_buddy_free_list(struct drm_buddy *mm, + struct list_head *objects, + unsigned int flags); void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p); void drm_buddy_block_print(struct drm_buddy *mm, struct drm_buddy_block *block, struct drm_printer *p); - #endif diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index a2c8dabd03b3..c972a8a3385b 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -29,6 +29,16 @@ struct drm_client_funcs { struct module *owner; /** + * @free: + * + * Called when the client gets unregistered. Implementations should + * release all client-specific data and free the memory. + * + * This callback is optional. + */ + void (*free)(struct drm_client_dev *client); + + /** * @unregister: * * Called when &drm_device is unregistered. The client should respond by @@ -47,12 +57,14 @@ struct drm_client_funcs { * * Note that the core does not guarantee exclusion against concurrent * drm_open(). Clients need to ensure this themselves, for example by - * using drm_master_internal_acquire() and - * drm_master_internal_release(). + * using drm_master_internal_acquire() and drm_master_internal_release(). + * + * If the caller passes force, the client should ignore any present DRM + * master and restore the display anyway. * * This callback is optional. */ - int (*restore)(struct drm_client_dev *client); + int (*restore)(struct drm_client_dev *client, bool force); /** * @hotplug: @@ -63,6 +75,24 @@ struct drm_client_funcs { * This callback is optional. */ int (*hotplug)(struct drm_client_dev *client); + + /** + * @suspend: + * + * Called when suspending the device. + * + * This callback is optional. 
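Together with @free, @restore and @hotplug, the new hooks give a client a complete lifecycle table; a hedged sketch with hypothetical foo_* handlers:

static const struct drm_client_funcs foo_client_funcs = {
	.owner		= THIS_MODULE,
	.free		= foo_client_free,
	.restore	= foo_client_restore,	/* must honor the 'force' flag */
	.hotplug	= foo_client_hotplug,
	.suspend	= foo_client_suspend,
	.resume		= foo_client_resume,
};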
+ */ + int (*suspend)(struct drm_client_dev *client); + + /** + * @resume: + * + * Called when resuming the device from suspend. + * + * This callback is optional. + */ + int (*resume)(struct drm_client_dev *client); }; /** @@ -108,6 +138,21 @@ struct drm_client_dev { struct drm_mode_set *modesets; /** + * @suspended: + * + * The client has been suspended. + */ + bool suspended; + + /** + * @hotplug_pending: + * + * A hotplug event has been received while the client was suspended. + * Try again on resume. + */ + bool hotplug_pending; + + /** * @hotplug_failed: * * Set by client hotplug helpers if the hotplugging failed @@ -121,10 +166,6 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client, void drm_client_release(struct drm_client_dev *client); void drm_client_register(struct drm_client_dev *client); -void drm_client_dev_unregister(struct drm_device *dev); -void drm_client_dev_hotplug(struct drm_device *dev); -void drm_client_dev_restore(struct drm_device *dev); - /** * struct drm_client_buffer - DRM client buffer */ @@ -135,17 +176,11 @@ struct drm_client_buffer { struct drm_client_dev *client; /** - * @handle: Buffer handle - */ - u32 handle; - - /** - * @pitch: Buffer pitch - */ - u32 pitch; - - /** * @gem: GEM object backing this buffer + * + * FIXME: The DRM framebuffer holds a reference on its GEM + * buffer objects. Do not use this field in new code and + * update existing users. */ struct drm_gem_object *gem; @@ -161,9 +196,12 @@ struct drm_client_buffer { }; struct drm_client_buffer * -drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format); -void drm_client_framebuffer_delete(struct drm_client_buffer *buffer); -int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect); +drm_client_buffer_create_dumb(struct drm_client_dev *client, u32 width, u32 height, u32 format); +void drm_client_buffer_delete(struct drm_client_buffer *buffer); +int drm_client_buffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect); +int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer, + struct iosys_map *map_copy); +void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer); int drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct iosys_map *map); void drm_client_buffer_vunmap(struct drm_client_buffer *buffer); @@ -176,6 +214,7 @@ int drm_client_modeset_check(struct drm_client_dev *client); int drm_client_modeset_commit_locked(struct drm_client_dev *client); int drm_client_modeset_commit(struct drm_client_dev *client); int drm_client_modeset_dpms(struct drm_client_dev *client, int mode); +int drm_client_modeset_wait_for_vblank(struct drm_client_dev *client, unsigned int crtc_index); /** * drm_client_for_each_modeset() - Iterate over client modesets @@ -200,6 +239,4 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode); drm_for_each_connector_iter(connector, iter) \ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) -void drm_client_debugfs_init(struct drm_minor *minor); - #endif diff --git a/include/drm/drm_client_event.h b/include/drm/drm_client_event.h new file mode 100644 index 000000000000..79369c755bc9 --- /dev/null +++ b/include/drm/drm_client_event.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 or MIT */ + +#ifndef _DRM_CLIENT_EVENT_H_ +#define _DRM_CLIENT_EVENT_H_ + +#include <linux/types.h> + +struct drm_device; + +#if defined(CONFIG_DRM_CLIENT) +void drm_client_dev_unregister(struct drm_device *dev); +void 
drm_client_dev_hotplug(struct drm_device *dev); +void drm_client_dev_restore(struct drm_device *dev, bool force); +void drm_client_dev_suspend(struct drm_device *dev); +void drm_client_dev_resume(struct drm_device *dev); +#else +static inline void drm_client_dev_unregister(struct drm_device *dev) +{ } +static inline void drm_client_dev_hotplug(struct drm_device *dev) +{ } +static inline void drm_client_dev_restore(struct drm_device *dev, bool force) +{ } +static inline void drm_client_dev_suspend(struct drm_device *dev) +{ } +static inline void drm_client_dev_resume(struct drm_device *dev) +{ } +#endif + +#endif diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h index 81c298488b0c..5140691f476a 100644 --- a/include/drm/drm_color_mgmt.h +++ b/include/drm/drm_color_mgmt.h @@ -24,6 +24,7 @@ #define __DRM_COLOR_MGMT_H__ #include <linux/ctype.h> +#include <linux/math64.h> #include <drm/drm_property.h> struct drm_crtc; @@ -36,20 +37,33 @@ struct drm_plane; * * Extract a degamma/gamma LUT value provided by user (in the form of * &drm_color_lut entries) and round it to the precision supported by the - * hardware. + * hardware, following OpenGL int<->float conversion rules + * (see eg. OpenGL 4.6 specification - 2.3.5 Fixed-Point Data Conversions). */ static inline u32 drm_color_lut_extract(u32 user_input, int bit_precision) { - u32 val = user_input; - u32 max = 0xffff >> (16 - bit_precision); + if (bit_precision > 16) + return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(user_input, (1 << bit_precision) - 1), + (1 << 16) - 1); + else + return DIV_ROUND_CLOSEST(user_input * ((1 << bit_precision) - 1), + (1 << 16) - 1); +} - /* Round only if we're not using full precision. */ - if (bit_precision < 16) { - val += 1UL << (16 - bit_precision - 1); - val >>= 16 - bit_precision; - } +/** + * drm_color_lut32_extract - clamp and round LUT entries + * @user_input: input value + * @bit_precision: number of bits the hw LUT supports + * + * Extract U0.bit_precision from a U0.32 LUT value. + * + */ +static inline u32 drm_color_lut32_extract(u32 user_input, int bit_precision) +{ + u64 max = (bit_precision >= 64) ? ~0ULL : (1ULL << bit_precision) - 1; - return clamp_val(val, 0, max); + return DIV_ROUND_CLOSEST_ULL((u64)user_input * max, + (1ULL << 32) - 1); } u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n); @@ -74,6 +88,18 @@ static inline int drm_color_lut_size(const struct drm_property_blob *blob) return blob->length / sizeof(struct drm_color_lut); } +/** + * drm_color_lut32_size - calculate the number of entries in the extended LUT + * @blob: blob containing the LUT + * + * Returns: + * The number of entries in the color LUT stored in @blob. 
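To make the rounding change in drm_color_lut_extract() above concrete, the arithmetic can be checked in isolation; extract_new() below mirrors the kernel expression for bit_precision <= 16 (illustrative userspace-style code, not kernel API):

#include <assert.h>

static unsigned int extract_new(unsigned int in, int bits)
{
	/* DIV_ROUND_CLOSEST(in * ((1 << bits) - 1), (1 << 16) - 1) */
	return (in * ((1u << bits) - 1) + 32767u) / 65535u;
}

int main(void)
{
	/*
	 * 0x8080 * 255 == 128 * 65535 exactly, so the OpenGL-style scaling
	 * yields 128; the old shift-and-round code returned 129 here.
	 */
	assert(extract_new(0x8080, 8) == 128);
	assert(extract_new(0xffff, 8) == 255);	/* full scale maps to max */
	return 0;
}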
+ */ +static inline int drm_color_lut32_size(const struct drm_property_blob *blob) +{ + return blob->length / sizeof(struct drm_color_lut32); +} + enum drm_color_encoding { DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_BT709, @@ -120,4 +146,33 @@ enum drm_color_lut_tests { }; int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests); + +/* + * Gamma-LUT programming + */ + +typedef void (*drm_crtc_set_lut_func)(struct drm_crtc *, unsigned int, u16, u16, u16); + +void drm_crtc_load_gamma_888(struct drm_crtc *crtc, const struct drm_color_lut *lut, + drm_crtc_set_lut_func set_gamma); +void drm_crtc_load_gamma_565_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut, + drm_crtc_set_lut_func set_gamma); +void drm_crtc_load_gamma_555_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut, + drm_crtc_set_lut_func set_gamma); + +void drm_crtc_fill_gamma_888(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma); +void drm_crtc_fill_gamma_565(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma); +void drm_crtc_fill_gamma_555(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma); + +/* + * Color-LUT programming + */ + +void drm_crtc_load_palette_8(struct drm_crtc *crtc, const struct drm_color_lut *lut, + drm_crtc_set_lut_func set_palette); + +void drm_crtc_fill_palette_332(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette); +void drm_crtc_fill_palette_8(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette); + +int drm_color_lut32_check(const struct drm_property_blob *lut, u32 tests); #endif diff --git a/include/drm/drm_colorop.h b/include/drm/drm_colorop.h new file mode 100644 index 000000000000..a3a32f9f918c --- /dev/null +++ b/include/drm/drm_colorop.h @@ -0,0 +1,464 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DRM_COLOROP_H__ +#define __DRM_COLOROP_H__ + +#include <drm/drm_mode_object.h> +#include <drm/drm_mode.h> +#include <drm/drm_property.h> + +/* DRM colorop flags */ +#define DRM_COLOROP_FLAG_ALLOW_BYPASS (1<<0) /* Allow bypass on the drm_colorop */ + +/** + * enum drm_colorop_curve_1d_type - type of 1D curve + * + * Describes a 1D curve to be applied by the DRM_COLOROP_1D_CURVE colorop. + */ +enum drm_colorop_curve_1d_type { + /** + * @DRM_COLOROP_1D_CURVE_SRGB_EOTF: + * + * enum string "sRGB EOTF" + * + * sRGB piece-wise electro-optical transfer function. 
Transfer + * characteristics as defined by IEC 61966-2-1 sRGB. Equivalent + * to H.273 TransferCharacteristics code point 13 with + * MatrixCoefficients set to 0. + */ + DRM_COLOROP_1D_CURVE_SRGB_EOTF, + + /** + * @DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF: + * + * enum string "sRGB Inverse EOTF" + * + * The inverse of &DRM_COLOROP_1D_CURVE_SRGB_EOTF + */ + DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF, + + /** + * @DRM_COLOROP_1D_CURVE_PQ_125_EOTF: + * + * enum string "PQ 125 EOTF" + * + * The PQ transfer function, scaled by 125.0f, so that 10,000 + * nits correspond to 125.0f. + * + * Transfer characteristics of the PQ function as defined by + * SMPTE ST 2084 (2014) for 10-, 12-, 14-, and 16-bit systems + * and Rec. ITU-R BT.2100-2 perceptual quantization (PQ) system, + * represented by H.273 TransferCharacteristics code point 16. + */ + DRM_COLOROP_1D_CURVE_PQ_125_EOTF, + + /** + * @DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF: + * + * enum string "PQ 125 Inverse EOTF" + * + * The inverse of DRM_COLOROP_1D_CURVE_PQ_125_EOTF. + */ + DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF, + + /** + * @DRM_COLOROP_1D_CURVE_BT2020_INV_OETF: + * + * enum string "BT.2020 Inverse OETF" + * + * The inverse of &DRM_COLOROP_1D_CURVE_BT2020_OETF + */ + DRM_COLOROP_1D_CURVE_BT2020_INV_OETF, + + /** + * @DRM_COLOROP_1D_CURVE_BT2020_OETF: + * + * enum string "BT.2020 OETF" + * + * The BT.2020/BT.709 transfer function. The BT.709 and BT.2020 + * transfer functions are the same, the only difference is that + * BT.2020 is defined with more precision for 10 and 12-bit + * encodings. + * + * + */ + DRM_COLOROP_1D_CURVE_BT2020_OETF, + + /** + * @DRM_COLOROP_1D_CURVE_GAMMA22: + * + * enum string "Gamma 2.2" + * + * A gamma 2.2 power function. This applies a power curve with + * gamma value of 2.2 to the input values. + */ + DRM_COLOROP_1D_CURVE_GAMMA22, + + /** + * @DRM_COLOROP_1D_CURVE_GAMMA22_INV: + * + * enum string "Gamma 2.2 Inverse" + * + * The inverse of &DRM_COLOROP_1D_CURVE_GAMMA22 + */ + DRM_COLOROP_1D_CURVE_GAMMA22_INV, + /** + * @DRM_COLOROP_1D_CURVE_COUNT: + * + * enum value denoting the size of the enum + */ + DRM_COLOROP_1D_CURVE_COUNT +}; + +/** + * struct drm_colorop_state - mutable colorop state + */ +struct drm_colorop_state { + /** @colorop: backpointer to the colorop */ + struct drm_colorop *colorop; + + /* + * Color properties + * + * The following fields are not always valid, their usage depends + * on the colorop type. See their associated comment for more + * information. + */ + + /** + * @bypass: + * + * When the property BYPASS exists on this colorop, this stores + * the requested bypass state: true if colorop shall be bypassed, + * false if colorop is enabled. + */ + bool bypass; + + /** + * @curve_1d_type: + * + * Type of 1D curve. + */ + enum drm_colorop_curve_1d_type curve_1d_type; + + /** + * @multiplier: + * + * Multiplier to 'gain' the plane. Format is S31.32 sign-magnitude. + */ + uint64_t multiplier; + + /** + * @data: + * + * Data blob for any TYPE that requires such a blob. The + * interpretation of the blob is TYPE-specific. + * + * See the &drm_colorop_type documentation for how blob is laid + * out. + */ + struct drm_property_blob *data; + + /** @state: backpointer to global drm_atomic_state */ + struct drm_atomic_state *state; +}; + +/** + * struct drm_colorop - DRM color operation control structure + * + * A colorop represents one color operation. They can be chained via + * the 'next' pointer to build a color pipeline. 
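To make the chaining concrete, here is a hedged sketch of a driver building a two-stage pipeline with the initializers declared later in this header; the foo_* naming and allocation strategy are illustrative, and error unwinding is elided:

#include <linux/slab.h>
#include <drm/drm_colorop.h>

static int foo_plane_create_color_pipeline(struct drm_device *dev,
					   struct drm_plane *plane)
{
	u64 tfs = BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
		  BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF);
	struct drm_colorop *degamma, *ctm;
	int ret;

	degamma = kzalloc(sizeof(*degamma), GFP_KERNEL);
	ctm = kzalloc(sizeof(*ctm), GFP_KERNEL);
	if (!degamma || !ctm)
		return -ENOMEM;	/* unwinding elided for brevity */

	ret = drm_plane_colorop_curve_1d_init(dev, degamma, plane, tfs,
					      DRM_COLOROP_FLAG_ALLOW_BYPASS);
	if (ret)
		return ret;

	ret = drm_plane_colorop_ctm_3x4_init(dev, ctm, plane,
					     DRM_COLOROP_FLAG_ALLOW_BYPASS);
	if (ret)
		return ret;

	/* Chain the stages: the 1D degamma curve feeds the 3x4 matrix. */
	drm_colorop_set_next_property(degamma, ctm);

	return 0;
}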
+ * + * Since colorops cannot stand-alone and are used to describe colorop + * operations on a plane they don't have their own locking mechanism but + * are locked and programmed along with their associated &drm_plane. + * + */ +struct drm_colorop { + /** @dev: parent DRM device */ + struct drm_device *dev; + + /** + * @head: + * + * List of all colorops on @dev, linked from &drm_mode_config.colorop_list. + * Invariant over the lifetime of @dev and therefore does not need + * locking. + */ + struct list_head head; + + /** + * @index: Position inside the mode_config.list, can be used as an array + * index. It is invariant over the lifetime of the colorop. + */ + unsigned int index; + + /** @base: base mode object */ + struct drm_mode_object base; + + /** + * @plane: + * + * The plane on which the colorop sits. A drm_colorop is always unique + * to a plane. + */ + struct drm_plane *plane; + + /** + * @state: + * + * Current atomic state for this colorop. + * + * This is protected by @mutex. Note that nonblocking atomic commits + * access the current colorop state without taking locks. + */ + struct drm_colorop_state *state; + + /* + * Color properties + * + * The following fields are not always valid, their usage depends + * on the colorop type. See their associated comment for more + * information. + */ + + /** @properties: property tracking for this colorop */ + struct drm_object_properties properties; + + /** + * @type: + * + * Read-only + * Type of color operation + */ + enum drm_colorop_type type; + + /** + * @next: + * + * Read-only + * Pointer to next drm_colorop in pipeline + */ + struct drm_colorop *next; + + /** + * @type_property: + * + * Read-only "TYPE" property for specifying the type of + * this color operation. The type is enum drm_colorop_type. + */ + struct drm_property *type_property; + + /** + * @bypass_property: + * + * Boolean property to control enablement of the color + * operation. Only present if DRM_COLOROP_FLAG_ALLOW_BYPASS + * flag is set. When present, setting bypass to "true" shall + * always be supported to allow compositors to quickly fall + * back to alternate methods of color processing. This is + * important since setting color operations can fail due to + * unique HW constraints. + */ + struct drm_property *bypass_property; + + /** + * @size: + * + * Number of entries of the custom LUT. This should be read-only. + */ + uint32_t size; + + /** + * @lut1d_interpolation: + * + * Read-only + * Interpolation for DRM_COLOROP_1D_LUT + */ + enum drm_colorop_lut1d_interpolation_type lut1d_interpolation; + + /** + * @lut3d_interpolation: + * + * Read-only + * Interpolation for DRM_COLOROP_3D_LUT + */ + enum drm_colorop_lut3d_interpolation_type lut3d_interpolation; + + /** + * @lut1d_interpolation_property: + * + * Read-only property for DRM_COLOROP_1D_LUT interpolation + */ + struct drm_property *lut1d_interpolation_property; + + /** + * @curve_1d_type_property: + * + * Sub-type for DRM_COLOROP_1D_CURVE type. + */ + struct drm_property *curve_1d_type_property; + + /** + * @multiplier_property: + * + * Multiplier property for plane gain + */ + struct drm_property *multiplier_property; + + /** + * @size_property: + * + * Size property for custom LUT from userspace. 
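For the S31.32 sign-magnitude encoding used by @multiplier, a quick illustrative conversion, assuming bit 63 carries the sign as in other DRM S31.32 properties:

/* 1.5x gain: integer part 1 in the high word, 0.5 in the fraction. */
static const u64 foo_gain_1_5 = (1ULL << 32) | (1ULL << 31);

/* -0.25x gain: magnitude 0.25 with the sign bit set. */
static const u64 foo_gain_neg_0_25 = (1ULL << 63) | (1ULL << 30);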
+ */ + struct drm_property *size_property; + + /** + * @lut3d_interpolation_property: + * + * Read-only property for DRM_COLOROP_3D_LUT interpolation + */ + struct drm_property *lut3d_interpolation_property; + + /** + * @data_property: + * + * blob property for any TYPE that requires a blob of data, + * such as 1DLUT, CTM, 3DLUT, etc. + * + * The way this blob is interpreted depends on the TYPE of + * this colorop. + */ + struct drm_property *data_property; + + /** + * @next_property: + * + * Read-only property pointing to the next colorop in the pipeline + */ + struct drm_property *next_property; + +}; + +#define obj_to_colorop(x) container_of(x, struct drm_colorop, base) + +/** + * drm_colorop_find - look up a Colorop object from its ID + * @dev: DRM device + * @file_priv: drm file to check for lease against. + * @id: &drm_mode_object ID + * + * This can be used to look up a Colorop from its userspace ID. Only used by + * drivers for legacy IOCTLs and interfaces; nowadays, extensions to the KMS + * userspace interface should be done using &drm_property. + */ +static inline struct drm_colorop *drm_colorop_find(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id) +{ + struct drm_mode_object *mo; + + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_COLOROP); + return mo ? obj_to_colorop(mo) : NULL; +} + +void drm_colorop_pipeline_destroy(struct drm_device *dev); +void drm_colorop_cleanup(struct drm_colorop *colorop); + +int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, u64 supported_tfs, uint32_t flags); +int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, uint32_t lut_size, + enum drm_colorop_lut1d_interpolation_type interpolation, + uint32_t flags); +int drm_plane_colorop_ctm_3x4_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, uint32_t flags); +int drm_plane_colorop_mult_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, uint32_t flags); +int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, + uint32_t lut_size, + enum drm_colorop_lut3d_interpolation_type interpolation, + uint32_t flags); + +struct drm_colorop_state * +drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop); + +void drm_colorop_atomic_destroy_state(struct drm_colorop *colorop, + struct drm_colorop_state *state); + +/** + * drm_colorop_reset - reset colorop atomic state + * @colorop: drm colorop + * + * Resets the atomic state for @colorop by freeing the state pointer (which might + * be NULL, e.g. at driver load time) and allocating a new empty state object. + */ +void drm_colorop_reset(struct drm_colorop *colorop); + +/** + * drm_colorop_index - find the index of a registered colorop + * @colorop: colorop to find index for + * + * Given a registered colorop, return the index of that colorop within a DRM + * device's list of colorops. + */ +static inline unsigned int drm_colorop_index(const struct drm_colorop *colorop) +{ + return colorop->index; +} + +#define drm_for_each_colorop(colorop, dev) \ + list_for_each_entry(colorop, &(dev)->mode_config.colorop_list, head) + +/** + * drm_get_colorop_type_name - return a string for colorop type + * @type: colorop type to compute name of + * + * In contrast to the other drm_get_*_name functions this one here returns a + * const pointer and hence is threadsafe.
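Since every colorop exposes a read-only @next pointer, walking a plane's pipeline is a simple pointer chase (foo_* hypothetical):

static unsigned int foo_color_pipeline_length(struct drm_colorop *colorop)
{
	unsigned int n = 0;

	for (; colorop; colorop = colorop->next)
		n++;

	return n;
}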
+ */ +const char *drm_get_colorop_type_name(enum drm_colorop_type type); + +/** + * drm_get_colorop_curve_1d_type_name - return a string for 1D curve type + * @type: 1d curve type to compute name of + * + * In contrast to the other drm_get_*_name functions this one here returns a + * const pointer and hence is threadsafe. + */ +const char *drm_get_colorop_curve_1d_type_name(enum drm_colorop_curve_1d_type type); + +const char * +drm_get_colorop_lut1d_interpolation_name(enum drm_colorop_lut1d_interpolation_type type); + +const char * +drm_get_colorop_lut3d_interpolation_name(enum drm_colorop_lut3d_interpolation_type type); + +void drm_colorop_set_next_property(struct drm_colorop *colorop, struct drm_colorop *next); + +#endif /* __DRM_COLOROP_H__ */ diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 7b5048516185..8f34f4b8183d 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -30,6 +30,7 @@ #include <linux/notifier.h> #include <drm/drm_mode_object.h> #include <drm/drm_util.h> +#include <drm/drm_property.h> #include <uapi/drm/drm_mode.h> @@ -37,13 +38,17 @@ struct drm_connector_helper_funcs; struct drm_modeset_acquire_ctx; struct drm_device; struct drm_crtc; +struct drm_display_mode; struct drm_encoder; struct drm_panel; struct drm_property; struct drm_property_blob; struct drm_printer; struct drm_privacy_screen; +struct drm_edid; struct edid; +struct hdmi_codec_daifmt; +struct hdmi_codec_params; struct i2c_adapter; enum drm_connector_force { @@ -199,6 +204,18 @@ enum drm_connector_tv_mode { */ DRM_MODE_TV_MODE_SECAM, + /** + * @DRM_MODE_TV_MODE_MONOCHROME: Use timings appropriate to + * the DRM mode, including equalizing pulses for a 525-line + * or 625-line mode, with no pedestal or color encoding. + */ + DRM_MODE_TV_MODE_MONOCHROME, + + /** + * @DRM_MODE_TV_MODE_MAX: Number of analog TV output modes. + * + * Internal implementation detail; this is not uABI. + */ DRM_MODE_TV_MODE_MAX, }; @@ -363,6 +380,32 @@ enum drm_panel_orientation { }; /** + * enum drm_hdmi_broadcast_rgb - Broadcast RGB Selection for an HDMI @drm_connector + */ +enum drm_hdmi_broadcast_rgb { + /** + * @DRM_HDMI_BROADCAST_RGB_AUTO: The RGB range is selected + * automatically based on the mode. + */ + DRM_HDMI_BROADCAST_RGB_AUTO, + + /** + * @DRM_HDMI_BROADCAST_RGB_FULL: Full range RGB is forced. + */ + DRM_HDMI_BROADCAST_RGB_FULL, + + /** + * @DRM_HDMI_BROADCAST_RGB_LIMITED: Limited range RGB is forced. + */ + DRM_HDMI_BROADCAST_RGB_LIMITED, +}; + +const char * +drm_hdmi_connector_get_broadcast_rgb_name(enum drm_hdmi_broadcast_rgb broadcast_rgb); +const char * +drm_hdmi_connector_get_output_format_name(enum hdmi_colorspace fmt); + +/** * struct drm_monitor_range_info - Panel's Monitor range in EDID for * &drm_display_info * @@ -419,37 +462,99 @@ enum drm_privacy_screen_status { PRIVACY_SCREEN_ENABLED_LOCKED, }; -/* - * This is a consolidated colorimetry list supported by HDMI and +/** + * enum drm_colorspace - color space + * + * This enum is a consolidated colorimetry list supported by HDMI and * DP protocol standard. The respective connectors will register * a property with the subset of this list (supported by that * respective protocol). Userspace will set the colorspace through * a colorspace property which will be created and exposed to * userspace. + * + * DP definitions come from the DP v2.0 spec + * HDMI definitions come from the CTA-861-H spec + * + * @DRM_MODE_COLORIMETRY_DEFAULT: + * Driver specific behavior. 
+ * @DRM_MODE_COLORIMETRY_NO_DATA: + * Driver specific behavior. + * @DRM_MODE_COLORIMETRY_SMPTE_170M_YCC: + * (HDMI) + * SMPTE ST 170M colorimetry format + * @DRM_MODE_COLORIMETRY_BT709_YCC: + * (HDMI, DP) + * ITU-R BT.709 colorimetry format + * @DRM_MODE_COLORIMETRY_XVYCC_601: + * (HDMI, DP) + * xvYCC601 colorimetry format + * @DRM_MODE_COLORIMETRY_XVYCC_709: + * (HDMI, DP) + * xvYCC709 colorimetry format + * @DRM_MODE_COLORIMETRY_SYCC_601: + * (HDMI, DP) + * sYCC601 colorimetry format + * @DRM_MODE_COLORIMETRY_OPYCC_601: + * (HDMI, DP) + * opYCC601 colorimetry format + * @DRM_MODE_COLORIMETRY_OPRGB: + * (HDMI, DP) + * opRGB colorimetry format + * @DRM_MODE_COLORIMETRY_BT2020_CYCC: + * (HDMI, DP) + * ITU-R BT.2020 Y'c C'bc C'rc (constant luminance) colorimetry format + * @DRM_MODE_COLORIMETRY_BT2020_RGB: + * (HDMI, DP) + * ITU-R BT.2020 R' G' B' colorimetry format + * @DRM_MODE_COLORIMETRY_BT2020_YCC: + * (HDMI, DP) + * ITU-R BT.2020 Y' C'b C'r colorimetry format + * @DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: + * (HDMI) + * SMPTE ST 2113 P3D65 colorimetry format + * @DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: + * (HDMI) + * SMPTE ST 2113 P3DCI colorimetry format + * @DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED: + * (DP) + * RGB wide gamut fixed point colorimetry format + * @DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT: + * (DP) + * RGB wide gamut floating point + * (scRGB (IEC 61966-2-2)) colorimetry format + * @DRM_MODE_COLORIMETRY_BT601_YCC: + * (DP) + * ITU-R BT.601 colorimetry format + * The DP spec does not say whether this is the 525 or the 625 + * line version. + * @DRM_MODE_COLORIMETRY_COUNT: + * Not a valid value; merely used for counting */ - -/* For Default case, driver will set the colorspace */ -#define DRM_MODE_COLORIMETRY_DEFAULT 0 -/* CEA 861 Normal Colorimetry options */ -#define DRM_MODE_COLORIMETRY_NO_DATA 0 -#define DRM_MODE_COLORIMETRY_SMPTE_170M_YCC 1 -#define DRM_MODE_COLORIMETRY_BT709_YCC 2 -/* CEA 861 Extended Colorimetry Options */ -#define DRM_MODE_COLORIMETRY_XVYCC_601 3 -#define DRM_MODE_COLORIMETRY_XVYCC_709 4 -#define DRM_MODE_COLORIMETRY_SYCC_601 5 -#define DRM_MODE_COLORIMETRY_OPYCC_601 6 -#define DRM_MODE_COLORIMETRY_OPRGB 7 -#define DRM_MODE_COLORIMETRY_BT2020_CYCC 8 -#define DRM_MODE_COLORIMETRY_BT2020_RGB 9 -#define DRM_MODE_COLORIMETRY_BT2020_YCC 10 -/* Additional Colorimetry extension added as part of CTA 861.G */ -#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 11 -#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER 12 -/* Additional Colorimetry Options added for DP 1.4a VSC Colorimetry Format */ -#define DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED 13 -#define DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT 14 -#define DRM_MODE_COLORIMETRY_BT601_YCC 15 +enum drm_colorspace { + /* For Default case, driver will set the colorspace */ + DRM_MODE_COLORIMETRY_DEFAULT = 0, + /* CEA 861 Normal Colorimetry options */ + DRM_MODE_COLORIMETRY_NO_DATA = 0, + DRM_MODE_COLORIMETRY_SMPTE_170M_YCC = 1, + DRM_MODE_COLORIMETRY_BT709_YCC = 2, + /* CEA 861 Extended Colorimetry Options */ + DRM_MODE_COLORIMETRY_XVYCC_601 = 3, + DRM_MODE_COLORIMETRY_XVYCC_709 = 4, + DRM_MODE_COLORIMETRY_SYCC_601 = 5, + DRM_MODE_COLORIMETRY_OPYCC_601 = 6, + DRM_MODE_COLORIMETRY_OPRGB = 7, + DRM_MODE_COLORIMETRY_BT2020_CYCC = 8, + DRM_MODE_COLORIMETRY_BT2020_RGB = 9, + DRM_MODE_COLORIMETRY_BT2020_YCC = 10, + /* Additional Colorimetry extension added as part of CTA 861.G */ + DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 = 11, + DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER = 12, + /* Additional Colorimetry Options added for DP 1.4a VSC Colorimetry Format
+ DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED = 13,
+ DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT = 14,
+ DRM_MODE_COLORIMETRY_BT601_YCC = 15,
+ DRM_MODE_COLORIMETRY_COUNT
+};
 
 /**
 * enum drm_bus_flags - bus_flags info for &drm_display_info
@@ -654,6 +759,14 @@ struct drm_display_info {
 bool is_hdmi;
 
 /**
+ * @has_audio: True if the sink supports audio.
+ *
+ * This field shall be used instead of calling
+ * drm_detect_monitor_audio() when possible.
+ */
+ bool has_audio;
+
+ /**
 * @has_hdmi_infoframe: Does the sink support the HDMI infoframe?
 */
 bool has_hdmi_infoframe;
@@ -687,6 +800,11 @@ struct drm_display_info {
 struct drm_hdmi_info hdmi;
 
 /**
+ * @hdr_sink_metadata: HDR Metadata Information read from sink
+ */
+ struct hdr_sink_metadata hdr_sink_metadata;
+
+ /**
 * @non_desktop: Non desktop display (HMD).
 */
 bool non_desktop;
@@ -730,9 +848,19 @@ struct drm_display_info {
 int vics_len;
 
 /**
- * @quirks: EDID based quirks. Internal to EDID parsing.
+ * @quirks: EDID based quirks. DRM core and drivers can query the
+ * @drm_edid_quirk quirks using drm_edid_has_quirk(); the rest of
+ * the quirks also tracked here are internal to EDID parsing.
 */
 u32 quirks;
+
+ /**
+ * @source_physical_address: Source Physical Address from HDMI
+ * Vendor-Specific Data Block, for CEC usage.
+ *
+ * Defaults to CEC_PHYS_ADDR_INVALID (0xffff).
+ */
+ u16 source_physical_address;
 };
 
 int drm_display_info_set_bus_formats(struct drm_display_info *info,
@@ -796,6 +924,82 @@ struct drm_tv_connector_state {
 };
 
 /**
+ * struct drm_connector_hdmi_infoframe - HDMI Infoframe container
+ */
+struct drm_connector_hdmi_infoframe {
+ /**
+ * @data: HDMI Infoframe structure
+ */
+ union hdmi_infoframe data;
+
+ /**
+ * @set: Is the content of @data valid?
+ */
+ bool set;
+};
+
+/**
+ * struct drm_connector_hdmi_state - HDMI state container
+ */
+struct drm_connector_hdmi_state {
+ /**
+ * @broadcast_rgb: Connector property to pass the
+ * Broadcast RGB selection value.
+ */
+ enum drm_hdmi_broadcast_rgb broadcast_rgb;
+
+ /**
+ * @infoframes: HDMI Infoframes matching that state
+ */
+ struct {
+ /**
+ * @avi: AVI Infoframes structure matching our
+ * state.
+ */
+ struct drm_connector_hdmi_infoframe avi;
+
+ /**
+ * @hdr_drm: DRM (Dynamic Range and Mastering)
+ * Infoframes structure matching our state.
+ */
+ struct drm_connector_hdmi_infoframe hdr_drm;
+
+ /**
+ * @spd: SPD Infoframes structure matching our
+ * state.
+ */
+ struct drm_connector_hdmi_infoframe spd;
+
+ /**
+ * @hdmi: HDMI Vendor Infoframes structure
+ * matching our state.
+ */
+ struct drm_connector_hdmi_infoframe hdmi;
+ } infoframes;
+
+ /**
+ * @is_limited_range: Is the output supposed to use a limited
+ * RGB Quantization Range or not?
+ */
+ bool is_limited_range;
+
+ /**
+ * @output_bpc: Bits per color channel to output.
+ */
+ unsigned int output_bpc;
+
+ /**
+ * @output_format: Pixel format to output in.
+ */
+ enum hdmi_colorspace output_format;
+
+ /**
+ * @tmds_char_rate: TMDS Character Rate, in Hz.
+ */
+ unsigned long long tmds_char_rate;
+};
+
+/**
 * struct drm_connector_state - mutable connector state
 */
 struct drm_connector_state {
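The enum keeps the old property values stable while letting drivers advertise only the colorimetry modes they implement. A minimal sketch of the driver side, using the reworked property constructors declared later in this file's diff; the foo names and the chosen subset are hypothetical:

static int foo_attach_colorspace_property(struct drm_connector *connector)
{
	/* Advertise only the colorimetry modes this driver implements. */
	u32 supported = BIT(DRM_MODE_COLORIMETRY_DEFAULT) |
			BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
			BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
	int ret;

	ret = drm_mode_create_hdmi_colorspace_property(connector, supported);
	if (ret)
		return ret;

	/* Expose the property so userspace can select a mode. */
	return drm_connector_attach_colorspace_property(connector);
}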
@@ -901,7 +1105,7 @@ struct drm_connector_state {
 * colorspace change on Sink. This is most commonly used to switch
 * to wider color gamuts like BT2020.
 */
- u32 colorspace;
+ enum drm_colorspace colorspace;
 
 /**
 * @writeback_job: Writeback job for writeback connectors
@@ -939,6 +1143,156 @@ struct drm_connector_state {
 * DRM blob property for HDR output metadata
 */
 struct drm_property_blob *hdr_output_metadata;
+
+ /**
+ * @hdmi: HDMI-related variable and properties. Filled by
+ * @drm_atomic_helper_connector_hdmi_check().
+ */
+ struct drm_connector_hdmi_state hdmi;
+};
+
+struct drm_connector_hdmi_audio_funcs {
+ /**
+ * @startup:
+ *
+ * Called when ASoC starts an audio stream setup. The
+ * @startup() is optional.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*startup)(struct drm_connector *connector);
+
+ /**
+ * @prepare:
+ * Configures HDMI-encoder for audio stream. Can be called
+ * multiple times for each setup. Mandatory.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*prepare)(struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /**
+ * @shutdown:
+ *
+ * Shut down the audio stream. Mandatory.
+ */
+ void (*shutdown)(struct drm_connector *connector);
+
+ /**
+ * @mute_stream:
+ *
+ * Mute/unmute HDMI audio stream. The @mute_stream callback is
+ * optional.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*mute_stream)(struct drm_connector *connector,
+ bool enable, int direction);
+};
+
+void drm_connector_cec_phys_addr_invalidate(struct drm_connector *connector);
+void drm_connector_cec_phys_addr_set(struct drm_connector *connector);
+
+/**
+ * struct drm_connector_cec_funcs - drm_connector CEC control functions
+ */
+struct drm_connector_cec_funcs {
+ /**
+ * @phys_addr_invalidate: mark CEC physical address as invalid
+ *
+ * The callback to mark CEC physical address as invalid, abstracting
+ * the operation.
+ */
+ void (*phys_addr_invalidate)(struct drm_connector *connector);
+
+ /**
+ * @phys_addr_set: set CEC physical address
+ *
+ * The callback to set CEC physical address, abstracting the operation.
+ */
+ void (*phys_addr_set)(struct drm_connector *connector, u16 addr);
+};
+
+/**
+ * struct drm_connector_hdmi_funcs - drm_hdmi_connector control functions
+ */
+struct drm_connector_hdmi_funcs {
+ /**
+ * @tmds_char_rate_valid:
+ *
+ * This callback is invoked at atomic_check time to figure out
+ * whether a particular TMDS character rate is supported by the
+ * driver.
+ *
+ * The @tmds_char_rate_valid callback is optional.
+ *
+ * Returns:
+ *
+ * Either &drm_mode_status.MODE_OK or one of the failure reasons
+ * in &enum drm_mode_status.
+ */
+ enum drm_mode_status
+ (*tmds_char_rate_valid)(const struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate);
+
+ /**
+ * @clear_infoframe:
+ *
+ * This callback is invoked through
+ * @drm_atomic_helper_connector_hdmi_update_infoframes during a
+ * commit to clear the infoframes in the hardware. It will be
+ * called multiple times, once for every disabled infoframe
+ * type.
+ *
+ * The @clear_infoframe callback is optional.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*clear_infoframe)(struct drm_connector *connector,
+ enum hdmi_infoframe_type type);
+
+ /**
+ * @write_infoframe:
+ *
+ * This callback is invoked through
+ * @drm_atomic_helper_connector_hdmi_update_infoframes during a
+ * commit to program the infoframes into the hardware. It will
+ * be called multiple times, once for every updated infoframe
+ * type.
+ *
+ * The @write_infoframe callback is mandatory.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*write_infoframe)(struct drm_connector *connector,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len);
+
+ /**
+ * @read_edid:
+ *
+ * This callback is used by the framework as a replacement for reading
+ * the EDID from connector->ddc. It is still recommended to provide
+ * connector->ddc instead of implementing this callback. Returned EDID
+ * should be freed via drm_edid_free().
+ *
+ * The @read_edid callback is optional.
+ *
+ * Returns:
+ * Valid EDID on success, NULL in case of failure.
+ */
+ const struct drm_edid *(*read_edid)(struct drm_connector *connector);
 };
 
 /**
@@ -1244,7 +1598,8 @@ struct drm_connector_funcs {
 * This will get called when a hotplug-event for a drm-connector
 * has been received from a source outside the display driver / device.
 */
- void (*oob_hotplug_event)(struct drm_connector *connector);
+ void (*oob_hotplug_event)(struct drm_connector *connector,
+ enum drm_connector_status status);
 
 /**
 * @debugfs_init:
@@ -1401,6 +1756,133 @@ struct drm_cmdline_mode {
 };
 
 /**
+ * struct drm_connector_hdmi_audio - DRM generic HDMI Codec-related structure
+ *
+ * HDMI drivers usually incorporate a HDMI Codec. This structure expresses the
+ * generic HDMI Codec as used by the DRM HDMI Codec framework.
+ */
+struct drm_connector_hdmi_audio {
+ /**
+ * @funcs:
+ *
+ * Implementation of the HDMI codec functionality to be used by the DRM
+ * HDMI Codec framework.
+ */
+ const struct drm_connector_hdmi_audio_funcs *funcs;
+
+ /**
+ * @codec_pdev:
+ *
+ * Platform device created to hold the HDMI Codec. It will be
+ * automatically unregistered during drm_connector_cleanup().
+ */
+ struct platform_device *codec_pdev;
+
+ /**
+ * @lock:
+ *
+ * Mutex to protect @last_state, @plugged_cb and @plugged_cb_dev.
+ */
+ struct mutex lock;
+
+ /**
+ * @plugged_cb:
+ *
+ * Callback to be called when the HDMI sink gets plugged to or unplugged
+ * from this connector. This is assigned by the framework when
+ * requested by the ASoC code.
+ */
+ void (*plugged_cb)(struct device *dev, bool plugged);
+
+ /**
+ * @plugged_cb_dev:
+ *
+ * The data for @plugged_cb(). It is provided by the ASoC code.
+ */
+ struct device *plugged_cb_dev;
+
+ /**
+ * @last_state:
+ *
+ * Last plugged state recorded by the framework. It is used to correctly
+ * report the state to @plugged_cb().
+ */
+ bool last_state;
+
+ /**
+ * @dai_port:
+ *
+ * The port in DT that is used for the Codec DAI.
+ */
+ int dai_port;
+};
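A hedged sketch of how a driver wires both sets of callbacks up; struct foo_device, connector_to_foo() and the foo_hw_*() helpers are hypothetical, only the callback signatures come from the structures above:

static enum drm_mode_status
foo_tmds_char_rate_valid(const struct drm_connector *connector,
			 const struct drm_display_mode *mode,
			 unsigned long long tmds_rate)
{
	/* Assume HDMI 1.4-class hardware limited to 340 MHz. */
	if (tmds_rate > 340ULL * 1000 * 1000)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

static int foo_write_infoframe(struct drm_connector *connector,
			       enum hdmi_infoframe_type type,
			       const u8 *buffer, size_t len)
{
	/* Called once per updated infoframe type during a commit. */
	return foo_hw_write_infoframe(connector_to_foo(connector),
				      type, buffer, len);
}

static const struct drm_connector_hdmi_funcs foo_hdmi_funcs = {
	.tmds_char_rate_valid	= foo_tmds_char_rate_valid,
	.write_infoframe	= foo_write_infoframe,
};

static int foo_audio_prepare(struct drm_connector *connector,
			     struct hdmi_codec_daifmt *fmt,
			     struct hdmi_codec_params *hparms)
{
	/* May be called several times while a stream is being set up. */
	return foo_hw_audio_config(connector_to_foo(connector), fmt, hparms);
}

static void foo_audio_shutdown(struct drm_connector *connector)
{
	foo_hw_audio_stop(connector_to_foo(connector));
}

/* @prepare and @shutdown are the mandatory hooks; the rest are optional. */
static const struct drm_connector_hdmi_audio_funcs foo_audio_funcs = {
	.prepare	= foo_audio_prepare,
	.shutdown	= foo_audio_shutdown,
};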
+
+/**
+ * struct drm_connector_hdmi - DRM Connector HDMI-related structure
+ */
+struct drm_connector_hdmi {
+#define DRM_CONNECTOR_HDMI_VENDOR_LEN 8
+ /**
+ * @vendor: HDMI Controller Vendor Name
+ */
+ unsigned char vendor[DRM_CONNECTOR_HDMI_VENDOR_LEN] __nonstring;
+
+#define DRM_CONNECTOR_HDMI_PRODUCT_LEN 16
+ /**
+ * @product: HDMI Controller Product Name
+ */
+ unsigned char product[DRM_CONNECTOR_HDMI_PRODUCT_LEN] __nonstring;
+
+ /**
+ * @supported_formats: Bitmask of @hdmi_colorspace
+ * supported by the controller.
+ */
+ unsigned long supported_formats;
+
+ /**
+ * @funcs: HDMI connector Control Functions
+ */
+ const struct drm_connector_hdmi_funcs *funcs;
+
+ /**
+ * @infoframes: Current Infoframes output by the connector
+ */
+ struct {
+ /**
+ * @lock: Mutex protecting against concurrent access to
+ * the infoframes, most notably between KMS and ALSA.
+ */
+ struct mutex lock;
+
+ /**
+ * @audio: Current Audio Infoframes structure. Protected
+ * by @lock.
+ */
+ struct drm_connector_hdmi_infoframe audio;
+ } infoframes;
+};
+
+/**
+ * struct drm_connector_cec - DRM Connector CEC-related structure
+ */
+struct drm_connector_cec {
+ /**
+ * @mutex: protects all fields in this structure.
+ */
+ struct mutex mutex;
+
+ /**
+ * @funcs: CEC Control Functions
+ */
+ const struct drm_connector_cec_funcs *funcs;
+
+ /**
+ * @data: CEC implementation-specific data
+ */
+ void *data;
+};
+
+/**
 * struct drm_connector - central DRM connector control structure
 *
 * Each connector may be connected to one or more CRTCs, or may be clonable by
@@ -1543,8 +2025,12 @@ struct drm_connector {
 /**
 * @edid_blob_ptr: DRM property containing EDID if present. Protected by
- * &drm_mode_config.mutex. This should be updated only by calling
+ * &drm_mode_config.mutex.
+ *
+ * This must be updated only by calling drm_edid_connector_update() or
 * drm_connector_update_edid_property().
+ *
+ * This must not be used by drivers directly.
 */
 struct drm_property_blob *edid_blob_ptr;
@@ -1583,6 +2069,11 @@ struct drm_connector {
 struct drm_property_blob *path_blob_ptr;
 
 /**
+ * @max_bpc: Maximum bits per color channel the connector supports.
+ */
+ unsigned int max_bpc;
+
+ /**
 * @max_bpc_property: Default connector property for the max bpc to be
 * driven out of the connector.
 */
@@ -1606,6 +2097,12 @@ struct drm_connector {
 */
 struct drm_property *privacy_screen_hw_state_property;
 
+ /**
+ * @broadcast_rgb_property: Connector property to set the
+ * Broadcast RGB selection to output with.
+ */
+ struct drm_property *broadcast_rgb_property;
+
 #define DRM_CONNECTOR_POLL_HPD (1 << 0)
 #define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
 #define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
@@ -1681,8 +2178,11 @@ struct drm_connector {
 struct drm_encoder *encoder;
 
 #define MAX_ELD_BYTES 128
- /** @eld: EDID-like data, if present */
+ /** @eld: EDID-like data, if present, protected by @eld_mutex */
 uint8_t eld[MAX_ELD_BYTES];
+ /** @eld_mutex: protection for concurrent access to @eld */
+ struct mutex eld_mutex;
+
 /** @latency_present: AV delay info from ELD, if found */
 bool latency_present[2];
 /**
@@ -1791,8 +2291,20 @@ struct drm_connector {
 */
 struct llist_node free_node;
 
- /** @hdr_sink_metadata: HDR Metadata Information read from sink */
- struct hdr_sink_metadata hdr_sink_metadata;
+ /**
+ * @hdmi: HDMI-related variable and properties.
+ */
+ struct drm_connector_hdmi hdmi;
+
+ /**
+ * @hdmi_audio: HDMI codec properties and non-DRM state.
+ */
+ struct drm_connector_hdmi_audio hdmi_audio;
+
+ /**
+ * @cec: CEC-related data.
+ */ + struct drm_connector_cec cec; }; #define obj_to_connector(x) container_of(x, struct drm_connector, base) @@ -1801,6 +2313,11 @@ int drm_connector_init(struct drm_device *dev, struct drm_connector *connector, const struct drm_connector_funcs *funcs, int connector_type); +int drm_connector_dynamic_init(struct drm_device *dev, + struct drm_connector *connector, + const struct drm_connector_funcs *funcs, + int connector_type, + struct i2c_adapter *ddc); int drm_connector_init_with_ddc(struct drm_device *dev, struct drm_connector *connector, const struct drm_connector_funcs *funcs, @@ -1811,8 +2328,18 @@ int drmm_connector_init(struct drm_device *dev, const struct drm_connector_funcs *funcs, int connector_type, struct i2c_adapter *ddc); +int drmm_connector_hdmi_init(struct drm_device *dev, + struct drm_connector *connector, + const char *vendor, const char *product, + const struct drm_connector_funcs *funcs, + const struct drm_connector_hdmi_funcs *hdmi_funcs, + int connector_type, + struct i2c_adapter *ddc, + unsigned long supported_formats, + unsigned int max_bpc); void drm_connector_attach_edid_property(struct drm_connector *connector); int drm_connector_register(struct drm_connector *connector); +int drm_connector_dynamic_register(struct drm_connector *connector); void drm_connector_unregister(struct drm_connector *connector); int drm_connector_attach_encoder(struct drm_connector *connector, struct drm_encoder *encoder); @@ -1888,7 +2415,8 @@ drm_connector_is_unregistered(struct drm_connector *connector) DRM_CONNECTOR_UNREGISTERED; } -void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode); +void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode, + enum drm_connector_status status); const char *drm_get_connector_type_name(unsigned int connector_type); const char *drm_get_connector_status_name(enum drm_connector_status status); const char *drm_get_subpixel_order_name(enum subpixel_order order); @@ -1920,13 +2448,16 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, u32 scaling_mode_mask); int drm_connector_attach_vrr_capable_property( struct drm_connector *connector); +int drm_connector_attach_broadcast_rgb_property(struct drm_connector *connector); int drm_connector_attach_colorspace_property(struct drm_connector *connector); int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *connector); bool drm_connector_atomic_hdr_metadata_equal(struct drm_connector_state *old_state, struct drm_connector_state *new_state); int drm_mode_create_aspect_ratio_property(struct drm_device *dev); -int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector); -int drm_mode_create_dp_colorspace_property(struct drm_connector *connector); +int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector, + u32 supported_colorspaces); +int drm_mode_create_dp_colorspace_property(struct drm_connector *connector, + u32 supported_colorspaces); int drm_mode_create_content_type_property(struct drm_device *dev); int drm_mode_create_suggested_offset_properties(struct drm_device *dev); @@ -2009,6 +2540,7 @@ void drm_connector_list_iter_end(struct drm_connector_list_iter *iter); bool drm_connector_has_possible_encoder(struct drm_connector *connector, struct drm_encoder *encoder); +const char *drm_get_colorspace_name(enum drm_colorspace colorspace); /** * drm_for_each_connector_iter - connector_list iterator macro diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 
8e1cbc75143e..66278ffeebd6 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -77,11 +77,6 @@ struct drm_plane_helper_funcs;
 * intended to indicate whether a full modeset is needed, rather than strictly
 * describing what has changed in a commit. See also:
 * drm_atomic_crtc_needs_modeset()
- *
- * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or
- * drm_helper_crtc_mode_set_base()) do not maintain many of the derived control
- * state like @plane_mask so drivers not converted over to atomic helpers should
- * not rely on these being accurate!
 */
 struct drm_crtc_state {
 /** @crtc: backpointer to the CRTC */
@@ -191,7 +186,7 @@ struct drm_crtc_state {
 * this case the driver will send the VBLANK event on its own when the
 * writeback job is complete.
 */
- bool no_vblank : 1;
+ bool no_vblank;
 
 /**
 * @plane_mask: Bitmask of drm_plane_mask(plane) of planes attached to
@@ -323,6 +318,17 @@ struct drm_crtc_state {
 enum drm_scaling_filter scaling_filter;
 
 /**
+ * @sharpness_strength:
+ *
+ * Used by userspace to set the sharpness intensity.
+ * The value ranges from 0-255.
+ * Default value is 0 which disables the sharpness feature.
+ * Any value greater than 0 enables sharpening with the
+ * specified strength.
+ */
+ u8 sharpness_strength;
+
+ /**
 * @event:
 *
 * Optional pointer to a DRM event to signal upon completion of the
@@ -1094,6 +1100,12 @@ struct drm_crtc {
 struct drm_property *scaling_filter_property;
 
 /**
+ * @sharpness_strength_property: property to set
+ * the requested sharpness intensity.
+ */
+ struct drm_property *sharpness_strength_property;
+
+ /**
 * @state:
 *
 * Current atomic state for this CRTC.
@@ -1328,5 +1340,6 @@ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
 int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
 unsigned int supported_filters);
-
+bool drm_crtc_in_clone_mode(struct drm_crtc_state *crtc_state);
+int drm_crtc_create_sharpness_strength_property(struct drm_crtc *crtc);
 #endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_damage_helper.h b/include/drm/drm_damage_helper.h
index effda42cce31..a58cbcd11276 100644
--- a/include/drm/drm_damage_helper.h
+++ b/include/drm/drm_damage_helper.h
@@ -78,7 +78,7 @@ bool
 drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
 struct drm_rect *rect);
 bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state,
- struct drm_plane_state *state,
+ const struct drm_plane_state *state,
 struct drm_rect *rect);
 
 #endif
diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h
index 7616f457ce70..ea8cba94208a 100644
--- a/include/drm/drm_debugfs.h
+++ b/include/drm/drm_debugfs.h
@@ -34,6 +34,22 @@
 #include <linux/types.h>
 #include <linux/seq_file.h>
+
+#include <drm/drm_gpuvm.h>
+
+/**
+ * DRM_DEBUGFS_GPUVA_INFO - &drm_info_list entry to dump a GPU VA space
+ * @show: the &drm_info_list's show callback
+ * @data: driver private data
+ *
+ * Drivers should use this macro to define a &drm_info_list entry to provide a
+ * debugfs file for dumping the GPU VA space regions and mappings.
+ *
+ * For each DRM GPU VA space drivers should call drm_debugfs_gpuva_info() from
+ * their @show callback.
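For instance, a driver could hook its GPU VA dump up as follows. This is only a sketch: the foo names, the device-wide gpuvm member and its lock are hypothetical; the macro defined just below and drm_debugfs_gpuva_info() come from this header:

static int foo_debugfs_gpuva(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct foo_device *foo = to_foo_device(node->minor->dev);
	int ret;

	/* Assumes the driver serializes against concurrent VM updates. */
	mutex_lock(&foo->gpuvm_lock);
	ret = drm_debugfs_gpuva_info(m, &foo->gpuvm);
	mutex_unlock(&foo->gpuvm_lock);

	return ret;
}

static const struct drm_info_list foo_debugfs_list[] = {
	DRM_DEBUGFS_GPUVA_INFO(foo_debugfs_gpuva, NULL),
};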
+ */ +#define DRM_DEBUGFS_GPUVA_INFO(show, data) {"gpuvas", show, DRIVER_GEM_GPUVA, data} + /** * struct drm_info_list - debugfs info list entry * @@ -126,14 +142,20 @@ struct drm_debugfs_entry { void drm_debugfs_create_files(const struct drm_info_list *files, int count, struct dentry *root, struct drm_minor *minor); -int drm_debugfs_remove_files(const struct drm_info_list *files, - int count, struct drm_minor *minor); +int drm_debugfs_remove_files(const struct drm_info_list *files, int count, + struct dentry *root, struct drm_minor *minor); void drm_debugfs_add_file(struct drm_device *dev, const char *name, int (*show)(struct seq_file*, void*), void *data); void drm_debugfs_add_files(struct drm_device *dev, const struct drm_debugfs_info *files, int count); + +int drm_debugfs_gpuva_info(struct seq_file *m, + struct drm_gpuvm *gpuvm); + +void drm_debugfs_clients_add(struct drm_file *file); +void drm_debugfs_clients_remove(struct drm_file *file); #else static inline void drm_debugfs_create_files(const struct drm_info_list *files, int count, struct dentry *root, @@ -141,7 +163,8 @@ static inline void drm_debugfs_create_files(const struct drm_info_list *files, {} static inline int drm_debugfs_remove_files(const struct drm_info_list *files, - int count, struct drm_minor *minor) + int count, struct dentry *root, + struct drm_minor *minor) { return 0; } @@ -155,6 +178,20 @@ static inline void drm_debugfs_add_files(struct drm_device *dev, const struct drm_debugfs_info *files, int count) {} + +static inline int drm_debugfs_gpuva_info(struct seq_file *m, + struct drm_gpuvm *gpuvm) +{ + return 0; +} + +static inline void drm_debugfs_clients_add(struct drm_file *file) +{ +} + +static inline void drm_debugfs_clients_remove(struct drm_file *file) +{ +} #endif #endif /* _DRM_DEBUGFS_H_ */ diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h index b225eeb30d05..1b4c98c2f838 100644 --- a/include/drm/drm_debugfs_crc.h +++ b/include/drm/drm_debugfs_crc.h @@ -22,13 +22,19 @@ #ifndef __DRM_DEBUGFS_CRC_H__ #define __DRM_DEBUGFS_CRC_H__ +#include <linux/spinlock_types.h> +#include <linux/types.h> +#include <linux/wait.h> + +struct drm_crtc; + #define DRM_MAX_CRC_NR 10 /** * struct drm_crtc_crc_entry - entry describing a frame's content * @has_frame_counter: whether the source was able to provide a frame number * @frame: number of the frame this CRC is about, if @has_frame_counter is true - * @crc: array of values that characterize the frame + * @crcs: array of values that characterize the frame */ struct drm_crtc_crc_entry { bool has_frame_counter; diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index 7cf4afae2e79..5af49c5c3778 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -5,8 +5,8 @@ #include <linux/kref.h> #include <linux/mutex.h> #include <linux/idr.h> +#include <linux/sched.h> -#include <drm/drm_legacy.h> #include <drm/drm_mode_config.h> struct drm_driver; @@ -22,6 +22,28 @@ struct inode; struct pci_dev; struct pci_controller; +/* + * Recovery methods for wedged device in order of less to more side-effects. + * To be used with drm_dev_wedged_event() as recovery @method. Callers can + * use any one, multiple (or'd) or none depending on their needs. + * + * Refer to "Device Wedging" chapter in Documentation/gpu/drm-uapi.rst for more + * details. 
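For example, a driver that cannot recover on its own might report both rebind and bus reset as acceptable methods. A sketch with hypothetical foo names, using the method flags defined just below and drm_dev_wedged_event() as declared in drm_drv.h later in this diff:

static void foo_report_wedged(struct foo_device *foo,
			      struct drm_wedge_task_info *info)
{
	/* Userspace may pick either method; info may be NULL if unknown. */
	drm_dev_wedged_event(&foo->drm,
			     DRM_WEDGE_RECOVERY_REBIND |
			     DRM_WEDGE_RECOVERY_BUS_RESET,
			     info);
}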
+ */
+#define DRM_WEDGE_RECOVERY_NONE BIT(0) /* optional telemetry collection */
+#define DRM_WEDGE_RECOVERY_REBIND BIT(1) /* unbind + bind driver */
+#define DRM_WEDGE_RECOVERY_BUS_RESET BIT(2) /* unbind + reset bus device + bind */
+#define DRM_WEDGE_RECOVERY_VENDOR BIT(3) /* vendor specific recovery method */
+
+/**
+ * struct drm_wedge_task_info - information about the guilty task of a wedged device
+ */
+struct drm_wedge_task_info {
+ /** @pid: pid of the task */
+ pid_t pid;
+ /** @comm: command name of the task */
+ char comm[TASK_COMM_LEN];
+};
 
 /**
 * enum switch_power_state - power state of drm device
@@ -58,6 +80,28 @@ struct drm_device {
 struct device *dev;
 
 /**
+ * @dma_dev:
+ *
+ * Device for DMA operations. Only required if the device @dev
+ * cannot perform DMA by itself. Should be NULL otherwise. Call
+ * drm_dev_dma_dev() to get the DMA device instead of using this
+ * field directly. Call drm_dev_set_dma_dev() to set this field.
+ *
+ * DRM devices are sometimes bound to virtual devices that cannot
+ * perform DMA by themselves. Drivers should set this field to the
+ * respective DMA controller.
+ *
+ * Devices on USB and other peripheral busses also cannot perform
+ * DMA by themselves. The @dma_dev field should point to the bus
+ * controller that does DMA on behalf of such a device. Required
+ * for importing buffers via dma-buf.
+ *
+ * If set, the DRM core automatically releases the reference on the
+ * device.
+ */
+ struct device *dma_dev;
+
+ /**
 * @managed:
 *
 * Managed resources linked to the lifetime of this &drm_device as
@@ -149,16 +193,6 @@ struct drm_device {
 char *unique;
 
 /**
- * @struct_mutex:
- *
- * Lock for others (not &drm_minor.master and &drm_file.is_master)
- *
- * WARNING:
- * Only drivers annotated with DRIVER_LEGACY should be using this.
- */
- struct mutex struct_mutex;
-
- /**
 * @master_mutex:
 *
 * Lock for &drm_minor.master and &drm_file.is_master
@@ -205,6 +239,14 @@ struct drm_device {
 struct list_head clientlist;
 
 /**
+ * @client_sysrq_list:
+ *
+ * Entry into list of devices registered for sysrq. Allows in-kernel
+ * clients on this device to handle sysrq keys.
+ */
+ struct list_head client_sysrq_list;
+
+ /**
 * @vblank_disable_immediate:
 *
 * If true, vblank interrupt will be disabled immediately when the
@@ -214,8 +256,9 @@ struct drm_device {
 * This can be set to true if the hardware has a working vblank counter
 * with high-precision timestamping (otherwise there are races) and the
 * driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off()
- * appropriately. See also @max_vblank_count and
- * &drm_crtc_funcs.get_vblank_counter.
+ * appropriately. Also, see @max_vblank_count,
+ * &drm_crtc_funcs.get_vblank_counter and
+ * &drm_vblank_crtc_config.disable_immediate.
 */
 bool vblank_disable_immediate;
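A sketch of the producer side for the @dma_dev field documented above; the foo names are hypothetical, drm_dev_set_dma_dev() is declared later in this diff:

static void foo_init_dma(struct foo_device *foo, struct device *dma_ctrl)
{
	/*
	 * The virtual device itself cannot do DMA; hand the core the
	 * bus controller that performs DMA on its behalf. The core
	 * takes (and later drops) its own reference on dma_ctrl.
	 */
	drm_dev_set_dma_dev(&foo->drm, dma_ctrl);
}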
@@ -312,85 +355,30 @@ struct drm_device {
 struct drm_fb_helper *fb_helper;
 
 /**
- * @debugfs_mutex:
- *
- * Protects &debugfs_list access.
- */
- struct mutex debugfs_mutex;
-
- /**
- * @debugfs_list:
+ * @debugfs_root:
 *
- * List of debugfs files to be created by the DRM device. The files
- * must be added during drm_dev_register().
+ * Root directory for debugfs files.
 */
- struct list_head debugfs_list;
-
- /* Everything below here is for legacy driver, never use! */
- /* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- /* List of devices per driver for stealth attach cleanup */
- struct list_head legacy_dev_list;
-
-#ifdef __alpha__
- /** @hose: PCI hose, only used on ALPHA platforms. */
- struct pci_controller *hose;
-#endif
-
- /* AGP data */
- struct drm_agp_head *agp;
-
- /* Context handle management - linked list of context handles */
- struct list_head ctxlist;
-
- /* Context handle management - mutex for &ctxlist */
- struct mutex ctxlist_mutex;
-
- /* Context handle management */
- struct idr ctx_idr;
-
- /* Memory management - linked list of regions */
- struct list_head maplist;
-
- /* Memory management - user token hash table for maps */
- struct drm_open_hash map_hash;
-
- /* Context handle management - list of vmas (for debugging) */
- struct list_head vmalist;
-
- /* Optional pointer for DMA support */
- struct drm_device_dma *dma;
-
- /* Context swapping flag */
- __volatile__ long context_flag;
-
- /* Last current context */
- int last_context;
-
- /* Lock for &buf_use and a few other things. */
- spinlock_t buf_lock;
-
- /* Usage counter for buffers in use -- cannot alloc */
- int buf_use;
-
- /* Buffer allocation in progress */
- atomic_t buf_alloc;
-
- struct {
- int context;
- struct drm_hw_lock *lock;
- } sigdata;
-
- struct drm_local_map *agp_buffer_map;
- unsigned int agp_buffer_token;
+ struct dentry *debugfs_root;
+};
-
- /* Scatter gather memory */
- struct drm_sg_mem *sg;
+void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev);
-
- /* IRQs */
- bool irq_enabled;
- int irq;
-#endif
-};
+/**
+ * drm_dev_dma_dev - returns the DMA device for a DRM device
+ * @dev: DRM device
+ *
+ * Returns the DMA device of the given DRM device. By default, this is
+ * the DRM device's parent. See drm_dev_set_dma_dev().
+ *
+ * Returns:
+ * A DMA-capable device for the DRM device.
+ */
+static inline struct device *drm_dev_dma_dev(struct drm_device *dev)
+{
+ if (dev->dma_dev)
+ return dev->dma_dev;
+ return dev->dev;
+}
 #endif
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
deleted file mode 100644
index 49649eb8447e..000000000000
--- a/include/drm/drm_displayid.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright © 2014 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */ -#ifndef DRM_DISPLAYID_H -#define DRM_DISPLAYID_H - -#include <linux/types.h> -#include <linux/bits.h> - -struct drm_edid; - -#define VESA_IEEE_OUI 0x3a0292 - -/* DisplayID Structure versions */ -#define DISPLAY_ID_STRUCTURE_VER_12 0x12 -#define DISPLAY_ID_STRUCTURE_VER_20 0x20 - -/* DisplayID Structure v1r2 Data Blocks */ -#define DATA_BLOCK_PRODUCT_ID 0x00 -#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01 -#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02 -#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03 -#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04 -#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05 -#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06 -#define DATA_BLOCK_VESA_TIMING 0x07 -#define DATA_BLOCK_CEA_TIMING 0x08 -#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09 -#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a -#define DATA_BLOCK_GP_ASCII_STRING 0x0b -#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c -#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d -#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e -#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f -#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10 -#define DATA_BLOCK_TILED_DISPLAY 0x12 -#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f -#define DATA_BLOCK_CTA 0x81 - -/* DisplayID Structure v2r0 Data Blocks */ -#define DATA_BLOCK_2_PRODUCT_ID 0x20 -#define DATA_BLOCK_2_DISPLAY_PARAMETERS 0x21 -#define DATA_BLOCK_2_TYPE_7_DETAILED_TIMING 0x22 -#define DATA_BLOCK_2_TYPE_8_ENUMERATED_TIMING 0x23 -#define DATA_BLOCK_2_TYPE_9_FORMULA_TIMING 0x24 -#define DATA_BLOCK_2_DYNAMIC_VIDEO_TIMING 0x25 -#define DATA_BLOCK_2_DISPLAY_INTERFACE_FEATURES 0x26 -#define DATA_BLOCK_2_STEREO_DISPLAY_INTERFACE 0x27 -#define DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY 0x28 -#define DATA_BLOCK_2_CONTAINER_ID 0x29 -#define DATA_BLOCK_2_VENDOR_SPECIFIC 0x7e -#define DATA_BLOCK_2_CTA_DISPLAY_ID 0x81 - -/* DisplayID Structure v1r2 Product Type */ -#define PRODUCT_TYPE_EXTENSION 0 -#define PRODUCT_TYPE_TEST 1 -#define PRODUCT_TYPE_PANEL 2 -#define PRODUCT_TYPE_MONITOR 3 -#define PRODUCT_TYPE_TV 4 -#define PRODUCT_TYPE_REPEATER 5 -#define PRODUCT_TYPE_DIRECT_DRIVE 6 - -/* DisplayID Structure v2r0 Display Product Primary Use Case (~Product Type) */ -#define PRIMARY_USE_EXTENSION 0 -#define PRIMARY_USE_TEST 1 -#define PRIMARY_USE_GENERIC 2 -#define PRIMARY_USE_TV 3 -#define PRIMARY_USE_DESKTOP_PRODUCTIVITY 4 -#define PRIMARY_USE_DESKTOP_GAMING 5 -#define PRIMARY_USE_PRESENTATION 6 -#define PRIMARY_USE_HEAD_MOUNTED_VR 7 -#define PRIMARY_USE_HEAD_MOUNTED_AR 8 - -struct displayid_header { - u8 rev; - u8 bytes; - u8 prod_id; - u8 ext_count; -} __packed; - -struct displayid_block { - u8 tag; - u8 rev; - u8 num_bytes; -} __packed; - -struct displayid_tiled_block { - struct displayid_block base; - u8 tile_cap; - u8 topo[3]; - u8 tile_size[4]; - u8 tile_pixel_bezel[5]; - u8 topology_id[8]; -} __packed; - -struct displayid_detailed_timings_1 { - u8 pixel_clock[3]; - u8 flags; - u8 hactive[2]; - u8 hblank[2]; - u8 hsync[2]; - u8 hsw[2]; - u8 vactive[2]; - u8 vblank[2]; - u8 vsync[2]; - u8 vsw[2]; -} __packed; - -struct displayid_detailed_timing_block { - struct displayid_block base; - struct displayid_detailed_timings_1 timings[]; -}; - -#define DISPLAYID_VESA_MSO_OVERLAP GENMASK(3, 0) -#define DISPLAYID_VESA_MSO_MODE GENMASK(6, 5) - -struct displayid_vesa_vendor_specific_block { - struct displayid_block base; - u8 oui[3]; - u8 data_structure_type; - u8 mso; -} __packed; - -/* DisplayID iteration */ -struct displayid_iter { - const struct drm_edid *drm_edid; - - const u8 *section; - int length; - int idx; - int ext_index; -}; - 
-void displayid_iter_edid_begin(const struct drm_edid *drm_edid, - struct displayid_iter *iter); -const struct displayid_block * -__displayid_iter_next(struct displayid_iter *iter); -#define displayid_iter_for_each(__block, __iter) \ - while (((__block) = __displayid_iter_next(__iter))) -void displayid_iter_end(struct displayid_iter *iter); - -#endif diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 1d76d0686b03..42fc085f986d 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -34,6 +34,9 @@ #include <drm/drm_device.h> +struct dmem_cgroup_region; +struct drm_fb_helper; +struct drm_fb_helper_surface_size; struct drm_file; struct drm_gem_object; struct drm_master; @@ -104,6 +107,21 @@ enum drm_driver_feature { * acceleration should be handled by two drivers that are connected using auxiliary bus. */ DRIVER_COMPUTE_ACCEL = BIT(7), + /** + * @DRIVER_GEM_GPUVA: + * + * Driver supports user defined GPU VA bindings for GEM objects. + */ + DRIVER_GEM_GPUVA = BIT(8), + /** + * @DRIVER_CURSOR_HOTSPOT: + * + * Driver supports and requires cursor hotspot information in the + * cursor plane (e.g. cursor plane has to actually track the mouse + * cursor and the clients are required to set hotspot in order for + * the cursor planes to work correctly). + */ + DRIVER_CURSOR_HOTSPOT = BIT(9), /* IMPORTANT: Below are all the legacy flags, add new ones above. */ @@ -214,34 +232,6 @@ struct drm_driver { void (*postclose) (struct drm_device *, struct drm_file *); /** - * @lastclose: - * - * Called when the last &struct drm_file has been closed and there's - * currently no userspace client for the &struct drm_device. - * - * Modern drivers should only use this to force-restore the fbdev - * framebuffer using drm_fb_helper_restore_fbdev_mode_unlocked(). - * Anything else would indicate there's something seriously wrong. - * Modern drivers can also use this to execute delayed power switching - * state changes, e.g. in conjunction with the :ref:`vga_switcheroo` - * infrastructure. - * - * This is called after @postclose hook has been called. - * - * NOTE: - * - * All legacy drivers use this callback to de-initialize the hardware. - * This is purely because of the shadow-attach model, where the DRM - * kernel driver does not really own the hardware. Instead ownershipe is - * handled with the help of userspace through an inheritedly racy dance - * to set/unset the VT into raw mode. - * - * Legacy drivers initialize the hardware in the @firstopen callback, - * which isn't even called for modern drivers. - */ - void (*lastclose) (struct drm_device *); - - /** * @unload: * * Reverse the effects of the driver load callback. Ideally, @@ -304,22 +294,14 @@ struct drm_driver { /** * @prime_handle_to_fd: * - * Main PRIME export function. Should be implemented with - * drm_gem_prime_handle_to_fd() for GEM based drivers. - * - * For an in-depth discussion see :ref:`PRIME buffer sharing - * documentation <prime_buffer_sharing>`. + * PRIME export function. Only used by vmwgfx. */ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); /** * @prime_fd_to_handle: * - * Main PRIME import function. Should be implemented with - * drm_gem_prime_fd_to_handle() for GEM based drivers. - * - * For an in-depth discussion see :ref:`PRIME buffer sharing - * documentation <prime_buffer_sharing>`. + * PRIME import function. Only used by vmwgfx. 
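Put together, a modern GEM driver ends up advertising the new feature bits from earlier in this file's diff in its &drm_driver. A hedged sketch only; foo_fops and the exact feature selection are illustrative, not a template every driver needs:

static const struct drm_driver foo_drm_driver = {
	/* DRIVER_GEM_GPUVA and DRIVER_CURSOR_HOTSPOT are the new bits. */
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
			   DRIVER_GEM_GPUVA | DRIVER_CURSOR_HOTSPOT,
	.fops		 = &foo_fops,
	.name		 = "foo",
	.desc		 = "Hypothetical example driver",
	.major		 = 1,
	.minor		 = 0,
};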
 */
 int (*prime_fd_to_handle)(struct drm_device *dev,
 struct drm_file *file_priv,
 int prime_fd, uint32_t *handle);
@@ -343,20 +325,6 @@ struct drm_driver {
 struct drm_device *dev,
 struct dma_buf_attachment *attach,
 struct sg_table *sgt);
- /**
- * @gem_prime_mmap:
- *
- * mmap hook for GEM drivers, used to implement dma-buf mmap in the
- * PRIME helpers.
- *
- * This hook only exists for historical reasons. Drivers must use
- * drm_gem_prime_mmap() to implement it.
- *
- * FIXME: Convert all drivers to implement mmap in struct
- * &drm_gem_object_funcs and inline drm_gem_prime_mmap() into
- * its callers. This hook should be removed afterwards.
- */
- int (*gem_prime_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
 
 /**
 * @dumb_create:
@@ -400,25 +368,29 @@ struct drm_driver {
 int (*dumb_map_offset)(struct drm_file *file_priv,
 struct drm_device *dev, uint32_t handle,
 uint64_t *offset);
+
 /**
- * @dumb_destroy:
- *
- * This destroys the userspace handle for the given dumb backing storage buffer.
- * Since buffer objects must be reference counted in the kernel a buffer object
- * won't be immediately freed if a framebuffer modeset object still uses it.
+ * @fbdev_probe:
 *
- * Called by the user via ioctl.
+ * Allocates and initializes the fb_info structure for fbdev emulation.
+ * Furthermore it also needs to allocate the DRM framebuffer used to
+ * back the fbdev.
 *
- * The default implementation is drm_gem_dumb_destroy(). GEM based drivers
- * must not overwrite this.
+ * This callback is mandatory for fbdev support.
 *
 * Returns:
 *
- * Zero on success, negative errno on failure.
+ * 0 on success or a negative error code otherwise.
+ */
+ int (*fbdev_probe)(struct drm_fb_helper *fbdev_helper,
+ struct drm_fb_helper_surface_size *sizes);
+
+ /**
+ * @show_fdinfo:
+ *
+ * Print device specific fdinfo. See Documentation/gpu/drm-usage-stats.rst.
 */
- int (*dumb_destroy)(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
+ void (*show_fdinfo)(struct drm_printer *p, struct drm_file *f);
 
 /** @major: driver major number */
 int major;
@@ -430,8 +402,6 @@ struct drm_driver {
 char *name;
 /** @desc: driver description */
 char *desc;
- /** @date: driver date */
- char *date;
 
 /**
 * @driver_features:
@@ -461,31 +431,16 @@ struct drm_driver {
 * some examples.
 */
 const struct file_operations *fops;
-
-#ifdef CONFIG_DRM_LEGACY
- /* Everything below here is for legacy driver, never use!
*/ - /* private: */ - - int (*firstopen) (struct drm_device *); - void (*preclose) (struct drm_device *, struct drm_file *file_priv); - int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); - int (*dma_quiescent) (struct drm_device *); - int (*context_dtor) (struct drm_device *dev, int context); - irqreturn_t (*irq_handler)(int irq, void *arg); - void (*irq_preinstall)(struct drm_device *dev); - int (*irq_postinstall)(struct drm_device *dev); - void (*irq_uninstall)(struct drm_device *dev); - u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe); - int (*enable_vblank)(struct drm_device *dev, unsigned int pipe); - void (*disable_vblank)(struct drm_device *dev, unsigned int pipe); - int dev_priv_size; -#endif }; void *__devm_drm_dev_alloc(struct device *parent, const struct drm_driver *driver, size_t size, size_t offset); +struct dmem_cgroup_region * +drmm_cgroup_register_region(struct drm_device *dev, + const char *region_name, u64 size); + /** * devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance * @parent: Parent device object @@ -518,6 +473,11 @@ void *__devm_drm_dev_alloc(struct device *parent, struct drm_device *drm_dev_alloc(const struct drm_driver *driver, struct device *parent); + +void *__drm_dev_alloc(struct device *parent, + const struct drm_driver *driver, + size_t size, size_t offset); + int drm_dev_register(struct drm_device *dev, unsigned long flags); void drm_dev_unregister(struct drm_device *dev); @@ -527,6 +487,8 @@ void drm_put_dev(struct drm_device *dev); bool drm_dev_enter(struct drm_device *dev, int *idx); void drm_dev_exit(int idx); void drm_dev_unplug(struct drm_device *dev); +int drm_dev_wedged_event(struct drm_device *dev, unsigned long method, + struct drm_wedge_task_info *info); /** * drm_dev_is_unplugged - is a DRM device unplugged @@ -603,12 +565,33 @@ static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) } -int drm_dev_set_unique(struct drm_device *dev, const char *name); - /* TODO: Inline drm_firmware_drivers_only() in all its callers. 
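Returning to devm_drm_dev_alloc() above: the embedding pattern it documents, sketched with hypothetical foo names:

struct foo_device {
	struct drm_device drm;	/* must be an embedded member */
	void __iomem *mmio;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_device *foo;

	/* Allocates foo and initializes foo->drm with managed lifetime. */
	foo = devm_drm_dev_alloc(&pdev->dev, &foo_drm_driver,
				 struct foo_device, drm);
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	return drm_dev_register(&foo->drm, 0);
}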
*/ static inline bool drm_firmware_drivers_only(void) { return video_firmware_drivers_only(); } +#if defined(CONFIG_DEBUG_FS) +void drm_debugfs_dev_init(struct drm_device *dev); +void drm_debugfs_init_root(void); +void drm_debugfs_remove_root(void); +void drm_debugfs_bridge_params(void); +#else +static inline void drm_debugfs_dev_init(struct drm_device *dev) +{ +} + +static inline void drm_debugfs_init_root(void) +{ +} + +static inline void drm_debugfs_remove_root(void) +{ +} + +static inline void drm_debugfs_bridge_params(void) +{ +} +#endif + #endif diff --git a/include/drm/drm_dumb_buffers.h b/include/drm/drm_dumb_buffers.h new file mode 100644 index 000000000000..1f3a8236fb3d --- /dev/null +++ b/include/drm/drm_dumb_buffers.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef __DRM_DUMB_BUFFERS_H__ +#define __DRM_DUMB_BUFFERS_H__ + +struct drm_device; +struct drm_mode_create_dumb; + +int drm_mode_size_dumb(struct drm_device *dev, + struct drm_mode_create_dumb *args, + unsigned long hw_pitch_align, + unsigned long hw_size_align); + +#endif diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 70ae6c290bdc..04f7a7f1f108 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -24,11 +24,15 @@ #define __DRM_EDID_H__ #include <linux/types.h> -#include <linux/hdmi.h> -#include <drm/drm_mode.h> +enum hdmi_quantization_range; +struct drm_connector; struct drm_device; +struct drm_display_mode; struct drm_edid; +struct drm_printer; +struct hdmi_avi_infoframe; +struct hdmi_vendor_infoframe; struct i2c_adapter; #define EDID_LENGTH 128 @@ -46,7 +50,7 @@ struct est_timings { u8 t1; u8 t2; u8 mfg_rsvd; -} __attribute__((packed)); +} __packed; /* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */ #define EDID_TIMING_ASPECT_SHIFT 6 @@ -59,7 +63,7 @@ struct est_timings { struct std_timing { u8 hsize; /* need to multiply by 8 then add 248 */ u8 vfreq_aspect; -} __attribute__((packed)); +} __packed; #define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1) #define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2) @@ -85,12 +89,12 @@ struct detailed_pixel_timing { u8 hborder; u8 vborder; u8 misc; -} __attribute__((packed)); +} __packed; /* If it's not pixel timing, it'll be one of the below */ struct detailed_data_string { u8 str[13]; -} __attribute__((packed)); +} __packed; #define DRM_EDID_RANGE_OFFSET_MIN_VFREQ (1 << 0) /* 1.4 */ #define DRM_EDID_RANGE_OFFSET_MAX_VFREQ (1 << 1) /* 1.4 */ @@ -105,6 +109,13 @@ struct detailed_data_string { #define DRM_EDID_CVT_FLAGS_STANDARD_BLANKING (1 << 3) #define DRM_EDID_CVT_FLAGS_REDUCED_BLANKING (1 << 4) +enum drm_edid_quirk { + /* Do a dummy read before DPCD accesses, to prevent corruption. 
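A sketch of how a DP driver might consume the quirk defined just below via drm_edid_has_quirk(), declared at the end of this header; foo_dp_enable_dpcd_probing() is a hypothetical driver helper:

static void foo_dp_apply_edid_quirks(struct drm_connector *connector,
				     struct foo_dp *dp)
{
	/* Have AUX reads issue a dummy access first, per the quirk. */
	if (drm_edid_has_quirk(connector, DRM_EDID_QUIRK_DP_DPCD_PROBE))
		foo_dp_enable_dpcd_probing(dp);
}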
*/ + DRM_EDID_QUIRK_DP_DPCD_PROBE, + + DRM_EDID_QUIRK_NUM, +}; + struct detailed_data_monitor_range { u8 min_vfreq; u8 max_vfreq; @@ -120,7 +131,7 @@ struct detailed_data_monitor_range { __le16 m; u8 k; u8 j; /* need to divide by 2 */ - } __attribute__((packed)) gtf2; + } __packed gtf2; struct { u8 version; u8 data1; /* high 6 bits: extra clock resolution */ @@ -129,27 +140,27 @@ struct detailed_data_monitor_range { u8 flags; /* preferred aspect and blanking support */ u8 supported_scalings; u8 preferred_refresh; - } __attribute__((packed)) cvt; - } __attribute__((packed)) formula; -} __attribute__((packed)); + } __packed cvt; + } __packed formula; +} __packed; struct detailed_data_wpindex { u8 white_yx_lo; /* Lower 2 bits each */ u8 white_x_hi; u8 white_y_hi; u8 gamma; /* need to divide by 100 then add 1 */ -} __attribute__((packed)); +} __packed; struct detailed_data_color_point { u8 windex1; u8 wpindex1[3]; u8 windex2; u8 wpindex2[3]; -} __attribute__((packed)); +} __packed; struct cvt_timing { u8 code[3]; -} __attribute__((packed)); +} __packed; struct detailed_non_pixel { u8 pad1; @@ -163,8 +174,8 @@ struct detailed_non_pixel { struct detailed_data_wpindex color; struct std_timing timings[6]; struct cvt_timing cvt[4]; - } __attribute__((packed)) data; -} __attribute__((packed)); + } __packed data; +} __packed; #define EDID_DETAIL_EST_TIMINGS 0xf7 #define EDID_DETAIL_CVT_3BYTE 0xf8 @@ -181,8 +192,8 @@ struct detailed_timing { union { struct detailed_pixel_timing pixel_data; struct detailed_non_pixel other_data; - } __attribute__((packed)) data; -} __attribute__((packed)); + } __packed data; +} __packed; #define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0) #define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1) @@ -269,72 +280,27 @@ struct detailed_timing { #define DRM_EDID_DSC_MAX_SLICES 0xf #define DRM_EDID_DSC_TOTAL_CHUNK_KBYTES 0x3f -/* ELD Header Block */ -#define DRM_ELD_HEADER_BLOCK_SIZE 4 - -#define DRM_ELD_VER 0 -# define DRM_ELD_VER_SHIFT 3 -# define DRM_ELD_VER_MASK (0x1f << 3) -# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */ -# define DRM_ELD_VER_CANNED (0x1f << 3) - -#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! 
*/ - -/* ELD Baseline Block for ELD_Ver == 2 */ -#define DRM_ELD_CEA_EDID_VER_MNL 4 -# define DRM_ELD_CEA_EDID_VER_SHIFT 5 -# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5) -# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5) -# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5) -# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5) -# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5) -# define DRM_ELD_MNL_SHIFT 0 -# define DRM_ELD_MNL_MASK (0x1f << 0) - -#define DRM_ELD_SAD_COUNT_CONN_TYPE 5 -# define DRM_ELD_SAD_COUNT_SHIFT 4 -# define DRM_ELD_SAD_COUNT_MASK (0xf << 4) -# define DRM_ELD_CONN_TYPE_SHIFT 2 -# define DRM_ELD_CONN_TYPE_MASK (3 << 2) -# define DRM_ELD_CONN_TYPE_HDMI (0 << 2) -# define DRM_ELD_CONN_TYPE_DP (1 << 2) -# define DRM_ELD_SUPPORTS_AI (1 << 1) -# define DRM_ELD_SUPPORTS_HDCP (1 << 0) - -#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */ -# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */ - -#define DRM_ELD_SPEAKER 7 -# define DRM_ELD_SPEAKER_MASK 0x7f -# define DRM_ELD_SPEAKER_RLRC (1 << 6) -# define DRM_ELD_SPEAKER_FLRC (1 << 5) -# define DRM_ELD_SPEAKER_RC (1 << 4) -# define DRM_ELD_SPEAKER_RLR (1 << 3) -# define DRM_ELD_SPEAKER_FC (1 << 2) -# define DRM_ELD_SPEAKER_LFE (1 << 1) -# define DRM_ELD_SPEAKER_FLR (1 << 0) - -#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */ -# define DRM_ELD_PORT_ID_LEN 8 - -#define DRM_ELD_MANUFACTURER_NAME0 16 -#define DRM_ELD_MANUFACTURER_NAME1 17 - -#define DRM_ELD_PRODUCT_CODE0 18 -#define DRM_ELD_PRODUCT_CODE1 19 - -#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */ - -#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad)) +struct drm_edid_product_id { + __be16 manufacturer_name; + __le16 product_code; + __le32 serial_number; + u8 week_of_manufacture; + u8 year_of_manufacture; +} __packed; struct edid { u8 header[8]; /* Vendor & product info */ - u8 mfg_id[2]; - u8 prod_code[2]; - u32 serial; /* FIXME: byte order */ - u8 mfg_week; - u8 mfg_year; + union { + struct drm_edid_product_id product_id; + struct { + u8 mfg_id[2]; + u8 prod_code[2]; + u32 serial; /* FIXME: byte order */ + u8 mfg_week; + u8 mfg_year; + } __packed; + } __packed; /* EDID version */ u8 version; u8 revision; @@ -365,7 +331,20 @@ struct edid { u8 extensions; /* Checksum */ u8 checksum; -} __attribute__((packed)); +} __packed; + +/* EDID matching */ +struct drm_edid_ident { + /* ID encoded by drm_edid_encode_panel_id() */ + u32 panel_id; + const char *name; +}; + +#define DRM_EDID_IDENT_INIT(_vend_chr_0, _vend_chr_1, _vend_chr_2, _product_id, _name) \ +{ \ + .panel_id = drm_edid_encode_panel_id(_vend_chr_0, _vend_chr_1, _vend_chr_2, _product_id), \ + .name = _name, \ +} #define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8)) @@ -377,23 +356,11 @@ struct cea_sad { u8 byte2; /* meaning depends on format */ }; -struct drm_encoder; -struct drm_connector; -struct drm_connector_state; -struct drm_display_mode; - int drm_edid_to_sad(const struct edid *edid, struct cea_sad **sads); int drm_edid_to_speaker_allocation(const struct edid *edid, u8 **sadb); int drm_av_sync_delay(struct drm_connector *connector, const struct drm_display_mode *mode); -#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE -int __drm_set_edid_firmware_path(const char *path); -int __drm_get_edid_firmware_path(char *buf, size_t bufsize); -#endif - -bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2); - int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, const struct drm_connector *connector, @@ -410,96 +377,6 @@ 
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, enum hdmi_quantization_range rgb_quant_range); /** - * drm_eld_mnl - Get ELD monitor name length in bytes. - * @eld: pointer to an eld memory structure with mnl set - */ -static inline int drm_eld_mnl(const uint8_t *eld) -{ - return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; -} - -/** - * drm_eld_sad - Get ELD SAD structures. - * @eld: pointer to an eld memory structure with sad_count set - */ -static inline const uint8_t *drm_eld_sad(const uint8_t *eld) -{ - unsigned int ver, mnl; - - ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT; - if (ver != 2 && ver != 31) - return NULL; - - mnl = drm_eld_mnl(eld); - if (mnl > 16) - return NULL; - - return eld + DRM_ELD_CEA_SAD(mnl, 0); -} - -/** - * drm_eld_sad_count - Get ELD SAD count. - * @eld: pointer to an eld memory structure with sad_count set - */ -static inline int drm_eld_sad_count(const uint8_t *eld) -{ - return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >> - DRM_ELD_SAD_COUNT_SHIFT; -} - -/** - * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes - * @eld: pointer to an eld memory structure with mnl and sad_count set - * - * This is a helper for determining the payload size of the baseline block, in - * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block. - */ -static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld) -{ - return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE + - drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3; -} - -/** - * drm_eld_size - Get ELD size in bytes - * @eld: pointer to a complete eld memory structure - * - * The returned value does not include the vendor block. It's vendor specific, - * and comprises of the remaining bytes in the ELD memory buffer after - * drm_eld_size() bytes of header and baseline block. - * - * The returned value is guaranteed to be a multiple of 4. - */ -static inline int drm_eld_size(const uint8_t *eld) -{ - return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; -} - -/** - * drm_eld_get_spk_alloc - Get speaker allocation - * @eld: pointer to an ELD memory structure - * - * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER - * field definitions to identify speakers. - */ -static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld) -{ - return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK; -} - -/** - * drm_eld_get_conn_type - Get device type hdmi/dp connected - * @eld: pointer to an ELD memory structure - * - * The caller need to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to - * identify the display type connected. 
- */ -static inline u8 drm_eld_get_conn_type(const uint8_t *eld) -{ - return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK; -} - -/** * drm_edid_decode_mfg_id - Decode the manufacturer ID * @mfg_id: The manufacturer ID * @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0' @@ -559,13 +436,8 @@ static inline void drm_edid_decode_panel_id(u32 panel_id, char vend[4], u16 *pro } bool drm_probe_ddc(struct i2c_adapter *adapter); -struct edid *drm_do_get_edid(struct drm_connector *connector, - int (*get_edid_block)(void *data, u8 *buf, unsigned int block, - size_t len), - void *data); struct edid *drm_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter); -u32 drm_edid_get_panel_id(struct i2c_adapter *adapter); struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, struct i2c_adapter *adapter); struct edid *drm_edid_duplicate(const struct edid *edid); @@ -578,13 +450,9 @@ bool drm_detect_monitor_audio(const struct edid *edid); enum hdmi_quantization_range drm_default_rgb_quant_range(const struct drm_display_mode *mode); int drm_add_modes_noedid(struct drm_connector *connector, - int hdisplay, int vdisplay); -void drm_set_preferred_mode(struct drm_connector *connector, - int hpref, int vpref); + unsigned int hdisplay, unsigned int vdisplay); int drm_edid_header_is_valid(const void *edid); -bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid, - bool *edid_corrupt); bool drm_edid_is_valid(struct edid *edid); void drm_edid_get_monitor_name(const struct edid *edid, char *name, int buflen); @@ -607,11 +475,20 @@ const struct drm_edid *drm_edid_read_ddc(struct drm_connector *connector, const struct drm_edid *drm_edid_read_custom(struct drm_connector *connector, int (*read_block)(void *context, u8 *buf, unsigned int block, size_t len), void *context); +const struct drm_edid *drm_edid_read_base_block(struct i2c_adapter *adapter); +const struct drm_edid *drm_edid_read_switcheroo(struct drm_connector *connector, + struct i2c_adapter *adapter); int drm_edid_connector_update(struct drm_connector *connector, const struct drm_edid *edid); int drm_edid_connector_add_modes(struct drm_connector *connector); - -const u8 *drm_find_edid_extension(const struct drm_edid *drm_edid, - int ext_id, int *ext_index); +bool drm_edid_is_digital(const struct drm_edid *drm_edid); +void drm_edid_get_product_id(const struct drm_edid *drm_edid, + struct drm_edid_product_id *id); +void drm_edid_print_product_id(struct drm_printer *p, + const struct drm_edid_product_id *id, bool raw); +u32 drm_edid_get_panel_id(const struct drm_edid *drm_edid); +bool drm_edid_match(const struct drm_edid *drm_edid, + const struct drm_edid_ident *ident); +bool drm_edid_has_quirk(struct drm_connector *connector, enum drm_edid_quirk quirk); #endif /* __DRM_EDID_H__ */ diff --git a/include/drm/drm_eld.h b/include/drm/drm_eld.h new file mode 100644 index 000000000000..0a88d10b28b0 --- /dev/null +++ b/include/drm/drm_eld.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __DRM_ELD_H__ +#define __DRM_ELD_H__ + +#include <linux/types.h> + +struct cea_sad; + +/* ELD Header Block */ +#define DRM_ELD_HEADER_BLOCK_SIZE 4 + +#define DRM_ELD_VER 0 +# define DRM_ELD_VER_SHIFT 3 +# define DRM_ELD_VER_MASK (0x1f << 3) +# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */ +# define DRM_ELD_VER_CANNED (0x1f << 3) + +#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! 
*/ + +/* ELD Baseline Block for ELD_Ver == 2 */ +#define DRM_ELD_CEA_EDID_VER_MNL 4 +# define DRM_ELD_CEA_EDID_VER_SHIFT 5 +# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5) +# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5) +# define DRM_ELD_MNL_SHIFT 0 +# define DRM_ELD_MNL_MASK (0x1f << 0) + +#define DRM_ELD_SAD_COUNT_CONN_TYPE 5 +# define DRM_ELD_SAD_COUNT_SHIFT 4 +# define DRM_ELD_SAD_COUNT_MASK (0xf << 4) +# define DRM_ELD_CONN_TYPE_SHIFT 2 +# define DRM_ELD_CONN_TYPE_MASK (3 << 2) +# define DRM_ELD_CONN_TYPE_HDMI (0 << 2) +# define DRM_ELD_CONN_TYPE_DP (1 << 2) +# define DRM_ELD_SUPPORTS_AI (1 << 1) +# define DRM_ELD_SUPPORTS_HDCP (1 << 0) + +#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */ +# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */ + +#define DRM_ELD_SPEAKER 7 +# define DRM_ELD_SPEAKER_MASK 0x7f +# define DRM_ELD_SPEAKER_RLRC (1 << 6) +# define DRM_ELD_SPEAKER_FLRC (1 << 5) +# define DRM_ELD_SPEAKER_RC (1 << 4) +# define DRM_ELD_SPEAKER_RLR (1 << 3) +# define DRM_ELD_SPEAKER_FC (1 << 2) +# define DRM_ELD_SPEAKER_LFE (1 << 1) +# define DRM_ELD_SPEAKER_FLR (1 << 0) + +#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */ +# define DRM_ELD_PORT_ID_LEN 8 + +#define DRM_ELD_MANUFACTURER_NAME0 16 +#define DRM_ELD_MANUFACTURER_NAME1 17 + +#define DRM_ELD_PRODUCT_CODE0 18 +#define DRM_ELD_PRODUCT_CODE1 19 + +#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */ + +#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad)) + +/** + * drm_eld_mnl - Get ELD monitor name length in bytes. + * @eld: pointer to an eld memory structure with mnl set + */ +static inline int drm_eld_mnl(const u8 *eld) +{ + return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; +} + +int drm_eld_sad_get(const u8 *eld, int sad_index, struct cea_sad *cta_sad); +int drm_eld_sad_set(u8 *eld, int sad_index, const struct cea_sad *cta_sad); + +/** + * drm_eld_sad - Get ELD SAD structures. + * @eld: pointer to an eld memory structure with sad_count set + */ +static inline const u8 *drm_eld_sad(const u8 *eld) +{ + unsigned int ver, mnl; + + ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT; + if (ver != 2 && ver != 31) + return NULL; + + mnl = drm_eld_mnl(eld); + if (mnl > 16) + return NULL; + + return eld + DRM_ELD_CEA_SAD(mnl, 0); +} + +/** + * drm_eld_sad_count - Get ELD SAD count. + * @eld: pointer to an eld memory structure with sad_count set + */ +static inline int drm_eld_sad_count(const u8 *eld) +{ + return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >> + DRM_ELD_SAD_COUNT_SHIFT; +} + +/** + * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes + * @eld: pointer to an eld memory structure with mnl and sad_count set + * + * This is a helper for determining the payload size of the baseline block, in + * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block. + */ +static inline int drm_eld_calc_baseline_block_size(const u8 *eld) +{ + return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE + + drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3; +} + +/** + * drm_eld_size - Get ELD size in bytes + * @eld: pointer to a complete eld memory structure + * + * The returned value does not include the vendor block. 
The vendor block is vendor-specific + * and consists of the remaining bytes in the ELD memory buffer after + * drm_eld_size() bytes of header and baseline block. + * + * The returned value is guaranteed to be a multiple of 4. + */ +static inline int drm_eld_size(const u8 *eld) +{ + return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; +} + +/** + * drm_eld_get_spk_alloc - Get speaker allocation + * @eld: pointer to an ELD memory structure + * + * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER + * field definitions to identify speakers. + */ +static inline u8 drm_eld_get_spk_alloc(const u8 *eld) +{ + return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK; +} + +/** + * drm_eld_get_conn_type - Get device type hdmi/dp connected + * @eld: pointer to an ELD memory structure + * + * The caller needs to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to + * identify the display type connected. + */ +static inline u8 drm_eld_get_conn_type(const u8 *eld) +{ + return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK; +} + +#endif /* __DRM_ELD_H__ */ diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h index 3a09682af685..977a9381c8ba 100644 --- a/include/drm/drm_encoder.h +++ b/include/drm/drm_encoder.h @@ -60,7 +60,7 @@ struct drm_encoder_funcs { * @late_register: * * This optional hook can be used to register additional userspace - * interfaces attached to the encoder like debugfs interfaces. + * interfaces attached to the encoder. * It is called late in the driver load sequence from drm_dev_register(). * Everything added from this callback should be unregistered in * the early_unregister callback. @@ -81,6 +81,13 @@ struct drm_encoder_funcs { * before data structures are torndown. */ void (*early_unregister)(struct drm_encoder *encoder); + + /** + * @debugfs_init: + * + * Allows encoders to create encoder-specific debugfs files. + */ + void (*debugfs_init)(struct drm_encoder *encoder, struct dentry *root); }; /** @@ -184,6 +191,13 @@ struct drm_encoder { const struct drm_encoder_funcs *funcs; const struct drm_encoder_helper_funcs *helper_private; + + /** + * @debugfs_entry: + * + * Debugfs directory for this encoder. + */ + struct dentry *debugfs_entry; }; #define obj_to_encoder(x) container_of(x, struct drm_encoder, base) diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h deleted file mode 100644 index 7214101fd731..000000000000 --- a/include/drm/drm_encoder_slave.h +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Copyright (C) 2009 Francisco Jerez. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef __DRM_ENCODER_SLAVE_H__ -#define __DRM_ENCODER_SLAVE_H__ - -#include <linux/i2c.h> - -#include <drm/drm_crtc.h> -#include <drm/drm_encoder.h> - -/** - * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver - * @set_config: Initialize any encoder-specific modesetting parameters. - * The meaning of the @params parameter is implementation - * dependent. It will usually be a structure with DVO port - * data format settings or timings. It's not required for - * the new parameters to take effect until the next mode - * is set. - * - * Most of its members are analogous to the function pointers in - * &drm_encoder_helper_funcs and they can optionally be used to - * initialize the latter. Connector-like methods (e.g. @get_modes and - * @set_property) will typically be wrapped around and only be called - * if the encoder is the currently selected one for the connector. - */ -struct drm_encoder_slave_funcs { - void (*set_config)(struct drm_encoder *encoder, - void *params); - - void (*destroy)(struct drm_encoder *encoder); - void (*dpms)(struct drm_encoder *encoder, int mode); - void (*save)(struct drm_encoder *encoder); - void (*restore)(struct drm_encoder *encoder); - bool (*mode_fixup)(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - int (*mode_valid)(struct drm_encoder *encoder, - struct drm_display_mode *mode); - void (*mode_set)(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - - enum drm_connector_status (*detect)(struct drm_encoder *encoder, - struct drm_connector *connector); - int (*get_modes)(struct drm_encoder *encoder, - struct drm_connector *connector); - int (*create_resources)(struct drm_encoder *encoder, - struct drm_connector *connector); - int (*set_property)(struct drm_encoder *encoder, - struct drm_connector *connector, - struct drm_property *property, - uint64_t val); - -}; - -/** - * struct drm_encoder_slave - Slave encoder struct - * @base: DRM encoder object. - * @slave_funcs: Slave encoder callbacks. - * @slave_priv: Slave encoder private data. - * @bus_priv: Bus specific data. - * - * A &drm_encoder_slave has two sets of callbacks, @slave_funcs and the - * ones in @base. The former are never actually called by the common - * CRTC code, it's just a convenience for splitting the encoder - * functions in an upper, GPU-specific layer and a (hopefully) - * GPU-agnostic lower layer: It's the GPU driver responsibility to - * call the slave methods when appropriate. - * - * drm_i2c_encoder_init() provides a way to get an implementation of - * this. - */ -struct drm_encoder_slave { - struct drm_encoder base; - - const struct drm_encoder_slave_funcs *slave_funcs; - void *slave_priv; - void *bus_priv; -}; -#define to_encoder_slave(x) container_of((x), struct drm_encoder_slave, base) - -int drm_i2c_encoder_init(struct drm_device *dev, - struct drm_encoder_slave *encoder, - struct i2c_adapter *adap, - const struct i2c_board_info *info); - - -/** - * struct drm_i2c_encoder_driver - * - * Describes a device driver for an encoder connected to the GPU - * through an I2C bus. 
In addition to the entry points in @i2c_driver - * an @encoder_init function should be provided. It will be called to - * give the driver an opportunity to allocate any per-encoder data - * structures and to initialize the @slave_funcs and (optionally) - * @slave_priv members of @encoder. - */ -struct drm_i2c_encoder_driver { - struct i2c_driver i2c_driver; - - int (*encoder_init)(struct i2c_client *client, - struct drm_device *dev, - struct drm_encoder_slave *encoder); - -}; -#define to_drm_i2c_encoder_driver(x) container_of((x), \ - struct drm_i2c_encoder_driver, \ - i2c_driver) - -/** - * drm_i2c_encoder_get_client - Get the I2C client corresponding to an encoder - */ -static inline struct i2c_client *drm_i2c_encoder_get_client(struct drm_encoder *encoder) -{ - return (struct i2c_client *)to_encoder_slave(encoder)->bus_priv; -} - -/** - * drm_i2c_encoder_register - Register an I2C encoder driver - * @owner: Module containing the driver. - * @driver: Driver to be registered. - */ -static inline int drm_i2c_encoder_register(struct module *owner, - struct drm_i2c_encoder_driver *driver) -{ - return i2c_register_driver(owner, &driver->i2c_driver); -} - -/** - * drm_i2c_encoder_unregister - Unregister an I2C encoder driver - * @driver: Driver to be unregistered. - */ -static inline void drm_i2c_encoder_unregister(struct drm_i2c_encoder_driver *driver) -{ - i2c_del_driver(&driver->i2c_driver); -} - -void drm_i2c_encoder_destroy(struct drm_encoder *encoder); - - -/* - * Wrapper fxns which can be plugged in to drm_encoder_helper_funcs: - */ - -void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode); -bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); -void drm_i2c_encoder_prepare(struct drm_encoder *encoder); -void drm_i2c_encoder_commit(struct drm_encoder *encoder); -void drm_i2c_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); -enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder, - struct drm_connector *connector); -void drm_i2c_encoder_save(struct drm_encoder *encoder); -void drm_i2c_encoder_restore(struct drm_encoder *encoder); - - -#endif diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h new file mode 100644 index 000000000000..aa786b828a0a --- /dev/null +++ b/include/drm/drm_exec.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ + +#ifndef __DRM_EXEC_H__ +#define __DRM_EXEC_H__ + +#include <linux/compiler.h> +#include <linux/ww_mutex.h> + +#define DRM_EXEC_INTERRUPTIBLE_WAIT BIT(0) +#define DRM_EXEC_IGNORE_DUPLICATES BIT(1) + +struct drm_gem_object; + +/** + * struct drm_exec - Execution context + */ +struct drm_exec { + /** + * @flags: Flags to control locking behavior + */ + u32 flags; + + /** + * @ticket: WW ticket used for acquiring locks + */ + struct ww_acquire_ctx ticket; + + /** + * @num_objects: number of objects locked + */ + unsigned int num_objects; + + /** + * @max_objects: maximum objects in array + */ + unsigned int max_objects; + + /** + * @objects: array of the locked objects + */ + struct drm_gem_object **objects; + + /** + * @contended: contended GEM object we backed off for + */ + struct drm_gem_object *contended; + + /** + * @prelocked: already locked GEM object due to contention + */ + struct drm_gem_object *prelocked; +}; + +/** + * drm_exec_obj() - Return the object for a given drm_exec index + * @exec: Pointer to the drm_exec context + * 
@index: The index. + * + * Return: Pointer to the locked object corresponding to @index if + * index is within the number of locked objects. NULL otherwise. + */ +static inline struct drm_gem_object * +drm_exec_obj(struct drm_exec *exec, unsigned long index) +{ + return index < exec->num_objects ? exec->objects[index] : NULL; +} + +/** + * drm_exec_for_each_locked_object - iterate over all the locked objects + * @exec: drm_exec object + * @index: unsigned long index for the iteration + * @obj: the current GEM object + * + * Iterate over all the locked GEM objects inside the drm_exec object. + */ +#define drm_exec_for_each_locked_object(exec, index, obj) \ + for ((index) = 0; ((obj) = drm_exec_obj(exec, index)); ++(index)) + +/** + * drm_exec_for_each_locked_object_reverse - iterate over all the locked + * objects in reverse locking order + * @exec: drm_exec object + * @index: unsigned long index for the iteration + * @obj: the current GEM object + * + * Iterate over all the locked GEM objects inside the drm_exec object in + * reverse locking order. Note that @index may go below zero and wrap, + * but that will be caught by drm_exec_obj(), returning a NULL object. + */ +#define drm_exec_for_each_locked_object_reverse(exec, index, obj) \ + for ((index) = (exec)->num_objects - 1; \ + ((obj) = drm_exec_obj(exec, index)); --(index)) + +/** + * drm_exec_until_all_locked - loop until all GEM objects are locked + * @exec: drm_exec object + * + * Core functionality of the drm_exec object. Loops until all GEM objects are + * locked and no more contention exists. At the beginning of the loop it is + * guaranteed that no GEM object is locked. + * + * Since labels can't be defined local to the loop's body we use a jump pointer + * to make sure that the retry is only used from within the loop's body. + */ +#define drm_exec_until_all_locked(exec) \ +__PASTE(__drm_exec_, __LINE__): \ + for (void *__drm_exec_retry_ptr; ({ \ + __drm_exec_retry_ptr = &&__PASTE(__drm_exec_, __LINE__);\ + (void)__drm_exec_retry_ptr; \ + drm_exec_cleanup(exec); \ + });) + +/** + * drm_exec_retry_on_contention - restart the loop to grab all locks + * @exec: drm_exec object + * + * Control flow helper to continue when a contention was detected and we need to + * clean up and re-start the loop to prepare all GEM objects. + */ +#define drm_exec_retry_on_contention(exec) \ + do { \ + if (unlikely(drm_exec_is_contended(exec))) \ + goto *__drm_exec_retry_ptr; \ + } while (0) + +/** + * drm_exec_is_contended - check for contention + * @exec: drm_exec object + * + * Returns true if the drm_exec object has run into some contention while + * locking a GEM object and needs to clean up. 
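+ *
+ * Together with drm_exec_until_all_locked() and
+ * drm_exec_retry_on_contention() the usual locking loop looks roughly
+ * like this (illustrative sketch only, not part of this patch; "objs"
+ * and "count" stand in for driver state):
+ *
+ *	struct drm_exec exec;
+ *	unsigned int i;
+ *	int ret;
+ *
+ *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, count);
+ *	drm_exec_until_all_locked(&exec) {
+ *		for (i = 0; i < count; i++) {
+ *			ret = drm_exec_prepare_obj(&exec, objs[i], 1);
+ *			drm_exec_retry_on_contention(&exec);
+ *			if (ret)
+ *				goto err_fini;
+ *		}
+ *	}
+ *
+ *	... use the locked objects ...
+ *
+ * err_fini:
+ *	drm_exec_fini(&exec);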
+ */ +static inline bool drm_exec_is_contended(struct drm_exec *exec) +{ + return !!exec->contended; +} + +void drm_exec_init(struct drm_exec *exec, u32 flags, unsigned nr); +void drm_exec_fini(struct drm_exec *exec); +bool drm_exec_cleanup(struct drm_exec *exec); +int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj); +void drm_exec_unlock_obj(struct drm_exec *exec, struct drm_gem_object *obj); +int drm_exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj, + unsigned int num_fences); +int drm_exec_prepare_array(struct drm_exec *exec, + struct drm_gem_object **objects, + unsigned int num_objects, + unsigned int num_fences); + +#endif diff --git a/include/drm/drm_fb_dma_helper.h b/include/drm/drm_fb_dma_helper.h index d5e036c57801..c950732c6d36 100644 --- a/include/drm/drm_fb_dma_helper.h +++ b/include/drm/drm_fb_dma_helper.h @@ -6,7 +6,9 @@ struct drm_device; struct drm_framebuffer; +struct drm_plane; struct drm_plane_state; +struct drm_scanout_buffer; struct drm_gem_dma_object *drm_fb_dma_get_gem_obj(struct drm_framebuffer *fb, unsigned int plane); @@ -19,5 +21,8 @@ void drm_fb_dma_sync_non_coherent(struct drm_device *drm, struct drm_plane_state *old_state, struct drm_plane_state *state); +int drm_fb_dma_get_scanout_buffer(struct drm_plane *plane, + struct drm_scanout_buffer *sb); + #endif diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 013654de3fc5..dd9a18f8de5a 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -70,23 +70,6 @@ struct drm_fb_helper_surface_size { */ struct drm_fb_helper_funcs { /** - * @fb_probe: - * - * Driver callback to allocate and initialize the fbdev info structure. - * Furthermore it also needs to allocate the DRM framebuffer used to - * back the fbdev. - * - * This callback is mandatory. - * - * RETURNS: - * - * The driver should return 0 on success and a negative error code on - * failure. - */ - int (*fb_probe)(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes); - - /** * @fb_dirty: * * Driver callback to update the framebuffer memory. If set, fbdev @@ -99,6 +82,33 @@ struct drm_fb_helper_funcs { * 0 on success, or an error code otherwise. */ int (*fb_dirty)(struct drm_fb_helper *helper, struct drm_clip_rect *clip); + + /** + * @fb_restore: + * + * Driver callback to restore internal fbdev state. If set, fbdev + * emulation will invoke this callback after restoring the display + * mode. + * + * Only for i915. Do not use in new code. + * + * TODO: Fix i915 to not require this callback. + */ + void (*fb_restore)(struct drm_fb_helper *helper); + + /** + * @fb_set_suspend: + * + * Driver callback to suspend or resume, if set, fbdev emulation will + * invoke this callback during suspend and resume. Driver should call + * fb_set_suspend() from their implementation. If not set, fbdev + * emulation will invoke fb_set_suspend() directly. + * + * Only for i915. Do not use in new code. + * + * TODO: Fix i915 to not require this callback. + */ + void (*fb_set_suspend)(struct drm_fb_helper *helper, bool suspend); }; /** @@ -195,15 +205,6 @@ struct drm_fb_helper { */ int preferred_bpp; - /** - * @hint_leak_smem_start: - * - * Hint to the fbdev emulation to store the framebuffer's physical - * address in struct &fb_info.fix.smem_start. If the hint is unset, - * the smem_start field should always be cleared to zero. 
- */ - bool hint_leak_smem_start; - #ifdef CONFIG_FB_DEFERRED_IO /** * @fbdefio: @@ -253,39 +254,20 @@ int drm_fb_helper_set_par(struct fb_info *info); int drm_fb_helper_check_var(struct fb_var_screeninfo *var, struct fb_info *info); -int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper); +int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, + bool force); -struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper); void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper); void drm_fb_helper_fill_info(struct fb_info *info, struct drm_fb_helper *fb_helper, struct drm_fb_helper_surface_size *sizes); -void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist); +void drm_fb_helper_damage_range(struct fb_info *info, off_t off, size_t len); +void drm_fb_helper_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u32 height); -ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, - size_t count, loff_t *ppos); -ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, - size_t count, loff_t *ppos); - -void drm_fb_helper_sys_fillrect(struct fb_info *info, - const struct fb_fillrect *rect); -void drm_fb_helper_sys_copyarea(struct fb_info *info, - const struct fb_copyarea *area); -void drm_fb_helper_sys_imageblit(struct fb_info *info, - const struct fb_image *image); - -ssize_t drm_fb_helper_cfb_read(struct fb_info *info, char __user *buf, - size_t count, loff_t *ppos); -ssize_t drm_fb_helper_cfb_write(struct fb_info *info, const char __user *buf, - size_t count, loff_t *ppos); - -void drm_fb_helper_cfb_fillrect(struct fb_info *info, - const struct fb_fillrect *rect); -void drm_fb_helper_cfb_copyarea(struct fb_info *info, - const struct fb_copyarea *area); -void drm_fb_helper_cfb_imageblit(struct fb_info *info, - const struct fb_image *image); +#ifdef CONFIG_FB_DEFERRED_IO +void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist); +#endif void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend); void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, @@ -300,9 +282,6 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper); int drm_fb_helper_debug_enter(struct fb_info *info); int drm_fb_helper_debug_leave(struct fb_info *info); - -void drm_fb_helper_lastclose(struct drm_device *dev); -void drm_fb_helper_output_poll_changed(struct drm_device *dev); #else static inline void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, @@ -359,12 +338,6 @@ drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) return 0; } -static inline struct fb_info * -drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper) -{ - return NULL; -} - static inline void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper) { } @@ -388,71 +361,12 @@ static inline int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, return 0; } +#ifdef CONFIG_FB_DEFERRED_IO static inline void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagelist) { } - -static inline int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper) -{ - return -ENODEV; -} - -static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info, - char __user *buf, size_t count, - loff_t *ppos) -{ - return -ENODEV; -} - -static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info, - const char __user 
*buf, - size_t count, loff_t *ppos) -{ - return -ENODEV; -} - -static inline void drm_fb_helper_sys_fillrect(struct fb_info *info, - const struct fb_fillrect *rect) -{ -} - -static inline void drm_fb_helper_sys_copyarea(struct fb_info *info, - const struct fb_copyarea *area) -{ -} - -static inline void drm_fb_helper_sys_imageblit(struct fb_info *info, - const struct fb_image *image) -{ -} - -static inline ssize_t drm_fb_helper_cfb_read(struct fb_info *info, char __user *buf, - size_t count, loff_t *ppos) -{ - return -ENODEV; -} - -static inline ssize_t drm_fb_helper_cfb_write(struct fb_info *info, const char __user *buf, - size_t count, loff_t *ppos) -{ - return -ENODEV; -} - -static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info, - const struct fb_fillrect *rect) -{ -} - -static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info, - const struct fb_copyarea *area) -{ -} - -static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info, - const struct fb_image *image) -{ -} +#endif static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend) @@ -483,14 +397,6 @@ static inline int drm_fb_helper_debug_leave(struct fb_info *info) { return 0; } - -static inline void drm_fb_helper_lastclose(struct drm_device *dev) -{ -} - -static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev) -{ -} #endif #endif diff --git a/include/drm/drm_fbdev_dma.h b/include/drm/drm_fbdev_dma.h new file mode 100644 index 000000000000..fb3f2a9aa01a --- /dev/null +++ b/include/drm/drm_fbdev_dma.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef DRM_FBDEV_DMA_H +#define DRM_FBDEV_DMA_H + +struct drm_fb_helper; +struct drm_fb_helper_surface_size; + +#ifdef CONFIG_DRM_FBDEV_EMULATION +int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes); + +#define DRM_FBDEV_DMA_DRIVER_OPS \ + .fbdev_probe = drm_fbdev_dma_driver_fbdev_probe +#else +#define DRM_FBDEV_DMA_DRIVER_OPS \ + .fbdev_probe = NULL +#endif + +#endif diff --git a/include/drm/drm_fbdev_generic.h b/include/drm/drm_fbdev_generic.h deleted file mode 100644 index 75799342098d..000000000000 --- a/include/drm/drm_fbdev_generic.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: MIT */ - -#ifndef DRM_FBDEV_GENERIC_H -#define DRM_FBDEV_GENERIC_H - -struct drm_device; - -#ifdef CONFIG_DRM_FBDEV_EMULATION -void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp); -#else -static inline void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) -{ } -#endif - -#endif diff --git a/include/drm/drm_fbdev_shmem.h b/include/drm/drm_fbdev_shmem.h new file mode 100644 index 000000000000..2fc708964d75 --- /dev/null +++ b/include/drm/drm_fbdev_shmem.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef DRM_FBDEV_SHMEM_H +#define DRM_FBDEV_SHMEM_H + +struct drm_fb_helper; +struct drm_fb_helper_surface_size; + +#ifdef CONFIG_DRM_FBDEV_EMULATION +int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes); + +#define DRM_FBDEV_SHMEM_DRIVER_OPS \ + .fbdev_probe = drm_fbdev_shmem_driver_fbdev_probe +#else +#define DRM_FBDEV_SHMEM_DRIVER_OPS \ + .fbdev_probe = NULL +#endif + +#endif diff --git a/include/drm/drm_fbdev_ttm.h b/include/drm/drm_fbdev_ttm.h new file mode 100644 index 000000000000..ad4a10bb4c78 --- /dev/null +++ b/include/drm/drm_fbdev_ttm.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef 
DRM_FBDEV_TTM_H +#define DRM_FBDEV_TTM_H + +#include <linux/stddef.h> + +struct drm_fb_helper; +struct drm_fb_helper_surface_size; + +#ifdef CONFIG_DRM_FBDEV_EMULATION +int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes); + +#define DRM_FBDEV_TTM_DRIVER_OPS \ + .fbdev_probe = drm_fbdev_ttm_driver_fbdev_probe +#else +#define DRM_FBDEV_TTM_DRIVER_OPS \ + .fbdev_probe = NULL +#endif + +#endif diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 0d1f853092ab..1a3018e4a537 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -41,24 +41,27 @@ struct dma_fence; struct drm_file; struct drm_device; +struct drm_printer; struct device; struct file; +extern struct xarray drm_minors_xa; + /* * FIXME: Not sure we want to have drm_minor here in the end, but to avoid * header include loops we need it here for now. */ -/* Note that the order of this enum is ABI (it determines +/* Note that the values of this enum are ABI (it determines * /dev/dri/renderD* numbers). * * Setting DRM_MINOR_ACCEL to 32 gives enough space for more drm minors to * be implemented before we hit any future limit. */ enum drm_minor_type { - DRM_MINOR_PRIMARY, - DRM_MINOR_CONTROL, - DRM_MINOR_RENDER, + DRM_MINOR_PRIMARY = 0, + DRM_MINOR_CONTROL = 1, + DRM_MINOR_RENDER = 2, DRM_MINOR_ACCEL = 32, }; @@ -78,10 +81,8 @@ struct drm_minor { struct device *kdev; /* Linux device */ struct drm_device *dev; + struct dentry *debugfs_symlink; struct dentry *debugfs_root; - - struct list_head debugfs_list; - struct mutex debugfs_lock; /* Protects debugfs_list. */ }; /** @@ -206,6 +207,13 @@ struct drm_file { bool writeback_connectors; /** + * @plane_color_pipeline: + * + * True if client understands plane color pipelines + */ + bool plane_color_pipeline; + + /** * @was_master: * * This client has or had, master capability. Protected by struct @@ -228,6 +236,18 @@ struct drm_file { bool is_master; /** + * @supports_virtualized_cursor_plane: + * + * This client is capable of handling the cursor plane with the + * restrictions imposed on it by the virtualized drivers. + * + * This implies that the cursor plane has to behave like a cursor + * i.e. track cursor movement. It also requires setting of the + * hotspot properties by the client on the cursor plane. + */ + bool supports_virtualized_cursor_plane; + + /** * @master: * * Master this node is currently associated with. Protected by struct @@ -255,8 +275,18 @@ struct drm_file { /** @master_lookup_lock: Serializes @master. */ spinlock_t master_lookup_lock; - /** @pid: Process that opened this file. */ - struct pid *pid; + /** + * @pid: Process that is using this file. + * + * Must only be dereferenced under an rcu_read_lock or equivalent. + * + * Updates are guarded with dev->filelist_mutex and reference must be + * dropped after an RCU grace period to accommodate lockless readers. + */ + struct pid __rcu *pid; + + /** @client_id: A unique id for fdinfo */ + u64 client_id; /** @magic: Authentication magic, see @authenticated. */ drm_magic_t magic; @@ -277,6 +307,9 @@ struct drm_file { * * Mapping of mm object handles to object pointers. Used by the GEM * subsystem. Protected by @table_lock. + * + * Note that allocated entries might be NULL as a transient state when + * creating or deleting a handle. 
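+ *
+ * Lookups should therefore go through a helper such as
+ * drm_gem_object_lookup(), which takes @table_lock and treats a NULL
+ * entry like a missing handle (sketch; the returned reference must
+ * later be released with drm_gem_object_put()):
+ *
+ *	struct drm_gem_object *obj;
+ *
+ *	obj = drm_gem_object_lookup(file_priv, handle);
+ *	if (!obj)
+ *		return -ENOENT;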
*/ struct idr object_idr; @@ -366,10 +399,24 @@ struct drm_file { */ struct drm_prime_file_private prime; - /* private: */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) - unsigned long lock_count; /* DRI1 legacy lock count */ -#endif + /** + * @client_name: + * + * Userspace-provided name; useful for accounting and debugging. + */ + const char *client_name; + + /** + * @client_name_lock: Protects @client_name. + */ + struct mutex client_name_lock; + + /** + * @debugfs_client: + * + * debugfs directory for each client under a drm node. + */ + struct dentry *debugfs_client; }; /** @@ -408,13 +455,22 @@ static inline bool drm_is_render_client(const struct drm_file *file_priv) * Returns true if this is an open file of the compute acceleration node, i.e. * &drm_file.minor of @file_priv is an accel minor. * - * See also the :ref:`section on accel nodes <drm_accel_node>`. + * See also :doc:`Introduction to compute accelerators subsystem + * </accel/introduction>`. */ static inline bool drm_is_accel_client(const struct drm_file *file_priv) { return file_priv->minor->type == DRM_MINOR_ACCEL; } +__printf(2, 3) +void drm_file_err(struct drm_file *file_priv, const char *fmt, ...); + +void drm_file_update_pid(struct drm_file *); + +struct drm_minor *drm_minor_acquire(struct xarray *minors_xa, unsigned int minor_id); +void drm_minor_release(struct drm_minor *minor); + int drm_open(struct inode *inode, struct file *filp); int drm_open_helper(struct file *filp, struct drm_minor *minor); ssize_t drm_read(struct file *filp, char __user *buffer, @@ -438,6 +494,40 @@ void drm_send_event_timestamp_locked(struct drm_device *dev, struct drm_pending_event *e, ktime_t timestamp); +/** + * struct drm_memory_stats - GEM object stats associated with a DRM file + * @shared: Total size of GEM objects shared between processes + * @private: Total size of GEM objects that are not shared + * @resident: Total size of GEM objects' backing pages + * @purgeable: Total size of GEM objects that can be purged (resident and not active) + * @active: Total size of GEM objects active on one or more engines + * + * Used by drm_print_memory_stats() + */ +struct drm_memory_stats { + u64 shared; + u64 private; + u64 resident; + u64 purgeable; + u64 active; +}; + +enum drm_gem_object_status; + +int drm_memory_stats_is_zero(const struct drm_memory_stats *stats); +void drm_fdinfo_print_size(struct drm_printer *p, + const char *prefix, + const char *stat, + const char *region, + u64 sz); +void drm_print_memory_stats(struct drm_printer *p, + const struct drm_memory_stats *stats, + enum drm_gem_object_status supported_status, + const char *region); + +void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file); +void drm_show_fdinfo(struct seq_file *m, struct file *f); + struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags); #endif /* _DRM_FILE_H_ */ diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h index 255645c1f9a8..33de514a5221 100644 --- a/include/drm/drm_fixed.h +++ b/include/drm/drm_fixed.h @@ -25,8 +25,9 @@ #ifndef DRM_FIXED_H #define DRM_FIXED_H -#include <linux/kernel.h> #include <linux/math64.h> +#include <linux/types.h> +#include <linux/wordpart.h> typedef union dfixed { u32 full; @@ -77,6 +78,23 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B) #define DRM_FIXED_EPSILON 1LL #define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON) +/** + * drm_sm2fixp - Convert a 1.31.32 signed-magnitude fixed point to 32.32 + * 2s-complement fixed point + * + * Return: s64 2s-complement fixed point + */ +static inline s64 
drm_sm2fixp(__u64 a) +{ + if ((a & (1LL << 63))) { + return -(a & 0x7fffffffffffffffll); + } else { + return a; + } +} + static inline s64 drm_int2fixp(int a) { return ((s64)a) << DRM_FIXED_POINT; @@ -87,9 +105,14 @@ static inline int drm_fixp2int(s64 a) return ((s64)a) >> DRM_FIXED_POINT; } +static inline int drm_fixp2int_round(s64 a) +{ + return drm_fixp2int(a + DRM_FIXED_ONE / 2); +} + static inline int drm_fixp2int_ceil(s64 a) { - if (a > 0) + if (a >= 0) return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE); else return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE); @@ -209,4 +232,27 @@ static inline s64 drm_fixp_exp(s64 x) return sum; } +static inline int fxp_q4_from_int(int val_int) +{ + return val_int << 4; +} + +static inline int fxp_q4_to_int(int val_q4) +{ + return val_q4 >> 4; +} + +static inline int fxp_q4_to_int_roundup(int val_q4) +{ + return (val_q4 + 0xf) >> 4; +} + +static inline int fxp_q4_to_frac(int val_q4) +{ + return val_q4 & 0xf; +} + +#define FXP_Q4_FMT "%d.%04d" +#define FXP_Q4_ARGS(val_q4) fxp_q4_to_int(val_q4), (fxp_q4_to_frac(val_q4) * 625) + #endif diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h index 21c3d512d25c..1eef3283a109 100644 --- a/include/drm/drm_flip_work.h +++ b/include/drm/drm_flip_work.h @@ -31,11 +31,10 @@ /** * DOC: flip utils * - * Util to queue up work to run from work-queue context after flip/vblank. + * Utility to queue up work to run from work-queue context after flip/vblank. * Typically this can be used to defer unref of framebuffer's, cursor - * bo's, etc until after vblank. The APIs are all thread-safe. - * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called - * in atomic context. + * bo's, etc until after vblank. The APIs are all thread-safe. Moreover, + * drm_flip_work_commit() can be called in atomic context. */ struct drm_flip_work; @@ -52,16 +51,6 @@ struct drm_flip_work; typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val); /** - * struct drm_flip_task - flip work task - * @node: list entry element - * @data: data to pass to &drm_flip_work.func - */ -struct drm_flip_task { - struct list_head node; - void *data; -}; - -/** * struct drm_flip_work - flip work queue * @name: debug name * @func: callback fxn called for each committed item @@ -79,9 +68,6 @@ struct drm_flip_work { spinlock_t lock; }; -struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags); -void drm_flip_work_queue_task(struct drm_flip_work *work, - struct drm_flip_task *task); void drm_flip_work_queue(struct drm_flip_work *work, void *val); void drm_flip_work_commit(struct drm_flip_work *work, struct workqueue_struct *wq); diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h index 291deb09475b..2b5c1aef80b0 100644 --- a/include/drm/drm_format_helper.h +++ b/include/drm/drm_format_helper.h @@ -15,6 +15,58 @@ struct drm_rect; struct iosys_map; +/** + * struct drm_format_conv_state - Stores format-conversion state + * + * DRM helpers for format conversion store temporary state in + * struct drm_format_conv_state. The buffer's resources can be reused + * among multiple conversion operations. + * + * All fields are considered private. 
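+ *
+ * A minimal usage sketch (illustrative only; "dst", "dst_pitch", "src",
+ * "fb" and "clip" are the caller's buffers, framebuffer and damage clip):
+ *
+ *	struct drm_format_conv_state state = DRM_FORMAT_CONV_STATE_INIT;
+ *
+ *	drm_fb_xrgb8888_to_rgb332(dst, dst_pitch, src, fb, clip, &state);
+ *	drm_format_conv_state_release(&state);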
+ */ +struct drm_format_conv_state { + /* private: */ + struct { + void *mem; + size_t size; + bool preallocated; + } tmp; +}; + +#define __DRM_FORMAT_CONV_STATE_INIT(_mem, _size, _preallocated) { \ + .tmp = { \ + .mem = (_mem), \ + .size = (_size), \ + .preallocated = (_preallocated), \ + } \ + } + +/** + * DRM_FORMAT_CONV_STATE_INIT - Initializer for struct drm_format_conv_state + * + * Initializes an instance of struct drm_format_conv_state to default values. + */ +#define DRM_FORMAT_CONV_STATE_INIT \ + __DRM_FORMAT_CONV_STATE_INIT(NULL, 0, false) + +/** + * DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED - Initializer for struct drm_format_conv_state + * @_mem: The preallocated memory area + * @_size: The number of bytes in _mem + * + * Initializes an instance of struct drm_format_conv_state to preallocated + * storage. The caller is responsible for releasing the provided memory range. + */ +#define DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED(_mem, _size) \ + __DRM_FORMAT_CONV_STATE_INIT(_mem, _size, true) + +void drm_format_conv_state_init(struct drm_format_conv_state *state); +void drm_format_conv_state_copy(struct drm_format_conv_state *state, + const struct drm_format_conv_state *old_state); +void *drm_format_conv_state_reserve(struct drm_format_conv_state *state, + size_t new_size, gfp_t flags); +void drm_format_conv_state_release(struct drm_format_conv_state *state); + unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info *format, const struct drm_rect *clip); @@ -23,48 +75,65 @@ void drm_fb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch, const struct drm_rect *clip); void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, bool cached); + const struct drm_rect *clip, bool cached, + struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, bool swab); + const struct drm_rect *clip, struct drm_format_conv_state *state); +void drm_fb_xrgb8888_to_rgb565be(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect 
*clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); +void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); +void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip, struct drm_format_conv_state *state); +void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip, struct drm_format_conv_state *state); +void drm_fb_xrgb8888_to_bgrx8888(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, + struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, + struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); - -int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format, - const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *rect); + const struct drm_rect *clip, struct drm_format_conv_state *state); +void drm_fb_argb8888_to_argb4444(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); -size_t drm_fb_build_fourcc_list(struct drm_device *dev, - const u32 *native_fourccs, size_t native_nfourccs, - u32 *fourccs_out, size_t nfourccs_out); +void drm_fb_xrgb8888_to_gray2(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip, struct drm_format_conv_state *state); #endif /* __LINUX_DRM_FORMAT_HELPER_H */ diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h index 532ae78ca747..471784426857 100644 --- a/include/drm/drm_fourcc.h +++ b/include/drm/drm_fourcc.h @@ -22,6 +22,7 @@ #ifndef __DRM_FOURCC_H__ #define __DRM_FOURCC_H__ +#include <linux/math.h> #include <linux/types.h> #include <uapi/drm/drm_fourcc.h> @@ -53,7 +54,6 @@ #endif struct drm_device; -struct drm_mode_fb_cmd2; /** * struct drm_format_info - information about a DRM format @@ -279,7 +279,7 @@ int drm_format_info_plane_width(const 
struct drm_format_info *info, int width, if (plane == 0) return width; - return width / info->hsub; + return DIV_ROUND_UP(width, info->hsub); } /** @@ -301,17 +301,18 @@ int drm_format_info_plane_height(const struct drm_format_info *info, int height, if (plane == 0) return height; - return height / info->vsub; + return DIV_ROUND_UP(height, info->vsub); } const struct drm_format_info *__drm_format_info(u32 format); const struct drm_format_info *drm_format_info(u32 format); const struct drm_format_info * drm_get_format_info(struct drm_device *dev, - const struct drm_mode_fb_cmd2 *mode_cmd); + u32 pixel_format, u64 modifier); uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth); uint32_t drm_driver_legacy_fb_format(struct drm_device *dev, uint32_t bpp, uint32_t depth); +uint32_t drm_driver_color_mode_format(struct drm_device *dev, unsigned int color_mode); unsigned int drm_format_info_block_width(const struct drm_format_info *info, int plane); unsigned int drm_format_info_block_height(const struct drm_format_info *info, diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index 0dcc07b68654..38b24fc8978d 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -23,6 +23,7 @@ #ifndef __DRM_FRAMEBUFFER_H__ #define __DRM_FRAMEBUFFER_H__ +#include <linux/bits.h> #include <linux/ctype.h> #include <linux/list.h> #include <linux/sched.h> @@ -100,6 +101,8 @@ struct drm_framebuffer_funcs { unsigned num_clips); }; +#define DRM_FRAMEBUFFER_HAS_HANDLE_REF(_i) BIT(0u + (_i)) + /** * struct drm_framebuffer - frame buffer object * @@ -189,17 +192,9 @@ struct drm_framebuffer { */ int flags; /** - * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor - * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR - * universal plane. - */ - int hot_x; - /** - * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor - * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR - * universal plane. + * @internal_flags: Framebuffer flags like DRM_FRAMEBUFFER_HAS_HANDLE_REF. */ - int hot_y; + unsigned int internal_flags; /** * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock. */ @@ -292,11 +287,6 @@ static inline void drm_framebuffer_assign(struct drm_framebuffer **p, &fb->head != (&(dev)->mode_config.fb_list); \ fb = list_next_entry(fb, head)) -int drm_framebuffer_plane_width(int width, - const struct drm_framebuffer *fb, int plane); -int drm_framebuffer_plane_height(int height, - const struct drm_framebuffer *fb, int plane); - /** * struct drm_afbc_framebuffer - a special afbc frame buffer object * diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 772a4adf5287..8d48d2af2649 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -35,7 +35,10 @@ */ #include <linux/kref.h> +#include <linux/dma-buf.h> #include <linux/dma-resv.h> +#include <linux/list.h> +#include <linux/mutex.h> #include <drm/drm_vma_manager.h> @@ -43,6 +46,27 @@ struct iosys_map; struct drm_gem_object; /** + * enum drm_gem_object_status - bitmask of object state for fdinfo reporting + * @DRM_GEM_OBJECT_RESIDENT: object is resident in memory (ie. not unpinned) + * @DRM_GEM_OBJECT_PURGEABLE: object marked as purgeable by userspace + * @DRM_GEM_OBJECT_ACTIVE: object is currently used by an active submission + * + * Bitmask of status used for fdinfo memory stats, see &drm_gem_object_funcs.status + * and drm_show_fdinfo(). 
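+ *
+ * A driver's &drm_gem_object_funcs.status callback might look roughly
+ * like this (sketch; the "foo" driver and its fields are hypothetical):
+ *
+ *	static enum drm_gem_object_status foo_gem_status(struct drm_gem_object *obj)
+ *	{
+ *		struct foo_gem_object *bo = to_foo_gem(obj);
+ *		enum drm_gem_object_status status = 0;
+ *
+ *		if (bo->pages)
+ *			status |= DRM_GEM_OBJECT_RESIDENT;
+ *		if (bo->madv > 0)
+ *			status |= DRM_GEM_OBJECT_PURGEABLE;
+ *
+ *		return status;
+ *	}
+ *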
Note that an object can report DRM_GEM_OBJECT_PURGEABLE + * and be active or not resident, in which case drm_show_fdinfo() will not + * account for it as purgeable. So drivers do not need to check if the buffer + * is idle and resident to return this bit, i.e. userspace can mark a buffer as + * purgeable even while it is still busy on the GPU. It will not get reported in + * the purgeable stats until it becomes idle. The status GEM object callback does + * not need to consider this. + */ +enum drm_gem_object_status { + DRM_GEM_OBJECT_RESIDENT = BIT(0), + DRM_GEM_OBJECT_PURGEABLE = BIT(1), + DRM_GEM_OBJECT_ACTIVE = BIT(2), +}; + +/** * struct drm_gem_object_funcs - GEM object functions */ struct drm_gem_object_funcs { @@ -102,7 +126,8 @@ struct drm_gem_object_funcs { /** * @pin: * - * Pin backing buffer in memory. Used by the drm_gem_map_attach() helper. + * Pin backing buffer in memory, such that dma-buf importers can + * access it. Used by the drm_gem_map_attach() helper. * * This callback is optional. */ @@ -135,7 +160,8 @@ struct drm_gem_object_funcs { * @vmap: * * Returns a virtual address for the buffer. Used by the - * drm_gem_dmabuf_vmap() helper. + * drm_gem_dmabuf_vmap() helper. Called with a held GEM reservation + * lock. * * This callback is optional. */ @@ -145,7 +171,8 @@ struct drm_gem_object_funcs { * @vunmap: * * Releases the address previously returned by @vmap. Used by the - * drm_gem_dmabuf_vunmap() helper. + * drm_gem_dmabuf_vunmap() helper. Called with a held GEM reservation + * lock. * * This callback is optional. */ @@ -165,6 +192,39 @@ struct drm_gem_object_funcs { int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma); /** + * @evict: + * + * Evicts the GEM object from memory. Used by the drm_gem_object_evict() + * helper. Returns 0 on success, -errno otherwise. Called with a held + * GEM reservation lock. + * + * This callback is optional. + */ + int (*evict)(struct drm_gem_object *obj); + + /** + * @status: + * + * The optional status callback can return additional object state + * which determines which stats the object is counted against. The + * callback is called under table_lock. Racing against object status + * change is "harmless", and the callback can expect to not race + * against object destruction. + * + * Called by drm_show_memory_stats(). + */ + enum drm_gem_object_status (*status)(struct drm_gem_object *obj); + + /** + * @rss: + * + * Return resident size of the object in physical memory. + * + * Called by drm_show_memory_stats(). + */ + size_t (*rss)(struct drm_gem_object *obj); + + /** * @vm_ops: * * Virtual memory operations used with mmap. @@ -338,6 +398,37 @@ struct drm_gem_object { struct dma_resv _resv; /** + * @gpuva: Fields used by GPUVM to manage mappings pointing to this GEM object. + * + * When DRM_GPUVM_IMMEDIATE_MODE is set, this list is protected by the + * &drm_gem_object.gpuva.lock mutex. Otherwise, the list is protected by the GEM's &dma_resv lock. + * + * Note that all entries in this list must agree on whether + * DRM_GPUVM_IMMEDIATE_MODE is set. + */ + struct { + /** + * @gpuva.list: list of GPUVM mappings attached to this GEM object. + * + * Drivers should lock list accesses with either the GEM's + * &dma_resv lock (&drm_gem_object.resv) or the + * &drm_gem_object.gpuva.lock mutex. + */ + struct list_head list; + + /** + * @gpuva.lock: lock protecting access to &drm_gem_object.gpuva.list + * when DRM_GPUVM_IMMEDIATE_MODE is used. + * + * Only used when DRM_GPUVM_IMMEDIATE_MODE is set. 
It should be + * safe to take this mutex during the fence signalling path, so + * do not allocate memory while holding this lock. Otherwise, + * the &dma_resv lock should be used. + */ + struct mutex lock; + } gpuva; + + /** * @funcs: * * Optional GEM object functions. If this is set, it will be used instead of the @@ -378,7 +469,8 @@ struct drm_gem_object { .poll = drm_poll,\ .read = drm_read,\ .llseek = noop_llseek,\ - .mmap = drm_gem_mmap + .mmap = drm_gem_mmap, \ + .fop_flags = FOP_UNSIGNED_OFFSET /** * DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers @@ -403,6 +495,9 @@ void drm_gem_object_release(struct drm_gem_object *obj); void drm_gem_object_free(struct kref *kref); int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size); +int drm_gem_object_init_with_mnt(struct drm_device *dev, + struct drm_gem_object *obj, size_t size, + struct vfsmount *gemfs); void drm_gem_private_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size); void drm_gem_private_object_fini(struct drm_gem_object *obj); @@ -458,8 +553,11 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj); void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, bool dirty, bool accessed); -int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map); -void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map); +void drm_gem_lock(struct drm_gem_object *obj); +void drm_gem_unlock(struct drm_gem_object *obj); + +int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map); +void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map); int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, int count, struct drm_gem_object ***objs_out); @@ -475,8 +573,90 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock); void drm_gem_lru_remove(struct drm_gem_object *obj); +void drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj); void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj); -unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan, - bool (*shrink)(struct drm_gem_object *obj)); +unsigned long +drm_gem_lru_scan(struct drm_gem_lru *lru, + unsigned int nr_to_scan, + unsigned long *remaining, + bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket), + struct ww_acquire_ctx *ticket); + +int drm_gem_evict_locked(struct drm_gem_object *obj); + +/** + * drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats + * + * This helper should only be used for fdinfo shared memory stats to determine + * if a GEM object is shared. + * + * @obj: obj in question + */ +static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_object *obj) +{ + return (obj->handle_count > 1) || obj->dma_buf; +} + +/** + * drm_gem_is_imported() - Tests if GEM object's buffer has been imported + * @obj: the GEM object + * + * Returns: + * True if the GEM object's buffer has been imported, false otherwise + */ +static inline bool drm_gem_is_imported(const struct drm_gem_object *obj) +{ + return !!obj->import_attach; +} + +#ifdef CONFIG_LOCKDEP +#define drm_gem_gpuva_assert_lock_held(gpuvm, obj) \ + lockdep_assert(drm_gpuvm_immediate_mode(gpuvm) ? 
\ + lockdep_is_held(&(obj)->gpuva.lock) : \ + dma_resv_held((obj)->resv)) +#else +#define drm_gem_gpuva_assert_lock_held(gpuvm, obj) do {} while (0) +#endif + +/** + * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object + * @obj: the &drm_gem_object + * + * This initializes the &drm_gem_object's &drm_gpuvm_bo list. + * + * Calling this function is only necessary for drivers intending to support the + * &drm_driver_feature DRIVER_GEM_GPUVA. + * + * See also drm_gem_gpuva_set_lock(). + */ +static inline void drm_gem_gpuva_init(struct drm_gem_object *obj) +{ + INIT_LIST_HEAD(&obj->gpuva.list); +} + +/** + * drm_gem_for_each_gpuvm_bo() - iterator to walk over a list of &drm_gpuvm_bo + * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step + * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with + * + * This iterator walks over all &drm_gpuvm_bo structures associated with the + * &drm_gem_object. + */ +#define drm_gem_for_each_gpuvm_bo(entry__, obj__) \ + list_for_each_entry(entry__, &(obj__)->gpuva.list, list.entry.gem) + +/** + * drm_gem_for_each_gpuvm_bo_safe() - iterator to safely walk over a list of + * &drm_gpuvm_bo + * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step + * @next__: another &drm_gpuvm_bo to store the next step of the iteration + * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with + * + * This iterator walks over all &drm_gpuvm_bo structures associated with the + * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence + * it is safe against removal of elements. + */ +#define drm_gem_for_each_gpuvm_bo_safe(entry__, next__, obj__) \ + list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, list.entry.gem) #endif /* __DRM_GEM_H__ */ diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h index 40b8b039518e..3e01c619a25e 100644 --- a/include/drm/drm_gem_atomic_helper.h +++ b/include/drm/drm_gem_atomic_helper.h @@ -5,6 +5,7 @@ #include <linux/iosys-map.h> +#include <drm/drm_format_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane.h> @@ -49,6 +50,15 @@ struct drm_shadow_plane_state { /** @base: plane state */ struct drm_plane_state base; + /** + * @fmtcnv_state: Format-conversion state + * + * Per-plane state for format conversion. + * Flags for copying shadow buffers into backend storage. Also holds + * temporary storage for format conversion. + */ + struct drm_format_conv_state fmtcnv_state; + /* Transitional state - do not export or duplicate */ /** diff --git a/include/drm/drm_gem_dma_helper.h b/include/drm/drm_gem_dma_helper.h index 8a043235dad8..f2678e7ecb98 100644 --- a/include/drm/drm_gem_dma_helper.h +++ b/include/drm/drm_gem_dma_helper.h @@ -166,11 +166,8 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev, * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. */ #define DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \ - .dumb_create = (dumb_create_func), \ - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ - .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table, \ - .gem_prime_mmap = drm_gem_prime_mmap + .dumb_create = (dumb_create_func), \ + .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table /** * DRM_GEM_DMA_DRIVER_OPS - DMA GEM driver operations @@ -204,11 +201,8 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev, * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. 
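 *
 * For example, a driver might pull these operations into its
 * &drm_driver instance like this (sketch; the "foo" names are
 * placeholders):
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(foo_dumb_create),
 *		...
 *	};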
*/ #define DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \ - .dumb_create = dumb_create_func, \ - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ - .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap, \ - .gem_prime_mmap = drm_gem_prime_mmap + .dumb_create = (dumb_create_func), \ + .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap /** * DRM_GEM_DMA_DRIVER_OPS_VMAP - DMA GEM driver operations ensuring a virtual @@ -273,6 +267,7 @@ unsigned long drm_gem_dma_get_unmapped_area(struct file *filp, .read = drm_read,\ .llseek = noop_llseek,\ .mmap = drm_gem_mmap,\ + .fop_flags = FOP_UNSIGNED_OFFSET, \ DRM_GEM_DMA_UNMAPPED_AREA_FOPS \ } diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h index d302521f3dd4..24f1fd40d553 100644 --- a/include/drm/drm_gem_framebuffer_helper.h +++ b/include/drm/drm_gem_framebuffer_helper.h @@ -8,6 +8,7 @@ struct drm_afbc_framebuffer; struct drm_device; struct drm_fb_helper_surface_size; struct drm_file; +struct drm_format_info; struct drm_framebuffer; struct drm_framebuffer_funcs; struct drm_gem_object; @@ -24,17 +25,21 @@ int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file, int drm_gem_fb_init_with_funcs(struct drm_device *dev, struct drm_framebuffer *fb, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, const struct drm_framebuffer_funcs *funcs); struct drm_framebuffer * drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, const struct drm_framebuffer_funcs *funcs); struct drm_framebuffer * drm_gem_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd); struct drm_framebuffer * drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd); int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map, @@ -47,6 +52,7 @@ void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_directi (((modifier) & AFBC_VENDOR_AND_TYPE_MASK) == DRM_FORMAT_MOD_ARM_AFBC(0)) int drm_gem_fb_afbc_init(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_afbc_framebuffer *afbc_fb); diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index a2201b2488c5..589f7bfe7506 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -27,11 +27,6 @@ struct drm_gem_shmem_object { struct drm_gem_object base; /** - * @pages_lock: Protects the page table and use count - */ - struct mutex pages_lock; - - /** * @pages: Page table */ struct page **pages; @@ -42,7 +37,18 @@ struct drm_gem_shmem_object { * Reference count on the pages table. * The pages are put when the count reaches zero. */ - unsigned int pages_use_count; + refcount_t pages_use_count; + + /** + * @pages_pin_count: + * + * Reference count on the pinned pages table. + * + * Pages are hard-pinned and reside in memory if the count is + * greater than zero. Otherwise, when count is zero, the pages are + * allowed to be evicted and purged by the memory shrinker. 
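+ *
+ * A driver that needs the pages to stay resident around a device
+ * access would typically do (sketch only):
+ *
+ *	ret = drm_gem_shmem_pin(shmem);
+ *	if (ret)
+ *		return ret;
+ *
+ *	... program the device to access shmem->pages ...
+ *
+ *	drm_gem_shmem_unpin(shmem);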
+ */ + refcount_t pages_pin_count; /** * @madv: State for madvise @@ -61,30 +67,11 @@ struct drm_gem_shmem_object { struct list_head madv_list; /** - * @pages_mark_dirty_on_put: - * - * Mark pages as dirty when they are put. - */ - unsigned int pages_mark_dirty_on_put : 1; - - /** - * @pages_mark_accessed_on_put: - * - * Mark pages as accessed when they are put. - */ - unsigned int pages_mark_accessed_on_put : 1; - - /** * @sgt: Scatter/gather table for imported PRIME buffers */ struct sg_table *sgt; /** - * @vmap_lock: Protects the vmap address and use count - */ - struct mutex vmap_lock; - - /** * @vaddr: Kernel virtual address of the backing memory */ void *vaddr; @@ -95,41 +82,61 @@ struct drm_gem_shmem_object { * Reference count on the virtual address. * The address is unmapped when the count reaches zero. */ - unsigned int vmap_use_count; + refcount_t vmap_use_count; + + /** + * @pages_mark_dirty_on_put: + * + * Mark pages as dirty when they are put. + */ + bool pages_mark_dirty_on_put : 1; + + /** + * @pages_mark_accessed_on_put: + * + * Mark pages as accessed when they are put. + */ + bool pages_mark_accessed_on_put : 1; /** * @map_wc: map object write-combined (instead of using shmem defaults). */ - bool map_wc; + bool map_wc : 1; }; #define to_drm_gem_shmem_obj(obj) \ container_of(obj, struct drm_gem_shmem_object, base) +int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size); struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size); +struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev, + size_t size, + struct vfsmount *gemfs); +void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem); -int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem); -void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); +void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem); int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem); -int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, - struct iosys_map *map); -void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, - struct iosys_map *map); +int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, + struct iosys_map *map); +void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, + struct iosys_map *map); int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma); -int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv); +int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem); +void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem); + +int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv); static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem) { return (shmem->madv > 0) && - !shmem->vmap_use_count && shmem->sgt && - !shmem->base.dma_buf && !shmem->base.import_attach; + !refcount_read(&shmem->pages_pin_count) && shmem->sgt && + !shmem->base.dma_buf && !drm_gem_is_imported(&shmem->base); } void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem); -bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem); struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem); struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem); @@ -185,7 +192,7 @@ static inline int
drm_gem_shmem_object_pin(struct drm_gem_object *obj) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - return drm_gem_shmem_pin(shmem); + return drm_gem_shmem_pin_locked(shmem); } /** @@ -199,7 +206,7 @@ static inline void drm_gem_shmem_object_unpin(struct drm_gem_object *obj) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - drm_gem_shmem_unpin(shmem); + drm_gem_shmem_unpin_locked(shmem); } /** @@ -220,12 +227,12 @@ static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_ } /* - * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap() + * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap_locked() * @obj: GEM object * @map: Returns the kernel virtual address of the SHMEM GEM object's backing store. * - * This function wraps drm_gem_shmem_vmap(). Drivers that employ the shmem helpers should - * use it as their &drm_gem_object_funcs.vmap handler. + * This function wraps drm_gem_shmem_vmap_locked(). Drivers that employ the shmem + * helpers should use it as their &drm_gem_object_funcs.vmap handler. * * Returns: * 0 on success or a negative error code on failure. @@ -235,7 +242,7 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - return drm_gem_shmem_vmap(shmem, map); + return drm_gem_shmem_vmap_locked(shmem, map); } /* @@ -243,15 +250,15 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, * @obj: GEM object * @map: Kernel virtual address where the SHMEM GEM object was mapped * - * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should - * use it as their &drm_gem_object_funcs.vunmap handler. + * This function wraps drm_gem_shmem_vunmap_locked(). Drivers that employ the shmem + * helpers should use it as their &drm_gem_object_funcs.vunmap handler. */ static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj, struct iosys_map *map) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - drm_gem_shmem_vunmap(shmem, map); + drm_gem_shmem_vunmap_locked(shmem, map); } /** @@ -282,18 +289,18 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev, struct sg_table *sgt); int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); +struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev, + struct dma_buf *buf); /** * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations * - * This macro provides a shortcut for setting the shmem GEM operations in - * the &drm_driver structure. + * This macro provides a shortcut for setting the shmem GEM operations + * in the &drm_driver structure. Drivers that do not require an s/g table + * for imported buffers should use this. 
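+ *
+ * A minimal usage sketch (hypothetical driver; the my_* name and the set of
+ * driver_features are illustrative assumptions, and unrelated &drm_driver
+ * fields are omitted):
+ *
+ * .. code-block:: c
+ *
+ *	static const struct drm_driver my_shmem_driver = {
+ *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ *		DRM_GEM_SHMEM_DRIVER_OPS,
+ *	};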
*/ #define DRM_GEM_SHMEM_DRIVER_OPS \ - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ - .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \ - .gem_prime_mmap = drm_gem_prime_mmap, \ - .dumb_create = drm_gem_shmem_dumb_create + .gem_prime_import = drm_gem_shmem_prime_import_no_map, \ + .dumb_create = drm_gem_shmem_dumb_create #endif /* __DRM_GEM_SHMEM_HELPER_H__ */ diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index d3e8920c0b64..2dd42bed679d 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -17,7 +17,6 @@ struct drm_mode_create_dumb; struct drm_plane; struct drm_plane_state; -struct drm_simple_display_pipe; struct filp; struct vm_area_struct; @@ -33,8 +32,8 @@ struct vm_area_struct; * struct drm_gem_vram_object - GEM object backed by VRAM * @bo: TTM buffer object * @map: Mapping information for @bo - * @placement: TTM placement information. Supported placements are \ - %TTM_PL_VRAM and %TTM_PL_SYSTEM + * @placement: TTM placement information. Supported placements are %TTM_PL_VRAM + * and %TTM_PL_SYSTEM * @placements: TTM placement information. * * The type struct drm_gem_vram_object represents a GEM object that is @@ -95,8 +94,6 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev, unsigned long pg_align); void drm_gem_vram_put(struct drm_gem_vram_object *gbo); s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo); -int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag); -int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo); int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map); void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, struct iosys_map *map); @@ -126,8 +123,8 @@ drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state); /** - * DRM_GEM_VRAM_PLANE_HELPER_FUNCS - - * Initializes struct drm_plane_helper_funcs for VRAM handling + * DRM_GEM_VRAM_PLANE_HELPER_FUNCS - Initializes struct drm_plane_helper_funcs + * for VRAM handling * * Drivers may use GEM BOs as VRAM helpers for the framebuffer memory. This * macro initializes struct drm_plane_helper_funcs to use the respective helper @@ -137,30 +134,17 @@ drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane, .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, \ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb -/* - * Helpers for struct drm_simple_display_pipe_funcs - */ - -int drm_gem_vram_simple_display_pipe_prepare_fb( - struct drm_simple_display_pipe *pipe, - struct drm_plane_state *new_state); - -void drm_gem_vram_simple_display_pipe_cleanup_fb( - struct drm_simple_display_pipe *pipe, - struct drm_plane_state *old_state); - /** - * define DRM_GEM_VRAM_DRIVER - default callback functions for \ - &struct drm_driver + * define DRM_GEM_VRAM_DRIVER - default callback functions for + * &struct drm_driver * * Drivers that use VRAM MM and GEM VRAM can use this macro to initialize * &struct drm_driver with default functions. 
*/ #define DRM_GEM_VRAM_DRIVER \ - .debugfs_init = drm_vram_mm_debugfs_init, \ - .dumb_create = drm_gem_vram_driver_dumb_create, \ - .dumb_map_offset = drm_gem_ttm_dumb_map_offset, \ - .gem_prime_mmap = drm_gem_prime_mmap + .debugfs_init = drm_vram_mm_debugfs_init, \ + .dumb_create = drm_gem_vram_driver_dumb_create, \ + .dumb_map_offset = drm_gem_ttm_dumb_map_offset /* * VRAM memory manager */ @@ -171,7 +155,6 @@ void drm_gem_vram_simple_display_pipe_cleanup_fb( * @vram_base: Base address of the managed video memory * @vram_size: Size of the managed video memory in bytes * @bdev: The TTM BO device. - * @funcs: TTM BO functions * * The fields &struct drm_vram_mm.vram_base and * &struct drm_vram_mm.vram_size are managed by VRAM MM, but are @@ -186,8 +169,8 @@ struct drm_vram_mm { }; /** - * drm_vram_mm_of_bdev() - \ - Returns the container of type &struct ttm_device for field bdev. + * drm_vram_mm_of_bdev() - Returns the container of type &struct ttm_device for + * field bdev. * @bdev: the TTM BO device * * Returns: diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h new file mode 100644 index 000000000000..632e100e6efb --- /dev/null +++ b/include/drm/drm_gpusvm.h @@ -0,0 +1,542 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef __DRM_GPUSVM_H__ +#define __DRM_GPUSVM_H__ + +#include <linux/kref.h> +#include <linux/interval_tree.h> +#include <linux/mmu_notifier.h> + +struct dev_pagemap_ops; +struct drm_device; +struct drm_gpusvm; +struct drm_gpusvm_notifier; +struct drm_gpusvm_ops; +struct drm_gpusvm_range; +struct drm_pagemap; +struct drm_pagemap_addr; + +/** + * struct drm_gpusvm_ops - Operations structure for GPU SVM + * + * This structure defines the operations for GPU Shared Virtual Memory (SVM). + * These operations are provided by the GPU driver to manage SVM ranges and + * notifiers. + */ +struct drm_gpusvm_ops { + /** + * @notifier_alloc: Allocate a GPU SVM notifier (optional) + * + * Allocate a GPU SVM notifier. + * + * Return: Pointer to the allocated GPU SVM notifier on success, NULL on failure. + */ + struct drm_gpusvm_notifier *(*notifier_alloc)(void); + + /** + * @notifier_free: Free a GPU SVM notifier (optional) + * @notifier: Pointer to the GPU SVM notifier to be freed + * + * Free a GPU SVM notifier. + */ + void (*notifier_free)(struct drm_gpusvm_notifier *notifier); + + /** + * @range_alloc: Allocate a GPU SVM range (optional) + * @gpusvm: Pointer to the GPU SVM + * + * Allocate a GPU SVM range. + * + * Return: Pointer to the allocated GPU SVM range on success, NULL on failure. + */ + struct drm_gpusvm_range *(*range_alloc)(struct drm_gpusvm *gpusvm); + + /** + * @range_free: Free a GPU SVM range (optional) + * @range: Pointer to the GPU SVM range to be freed + * + * Free a GPU SVM range. + */ + void (*range_free)(struct drm_gpusvm_range *range); + + /** + * @invalidate: Invalidate GPU SVM notifier (required) + * @gpusvm: Pointer to the GPU SVM + * @notifier: Pointer to the GPU SVM notifier + * @mmu_range: Pointer to the mmu_notifier_range structure + * + * Invalidate the GPU page tables. It can safely walk the notifier range + * RB tree/list in this function. Called while holding the notifier lock.
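+ *
+ * A minimal sketch of an implementation (hypothetical driver code; the
+ * my_vm embedding and the my_gpu_zap_range() helper are illustrative
+ * assumptions, not part of this API):
+ *
+ * .. code-block:: c
+ *
+ *	static void my_invalidate(struct drm_gpusvm *gpusvm,
+ *				  struct drm_gpusvm_notifier *notifier,
+ *				  const struct mmu_notifier_range *mmu_range)
+ *	{
+ *		struct my_vm *vm = container_of(gpusvm, struct my_vm, gpusvm);
+ *
+ *		my_gpu_zap_range(vm, mmu_range->start, mmu_range->end);
+ *	}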
+ */ + void (*invalidate)(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_notifier *notifier, + const struct mmu_notifier_range *mmu_range); +}; + +/** + * struct drm_gpusvm_notifier - Structure representing a GPU SVM notifier + * + * @gpusvm: Pointer to the GPU SVM structure + * @notifier: MMU interval notifier + * @itree: Interval tree node for the notifier (inserted in GPU SVM) + * @entry: List entry for fast interval tree traversal + * @root: Cached root node of the RB tree containing ranges + * @range_list: List head of ranges in the same order they appear in the + * interval tree. This is useful to keep iterating ranges while + * doing modifications to the RB tree. + * @flags: Flags for notifier + * @flags.removed: Flag indicating whether the MMU interval notifier has been + * removed + * + * This structure represents a GPU SVM notifier. + */ +struct drm_gpusvm_notifier { + struct drm_gpusvm *gpusvm; + struct mmu_interval_notifier notifier; + struct interval_tree_node itree; + struct list_head entry; + struct rb_root_cached root; + struct list_head range_list; + struct { + u32 removed : 1; + } flags; +}; + +/** + * struct drm_gpusvm_pages_flags - Structure representing GPU SVM pages flags + * + * @migrate_devmem: Flag indicating whether the pages can be migrated to device memory + * @unmapped: Flag indicating if the pages have been unmapped + * @partial_unmap: Flag indicating if the pages have been partially unmapped + * @has_devmem_pages: Flag indicating if the pages have devmem pages + * @has_dma_mapping: Flag indicating if the pages have a DMA mapping + * @__flags: Flags for pages in u16 form (used for READ_ONCE) + */ +struct drm_gpusvm_pages_flags { + union { + struct { + /* All flags below must be set upon creation */ + u16 migrate_devmem : 1; + /* All flags below must be set / cleared under notifier lock */ + u16 unmapped : 1; + u16 partial_unmap : 1; + u16 has_devmem_pages : 1; + u16 has_dma_mapping : 1; + }; + u16 __flags; + }; +}; + +/** + * struct drm_gpusvm_pages - Structure representing GPU SVM mapped pages + * + * @dma_addr: Device address array + * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping. + * Note this is assuming only one drm_pagemap per range is allowed. + * @notifier_seq: Notifier sequence number of the range's pages + * @flags: Flags for range + * @flags.migrate_devmem: Flag indicating whether the range can be migrated to device memory + * @flags.unmapped: Flag indicating if the range has been unmapped + * @flags.partial_unmap: Flag indicating if the range has been partially unmapped + * @flags.has_devmem_pages: Flag indicating if the range has devmem pages + * @flags.has_dma_mapping: Flag indicating if the range has a DMA mapping + */ +struct drm_gpusvm_pages { + struct drm_pagemap_addr *dma_addr; + struct drm_pagemap *dpagemap; + unsigned long notifier_seq; + struct drm_gpusvm_pages_flags flags; +}; + +/** + * struct drm_gpusvm_range - Structure representing a GPU SVM range + * + * @gpusvm: Pointer to the GPU SVM structure + * @notifier: Pointer to the GPU SVM notifier + * @refcount: Reference count for the range + * @itree: Interval tree node for the range (inserted in GPU SVM notifier) + * @entry: List entry for fast interval tree traversal + * @pages: The pages for this range. + * + * This structure represents a GPU SVM range used for tracking memory ranges + * mapped in a DRM device.
+ */ +struct drm_gpusvm_range { + struct drm_gpusvm *gpusvm; + struct drm_gpusvm_notifier *notifier; + struct kref refcount; + struct interval_tree_node itree; + struct list_head entry; + struct drm_gpusvm_pages pages; +}; + +/** + * struct drm_gpusvm - GPU SVM structure + * + * @name: Name of the GPU SVM + * @drm: Pointer to the DRM device structure + * @mm: Pointer to the mm_struct for the address space + * @mm_start: Start address of GPU SVM + * @mm_range: Range of the GPU SVM + * @notifier_size: Size of individual notifiers + * @ops: Pointer to the operations structure for GPU SVM + * @chunk_sizes: Pointer to the array of chunk sizes used in range allocation. + * Entries should be powers of 2 in descending order. + * @num_chunks: Number of chunks + * @notifier_lock: Read-write semaphore for protecting notifier operations + * @root: Cached root node of the Red-Black tree containing GPU SVM notifiers + * @notifier_list: list head of notifiers in the same order they + * appear in the interval tree. This is useful to keep iterating + * notifiers while doing modifications to the RB tree. + * + * This structure represents a GPU SVM (Shared Virtual Memory) used for tracking + * memory ranges mapped in a DRM (Direct Rendering Manager) device. + * + * No reference counting is provided, as this is expected to be embedded in the + * driver VM structure along with the struct drm_gpuvm, which handles reference + * counting. + */ +struct drm_gpusvm { + const char *name; + struct drm_device *drm; + struct mm_struct *mm; + unsigned long mm_start; + unsigned long mm_range; + unsigned long notifier_size; + const struct drm_gpusvm_ops *ops; + const unsigned long *chunk_sizes; + int num_chunks; + struct rw_semaphore notifier_lock; + struct rb_root_cached root; + struct list_head notifier_list; +#ifdef CONFIG_LOCKDEP + /** + * @lock_dep_map: Annotates drm_gpusvm_range_find_or_insert and + * drm_gpusvm_range_remove with a driver-provided lock. + */ + struct lockdep_map *lock_dep_map; +#endif +}; + +/** + * struct drm_gpusvm_ctx - DRM GPU SVM context + * + * @device_private_page_owner: The device-private page owner to use for + * this operation + * @check_pages_threshold: Check whether CPU pages are present if the chunk is + * less than or equal to this threshold. If not present, + * reduce the chunk size. + * @timeslice_ms: The minimum time, in milliseconds, a piece of memory + * remains with either exclusive GPU or CPU access. + * @in_notifier: entering from an MMU notifier + * @read_only: operating on read-only memory + * @devmem_possible: possible to use device memory + * @devmem_only: use only device memory + * @allow_mixed: Allow mixed mappings in get pages. Mixing system memory with a + * single dpagemap is supported; mixing multiple dpagemaps + * is unsupported. + * + * Context the DRM GPU SVM is operating in (i.e. user arguments).
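+ *
+ * For example, a GPU fault handler might set up its context as follows
+ * (sketch; the my_fault_owner value and the specific numbers are
+ * illustrative assumptions, not recommendations):
+ *
+ * .. code-block:: c
+ *
+ *	struct drm_gpusvm_ctx ctx = {
+ *		.device_private_page_owner = my_fault_owner,
+ *		.check_pages_threshold = SZ_64K,
+ *		.timeslice_ms = 5,
+ *		.devmem_possible = true,
+ *	};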
+ */ +struct drm_gpusvm_ctx { + void *device_private_page_owner; + unsigned long check_pages_threshold; + unsigned long timeslice_ms; + unsigned int in_notifier :1; + unsigned int read_only :1; + unsigned int devmem_possible :1; + unsigned int devmem_only :1; + unsigned int allow_mixed :1; +}; + +int drm_gpusvm_init(struct drm_gpusvm *gpusvm, + const char *name, struct drm_device *drm, + struct mm_struct *mm, + unsigned long mm_start, unsigned long mm_range, + unsigned long notifier_size, + const struct drm_gpusvm_ops *ops, + const unsigned long *chunk_sizes, int num_chunks); + +void drm_gpusvm_fini(struct drm_gpusvm *gpusvm); + +void drm_gpusvm_free(struct drm_gpusvm *gpusvm); + +unsigned long +drm_gpusvm_find_vma_start(struct drm_gpusvm *gpusvm, + unsigned long start, + unsigned long end); + +struct drm_gpusvm_range * +drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm, + unsigned long fault_addr, + unsigned long gpuva_start, + unsigned long gpuva_end, + const struct drm_gpusvm_ctx *ctx); + +void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_range *range); + +int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_range *range); + +struct drm_gpusvm_range * +drm_gpusvm_range_get(struct drm_gpusvm_range *range); + +void drm_gpusvm_range_put(struct drm_gpusvm_range *range); + +bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_range *range); + +int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_range *range, + const struct drm_gpusvm_ctx *ctx); + +void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_range *range, + const struct drm_gpusvm_ctx *ctx); + +bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start, + unsigned long end); + +struct drm_gpusvm_notifier * +drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start, + unsigned long end); + +struct drm_gpusvm_range * +drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start, + unsigned long end); + +void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range, + const struct mmu_notifier_range *mmu_range); + +int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_pages *svm_pages, + struct mm_struct *mm, + struct mmu_interval_notifier *notifier, + unsigned long pages_start, unsigned long pages_end, + const struct drm_gpusvm_ctx *ctx); + +void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_pages *svm_pages, + unsigned long npages, + const struct drm_gpusvm_ctx *ctx); + +void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_pages *svm_pages, + unsigned long npages); + +#ifdef CONFIG_LOCKDEP +/** + * drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM + * @gpusvm: Pointer to the GPU SVM structure. + * @lock: the lock used to protect the gpuva list. The locking primitive + * must contain a dep_map field. + * + * Call this to annotate drm_gpusvm_range_find_or_insert and + * drm_gpusvm_range_remove. + */ +#define drm_gpusvm_driver_set_lock(gpusvm, lock) \ + do { \ + if (!WARN((gpusvm)->lock_dep_map, \ + "GPUSVM range lock should be set only once."))\ + (gpusvm)->lock_dep_map = &(lock)->dep_map; \ + } while (0) +#else +#define drm_gpusvm_driver_set_lock(gpusvm, lock) do {} while (0) +#endif + +/** + * drm_gpusvm_notifier_lock() - Lock GPU SVM notifier + * @gpusvm__: Pointer to the GPU SVM structure. 
+ * + * Abstracts the client's usage of the GPU SVM notifier lock; takes the lock. + */ +#define drm_gpusvm_notifier_lock(gpusvm__) \ + down_read(&(gpusvm__)->notifier_lock) + +/** + * drm_gpusvm_notifier_unlock() - Unlock GPU SVM notifier + * @gpusvm__: Pointer to the GPU SVM structure. + * + * Abstracts the client's usage of the GPU SVM notifier lock; drops the lock. + */ +#define drm_gpusvm_notifier_unlock(gpusvm__) \ + up_read(&(gpusvm__)->notifier_lock) + +/** + * drm_gpusvm_range_start() - GPU SVM range start address + * @range: Pointer to the GPU SVM range + * + * Return: GPU SVM range start address + */ +static inline unsigned long +drm_gpusvm_range_start(struct drm_gpusvm_range *range) +{ + return range->itree.start; +} + +/** + * drm_gpusvm_range_end() - GPU SVM range end address + * @range: Pointer to the GPU SVM range + * + * Return: GPU SVM range end address + */ +static inline unsigned long +drm_gpusvm_range_end(struct drm_gpusvm_range *range) +{ + return range->itree.last + 1; +} + +/** + * drm_gpusvm_range_size() - GPU SVM range size + * @range: Pointer to the GPU SVM range + * + * Return: GPU SVM range size + */ +static inline unsigned long +drm_gpusvm_range_size(struct drm_gpusvm_range *range) +{ + return drm_gpusvm_range_end(range) - drm_gpusvm_range_start(range); +} + +/** + * drm_gpusvm_notifier_start() - GPU SVM notifier start address + * @notifier: Pointer to the GPU SVM notifier + * + * Return: GPU SVM notifier start address + */ +static inline unsigned long +drm_gpusvm_notifier_start(struct drm_gpusvm_notifier *notifier) +{ + return notifier->itree.start; +} + +/** + * drm_gpusvm_notifier_end() - GPU SVM notifier end address + * @notifier: Pointer to the GPU SVM notifier + * + * Return: GPU SVM notifier end address + */ +static inline unsigned long +drm_gpusvm_notifier_end(struct drm_gpusvm_notifier *notifier) +{ + return notifier->itree.last + 1; +} + +/** + * drm_gpusvm_notifier_size() - GPU SVM notifier size + * @notifier: Pointer to the GPU SVM notifier + * + * Return: GPU SVM notifier size + */ +static inline unsigned long +drm_gpusvm_notifier_size(struct drm_gpusvm_notifier *notifier) +{ + return drm_gpusvm_notifier_end(notifier) - + drm_gpusvm_notifier_start(notifier); +} + +/** + * __drm_gpusvm_range_next() - Get the next GPU SVM range in the list + * @range: a pointer to the current GPU SVM range + * + * Return: A pointer to the next drm_gpusvm_range if available, or NULL if the + * current range is the last one or if the input range is NULL. + */ +static inline struct drm_gpusvm_range * +__drm_gpusvm_range_next(struct drm_gpusvm_range *range) +{ + if (range && !list_is_last(&range->entry, + &range->notifier->range_list)) + return list_next_entry(range, entry); + + return NULL; +} + +/** + * drm_gpusvm_for_each_range() - Iterate over GPU SVM ranges in a notifier + * @range__: Iterator variable for the ranges. If set, it indicates the start of + * the iterator. If NULL, call drm_gpusvm_range_find() to get the range. + * @notifier__: Pointer to the GPU SVM notifier + * @start__: Start address of the range + * @end__: End address of the range + * + * This macro is used to iterate over GPU SVM ranges in a notifier. It is safe + * to use while holding the driver SVM lock or the notifier lock.
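+ *
+ * A typical pattern under the notifier lock (sketch; my_update_range() is
+ * an illustrative driver helper, not part of this API):
+ *
+ * .. code-block:: c
+ *
+ *	struct drm_gpusvm_range *range = NULL;
+ *
+ *	drm_gpusvm_notifier_lock(gpusvm);
+ *	drm_gpusvm_for_each_range(range, notifier, start, end)
+ *		my_update_range(range);
+ *	drm_gpusvm_notifier_unlock(gpusvm);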
+ */ +#define drm_gpusvm_for_each_range(range__, notifier__, start__, end__) \ + for ((range__) = (range__) ?: \ + drm_gpusvm_range_find((notifier__), (start__), (end__)); \ + (range__) && (drm_gpusvm_range_start(range__) < (end__)); \ + (range__) = __drm_gpusvm_range_next(range__)) + +/** + * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier + * @range__: Iterator variable for the ranges + * @next__: Iterator variable for the ranges' temporary storage + * @notifier__: Pointer to the GPU SVM notifier + * @start__: Start address of the range + * @end__: End address of the range + * + * This macro is used to iterate over GPU SVM ranges in a notifier while + * removing ranges from it. + */ +#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \ + for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \ + (next__) = __drm_gpusvm_range_next(range__); \ + (range__) && (drm_gpusvm_range_start(range__) < (end__)); \ + (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__)) + +/** + * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list + * @notifier: a pointer to the current drm_gpusvm_notifier + * + * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if + * the current notifier is the last one or if the input notifier is + * NULL. + */ +static inline struct drm_gpusvm_notifier * +__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier) +{ + if (notifier && !list_is_last(&notifier->entry, + &notifier->gpusvm->notifier_list)) + return list_next_entry(notifier, entry); + + return NULL; +} + +/** + * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm + * @notifier__: Iterator variable for the notifiers + * @gpusvm__: Pointer to the GPU SVM structure + * @start__: Start address of the notifier + * @end__: End address of the notifier + * + * This macro is used to iterate over GPU SVM notifiers in a gpusvm. + */ +#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \ + for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)); \ + (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \ + (notifier__) = __drm_gpusvm_notifier_next(notifier__)) + +/** + * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm + * @notifier__: Iterator variable for the notifiers + * @next__: Iterator variable for the notifiers' temporary storage + * @gpusvm__: Pointer to the GPU SVM structure + * @start__: Start address of the notifier + * @end__: End address of the notifier + * + * This macro is used to iterate over GPU SVM notifiers in a gpusvm while + * removing notifiers from it. + */ +#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \ + for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)), \ + (next__) = __drm_gpusvm_notifier_next(notifier__); \ + (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \ + (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__)) + +#endif /* __DRM_GPUSVM_H__ */ diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h new file mode 100644 index 000000000000..fdfc575b2603 --- /dev/null +++ b/include/drm/drm_gpuvm.h @@ -0,0 +1,1304 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ + +#ifndef __DRM_GPUVM_H__ +#define __DRM_GPUVM_H__ + +/* + * Copyright (c) 2022 Red Hat.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/dma-resv.h> +#include <linux/list.h> +#include <linux/llist.h> +#include <linux/rbtree.h> +#include <linux/types.h> + +#include <drm/drm_device.h> +#include <drm/drm_gem.h> +#include <drm/drm_exec.h> + +struct drm_gpuvm; +struct drm_gpuvm_bo; +struct drm_gpuvm_ops; + +/** + * enum drm_gpuva_flags - flags for struct drm_gpuva + */ +enum drm_gpuva_flags { + /** + * @DRM_GPUVA_INVALIDATED: + * + * Flag indicating that the &drm_gpuva's backing GEM is invalidated. + */ + DRM_GPUVA_INVALIDATED = (1 << 0), + + /** + * @DRM_GPUVA_SPARSE: + * + * Flag indicating that the &drm_gpuva is a sparse mapping. + */ + DRM_GPUVA_SPARSE = (1 << 1), + + /** + * @DRM_GPUVA_USERBITS: user defined bits + */ + DRM_GPUVA_USERBITS = (1 << 2), +}; + +/** + * struct drm_gpuva - structure to track a GPU VA mapping + * + * This structure represents a GPU VA mapping and is associated with a + * &drm_gpuvm. + * + * Typically, this structure is embedded in bigger driver structures. + */ +struct drm_gpuva { + /** + * @vm: the &drm_gpuvm this object is associated with + */ + struct drm_gpuvm *vm; + + /** + * @vm_bo: the &drm_gpuvm_bo abstraction for the mapped + * &drm_gem_object + */ + struct drm_gpuvm_bo *vm_bo; + + /** + * @flags: the &drm_gpuva_flags for this mapping + */ + enum drm_gpuva_flags flags; + + /** + * @va: structure containing the address and range of the &drm_gpuva + */ + struct { + /** + * @va.addr: the start address + */ + u64 addr; + + /** + * @va.range: the range + */ + u64 range; + } va; + + /** + * @gem: structure containing the &drm_gem_object and its offset + */ + struct { + /** + * @gem.offset: the offset within the &drm_gem_object + */ + u64 offset; + + /** + * @gem.obj: the mapped &drm_gem_object + */ + struct drm_gem_object *obj; + + /** + * @gem.entry: the &list_head to attach this object to a &drm_gpuvm_bo + */ + struct list_head entry; + } gem; + + /** + * @rb: structure containing data to store &drm_gpuvas in a rb-tree + */ + struct { + /** + * @rb.node: the rb-tree node + */ + struct rb_node node; + + /** + * @rb.entry: The &list_head to additionally connect &drm_gpuvas + * in the same order they appear in the interval tree. This is + * useful to keep iterating &drm_gpuvas from a start node found + * through the rb-tree while doing modifications on the rb-tree + * itself.
+ */ + struct list_head entry; + + /** + * @rb.__subtree_last: needed by the interval tree, holding last-in-subtree + */ + u64 __subtree_last; + } rb; +}; + +int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va); +void drm_gpuva_remove(struct drm_gpuva *va); + +void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo); +void drm_gpuva_unlink(struct drm_gpuva *va); +void drm_gpuva_unlink_defer(struct drm_gpuva *va); + +struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm, + u64 addr, u64 range); +struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm, + u64 addr, u64 range); +struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start); +struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end); + +/** + * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is + * invalidated + * @va: the &drm_gpuva to set the invalidate flag for + * @invalidate: indicates whether the &drm_gpuva is invalidated + */ +static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate) +{ + if (invalidate) + va->flags |= DRM_GPUVA_INVALIDATED; + else + va->flags &= ~DRM_GPUVA_INVALIDATED; +} + +/** + * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva + * is invalidated + * @va: the &drm_gpuva to check + * + * Returns: %true if the GPU VA is invalidated, %false otherwise + */ +static inline bool drm_gpuva_invalidated(struct drm_gpuva *va) +{ + return va->flags & DRM_GPUVA_INVALIDATED; +} + +/** + * enum drm_gpuvm_flags - flags for struct drm_gpuvm + */ +enum drm_gpuvm_flags { + /** + * @DRM_GPUVM_RESV_PROTECTED: GPUVM is protected externally by the + * GPUVM's &dma_resv lock + */ + DRM_GPUVM_RESV_PROTECTED = BIT(0), + + /** + * @DRM_GPUVM_IMMEDIATE_MODE: use the locking scheme for GEMs designed + * for modifying the GPUVM during the fence signalling path + * + * When set, gpuva.lock is used to protect gpuva.list in all GEM + * objects associated with this GPUVM. Otherwise, the GEMs dma-resv is + * used. + */ + DRM_GPUVM_IMMEDIATE_MODE = BIT(1), + + /** + * @DRM_GPUVM_USERBITS: user defined bits + */ + DRM_GPUVM_USERBITS = BIT(2), +}; + +/** + * struct drm_gpuvm - DRM GPU VA Manager + * + * The DRM GPU VA Manager keeps track of a GPU's virtual address space by using + * an interval tree of &drm_gpuva structures. Typically, this structure is + * embedded in bigger driver structures. + * + * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or + * pages. + * + * There should be one manager instance per GPU virtual address space.
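+ *
+ * A sketch of the typical embedding and setup (the my_vm structure,
+ * my_gpuvm_ops and the VA space sizes are illustrative assumptions; error
+ * handling is omitted, and the local reference on the resv object is
+ * dropped after drm_gpuvm_init() has taken its own):
+ *
+ * .. code-block:: c
+ *
+ *	struct my_vm {
+ *		struct drm_gpuvm gpuvm;
+ *	};
+ *
+ *	struct drm_gem_object *r_obj = drm_gpuvm_resv_object_alloc(drm);
+ *
+ *	drm_gpuvm_init(&vm->gpuvm, "my-vm", 0, drm, r_obj,
+ *		       0, 1ull << 48, 0, SZ_4K, &my_gpuvm_ops);
+ *	drm_gem_object_put(r_obj);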
+ */ +struct drm_gpuvm { + /** + * @name: the name of the DRM GPU VA space + */ + const char *name; + + /** + * @flags: the &drm_gpuvm_flags of this GPUVM + */ + enum drm_gpuvm_flags flags; + + /** + * @drm: the &drm_device this VM lives in + */ + struct drm_device *drm; + + /** + * @mm_start: start of the VA space + */ + u64 mm_start; + + /** + * @mm_range: length of the VA space + */ + u64 mm_range; + + /** + * @rb: structures to track &drm_gpuva entries + */ + struct { + /** + * @rb.tree: the rb-tree to track GPU VA mappings + */ + struct rb_root_cached tree; + + /** + * @rb.list: the &list_head to track GPU VA mappings + */ + struct list_head list; + } rb; + + /** + * @kref: reference count of this object + */ + struct kref kref; + + /** + * @kernel_alloc_node: + * + * &drm_gpuva representing the address space cutout reserved for + * the kernel + */ + struct drm_gpuva kernel_alloc_node; + + /** + * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers + */ + const struct drm_gpuvm_ops *ops; + + /** + * @r_obj: Resv GEM object; representing the GPUVM's common &dma_resv. + */ + struct drm_gem_object *r_obj; + + /** + * @extobj: structure holding the extobj list + */ + struct { + /** + * @extobj.list: &list_head storing &drm_gpuvm_bos serving as + * external object + */ + struct list_head list; + + /** + * @extobj.local_list: pointer to the local list temporarily + * storing entries from the external object list + */ + struct list_head *local_list; + + /** + * @extobj.lock: spinlock to protect the extobj list + */ + spinlock_t lock; + } extobj; + + /** + * @evict: structure holding the evict list and evict list lock + */ + struct { + /** + * @evict.list: &list_head storing &drm_gpuvm_bos currently + * being evicted + */ + struct list_head list; + + /** + * @evict.local_list: pointer to the local list temporarily + * storing entries from the evicted object list + */ + struct list_head *local_list; + + /** + * @evict.lock: spinlock to protect the evict list + */ + spinlock_t lock; + } evict; + + /** + * @bo_defer: structure holding vm_bos that need to be destroyed + */ + struct llist_head bo_defer; +}; + +void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, + enum drm_gpuvm_flags flags, + struct drm_device *drm, + struct drm_gem_object *r_obj, + u64 start_offset, u64 range, + u64 reserve_offset, u64 reserve_range, + const struct drm_gpuvm_ops *ops); + +/** + * drm_gpuvm_get() - acquire a struct drm_gpuvm reference + * @gpuvm: the &drm_gpuvm to acquire the reference of + * + * This function acquires an additional reference to @gpuvm. It is illegal to + * call this without already holding a reference. No locks required. + * + * Returns: the &struct drm_gpuvm pointer + */ +static inline struct drm_gpuvm * +drm_gpuvm_get(struct drm_gpuvm *gpuvm) +{ + kref_get(&gpuvm->kref); + + return gpuvm; +} + +void drm_gpuvm_put(struct drm_gpuvm *gpuvm); + +bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range); +bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range); + +struct drm_gem_object * +drm_gpuvm_resv_object_alloc(struct drm_device *drm); + +/** + * drm_gpuvm_resv_protected() - indicates whether &DRM_GPUVM_RESV_PROTECTED is + * set + * @gpuvm: the &drm_gpuvm + * + * Returns: true if &DRM_GPUVM_RESV_PROTECTED is set, false otherwise. 
+ */ +static inline bool +drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm) +{ + return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED; +} + +/** + * drm_gpuvm_immediate_mode() - indicates whether &DRM_GPUVM_IMMEDIATE_MODE is + * set + * @gpuvm: the &drm_gpuvm + * + * Returns: true if &DRM_GPUVM_IMMEDIATE_MODE is set, false otherwise. + */ +static inline bool +drm_gpuvm_immediate_mode(struct drm_gpuvm *gpuvm) +{ + return gpuvm->flags & DRM_GPUVM_IMMEDIATE_MODE; +} + +/** + * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv + * @gpuvm__: the &drm_gpuvm + * + * Returns: a pointer to the &drm_gpuvm's shared &dma_resv + */ +#define drm_gpuvm_resv(gpuvm__) ((gpuvm__)->r_obj->resv) + +/** + * drm_gpuvm_resv_obj() - returns the &drm_gem_object holding the &drm_gpuvm's + * &dma_resv + * @gpuvm__: the &drm_gpuvm + * + * Returns: a pointer to the &drm_gem_object holding the &drm_gpuvm's shared + * &dma_resv + */ +#define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj) + +#define drm_gpuvm_resv_held(gpuvm__) \ + dma_resv_held(drm_gpuvm_resv(gpuvm__)) + +#define drm_gpuvm_resv_assert_held(gpuvm__) \ + dma_resv_assert_held(drm_gpuvm_resv(gpuvm__)) + +/** + * drm_gpuvm_is_extobj() - indicates whether the given &drm_gem_object is an + * external object + * @gpuvm: the &drm_gpuvm to check + * @obj: the &drm_gem_object to check + * + * Returns: true if the &drm_gem_object &dma_resv differs from the + * &drm_gpuvm's &dma_resv, false otherwise + */ +static inline bool +drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj) +{ + return obj && obj->resv != drm_gpuvm_resv(gpuvm); +} + +static inline struct drm_gpuva * +__drm_gpuva_next(struct drm_gpuva *va) +{ + if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list)) + return list_next_entry(va, rb.entry); + + return NULL; +} + +/** + * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas + * @va__: &drm_gpuva structure to assign to in each iteration step + * @gpuvm__: &drm_gpuvm to walk over + * @start__: starting offset, the first gpuva will overlap this + * @end__: ending offset, the last gpuva will start before this (but may + * overlap) + * + * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie + * between @start__ and @end__. It is implemented similarly to list_for_each(), + * but is using the &drm_gpuvm's internal interval tree to accelerate + * the search for the starting &drm_gpuva, and hence isn't safe against removal + * of elements. It assumes that @end__ is within (or is the upper limit of) the + * &drm_gpuvm. This iterator does not skip over the &drm_gpuvm's + * @kernel_alloc_node. + */ +#define drm_gpuvm_for_each_va_range(va__, gpuvm__, start__, end__) \ + for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \ + va__ && (va__->va.addr < (end__)); \ + va__ = __drm_gpuva_next(va__)) + +/** + * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of + * &drm_gpuvas + * @va__: &drm_gpuva to assign to in each iteration step + * @next__: another &drm_gpuva to use as temporary storage + * @gpuvm__: &drm_gpuvm to walk over + * @start__: starting offset, the first gpuva will overlap this + * @end__: ending offset, the last gpuva will start before this (but may + * overlap) + * + * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie + * between @start__ and @end__.
It is implemented similarly to + * list_for_each_safe(), but is using the &drm_gpuvm's internal interval + * tree to accelerate the search for the starting &drm_gpuva, and hence is safe + * against removal of elements. It assumes that @end__ is within (or is the + * upper limit of) the &drm_gpuvm. This iterator does not skip over the + * &drm_gpuvm's @kernel_alloc_node. + */ +#define drm_gpuvm_for_each_va_range_safe(va__, next__, gpuvm__, start__, end__) \ + for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \ + next__ = __drm_gpuva_next(va__); \ + va__ && (va__->va.addr < (end__)); \ + va__ = next__, next__ = __drm_gpuva_next(va__)) + +/** + * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas + * @va__: &drm_gpuva to assign to in each iteration step + * @gpuvm__: &drm_gpuvm to walk over + * + * This iterator walks over all &drm_gpuva structures associated with the given + * &drm_gpuvm. + */ +#define drm_gpuvm_for_each_va(va__, gpuvm__) \ + list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry) + +/** + * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas + * @va__: &drm_gpuva to assign to in each iteration step + * @next__: another &drm_gpuva to use as temporary storage + * @gpuvm__: &drm_gpuvm to walk over + * + * This iterator walks over all &drm_gpuva structures associated with the given + * &drm_gpuvm. It is implemented with list_for_each_entry_safe(), and + * hence safe against the removal of elements. + */ +#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \ + list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry) + +/** + * struct drm_gpuvm_exec - &drm_gpuvm abstraction of &drm_exec + * + * This structure should be created on the stack as &drm_exec should be. + * + * Optionally, @extra can be set in order to lock additional &drm_gem_objects. + */ +struct drm_gpuvm_exec { + /** + * @exec: the &drm_exec structure + */ + struct drm_exec exec; + + /** + * @flags: the flags for the struct drm_exec + */ + u32 flags; + + /** + * @vm: the &drm_gpuvm to lock its DMA reservations + */ + struct drm_gpuvm *vm; + + /** + * @num_fences: the number of fences to reserve for the &dma_resv of the + * locked &drm_gem_objects + */ + unsigned int num_fences; + + /** + * @extra: Callback and corresponding private data for the driver to + * lock arbitrary additional &drm_gem_objects. + */ + struct { + /** + * @extra.fn: The driver callback to lock additional + * &drm_gem_objects. + */ + int (*fn)(struct drm_gpuvm_exec *vm_exec); + + /** + * @extra.priv: driver private data for the @fn callback + */ + void *priv; + } extra; +}; + +int drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + unsigned int num_fences); + +int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + unsigned int num_fences); + +int drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + u64 addr, u64 range, + unsigned int num_fences); + +int drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec); + +int drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec, + struct drm_gem_object **objs, + unsigned int num_objs); + +int drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec, + u64 addr, u64 range); + +/** + * drm_gpuvm_exec_unlock() - unlock all dma-resv of all associated BOs + * @vm_exec: the &drm_gpuvm_exec wrapper + * + * Releases all dma-resv locks of all &drm_gem_objects previously acquired + * through drm_gpuvm_exec_lock() or its variants.
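+ *
+ * Typical usage together with drm_gpuvm_exec_lock() (sketch; the fence and
+ * job-submission details are illustrative assumptions):
+ *
+ * .. code-block:: c
+ *
+ *	struct drm_gpuvm_exec vm_exec = {
+ *		.vm = gpuvm,
+ *		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
+ *		.num_fences = 1,
+ *	};
+ *	int ret;
+ *
+ *	ret = drm_gpuvm_exec_lock(&vm_exec);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = drm_gpuvm_exec_validate(&vm_exec);
+ *	if (!ret)
+ *		drm_gpuvm_exec_resv_add_fence(&vm_exec, fence,
+ *					      DMA_RESV_USAGE_BOOKKEEP,
+ *					      DMA_RESV_USAGE_BOOKKEEP);
+ *
+ *	drm_gpuvm_exec_unlock(&vm_exec);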
+ */ +static inline void +drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec) +{ + drm_exec_fini(&vm_exec->exec); +} + +int drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec); +void drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + struct dma_fence *fence, + enum dma_resv_usage private_usage, + enum dma_resv_usage extobj_usage); + +/** + * drm_gpuvm_exec_resv_add_fence() - add fence to private and all extobj + * @vm_exec: the &drm_gpuvm_exec wrapper + * @fence: fence to add + * @private_usage: private dma-resv usage + * @extobj_usage: extobj dma-resv usage + * + * See drm_gpuvm_resv_add_fence(). + */ +static inline void +drm_gpuvm_exec_resv_add_fence(struct drm_gpuvm_exec *vm_exec, + struct dma_fence *fence, + enum dma_resv_usage private_usage, + enum dma_resv_usage extobj_usage) +{ + drm_gpuvm_resv_add_fence(vm_exec->vm, &vm_exec->exec, fence, + private_usage, extobj_usage); +} + +/** + * drm_gpuvm_exec_validate() - validate all BOs marked as evicted + * @vm_exec: the &drm_gpuvm_exec wrapper + * + * See drm_gpuvm_validate(). + * + * Returns: 0 on success, negative error code on failure. + */ +static inline int +drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec) +{ + return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec); +} + +/** + * struct drm_gpuvm_bo - structure representing a &drm_gpuvm and + * &drm_gem_object combination + * + * This structure is an abstraction representing a &drm_gpuvm and + * &drm_gem_object combination. It serves as an indirection to accelerate + * iterating all &drm_gpuvas within a &drm_gpuvm backed by the same + * &drm_gem_object. + * + * Furthermore it is used to cache evicted GEM objects for a certain GPU-VM to + * accelerate validation. + * + * Typically, drivers want to create an instance of a struct drm_gpuvm_bo once + * a GEM object is mapped first in a GPU-VM and release the instance once the + * last mapping of the GEM object in this GPU-VM is unmapped. + */ +struct drm_gpuvm_bo { + /** + * @vm: The &drm_gpuvm the @obj is mapped in. This is a reference + * counted pointer. + */ + struct drm_gpuvm *vm; + + /** + * @obj: The &drm_gem_object being mapped in @vm. This is a reference + * counted pointer. + */ + struct drm_gem_object *obj; + + /** + * @evicted: Indicates whether the &drm_gem_object is evicted; field + * protected by the &drm_gem_object's dma-resv lock. + */ + bool evicted; + + /** + * @kref: The reference count for this &drm_gpuvm_bo. + */ + struct kref kref; + + /** + * @list: Structure containing all &list_heads. + */ + struct { + /** + * @list.gpuva: The list of linked &drm_gpuvas. + * + * It is safe to access entries from this list as long as the + * GEM's gpuva lock is held. See also struct drm_gem_object. + */ + struct list_head gpuva; + + /** + * @list.entry: Structure containing all &list_heads serving as + * entry. + */ + struct { + /** + * @list.entry.gem: List entry to attach to the + * &drm_gem_objects gpuva list. + */ + struct list_head gem; + + /** + * @list.entry.extobj: List entry to attach to the + * &drm_gpuvm's extobj list. + */ + struct list_head extobj; + + /** + * @list.entry.evict: List entry to attach to the + * &drm_gpuvm's evict list. + */ + struct list_head evict; + + /** + * @list.entry.bo_defer: List entry to attach to + * the &drm_gpuvm's bo_defer list.
+ */ + struct llist_node bo_defer; + } entry; + } list; +}; + +struct drm_gpuvm_bo * +drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj); + +struct drm_gpuvm_bo * +drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj); +struct drm_gpuvm_bo * +drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo); + +/** + * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference + * @vm_bo: the &drm_gpuvm_bo to acquire the reference of + * + * This function acquires an additional reference to @vm_bo. It is illegal to + * call this without already holding a reference. No locks required. + * + * Returns: the &struct drm_gpuvm_bo pointer + */ +static inline struct drm_gpuvm_bo * +drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo) +{ + kref_get(&vm_bo->kref); + return vm_bo; +} + +bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo); + +bool drm_gpuvm_bo_put_deferred(struct drm_gpuvm_bo *vm_bo); +void drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm); + +struct drm_gpuvm_bo * +drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj); + +void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict); + +/** + * drm_gpuvm_bo_gem_evict() - add/remove all &drm_gpuvm_bo's in the list + * to/from the &drm_gpuvms evicted list + * @obj: the &drm_gem_object + * @evict: indicates whether @obj is evicted + * + * See drm_gpuvm_bo_evict(). + */ +static inline void +drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict) +{ + struct drm_gpuvm_bo *vm_bo; + + drm_gem_for_each_gpuvm_bo(vm_bo, obj) { + drm_gem_gpuva_assert_lock_held(vm_bo->vm, obj); + drm_gpuvm_bo_evict(vm_bo, evict); + } +} + +void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo); + +/** + * drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva + * @va__: &drm_gpuva structure to assign to in each iteration step + * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with + * + * This iterator walks over all &drm_gpuva structures associated with the + * &drm_gpuvm_bo. + * + * The caller must hold the GEM's gpuva lock. + */ +#define drm_gpuvm_bo_for_each_va(va__, vm_bo__) \ + list_for_each_entry(va__, &(vm_bo__)->list.gpuva, gem.entry) + +/** + * drm_gpuvm_bo_for_each_va_safe() - iterator to safely walk over a list of + * &drm_gpuva + * @va__: &drm_gpuva structure to assign to in each iteration step + * @next__: &next &drm_gpuva to store the next step + * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with + * + * This iterator walks over all &drm_gpuva structures associated with the + * &drm_gpuvm_bo. It is implemented with list_for_each_entry_safe(), hence + * it is safe against removal of elements. + * + * The caller must hold the GEM's gpuva lock. + */ +#define drm_gpuvm_bo_for_each_va_safe(va__, next__, vm_bo__) \ + list_for_each_entry_safe(va__, next__, &(vm_bo__)->list.gpuva, gem.entry) + +/** + * enum drm_gpuva_op_type - GPU VA operation type + * + * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm.
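+ *
+ * Drivers typically dispatch on this type when processing the ops of a
+ * &drm_gpuva_ops list (sketch; the my_*() handlers are illustrative
+ * assumptions):
+ *
+ * .. code-block:: c
+ *
+ *	switch (op->op) {
+ *	case DRM_GPUVA_OP_MAP:
+ *		return my_map(&op->map);
+ *	case DRM_GPUVA_OP_REMAP:
+ *		return my_remap(&op->remap);
+ *	case DRM_GPUVA_OP_UNMAP:
+ *		return my_unmap(&op->unmap);
+ *	default:
+ *		return -EINVAL;
+ *	}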
+ */ +enum drm_gpuva_op_type { + /** + * @DRM_GPUVA_OP_MAP: the map op type + */ + DRM_GPUVA_OP_MAP, + + /** + * @DRM_GPUVA_OP_REMAP: the remap op type + */ + DRM_GPUVA_OP_REMAP, + + /** + * @DRM_GPUVA_OP_UNMAP: the unmap op type + */ + DRM_GPUVA_OP_UNMAP, + + /** + * @DRM_GPUVA_OP_PREFETCH: the prefetch op type + */ + DRM_GPUVA_OP_PREFETCH, + + /** + * @DRM_GPUVA_OP_DRIVER: the driver defined op type + */ + DRM_GPUVA_OP_DRIVER, +}; + +/** + * struct drm_gpuva_op_map - GPU VA map operation + * + * This structure represents a single map operation generated by the + * DRM GPU VA manager. + */ +struct drm_gpuva_op_map { + /** + * @va: structure containing address and range of a map + * operation + */ + struct { + /** + * @va.addr: the base address of the new mapping + */ + u64 addr; + + /** + * @va.range: the range of the new mapping + */ + u64 range; + } va; + + /** + * @gem: structure containing the &drm_gem_object and its offset + */ + struct { + /** + * @gem.offset: the offset within the &drm_gem_object + */ + u64 offset; + + /** + * @gem.obj: the &drm_gem_object to map + */ + struct drm_gem_object *obj; + } gem; +}; + +/** + * struct drm_gpuva_op_unmap - GPU VA unmap operation + * + * This structure represents a single unmap operation generated by the + * DRM GPU VA manager. + */ +struct drm_gpuva_op_unmap { + /** + * @va: the &drm_gpuva to unmap + */ + struct drm_gpuva *va; + + /** + * @keep: + * + * Indicates whether this &drm_gpuva is physically contiguous with the + * original mapping request. + * + * Optionally, if &keep is set, drivers may keep the actual page table + * mappings for this &drm_gpuva, adding the missing page table entries + * only and update the &drm_gpuvm accordingly. + */ + bool keep; +}; + +/** + * struct drm_gpuva_op_remap - GPU VA remap operation + * + * This represents a single remap operation generated by the DRM GPU VA manager. + * + * A remap operation is generated when an existing GPU VA mapping is split up + * by inserting a new GPU VA mapping or by partially unmapping existing + * mapping(s), hence it consists of a maximum of two map and one unmap + * operation. + * + * The @unmap operation takes care of removing the original existing mapping. + * @prev is used to remap the preceding part, @next the subsequent part. + * + * If either a new mapping's start address is aligned with the start address + * of the old mapping or the new mapping's end address is aligned with the + * end address of the old mapping, either @prev or @next is NULL. + * + * Note, the reason for a dedicated remap operation, rather than arbitrary + * unmap and map operations, is to give drivers the chance of extracting driver + * specific data for creating the new mappings from the unmap operation's + * &drm_gpuva structure which typically is embedded in larger driver specific + * structures. + */ +struct drm_gpuva_op_remap { + /** + * @prev: the preceding part of a split mapping + */ + struct drm_gpuva_op_map *prev; + + /** + * @next: the subsequent part of a split mapping + */ + struct drm_gpuva_op_map *next; + + /** + * @unmap: the unmap operation for the original existing mapping + */ + struct drm_gpuva_op_unmap *unmap; +}; + +/** + * struct drm_gpuva_op_prefetch - GPU VA prefetch operation + * + * This structure represents a single prefetch operation generated by the + * DRM GPU VA manager.
+ */ +struct drm_gpuva_op_prefetch { + /** + * @va: the &drm_gpuva to prefetch + */ + struct drm_gpuva *va; +}; + +/** + * struct drm_gpuva_op - GPU VA operation + * + * This structure represents a single generic operation. + * + * The particular type of the operation is defined by @op. + */ +struct drm_gpuva_op { + /** + * @entry: + * + * The &list_head used to distribute instances of this struct within + * &drm_gpuva_ops. + */ + struct list_head entry; + + /** + * @op: the type of the operation + */ + enum drm_gpuva_op_type op; + + union { + /** + * @map: the map operation + */ + struct drm_gpuva_op_map map; + + /** + * @remap: the remap operation + */ + struct drm_gpuva_op_remap remap; + + /** + * @unmap: the unmap operation + */ + struct drm_gpuva_op_unmap unmap; + + /** + * @prefetch: the prefetch operation + */ + struct drm_gpuva_op_prefetch prefetch; + }; +}; + +/** + * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op + */ +struct drm_gpuva_ops { + /** + * @list: the &list_head + */ + struct list_head list; +}; + +/** + * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops + * @op: &drm_gpuva_op to assign in each iteration step + * @ops: &drm_gpuva_ops to walk + * + * This iterator walks over all ops within a given list of operations. + */ +#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry) + +/** + * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops + * @op: &drm_gpuva_op to assign in each iteration step + * @next: &next &drm_gpuva_op to store the next step + * @ops: &drm_gpuva_ops to walk + * + * This iterator walks over all ops within a given list of operations. It is + * implemented with list_for_each_safe(), so it is safe against removal of + * elements. + */ +#define drm_gpuva_for_each_op_safe(op, next, ops) \ + list_for_each_entry_safe(op, next, &(ops)->list, entry) + +/** + * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point + * @op: &drm_gpuva_op to assign in each iteration step + * @ops: &drm_gpuva_ops to walk + * + * This iterator walks over all ops within a given list of operations beginning + * from the given operation in reverse order.
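+ *
+ * This is useful e.g. to unwind already applied operations when applying a
+ * list of operations fails midway (sketch; my_apply_op() and my_revert_op()
+ * are illustrative assumptions, and my_revert_op() is assumed to tolerate
+ * the partially applied op it starts from):
+ *
+ * .. code-block:: c
+ *
+ *	drm_gpuva_for_each_op(op, ops) {
+ *		ret = my_apply_op(op);
+ *		if (ret)
+ *			goto unwind;
+ *	}
+ *	return 0;
+ *
+ * unwind:
+ *	drm_gpuva_for_each_op_from_reverse(op, ops)
+ *		my_revert_op(op);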
+ */ +#define drm_gpuva_for_each_op_from_reverse(op, ops) \ + list_for_each_entry_from_reverse(op, &(ops)->list, entry) + +/** + * drm_gpuva_for_each_op_reverse() - iterator to walk over &drm_gpuva_ops in reverse + * @op: &drm_gpuva_op to assign in each iteration step + * @ops: &drm_gpuva_ops to walk + * + * This iterator walks over all ops within a given list of operations in reverse + * order. + */ +#define drm_gpuva_for_each_op_reverse(op, ops) \ + list_for_each_entry_reverse(op, &(ops)->list, entry) + +/** + * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops + * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from + */ +#define drm_gpuva_first_op(ops) \ + list_first_entry(&(ops)->list, struct drm_gpuva_op, entry) + +/** + * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops + * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from + */ +#define drm_gpuva_last_op(ops) \ + list_last_entry(&(ops)->list, struct drm_gpuva_op, entry) + +/** + * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list + * @op: the current &drm_gpuva_op + */ +#define drm_gpuva_prev_op(op) list_prev_entry(op, entry) + +/** + * drm_gpuva_next_op() - next &drm_gpuva_op in the list + * @op: the current &drm_gpuva_op + */ +#define drm_gpuva_next_op(op) list_next_entry(op, entry) + +/** + * struct drm_gpuvm_map_req - arguments passed to drm_gpuvm_sm_map[_ops_create]() + */ +struct drm_gpuvm_map_req { + /** + * @map: the &drm_gpuva_op_map describing the requested mapping + */ + struct drm_gpuva_op_map map; +}; + +struct drm_gpuva_ops * +drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm, + const struct drm_gpuvm_map_req *req); +struct drm_gpuva_ops * +drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm, + const struct drm_gpuvm_map_req *req); + +struct drm_gpuva_ops * +drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm, + u64 addr, u64 range); + +struct drm_gpuva_ops * +drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm, + u64 addr, u64 range); + +struct drm_gpuva_ops * +drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo); + +void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm, + struct drm_gpuva_ops *ops); + +static inline void drm_gpuva_init_from_op(struct drm_gpuva *va, + struct drm_gpuva_op_map *op) +{ + va->va.addr = op->va.addr; + va->va.range = op->va.range; + va->gem.obj = op->gem.obj; + va->gem.offset = op->gem.offset; +} + +/** + * struct drm_gpuvm_ops - callbacks for split/merge steps + * + * This structure defines the callbacks used by &drm_gpuvm_sm_map and + * &drm_gpuvm_sm_unmap to provide the split/merge steps for map and unmap + * operations to drivers. + */ +struct drm_gpuvm_ops { + /** + * @vm_free: called when the last reference of a struct drm_gpuvm is + * dropped + * + * This callback is mandatory. + */ + void (*vm_free)(struct drm_gpuvm *gpuvm); + + /** + * @op_alloc: called when the &drm_gpuvm allocates + * a struct drm_gpuva_op + * + * Some drivers may want to embed struct drm_gpuva_op into driver + * specific structures. By implementing this callback drivers can + * allocate memory accordingly. + * + * This callback is optional. + */ + struct drm_gpuva_op *(*op_alloc)(void); + + /** + * @op_free: called when the &drm_gpuvm frees a + * struct drm_gpuva_op + * + * Some drivers may want to embed struct drm_gpuva_op into driver + * specific structures. By implementing this callback drivers can + * free the previously allocated memory accordingly. + * + * This callback is optional.
+ */ + void (*op_free)(struct drm_gpuva_op *op); + + /** + * @vm_bo_alloc: called when the &drm_gpuvm allocates + * a struct drm_gpuvm_bo + * + * Some drivers may want to embed struct drm_gpuvm_bo into driver + * specific structures. By implementing this callback drivers can + * allocate memory accordingly. + * + * This callback is optional. + */ + struct drm_gpuvm_bo *(*vm_bo_alloc)(void); + + /** + * @vm_bo_free: called when the &drm_gpuvm frees a + * struct drm_gpuvm_bo + * + * Some drivers may want to embed struct drm_gpuvm_bo into driver + * specific structures. By implementing this callback drivers can + * free the previously allocated memory accordingly. + * + * This callback is optional. + */ + void (*vm_bo_free)(struct drm_gpuvm_bo *vm_bo); + + /** + * @vm_bo_validate: called from drm_gpuvm_validate() + * + * Drivers receive this callback for every evicted &drm_gem_object being + * mapped in the corresponding &drm_gpuvm. + * + * Typically, drivers would call their driver specific variant of + * ttm_bo_validate() from within this callback. + */ + int (*vm_bo_validate)(struct drm_gpuvm_bo *vm_bo, + struct drm_exec *exec); + + /** + * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the + * mapping once all previous steps were completed + * + * The &priv pointer matches the one the driver passed to + * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively. + * + * Can be NULL if &drm_gpuvm_sm_map is not used. + */ + int (*sm_step_map)(struct drm_gpuva_op *op, void *priv); + + /** + * @sm_step_remap: called from &drm_gpuvm_sm_map and + * &drm_gpuvm_sm_unmap to split up an existing mapping + * + * This callback is called when an existing mapping needs to be split up. + * This is the case when either a newly requested mapping overlaps or + * is enclosed by an existing mapping, or a partial unmap of an existing + * mapping is requested. + * + * The &priv pointer matches the one the driver passed to + * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively. + * + * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is + * used. + */ + int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv); + + /** + * @sm_step_unmap: called from &drm_gpuvm_sm_map and + * &drm_gpuvm_sm_unmap to unmap an existing mapping + * + * This callback is called when an existing mapping needs to be unmapped. + * This is the case when either a newly requested mapping encloses an + * existing mapping or an unmap of an existing mapping is requested. + * + * The &priv pointer matches the one the driver passed to + * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively. + * + * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is + * used.
+ */ + int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv); +}; + +int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv, + const struct drm_gpuvm_map_req *req); + +int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv, + u64 addr, u64 range); + +int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, unsigned int num_fences, + struct drm_gpuvm_map_req *req); + +int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec, + u64 req_addr, u64 req_range); + +void drm_gpuva_map(struct drm_gpuvm *gpuvm, + struct drm_gpuva *va, + struct drm_gpuva_op_map *op); + +void drm_gpuva_remap(struct drm_gpuva *prev, + struct drm_gpuva *next, + struct drm_gpuva_op_remap *op); + +void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op); + +/** + * drm_gpuva_op_remap_to_unmap_range() - Helper to get the start and range of + * the unmap stage of a remap op. + * @op: Remap op. + * @start_addr: Output pointer for the start of the required unmap. + * @range: Output pointer for the length of the required unmap. + * + * The given start address and range will be set such that they represent the + * range of the address space that was previously covered by the mapping being + * re-mapped, but is now empty. + */ +static inline void +drm_gpuva_op_remap_to_unmap_range(const struct drm_gpuva_op_remap *op, + u64 *start_addr, u64 *range) +{ + const u64 va_start = op->prev ? + op->prev->va.addr + op->prev->va.range : + op->unmap->va->va.addr; + const u64 va_end = op->next ? + op->next->va.addr : + op->unmap->va->va.addr + op->unmap->va->va.range; + + if (start_addr) + *start_addr = va_start; + if (range) + *range = va_end - va_start; +} + +#endif /* __DRM_GPUVM_H__ */ diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h index 6ed61c371f6c..171760b6c4a1 100644 --- a/include/drm/drm_ioctl.h +++ b/include/drm/drm_ioctl.h @@ -110,17 +110,6 @@ enum drm_ioctl_flags { */ DRM_ROOT_ONLY = BIT(2), /** - * @DRM_UNLOCKED: - * - * Whether &drm_ioctl_desc.func should be called with the DRM BKL held - * or not. Enforced as the default for all modern drivers, hence there - * should never be a need to set this flag. - * - * Do not use anywhere else than for the VBLANK_WAIT IOCTL, which is the - * only legacy IOCTL which needs this. - */ - DRM_UNLOCKED = BIT(4), - /** * @DRM_RENDER_ALLOW: * * This is used for all ioctl needed for rendering only, for drivers diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h index ed013fdcc1ff..4948379237e9 100644 --- a/include/drm/drm_kunit_helpers.h +++ b/include/drm/drm_kunit_helpers.h @@ -3,9 +3,18 @@ #ifndef DRM_KUNIT_HELPERS_H_ #define DRM_KUNIT_HELPERS_H_ +#include <drm/drm_drv.h> + +#include <linux/device.h> + #include <kunit/test.h> +struct drm_connector; +struct drm_crtc_funcs; +struct drm_crtc_helper_funcs; struct drm_device; +struct drm_plane_funcs; +struct drm_plane_helper_funcs; struct kunit; struct device *drm_kunit_helper_alloc_device(struct kunit *test); @@ -51,7 +60,7 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test, { struct drm_driver *driver; - driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL); + driver = devm_kzalloc(dev, sizeof(*driver), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, driver); driver->driver_features = features; @@ -67,7 +76,7 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test, * @_dev: The parent device object * @_type: the type of the struct which contains struct &drm_device * @_member: the name of the &drm_device within @_type. 
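To make the GPU VA manager interface above concrete, here is a minimal sketch of how a driver might consume drm_gpuvm_sm_map_ops_create() and the drm_gpuva_for_each_op() iterator. The my_vm_bind() wrapper and the page-table comments are illustrative assumptions, not part of this patch:

#include <drm/drm_gpuvm.h>

/* Hypothetical bind helper; names and page-table steps are assumptions. */
static int my_vm_bind(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
		      u64 addr, u64 range, u64 offset)
{
	struct drm_gpuvm_map_req req = {
		.map = {
			.va.addr = addr,
			.va.range = range,
			.gem.obj = obj,
			.gem.offset = offset,
		},
	};
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;
	int ret = 0;

	/* Ask the GPU VA manager which map/remap/unmap steps are needed. */
	ops = drm_gpuvm_sm_map_ops_create(gpuvm, &req);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops) {
		switch (op->op) {
		case DRM_GPUVA_OP_MAP:
			/* program page tables for op->map.va / op->map.gem */
			break;
		case DRM_GPUVA_OP_REMAP:
			/* tear down op->remap.unmap, keep prev/next parts */
			break;
		case DRM_GPUVA_OP_UNMAP:
			/* zap page tables of op->unmap.va */
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}
out:
	drm_gpuva_ops_free(gpuvm, ops);
	return ret;
}

Drivers that embed &drm_gpuva in their own structures would typically use the drm_gpuvm_sm_map() callback path with &drm_gpuvm_ops instead of materializing an ops list.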
- * @_features: Mocked DRM device driver features + * @_feat: Mocked DRM device driver features * * This function creates a struct &drm_driver and will create a struct * &drm_device from @_dev and that driver. @@ -88,4 +97,40 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test, offsetof(_type, _member), \ _feat)) +struct drm_atomic_state * +drm_kunit_helper_atomic_state_alloc(struct kunit *test, + struct drm_device *drm, + struct drm_modeset_acquire_ctx *ctx); + +struct drm_plane * +drm_kunit_helper_create_primary_plane(struct kunit *test, + struct drm_device *drm, + const struct drm_plane_funcs *funcs, + const struct drm_plane_helper_funcs *helper_funcs, + const uint32_t *formats, + unsigned int num_formats, + const uint64_t *modifiers); + +struct drm_crtc * +drm_kunit_helper_create_crtc(struct kunit *test, + struct drm_device *drm, + struct drm_plane *primary, + struct drm_plane *cursor, + const struct drm_crtc_funcs *funcs, + const struct drm_crtc_helper_funcs *helper_funcs); + +int drm_kunit_helper_enable_crtc_connector(struct kunit *test, + struct drm_device *drm, + struct drm_crtc *crtc, + struct drm_connector *connector, + const struct drm_display_mode *mode, + struct drm_modeset_acquire_ctx *ctx); + +int drm_kunit_add_mode_destroy_action(struct kunit *test, + struct drm_display_mode *mode); + +struct drm_display_mode * +drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev, + u8 video_code); + #endif // DRM_KUNIT_HELPERS_H_ diff --git a/include/drm/drm_lease.h b/include/drm/drm_lease.h index 5c9ef6a2aeae..53545b4ca9ef 100644 --- a/include/drm/drm_lease.h +++ b/include/drm/drm_lease.h @@ -6,6 +6,8 @@ #ifndef _DRM_LEASE_H_ #define _DRM_LEASE_H_ +#include <linux/types.h> + struct drm_file; struct drm_device; struct drm_master; diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h deleted file mode 100644 index 0fc85418aad8..000000000000 --- a/include/drm/drm_legacy.h +++ /dev/null @@ -1,331 +0,0 @@ -#ifndef __DRM_DRM_LEGACY_H__ -#define __DRM_DRM_LEGACY_H__ -/* - * Legacy driver interfaces for the Direct Rendering Manager - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * Copyright (c) 2009-2010, Code Aurora Forum. - * All rights reserved. - * Copyright © 2014 Intel Corporation - * Daniel Vetter <daniel.vetter@ffwll.ch> - * - * Author: Rickard E. (Rik) Faith <faith@valinux.com> - * Author: Gareth Hughes <gareth@valinux.com> - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include <linux/agp_backend.h> - -#include <drm/drm.h> -#include <drm/drm_auth.h> - -struct drm_device; -struct drm_driver; -struct file; -struct pci_driver; - -/* - * Legacy Support for palateontologic DRM drivers - * - * If you add a new driver and it uses any of these functions or structures, - * you're doing it terribly wrong. - */ - -/* - * Hash-table Support - */ - -struct drm_hash_item { - struct hlist_node head; - unsigned long key; -}; - -struct drm_open_hash { - struct hlist_head *table; - u8 order; -}; - -/** - * DMA buffer. - */ -struct drm_buf { - int idx; /**< Index into master buflist */ - int total; /**< Buffer size */ - int order; /**< log-base-2(total) */ - int used; /**< Amount of buffer in use (for DMA) */ - unsigned long offset; /**< Byte offset (used internally) */ - void *address; /**< Address of buffer */ - unsigned long bus_address; /**< Bus address of buffer */ - struct drm_buf *next; /**< Kernel-only: used for free list */ - __volatile__ int waiting; /**< On kernel DMA queue */ - __volatile__ int pending; /**< On hardware DMA queue */ - struct drm_file *file_priv; /**< Private of holding file descr */ - int context; /**< Kernel queue for this buffer */ - int while_locked; /**< Dispatch this buffer while locked */ - enum { - DRM_LIST_NONE = 0, - DRM_LIST_FREE = 1, - DRM_LIST_WAIT = 2, - DRM_LIST_PEND = 3, - DRM_LIST_PRIO = 4, - DRM_LIST_RECLAIM = 5 - } list; /**< Which list we're on */ - - int dev_priv_size; /**< Size of buffer private storage */ - void *dev_private; /**< Per-buffer private storage */ -}; - -typedef struct drm_dma_handle { - dma_addr_t busaddr; - void *vaddr; - size_t size; -} drm_dma_handle_t; - -/** - * Buffer entry. There is one of this for each buffer size order. - */ -struct drm_buf_entry { - int buf_size; /**< size */ - int buf_count; /**< number of buffers */ - struct drm_buf *buflist; /**< buffer list */ - int seg_count; - int page_order; - struct drm_dma_handle **seglist; - - int low_mark; /**< Low water mark */ - int high_mark; /**< High water mark */ -}; - -/** - * DMA data. - */ -struct drm_device_dma { - - struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ - int buf_count; /**< total number of buffers */ - struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ - int seg_count; - int page_count; /**< number of pages */ - unsigned long *pagelist; /**< page list */ - unsigned long byte_count; - enum { - _DRM_DMA_USE_AGP = 0x01, - _DRM_DMA_USE_SG = 0x02, - _DRM_DMA_USE_FB = 0x04, - _DRM_DMA_USE_PCI_RO = 0x08 - } flags; - -}; - -/** - * Scatter-gather memory. 
- */ -struct drm_sg_mem { - unsigned long handle; - void *virtual; - int pages; - struct page **pagelist; - dma_addr_t *busaddr; -}; - -/** - * Kernel side of a mapping - */ -struct drm_local_map { - dma_addr_t offset; /**< Requested physical address (0 for SAREA)*/ - unsigned long size; /**< Requested physical size (bytes) */ - enum drm_map_type type; /**< Type of memory to map */ - enum drm_map_flags flags; /**< Flags */ - void *handle; /**< User-space: "Handle" to pass to mmap() */ - /**< Kernel-space: kernel-virtual address */ - int mtrr; /**< MTRR slot used */ -}; - -typedef struct drm_local_map drm_local_map_t; - -/** - * Mappings list - */ -struct drm_map_list { - struct list_head head; /**< list head */ - struct drm_hash_item hash; - struct drm_local_map *map; /**< mapping */ - uint64_t user_token; - struct drm_master *master; -}; - -int drm_legacy_addmap(struct drm_device *d, resource_size_t offset, - unsigned int size, enum drm_map_type type, - enum drm_map_flags flags, struct drm_local_map **map_p); -struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token); -void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map); -int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map); -struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev); -int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma); - -int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req); -int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req); - -/** - * Test that the hardware lock is held by the caller, returning otherwise. - * - * \param dev DRM device. - * \param filp file pointer of the caller. - */ -#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \ -do { \ - if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \ - _file_priv->master->lock.file_priv != _file_priv) { \ - DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ - __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\ - _file_priv->master->lock.file_priv, _file_priv); \ - return -EINVAL; \ - } \ -} while (0) - -void drm_legacy_idlelock_take(struct drm_lock_data *lock); -void drm_legacy_idlelock_release(struct drm_lock_data *lock); - -/* drm_irq.c */ -int drm_legacy_irq_uninstall(struct drm_device *dev); - -/* drm_pci.c */ - -#ifdef CONFIG_PCI - -int drm_legacy_pci_init(const struct drm_driver *driver, - struct pci_driver *pdriver); -void drm_legacy_pci_exit(const struct drm_driver *driver, - struct pci_driver *pdriver); - -#else - -static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, - size_t size, size_t align) -{ - return NULL; -} - -static inline void drm_pci_free(struct drm_device *dev, - struct drm_dma_handle *dmah) -{ -} - -static inline int drm_legacy_pci_init(const struct drm_driver *driver, - struct pci_driver *pdriver) -{ - return -EINVAL; -} - -static inline void drm_legacy_pci_exit(const struct drm_driver *driver, - struct pci_driver *pdriver) -{ -} - -#endif - -/* - * AGP Support - */ - -struct drm_agp_head { - struct agp_kern_info agp_info; - struct list_head memory; - unsigned long mode; - struct agp_bridge_data *bridge; - int enabled; - int acquired; - unsigned long base; - int agp_mtrr; - int cant_use_aperture; - unsigned long page_mask; -}; - -#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP) -struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev); -int drm_legacy_agp_acquire(struct drm_device *dev); -int drm_legacy_agp_release(struct 
drm_device *dev); -int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); -int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info); -int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); -int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); -int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); -int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); -#else -static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev) -{ - return NULL; -} - -static inline int drm_legacy_agp_acquire(struct drm_device *dev) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_release(struct drm_device *dev) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_enable(struct drm_device *dev, - struct drm_agp_mode mode) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_info(struct drm_device *dev, - struct drm_agp_info *info) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_alloc(struct drm_device *dev, - struct drm_agp_buffer *request) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_free(struct drm_device *dev, - struct drm_agp_buffer *request) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_unbind(struct drm_device *dev, - struct drm_agp_binding *request) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_bind(struct drm_device *dev, - struct drm_agp_binding *request) -{ - return -ENODEV; -} -#endif - -/* drm_memory.c */ -void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev); -void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); -void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev); - -#endif /* __DRM_DRM_LEGACY_H__ */ diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h index 359883942612..72bfac002c06 100644 --- a/include/drm/drm_managed.h +++ b/include/drm/drm_managed.h @@ -45,6 +45,10 @@ int __must_check __drmm_add_action_or_reset(struct drm_device *dev, drmres_release_t action, void *data, const char *name); +void drmm_release_action(struct drm_device *dev, + drmres_release_t action, + void *data); + void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) __malloc; /** @@ -105,6 +109,45 @@ char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp); void drmm_kfree(struct drm_device *dev, void *data); -int drmm_mutex_init(struct drm_device *dev, struct mutex *lock); +void __drmm_mutex_release(struct drm_device *dev, void *res); + +/** + * drmm_mutex_init - &drm_device-managed mutex_init() + * @dev: DRM device + * @lock: lock to be initialized + * + * Returns: + * 0 on success, or a negative errno code otherwise. + * + * This is a &drm_device-managed version of mutex_init(). The initialized + * lock is automatically destroyed on the final drm_dev_put(). + */ +#define drmm_mutex_init(dev, lock) ({ \ + mutex_init(lock); \ + drmm_add_action_or_reset(dev, __drmm_mutex_release, lock); \ +}) + +void __drmm_workqueue_release(struct drm_device *device, void *wq); + +/** + * drmm_alloc_ordered_workqueue - &drm_device managed alloc_ordered_workqueue() + * @dev: DRM device + * @fmt: printf format for the name of the workqueue + * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) + * @args: args for @fmt + * + * This is a &drm_device-managed version of alloc_ordered_workqueue().
The + * allocated workqueue is automatically destroyed on the final drm_dev_put(). + * + * Returns: workqueue on success, negative ERR_PTR otherwise. + */ +#define drmm_alloc_ordered_workqueue(dev, fmt, flags, args...) \ + ({ \ + struct workqueue_struct *wq = alloc_ordered_workqueue(fmt, flags, ##args); \ + wq ? ({ \ + int ret = drmm_add_action_or_reset(dev, __drmm_workqueue_release, wq); \ + ret ? ERR_PTR(ret) : wq; \ + }) : ERR_PTR(-ENOMEM); \ + }) #endif diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h index 816f196b3d4c..f45f9612c0bc 100644 --- a/include/drm/drm_mipi_dbi.h +++ b/include/drm/drm_mipi_dbi.h @@ -12,6 +12,7 @@ #include <drm/drm_device.h> #include <drm/drm_simple_kms_helper.h> +struct drm_format_conv_state; struct drm_rect; struct gpio_desc; struct iosys_map; @@ -56,6 +57,11 @@ struct mipi_dbi { struct spi_device *spi; /** + * @write_memory_bpw: Bits per word used on a MIPI_DCS_WRITE_MEMORY_START transfer + */ + unsigned int write_memory_bpw; + + /** * @dc: Optional D/C gpio. */ struct gpio_desc *dc; @@ -96,6 +102,11 @@ struct mipi_dbi_dev { struct drm_display_mode mode; /** + * @pixel_format: Native pixel format (DRM_FORMAT\_\*) + */ + u32 pixel_format; + + /** * @tx_buf: Buffer used for transfer (copy clip rect area) */ u16 *tx_buf; @@ -192,7 +203,8 @@ int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len); int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data, size_t len); int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb, - struct drm_rect *clip, bool swap); + struct drm_rect *clip, bool swap, + struct drm_format_conv_state *fmtcnv_state); /** * mipi_dbi_command - MIPI DCS command with optional parameter(s) diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index 16f30975b22b..3aba7b380c8d 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -10,6 +10,7 @@ #define __DRM_MIPI_DSI_H__ #include <linux/device.h> +#include <linux/delay.h> struct mipi_dsi_host; struct mipi_dsi_device; @@ -129,8 +130,6 @@ struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node); #define MIPI_DSI_MODE_VIDEO_NO_HBP BIT(6) /* disable hsync-active area */ #define MIPI_DSI_MODE_VIDEO_NO_HSA BIT(7) -/* flush display FIFO on vsync pulse */ -#define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8) /* disable EoT packets in HS mode */ #define MIPI_DSI_MODE_NO_EOT_PACKET BIT(9) /* device supports non-continuous clock behavior (DSI spec 5.6.1) */ @@ -168,6 +167,7 @@ struct mipi_dsi_device_info { * struct mipi_dsi_device - DSI peripheral device * @host: DSI host for this peripheral * @dev: driver model device node for this peripheral + * @attached: the DSI device has been successfully attached * @name: DSI peripheral chip type * @channel: virtual channel assigned to the peripheral * @format: pixel format for video mode @@ -184,6 +184,7 @@ struct mipi_dsi_device_info { struct mipi_dsi_device { struct mipi_dsi_host *host; struct device dev; + bool attached; char name[DSI_DEV_NAME_SIZE]; unsigned int channel; @@ -195,12 +196,33 @@ struct mipi_dsi_device { struct drm_dsc_config *dsc; }; +/** + * struct mipi_dsi_multi_context - Context to call multiple MIPI DSI funcs in a row + */ +struct mipi_dsi_multi_context { + /** + * @dsi: Pointer to the MIPI DSI device + */ + struct mipi_dsi_device *dsi; + + /** + * @accum_err: Storage for the accumulated error over the multiple calls + * + * Init to 0. If a function encounters an error then the error code + * will be stored here. 
If you call a function and this points to a + * non-zero value then the function will be a noop. This allows calling + * a function many times in a row and just checking the error at the + * end to see if any of them failed. + */ + int accum_err; +}; + #define MIPI_DSI_MODULE_PREFIX "mipi-dsi:" -static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev) -{ - return container_of(dev, struct mipi_dsi_device, dev); -} +#define to_mipi_dsi_device(__dev) container_of_const(__dev, struct mipi_dsi_device, dev) + +extern const struct bus_type mipi_dsi_bus_type; +#define dev_is_mipi_dsi(dev) ((dev)->bus == &mipi_dsi_bus_type) /** * mipi_dsi_pixel_format_to_bpp - obtain the number of bits per pixel for any @@ -227,6 +249,12 @@ static inline int mipi_dsi_pixel_format_to_bpp(enum mipi_dsi_pixel_format fmt) return -EINVAL; } +enum mipi_dsi_compression_algo { + MIPI_DSI_COMPRESSION_DSC = 0, + MIPI_DSI_COMPRESSION_VENDOR = 3, + /* other two values are reserved, DSI 1.3 */ +}; + struct mipi_dsi_device * mipi_dsi_device_register_full(struct mipi_dsi_host *host, const struct mipi_dsi_device_info *info); @@ -242,14 +270,45 @@ int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi); int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi); int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi, u16 value); -ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable); -ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi, - const struct drm_dsc_picture_parameter_set *pps); +int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable); +int mipi_dsi_compression_mode_ext(struct mipi_dsi_device *dsi, bool enable, + enum mipi_dsi_compression_algo algo, + unsigned int pps_selector); +int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi, + const struct drm_dsc_picture_parameter_set *pps); + +void mipi_dsi_compression_mode_ext_multi(struct mipi_dsi_multi_context *ctx, + bool enable, + enum mipi_dsi_compression_algo algo, + unsigned int pps_selector); +void mipi_dsi_compression_mode_multi(struct mipi_dsi_multi_context *ctx, + bool enable); +void mipi_dsi_picture_parameter_set_multi(struct mipi_dsi_multi_context *ctx, + const struct drm_dsc_picture_parameter_set *pps); ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload, size_t size); +void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx, + const void *payload, size_t size); +void mipi_dsi_dual_generic_write_multi(struct mipi_dsi_multi_context *ctx, + struct mipi_dsi_device *dsi1, + struct mipi_dsi_device *dsi2, + const void *payload, size_t size); ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params, size_t num_params, void *data, size_t size); +u32 drm_mipi_dsi_get_input_bus_fmt(enum mipi_dsi_pixel_format dsi_format); + +#define mipi_dsi_msleep(ctx, delay) \ + do { \ + if (!(ctx)->accum_err) \ + msleep(delay); \ + } while (0) + +#define mipi_dsi_usleep_range(ctx, min, max) \ + do { \ + if (!(ctx)->accum_err) \ + usleep_range(min, max); \ + } while (0) /** * enum mipi_dsi_dcs_tear_mode - Tearing Effect Output Line mode @@ -271,10 +330,20 @@ enum mipi_dsi_dcs_tear_mode { ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi, const void *data, size_t len); +int mipi_dsi_dcs_write_buffer_chatty(struct mipi_dsi_device *dsi, + const void *data, size_t len); +void mipi_dsi_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx, + const void *data, size_t len); +void 
mipi_dsi_dual_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx, + struct mipi_dsi_device *dsi1, + struct mipi_dsi_device *dsi2, + const void *data, size_t len); ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd, const void *data, size_t len); ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data, size_t len); +void mipi_dsi_dcs_read_multi(struct mipi_dsi_multi_context *ctx, u8 cmd, + void *data, size_t len); int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi); int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi); int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode); @@ -287,7 +356,6 @@ int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start, u16 end); int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start, u16 end); -int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi); int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi, enum mipi_dsi_dcs_tear_mode mode); int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format); @@ -301,42 +369,176 @@ int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi, int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi, u16 *brightness); +void mipi_dsi_dcs_nop_multi(struct mipi_dsi_multi_context *ctx); +void mipi_dsi_dcs_enter_sleep_mode_multi(struct mipi_dsi_multi_context *ctx); +void mipi_dsi_dcs_exit_sleep_mode_multi(struct mipi_dsi_multi_context *ctx); +void mipi_dsi_dcs_set_display_off_multi(struct mipi_dsi_multi_context *ctx); +void mipi_dsi_dcs_set_display_on_multi(struct mipi_dsi_multi_context *ctx); +void mipi_dsi_dcs_set_tear_on_multi(struct mipi_dsi_multi_context *ctx, + enum mipi_dsi_dcs_tear_mode mode); +void mipi_dsi_turn_on_peripheral_multi(struct mipi_dsi_multi_context *ctx); +void mipi_dsi_dcs_soft_reset_multi(struct mipi_dsi_multi_context *ctx); +void mipi_dsi_dcs_set_display_brightness_multi(struct mipi_dsi_multi_context *ctx, + u16 brightness); +void mipi_dsi_dcs_set_pixel_format_multi(struct mipi_dsi_multi_context *ctx, + u8 format); +void mipi_dsi_dcs_set_column_address_multi(struct mipi_dsi_multi_context *ctx, + u16 start, u16 end); +void mipi_dsi_dcs_set_page_address_multi(struct mipi_dsi_multi_context *ctx, + u16 start, u16 end); +void mipi_dsi_dcs_set_tear_scanline_multi(struct mipi_dsi_multi_context *ctx, + u16 scanline); +void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx); + /** - * mipi_dsi_generic_write_seq - transmit data using a generic write packet - * @dsi: DSI peripheral device + * mipi_dsi_generic_write_seq_multi - transmit data using a generic write packet + * + * This macro will print errors for you and error handling is optimized for + * callers that call this multiple times in a row. + * + * @ctx: Context for multiple DSI transactions + * @seq: buffer containing the payload + */ +#define mipi_dsi_generic_write_seq_multi(ctx, seq...) \ + do { \ + static const u8 d[] = { seq }; \ + mipi_dsi_generic_write_multi(ctx, d, ARRAY_SIZE(d)); \ + } while (0) + +/** + * mipi_dsi_generic_write_var_seq_multi - transmit non-constant data using a + * generic write packet + * + * This macro will print errors for you and error handling is optimized for + * callers that call this multiple times in a row. + * + * @ctx: Context for multiple DSI transactions * @seq: buffer containing the payload */ -#define mipi_dsi_generic_write_seq(dsi, seq...) 
\ - do { \ - static const u8 d[] = { seq }; \ - struct device *dev = &dsi->dev; \ - int ret; \ - ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \ - if (ret < 0) { \ - dev_err_ratelimited(dev, "transmit data failed: %d\n", \ - ret); \ - return ret; \ - } \ +#define mipi_dsi_generic_write_var_seq_multi(ctx, seq...) \ + do { \ + const u8 d[] = { seq }; \ + mipi_dsi_generic_write_multi(ctx, d, ARRAY_SIZE(d)); \ + } while (0) + +/** + * mipi_dsi_dcs_write_seq_multi - transmit a DCS command with payload + * + * This macro will print errors for you and error handling is optimized for + * callers that call this multiple times in a row. + * + * @ctx: Context for multiple DSI transactions + * @cmd: Command + * @seq: buffer containing data to be transmitted + */ +#define mipi_dsi_dcs_write_seq_multi(ctx, cmd, seq...) \ + do { \ + static const u8 d[] = { cmd, seq }; \ + mipi_dsi_dcs_write_buffer_multi(ctx, d, ARRAY_SIZE(d)); \ } while (0) /** - * mipi_dsi_dcs_write_seq - transmit a DCS command with payload - * @dsi: DSI peripheral device + * mipi_dsi_dcs_write_var_seq_multi - transmit a DCS command with non-constant + * payload + * + * This macro will print errors for you and error handling is optimized for + * callers that call this multiple times in a row. + * + * @ctx: Context for multiple DSI transactions * @cmd: Command * @seq: buffer containing data to be transmitted */ -#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) \ - do { \ - static const u8 d[] = { cmd, seq }; \ - struct device *dev = &dsi->dev; \ - int ret; \ - ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \ - if (ret < 0) { \ - dev_err_ratelimited( \ - dev, "sending command %#02x failed: %d\n", \ - cmd, ret); \ - return ret; \ - } \ +#define mipi_dsi_dcs_write_var_seq_multi(ctx, cmd, seq...) \ + do { \ + const u8 d[] = { cmd, seq }; \ + mipi_dsi_dcs_write_buffer_multi(ctx, d, ARRAY_SIZE(d)); \ + } while (0) + +/** + * mipi_dsi_dual - send the same MIPI DSI command to two interfaces + * + * This macro will send the specified MIPI DSI command twice, once per each of + * the two interfaces supplied. This is useful for reducing duplication of code + * in panel drivers which use two parallel serial interfaces. + * + * Note that the _func parameter cannot accept a macro such as + * mipi_dsi_generic_write_multi() or mipi_dsi_dcs_write_buffer_multi(). See + * mipi_dsi_dual_generic_write_multi() and + * mipi_dsi_dual_dcs_write_buffer_multi() instead. + * + * WARNING: This macro reuses the _func argument and the optional trailing + * arguments twice each, which may cause unintended side effects. For example, + * adding the postfix increment ++ operator to one of the arguments to be + * passed to _func will cause the variable to be incremented twice instead of + * once and the variable will be its original value + 1 when sent to _dsi2. + * + * @_func: MIPI DSI function to pass context and arguments into + * @_ctx: Context for multiple DSI transactions + * @_dsi1: First DSI interface to act as recipient of the MIPI DSI command + * @_dsi2: Second DSI interface to act as recipient of the MIPI DSI command + * @...: Arguments to pass to MIPI DSI function or macro + */ + +#define mipi_dsi_dual(_func, _ctx, _dsi1, _dsi2, ...) 
\ + do { \ + struct mipi_dsi_multi_context *_ctxcpy = (_ctx); \ + _ctxcpy->dsi = (_dsi1); \ + (_func)(_ctxcpy, ##__VA_ARGS__); \ + _ctxcpy->dsi = (_dsi2); \ + (_func)(_ctxcpy, ##__VA_ARGS__); \ + } while (0) + +/** + * mipi_dsi_dual_generic_write_seq_multi - transmit data using a generic write + * packet to two dsi interfaces, one after the other + * + * This macro will send the specified generic packet twice, once per each of + * the two interfaces supplied. This is useful for reducing duplication of code + * in panel drivers which use two parallel serial interfaces. + * + * Note that if an error occurs while transmitting the packet to the first DSI + * interface, the packet will not be sent to the second DSI interface. + * + * This macro will print errors for you and error handling is optimized for + * callers that call this multiple times in a row. + * + * @_ctx: Context for multiple DSI transactions + * @_dsi1: First DSI interface to act as recipient of packet + * @_dsi2: Second DSI interface to act as recipient of packet + * @_seq: buffer containing the payload + */ +#define mipi_dsi_dual_generic_write_seq_multi(_ctx, _dsi1, _dsi2, _seq...) \ + do { \ + static const u8 d[] = { _seq }; \ + mipi_dsi_dual_generic_write_multi(_ctx, _dsi1, _dsi2, d, \ + ARRAY_SIZE(d)); \ + } while (0) + +/** + * mipi_dsi_dual_dcs_write_seq_multi - transmit a DCS command with payload to + * two dsi interfaces, one after the other + * + * This macro will send the specified DCS command with payload twice, once per + * each of the two interfaces supplied. This is useful for reducing duplication + * of code in panel drivers which use two parallel serial interfaces. + * + * Note that if an error occurs while transmitting the payload to the first DSI + * interface, the payload will not be sent to the second DSI interface. + * + * This macro will print errors for you and error handling is optimized for + * callers that call this multiple times in a row. + * + * @_ctx: Context for multiple DSI transactions + * @_dsi1: First DSI interface to act as recipient of packet + * @_dsi2: Second DSI interface to act as recipient of packet + * @_cmd: Command + * @_seq: buffer containing the payload + */ +#define mipi_dsi_dual_dcs_write_seq_multi(_ctx, _dsi1, _dsi2, _cmd, _seq...) 
\ + do { \ + static const u8 d[] = { _cmd, _seq }; \ + mipi_dsi_dual_dcs_write_buffer_multi(_ctx, _dsi1, _dsi2, d, \ + ARRAY_SIZE(d)); \ } while (0) /** diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index ac33ba1b18bc..16ce0e8f36a6 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -48,7 +48,7 @@ #endif #include <linux/types.h> -#include <drm/drm_print.h> +struct drm_printer; #ifdef CONFIG_DRM_DEBUG_MM #define DRM_MM_BUG_ON(expr) BUG_ON(expr) @@ -463,7 +463,6 @@ static inline int drm_mm_insert_node(struct drm_mm *mm, } void drm_mm_remove_node(struct drm_mm_node *node); -void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); void drm_mm_init(struct drm_mm *mm, u64 start, u64 size); void drm_mm_takedown(struct drm_mm *mm); diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index e5b053001d22..895fb820dba0 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -82,6 +82,7 @@ struct drm_mode_config_funcs { */ struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd); /** @@ -95,23 +96,7 @@ struct drm_mode_config_funcs { * The format information specific to the given fb metadata, or * NULL if none is found. */ - const struct drm_format_info *(*get_format_info)(const struct drm_mode_fb_cmd2 *mode_cmd); - - /** - * @output_poll_changed: - * - * Callback used by helpers to inform the driver of output configuration - * changes. - * - * Drivers implementing fbdev emulation use drm_kms_helper_hotplug_event() - * to call this hook to inform the fbdev helper of output changes. - * - * This hook is deprecated, drivers should instead use - * drm_fbdev_generic_setup() which takes care of any necessary - * hotplug event forwarding already without further involvement by - * the driver. - */ - void (*output_poll_changed)(struct drm_device *dev); + const struct drm_format_info *(*get_format_info)(u32 pixel_format, u64 modifier); /** * @mode_valid: @@ -506,6 +491,34 @@ struct drm_mode_config { struct list_head plane_list; /** + * @panic_lock: + * + * Raw spinlock used to protect critical sections of code that access + * the display hardware or modeset software state, which the panic + * printing code must be protected against. See drm_panic_trylock(), + * drm_panic_lock() and drm_panic_unlock(). + */ + struct raw_spinlock panic_lock; + + /** + * @num_colorop: + * + * Number of colorop objects on this device. + * This is invariant over the lifetime of a device and hence doesn't + * need any locks. + */ + int num_colorop; + + /** + * @colorop_list: + * + * List of colorop objects linked with &drm_colorop.head. This is + * invariant over the lifetime of a device and hence doesn't need any + * locks. + */ + struct list_head colorop_list; + + /** * @num_crtc: * * Number of CRTCs on this device linked with &drm_crtc.head. This is invariant over the lifetime @@ -538,8 +551,8 @@ struct drm_mode_config { */ struct list_head privobj_list; - int min_width, min_height; - int max_width, max_height; + unsigned int min_width, min_height; + unsigned int max_width, max_height; const struct drm_mode_config_funcs *funcs; /* output poll support */ @@ -891,13 +904,6 @@ struct drm_mode_config { uint32_t preferred_depth, prefer_shadow; /** - * @prefer_shadow_fbdev: - * - * Hint to framebuffer emulation to prefer shadow-fb rendering. 
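Tying the multi-context MIPI DSI helpers above together: a sketch of a panel init sequence that batches commands and checks a single accumulated error at the end. The panel and its vendor register write are illustrative assumptions, not taken from the patch:

static int my_panel_prepare(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context ctx = { .dsi = dsi };

	mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
	mipi_dsi_msleep(&ctx, 120);
	/* hypothetical vendor register write */
	mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x01, 0x02);
	mipi_dsi_dcs_set_display_on_multi(&ctx);

	/* every call above becomes a no-op once ctx.accum_err is non-zero */
	return ctx.accum_err;
}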
- */ - bool prefer_shadow_fbdev; - - /** * @quirk_addfb_prefer_xbgr_30bpp: * * Special hack for legacy ADDFB to keep nouveau userspace happy. Should @@ -949,6 +955,17 @@ struct drm_mode_config { */ struct drm_property *modifiers_property; + /** + * @async_modifiers_property: Plane property to list support modifier/format + * combination for asynchronous flips. + */ + struct drm_property *async_modifiers_property; + + /** + * @size_hints_property: Plane SIZE_HINTS property. + */ + struct drm_property *size_hints_property; + /* cursor size */ uint32_t cursor_width, cursor_height; diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h index 912f1e415685..c68edbd126d0 100644 --- a/include/drm/drm_mode_object.h +++ b/include/drm/drm_mode_object.h @@ -35,7 +35,7 @@ struct drm_file; * @id: userspace visible identifier * @type: type of the object, one of DRM_MODE_OBJECT\_\* * @properties: properties attached to this object, including values - * @refcount: reference count for objects which with dynamic lifetime + * @refcount: reference count for objects with dynamic lifetime * @free_cb: free function callback, only set for objects with dynamic lifetime * * Base structure for modeset objects visible to userspace. Objects can be @@ -60,7 +60,7 @@ struct drm_mode_object { void (*free_cb)(struct kref *kref); }; -#define DRM_OBJECT_MAX_PROPERTY 24 +#define DRM_OBJECT_MAX_PROPERTY 64 /** * struct drm_object_properties - property tracking for &drm_mode_object */ diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index c613f0abe9dc..b9bb92e4b029 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -467,6 +467,8 @@ bool drm_mode_is_420_also(const struct drm_display_info *display, const struct drm_display_mode *mode); bool drm_mode_is_420(const struct drm_display_info *display, const struct drm_display_mode *mode); +void drm_set_preferred_mode(struct drm_connector *connector, + int hpref, int vpref); struct drm_display_mode *drm_analog_tv_mode(struct drm_device *dev, enum drm_connector_tv_mode mode, diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h index 995fd981cab0..7e3d4c5a7f66 100644 --- a/include/drm/drm_modeset_helper.h +++ b/include/drm/drm_modeset_helper.h @@ -26,6 +26,7 @@ struct drm_crtc; struct drm_crtc_funcs; struct drm_device; +struct drm_format_info; struct drm_framebuffer; struct drm_mode_fb_cmd2; @@ -33,6 +34,7 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *); void drm_helper_mode_fill_fb_struct(struct drm_device *dev, struct drm_framebuffer *fb, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd); int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 206f495bbf06..fe32854b7ffe 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -48,6 +48,7 @@ * To make this clear all the helper vtables are pulled together in this location here. */ +struct drm_scanout_buffer; struct drm_writeback_connector; struct drm_writeback_job; @@ -59,8 +60,8 @@ enum mode_set_atomic { /** * struct drm_crtc_helper_funcs - helper operations for CRTCs * - * These hooks are used by the legacy CRTC helpers, the transitional plane - * helpers and the new atomic modesetting helpers. + * These hooks are used by the legacy CRTC helpers and the new atomic + * modesetting helpers. 
*/ struct drm_crtc_helper_funcs { /** @@ -134,7 +135,7 @@ struct drm_crtc_helper_funcs { * Since this function is both called from the check phase of an atomic * commit, and the mode validation in the probe paths it is not allowed * to look at anything else but the passed-in mode, and validate it - * against configuration-invariant hardward constraints. Any further + * against configuration-invariant hardware constraints. Any further * limits which depend upon the configuration can only be checked in * @mode_fixup or @atomic_check. * @@ -216,9 +217,7 @@ struct drm_crtc_helper_funcs { * * This callback is used to update the display mode of a CRTC without * changing anything of the primary plane configuration. This fits the - * requirement of atomic and hence is used by the atomic helpers. It is - * also used by the transitional plane helpers to implement a - * @mode_set hook in drm_helper_crtc_mode_set(). + * requirement of atomic and hence is used by the atomic helpers. * * Note that the display pipe is completely off when this function is * called. Atomic drivers which need hardware to be running before they @@ -333,8 +332,8 @@ struct drm_crtc_helper_funcs { * all updated. Again the recommendation is to just call check helpers * until a maximal configuration is reached. * - * This callback is used by the atomic modeset helpers and by the - * transitional plane helpers, but it is optional. + * This callback is used by the atomic modeset helpers, but it is + * optional. * * NOTE: * @@ -373,8 +372,8 @@ struct drm_crtc_helper_funcs { * has picked. See drm_atomic_helper_commit_planes() for a discussion of * the tradeoffs and variants of plane commit helpers. * - * This callback is used by the atomic modeset helpers and by the - * transitional plane helpers, but it is optional. + * This callback is used by the atomic modeset helpers, but it is + * optional. */ void (*atomic_begin)(struct drm_crtc *crtc, struct drm_atomic_state *state); @@ -397,8 +396,8 @@ struct drm_crtc_helper_funcs { * has picked. See drm_atomic_helper_commit_planes() for a discussion of * the tradeoffs and variants of plane commit helpers. * - * This callback is used by the atomic modeset helpers and by the - * transitional plane helpers, but it is optional. + * This callback is used by the atomic modeset helpers, but it is + * optional. */ void (*atomic_flush)(struct drm_crtc *crtc, struct drm_atomic_state *state); @@ -491,6 +490,18 @@ struct drm_crtc_helper_funcs { bool in_vblank_irq, int *vpos, int *hpos, ktime_t *stime, ktime_t *etime, const struct drm_display_mode *mode); + + /** + * @handle_vblank_timeout: Handles timeouts of the vblank timer. + * + * Called by the CRTC's vblank timer on each timeout. The semantics are + * equivalent to drm_crtc_handle_vblank(). Implementations should + * invoke drm_crtc_handle_vblank() as part of processing the timeout. + * + * This callback is optional. If unset, the vblank timer invokes + * drm_crtc_handle_vblank() directly. + */ + bool (*handle_vblank_timeout)(struct drm_crtc *crtc); }; /** @@ -507,8 +518,8 @@ static inline void drm_crtc_helper_add(struct drm_crtc *crtc, /** * struct drm_encoder_helper_funcs - helper operations for encoders * - * These hooks are used by the legacy CRTC helpers, the transitional plane - * helpers and the new atomic modesetting helpers. + * These hooks are used by the legacy CRTC helpers and the new atomic + * modesetting helpers.
*/ struct drm_encoder_helper_funcs { /** @@ -552,7 +563,7 @@ struct drm_encoder_helper_funcs { * Since this function is both called from the check phase of an atomic * commit, and the mode validation in the probe paths it is not allowed * to look at anything else but the passed-in mode, and validate it - * against configuration-invariant hardward constraints. Any further + * against configuration-invariant hardware constraints. Any further * limits which depend upon the configuration can only be checked in * @mode_fixup or @atomic_check. * @@ -900,7 +911,8 @@ struct drm_connector_helper_funcs { * * RETURNS: * - * The number of modes added by calling drm_mode_probed_add(). + * The number of modes added by calling drm_mode_probed_add(). Return 0 + * on failures (no modes) instead of negative error codes. */ int (*get_modes)(struct drm_connector *connector); @@ -967,7 +979,7 @@ struct drm_connector_helper_funcs { * drm_mode_status. */ enum drm_mode_status (*mode_valid)(struct drm_connector *connector, - struct drm_display_mode *mode); + const struct drm_display_mode *mode); /** * @mode_valid_ctx: @@ -1006,7 +1018,7 @@ struct drm_connector_helper_funcs { * */ int (*mode_valid_ctx)(struct drm_connector *connector, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_modeset_acquire_ctx *ctx, enum drm_mode_status *status); @@ -1156,6 +1168,11 @@ struct drm_connector_helper_funcs { * This operation is optional. * * This callback is used by the drm_kms_helper_poll_enable() helpers. + * + * This operation does not need to perform any hpd state tracking as + * the DRM core handles that maintenance and ensures the calls to enable + * and disable hpd are balanced. + * */ void (*enable_hpd)(struct drm_connector *connector); @@ -1167,6 +1184,11 @@ struct drm_connector_helper_funcs { * This operation is optional. * * This callback is used by the drm_kms_helper_poll_disable() helpers. + * + * This operation does not need to perform any hpd state tracking as + * the DRM core handles that maintenance and ensures the calls to enable + * and disable hpd are balanced. + * */ void (*disable_hpd)(struct drm_connector *connector); }; @@ -1185,8 +1207,7 @@ static inline void drm_connector_helper_add(struct drm_connector *connector, /** * struct drm_plane_helper_funcs - helper operations for planes * - * These functions are used by the atomic helpers and by the transitional plane - * helpers. + * These functions are used by the atomic helpers. */ struct drm_plane_helper_funcs { /** @@ -1221,9 +1242,8 @@ struct drm_plane_helper_funcs { * The helpers will call @cleanup_fb with matching arguments for every * successful call to this hook. * - * This callback is used by the atomic modeset helpers and by the - * transitional plane helpers, but it is optional. See @begin_fb_access - * for preparing per-commit resources. + * This callback is used by the atomic modeset helpers, but it is + * optional. See @begin_fb_access for preparing per-commit resources. * * RETURNS: * @@ -1240,8 +1260,8 @@ struct drm_plane_helper_funcs { * This hook is called to clean up any resources allocated for the given * framebuffer and plane configuration in @prepare_fb. * - * This callback is used by the atomic modeset helpers and by the - * transitional plane helpers, but it is optional. + * This callback is used by the atomic modeset helpers, but it is + * optional. 
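A brief sketch of the &drm_connector_helper_funcs.get_modes contract noted above, namely that failure is reported as 0 modes rather than a negative error code. The fixed mode template is an illustrative assumption:

/* hypothetical driver-provided mode template; timings elided */
static const struct drm_display_mode my_fixed_mode;

static int my_connector_get_modes(struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &my_fixed_mode);
	if (!mode)
		return 0;	/* allocation failure reported as "no modes" */

	mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_probed_add(connector, mode);

	return 1;
}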
*/ void (*cleanup_fb)(struct drm_plane *plane, struct drm_plane_state *old_state); @@ -1295,8 +1315,8 @@ struct drm_plane_helper_funcs { * all updated. Again the recommendation is to just call check helpers * until a maximal configuration is reached. * - * This callback is used by the atomic modeset helpers and by the - * transitional plane helpers, but it is optional. + * This callback is used by the atomic modeset helpers, but it is + * optional. * * NOTE: * @@ -1326,11 +1346,36 @@ struct drm_plane_helper_funcs { * has picked. See drm_atomic_helper_commit_planes() for a discussion of * the tradeoffs and variants of plane commit helpers. * - * This callback is used by the atomic modeset helpers and by the - * transitional plane helpers, but it is optional. + * This callback is used by the atomic modeset helpers, but it is optional. */ void (*atomic_update)(struct drm_plane *plane, struct drm_atomic_state *state); + + /** + * @atomic_enable: + * + * Drivers should use this function to unconditionally enable a plane. + * This hook is called in-between the &drm_crtc_helper_funcs.atomic_begin + * and &drm_crtc_helper_funcs.atomic_flush callbacks. It is called after + * @atomic_update, which will be called for all enabled planes. Drivers + * that use @atomic_enable should set up a plane in @atomic_update and + * afterwards enable the plane in @atomic_enable. If a plane needs to be + * enabled before installing the scanout buffer, drivers can still do + * so in @atomic_update. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_helper_commit_planes() for a discussion of + * the tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers, but it is + * optional. If implemented, @atomic_enable should be the inverse of + * @atomic_disable. Drivers that don't want to use either can still + * implement the complete plane update in @atomic_update. + */ + void (*atomic_enable)(struct drm_plane *plane, + struct drm_atomic_state *state); + /** * @atomic_disable: * @@ -1350,8 +1395,8 @@ struct drm_plane_helper_funcs { * has picked. See drm_atomic_helper_commit_planes() for a discussion of * the tradeoffs and variants of plane commit helpers. * - * This callback is used by the atomic modeset helpers and by the - * transitional plane helpers, but it is optional. + * This callback is used by the atomic modeset helpers, but it is + * optional. It's intended to reverse the effects of @atomic_enable. */ void (*atomic_disable)(struct drm_plane *plane, struct drm_atomic_state *state); @@ -1367,13 +1412,18 @@ * given update can be committed asynchronously, that is, if it can * jump ahead of the state currently queued for update. * + * This function is also used by drm_atomic_set_property() to determine + * if the plane can be flipped asynchronously. The @flip flag is used to + * distinguish whether the function is called for just the plane state or + * for a flip. + * * RETURNS: * * Return 0 on success; any error returned indicates that the update * cannot be applied in an asynchronous manner.
*/ int (*atomic_async_check)(struct drm_plane *plane, - struct drm_atomic_state *state); + struct drm_atomic_state *state, bool flip); /** * @atomic_async_update: @@ -1411,6 +1461,44 @@ */ void (*atomic_async_update)(struct drm_plane *plane, struct drm_atomic_state *state); + + /** + * @get_scanout_buffer: + * + * Get the current scanout buffer, to display a message with drm_panic. + * The driver should make the minimum changes needed to provide a buffer + * that can be used to display the panic screen. Currently only linear + * buffers are supported. Non-linear buffer support is on the TODO list. + * The device &dev.mode_config.panic_lock is taken before calling this + * function, so you can safely access the &plane.state. + * It is called from a panic callback, and must follow its restrictions. + * Please see the documentation of drm_panic_trylock() for an in-depth + * discussion of what's safe and what is not allowed. + * It's a best effort mode, so it's expected that in some complex cases + * the panic screen won't be displayed. + * The returned &drm_scanout_buffer.map must be valid if no error code is + * returned. + * + * Return: + * %0 on success, negative errno on failure. + */ + int (*get_scanout_buffer)(struct drm_plane *plane, + struct drm_scanout_buffer *sb); + + /** + * @panic_flush: + * + * It is used by drm_panic, and is called after the panic screen is + * drawn to the scanout buffer. In this function, the driver + * can send additional commands to the hardware, to make the scanout + * buffer visible. + * It is only called if get_scanout_buffer() returned successfully, and + * the &dev.mode_config.panic_lock is held during the entire sequence. + * It is called from a panic callback, and must follow its restrictions. + * Please see the documentation of drm_panic_trylock() for an in-depth + * discussion of what's safe and what is not allowed. + */ + void (*panic_flush)(struct drm_plane *plane); }; /** @@ -1443,7 +1531,7 @@ struct drm_mode_config_helper_funcs { * swapped into the various state pointers. The passed in state * therefore contains copies of the old/previous state. This hook should * commit the new state into hardware. Note that the helpers have - * already waited for preceeding atomic commits and fences, but drivers + * already waited for preceding atomic commits and fences, but drivers * can add more waiting calls at the start of their implementation, e.g. * to wait for driver-internal request for implicit syncing, before * starting to commit the update to the hardware.
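For the panic hooks documented above, a sketch of a minimal get_scanout_buffer() for hardware scanning out of a CPU-mapped linear buffer. my_plane_vaddr() is an assumption standing in for however a real driver reaches its framebuffer memory:

#include <linux/iosys-map.h>
#include <drm/drm_panic.h>

static int my_get_scanout_buffer(struct drm_plane *plane,
				 struct drm_scanout_buffer *sb)
{
	struct drm_framebuffer *fb = plane->state ? plane->state->fb : NULL;

	/* called under panic_lock, so plane->state is safe to inspect */
	if (!fb)
		return -ENODEV;

	sb->format = fb->format;
	sb->width = fb->width;
	sb->height = fb->height;
	sb->pitch[0] = fb->pitches[0];
	/* linear, CPU-mapped buffer; my_plane_vaddr() is hypothetical */
	iosys_map_set_vaddr(&sb->map[0], my_plane_vaddr(plane));

	return 0;
}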
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h index 10ab58c40746..7f0256dae3f1 100644 --- a/include/drm/drm_of.h +++ b/include/drm/drm_of.h @@ -2,6 +2,7 @@ #ifndef __DRM_OF_H__ #define __DRM_OF_H__ +#include <linux/err.h> #include <linux/of_graph.h> #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE) #include <drm/drm_bridge.h> @@ -15,6 +16,8 @@ struct drm_encoder; struct drm_panel; struct drm_bridge; struct device_node; +struct mipi_dsi_device_info; +struct mipi_dsi_host; /** * enum drm_lvds_dual_link_pixels - Pixel order of an LVDS dual-link connection @@ -49,6 +52,8 @@ int drm_of_find_panel_or_bridge(const struct device_node *np, struct drm_bridge **bridge); int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, const struct device_node *port2); +int drm_of_lvds_get_dual_link_pixel_order_sink(struct device_node *port1, + struct device_node *port2); int drm_of_lvds_get_data_mapping(const struct device_node *port); int drm_of_get_data_lanes_count(const struct device_node *endpoint, const unsigned int min, const unsigned int max); @@ -107,6 +112,13 @@ drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, } static inline int +drm_of_lvds_get_dual_link_pixel_order_sink(struct device_node *port1, + struct device_node *port2) +{ + return -EINVAL; +} + +static inline int drm_of_lvds_get_data_mapping(const struct device_node *port) { return -EINVAL; @@ -129,6 +141,16 @@ drm_of_get_data_lanes_count_ep(const struct device_node *port, } #endif +#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_MIPI_DSI) +struct mipi_dsi_host *drm_of_get_dsi_bus(struct device *dev); +#else +static inline struct +mipi_dsi_host *drm_of_get_dsi_bus(struct device *dev) +{ + return ERR_PTR(-EINVAL); +} +#endif /* CONFIG_OF && CONFIG_DRM_MIPI_DSI */ + /* * drm_of_panel_bridge_remove - remove panel bridge * @np: device tree node containing panel bridge output ports diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h new file mode 100644 index 000000000000..f6e7e234c089 --- /dev/null +++ b/include/drm/drm_pagemap.h @@ -0,0 +1,248 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef _DRM_PAGEMAP_H_ +#define _DRM_PAGEMAP_H_ + +#include <linux/dma-direction.h> +#include <linux/hmm.h> +#include <linux/types.h> + +#define NR_PAGES(order) (1U << (order)) + +struct drm_pagemap; +struct drm_pagemap_zdd; +struct device; + +/** + * enum drm_interconnect_protocol - Used to identify an interconnect protocol. + * + * @DRM_INTERCONNECT_SYSTEM: DMA map is system pages + * @DRM_INTERCONNECT_DRIVER: DMA map is driver defined + */ +enum drm_interconnect_protocol { + DRM_INTERCONNECT_SYSTEM, + DRM_INTERCONNECT_DRIVER, + /* A driver can add private values beyond DRM_INTERCONNECT_DRIVER */ +}; + +/** + * struct drm_pagemap_addr - Address representation. + * @addr: The dma address or driver-defined address for driver private interconnects. + * @proto: The interconnect protocol. + * @order: The page order of the device mapping. (Size is PAGE_SIZE << order). + * @dir: The DMA direction. + * + * Note: There is room for improvement here. We should be able to pack into + * 64 bits. + */ +struct drm_pagemap_addr { + dma_addr_t addr; + u64 proto : 54; + u64 order : 8; + u64 dir : 2; +}; + +/** + * drm_pagemap_addr_encode() - Encode a dma address with metadata + * @addr: The dma address or driver-defined address for driver private interconnects. + * @proto: The interconnect protocol. + * @order: The page order of the dma mapping. (Size is PAGE_SIZE << order). 
+ * @dir: The DMA direction. + * + * Return: A struct drm_pagemap_addr encoding the above information. + */ +static inline struct drm_pagemap_addr +drm_pagemap_addr_encode(dma_addr_t addr, + enum drm_interconnect_protocol proto, + unsigned int order, + enum dma_data_direction dir) +{ + return (struct drm_pagemap_addr) { + .addr = addr, + .proto = proto, + .order = order, + .dir = dir, + }; +} + +/** + * struct drm_pagemap_ops - Ops for a drm-pagemap. + */ +struct drm_pagemap_ops { + /** + * @device_map: Map a page for device access, or provide a driver-defined + * address for a private interconnect. + * + * @dpagemap: The struct drm_pagemap for the page. + * @dev: The device mapper. + * @page: The page to map. + * @order: The page order of the device mapping. (Size is PAGE_SIZE << order). + * @dir: The transfer direction. + */ + struct drm_pagemap_addr (*device_map)(struct drm_pagemap *dpagemap, + struct device *dev, + struct page *page, + unsigned int order, + enum dma_data_direction dir); + + /** + * @device_unmap: Unmap a device address previously obtained using @device_map. + * + * @dpagemap: The struct drm_pagemap for the mapping. + * @dev: The device unmapper. + * @addr: The device address obtained when mapping. + */ + void (*device_unmap)(struct drm_pagemap *dpagemap, + struct device *dev, + struct drm_pagemap_addr addr); + + /** + * @populate_mm: Populate part of the mm with @dpagemap memory, + * migrating existing data. + * @dpagemap: The struct drm_pagemap managing the memory. + * @start: The virtual start address in @mm + * @end: The virtual end address in @mm + * @mm: Pointer to a live mm. The caller must have an mmget() + * reference. + * @timeslice_ms: The time requested for the migrated data to be present + * in @mm before migration back may be attempted. + * + * The caller will have the mm lock at least in read mode. + * Note that there is no guarantee that the memory is resident + * after the function returns; it's best effort only. + * When the mm is not using the memory anymore, + * it will be released. The struct drm_pagemap might have a + * mechanism in place to reclaim the memory and the data will + * then be migrated, typically to system memory. + * The implementation should hold sufficient runtime power + * references while pages are used in an address space and + * should ideally guard against hardware device unbind in + * a way such that device pages are migrated back to system memory, + * followed by device page removal. The implementation should + * return -ENODEV after device removal. + * + * Return: 0 if successful. Negative error code on error. + */ + int (*populate_mm)(struct drm_pagemap *dpagemap, + unsigned long start, unsigned long end, + struct mm_struct *mm, + unsigned long timeslice_ms); +}; + +/** + * struct drm_pagemap - Additional information for a struct dev_pagemap + * used for device p2p handshaking. + * @ops: The struct drm_pagemap_ops. + * @dev: The struct device owning the device-private memory. + */ +struct drm_pagemap { + const struct drm_pagemap_ops *ops; + struct device *dev; +}; + +struct drm_pagemap_devmem; + +/** + * struct drm_pagemap_devmem_ops - Operations structure for GPU SVM device memory + * + * This structure defines the operations for GPU Shared Virtual Memory (SVM) + * device memory. These operations are provided by the GPU driver to manage device memory + * allocations and perform operations such as migration between device memory and system + * RAM.
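+ *
+ * As an illustration only (the foo_* names are hypothetical), a driver
+ * typically defines one static ops table and registers it per allocation
+ * with drm_pagemap_devmem_init(), declared further below::
+ *
+ *	static const struct drm_pagemap_devmem_ops foo_devmem_ops = {
+ *		.devmem_release = foo_devmem_release,
+ *		.populate_devmem_pfn = foo_populate_devmem_pfn,
+ *		.copy_to_devmem = foo_copy_to_devmem,
+ *		.copy_to_ram = foo_copy_to_ram,
+ *	};
+ *
+ *	drm_pagemap_devmem_init(&alloc->devmem, drm->dev, mm,
+ *				&foo_devmem_ops, &foo_dpagemap, size);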
 */ +struct drm_pagemap_devmem_ops { + /** + * @devmem_release: Release device memory allocation (optional) + * @devmem_allocation: device memory allocation + * + * Release the device memory allocation and drop a reference to it. + */ + void (*devmem_release)(struct drm_pagemap_devmem *devmem_allocation); + + /** + * @populate_devmem_pfn: Populate device memory PFN (required for migration) + * @devmem_allocation: device memory allocation + * @npages: Number of pages to populate + * @pfn: Array of page frame numbers to populate + * + * Populate device memory page frame numbers (PFN). + * + * Return: 0 on success, a negative error code on failure. + */ + int (*populate_devmem_pfn)(struct drm_pagemap_devmem *devmem_allocation, + unsigned long npages, unsigned long *pfn); + + /** + * @copy_to_devmem: Copy to device memory (required for migration) + * @pages: Pointer to array of device memory pages (destination) + * @pagemap_addr: Pointer to array of DMA information (source) + * @npages: Number of pages to copy + * + * Copy pages to device memory. If the order of a @pagemap_addr entry + * is greater than 0, the entry is populated but subsequent entries + * within the range of that order are not populated. + * + * Return: 0 on success, a negative error code on failure. + */ + int (*copy_to_devmem)(struct page **pages, + struct drm_pagemap_addr *pagemap_addr, + unsigned long npages); + + /** + * @copy_to_ram: Copy to system RAM (required for migration) + * @pages: Pointer to array of device memory pages (source) + * @pagemap_addr: Pointer to array of DMA information (destination) + * @npages: Number of pages to copy + * + * Copy pages to system RAM. If the order of a @pagemap_addr entry + * is greater than 0, the entry is populated but subsequent entries + * within the range of that order are not populated. + * + * Return: 0 on success, a negative error code on failure. + */ + int (*copy_to_ram)(struct page **pages, + struct drm_pagemap_addr *pagemap_addr, + unsigned long npages); +}; + +/** + * struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation + * + * @dev: Pointer to the device structure to which the device memory allocation belongs + * @mm: Pointer to the mm_struct for the address space + * @detached: Completion signaled when the device memory allocation is detached from device pages + * @ops: Pointer to the operations structure for GPU SVM device memory + * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
+ * @size: Size of device memory allocation + * @timeslice_expiration: Timeslice expiration in jiffies + */ +struct drm_pagemap_devmem { + struct device *dev; + struct mm_struct *mm; + struct completion detached; + const struct drm_pagemap_devmem_ops *ops; + struct drm_pagemap *dpagemap; + size_t size; + u64 timeslice_expiration; +}; + +int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation, + struct mm_struct *mm, + unsigned long start, unsigned long end, + unsigned long timeslice_ms, + void *pgmap_owner); + +int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation); + +const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void); + +struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page); + +void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation, + struct device *dev, struct mm_struct *mm, + const struct drm_pagemap_devmem_ops *ops, + struct drm_pagemap *dpagemap, size_t size); + +int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap, + unsigned long start, unsigned long end, + struct mm_struct *mm, + unsigned long timeslice_ms); + +#endif diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 432fab2347eb..2407bfa60236 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -27,12 +27,14 @@ #include <linux/err.h> #include <linux/errno.h> #include <linux/list.h> +#include <linux/mutex.h> +#include <linux/kref.h> struct backlight_device; struct dentry; struct device_node; struct drm_connector; -struct drm_device; +struct drm_panel_follower; struct drm_panel; struct display_timing; @@ -144,6 +146,59 @@ struct drm_panel_funcs { void (*debugfs_init)(struct drm_panel *panel, struct dentry *root); }; +struct drm_panel_follower_funcs { + /** + * @panel_prepared: + * + * Called after the panel has been powered on. + */ + int (*panel_prepared)(struct drm_panel_follower *follower); + + /** + * @panel_unpreparing: + * + * Called before the panel is powered off. + */ + int (*panel_unpreparing)(struct drm_panel_follower *follower); + + /** + * @panel_enabled: + * + * Called after the panel and the backlight have been enabled. + */ + int (*panel_enabled)(struct drm_panel_follower *follower); + + /** + * @panel_disabling: + * + * Called before the panel and the backlight are disabled. + */ + int (*panel_disabling)(struct drm_panel_follower *follower); +}; + +struct drm_panel_follower { + /** + * @funcs: + * + * Dependent device callbacks; should be initted by the caller. + */ + const struct drm_panel_follower_funcs *funcs; + + /** + * @list + * + * Used for linking into panel's list; set by drm_panel_add_follower(). + */ + struct list_head list; + + /** + * @panel + * + * The panel we're dependent on; set by drm_panel_add_follower(). + */ + struct drm_panel *panel; +}; + /** * struct drm_panel - DRM panel object */ @@ -190,6 +245,20 @@ struct drm_panel { struct list_head list; /** + * @followers: + * + * A list of struct drm_panel_follower dependent on this panel. + */ + struct list_head followers; + + /** + * @follower_lock: + * + * Lock for followers list. + */ + struct mutex follower_lock; + + /** * @prepare_prev_first: * * The previous controller should be prepared first, before the prepare @@ -198,20 +267,74 @@ struct drm_panel { * the panel is powered up. */ bool prepare_prev_first; + + /** + * @prepared: + * + * If true then the panel has been prepared. + */ + bool prepared; + + /** + * @enabled: + * + * If true then the panel has been enabled. 
+ */ + bool enabled; + + /** + * @container: Pointer to the private driver struct embedding this + * @struct drm_panel. + */ + void *container; + + /** + * @refcount: reference count of users referencing this panel. + */ + struct kref refcount; }; +void *__devm_drm_panel_alloc(struct device *dev, size_t size, size_t offset, + const struct drm_panel_funcs *funcs, + int connector_type); + +/** + * devm_drm_panel_alloc - Allocate and initialize a refcounted panel. + * + * @dev: struct device of the panel device + * @type: the type of the struct which contains struct &drm_panel + * @member: the name of the &drm_panel within @type + * @funcs: callbacks for this panel + * @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to + * the panel interface + * + * The reference count of the returned panel is initialized to 1. This + * reference will be automatically dropped via devm (by calling + * drm_panel_put()) when @dev is removed. + * + * Returns: + * Pointer to container structure embedding the panel, ERR_PTR on failure. + */ +#define devm_drm_panel_alloc(dev, type, member, funcs, connector_type) \ + ((type *)__devm_drm_panel_alloc(dev, sizeof(type), \ + offsetof(type, member), funcs, \ + connector_type)) + void drm_panel_init(struct drm_panel *panel, struct device *dev, const struct drm_panel_funcs *funcs, int connector_type); +struct drm_panel *drm_panel_get(struct drm_panel *panel); +void drm_panel_put(struct drm_panel *panel); + void drm_panel_add(struct drm_panel *panel); void drm_panel_remove(struct drm_panel *panel); -int drm_panel_prepare(struct drm_panel *panel); -int drm_panel_unprepare(struct drm_panel *panel); +void drm_panel_prepare(struct drm_panel *panel); +void drm_panel_unprepare(struct drm_panel *panel); -int drm_panel_enable(struct drm_panel *panel); -int drm_panel_disable(struct drm_panel *panel); +void drm_panel_enable(struct drm_panel *panel); +void drm_panel_disable(struct drm_panel *panel); int drm_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector); @@ -232,6 +355,33 @@ static inline int of_drm_get_panel_orientation(const struct device_node *np, } #endif +#if defined(CONFIG_DRM_PANEL) +bool drm_is_panel_follower(struct device *dev); +int drm_panel_add_follower(struct device *follower_dev, + struct drm_panel_follower *follower); +void drm_panel_remove_follower(struct drm_panel_follower *follower); +int devm_drm_panel_add_follower(struct device *follower_dev, + struct drm_panel_follower *follower); +#else +static inline bool drm_is_panel_follower(struct device *dev) +{ + return false; +} + +static inline int drm_panel_add_follower(struct device *follower_dev, + struct drm_panel_follower *follower) +{ + return -ENODEV; +} + +static inline void drm_panel_remove_follower(struct drm_panel_follower *follower) { } +static inline int devm_drm_panel_add_follower(struct device *follower_dev, + struct drm_panel_follower *follower) +{ + return -ENODEV; +} +#endif + #if IS_ENABLED(CONFIG_DRM_PANEL) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ (IS_MODULE(CONFIG_DRM) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE))) int drm_panel_of_backlight(struct drm_panel *panel); diff --git a/include/drm/drm_panic.h b/include/drm/drm_panic.h new file mode 100644 index 000000000000..ac0e46b73436 --- /dev/null +++ b/include/drm/drm_panic.h @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: GPL-2.0 or MIT */ + +/* + * Copyright (c) 2024 Intel + * Copyright (c) 2024 Red Hat + */ + +#ifndef __DRM_PANIC_H__ +#define __DRM_PANIC_H__ + +#include <linux/module.h> 
+#include <linux/types.h> +#include <linux/iosys-map.h> + +#include <drm/drm_device.h> +#include <drm/drm_fourcc.h> + +/** + * struct drm_scanout_buffer - DRM scanout buffer + * + * This structure holds the information necessary for drm_panic to draw the + * panic screen and display it. + */ +struct drm_scanout_buffer { + /** + * @format: + * + * drm format of the scanout buffer. + */ + const struct drm_format_info *format; + + /** + * @map: + * + * Virtual address of the scanout buffer, either in memory or iomem. + * The scanout buffer should be in linear format, and can be directly + * sent to the display hardware. Tearing is not an issue for the panic + * screen. + */ + struct iosys_map map[DRM_FORMAT_MAX_PLANES]; + + /** + * @pages: Optional. If the scanout buffer is not mapped, set this field + * to the array of pages of the scanout buffer. The panic code will use + * kmap_local_page_try_from_panic() to map one page at a time to write + * all the pixels. This array shouldn't be allocated from the + * get_scanout_buffer() callback. + * The scanout buffer should be in linear format. + */ + struct page **pages; + + /** + * @width: Width of the scanout buffer, in pixels. + */ + unsigned int width; + + /** + * @height: Height of the scanout buffer, in pixels. + */ + unsigned int height; + + /** + * @pitch: Length in bytes between the start of two consecutive lines. + */ + unsigned int pitch[DRM_FORMAT_MAX_PLANES]; + + /** + * @set_pixel: Optional function, to set a pixel color on the + * framebuffer. It allows the driver to handle special tiling formats. + * It takes precedence over the @map and @pages fields. + */ + void (*set_pixel)(struct drm_scanout_buffer *sb, unsigned int x, + unsigned int y, u32 color); + + /** + * @private: private pointer that you can use in the set_pixel() + * callback. + */ + void *private; + +}; + +#ifdef CONFIG_DRM_PANIC + +/** + * drm_panic_trylock - try to enter the panic printing critical section + * @dev: struct drm_device + * @flags: unsigned long irq flags you need to pass to the unlock() counterpart + * + * This function must be called by any panic printing code. The panic printing + * attempt must be aborted if the trylock fails. + * + * Panic printing code can make the following assumptions while holding the + * panic lock: + * + * - Anything protected by drm_panic_lock() and drm_panic_unlock() pairs is safe + * to access. + * + * - Furthermore the panic printing code only registers in drm_dev_register() + * and gets removed in drm_dev_unregister(). This allows the panic code to + * safely access any state which is invariant in between these two function + * calls, like the list of planes &drm_mode_config.plane_list or most of the + * struct drm_plane structure. + * + * Specifically thanks to the protection around plane updates in + * drm_atomic_helper_swap_state() the following additional guarantees hold: + * + * - It is safe to dereference the drm_plane.state pointer. + * + * - Anything in struct drm_plane_state or the driver's subclass thereof which + * stays invariant after the atomic check code has finished is safe to access. + * Specifically this includes the reference counted pointers to framebuffer + * and buffer objects. + * + * - Anything set up by &drm_plane_helper_funcs.prepare_fb and cleaned up by + * &drm_plane_helper_funcs.cleanup_fb is safe to access, as long as it stays + * invariant between these two calls.
This also means that for drivers using + * dynamic buffer management the framebuffer is pinned, and therefore all + * relevant data structures can be accessed without taking any further locks + * (which would be impossible in panic context anyway). + * + * - Importantly, software and hardware state set up by + * &drm_plane_helper_funcs.begin_fb_access and + * &drm_plane_helper_funcs.end_fb_access is not safe to access. + * + * Drivers must not make any assumptions about the actual state of the hardware, + * unless they explicitly protect this hardware access with drm_panic_lock() + * and drm_panic_unlock(). + * + * Return: + * %0 when failing to acquire the raw spinlock, nonzero on success. + */ +#define drm_panic_trylock(dev, flags) \ + raw_spin_trylock_irqsave(&(dev)->mode_config.panic_lock, flags) + +/** + * drm_panic_lock - protect panic printing relevant state + * @dev: struct drm_device + * @flags: unsigned long irq flags you need to pass to the unlock() counterpart + * + * This function must be called to protect software and hardware state that the + * panic printing code must be able to rely on. The protected sections must be + * as small as possible. It uses the irqsave/irqrestore variant, and can be + * called from an irq handler. Examples include: + * + * - Access to peek/poke or other similar registers, if that is the way the + * driver prints the pixels into the scanout buffer at panic time. + * + * - Updates to pointers like &drm_plane.state, allowing the panic handler to + * safely dereference these. This is done in drm_atomic_helper_swap_state(). + * + * - Any state that isn't invariant and that the driver must be able to access + * during panic printing. + */ + +#define drm_panic_lock(dev, flags) \ + raw_spin_lock_irqsave(&(dev)->mode_config.panic_lock, flags) + +/** + * drm_panic_unlock - end of the panic printing critical section + * @dev: struct drm_device + * @flags: irq flags that were returned when acquiring the lock + * + * Unlocks the raw spinlock acquired by either drm_panic_lock() or + * drm_panic_trylock(). + */ +#define drm_panic_unlock(dev, flags) \ + raw_spin_unlock_irqrestore(&(dev)->mode_config.panic_lock, flags) + +#else + +static inline bool drm_panic_trylock(struct drm_device *dev, unsigned long flags) +{ + return true; +} + +static inline void drm_panic_lock(struct drm_device *dev, unsigned long flags) {} + +static inline void drm_panic_unlock(struct drm_device *dev, unsigned long flags) {} + +#endif + +#if defined(CONFIG_DRM_PANIC_SCREEN_QR_CODE) +size_t drm_panic_qr_max_data_size(u8 version, size_t url_len); + +u8 drm_panic_qr_generate(const char *url, u8 *data, size_t data_len, size_t data_size, + u8 *tmp, size_t tmp_size); +#endif + +#endif /* __DRM_PANIC_H__ */ diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 51291983ea44..703ef4d1bbbc 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -25,6 +25,7 @@ #include <linux/list.h> #include <linux/ctype.h> +#include <linux/kmsg_dump.h> #include <drm/drm_mode_object.h> #include <drm/drm_color_mgmt.h> #include <drm/drm_rect.h> @@ -32,6 +33,7 @@ #include <drm/drm_util.h> struct drm_crtc; +struct drm_plane_size_hint; struct drm_printer; struct drm_modeset_acquire_ctx; @@ -56,7 +58,7 @@ struct drm_plane_state { /** * @crtc: * - * Currently bound CRTC, NULL if disabled. Do not this write directly, + * Currently bound CRTC, NULL if disabled.
Do not write this directly, * use drm_atomic_set_crtc_for_plane() */ struct drm_crtc *crtc; @@ -116,6 +118,10 @@ struct drm_plane_state { /** @src_h: height of visible portion of plane (in 16.16) */ uint32_t src_h, src_w; + /** @hotspot_x: x offset to mouse cursor hotspot */ + /** @hotspot_y: y offset to mouse cursor hotspot */ + int32_t hotspot_x, hotspot_y; + /** * @alpha: * Opacity of the plane with 0 as completely transparent and 0xffff as @@ -191,6 +197,16 @@ struct drm_plane_state { struct drm_property_blob *fb_damage_clips; /** + * @ignore_damage_clips: + * + * Set by drivers to indicate the drm_atomic_helper_damage_iter_init() + * helper that the @fb_damage_clips blob property should be ignored. + * + * See :ref:`damage_tracking_properties` for more information. + */ + bool ignore_damage_clips; + + /** * @src: * * source coordinates of the plane (in 16.16). @@ -228,6 +244,14 @@ struct drm_plane_state { enum drm_scaling_filter scaling_filter; /** + * @color_pipeline: + * + * The first colorop of the active color pipeline, or NULL, if no + * color pipeline is active. + */ + struct drm_colorop *color_pipeline; + + /** * @commit: Tracks the pending commit to prevent use-after-free conditions, * and for async plane updates. * @@ -237,6 +261,13 @@ struct drm_plane_state { /** @state: backpointer to global drm_atomic_state */ struct drm_atomic_state *state; + + /** + * @color_mgmt_changed: Color management properties have changed. Used + * by the atomic helpers and drivers to steer the atomic commit control + * flow. + */ + bool color_mgmt_changed : 1; }; static inline struct drm_rect @@ -526,6 +557,23 @@ struct drm_plane_funcs { */ bool (*format_mod_supported)(struct drm_plane *plane, uint32_t format, uint64_t modifier); + /** + * @format_mod_supported_async: + * + * This optional hook is used for the DRM to determine if for + * asynchronous flip the given format/modifier combination is valid for + * the plane. This allows the DRM to generate the correct format + * bitmask (which formats apply to which modifier), and to validate + * modifiers at atomic_check time. + * + * Returns: + * + * True if the given modifier is valid for that format on the plane. + * False otherwise. + */ + bool (*format_mod_supported_async)(struct drm_plane *plane, + u32 format, u64 modifier); + }; /** @@ -744,10 +792,33 @@ struct drm_plane { struct drm_property *color_range_property; /** + * @color_pipeline_property: + * + * Optional "COLOR_PIPELINE" enum property for specifying + * a color pipeline to use on the plane. + */ + struct drm_property *color_pipeline_property; + + /** * @scaling_filter_property: property to apply a particular filter while * scaling. */ struct drm_property *scaling_filter_property; + + /** + * @hotspot_x_property: property to set mouse hotspot x offset. + */ + struct drm_property *hotspot_x_property; + + /** + * @hotspot_y_property: property to set mouse hotspot y offset. 
+ */ + struct drm_property *hotspot_y_property; + + /** + * @kmsg_panic: Used to register a panic notifier for this plane + */ + struct kmsg_dumper kmsg_panic; }; #define obj_to_plane(x) container_of(x, struct drm_plane, base) @@ -934,6 +1005,8 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev, #define drm_for_each_plane(plane, dev) \ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) +bool drm_plane_has_format(struct drm_plane *plane, + u32 format, u64 modifier); bool drm_any_plane_has_format(struct drm_device *dev, u32 format, u64 modifier); @@ -945,5 +1018,11 @@ drm_plane_get_damage_clips(const struct drm_plane_state *state); int drm_plane_create_scaling_filter_property(struct drm_plane *plane, unsigned int supported_filters); +int drm_plane_add_size_hints_property(struct drm_plane *plane, + const struct drm_plane_size_hint *hints, + int num_hints); +int drm_plane_create_color_pipeline_property(struct drm_plane *plane, + const struct drm_prop_enum_list *pipelines, + int num_pipelines); #endif diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h index 3a574e8cd22f..75f9c4830564 100644 --- a/include/drm/drm_plane_helper.h +++ b/include/drm/drm_plane_helper.h @@ -26,7 +26,6 @@ #include <linux/types.h> -struct drm_atomic_state; struct drm_crtc; struct drm_framebuffer; struct drm_modeset_acquire_ctx; @@ -42,7 +41,6 @@ int drm_plane_helper_update_primary(struct drm_plane *plane, struct drm_crtc *cr int drm_plane_helper_disable_primary(struct drm_plane *plane, struct drm_modeset_acquire_ctx *ctx); void drm_plane_helper_destroy(struct drm_plane *plane); -int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state); /** * DRM_PLANE_NON_ATOMIC_FUNCS - Default plane functions for non-atomic drivers diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h index 2a1d01e5b56b..f50f862f0d8b 100644 --- a/include/drm/drm_prime.h +++ b/include/drm/drm_prime.h @@ -69,6 +69,9 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf); int drm_gem_prime_fd_to_handle(struct drm_device *dev, struct drm_file *file_priv, int prime_fd, uint32_t *handle); +struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev, + struct drm_file *file_priv, uint32_t handle, + uint32_t flags); int drm_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); @@ -97,6 +100,9 @@ struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj, unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt); /* helper functions for importing */ +bool drm_gem_is_prime_exported_dma_buf(struct drm_device *dev, + struct dma_buf *dma_buf); + struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev, struct dma_buf *dma_buf, struct device *attach_dev); diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index a93a387f8a1a..ab017b05e175 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -28,12 +28,15 @@ #include <linux/compiler.h> #include <linux/printk.h> -#include <linux/seq_file.h> #include <linux/device.h> -#include <linux/debugfs.h> #include <linux/dynamic_debug.h> #include <drm/drm.h> +#include <drm/drm_device.h> + +struct debugfs_regset32; +struct drm_device; +struct seq_file; /* Do *not* use outside of drm_print.[ch]! 
*/ extern unsigned long __drm_debug; @@ -68,6 +71,101 @@ extern unsigned long __drm_debug; */ /** + * enum drm_debug_category - The DRM debug categories + * + * Each of the DRM debug logging macros use a specific category, and the logging + * is filtered by the drm.debug module parameter. This enum specifies the values + * for the interface. + * + * Each DRM_DEBUG_<CATEGORY> macro logs to DRM_UT_<CATEGORY> category, except + * DRM_DEBUG() logs to DRM_UT_CORE. + * + * Enabling verbose debug messages is done through the drm.debug parameter, each + * category being enabled by a bit: + * + * - drm.debug=0x1 will enable CORE messages + * - drm.debug=0x2 will enable DRIVER messages + * - drm.debug=0x3 will enable CORE and DRIVER messages + * - ... + * - drm.debug=0x1ff will enable all messages + * + * An interesting feature is that it's possible to enable verbose logging at + * run-time by echoing the debug value in its sysfs node:: + * + * # echo 0xf > /sys/module/drm/parameters/debug + * + */ +enum drm_debug_category { + /* These names must match those in DYNAMIC_DEBUG_CLASSBITS */ + /** + * @DRM_UT_CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, + * drm_memory.c, ... + */ + DRM_UT_CORE, + /** + * @DRM_UT_DRIVER: Used in the vendor specific part of the driver: i915, + * radeon, ... macro. + */ + DRM_UT_DRIVER, + /** + * @DRM_UT_KMS: Used in the modesetting code. + */ + DRM_UT_KMS, + /** + * @DRM_UT_PRIME: Used in the prime code. + */ + DRM_UT_PRIME, + /** + * @DRM_UT_ATOMIC: Used in the atomic code. + */ + DRM_UT_ATOMIC, + /** + * @DRM_UT_VBL: Used for verbose debug message in the vblank code. + */ + DRM_UT_VBL, + /** + * @DRM_UT_STATE: Used for verbose atomic state debugging. + */ + DRM_UT_STATE, + /** + * @DRM_UT_LEASE: Used in the lease code. + */ + DRM_UT_LEASE, + /** + * @DRM_UT_DP: Used in the DP code. + */ + DRM_UT_DP, + /** + * @DRM_UT_DRMRES: Used in the drm managed resources code. + */ + DRM_UT_DRMRES +}; + +static inline bool drm_debug_enabled_raw(enum drm_debug_category category) +{ + return unlikely(__drm_debug & BIT(category)); +} + +#define drm_debug_enabled_instrumented(category) \ + ({ \ + pr_debug("todo: is this frequent enough to optimize ?\n"); \ + drm_debug_enabled_raw(category); \ + }) + +#if defined(CONFIG_DRM_USE_DYNAMIC_DEBUG) +/* + * the drm.debug API uses dyndbg, so each drm_*dbg macro/callsite gets + * a descriptor, and only enabled callsites are reachable. They use + * the private macro to avoid re-testing the enable-bit. + */ +#define __drm_debug_enabled(category) true +#define drm_debug_enabled(category) drm_debug_enabled_instrumented(category) +#else +#define __drm_debug_enabled(category) drm_debug_enabled_raw(category) +#define drm_debug_enabled(category) drm_debug_enabled_raw(category) +#endif + +/** * struct drm_printer - drm output "stream" * * Do not use struct members directly. 
Use drm_printer_seq_file(), @@ -78,7 +176,13 @@ struct drm_printer { void (*printfn)(struct drm_printer *p, struct va_format *vaf); void (*puts)(struct drm_printer *p, const char *str); void *arg; + const void *origin; const char *prefix; + struct { + unsigned int series; + unsigned int counter; + } line; + enum drm_debug_category category; }; void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf); @@ -86,8 +190,9 @@ void __drm_puts_coredump(struct drm_printer *p, const char *str); void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf); void __drm_puts_seq_file(struct drm_printer *p, const char *str); void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf); -void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf); +void __drm_printfn_dbg(struct drm_printer *p, struct va_format *vaf); void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf); +void __drm_printfn_line(struct drm_printer *p, struct va_format *vaf); __printf(2, 3) void drm_printf(struct drm_printer *p, const char *f, ...); @@ -95,6 +200,8 @@ void drm_puts(struct drm_printer *p, const char *str); void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset); void drm_print_bits(struct drm_printer *p, unsigned long value, const char * const bits[], unsigned int nbits); +void drm_print_hex_dump(struct drm_printer *p, const char *prefix, + const u8 *buf, size_t len); __printf(2, 0) /** @@ -122,7 +229,8 @@ drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va) /** * struct drm_print_iterator - local struct used with drm_printer_coredump - * @data: Pointer to the devcoredump output buffer + * @data: Pointer to the devcoredump output buffer, can be NULL if using + * drm_printer_coredump to determine size of devcoredump * @start: The offset within the buffer to start writing * @remain: The number of bytes to write for this iteration */ @@ -167,6 +275,57 @@ struct drm_print_iterator { * coredump_read, ...) * } * + * The above example has a time complexity of O(N^2), where N is the size of the + * devcoredump. This is acceptable for small devcoredumps but scales poorly for + * larger ones. + * + * Another use case for drm_coredump_printer is to capture the devcoredump into + * a saved buffer before the dev_coredump() callback. This involves two passes: + * one to determine the size of the devcoredump and another to print it to a + * buffer. Then, in dev_coredump(), copy from the saved buffer into the + * devcoredump read buffer. + * + * For example:: + * + * char *devcoredump_saved_buffer; + * + * ssize_t __coredump_print(char *buffer, ssize_t count, ...) + * { + * struct drm_print_iterator iter; + * struct drm_printer p; + * + * iter.data = buffer; + * iter.start = 0; + * iter.remain = count; + * + * p = drm_coredump_printer(&iter); + * + * drm_printf(p, "foo=%d\n", foo); + * ... + * return count - iter.remain; + * } + * + * void coredump_print(...) + * { + * ssize_t count; + * + * count = __coredump_print(NULL, INT_MAX, ...); + * devcoredump_saved_buffer = kvmalloc(count, GFP_KERNEL); + * __coredump_print(devcoredump_saved_buffer, count, ...); + * } + * + * void coredump_read(char *buffer, loff_t offset, size_t count, + * void *data, size_t datalen) + * { + * ... + * memcpy(buffer, devcoredump_saved_buffer + offset, count); + * ... + * } + * + * The above example has a time complexity of O(N*2), where N is the size of the + * devcoredump. This scales better than the previous example for larger + * devcoredumps. 
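+ *
+ * When printing in multiple stages, drm_coredump_printer_is_full()
+ * (declared below) can be used to short-circuit later stages once the
+ * iterator has been exhausted. A sketch, reusing the printer p from the
+ * examples above::
+ *
+ *	if (!drm_coredump_printer_is_full(&p))
+ *		drm_printf(&p, "bar=%d\n", bar);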
+ * * RETURNS: * The &drm_printer object */ @@ -186,6 +345,26 @@ drm_coredump_printer(struct drm_print_iterator *iter) } /** + * drm_coredump_printer_is_full() - DRM coredump printer output is full + * @p: DRM coredump printer + * + * DRM printer output is full, useful to short circuit coredump printing once + * printer is full. + * + * RETURNS: + * True if DRM coredump printer output buffer is full, False otherwise + */ +static inline bool drm_coredump_printer_is_full(struct drm_printer *p) +{ + struct drm_print_iterator *iterator = p->arg; + + if (p->printfn != __drm_printfn_coredump) + return true; + + return !iterator->remain; +} + +/** * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file * @f: the &struct seq_file to output to * @@ -219,132 +398,106 @@ static inline struct drm_printer drm_info_printer(struct device *dev) } /** - * drm_debug_printer - construct a &drm_printer that outputs to pr_debug() - * @prefix: debug output prefix + * drm_dbg_printer - construct a &drm_printer for drm device specific output + * @drm: the &struct drm_device pointer, or NULL + * @category: the debug category to use + * @prefix: debug output prefix, or NULL for no prefix * * RETURNS: * The &drm_printer object */ -static inline struct drm_printer drm_debug_printer(const char *prefix) +static inline struct drm_printer drm_dbg_printer(struct drm_device *drm, + enum drm_debug_category category, + const char *prefix) { struct drm_printer p = { - .printfn = __drm_printfn_debug, - .prefix = prefix + .printfn = __drm_printfn_dbg, + .arg = drm, + .origin = (const void *)_THIS_IP_, /* it's fine as we will be inlined */ + .prefix = prefix, + .category = category, }; return p; } /** - * drm_err_printer - construct a &drm_printer that outputs to pr_err() - * @prefix: debug output prefix + * drm_err_printer - construct a &drm_printer that outputs to drm_err() + * @drm: the &struct drm_device pointer + * @prefix: debug output prefix, or NULL for no prefix * * RETURNS: * The &drm_printer object */ -static inline struct drm_printer drm_err_printer(const char *prefix) +static inline struct drm_printer drm_err_printer(struct drm_device *drm, + const char *prefix) { struct drm_printer p = { .printfn = __drm_printfn_err, + .arg = drm, .prefix = prefix }; return p; } /** - * enum drm_debug_category - The DRM debug categories + * drm_line_printer - construct a &drm_printer that prefixes outputs with line numbers + * @p: the &struct drm_printer which actually generates the output + * @prefix: optional output prefix, or NULL for no prefix + * @series: optional unique series identifier, or 0 to omit identifier in the output * - * Each of the DRM debug logging macros use a specific category, and the logging - * is filtered by the drm.debug module parameter. This enum specifies the values - * for the interface. + * This printer can be used to increase the robustness of the captured output + * to make sure we didn't lost any intermediate lines of the output. Helpful + * while capturing some crash data. * - * Each DRM_DEBUG_<CATEGORY> macro logs to DRM_UT_<CATEGORY> category, except - * DRM_DEBUG() logs to DRM_UT_CORE. 
+ * Example 1:: * - * Enabling verbose debug messages is done through the drm.debug parameter, each - * category being enabled by a bit: + * void crash_dump(struct drm_device *drm) + * { + * static unsigned int id; + * struct drm_printer p = drm_err_printer(drm, "crash"); + * struct drm_printer lp = drm_line_printer(&p, "dump", ++id); * - * - drm.debug=0x1 will enable CORE messages - * - drm.debug=0x2 will enable DRIVER messages - * - drm.debug=0x3 will enable CORE and DRIVER messages - * - ... - * - drm.debug=0x1ff will enable all messages + * drm_printf(&lp, "foo"); + * drm_printf(&lp, "bar"); + * } * - * An interesting feature is that it's possible to enable verbose logging at - * run-time by echoing the debug value in its sysfs node:: + * Above code will print into the dmesg something like:: * - * # echo 0xf > /sys/module/drm/parameters/debug + * [ ] 0000:00:00.0: [drm] *ERROR* crash dump 1.1: foo + * [ ] 0000:00:00.0: [drm] *ERROR* crash dump 1.2: bar * + * Example 2:: + * + * void line_dump(struct device *dev) + * { + * struct drm_printer p = drm_info_printer(dev); + * struct drm_printer lp = drm_line_printer(&p, NULL, 0); + * + * drm_printf(&lp, "foo"); + * drm_printf(&lp, "bar"); + * } + * + * Above code will print:: + * + * [ ] 0000:00:00.0: [drm] 1: foo + * [ ] 0000:00:00.0: [drm] 2: bar + * + * RETURNS: + * The &drm_printer object */ -enum drm_debug_category { - /* These names must match those in DYNAMIC_DEBUG_CLASSBITS */ - /** - * @DRM_UT_CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, - * drm_memory.c, ... - */ - DRM_UT_CORE, - /** - * @DRM_UT_DRIVER: Used in the vendor specific part of the driver: i915, - * radeon, ... macro. - */ - DRM_UT_DRIVER, - /** - * @DRM_UT_KMS: Used in the modesetting code. - */ - DRM_UT_KMS, - /** - * @DRM_UT_PRIME: Used in the prime code. - */ - DRM_UT_PRIME, - /** - * @DRM_UT_ATOMIC: Used in the atomic code. - */ - DRM_UT_ATOMIC, - /** - * @DRM_UT_VBL: Used for verbose debug message in the vblank code. - */ - DRM_UT_VBL, - /** - * @DRM_UT_STATE: Used for verbose atomic state debugging. - */ - DRM_UT_STATE, - /** - * @DRM_UT_LEASE: Used in the lease code. - */ - DRM_UT_LEASE, - /** - * @DRM_UT_DP: Used in the DP code. - */ - DRM_UT_DP, - /** - * @DRM_UT_DRMRES: Used in the drm managed resources code. - */ - DRM_UT_DRMRES -}; - -static inline bool drm_debug_enabled_raw(enum drm_debug_category category) +static inline struct drm_printer drm_line_printer(struct drm_printer *p, + const char *prefix, + unsigned int series) { - return unlikely(__drm_debug & BIT(category)); + struct drm_printer lp = { + .printfn = __drm_printfn_line, + .arg = p, + .prefix = prefix, + .line = { .series = series, }, + }; + return lp; } -#define drm_debug_enabled_instrumented(category) \ - ({ \ - pr_debug("todo: is this frequent enough to optimize ?\n"); \ - drm_debug_enabled_raw(category); \ - }) - -#if defined(CONFIG_DRM_USE_DYNAMIC_DEBUG) -/* - * the drm.debug API uses dyndbg, so each drm_*dbg macro/callsite gets - * a descriptor, and only enabled callsites are reachable. They use - * the private macro to avoid re-testing the enable-bit. 
- */ -#define __drm_debug_enabled(category) true -#define drm_debug_enabled(category) drm_debug_enabled_instrumented(category) -#else -#define __drm_debug_enabled(category) drm_debug_enabled_raw(category) -#define drm_debug_enabled(category) drm_debug_enabled_raw(category) -#endif - /* * struct device based logging * @@ -451,9 +604,15 @@ void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev, * Prefer drm_device based logging over device or prink based logging. */ +/* Helper to enforce struct drm_device type */ +static inline struct device *__drm_to_dev(const struct drm_device *drm) +{ + return drm ? drm->dev : NULL; +} + /* Helper for struct drm_device based logging. */ #define __drm_printk(drm, level, type, fmt, ...) \ - dev_##level##type((drm)->dev, "[drm] " fmt, ##__VA_ARGS__) + dev_##level##type(__drm_to_dev(drm), "[drm] " fmt, ##__VA_ARGS__) #define drm_info(drm, fmt, ...) \ @@ -487,25 +646,25 @@ void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev, #define drm_dbg_core(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_CORE, fmt, ##__VA_ARGS__) -#define drm_dbg_driver(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_CORE, fmt, ##__VA_ARGS__) +#define drm_dbg_driver(drm, fmt, ...) \ + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DRIVER, fmt, ##__VA_ARGS__) #define drm_dbg_kms(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_KMS, fmt, ##__VA_ARGS__) #define drm_dbg_prime(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_PRIME, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_PRIME, fmt, ##__VA_ARGS__) #define drm_dbg_atomic(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) #define drm_dbg_vbl(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_VBL, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_VBL, fmt, ##__VA_ARGS__) #define drm_dbg_state(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_STATE, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_STATE, fmt, ##__VA_ARGS__) #define drm_dbg_lease(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_LEASE, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_LEASE, fmt, ##__VA_ARGS__) #define drm_dbg_dp(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DP, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DP, fmt, ##__VA_ARGS__) #define drm_dbg_drmres(drm, fmt, ...) \ - drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRMRES, fmt, ##__VA_ARGS__) + drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DRMRES, fmt, ##__VA_ARGS__) #define drm_dbg(drm, fmt, ...) drm_dbg_driver(drm, fmt, ##__VA_ARGS__) @@ -515,17 +674,15 @@ void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev, * Prefer drm_device based logging over device or prink based logging. */ -__printf(3, 4) -void ___drm_dbg(struct _ddebug *desc, enum drm_debug_category category, const char *format, ...); __printf(1, 2) void __drm_err(const char *format, ...); #if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG) -#define __drm_dbg(cat, fmt, ...) ___drm_dbg(NULL, cat, fmt, ##__VA_ARGS__) +#define __drm_dbg(cat, fmt, ...) __drm_dev_dbg(NULL, NULL, cat, fmt, ##__VA_ARGS__) #else #define __drm_dbg(cat, fmt, ...) 
\ - _dynamic_func_call_cls(cat, fmt, ___drm_dbg, \ - cat, fmt, ##__VA_ARGS__) + _dynamic_func_call_cls(cat, fmt, __drm_dev_dbg, \ + NULL, cat, fmt, ##__VA_ARGS__) #endif /* Macros to make printk easier */ @@ -596,12 +753,14 @@ void __drm_err(const char *format, ...); #define __DRM_DEFINE_DBG_RATELIMITED(category, drm, fmt, ...) \ ({ \ static DEFINE_RATELIMIT_STATE(rs_, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);\ - const struct drm_device *drm_ = (drm); \ \ if (drm_debug_enabled(DRM_UT_ ## category) && __ratelimit(&rs_)) \ - drm_dev_printk(drm_ ? drm_->dev : NULL, KERN_DEBUG, fmt, ## __VA_ARGS__); \ + drm_dev_printk(__drm_to_dev(drm), KERN_DEBUG, fmt, ## __VA_ARGS__); \ }) +#define drm_dbg_ratelimited(drm, fmt, ...) \ + __DRM_DEFINE_DBG_RATELIMITED(DRIVER, drm, fmt, ## __VA_ARGS__) + #define drm_dbg_kms_ratelimited(drm, fmt, ...) \ __DRM_DEFINE_DBG_RATELIMITED(KMS, drm, fmt, ## __VA_ARGS__) @@ -617,14 +776,14 @@ void __drm_err(const char *format, ...); /* Helper for struct drm_device based WARNs */ #define drm_WARN(drm, condition, format, arg...) \ - WARN(condition, "%s %s: " format, \ - dev_driver_string((drm)->dev), \ - dev_name((drm)->dev), ## arg) + WARN(condition, "%s %s: [drm] " format, \ + dev_driver_string(__drm_to_dev(drm)), \ + dev_name(__drm_to_dev(drm)), ## arg) #define drm_WARN_ONCE(drm, condition, format, arg...) \ - WARN_ONCE(condition, "%s %s: " format, \ - dev_driver_string((drm)->dev), \ - dev_name((drm)->dev), ## arg) + WARN_ONCE(condition, "%s %s: [drm] " format, \ + dev_driver_string(__drm_to_dev(drm)), \ + dev_name(__drm_to_dev(drm)), ## arg) #define drm_WARN_ON(drm, x) \ drm_WARN((drm), (x), "%s", \ diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h index 4977e0ab72db..840ae5f798c2 100644 --- a/include/drm/drm_probe_helper.h +++ b/include/drm/drm_probe_helper.h @@ -16,6 +16,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector int drm_helper_probe_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force); + +void drmm_kms_helper_poll_init(struct drm_device *dev); void drm_kms_helper_poll_init(struct drm_device *dev); void drm_kms_helper_poll_fini(struct drm_device *dev); bool drm_helper_hpd_irq_event(struct drm_device *dev); @@ -25,16 +27,20 @@ void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector); void drm_kms_helper_poll_disable(struct drm_device *dev); void drm_kms_helper_poll_enable(struct drm_device *dev); +void drm_kms_helper_poll_reschedule(struct drm_device *dev); bool drm_kms_helper_is_poll_worker(void); enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc, const struct drm_display_mode *mode, const struct drm_display_mode *fixed_mode); -int drm_connector_helper_get_modes_from_ddc(struct drm_connector *connector); int drm_connector_helper_get_modes_fixed(struct drm_connector *connector, const struct drm_display_mode *fixed_mode); int drm_connector_helper_get_modes(struct drm_connector *connector); int drm_connector_helper_tv_get_modes(struct drm_connector *connector); +int drm_connector_helper_detect_from_ddc(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force); + #endif diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h index 65bc9710a470..082f29156b3e 100644 --- a/include/drm/drm_property.h +++ b/include/drm/drm_property.h @@ -279,6 +279,12 @@ struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, const void *data); struct drm_property_blob 
*drm_property_lookup_blob(struct drm_device *dev, uint32_t id); +int drm_property_replace_blob_from_id(struct drm_device *dev, + struct drm_property_blob **blob, + uint64_t blob_id, + ssize_t expected_size, + ssize_t expected_elem_size, + bool *replaced); int drm_property_replace_global_blob(struct drm_device *dev, struct drm_property_blob **replace, size_t length, diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h index e8d94fca2703..46f09cf68458 100644 --- a/include/drm/drm_rect.h +++ b/include/drm/drm_rect.h @@ -129,7 +129,7 @@ static inline void drm_rect_adjust_size(struct drm_rect *r, int dw, int dh) /** * drm_rect_translate - translate the rectangle - * @r: rectangle to be tranlated + * @r: rectangle to be translated * @dx: horizontal translation * @dy: vertical translation * @@ -146,7 +146,7 @@ static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy) /** * drm_rect_translate_to - translate the rectangle to an absolute position - * @r: rectangle to be tranlated + * @r: rectangle to be translated * @x: horizontal position * @y: vertical position * @@ -238,6 +238,21 @@ static inline void drm_rect_fp_to_int(struct drm_rect *dst, drm_rect_height(src) >> 16); } +/** + * drm_rect_overlap - Check if two rectangles overlap + * @a: first rectangle + * @b: second rectangle + * + * RETURNS: + * %true if the rectangles overlap, %false otherwise. + */ +static inline bool drm_rect_overlap(const struct drm_rect *a, + const struct drm_rect *b) +{ + return (a->x2 > b->x1 && b->x2 > a->x1 && + a->y2 > b->y1 && b->y2 > a->y1); +} + bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip); bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst, const struct drm_rect *clip); diff --git a/include/drm/drm_suballoc.h b/include/drm/drm_suballoc.h new file mode 100644 index 000000000000..7ba72a81a808 --- /dev/null +++ b/include/drm/drm_suballoc.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright 2011 Red Hat Inc. + * Copyright © 2022 Intel Corporation + */ +#ifndef _DRM_SUBALLOC_H_ +#define _DRM_SUBALLOC_H_ + +#include <drm/drm_mm.h> + +#include <linux/dma-fence.h> +#include <linux/types.h> + +#define DRM_SUBALLOC_MAX_QUEUES 32 +/** + * struct drm_suballoc_manager - fenced range allocations + * @wq: Wait queue for sleeping allocations on contention. + * @hole: Pointer to first hole node. + * @olist: List of allocated ranges. + * @flist: Array[fence context hash] of queues of fenced allocated ranges. + * @size: Size of the managed range. + * @align: Default alignment for the managed range. + */ +struct drm_suballoc_manager { + wait_queue_head_t wq; + struct list_head *hole; + struct list_head olist; + struct list_head flist[DRM_SUBALLOC_MAX_QUEUES]; + size_t size; + size_t align; +}; + +/** + * struct drm_suballoc - Sub-allocated range + * @olist: List link for list of allocated ranges. + * @flist: List link for the manager fenced allocated ranges queues. + * @manager: The drm_suballoc_manager. + * @soffset: Start offset. + * @eoffset: End offset + 1 so that @eoffset - @soffset = size. + * @fence: The fence protecting the allocation.
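+ *
+ * A sketch of typical usage, for illustration only (the ring, its
+ * sa_manager, the emit_commands() helper and the fence come from the
+ * hypothetical driver; this assumes drm_suballoc_new() returns an
+ * ERR_PTR-encoded error on failure and that an @align of 0 requests the
+ * manager's default alignment)::
+ *
+ *	struct drm_suballoc *sa;
+ *
+ *	sa = drm_suballoc_new(&ring->sa_manager, size, GFP_KERNEL, true, 0);
+ *	if (IS_ERR(sa))
+ *		return PTR_ERR(sa);
+ *
+ *	emit_commands(ring, drm_suballoc_soffset(sa), size);
+ *	drm_suballoc_free(sa, fence);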
+ */ +struct drm_suballoc { + struct list_head olist; + struct list_head flist; + struct drm_suballoc_manager *manager; + size_t soffset; + size_t eoffset; + struct dma_fence *fence; +}; + +void drm_suballoc_manager_init(struct drm_suballoc_manager *sa_manager, + size_t size, size_t align); + +void drm_suballoc_manager_fini(struct drm_suballoc_manager *sa_manager); + +struct drm_suballoc * +drm_suballoc_new(struct drm_suballoc_manager *sa_manager, size_t size, + gfp_t gfp, bool intr, size_t align); + +void drm_suballoc_free(struct drm_suballoc *sa, struct dma_fence *fence); + +/** + * drm_suballoc_soffset - Range start. + * @sa: The struct drm_suballoc. + * + * Return: The start of the allocated range. + */ +static inline size_t drm_suballoc_soffset(struct drm_suballoc *sa) +{ + return sa->soffset; +} + +/** + * drm_suballoc_eoffset - Range end. + * @sa: The struct drm_suballoc. + * + * Return: The end of the allocated range + 1. + */ +static inline size_t drm_suballoc_eoffset(struct drm_suballoc *sa) +{ + return sa->eoffset; +} + +/** + * drm_suballoc_size - Range size. + * @sa: The struct drm_suballoc. + * + * Return: The size of the allocated range. + */ +static inline size_t drm_suballoc_size(struct drm_suballoc *sa) +{ + return sa->eoffset - sa->soffset; +} + +#ifdef CONFIG_DEBUG_FS +void drm_suballoc_dump_debug_info(struct drm_suballoc_manager *sa_manager, + struct drm_printer *p, + unsigned long long suballoc_base); +#else +static inline void +drm_suballoc_dump_debug_info(struct drm_suballoc_manager *sa_manager, + struct drm_printer *p, + unsigned long long suballoc_base) +{ } + +#endif + +#endif /* _DRM_SUBALLOC_H_ */ diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h index 6cf7243a1dc5..b40052132e52 100644 --- a/include/drm/drm_syncobj.h +++ b/include/drm/drm_syncobj.h @@ -54,7 +54,11 @@ struct drm_syncobj { */ struct list_head cb_list; /** - * @lock: Protects &cb_list and write-locks &fence. + * @ev_fd_list: List of registered eventfd. + */ + struct list_head ev_fd_list; + /** + * @lock: Protects &cb_list and &ev_fd_list, and write-locks &fence. */ spinlock_t lock; /** diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h index 6273cac44e47..96a5d858404b 100644 --- a/include/drm/drm_sysfs.h +++ b/include/drm/drm_sysfs.h @@ -12,6 +12,6 @@ void drm_class_device_unregister(struct device *dev); void drm_sysfs_hotplug_event(struct drm_device *dev); void drm_sysfs_connector_hotplug_event(struct drm_connector *connector); -void drm_sysfs_connector_status_event(struct drm_connector *connector, - struct drm_property *property); +void drm_sysfs_connector_property_event(struct drm_connector *connector, + struct drm_property *property); #endif diff --git a/include/drm/drm_util.h b/include/drm/drm_util.h index 79952d8c4bba..440199618620 100644 --- a/include/drm/drm_util.h +++ b/include/drm/drm_util.h @@ -36,6 +36,7 @@ #include <linux/kgdb.h> #include <linux/preempt.h> #include <linux/smp.h> +#include <linux/util_macros.h> /* * Use EXPORT_SYMBOL_FOR_TESTS_ONLY() for functions that shall @@ -48,21 +49,6 @@ #endif /** - * for_each_if - helper for handling conditionals in various for_each macros - * @condition: The condition to check - * - * Typical use:: - * - * #define for_each_foo_bar(x, y) \' - * list_for_each_entry(x, y->list, head) \' - * for_each_if(x->something == SOMETHING) - * - * The for_each_if() macro makes the use of for_each_foo_bar() less error - * prone. 
- */ -#define for_each_if(condition) if (!(condition)) {} else - -/** * drm_can_sleep - returns true if currently okay to sleep * * This function shall not be used in new code. diff --git a/include/drm/drm_utils.h b/include/drm/drm_utils.h index 70775748d243..6a46f755daba 100644 --- a/include/drm/drm_utils.h +++ b/include/drm/drm_utils.h @@ -12,8 +12,18 @@ #include <linux/types.h> +struct drm_edid; + int drm_get_panel_orientation_quirk(int width, int height); +struct drm_panel_backlight_quirk { + u16 min_brightness; + u32 brightness_mask; +}; + +const struct drm_panel_backlight_quirk * +drm_get_panel_backlight_quirk(const struct drm_edid *edid); + signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec); #endif diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h index 733a3e2d1d10..ffa564d79638 100644 --- a/include/drm/drm_vblank.h +++ b/include/drm/drm_vblank.h @@ -25,6 +25,7 @@ #define _DRM_VBLANK_H_ #include <linux/seqlock.h> +#include <linux/hrtimer.h> #include <linux/idr.h> #include <linux/poll.h> #include <linux/kthread.h> @@ -79,6 +80,53 @@ struct drm_pending_vblank_event { }; /** + * struct drm_vblank_crtc_config - vblank configuration for a CRTC + */ +struct drm_vblank_crtc_config { + /** + * @offdelay_ms: Vblank off delay in ms, used to determine how long + * &drm_vblank_crtc.disable_timer waits before disabling. + * + * Defaults to the value of drm_vblank_offdelay in drm_crtc_vblank_on(). + */ + int offdelay_ms; + + /** + * @disable_immediate: See &drm_device.vblank_disable_immediate + * for the exact semantics of immediate vblank disabling. + * + * Additionally, this tracks the disable immediate value per crtc, just + * in case it needs to differ from the default value for a given device. + * + * Defaults to the value of &drm_device.vblank_disable_immediate in + * drm_crtc_vblank_on(). + */ + bool disable_immediate; +}; + +/** + * struct drm_vblank_crtc_timer - vblank timer for a CRTC + */ +struct drm_vblank_crtc_timer { + /** + * @timer: The vblank's high-resolution timer + */ + struct hrtimer timer; + /** + * @interval_lock: Protects @interval + */ + spinlock_t interval_lock; + /** + * @interval: Duration between two vblanks + */ + ktime_t interval; + /** + * @crtc: The timer's CRTC + */ + struct drm_crtc *crtc; +}; + +/** * struct drm_vblank_crtc - vblank tracking for a CRTC * * This structure tracks the vblank state for one CRTC. @@ -99,8 +147,8 @@ struct drm_vblank_crtc { wait_queue_head_t queue; /** * @disable_timer: Disable timer for the delayed vblank disabling - * hysteresis logic. Vblank disabling is controlled through the - * drm_vblank_offdelay module option and the setting of the + * hysteresis logic. Vblank disabling is controlled through + * &drm_vblank_crtc_config.offdelay_ms and the setting of the * &drm_device.max_vblank_count value. */ struct timer_list disable_timer; @@ -199,6 +247,12 @@ struct drm_vblank_crtc { struct drm_display_mode hwmode; /** + * @config: Stores vblank configuration values for a given CRTC. + * Also, see drm_crtc_vblank_on_config(). + */ + struct drm_vblank_crtc_config config; + + /** * @enabled: Tracks the enabling state of the corresponding &drm_crtc to * avoid double-disabling and hence corrupting saved state. Needed by * drivers not using atomic KMS, since those might go through their CRTC @@ -223,13 +277,20 @@ struct drm_vblank_crtc { * cancelled. 
*/ wait_queue_head_t work_wait_queue; + + /** + * @vblank_timer: Holds the state of the vblank timer + */ + struct drm_vblank_crtc_timer vblank_timer; }; +struct drm_vblank_crtc *drm_crtc_vblank_crtc(struct drm_crtc *crtc); int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs); bool drm_dev_has_vblank(const struct drm_device *dev); u64 drm_crtc_vblank_count(struct drm_crtc *crtc); u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, ktime_t *vblanktime); +int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime); void drm_crtc_send_vblank_event(struct drm_crtc *crtc, struct drm_pending_vblank_event *e); void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, @@ -245,6 +306,8 @@ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe); void drm_crtc_wait_one_vblank(struct drm_crtc *crtc); void drm_crtc_vblank_off(struct drm_crtc *crtc); void drm_crtc_vblank_reset(struct drm_crtc *crtc); +void drm_crtc_vblank_on_config(struct drm_crtc *crtc, + const struct drm_vblank_crtc_config *config); void drm_crtc_vblank_on(struct drm_crtc *crtc); u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc); void drm_crtc_vblank_restore(struct drm_crtc *crtc); @@ -255,6 +318,10 @@ wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc); void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc, u32 max_vblank_count); +int drm_crtc_vblank_start_timer(struct drm_crtc *crtc); +void drm_crtc_vblank_cancel_timer(struct drm_crtc *crtc); +void drm_crtc_vblank_get_vblank_timeout(struct drm_crtc *crtc, ktime_t *vblank_time); + /* * Helpers for struct drm_crtc_funcs */ diff --git a/include/drm/drm_vblank_helper.h b/include/drm/drm_vblank_helper.h new file mode 100644 index 000000000000..fcd8a9b35846 --- /dev/null +++ b/include/drm/drm_vblank_helper.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _DRM_VBLANK_HELPER_H_ +#define _DRM_VBLANK_HELPER_H_ + +#include <linux/hrtimer_types.h> +#include <linux/types.h> + +struct drm_atomic_state; +struct drm_crtc; + +/* + * VBLANK helpers + */ + +void drm_crtc_vblank_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state); +void drm_crtc_vblank_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state); +void drm_crtc_vblank_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *crtc_state); + +/** + * DRM_CRTC_HELPER_VBLANK_FUNCS - Default implementation for VBLANK helpers + * + * This macro initializes struct &drm_crtc_helper_funcs to default helpers + * for VBLANK handling. + */ +#define DRM_CRTC_HELPER_VBLANK_FUNCS \ + .atomic_flush = drm_crtc_vblank_atomic_flush, \ + .atomic_enable = drm_crtc_vblank_atomic_enable, \ + .atomic_disable = drm_crtc_vblank_atomic_disable + +/* + * VBLANK timer + */ + +int drm_crtc_vblank_helper_enable_vblank_timer(struct drm_crtc *crtc); +void drm_crtc_vblank_helper_disable_vblank_timer(struct drm_crtc *crtc); +bool drm_crtc_vblank_helper_get_vblank_timestamp_from_timer(struct drm_crtc *crtc, + int *max_error, + ktime_t *vblank_time, + bool in_vblank_irq); + +/** + * DRM_CRTC_VBLANK_TIMER_FUNCS - Default implementation for VBLANK timers + * + * This macro initializes struct &drm_crtc_funcs to default helpers for + * VBLANK timers. 
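This macro and DRM_CRTC_HELPER_VBLANK_FUNCS above are meant to be dropped into a driver's function tables next to the usual atomic helpers. A sketch, with the example_* table names being hypothetical:

static const struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
        /* Expands to .atomic_flush, .atomic_enable and .atomic_disable. */
        DRM_CRTC_HELPER_VBLANK_FUNCS,
};

static const struct drm_crtc_funcs example_crtc_funcs = {
        .reset = drm_atomic_helper_crtc_reset,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
        /* Expands to .enable_vblank, .disable_vblank and
         * .get_vblank_timestamp, all backed by the vblank timer. */
        DRM_CRTC_VBLANK_TIMER_FUNCS,
};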
+ */ +#define DRM_CRTC_VBLANK_TIMER_FUNCS \ + .enable_vblank = drm_crtc_vblank_helper_enable_vblank_timer, \ + .disable_vblank = drm_crtc_vblank_helper_disable_vblank_timer, \ + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp_from_timer + +#endif diff --git a/include/drm/drm_vblank_work.h b/include/drm/drm_vblank_work.h index eb41d0810c4f..e04d436b7297 100644 --- a/include/drm/drm_vblank_work.h +++ b/include/drm/drm_vblank_work.h @@ -17,6 +17,7 @@ struct drm_crtc; * drm_vblank_work_init() * drm_vblank_work_cancel_sync() * drm_vblank_work_flush() + * drm_vblank_work_flush_all() */ struct drm_vblank_work { /** @@ -67,5 +68,6 @@ void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc, void (*func)(struct kthread_work *work)); bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work); void drm_vblank_work_flush(struct drm_vblank_work *work); +void drm_vblank_work_flush_all(struct drm_crtc *crtc); #endif /* !_DRM_VBLANK_WORK_H_ */ diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h index 17e576c80169..c380a7b8f55a 100644 --- a/include/drm/drm_writeback.h +++ b/include/drm/drm_writeback.h @@ -161,6 +161,12 @@ int drm_writeback_connector_init_with_encoder(struct drm_device *dev, const struct drm_connector_funcs *con_funcs, const u32 *formats, int n_formats); +int drmm_writeback_connector_init(struct drm_device *dev, + struct drm_writeback_connector *wb_connector, + const struct drm_connector_funcs *con_funcs, + struct drm_encoder *enc, + const u32 *formats, int n_formats); + int drm_writeback_set_fb(struct drm_connector_state *conn_state, struct drm_framebuffer *fb); diff --git a/include/drm/gma_drm.h b/include/drm/gma_drm.h deleted file mode 100644 index 228f43e8df89..000000000000 --- a/include/drm/gma_drm.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/************************************************************************** - * Copyright (c) 2007-2011, Intel Corporation. - * All Rights Reserved. - * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA. - * All Rights Reserved. - * - **************************************************************************/ - -#ifndef _GMA_DRM_H_ -#define _GMA_DRM_H_ - -#endif diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 9db9e5e504ee..fb88301b3c45 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -33,14 +33,23 @@ #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000) /** - * DRM_SCHED_FENCE_DONT_PIPELINE - Prefent dependency pipelining + * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining * * Setting this flag on a scheduler fence prevents pipelining of jobs depending * on this fence. In other words we always insert a full CPU round trip before - * dependen jobs are pushed to the hw queue. + * dependent jobs are pushed to the hw queue. */ #define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS +/** + * DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT - A fence deadline hint has been set + * + * Because a deadline hint can be set before the backing hw + * fence is created, we need to keep track of whether a deadline has already + * been set.
+ */ +#define DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT (DMA_FENCE_FLAG_USER_BITS + 1) + enum dma_resv_usage; struct dma_resv; struct drm_gem_object; @@ -48,25 +57,20 @@ struct drm_gem_object; struct drm_gpu_scheduler; struct drm_sched_rq; +struct drm_file; + /* These are often used as an (initial) index * to an array, and as such should start at 0. */ enum drm_sched_priority { - DRM_SCHED_PRIORITY_MIN, - DRM_SCHED_PRIORITY_NORMAL, - DRM_SCHED_PRIORITY_HIGH, DRM_SCHED_PRIORITY_KERNEL, + DRM_SCHED_PRIORITY_HIGH, + DRM_SCHED_PRIORITY_NORMAL, + DRM_SCHED_PRIORITY_LOW, - DRM_SCHED_PRIORITY_COUNT, - DRM_SCHED_PRIORITY_UNSET = -2 + DRM_SCHED_PRIORITY_COUNT }; -/* Used to chose between FIFO and RR jobs scheduling */ -extern int drm_sched_policy; - -#define DRM_SCHED_POLICY_RR 0 -#define DRM_SCHED_POLICY_FIFO 1 - /** * struct drm_sched_entity - A wrapper around a job queue (typically * attached to the DRM file_priv). @@ -87,13 +91,21 @@ struct drm_sched_entity { struct list_head list; /** + * @lock: + * + * Lock protecting the run-queue (@rq) to which this entity belongs, + * @priority and the list of schedulers (@sched_list, @num_sched_list). + */ + spinlock_t lock; + + /** * @rq: * * Runqueue on which this entity is currently scheduled. * * FIXME: Locking is very unclear for this. Writers are protected by - * @rq_lock, but readers are generally lockless and seem to just race - * with not even a READ_ONCE. + * @lock, but readers are generally lockless and seem to just race with + * not even a READ_ONCE. */ struct drm_sched_rq *rq; @@ -126,18 +138,11 @@ struct drm_sched_entity { * @priority: * * Priority of the entity. This can be modified by calling - * drm_sched_entity_set_priority(). Protected by &rq_lock. + * drm_sched_entity_set_priority(). Protected by @lock. */ enum drm_sched_priority priority; /** - * @rq_lock: - * - * Lock to modify the runqueue to which this entity belongs. - */ - spinlock_t rq_lock; - - /** * @job_queue: the list of jobs of this entity. */ struct spsc_queue job_queue; @@ -187,10 +192,10 @@ struct drm_sched_entity { * @last_scheduled: * * Points to the finished fence of the last scheduled job. Only written - * by the scheduler thread, can be accessed locklessly from - * drm_sched_job_arm() iff the queue is empty. + * by drm_sched_entity_pop_job(). Can be accessed locklessly from + * drm_sched_job_arm() if the queue is empty. */ - struct dma_fence *last_scheduled; + struct dma_fence __rcu *last_scheduled; /** * @last_user: last group leader pushing a job into the entity. @@ -228,33 +233,28 @@ struct drm_sched_entity { */ struct rb_node rb_tree_node; - /** - * @elapsed_ns: - * - * Records the amount of time where jobs from this entity were active - * on the GPU. - */ - uint64_t elapsed_ns; }; /** * struct drm_sched_rq - queue of entities to be scheduled. * - * @lock: to modify the entities list. * @sched: the scheduler to which this rq belongs to. - * @entities: list of the entities to be scheduled. + * @lock: protects @entities, @rb_tree_root and @current_entity. * @current_entity: the entity which is to be scheduled. - * @rb_tree_root: root of time based priory queue of entities for FIFO scheduling + * @entities: list of the entities to be scheduled. + * @rb_tree_root: root of time based priority queue of entities for FIFO scheduling * * Run queue is a set of entities scheduling command submissions for * one specific ring. It implements the scheduling policy that selects * the next entity to emit commands from. 
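Given the renumbered priorities above (DRM_SCHED_PRIORITY_KERNEL is now index 0 and the highest level, DRM_SCHED_PRIORITY_LOW the lowest), a typical entity setup looks roughly like the sketch below. Assumptions: a single scheduler instance, hypothetical example_* naming, and no shared guilty counter (the final argument):

static int example_entity_setup(struct drm_sched_entity *entity,
                                struct drm_gpu_scheduler *sched)
{
        struct drm_gpu_scheduler *sched_list[] = { sched };

        /* NORMAL now sits between HIGH and LOW in the reordered enum. */
        return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
                                     sched_list, ARRAY_SIZE(sched_list),
                                     NULL);
}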
*/ struct drm_sched_rq { - spinlock_t lock; struct drm_gpu_scheduler *sched; - struct list_head entities; + + spinlock_t lock; + /* Following members are protected by the @lock: */ struct drm_sched_entity *current_entity; + struct list_head entities; struct rb_root_cached rb_tree_root; }; @@ -280,6 +280,12 @@ struct drm_sched_fence { */ struct dma_fence finished; + /** + * @deadline: deadline set on &drm_sched_fence.finished which + * potentially needs to be propagated to &drm_sched_fence.parent + */ + ktime_t deadline; + /** * @parent: the fence returned by &drm_sched_backend_ops.run_job * when scheduling the job on hardware. We signal the @@ -299,6 +305,13 @@ struct drm_sched_fence { * @owner: job owner for debugging */ void *owner; + + /** + * @drm_client_id: + * + * The client_id of the drm_file which owns the job. + */ + uint64_t drm_client_id; }; struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); @@ -311,8 +324,8 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); * @sched: the scheduler instance on which this job is scheduled. * @s_fence: contains the fences for the scheduling of job. * @finish_cb: the callback for the finished fence. - * @work: Helper to reschdeule job kill to different context. - * @id: a unique id assigned to each job scheduled on the scheduler. + * @credits: the number of credits this job contributes to the scheduler + * @work: Helper to reschedule job kill to different context. * @karma: increment on every hang caused by this job. If this exceeds the hang * limit of the scheduler then the job is marked guilty and will not * be scheduled further. @@ -325,25 +338,45 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); * to schedule the job. */ struct drm_sched_job { - struct spsc_node queue_node; - struct list_head list; + /** + * @submit_ts: + * + * When the job was pushed into the entity queue. + */ + ktime_t submit_ts; + + /** + * @sched: + * + * The scheduler this job is or will be scheduled on. Gets set by + * drm_sched_job_arm(). Valid until drm_sched_backend_ops.free_job() + * has finished. + */ struct drm_gpu_scheduler *sched; + struct drm_sched_fence *s_fence; + struct drm_sched_entity *entity; + + enum drm_sched_priority s_priority; + u32 credits; + /** @last_dependency: tracks @dependencies as they signal */ + unsigned int last_dependency; + atomic_t karma; + + struct spsc_node queue_node; + struct list_head list; /* * work is used only after finish_cb has been used and will not be * accessed anymore. */ union { - struct dma_fence_cb finish_cb; - struct work_struct work; + struct dma_fence_cb finish_cb; + struct work_struct work; }; - uint64_t id; - atomic_t karma; - enum drm_sched_priority s_priority; - struct drm_sched_entity *entity; struct dma_fence_cb cb; + /** * @dependencies: * @@ -352,28 +385,22 @@ struct drm_sched_job { * drm_sched_job_add_implicit_dependencies(). */ struct xarray dependencies; - - /** @last_dependency: tracks @dependencies as they signal */ - unsigned long last_dependency; - - /** - * @submit_ts: - * - * When the job was pushed into the entity queue. - */ - ktime_t submit_ts; }; -static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job, - int threshold) -{ - return s_job && atomic_inc_return(&s_job->karma) > threshold; -} - +/** + * enum drm_gpu_sched_stat - the scheduler's status + * + * @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use. + * @DRM_GPU_SCHED_STAT_RESET: The GPU hung and successfully reset. 
+ * @DRM_GPU_SCHED_STAT_ENODEV: Error: Device is not available anymore. + * @DRM_GPU_SCHED_STAT_NO_HANG: Contrary to the scheduler's assumption, the GPU + * did not hang and is still running. + */ enum drm_gpu_sched_stat { - DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */ - DRM_GPU_SCHED_STAT_NOMINAL, + DRM_GPU_SCHED_STAT_NONE, + DRM_GPU_SCHED_STAT_RESET, DRM_GPU_SCHED_STAT_ENODEV, + DRM_GPU_SCHED_STAT_NO_HANG, }; /** @@ -397,10 +424,36 @@ struct drm_sched_backend_ops { struct drm_sched_entity *s_entity); /** - * @run_job: Called to execute the job once all of the dependencies - * have been resolved. This may be called multiple times, if - * timedout_job() has happened and drm_sched_job_recovery() - * decides to try it again. + * @run_job: Called to execute the job once all of the dependencies + * have been resolved. + * + * @sched_job: the job to run + * + * The deprecated drm_sched_resubmit_jobs() (called by &struct + * drm_sched_backend_ops.timedout_job) can invoke this again with the + * same parameters. Using this is discouraged because it violates + * dma_fence rules, notably dma_fence_init() has to be called on + * already initialized fences for a second time. Moreover, this is + * dangerous because attempts to allocate memory might deadlock with + * memory management code waiting for the reset to complete. + * + * TODO: Document what drivers should do / use instead. + * + * This method is called in a workqueue context - either from the + * submit_wq the driver passed through drm_sched_init(), or, if the + * driver passed NULL, a separate, ordered workqueue the scheduler + * allocated. + * + * Note that the scheduler expects to 'inherit' its own reference to + * this fence from the callback. It does not invoke an extra + * dma_fence_get() on it. Consequently, this callback must take a + * reference for the scheduler, and additional ones for the driver's + * respective needs. + * + * Return: + * * On success: dma_fence the driver must signal once the hardware has + * completed the job ("hardware fence"). + * * On failure: NULL or an ERR_PTR. */ struct dma_fence *(*run_job)(struct drm_sched_job *sched_job); @@ -408,43 +461,52 @@ struct drm_sched_backend_ops { * @timedout_job: Called when a job has taken too long to execute, * to trigger GPU recovery. * - * This method is called in a workqueue context. + * @sched_job: The job that has timed out + * + * Drivers typically issue a reset to recover from GPU hangs. + * This procedure looks very different depending on whether a firmware + * or a hardware scheduler is being used. + * + * For a FIRMWARE SCHEDULER, each ring has one scheduler, and each + * scheduler has one entity. Hence, the steps taken typically look as + * follows: + * + * 1. Stop the scheduler using drm_sched_stop(). This will pause the + * scheduler workqueues and cancel the timeout work, guaranteeing + * that nothing is queued while the ring is being removed. + * 2. Remove the ring. The firmware will make sure that the + * corresponding parts of the hardware are reset, and that other + * rings are not impacted. + * 3. Kill the entity and the associated scheduler. * - * Drivers typically issue a reset to recover from GPU hangs, and this - * procedure usually follows the following workflow: * - * 1. Stop the scheduler using drm_sched_stop(). This will park the - * scheduler thread and cancel the timeout work, guaranteeing that - * nothing is queued while we reset the hardware queue - * 2. Try to gracefully stop non-faulty jobs (optional) - * 3.
Issue a GPU reset (driver-specific) - * 4. Re-submit jobs using drm_sched_resubmit_jobs() - * 5. Restart the scheduler using drm_sched_start(). At that point, new - * jobs can be queued, and the scheduler thread is unblocked + * For a HARDWARE SCHEDULER, a scheduler instance schedules jobs from + * one or more entities to one ring. This implies that all entities + * associated with the affected scheduler cannot be torn down, because + * this would effectively also affect innocent userspace processes which + * did not submit faulty jobs (for example). + * + * Consequently, the procedure to recover with a hardware scheduler + * should look like this: + * + * 1. Stop all schedulers impacted by the reset using drm_sched_stop(). + * 2. Kill the entity the faulty job stems from. + * 3. Issue a GPU reset on all faulty rings (driver-specific). + * 4. Re-submit jobs on all schedulers impacted by re-submitting them to + * the entities which are still alive. + * 5. Restart all schedulers that were stopped in step #1 using + * drm_sched_start(). * * Note that some GPUs have distinct hardware queues but need to reset * the GPU globally, which requires extra synchronization between the - * timeout handler of the different &drm_gpu_scheduler. One way to - * achieve this synchronization is to create an ordered workqueue - * (using alloc_ordered_workqueue()) at the driver level, and pass this - * queue to drm_sched_init(), to guarantee that timeout handlers are - * executed sequentially. The above workflow needs to be slightly - * adjusted in that case: - * - * 1. Stop all schedulers impacted by the reset using drm_sched_stop() - * 2. Try to gracefully stop non-faulty jobs on all queues impacted by - * the reset (optional) - * 3. Issue a GPU reset on all faulty queues (driver-specific) - * 4. Re-submit jobs on all schedulers impacted by the reset using - * drm_sched_resubmit_jobs() - * 5. Restart all schedulers that were stopped in step #1 using - * drm_sched_start() + * timeout handlers of different schedulers. One way to achieve this + * synchronization is to create an ordered workqueue (using + * alloc_ordered_workqueue()) at the driver level, and pass this queue + * as drm_sched_init()'s @timeout_wq parameter. This will guarantee + * that timeout handlers are executed sequentially. * - * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal, - * and the underlying driver has started or completed recovery. + * Return: The scheduler's status, defined by &enum drm_gpu_sched_stat * - * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer - * available, i.e. has been unplugged. */ enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job); @@ -453,27 +515,47 @@ struct drm_sched_backend_ops { * and it's time to clean it up. */ void (*free_job)(struct drm_sched_job *sched_job); + + /** + * @cancel_job: Used by the scheduler to guarantee remaining jobs' fences + * get signaled in drm_sched_fini(). + * + * Used by the scheduler to cancel all jobs that have not been executed + * with &struct drm_sched_backend_ops.run_job by the time + * drm_sched_fini() gets invoked. + * + * Drivers need to signal the passed job's hardware fence with an + * appropriate error code (e.g., -ECANCELED) in this callback. They + * must not free the job. + * + * The scheduler will only call this callback once it stopped calling + * all other callbacks forever, with the exception of &struct + * drm_sched_backend_ops.free_job. 
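Taken together, the run_job, timedout_job and cancel_job contracts documented here suggest a driver skeleton along the following lines. This is a deliberately compressed single-ring recovery sketch rather than either full flow described above; the example_* helpers, the job wrapper and its hw_fence field are hypothetical, and drm_sched_init_args/drm_sched_init are the interfaces documented further below:

struct example_job {
        struct drm_sched_job base;
        struct dma_fence *hw_fence;     /* assumed driver-owned hw fence */
};

static struct dma_fence *example_run_job(struct drm_sched_job *sched_job)
{
        struct example_job *job = container_of(sched_job, struct example_job, base);

        /* example_hw_submit() (assumed) returns the hardware fence with one
         * reference, which the driver keeps in job->hw_fence; the extra
         * dma_fence_get() is the reference the scheduler "inherits". */
        job->hw_fence = example_hw_submit(job);
        return dma_fence_get(job->hw_fence);
}

static enum drm_gpu_sched_stat example_timedout_job(struct drm_sched_job *sched_job)
{
        struct drm_gpu_scheduler *sched = sched_job->sched;

        /* Compressed recovery: stop the scheduler, reset the hardware
         * (driver-specific), restart, then report the reset. */
        drm_sched_stop(sched, sched_job);
        example_hw_reset_ring(sched_job);       /* assumed */
        drm_sched_start(sched, 0);

        return DRM_GPU_SCHED_STAT_RESET;
}

static void example_cancel_job(struct drm_sched_job *sched_job)
{
        struct example_job *job = container_of(sched_job, struct example_job, base);

        /* Signal the hardware fence with an error; never free the job here. */
        dma_fence_set_error(job->hw_fence, -ECANCELED);
        dma_fence_signal(job->hw_fence);
}

static const struct drm_sched_backend_ops example_sched_ops = {
        .run_job        = example_run_job,
        .timedout_job   = example_timedout_job,
        .free_job       = example_free_job,     /* assumed cleanup helper */
        .cancel_job     = example_cancel_job,
};

static int example_sched_create(struct drm_gpu_scheduler *sched, struct device *dev)
{
        const struct drm_sched_init_args args = {
                .ops = &example_sched_ops,
                .num_rqs = DRM_SCHED_PRIORITY_COUNT,
                .credit_limit = 64,
                .timeout = msecs_to_jiffies(500),
                .name = "example-ring",
                .dev = dev,
        };

        return drm_sched_init(sched, &args);
}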
+ */ + void (*cancel_job)(struct drm_sched_job *sched_job); }; /** * struct drm_gpu_scheduler - scheduler instance-specific data * * @ops: backend operations provided by the driver. - * @hw_submission_limit: the max size of the hardware queue. + * @credit_limit: the credit limit of this scheduler + * @credit_count: the current credit count of this scheduler * @timeout: the time after which a job is removed from the scheduler. * @name: name of the ring for which this scheduler is being used. - * @sched_rq: priority wise array of run queues. - * @wake_up_worker: the wait queue on which the scheduler sleeps until a job - * is ready to be scheduled. - * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler + * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT, + * as there's usually one run-queue per priority, but could be less. + * @sched_rq: An allocated array of run-queues of size @num_rqs; + * @job_scheduled: once drm_sched_entity_flush() is called the scheduler * waits on this wait queue until all the scheduled jobs are * finished. - * @hw_rq_count: the number of jobs currently in the hardware queue. * @job_id_count: used to assign unique id to the each job. + * @submit_wq: workqueue used to queue @work_run_job and @work_free_job * @timeout_wq: workqueue used to queue @work_tdr + * @work_run_job: work which calls run_job op of each scheduler. + * @work_free_job: work which calls free_job op of each scheduler. * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the * timeout interval is over. - * @thread: the kthread on which the scheduler which run. * @pending_list: the list of jobs which are currently in the job queue. * @job_list_lock: lock to protect the pending_list. * @hang_limit: once the hangs by a job crosses this limit then it is marked @@ -482,23 +564,27 @@ struct drm_sched_backend_ops { * @_score: score used when the driver doesn't provide one * @ready: marks if the underlying HW is ready to work * @free_guilty: A hit to time out handler to free the guilty job. + * @pause_submit: pause queuing of @work_run_job on @submit_wq + * @own_submit_wq: scheduler owns allocation of @submit_wq * @dev: system &struct device * * One scheduler is implemented for each hardware ring. */ struct drm_gpu_scheduler { const struct drm_sched_backend_ops *ops; - uint32_t hw_submission_limit; + u32 credit_limit; + atomic_t credit_count; long timeout; const char *name; - struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT]; - wait_queue_head_t wake_up_worker; + u32 num_rqs; + struct drm_sched_rq **sched_rq; wait_queue_head_t job_scheduled; - atomic_t hw_rq_count; atomic64_t job_id_count; + struct workqueue_struct *submit_wq; struct workqueue_struct *timeout_wq; + struct work_struct work_run_job; + struct work_struct work_free_job; struct delayed_work work_tdr; - struct task_struct *thread; struct list_head pending_list; spinlock_t job_list_lock; int hang_limit; @@ -506,52 +592,96 @@ struct drm_gpu_scheduler { atomic_t _score; bool ready; bool free_guilty; + bool pause_submit; + bool own_submit_wq; struct device *dev; }; +/** + * struct drm_sched_init_args - parameters for initializing a DRM GPU scheduler + * + * @ops: backend operations provided by the driver + * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is + * allocated and used. + * @num_rqs: Number of run-queues. This may be at most DRM_SCHED_PRIORITY_COUNT, + * as there's usually one run-queue per priority, but may be less. 
+ * @credit_limit: the number of credits this scheduler can hold from all jobs + * @hang_limit: number of times to allow a job to hang before dropping it. + * This mechanism is DEPRECATED. Set it to 0. + * @timeout: timeout value in jiffies for submitted jobs. + * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is used. + * @score: score atomic shared with other schedulers. May be NULL. + * @name: name (typically the driver's name). Used for debugging + * @dev: associated device. Used for debugging + */ +struct drm_sched_init_args { + const struct drm_sched_backend_ops *ops; + struct workqueue_struct *submit_wq; + struct workqueue_struct *timeout_wq; + u32 num_rqs; + u32 credit_limit; + unsigned int hang_limit; + long timeout; + atomic_t *score; + const char *name; + struct device *dev; +}; + +/* Scheduler operations */ + int drm_sched_init(struct drm_gpu_scheduler *sched, - const struct drm_sched_backend_ops *ops, - uint32_t hw_submission, unsigned hang_limit, - long timeout, struct workqueue_struct *timeout_wq, - atomic_t *score, const char *name, struct device *dev); + const struct drm_sched_init_args *args); void drm_sched_fini(struct drm_gpu_scheduler *sched); + +unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched); +void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, + unsigned long remaining); +void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched); +bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched); +void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched); +void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched); +void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad); +void drm_sched_start(struct drm_gpu_scheduler *sched, int errno); +void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched); +void drm_sched_fault(struct drm_gpu_scheduler *sched); + +struct drm_gpu_scheduler * +drm_sched_pick_best(struct drm_gpu_scheduler **sched_list, + unsigned int num_sched_list); + +/* Jobs */ + int drm_sched_job_init(struct drm_sched_job *job, struct drm_sched_entity *entity, - void *owner); + u32 credits, void *owner, + u64 drm_client_id); void drm_sched_job_arm(struct drm_sched_job *job); +void drm_sched_entity_push_job(struct drm_sched_job *sched_job); int drm_sched_job_add_dependency(struct drm_sched_job *job, struct dma_fence *fence); +int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job, + struct drm_file *file, + u32 handle, + u32 point); int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job, struct dma_resv *resv, enum dma_resv_usage usage); int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, struct drm_gem_object *obj, bool write); - - -void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, - struct drm_gpu_scheduler **sched_list, - unsigned int num_sched_list); - +bool drm_sched_job_has_dependency(struct drm_sched_job *job, + struct dma_fence *fence); void drm_sched_job_cleanup(struct drm_sched_job *job); -void drm_sched_wakeup(struct drm_gpu_scheduler *sched); -void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad); -void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery); -void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched); void drm_sched_increase_karma(struct drm_sched_job *bad); -void drm_sched_reset_karma(struct drm_sched_job *bad); -void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type); -bool drm_sched_dependency_optimized(struct dma_fence* 
fence, - struct drm_sched_entity *entity); -void drm_sched_fault(struct drm_gpu_scheduler *sched); -void drm_sched_rq_add_entity(struct drm_sched_rq *rq, - struct drm_sched_entity *entity); -void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, - struct drm_sched_entity *entity); +static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job, + int threshold) +{ + return s_job && atomic_inc_return(&s_job->karma) > threshold; +} -void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts); +/* Entities */ int drm_sched_entity_init(struct drm_sched_entity *entity, enum drm_sched_priority priority, @@ -561,27 +691,11 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout); void drm_sched_entity_fini(struct drm_sched_entity *entity); void drm_sched_entity_destroy(struct drm_sched_entity *entity); -void drm_sched_entity_select_rq(struct drm_sched_entity *entity); -struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity); -void drm_sched_entity_push_job(struct drm_sched_job *sched_job); void drm_sched_entity_set_priority(struct drm_sched_entity *entity, enum drm_sched_priority priority); -bool drm_sched_entity_is_ready(struct drm_sched_entity *entity); - -struct drm_sched_fence *drm_sched_fence_alloc( - struct drm_sched_entity *s_entity, void *owner); -void drm_sched_fence_init(struct drm_sched_fence *fence, - struct drm_sched_entity *entity); -void drm_sched_fence_free(struct drm_sched_fence *fence); - -void drm_sched_fence_scheduled(struct drm_sched_fence *fence); -void drm_sched_fence_finished(struct drm_sched_fence *fence); - -unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched); -void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, - unsigned long remaining); -struct drm_gpu_scheduler * -drm_sched_pick_best(struct drm_gpu_scheduler **sched_list, - unsigned int num_sched_list); +int drm_sched_entity_error(struct drm_sched_entity *entity); +void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, + struct drm_gpu_scheduler **sched_list, + unsigned int num_sched_list); #endif diff --git a/include/drm/i2c/ch7006.h b/include/drm/i2c/ch7006.h deleted file mode 100644 index 8390b437a1f8..000000000000 --- a/include/drm/i2c/ch7006.h +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (C) 2009 Francisco Jerez. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef __DRM_I2C_CH7006_H__ -#define __DRM_I2C_CH7006_H__ - -/** - * struct ch7006_encoder_params - * - * Describes how the ch7006 is wired up with the GPU. It should be - * used as the @params parameter of its @set_config method. - * - * See "http://www.chrontel.com/pdf/7006.pdf" for their precise - * meaning. - */ -struct ch7006_encoder_params { - enum { - CH7006_FORMAT_RGB16 = 0, - CH7006_FORMAT_YCrCb24m16, - CH7006_FORMAT_RGB24m16, - CH7006_FORMAT_RGB15, - CH7006_FORMAT_RGB24m12C, - CH7006_FORMAT_RGB24m12I, - CH7006_FORMAT_RGB24m8, - CH7006_FORMAT_RGB16m8, - CH7006_FORMAT_RGB15m8, - CH7006_FORMAT_YCrCb24m8, - } input_format; - - enum { - CH7006_CLOCK_SLAVE = 0, - CH7006_CLOCK_MASTER, - } clock_mode; - - enum { - CH7006_CLOCK_EDGE_NEG = 0, - CH7006_CLOCK_EDGE_POS, - } clock_edge; - - int xcm, pcm; - - enum { - CH7006_SYNC_SLAVE = 0, - CH7006_SYNC_MASTER, - } sync_direction; - - enum { - CH7006_SYNC_SEPARATED = 0, - CH7006_SYNC_EMBEDDED, - } sync_encoding; - - enum { - CH7006_POUT_1_8V = 0, - CH7006_POUT_3_3V, - } pout_level; - - enum { - CH7006_ACTIVE_HSYNC = 0, - CH7006_ACTIVE_DSTART, - } active_detect; -}; - -#endif diff --git a/include/drm/i2c/sil164.h b/include/drm/i2c/sil164.h deleted file mode 100644 index 205e27384c83..000000000000 --- a/include/drm/i2c/sil164.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef __DRM_I2C_SIL164_H__ -#define __DRM_I2C_SIL164_H__ - -/** - * struct sil164_encoder_params - * - * Describes how the sil164 is connected to the GPU. It should be used - * as the @params parameter of its @set_config method. - * - * See "http://www.siliconimage.com/docs/SiI-DS-0021-E-164.pdf". 
- */ -struct sil164_encoder_params { - enum { - SIL164_INPUT_EDGE_FALLING = 0, - SIL164_INPUT_EDGE_RISING - } input_edge; - - enum { - SIL164_INPUT_WIDTH_12BIT = 0, - SIL164_INPUT_WIDTH_24BIT - } input_width; - - enum { - SIL164_INPUT_SINGLE_EDGE = 0, - SIL164_INPUT_DUAL_EDGE - } input_dual; - - enum { - SIL164_PLL_FILTER_ON = 0, - SIL164_PLL_FILTER_OFF, - } pll_filter; - - int input_skew; /** < Allowed range [-4, 3], use 0 for no de-skew. */ - int duallink_skew; /** < Allowed range [-4, 3]. */ -}; - -#endif diff --git a/include/drm/i2c/tda998x.h b/include/drm/i2c/tda998x.h deleted file mode 100644 index 3cb25ccbe5e6..000000000000 --- a/include/drm/i2c/tda998x.h +++ /dev/null @@ -1,40 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __DRM_I2C_TDA998X_H__ -#define __DRM_I2C_TDA998X_H__ - -#include <linux/hdmi.h> -#include <dt-bindings/display/tda998x.h> - -enum { - AFMT_UNUSED = 0, - AFMT_SPDIF = TDA998x_SPDIF, - AFMT_I2S = TDA998x_I2S, -}; - -struct tda998x_audio_params { - u8 config; - u8 format; - unsigned sample_width; - unsigned sample_rate; - struct hdmi_audio_infoframe cea; - u8 status[5]; -}; - -struct tda998x_encoder_params { - u8 swap_b:3; - u8 mirr_b:1; - u8 swap_a:3; - u8 mirr_a:1; - u8 swap_d:3; - u8 mirr_d:1; - u8 swap_c:3; - u8 mirr_c:1; - u8 swap_f:3; - u8 mirr_f:1; - u8 swap_e:3; - u8 mirr_e:1; - - struct tda998x_audio_params audio_params; -}; - -#endif diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h deleted file mode 100644 index f441cbcd95a4..000000000000 --- a/include/drm/i915_mei_hdcp_interface.h +++ /dev/null @@ -1,184 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0+) */ -/* - * Copyright © 2017-2019 Intel Corporation - * - * Authors: - * Ramalingam C <ramalingam.c@intel.com> - */ - -#ifndef _I915_MEI_HDCP_INTERFACE_H_ -#define _I915_MEI_HDCP_INTERFACE_H_ - -#include <linux/mutex.h> -#include <linux/device.h> -#include <drm/display/drm_hdcp.h> - -/** - * enum hdcp_port_type - HDCP port implementation type defined by ME FW - * @HDCP_PORT_TYPE_INVALID: Invalid hdcp port type - * @HDCP_PORT_TYPE_INTEGRATED: In-Host HDCP2.x port - * @HDCP_PORT_TYPE_LSPCON: HDCP2.2 discrete wired Tx port with LSPCON - * (HDMI 2.0) solution - * @HDCP_PORT_TYPE_CPDP: HDCP2.2 discrete wired Tx port using the CPDP (DP 1.3) - * solution - */ -enum hdcp_port_type { - HDCP_PORT_TYPE_INVALID, - HDCP_PORT_TYPE_INTEGRATED, - HDCP_PORT_TYPE_LSPCON, - HDCP_PORT_TYPE_CPDP -}; - -/** - * enum hdcp_wired_protocol - HDCP adaptation used on the port - * @HDCP_PROTOCOL_INVALID: Invalid HDCP adaptation protocol - * @HDCP_PROTOCOL_HDMI: HDMI adaptation of HDCP used on the port - * @HDCP_PROTOCOL_DP: DP adaptation of HDCP used on the port - */ -enum hdcp_wired_protocol { - HDCP_PROTOCOL_INVALID, - HDCP_PROTOCOL_HDMI, - HDCP_PROTOCOL_DP -}; - -enum mei_fw_ddi { - MEI_DDI_INVALID_PORT = 0x0, - - MEI_DDI_B = 1, - MEI_DDI_C, - MEI_DDI_D, - MEI_DDI_E, - MEI_DDI_F, - MEI_DDI_A = 7, - MEI_DDI_RANGE_END = MEI_DDI_A, -}; - -/** - * enum mei_fw_tc - ME Firmware defined index for transcoders - * @MEI_INVALID_TRANSCODER: Index for Invalid transcoder - * @MEI_TRANSCODER_EDP: Index for EDP Transcoder - * @MEI_TRANSCODER_DSI0: Index for DSI0 Transcoder - * @MEI_TRANSCODER_DSI1: Index for DSI1 Transcoder - * @MEI_TRANSCODER_A: Index for Transcoder A - * @MEI_TRANSCODER_B: Index for Transcoder B - * @MEI_TRANSCODER_C: Index for Transcoder C - * @MEI_TRANSCODER_D: Index for Transcoder D - */ -enum mei_fw_tc { - MEI_INVALID_TRANSCODER = 0x00, - MEI_TRANSCODER_EDP, - 
MEI_TRANSCODER_DSI0, - MEI_TRANSCODER_DSI1, - MEI_TRANSCODER_A = 0x10, - MEI_TRANSCODER_B, - MEI_TRANSCODER_C, - MEI_TRANSCODER_D -}; - -/** - * struct hdcp_port_data - intel specific HDCP port data - * @fw_ddi: ddi index as per ME FW - * @fw_tc: transcoder index as per ME FW - * @port_type: HDCP port type as per ME FW classification - * @protocol: HDCP adaptation as per ME FW - * @k: No of streams transmitted on a port. Only on DP MST this is != 1 - * @seq_num_m: Count of RepeaterAuth_Stream_Manage msg propagated. - * Initialized to 0 on AKE_INIT. Incremented after every successful - * transmission of RepeaterAuth_Stream_Manage message. When it rolls - * over re-Auth has to be triggered. - * @streams: struct hdcp2_streamid_type[k]. Defines the type and id for the - * streams - */ -struct hdcp_port_data { - enum mei_fw_ddi fw_ddi; - enum mei_fw_tc fw_tc; - u8 port_type; - u8 protocol; - u16 k; - u32 seq_num_m; - struct hdcp2_streamid_type *streams; -}; - -/** - * struct i915_hdcp_component_ops- ops for HDCP2.2 services. - * @owner: Module providing the ops - * @initiate_hdcp2_session: Initiate a Wired HDCP2.2 Tx Session. - * And Prepare AKE_Init. - * @verify_receiver_cert_prepare_km: Verify the Receiver Certificate - * AKE_Send_Cert and prepare - AKE_Stored_Km/AKE_No_Stored_Km - * @verify_hprime: Verify AKE_Send_H_prime - * @store_pairing_info: Store pairing info received - * @initiate_locality_check: Prepare LC_Init - * @verify_lprime: Verify lprime - * @get_session_key: Prepare SKE_Send_Eks - * @repeater_check_flow_prepare_ack: Validate the Downstream topology - * and prepare rep_ack - * @verify_mprime: Verify mprime - * @enable_hdcp_authentication: Mark a port as authenticated. - * @close_hdcp_session: Close the Wired HDCP Tx session per port. - * This also disables the authenticated state of the port. 
- */ -struct i915_hdcp_component_ops { - /** - * @owner: mei_hdcp module - */ - struct module *owner; - - int (*initiate_hdcp2_session)(struct device *dev, - struct hdcp_port_data *data, - struct hdcp2_ake_init *ake_data); - int (*verify_receiver_cert_prepare_km)(struct device *dev, - struct hdcp_port_data *data, - struct hdcp2_ake_send_cert - *rx_cert, - bool *km_stored, - struct hdcp2_ake_no_stored_km - *ek_pub_km, - size_t *msg_sz); - int (*verify_hprime)(struct device *dev, - struct hdcp_port_data *data, - struct hdcp2_ake_send_hprime *rx_hprime); - int (*store_pairing_info)(struct device *dev, - struct hdcp_port_data *data, - struct hdcp2_ake_send_pairing_info - *pairing_info); - int (*initiate_locality_check)(struct device *dev, - struct hdcp_port_data *data, - struct hdcp2_lc_init *lc_init_data); - int (*verify_lprime)(struct device *dev, - struct hdcp_port_data *data, - struct hdcp2_lc_send_lprime *rx_lprime); - int (*get_session_key)(struct device *dev, - struct hdcp_port_data *data, - struct hdcp2_ske_send_eks *ske_data); - int (*repeater_check_flow_prepare_ack)(struct device *dev, - struct hdcp_port_data *data, - struct hdcp2_rep_send_receiverid_list - *rep_topology, - struct hdcp2_rep_send_ack - *rep_send_ack); - int (*verify_mprime)(struct device *dev, - struct hdcp_port_data *data, - struct hdcp2_rep_stream_ready *stream_ready); - int (*enable_hdcp_authentication)(struct device *dev, - struct hdcp_port_data *data); - int (*close_hdcp_session)(struct device *dev, - struct hdcp_port_data *data); -}; - -/** - * struct i915_hdcp_component_master - Used for communication between i915 - * and mei_hdcp drivers for the HDCP2.2 services - * @mei_dev: device that provide the HDCP2.2 service from MEI Bus. - * @hdcp_ops: Ops implemented by mei_hdcp driver, used by i915 driver. - */ -struct i915_hdcp_comp_master { - struct device *mei_dev; - const struct i915_hdcp_component_ops *ops; - - /* To protect the above members. */ - struct mutex mutex; -}; - -#endif /* _I915_MEI_HDCP_INTERFACE_H_ */ diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h deleted file mode 100644 index 4a4c190f7698..000000000000 --- a/include/drm/i915_pciids.h +++ /dev/null @@ -1,751 +0,0 @@ -/* - * Copyright 2013 Intel Corporation - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ -#ifndef _I915_PCIIDS_H -#define _I915_PCIIDS_H - -/* - * A pci_device_id struct { - * __u32 vendor, device; - * __u32 subvendor, subdevice; - * __u32 class, class_mask; - * kernel_ulong_t driver_data; - * }; - * Don't use C99 here because "class" is reserved and we want to - * give userspace flexibility. - */ -#define INTEL_VGA_DEVICE(id, info) { \ - 0x8086, id, \ - ~0, ~0, \ - 0x030000, 0xff0000, \ - (unsigned long) info } - -#define INTEL_QUANTA_VGA_DEVICE(info) { \ - 0x8086, 0x16a, \ - 0x152d, 0x8990, \ - 0x030000, 0xff0000, \ - (unsigned long) info } - -#define INTEL_I810_IDS(info) \ - INTEL_VGA_DEVICE(0x7121, info), /* I810 */ \ - INTEL_VGA_DEVICE(0x7123, info), /* I810_DC100 */ \ - INTEL_VGA_DEVICE(0x7125, info) /* I810_E */ - -#define INTEL_I815_IDS(info) \ - INTEL_VGA_DEVICE(0x1132, info) /* I815*/ - -#define INTEL_I830_IDS(info) \ - INTEL_VGA_DEVICE(0x3577, info) - -#define INTEL_I845G_IDS(info) \ - INTEL_VGA_DEVICE(0x2562, info) - -#define INTEL_I85X_IDS(info) \ - INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \ - INTEL_VGA_DEVICE(0x358e, info) - -#define INTEL_I865G_IDS(info) \ - INTEL_VGA_DEVICE(0x2572, info) /* I865_G */ - -#define INTEL_I915G_IDS(info) \ - INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \ - INTEL_VGA_DEVICE(0x258a, info) /* E7221_G */ - -#define INTEL_I915GM_IDS(info) \ - INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */ - -#define INTEL_I945G_IDS(info) \ - INTEL_VGA_DEVICE(0x2772, info) /* I945_G */ - -#define INTEL_I945GM_IDS(info) \ - INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \ - INTEL_VGA_DEVICE(0x27ae, info) /* I945_GME */ - -#define INTEL_I965G_IDS(info) \ - INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */ \ - INTEL_VGA_DEVICE(0x2982, info), /* G35_G */ \ - INTEL_VGA_DEVICE(0x2992, info), /* I965_Q */ \ - INTEL_VGA_DEVICE(0x29a2, info) /* I965_G */ - -#define INTEL_G33_IDS(info) \ - INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \ - INTEL_VGA_DEVICE(0x29c2, info), /* G33_G */ \ - INTEL_VGA_DEVICE(0x29d2, info) /* Q33_G */ - -#define INTEL_I965GM_IDS(info) \ - INTEL_VGA_DEVICE(0x2a02, info), /* I965_GM */ \ - INTEL_VGA_DEVICE(0x2a12, info) /* I965_GME */ - -#define INTEL_GM45_IDS(info) \ - INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */ - -#define INTEL_G45_IDS(info) \ - INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \ - INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \ - INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \ - INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \ - INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \ - INTEL_VGA_DEVICE(0x2e92, info) /* B43_G.1 */ - -#define INTEL_PINEVIEW_G_IDS(info) \ - INTEL_VGA_DEVICE(0xa001, info) - -#define INTEL_PINEVIEW_M_IDS(info) \ - INTEL_VGA_DEVICE(0xa011, info) - -#define INTEL_IRONLAKE_D_IDS(info) \ - INTEL_VGA_DEVICE(0x0042, info) - -#define INTEL_IRONLAKE_M_IDS(info) \ - INTEL_VGA_DEVICE(0x0046, info) - -#define INTEL_SNB_D_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x0102, info), \ - INTEL_VGA_DEVICE(0x010A, info) - -#define INTEL_SNB_D_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x0112, info), \ - INTEL_VGA_DEVICE(0x0122, info) - -#define INTEL_SNB_D_IDS(info) \ - INTEL_SNB_D_GT1_IDS(info), \ - INTEL_SNB_D_GT2_IDS(info) - -#define INTEL_SNB_M_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x0106, info) - -#define INTEL_SNB_M_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x0116, info), \ - INTEL_VGA_DEVICE(0x0126, info) - -#define INTEL_SNB_M_IDS(info) \ - INTEL_SNB_M_GT1_IDS(info), \ - INTEL_SNB_M_GT2_IDS(info) - -#define INTEL_IVB_M_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x0156, info) /* GT1 mobile */ - -#define INTEL_IVB_M_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x0166, 
info) /* GT2 mobile */ - -#define INTEL_IVB_M_IDS(info) \ - INTEL_IVB_M_GT1_IDS(info), \ - INTEL_IVB_M_GT2_IDS(info) - -#define INTEL_IVB_D_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \ - INTEL_VGA_DEVICE(0x015a, info) /* GT1 server */ - -#define INTEL_IVB_D_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \ - INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */ - -#define INTEL_IVB_D_IDS(info) \ - INTEL_IVB_D_GT1_IDS(info), \ - INTEL_IVB_D_GT2_IDS(info) - -#define INTEL_IVB_Q_IDS(info) \ - INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */ - -#define INTEL_HSW_ULT_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \ - INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \ - INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \ - INTEL_VGA_DEVICE(0x0A0B, info) /* ULT GT1 reserved */ - -#define INTEL_HSW_ULX_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x0A0E, info) /* ULX GT1 mobile */ - -#define INTEL_HSW_GT1_IDS(info) \ - INTEL_HSW_ULT_GT1_IDS(info), \ - INTEL_HSW_ULX_GT1_IDS(info), \ - INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \ - INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \ - INTEL_VGA_DEVICE(0x040A, info), /* GT1 server */ \ - INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \ - INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \ - INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \ - INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \ - INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \ - INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \ - INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \ - INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \ - INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \ - INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \ - INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \ - INTEL_VGA_DEVICE(0x0D0E, info) /* CRW GT1 reserved */ - -#define INTEL_HSW_ULT_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \ - INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \ - INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \ - INTEL_VGA_DEVICE(0x0A1B, info) /* ULT GT2 reserved */ \ - -#define INTEL_HSW_ULX_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x0A1E, info) /* ULX GT2 mobile */ \ - -#define INTEL_HSW_GT2_IDS(info) \ - INTEL_HSW_ULT_GT2_IDS(info), \ - INTEL_HSW_ULX_GT2_IDS(info), \ - INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \ - INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \ - INTEL_VGA_DEVICE(0x041A, info), /* GT2 server */ \ - INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \ - INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \ - INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \ - INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \ - INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \ - INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \ - INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \ - INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \ - INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \ - INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \ - INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \ - INTEL_VGA_DEVICE(0x0D1E, info) /* CRW GT2 reserved */ - -#define INTEL_HSW_ULT_GT3_IDS(info) \ - INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \ - INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \ - INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \ - INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \ - INTEL_VGA_DEVICE(0x0A2E, info) /* ULT GT3 reserved */ - 
-#define INTEL_HSW_GT3_IDS(info) \ - INTEL_HSW_ULT_GT3_IDS(info), \ - INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \ - INTEL_VGA_DEVICE(0x0426, info), /* GT3 mobile */ \ - INTEL_VGA_DEVICE(0x042A, info), /* GT3 server */ \ - INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \ - INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \ - INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \ - INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \ - INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \ - INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \ - INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \ - INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \ - INTEL_VGA_DEVICE(0x0D26, info), /* CRW GT3 mobile */ \ - INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \ - INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \ - INTEL_VGA_DEVICE(0x0D2E, info) /* CRW GT3 reserved */ - -#define INTEL_HSW_IDS(info) \ - INTEL_HSW_GT1_IDS(info), \ - INTEL_HSW_GT2_IDS(info), \ - INTEL_HSW_GT3_IDS(info) - -#define INTEL_VLV_IDS(info) \ - INTEL_VGA_DEVICE(0x0f30, info), \ - INTEL_VGA_DEVICE(0x0f31, info), \ - INTEL_VGA_DEVICE(0x0f32, info), \ - INTEL_VGA_DEVICE(0x0f33, info) - -#define INTEL_BDW_ULT_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \ - INTEL_VGA_DEVICE(0x160B, info) /* GT1 Iris */ - -#define INTEL_BDW_ULX_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x160E, info) /* GT1 ULX */ - -#define INTEL_BDW_GT1_IDS(info) \ - INTEL_BDW_ULT_GT1_IDS(info), \ - INTEL_BDW_ULX_GT1_IDS(info), \ - INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \ - INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \ - INTEL_VGA_DEVICE(0x160D, info) /* GT1 Workstation */ - -#define INTEL_BDW_ULT_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \ - INTEL_VGA_DEVICE(0x161B, info) /* GT2 ULT */ - -#define INTEL_BDW_ULX_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */ - -#define INTEL_BDW_GT2_IDS(info) \ - INTEL_BDW_ULT_GT2_IDS(info), \ - INTEL_BDW_ULX_GT2_IDS(info), \ - INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \ - INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \ - INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */ - -#define INTEL_BDW_ULT_GT3_IDS(info) \ - INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \ - INTEL_VGA_DEVICE(0x162B, info) /* Iris */ \ - -#define INTEL_BDW_ULX_GT3_IDS(info) \ - INTEL_VGA_DEVICE(0x162E, info) /* ULX */ - -#define INTEL_BDW_GT3_IDS(info) \ - INTEL_BDW_ULT_GT3_IDS(info), \ - INTEL_BDW_ULX_GT3_IDS(info), \ - INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \ - INTEL_VGA_DEVICE(0x162A, info), /* Server */ \ - INTEL_VGA_DEVICE(0x162D, info) /* Workstation */ - -#define INTEL_BDW_ULT_RSVD_IDS(info) \ - INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \ - INTEL_VGA_DEVICE(0x163B, info) /* Iris */ - -#define INTEL_BDW_ULX_RSVD_IDS(info) \ - INTEL_VGA_DEVICE(0x163E, info) /* ULX */ - -#define INTEL_BDW_RSVD_IDS(info) \ - INTEL_BDW_ULT_RSVD_IDS(info), \ - INTEL_BDW_ULX_RSVD_IDS(info), \ - INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \ - INTEL_VGA_DEVICE(0x163A, info), /* Server */ \ - INTEL_VGA_DEVICE(0x163D, info) /* Workstation */ - -#define INTEL_BDW_IDS(info) \ - INTEL_BDW_GT1_IDS(info), \ - INTEL_BDW_GT2_IDS(info), \ - INTEL_BDW_GT3_IDS(info), \ - INTEL_BDW_RSVD_IDS(info) - -#define INTEL_CHV_IDS(info) \ - INTEL_VGA_DEVICE(0x22b0, info), \ - INTEL_VGA_DEVICE(0x22b1, info), \ - INTEL_VGA_DEVICE(0x22b2, info), \ - INTEL_VGA_DEVICE(0x22b3, info) - -#define INTEL_SKL_ULT_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \ - 
INTEL_VGA_DEVICE(0x1913, info) /* ULT GT1.5 */ - -#define INTEL_SKL_ULX_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \ - INTEL_VGA_DEVICE(0x1915, info) /* ULX GT1.5 */ - -#define INTEL_SKL_GT1_IDS(info) \ - INTEL_SKL_ULT_GT1_IDS(info), \ - INTEL_SKL_ULX_GT1_IDS(info), \ - INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \ - INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \ - INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \ - INTEL_VGA_DEVICE(0x1917, info) /* DT GT1.5 */ - -#define INTEL_SKL_ULT_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \ - INTEL_VGA_DEVICE(0x1921, info) /* ULT GT2F */ - -#define INTEL_SKL_ULX_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x191E, info) /* ULX GT2 */ - -#define INTEL_SKL_GT2_IDS(info) \ - INTEL_SKL_ULT_GT2_IDS(info), \ - INTEL_SKL_ULX_GT2_IDS(info), \ - INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \ - INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \ - INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \ - INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */ - -#define INTEL_SKL_ULT_GT3_IDS(info) \ - INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3e */ \ - INTEL_VGA_DEVICE(0x1927, info) /* ULT GT3e */ - -#define INTEL_SKL_GT3_IDS(info) \ - INTEL_SKL_ULT_GT3_IDS(info), \ - INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \ - INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3e */ \ - INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3e */ - -#define INTEL_SKL_GT4_IDS(info) \ - INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \ - INTEL_VGA_DEVICE(0x193A, info), /* SRV GT4e */ \ - INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4e */ \ - INTEL_VGA_DEVICE(0x193D, info) /* WKS GT4e */ - -#define INTEL_SKL_IDS(info) \ - INTEL_SKL_GT1_IDS(info), \ - INTEL_SKL_GT2_IDS(info), \ - INTEL_SKL_GT3_IDS(info), \ - INTEL_SKL_GT4_IDS(info) - -#define INTEL_BXT_IDS(info) \ - INTEL_VGA_DEVICE(0x0A84, info), \ - INTEL_VGA_DEVICE(0x1A84, info), \ - INTEL_VGA_DEVICE(0x1A85, info), \ - INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \ - INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */ - -#define INTEL_GLK_IDS(info) \ - INTEL_VGA_DEVICE(0x3184, info), \ - INTEL_VGA_DEVICE(0x3185, info) - -#define INTEL_KBL_ULT_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \ - INTEL_VGA_DEVICE(0x5913, info) /* ULT GT1.5 */ - -#define INTEL_KBL_ULX_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \ - INTEL_VGA_DEVICE(0x5915, info) /* ULX GT1.5 */ - -#define INTEL_KBL_GT1_IDS(info) \ - INTEL_KBL_ULT_GT1_IDS(info), \ - INTEL_KBL_ULX_GT1_IDS(info), \ - INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \ - INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \ - INTEL_VGA_DEVICE(0x590A, info), /* SRV GT1 */ \ - INTEL_VGA_DEVICE(0x590B, info) /* Halo GT1 */ - -#define INTEL_KBL_ULT_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \ - INTEL_VGA_DEVICE(0x5921, info) /* ULT GT2F */ - -#define INTEL_KBL_ULX_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x591E, info) /* ULX GT2 */ - -#define INTEL_KBL_GT2_IDS(info) \ - INTEL_KBL_ULT_GT2_IDS(info), \ - INTEL_KBL_ULX_GT2_IDS(info), \ - INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \ - INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \ - INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \ - INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \ - INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */ - -#define INTEL_KBL_ULT_GT3_IDS(info) \ - INTEL_VGA_DEVICE(0x5926, info) /* ULT GT3 */ - -#define INTEL_KBL_GT3_IDS(info) \ - INTEL_KBL_ULT_GT3_IDS(info), \ - INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \ - 
INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */ - -#define INTEL_KBL_GT4_IDS(info) \ - INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */ - -/* AML/KBL Y GT2 */ -#define INTEL_AML_KBL_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \ - INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */ - -/* AML/CFL Y GT2 */ -#define INTEL_AML_CFL_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x87CA, info) - -/* CML GT1 */ -#define INTEL_CML_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x9BA2, info), \ - INTEL_VGA_DEVICE(0x9BA4, info), \ - INTEL_VGA_DEVICE(0x9BA5, info), \ - INTEL_VGA_DEVICE(0x9BA8, info) - -#define INTEL_CML_U_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x9B21, info), \ - INTEL_VGA_DEVICE(0x9BAA, info), \ - INTEL_VGA_DEVICE(0x9BAC, info) - -/* CML GT2 */ -#define INTEL_CML_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x9BC2, info), \ - INTEL_VGA_DEVICE(0x9BC4, info), \ - INTEL_VGA_DEVICE(0x9BC5, info), \ - INTEL_VGA_DEVICE(0x9BC6, info), \ - INTEL_VGA_DEVICE(0x9BC8, info), \ - INTEL_VGA_DEVICE(0x9BE6, info), \ - INTEL_VGA_DEVICE(0x9BF6, info) - -#define INTEL_CML_U_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x9B41, info), \ - INTEL_VGA_DEVICE(0x9BCA, info), \ - INTEL_VGA_DEVICE(0x9BCC, info) - -#define INTEL_KBL_IDS(info) \ - INTEL_KBL_GT1_IDS(info), \ - INTEL_KBL_GT2_IDS(info), \ - INTEL_KBL_GT3_IDS(info), \ - INTEL_KBL_GT4_IDS(info), \ - INTEL_AML_KBL_GT2_IDS(info) - -/* CFL S */ -#define INTEL_CFL_S_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \ - INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \ - INTEL_VGA_DEVICE(0x3E99, info) /* SRV GT1 */ - -#define INTEL_CFL_S_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \ - INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \ - INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \ - INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \ - INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */ - -/* CFL H */ -#define INTEL_CFL_H_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x3E9C, info) - -#define INTEL_CFL_H_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x3E94, info), /* Halo GT2 */ \ - INTEL_VGA_DEVICE(0x3E9B, info) /* Halo GT2 */ - -/* CFL U GT2 */ -#define INTEL_CFL_U_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x3EA9, info) - -/* CFL U GT3 */ -#define INTEL_CFL_U_GT3_IDS(info) \ - INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */ - -/* WHL/CFL U GT1 */ -#define INTEL_WHL_U_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x3EA1, info), \ - INTEL_VGA_DEVICE(0x3EA4, info) - -/* WHL/CFL U GT2 */ -#define INTEL_WHL_U_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x3EA0, info), \ - INTEL_VGA_DEVICE(0x3EA3, info) - -/* WHL/CFL U GT3 */ -#define INTEL_WHL_U_GT3_IDS(info) \ - INTEL_VGA_DEVICE(0x3EA2, info) - -#define INTEL_CFL_IDS(info) \ - INTEL_CFL_S_GT1_IDS(info), \ - INTEL_CFL_S_GT2_IDS(info), \ - INTEL_CFL_H_GT1_IDS(info), \ - INTEL_CFL_H_GT2_IDS(info), \ - INTEL_CFL_U_GT2_IDS(info), \ - INTEL_CFL_U_GT3_IDS(info), \ - INTEL_WHL_U_GT1_IDS(info), \ - INTEL_WHL_U_GT2_IDS(info), \ - INTEL_WHL_U_GT3_IDS(info), \ - INTEL_AML_CFL_GT2_IDS(info), \ - INTEL_CML_GT1_IDS(info), \ - INTEL_CML_GT2_IDS(info), \ - INTEL_CML_U_GT1_IDS(info), \ - INTEL_CML_U_GT2_IDS(info) - -/* CNL */ -#define INTEL_CNL_PORT_F_IDS(info) \ - INTEL_VGA_DEVICE(0x5A44, info), \ - INTEL_VGA_DEVICE(0x5A4C, info), \ - INTEL_VGA_DEVICE(0x5A54, info), \ - INTEL_VGA_DEVICE(0x5A5C, info) - -#define INTEL_CNL_IDS(info) \ - INTEL_CNL_PORT_F_IDS(info), \ - INTEL_VGA_DEVICE(0x5A40, info), \ - INTEL_VGA_DEVICE(0x5A41, info), \ - 
INTEL_VGA_DEVICE(0x5A42, info), \ - INTEL_VGA_DEVICE(0x5A49, info), \ - INTEL_VGA_DEVICE(0x5A4A, info), \ - INTEL_VGA_DEVICE(0x5A50, info), \ - INTEL_VGA_DEVICE(0x5A51, info), \ - INTEL_VGA_DEVICE(0x5A52, info), \ - INTEL_VGA_DEVICE(0x5A59, info), \ - INTEL_VGA_DEVICE(0x5A5A, info) - -/* ICL */ -#define INTEL_ICL_PORT_F_IDS(info) \ - INTEL_VGA_DEVICE(0x8A50, info), \ - INTEL_VGA_DEVICE(0x8A52, info), \ - INTEL_VGA_DEVICE(0x8A53, info), \ - INTEL_VGA_DEVICE(0x8A54, info), \ - INTEL_VGA_DEVICE(0x8A56, info), \ - INTEL_VGA_DEVICE(0x8A57, info), \ - INTEL_VGA_DEVICE(0x8A58, info), \ - INTEL_VGA_DEVICE(0x8A59, info), \ - INTEL_VGA_DEVICE(0x8A5A, info), \ - INTEL_VGA_DEVICE(0x8A5B, info), \ - INTEL_VGA_DEVICE(0x8A5C, info), \ - INTEL_VGA_DEVICE(0x8A70, info), \ - INTEL_VGA_DEVICE(0x8A71, info) - -#define INTEL_ICL_11_IDS(info) \ - INTEL_ICL_PORT_F_IDS(info), \ - INTEL_VGA_DEVICE(0x8A51, info), \ - INTEL_VGA_DEVICE(0x8A5D, info) - -/* EHL */ -#define INTEL_EHL_IDS(info) \ - INTEL_VGA_DEVICE(0x4541, info), \ - INTEL_VGA_DEVICE(0x4551, info), \ - INTEL_VGA_DEVICE(0x4555, info), \ - INTEL_VGA_DEVICE(0x4557, info), \ - INTEL_VGA_DEVICE(0x4571, info) - -/* JSL */ -#define INTEL_JSL_IDS(info) \ - INTEL_VGA_DEVICE(0x4E51, info), \ - INTEL_VGA_DEVICE(0x4E55, info), \ - INTEL_VGA_DEVICE(0x4E57, info), \ - INTEL_VGA_DEVICE(0x4E61, info), \ - INTEL_VGA_DEVICE(0x4E71, info) - -/* TGL */ -#define INTEL_TGL_12_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x9A60, info), \ - INTEL_VGA_DEVICE(0x9A68, info), \ - INTEL_VGA_DEVICE(0x9A70, info) - -#define INTEL_TGL_12_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x9A40, info), \ - INTEL_VGA_DEVICE(0x9A49, info), \ - INTEL_VGA_DEVICE(0x9A59, info), \ - INTEL_VGA_DEVICE(0x9A78, info), \ - INTEL_VGA_DEVICE(0x9AC0, info), \ - INTEL_VGA_DEVICE(0x9AC9, info), \ - INTEL_VGA_DEVICE(0x9AD9, info), \ - INTEL_VGA_DEVICE(0x9AF8, info) - -#define INTEL_TGL_12_IDS(info) \ - INTEL_TGL_12_GT1_IDS(info), \ - INTEL_TGL_12_GT2_IDS(info) - -/* RKL */ -#define INTEL_RKL_IDS(info) \ - INTEL_VGA_DEVICE(0x4C80, info), \ - INTEL_VGA_DEVICE(0x4C8A, info), \ - INTEL_VGA_DEVICE(0x4C8B, info), \ - INTEL_VGA_DEVICE(0x4C8C, info), \ - INTEL_VGA_DEVICE(0x4C90, info), \ - INTEL_VGA_DEVICE(0x4C9A, info) - -/* DG1 */ -#define INTEL_DG1_IDS(info) \ - INTEL_VGA_DEVICE(0x4905, info), \ - INTEL_VGA_DEVICE(0x4906, info), \ - INTEL_VGA_DEVICE(0x4907, info), \ - INTEL_VGA_DEVICE(0x4908, info), \ - INTEL_VGA_DEVICE(0x4909, info) - -/* ADL-S */ -#define INTEL_ADLS_IDS(info) \ - INTEL_VGA_DEVICE(0x4680, info), \ - INTEL_VGA_DEVICE(0x4682, info), \ - INTEL_VGA_DEVICE(0x4688, info), \ - INTEL_VGA_DEVICE(0x468A, info), \ - INTEL_VGA_DEVICE(0x468B, info), \ - INTEL_VGA_DEVICE(0x4690, info), \ - INTEL_VGA_DEVICE(0x4692, info), \ - INTEL_VGA_DEVICE(0x4693, info) - -/* ADL-P */ -#define INTEL_ADLP_IDS(info) \ - INTEL_VGA_DEVICE(0x46A0, info), \ - INTEL_VGA_DEVICE(0x46A1, info), \ - INTEL_VGA_DEVICE(0x46A2, info), \ - INTEL_VGA_DEVICE(0x46A3, info), \ - INTEL_VGA_DEVICE(0x46A6, info), \ - INTEL_VGA_DEVICE(0x46A8, info), \ - INTEL_VGA_DEVICE(0x46AA, info), \ - INTEL_VGA_DEVICE(0x462A, info), \ - INTEL_VGA_DEVICE(0x4626, info), \ - INTEL_VGA_DEVICE(0x4628, info), \ - INTEL_VGA_DEVICE(0x46B0, info), \ - INTEL_VGA_DEVICE(0x46B1, info), \ - INTEL_VGA_DEVICE(0x46B2, info), \ - INTEL_VGA_DEVICE(0x46B3, info), \ - INTEL_VGA_DEVICE(0x46C0, info), \ - INTEL_VGA_DEVICE(0x46C1, info), \ - INTEL_VGA_DEVICE(0x46C2, info), \ - INTEL_VGA_DEVICE(0x46C3, info) - -/* ADL-N */ -#define INTEL_ADLN_IDS(info) \ - INTEL_VGA_DEVICE(0x46D0, info), \ - 
INTEL_VGA_DEVICE(0x46D1, info), \ - INTEL_VGA_DEVICE(0x46D2, info) - -/* RPL-S */ -#define INTEL_RPLS_IDS(info) \ - INTEL_VGA_DEVICE(0xA780, info), \ - INTEL_VGA_DEVICE(0xA781, info), \ - INTEL_VGA_DEVICE(0xA782, info), \ - INTEL_VGA_DEVICE(0xA783, info), \ - INTEL_VGA_DEVICE(0xA788, info), \ - INTEL_VGA_DEVICE(0xA789, info), \ - INTEL_VGA_DEVICE(0xA78A, info), \ - INTEL_VGA_DEVICE(0xA78B, info) - -/* RPL-P */ -#define INTEL_RPLP_IDS(info) \ - INTEL_VGA_DEVICE(0xA720, info), \ - INTEL_VGA_DEVICE(0xA721, info), \ - INTEL_VGA_DEVICE(0xA7A0, info), \ - INTEL_VGA_DEVICE(0xA7A1, info), \ - INTEL_VGA_DEVICE(0xA7A8, info), \ - INTEL_VGA_DEVICE(0xA7A9, info) - -/* DG2 */ -#define INTEL_DG2_G10_IDS(info) \ - INTEL_VGA_DEVICE(0x5690, info), \ - INTEL_VGA_DEVICE(0x5691, info), \ - INTEL_VGA_DEVICE(0x5692, info), \ - INTEL_VGA_DEVICE(0x56A0, info), \ - INTEL_VGA_DEVICE(0x56A1, info), \ - INTEL_VGA_DEVICE(0x56A2, info) - -#define INTEL_DG2_G11_IDS(info) \ - INTEL_VGA_DEVICE(0x5693, info), \ - INTEL_VGA_DEVICE(0x5694, info), \ - INTEL_VGA_DEVICE(0x5695, info), \ - INTEL_VGA_DEVICE(0x5698, info), \ - INTEL_VGA_DEVICE(0x56A5, info), \ - INTEL_VGA_DEVICE(0x56A6, info), \ - INTEL_VGA_DEVICE(0x56B0, info), \ - INTEL_VGA_DEVICE(0x56B1, info) - -#define INTEL_DG2_G12_IDS(info) \ - INTEL_VGA_DEVICE(0x5696, info), \ - INTEL_VGA_DEVICE(0x5697, info), \ - INTEL_VGA_DEVICE(0x56A3, info), \ - INTEL_VGA_DEVICE(0x56A4, info), \ - INTEL_VGA_DEVICE(0x56B2, info), \ - INTEL_VGA_DEVICE(0x56B3, info) - -#define INTEL_DG2_IDS(info) \ - INTEL_DG2_G10_IDS(info), \ - INTEL_DG2_G11_IDS(info), \ - INTEL_DG2_G12_IDS(info) - -#define INTEL_ATS_M150_IDS(info) \ - INTEL_VGA_DEVICE(0x56C0, info) - -#define INTEL_ATS_M75_IDS(info) \ - INTEL_VGA_DEVICE(0x56C1, info) - -#define INTEL_ATS_M_IDS(info) \ - INTEL_ATS_M150_IDS(info), \ - INTEL_ATS_M75_IDS(info) -/* MTL */ -#define INTEL_MTL_M_IDS(info) \ - INTEL_VGA_DEVICE(0x7D40, info), \ - INTEL_VGA_DEVICE(0x7D60, info) - -#define INTEL_MTL_P_IDS(info) \ - INTEL_VGA_DEVICE(0x7D45, info), \ - INTEL_VGA_DEVICE(0x7D55, info), \ - INTEL_VGA_DEVICE(0x7DD5, info) - -#define INTEL_MTL_IDS(info) \ - INTEL_MTL_M_IDS(info), \ - INTEL_MTL_P_IDS(info) - -#endif /* _I915_PCIIDS_H */ diff --git a/include/drm/intel/display_member.h b/include/drm/intel/display_member.h new file mode 100644 index 000000000000..0319ea560b60 --- /dev/null +++ b/include/drm/intel/display_member.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2025 Intel Corporation */ + +#ifndef __DRM_INTEL_DISPLAY_H__ +#define __DRM_INTEL_DISPLAY_H__ + +#include <linux/build_bug.h> +#include <linux/stddef.h> +#include <linux/stringify.h> + +#include <drm/drm_device.h> + +struct intel_display; + +/* + * A dummy device struct to define the relative offsets of drm and display + * members. With the members identically placed in struct drm_i915_private and + * struct xe_device, this allows figuring out the struct intel_display pointer + * without the definition of either driver specific structure. + */ +struct __intel_generic_device { + struct drm_device drm; + struct intel_display *display; +}; + +/** + * INTEL_DISPLAY_MEMBER_STATIC_ASSERT() - ensure correct placing of drm and display members + * @type: The struct to check + * @drm_member: Name of the struct drm_device member + * @display_member: Name of the struct intel_display * member. 
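+ *
+ * A minimal usage sketch; struct foo_device is hypothetical here, the real
+ * users are struct drm_i915_private and struct xe_device:
+ *
+ *	struct foo_device {
+ *		struct drm_device drm;
+ *		struct intel_display *display;
+ *	};
+ *
+ *	INTEL_DISPLAY_MEMBER_STATIC_ASSERT(struct foo_device, drm, display);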
+ *
+ * Use this static assert macro to ensure that the struct drm_device and
+ * struct intel_display * members of struct drm_i915_private and struct
+ * xe_device are at the same relative offsets.
+ */
+#define INTEL_DISPLAY_MEMBER_STATIC_ASSERT(type, drm_member, display_member) \
+	static_assert( \
+		offsetof(struct __intel_generic_device, display) - offsetof(struct __intel_generic_device, drm) == \
+		offsetof(type, display_member) - offsetof(type, drm_member), \
+		__stringify(type) " " __stringify(drm_member) " and " __stringify(display_member) " members at invalid offsets")
+
+#endif
diff --git a/include/drm/intel/display_parent_interface.h b/include/drm/intel/display_parent_interface.h
new file mode 100644
index 000000000000..26bedc360044
--- /dev/null
+++ b/include/drm/intel/display_parent_interface.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __DISPLAY_PARENT_INTERFACE_H__
+#define __DISPLAY_PARENT_INTERFACE_H__
+
+#include <linux/types.h>
+
+struct drm_device;
+struct ref_tracker;
+
+struct intel_display_rpm_interface {
+	struct ref_tracker *(*get)(const struct drm_device *drm);
+	struct ref_tracker *(*get_raw)(const struct drm_device *drm);
+	struct ref_tracker *(*get_if_in_use)(const struct drm_device *drm);
+	struct ref_tracker *(*get_noresume)(const struct drm_device *drm);
+
+	void (*put)(const struct drm_device *drm, struct ref_tracker *wakeref);
+	void (*put_raw)(const struct drm_device *drm, struct ref_tracker *wakeref);
+	void (*put_unchecked)(const struct drm_device *drm);
+
+	bool (*suspended)(const struct drm_device *drm);
+	void (*assert_held)(const struct drm_device *drm);
+	void (*assert_block)(const struct drm_device *drm);
+	void (*assert_unblock)(const struct drm_device *drm);
+};
+
+/**
+ * struct intel_display_parent_interface - services the parent driver provides to display
+ *
+ * The parent, or core, driver provides a pointer to this structure to the
+ * display driver when calling intel_display_device_probe(). The display driver
+ * uses it to access services provided by the parent driver. The structure may
+ * contain sub-struct pointers to group function pointers by functionality.
+ *
+ * All function and sub-struct pointers must be initialized and callable unless
+ * explicitly marked as "optional" below. The display driver will only
+ * NULL-check the optional pointers.
+ */
+struct intel_display_parent_interface {
+	/** @rpm: Runtime PM functions */
+	const struct intel_display_rpm_interface *rpm;
+};
+
+#endif
diff --git a/include/drm/i915_component.h b/include/drm/intel/i915_component.h
index c1e2a43d2d1e..8082db222e00 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/intel/i915_component.h
@@ -24,12 +24,14 @@
 #ifndef _I915_COMPONENT_H_
 #define _I915_COMPONENT_H_
 
-#include "drm_audio_component.h"
+#include <drm/drm_audio_component.h>
 
 enum i915_component_type {
 	I915_COMPONENT_AUDIO = 1,
 	I915_COMPONENT_HDCP,
-	I915_COMPONENT_PXP
+	I915_COMPONENT_PXP,
+	I915_COMPONENT_GSC_PROXY,
+	INTEL_COMPONENT_LB,
 };
 
 /* MAX_PORT is the number of port
diff --git a/include/drm/i915_drm.h b/include/drm/intel/i915_drm.h
index 7adce327c1c2..adff68538484 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/intel/i915_drm.h
@@ -42,7 +42,7 @@ extern struct resource intel_graphics_stolen_res;
  * The Bridge device's PCI config space has information about the
  * fb aperture size and the amount of pre-reserved memory.
  * This is all handled in the intel-gtt.ko module. i915.ko only
- * cares about the vga bit for the vga rbiter.
+ * cares about the vga bit for the vga arbiter.
  */
 #define INTEL_GMCH_CTRL		0x52
 #define INTEL_GMCH_VGA_DISABLE  (1 << 1)
diff --git a/include/drm/intel/i915_gsc_proxy_mei_interface.h b/include/drm/intel/i915_gsc_proxy_mei_interface.h
new file mode 100644
index 000000000000..850dfbf40607
--- /dev/null
+++ b/include/drm/intel/i915_gsc_proxy_mei_interface.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2022-2023 Intel Corporation
+ */
+
+#ifndef _I915_GSC_PROXY_MEI_INTERFACE_H_
+#define _I915_GSC_PROXY_MEI_INTERFACE_H_
+
+#include <linux/types.h>
+
+struct device;
+struct module;
+
+/**
+ * struct i915_gsc_proxy_component_ops - ops for GSC Proxy services.
+ * @owner: Module providing the ops
+ * @send: sends a proxy message from GSC FW to ME FW
+ * @recv: receives a proxy message for GSC FW from ME FW
+ */
+struct i915_gsc_proxy_component_ops {
+	struct module *owner;
+
+	/**
+	 * @send: Sends a proxy message to ME FW.
+	 * @dev: device struct corresponding to the mei device
+	 * @buf: message buffer to send
+	 * @size: size of the message
+	 * Return: bytes sent on success, negative errno value on failure
+	 */
+	int (*send)(struct device *dev, const void *buf, size_t size);
+
+	/**
+	 * @recv: Receives a proxy message from ME FW.
+	 * @dev: device struct corresponding to the mei device
+	 * @buf: message buffer to contain the received message
+	 * @size: size of the buffer
+	 * Return: bytes received on success, negative errno value on failure
+	 */
+	int (*recv)(struct device *dev, void *buf, size_t size);
+};
+
+/**
+ * struct i915_gsc_proxy_component - Used for communication between i915 and
+ * MEI drivers for GSC proxy services
+ * @mei_dev: device that provides the GSC proxy service.
+ * @ops: Ops implemented by GSC proxy driver, used by i915 driver.
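+ *
+ * A sketch of the intended round-trip through these ops, with hypothetical
+ * variable names:
+ *
+ *	ret = comp->ops->send(comp->mei_dev, req, req_size);
+ *	if (ret >= 0)
+ *		ret = comp->ops->recv(comp->mei_dev, reply, sizeof(reply));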
+ */
+struct i915_gsc_proxy_component {
+	struct device *mei_dev;
+	const struct i915_gsc_proxy_component_ops *ops;
+};
+
+#endif /* _I915_GSC_PROXY_MEI_INTERFACE_H_ */
diff --git a/include/drm/intel/i915_hdcp_interface.h b/include/drm/intel/i915_hdcp_interface.h
new file mode 100644
index 000000000000..d776ed7dcd00
--- /dev/null
+++ b/include/drm/intel/i915_hdcp_interface.h
@@ -0,0 +1,547 @@
+/* SPDX-License-Identifier: (GPL-2.0+) */
+/*
+ * Copyright © 2017-2019 Intel Corporation
+ *
+ * Authors:
+ * Ramalingam C <ramalingam.c@intel.com>
+ */
+
+#ifndef _I915_HDCP_INTERFACE_H_
+#define _I915_HDCP_INTERFACE_H_
+
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <drm/display/drm_hdcp.h>
+
+/**
+ * enum hdcp_port_type - HDCP port implementation type defined by ME/GSC FW
+ * @HDCP_PORT_TYPE_INVALID: Invalid hdcp port type
+ * @HDCP_PORT_TYPE_INTEGRATED: In-Host HDCP2.x port
+ * @HDCP_PORT_TYPE_LSPCON: HDCP2.2 discrete wired Tx port with LSPCON
+ *			   (HDMI 2.0) solution
+ * @HDCP_PORT_TYPE_CPDP: HDCP2.2 discrete wired Tx port using the CPDP (DP 1.3)
+ *			 solution
+ */
+enum hdcp_port_type {
+	HDCP_PORT_TYPE_INVALID,
+	HDCP_PORT_TYPE_INTEGRATED,
+	HDCP_PORT_TYPE_LSPCON,
+	HDCP_PORT_TYPE_CPDP
+};
+
+/**
+ * enum hdcp_wired_protocol - HDCP adaptation used on the port
+ * @HDCP_PROTOCOL_INVALID: Invalid HDCP adaptation protocol
+ * @HDCP_PROTOCOL_HDMI: HDMI adaptation of HDCP used on the port
+ * @HDCP_PROTOCOL_DP: DP adaptation of HDCP used on the port
+ */
+enum hdcp_wired_protocol {
+	HDCP_PROTOCOL_INVALID,
+	HDCP_PROTOCOL_HDMI,
+	HDCP_PROTOCOL_DP
+};
+
+enum hdcp_ddi {
+	HDCP_DDI_INVALID_PORT = 0x0,
+
+	HDCP_DDI_B = 1,
+	HDCP_DDI_C,
+	HDCP_DDI_D,
+	HDCP_DDI_E,
+	HDCP_DDI_F,
+	HDCP_DDI_A = 7,
+	HDCP_DDI_RANGE_END = HDCP_DDI_A,
+};
+
+/**
+ * enum hdcp_transcoder - ME/GSC Firmware defined index for transcoders
+ * @HDCP_INVALID_TRANSCODER: Index for Invalid transcoder
+ * @HDCP_TRANSCODER_EDP: Index for EDP Transcoder
+ * @HDCP_TRANSCODER_DSI0: Index for DSI0 Transcoder
+ * @HDCP_TRANSCODER_DSI1: Index for DSI1 Transcoder
+ * @HDCP_TRANSCODER_A: Index for Transcoder A
+ * @HDCP_TRANSCODER_B: Index for Transcoder B
+ * @HDCP_TRANSCODER_C: Index for Transcoder C
+ * @HDCP_TRANSCODER_D: Index for Transcoder D
+ */
+enum hdcp_transcoder {
+	HDCP_INVALID_TRANSCODER = 0x00,
+	HDCP_TRANSCODER_EDP,
+	HDCP_TRANSCODER_DSI0,
+	HDCP_TRANSCODER_DSI1,
+	HDCP_TRANSCODER_A = 0x10,
+	HDCP_TRANSCODER_B,
+	HDCP_TRANSCODER_C,
+	HDCP_TRANSCODER_D
+};
+
+/**
+ * struct hdcp_port_data - intel specific HDCP port data
+ * @hdcp_ddi: ddi index as per ME/GSC FW
+ * @hdcp_transcoder: transcoder index as per ME/GSC FW
+ * @port_type: HDCP port type as per ME/GSC FW classification
+ * @protocol: HDCP adaptation as per ME/GSC FW
+ * @k: Number of streams transmitted on a port. Only on DP MST this is != 1
+ * @seq_num_m: Count of RepeaterAuth_Stream_Manage msg propagated.
+ *	       Initialized to 0 on AKE_INIT. Incremented after every successful
+ *	       transmission of RepeaterAuth_Stream_Manage message. When it rolls
+ *	       over, re-Auth has to be triggered.
+ * @streams: struct hdcp2_streamid_type[k]. Defines the type and id for the
+ *	     streams
+ */
+struct hdcp_port_data {
+	enum hdcp_ddi hdcp_ddi;
+	enum hdcp_transcoder hdcp_transcoder;
+	u8 port_type;
+	u8 protocol;
+	u16 k;
+	u32 seq_num_m;
+	struct hdcp2_streamid_type *streams;
+};
+
+/**
+ * struct i915_hdcp_ops - ops for HDCP2.2 services.
+ * @owner: Module providing the ops
+ * @initiate_hdcp2_session: Initiate a wired HDCP2.2 Tx session
+ *			    and prepare AKE_Init.
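+ *			    (the first step of the AKE flow: the GPU driver
+ *			    passes the port's &struct hdcp_port_data and
+ *			    receives the AKE_Init content back in @ake_data)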
+ * @verify_receiver_cert_prepare_km: Verify the Receiver Certificate + * AKE_Send_Cert and prepare + * AKE_Stored_Km/AKE_No_Stored_Km + * @verify_hprime: Verify AKE_Send_H_prime + * @store_pairing_info: Store pairing info received + * @initiate_locality_check: Prepare LC_Init + * @verify_lprime: Verify lprime + * @get_session_key: Prepare SKE_Send_Eks + * @repeater_check_flow_prepare_ack: Validate the Downstream topology + * and prepare rep_ack + * @verify_mprime: Verify mprime + * @enable_hdcp_authentication: Mark a port as authenticated. + * @close_hdcp_session: Close the Wired HDCP Tx session per port. + * This also disables the authenticated state of the port. + */ +struct i915_hdcp_ops { + /** + * @owner: hdcp module + */ + struct module *owner; + + int (*initiate_hdcp2_session)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_init *ake_data); + int (*verify_receiver_cert_prepare_km)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_send_cert + *rx_cert, + bool *km_stored, + struct hdcp2_ake_no_stored_km + *ek_pub_km, + size_t *msg_sz); + int (*verify_hprime)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_send_hprime *rx_hprime); + int (*store_pairing_info)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_send_pairing_info + *pairing_info); + int (*initiate_locality_check)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_lc_init *lc_init_data); + int (*verify_lprime)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_lc_send_lprime *rx_lprime); + int (*get_session_key)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ske_send_eks *ske_data); + int (*repeater_check_flow_prepare_ack)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_send_receiverid_list + *rep_topology, + struct hdcp2_rep_send_ack + *rep_send_ack); + int (*verify_mprime)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_stream_ready *stream_ready); + int (*enable_hdcp_authentication)(struct device *dev, + struct hdcp_port_data *data); + int (*close_hdcp_session)(struct device *dev, + struct hdcp_port_data *data); +}; + +/** + * struct i915_hdcp_arbiter - Used for communication between i915 + * and hdcp drivers for the HDCP2.2 services + */ +struct i915_hdcp_arbiter { + /** + * @hdcp_dev: device that provides the HDCP2.2 service from MEI Bus. + */ + struct device *hdcp_dev; + + /** + * @ops: Ops implemented by hdcp driver or intel_hdcp_gsc, used by i915 + * driver. + */ + const struct i915_hdcp_ops *ops; + + /** + * @mutex: To protect the above members. 
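+	 *
+	 * A sketch of the intended locking pattern around any use of
+	 * @hdcp_dev and @ops (variable names hypothetical):
+	 *
+	 *	mutex_lock(&arbiter->mutex);
+	 *	if (arbiter->ops && arbiter->hdcp_dev)
+	 *		ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev,
+	 *						  data, &rx_hprime);
+	 *	mutex_unlock(&arbiter->mutex);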
+ */ + struct mutex mutex; +}; + +/* fw_hdcp_status: Enumeration of all HDCP Status Codes */ +enum fw_hdcp_status { + FW_HDCP_STATUS_SUCCESS = 0x0000, + + /* WiDi Generic Status Codes */ + FW_HDCP_STATUS_INTERNAL_ERROR = 0x1000, + FW_HDCP_STATUS_UNKNOWN_ERROR = 0x1001, + FW_HDCP_STATUS_INCORRECT_API_VERSION = 0x1002, + FW_HDCP_STATUS_INVALID_FUNCTION = 0x1003, + FW_HDCP_STATUS_INVALID_BUFFER_LENGTH = 0x1004, + FW_HDCP_STATUS_INVALID_PARAMS = 0x1005, + FW_HDCP_STATUS_AUTHENTICATION_FAILED = 0x1006, + + /* WiDi Status Codes */ + FW_HDCP_INVALID_SESSION_STATE = 0x6000, + FW_HDCP_SRM_FRAGMENT_UNEXPECTED = 0x6001, + FW_HDCP_SRM_INVALID_LENGTH = 0x6002, + FW_HDCP_SRM_FRAGMENT_OFFSET_INVALID = 0x6003, + FW_HDCP_SRM_VERIFICATION_FAILED = 0x6004, + FW_HDCP_SRM_VERSION_TOO_OLD = 0x6005, + FW_HDCP_RX_CERT_VERIFICATION_FAILED = 0x6006, + FW_HDCP_RX_REVOKED = 0x6007, + FW_HDCP_H_VERIFICATION_FAILED = 0x6008, + FW_HDCP_REPEATER_CHECK_UNEXPECTED = 0x6009, + FW_HDCP_TOPOLOGY_MAX_EXCEEDED = 0x600A, + FW_HDCP_V_VERIFICATION_FAILED = 0x600B, + FW_HDCP_L_VERIFICATION_FAILED = 0x600C, + FW_HDCP_STREAM_KEY_ALLOC_FAILED = 0x600D, + FW_HDCP_BASE_KEY_RESET_FAILED = 0x600E, + FW_HDCP_NONCE_GENERATION_FAILED = 0x600F, + FW_HDCP_STATUS_INVALID_E_KEY_STATE = 0x6010, + FW_HDCP_STATUS_INVALID_CS_ICV = 0x6011, + FW_HDCP_STATUS_INVALID_KB_KEY_STATE = 0x6012, + FW_HDCP_STATUS_INVALID_PAVP_MODE_ICV = 0x6013, + FW_HDCP_STATUS_INVALID_PAVP_MODE = 0x6014, + FW_HDCP_STATUS_LC_MAX_ATTEMPTS = 0x6015, + + /* New status for HDCP 2.1 */ + FW_HDCP_STATUS_MISMATCH_IN_M = 0x6016, + + /* New status code for HDCP 2.2 Rx */ + FW_HDCP_STATUS_RX_PROV_NOT_ALLOWED = 0x6017, + FW_HDCP_STATUS_RX_PROV_WRONG_SUBJECT = 0x6018, + FW_HDCP_RX_NEEDS_PROVISIONING = 0x6019, + FW_HDCP_BKSV_ICV_AUTH_FAILED = 0x6020, + FW_HDCP_STATUS_INVALID_STREAM_ID = 0x6021, + FW_HDCP_STATUS_CHAIN_NOT_INITIALIZED = 0x6022, + FW_HDCP_FAIL_NOT_EXPECTED = 0x6023, + FW_HDCP_FAIL_HDCP_OFF = 0x6024, + FW_HDCP_FAIL_INVALID_PAVP_MEMORY_MODE = 0x6025, + FW_HDCP_FAIL_AES_ECB_FAILURE = 0x6026, + FW_HDCP_FEATURE_NOT_SUPPORTED = 0x6027, + FW_HDCP_DMA_READ_ERROR = 0x6028, + FW_HDCP_DMA_WRITE_ERROR = 0x6029, + FW_HDCP_FAIL_INVALID_PACKET_SIZE = 0x6030, + FW_HDCP_H264_PARSING_ERROR = 0x6031, + FW_HDCP_HDCP2_ERRATA_VIDEO_VIOLATION = 0x6032, + FW_HDCP_HDCP2_ERRATA_AUDIO_VIOLATION = 0x6033, + FW_HDCP_TX_ACTIVE_ERROR = 0x6034, + FW_HDCP_MODE_CHANGE_ERROR = 0x6035, + FW_HDCP_STREAM_TYPE_ERROR = 0x6036, + FW_HDCP_STREAM_MANAGE_NOT_POSSIBLE = 0x6037, + + FW_HDCP_STATUS_PORT_INVALID_COMMAND = 0x6038, + FW_HDCP_STATUS_UNSUPPORTED_PROTOCOL = 0x6039, + FW_HDCP_STATUS_INVALID_PORT_INDEX = 0x603a, + FW_HDCP_STATUS_TX_AUTH_NEEDED = 0x603b, + FW_HDCP_STATUS_NOT_INTEGRATED_PORT = 0x603c, + FW_HDCP_STATUS_SESSION_MAX_REACHED = 0x603d, + + /* hdcp capable bit is not set in rx_caps(error is unique to DP) */ + FW_HDCP_STATUS_NOT_HDCP_CAPABLE = 0x6041, + + FW_HDCP_STATUS_INVALID_STREAM_COUNT = 0x6042, +}; + +#define HDCP_API_VERSION 0x00010000 + +#define HDCP_M_LEN 16 +#define HDCP_KH_LEN 16 + +/* Payload Buffer size(Excluding Header) for CMDs and corresponding response */ +/* Wired_Tx_AKE */ +#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN (4 + 1) +#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_OUT (4 + 8 + 3) + +#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN (4 + 522 + 8 + 3) +#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MIN_OUT (4 + 1 + 3 + 16 + 16) +#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MAX_OUT (4 + 1 + 3 + 128) + +#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN (4 + 32) +#define 
WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_OUT (4) + +#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN (4 + 16) +#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_OUT (4) + +#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN (4) +#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_OUT (4) + +/* Wired_Tx_LC */ +#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN (4) +#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_OUT (4 + 8) + +#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN (4 + 32) +#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_OUT (4) + +/* Wired_Tx_SKE */ +#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN (4) +#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_OUT (4 + 16 + 8) + +/* Wired_Tx_SKE */ +#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN (4 + 1) +#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_OUT (4) + +/* Wired_Tx_Repeater */ +#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN (4 + 2 + 3 + 16 + 155) +#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_OUT (4 + 1 + 16) + +#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN (4 + 3 + \ + 32 + 2 + 2) + +#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_OUT (4) + +/* hdcp_command_id: Enumeration of all WIRED HDCP Command IDs */ +enum hdcp_command_id { + _WIDI_COMMAND_BASE = 0x00030000, + WIDI_INITIATE_HDCP2_SESSION = _WIDI_COMMAND_BASE, + HDCP_GET_SRM_STATUS, + HDCP_SEND_SRM_FRAGMENT, + + /* The wired HDCP Tx commands */ + _WIRED_COMMAND_BASE = 0x00031000, + WIRED_INITIATE_HDCP2_SESSION = _WIRED_COMMAND_BASE, + WIRED_VERIFY_RECEIVER_CERT, + WIRED_AKE_SEND_HPRIME, + WIRED_AKE_SEND_PAIRING_INFO, + WIRED_INIT_LOCALITY_CHECK, + WIRED_VALIDATE_LOCALITY, + WIRED_GET_SESSION_KEY, + WIRED_ENABLE_AUTH, + WIRED_VERIFY_REPEATER, + WIRED_REPEATER_AUTH_STREAM_REQ, + WIRED_CLOSE_SESSION, + + _WIRED_COMMANDS_COUNT, +}; + +union encrypted_buff { + u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN]; + u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN]; + struct { + u8 e_kh_km[HDCP_KH_LEN]; + u8 m[HDCP_M_LEN]; + } __packed; +}; + +/* HDCP HECI message header. All header values are little endian. */ +struct hdcp_cmd_header { + u32 api_version; + u32 command_id; + enum fw_hdcp_status status; + /* Length of the HECI message (excluding the header) */ + u32 buffer_len; +} __packed; + +/* Empty command request or response. No data follows the header. */ +struct hdcp_cmd_no_data { + struct hdcp_cmd_header header; +} __packed; + +/* Uniquely identifies the hdcp port being addressed for a given command. */ +struct hdcp_port_id { + u8 integrated_port_type; + /* physical_port is used until Gen11.5. Must be zero for Gen11.5+ */ + u8 physical_port; + /* attached_transcoder is for Gen11.5+. Set to zero for <Gen11.5 */ + u8 attached_transcoder; + u8 reserved; +} __packed; + +/* + * Data structures for integrated wired HDCP2 Tx in + * support of the AKE protocol + */ +/* HECI struct for integrated wired HDCP Tx session initiation. */ +struct wired_cmd_initiate_hdcp2_session_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 protocol; /* for HDMI vs DP */ +} __packed; + +struct wired_cmd_initiate_hdcp2_session_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 r_tx[HDCP_2_2_RTX_LEN]; + struct hdcp2_tx_caps tx_caps; +} __packed; + +/* HECI struct for ending an integrated wired HDCP Tx session. */ +struct wired_cmd_close_session_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +struct wired_cmd_close_session_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +/* HECI struct for integrated wired HDCP Tx Rx Cert verification. 
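+ * The out message reports via km_stored which member of the encrypted_buff
+ * union is valid: e_kh_km_m when the receiver was paired before, e_kpub_km
+ * otherwise (inferred from the union layout above).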
*/ +struct wired_cmd_verify_receiver_cert_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + struct hdcp2_cert_rx cert_rx; + u8 r_rx[HDCP_2_2_RRX_LEN]; + u8 rx_caps[HDCP_2_2_RXCAPS_LEN]; +} __packed; + +struct wired_cmd_verify_receiver_cert_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 km_stored; + u8 reserved[3]; + union encrypted_buff ekm_buff; +} __packed; + +/* HECI struct for verification of Rx's Hprime in a HDCP Tx session */ +struct wired_cmd_ake_send_hprime_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 h_prime[HDCP_2_2_H_PRIME_LEN]; +} __packed; + +struct wired_cmd_ake_send_hprime_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +/* + * HECI struct for sending in AKE pairing data generated by the Rx in an + * integrated wired HDCP Tx session. + */ +struct wired_cmd_ake_send_pairing_info_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN]; +} __packed; + +struct wired_cmd_ake_send_pairing_info_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +/* Data structures for integrated wired HDCP2 Tx in support of the LC protocol*/ +/* + * HECI struct for initiating locality check with an + * integrated wired HDCP Tx session. + */ +struct wired_cmd_init_locality_check_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +struct wired_cmd_init_locality_check_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 r_n[HDCP_2_2_RN_LEN]; +} __packed; + +/* + * HECI struct for validating an Rx's LPrime value in an + * integrated wired HDCP Tx session. + */ +struct wired_cmd_validate_locality_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 l_prime[HDCP_2_2_L_PRIME_LEN]; +} __packed; + +struct wired_cmd_validate_locality_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +/* + * Data structures for integrated wired HDCP2 Tx in support of the + * SKE protocol + */ +/* HECI struct for creating session key */ +struct wired_cmd_get_session_key_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +struct wired_cmd_get_session_key_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN]; + u8 r_iv[HDCP_2_2_RIV_LEN]; +} __packed; + +/* HECI struct for the Tx enable authentication command */ +struct wired_cmd_enable_auth_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 stream_type; +} __packed; + +struct wired_cmd_enable_auth_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +/* + * Data structures for integrated wired HDCP2 Tx in support of + * the repeater protocols + */ +/* + * HECI struct for verifying the downstream repeater's HDCP topology in an + * integrated wired HDCP Tx session. + */ +struct wired_cmd_verify_repeater_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 rx_info[HDCP_2_2_RXINFO_LEN]; + u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN]; + u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN]; + u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN]; +} __packed; + +struct wired_cmd_verify_repeater_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 content_type_supported; + u8 v[HDCP_2_2_V_PRIME_HALF_LEN]; +} __packed; + +/* + * HECI struct in support of stream management in an + * integrated wired HDCP Tx session. 
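+ * The request ends in a flexible streams[] array sized by the stream count;
+ * a sender might allocate and fill it along these lines (sketch, names
+ * hypothetical):
+ *
+ *	msg = kzalloc(struct_size(msg, streams, num_streams), GFP_KERNEL);
+ *	msg->k = cpu_to_be16(num_streams);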
+ */
+struct wired_cmd_repeater_auth_stream_req_in {
+	struct hdcp_cmd_header header;
+	struct hdcp_port_id port;
+	u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
+	u8 m_prime[HDCP_2_2_MPRIME_LEN];
+	__be16 k;
+	struct hdcp2_streamid_type streams[];
+} __packed;
+
+struct wired_cmd_repeater_auth_stream_req_out {
+	struct hdcp_cmd_header header;
+	struct hdcp_port_id port;
+} __packed;
+
+#endif /* _I915_HDCP_INTERFACE_H_ */
diff --git a/include/drm/i915_pxp_tee_interface.h b/include/drm/intel/i915_pxp_tee_interface.h
index a702b6ec17f7..a532d32f58f3 100644
--- a/include/drm/i915_pxp_tee_interface.h
+++ b/include/drm/intel/i915_pxp_tee_interface.h
@@ -12,18 +12,26 @@ struct scatterlist;
 
 /**
  * struct i915_pxp_component_ops - ops for PXP services.
- * @owner: Module providing the ops
- * @send: sends data to PXP
- * @receive: receives data from PXP
  */
 struct i915_pxp_component_ops {
 	/**
-	 * @owner: owner of the module provding the ops
+	 * @owner: Module providing the ops.
	 */
 	struct module *owner;
 
-	int (*send)(struct device *dev, const void *message, size_t size);
-	int (*recv)(struct device *dev, void *buffer, size_t size);
+	/**
+	 * @send: Send a PXP message.
+	 */
+	int (*send)(struct device *dev, const void *message, size_t size,
+		    unsigned long timeout_ms);
+	/**
+	 * @recv: Receive a PXP message.
+	 */
+	int (*recv)(struct device *dev, void *buffer, size_t size,
+		    unsigned long timeout_ms);
+	/**
+	 * @gsc_command: Send a GSC command.
+	 */
 	ssize_t (*gsc_command)(struct device *dev, u8 client_id, u32 fence_id,
 			       struct scatterlist *sg_in, size_t total_in_len,
 			       struct scatterlist *sg_out);
@@ -33,14 +41,21 @@ struct i915_pxp_component_ops {
 /**
  * struct i915_pxp_component - Used for communication between i915 and TEE
  * drivers for the PXP services
- * @tee_dev: device that provide the PXP service from TEE Bus.
- * @pxp_ops: Ops implemented by TEE driver, used by i915 driver.
  */
 struct i915_pxp_component {
+	/**
+	 * @tee_dev: device that provides the PXP service from TEE Bus.
+	 */
 	struct device *tee_dev;
+
+	/**
+	 * @ops: Ops implemented by TEE driver, used by i915 driver.
+	 */
 	const struct i915_pxp_component_ops *ops;
 
-	/* To protect the above members. */
+	/**
+	 * @mutex: To protect the above members.
+	 */
 	struct mutex mutex;
 };
diff --git a/include/drm/intel-gtt.h b/include/drm/intel/intel-gtt.h
index cb0d5b7200c7..f53bcff01f22 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel/intel-gtt.h
@@ -28,6 +28,8 @@ void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
 				      unsigned int pg_start,
 				      unsigned int flags);
 void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
+dma_addr_t intel_gmch_gtt_read_entry(unsigned int pg,
+				     bool *is_present, bool *is_local);
 
 /* Special gtt memory types */
 #define AGP_DCACHE_MEMORY	1
diff --git a/include/drm/intel/intel_lb_mei_interface.h b/include/drm/intel/intel_lb_mei_interface.h
new file mode 100644
index 000000000000..d65be2cba2ab
--- /dev/null
+++ b/include/drm/intel/intel_lb_mei_interface.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2025 Intel Corporation
+ */
+
+#ifndef _INTEL_LB_MEI_INTERFACE_H_
+#define _INTEL_LB_MEI_INTERFACE_H_
+
+#include <linux/types.h>
+
+struct device;
+
+/**
+ * define INTEL_LB_FLAG_IS_PERSISTENT - Mark the payload as persistent
+ *
+ * This flag indicates that the late binding payload should be stored
+ * persistently in flash across warm resets.
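+ *
+ * A hypothetical push through the component ops (see &struct
+ * intel_lb_component_ops below) might look like:
+ *
+ *	ret = ops->push_payload(mei_dev, INTEL_LB_TYPE_FAN_CONTROL,
+ *				INTEL_LB_FLAG_IS_PERSISTENT,
+ *				blob, blob_size);
+ *
+ * where 0 means success and a positive value is an &enum intel_lb_status code.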
+ */
+#define INTEL_LB_FLAG_IS_PERSISTENT	BIT(0)
+
+/**
+ * enum intel_lb_type - Late binding payload type
+ * @INTEL_LB_TYPE_FAN_CONTROL: Fan controller configuration
+ */
+enum intel_lb_type {
+	INTEL_LB_TYPE_FAN_CONTROL = 1,
+};
+
+/**
+ * enum intel_lb_status - Status codes returned on late binding transmissions
+ * @INTEL_LB_STATUS_SUCCESS: Operation completed successfully
+ * @INTEL_LB_STATUS_4ID_MISMATCH: Mismatch in the expected 4ID (firmware identity/token)
+ * @INTEL_LB_STATUS_ARB_FAILURE: Arbitration failure (e.g. conflicting access or state)
+ * @INTEL_LB_STATUS_GENERAL_ERROR: General firmware error not covered by other codes
+ * @INTEL_LB_STATUS_INVALID_PARAMS: One or more input parameters are invalid
+ * @INTEL_LB_STATUS_INVALID_SIGNATURE: Payload has an invalid or untrusted signature
+ * @INTEL_LB_STATUS_INVALID_PAYLOAD: Payload contents are not accepted by firmware
+ * @INTEL_LB_STATUS_TIMEOUT: Operation timed out before completion
+ */
+enum intel_lb_status {
+	INTEL_LB_STATUS_SUCCESS = 0,
+	INTEL_LB_STATUS_4ID_MISMATCH = 1,
+	INTEL_LB_STATUS_ARB_FAILURE = 2,
+	INTEL_LB_STATUS_GENERAL_ERROR = 3,
+	INTEL_LB_STATUS_INVALID_PARAMS = 4,
+	INTEL_LB_STATUS_INVALID_SIGNATURE = 5,
+	INTEL_LB_STATUS_INVALID_PAYLOAD = 6,
+	INTEL_LB_STATUS_TIMEOUT = 7,
+};
+
+/**
+ * struct intel_lb_component_ops - Ops for late binding services
+ */
+struct intel_lb_component_ops {
+	/**
+	 * @push_payload: Sends a payload to the authentication firmware
+	 * @dev: Device struct corresponding to the mei device
+	 * @type: Payload type (see &enum intel_lb_type)
+	 * @flags: Payload flags bitmap (e.g. %INTEL_LB_FLAG_IS_PERSISTENT)
+	 * @payload: Pointer to payload buffer
+	 * @payload_size: Payload buffer size in bytes
+	 *
+	 * Return: 0 on success, negative errno value on transport failure,
+	 *	   positive status returned by firmware
+	 */
+	int (*push_payload)(struct device *dev, u32 type, u32 flags,
+			    const void *payload, size_t payload_size);
+};
+
+#endif /* _INTEL_LB_MEI_INTERFACE_H_ */
diff --git a/include/drm/intel_lpe_audio.h b/include/drm/intel/intel_lpe_audio.h
index b6121c8fe539..b6121c8fe539 100644
--- a/include/drm/intel_lpe_audio.h
+++ b/include/drm/intel/intel_lpe_audio.h
diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h
new file mode 100644
index 000000000000..52520e684ab1
--- /dev/null
+++ b/include/drm/intel/pciids.h
@@ -0,0 +1,903 @@
+/*
+ * Copyright 2013 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __PCIIDS_H__ +#define __PCIIDS_H__ + +#ifdef __KERNEL__ +#define INTEL_PCI_DEVICE(_id, _info) { \ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, (_id)), \ + .driver_data = (kernel_ulong_t)(_info), \ +} + +#define INTEL_VGA_DEVICE(_id, _info) { \ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, (_id)), \ + .class = PCI_BASE_CLASS_DISPLAY << 16, .class_mask = 0xff << 16, \ + .driver_data = (kernel_ulong_t)(_info), \ +} + +#define INTEL_QUANTA_VGA_DEVICE(_info) { \ + .vendor = PCI_VENDOR_ID_INTEL, .device = 0x16a, \ + .subvendor = 0x152d, .subdevice = 0x8990, \ + .class = PCI_BASE_CLASS_DISPLAY << 16, .class_mask = 0xff << 16, \ + .driver_data = (kernel_ulong_t)(_info), \ +} +#endif + +#define INTEL_I810_IDS(MACRO__, ...) \ + MACRO__(0x7121, ## __VA_ARGS__), /* I810 */ \ + MACRO__(0x7123, ## __VA_ARGS__), /* I810_DC100 */ \ + MACRO__(0x7125, ## __VA_ARGS__) /* I810_E */ + +#define INTEL_I815_IDS(MACRO__, ...) \ + MACRO__(0x1132, ## __VA_ARGS__) /* I815*/ + +#define INTEL_I830_IDS(MACRO__, ...) \ + MACRO__(0x3577, ## __VA_ARGS__) + +#define INTEL_I845G_IDS(MACRO__, ...) \ + MACRO__(0x2562, ## __VA_ARGS__) + +#define INTEL_I85X_IDS(MACRO__, ...) \ + MACRO__(0x3582, ## __VA_ARGS__), /* I855_GM */ \ + MACRO__(0x358e, ## __VA_ARGS__) + +#define INTEL_I865G_IDS(MACRO__, ...) \ + MACRO__(0x2572, ## __VA_ARGS__) /* I865_G */ + +#define INTEL_I915G_IDS(MACRO__, ...) \ + MACRO__(0x2582, ## __VA_ARGS__), /* I915_G */ \ + MACRO__(0x258a, ## __VA_ARGS__) /* E7221_G */ + +#define INTEL_I915GM_IDS(MACRO__, ...) \ + MACRO__(0x2592, ## __VA_ARGS__) /* I915_GM */ + +#define INTEL_I945G_IDS(MACRO__, ...) \ + MACRO__(0x2772, ## __VA_ARGS__) /* I945_G */ + +#define INTEL_I945GM_IDS(MACRO__, ...) \ + MACRO__(0x27a2, ## __VA_ARGS__), /* I945_GM */ \ + MACRO__(0x27ae, ## __VA_ARGS__) /* I945_GME */ + +#define INTEL_I965G_IDS(MACRO__, ...) \ + MACRO__(0x2972, ## __VA_ARGS__), /* I946_GZ */ \ + MACRO__(0x2982, ## __VA_ARGS__), /* G35_G */ \ + MACRO__(0x2992, ## __VA_ARGS__), /* I965_Q */ \ + MACRO__(0x29a2, ## __VA_ARGS__) /* I965_G */ + +#define INTEL_G33_IDS(MACRO__, ...) \ + MACRO__(0x29b2, ## __VA_ARGS__), /* Q35_G */ \ + MACRO__(0x29c2, ## __VA_ARGS__), /* G33_G */ \ + MACRO__(0x29d2, ## __VA_ARGS__) /* Q33_G */ + +#define INTEL_I965GM_IDS(MACRO__, ...) \ + MACRO__(0x2a02, ## __VA_ARGS__), /* I965_GM */ \ + MACRO__(0x2a12, ## __VA_ARGS__) /* I965_GME */ + +#define INTEL_GM45_IDS(MACRO__, ...) \ + MACRO__(0x2a42, ## __VA_ARGS__) /* GM45_G */ + +#define INTEL_G45_IDS(MACRO__, ...) \ + MACRO__(0x2e02, ## __VA_ARGS__), /* IGD_E_G */ \ + MACRO__(0x2e12, ## __VA_ARGS__), /* Q45_G */ \ + MACRO__(0x2e22, ## __VA_ARGS__), /* G45_G */ \ + MACRO__(0x2e32, ## __VA_ARGS__), /* G41_G */ \ + MACRO__(0x2e42, ## __VA_ARGS__), /* B43_G */ \ + MACRO__(0x2e92, ## __VA_ARGS__) /* B43_G.1 */ + +#define INTEL_PNV_G_IDS(MACRO__, ...) \ + MACRO__(0xa001, ## __VA_ARGS__) + +#define INTEL_PNV_M_IDS(MACRO__, ...) \ + MACRO__(0xa011, ## __VA_ARGS__) + +#define INTEL_PNV_IDS(MACRO__, ...) \ + INTEL_PNV_G_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_PNV_M_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_ILK_D_IDS(MACRO__, ...) \ + MACRO__(0x0042, ## __VA_ARGS__) + +#define INTEL_ILK_M_IDS(MACRO__, ...) \ + MACRO__(0x0046, ## __VA_ARGS__) + +#define INTEL_ILK_IDS(MACRO__, ...) 
\ + INTEL_ILK_D_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_ILK_M_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_SNB_D_GT1_IDS(MACRO__, ...) \ + MACRO__(0x0102, ## __VA_ARGS__), \ + MACRO__(0x010A, ## __VA_ARGS__) + +#define INTEL_SNB_D_GT2_IDS(MACRO__, ...) \ + MACRO__(0x0112, ## __VA_ARGS__), \ + MACRO__(0x0122, ## __VA_ARGS__) + +#define INTEL_SNB_D_IDS(MACRO__, ...) \ + INTEL_SNB_D_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_SNB_D_GT2_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_SNB_M_GT1_IDS(MACRO__, ...) \ + MACRO__(0x0106, ## __VA_ARGS__) + +#define INTEL_SNB_M_GT2_IDS(MACRO__, ...) \ + MACRO__(0x0116, ## __VA_ARGS__), \ + MACRO__(0x0126, ## __VA_ARGS__) + +#define INTEL_SNB_M_IDS(MACRO__, ...) \ + INTEL_SNB_M_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_SNB_M_GT2_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_SNB_IDS(MACRO__, ...) \ + INTEL_SNB_D_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_SNB_M_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_IVB_M_GT1_IDS(MACRO__, ...) \ + MACRO__(0x0156, ## __VA_ARGS__) /* GT1 mobile */ + +#define INTEL_IVB_M_GT2_IDS(MACRO__, ...) \ + MACRO__(0x0166, ## __VA_ARGS__) /* GT2 mobile */ + +#define INTEL_IVB_M_IDS(MACRO__, ...) \ + INTEL_IVB_M_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_IVB_M_GT2_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_IVB_D_GT1_IDS(MACRO__, ...) \ + MACRO__(0x0152, ## __VA_ARGS__), /* GT1 desktop */ \ + MACRO__(0x015a, ## __VA_ARGS__) /* GT1 server */ + +#define INTEL_IVB_D_GT2_IDS(MACRO__, ...) \ + MACRO__(0x0162, ## __VA_ARGS__), /* GT2 desktop */ \ + MACRO__(0x016a, ## __VA_ARGS__) /* GT2 server */ + +#define INTEL_IVB_D_IDS(MACRO__, ...) \ + INTEL_IVB_D_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_IVB_D_GT2_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_IVB_IDS(MACRO__, ...) \ + INTEL_IVB_M_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_IVB_D_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_IVB_Q_IDS(MACRO__, ...) \ + INTEL_QUANTA_VGA_DEVICE(__VA_ARGS__) /* Quanta transcode */ + +#define INTEL_HSW_ULT_GT1_IDS(MACRO__, ...) \ + MACRO__(0x0A02, ## __VA_ARGS__), /* ULT GT1 desktop */ \ + MACRO__(0x0A06, ## __VA_ARGS__), /* ULT GT1 mobile */ \ + MACRO__(0x0A0A, ## __VA_ARGS__), /* ULT GT1 server */ \ + MACRO__(0x0A0B, ## __VA_ARGS__) /* ULT GT1 reserved */ + +#define INTEL_HSW_ULX_GT1_IDS(MACRO__, ...) \ + MACRO__(0x0A0E, ## __VA_ARGS__) /* ULX GT1 mobile */ + +#define INTEL_HSW_GT1_IDS(MACRO__, ...) \ + INTEL_HSW_ULT_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_HSW_ULX_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0x0402, ## __VA_ARGS__), /* GT1 desktop */ \ + MACRO__(0x0406, ## __VA_ARGS__), /* GT1 mobile */ \ + MACRO__(0x040A, ## __VA_ARGS__), /* GT1 server */ \ + MACRO__(0x040B, ## __VA_ARGS__), /* GT1 reserved */ \ + MACRO__(0x040E, ## __VA_ARGS__), /* GT1 reserved */ \ + MACRO__(0x0C02, ## __VA_ARGS__), /* SDV GT1 desktop */ \ + MACRO__(0x0C06, ## __VA_ARGS__), /* SDV GT1 mobile */ \ + MACRO__(0x0C0A, ## __VA_ARGS__), /* SDV GT1 server */ \ + MACRO__(0x0C0B, ## __VA_ARGS__), /* SDV GT1 reserved */ \ + MACRO__(0x0C0E, ## __VA_ARGS__), /* SDV GT1 reserved */ \ + MACRO__(0x0D02, ## __VA_ARGS__), /* CRW GT1 desktop */ \ + MACRO__(0x0D06, ## __VA_ARGS__), /* CRW GT1 mobile */ \ + MACRO__(0x0D0A, ## __VA_ARGS__), /* CRW GT1 server */ \ + MACRO__(0x0D0B, ## __VA_ARGS__), /* CRW GT1 reserved */ \ + MACRO__(0x0D0E, ## __VA_ARGS__) /* CRW GT1 reserved */ + +#define INTEL_HSW_ULT_GT2_IDS(MACRO__, ...) 
\ + MACRO__(0x0A12, ## __VA_ARGS__), /* ULT GT2 desktop */ \ + MACRO__(0x0A16, ## __VA_ARGS__), /* ULT GT2 mobile */ \ + MACRO__(0x0A1A, ## __VA_ARGS__), /* ULT GT2 server */ \ + MACRO__(0x0A1B, ## __VA_ARGS__) /* ULT GT2 reserved */ \ + +#define INTEL_HSW_ULX_GT2_IDS(MACRO__, ...) \ + MACRO__(0x0A1E, ## __VA_ARGS__) /* ULX GT2 mobile */ \ + +#define INTEL_HSW_GT2_IDS(MACRO__, ...) \ + INTEL_HSW_ULT_GT2_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_HSW_ULX_GT2_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0x0412, ## __VA_ARGS__), /* GT2 desktop */ \ + MACRO__(0x0416, ## __VA_ARGS__), /* GT2 mobile */ \ + MACRO__(0x041A, ## __VA_ARGS__), /* GT2 server */ \ + MACRO__(0x041B, ## __VA_ARGS__), /* GT2 reserved */ \ + MACRO__(0x041E, ## __VA_ARGS__), /* GT2 reserved */ \ + MACRO__(0x0C12, ## __VA_ARGS__), /* SDV GT2 desktop */ \ + MACRO__(0x0C16, ## __VA_ARGS__), /* SDV GT2 mobile */ \ + MACRO__(0x0C1A, ## __VA_ARGS__), /* SDV GT2 server */ \ + MACRO__(0x0C1B, ## __VA_ARGS__), /* SDV GT2 reserved */ \ + MACRO__(0x0C1E, ## __VA_ARGS__), /* SDV GT2 reserved */ \ + MACRO__(0x0D12, ## __VA_ARGS__), /* CRW GT2 desktop */ \ + MACRO__(0x0D16, ## __VA_ARGS__), /* CRW GT2 mobile */ \ + MACRO__(0x0D1A, ## __VA_ARGS__), /* CRW GT2 server */ \ + MACRO__(0x0D1B, ## __VA_ARGS__), /* CRW GT2 reserved */ \ + MACRO__(0x0D1E, ## __VA_ARGS__) /* CRW GT2 reserved */ + +#define INTEL_HSW_ULT_GT3_IDS(MACRO__, ...) \ + MACRO__(0x0A22, ## __VA_ARGS__), /* ULT GT3 desktop */ \ + MACRO__(0x0A26, ## __VA_ARGS__), /* ULT GT3 mobile */ \ + MACRO__(0x0A2A, ## __VA_ARGS__), /* ULT GT3 server */ \ + MACRO__(0x0A2B, ## __VA_ARGS__), /* ULT GT3 reserved */ \ + MACRO__(0x0A2E, ## __VA_ARGS__) /* ULT GT3 reserved */ + +#define INTEL_HSW_GT3_IDS(MACRO__, ...) \ + INTEL_HSW_ULT_GT3_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0x0422, ## __VA_ARGS__), /* GT3 desktop */ \ + MACRO__(0x0426, ## __VA_ARGS__), /* GT3 mobile */ \ + MACRO__(0x042A, ## __VA_ARGS__), /* GT3 server */ \ + MACRO__(0x042B, ## __VA_ARGS__), /* GT3 reserved */ \ + MACRO__(0x042E, ## __VA_ARGS__), /* GT3 reserved */ \ + MACRO__(0x0C22, ## __VA_ARGS__), /* SDV GT3 desktop */ \ + MACRO__(0x0C26, ## __VA_ARGS__), /* SDV GT3 mobile */ \ + MACRO__(0x0C2A, ## __VA_ARGS__), /* SDV GT3 server */ \ + MACRO__(0x0C2B, ## __VA_ARGS__), /* SDV GT3 reserved */ \ + MACRO__(0x0C2E, ## __VA_ARGS__), /* SDV GT3 reserved */ \ + MACRO__(0x0D22, ## __VA_ARGS__), /* CRW GT3 desktop */ \ + MACRO__(0x0D26, ## __VA_ARGS__), /* CRW GT3 mobile */ \ + MACRO__(0x0D2A, ## __VA_ARGS__), /* CRW GT3 server */ \ + MACRO__(0x0D2B, ## __VA_ARGS__), /* CRW GT3 reserved */ \ + MACRO__(0x0D2E, ## __VA_ARGS__) /* CRW GT3 reserved */ + +#define INTEL_HSW_IDS(MACRO__, ...) \ + INTEL_HSW_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_HSW_GT2_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_HSW_GT3_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_VLV_IDS(MACRO__, ...) \ + MACRO__(0x0f30, ## __VA_ARGS__), \ + MACRO__(0x0f31, ## __VA_ARGS__), \ + MACRO__(0x0f32, ## __VA_ARGS__), \ + MACRO__(0x0f33, ## __VA_ARGS__) + +#define INTEL_BDW_ULT_GT1_IDS(MACRO__, ...) \ + MACRO__(0x1606, ## __VA_ARGS__), /* GT1 ULT */ \ + MACRO__(0x160B, ## __VA_ARGS__) /* GT1 Iris */ + +#define INTEL_BDW_ULX_GT1_IDS(MACRO__, ...) \ + MACRO__(0x160E, ## __VA_ARGS__) /* GT1 ULX */ + +#define INTEL_BDW_GT1_IDS(MACRO__, ...) 
\ + INTEL_BDW_ULT_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_BDW_ULX_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0x1602, ## __VA_ARGS__), /* GT1 ULT */ \ + MACRO__(0x160A, ## __VA_ARGS__), /* GT1 Server */ \ + MACRO__(0x160D, ## __VA_ARGS__) /* GT1 Workstation */ + +#define INTEL_BDW_ULT_GT2_IDS(MACRO__, ...) \ + MACRO__(0x1616, ## __VA_ARGS__), /* GT2 ULT */ \ + MACRO__(0x161B, ## __VA_ARGS__) /* GT2 ULT */ + +#define INTEL_BDW_ULX_GT2_IDS(MACRO__, ...) \ + MACRO__(0x161E, ## __VA_ARGS__) /* GT2 ULX */ + +#define INTEL_BDW_GT2_IDS(MACRO__, ...) \ + INTEL_BDW_ULT_GT2_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_BDW_ULX_GT2_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0x1612, ## __VA_ARGS__), /* GT2 Halo */ \ + MACRO__(0x161A, ## __VA_ARGS__), /* GT2 Server */ \ + MACRO__(0x161D, ## __VA_ARGS__) /* GT2 Workstation */ + +#define INTEL_BDW_ULT_GT3_IDS(MACRO__, ...) \ + MACRO__(0x1626, ## __VA_ARGS__), /* ULT */ \ + MACRO__(0x162B, ## __VA_ARGS__) /* Iris */ \ + +#define INTEL_BDW_ULX_GT3_IDS(MACRO__, ...) \ + MACRO__(0x162E, ## __VA_ARGS__) /* ULX */ + +#define INTEL_BDW_GT3_IDS(MACRO__, ...) \ + INTEL_BDW_ULT_GT3_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_BDW_ULX_GT3_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0x1622, ## __VA_ARGS__), /* ULT */ \ + MACRO__(0x162A, ## __VA_ARGS__), /* Server */ \ + MACRO__(0x162D, ## __VA_ARGS__) /* Workstation */ + +#define INTEL_BDW_ULT_RSVD_IDS(MACRO__, ...) \ + MACRO__(0x1636, ## __VA_ARGS__), /* ULT */ \ + MACRO__(0x163B, ## __VA_ARGS__) /* Iris */ + +#define INTEL_BDW_ULX_RSVD_IDS(MACRO__, ...) \ + MACRO__(0x163E, ## __VA_ARGS__) /* ULX */ + +#define INTEL_BDW_RSVD_IDS(MACRO__, ...) \ + INTEL_BDW_ULT_RSVD_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_BDW_ULX_RSVD_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0x1632, ## __VA_ARGS__), /* ULT */ \ + MACRO__(0x163A, ## __VA_ARGS__), /* Server */ \ + MACRO__(0x163D, ## __VA_ARGS__) /* Workstation */ + +#define INTEL_BDW_IDS(MACRO__, ...) \ + INTEL_BDW_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_BDW_GT2_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_BDW_GT3_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_BDW_RSVD_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_CHV_IDS(MACRO__, ...) \ + MACRO__(0x22b0, ## __VA_ARGS__), \ + MACRO__(0x22b1, ## __VA_ARGS__), \ + MACRO__(0x22b2, ## __VA_ARGS__), \ + MACRO__(0x22b3, ## __VA_ARGS__) + +#define INTEL_SKL_ULT_GT1_IDS(MACRO__, ...) \ + MACRO__(0x1906, ## __VA_ARGS__), /* ULT GT1 */ \ + MACRO__(0x1913, ## __VA_ARGS__) /* ULT GT1.5 */ + +#define INTEL_SKL_ULX_GT1_IDS(MACRO__, ...) \ + MACRO__(0x190E, ## __VA_ARGS__), /* ULX GT1 */ \ + MACRO__(0x1915, ## __VA_ARGS__) /* ULX GT1.5 */ + +#define INTEL_SKL_GT1_IDS(MACRO__, ...) \ + INTEL_SKL_ULT_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_SKL_ULX_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0x1902, ## __VA_ARGS__), /* DT GT1 */ \ + MACRO__(0x190A, ## __VA_ARGS__), /* SRV GT1 */ \ + MACRO__(0x190B, ## __VA_ARGS__), /* Halo GT1 */ \ + MACRO__(0x1917, ## __VA_ARGS__) /* DT GT1.5 */ + +#define INTEL_SKL_ULT_GT2_IDS(MACRO__, ...) \ + MACRO__(0x1916, ## __VA_ARGS__), /* ULT GT2 */ \ + MACRO__(0x1921, ## __VA_ARGS__) /* ULT GT2F */ + +#define INTEL_SKL_ULX_GT2_IDS(MACRO__, ...) \ + MACRO__(0x191E, ## __VA_ARGS__) /* ULX GT2 */ + +#define INTEL_SKL_GT2_IDS(MACRO__, ...) 
\ + INTEL_SKL_ULT_GT2_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_SKL_ULX_GT2_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0x1912, ## __VA_ARGS__), /* DT GT2 */ \ + MACRO__(0x191A, ## __VA_ARGS__), /* SRV GT2 */ \ + MACRO__(0x191B, ## __VA_ARGS__), /* Halo GT2 */ \ + MACRO__(0x191D, ## __VA_ARGS__) /* WKS GT2 */ + +#define INTEL_SKL_ULT_GT3_IDS(MACRO__, ...) \ + MACRO__(0x1923, ## __VA_ARGS__), /* ULT GT3 */ \ + MACRO__(0x1926, ## __VA_ARGS__), /* ULT GT3e */ \ + MACRO__(0x1927, ## __VA_ARGS__) /* ULT GT3e */ + +#define INTEL_SKL_GT3_IDS(MACRO__, ...) \ + INTEL_SKL_ULT_GT3_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0x192A, ## __VA_ARGS__), /* SRV GT3 */ \ + MACRO__(0x192B, ## __VA_ARGS__), /* Halo GT3e */ \ + MACRO__(0x192D, ## __VA_ARGS__) /* SRV GT3e */ + +#define INTEL_SKL_GT4_IDS(MACRO__, ...) \ + MACRO__(0x1932, ## __VA_ARGS__), /* DT GT4 */ \ + MACRO__(0x193A, ## __VA_ARGS__), /* SRV GT4e */ \ + MACRO__(0x193B, ## __VA_ARGS__), /* Halo GT4e */ \ + MACRO__(0x193D, ## __VA_ARGS__) /* WKS GT4e */ + +#define INTEL_SKL_IDS(MACRO__, ...) \ + INTEL_SKL_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_SKL_GT2_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_SKL_GT3_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_SKL_GT4_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_BXT_IDS(MACRO__, ...) \ + MACRO__(0x0A84, ## __VA_ARGS__), \ + MACRO__(0x1A84, ## __VA_ARGS__), \ + MACRO__(0x1A85, ## __VA_ARGS__), \ + MACRO__(0x5A84, ## __VA_ARGS__), /* APL HD Graphics 505 */ \ + MACRO__(0x5A85, ## __VA_ARGS__) /* APL HD Graphics 500 */ + +#define INTEL_GLK_IDS(MACRO__, ...) \ + MACRO__(0x3184, ## __VA_ARGS__), \ + MACRO__(0x3185, ## __VA_ARGS__) + +#define INTEL_KBL_ULT_GT1_IDS(MACRO__, ...) \ + MACRO__(0x5906, ## __VA_ARGS__), /* ULT GT1 */ \ + MACRO__(0x5913, ## __VA_ARGS__) /* ULT GT1.5 */ + +#define INTEL_KBL_ULX_GT1_IDS(MACRO__, ...) \ + MACRO__(0x590E, ## __VA_ARGS__), /* ULX GT1 */ \ + MACRO__(0x5915, ## __VA_ARGS__) /* ULX GT1.5 */ + +#define INTEL_KBL_GT1_IDS(MACRO__, ...) \ + INTEL_KBL_ULT_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_KBL_ULX_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0x5902, ## __VA_ARGS__), /* DT GT1 */ \ + MACRO__(0x5908, ## __VA_ARGS__), /* Halo GT1 */ \ + MACRO__(0x590A, ## __VA_ARGS__), /* SRV GT1 */ \ + MACRO__(0x590B, ## __VA_ARGS__) /* Halo GT1 */ + +#define INTEL_KBL_ULT_GT2_IDS(MACRO__, ...) \ + MACRO__(0x5916, ## __VA_ARGS__), /* ULT GT2 */ \ + MACRO__(0x5921, ## __VA_ARGS__) /* ULT GT2F */ + +#define INTEL_KBL_ULX_GT2_IDS(MACRO__, ...) \ + MACRO__(0x591E, ## __VA_ARGS__) /* ULX GT2 */ + +#define INTEL_KBL_GT2_IDS(MACRO__, ...) \ + INTEL_KBL_ULT_GT2_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_KBL_ULX_GT2_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0x5912, ## __VA_ARGS__), /* DT GT2 */ \ + MACRO__(0x5917, ## __VA_ARGS__), /* Mobile GT2 */ \ + MACRO__(0x591A, ## __VA_ARGS__), /* SRV GT2 */ \ + MACRO__(0x591B, ## __VA_ARGS__), /* Halo GT2 */ \ + MACRO__(0x591D, ## __VA_ARGS__) /* WKS GT2 */ + +#define INTEL_KBL_ULT_GT3_IDS(MACRO__, ...) \ + MACRO__(0x5926, ## __VA_ARGS__) /* ULT GT3 */ + +#define INTEL_KBL_GT3_IDS(MACRO__, ...) \ + INTEL_KBL_ULT_GT3_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0x5923, ## __VA_ARGS__), /* ULT GT3 */ \ + MACRO__(0x5927, ## __VA_ARGS__) /* ULT GT3 */ + +#define INTEL_KBL_GT4_IDS(MACRO__, ...) \ + MACRO__(0x593B, ## __VA_ARGS__) /* Halo GT4 */ + +/* AML/KBL Y GT2 */ +#define INTEL_AML_KBL_GT2_IDS(MACRO__, ...) \ + MACRO__(0x591C, ## __VA_ARGS__), /* ULX GT2 */ \ + MACRO__(0x87C0, ## __VA_ARGS__) /* ULX GT2 */ + +/* AML/CFL Y GT2 */ +#define INTEL_AML_CFL_GT2_IDS(MACRO__, ...) 
\ + MACRO__(0x87CA, ## __VA_ARGS__) + +/* CML GT1 */ +#define INTEL_CML_GT1_IDS(MACRO__, ...) \ + MACRO__(0x9BA2, ## __VA_ARGS__), \ + MACRO__(0x9BA4, ## __VA_ARGS__), \ + MACRO__(0x9BA5, ## __VA_ARGS__), \ + MACRO__(0x9BA8, ## __VA_ARGS__) + +#define INTEL_CML_U_GT1_IDS(MACRO__, ...) \ + MACRO__(0x9B21, ## __VA_ARGS__), \ + MACRO__(0x9BAA, ## __VA_ARGS__), \ + MACRO__(0x9BAC, ## __VA_ARGS__) + +/* CML GT2 */ +#define INTEL_CML_GT2_IDS(MACRO__, ...) \ + MACRO__(0x9BC2, ## __VA_ARGS__), \ + MACRO__(0x9BC4, ## __VA_ARGS__), \ + MACRO__(0x9BC5, ## __VA_ARGS__), \ + MACRO__(0x9BC6, ## __VA_ARGS__), \ + MACRO__(0x9BC8, ## __VA_ARGS__), \ + MACRO__(0x9BE6, ## __VA_ARGS__), \ + MACRO__(0x9BF6, ## __VA_ARGS__) + +#define INTEL_CML_U_GT2_IDS(MACRO__, ...) \ + MACRO__(0x9B41, ## __VA_ARGS__), \ + MACRO__(0x9BCA, ## __VA_ARGS__), \ + MACRO__(0x9BCC, ## __VA_ARGS__) + +#define INTEL_CML_IDS(MACRO__, ...) \ + INTEL_CML_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_CML_GT2_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_CML_U_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_CML_U_GT2_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_KBL_IDS(MACRO__, ...) \ + INTEL_KBL_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_KBL_GT2_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_KBL_GT3_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_KBL_GT4_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_AML_KBL_GT2_IDS(MACRO__, ## __VA_ARGS__) + +/* CFL S */ +#define INTEL_CFL_S_GT1_IDS(MACRO__, ...) \ + MACRO__(0x3E90, ## __VA_ARGS__), /* SRV GT1 */ \ + MACRO__(0x3E93, ## __VA_ARGS__), /* SRV GT1 */ \ + MACRO__(0x3E99, ## __VA_ARGS__) /* SRV GT1 */ + +#define INTEL_CFL_S_GT2_IDS(MACRO__, ...) \ + MACRO__(0x3E91, ## __VA_ARGS__), /* SRV GT2 */ \ + MACRO__(0x3E92, ## __VA_ARGS__), /* SRV GT2 */ \ + MACRO__(0x3E96, ## __VA_ARGS__), /* SRV GT2 */ \ + MACRO__(0x3E98, ## __VA_ARGS__), /* SRV GT2 */ \ + MACRO__(0x3E9A, ## __VA_ARGS__) /* SRV GT2 */ + +/* CFL H */ +#define INTEL_CFL_H_GT1_IDS(MACRO__, ...) \ + MACRO__(0x3E9C, ## __VA_ARGS__) + +#define INTEL_CFL_H_GT2_IDS(MACRO__, ...) \ + MACRO__(0x3E94, ## __VA_ARGS__), /* Halo GT2 */ \ + MACRO__(0x3E9B, ## __VA_ARGS__) /* Halo GT2 */ + +/* CFL U GT2 */ +#define INTEL_CFL_U_GT2_IDS(MACRO__, ...) \ + MACRO__(0x3EA9, ## __VA_ARGS__) + +/* CFL U GT3 */ +#define INTEL_CFL_U_GT3_IDS(MACRO__, ...) \ + MACRO__(0x3EA5, ## __VA_ARGS__), /* ULT GT3 */ \ + MACRO__(0x3EA6, ## __VA_ARGS__), /* ULT GT3 */ \ + MACRO__(0x3EA7, ## __VA_ARGS__), /* ULT GT3 */ \ + MACRO__(0x3EA8, ## __VA_ARGS__) /* ULT GT3 */ + +#define INTEL_CFL_IDS(MACRO__, ...) \ + INTEL_CFL_S_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_CFL_S_GT2_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_CFL_H_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_CFL_H_GT2_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_CFL_U_GT2_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_CFL_U_GT3_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_AML_CFL_GT2_IDS(MACRO__, ## __VA_ARGS__) + +/* WHL/CFL U GT1 */ +#define INTEL_WHL_U_GT1_IDS(MACRO__, ...) \ + MACRO__(0x3EA1, ## __VA_ARGS__), \ + MACRO__(0x3EA4, ## __VA_ARGS__) + +/* WHL/CFL U GT2 */ +#define INTEL_WHL_U_GT2_IDS(MACRO__, ...) \ + MACRO__(0x3EA0, ## __VA_ARGS__), \ + MACRO__(0x3EA3, ## __VA_ARGS__) + +/* WHL/CFL U GT3 */ +#define INTEL_WHL_U_GT3_IDS(MACRO__, ...) \ + MACRO__(0x3EA2, ## __VA_ARGS__) + +#define INTEL_WHL_IDS(MACRO__, ...) \ + INTEL_WHL_U_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_WHL_U_GT2_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_WHL_U_GT3_IDS(MACRO__, ## __VA_ARGS__) + +/* CNL */ +#define INTEL_CNL_PORT_F_IDS(MACRO__, ...) 
\ + MACRO__(0x5A44, ## __VA_ARGS__), \ + MACRO__(0x5A4C, ## __VA_ARGS__), \ + MACRO__(0x5A54, ## __VA_ARGS__), \ + MACRO__(0x5A5C, ## __VA_ARGS__) + +#define INTEL_CNL_IDS(MACRO__, ...) \ + INTEL_CNL_PORT_F_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0x5A40, ## __VA_ARGS__), \ + MACRO__(0x5A41, ## __VA_ARGS__), \ + MACRO__(0x5A42, ## __VA_ARGS__), \ + MACRO__(0x5A49, ## __VA_ARGS__), \ + MACRO__(0x5A4A, ## __VA_ARGS__), \ + MACRO__(0x5A50, ## __VA_ARGS__), \ + MACRO__(0x5A51, ## __VA_ARGS__), \ + MACRO__(0x5A52, ## __VA_ARGS__), \ + MACRO__(0x5A59, ## __VA_ARGS__), \ + MACRO__(0x5A5A, ## __VA_ARGS__) + +/* ICL */ +#define INTEL_ICL_PORT_F_IDS(MACRO__, ...) \ + MACRO__(0x8A50, ## __VA_ARGS__), \ + MACRO__(0x8A52, ## __VA_ARGS__), \ + MACRO__(0x8A53, ## __VA_ARGS__), \ + MACRO__(0x8A54, ## __VA_ARGS__), \ + MACRO__(0x8A56, ## __VA_ARGS__), \ + MACRO__(0x8A57, ## __VA_ARGS__), \ + MACRO__(0x8A58, ## __VA_ARGS__), \ + MACRO__(0x8A59, ## __VA_ARGS__), \ + MACRO__(0x8A5A, ## __VA_ARGS__), \ + MACRO__(0x8A5B, ## __VA_ARGS__), \ + MACRO__(0x8A5C, ## __VA_ARGS__), \ + MACRO__(0x8A70, ## __VA_ARGS__), \ + MACRO__(0x8A71, ## __VA_ARGS__) + +#define INTEL_ICL_IDS(MACRO__, ...) \ + INTEL_ICL_PORT_F_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0x8A51, ## __VA_ARGS__), \ + MACRO__(0x8A5D, ## __VA_ARGS__) + +/* EHL */ +#define INTEL_EHL_IDS(MACRO__, ...) \ + MACRO__(0x4541, ## __VA_ARGS__), \ + MACRO__(0x4551, ## __VA_ARGS__), \ + MACRO__(0x4555, ## __VA_ARGS__), \ + MACRO__(0x4557, ## __VA_ARGS__), \ + MACRO__(0x4570, ## __VA_ARGS__), \ + MACRO__(0x4571, ## __VA_ARGS__) + +/* JSL */ +#define INTEL_JSL_IDS(MACRO__, ...) \ + MACRO__(0x4E51, ## __VA_ARGS__), \ + MACRO__(0x4E55, ## __VA_ARGS__), \ + MACRO__(0x4E57, ## __VA_ARGS__), \ + MACRO__(0x4E61, ## __VA_ARGS__), \ + MACRO__(0x4E71, ## __VA_ARGS__) + +/* TGL */ +#define INTEL_TGL_GT1_IDS(MACRO__, ...) \ + MACRO__(0x9A60, ## __VA_ARGS__), \ + MACRO__(0x9A68, ## __VA_ARGS__), \ + MACRO__(0x9A70, ## __VA_ARGS__) + +#define INTEL_TGL_GT2_IDS(MACRO__, ...) \ + MACRO__(0x9A40, ## __VA_ARGS__), \ + MACRO__(0x9A49, ## __VA_ARGS__), \ + MACRO__(0x9A59, ## __VA_ARGS__), \ + MACRO__(0x9A78, ## __VA_ARGS__), \ + MACRO__(0x9AC0, ## __VA_ARGS__), \ + MACRO__(0x9AC9, ## __VA_ARGS__), \ + MACRO__(0x9AD9, ## __VA_ARGS__), \ + MACRO__(0x9AF8, ## __VA_ARGS__) + +#define INTEL_TGL_IDS(MACRO__, ...) \ + INTEL_TGL_GT1_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_TGL_GT2_IDS(MACRO__, ## __VA_ARGS__) + +/* RKL */ +#define INTEL_RKL_IDS(MACRO__, ...) \ + MACRO__(0x4C80, ## __VA_ARGS__), \ + MACRO__(0x4C8A, ## __VA_ARGS__), \ + MACRO__(0x4C8B, ## __VA_ARGS__), \ + MACRO__(0x4C8C, ## __VA_ARGS__), \ + MACRO__(0x4C90, ## __VA_ARGS__), \ + MACRO__(0x4C9A, ## __VA_ARGS__) + +/* DG1 */ +#define INTEL_DG1_IDS(MACRO__, ...) \ + MACRO__(0x4905, ## __VA_ARGS__), \ + MACRO__(0x4906, ## __VA_ARGS__), \ + MACRO__(0x4907, ## __VA_ARGS__), \ + MACRO__(0x4908, ## __VA_ARGS__), \ + MACRO__(0x4909, ## __VA_ARGS__) + +/* ADL-S */ +#define INTEL_ADLS_IDS(MACRO__, ...) \ + MACRO__(0x4680, ## __VA_ARGS__), \ + MACRO__(0x4682, ## __VA_ARGS__), \ + MACRO__(0x4688, ## __VA_ARGS__), \ + MACRO__(0x468A, ## __VA_ARGS__), \ + MACRO__(0x468B, ## __VA_ARGS__), \ + MACRO__(0x4690, ## __VA_ARGS__), \ + MACRO__(0x4692, ## __VA_ARGS__), \ + MACRO__(0x4693, ## __VA_ARGS__) + +/* ADL-P */ +#define INTEL_ADLP_IDS(MACRO__, ...) 
\ + MACRO__(0x46A0, ## __VA_ARGS__), \ + MACRO__(0x46A1, ## __VA_ARGS__), \ + MACRO__(0x46A2, ## __VA_ARGS__), \ + MACRO__(0x46A3, ## __VA_ARGS__), \ + MACRO__(0x46A6, ## __VA_ARGS__), \ + MACRO__(0x46A8, ## __VA_ARGS__), \ + MACRO__(0x46AA, ## __VA_ARGS__), \ + MACRO__(0x462A, ## __VA_ARGS__), \ + MACRO__(0x4626, ## __VA_ARGS__), \ + MACRO__(0x4628, ## __VA_ARGS__), \ + MACRO__(0x46B0, ## __VA_ARGS__), \ + MACRO__(0x46B1, ## __VA_ARGS__), \ + MACRO__(0x46B2, ## __VA_ARGS__), \ + MACRO__(0x46B3, ## __VA_ARGS__), \ + MACRO__(0x46C0, ## __VA_ARGS__), \ + MACRO__(0x46C1, ## __VA_ARGS__), \ + MACRO__(0x46C2, ## __VA_ARGS__), \ + MACRO__(0x46C3, ## __VA_ARGS__) + +/* ADL-N */ +#define INTEL_ADLN_IDS(MACRO__, ...) \ + MACRO__(0x46D0, ## __VA_ARGS__), \ + MACRO__(0x46D1, ## __VA_ARGS__), \ + MACRO__(0x46D2, ## __VA_ARGS__), \ + MACRO__(0x46D3, ## __VA_ARGS__), \ + MACRO__(0x46D4, ## __VA_ARGS__) + +/* RPL-S */ +#define INTEL_RPLS_IDS(MACRO__, ...) \ + MACRO__(0xA780, ## __VA_ARGS__), \ + MACRO__(0xA781, ## __VA_ARGS__), \ + MACRO__(0xA782, ## __VA_ARGS__), \ + MACRO__(0xA783, ## __VA_ARGS__), \ + MACRO__(0xA788, ## __VA_ARGS__), \ + MACRO__(0xA789, ## __VA_ARGS__), \ + MACRO__(0xA78A, ## __VA_ARGS__), \ + MACRO__(0xA78B, ## __VA_ARGS__) + +/* RPL-U */ +#define INTEL_RPLU_IDS(MACRO__, ...) \ + MACRO__(0xA721, ## __VA_ARGS__), \ + MACRO__(0xA7A1, ## __VA_ARGS__), \ + MACRO__(0xA7A9, ## __VA_ARGS__), \ + MACRO__(0xA7AC, ## __VA_ARGS__), \ + MACRO__(0xA7AD, ## __VA_ARGS__) + +/* RPL-P */ +#define INTEL_RPLP_IDS(MACRO__, ...) \ + MACRO__(0xA720, ## __VA_ARGS__), \ + MACRO__(0xA7A0, ## __VA_ARGS__), \ + MACRO__(0xA7A8, ## __VA_ARGS__), \ + MACRO__(0xA7AA, ## __VA_ARGS__), \ + MACRO__(0xA7AB, ## __VA_ARGS__) + +/* DG2 */ +#define INTEL_DG2_G10_D_IDS(MACRO__, ...) \ + MACRO__(0x56A0, ## __VA_ARGS__), \ + MACRO__(0x56A1, ## __VA_ARGS__), \ + MACRO__(0x56A2, ## __VA_ARGS__) + +#define INTEL_DG2_G10_E_IDS(MACRO__, ...) \ + MACRO__(0x56BE, ## __VA_ARGS__), \ + MACRO__(0x56BF, ## __VA_ARGS__) + +#define INTEL_DG2_G10_M_IDS(MACRO__, ...) \ + MACRO__(0x5690, ## __VA_ARGS__), \ + MACRO__(0x5691, ## __VA_ARGS__), \ + MACRO__(0x5692, ## __VA_ARGS__) + +#define INTEL_DG2_G10_IDS(MACRO__, ...) \ + INTEL_DG2_G10_D_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_DG2_G10_E_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_DG2_G10_M_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_DG2_G11_D_IDS(MACRO__, ...) \ + MACRO__(0x56A5, ## __VA_ARGS__), \ + MACRO__(0x56A6, ## __VA_ARGS__), \ + MACRO__(0x56B0, ## __VA_ARGS__), \ + MACRO__(0x56B1, ## __VA_ARGS__) + +#define INTEL_DG2_G11_E_IDS(MACRO__, ...) \ + MACRO__(0x56BA, ## __VA_ARGS__), \ + MACRO__(0x56BB, ## __VA_ARGS__), \ + MACRO__(0x56BC, ## __VA_ARGS__), \ + MACRO__(0x56BD, ## __VA_ARGS__) + +#define INTEL_DG2_G11_M_IDS(MACRO__, ...) \ + MACRO__(0x5693, ## __VA_ARGS__), \ + MACRO__(0x5694, ## __VA_ARGS__), \ + MACRO__(0x5695, ## __VA_ARGS__) + +#define INTEL_DG2_G11_IDS(MACRO__, ...) \ + INTEL_DG2_G11_D_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_DG2_G11_E_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_DG2_G11_M_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_DG2_G12_D_IDS(MACRO__, ...) \ + MACRO__(0x56A3, ## __VA_ARGS__), \ + MACRO__(0x56A4, ## __VA_ARGS__), \ + MACRO__(0x56B2, ## __VA_ARGS__), \ + MACRO__(0x56B3, ## __VA_ARGS__) + +#define INTEL_DG2_G12_M_IDS(MACRO__, ...) \ + MACRO__(0x5696, ## __VA_ARGS__), \ + MACRO__(0x5697, ## __VA_ARGS__) + +#define INTEL_DG2_G12_IDS(MACRO__, ...) 
\ + INTEL_DG2_G12_D_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_DG2_G12_M_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_DG2_D_IDS(MACRO__, ...) \ + INTEL_DG2_G10_D_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_DG2_G11_D_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_DG2_G12_D_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_DG2_IDS(MACRO__, ...) \ + INTEL_DG2_G10_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_DG2_G11_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_DG2_G12_IDS(MACRO__, ## __VA_ARGS__) + +#define INTEL_ATS_M150_IDS(MACRO__, ...) \ + MACRO__(0x56C0, ## __VA_ARGS__), \ + MACRO__(0x56C2, ## __VA_ARGS__) + +#define INTEL_ATS_M75_IDS(MACRO__, ...) \ + MACRO__(0x56C1, ## __VA_ARGS__) + +#define INTEL_ATS_M_IDS(MACRO__, ...) \ + INTEL_ATS_M150_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_ATS_M75_IDS(MACRO__, ## __VA_ARGS__) + +/* ARL */ +#define INTEL_ARL_H_IDS(MACRO__, ...) \ + MACRO__(0x7D51, ## __VA_ARGS__), \ + MACRO__(0x7DD1, ## __VA_ARGS__) + +#define INTEL_ARL_U_IDS(MACRO__, ...) \ + MACRO__(0x7D41, ## __VA_ARGS__) \ + +#define INTEL_ARL_S_IDS(MACRO__, ...) \ + MACRO__(0x7D67, ## __VA_ARGS__), \ + MACRO__(0xB640, ## __VA_ARGS__) + +#define INTEL_ARL_IDS(MACRO__, ...) \ + INTEL_ARL_H_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_ARL_U_IDS(MACRO__, ## __VA_ARGS__), \ + INTEL_ARL_S_IDS(MACRO__, ## __VA_ARGS__) + +/* MTL */ +#define INTEL_MTL_U_IDS(MACRO__, ...) \ + MACRO__(0x7D40, ## __VA_ARGS__), \ + MACRO__(0x7D45, ## __VA_ARGS__) + +#define INTEL_MTL_IDS(MACRO__, ...) \ + INTEL_MTL_U_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0x7D55, ## __VA_ARGS__), \ + MACRO__(0x7D60, ## __VA_ARGS__), \ + MACRO__(0x7DD5, ## __VA_ARGS__) + +/* PVC */ +#define INTEL_PVC_IDS(MACRO__, ...) \ + MACRO__(0x0B69, ## __VA_ARGS__), \ + MACRO__(0x0B6E, ## __VA_ARGS__), \ + MACRO__(0x0BD4, ## __VA_ARGS__), \ + MACRO__(0x0BD5, ## __VA_ARGS__), \ + MACRO__(0x0BD6, ## __VA_ARGS__), \ + MACRO__(0x0BD7, ## __VA_ARGS__), \ + MACRO__(0x0BD8, ## __VA_ARGS__), \ + MACRO__(0x0BD9, ## __VA_ARGS__), \ + MACRO__(0x0BDA, ## __VA_ARGS__), \ + MACRO__(0x0BDB, ## __VA_ARGS__), \ + MACRO__(0x0BE0, ## __VA_ARGS__), \ + MACRO__(0x0BE1, ## __VA_ARGS__), \ + MACRO__(0x0BE5, ## __VA_ARGS__) + +/* LNL */ +#define INTEL_LNL_IDS(MACRO__, ...) \ + MACRO__(0x6420, ## __VA_ARGS__), \ + MACRO__(0x64A0, ## __VA_ARGS__), \ + MACRO__(0x64B0, ## __VA_ARGS__) + +/* BMG */ +#define INTEL_BMG_G21_IDS(MACRO__, ...) \ + MACRO__(0xE202, ## __VA_ARGS__), \ + MACRO__(0xE209, ## __VA_ARGS__), \ + MACRO__(0xE20B, ## __VA_ARGS__), \ + MACRO__(0xE20C, ## __VA_ARGS__), \ + MACRO__(0xE20D, ## __VA_ARGS__), \ + MACRO__(0xE210, ## __VA_ARGS__), \ + MACRO__(0xE211, ## __VA_ARGS__), \ + MACRO__(0xE212, ## __VA_ARGS__), \ + MACRO__(0xE216, ## __VA_ARGS__) + +#define INTEL_BMG_IDS(MACRO__, ...) \ + INTEL_BMG_G21_IDS(MACRO__, ## __VA_ARGS__), \ + MACRO__(0xE220, ## __VA_ARGS__), \ + MACRO__(0xE221, ## __VA_ARGS__), \ + MACRO__(0xE222, ## __VA_ARGS__), \ + MACRO__(0xE223, ## __VA_ARGS__) + +/* PTL */ +#define INTEL_PTL_IDS(MACRO__, ...) \ + MACRO__(0xB080, ## __VA_ARGS__), \ + MACRO__(0xB081, ## __VA_ARGS__), \ + MACRO__(0xB082, ## __VA_ARGS__), \ + MACRO__(0xB083, ## __VA_ARGS__), \ + MACRO__(0xB084, ## __VA_ARGS__), \ + MACRO__(0xB085, ## __VA_ARGS__), \ + MACRO__(0xB086, ## __VA_ARGS__), \ + MACRO__(0xB087, ## __VA_ARGS__), \ + MACRO__(0xB08F, ## __VA_ARGS__), \ + MACRO__(0xB090, ## __VA_ARGS__), \ + MACRO__(0xB0A0, ## __VA_ARGS__), \ + MACRO__(0xB0B0, ## __VA_ARGS__) + +/* WCL */ +#define INTEL_WCL_IDS(MACRO__, ...) 
\ + MACRO__(0xFD80, ## __VA_ARGS__), \ + MACRO__(0xFD81, ## __VA_ARGS__) + +/* NVL-S */ +#define INTEL_NVLS_IDS(MACRO__, ...) \ + MACRO__(0xD740, ## __VA_ARGS__), \ + MACRO__(0xD741, ## __VA_ARGS__), \ + MACRO__(0xD742, ## __VA_ARGS__), \ + MACRO__(0xD743, ## __VA_ARGS__), \ + MACRO__(0xD744, ## __VA_ARGS__), \ + MACRO__(0xD745, ## __VA_ARGS__) + +/* CRI */ +#define INTEL_CRI_IDS(MACRO__, ...) \ + MACRO__(0x674C, ## __VA_ARGS__) + +#endif /* __PCIIDS_H__ */ diff --git a/include/drm/intel/xe_sriov_vfio.h b/include/drm/intel/xe_sriov_vfio.h new file mode 100644 index 000000000000..e9814e8149fd --- /dev/null +++ b/include/drm/intel/xe_sriov_vfio.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_VFIO_H_ +#define _XE_SRIOV_VFIO_H_ + +#include <linux/types.h> + +struct pci_dev; +struct xe_device; + +/** + * xe_sriov_vfio_get_pf() - Get PF &xe_device. + * @pdev: the VF &pci_dev device + * + * Return: pointer to PF &xe_device, NULL otherwise. + */ +struct xe_device *xe_sriov_vfio_get_pf(struct pci_dev *pdev); + +/** + * xe_sriov_vfio_migration_supported() - Check if migration is supported. + * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf() + * + * Return: true if migration is supported, false otherwise. + */ +bool xe_sriov_vfio_migration_supported(struct xe_device *xe); + +/** + * xe_sriov_vfio_wait_flr_done() - Wait for VF FLR completion. + * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf() + * @vfid: the VF identifier (can't be 0) + * + * This function will wait until VF FLR is processed by PF on all tiles (or + * until timeout occurs). + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_vfio_wait_flr_done(struct xe_device *xe, unsigned int vfid); + +/** + * xe_sriov_vfio_suspend_device() - Suspend VF. + * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf() + * @vfid: the VF identifier (can't be 0) + * + * This function will pause VF on all tiles/GTs. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_vfio_suspend_device(struct xe_device *xe, unsigned int vfid); + +/** + * xe_sriov_vfio_resume_device() - Resume VF. + * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf() + * @vfid: the VF identifier (can't be 0) + * + * This function will resume VF on all tiles. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_vfio_resume_device(struct xe_device *xe, unsigned int vfid); + +/** + * xe_sriov_vfio_stop_copy_enter() - Initiate a VF device migration data save. + * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf() + * @vfid: the VF identifier (can't be 0) + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_vfio_stop_copy_enter(struct xe_device *xe, unsigned int vfid); + +/** + * xe_sriov_vfio_stop_copy_exit() - Finish a VF device migration data save. + * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf() + * @vfid: the VF identifier (can't be 0) + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_vfio_stop_copy_exit(struct xe_device *xe, unsigned int vfid); + +/** + * xe_sriov_vfio_resume_data_enter() - Initiate a VF device migration data restore. + * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf() + * @vfid: the VF identifier (can't be 0) + * + * Return: 0 on success or a negative error code on failure. 
+ */
+int xe_sriov_vfio_resume_data_enter(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_resume_data_exit() - Finish a VF device migration data restore.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_resume_data_exit(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_error() - Move VF device to error state.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Reset is needed to move it out of error state.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_error(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_data_read() - Read migration data from the VF device.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ * @buf: start address of userspace buffer
+ * @len: requested read size from userspace
+ *
+ * Return: number of bytes that have been successfully read,
+ * 0 if no more migration data is available, -errno on failure.
+ */
+ssize_t xe_sriov_vfio_data_read(struct xe_device *xe, unsigned int vfid,
+				char __user *buf, size_t len);
+/**
+ * xe_sriov_vfio_data_write() - Write migration data to the VF device.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ * @buf: start address of userspace buffer
+ * @len: requested write size from userspace
+ *
+ * Return: number of bytes that have been successfully written, -errno on failure.
+ */
+ssize_t xe_sriov_vfio_data_write(struct xe_device *xe, unsigned int vfid,
+				 const char __user *buf, size_t len);
+/**
+ * xe_sriov_vfio_stop_copy_size() - Get a size estimate of VF device migration data.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: migration data size in bytes or a negative error code on failure.
+ */
+ssize_t xe_sriov_vfio_stop_copy_size(struct xe_device *xe, unsigned int vfid);
+
+#endif
diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h
index 125f096c88cb..ee9df8cc67b7 100644
--- a/include/drm/spsc_queue.h
+++ b/include/drm/spsc_queue.h
@@ -70,9 +70,11 @@ static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *n
 
 	preempt_disable();
 
+	atomic_inc(&queue->job_count);
+	smp_mb__after_atomic();
+
 	tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
 	WRITE_ONCE(*tail, node);
-	atomic_inc(&queue->job_count);
 
 	/*
 	 * In case of first element verify new node will be visible to the consumer
diff --git a/include/drm/task_barrier.h b/include/drm/task_barrier.h
index 087e3f649c52..f6e6ed529681 100644
--- a/include/drm/task_barrier.h
+++ b/include/drm/task_barrier.h
@@ -24,8 +24,8 @@
 #include <linux/atomic.h>
 
 /*
- * Reusable 2 PHASE task barrier (randevouz point) implementation for N tasks.
- * Based on the Little book of sempahores - https://greenteapress.com/wp/semaphores/
+ * Reusable 2 PHASE task barrier (rendez-vous point) implementation for N tasks.
+ * Based on the Little book of semaphores - https://greenteapress.com/wp/semaphores/
  */
diff --git a/include/drm/ttm/ttm_allocation.h b/include/drm/ttm/ttm_allocation.h
new file mode 100644
index 000000000000..655d1e44aba7
--- /dev/null
+++ b/include/drm/ttm/ttm_allocation.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2025 Valve Corporation */
+
+#ifndef _TTM_ALLOCATION_H_
+#define _TTM_ALLOCATION_H_
+
+#define TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(n) ((n) & 0xff) /* Max order which caller can benefit from */
+#define TTM_ALLOCATION_POOL_USE_DMA_ALLOC BIT(8) /* Use coherent DMA allocations. */
+#define TTM_ALLOCATION_POOL_USE_DMA32 BIT(9) /* Use GFP_DMA32 allocations. */
+#define TTM_ALLOCATION_PROPAGATE_ENOSPC BIT(10) /* Do not convert ENOSPC from resource managers to ENOMEM. */
+
+#endif
diff --git a/include/drm/ttm/ttm_backup.h b/include/drm/ttm/ttm_backup.h
new file mode 100644
index 000000000000..c33cba111171
--- /dev/null
+++ b/include/drm/ttm/ttm_backup.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _TTM_BACKUP_H_
+#define _TTM_BACKUP_H_
+
+#include <linux/mm_types.h>
+#include <linux/shmem_fs.h>
+
+/**
+ * ttm_backup_handle_to_page_ptr() - Convert handle to struct page pointer
+ * @handle: The handle to convert.
+ *
+ * Converts an opaque handle received from the
+ * ttm_backup_backup_page() function to an (invalid)
+ * struct page pointer suitable for a struct page array.
+ *
+ * Return: An (invalid) struct page pointer.
+ */
+static inline struct page *
+ttm_backup_handle_to_page_ptr(unsigned long handle)
+{
+	return (struct page *)(handle << 1 | 1);
+}
+
+/**
+ * ttm_backup_page_ptr_is_handle() - Whether a struct page pointer is a handle
+ * @page: The struct page pointer to check.
+ *
+ * Return: true if the struct page pointer is a handle returned from
+ * ttm_backup_handle_to_page_ptr(). False otherwise.
+ */
+static inline bool ttm_backup_page_ptr_is_handle(const struct page *page)
+{
+	return (unsigned long)page & 1;
+}
+
+/**
+ * ttm_backup_page_ptr_to_handle() - Convert a struct page pointer to a handle
+ * @page: The struct page pointer to convert
+ *
+ * Return: The handle that was previously used in
+ * ttm_backup_handle_to_page_ptr() to obtain a struct page pointer, suitable
+ * for use as argument in the ttm_backup_drop() or
+ * ttm_backup_copy_page() functions.
+ */
+static inline unsigned long
+ttm_backup_page_ptr_to_handle(const struct page *page)
+{
+	WARN_ON(!ttm_backup_page_ptr_is_handle(page));
+	return (unsigned long)page >> 1;
+}
+
+void ttm_backup_drop(struct file *backup, pgoff_t handle);
+
+int ttm_backup_copy_page(struct file *backup, struct page *dst,
+			 pgoff_t handle, bool intr);
+
+s64
+ttm_backup_backup_page(struct file *backup, struct page *page,
+		       bool writeback, pgoff_t idx, gfp_t page_gfp,
+		       gfp_t alloc_gfp);
+
+void ttm_backup_fini(struct file *backup);
+
+u64 ttm_backup_bytes_avail(void);
+
+struct file *ttm_backup_shmem_create(loff_t size);
+
+#endif
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index 8b113c384236..bca3a8849d47 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -83,6 +83,9 @@ enum ttm_bo_type {
  * @resource: structure describing current placement.
  * @ttm: TTM structure holding system pages.
  * @deleted: True if the object is only a zombie and already deleted.
+ * @bulk_move: The bulk move object.
+ * @priority: Priority for LRU, BOs with lower priority are evicted first.
+ * @pin_count: Pin count. * * Base class for TTM buffer object, that deals with data placement and CPU * mappings. GPU mappings are really up to the driver, but for simpler GPUs @@ -128,26 +131,27 @@ struct ttm_buffer_object { struct work_struct delayed_delete; /** - * Special members that are protected by the reserve lock - * and the bo::lock when written to. Can be read with - * either of these locks held. + * @sg: external source of pages and DMA addresses, protected by the + * reservation lock. */ struct sg_table *sg; }; +#define TTM_BO_MAP_IOMEM_MASK 0x80 + /** * struct ttm_bo_kmap_obj * * @virtual: The current kernel virtual address. * @page: The page when kmap'ing a single page. * @bo_kmap_type: Type of bo_kmap. + * @bo: The TTM BO. * * Object describing a kernel mapping. Since a TTM bo may be located * in various memory types with various caching policies, the * mapping can either be an ioremap, a vmap, a kmap or part of a * premapped region. */ -#define TTM_BO_MAP_IOMEM_MASK 0x80 struct ttm_bo_kmap_obj { void *virtual; struct page *page; @@ -168,9 +172,9 @@ struct ttm_bo_kmap_obj { * @gfp_retry_mayfail: Set the __GFP_RETRY_MAYFAIL when allocation pages. * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple * BOs share the same reservation object. - * @force_alloc: Don't check the memory account during suspend or CPU page * faults. Should only be used by TTM internally. * @resv: Reservation object to allow reserved evictions with. + * @bytes_moved: Statistics on how many bytes have been moved. * * Context for TTM operations like changing buffer placement or general memory * allocation. @@ -180,38 +184,73 @@ struct ttm_operation_ctx { bool no_wait_gpu; bool gfp_retry_mayfail; bool allow_res_evict; - bool force_alloc; struct dma_resv *resv; uint64_t bytes_moved; }; +struct ttm_lru_walk; + +/** struct ttm_lru_walk_ops - Operations for a LRU walk. */ +struct ttm_lru_walk_ops { + /** + * process_bo - Process this bo. + * @walk: struct ttm_lru_walk describing the walk. + * @bo: A locked and referenced buffer object. + * + * Return: Negative error code on error, User-defined positive value + * (typically, but not always, size of the processed bo) on success. + * On success, the returned values are summed by the walk and the + * walk exits when its target is met. + * 0 also indicates success, -EBUSY means this bo was skipped. + */ + s64 (*process_bo)(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo); +}; + /** - * ttm_bo_get - reference a struct ttm_buffer_object - * - * @bo: The buffer object. + * struct ttm_lru_walk_arg - Common part for the variants of BO LRU walk. */ -static inline void ttm_bo_get(struct ttm_buffer_object *bo) -{ - kref_get(&bo->kref); -} +struct ttm_lru_walk_arg { + /** @ctx: Pointer to the struct ttm_operation_ctx. */ + struct ttm_operation_ctx *ctx; + /** @ticket: The struct ww_acquire_ctx if any. */ + struct ww_acquire_ctx *ticket; + /** @trylock_only: Only use trylock for locking. */ + bool trylock_only; +}; /** - * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless - * its refcount has already reached zero. - * @bo: The buffer object. - * - * Used to reference a TTM buffer object in lookups where the object is removed - * from the lookup structure during the destructor and for RCU lookups. - * - * Returns: @bo if the referencing was successful, NULL otherwise. + * struct ttm_lru_walk - Structure describing a LRU walk. 
*/ -static inline __must_check struct ttm_buffer_object * -ttm_bo_get_unless_zero(struct ttm_buffer_object *bo) -{ - if (!kref_get_unless_zero(&bo->kref)) - return NULL; - return bo; -} +struct ttm_lru_walk { + /** @ops: Pointer to the ops structure. */ + const struct ttm_lru_walk_ops *ops; + /** @arg: Common bo LRU walk arguments. */ + struct ttm_lru_walk_arg arg; +}; + +s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev, + struct ttm_resource_manager *man, s64 target); + +/** + * struct ttm_bo_shrink_flags - flags to govern the bo shrinking behaviour + * @purge: Purge the content rather than backing it up. + * @writeback: Attempt to immediately write content to swap space. + * @allow_move: Allow moving to system before shrinking. This is typically + * not desired for zombie- or ghost objects (with zombie object meaning + * objects with a zero gem object refcount) + */ +struct ttm_bo_shrink_flags { + u32 purge : 1; + u32 writeback : 1; + u32 allow_move : 1; +}; + +long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo, + const struct ttm_bo_shrink_flags flags); + +bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx); + +bool ttm_bo_shrink_avoid_wait(void); /** * ttm_bo_reserve: @@ -264,7 +303,7 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo, * ttm_bo_reserve_slowpath: * @bo: A pointer to a struct ttm_buffer_object. * @interruptible: Sleep interruptible if waiting. - * @sequence: Set (@bo)->sequence to this value after lock + * @ticket: Ticket used to acquire the ww_mutex. * * This is called after ttm_bo_reserve returns -EAGAIN and we backed off * from all our other reservations. Because there are no other reservations @@ -303,7 +342,7 @@ static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo, } /** - * ttm_bo_move_null = assign memory for a buffer object. + * ttm_bo_move_null - assign memory for a buffer object. * @bo: The bo to assign the memory to * @new_mem: The memory to be assigned. 
* @@ -352,11 +391,9 @@ int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, int ttm_bo_validate(struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_operation_ctx *ctx); -void ttm_bo_put(struct ttm_buffer_object *bo); +void ttm_bo_fini(struct ttm_buffer_object *bo); void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo, struct ttm_lru_bulk_move *bulk); -int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev); -void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched); bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place); int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo, @@ -372,18 +409,20 @@ int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo, int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, unsigned long num_pages, struct ttm_bo_kmap_obj *map); void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); +void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page); int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map); void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map); int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo); -int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, - gfp_t gfp_flags); +s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx, + struct ttm_resource_manager *man, gfp_t gfp_flags, + s64 target); void ttm_bo_pin(struct ttm_buffer_object *bo); void ttm_bo_unpin(struct ttm_buffer_object *bo); -int ttm_mem_evict_first(struct ttm_device *bdev, - struct ttm_resource_manager *man, - const struct ttm_place *place, - struct ttm_operation_ctx *ctx, - struct ww_acquire_ctx *ticket); +int ttm_bo_evict_first(struct ttm_device *bdev, + struct ttm_resource_manager *man, + struct ttm_operation_ctx *ctx); +int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset, + void *buf, int len, int write); vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, struct vm_fault *vmf); vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, @@ -425,5 +464,83 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo); pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res, pgprot_t tmp); void ttm_bo_tt_destroy(struct ttm_buffer_object *bo); +int ttm_bo_populate(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx); +int ttm_bo_setup_export(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx); + +/* Driver LRU walk helpers initially targeted for shrinking. */ + +/** + * struct ttm_bo_lru_cursor - Iterator cursor for TTM LRU list looping + */ +struct ttm_bo_lru_cursor { + /** @res_curs: Embedded struct ttm_resource_cursor. */ + struct ttm_resource_cursor res_curs; + /** + * @bo: Buffer object pointer if a buffer object is refcounted, + * NULL otherwise. + */ + struct ttm_buffer_object *bo; + /** + * @needs_unlock: Valid iff @bo != NULL. The bo resv needs + * unlock before the next iteration or after loop exit. + */ + bool needs_unlock; + /** @arg: Pointer to common BO LRU walk arguments. 
*/ + struct ttm_lru_walk_arg *arg; +}; + +void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs); + +struct ttm_bo_lru_cursor * +ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs, + struct ttm_resource_manager *man, + struct ttm_lru_walk_arg *arg); + +struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs); + +struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs); + +/* + * Defines needed to use autocleanup (linux/cleanup.h) with struct ttm_bo_lru_cursor. + */ +DEFINE_CLASS(ttm_bo_lru_cursor, struct ttm_bo_lru_cursor *, + if (_T) {ttm_bo_lru_cursor_fini(_T); }, + ttm_bo_lru_cursor_init(curs, man, arg), + struct ttm_bo_lru_cursor *curs, struct ttm_resource_manager *man, + struct ttm_lru_walk_arg *arg); +static inline void * +class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T) +{ return *_T; } +#define class_ttm_bo_lru_cursor_is_conditional false + +/** + * ttm_bo_lru_for_each_reserved_guarded() - Iterate over buffer objects owning + * resources on LRU lists. + * @_cursor: struct ttm_bo_lru_cursor to use for the iteration. + * @_man: The resource manager whose LRU lists to iterate over. + * @_arg: The struct ttm_lru_walk_arg to govern the LRU walk. + * @_bo: The struct ttm_buffer_object pointer pointing to the buffer object + * for the current iteration. + * + * Iterate over all resources of @_man and for each resource, attempt to + * reference and lock (using the locking mode detailed in @_ctx) the buffer + * object it points to. If successful, assign @_bo to the address of the + * buffer object and update @_cursor. The iteration is guarded in the + * sense that @_cursor will be initialized before looping start and cleaned + * up at looping termination, even if terminated prematurely by, for + * example a return or break statement. Exiting the loop will also unlock + * (if needed) and unreference @_bo. + * + * Return: If locking of a bo returns an error, then iteration is terminated + * and @_bo is set to a corresponding error pointer. It's illegal to + * dereference @_bo after loop exit. + */ +#define ttm_bo_lru_for_each_reserved_guarded(_cursor, _man, _arg, _bo) \ + scoped_guard(ttm_bo_lru_cursor, _cursor, _man, _arg) \ + for ((_bo) = ttm_bo_lru_cursor_first(_cursor); \ + !IS_ERR_OR_NULL(_bo); \ + (_bo) = ttm_bo_lru_cursor_next(_cursor)) #endif diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h index 235a743d90e1..a18f43e93aba 100644 --- a/include/drm/ttm/ttm_caching.h +++ b/include/drm/ttm/ttm_caching.h @@ -25,6 +25,8 @@ #ifndef _TTM_CACHING_H_ #define _TTM_CACHING_H_ +#include <linux/pgtable.h> + #define TTM_NUM_CACHING_TYPES 3 /** diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h index 4f3e81eac6f3..5618aef462f2 100644 --- a/include/drm/ttm/ttm_device.h +++ b/include/drm/ttm/ttm_device.h @@ -27,6 +27,7 @@ #include <linux/types.h> #include <linux/workqueue.h> +#include <drm/ttm/ttm_allocation.h> #include <drm/ttm/ttm_resource.h> #include <drm/ttm/ttm_pool.h> @@ -141,7 +142,7 @@ struct ttm_device_funcs { * the graphics address space * @ctx: context for this move with parameters * @new_mem: the new memory region receiving the buffer - @ @hop: placement for driver directed intermediate hop + * @hop: placement for driver directed intermediate hop * * Move a buffer between two memory regions. * Returns errno -EMULTIHOP if driver requests a hop @@ -220,10 +221,15 @@ struct ttm_device { struct list_head device_list; /** + * @alloc_flags: TTM_ALLOCATION_* flags. 
+ */ + unsigned int alloc_flags; + + /** * @funcs: Function table for the device. * Constant after bo device init */ - struct ttm_device_funcs *funcs; + const struct ttm_device_funcs *funcs; /** * @sysman: Resource manager for the system domain. @@ -252,9 +258,10 @@ struct ttm_device { spinlock_t lru_lock; /** - * @pinned: Buffer objects which are pinned and so not on any LRU list. + * @unevictable: Buffer objects which are pinned or swapped and as such + * not on an LRU list. */ - struct list_head pinned; + struct list_head unevictable; /** * @dev_mapping: A pointer to the struct address_space for invalidating @@ -271,6 +278,7 @@ struct ttm_device { int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags); int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx, gfp_t gfp_flags); +int ttm_device_prepare_hibernation(struct ttm_device *bdev); static inline struct ttm_resource_manager * ttm_manager_type(struct ttm_device *bdev, int mem_type) @@ -287,10 +295,10 @@ static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type, bdev->man_drv[type] = manager; } -int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs, +int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *funcs, struct device *dev, struct address_space *mapping, struct drm_vma_offset_manager *vma_manager, - bool use_dma_alloc, bool use_dma32); + unsigned int alloc_flags); void ttm_device_fini(struct ttm_device *bdev); void ttm_device_clear_dma_mappings(struct ttm_device *bdev); diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index 03aca29d3ce4..fac1e3e57ebd 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -52,7 +52,7 @@ struct ttm_validate_buffer { }; /** - * function ttm_eu_backoff_reservation + * ttm_eu_backoff_reservation * * @ticket: ww_acquire_ctx from reserve call * @list: thread private list of ttm_validate_buffer structs. @@ -64,14 +64,13 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, struct list_head *list); /** - * function ttm_eu_reserve_buffers + * ttm_eu_reserve_buffers * * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only * non-blocking reserves should be tried. * @list: thread private list of ttm_validate_buffer structs. * @intr: should the wait be interruptible * @dups: [out] optional list of duplicates. - * @del_lru: true if BOs should be removed from the LRU. * * Tries to reserve bos pointed to by the list entries for validation. * If the function returns 0, all buffers are marked as "unfenced", @@ -102,7 +101,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, struct list_head *dups); /** - * function ttm_eu_fence_buffer_objects. + * ttm_eu_fence_buffer_objects * * @ticket: ww_acquire_ctx from reserve call * @list: thread private list of ttm_validate_buffer structs. diff --git a/include/drm/ttm/ttm_kmap_iter.h b/include/drm/ttm/ttm_kmap_iter.h index cc5c09a211b4..fe72631a6e93 100644 --- a/include/drm/ttm/ttm_kmap_iter.h +++ b/include/drm/ttm/ttm_kmap_iter.h @@ -20,7 +20,7 @@ struct iosys_map; */ struct ttm_kmap_iter_ops { /** - * kmap_local() - Map a PAGE_SIZE part of the resource using + * @map_local: Map a PAGE_SIZE part of the resource using * kmap_local semantics. * @res_iter: Pointer to the struct ttm_kmap_iter representing * the resource. 
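The ttm_device_init() rework above folds the former use_dma_alloc/use_dma32 bool pair into a single alloc_flags word built from the TTM_ALLOCATION_* defines introduced in ttm_allocation.h. A minimal sketch of what a driver-side conversion might look like; the my_drm_* names and device fields are hypothetical, only the TTM calls and flags come from this series:

#include <drm/ttm/ttm_allocation.h>
#include <drm/ttm/ttm_device.h>

static int my_drm_ttm_init(struct my_drm_device *mdev)
{
	unsigned int alloc_flags = 0;

	if (mdev->need_coherent_dma)	/* was use_dma_alloc = true */
		alloc_flags |= TTM_ALLOCATION_POOL_USE_DMA_ALLOC;
	if (mdev->only_32bit_dma)	/* was use_dma32 = true */
		alloc_flags |= TTM_ALLOCATION_POOL_USE_DMA32;

	return ttm_device_init(&mdev->bdev, &my_drm_ttm_funcs, mdev->dev,
			       mdev->mapping, mdev->vma_manager, alloc_flags);
}

Note that the funcs argument is now const-qualified, so a driver's ttm_device_funcs table can be moved to rodata as part of the same conversion.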
@@ -31,7 +31,7 @@ struct ttm_kmap_iter_ops { void (*map_local)(struct ttm_kmap_iter *res_iter, struct iosys_map *dmap, pgoff_t i); /** - * unmap_local() - Unmap a PAGE_SIZE part of the resource previously + * @unmap_local: Unmap a PAGE_SIZE part of the resource previously * mapped using kmap_local. * @res_iter: Pointer to the struct ttm_kmap_iter representing * the resource. diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h index 8074d0f6cae5..b510a4812609 100644 --- a/include/drm/ttm/ttm_placement.h +++ b/include/drm/ttm/ttm_placement.h @@ -64,6 +64,12 @@ /* For multihop handling */ #define TTM_PL_FLAG_TEMPORARY (1 << 2) +/* Placement is never used during eviction */ +#define TTM_PL_FLAG_DESIRED (1 << 3) + +/* Placement is only used during eviction */ +#define TTM_PL_FLAG_FALLBACK (1 << 4) + /** * struct ttm_place * @@ -86,16 +92,12 @@ struct ttm_place { * * @num_placement: number of preferred placements * @placement: preferred placements - * @num_busy_placement: number of preferred placements when need to evict buffer - * @busy_placement: preferred placements when need to evict buffer * * Structure indicating the placement you request for an object. */ struct ttm_placement { unsigned num_placement; const struct ttm_place *placement; - unsigned num_busy_placement; - const struct ttm_place *busy_placement; }; #endif diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h index ef09b23d29e3..233581670e78 100644 --- a/include/drm/ttm/ttm_pool.h +++ b/include/drm/ttm/ttm_pool.h @@ -32,9 +32,11 @@ #include <drm/ttm/ttm_caching.h> struct device; -struct ttm_tt; -struct ttm_pool; +struct seq_file; +struct ttm_backup_flags; struct ttm_operation_ctx; +struct ttm_pool; +struct ttm_tt; /** * struct ttm_pool_type - Pool for a certain memory type @@ -61,18 +63,18 @@ struct ttm_pool_type { * struct ttm_pool - Pool for all caching and orders * * @dev: the device we allocate pages for - * @use_dma_alloc: if coherent DMA allocations should be used - * @use_dma32: if GFP_DMA32 should be used + * @nid: which numa node to use + * @alloc_flags: TTM_ALLOCATION_POOL_* flags * @caching: pools for each caching/order */ struct ttm_pool { struct device *dev; + int nid; - bool use_dma_alloc; - bool use_dma32; + unsigned int alloc_flags; struct { - struct ttm_pool_type orders[MAX_ORDER]; + struct ttm_pool_type orders[NR_PAGE_ORDERS]; } caching[TTM_NUM_CACHING_TYPES]; }; @@ -81,11 +83,18 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt); void ttm_pool_init(struct ttm_pool *pool, struct device *dev, - bool use_dma_alloc, bool use_dma32); + int nid, unsigned int alloc_flags); void ttm_pool_fini(struct ttm_pool *pool); int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m); +void ttm_pool_drop_backed_up(struct ttm_tt *tt); + +long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *ttm, + const struct ttm_backup_flags *flags); +int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt, + const struct ttm_operation_ctx *ctx); + int ttm_pool_mgr_init(unsigned long num_pages); void ttm_pool_mgr_fini(void); diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 78a226eba953..33e80f30b8b8 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -31,13 +31,15 @@ #include <linux/iosys-map.h> #include <linux/dma-fence.h> -#include <drm/drm_print.h> #include <drm/ttm/ttm_caching.h> #include <drm/ttm/ttm_kmap_iter.h> #define 
TTM_MAX_BO_PRIORITY 4U -#define TTM_NUM_MEM_TYPES 8 +#define TTM_NUM_MEM_TYPES 9 +struct dentry; +struct dmem_cgroup_device; +struct drm_printer; struct ttm_device; struct ttm_resource_manager; struct ttm_resource; @@ -49,6 +51,52 @@ struct io_mapping; struct sg_table; struct scatterlist; +/** + * define TTM_NUM_MOVE_FENCES - How many entities can be used for evictions + * + * Pipelined evictions can be spread on multiple entities. This + * is the max number of entities that can be used by the driver + * for that purpose. + */ +#define TTM_NUM_MOVE_FENCES 8 + +/** + * enum ttm_lru_item_type - enumerate ttm_lru_item subclasses + */ +enum ttm_lru_item_type { + /** @TTM_LRU_RESOURCE: The resource subclass */ + TTM_LRU_RESOURCE, + /** @TTM_LRU_HITCH: The iterator hitch subclass */ + TTM_LRU_HITCH +}; + +/** + * struct ttm_lru_item - The TTM lru list node base class + * @link: The list link + * @type: The subclass type + */ +struct ttm_lru_item { + struct list_head link; + enum ttm_lru_item_type type; +}; + +/** + * ttm_lru_item_init() - initialize a struct ttm_lru_item + * @item: The item to initialize + * @type: The subclass type + */ +static inline void ttm_lru_item_init(struct ttm_lru_item *item, + enum ttm_lru_item_type type) +{ + item->type = type; + INIT_LIST_HEAD(&item->link); +} + +static inline bool ttm_lru_item_is_res(const struct ttm_lru_item *item) +{ + return item->type == TTM_LRU_RESOURCE; +} + struct ttm_resource_manager_func { /** * struct ttm_resource_manager_func member alloc @@ -142,8 +190,8 @@ struct ttm_resource_manager_func { * @size: Size of the managed region. * @bdev: ttm device this manager belongs to * @func: structure pointer implementing the range manager. See above - * @move_lock: lock for move fence - * @move: The fence of the last pipelined move operation. + * @eviction_lock: lock for eviction fences + * @eviction_fences: The fences of the last pipelined move operation. * @lru: The lru list for this memory type. * * This structure is used to identify and manage memory types for a device. @@ -157,12 +205,12 @@ struct ttm_resource_manager { struct ttm_device *bdev; uint64_t size; const struct ttm_resource_manager_func *func; - spinlock_t move_lock; - /* - * Protected by @move_lock. + /* This is very similar to a dma_resv object, but locking rules make + * it difficult to use one in this context. */ - struct dma_fence *move; + spinlock_t eviction_lock; + struct dma_fence *eviction_fences[TTM_NUM_MOVE_FENCES]; /* * Protected by the bdev->lru_lock. @@ -174,6 +222,11 @@ struct ttm_resource_manager { * bdev->lru_lock. */ uint64_t usage; + + /** + * @cg: &dmem_cgroup_region used for memory accounting, if not NULL. + */ + struct dmem_cgroup_region *cg; }; /** @@ -202,6 +255,7 @@ struct ttm_bus_placement { * @placement: Placement flags. * @bus: Placement on io bus accessible to the CPU * @bo: weak reference to the BO, protected by ttm_device::lru_lock + * @css: cgroup state this resource is charged to * * Structure indicating the placement and space resources used by a * buffer object. 
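Because iterator hitches now live on the same LRU lists as resources, any code walking an LRU list has to type-check each ttm_lru_item before downcasting. A hedged sketch of that pattern, assuming the caller holds the device's lru_lock and "lru" is one of the manager's per-priority sublists (ttm_lru_item_to_res() is the downcast helper added in the ttm_resource.h hunk just below):

static void my_scan_lru(struct list_head *lru)
{
	struct ttm_lru_item *item;

	list_for_each_entry(item, lru, link) {
		struct ttm_resource *res;

		/* Skip TTM_LRU_HITCH markers left behind by active cursors. */
		if (!ttm_lru_item_is_res(item))
			continue;

		res = ttm_lru_item_to_res(item);
		/* inspect res->bo, res->mem_type, ... */
	}
}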
@@ -214,22 +268,25 @@ struct ttm_resource { struct ttm_bus_placement bus; struct ttm_buffer_object *bo; + struct dmem_cgroup_pool_state *css; + /** * @lru: Least recently used list, see &ttm_resource_manager.lru */ - struct list_head lru; + struct ttm_lru_item lru; }; /** - * struct ttm_resource_cursor - * - * @priority: the current priority + * ttm_lru_item_to_res() - Downcast a struct ttm_lru_item to a struct ttm_resource + * @item: The struct ttm_lru_item to downcast * - * Cursor to iterate over the resources in a manager. + * Return: Pointer to the embedding struct ttm_resource */ -struct ttm_resource_cursor { - unsigned int priority; -}; +static inline struct ttm_resource * +ttm_lru_item_to_res(struct ttm_lru_item *item) +{ + return container_of(item, struct ttm_resource, lru); +} /** * struct ttm_lru_bulk_move_pos @@ -246,17 +303,53 @@ struct ttm_lru_bulk_move_pos { /** * struct ttm_lru_bulk_move - * * @pos: first/last lru entry for resources in the each domain/priority + * @cursor_list: The list of cursors currently traversing any of + * the sublists of @pos. Protected by the ttm device's lru_lock. * * Container for the current bulk move state. Should be used with * ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move(). + * All BOs in a bulk_move structure need to share the same reservation object to + * ensure that the bulk as a whole is locked for eviction even if only one BO of + * the bulk is evicted. */ struct ttm_lru_bulk_move { struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY]; + struct list_head cursor_list; }; /** + * struct ttm_resource_cursor + * @man: The resource manager currently being iterated over + * @hitch: A hitch list node inserted before the next resource + * to iterate over. + * @bulk_link: A list link for the list of cursors traversing the + * bulk sublist of @bulk. Protected by the ttm device's lru_lock. + * @bulk: Pointer to struct ttm_lru_bulk_move whose subrange @hitch is + * inserted to. NULL if none. Never dereference this pointer since + * the struct ttm_lru_bulk_move object pointed to might have been + * freed. The pointer is only for comparison. + * @mem_type: The memory type of the LRU list being traversed. + * This field is valid iff @bulk != NULL. + * @priority: the current priority + * + * Cursor to iterate over the resources in a manager. + */ +struct ttm_resource_cursor { + struct ttm_resource_manager *man; + struct ttm_lru_item hitch; + struct list_head bulk_link; + struct ttm_lru_bulk_move *bulk; + unsigned int mem_type; + unsigned int priority; +}; + +void ttm_resource_cursor_init(struct ttm_resource_cursor *cursor, + struct ttm_resource_manager *man); + +void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor); + +/** * struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping + * struct sg_table backed struct ttm_resource. * @base: Embedded struct ttm_kmap_iter providing the usage interface. 
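Since the cursor now records its manager and keeps a hitch node on the list, iteration becomes an init/fini pair around the loop. A usage sketch under the same lru_lock assumption, using the reworked two-argument ttm_resource_manager_for_each_res() from the hunk below:

static unsigned int my_count_lru_resources(struct ttm_resource_manager *man)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;
	unsigned int count = 0;

	ttm_resource_cursor_init(&cursor, man);
	ttm_resource_manager_for_each_res(&cursor, res)
		count++;
	ttm_resource_cursor_fini(&cursor);

	return count;
}

The explicit ttm_resource_cursor_fini() matters here: the hitch sits on the shared LRU list (and on a bulk move's cursor_list), so skipping it on an early exit would leave a stale node behind.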
@@ -338,12 +431,18 @@ static inline bool ttm_resource_manager_used(struct ttm_resource_manager *man) static inline void ttm_resource_manager_cleanup(struct ttm_resource_manager *man) { - dma_fence_put(man->move); - man->move = NULL; + int i; + + for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) { + dma_fence_put(man->eviction_fences[i]); + man->eviction_fences[i] = NULL; + } } void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk); void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk); +void ttm_lru_bulk_move_fini(struct ttm_device *bdev, + struct ttm_lru_bulk_move *bulk); void ttm_resource_add_bulk_move(struct ttm_resource *res, struct ttm_buffer_object *bo); @@ -359,18 +458,16 @@ void ttm_resource_fini(struct ttm_resource_manager *man, int ttm_resource_alloc(struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_resource **res); + struct ttm_resource **res, + struct dmem_cgroup_pool_state **ret_limit_pool); void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res); bool ttm_resource_intersects(struct ttm_device *bdev, struct ttm_resource *res, const struct ttm_place *place, size_t size); -bool ttm_resource_compatible(struct ttm_device *bdev, - struct ttm_resource *res, - const struct ttm_place *place, - size_t size); -bool ttm_resource_compat(struct ttm_resource *res, - struct ttm_placement *placement); +bool ttm_resource_compatible(struct ttm_resource *res, + struct ttm_placement *placement, + bool evicting); void ttm_resource_set_bo(struct ttm_resource *res, struct ttm_buffer_object *bo); @@ -386,24 +483,23 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man, struct drm_printer *p); struct ttm_resource * -ttm_resource_manager_first(struct ttm_resource_manager *man, - struct ttm_resource_cursor *cursor); +ttm_resource_manager_first(struct ttm_resource_cursor *cursor); +struct ttm_resource * +ttm_resource_manager_next(struct ttm_resource_cursor *cursor); + struct ttm_resource * -ttm_resource_manager_next(struct ttm_resource_manager *man, - struct ttm_resource_cursor *cursor, - struct ttm_resource *res); +ttm_lru_first_res_or_null(struct list_head *head); /** * ttm_resource_manager_for_each_res - iterate over all resources - * @man: the resource manager * @cursor: struct ttm_resource_cursor for the current position * @res: the current resource * * Iterate over all the evictable resources in a resource manager. */ -#define ttm_resource_manager_for_each_res(man, cursor, res) \ - for (res = ttm_resource_manager_first(man, cursor); res; \ - res = ttm_resource_manager_next(man, cursor, res)) +#define ttm_resource_manager_for_each_res(cursor, res) \ + for (res = ttm_resource_manager_first(cursor); res; \ + res = ttm_resource_manager_next(cursor)) struct ttm_kmap_iter * ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io, diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index b7d3f3843f1e..406437ad674b 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -32,11 +32,13 @@ #include <drm/ttm/ttm_caching.h> #include <drm/ttm/ttm_kmap_iter.h> +struct ttm_backup; struct ttm_device; struct ttm_tt; struct ttm_resource; struct ttm_buffer_object; struct ttm_operation_ctx; +struct ttm_pool_tt_restore; /** * struct ttm_tt - This is a structure holding the pages, caching- and aperture @@ -79,16 +81,28 @@ struct ttm_tt { * page_flags = TTM_TT_FLAG_EXTERNAL | * TTM_TT_FLAG_EXTERNAL_MAPPABLE; * + * TTM_TT_FLAG_DECRYPTED: The mapped ttm pages should be marked as + * not encrypted. 
The framework will try to match what the dma layer
+ * is doing, but note that it is a little fragile because ttm page
+ * fault handling abuses the DMA api a bit and dma_map_attrs can't be
+ * used to assure pgprot always matches.
+ *
+ * TTM_TT_FLAG_BACKED_UP: TTM internal only. This is set if the
+ * struct ttm_tt has been (possibly partially) backed up.
+ *
  * TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is
  * set by TTM after ttm_tt_populate() has successfully returned, and is
  * then unset when TTM calls ttm_tt_unpopulate().
+ *
  */
-#define TTM_TT_FLAG_SWAPPED (1 << 0)
-#define TTM_TT_FLAG_ZERO_ALLOC (1 << 1)
-#define TTM_TT_FLAG_EXTERNAL (1 << 2)
-#define TTM_TT_FLAG_EXTERNAL_MAPPABLE (1 << 3)
+#define TTM_TT_FLAG_SWAPPED BIT(0)
+#define TTM_TT_FLAG_ZERO_ALLOC BIT(1)
+#define TTM_TT_FLAG_EXTERNAL BIT(2)
+#define TTM_TT_FLAG_EXTERNAL_MAPPABLE BIT(3)
+#define TTM_TT_FLAG_DECRYPTED BIT(4)
+#define TTM_TT_FLAG_BACKED_UP BIT(5)
 
-#define TTM_TT_FLAG_PRIV_POPULATED (1U << 31)
+#define TTM_TT_FLAG_PRIV_POPULATED BIT(6)
 	uint32_t page_flags;
 	/** @num_pages: Number of pages in the page array. */
 	uint32_t num_pages;
@@ -99,10 +113,19 @@ struct ttm_tt {
 	/** @swap_storage: Pointer to shmem struct file for swap storage. */
 	struct file *swap_storage;
 	/**
+	 * @backup: Pointer to backup struct for backed up tts.
+	 * Could be unified with @swap_storage. Meanwhile, the driver's
+	 * ttm_tt_create() callback is responsible for assigning
+	 * this field.
+	 */
+	struct file *backup;
+	/**
 	 * @caching: The current caching state of the pages, see enum
 	 * ttm_caching.
 	 */
 	enum ttm_caching caching;
+	/** @restore: Partial restoration from backup state. TTM private */
+	struct ttm_pool_tt_restore *restore;
 };
 
 /**
@@ -123,6 +146,40 @@ static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
 }
 
 /**
+ * ttm_tt_is_swapped() - Whether the ttm_tt is swapped out or backed up
+ * @tt: The struct ttm_tt.
+ *
+ * Return: true if swapped or backed up, false otherwise.
+ */
+static inline bool ttm_tt_is_swapped(const struct ttm_tt *tt)
+{
+	return tt->page_flags & (TTM_TT_FLAG_SWAPPED | TTM_TT_FLAG_BACKED_UP);
+}
+
+/**
+ * ttm_tt_is_backed_up() - Whether the ttm_tt is backed up
+ * @tt: The struct ttm_tt.
+ *
+ * Return: true if backed up, false otherwise.
+ */
+static inline bool ttm_tt_is_backed_up(const struct ttm_tt *tt)
+{
+	return tt->page_flags & TTM_TT_FLAG_BACKED_UP;
+}
+
+/**
+ * ttm_tt_clear_backed_up() - Clear the ttm_tt backed-up status
+ * @tt: The struct ttm_tt.
+ *
+ * Drivers can use this function to clear the backed-up status,
+ * for example before destroying or re-validating a purged tt.
+ */
+static inline void ttm_tt_clear_backed_up(struct ttm_tt *tt)
+{
+	tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
+}
+
+/**
  * ttm_tt_create
  *
  * @bo: pointer to a struct ttm_buffer_object
@@ -222,6 +279,26 @@ void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
 
 struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
 					    struct ttm_tt *tt);
+unsigned long ttm_tt_pages_limit(void);
+
+/**
+ * struct ttm_backup_flags - Flags to govern backup behaviour.
+ * @purge: Free pages without backing up. Bypass pools.
+ * @writeback: Attempt to copy contents directly to swap space, even
+ * if that means blocking on writes to external memory.
+ */ +struct ttm_backup_flags { + u32 purge : 1; + u32 writeback : 1; +}; + +long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt, + const struct ttm_backup_flags flags); + +int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt, + const struct ttm_operation_ctx *ctx); + +int ttm_tt_setup_backup(struct ttm_tt *tt); #if IS_ENABLED(CONFIG_AGP) #include <linux/agp_backend.h> |
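Read together, the ttm_backup_flags bitfield and the new ttm_tt entry points suggest a simple shrink/restore sequence. A hedged sketch with hypothetical my_tt_* wrappers and error handling trimmed; exactly where ttm_tt_setup_backup() is called (shrink time versus the driver's ttm_tt_create(), per the @backup kernel-doc) is a driver choice:

static long my_tt_swap_out(struct ttm_device *bdev, struct ttm_tt *tt,
			   bool purge)
{
	const struct ttm_backup_flags flags = {
		.purge = purge,		/* drop contents, bypass the pools */
		.writeback = !purge,	/* otherwise push straight to swap */
	};
	int err;

	err = ttm_tt_setup_backup(tt);	/* assumed to set up tt->backup */
	if (err)
		return err;

	return ttm_tt_backup(bdev, tt, flags);
}

static int my_tt_swap_in(struct ttm_device *bdev, struct ttm_tt *tt,
			 struct ttm_operation_ctx *ctx)
{
	if (!ttm_tt_is_backed_up(tt))
		return 0;	/* nothing was backed up */

	return ttm_tt_restore(bdev, tt, ctx);
}

A purged tt would instead be finished off with ttm_tt_clear_backed_up() before re-validation, since its contents are gone rather than recoverable.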
