author     Dave Airlie <airlied@redhat.com>    2021-07-21 11:58:26 +1000
committer  Dave Airlie <airlied@redhat.com>    2021-07-21 11:58:28 +1000
commit     588b3eee528873d73bf777f329d35b2e65e24777 (patch)
tree       5c93b3d1c08f208fb959bfbffa584b00175e3910 /include
parent     b4d7049acebf959e80d11611cd104bc5360fd13b (diff)
parent     17a1837d07be38d957af453e08788edbe1f9343a (diff)
Merge tag 'drm-misc-next-2021-07-16' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for v5.15:

UAPI Changes:

Cross-subsystem Changes:
- udmabuf: Add support for mapping hugepages
- Add dma-buf stats to sysfs.
- Assorted fixes to fbdev/omap2.
- dma-buf: Document DMA_BUF_IOCTL_SYNC
- Improve documentation of dma-buf non-dynamic exporter expectations.
- Add module parameters for dma-buf size and list limit.
- Add HDMI codec support to vc4, to replace vc4's own codec.
- Document dma-buf implicit fencing rules.
- dma_resv_test_signaled test_all handling.

Core Changes:
- Extract i915's eDP backlight code into DRM helpers.
- Assorted docbook updates.
- Rework drm_dp_aux documentation.
- Add support for the DP AUX bus.
- Shrink dma-fence-chain slightly.
- Add alloc/free helpers for dma-fence-chain.
- Assorted fixes to TTM, drm/of, bridge.
- drm_gem_plane_helper_prepare/cleanup_fb is now the default for GEM drivers.
- Small fix for scheduler completion.
- Remove use of drm_device.irq_enabled.
- Print the driver name to dmesg when registering framebuffer.
- Export drm/gem's shadow-plane handling, and use it in vkms.
- Assorted small fixes.

Driver Changes:
- Add eDP backlight to nouveau.
- Assorted fixes and cleanups to nouveau, panfrost, vmwgfx, anx7625, amdgpu, gma500, radeon, mgag200, vgem, vc4, vkms, omapdrm.
- Add support for Samsung DB7430, Samsung ATNA33XC20, EDT ETMV570G2DHU, EDT ETM0350G0DH6, Innolux EJ030NA panels.
- Fix some simple panels missing bus_format and connector types.
- Add mks-guest-stats instrumentation support to vmwgfx.
- Merge i915-ttm topic branch.
- Make s6e63m0 panel use MIPI DBI helpers.
- Add detect() support for AST.
- Use interrupts for hotplug on vc4.
- vmwgfx is now moved to drm-misc-next, as sroland is no longer a maintainer for now.
- vmwgfx now uses copies of vmware's internal device headers.
- Slowly convert ti-sn65dsi83 over to atomic.
- Rework amdgpu dma-resv handling.
- Fix virtio fencing for planes.
- Ensure amdgpu can always evict to SYSTEM.
- Many drivers fixed for implicit fencing rules.
- Set default prepare/cleanup fb for tiny, vram and simple helpers too.
- Rework panfrost gpu reset and related serialization.
- Update VKMS todo list.
- Make bochs a tiny gpu driver, and use vram helper.
- Use Linux irq interfaces instead of drm_irq in some drivers.
- Add support for Raspberry Pi Pico to GUD.

Signed-off-by: Dave Airlie <airlied@redhat.com>

# gpg: Signature made Fri 16 Jul 2021 21:06:04 AEST
# gpg:                using RSA key B97BD6A80CAC4981091AE547FE558C72A67013C3
# gpg: Good signature from "Maarten Lankhorst <maarten.lankhorst@linux.intel.com>" [expired]
# gpg:                 aka "Maarten Lankhorst <maarten@debian.org>" [expired]
# gpg:                 aka "Maarten Lankhorst <maarten.lankhorst@canonical.com>" [expired]
# gpg: Note: This key has expired!
# Primary key fingerprint: B97B D6A8 0CAC 4981 091A E547 FE55 8C72 A670 13C3
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/444811c3-cbec-e9d5-9a6b-9632eda7962a@linux.intel.com
Diffstat (limited to 'include')
-rw-r--r--  include/drm/drm_aperture.h                 14
-rw-r--r--  include/drm/drm_bridge.h                   26
-rw-r--r--  include/drm/drm_connector.h                 5
-rw-r--r--  include/drm/drm_dp_aux_bus.h               57
-rw-r--r--  include/drm/drm_dp_helper.h               175
-rw-r--r--  include/drm/drm_gem_atomic_helper.h         6
-rw-r--r--  include/drm/drm_gem_vram_helper.h          16
-rw-r--r--  include/drm/drm_mipi_dbi.h                  7
-rw-r--r--  include/drm/drm_mipi_dsi.h                  5
-rw-r--r--  include/drm/drm_modeset_helper_vtables.h    7
-rw-r--r--  include/drm/drm_panel.h                     8
-rw-r--r--  include/drm/drm_simple_kms_helper.h         7
-rw-r--r--  include/drm/drm_vma_manager.h               2
-rw-r--r--  include/drm/gpu_scheduler.h                37
-rw-r--r--  include/drm/ttm/ttm_placement.h             7
-rw-r--r--  include/linux/dma-buf.h                   177
-rw-r--r--  include/linux/dma-fence-chain.h            52
-rw-r--r--  include/uapi/drm/drm.h                      4
-rw-r--r--  include/uapi/drm/drm_mode.h                32
-rw-r--r--  include/uapi/drm/vmwgfx_drm.h              41
-rw-r--r--  include/uapi/linux/dma-buf.h               50
21 files changed, 646 insertions, 89 deletions
diff --git a/include/drm/drm_aperture.h b/include/drm/drm_aperture.h
index 6c148078780c..7096703c3949 100644
--- a/include/drm/drm_aperture.h
+++ b/include/drm/drm_aperture.h
@@ -6,20 +6,22 @@
#include <linux/types.h>
struct drm_device;
+struct drm_driver;
struct pci_dev;
int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base,
resource_size_t size);
int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
- bool primary, const char *name);
+ bool primary, const struct drm_driver *req_driver);
-int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const char *name);
+int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
+ const struct drm_driver *req_driver);
/**
* drm_aperture_remove_framebuffers - remove all existing framebuffers
* @primary: also kick vga16fb if present
- * @name: requesting driver name
+ * @req_driver: requesting DRM driver
*
* This function removes all conflicting graphics framebuffer drivers. Use this function on systems
* that can have their framebuffer located anywhere in memory.
@@ -27,9 +29,11 @@ int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const
* Returns:
* 0 on success, or a negative errno code otherwise
*/
-static inline int drm_aperture_remove_framebuffers(bool primary, const char *name)
+static inline int
+drm_aperture_remove_framebuffers(bool primary, const struct drm_driver *req_driver)
{
- return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1, primary, name);
+ return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1, primary,
+ req_driver);
}
#endif
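
As an illustration of the reworked interface, a hypothetical PCI driver now passes its &drm_driver rather than a name string when kicking out firmware framebuffers; a minimal sketch (my_driver and my_pci_probe are assumed names):

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int ret;

        /* Remove firmware framebuffers (e.g. efifb) that claim our apertures. */
        ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &my_driver);
        if (ret)
                return ret;

        /* ... continue with normal device initialization ... */
        return 0;
}
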
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 2195daa289d2..46bdfa48c413 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -171,6 +171,11 @@ struct drm_bridge_funcs {
* signals) feeding it is still running when this callback is called.
*
* The @disable callback is optional.
+ *
+ * NOTE:
+ *
+ * This is deprecated, do not use!
+ * New drivers shall use &drm_bridge_funcs.atomic_disable.
*/
void (*disable)(struct drm_bridge *bridge);
@@ -190,6 +195,11 @@ struct drm_bridge_funcs {
* called.
*
* The @post_disable callback is optional.
+ *
+ * NOTE:
+ *
+ * This is deprecated, do not use!
+ * New drivers shall use &drm_bridge_funcs.atomic_post_disable.
*/
void (*post_disable)(struct drm_bridge *bridge);
@@ -215,9 +225,9 @@ struct drm_bridge_funcs {
*
* NOTE:
*
- * If a need arises to store and access modes adjusted for other
- * locations than the connection between the CRTC and the first bridge,
- * the DRM framework will have to be extended with DRM bridge states.
+ * This is deprecated, do not use!
+ * New drivers shall set their mode in the
+ * &drm_bridge_funcs.atomic_enable operation.
*/
void (*mode_set)(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
@@ -239,6 +249,11 @@ struct drm_bridge_funcs {
* there is one) when this callback is called.
*
* The @pre_enable callback is optional.
+ *
+ * NOTE:
+ *
+ * This is deprecated, do not use!
+ * New drivers shall use &drm_bridge_funcs.atomic_pre_enable.
*/
void (*pre_enable)(struct drm_bridge *bridge);
@@ -259,6 +274,11 @@ struct drm_bridge_funcs {
* chain if there is one.
*
* The @enable callback is optional.
+ *
+ * NOTE:
+ *
+ * This is deprecated, do not use!
+ * New drivers shall use &drm_bridge_funcs.atomic_enable.
*/
void (*enable)(struct drm_bridge *bridge);
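
Since the non-atomic hooks are now documented as deprecated, a new bridge driver would wire up the atomic variants instead. A minimal sketch, assuming the stock atomic bridge-state helpers and hypothetical my_bridge_* functions:

static void my_bridge_atomic_enable(struct drm_bridge *bridge,
                                    struct drm_bridge_state *old_bridge_state)
{
        /* power up and start the link; the atomic replacement for .enable() */
}

static void my_bridge_atomic_disable(struct drm_bridge *bridge,
                                     struct drm_bridge_state *old_bridge_state)
{
        /* stop the link; the atomic replacement for .disable() */
}

static const struct drm_bridge_funcs my_bridge_funcs = {
        .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_bridge_destroy_state,
        .atomic_reset           = drm_atomic_helper_bridge_reset,
        .atomic_enable          = my_bridge_atomic_enable,
        .atomic_disable         = my_bridge_atomic_disable,
};
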
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 714d1a01c065..0a1d9a0fcbb2 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -848,6 +848,11 @@ struct drm_connector_funcs {
* locks to avoid races with concurrent modeset changes need to use
* &drm_connector_helper_funcs.detect_ctx instead.
*
+ * Also note that this callback can be called no matter the
+ * state the connector is in. Drivers that need the underlying
+ * device to be powered to perform the detection will first need
+ * to make sure it's been properly enabled.
+ *
* RETURNS:
*
* drm_connector_status indicating the connector's status.
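
To illustrate the new note, a detect() implementation that needs the device powered might bracket the probe with runtime PM, roughly like this sketch (the my_* names and container helper are assumptions):

static enum drm_connector_status
my_connector_detect(struct drm_connector *connector, bool force)
{
        struct my_device *mdev = to_my_device(connector);       /* assumed container_of() helper */
        enum drm_connector_status status;

        /* The callback may run while the device is powered down, so wake it up first. */
        if (pm_runtime_resume_and_get(mdev->dev) < 0)
                return connector_status_unknown;

        status = my_read_hpd(mdev) ? connector_status_connected
                                   : connector_status_disconnected;

        pm_runtime_put(mdev->dev);
        return status;
}
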
diff --git a/include/drm/drm_dp_aux_bus.h b/include/drm/drm_dp_aux_bus.h
new file mode 100644
index 000000000000..4f19b20b1dd6
--- /dev/null
+++ b/include/drm/drm_dp_aux_bus.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * The DP AUX bus is used for devices that are connected over a DisplayPort
+ * AUX bus. The devices on the far side of the bus are referred to as
+ * endpoints in this code.
+ */
+
+#ifndef _DP_AUX_BUS_H_
+#define _DP_AUX_BUS_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+/**
+ * struct dp_aux_ep_device - Main dev structure for DP AUX endpoints
+ *
+ * This is used to instantiate devices that are connected via a DP AUX
+ * bus. Usually the device is a panel, but conceivably other devices could
+ * be hooked up there.
+ */
+struct dp_aux_ep_device {
+ /** @dev: The normal dev pointer */
+ struct device dev;
+ /** @aux: Pointer to the aux bus */
+ struct drm_dp_aux *aux;
+};
+
+struct dp_aux_ep_driver {
+ int (*probe)(struct dp_aux_ep_device *aux_ep);
+ void (*remove)(struct dp_aux_ep_device *aux_ep);
+ void (*shutdown)(struct dp_aux_ep_device *aux_ep);
+ struct device_driver driver;
+};
+
+static inline struct dp_aux_ep_device *to_dp_aux_ep_dev(struct device *dev)
+{
+ return container_of(dev, struct dp_aux_ep_device, dev);
+}
+
+static inline struct dp_aux_ep_driver *to_dp_aux_ep_drv(struct device_driver *drv)
+{
+ return container_of(drv, struct dp_aux_ep_driver, driver);
+}
+
+int of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux);
+void of_dp_aux_depopulate_ep_devices(struct drm_dp_aux *aux);
+int devm_of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux);
+
+#define dp_aux_dp_driver_register(aux_ep_drv) \
+ __dp_aux_dp_driver_register(aux_ep_drv, THIS_MODULE)
+int __dp_aux_dp_driver_register(struct dp_aux_ep_driver *aux_ep_drv,
+ struct module *owner);
+void dp_aux_dp_driver_unregister(struct dp_aux_ep_driver *aux_ep_drv);
+
+#endif /* _DP_AUX_BUS_H_ */
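
A sketch of how a panel-style endpoint driver could sit on the new bus (the compatible string and my_* names are made up); the DP controller driver on the host side would call devm_of_dp_aux_populate_ep_devices() on its &drm_dp_aux to create these devices:

static int my_panel_probe(struct dp_aux_ep_device *aux_ep)
{
        /* aux_ep->aux is ready for DPCD access from here on */
        return 0;
}

static const struct of_device_id my_panel_of_match[] = {
        { .compatible = "vendor,hypothetical-edp-panel" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, my_panel_of_match);

static struct dp_aux_ep_driver my_panel_driver = {
        .probe = my_panel_probe,
        .driver = {
                .name = "my-edp-panel",
                .of_match_table = my_panel_of_match,
        },
};

Registration then goes through dp_aux_dp_driver_register()/dp_aux_dp_driver_unregister() from the module init/exit paths.
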
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 3f2715eb965f..1d5b3dbb6e56 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -30,6 +30,7 @@
struct drm_device;
struct drm_dp_aux;
+struct drm_panel;
/*
* Unless otherwise noted, all values are from the DP 1.1a spec. Note that
@@ -1818,6 +1819,24 @@ drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
DP_MSA_TIMING_PAR_IGNORED;
}
+/**
+ * drm_edp_backlight_supported() - Check an eDP DPCD for VESA backlight support
+ * @edp_dpcd: The DPCD to check
+ *
+ * Note that currently this function will return %false for panels which support various DPCD
+ * backlight features but which require the brightness be set through PWM, and don't support setting
+ * the brightness level via the DPCD. This is a TODO.
+ *
+ * Returns: %True if @edp_dpcd indicates that VESA backlight controls are supported, %false
+ * otherwise
+ */
+static inline bool
+drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE])
+{
+ return (edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP) &&
+ (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP);
+}
+
/*
* DisplayPort AUX channel
*/
@@ -1858,35 +1877,6 @@ struct drm_dp_aux_cec {
/**
* struct drm_dp_aux - DisplayPort AUX channel
- * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter
- * @ddc: I2C adapter that can be used for I2C-over-AUX communication
- * @dev: pointer to struct device that is the parent for this AUX channel
- * @drm_dev: pointer to the &drm_device that owns this AUX channel. Beware, this
- * may be %NULL before drm_dp_aux_register() has been called.
- * @crtc: backpointer to the crtc that is currently using this AUX channel
- * @hw_mutex: internal mutex used for locking transfers
- * @crc_work: worker that captures CRCs for each frame
- * @crc_count: counter of captured frame CRCs
- * @transfer: transfers a message representing a single AUX transaction
- *
- * The @dev field should be set to a pointer to the device that implements the
- * AUX channel. As well, the @drm_dev field should be set to the &drm_device
- * that will be using this AUX channel as early as possible. For many graphics
- * drivers this should happen before drm_dp_aux_init(), however it's perfectly
- * fine to set this field later so long as it's assigned before calling
- * drm_dp_aux_register().
- *
- * The @name field may be used to specify the name of the I2C adapter. If set to
- * %NULL, dev_name() of @dev will be used.
- *
- * Drivers provide a hardware-specific implementation of how transactions are
- * executed via the @transfer() function. A pointer to a &drm_dp_aux_msg
- * structure describing the transaction is passed into this function. Upon
- * success, the implementation should return the number of payload bytes that
- * were transferred, or a negative error-code on failure. Helpers propagate
- * errors from the @transfer() function, with the exception of the %-EBUSY
- * error, which causes a transaction to be retried. On a short, helpers will
- * return %-EPROTO to make it simpler to check for failure.
*
* An AUX channel can also be used to transport I2C messages to a sink. A
* typical application of that is to access an EDID that's present in the sink
@@ -1897,22 +1887,96 @@ struct drm_dp_aux_cec {
* transfers by default; if a partial response is received, the adapter will
* drop down to the size given by the partial response for this transaction
* only.
- *
- * Note that the aux helper code assumes that the @transfer() function only
- * modifies the reply field of the &drm_dp_aux_msg structure. The retry logic
- * and i2c helpers assume this is the case.
*/
struct drm_dp_aux {
+ /**
+ * @name: user-visible name of this AUX channel and the
+ * I2C-over-AUX adapter.
+ *
+ * It's also used to specify the name of the I2C adapter. If set
+ * to %NULL, dev_name() of @dev will be used.
+ */
const char *name;
+
+ /**
+ * @ddc: I2C adapter that can be used for I2C-over-AUX
+ * communication
+ */
struct i2c_adapter ddc;
+
+ /**
+ * @dev: pointer to struct device that is the parent for this
+ * AUX channel.
+ */
struct device *dev;
+
+ /**
+ * @drm_dev: pointer to the &drm_device that owns this AUX channel.
+ * Beware, this may be %NULL before drm_dp_aux_register() has been
+ * called.
+ *
+ * It should be set to the &drm_device that will be using this AUX
+ * channel as early as possible. For many graphics drivers this should
+ * happen before drm_dp_aux_init(), however it's perfectly fine to set
+ * this field later so long as it's assigned before calling
+ * drm_dp_aux_register().
+ */
struct drm_device *drm_dev;
+
+ /**
+ * @crtc: backpointer to the crtc that is currently using this
+ * AUX channel
+ */
struct drm_crtc *crtc;
+
+ /**
+ * @hw_mutex: internal mutex used for locking transfers.
+ *
+ * Note that if the underlying hardware is shared among multiple
+ * channels, the driver needs to do additional locking to
+ * prevent concurrent access.
+ */
struct mutex hw_mutex;
+
+ /**
+ * @crc_work: worker that captures CRCs for each frame
+ */
struct work_struct crc_work;
+
+ /**
+ * @crc_count: counter of captured frame CRCs
+ */
u8 crc_count;
+
+ /**
+ * @transfer: transfers a message representing a single AUX
+ * transaction.
+ *
+ * This is a hardware-specific implementation of how
+ * transactions are executed that the drivers must provide.
+ *
+ * A pointer to a &drm_dp_aux_msg structure describing the
+ * transaction is passed into this function. Upon success, the
+ * implementation should return the number of payload bytes that
+ * were transferred, or a negative error-code on failure.
+ *
+ * Helpers will propagate these errors, with the exception of
+ * the %-EBUSY error, which causes a transaction to be retried.
+ * On a short, helpers will return %-EPROTO to make it simpler
+ * to check for failure.
+ *
+ * The @transfer() function must only modify the reply field of
+ * the &drm_dp_aux_msg structure. The retry logic and i2c
+ * helpers assume this is the case.
+ *
+ * Also note that this callback can be called no matter the
+ * state @dev is in. Drivers that need that device to be powered
+ * to perform this operation will first need to make sure it's
+ * been properly enabled.
+ */
ssize_t (*transfer)(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg);
+
/**
* @i2c_nack_count: Counts I2C NACKs, used for DP validation.
*/
@@ -2124,6 +2188,51 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
return desc->quirks & BIT(quirk);
}
+/**
+ * struct drm_edp_backlight_info - Probed eDP backlight info struct
+ * @pwmgen_bit_count: The pwmgen bit count
+ * @pwm_freq_pre_divider: The PWM frequency pre-divider value being used for this backlight, if any
+ * @max: The maximum backlight level that may be set
+ * @lsb_reg_used: Do we also write values to the DP_EDP_BACKLIGHT_BRIGHTNESS_LSB register?
+ * @aux_enable: Does the panel support the AUX enable cap?
+ *
+ * This structure contains various data about an eDP backlight, which can be populated by using
+ * drm_edp_backlight_init().
+ */
+struct drm_edp_backlight_info {
+ u8 pwmgen_bit_count;
+ u8 pwm_freq_pre_divider;
+ u16 max;
+
+ bool lsb_reg_used : 1;
+ bool aux_enable : 1;
+};
+
+int
+drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl,
+ u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE],
+ u16 *current_level, u8 *current_mode);
+int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
+ u16 level);
+int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
+ u16 level);
+int drm_edp_backlight_disable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl);
+
+#if IS_ENABLED(CONFIG_DRM_KMS_HELPER) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
+ (IS_MODULE(CONFIG_DRM_KMS_HELPER) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE)))
+
+int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux);
+
+#else
+
+static inline int drm_panel_dp_aux_backlight(struct drm_panel *panel,
+ struct drm_dp_aux *aux)
+{
+ return 0;
+}
+
+#endif
+
#ifdef CONFIG_DRM_DP_CEC
void drm_dp_cec_irq(struct drm_dp_aux *aux);
void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
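
To illustrate the new eDP backlight helpers, a rough usage sketch, assuming edp_dpcd[] has already been read from the DP_EDP_DPCD_REV range and aux is the panel's &drm_dp_aux (passing 0 as driver_pwm_freq_hz is taken here to mean "keep the panel's current PWM frequency", which is an assumption):

struct drm_edp_backlight_info bl;
u16 current_level;
u8 current_mode;
int ret;

if (!drm_edp_backlight_supported(edp_dpcd))
        return -ENODEV;

ret = drm_edp_backlight_init(aux, &bl, 0, edp_dpcd, &current_level, &current_mode);
if (ret < 0)
        return ret;

ret = drm_edp_backlight_enable(aux, &bl, current_level);
/* ... later brightness updates ... */
drm_edp_backlight_set_level(aux, &bl, bl.max / 2);      /* 50% brightness */
drm_edp_backlight_disable(aux, &bl);

Panel drivers that just want a backlight device can instead call drm_panel_dp_aux_backlight(panel, aux), which registers a backlight device on top of these helpers.
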
diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h
index cfc5adee3d13..d82c23622156 100644
--- a/include/drm/drm_gem_atomic_helper.h
+++ b/include/drm/drm_gem_atomic_helper.h
@@ -53,6 +53,12 @@ to_drm_shadow_plane_state(struct drm_plane_state *state)
return container_of(state, struct drm_shadow_plane_state, base);
}
+void __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
+ struct drm_shadow_plane_state *new_shadow_plane_state);
+void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state);
+void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
+ struct drm_shadow_plane_state *shadow_plane_state);
+
void drm_gem_reset_shadow_plane(struct drm_plane *plane);
struct drm_plane_state *drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane);
void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index 27ed7e9243b9..d3cf06c9af65 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -124,6 +124,18 @@ void
drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
+/**
+ * DRM_GEM_VRAM_PLANE_HELPER_FUNCS -
+ * Initializes struct drm_plane_helper_funcs for VRAM handling
+ *
+ * Drivers may use GEM BOs as VRAM helpers for the framebuffer memory. This
+ * macro initializes struct drm_plane_helper_funcs to use the respective helper
+ * functions.
+ */
+#define DRM_GEM_VRAM_PLANE_HELPER_FUNCS \
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, \
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb
+
/*
* Helpers for struct drm_simple_display_pipe_funcs
*/
@@ -192,10 +204,6 @@ void drm_vram_mm_debugfs_init(struct drm_minor *minor);
* Helpers for integration with struct drm_device
*/
-struct drm_vram_mm *drm_vram_helper_alloc_mm(
- struct drm_device *dev, uint64_t vram_base, size_t vram_size);
-void drm_vram_helper_release_mm(struct drm_device *dev);
-
int drmm_vram_helper_init(struct drm_device *dev, uint64_t vram_base,
size_t vram_size);
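
A sketch of how a small VRAM-backed driver would pick up the new macro and the managed MM init (the non-managed drm_vram_helper_alloc_mm()/release_mm() entry points are removed); the my_* names are assumptions:

static const struct drm_plane_helper_funcs my_primary_plane_helper_funcs = {
        DRM_GEM_VRAM_PLANE_HELPER_FUNCS,        /* .prepare_fb + .cleanup_fb */
        .atomic_check = my_plane_atomic_check,
        .atomic_update = my_plane_atomic_update,
};

/* device init: managed, so no explicit release call is needed */
ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
                            pci_resource_len(pdev, 0));
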
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index f543d6e3e822..05e194958265 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -183,7 +183,12 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
#define mipi_dbi_command(dbi, cmd, seq...) \
({ \
const u8 d[] = { seq }; \
- mipi_dbi_command_stackbuf(dbi, cmd, d, ARRAY_SIZE(d)); \
+ struct device *dev = &(dbi)->spi->dev; \
+ int ret; \
+ ret = mipi_dbi_command_stackbuf(dbi, cmd, d, ARRAY_SIZE(d)); \
+ if (ret) \
+ dev_err_ratelimited(dev, "error %d when sending command %#02x\n", ret, cmd); \
+ ret; \
})
#ifdef CONFIG_DEBUG_FS
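
With this change mipi_dbi_command() still evaluates to the underlying return code but additionally logs failures via dev_err_ratelimited(), so existing call sites keep working and callers may optionally check the result; a small illustrative fragment:

#include <video/mipi_display.h>

int ret;

mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(120);                            /* panel-specific wake-up delay (illustrative) */
ret = mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
if (ret)
        return ret;                     /* failure was already logged by the macro */
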
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 360e6377e84b..849d3029e303 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -80,6 +80,11 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
* Note that typically DSI packet transmission is atomic, so the .transfer()
* function will seldom return anything other than the number of bytes
* contained in the transmit buffer on success.
+ *
+ * Also note that those callbacks can be called no matter the state the
+ * host is in. Drivers that need the underlying device to be powered to
+ * perform these operations will first need to make sure it's been
+ * properly enabled.
*/
struct mipi_dsi_host_ops {
int (*attach)(struct mipi_dsi_host *host,
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index f3a4b47b3986..fdfa9f37ce05 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1178,8 +1178,11 @@ struct drm_plane_helper_funcs {
* equivalent functionality should be implemented through private
* members in the plane structure.
*
- * Drivers which always have their buffers pinned should use
- * drm_gem_plane_helper_prepare_fb() for this hook.
+ * For GEM drivers that have neither a @prepare_fb nor a @cleanup_fb hook
+ * set, drm_gem_plane_helper_prepare_fb() is called automatically to
+ * implement this. Other drivers that need additional plane processing
+ * can call drm_gem_plane_helper_prepare_fb() from their @prepare_fb
+ * hook.
*
* The helpers will call @cleanup_fb with matching arguments for every
* successful call to this hook.
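
In practice a GEM driver that only needs the default implicit-fence handling can now leave @prepare_fb and @cleanup_fb unset entirely; a driver with extra per-plane work would chain the helper, roughly like this sketch (my_pin_fb is a stand-in):

static int my_plane_prepare_fb(struct drm_plane *plane,
                               struct drm_plane_state *state)
{
        int ret;

        /* keep the default behaviour (implicit fencing etc.) */
        ret = drm_gem_plane_helper_prepare_fb(plane, state);
        if (ret)
                return ret;

        /* plus driver-specific preparation (illustrative) */
        return my_pin_fb(state->fb);
}
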
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 33605c3f0eba..4602f833eb51 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -64,8 +64,8 @@ enum drm_panel_orientation;
* the panel. This is the job of the .unprepare() function.
*
* Backlight can be handled automatically if configured using
- * drm_panel_of_backlight(). Then the driver does not need to implement the
- * functionality to enable/disable backlight.
+ * drm_panel_of_backlight() or drm_panel_dp_aux_backlight(). Then the driver
+ * does not need to implement the functionality to enable/disable backlight.
*/
struct drm_panel_funcs {
/**
@@ -144,8 +144,8 @@ struct drm_panel {
* Backlight device, used to turn on backlight after the call
* to enable(), and to turn off backlight before the call to
* disable().
- * backlight is set by drm_panel_of_backlight() and drivers
- * shall not assign it.
+ * backlight is set by drm_panel_of_backlight() or
+ * drm_panel_dp_aux_backlight() and drivers shall not assign it.
*/
struct backlight_device *backlight;
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index ef9944e9c5fc..cf07132d4ee8 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -116,8 +116,11 @@ struct drm_simple_display_pipe_funcs {
* the documentation for the &drm_plane_helper_funcs.prepare_fb hook for
* more details.
*
- * Drivers which always have their buffers pinned should use
- * drm_gem_simple_display_pipe_prepare_fb() for this hook.
+ * For GEM drivers that have neither a @prepare_fb nor a @cleanup_fb hook
+ * set, drm_gem_simple_display_pipe_prepare_fb() is called automatically
+ * to implement this. Other drivers that need additional plane
+ * processing can call drm_gem_simple_display_pipe_prepare_fb() from
+ * their @prepare_fb hook.
*/
int (*prepare_fb)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 76ac5e97a559..4f8c35206f7c 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -53,7 +53,7 @@ struct drm_vma_offset_node {
rwlock_t vm_lock;
struct drm_mm_node vm_node;
struct rb_root vm_files;
- bool readonly:1;
+ void *driver_private;
};
struct drm_vma_offset_manager {
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index d18af49fd009..88ae7f331bb1 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -239,6 +239,38 @@ struct drm_sched_backend_ops {
* @timedout_job: Called when a job has taken too long to execute,
* to trigger GPU recovery.
*
+ * This method is called in a workqueue context.
+ *
+ * Drivers typically issue a reset to recover from GPU hangs, and this
+ * procedure usually follows this workflow:
+ *
+ * 1. Stop the scheduler using drm_sched_stop(). This will park the
+ * scheduler thread and cancel the timeout work, guaranteeing that
+ * nothing is queued while we reset the hardware queue
+ * 2. Try to gracefully stop non-faulty jobs (optional)
+ * 3. Issue a GPU reset (driver-specific)
+ * 4. Re-submit jobs using drm_sched_resubmit_jobs()
+ * 5. Restart the scheduler using drm_sched_start(). At that point, new
+ * jobs can be queued, and the scheduler thread is unblocked
+ *
+ * Note that some GPUs have distinct hardware queues but need to reset
+ * the GPU globally, which requires extra synchronization between the
+ * timeout handlers of the different &drm_gpu_scheduler instances. One way to
+ * achieve this synchronization is to create an ordered workqueue
+ * (using alloc_ordered_workqueue()) at the driver level, and pass this
+ * queue to drm_sched_init(), to guarantee that timeout handlers are
+ * executed sequentially. The above workflow needs to be slightly
+ * adjusted in that case:
+ *
+ * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
+ * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
+ * the reset (optional)
+ * 3. Issue a GPU reset on all faulty queues (driver-specific)
+ * 4. Re-submit jobs on all schedulers impacted by the reset using
+ * drm_sched_resubmit_jobs()
+ * 5. Restart all schedulers that were stopped in step #1 using
+ * drm_sched_start()
+ *
* Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
* and the underlying driver has started or completed recovery.
*
@@ -269,6 +301,7 @@ struct drm_sched_backend_ops {
* finished.
* @hw_rq_count: the number of jobs currently in the hardware queue.
* @job_id_count: used to assign a unique id to each job.
+ * @timeout_wq: workqueue used to queue @work_tdr
* @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
* timeout interval is over.
* @thread: the kthread on which the scheduler runs.
@@ -293,6 +326,7 @@ struct drm_gpu_scheduler {
wait_queue_head_t job_scheduled;
atomic_t hw_rq_count;
atomic64_t job_id_count;
+ struct workqueue_struct *timeout_wq;
struct delayed_work work_tdr;
struct task_struct *thread;
struct list_head pending_list;
@@ -306,7 +340,8 @@ struct drm_gpu_scheduler {
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
- uint32_t hw_submission, unsigned hang_limit, long timeout,
+ uint32_t hw_submission, unsigned hang_limit,
+ long timeout, struct workqueue_struct *timeout_wq,
atomic_t *score, const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
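
A condensed sketch of the single-queue recovery flow documented for @timedout_job above; the reset itself is a driver-specific placeholder:

static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *bad_job)
{
        struct drm_gpu_scheduler *sched = bad_job->sched;

        drm_sched_stop(sched, bad_job);         /* 1. park the scheduler thread */
        my_gpu_reset(sched);                    /* 3. driver-specific reset (placeholder) */
        drm_sched_resubmit_jobs(sched);         /* 4. re-queue the pending jobs */
        drm_sched_start(sched, true);           /* 5. accept new jobs again */

        return DRM_GPU_SCHED_STAT_NOMINAL;
}

For GPUs that reset globally, the new timeout_wq argument to drm_sched_init() would be a single ordered workqueue shared by all affected schedulers (or NULL to fall back to the system workqueue), serializing these handlers.
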
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
index aa6ba4d0cf78..8995c9e4ec1b 100644
--- a/include/drm/ttm/ttm_placement.h
+++ b/include/drm/ttm/ttm_placement.h
@@ -47,8 +47,11 @@
* top of the memory area, instead of the bottom.
*/
-#define TTM_PL_FLAG_CONTIGUOUS (1 << 19)
-#define TTM_PL_FLAG_TOPDOWN (1 << 22)
+#define TTM_PL_FLAG_CONTIGUOUS (1 << 0)
+#define TTM_PL_FLAG_TOPDOWN (1 << 1)
+
+/* For multihop handling */
+#define TTM_PL_FLAG_TEMPORARY (1 << 2)
/**
* struct ttm_place
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index efdc56b9d95f..2b814fde0d11 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -96,6 +96,12 @@ struct dma_buf_ops {
* This is called automatically for non-dynamic importers from
* dma_buf_attach().
*
+ * Note that, similar to non-dynamic exporters in their @map_dma_buf
+ * callback, the driver must guarantee that the memory is available for
+ * use and cleared of any old data by the time this function returns.
+ * Drivers which pipeline their buffer moves internally must wait for
+ * all moves and clears to complete.
+ *
* Returns:
*
* 0 on success, negative error code on failure.
@@ -144,6 +150,15 @@ struct dma_buf_ops {
* This is always called with the dmabuf->resv object locked when
* the dynamic_mapping flag is true.
*
+ * Note that for non-dynamic exporters the driver must guarantee that
+ * the memory is available for use and cleared of any old data by
+ * the time this function returns. Drivers which pipeline their buffer
+ * moves internally must wait for all moves and clears to complete.
+ * Dynamic exporters do not need to follow this rule: For non-dynamic
+ * importers the buffer is already pinned through @pin, which has the
+ * same requirements. Dynamic importers, on the other hand, are required
+ * to obey the dma_resv fences.
+ *
* Returns:
*
* A &sg_table scatter list of the backing storage of the DMA buffer,
@@ -274,27 +289,6 @@ struct dma_buf_ops {
/**
* struct dma_buf - shared buffer object
- * @size: size of the buffer; invariant over the lifetime of the buffer.
- * @file: file pointer used for sharing buffers across, and for refcounting.
- * @attachments: list of dma_buf_attachment that denotes all devices attached,
- * protected by dma_resv lock.
- * @ops: dma_buf_ops associated with this buffer object.
- * @lock: used internally to serialize list manipulation, attach/detach and
- * vmap/unmap
- * @vmapping_counter: used internally to refcnt the vmaps
- * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
- * @exp_name: name of the exporter; useful for debugging.
- * @name: userspace-provided name; useful for accounting and debugging,
- * protected by @resv.
- * @name_lock: spinlock to protect name access
- * @owner: pointer to exporter module; used for refcounting when exporter is a
- * kernel module.
- * @list_node: node for dma_buf accounting and debugging.
- * @priv: exporter specific private data for this buffer object.
- * @resv: reservation object linked to this dma-buf
- * @poll: for userspace poll support
- * @cb_excl: for userspace poll support
- * @cb_shared: for userspace poll support
*
* This represents a shared buffer, created by calling dma_buf_export(). The
* userspace representation is a normal file descriptor, which can be created by
@@ -306,30 +300,161 @@ struct dma_buf_ops {
* Device DMA access is handled by the separate &struct dma_buf_attachment.
*/
struct dma_buf {
+ /**
+ * @size:
+ *
+ * Size of the buffer; invariant over the lifetime of the buffer.
+ */
size_t size;
+
+ /**
+ * @file:
+ *
+ * File pointer used for sharing buffers across, and for refcounting.
+ * See dma_buf_get() and dma_buf_put().
+ */
struct file *file;
+
+ /**
+ * @attachments:
+ *
+ * List of dma_buf_attachment that denotes all devices attached,
+ * protected by &dma_resv lock @resv.
+ */
struct list_head attachments;
+
+ /** @ops: dma_buf_ops associated with this buffer object. */
const struct dma_buf_ops *ops;
+
+ /**
+ * @lock:
+ *
+ * Used internally to serialize list manipulation, attach/detach and
+ * vmap/unmap. Note that in many cases this is superseded by
+ * dma_resv_lock() on @resv.
+ */
struct mutex lock;
+
+ /**
+ * @vmapping_counter:
+ *
+ * Used internally to refcnt the vmaps returned by dma_buf_vmap().
+ * Protected by @lock.
+ */
unsigned vmapping_counter;
+
+ /**
+ * @vmap_ptr:
+ * The current vmap ptr if @vmapping_counter > 0. Protected by @lock.
+ */
struct dma_buf_map vmap_ptr;
+
+ /**
+ * @exp_name:
+ *
+ * Name of the exporter; useful for debugging. See the
+ * DMA_BUF_SET_NAME IOCTL.
+ */
const char *exp_name;
+
+ /**
+ * @name:
+ *
+ * Userspace-provided name; useful for accounting and debugging,
+ * protected by dma_resv_lock() on @resv and @name_lock for read access.
+ */
const char *name;
+
+ /** @name_lock: Spinlock to protect read access to @name. */
spinlock_t name_lock;
+
+ /**
+ * @owner:
+ *
+ * Pointer to exporter module; used for refcounting when exporter is a
+ * kernel module.
+ */
struct module *owner;
+
+ /** @list_node: node for dma_buf accounting and debugging. */
struct list_head list_node;
+
+ /** @priv: exporter specific private data for this buffer object. */
void *priv;
+
+ /**
+ * @resv:
+ *
+ * Reservation object linked to this dma-buf.
+ *
+ * IMPLICIT SYNCHRONIZATION RULES:
+ *
+ * Drivers which support implicit synchronization of buffer access as
+ * e.g. exposed in `Implicit Fence Poll Support`_ must follow the
+ * below rules.
+ *
+ * - Drivers must add a shared fence through dma_resv_add_shared_fence()
+ * for anything the userspace API considers a read access. This highly
+ * depends upon the API and window system.
+ *
+ * - Similarly drivers must set the exclusive fence through
+ * dma_resv_add_excl_fence() for anything the userspace API considers
+ * write access.
+ *
+ * - Drivers may just always set the exclusive fence, since that only
+ * causes unnecessary synchronization, but no correctness issues.
+ *
+ * - Some drivers only expose a synchronous userspace API with no
+ * pipelining across drivers. These do not set any fences for their
+ * access. An example here is v4l.
+ *
+ * DYNAMIC IMPORTER RULES:
+ *
+ * Dynamic importers, see dma_buf_attachment_is_dynamic(), have
+ * additional constraints on how they set up fences:
+ *
+ * - Dynamic importers must obey the exclusive fence and wait for it to
+ * signal before allowing access to the buffer's underlying storage
+ * through the device.
+ *
+ * - Dynamic importers should set fences for any access that they can't
+ * disable immediately from their &dma_buf_attach_ops.move_notify
+ * callback.
+ */
struct dma_resv *resv;
- /* poll support */
+ /** @poll: for userspace poll support */
wait_queue_head_t poll;
+ /** @cb_excl: for userspace poll support */
+ /** @cb_shared: for userspace poll support */
struct dma_buf_poll_cb_t {
struct dma_fence_cb cb;
wait_queue_head_t *poll;
__poll_t active;
} cb_excl, cb_shared;
+#ifdef CONFIG_DMABUF_SYSFS_STATS
+ /**
+ * @sysfs_entry:
+ *
+ * For exposing information about this buffer in sysfs. See also
+ * `DMA-BUF statistics`_ for the uapi this enables.
+ */
+ struct dma_buf_sysfs_entry {
+ struct kobject kobj;
+ struct dma_buf *dmabuf;
+
+ /**
+ * @sysfs_entry.attachment_uid:
+ *
+ * This is protected by the dma_resv_lock() on @resv and is
+ * incremented on each attach.
+ */
+ unsigned int attachment_uid;
+ struct kset *attach_stats_kset;
+ } *sysfs_entry;
+#endif
};
/**
@@ -379,6 +504,7 @@ struct dma_buf_attach_ops {
* @importer_ops: importer operations for this attachment, if provided
* dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
* @importer_priv: importer specific attachment data.
+ * @sysfs_entry: For exposing information about this attachment in sysfs.
*
* This structure holds the attachment information between the dma_buf buffer
* and its user device(s). The list contains one attachment struct per device
@@ -399,6 +525,13 @@ struct dma_buf_attachment {
const struct dma_buf_attach_ops *importer_ops;
void *importer_priv;
void *priv;
+#ifdef CONFIG_DMABUF_SYSFS_STATS
+ /* for sysfs stats */
+ struct dma_buf_attach_sysfs_entry {
+ struct kobject kobj;
+ unsigned int map_counter;
+ } *sysfs_entry;
+#endif
};
/**
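
A sketch of a submission path following the implicit-synchronization rules documented for @resv; job_fence and writes_buffer are placeholders:

struct dma_resv *resv = dmabuf->resv;
int ret;

dma_resv_lock(resv, NULL);
ret = dma_resv_reserve_shared(resv, 1);         /* make room for one more shared fence */
if (!ret) {
        if (writes_buffer)
                dma_resv_add_excl_fence(resv, job_fence);       /* userspace-visible write */
        else
                dma_resv_add_shared_fence(resv, job_fence);     /* userspace-visible read */
}
dma_resv_unlock(resv);
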
diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h
index 10462a029da2..54fe3443fd2c 100644
--- a/include/linux/dma-fence-chain.h
+++ b/include/linux/dma-fence-chain.h
@@ -12,25 +12,41 @@
#include <linux/dma-fence.h>
#include <linux/irq_work.h>
+#include <linux/slab.h>
/**
* struct dma_fence_chain - fence to represent a node of a fence chain
* @base: fence base class
- * @lock: spinlock for fence handling
* @prev: previous fence of the chain
* @prev_seqno: original previous seqno before garbage collection
* @fence: encapsulated fence
- * @cb: callback structure for signaling
- * @work: irq work item for signaling
+ * @lock: spinlock for fence handling
*/
struct dma_fence_chain {
struct dma_fence base;
- spinlock_t lock;
struct dma_fence __rcu *prev;
u64 prev_seqno;
struct dma_fence *fence;
- struct dma_fence_cb cb;
- struct irq_work work;
+ union {
+ /**
+ * @cb: callback for signaling
+ *
+ * This is used to add the callback for signaling the
+ * completion of the fence chain. Never used at the same time
+ * as the irq work.
+ */
+ struct dma_fence_cb cb;
+
+ /**
+ * @work: irq work item for signaling
+ *
+ * Irq work structure to allow us to add the callback without
+ * running into lock inversion. Never used at the same time as
+ * the callback.
+ */
+ struct irq_work work;
+ };
+ spinlock_t lock;
};
extern const struct dma_fence_ops dma_fence_chain_ops;
@@ -52,6 +68,30 @@ to_dma_fence_chain(struct dma_fence *fence)
}
/**
+ * dma_fence_chain_alloc
+ *
+ * Returns a new struct dma_fence_chain object or NULL on failure.
+ */
+static inline struct dma_fence_chain *dma_fence_chain_alloc(void)
+{
+ return kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
+};
+
+/**
+ * dma_fence_chain_free
+ * @chain: chain node to free
+ *
+ * Frees up an allocated but not used struct dma_fence_chain object. This
+ * doesn't need an RCU grace period since the fence was never initialized nor
+ * published. After dma_fence_chain_init() has been called the fence must be
+ * released by calling dma_fence_put(), and not through this function.
+ */
+static inline void dma_fence_chain_free(struct dma_fence_chain *chain)
+{
+ kfree(chain);
+};
+
+/**
* dma_fence_chain_for_each - iterate over all fences in chain
* @iter: current fence
* @head: starting point
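
The new helpers let callers pre-allocate chain nodes outside of fence-signalling critical sections and cheaply drop them again if they end up unused; a rough sketch:

struct dma_fence_chain *chain;

chain = dma_fence_chain_alloc();
if (!chain)
        return -ENOMEM;

if (submission_failed) {
        /* never initialized or published, so a plain free is fine */
        dma_fence_chain_free(chain);
        return err;
}

/* once initialized, the node must be released with dma_fence_put() instead */
dma_fence_chain_init(chain, prev_fence, new_fence, seqno);
/* ... */
dma_fence_put(&chain->base);
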
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index d043752a74cf..e1f49dd241f7 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -635,8 +635,8 @@ struct drm_gem_open {
/**
* DRM_CAP_VBLANK_HIGH_CRTC
*
- * If set to 1, the kernel supports specifying a CRTC index in the high bits of
- * &drm_wait_vblank_request.type.
+ * If set to 1, the kernel supports specifying a :ref:`CRTC index<crtc_index>`
+ * in the high bits of &drm_wait_vblank_request.type.
*
* Starting kernel version 2.6.39, this capability is always set to 1.
*/
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 9b6722d45f36..98bf130feda5 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -312,16 +312,48 @@ struct drm_mode_set_plane {
__u32 src_w;
};
+/**
+ * struct drm_mode_get_plane - Get plane metadata.
+ *
+ * Userspace can perform a GETPLANE ioctl to retrieve information about a
+ * plane.
+ *
+ * To retrieve the number of formats supported, set @count_format_types to zero
+ * and call the ioctl. @count_format_types will be updated with the value.
+ *
+ * To retrieve these formats, allocate an array with the memory needed to store
+ * @count_format_types formats. Point @format_type_ptr to this array and call
+ * the ioctl again (with @count_format_types still set to the value returned in
+ * the first ioctl call).
+ */
struct drm_mode_get_plane {
+ /**
+ * @plane_id: Object ID of the plane whose information should be
+ * retrieved. Set by caller.
+ */
__u32 plane_id;
+ /** @crtc_id: Object ID of the current CRTC. */
__u32 crtc_id;
+ /** @fb_id: Object ID of the current fb. */
__u32 fb_id;
+ /**
+ * @possible_crtcs: Bitmask of CRTCs compatible with the plane. CRTCs
+ * are created and they receive an index, which corresponds to their
+ * position in the bitmask. Bit N corresponds to
+ * :ref:`CRTC index<crtc_index>` N.
+ */
__u32 possible_crtcs;
+ /** @gamma_size: Never used. */
__u32 gamma_size;
+ /** @count_format_types: Number of formats. */
__u32 count_format_types;
+ /**
+ * @format_type_ptr: Pointer to ``__u32`` array of formats that are
+ * supported by the plane. These formats do not require modifiers.
+ */
__u64 format_type_ptr;
};
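
The documented two-call pattern, as a userspace sketch using libdrm's drmIoctl() wrapper:

struct drm_mode_get_plane get = { .plane_id = plane_id };
uint32_t *formats;

/* first call: count_format_types is zero, the kernel fills in the count */
if (drmIoctl(fd, DRM_IOCTL_MODE_GETPLANE, &get))
        return -errno;

formats = calloc(get.count_format_types, sizeof(*formats));
get.format_type_ptr = (uintptr_t)formats;

/* second call: same count, the kernel copies the formats into the array */
if (drmIoctl(fd, DRM_IOCTL_MODE_GETPLANE, &get))
        return -errno;
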
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 02e917507479..9078775feb51 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -72,6 +72,9 @@ extern "C" {
#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
#define DRM_VMW_GB_SURFACE_REF_EXT 28
#define DRM_VMW_MSG 29
+#define DRM_VMW_MKSSTAT_RESET 30
+#define DRM_VMW_MKSSTAT_ADD 31
+#define DRM_VMW_MKSSTAT_REMOVE 32
/*************************************************************************/
/**
@@ -1236,6 +1239,44 @@ struct drm_vmw_msg_arg {
__u32 receive_len;
};
+/**
+ * struct drm_vmw_mksstat_add_arg
+ *
+ * @stat: Pointer to user-space stat-counters array, page-aligned.
+ * @info: Pointer to user-space counter-infos array, page-aligned.
+ * @strs: Pointer to user-space stat strings, page-aligned.
+ * @stat_len: Length in bytes of stat-counters array.
+ * @info_len: Length in bytes of counter-infos array.
+ * @strs_len: Length in bytes of the stat strings, terminators included.
+ * @description: Pointer to instance descriptor string; will be truncated
+ * to MKS_GUEST_STAT_INSTANCE_DESC_LENGTH chars.
+ * @id: Output identifier of the produced record; -1 if error.
+ *
+ * Argument to the DRM_VMW_MKSSTAT_ADD ioctl.
+ */
+struct drm_vmw_mksstat_add_arg {
+ __u64 stat;
+ __u64 info;
+ __u64 strs;
+ __u64 stat_len;
+ __u64 info_len;
+ __u64 strs_len;
+ __u64 description;
+ __u64 id;
+};
+
+/**
+ * struct drm_vmw_mksstat_remove_arg
+ *
+ * @id: Identifier of the record being disposed, originally obtained through
+ * DRM_VMW_MKSSTAT_ADD ioctl.
+ *
+ * Argument to the DRM_VMW_MKSSTAT_REMOVE ioctl.
+ */
+struct drm_vmw_mksstat_remove_arg {
+ __u64 id;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h
index 7f30393b92c3..8e4a2ca0bcbf 100644
--- a/include/uapi/linux/dma-buf.h
+++ b/include/uapi/linux/dma-buf.h
@@ -22,8 +22,56 @@
#include <linux/types.h>
-/* begin/end dma-buf functions used for userspace mmap. */
+/**
+ * struct dma_buf_sync - Synchronize with CPU access.
+ *
+ * When a DMA buffer is accessed from the CPU via mmap, it is not always
+ * possible to guarantee coherency between the CPU-visible map and underlying
+ * memory. To manage coherency, DMA_BUF_IOCTL_SYNC must be used to bracket
+ * any CPU access to give the kernel the chance to shuffle memory around if
+ * needed.
+ *
+ * Prior to accessing the map, the client must call DMA_BUF_IOCTL_SYNC
+ * with DMA_BUF_SYNC_START and the appropriate read/write flags. Once the
+ * access is complete, the client should call DMA_BUF_IOCTL_SYNC with
+ * DMA_BUF_SYNC_END and the same read/write flags.
+ *
+ * The synchronization provided via DMA_BUF_IOCTL_SYNC only provides cache
+ * coherency. It does not prevent other processes or devices from
+ * accessing the memory at the same time. If synchronization with a GPU or
+ * other device driver is required, it is the client's responsibility to
+ * wait for buffer to be ready for reading or writing before calling this
+ * ioctl with DMA_BUF_SYNC_START. Likewise, the client must ensure that
+ * follow-up work is not submitted to GPU or other device driver until
+ * after this ioctl has been called with DMA_BUF_SYNC_END?
+ *
+ * If the driver or API with which the client is interacting uses implicit
+ * synchronization, waiting for prior work to complete can be done via
+ * poll() on the DMA buffer file descriptor. If the driver or API requires
+ * explicit synchronization, the client may have to wait on a sync_file or
+ * other synchronization primitive outside the scope of the DMA buffer API.
+ */
struct dma_buf_sync {
+ /**
+ * @flags: Set of access flags
+ *
+ * DMA_BUF_SYNC_START:
+ * Indicates the start of a map access session.
+ *
+ * DMA_BUF_SYNC_END:
+ * Indicates the end of a map access session.
+ *
+ * DMA_BUF_SYNC_READ:
+ * Indicates that the mapped DMA buffer will be read by the
+ * client via the CPU map.
+ *
+ * DMA_BUF_SYNC_WRITE:
+ * Indicates that the mapped DMA buffer will be written by the
+ * client via the CPU map.
+ *
+ * DMA_BUF_SYNC_RW:
+ * An alias for DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE.
+ */
__u64 flags;
};
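
For reference, the bracketing described above as a userspace sketch (dmabuf_fd is an already-exported dma-buf file descriptor):

struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW };

ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);    /* begin CPU access */
/* ... read/write the mmap()ed buffer ... */
sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);    /* end CPU access */
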