author     Chris Wilson <chris@chris-wilson.co.uk>  2016-08-04 16:32:42 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>  2016-08-04 20:20:06 +0100
commit     ad778f8967ea2f0bfda02701f918bcfcd495b721 (patch)
tree       be8ab4cf23133b97782d69d6a1744b1956a66312 /drivers/gpu/drm/i915/i915_gem_dmabuf.c
parent     0eafec6d3244802d469712682b0f513963c23eff (diff)
drm/i915: Export our request as a dma-buf fence on the reservation object
If the GEM objects being rendered with in this request have been exported via dma-buf to a third party, hook ourselves into the dma-buf reservation object so that the third party can serialise with our rendering via the dma-buf fences.

Testcase: igt/prime_busy
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1470324762-2545-26-git-send-email-chris@chris-wilson.co.uk
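For context (not part of this patch): once the requests are attached as fences on the shared reservation object, an importing driver can serialise against i915's rendering with the standard reservation helpers of that era. A minimal sketch, assuming a roughly v4.8 kernel; the helper name third_party_wait_for_i915 is made up for illustration:

#include <linux/dma-buf.h>
#include <linux/jiffies.h>
#include <linux/reservation.h>

/* Illustrative only: wait for every fence i915 has attached to the
 * dma-buf's reservation object, i.e. both the shared (read) fences and
 * the exclusive (write) fence, so the importer knows i915's rendering
 * into the buffer has completed.
 */
static int third_party_wait_for_i915(struct dma_buf *dma_buf)
{
	long ret;

	ret = reservation_object_wait_timeout_rcu(dma_buf->resv,
						  true,  /* wait_all */
						  true,  /* interruptible */
						  msecs_to_jiffies(10000));
	if (ret < 0)
		return ret;		/* e.g. -ERESTARTSYS */

	return ret == 0 ? -ETIMEDOUT : 0;
}

A non-blocking check is also possible via reservation_object_test_signaled_rcu() on the same dma_buf->resv.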
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_dmabuf.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c  58
1 file changed, 55 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 3a00ab3ad06e..c60a8d5bbad0 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -23,9 +23,13 @@
* Authors:
* Dave Airlie <airlied@redhat.com>
*/
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
#include <drm/drmP.h>
+
#include "i915_drv.h"
-#include <linux/dma-buf.h>
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
@@ -218,25 +222,73 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.end_cpu_access = i915_gem_end_cpu_access,
};
+static void export_fences(struct drm_i915_gem_object *obj,
+ struct dma_buf *dma_buf)
+{
+ struct reservation_object *resv = dma_buf->resv;
+ struct drm_i915_gem_request *req;
+ unsigned long active;
+ int idx;
+
+ active = __I915_BO_ACTIVE(obj);
+ if (!active)
+ return;
+
+ /* Serialise with execbuf to prevent concurrent fence-loops */
+ mutex_lock(&obj->base.dev->struct_mutex);
+
+ /* Mark the object for future fences before racily adding old fences */
+ obj->base.dma_buf = dma_buf;
+
+ ww_mutex_lock(&resv->lock, NULL);
+
+ for_each_active(active, idx) {
+ req = i915_gem_active_get(&obj->last_read[idx],
+ &obj->base.dev->struct_mutex);
+ if (!req)
+ continue;
+
+ if (reservation_object_reserve_shared(resv) == 0)
+ reservation_object_add_shared_fence(resv, &req->fence);
+
+ i915_gem_request_put(req);
+ }
+
+ req = i915_gem_active_get(&obj->last_write,
+ &obj->base.dev->struct_mutex);
+ if (req) {
+ reservation_object_add_excl_fence(resv, &req->fence);
+ i915_gem_request_put(req);
+ }
+
+ ww_mutex_unlock(&resv->lock);
+ mutex_unlock(&obj->base.dev->struct_mutex);
+}
+
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct dma_buf *dma_buf;
exp_info.ops = &i915_dmabuf_ops;
exp_info.size = gem_obj->size;
exp_info.flags = flags;
exp_info.priv = gem_obj;
-
if (obj->ops->dmabuf_export) {
int ret = obj->ops->dmabuf_export(obj);
if (ret)
return ERR_PTR(ret);
}
- return dma_buf_export(&exp_info);
+ dma_buf = dma_buf_export(&exp_info);
+ if (IS_ERR(dma_buf))
+ return dma_buf;
+
+ export_fences(obj, dma_buf);
+ return dma_buf;
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)