From b91b09eea7a15ab417aa9ed6502b3be12f5283f8 Mon Sep 17 00:00:00 2001
From: Matthew Auld
Date: Fri, 6 Oct 2017 23:18:17 +0100
Subject: drm/i915: push set_pages down to the callers

Each backend is now responsible for calling __i915_gem_object_set_pages
upon successfully gathering its backing storage. This eliminates the
inconsistency between the async and sync paths, which stands out even
more when we start throwing around an sg_mask in a later patch.

Suggested-by: Chris Wilson
Signed-off-by: Matthew Auld
Cc: Joonas Lahtinen
Cc: Chris Wilson
Reviewed-by: Joonas Lahtinen
Reviewed-by: Chris Wilson
Link: https://patchwork.freedesktop.org/patch/msgid/20171006145041.21673-6-matthew.auld@intel.com
Signed-off-by: Chris Wilson
Link: https://patchwork.freedesktop.org/patch/msgid/20171006221833.32439-5-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem_dmabuf.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

(limited to 'drivers/gpu/drm/i915/i915_gem_dmabuf.c')

diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 6176e589cf09..4c4dc85159fb 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -256,11 +256,18 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 	return drm_gem_dmabuf_export(dev, &exp_info);
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
-	return dma_buf_map_attachment(obj->base.import_attach,
-				      DMA_BIDIRECTIONAL);
+	struct sg_table *pages;
+
+	pages = dma_buf_map_attachment(obj->base.import_attach,
+				       DMA_BIDIRECTIONAL);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
+
+	__i915_gem_object_set_pages(obj, pages);
+
+	return 0;
 }
 
 static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
-- 
cgit
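
For context, a minimal sketch of the contract this patch establishes, following
the same shape as the converted dmabuf backend above: a get_pages hook now
returns 0 or -errno and publishes its sg_table itself via
__i915_gem_object_set_pages(), rather than returning the table for a common
caller to install. The backend name and the example_gather_pages() helper
below are hypothetical, for illustration only; they are not part of the commit.

static int i915_gem_object_get_pages_example(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	/* Gather backing storage; example_gather_pages() is a made-up helper. */
	pages = example_gather_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* The backend, not the common get_pages path, publishes its own pages. */
	__i915_gem_object_set_pages(obj, pages);

	return 0;
}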