author    Linus Torvalds <torvalds@linux-foundation.org>  2018-04-02 07:59:23 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-04-02 07:59:23 -0700
commit    320b164abb32db876866a4ff8c2cb710524ac6ea (patch)
tree      1f79119cde6e24c9f1d01fb1e51252bca7c4cdd5  /drivers/gpu/drm/drm_prime.c
parent    0adb32858b0bddf4ada5f364a84ed60b196dbcda (diff)
parent    694f54f680f7fd8e9561928fbfc537d9afbc3d79 (diff)
Merge tag 'drm-for-v4.17' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "Cannonlake and Vega12 support are probably the two major things. This
  pull lacks nouveau, Ben had some unforeseen leave and a few other
  blockers so we'll see how things look or maybe leave it for this merge
  window.

  core:
   - Device links to handle sound/gpu pm dependency
   - Color encoding/range properties
   - Plane clipping into plane check helper
   - Backlight helpers
   - DP TP4 + HBR3 helper support

  amdgpu:
   - Vega12 support
   - Enable DC by default on all supported GPUs
   - Powerplay restructuring and cleanup
   - DC bandwidth calc updates
   - DC backlight on pre-DCE11
   - TTM backing store dropping support
   - SR-IOV fixes
   - Adding "wattman" like functionality
   - DC crc support
   - Improved DC dual-link handling

  amdkfd:
   - GPUVM support for dGPU
   - KFD events for dGPU
   - Enable PCIe atomics for dGPUs
   - HSA process eviction support
   - Live-lock fixes for process eviction
   - VM page table allocation fix for large-bar systems

  panel:
   - Raydium RM68200
   - AUO G104SN02 V2
   - KEO TX31D200VM0BAA
   - ARM Versatile panels

  i915:
   - Cannonlake support enabled
   - AUX-F port support added
   - Icelake base enabling until internal milestone of forcewake support
   - Query uAPI interface (used for GPU topology information currently)
   - Compressed framebuffer support for sprites
   - kmem cache shrinking when GPU is idle
   - Avoid boosting GPU when waited item is being processed already
   - Avoid retraining LSPCON link unnecessarily
   - Decrease request signaling latency
   - Deprecation of I915_SET_COLORKEY_NONE
   - Kerneldoc and compiler warning cleanup for upcoming CI enforcements
   - Full range ycbcr toggling
   - HDCP support

  i915/gvt:
   - Big refactor for shadow ppgtt
   - KBL context save/restore via LRI cmd (Weinan)
   - Properly unmap dma for guest page (Changbin)

  vmwgfx:
   - Lots of various improvements

  etnaviv:
   - Use the drm gpu scheduler
   - prep work for GC7000L support

  vc4:
   - fix alpha blending
   - Expose perf counters to userspace

  pl111:
   - Bandwidth checking/limiting
   - Versatile panel support

  sun4i:
   - A83T HDMI support
   - A80 support
   - YUV plane support
   - H3/H5 HDMI support

  omapdrm:
   - HPD support for DVI connector
   - remove lots of static variables

  msm:
   - DSI updates from 10nm / SDM845
   - fix for race condition with a3xx/a4xx fence completion irq
   - some refactoring/prep work for eventual a6xx support (ie. when we
     have a userspace)
   - a5xx debugfs enhancements
   - some mdp5 fixes/cleanups to prepare for eventually merging writeback
     support (ie. when we have a userspace)

  tegra:
   - mmap() fixes for fbdev devices
   - Overlay plane for hw cursor fix
   - dma-buf cache maintenance support

  mali-dp:
   - YUV->RGB conversion support

  rockchip:
   - rk3399/chromebook fixes and improvements

  rcar-du:
   - LVDS support move to drm bridge
   - DT bindings for R8A77995
   - Driver/DT support for R8A77970

  tilcdc:
   - DRM panel support"

* tag 'drm-for-v4.17' of git://people.freedesktop.org/~airlied/linux: (1646 commits)
  drm/i915: Fix hibernation with ACPI S0 target state
  drm/i915/execlists: Use a locked clear_bit() for synchronisation with interrupt
  drm/i915: Specify which engines to reset following semaphore/event lockups
  drm/i915/dp: Write to SET_POWER dpcd to enable MST hub.
  drm/amdkfd: Use ordered workqueue to restore processes
  drm/amdgpu: Fix acquiring VM on large-BAR systems
  drm/amd/pp: clean header file hwmgr.h
  drm/amd/pp: use mlck_table.count for array loop index limit
  drm: Fix uabi regression by allowing garbage mode->type from userspace
  drm/amdgpu: Add an ATPX quirk for hybrid laptop
  drm/amdgpu: fix spelling mistake: "asssert" -> "assert"
  drm/amd/pp: Add new asic support in pp_psm.c
  drm/amd/pp: Clean up powerplay code on Vega12
  drm/amd/pp: Add smu irq handlers for legacy asics
  drm/amd/pp: Fix set wrong temperature range on smu7
  drm/amdgpu: Don't change preferred domian when fallback GTT v5
  drm/vmwgfx: Bump version patchlevel and date
  drm/vmwgfx: use monotonic event timestamps
  drm/vmwgfx: Unpin the screen object backup buffer when not used
  drm/vmwgfx: Stricter count of legacy surface device resources
  ...
Diffstat (limited to 'drivers/gpu/drm/drm_prime.c')
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 193
1 file changed, 145 insertions(+), 48 deletions(-)
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 9a17725b0f7a..7856a9b3f8a8 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -73,6 +73,9 @@
* Drivers should detect this situation and return back the gem object
* from the dma-buf private. Prime will do this automatically for drivers that
* use the drm_gem_prime_{import,export} helpers.
+ *
+ * GEM struct &dma_buf_ops symbols are now exported. They can be reused by
+ * drivers which implement the GEM interface.
*/
struct drm_prime_member {
@@ -180,9 +183,20 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
return -ENOENT;
}
-static int drm_gem_map_attach(struct dma_buf *dma_buf,
- struct device *target_dev,
- struct dma_buf_attachment *attach)
+/**
+ * drm_gem_map_attach - dma_buf attach implementation for GEM
+ * @dma_buf: buffer to attach device to
+ * @target_dev: not used
+ * @attach: buffer attachment data
+ *
+ * Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
+ * device specific attachment. This can be used as the &dma_buf_ops.attach
+ * callback.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
+ struct dma_buf_attachment *attach)
{
struct drm_prime_attachment *prime_attach;
struct drm_gem_object *obj = dma_buf->priv;
@@ -200,34 +214,44 @@ static int drm_gem_map_attach(struct dma_buf *dma_buf,
return dev->driver->gem_prime_pin(obj);
}
+EXPORT_SYMBOL(drm_gem_map_attach);
-static void drm_gem_map_detach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
+/**
+ * drm_gem_map_detach - dma_buf detach implementation for GEM
+ * @dma_buf: buffer to detach from
+ * @attach: attachment to be detached
+ *
+ * Cleans up &dma_buf_attachment. This can be used as the &dma_buf_ops.detach
+ * callback.
+ */
+void drm_gem_map_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
{
struct drm_prime_attachment *prime_attach = attach->priv;
struct drm_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->dev;
- struct sg_table *sgt;
- if (dev->driver->gem_prime_unpin)
- dev->driver->gem_prime_unpin(obj);
+ if (prime_attach) {
+ struct sg_table *sgt = prime_attach->sgt;
- if (!prime_attach)
- return;
-
- sgt = prime_attach->sgt;
- if (sgt) {
- if (prime_attach->dir != DMA_NONE)
- dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents,
- prime_attach->dir,
- DMA_ATTR_SKIP_CPU_SYNC);
- sg_free_table(sgt);
+ if (sgt) {
+ if (prime_attach->dir != DMA_NONE)
+ dma_unmap_sg_attrs(attach->dev, sgt->sgl,
+ sgt->nents,
+ prime_attach->dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ sg_free_table(sgt);
+ }
+
+ kfree(sgt);
+ kfree(prime_attach);
+ attach->priv = NULL;
}
- kfree(sgt);
- kfree(prime_attach);
- attach->priv = NULL;
+ if (dev->driver->gem_prime_unpin)
+ dev->driver->gem_prime_unpin(obj);
}
+EXPORT_SYMBOL(drm_gem_map_detach);
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf)
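A minimal sketch of the driver-side hooks these helpers call: drm_gem_map_attach() invokes &drm_driver.gem_prime_pin and drm_gem_map_detach() invokes &drm_driver.gem_prime_unpin. Only the hook signatures follow from the calls visible above; the foo_* identifiers below are hypothetical.

/* Hypothetical driver hooks invoked by drm_gem_map_attach()/_detach();
 * foo_bo, to_foo_bo() and foo_bo_pin()/_unpin() are placeholders. */
static int foo_gem_prime_pin(struct drm_gem_object *obj)
{
	struct foo_bo *bo = to_foo_bo(obj);

	/* keep the backing pages resident while the importer uses them */
	return foo_bo_pin(bo);
}

static void foo_gem_prime_unpin(struct drm_gem_object *obj)
{
	foo_bo_unpin(to_foo_bo(obj));
}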
@@ -254,8 +278,20 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
}
}
-static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
- enum dma_data_direction dir)
+/**
+ * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
+ * @attach: attachment whose scatterlist is to be returned
+ * @dir: direction of DMA transfer
+ *
+ * Calls &drm_driver.gem_prime_get_sg_table and then maps the scatterlist. This
+ * can be used as the &dma_buf_ops.map_dma_buf callback.
+ *
+ * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
+ * on error. May return -EINTR if it is interrupted by a signal.
+ */
+
+struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
{
struct drm_prime_attachment *prime_attach = attach->priv;
struct drm_gem_object *obj = attach->dmabuf->priv;
@@ -291,13 +327,21 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
return sgt;
}
+EXPORT_SYMBOL(drm_gem_map_dma_buf);
-static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *sgt,
- enum dma_data_direction dir)
+/**
+ * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
+ *
+ * Not implemented. The unmap is done at drm_gem_map_detach(). This can be
+ * used as the &dma_buf_ops.unmap_dma_buf callback.
+ */
+void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
{
/* nothing to be done here */
}
+EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
/**
* drm_gem_dmabuf_export - dma_buf export implementation for GEM
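As the new kerneldoc notes, drm_gem_map_dma_buf() obtains its scatterlist from the driver's &drm_driver.gem_prime_get_sg_table hook and then DMA-maps it. A minimal sketch of such a hook, assuming the driver keeps a page array for the object, could build the table with drm_prime_pages_to_sg() (exported further down in this file); bo->pages and the foo_* names are hypothetical.

/* Hypothetical gem_prime_get_sg_table hook: build an sg_table from a
 * driver-tracked page array; drm_gem_map_dma_buf() then DMA-maps it. */
static struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct foo_bo *bo = to_foo_bo(obj);

	return drm_prime_pages_to_sg(bo->pages, obj->size >> PAGE_SHIFT);
}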
@@ -348,47 +392,99 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
-static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+/**
+ * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
+ * @dma_buf: buffer to be mapped
+ *
+ * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
+ * callback.
+ *
+ * Returns the kernel virtual address.
+ */
+void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->dev;
return dev->driver->gem_prime_vmap(obj);
}
+EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
-static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+/**
+ * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
+ * @dma_buf: buffer to be unmapped
+ * @vaddr: the virtual address of the buffer
+ *
+ * Releases a kernel virtual mapping. This can be used as the
+ * &dma_buf_ops.vunmap callback.
+ */
+void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
struct drm_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->dev;
dev->driver->gem_prime_vunmap(obj, vaddr);
}
+EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
-static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
+/**
+ * drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM
+ *
+ * Not implemented. This can be used as the &dma_buf_ops.map_atomic callback.
+ */
+void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num)
{
return NULL;
}
+EXPORT_SYMBOL(drm_gem_dmabuf_kmap_atomic);
-static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
+/**
+ * drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM
+ *
+ * Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback.
+ */
+void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
{
}
-static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
- unsigned long page_num)
+EXPORT_SYMBOL(drm_gem_dmabuf_kunmap_atomic);
+
+/**
+ * drm_gem_dmabuf_kmap - map implementation for GEM
+ *
+ * Not implemented. This can be used as the &dma_buf_ops.map callback.
+ */
+void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
return NULL;
}
+EXPORT_SYMBOL(drm_gem_dmabuf_kmap);
-static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
+/**
+ * drm_gem_dmabuf_kunmap - unmap implementation for GEM
+ *
+ * Not implemented. This can be used as the &dma_buf_ops.unmap callback.
+ */
+void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
+ void *addr)
{
}
+EXPORT_SYMBOL(drm_gem_dmabuf_kunmap);
-static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
- struct vm_area_struct *vma)
+/**
+ * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
+ * @dma_buf: buffer to be mapped
+ * @vma: virtual address range
+ *
+ * Provides memory mapping for the buffer. This can be used as the
+ * &dma_buf_ops.mmap callback.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
struct drm_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->dev;
@@ -398,6 +494,7 @@ static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
return dev->driver->gem_prime_mmap(obj, vma);
}
+EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
.attach = drm_gem_map_attach,
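With these callbacks exported, a driver that needs its own &dma_buf_ops (for example to override a single callback) can reuse the GEM helpers for everything else. A hedged sketch, in which only foo_gem_dmabuf_mmap is a hypothetical driver-specific override:

/* Sketch of a driver-private ops table built from the exported helpers;
 * only the mmap override (foo_gem_dmabuf_mmap) is driver-specific. */
static const struct dma_buf_ops foo_gem_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.map = drm_gem_dmabuf_kmap,
	.map_atomic = drm_gem_dmabuf_kmap_atomic,
	.unmap = drm_gem_dmabuf_kunmap,
	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = foo_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};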
@@ -825,40 +922,40 @@ EXPORT_SYMBOL(drm_prime_pages_to_sg);
/**
* drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
* @sgt: scatter-gather table to convert
- * @pages: array of page pointers to store the page array in
+ * @pages: optional array of page pointers to store the page array in
* @addrs: optional array to store the dma bus address of each page
- * @max_pages: size of both the passed-in arrays
+ * @max_entries: size of both the passed-in arrays
*
* Exports an sg table into an array of pages and addresses. This is currently
* required by the TTM driver in order to do correct fault handling.
*/
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
- dma_addr_t *addrs, int max_pages)
+ dma_addr_t *addrs, int max_entries)
{
unsigned count;
struct scatterlist *sg;
struct page *page;
- u32 len;
- int pg_index;
+ u32 len, index;
dma_addr_t addr;
- pg_index = 0;
+ index = 0;
for_each_sg(sgt->sgl, sg, sgt->nents, count) {
len = sg->length;
page = sg_page(sg);
addr = sg_dma_address(sg);
while (len > 0) {
- if (WARN_ON(pg_index >= max_pages))
+ if (WARN_ON(index >= max_entries))
return -1;
- pages[pg_index] = page;
+ if (pages)
+ pages[index] = page;
if (addrs)
- addrs[pg_index] = addr;
+ addrs[index] = addr;
page++;
addr += PAGE_SIZE;
len -= PAGE_SIZE;
- pg_index++;
+ index++;
}
}
return 0;
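The reworked drm_prime_sg_to_page_addr_arrays() now treats @pages as optional, so a caller that only needs bus addresses can pass NULL for it. A minimal usage sketch under that assumption; the foo_* name and the -EINVAL mapping are hypothetical.

/* Sketch: fill only the DMA address array; the page array is skipped
 * because NULL is passed for @pages. */
static int foo_fill_dma_addrs(struct sg_table *sgt, dma_addr_t *addrs,
			      int max_entries)
{
	if (drm_prime_sg_to_page_addr_arrays(sgt, NULL, addrs, max_entries))
		return -EINVAL;	/* table larger than max_entries */

	return 0;
}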