Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 6
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 6
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 529
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 124
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 13
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp.c | 15
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_core.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 33
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.h | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 218
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 16
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_mic.c | 44
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 27
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 15
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 25
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 48
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c | 67
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.h | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.h | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 51
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_power.c | 14
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 53
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c | 5
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 94
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 2
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 4
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 63
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 7
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 26
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | 27
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 54
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 7
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c | 63
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c | 59
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 36
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_fb.c | 45
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 57
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 46
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 85
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 12
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.h | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/pll.c | 29
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/venc.c | 65
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/video-pll.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 47
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_backend.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 242
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 39
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 148
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 104
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 111
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2
68 files changed, 2096 insertions(+), 796 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index ba10a83535d3..60d8bedb694d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1035,7 +1035,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
{
int r;
struct dma_fence *fence;
- r = drm_syncobj_fence_get(p->filp, handle, &fence);
+ r = drm_syncobj_find_fence(p->filp, handle, &fence);
if (r)
return r;
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 4e906b82a170..fbc3f308fa19 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -167,3 +167,9 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
+int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
+int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
+int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index d920b2118a39..a9ae6dd2d593 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -657,6 +657,12 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
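
For context, a minimal userspace sketch of driving the newly registered ioctls (hypothetical helper, not part of this patch; assumes the DRM uapi header and an open render-node fd). DRM_IOCTL_SYNCOBJ_SIGNAL takes the same struct drm_syncobj_array:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	/* Reset a set of syncobjs back to the unsignaled state. */
	static int syncobjs_reset(int fd, const uint32_t *handles, uint32_t n)
	{
		struct drm_syncobj_array args = {
			.handles = (uintptr_t)handles,
			.count_handles = n,
		};

		return ioctl(fd, DRM_IOCTL_SYNCOBJ_RESET, &args);
	}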
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index a5b38a80a99a..0422b8c2c2e7 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -1,5 +1,7 @@
/*
* Copyright 2017 Red Hat
+ * Parts ported from amdgpu (fence wait code).
+ * Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -31,6 +33,9 @@
* that contain an optional fence. The fence can be updated with a new
* fence, or be NULL.
*
+ * syncobjs can be waited upon, in which case the wait completes when
+ * the underlying fence signals.
+ *
* syncobjs can be exported to fds and back; these fds are opaque and
* have no other use case, except passing the syncobj between processes.
*
@@ -46,6 +51,7 @@
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/sync_file.h>
+#include <linux/sched/signal.h>
#include "drm_internal.h"
#include <drm/drm_syncobj.h>
@@ -75,6 +81,75 @@ struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
}
EXPORT_SYMBOL(drm_syncobj_find);
+static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb,
+ drm_syncobj_func_t func)
+{
+ cb->func = func;
+ list_add_tail(&cb->node, &syncobj->cb_list);
+}
+
+static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
+ struct dma_fence **fence,
+ struct drm_syncobj_cb *cb,
+ drm_syncobj_func_t func)
+{
+ int ret;
+
+ *fence = drm_syncobj_fence_get(syncobj);
+ if (*fence)
+ return 1;
+
+ spin_lock(&syncobj->lock);
+ /* We've already tried once to get a fence and failed. Now that we
+ * have the lock, try one more time just to be sure we don't add a
+ * callback when a fence has already been set.
+ */
+ if (syncobj->fence) {
+ *fence = dma_fence_get(syncobj->fence);
+ ret = 1;
+ } else {
+ *fence = NULL;
+ drm_syncobj_add_callback_locked(syncobj, cb, func);
+ ret = 0;
+ }
+ spin_unlock(&syncobj->lock);
+
+ return ret;
+}
+
+/**
+ * drm_syncobj_add_callback - adds a callback to syncobj::cb_list
+ * @syncobj: Sync object to which to add the callback
+ * @cb: Callback to add
+ * @func: Func to use when initializing the drm_syncobj_cb struct
+ *
+ * This adds a callback to be called next time the fence is replaced
+ */
+void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb,
+ drm_syncobj_func_t func)
+{
+ spin_lock(&syncobj->lock);
+ drm_syncobj_add_callback_locked(syncobj, cb, func);
+ spin_unlock(&syncobj->lock);
+}
+EXPORT_SYMBOL(drm_syncobj_add_callback);
+
+/**
+ * drm_syncobj_remove_callback - removes a callback from syncobj::cb_list
+ * @syncobj: Sync object from which to remove the callback
+ * @cb: Callback to remove
+ */
+void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb)
+{
+ spin_lock(&syncobj->lock);
+ list_del_init(&cb->node);
+ spin_unlock(&syncobj->lock);
+}
+EXPORT_SYMBOL(drm_syncobj_remove_callback);
+
/**
* drm_syncobj_replace_fence - replace fence in a sync object.
* @syncobj: Sync object to replace fence in
@@ -86,18 +161,75 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
struct dma_fence *fence)
{
struct dma_fence *old_fence;
+ struct drm_syncobj_cb *cur, *tmp;
if (fence)
dma_fence_get(fence);
- old_fence = xchg(&syncobj->fence, fence);
+
+ spin_lock(&syncobj->lock);
+
+ old_fence = syncobj->fence;
+ syncobj->fence = fence;
+
+ if (fence != old_fence) {
+ list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
+ list_del_init(&cur->node);
+ cur->func(syncobj, cur);
+ }
+ }
+
+ spin_unlock(&syncobj->lock);
dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
-int drm_syncobj_fence_get(struct drm_file *file_private,
- u32 handle,
- struct dma_fence **fence)
+struct drm_syncobj_null_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+};
+
+static const char *drm_syncobj_null_fence_get_name(struct dma_fence *fence)
+{
+ return "syncobjnull";
+}
+
+static bool drm_syncobj_null_fence_enable_signaling(struct dma_fence *fence)
+{
+ dma_fence_enable_sw_signaling(fence);
+ return !dma_fence_is_signaled(fence);
+}
+
+static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
+ .get_driver_name = drm_syncobj_null_fence_get_name,
+ .get_timeline_name = drm_syncobj_null_fence_get_name,
+ .enable_signaling = drm_syncobj_null_fence_enable_signaling,
+ .wait = dma_fence_default_wait,
+ .release = NULL,
+};
+
+static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
+{
+ struct drm_syncobj_null_fence *fence;
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (fence == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&fence->lock);
+ dma_fence_init(&fence->base, &drm_syncobj_null_fence_ops,
+ &fence->lock, 0, 0);
+ dma_fence_signal(&fence->base);
+
+ drm_syncobj_replace_fence(syncobj, &fence->base);
+
+ dma_fence_put(&fence->base);
+
+ return 0;
+}
+
+int drm_syncobj_find_fence(struct drm_file *file_private,
+ u32 handle,
+ struct dma_fence **fence)
{
struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
int ret = 0;
@@ -105,14 +237,14 @@ int drm_syncobj_fence_get(struct drm_file *file_private,
if (!syncobj)
return -ENOENT;
- *fence = dma_fence_get(syncobj->fence);
+ *fence = drm_syncobj_fence_get(syncobj);
if (!*fence) {
ret = -EINVAL;
}
drm_syncobj_put(syncobj);
return ret;
}
-EXPORT_SYMBOL(drm_syncobj_fence_get);
+EXPORT_SYMBOL(drm_syncobj_find_fence);
/**
* drm_syncobj_free - free a sync object.
@@ -125,13 +257,13 @@ void drm_syncobj_free(struct kref *kref)
struct drm_syncobj *syncobj = container_of(kref,
struct drm_syncobj,
refcount);
- dma_fence_put(syncobj->fence);
+ drm_syncobj_replace_fence(syncobj, NULL);
kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);
static int drm_syncobj_create(struct drm_file *file_private,
- u32 *handle)
+ u32 *handle, uint32_t flags)
{
int ret;
struct drm_syncobj *syncobj;
@@ -141,6 +273,16 @@ static int drm_syncobj_create(struct drm_file *file_private,
return -ENOMEM;
kref_init(&syncobj->refcount);
+ INIT_LIST_HEAD(&syncobj->cb_list);
+ spin_lock_init(&syncobj->lock);
+
+ if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
+ ret = drm_syncobj_assign_null_handle(syncobj);
+ if (ret < 0) {
+ drm_syncobj_put(syncobj);
+ return ret;
+ }
+ }
idr_preload(GFP_KERNEL);
spin_lock(&file_private->syncobj_table_lock);
@@ -307,7 +449,7 @@ int drm_syncobj_export_sync_file(struct drm_file *file_private,
if (fd < 0)
return fd;
- ret = drm_syncobj_fence_get(file_private, handle, &fence);
+ ret = drm_syncobj_find_fence(file_private, handle, &fence);
if (ret)
goto err_put_fd;
@@ -377,11 +519,11 @@ drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
/* no valid flags yet */
- if (args->flags)
+ if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
return -EINVAL;
return drm_syncobj_create(file_private,
- &args->handle);
+ &args->handle, args->flags);
}
int
@@ -447,3 +589,368 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
return drm_syncobj_fd_to_handle(file_private, args->fd,
&args->handle);
}
+
+struct syncobj_wait_entry {
+ struct task_struct *task;
+ struct dma_fence *fence;
+ struct dma_fence_cb fence_cb;
+ struct drm_syncobj_cb syncobj_cb;
+};
+
+static void syncobj_wait_fence_func(struct dma_fence *fence,
+ struct dma_fence_cb *cb)
+{
+ struct syncobj_wait_entry *wait =
+ container_of(cb, struct syncobj_wait_entry, fence_cb);
+
+ wake_up_process(wait->task);
+}
+
+static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb)
+{
+ struct syncobj_wait_entry *wait =
+ container_of(cb, struct syncobj_wait_entry, syncobj_cb);
+
+ /* This happens inside the syncobj lock */
+ wait->fence = dma_fence_get(syncobj->fence);
+ wake_up_process(wait->task);
+}
+
+static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ uint32_t count,
+ uint32_t flags,
+ signed long timeout,
+ uint32_t *idx)
+{
+ struct syncobj_wait_entry *entries;
+ struct dma_fence *fence;
+ signed long ret;
+ uint32_t signaled_count, i;
+
+ entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ /* Walk the list of sync objects and initialize entries. We do
+ * this up-front so that we can properly return -EINVAL if there is
+ * a syncobj with a missing fence and then never have the chance of
+ * returning -EINVAL again.
+ */
+ signaled_count = 0;
+ for (i = 0; i < count; ++i) {
+ entries[i].task = current;
+ entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
+ if (!entries[i].fence) {
+ if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ continue;
+ } else {
+ ret = -EINVAL;
+ goto cleanup_entries;
+ }
+ }
+
+ if (dma_fence_is_signaled(entries[i].fence)) {
+ if (signaled_count == 0 && idx)
+ *idx = i;
+ signaled_count++;
+ }
+ }
+
+ /* Initialize ret to the max of timeout and 1. That way, the
+ * default return value indicates a successful wait and not a
+ * timeout.
+ */
+ ret = max_t(signed long, timeout, 1);
+
+ if (signaled_count == count ||
+ (signaled_count > 0 &&
+ !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
+ goto cleanup_entries;
+
+ /* There's a very annoying laxness in the dma_fence API here, in
+ * that backends are not required to automatically report when a
+ * fence is signaled prior to fence->ops->enable_signaling() being
+ * called. So here if we fail to match signaled_count, we need to
+ * fall through and try a 0 timeout wait!
+ */
+
+ if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ for (i = 0; i < count; ++i) {
+ drm_syncobj_fence_get_or_add_callback(syncobjs[i],
+ &entries[i].fence,
+ &entries[i].syncobj_cb,
+ syncobj_wait_syncobj_func);
+ }
+ }
+
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ signaled_count = 0;
+ for (i = 0; i < count; ++i) {
+ fence = entries[i].fence;
+ if (!fence)
+ continue;
+
+ if (dma_fence_is_signaled(fence) ||
+ (!entries[i].fence_cb.func &&
+ dma_fence_add_callback(fence,
+ &entries[i].fence_cb,
+ syncobj_wait_fence_func))) {
+ /* The fence has been signaled */
+ if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
+ signaled_count++;
+ } else {
+ if (idx)
+ *idx = i;
+ goto done_waiting;
+ }
+ }
+ }
+
+ if (signaled_count == count)
+ goto done_waiting;
+
+ if (timeout == 0) {
+ /* If we are doing a 0 timeout wait and we got
+ * here, then we just timed out.
+ */
+ ret = 0;
+ goto done_waiting;
+ }
+
+ ret = schedule_timeout(ret);
+
+ if (ret > 0 && signal_pending(current))
+ ret = -ERESTARTSYS;
+ } while (ret > 0);
+
+done_waiting:
+ __set_current_state(TASK_RUNNING);
+
+cleanup_entries:
+ for (i = 0; i < count; ++i) {
+ if (entries[i].syncobj_cb.func)
+ drm_syncobj_remove_callback(syncobjs[i],
+ &entries[i].syncobj_cb);
+ if (entries[i].fence_cb.func)
+ dma_fence_remove_callback(entries[i].fence,
+ &entries[i].fence_cb);
+ dma_fence_put(entries[i].fence);
+ }
+ kfree(entries);
+
+ return ret;
+}
+
+/**
+ * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
+ *
+ * @timeout_nsec: absolute timeout in nanoseconds, 0 for poll
+ *
+ * Calculate the timeout in jiffies from an absolute time in sec/nsec.
+ */
+static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
+{
+ ktime_t abs_timeout, now;
+ u64 timeout_ns, timeout_jiffies64;
+
+ /* a 0 timeout means poll - absolute 0 doesn't seem valid */
+ if (timeout_nsec == 0)
+ return 0;
+
+ abs_timeout = ns_to_ktime(timeout_nsec);
+ now = ktime_get();
+
+ if (!ktime_after(abs_timeout, now))
+ return 0;
+
+ timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
+
+ timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
+ /* clamp timeout to avoid infinite timeout */
+ if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
+ return MAX_SCHEDULE_TIMEOUT - 1;
+
+ return timeout_jiffies64 + 1;
+}
+
+static int drm_syncobj_array_wait(struct drm_device *dev,
+ struct drm_file *file_private,
+ struct drm_syncobj_wait *wait,
+ struct drm_syncobj **syncobjs)
+{
+ signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
+ signed long ret = 0;
+ uint32_t first = ~0;
+
+ ret = drm_syncobj_array_wait_timeout(syncobjs,
+ wait->count_handles,
+ wait->flags,
+ timeout, &first);
+ if (ret < 0)
+ return ret;
+
+ wait->first_signaled = first;
+ if (ret == 0)
+ return -ETIME;
+ return 0;
+}
+
+static int drm_syncobj_array_find(struct drm_file *file_private,
+ void *user_handles, uint32_t count_handles,
+ struct drm_syncobj ***syncobjs_out)
+{
+ uint32_t i, *handles;
+ struct drm_syncobj **syncobjs;
+ int ret;
+
+ handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
+ if (handles == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(handles, user_handles,
+ sizeof(uint32_t) * count_handles)) {
+ ret = -EFAULT;
+ goto err_free_handles;
+ }
+
+ syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
+ if (syncobjs == NULL) {
+ ret = -ENOMEM;
+ goto err_free_handles;
+ }
+
+ for (i = 0; i < count_handles; i++) {
+ syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
+ if (!syncobjs[i]) {
+ ret = -ENOENT;
+ goto err_put_syncobjs;
+ }
+ }
+
+ kfree(handles);
+ *syncobjs_out = syncobjs;
+ return 0;
+
+err_put_syncobjs:
+ while (i-- > 0)
+ drm_syncobj_put(syncobjs[i]);
+ kfree(syncobjs);
+err_free_handles:
+ kfree(handles);
+
+ return ret;
+}
+
+static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
+ uint32_t count)
+{
+ uint32_t i;
+ for (i = 0; i < count; i++)
+ drm_syncobj_put(syncobjs[i]);
+ kfree(syncobjs);
+}
+
+int
+drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_wait *args = data;
+ struct drm_syncobj **syncobjs;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ return -ENODEV;
+
+ if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_syncobj_array_wait(dev, file_private,
+ args, syncobjs);
+
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return ret;
+}
+
+int
+drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_array *args = data;
+ struct drm_syncobj **syncobjs;
+ uint32_t i;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ return -ENODEV;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < args->count_handles; i++)
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
+
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return 0;
+}
+
+int
+drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_array *args = data;
+ struct drm_syncobj **syncobjs;
+ uint32_t i;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ return -ENODEV;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < args->count_handles; i++) {
+ ret = drm_syncobj_assign_null_handle(syncobjs[i]);
+ if (ret < 0)
+ break;
+ }
+
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return ret;
+}
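
A userspace sketch of the new wait path (hypothetical, not part of this patch): timeout_nsec is an absolute CLOCK_MONOTONIC time, matching the ktime_get() comparison in drm_timeout_abs_to_jiffies() above, and the ioctl fails with ETIME when the wait times out:

	#include <stdint.h>
	#include <string.h>
	#include <time.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	/* Wait until any one of the handles signals, up to ~1s from now.
	 * Returns the index of the first signaled handle, or -1 on error
	 * or timeout. */
	static int syncobjs_wait_any(int fd, const uint32_t *handles, uint32_t n)
	{
		struct drm_syncobj_wait wait;
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		memset(&wait, 0, sizeof(wait));
		wait.handles = (uintptr_t)handles;
		wait.count_handles = n;
		/* absolute deadline: now + 1s, in nanoseconds */
		wait.timeout_nsec = (int64_t)ts.tv_sec * 1000000000ll +
				    ts.tv_nsec + 1000000000ll;
		wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;

		if (ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait))
			return -1;	/* errno == ETIME on timeout */

		return wait.first_signaled;
	}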
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 5792ca88ab7a..730b8d9db187 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@@ -33,9 +34,8 @@
#define WINDOWS_NR 3
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
-#define IFTYPE_I80 (1 << 0)
-#define I80_HW_TRG (1 << 1)
-#define IFTYPE_HDMI (1 << 2)
+#define I80_HW_TRG (1 << 0)
+#define IFTYPE_HDMI (1 << 1)
static const char * const decon_clks_name[] = {
"pclk",
@@ -57,6 +57,8 @@ struct decon_context {
struct regmap *sysreg;
struct clk *clks[ARRAY_SIZE(decon_clks_name)];
unsigned int irq;
+ unsigned int irq_vsync;
+ unsigned int irq_lcd_sys;
unsigned int te_irq;
unsigned long out_type;
int first_win;
@@ -90,7 +92,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
u32 val;
val = VIDINTCON0_INTEN;
- if (ctx->out_type & IFTYPE_I80)
+ if (crtc->i80_mode)
val |= VIDINTCON0_FRAMEDONE;
else
val |= VIDINTCON0_INTFRMEN | VIDINTCON0_FRAMESEL_FP;
@@ -139,7 +141,7 @@ static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
switch (status & (VIDCON1_VSTATUS_MASK | VIDCON1_I80_ACTIVE)) {
case VIDCON1_VSTATUS_VS:
- if (!(ctx->out_type & IFTYPE_I80))
+ if (!(ctx->crtc->i80_mode))
--frm;
break;
case VIDCON1_VSTATUS_BP:
@@ -166,7 +168,7 @@ static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc)
static void decon_setup_trigger(struct decon_context *ctx)
{
- if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)))
+ if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG))
return;
if (!(ctx->out_type & I80_HW_TRG)) {
@@ -206,7 +208,7 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
val = VIDOUT_LCD_ON;
if (interlaced)
val |= VIDOUT_INTERLACE_EN_F;
- if (ctx->out_type & IFTYPE_I80) {
+ if (crtc->i80_mode) {
val |= VIDOUT_COMMAND_IF;
} else {
val |= VIDOUT_RGB_IF;
@@ -222,7 +224,7 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
VIDTCON2_HOZVAL(m->hdisplay - 1);
writel(val, ctx->addr + DECON_VIDTCON2);
- if (!(ctx->out_type & IFTYPE_I80)) {
+ if (!crtc->i80_mode) {
int vbp = m->crtc_vtotal - m->crtc_vsync_end;
int vfp = m->crtc_vsync_start - m->crtc_vdisplay;
@@ -277,16 +279,14 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_ARGB8888:
+ default:
val |= WINCONx_BPPMODE_32BPP_A8888;
val |= WINCONx_WSWP_F | WINCONx_BLD_PIX_F | WINCONx_ALPHA_SEL_F;
val |= WINCONx_BURSTLEN_16WORD;
break;
- default:
- DRM_ERROR("Proper pixel format is not set\n");
- return;
}
- DRM_DEBUG_KMS("bpp = %u\n", fb->format->cpp[0] * 8);
+ DRM_DEBUG_KMS("cpp = %u\n", fb->format->cpp[0]);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -329,7 +329,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
struct decon_context *ctx = crtc->ctx;
struct drm_framebuffer *fb = state->base.fb;
unsigned int win = plane->index;
- unsigned int bpp = fb->format->cpp[0];
+ unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
dma_addr_t dma_addr = exynos_drm_fb_dma_addr(fb, 0);
u32 val;
@@ -365,11 +365,11 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
if (!(ctx->out_type & IFTYPE_HDMI))
- val = BIT_VAL(pitch - state->crtc.w * bpp, 27, 14)
- | BIT_VAL(state->crtc.w * bpp, 13, 0);
+ val = BIT_VAL(pitch - state->crtc.w * cpp, 27, 14)
+ | BIT_VAL(state->crtc.w * cpp, 13, 0);
else
- val = BIT_VAL(pitch - state->crtc.w * bpp, 29, 15)
- | BIT_VAL(state->crtc.w * bpp, 14, 0);
+ val = BIT_VAL(pitch - state->crtc.w * cpp, 29, 15)
+ | BIT_VAL(state->crtc.w * cpp, 14, 0);
writel(val, ctx->addr + DECON_VIDW0xADD2(win));
decon_win_set_pixfmt(ctx, win, fb);
@@ -407,24 +407,19 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
static void decon_swreset(struct decon_context *ctx)
{
- unsigned int tries;
unsigned long flags;
+ u32 val;
+ int ret;
writel(0, ctx->addr + DECON_VIDCON0);
- for (tries = 2000; tries; --tries) {
- if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_STOP_STATUS)
- break;
- udelay(10);
- }
+ readl_poll_timeout(ctx->addr + DECON_VIDCON0, val,
+ ~val & VIDCON0_STOP_STATUS, 12, 20000);
writel(VIDCON0_SWRESET, ctx->addr + DECON_VIDCON0);
- for (tries = 2000; tries; --tries) {
- if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_SWRESET)
- break;
- udelay(10);
- }
+ ret = readl_poll_timeout(ctx->addr + DECON_VIDCON0, val,
+ ~val & VIDCON0_SWRESET, 12, 20000);
- WARN(tries == 0, "failed to software reset DECON\n");
+ WARN(ret < 0, "failed to software reset DECON\n");
spin_lock_irqsave(&ctx->vblank_lock, flags);
ctx->frame_id = 0;
@@ -515,6 +510,22 @@ err:
clk_disable_unprepare(ctx->clks[i]);
}
+static enum drm_mode_status decon_mode_valid(struct exynos_drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct decon_context *ctx = crtc->ctx;
+
+ ctx->irq = crtc->i80_mode ? ctx->irq_lcd_sys : ctx->irq_vsync;
+
+ if (ctx->irq)
+ return MODE_OK;
+
+ dev_info(ctx->dev, "Sink requires %s mode, but the appropriate interrupt is not provided.\n",
+ crtc->i80_mode ? "command" : "video");
+
+ return MODE_BAD;
+}
+
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable = decon_enable,
.disable = decon_disable,
@@ -524,6 +535,7 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.atomic_begin = decon_atomic_begin,
.update_plane = decon_update_plane,
.disable_plane = decon_disable_plane,
+ .mode_valid = decon_mode_valid,
.atomic_flush = decon_atomic_flush,
};
@@ -674,19 +686,22 @@ static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
MODULE_DEVICE_TABLE(of, exynos5433_decon_driver_dt_match);
static int decon_conf_irq(struct decon_context *ctx, const char *name,
- irq_handler_t handler, unsigned long int flags, bool required)
+ irq_handler_t handler, unsigned long int flags)
{
struct platform_device *pdev = to_platform_device(ctx->dev);
int ret, irq = platform_get_irq_byname(pdev, name);
if (irq < 0) {
- if (irq == -EPROBE_DEFER)
+ switch (irq) {
+ case -EPROBE_DEFER:
return irq;
- if (required)
- dev_err(ctx->dev, "cannot get %s IRQ\n", name);
- else
- irq = 0;
- return irq;
+ case -ENODATA:
+ case -ENXIO:
+ return 0;
+ default:
+ dev_err(ctx->dev, "failed to get %s IRQ: %d\n", name, irq);
+ return irq;
+ }
}
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_irq(ctx->dev, irq, handler, flags, "drm_decon", ctx);
@@ -714,11 +729,8 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
ctx->out_type = (unsigned long)of_device_get_match_data(dev);
spin_lock_init(&ctx->vblank_lock);
- if (ctx->out_type & IFTYPE_HDMI) {
+ if (ctx->out_type & IFTYPE_HDMI)
ctx->first_win = 1;
- } else if (of_get_child_by_name(dev->of_node, "i80-if-timings")) {
- ctx->out_type |= IFTYPE_I80;
- }
for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
struct clk *clk;
@@ -742,25 +754,23 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
return PTR_ERR(ctx->addr);
}
- if (ctx->out_type & IFTYPE_I80) {
- ret = decon_conf_irq(ctx, "lcd_sys", decon_irq_handler, 0, true);
- if (ret < 0)
- return ret;
- ctx->irq = ret;
+ ret = decon_conf_irq(ctx, "vsync", decon_irq_handler, 0);
+ if (ret < 0)
+ return ret;
+ ctx->irq_vsync = ret;
- ret = decon_conf_irq(ctx, "te", decon_te_irq_handler,
- IRQF_TRIGGER_RISING, false);
- if (ret < 0)
- return ret;
- if (ret) {
- ctx->te_irq = ret;
- ctx->out_type &= ~I80_HW_TRG;
- }
- } else {
- ret = decon_conf_irq(ctx, "vsync", decon_irq_handler, 0, true);
- if (ret < 0)
+ ret = decon_conf_irq(ctx, "lcd_sys", decon_irq_handler, 0);
+ if (ret < 0)
+ return ret;
+ ctx->irq_lcd_sys = ret;
+
+ ret = decon_conf_irq(ctx, "te", decon_te_irq_handler,
+ IRQF_TRIGGER_RISING);
+ if (ret < 0)
return ret;
- ctx->irq = ret;
+ if (ret) {
+ ctx->te_irq = ret;
+ ctx->out_type &= ~I80_HW_TRG;
}
if (ctx->out_type & I80_HW_TRG) {
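
The decon_swreset() hunk above replaces an open-coded busy loop with readl_poll_timeout() from <linux/iopoll.h>. A simplified model of that macro's semantics (a sketch, not the exact kernel implementation):

	/* Poll readl(addr) into val until cond holds or timeout_us elapses,
	 * sleeping roughly sleep_us between reads.  Returns 0 on success
	 * and -ETIMEDOUT otherwise; val holds the last value read. */
	#define readl_poll_timeout_sketch(addr, val, cond, sleep_us, timeout_us) \
	({ \
		ktime_t __end = ktime_add_us(ktime_get(), timeout_us); \
		int __ret = 0; \
		for (;;) { \
			(val) = readl(addr); \
			if (cond) \
				break; \
			if (ktime_after(ktime_get(), __end)) { \
				(val) = readl(addr); \
				__ret = (cond) ? 0 : -ETIMEDOUT; \
				break; \
			} \
			usleep_range((sleep_us) >> 2, sleep_us); \
		} \
		__ret; \
	})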
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 3e88269fdc2e..615efcf7782a 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -309,19 +309,14 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_BGRA8888:
+ default:
val |= WINCONx_BPPMODE_32BPP_BGRA | WINCONx_BLD_PIX |
WINCONx_ALPHA_SEL;
val |= WINCONx_BURSTLEN_16WORD;
break;
- default:
- DRM_DEBUG_KMS("invalid pixel size so using unpacked 24bpp.\n");
-
- val |= WINCONx_BPPMODE_24BPP_xRGB;
- val |= WINCONx_BURSTLEN_16WORD;
- break;
}
- DRM_DEBUG_KMS("bpp = %d\n", fb->format->cpp[0] * 8);
+ DRM_DEBUG_KMS("cpp = %d\n", fb->format->cpp[0]);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -398,7 +393,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
unsigned int last_x;
unsigned int last_y;
unsigned int win = plane->index;
- unsigned int bpp = fb->format->cpp[0];
+ unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
if (ctx->suspended)
@@ -418,7 +413,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
val = (unsigned long)exynos_drm_fb_dma_addr(fb, 0);
writel(val, ctx->regs + VIDW_BUF_START(win));
- padding = (pitch / bpp) - fb->width;
+ padding = (pitch / cpp) - fb->width;
/* buffer size */
writel(fb->width + padding, ctx->regs + VIDW_WHOLE_X(win));
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 385537b726a6..39629e7a80b9 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -155,7 +155,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
struct exynos_dp_device *dp = dev_get_drvdata(dev);
struct drm_encoder *encoder = &dp->encoder;
struct drm_device *drm_dev = data;
- int pipe, ret;
+ int ret;
/*
* Just like the probe function said, we don't need the
@@ -179,20 +179,15 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
- EXYNOS_DISPLAY_TYPE_LCD);
- if (pipe < 0)
- return pipe;
-
- encoder->possible_crtcs = 1 << pipe;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_LCD);
+ if (ret < 0)
+ return ret;
+
dp->plat_data.encoder = encoder;
return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index edbd98ff293e..b0c0621fcdf7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -13,6 +13,7 @@
*/
#include <drm/drmP.h>
+
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index c37078fbe0ea..6ce0821590df 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -16,6 +16,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_encoder.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
@@ -83,7 +84,19 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
exynos_crtc->ops->atomic_flush(exynos_crtc);
}
+static enum drm_mode_status exynos_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+ if (exynos_crtc->ops->mode_valid)
+ return exynos_crtc->ops->mode_valid(exynos_crtc, mode);
+
+ return MODE_OK;
+}
+
static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
+ .mode_valid = exynos_crtc_mode_valid,
.atomic_check = exynos_crtc_atomic_check,
.atomic_begin = exynos_crtc_atomic_begin,
.atomic_flush = exynos_crtc_atomic_flush,
@@ -191,16 +204,30 @@ err_crtc:
return ERR_PTR(ret);
}
-int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
+struct exynos_drm_crtc *exynos_drm_crtc_get_by_type(struct drm_device *drm_dev,
enum exynos_drm_output_type out_type)
{
struct drm_crtc *crtc;
drm_for_each_crtc(crtc, drm_dev)
if (to_exynos_crtc(crtc)->type == out_type)
- return drm_crtc_index(crtc);
+ return to_exynos_crtc(crtc);
- return -EPERM;
+ return ERR_PTR(-EPERM);
+}
+
+int exynos_drm_set_possible_crtcs(struct drm_encoder *encoder,
+ enum exynos_drm_output_type out_type)
+{
+ struct exynos_drm_crtc *crtc = exynos_drm_crtc_get_by_type(encoder->dev,
+ out_type);
+
+ if (IS_ERR(crtc))
+ return PTR_ERR(crtc);
+
+ encoder->possible_crtcs = drm_crtc_mask(&crtc->base);
+
+ return 0;
}
void exynos_drm_crtc_te_handler(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index ef58b64e3d2d..dec446109e6c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -15,21 +15,25 @@
#ifndef _EXYNOS_DRM_CRTC_H_
#define _EXYNOS_DRM_CRTC_H_
+
#include "exynos_drm_drv.h"
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
struct drm_plane *plane,
- enum exynos_drm_output_type type,
+ enum exynos_drm_output_type out_type,
const struct exynos_drm_crtc_ops *ops,
void *context);
void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc);
void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
struct exynos_drm_plane *exynos_plane);
-/* This function gets pipe value to crtc device matched with out_type. */
-int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
+/* This function returns the crtc device matched with out_type. */
+struct exynos_drm_crtc *exynos_drm_crtc_get_by_type(struct drm_device *drm_dev,
enum exynos_drm_output_type out_type);
+int exynos_drm_set_possible_crtcs(struct drm_encoder *encoder,
+ enum exynos_drm_output_type out_type);
+
/*
* This function calls the crtc device(manager)'s te_handler() callback
* to trigger to transfer video image at the tearing effect synchronization
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 76d80e5de521..66945e0dc57f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -202,19 +202,15 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
{
int ret;
- ret = exynos_drm_crtc_get_pipe_from_type(dev, EXYNOS_DISPLAY_TYPE_LCD);
- if (ret < 0)
- return ret;
-
- encoder->possible_crtcs = 1 << ret;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_LCD);
+ if (ret < 0)
+ return ret;
+
ret = exynos_dpi_create_connector(encoder);
if (ret) {
DRM_ERROR("failed to create connector ret = %d\n", ret);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a93de321706b..cf131c2aa23e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -91,6 +91,7 @@ struct exynos_drm_plane {
#define EXYNOS_DRM_PLANE_CAP_DOUBLE (1 << 0)
#define EXYNOS_DRM_PLANE_CAP_SCALE (1 << 1)
#define EXYNOS_DRM_PLANE_CAP_ZPOS (1 << 2)
+#define EXYNOS_DRM_PLANE_CAP_TILE (1 << 3)
/*
* Exynos DRM plane configuration structure.
@@ -117,6 +118,7 @@ struct exynos_drm_plane_config {
* @disable: disable the device
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @mode_valid: specific driver callback for mode validation
* @atomic_check: validate state
* @atomic_begin: prepare device to receive an update
* @atomic_flush: mark the end of device update
@@ -132,6 +134,8 @@ struct exynos_drm_crtc_ops {
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc);
+ enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
+ const struct drm_display_mode *mode);
int (*atomic_check)(struct exynos_drm_crtc *crtc,
struct drm_crtc_state *state);
void (*atomic_begin)(struct exynos_drm_crtc *crtc);
@@ -162,6 +166,7 @@ struct exynos_drm_crtc {
const struct exynos_drm_crtc_ops *ops;
void *ctx;
struct exynos_drm_clk *pipe_clk;
+ bool i80_mode : 1;
};
static inline void exynos_drm_pipe_clk_enable(struct exynos_drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index c399dc9b325f..7904ffa9abfb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -254,7 +254,6 @@ struct exynos_dsi {
struct drm_encoder encoder;
struct mipi_dsi_host dsi_host;
struct drm_connector connector;
- struct device_node *panel_node;
struct drm_panel *panel;
struct device *dev;
@@ -1329,12 +1328,13 @@ static int exynos_dsi_init(struct exynos_dsi *dsi)
return 0;
}
-static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
+static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi,
+ struct device *panel)
{
int ret;
int te_gpio_irq;
- dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
+ dsi->te_gpio = of_get_named_gpio(panel->of_node, "te-gpios", 0);
if (dsi->te_gpio == -ENOENT)
return 0;
@@ -1374,85 +1374,6 @@ static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
}
}
-static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
- struct mipi_dsi_device *device)
-{
- struct exynos_dsi *dsi = host_to_dsi(host);
-
- dsi->lanes = device->lanes;
- dsi->format = device->format;
- dsi->mode_flags = device->mode_flags;
- dsi->panel_node = device->dev.of_node;
-
- /*
- * This is a temporary solution and should be made by more generic way.
- *
- * If attached panel device is for command mode one, dsi should register
- * TE interrupt handler.
- */
- if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO)) {
- int ret = exynos_dsi_register_te_irq(dsi);
-
- if (ret)
- return ret;
- }
-
- if (dsi->connector.dev)
- drm_helper_hpd_irq_event(dsi->connector.dev);
-
- return 0;
-}
-
-static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
- struct mipi_dsi_device *device)
-{
- struct exynos_dsi *dsi = host_to_dsi(host);
-
- exynos_dsi_unregister_te_irq(dsi);
-
- dsi->panel_node = NULL;
-
- if (dsi->connector.dev)
- drm_helper_hpd_irq_event(dsi->connector.dev);
-
- return 0;
-}
-
-static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
- const struct mipi_dsi_msg *msg)
-{
- struct exynos_dsi *dsi = host_to_dsi(host);
- struct exynos_dsi_transfer xfer;
- int ret;
-
- if (!(dsi->state & DSIM_STATE_ENABLED))
- return -EINVAL;
-
- if (!(dsi->state & DSIM_STATE_INITIALIZED)) {
- ret = exynos_dsi_init(dsi);
- if (ret)
- return ret;
- dsi->state |= DSIM_STATE_INITIALIZED;
- }
-
- ret = mipi_dsi_create_packet(&xfer.packet, msg);
- if (ret < 0)
- return ret;
-
- xfer.rx_len = msg->rx_len;
- xfer.rx_payload = msg->rx_buf;
- xfer.flags = msg->flags;
-
- ret = exynos_dsi_transfer(dsi, &xfer);
- return (ret < 0) ? ret : xfer.rx_done;
-}
-
-static const struct mipi_dsi_host_ops exynos_dsi_ops = {
- .attach = exynos_dsi_host_attach,
- .detach = exynos_dsi_host_detach,
- .transfer = exynos_dsi_host_transfer,
-};
-
static void exynos_dsi_enable(struct drm_encoder *encoder)
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1508,25 +1429,7 @@ static void exynos_dsi_disable(struct drm_encoder *encoder)
static enum drm_connector_status
exynos_dsi_detect(struct drm_connector *connector, bool force)
{
- struct exynos_dsi *dsi = connector_to_dsi(connector);
-
- if (!dsi->panel) {
- dsi->panel = of_drm_find_panel(dsi->panel_node);
- if (dsi->panel)
- drm_panel_attach(dsi->panel, &dsi->connector);
- } else if (!dsi->panel_node) {
- struct drm_encoder *encoder;
-
- encoder = platform_get_drvdata(to_platform_device(dsi->dev));
- exynos_dsi_disable(encoder);
- drm_panel_detach(dsi->panel);
- dsi->panel = NULL;
- }
-
- if (dsi->panel)
- return connector_status_connected;
-
- return connector_status_disconnected;
+ return connector->status;
}
static void exynos_dsi_connector_destroy(struct drm_connector *connector)
@@ -1575,6 +1478,7 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
return ret;
}
+ connector->status = connector_status_disconnected;
drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
drm_mode_connector_attach_encoder(connector, encoder);
@@ -1611,6 +1515,105 @@ static const struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
+static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+ struct drm_device *drm = dsi->connector.dev;
+
+ /*
+ * This is a temporary solution and should be handled in a more
+ * generic way.
+ *
+ * If the attached panel is a command mode one, dsi should register
+ * a TE interrupt handler.
+ */
+ if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ int ret = exynos_dsi_register_te_irq(dsi, &device->dev);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&drm->mode_config.mutex);
+
+ dsi->lanes = device->lanes;
+ dsi->format = device->format;
+ dsi->mode_flags = device->mode_flags;
+ dsi->panel = of_drm_find_panel(device->dev.of_node);
+ if (dsi->panel) {
+ drm_panel_attach(dsi->panel, &dsi->connector);
+ dsi->connector.status = connector_status_connected;
+ }
+ exynos_drm_crtc_get_by_type(drm, EXYNOS_DISPLAY_TYPE_LCD)->i80_mode =
+ !(dsi->mode_flags & MIPI_DSI_MODE_VIDEO);
+
+ mutex_unlock(&drm->mode_config.mutex);
+
+ if (drm->mode_config.poll_enabled)
+ drm_kms_helper_hotplug_event(drm);
+
+ return 0;
+}
+
+static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+ struct drm_device *drm = dsi->connector.dev;
+
+ mutex_lock(&drm->mode_config.mutex);
+
+ if (dsi->panel) {
+ exynos_dsi_disable(&dsi->encoder);
+ drm_panel_detach(dsi->panel);
+ dsi->panel = NULL;
+ dsi->connector.status = connector_status_disconnected;
+ }
+
+ mutex_unlock(&drm->mode_config.mutex);
+
+ if (drm->mode_config.poll_enabled)
+ drm_kms_helper_hotplug_event(drm);
+
+ exynos_dsi_unregister_te_irq(dsi);
+
+ return 0;
+}
+
+static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+ struct exynos_dsi_transfer xfer;
+ int ret;
+
+ if (!(dsi->state & DSIM_STATE_ENABLED))
+ return -EINVAL;
+
+ if (!(dsi->state & DSIM_STATE_INITIALIZED)) {
+ ret = exynos_dsi_init(dsi);
+ if (ret)
+ return ret;
+ dsi->state |= DSIM_STATE_INITIALIZED;
+ }
+
+ ret = mipi_dsi_create_packet(&xfer.packet, msg);
+ if (ret < 0)
+ return ret;
+
+ xfer.rx_len = msg->rx_len;
+ xfer.rx_payload = msg->rx_buf;
+ xfer.flags = msg->flags;
+
+ ret = exynos_dsi_transfer(dsi, &xfer);
+ return (ret < 0) ? ret : xfer.rx_done;
+}
+
+static const struct mipi_dsi_host_ops exynos_dsi_ops = {
+ .attach = exynos_dsi_host_attach,
+ .detach = exynos_dsi_host_detach,
+ .transfer = exynos_dsi_host_transfer,
+};
+
static int exynos_dsi_of_read_u32(const struct device_node *np,
const char *propname, u32 *out_value)
{
@@ -1662,20 +1665,15 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
struct drm_bridge *bridge;
int ret;
- ret = exynos_drm_crtc_get_pipe_from_type(drm_dev,
- EXYNOS_DISPLAY_TYPE_LCD);
- if (ret < 0)
- return ret;
-
- encoder->possible_crtcs = 1 << ret;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_LCD);
+ if (ret < 0)
+ return ret;
+
ret = exynos_dsi_create_connector(encoder);
if (ret) {
DRM_ERROR("failed to create connector ret = %d\n", ret);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 6592f50d460a..8208df56a88f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -225,4 +225,6 @@ void exynos_drm_mode_config_init(struct drm_device *dev)
dev->mode_config.funcs = &exynos_drm_mode_config_funcs;
dev->mode_config.helper_private = &exynos_drm_mode_config_helpers;
+
+ dev->mode_config.allow_fb_modifiers = true;
}
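
With allow_fb_modifiers set, userspace may attach format modifiers when creating framebuffers. A sketch using libdrm (hypothetical buffer values, not part of this patch; the tiled modifier is only honored on planes advertising EXYNOS_DRM_PLANE_CAP_TILE, per the plane and mixer changes below):

	/* handles/pitches/offsets describe an NV12 buffer (two planes). */
	uint64_t mods[4] = {
		DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
		DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
	};
	uint32_t fb_id;
	int ret = drmModeAddFB2WithModifiers(fd, width, height,
					     DRM_FORMAT_NV12,
					     handles, pitches, offsets,
					     mods, &fb_id,
					     DRM_MODE_FB_MODIFIERS);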
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 60f93cad6643..d42ae2bc3e56 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -583,18 +583,12 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_ARGB8888:
+ default:
val |= WINCON1_BPPMODE_25BPP_A1888
| WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
val |= WINCONx_WSWP;
val |= WINCONx_BURSTLEN_16WORD;
break;
- default:
- DRM_DEBUG_KMS("invalid pixel size so using unpacked 24bpp.\n");
-
- val |= WINCON0_BPPMODE_24BPP_888;
- val |= WINCONx_WSWP;
- val |= WINCONx_BURSTLEN_16WORD;
- break;
}
/*
@@ -718,13 +712,13 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
unsigned long val, size, offset;
unsigned int last_x, last_y, buf_offsize, line_size;
unsigned int win = plane->index;
- unsigned int bpp = fb->format->cpp[0];
+ unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
if (ctx->suspended)
return;
- offset = state->src.x * bpp;
+ offset = state->src.x * cpp;
offset += state->src.y * pitch;
/* buffer start address */
@@ -743,8 +737,8 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
state->crtc.w, state->crtc.h);
/* buffer size */
- buf_offsize = pitch - (state->crtc.w * bpp);
- line_size = state->crtc.w * bpp;
+ buf_offsize = pitch - (state->crtc.w * cpp);
+ line_size = state->crtc.w * cpp;
val = VIDW_BUF_SIZE_OFFSET(buf_offsize) |
VIDW_BUF_SIZE_PAGEWIDTH(line_size) |
VIDW_BUF_SIZE_OFFSET_E(buf_offsize) |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 16bbee897e0d..ba4a32b132ba 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -21,9 +21,12 @@
#include <linux/component.h>
#include <linux/pm_runtime.h>
#include <drm/drmP.h>
+#include <drm/drm_encoder.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include "exynos_drm_drv.h"
+
/* Sysreg registers for MIC */
#define DSD_CFG_MUX 0x1004
#define MIC0_RGB_MUX (1 << 0)
@@ -85,12 +88,6 @@
#define MIC_BS_SIZE_2D(x) ((x) & 0x3fff)
-enum {
- ENDPOINT_DECON_NODE,
- ENDPOINT_DSI_NODE,
- NUM_ENDPOINTS
-};
-
static char *clk_names[] = { "pclk_mic0", "sclk_rgb_vclk_to_mic0" };
#define NUM_CLKS ARRAY_SIZE(clk_names)
static DEFINE_MUTEX(mic_mutex);
@@ -229,36 +226,6 @@ static void mic_set_reg_on(struct exynos_mic *mic, bool enable)
writel(reg, mic->reg + MIC_OP);
}
-static int parse_dt(struct exynos_mic *mic)
-{
- int ret = 0, i, j;
- struct device_node *remote_node;
- struct device_node *nodes[3];
-
- /*
- * The order of endpoints does matter.
- * The first node must be for decon and the second one must be for dsi.
- */
- for (i = 0, j = 0; i < NUM_ENDPOINTS; i++) {
- remote_node = of_graph_get_remote_node(mic->dev->of_node, i, 0);
- if (!remote_node) {
- ret = -EPIPE;
- goto exit;
- }
- nodes[j++] = remote_node;
-
- if (i == ENDPOINT_DECON_NODE &&
- of_get_child_by_name(remote_node, "i80-if-timings"))
- mic->i80_mode = 1;
- }
-
-exit:
- while (--j > -1)
- of_node_put(nodes[j]);
-
- return ret;
-}
-
static void mic_disable(struct drm_bridge *bridge) { }
static void mic_post_disable(struct drm_bridge *bridge)
@@ -286,6 +253,7 @@ static void mic_mode_set(struct drm_bridge *bridge,
mutex_lock(&mic_mutex);
drm_display_mode_to_videomode(mode, &mic->vm);
+ mic->i80_mode = to_exynos_crtc(bridge->encoder->crtc)->i80_mode;
mutex_unlock(&mic_mutex);
}
@@ -417,10 +385,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
mic->dev = dev;
- ret = parse_dt(mic);
- if (ret)
- goto err;
-
ret = of_address_to_resource(dev->of_node, 0, &res);
if (ret) {
DRM_ERROR("mic: Failed to get mem region for MIC\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 8de74009dee4..d2a90dae5c71 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -179,6 +179,29 @@ static struct drm_plane_funcs exynos_plane_funcs = {
};
static int
+exynos_drm_plane_check_format(const struct exynos_drm_plane_config *config,
+ struct exynos_drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->base.fb;
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
+ if (!(config->capabilities & EXYNOS_DRM_PLANE_CAP_TILE))
+ return -ENOTSUPP;
+ break;
+
+ case DRM_FORMAT_MOD_LINEAR:
+ break;
+
+ default:
+ DRM_ERROR("unsupported pixel format modifier");
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
struct exynos_drm_plane_state *state)
{
@@ -222,6 +245,10 @@ static int exynos_plane_atomic_check(struct drm_plane *plane,
/* translate state into exynos_state */
exynos_plane_mode_set(exynos_state);
+ ret = exynos_drm_plane_check_format(exynos_plane->config, exynos_state);
+ if (ret)
+ return ret;
+
ret = exynos_drm_plane_check_size(exynos_plane->config, exynos_state);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 9186a654c3b5..53e03f8af3d5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -381,7 +381,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_plane *exynos_plane;
struct exynos_drm_plane_config plane_config = { 0 };
unsigned int i;
- int pipe, ret;
+ int ret;
ctx->drm_dev = drm_dev;
@@ -406,20 +406,15 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(ctx->crtc);
}
- pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
- EXYNOS_DISPLAY_TYPE_VIDI);
- if (pipe < 0)
- return pipe;
-
- encoder->possible_crtcs = 1 << pipe;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_VIDI);
+ if (ret < 0)
+ return ret;
+
ret = vidi_create_connector(encoder);
if (ret) {
DRM_ERROR("failed to create connector ret = %d\n", ret);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index d70eeb8c5f75..214fa5e51963 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1697,32 +1697,25 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm_dev = data;
struct hdmi_context *hdata = dev_get_drvdata(dev);
struct drm_encoder *encoder = &hdata->encoder;
- struct exynos_drm_crtc *exynos_crtc;
- struct drm_crtc *crtc;
- int ret, pipe;
+ struct exynos_drm_crtc *crtc;
+ int ret;
hdata->drm_dev = drm_dev;
- pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
- EXYNOS_DISPLAY_TYPE_HDMI);
- if (pipe < 0)
- return pipe;
-
hdata->phy_clk.enable = hdmiphy_clk_enable;
- crtc = drm_crtc_from_index(drm_dev, pipe);
- exynos_crtc = to_exynos_crtc(crtc);
- exynos_crtc->pipe_clk = &hdata->phy_clk;
-
- encoder->possible_crtcs = 1 << pipe;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_HDMI);
+ if (ret < 0)
+ return ret;
+
+ crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
+ crtc->pipe_clk = &hdata->phy_clk;
+
ret = hdmi_create_connector(encoder);
if (ret) {
DRM_ERROR("failed to create connector ret = %d\n", ret);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index a998a8dd783c..002755415e00 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -148,7 +148,8 @@ static const struct exynos_drm_plane_config plane_configs[MIXER_WIN_NR] = {
.pixel_formats = vp_formats,
.num_pixel_formats = ARRAY_SIZE(vp_formats),
.capabilities = EXYNOS_DRM_PLANE_CAP_SCALE |
- EXYNOS_DRM_PLANE_CAP_ZPOS,
+ EXYNOS_DRM_PLANE_CAP_ZPOS |
+ EXYNOS_DRM_PLANE_CAP_TILE,
},
};
@@ -483,29 +484,18 @@ static void vp_video_buffer(struct mixer_context *ctx,
unsigned int priority = state->base.normalized_zpos + 1;
unsigned long flags;
dma_addr_t luma_addr[2], chroma_addr[2];
- bool tiled_mode = false;
- bool crcb_mode = false;
+ bool is_tiled, is_nv21;
u32 val;
- switch (fb->format->format) {
- case DRM_FORMAT_NV12:
- crcb_mode = false;
- break;
- case DRM_FORMAT_NV21:
- crcb_mode = true;
- break;
- default:
- DRM_ERROR("pixel format for vp is wrong [%d].\n",
- fb->format->format);
- return;
- }
+ is_nv21 = (fb->format->format == DRM_FORMAT_NV21);
+ is_tiled = (fb->modifier == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE);
luma_addr[0] = exynos_drm_fb_dma_addr(fb, 0);
chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1);
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
__set_bit(MXR_BIT_INTERLACE, &ctx->flags);
- if (tiled_mode) {
+ if (is_tiled) {
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
@@ -525,14 +515,14 @@ static void vp_video_buffer(struct mixer_context *ctx,
vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);
/* setup format */
- val = (crcb_mode ? VP_MODE_NV21 : VP_MODE_NV12);
- val |= (tiled_mode ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
+ val = (is_nv21 ? VP_MODE_NV21 : VP_MODE_NV12);
+ val |= (is_tiled ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
VP_IMG_VSIZE(fb->height));
- /* chroma height has to reduced by 2 to avoid chroma distorions */
+ /* chroma plane for NV12/NV21 is half the height of the luma plane */
vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
VP_IMG_VSIZE(fb->height / 2));
@@ -594,7 +584,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
unsigned long flags;
unsigned int win = plane->index;
unsigned int x_ratio = 0, y_ratio = 0;
- unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
+ unsigned int dst_x_offset, dst_y_offset;
dma_addr_t dma_addr;
unsigned int fmt;
u32 val;
@@ -616,12 +606,9 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
+ default:
fmt = MXR_FORMAT_ARGB8888;
break;
-
- default:
- DRM_DEBUG_KMS("pixelformat unsupported by mixer\n");
- return;
}
/* ratio is already checked by common plane code */
@@ -631,12 +618,10 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
dst_x_offset = state->crtc.x;
dst_y_offset = state->crtc.y;
- /* converting dma address base and source offset */
+ /* translate dma address base s.t. the source image offset is zero */
dma_addr = exynos_drm_fb_dma_addr(fb, 0)
+ (state->src.x * fb->format->cpp[0])
+ (state->src.y * fb->pitches[0]);
- src_x_offset = 0;
- src_y_offset = 0;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
__set_bit(MXR_BIT_INTERLACE, &ctx->flags);
@@ -667,11 +652,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
val |= MXR_GRP_WH_V_SCALE(y_ratio);
mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
- /* setup offsets in source image */
- val = MXR_GRP_SXY_SX(src_x_offset);
- val |= MXR_GRP_SXY_SY(src_y_offset);
- mixer_reg_write(res, MXR_GRAPHIC_SXY(win), val);
-
/* setup offsets in display image */
val = MXR_GRP_DXY_DX(dst_x_offset);
val |= MXR_GRP_DXY_DY(dst_y_offset);
@@ -748,6 +728,10 @@ static void mixer_win_reset(struct mixer_context *ctx)
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
+ /* set all source image offsets to zero */
+ mixer_reg_write(res, MXR_GRAPHIC_SXY(0), 0);
+ mixer_reg_write(res, MXR_GRAPHIC_SXY(1), 0);
+
spin_unlock_irqrestore(&res->reg_slock, flags);
}
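
The graphics-window change above folds the source (x, y) offset into the scanout DMA address instead of programming MXR_GRAPHIC_SXY per frame (the SXY registers are now zeroed once in mixer_win_reset()). A minimal sketch of the address arithmetic, with illustrative names rather than driver code:

#include <linux/types.h>

static dma_addr_t scanout_base(dma_addr_t fb_base, unsigned int src_x,
			       unsigned int src_y, unsigned int cpp,
			       unsigned int pitch)
{
	/* one pixel costs cpp bytes, one scanline costs pitch bytes */
	return fb_base + src_x * cpp + src_y * pitch;
}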
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index f77dcfaade6c..b4c7af3ab6ae 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -603,6 +603,72 @@ static void dsi_encoder_enable(struct drm_encoder *encoder)
dsi->enable = true;
}
+static enum drm_mode_status dsi_encoder_phy_mode_valid(
+ struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ struct dw_dsi *dsi = encoder_to_dsi(encoder);
+ struct mipi_phy_params phy;
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+ u32 req_kHz, act_kHz, lane_byte_clk_kHz;
+
+ /* Calculate the lane byte clk using the adjusted mode clk */
+ memset(&phy, 0, sizeof(phy));
+ req_kHz = mode->clock * bpp / dsi->lanes;
+ act_kHz = dsi_calc_phy_rate(req_kHz, &phy);
+ lane_byte_clk_kHz = act_kHz / 8;
+
+ DRM_DEBUG_DRIVER("Checking mode %ix%i-%i@%i clock: %i...",
+ mode->hdisplay, mode->vdisplay, bpp,
+ drm_mode_vrefresh(mode), mode->clock);
+
+ /*
+ * Make sure the adjusted mode clock and the lane byte clk
+ * have a common denominator base frequency
+ */
+ if (mode->clock / dsi->lanes == lane_byte_clk_kHz / 3) {
+ DRM_DEBUG_DRIVER("OK!\n");
+ return MODE_OK;
+ }
+
+ DRM_DEBUG_DRIVER("BAD!\n");
+ return MODE_BAD;
+}
+
+static enum drm_mode_status dsi_encoder_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ const struct drm_crtc_helper_funcs *crtc_funcs = NULL;
+ struct drm_crtc *crtc = NULL;
+ struct drm_display_mode adj_mode;
+ enum drm_mode_status ret;
+
+ /*
+ * The crtc might adjust the mode, so go through the
+ * possible crtcs (technically just one) and call
+ * mode_fixup to figure out the adjusted mode before we
+ * validate it.
+ */
+ drm_for_each_crtc(crtc, encoder->dev) {
+ /*
+ * reset adj_mode to the mode value each time,
+ * so we don't adjust the mode twice
+ */
+ drm_mode_copy(&adj_mode, mode);
+
+ crtc_funcs = crtc->helper_private;
+ if (crtc_funcs && crtc_funcs->mode_fixup)
+ if (!crtc_funcs->mode_fixup(crtc, mode, &adj_mode))
+ return MODE_BAD;
+
+ ret = dsi_encoder_phy_mode_valid(encoder, &adj_mode);
+ if (ret != MODE_OK)
+ return ret;
+ }
+ return MODE_OK;
+}
+
static void dsi_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
@@ -622,6 +688,7 @@ static int dsi_encoder_atomic_check(struct drm_encoder *encoder,
static const struct drm_encoder_helper_funcs dw_encoder_helper_funcs = {
.atomic_check = dsi_encoder_atomic_check,
+ .mode_valid = dsi_encoder_mode_valid,
.mode_set = dsi_encoder_mode_set,
.enable = dsi_encoder_enable,
.disable = dsi_encoder_disable
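
To make the divisibility check in dsi_encoder_phy_mode_valid() concrete, here is a worked example with illustrative numbers, assuming the PHY PLL locks exactly on the requested rate:

/*
 * 1080p60 (148500 kHz), RGB888 (bpp = 24), 4 lanes:
 *
 *   req_kHz           = 148500 * 24 / 4 = 891000
 *   act_kHz           = 891000 (PLL assumed to hit the request exactly)
 *   lane_byte_clk_kHz = 891000 / 8 = 111375
 *
 *   mode->clock / dsi->lanes = 148500 / 4 = 37125
 *   lane_byte_clk_kHz / 3    = 111375 / 3 = 37125   -> MODE_OK
 *
 * If dsi_calc_phy_rate() has to round act_kHz away from the request,
 * the two sides no longer match and the mode is rejected as MODE_BAD.
 */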
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 39f7d15673ed..9823477b1855 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -178,6 +178,19 @@ static void ade_init(struct ade_hw_ctx *ctx)
FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
}
+static bool ade_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+
+ adjusted_mode->clock =
+ clk_round_rate(ctx->ade_pix_clk, mode->clock * 1000) / 1000;
+ return true;
+}
+
static void ade_set_pix_clk(struct ade_hw_ctx *ctx,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
@@ -555,6 +568,7 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = {
+ .mode_fixup = ade_crtc_mode_fixup,
.mode_set_nofb = ade_crtc_mode_set_nofb,
.atomic_begin = ade_crtc_atomic_begin,
.atomic_flush = ade_crtc_atomic_flush,
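
The fixup snaps the requested pixel clock to the nearest rate the ADE pixel clock PLL can actually generate, so the DSI encoder's mode_valid callback downstream validates an achievable clock. With illustrative numbers:

/*
 * If the panel requests mode->clock = 148500 (kHz) but the closest
 * rate the PLL can produce is 144000000 Hz, clk_round_rate() returns
 * 144000000 and adjusted_mode->clock becomes 144000, which is what
 * dsi_encoder_mode_valid() then checks.
 */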
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3d74f3a27c13..4c2016237d61 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -2129,9 +2129,7 @@ await_fence_array(struct i915_execbuffer *eb,
if (!(flags & I915_EXEC_FENCE_WAIT))
continue;
- rcu_read_lock();
- fence = dma_fence_get_rcu_safe(&syncobj->fence);
- rcu_read_unlock();
+ fence = drm_syncobj_fence_get(syncobj);
if (!fence)
return -EINVAL;
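
The removed open-coded sequence is exactly what the new helper wraps; at the time of this series, drm_syncobj_fence_get() in include/drm/drm_syncobj.h is essentially:

static inline struct dma_fence *
drm_syncobj_fence_get(struct drm_syncobj *syncobj)
{
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&syncobj->fence);
	rcu_read_unlock();

	return fence;
}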
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 0e3828ed1e46..7791313405b5 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -486,8 +486,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu = &a3xx_gpu->base;
gpu = &adreno_gpu->base;
- a3xx_gpu->pdev = pdev;
-
gpu->perfcntrs = perfcntrs;
gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
index 85ff66cbddd6..ab60dc9e344e 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -28,7 +28,6 @@
struct a3xx_gpu {
struct adreno_gpu base;
- struct platform_device *pdev;
/* if OCMEM is used for GMEM: */
uint32_t ocmem_base;
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 19abf229b08d..58341ef6f15b 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -568,8 +568,6 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu = &a4xx_gpu->base;
gpu = &adreno_gpu->base;
- a4xx_gpu->pdev = pdev;
-
gpu->perfcntrs = NULL;
gpu->num_perfcntrs = 0;
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
index 01247204ac92..f757184328a3 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
@@ -23,7 +23,6 @@
struct a4xx_gpu {
struct adreno_gpu base;
- struct platform_device *pdev;
/* if OCMEM is used for GMEM: */
uint32_t ocmem_base;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index f9eae03aa1dc..17c59d839e6f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -284,28 +284,14 @@ static int a5xx_me_init(struct msm_gpu *gpu)
static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
const struct firmware *fw, u64 *iova)
{
- struct drm_device *drm = gpu->dev;
struct drm_gem_object *bo;
void *ptr;
- bo = msm_gem_new_locked(drm, fw->size - 4, MSM_BO_UNCACHED);
- if (IS_ERR(bo))
- return bo;
+ ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
+ MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
- ptr = msm_gem_get_vaddr(bo);
- if (!ptr) {
- drm_gem_object_unreference(bo);
- return ERR_PTR(-ENOMEM);
- }
-
- if (iova) {
- int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
-
- if (ret) {
- drm_gem_object_unreference(bo);
- return ERR_PTR(ret);
- }
- }
+ if (IS_ERR(ptr))
+ return ERR_CAST(ptr);
memcpy(ptr, &fw->data[4], fw->size - 4);
@@ -372,8 +358,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
static bool loaded;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
- struct platform_device *pdev = a5xx_gpu->pdev;
+ struct platform_device *pdev = gpu->pdev;
int ret;
/*
@@ -410,6 +395,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
@@ -812,6 +798,27 @@ static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
}
+static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ dev_err(dev->dev, "gpu fault fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ gpu->funcs->last_fence(gpu),
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+ gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
+ gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
+ gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
+ gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ del_timer(&gpu->hangcheck_timer);
+
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
#define RBBM_ERROR_MASK \
(A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
@@ -838,6 +845,9 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
a5xx_cp_err_irq(gpu);
+ if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
+ a5xx_fault_detect_irq(gpu);
+
if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
a5xx_uche_err_irq(gpu);
@@ -1015,7 +1025,6 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
adreno_gpu = &a5xx_gpu->base;
gpu = &adreno_gpu->base;
- a5xx_gpu->pdev = pdev;
adreno_gpu->registers = a5xx_registers;
adreno_gpu->reg_offsets = a5xx_register_offsets;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 1137092241d5..e94451685bf8 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -23,7 +23,6 @@
struct a5xx_gpu {
struct adreno_gpu base;
- struct platform_device *pdev;
struct drm_gem_object *pm4_bo;
uint64_t pm4_iova;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 87af6eea0483..04aab1dcae2b 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -294,16 +294,10 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
*/
bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
- a5xx_gpu->gpmu_bo = msm_gem_new_locked(drm, bosize, MSM_BO_UNCACHED);
- if (IS_ERR(a5xx_gpu->gpmu_bo))
- goto err;
-
- if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
- &a5xx_gpu->gpmu_iova))
- goto err;
-
- ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
- if (!ptr)
+ ptr = msm_gem_kernel_new_locked(drm, bosize,
+ MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
+ &a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
+ if (IS_ERR(ptr))
goto err;
while (cmds_size > 0) {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 7414c6bbd582..c8b4ac254bb5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -337,11 +337,6 @@ void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
}
-static const char *iommu_ports[] = {
- "gfx3d_user", "gfx3d_priv",
- "gfx3d1_user", "gfx3d1_priv",
-};
-
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
{
@@ -373,15 +368,15 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.ringsz = RB_SIZE;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
adreno_gpu->info->name, &adreno_gpu_config);
if (ret)
return ret;
- pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
-
ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
if (ret) {
dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
@@ -396,37 +391,17 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- if (gpu->aspace && gpu->aspace->mmu) {
- struct msm_mmu *mmu = gpu->aspace->mmu;
- ret = mmu->funcs->attach(mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
- if (ret)
- return ret;
- }
+ adreno_gpu->memptrs = msm_gem_kernel_new(drm,
+ sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace,
+ &adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova);
- adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
- MSM_BO_UNCACHED);
- if (IS_ERR(adreno_gpu->memptrs_bo)) {
- ret = PTR_ERR(adreno_gpu->memptrs_bo);
- adreno_gpu->memptrs_bo = NULL;
- dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
- return ret;
- }
-
- adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
if (IS_ERR(adreno_gpu->memptrs)) {
- dev_err(drm->dev, "could not vmap memptrs\n");
- return -ENOMEM;
- }
-
- ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
- &adreno_gpu->memptrs_iova);
- if (ret) {
- dev_err(drm->dev, "could not map memptrs: %d\n", ret);
- return ret;
+ ret = PTR_ERR(adreno_gpu->memptrs);
+ adreno_gpu->memptrs = NULL;
+ dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
}
- return 0;
+ return ret;
}
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
@@ -446,10 +421,4 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
release_firmware(adreno_gpu->pfp);
msm_gpu_cleanup(gpu);
-
- if (gpu->aspace) {
- gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
- iommu_ports, ARRAY_SIZE(iommu_ports));
- msm_gem_address_space_put(gpu->aspace);
- }
}
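
Moving the pm_runtime setup ahead of msm_gpu_init() matters, presumably because the init path now takes runtime PM references itself and pm_runtime_get_sync() only reaches the hardware once runtime PM has been enabled. The assumed bring-up ordering, as a sketch:

pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);		/* must precede the first get */

pm_runtime_get_sync(&pdev->dev);	/* powers the device up */
/* ... probe-time hardware access ... */
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);	/* suspends after the delay */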
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 311c1c1e7d6c..98742d7af6dc 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -161,12 +161,17 @@ static const struct of_device_id dt_match[] = {
{}
};
+static const struct dev_pm_ops dsi_pm_ops = {
+ SET_RUNTIME_PM_OPS(msm_dsi_runtime_suspend, msm_dsi_runtime_resume, NULL)
+};
+
static struct platform_driver dsi_driver = {
.probe = dsi_dev_probe,
.remove = dsi_dev_remove,
.driver = {
.name = "msm_dsi",
.of_match_table = dt_match,
+ .pm = &dsi_pm_ops,
},
};
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 9e6017387efb..2302046197a8 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -179,6 +179,8 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host);
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev);
int msm_dsi_host_init(struct msm_dsi *msm_dsi);
+int msm_dsi_runtime_suspend(struct device *dev);
+int msm_dsi_runtime_resume(struct device *dev);
/* dsi phy */
struct msm_dsi_phy;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index c7b612c3d771..dbb31a014419 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -135,7 +135,6 @@ struct msm_dsi_host {
struct completion video_comp;
struct mutex dev_mutex;
struct mutex cmd_mutex;
- struct mutex clk_mutex;
spinlock_t intr_lock; /* Protect interrupt ctrl register */
u32 err_work_state;
@@ -221,6 +220,8 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
goto put_gdsc;
}
+ pm_runtime_get_sync(dev);
+
ret = regulator_enable(gdsc_reg);
if (ret) {
pr_err("%s: unable to enable gdsc\n", __func__);
@@ -247,6 +248,7 @@ disable_clks:
clk_disable_unprepare(ahb_clk);
disable_gdsc:
regulator_disable(gdsc_reg);
+ pm_runtime_put_autosuspend(dev);
put_clk:
clk_put(ahb_clk);
put_gdsc:
@@ -455,6 +457,34 @@ static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
clk_disable_unprepare(msm_host->bus_clks[i]);
}
+int msm_dsi_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ if (!msm_host->cfg_hnd)
+ return 0;
+
+ dsi_bus_clk_disable(msm_host);
+
+ return 0;
+}
+
+int msm_dsi_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ if (!msm_host->cfg_hnd)
+ return 0;
+
+ return dsi_bus_clk_enable(msm_host);
+}
+
static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
@@ -596,35 +626,6 @@ static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
}
}
-static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
-{
- int ret = 0;
-
- mutex_lock(&msm_host->clk_mutex);
- if (enable) {
- ret = dsi_bus_clk_enable(msm_host);
- if (ret) {
- pr_err("%s: Can not enable bus clk, %d\n",
- __func__, ret);
- goto unlock_ret;
- }
- ret = dsi_link_clk_enable(msm_host);
- if (ret) {
- pr_err("%s: Can not enable link clk, %d\n",
- __func__, ret);
- dsi_bus_clk_disable(msm_host);
- goto unlock_ret;
- }
- } else {
- dsi_link_clk_disable(msm_host);
- dsi_bus_clk_disable(msm_host);
- }
-
-unlock_ret:
- mutex_unlock(&msm_host->clk_mutex);
- return ret;
-}
-
static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
{
struct drm_display_mode *mode = msm_host->mode;
@@ -1699,6 +1700,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
}
msm_host->pdev = pdev;
+ msm_dsi->host = &msm_host->base;
ret = dsi_host_parse_dt(msm_host);
if (ret) {
@@ -1713,6 +1715,8 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail;
}
+ pm_runtime_enable(&pdev->dev);
+
msm_host->cfg_hnd = dsi_get_config(msm_host);
if (!msm_host->cfg_hnd) {
ret = -EINVAL;
@@ -1753,7 +1757,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
init_completion(&msm_host->video_comp);
mutex_init(&msm_host->dev_mutex);
mutex_init(&msm_host->cmd_mutex);
- mutex_init(&msm_host->clk_mutex);
spin_lock_init(&msm_host->intr_lock);
/* setup workqueue */
@@ -1761,7 +1764,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
INIT_WORK(&msm_host->err_work, dsi_err_worker);
INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
- msm_dsi->host = &msm_host->base;
msm_dsi->id = msm_host->id;
DBG("Dsi Host %d initialized", msm_host->id);
@@ -1783,9 +1785,10 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host)
msm_host->workqueue = NULL;
}
- mutex_destroy(&msm_host->clk_mutex);
mutex_destroy(&msm_host->cmd_mutex);
mutex_destroy(&msm_host->dev_mutex);
+
+ pm_runtime_disable(&msm_host->pdev->dev);
}
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
@@ -1881,7 +1884,8 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
* the mdss interrupt is generated in the mdp core clock domain;
* the mdp clock needs to be enabled to receive the dsi interrupt
*/
- dsi_clk_ctrl(msm_host, 1);
+ pm_runtime_get_sync(&msm_host->pdev->dev);
+ dsi_link_clk_enable(msm_host);
/* TODO: vote for bus bandwidth */
@@ -1911,7 +1915,8 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
/* TODO: unvote for bus bandwidth */
- dsi_clk_ctrl(msm_host, 0);
+ dsi_link_clk_disable(msm_host);
+ pm_runtime_put_autosuspend(&msm_host->pdev->dev);
}
int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
@@ -2160,8 +2165,11 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host)
* and only turned on before MDP START.
* This part of code should be enabled once mdp driver support it.
*/
- /* if (msm_panel->mode == MSM_DSI_CMD_MODE)
- dsi_clk_ctrl(msm_host, 0); */
+ /* if (msm_panel->mode == MSM_DSI_CMD_MODE) {
+ * dsi_link_clk_disable(msm_host);
+ * pm_runtime_put_autosuspend(&msm_host->pdev->dev);
+ * }
+ */
return 0;
}
@@ -2217,9 +2225,11 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
goto unlock_ret;
}
- ret = dsi_clk_ctrl(msm_host, 1);
+ pm_runtime_get_sync(&msm_host->pdev->dev);
+ ret = dsi_link_clk_enable(msm_host);
if (ret) {
- pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
+ pr_err("%s: failed to enable link clocks. ret=%d\n",
+ __func__, ret);
goto fail_disable_reg;
}
@@ -2243,7 +2253,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
return 0;
fail_disable_clk:
- dsi_clk_ctrl(msm_host, 0);
+ dsi_link_clk_disable(msm_host);
+ pm_runtime_put_autosuspend(&msm_host->pdev->dev);
fail_disable_reg:
dsi_host_regulator_disable(msm_host);
unlock_ret:
@@ -2268,7 +2279,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
- dsi_clk_ctrl(msm_host, 0);
+ dsi_link_clk_disable(msm_host);
+ pm_runtime_put_autosuspend(&msm_host->pdev->dev);
dsi_host_regulator_disable(msm_host);
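
With dsi_clk_ctrl() gone, the clock ownership splits cleanly: the bus clocks are driven by the runtime PM callbacks, the link clocks stay explicit at each call site, and the runtime PM core provides the serialization the old clk_mutex used to. Summarized as a sketch:

/*
 *   bus clocks:  pm_runtime_get_sync(dev)
 *                    -> msm_dsi_runtime_resume() -> dsi_bus_clk_enable()
 *                pm_runtime_put_autosuspend(dev), after the delay,
 *                    -> msm_dsi_runtime_suspend() -> dsi_bus_clk_disable()
 *
 *   link clocks: enabled and disabled explicitly, inside a runtime
 *                PM reference:
 */
pm_runtime_get_sync(&msm_host->pdev->dev);
dsi_link_clk_enable(msm_host);
/* ... command or video transfer ... */
dsi_link_clk_disable(msm_host);
pm_runtime_put_autosuspend(&msm_host->pdev->dev);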
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 0c2eb9c9a1fc..7c9bf91bc22b 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -373,7 +373,7 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
{
clk_disable_unprepare(phy->ahb_clk);
- pm_runtime_put_sync(&phy->pdev->dev);
+ pm_runtime_put_autosuspend(&phy->pdev->dev);
}
static const struct of_device_id dsi_phy_dt_match[] = {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index a968cad509c2..17e069a133a4 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -239,6 +239,8 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->pwr_clks[i] = clk;
}
+ pm_runtime_enable(&pdev->dev);
+
hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
hdmi->i2c = msm_hdmi_i2c_init(hdmi);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 13ac822dee5d..7e357077ed26 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -35,6 +35,8 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
const struct hdmi_platform_config *config = hdmi->config;
int i, ret;
+ pm_runtime_get_sync(&hdmi->pdev->dev);
+
for (i = 0; i < config->pwr_reg_cnt; i++) {
ret = regulator_enable(hdmi->pwr_regs[i]);
if (ret) {
@@ -84,6 +86,8 @@ static void power_off(struct drm_bridge *bridge)
config->pwr_reg_names[i], ret);
}
}
+
+ pm_runtime_put_autosuspend(&hdmi->pdev->dev);
}
#define AVI_IFRAME_LINE_NUMBER 1
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 71536d9c7fe8..c0848dfedd50 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -137,6 +137,36 @@ err:
return ret;
}
+static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
+{
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct device *dev = &hdmi->pdev->dev;
+ int i, ret;
+
+ if (enable) {
+ for (i = 0; i < config->hpd_clk_cnt; i++) {
+ if (config->hpd_freq && config->hpd_freq[i]) {
+ ret = clk_set_rate(hdmi->hpd_clks[i],
+ config->hpd_freq[i]);
+ if (ret)
+ dev_warn(dev,
+ "failed to set clk %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ }
+
+ ret = clk_prepare_enable(hdmi->hpd_clks[i]);
+ if (ret) {
+ dev_err(dev,
+ "failed to enable hpd clk: %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ }
+ }
+ } else {
+ for (i = config->hpd_clk_cnt - 1; i >= 0; i--)
+ clk_disable_unprepare(hdmi->hpd_clks[i]);
+ }
+}
+
static int hpd_enable(struct hdmi_connector *hdmi_connector)
{
struct hdmi *hdmi = hdmi_connector->hdmi;
@@ -167,22 +197,8 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
goto fail;
}
- for (i = 0; i < config->hpd_clk_cnt; i++) {
- if (config->hpd_freq && config->hpd_freq[i]) {
- ret = clk_set_rate(hdmi->hpd_clks[i],
- config->hpd_freq[i]);
- if (ret)
- dev_warn(dev, "failed to set clk %s (%d)\n",
- config->hpd_clk_names[i], ret);
- }
-
- ret = clk_prepare_enable(hdmi->hpd_clks[i]);
- if (ret) {
- dev_err(dev, "failed to enable hpd clk: %s (%d)\n",
- config->hpd_clk_names[i], ret);
- goto fail;
- }
- }
+ pm_runtime_get_sync(dev);
+ enable_hpd_clocks(hdmi, true);
msm_hdmi_set_mode(hdmi, false);
msm_hdmi_phy_reset(hdmi);
@@ -225,8 +241,8 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector)
msm_hdmi_set_mode(hdmi, false);
- for (i = 0; i < config->hpd_clk_cnt; i++)
- clk_disable_unprepare(hdmi->hpd_clks[i]);
+ enable_hpd_clocks(hdmi, false);
+ pm_runtime_put_autosuspend(dev);
ret = gpio_config(hdmi, false);
if (ret)
@@ -285,7 +301,16 @@ void msm_hdmi_connector_irq(struct drm_connector *connector)
static enum drm_connector_status detect_reg(struct hdmi *hdmi)
{
- uint32_t hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+ uint32_t hpd_int_status;
+
+ pm_runtime_get_sync(&hdmi->pdev->dev);
+ enable_hpd_clocks(hdmi, true);
+
+ hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+
+ enable_hpd_clocks(hdmi, false);
+ pm_runtime_put_autosuspend(&hdmi->pdev->dev);
+
return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
connector_status_connected : connector_status_disconnected;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index aa7402e03f67..60790df91bfa 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -192,6 +192,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
{
struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms;
+ struct device *dev;
int intf_num;
u32 data = 0;
@@ -214,14 +215,16 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
/* Smart Panel, Sync mode */
data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
+ dev = &mdp5_kms->pdev->dev;
+
/* Make sure clocks are on when connectors call this function. */
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 3a81e26629c7..6fcb58ab718c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -415,6 +415,7 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct device *dev = &mdp5_kms->pdev->dev;
DBG("%s", crtc->name);
@@ -425,7 +426,7 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
mdp5_crtc->enabled = false;
}
@@ -436,13 +437,17 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct device *dev = &mdp5_kms->pdev->dev;
DBG("%s", crtc->name);
if (WARN_ON(mdp5_crtc->enabled))
return;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
+
+ mdp5_crtc_mode_set_nofb(crtc);
+
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
if (mdp5_cstate->cmd_mode)
@@ -533,7 +538,7 @@ static bool is_fullscreen(struct drm_crtc_state *cstate,
((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}
-enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
+static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
struct drm_crtc_state *new_crtc_state,
struct drm_plane_state *bpstate)
{
@@ -727,6 +732,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
struct drm_device *dev = crtc->dev;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct platform_device *pdev = mdp5_kms->pdev;
struct msm_kms *kms = &mdp5_kms->base.base;
struct drm_gem_object *cursor_bo, *old_bo = NULL;
uint32_t blendcfg, stride;
@@ -755,7 +761,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DBG("Cursor off");
cursor_enable = false;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(&pdev->dev);
goto set_cursor;
}
@@ -770,6 +776,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
lm = mdp5_cstate->pipeline.mixer->lm;
stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
+ pm_runtime_get_sync(&pdev->dev);
+
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
old_bo = mdp5_crtc->cursor.scanout_bo;
@@ -779,8 +787,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
get_roi(crtc, &roi_w, &roi_h);
- mdp5_enable(mdp5_kms);
-
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -798,6 +804,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
set_cursor:
ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
if (ret) {
@@ -809,7 +817,7 @@ set_cursor:
crtc_flush(crtc, flush_mask);
end:
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(&pdev->dev);
if (old_bo) {
drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
/* enable vblank to complete cursor work: */
@@ -842,7 +850,7 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
get_roi(crtc, &roi_w, &roi_h);
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(&mdp5_kms->pdev->dev);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
@@ -855,7 +863,7 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
crtc_flush(crtc, flush_mask);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(&mdp5_kms->pdev->dev);
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 70bef51245af..5b851380d3f2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -297,6 +297,10 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_interface *intf = mdp5_encoder->intf;
+ /* XXX: not quite right: the mode should come from the atomic state rather than encoder->crtc */
+ struct drm_crtc_state *cstate = encoder->crtc->state;
+
+ mdp5_encoder_mode_set(encoder, &cstate->mode, &cstate->adjusted_mode);
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
mdp5_cmd_encoder_enable(encoder);
@@ -320,7 +324,6 @@ static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
- .mode_set = mdp5_encoder_mode_set,
.disable = mdp5_encoder_disable,
.enable = mdp5_encoder_enable,
.atomic_check = mdp5_encoder_atomic_check,
@@ -350,6 +353,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
struct mdp5_kms *mdp5_kms;
+ struct device *dev;
int intf_num;
u32 data = 0;
@@ -369,8 +373,10 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
else
return -EINVAL;
+ dev = &mdp5_kms->pdev->dev;
/* Make sure clocks are on when connectors call this function. */
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
+
/* Dumb Panel, Sync mode */
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
@@ -378,7 +384,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 3ce8b9dec9c1..bb5deb00c899 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -49,16 +49,19 @@ static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
void mdp5_irq_preinstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- mdp5_enable(mdp5_kms);
+ struct device *dev = &mdp5_kms->pdev->dev;
+
+ pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
}
int mdp5_irq_postinstall(struct msm_kms *kms)
{
struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+ struct device *dev = &mdp5_kms->pdev->dev;
struct mdp_irq *error_handler = &mdp5_kms->error_handler;
error_handler->irq = mdp5_irq_error_handler;
@@ -67,9 +70,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
MDP5_IRQ_INTF2_UNDER_RUN |
MDP5_IRQ_INTF3_UNDER_RUN;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
mdp_irq_register(mdp_kms, error_handler);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
return 0;
}
@@ -77,9 +80,11 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
void mdp5_irq_uninstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- mdp5_enable(mdp5_kms);
+ struct device *dev = &mdp5_kms->pdev->dev;
+
+ pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
}
irqreturn_t mdp5_irq(struct msm_kms *kms)
@@ -109,11 +114,12 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), true);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
return 0;
}
@@ -121,9 +127,10 @@ int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), false);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 1c603aef3c59..f7c0698fec40 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -30,11 +30,10 @@ static const char *iommu_ports[] = {
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct platform_device *pdev = mdp5_kms->pdev;
+ struct device *dev = &mdp5_kms->pdev->dev;
unsigned long flags;
- pm_runtime_get_sync(&pdev->dev);
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
/* Magic unknown register writes:
*
@@ -66,8 +65,7 @@ static int mdp5_hw_init(struct msm_kms *kms)
mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
- mdp5_disable(mdp5_kms);
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_sync(dev);
return 0;
}
@@ -111,8 +109,9 @@ static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state)
static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
if (mdp5_kms->smp)
mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
@@ -121,11 +120,12 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
if (mdp5_kms->smp)
mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
}
static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms,
@@ -249,6 +249,9 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
DBG("");
+ mdp5_kms->enable_count--;
+ WARN_ON(mdp5_kms->enable_count < 0);
+
clk_disable_unprepare(mdp5_kms->ahb_clk);
clk_disable_unprepare(mdp5_kms->axi_clk);
clk_disable_unprepare(mdp5_kms->core_clk);
@@ -262,6 +265,8 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
DBG("");
+ mdp5_kms->enable_count++;
+
clk_prepare_enable(mdp5_kms->ahb_clk);
clk_prepare_enable(mdp5_kms->axi_clk);
clk_prepare_enable(mdp5_kms->core_clk);
@@ -486,11 +491,12 @@ fail:
static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
u32 *major, u32 *minor)
{
+ struct device *dev = &mdp5_kms->pdev->dev;
u32 version;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
*minor = FIELD(version, MDP5_HW_VERSION_MINOR);
@@ -643,7 +649,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
* have left things on, in which case we'll start getting faults if
* we don't disable):
*/
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(&pdev->dev);
for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
!config->hw->intf.base[i])
@@ -652,7 +658,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
}
- mdp5_disable(mdp5_kms);
mdelay(16);
if (config->platform.iommu) {
@@ -678,6 +683,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
aspace = NULL;
}
+ pm_runtime_put_autosuspend(&pdev->dev);
+
ret = modeset_init(mdp5_kms);
if (ret) {
dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
@@ -1005,6 +1012,30 @@ static int mdp5_dev_remove(struct platform_device *pdev)
return 0;
}
+static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
+
+ DBG("");
+
+ return mdp5_disable(mdp5_kms);
+}
+
+static __maybe_unused int mdp5_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
+
+ DBG("");
+
+ return mdp5_enable(mdp5_kms);
+}
+
+static const struct dev_pm_ops mdp5_pm_ops = {
+ SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
+};
+
static const struct of_device_id mdp5_dt_match[] = {
{ .compatible = "qcom,mdp5", },
/* to support downstream DT files */
@@ -1019,6 +1050,7 @@ static struct platform_driver mdp5_driver = {
.driver = {
.name = "msm_mdp",
.of_match_table = mdp5_dt_match,
+ .pm = &mdp5_pm_ops,
},
};
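
With mdp5_pm_ops registered, every pm_runtime_get_sync() in the driver lands in mdp5_runtime_resume() and hence mdp5_enable(), which increments enable_count; the WARN_ON() checks added to mdp5_read()/mdp5_write() in mdp5_kms.h below then flag any register access made while the clocks are off. The resulting caller pattern:

pm_runtime_get_sync(&mdp5_kms->pdev->dev);	/* -> mdp5_enable(), count++ */
mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);	/* count > 0: no WARN */
pm_runtime_put_autosuspend(&mdp5_kms->pdev->dev);	/* -> mdp5_disable() later */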
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 17caa0e8c8ae..9b3fe01089d1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -76,6 +76,8 @@ struct mdp5_kms {
bool rpm_enabled;
struct mdp_irq error_handler;
+
+ int enable_count;
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
@@ -167,11 +169,13 @@ struct mdp5_encoder {
static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
{
+ WARN_ON(mdp5_kms->enable_count <= 0);
msm_writel(data, mdp5_kms->mmio + reg);
}
static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
{
+ WARN_ON(mdp5_kms->enable_count <= 0);
return msm_readl(mdp5_kms->mmio + reg);
}
@@ -255,9 +259,6 @@ static inline uint32_t lm2ppdone(struct mdp5_hw_mixer *mixer)
return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp;
}
-int mdp5_disable(struct mdp5_kms *mdp5_kms);
-int mdp5_enable(struct mdp5_kms *mdp5_kms);
-
void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
uint32_t old_irqmask);
void mdp5_irq_preinstall(struct msm_kms *kms);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
index 9c34d7824988..f2a0db7a8a03 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
@@ -31,6 +31,10 @@ struct msm_mdss {
struct regulator *vdd;
+ struct clk *ahb_clk;
+ struct clk *axi_clk;
+ struct clk *vsync_clk;
+
struct {
volatile unsigned long enabled_mask;
struct irq_domain *domain;
@@ -140,6 +144,51 @@ static int mdss_irq_domain_init(struct msm_mdss *mdss)
return 0;
}
+int msm_mdss_enable(struct msm_mdss *mdss)
+{
+ DBG("");
+
+ clk_prepare_enable(mdss->ahb_clk);
+ if (mdss->axi_clk)
+ clk_prepare_enable(mdss->axi_clk);
+ if (mdss->vsync_clk)
+ clk_prepare_enable(mdss->vsync_clk);
+
+ return 0;
+}
+
+int msm_mdss_disable(struct msm_mdss *mdss)
+{
+ DBG("");
+
+ if (mdss->vsync_clk)
+ clk_disable_unprepare(mdss->vsync_clk);
+ if (mdss->axi_clk)
+ clk_disable_unprepare(mdss->axi_clk);
+ clk_disable_unprepare(mdss->ahb_clk);
+
+ return 0;
+}
+
+static int msm_mdss_get_clocks(struct msm_mdss *mdss)
+{
+ struct platform_device *pdev = to_platform_device(mdss->dev->dev);
+
+ mdss->ahb_clk = msm_clk_get(pdev, "iface");
+ if (IS_ERR(mdss->ahb_clk))
+ mdss->ahb_clk = NULL;
+
+ mdss->axi_clk = msm_clk_get(pdev, "bus");
+ if (IS_ERR(mdss->axi_clk))
+ mdss->axi_clk = NULL;
+
+ mdss->vsync_clk = msm_clk_get(pdev, "vsync");
+ if (IS_ERR(mdss->vsync_clk))
+ mdss->vsync_clk = NULL;
+
+ return 0;
+}
+
void msm_mdss_destroy(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -153,8 +202,6 @@ void msm_mdss_destroy(struct drm_device *dev)
regulator_disable(mdss->vdd);
- pm_runtime_put_sync(dev->dev);
-
pm_runtime_disable(dev->dev);
}
@@ -190,6 +237,12 @@ int msm_mdss_init(struct drm_device *dev)
goto fail;
}
+ ret = msm_mdss_get_clocks(mdss);
+ if (ret) {
+ dev_err(dev->dev, "failed to get clocks: %d\n", ret);
+ goto fail;
+ }
+
/* Regulator to enable GDSCs in downstream kernels */
mdss->vdd = devm_regulator_get(dev->dev, "vdd");
if (IS_ERR(mdss->vdd)) {
@@ -221,12 +274,6 @@ int msm_mdss_init(struct drm_device *dev)
pm_runtime_enable(dev->dev);
- /*
- * TODO: This is needed as the MDSS GDSC is only tied to MDSS's power
- * domain. Remove this once runtime PM is adapted for all the devices.
- */
- pm_runtime_get_sync(dev->dev);
-
return 0;
fail_irq:
regulator_disable(mdss->vdd);
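
The clock lookups above use the optional-clock idiom: a failed msm_clk_get() is downgraded to NULL rather than treated as an error. The common clk API already accepts a NULL clk as a no-op (clk_prepare_enable(NULL) returns 0), so the NULL checks in msm_mdss_enable()/msm_mdss_disable() are defensive rather than strictly required. The idiom in isolation:

mdss->axi_clk = msm_clk_get(pdev, "bus");
if (IS_ERR(mdss->axi_clk))
	mdss->axi_clk = NULL;		/* clock is optional */

clk_prepare_enable(mdss->axi_clk);	/* no-op when NULL */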
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 58f712d37e7f..ae4983d9d0a5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -28,6 +28,13 @@ struct mdp5_smp {
int blk_cnt;
int blk_size;
+
+ /* register cache */
+ u32 alloc_w[22];
+ u32 alloc_r[22];
+ u32 pipe_reqprio_fifo_wm0[SSPP_MAX];
+ u32 pipe_reqprio_fifo_wm1[SSPP_MAX];
+ u32 pipe_reqprio_fifo_wm2[SSPP_MAX];
};
static inline
@@ -98,16 +105,15 @@ static int smp_request_block(struct mdp5_smp *smp,
static void set_fifo_thresholds(struct mdp5_smp *smp,
enum mdp5_pipe pipe, int nblks)
{
- struct mdp5_kms *mdp5_kms = get_kms(smp);
u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
u32 val;
/* 1/4 of SMP pool that is being fetched */
val = (nblks * smp_entries_per_blk) / 4;
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+ smp->pipe_reqprio_fifo_wm0[pipe] = val * 1;
+ smp->pipe_reqprio_fifo_wm1[pipe] = val * 2;
+ smp->pipe_reqprio_fifo_wm2[pipe] = val * 3;
}
/*
@@ -222,7 +228,6 @@ void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
static unsigned update_smp_state(struct mdp5_smp *smp,
u32 cid, mdp5_smp_state_t *assigned)
{
- struct mdp5_kms *mdp5_kms = get_kms(smp);
int cnt = smp->blk_cnt;
unsigned nblks = 0;
u32 blk, val;
@@ -231,7 +236,7 @@ static unsigned update_smp_state(struct mdp5_smp *smp,
int idx = blk / 3;
int fld = blk % 3;
- val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
+ val = smp->alloc_w[idx];
switch (fld) {
case 0:
@@ -248,8 +253,8 @@ static unsigned update_smp_state(struct mdp5_smp *smp,
break;
}
- mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
- mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
+ smp->alloc_w[idx] = val;
+ smp->alloc_r[idx] = val;
nblks++;
}
@@ -257,6 +262,39 @@ static unsigned update_smp_state(struct mdp5_smp *smp,
return nblks;
}
+static void write_smp_alloc_regs(struct mdp5_smp *smp)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ int i, num_regs;
+
+ num_regs = smp->blk_cnt / 3 + 1;
+
+ for (i = 0; i < num_regs; i++) {
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i),
+ smp->alloc_w[i]);
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i),
+ smp->alloc_r[i]);
+ }
+}
+
+static void write_smp_fifo_regs(struct mdp5_smp *smp)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ int i;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
+ enum mdp5_pipe pipe = hwpipe->pipe;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe),
+ smp->pipe_reqprio_fifo_wm0[pipe]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe),
+ smp->pipe_reqprio_fifo_wm1[pipe]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe),
+ smp->pipe_reqprio_fifo_wm2[pipe]);
+ }
+}
+
void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
enum mdp5_pipe pipe;
@@ -277,6 +315,9 @@ void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
set_fifo_thresholds(smp, pipe, nblks);
}
+ write_smp_alloc_regs(smp);
+ write_smp_fifo_regs(smp);
+
state->assigned = 0;
}
@@ -289,6 +330,8 @@ void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state
set_fifo_thresholds(smp, pipe, 0);
}
+ write_smp_fifo_regs(smp);
+
state->released = 0;
}
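
The SMP conversion above is the classic shadow-register pattern: allocation-time writers touch only the in-memory cache, and a single flush pushes the cache to hardware at commit time, when runtime PM guarantees the block is powered. A generic sketch of the pattern (types and sizes illustrative):

struct reg_cache {
	u32 shadow[22];			/* mirrors the hardware registers */
};

static void cache_set(struct reg_cache *c, int idx, u32 val)
{
	c->shadow[idx] = val;		/* no MMIO: safe with clocks off */
}

static void cache_flush(struct reg_cache *c, struct mdp5_kms *mdp5_kms,
			u32 reg_base)
{
	int i;

	/* MMIO: the caller must hold a runtime PM reference */
	for (i = 0; i < ARRAY_SIZE(c->shadow); i++)
		mdp5_write(mdp5_kms, reg_base + i * 4, c->shadow[i]);
}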
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b0129e7b29e3..606df7bea97b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -73,6 +73,10 @@ bool dumpstate = false;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);
+static bool modeset = true;
+MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
+module_param(modeset, bool, 0600);
+
/*
* Util/helpers:
*/
@@ -878,8 +882,37 @@ static int msm_pm_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_PM
+static int msm_runtime_suspend(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+
+ DBG("");
+
+ if (priv->mdss)
+ return msm_mdss_disable(priv->mdss);
+
+ return 0;
+}
+
+static int msm_runtime_resume(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+
+ DBG("");
+
+ if (priv->mdss)
+ return msm_mdss_enable(priv->mdss);
+
+ return 0;
+}
+#endif
+
static const struct dev_pm_ops msm_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
+ SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
};
/*
@@ -1103,6 +1136,9 @@ static struct platform_driver msm_platform_driver = {
static int __init msm_drm_register(void)
{
+ if (!modeset)
+ return -EINVAL;
+
DBG("init");
msm_mdp_register();
msm_dsi_register();
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index fc8d24f7c084..5e8109c07560 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -55,8 +55,6 @@ struct msm_fence_cb;
struct msm_gem_address_space;
struct msm_gem_vma;
-#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
-
struct msm_file_private {
/* currently we don't do anything useful with this.. but when
* per-context address spaces are supported we'd keep track of
@@ -237,6 +235,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
uint32_t size, uint32_t flags);
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova);
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
@@ -248,10 +252,10 @@ uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
-struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_framebuffer *msm_alloc_stolen_fb(struct drm_device *dev,
+ int w, int h, int p, uint32_t format);
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
void msm_fbdev_free(struct drm_device *dev);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 6ecb7b170316..fc175e724ad6 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -20,6 +20,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
+#include "msm_gem.h"
struct msm_framebuffer {
struct drm_framebuffer base;
@@ -28,6 +29,8 @@ struct msm_framebuffer {
};
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
+static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
@@ -161,7 +164,7 @@ out_unref:
return ERR_PTR(ret);
}
-struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -237,3 +240,43 @@ fail:
return ERR_PTR(ret);
}
+
+struct drm_framebuffer *
+msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format)
+{
+ struct drm_mode_fb_cmd2 mode_cmd = {
+ .pixel_format = format,
+ .width = w,
+ .height = h,
+ .pitches = { p },
+ };
+ struct drm_gem_object *bo;
+ struct drm_framebuffer *fb;
+ int size;
+
+ /* allocate backing bo */
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ DBG("allocating %d bytes for fb %d", size, dev->primary->index);
+ bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_STOLEN);
+ if (IS_ERR(bo)) {
+ dev_warn(dev->dev, "could not allocate stolen bo\n");
+ /* try regular bo: */
+ bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
+ }
+ if (IS_ERR(bo)) {
+ dev_err(dev->dev, "failed to allocate buffer object\n");
+ return ERR_CAST(bo);
+ }
+
+ fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
+ if (IS_ERR(fb)) {
+ dev_err(dev->dev, "failed to allocate fb\n");
+ /* note: if fb creation failed, we can't rely on fb destroy
+ * to unref the bo:
+ */
+ drm_gem_object_unreference_unlocked(bo);
+ return ERR_CAST(fb);
+ }
+
+ return fb;
+}
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 9c00fedfc741..c178563fcd4d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -19,7 +19,6 @@
#include <drm/drm_fb_helper.h>
#include "msm_drv.h"
-#include "msm_gem.h"
#include "msm_kms.h"
extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
@@ -35,7 +34,6 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
struct msm_fbdev {
struct drm_fb_helper base;
struct drm_framebuffer *fb;
- struct drm_gem_object *bo;
};
static struct fb_ops msm_fb_ops = {
@@ -57,16 +55,16 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
struct msm_fbdev *fbdev = to_msm_fbdev(helper);
- struct drm_gem_object *drm_obj = fbdev->bo;
+ struct drm_gem_object *bo = msm_framebuffer_bo(fbdev->fb, 0);
int ret = 0;
- ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma);
+ ret = drm_gem_mmap_obj(bo, bo->size, vma);
if (ret) {
pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
return ret;
}
- return msm_gem_mmap_obj(drm_obj, vma);
+ return msm_gem_mmap_obj(bo, vma);
}
static int msm_fbdev_create(struct drm_fb_helper *helper,
@@ -76,47 +74,30 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_device *dev = helper->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_framebuffer *fb = NULL;
+ struct drm_gem_object *bo;
struct fb_info *fbi = NULL;
- struct drm_mode_fb_cmd2 mode_cmd = {0};
uint64_t paddr;
- int ret, size;
+ uint32_t format;
+ int ret, pitch;
+
+ format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
sizes->surface_height, sizes->surface_bpp,
sizes->fb_width, sizes->fb_height);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
-
- mode_cmd.pitches[0] = align_pitch(
- mode_cmd.width, sizes->surface_bpp);
+ pitch = align_pitch(sizes->surface_width, sizes->surface_bpp);
+ fb = msm_alloc_stolen_fb(dev, sizes->surface_width,
+ sizes->surface_height, pitch, format);
- /* allocate backing bo */
- size = mode_cmd.pitches[0] * mode_cmd.height;
- DBG("allocating %d bytes for fb %d", size, dev->primary->index);
- fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
- MSM_BO_WC | MSM_BO_STOLEN);
- if (IS_ERR(fbdev->bo)) {
- ret = PTR_ERR(fbdev->bo);
- fbdev->bo = NULL;
- dev_err(dev->dev, "failed to allocate buffer object: %d\n", ret);
- goto fail;
- }
-
- fb = msm_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
if (IS_ERR(fb)) {
dev_err(dev->dev, "failed to allocate fb\n");
- /* note: if fb creation failed, we can't rely on fb destroy
- * to unref the bo:
- */
- drm_gem_object_unreference_unlocked(fbdev->bo);
ret = PTR_ERR(fb);
goto fail;
}
+ bo = msm_framebuffer_bo(fb, 0);
+
mutex_lock(&dev->struct_mutex);
/*
@@ -124,7 +105,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
* in panic (ie. lock-safe, etc) we could avoid pinning the
* buffer now:
*/
- ret = msm_gem_get_iova(fbdev->bo, priv->kms->aspace, &paddr);
+ ret = msm_gem_get_iova(bo, priv->kms->aspace, &paddr);
if (ret) {
dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
goto fail_unlock;
@@ -152,14 +133,14 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
dev->mode_config.fb_base = paddr;
- fbi->screen_base = msm_gem_get_vaddr(fbdev->bo);
+ fbi->screen_base = msm_gem_get_vaddr(bo);
if (IS_ERR(fbi->screen_base)) {
ret = PTR_ERR(fbi->screen_base);
goto fail_unlock;
}
- fbi->screen_size = fbdev->bo->size;
+ fbi->screen_size = bo->size;
fbi->fix.smem_start = paddr;
- fbi->fix.smem_len = fbdev->bo->size;
+ fbi->fix.smem_len = bo->size;
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
@@ -241,7 +222,9 @@ void msm_fbdev_free(struct drm_device *dev)
/* this will free the backing object */
if (fbdev->fb) {
- msm_gem_put_vaddr(fbdev->bo);
+ struct drm_gem_object *bo =
+ msm_framebuffer_bo(fbdev->fb, 0);
+ msm_gem_put_vaddr(bo);
drm_framebuffer_remove(fbdev->fb);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index a0c60e738db8..f15821a0d900 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1024,3 +1024,49 @@ fail:
drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(ret);
}
+
+static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova, bool locked)
+{
+ void *vaddr;
+ struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
+ int ret;
+
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ if (iova) {
+ ret = msm_gem_get_iova(obj, aspace, iova);
+ if (ret) {
+ drm_gem_object_unreference(obj);
+ return ERR_PTR(ret);
+ }
+ }
+
+ vaddr = msm_gem_get_vaddr(obj);
+ if (IS_ERR(vaddr)) {
+ msm_gem_put_iova(obj, aspace);
+ drm_gem_object_unreference(obj);
+ return ERR_CAST(vaddr);
+ }
+
+ if (bo)
+ *bo = obj;
+
+ return vaddr;
+}
+
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova)
+{
+ return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
+}
+
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova)
+{
+ return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
+}
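A hedged sketch of how a caller might use the new msm_gem_kernel_new() helper; it is modeled on the ringbuffer conversion below, and the teardown sequence is an assumption built from the existing put_vaddr/put_iova/unreference API rather than something this patch adds:

	struct drm_gem_object *bo = NULL;
	uint64_t iova;
	void *ptr;

	/* Allocate, map into the GPU address space and kernel-map in one call. */
	ptr = msm_gem_kernel_new(gpu->dev, SZ_4K, MSM_BO_WC,
			gpu->aspace, &bo, &iova);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	/* ... ptr gives CPU access, iova gives GPU access ... */

	msm_gem_put_vaddr(bo);
	msm_gem_put_iova(bo, gpu->aspace);
	drm_gem_object_unreference_unlocked(bo);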
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 9f3dbc236ab3..ffbff27600e0 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -562,11 +562,49 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
return 0;
}
+static struct msm_gem_address_space *
+msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
+ uint64_t va_start, uint64_t va_end)
+{
+ struct iommu_domain *iommu;
+ struct msm_gem_address_space *aspace;
+ int ret;
+
+ /*
+ * Setup IOMMU.. eventually we will (I think) do this once per context
+ * and have separate page tables per context. For now, to keep things
+ * simple and to get something working, just use a single address space:
+ */
+ iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu)
+ return NULL;
+
+ iommu->geometry.aperture_start = va_start;
+ iommu->geometry.aperture_end = va_end;
+
+ dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
+
+ aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
+ if (IS_ERR(aspace)) {
+ dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
+ PTR_ERR(aspace));
+ iommu_domain_free(iommu);
+ return ERR_CAST(aspace);
+ }
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
+ if (ret) {
+ msm_gem_address_space_put(aspace);
+ return ERR_PTR(ret);
+ }
+
+ return aspace;
+}
+
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config)
{
- struct iommu_domain *iommu;
int ret;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
@@ -636,28 +674,19 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->gpu_cx))
gpu->gpu_cx = NULL;
- /* Setup IOMMU.. eventually we will (I think) do this once per context
- * and have separate page tables per context. For now, to keep things
- * simple and to get something working, just use a single address space:
- */
- iommu = iommu_domain_alloc(&platform_bus_type);
- if (iommu) {
- iommu->geometry.aperture_start = config->va_start;
- iommu->geometry.aperture_end = config->va_end;
-
- dev_info(drm->dev, "%s: using IOMMU\n", name);
- gpu->aspace = msm_gem_address_space_create(&pdev->dev,
- iommu, "gpu");
- if (IS_ERR(gpu->aspace)) {
- ret = PTR_ERR(gpu->aspace);
- dev_err(drm->dev, "failed to init iommu: %d\n", ret);
- gpu->aspace = NULL;
- iommu_domain_free(iommu);
- goto fail;
- }
+ gpu->pdev = pdev;
+ platform_set_drvdata(pdev, gpu);
+
+ bs_init(gpu);
- } else {
+ gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
+ config->va_start, config->va_end);
+
+ if (gpu->aspace == NULL)
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
+ else if (IS_ERR(gpu->aspace)) {
+ ret = PTR_ERR(gpu->aspace);
+ goto fail;
}
/* Create ringbuffer: */
@@ -669,14 +698,10 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
- gpu->pdev = pdev;
- platform_set_drvdata(pdev, gpu);
-
- bs_init(gpu);
-
return 0;
fail:
+ platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -693,7 +718,9 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
msm_ringbuffer_destroy(gpu->rb);
}
-
- if (gpu->fctx)
- msm_fence_context_free(gpu->fctx);
+ if (gpu->aspace) {
+ gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
+ NULL, 0);
+ msm_gem_address_space_put(gpu->aspace);
+ }
}
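Note the three-way contract of msm_gpu_create_address_space(): NULL means no IOMMU is present (and the driver falls back to the VRAM carveout), an ERR_PTR() means an IOMMU exists but setting it up failed, and only a plain pointer is usable. A condensed caller sketch of that contract:

	aspace = msm_gpu_create_address_space(gpu, pdev, va_start, va_end);
	if (aspace == NULL)
		dev_info(dev, "no IOMMU, falling back to VRAM carveout\n");
	else if (IS_ERR(aspace))
		return PTR_ERR(aspace);	/* IOMMU present but unusable */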
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index a8f2ba5e5f07..17d5824417ad 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -99,5 +99,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
int msm_mdss_init(struct drm_device *dev);
void msm_mdss_destroy(struct drm_device *dev);
+int msm_mdss_enable(struct msm_mdss *mdss);
+int msm_mdss_disable(struct msm_mdss *mdss);
#endif /* __MSM_KMS_H__ */
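The two new prototypes presumably bracket access to the MDSS hardware; the exact power-management semantics are an assumption here, so treat this caller as hypothetical:

	ret = msm_mdss_enable(mdss);
	if (ret)
		return ret;

	/* ... access MDSS registers while enabled ... */

	msm_mdss_disable(mdss);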
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 791bca3c6a9c..bf065a540130 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -33,16 +33,14 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
}
ring->gpu = gpu;
- ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
- if (IS_ERR(ring->bo)) {
- ret = PTR_ERR(ring->bo);
- ring->bo = NULL;
- goto fail;
- }
- ring->start = msm_gem_get_vaddr(ring->bo);
+ /* Pass NULL for the iova pointer - we will map it later */
+ ring->start = msm_gem_kernel_new(gpu->dev, size, MSM_BO_WC,
+ gpu->aspace, &ring->bo, NULL);
+
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
+ ring->start = NULL;
goto fail;
}
ring->end = ring->start + (size / 4);
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 085486024089..ed465572491e 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -185,6 +185,9 @@ struct dss_pll_hw {
bool has_freqsel;
bool has_selfreqdco;
bool has_refsel;
+
+ /* DRA7 errata i886: use high N & M to avoid jitter */
+ bool errata_i886;
};
struct dss_pll {
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index 5e221302768b..9d9d9d42009b 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -215,8 +215,8 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
dss_pll_calc_func func, void *data)
{
const struct dss_pll_hw *hw = pll->hw;
- int n, n_min, n_max;
- int m, m_min, m_max;
+ int n, n_start, n_stop, n_inc;
+ int m, m_start, m_stop, m_inc;
unsigned long fint, clkdco;
unsigned long pll_hw_max;
unsigned long fint_hw_min, fint_hw_max;
@@ -226,22 +226,33 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
fint_hw_min = hw->fint_min;
fint_hw_max = hw->fint_max;
- n_min = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul);
- n_max = min((unsigned)(clkin / fint_hw_min), hw->n_max);
+ n_start = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul);
+ n_stop = min((unsigned)(clkin / fint_hw_min), hw->n_max);
+ n_inc = 1;
+
+ if (hw->errata_i886) {
+ swap(n_start, n_stop);
+ n_inc = -1;
+ }
pll_max = pll_max ? pll_max : ULONG_MAX;
- /* Try to find high N & M to avoid jitter (DRA7 errata i886) */
- for (n = n_max; n >= n_min; --n) {
+ for (n = n_start; n != n_stop; n += n_inc) {
fint = clkin / n;
- m_min = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2),
+ m_start = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2),
1ul);
- m_max = min3((unsigned)(pll_max / fint / 2),
+ m_stop = min3((unsigned)(pll_max / fint / 2),
(unsigned)(pll_hw_max / fint / 2),
hw->m_max);
+ m_inc = 1;
+
+ if (hw->errata_i886) {
+ swap(m_start, m_stop);
+ m_inc = -1;
+ }
- for (m = m_max; m >= m_min; --m) {
+ for (m = m_start; m != m_stop; m += m_inc) {
clkdco = 2 * m * fint;
if (func(n, m, fint, clkdco, data))
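The rewrite folds the ascending and the descending (errata i886) searches into a single loop by swapping the bounds and negating the increment. A standalone illustration of the idiom; note that the != terminator means the stop value itself is never visited:

	int v, start = 1, stop = 8, inc = 1;
	bool descending = true;		/* stands in for hw->errata_i886 */

	if (descending) {
		swap(start, stop);	/* start = 8, stop = 1 */
		inc = -1;
	}

	for (v = start; v != stop; v += inc)
		pr_debug("trying divider %d\n", v);	/* visits 8..2, never 1 */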
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 5bd7788357b2..d58da6f32693 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -263,6 +263,12 @@ static const struct venc_config venc_config_pal_bdghi = {
.fid_ext_start_y__fid_ext_offset_y = 0x01380005,
};
+enum venc_videomode {
+ VENC_MODE_UNKNOWN,
+ VENC_MODE_PAL,
+ VENC_MODE_NTSC,
+};
+
static const struct videomode omap_dss_pal_vm = {
.hactive = 720,
.vactive = 574,
@@ -297,6 +303,24 @@ static const struct videomode omap_dss_ntsc_vm = {
DISPLAY_FLAGS_SYNC_NEGEDGE,
};
+static enum venc_videomode venc_get_videomode(const struct videomode *vm)
+{
+ if (!(vm->flags & DISPLAY_FLAGS_INTERLACED))
+ return VENC_MODE_UNKNOWN;
+
+ if (vm->pixelclock == omap_dss_pal_vm.pixelclock &&
+ vm->hactive == omap_dss_pal_vm.hactive &&
+ vm->vactive == omap_dss_pal_vm.vactive)
+ return VENC_MODE_PAL;
+
+ if (vm->pixelclock == omap_dss_ntsc_vm.pixelclock &&
+ vm->hactive == omap_dss_ntsc_vm.hactive &&
+ vm->vactive == omap_dss_ntsc_vm.vactive)
+ return VENC_MODE_NTSC;
+
+ return VENC_MODE_UNKNOWN;
+}
+
static struct {
struct platform_device *pdev;
void __iomem *base;
@@ -423,14 +447,14 @@ static void venc_runtime_put(void)
static const struct venc_config *venc_timings_to_config(struct videomode *vm)
{
- if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0)
+ switch (venc_get_videomode(vm)) {
+ default:
+ WARN_ON_ONCE(1);
+ case VENC_MODE_PAL:
return &venc_config_pal_trm;
-
- if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0)
+ case VENC_MODE_NTSC:
return &venc_config_ntsc_trm;
-
- BUG();
- return NULL;
+ }
}
static int venc_power_on(struct omap_dss_device *dssdev)
@@ -541,15 +565,28 @@ static void venc_display_disable(struct omap_dss_device *dssdev)
static void venc_set_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
+ struct videomode actual_vm;
+
DSSDBG("venc_set_timings\n");
mutex_lock(&venc.venc_lock);
+ switch (venc_get_videomode(vm)) {
+ default:
+ WARN_ON_ONCE(1);
+ case VENC_MODE_PAL:
+ actual_vm = omap_dss_pal_vm;
+ break;
+ case VENC_MODE_NTSC:
+ actual_vm = omap_dss_ntsc_vm;
+ break;
+ }
+
/* Reset WSS data when the TV standard changes. */
- if (memcmp(&venc.vm, vm, sizeof(*vm)))
+ if (memcmp(&venc.vm, &actual_vm, sizeof(actual_vm)))
venc.wss_data = 0;
- venc.vm = *vm;
+ venc.vm = actual_vm;
dispc_set_tv_pclk(13500000);
@@ -561,13 +598,13 @@ static int venc_check_timings(struct omap_dss_device *dssdev,
{
DSSDBG("venc_check_timings\n");
- if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0)
+ switch (venc_get_videomode(vm)) {
+ case VENC_MODE_PAL:
+ case VENC_MODE_NTSC:
return 0;
-
- if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0)
- return 0;
-
- return -EINVAL;
+ default:
+ return -EINVAL;
+ }
}
static void venc_get_timings(struct omap_dss_device *dssdev,
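venc_get_videomode() deliberately matches only the pixel clock, the active area and the interlace flag instead of memcmp()ing the whole videomode, so user-supplied timings that differ in blanking or sync details still map onto a supported TV standard, and venc_set_timings() then substitutes the canonical table entry. A small sketch of the effect (hfront_porch is just an example field to perturb):

	struct videomode vm = omap_dss_pal_vm;

	vm.hfront_porch += 2;	/* deviates from the canonical PAL timings */

	/* Still recognized as PAL; the canonical timings get programmed. */
	WARN_ON(venc_get_videomode(&vm) != VENC_MODE_PAL);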
diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c
index f7ea02a88b1a..38a239cc5e04 100644
--- a/drivers/gpu/drm/omapdrm/dss/video-pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c
@@ -130,6 +130,8 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = {
.mX_lsb[3] = 5,
.has_refsel = true,
+
+ .errata_i886 = true,
};
struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 9b3c36b48356..cdf5b0601eba 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -84,23 +84,36 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
- /* With the current dss dispc implementation we have to enable
- * the new modeset before we can commit planes. The dispc ovl
- * configuration relies on the video mode configuration being
- * written into the HW when the ovl configuration is
- * calculated.
- *
- * This approach is not ideal because after a mode change the
- * plane update is executed only after the first vblank
- * interrupt. The dispc implementation should be fixed so that
- * it is able to use uncommitted drm state information.
- */
- drm_atomic_helper_commit_modeset_enables(dev, old_state);
- omap_atomic_wait_for_completion(dev, old_state);
-
- drm_atomic_helper_commit_planes(dev, old_state, 0);
-
- drm_atomic_helper_commit_hw_done(old_state);
+ if (priv->omaprev != 0x3430) {
+ /* With the current dss dispc implementation we have to enable
+ * the new modeset before we can commit planes. The dispc ovl
+ * configuration relies on the video mode configuration been
+ * written into the HW when the ovl configuration is
+ * calculated.
+ *
+ * This approach is not ideal because after a mode change the
+ * plane update is executed only after the first vblank
+ * interrupt. The dispc implementation should be fixed so that
+ * it is able to use uncommitted drm state information.
+ */
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+ omap_atomic_wait_for_completion(dev, old_state);
+
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+ } else {
+ /*
+ * OMAP3 DSS seems to have issues with the work-around above,
+ * resulting in endless sync losts if a crtc is enabled without
+ * a plane. For now, skip the WA for OMAP3.
+ */
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
+
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+ }
/*
* Wait for completion of the page flips to ensure that old buffers
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index cf480218daa5..ec5943627aa5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -312,7 +312,7 @@ static int sun4i_backend_of_get_id(struct device_node *node)
struct device_node *remote;
u32 reg;
- remote = of_parse_phandle(ep, "remote-endpoint", 0);
+ remote = of_graph_get_remote_endpoint(ep);
if (!remote)
continue;
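of_graph_get_remote_endpoint() is the dedicated OF-graph accessor for the remote-endpoint phandle. As with of_parse_phandle(), the returned node carries a reference that the caller must drop; a minimal usage sketch:

	struct device_node *remote = of_graph_get_remote_endpoint(ep);

	if (remote) {
		/* ... inspect the remote endpoint node ... */
		of_node_put(remote);	/* drop the reference when done */
	}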
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 86178796de6c..c706ad30411b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -51,6 +51,7 @@ struct vmw_cmdbuf_context {
struct list_head hw_submitted;
struct list_head preempted;
unsigned num_hw_submitted;
+ bool block_submission;
};
/**
@@ -60,6 +61,9 @@ struct vmw_cmdbuf_context {
* kernel command submissions, @cur.
* @space_mutex: Mutex to protect against starvation when we allocate
* main pool buffer space.
+ * @error_mutex: Mutex to serialize the work queue error handling.
+ * Note this is not needed if the same workqueue handler
+ * can't race with itself...
* @work: A struct work_struct implementing command buffer error handling.
* Immutable.
* @dev_priv: Pointer to the device private struct. Immutable.
@@ -85,7 +89,6 @@ struct vmw_cmdbuf_context {
* Internal protection.
* @dheaders: Pool of DMA memory for device command buffer headers with trailing
* space for inline data. Internal protection.
- * @tasklet: Tasklet struct for irq processing. Immutable.
* @alloc_queue: Wait queue for processes waiting to allocate command buffer
* space.
* @idle_queue: Wait queue for processes waiting for command buffer idle.
@@ -102,6 +105,7 @@ struct vmw_cmdbuf_context {
struct vmw_cmdbuf_man {
struct mutex cur_mutex;
struct mutex space_mutex;
+ struct mutex error_mutex;
struct work_struct work;
struct vmw_private *dev_priv;
struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
@@ -117,7 +121,6 @@ struct vmw_cmdbuf_man {
spinlock_t lock;
struct dma_pool *headers;
struct dma_pool *dheaders;
- struct tasklet_struct tasklet;
wait_queue_head_t alloc_queue;
wait_queue_head_t idle_queue;
bool irq_on;
@@ -181,12 +184,13 @@ struct vmw_cmdbuf_alloc_info {
};
/* Loop over each context in the command buffer manager. */
-#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
+#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
++(_i), ++(_ctx))
-static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
-
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
+ bool enable);
+static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
/**
* vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
@@ -278,9 +282,9 @@ void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
vmw_cmdbuf_header_inline_free(header);
return;
}
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
__vmw_cmdbuf_header_free(header);
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
}
@@ -331,7 +335,8 @@ static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
struct vmw_cmdbuf_context *ctx)
{
while (ctx->num_hw_submitted < man->max_hw_submitted &&
- !list_empty(&ctx->submitted)) {
+ !list_empty(&ctx->submitted) &&
+ !ctx->block_submission) {
struct vmw_cmdbuf_header *entry;
SVGACBStatus status;
@@ -386,12 +391,17 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
__vmw_cmdbuf_header_free(entry);
break;
case SVGA_CB_STATUS_COMMAND_ERROR:
- case SVGA_CB_STATUS_CB_HEADER_ERROR:
+ entry->cb_header->status = SVGA_CB_STATUS_NONE;
list_add_tail(&entry->list, &man->error);
schedule_work(&man->work);
break;
case SVGA_CB_STATUS_PREEMPTED:
- list_add(&entry->list, &ctx->preempted);
+ entry->cb_header->status = SVGA_CB_STATUS_NONE;
+ list_add_tail(&entry->list, &ctx->preempted);
+ break;
+ case SVGA_CB_STATUS_CB_HEADER_ERROR:
+ WARN_ONCE(true, "Command buffer header error.\n");
+ __vmw_cmdbuf_header_free(entry);
break;
default:
WARN_ONCE(true, "Undefined command buffer status.\n");
@@ -468,20 +478,17 @@ static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
}
/**
- * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
- * handler implemented as a tasklet.
+ * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
+ * handler implemented as a threaded irq task.
*
- * @data: Tasklet closure. A pointer to the command buffer manager cast to
- * an unsigned long.
+ * @man: Pointer to the command buffer manager.
*
- * The bottom half (tasklet) of the interrupt handler simply calls into the
+ * The bottom half of the interrupt handler simply calls into the
* command buffer processor to free finished buffers and submit any
* queued buffers to hardware.
*/
-static void vmw_cmdbuf_man_tasklet(unsigned long data)
+void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
- struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
-
spin_lock(&man->lock);
vmw_cmdbuf_man_process(man);
spin_unlock(&man->lock);
@@ -502,24 +509,112 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
container_of(work, struct vmw_cmdbuf_man, work);
struct vmw_cmdbuf_header *entry, *next;
uint32_t dummy;
- bool restart = false;
+ bool restart[SVGA_CB_CONTEXT_MAX];
+ bool send_fence = false;
+ struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
+ int i;
+ struct vmw_cmdbuf_context *ctx;
- spin_lock_bh(&man->lock);
+ for_each_cmdbuf_ctx(man, i, ctx) {
+ INIT_LIST_HEAD(&restart_head[i]);
+ restart[i] = false;
+ }
+
+ mutex_lock(&man->error_mutex);
+ spin_lock(&man->lock);
list_for_each_entry_safe(entry, next, &man->error, list) {
- restart = true;
- DRM_ERROR("Command buffer error.\n");
+ SVGACBHeader *cb_hdr = entry->cb_header;
+ SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
+ (entry->cmd + cb_hdr->errorOffset);
+ u32 error_cmd_size, new_start_offset;
+ const char *cmd_name;
+
+ list_del_init(&entry->list);
+ restart[entry->cb_context] = true;
+
+ if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
+ DRM_ERROR("Unknown command causing device error.\n");
+ DRM_ERROR("Command buffer offset is %lu\n",
+ (unsigned long) cb_hdr->errorOffset);
+ __vmw_cmdbuf_header_free(entry);
+ send_fence = true;
+ continue;
+ }
- list_del(&entry->list);
- __vmw_cmdbuf_header_free(entry);
- wake_up_all(&man->idle_queue);
+ DRM_ERROR("Command \"%s\" causing device error.\n", cmd_name);
+ DRM_ERROR("Command buffer offset is %lu\n",
+ (unsigned long) cb_hdr->errorOffset);
+ DRM_ERROR("Command size is %lu\n",
+ (unsigned long) error_cmd_size);
+
+ new_start_offset = cb_hdr->errorOffset + error_cmd_size;
+
+ if (new_start_offset >= cb_hdr->length) {
+ __vmw_cmdbuf_header_free(entry);
+ send_fence = true;
+ continue;
+ }
+
+ if (man->using_mob)
+ cb_hdr->ptr.mob.mobOffset += new_start_offset;
+ else
+ cb_hdr->ptr.pa += (u64) new_start_offset;
+
+ entry->cmd += new_start_offset;
+ cb_hdr->length -= new_start_offset;
+ cb_hdr->errorOffset = 0;
+ cb_hdr->offset = 0;
+ list_add_tail(&entry->list, &restart_head[entry->cb_context]);
+ man->ctx[entry->cb_context].block_submission = true;
+ }
+ spin_unlock(&man->lock);
+
+ /* Preempt all contexts with errors */
+ for_each_cmdbuf_ctx(man, i, ctx) {
+ if (ctx->block_submission && vmw_cmdbuf_preempt(man, i))
+ DRM_ERROR("Failed preempting command buffer "
+ "context %u.\n", i);
+ }
+
+ spin_lock(&man->lock);
+ for_each_cmdbuf_ctx(man, i, ctx) {
+ if (!ctx->block_submission)
+ continue;
+
+ /* Move preempted command buffers to the preempted queue. */
+ vmw_cmdbuf_ctx_process(man, ctx, &dummy);
+
+ /*
+ * Add the preempted queue after the command buffer
+ * that caused an error.
+ */
+ list_splice_init(&ctx->preempted, restart_head[i].prev);
+
+ /*
+ * Finally add all command buffers first in the submitted
+ * queue, to rerun them.
+ */
+ list_splice_init(&restart_head[i], &ctx->submitted);
+
+ ctx->block_submission = false;
}
- spin_unlock_bh(&man->lock);
- if (restart && vmw_cmdbuf_startstop(man, true))
- DRM_ERROR("Failed restarting command buffer context 0.\n");
+ vmw_cmdbuf_man_process(man);
+ spin_unlock(&man->lock);
+
+ for_each_cmdbuf_ctx(man, i, ctx) {
+ if (restart[i] && vmw_cmdbuf_startstop(man, i, true))
+ DRM_ERROR("Failed restarting command buffer "
+ "context %u.\n", i);
+ }
/* Send a new fence in case one was removed */
- vmw_fifo_send_fence(man->dev_priv, &dummy);
+ if (send_fence) {
+ vmw_fifo_send_fence(man->dev_priv, &dummy);
+ wake_up_all(&man->idle_queue);
+ }
+
+ mutex_unlock(&man->error_mutex);
}
/**
@@ -536,7 +631,7 @@ static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
bool idle = false;
int i;
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
vmw_cmdbuf_man_process(man);
for_each_cmdbuf_ctx(man, i, ctx) {
if (!list_empty(&ctx->submitted) ||
@@ -548,7 +643,7 @@ static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
idle = list_empty(&man->error);
out_unlock:
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
return idle;
}
@@ -571,7 +666,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
if (!cur)
return;
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
if (man->cur_pos == 0) {
__vmw_cmdbuf_header_free(cur);
goto out_unlock;
@@ -580,7 +675,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
man->cur->cb_header->length = man->cur_pos;
vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
man->cur = NULL;
man->cur_pos = 0;
}
@@ -673,14 +768,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
return true;
memset(info->node, 0, sizeof(*info->node));
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
if (ret) {
vmw_cmdbuf_man_process(man);
ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
}
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
info->done = !ret;
return info->done;
@@ -801,9 +896,9 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
return 0;
out_no_cb_header:
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
drm_mm_remove_node(&header->node);
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
return ret;
}
@@ -1023,18 +1118,6 @@ void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
vmw_cmdbuf_cur_unlock(man);
}
-/**
- * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
- *
- * @man: The command buffer manager.
- */
-void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
-{
- if (!man)
- return;
-
- tasklet_schedule(&man->tasklet);
-}
/**
* vmw_cmdbuf_send_device_command - Send a command through the device context.
@@ -1059,9 +1142,9 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
memcpy(cmd, command, size);
header->cb_header->length = size;
header->cb_context = SVGA_CB_CONTEXT_DEVICE;
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
status = vmw_cmdbuf_header_submit(header);
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
vmw_cmdbuf_header_free(header);
if (status != SVGA_CB_STATUS_COMPLETED) {
@@ -1074,6 +1157,29 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
}
/**
+ * vmw_cmdbuf_preempt - Send a preempt command through the device
+ * context.
+ *
+ * @man: The command buffer manager.
+ *
+ * Synchronously sends a preempt command.
+ */
+static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
+{
+ struct {
+ uint32 id;
+ SVGADCCmdPreempt body;
+ } __packed cmd;
+
+ cmd.id = SVGA_DC_CMD_PREEMPT;
+ cmd.body.context = SVGA_CB_CONTEXT_0 + context;
+ cmd.body.ignoreIDZero = 0;
+
+ return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
+}
+
+
+/**
* vmw_cmdbuf_startstop - Send a start / stop command through the device
* context.
*
@@ -1082,7 +1188,7 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
*
* Synchronously sends a device start / stop context command.
*/
-static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
bool enable)
{
struct {
@@ -1092,7 +1198,7 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
cmd.body.enable = (enable) ? 1 : 0;
- cmd.body.context = SVGA_CB_CONTEXT_0;
+ cmd.body.context = SVGA_CB_CONTEXT_0 + context;
return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}
@@ -1191,7 +1297,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
struct vmw_cmdbuf_man *man;
struct vmw_cmdbuf_context *ctx;
- int i;
+ unsigned int i;
int ret;
if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
@@ -1226,8 +1332,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
spin_lock_init(&man->lock);
mutex_init(&man->cur_mutex);
mutex_init(&man->space_mutex);
- tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
- (unsigned long) man);
+ mutex_init(&man->error_mutex);
man->default_size = VMW_CMDBUF_INLINE_SIZE;
init_waitqueue_head(&man->alloc_queue);
init_waitqueue_head(&man->idle_queue);
@@ -1236,11 +1341,14 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
&dev_priv->error_waiters);
- ret = vmw_cmdbuf_startstop(man, true);
- if (ret) {
- DRM_ERROR("Failed starting command buffer context 0.\n");
- vmw_cmdbuf_man_destroy(man);
- return ERR_PTR(ret);
+ for_each_cmdbuf_ctx(man, i, ctx) {
+ ret = vmw_cmdbuf_startstop(man, i, true);
+ if (ret) {
+ DRM_ERROR("Failed starting command buffer "
+ "context %u.\n", i);
+ vmw_cmdbuf_man_destroy(man);
+ return ERR_PTR(ret);
+ }
}
return man;
@@ -1290,18 +1398,24 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
*/
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
+ struct vmw_cmdbuf_context *ctx;
+ unsigned int i;
+
WARN_ON_ONCE(man->has_pool);
(void) vmw_cmdbuf_idle(man, false, 10*HZ);
- if (vmw_cmdbuf_startstop(man, false))
- DRM_ERROR("Failed stopping command buffer context 0.\n");
+
+ for_each_cmdbuf_ctx(man, i, ctx)
+ if (vmw_cmdbuf_startstop(man, i, false))
+ DRM_ERROR("Failed stopping command buffer "
+ "context %u.\n", i);
vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
&man->dev_priv->error_waiters);
- tasklet_kill(&man->tasklet);
(void) cancel_work_sync(&man->work);
dma_pool_destroy(man->dheaders);
dma_pool_destroy(man->headers);
mutex_destroy(&man->cur_mutex);
mutex_destroy(&man->space_mutex);
+ mutex_destroy(&man->error_mutex);
kfree(man);
}
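For orientation, the error-recovery flow that vmw_cmdbuf_work_func() now implements, condensed into pseudocode (a reading aid, not driver code):

	/*
	 * for each errored header:
	 *     skip past the offending command (or free the header when the
	 *     error was the last command), mark its context block_submission
	 * preempt every blocked context               -> vmw_cmdbuf_preempt()
	 * for each blocked context:
	 *     process once so preempted buffers land on ctx->preempted
	 *     submitted = [fixed-up header][preempted ...][older submitted]
	 *     clear block_submission
	 * reprocess, then restart each dirty context  -> vmw_cmdbuf_startstop()
	 */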
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 8be26509a9aa..e84fee3ec4f3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -36,7 +36,6 @@
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>
-#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0
@@ -825,7 +824,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
- ret = drm_irq_install(dev, dev->pdev->irq);
+ ret = vmw_irq_install(dev, dev->pdev->irq);
if (ret != 0) {
DRM_ERROR("Failed installing irq: %d\n", ret);
goto out_no_irq;
@@ -937,7 +936,7 @@ out_no_bdev:
vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- drm_irq_uninstall(dev_priv->dev);
+ vmw_irq_uninstall(dev_priv->dev);
out_no_irq:
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
@@ -990,7 +989,7 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- drm_irq_uninstall(dev_priv->dev);
+ vmw_irq_uninstall(dev_priv->dev);
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
@@ -1516,10 +1515,6 @@ static struct drm_driver driver = {
.load = vmw_driver_load,
.unload = vmw_driver_unload,
.lastclose = vmw_lastclose,
- .irq_preinstall = vmw_irq_preinstall,
- .irq_postinstall = vmw_irq_postinstall,
- .irq_uninstall = vmw_irq_uninstall,
- .irq_handler = vmw_irq_handler,
.get_vblank_counter = vmw_get_vblank_counter,
.enable_vblank = vmw_enable_vblank,
.disable_vblank = vmw_disable_vblank,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4b948fba9eec..7e5f30e234b1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,10 +40,12 @@
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
+#include <linux/sync_file.h>
-#define VMWGFX_DRIVER_DATE "20170607"
+#define VMWGFX_DRIVER_NAME "vmwgfx"
+#define VMWGFX_DRIVER_DATE "20170612"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 13
+#define VMWGFX_DRIVER_MINOR 14
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -351,6 +353,12 @@ struct vmw_otable_batch {
struct ttm_buffer_object *otable_bo;
};
+enum {
+ VMW_IRQTHREAD_FENCE,
+ VMW_IRQTHREAD_CMDBUF,
+ VMW_IRQTHREAD_MAX
+};
+
struct vmw_private {
struct ttm_bo_device bdev;
struct ttm_bo_global_ref bo_global_ref;
@@ -529,6 +537,7 @@ struct vmw_private {
struct vmw_otable_batch otable_batch;
struct vmw_cmdbuf_man *cman;
+ DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -561,24 +570,21 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
static inline void vmw_write(struct vmw_private *dev_priv,
unsigned int offset, uint32_t value)
{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+ spin_lock(&dev_priv->hw_lock);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
- spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+ spin_unlock(&dev_priv->hw_lock);
}
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
unsigned int offset)
{
- unsigned long irq_flags;
u32 val;
- spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+ spin_lock(&dev_priv->hw_lock);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
- spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+ spin_unlock(&dev_priv->hw_lock);
return val;
}
@@ -821,7 +827,8 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
uint32_t dx_context_handle,
struct drm_vmw_fence_rep __user
*user_fence_rep,
- struct vmw_fence_obj **out_fence);
+ struct vmw_fence_obj **out_fence,
+ uint32_t flags);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
@@ -836,23 +843,23 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj *fence,
- uint32_t fence_handle);
+ uint32_t fence_handle,
+ int32_t out_fence_fd,
+ struct sync_file *sync_file);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo,
bool interruptible,
bool validate_as_mob);
-
+bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
/**
* IRQs and waiting - vmwgfx_irq.c
*/
-extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
uint32_t seqno, bool interruptible,
unsigned long timeout);
-extern void vmw_irq_preinstall(struct drm_device *dev);
-extern int vmw_irq_postinstall(struct drm_device *dev);
+extern int vmw_irq_install(struct drm_device *dev, int irq);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
uint32_t seqno);
@@ -1150,13 +1157,13 @@ extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
struct vmw_cmdbuf_header *header,
bool flush);
-extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
size_t size, bool interruptible,
struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
bool interruptible);
+extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2cfb3c93f42a..21c62a34e558 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -24,6 +24,7 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
+#include <linux/sync_file.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
@@ -112,11 +113,12 @@ struct vmw_cmd_entry {
bool user_allow;
bool gb_disable;
bool gb_enable;
+ const char *cmd_name;
};
#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
- (_gb_disable), (_gb_enable)}
+ (_gb_disable), (_gb_enable), #_cmd}
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -3302,6 +3304,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
+ true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
@@ -3469,6 +3473,51 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
true, false, true),
};
+bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
+{
+ u32 cmd_id = ((u32 *) buf)[0];
+
+ if (cmd_id >= SVGA_CMD_MAX) {
+ SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
+ const struct vmw_cmd_entry *entry;
+
+ *size = header->size + sizeof(SVGA3dCmdHeader);
+ cmd_id = header->id;
+ if (cmd_id >= SVGA_3D_CMD_MAX)
+ return false;
+
+ cmd_id -= SVGA_3D_CMD_BASE;
+ entry = &vmw_cmd_entries[cmd_id];
+ *cmd = entry->cmd_name;
+ return true;
+ }
+
+ switch (cmd_id) {
+ case SVGA_CMD_UPDATE:
+ *cmd = "SVGA_CMD_UPDATE";
+ *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
+ break;
+ case SVGA_CMD_DEFINE_GMRFB:
+ *cmd = "SVGA_CMD_DEFINE_GMRFB";
+ *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
+ break;
+ case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
+ *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
+ *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+ break;
+ case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
+ *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
+ *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
+ break;
+ default:
+ *cmd = "UNKNOWN";
+ *size = 0;
+ return false;
+ }
+
+ return true;
+}
+
static int vmw_cmd_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -3781,6 +3830,8 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
* which the information should be copied.
* @fence: Pointer to the fence object.
* @fence_handle: User-space fence handle.
+ * @out_fence_fd: exported file descriptor for the fence. -1 if not used
+ * @sync_file: Only used to clean up in case of an error in this function.
*
* This function copies fence information to user-space. If copying fails,
* The user-space struct drm_vmw_fence_rep::error member is hopefully
@@ -3796,7 +3847,9 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
int ret,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj *fence,
- uint32_t fence_handle)
+ uint32_t fence_handle,
+ int32_t out_fence_fd,
+ struct sync_file *sync_file)
{
struct drm_vmw_fence_rep fence_rep;
@@ -3806,6 +3859,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
memset(&fence_rep, 0, sizeof(fence_rep));
fence_rep.error = ret;
+ fence_rep.fd = out_fence_fd;
if (ret == 0) {
BUG_ON(fence == NULL);
@@ -3828,6 +3882,14 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
* and unreference the handle.
*/
if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+ if (sync_file)
+ fput(sync_file->file);
+
+ if (fence_rep.fd != -1) {
+ put_unused_fd(fence_rep.fd);
+ fence_rep.fd = -1;
+ }
+
ttm_ref_object_base_unref(vmw_fp->tfile,
fence_handle, TTM_REF_USAGE);
DRM_ERROR("Fence copy error. Syncing.\n");
@@ -4003,7 +4065,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
uint64_t throttle_us,
uint32_t dx_context_handle,
struct drm_vmw_fence_rep __user *user_fence_rep,
- struct vmw_fence_obj **out_fence)
+ struct vmw_fence_obj **out_fence,
+ uint32_t flags)
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL;
@@ -4013,20 +4076,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct ww_acquire_ctx ticket;
uint32_t handle;
int ret;
+ int32_t out_fence_fd = -1;
+ struct sync_file *sync_file = NULL;
+
+
+ if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0) {
+ DRM_ERROR("Failed to get a fence file descriptor.\n");
+ return out_fence_fd;
+ }
+ }
if (throttle_us) {
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
throttle_us);
if (ret)
- return ret;
+ goto out_free_fence_fd;
}
kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
kernel_commands, command_size,
&header);
- if (IS_ERR(kernel_commands))
- return PTR_ERR(kernel_commands);
+ if (IS_ERR(kernel_commands)) {
+ ret = PTR_ERR(kernel_commands);
+ goto out_free_fence_fd;
+ }
ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
if (ret) {
@@ -4162,8 +4238,32 @@ int vmw_execbuf_process(struct drm_file *file_priv,
__vmw_execbuf_release_pinned_bo(dev_priv, fence);
vmw_clear_validations(sw_context);
+
+ /*
+ * If anything fails here, give up trying to export the fence
+ * and do a sync since the user mode will not be able to sync
+ * the fence itself. This ensures we are still functionally
+ * correct.
+ */
+ if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
+
+ sync_file = sync_file_create(&fence->base);
+ if (!sync_file) {
+ DRM_ERROR("Unable to create sync file for fence\n");
+ put_unused_fd(out_fence_fd);
+ out_fence_fd = -1;
+
+ (void) vmw_fence_obj_wait(fence, false, false,
+ VMW_FENCE_WAIT_TIMEOUT);
+ } else {
+ /* Link the fence with the FD created earlier */
+ fd_install(out_fence_fd, sync_file->file);
+ }
+ }
+
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
- user_fence_rep, fence, handle);
+ user_fence_rep, fence, handle,
+ out_fence_fd, sync_file);
/* Don't unreference when handing fence out */
if (unlikely(out_fence != NULL)) {
@@ -4214,6 +4314,9 @@ out_unlock:
out_free_header:
if (header)
vmw_cmdbuf_header_free(header);
+out_free_fence_fd:
+ if (out_fence_fd >= 0)
+ put_unused_fd(out_fence_fd);
return ret;
}
@@ -4366,6 +4469,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
static const size_t copy_offset[] = {
offsetof(struct drm_vmw_execbuf_arg, context_handle),
sizeof(struct drm_vmw_execbuf_arg)};
+ struct dma_fence *in_fence = NULL;
if (unlikely(size < copy_offset[0])) {
DRM_ERROR("Invalid command size, ioctl %d\n",
@@ -4401,15 +4505,25 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
arg.context_handle = (uint32_t) -1;
break;
case 2:
- if (arg.pad64 != 0) {
- DRM_ERROR("Unused IOCTL data not set to zero.\n");
- return -EINVAL;
- }
- break;
default:
break;
}
+
+ /* If imported a fence FD from elsewhere, then wait on it */
+ if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
+ in_fence = sync_file_get_fence(arg.imported_fence_fd);
+
+ if (!in_fence) {
+ DRM_ERROR("Cannot get imported fence\n");
+ return -EINVAL;
+ }
+
+ ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
+ if (ret)
+ goto out;
+ }
+
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
@@ -4419,12 +4533,16 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
NULL, arg.command_size, arg.throttle_us,
arg.context_handle,
(void __user *)(unsigned long)arg.fence_rep,
- NULL);
+ NULL,
+ arg.flags);
ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0))
- return ret;
+ goto out;
vmw_kms_cursor_post_execbuf(dev_priv);
- return 0;
+out:
+ if (in_fence)
+ dma_fence_put(in_fence);
+ return ret;
}
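The new DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD / DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD flags wire vmwgfx into the sync_file framework. A hypothetical userspace flow, with the libdrm ioctl wrapper assumed and the exported FD returned through struct drm_vmw_fence_rep::fd:

	struct drm_vmw_execbuf_arg arg = { 0 };	/* commands, sizes, ... */

	/* First submission: ask the kernel to export a sync_file FD. */
	arg.flags = DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD;
	drmCommandWriteRead(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
	/* the fence FD arrives in the fence_rep copied back to userspace */

	/* A later submission: wait on that fence before executing. */
	arg.flags = DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD;
	arg.imported_fence_fd = exported_fd;
	drmCommandWriteRead(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));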
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index b8bc5bc7de7e..3bbad22b3748 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -114,12 +114,11 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
container_of(f, struct vmw_fence_obj, base);
struct vmw_fence_manager *fman = fman_from_fence(fence);
- unsigned long irq_flags;
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
list_del_init(&fence->head);
--fman->num_fence_objects;
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
fence->destroy(fence);
}
@@ -252,10 +251,10 @@ static void vmw_fence_work_func(struct work_struct *work)
INIT_LIST_HEAD(&list);
mutex_lock(&fman->goal_irq_mutex);
- spin_lock_irq(&fman->lock);
+ spin_lock(&fman->lock);
list_splice_init(&fman->cleanup_list, &list);
seqno_valid = fman->seqno_valid;
- spin_unlock_irq(&fman->lock);
+ spin_unlock(&fman->lock);
if (!seqno_valid && fman->goal_irq_on) {
fman->goal_irq_on = false;
@@ -305,15 +304,14 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
- unsigned long irq_flags;
bool lists_empty;
(void) cancel_work_sync(&fman->work);
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
lists_empty = list_empty(&fman->fence_list) &&
list_empty(&fman->cleanup_list);
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
BUG_ON(!lists_empty);
kfree(fman);
@@ -323,7 +321,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
struct vmw_fence_obj *fence, u32 seqno,
void (*destroy) (struct vmw_fence_obj *fence))
{
- unsigned long irq_flags;
int ret = 0;
dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
@@ -331,7 +328,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
INIT_LIST_HEAD(&fence->seq_passed_actions);
fence->destroy = destroy;
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
if (unlikely(fman->fifo_down)) {
ret = -EBUSY;
goto out_unlock;
@@ -340,7 +337,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
++fman->num_fence_objects;
out_unlock:
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
return ret;
}
@@ -489,11 +486,9 @@ rerun:
void vmw_fences_update(struct vmw_fence_manager *fman)
{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
__vmw_fences_update(fman);
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
}
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
@@ -650,6 +645,51 @@ out_no_object:
/**
+ * vmw_wait_dma_fence - Wait for a dma fence
+ *
+ * @fman: pointer to a fence manager
+ * @fence: DMA fence to wait on
+ *
+ * This function handles the case when the fence is actually a fence
+ * array. If that's the case, it'll wait on each of the child fences.
+ */
+int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
+ struct dma_fence *fence)
+{
+ struct dma_fence_array *fence_array;
+ int ret = 0;
+ int i;
+
+
+ if (dma_fence_is_signaled(fence))
+ return 0;
+
+ if (!dma_fence_is_array(fence))
+ return dma_fence_wait(fence, true);
+
+ /* From i915: Note that if the fence-array was created in
+ * signal-on-any mode, we should *not* decompose it into its individual
+ * fences. However, we don't currently store which mode the fence-array
+ * is operating in. Fortunately, the only user of signal-on-any is
+ * private to amdgpu and we should not see any incoming fence-array
+ * from sync-file being in signal-on-any mode.
+ */
+
+ fence_array = to_dma_fence_array(fence);
+ for (i = 0; i < fence_array->num_fences; i++) {
+ struct dma_fence *child = fence_array->fences[i];
+
+ ret = dma_fence_wait(child, true);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+
+/**
* vmw_fence_fifo_down - signal all unsignaled fence objects.
*/
@@ -663,14 +703,14 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
* restart when we've released the fman->lock.
*/
- spin_lock_irq(&fman->lock);
+ spin_lock(&fman->lock);
fman->fifo_down = true;
while (!list_empty(&fman->fence_list)) {
struct vmw_fence_obj *fence =
list_entry(fman->fence_list.prev, struct vmw_fence_obj,
head);
dma_fence_get(&fence->base);
- spin_unlock_irq(&fman->lock);
+ spin_unlock(&fman->lock);
ret = vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
@@ -686,18 +726,16 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
BUG_ON(!list_empty(&fence->head));
dma_fence_put(&fence->base);
- spin_lock_irq(&fman->lock);
+ spin_lock(&fman->lock);
}
- spin_unlock_irq(&fman->lock);
+ spin_unlock(&fman->lock);
}
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
fman->fifo_down = false;
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
}
@@ -812,9 +850,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
arg->signaled = vmw_fence_obj_signaled(fence);
arg->signaled_flags = arg->flags;
- spin_lock_irq(&fman->lock);
+ spin_lock(&fman->lock);
arg->passed_seqno = dev_priv->last_read_seqno;
- spin_unlock_irq(&fman->lock);
+ spin_unlock(&fman->lock);
ttm_base_object_unref(&base);
@@ -841,8 +879,7 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
*
* This function is called when the seqno of the fence where @action is
* attached has passed. It queues the event on the submitter's event list.
- * This function is always called from atomic context, and may be called
- * from irq context.
+ * This function is always called from atomic context.
*/
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
@@ -851,13 +888,13 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
struct drm_device *dev = eaction->dev;
struct drm_pending_event *event = eaction->event;
struct drm_file *file_priv;
- unsigned long irq_flags;
+
if (unlikely(event == NULL))
return;
file_priv = event->file_priv;
- spin_lock_irqsave(&dev->event_lock, irq_flags);
+ spin_lock_irq(&dev->event_lock);
if (likely(eaction->tv_sec != NULL)) {
struct timeval tv;
@@ -869,7 +906,7 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
drm_send_event_locked(dev, eaction->event);
eaction->event = NULL;
- spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+ spin_unlock_irq(&dev->event_lock);
}
/**
@@ -904,11 +941,10 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
struct vmw_fence_action *action)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
- unsigned long irq_flags;
bool run_update = false;
mutex_lock(&fman->goal_irq_mutex);
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
fman->pending_actions[action->type]++;
if (dma_fence_is_signaled_locked(&fence->base)) {
@@ -927,7 +963,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
run_update = vmw_fence_goal_check_locked(fence);
}
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
if (run_update) {
if (!fman->goal_irq_on) {
@@ -1114,7 +1150,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
}
vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
- handle);
+ handle, -1, NULL);
vmw_fence_obj_unreference(&fence);
return 0;
out_no_create:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index d9d85aa6ed20..20224dba9d8e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -28,6 +28,7 @@
#ifndef _VMWGFX_FENCE_H_
#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
@@ -102,6 +103,9 @@ extern int vmw_user_fence_create(struct drm_file *file_priv,
struct vmw_fence_obj **p_fence,
uint32_t *p_handle);
+extern int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
+ struct dma_fence *fence);
+
extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman);
extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 0c7e1723292c..b9239ba067c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -30,11 +30,56 @@
#define VMW_FENCE_WRAP (1 << 24)
-irqreturn_t vmw_irq_handler(int irq, void *arg)
+/**
+ * vmw_thread_fn - Deferred (process context) irq handler
+ *
+ * @irq: irq number
+ * @arg: Closure argument. Pointer to a struct drm_device cast to void *
+ *
+ * This function implements the deferred part of irq processing.
+ * The function is guaranteed to run at least once after the
+ * vmw_irq_handler has returned with IRQ_WAKE_THREAD.
+ *
+ */
+static irqreturn_t vmw_thread_fn(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ irqreturn_t ret = IRQ_NONE;
+
+ if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
+ dev_priv->irqthread_pending)) {
+ vmw_fences_update(dev_priv->fman);
+ wake_up_all(&dev_priv->fence_queue);
+ ret = IRQ_HANDLED;
+ }
+
+ if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
+ dev_priv->irqthread_pending)) {
+ vmw_cmdbuf_irqthread(dev_priv->cman);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+/**
+ * vmw_irq_handler - irq handler
+ *
+ * @irq: irq number
+ * @arg: Closure argument. Pointer to a struct drm_device cast to void *
+ *
+ * This function implements the quick part of irq processing.
+ * The function performs fast actions like clearing the device interrupt
+ * flags and also reasonably quick actions like waking processes waiting for
+ * FIFO space. Other IRQ actions are deferred to the IRQ thread.
+ */
+static irqreturn_t vmw_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status, masked_status;
+ irqreturn_t ret = IRQ_HANDLED;
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
masked_status = status & READ_ONCE(dev_priv->irq_mask);
@@ -45,20 +90,21 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
if (!status)
return IRQ_NONE;
- if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
- SVGA_IRQFLAG_FENCE_GOAL)) {
- vmw_fences_update(dev_priv->fman);
- wake_up_all(&dev_priv->fence_queue);
- }
-
if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
wake_up_all(&dev_priv->fifo_queue);
- if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
- SVGA_IRQFLAG_ERROR))
- vmw_cmdbuf_tasklet_schedule(dev_priv->cman);
+ if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
+ SVGA_IRQFLAG_FENCE_GOAL)) &&
+ !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
+ ret = IRQ_WAKE_THREAD;
- return IRQ_HANDLED;
+ if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
+ SVGA_IRQFLAG_ERROR)) &&
+ !test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
+ dev_priv->irqthread_pending))
+ ret = IRQ_WAKE_THREAD;
+
+ return ret;
}
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
@@ -281,23 +327,15 @@ int vmw_wait_seqno(struct vmw_private *dev_priv,
return ret;
}
-void vmw_irq_preinstall(struct drm_device *dev)
+static void vmw_irq_preinstall(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status;
- if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
- return;
-
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
-int vmw_irq_postinstall(struct drm_device *dev)
-{
- return 0;
-}
-
void vmw_irq_uninstall(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
@@ -306,8 +344,41 @@ void vmw_irq_uninstall(struct drm_device *dev)
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return;
+ if (!dev->irq_enabled)
+ return;
+
vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+
+ dev->irq_enabled = false;
+ free_irq(dev->irq, dev);
+}
+
+/**
+ * vmw_irq_install - Install the irq handlers
+ *
+ * @dev: Pointer to the drm device.
+ * @irq: The irq number.
+ * Return: Zero if successful. Negative number otherwise.
+ */
+int vmw_irq_install(struct drm_device *dev, int irq)
+{
+ int ret;
+
+ if (dev->irq_enabled)
+ return -EBUSY;
+
+ vmw_irq_preinstall(dev);
+
+ ret = request_threaded_irq(irq, vmw_irq_handler, vmw_thread_fn,
+ IRQF_SHARED, VMWGFX_DRIVER_NAME, dev);
+ if (ret < 0)
+ return ret;
+
+ dev->irq_enabled = true;
+ dev->irq = irq;
+
+ return ret;
}
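The tasklet-to-threaded-irq conversion follows the stock request_threaded_irq() pattern: the hard handler only does cheap work and returns IRQ_WAKE_THREAD once it has parked work for the thread, with a pending bitmap guaranteeing each source is serviced at least once. A generic sketch of the handoff; every name here is a placeholder, not a driver symbol:

	static unsigned long pending;	/* placeholder pending bitmap */

	static irqreturn_t my_hard_handler(int irq, void *arg)
	{
		/* cheap part: ack hardware, then park work for the thread */
		if (!test_and_set_bit(0, &pending))
			return IRQ_WAKE_THREAD;
		return IRQ_HANDLED;
	}

	static irqreturn_t my_thread_fn(int irq, void *arg)
	{
		if (test_and_clear_bit(0, &pending))
			;	/* slow work runs here, in process context */
		return IRQ_HANDLED;
	}

	/* at init: */
	ret = request_threaded_irq(irq, my_hard_handler, my_thread_fn,
				   IRQF_SHARED, "mydrv", dev);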
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 36dd7930bf5f..5d50e45ae274 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2494,7 +2494,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
if (file_priv)
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
ret, user_fence_rep, fence,
- handle);
+ handle, -1, NULL);
if (out_fence)
*out_fence = fence;
else