Diffstat (limited to 'drivers/gpu/drm/drm_managed.c')
 drivers/gpu/drm/drm_managed.c | 65 +++++++++++++++++++++++++++++++++++++++----
 1 file changed, 61 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
index 37d7db6223be..247f468731de 100644
--- a/drivers/gpu/drm/drm_managed.c
+++ b/drivers/gpu/drm/drm_managed.c
@@ -7,7 +7,9 @@
#include <drm/drm_managed.h>
+#include <linux/export.h>
#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -48,10 +50,10 @@ struct drmres {
* Some archs want to perform DMA into kmalloc caches
* and need a guaranteed alignment larger than
* the alignment of a 64-bit integer.
- * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
- * buffer alignment as if it was allocated by plain kmalloc().
+ * Thus we use ARCH_DMA_MINALIGN for data[] which will force the same
+ * alignment for struct drmres when allocated by kmalloc().
*/
- u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
+ u8 __aligned(ARCH_DMA_MINALIGN) data[];
};
static void free_dr(struct drmres *dr)
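/*
 * [Editorial aside, not part of the patch.] The __aligned() attribute on
 * the flexible data[] member raises the alignment of the containing
 * struct and rounds its size up accordingly, so offsetof(struct drmres,
 * data) is a multiple of ARCH_DMA_MINALIGN and the managed buffer keeps
 * DMA-safe alignment. A minimal analogue of the trick:
 *
 *	struct foo {
 *		struct list_head entry;
 *		u8 __aligned(ARCH_DMA_MINALIGN) data[];
 *	};
 *	static_assert(offsetof(struct foo, data) % ARCH_DMA_MINALIGN == 0);
 */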
@@ -176,6 +178,45 @@ int __drmm_add_action_or_reset(struct drm_device *dev,
EXPORT_SYMBOL(__drmm_add_action_or_reset);
/**
+ * drmm_release_action - release a managed action from a &drm_device
+ * @dev: DRM device
+ * @action: function which would be called when @dev is released
+ * @data: opaque pointer, passed to @action
+ *
+ * This function calls the @action previously added by drmm_add_action()
+ * immediately.
+ * The @action is removed from the list of cleanup actions for @dev,
+ * which means that it won't be called in the final drm_dev_put().
+ */
+void drmm_release_action(struct drm_device *dev,
+ drmres_release_t action,
+ void *data)
+{
+ struct drmres *dr_match = NULL, *dr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->managed.lock, flags);
+ list_for_each_entry_reverse(dr, &dev->managed.resources, node.entry) {
+ if (dr->node.release == action) {
+ if (!data || *(void **)dr->data == data) {
+ dr_match = dr;
+ del_dr(dev, dr_match);
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&dev->managed.lock, flags);
+
+ if (WARN_ON(!dr_match))
+ return;
+
+ action(dev, data);
+
+ free_dr(dr_match);
+}
+EXPORT_SYMBOL(drmm_release_action);
+
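/*
 * [Editorial sketch, not part of the patch.] Hypothetical usage of
 * drmm_release_action(); my_hw_fini(), my_bind(), my_unbind() and the
 * priv argument are made-up driver names, not DRM API.
 */
#include <drm/drm_device.h>
#include <drm/drm_managed.h>

static void my_hw_fini(struct drm_device *drm, void *data)
{
	/* made-up teardown for the driver object passed as @data */
}

static int my_bind(struct drm_device *drm, void *priv)
{
	/* run my_hw_fini(drm, priv) at the final drm_dev_put() ... */
	return drmm_add_action(drm, my_hw_fini, priv);
}

static void my_unbind(struct drm_device *drm, void *priv)
{
	/* ... unless we tear down early: run and unregister it now */
	drmm_release_action(drm, my_hw_fini, priv);
}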
+/**
* drmm_kmalloc - &drm_device managed kmalloc()
* @dev: DRM device
* @size: size of the memory allocation
@@ -195,7 +236,7 @@ void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
size, gfp);
return NULL;
}
- dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL);
+ dr->node.name = kstrdup_const("kmalloc", gfp);
add_dr(dev, dr);
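/*
 * [Editorial note, not part of the patch.] The hunk above makes
 * drmm_kmalloc() duplicate the "kmalloc" node name with the caller's
 * gfp flags. With the old hard-coded GFP_KERNEL, a caller such as
 *
 *	obj = drmm_kmalloc(dev, sizeof(*obj), GFP_ATOMIC);
 *
 * could end up sleeping inside an atomic context via kstrdup_const().
 */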
@@ -262,3 +303,19 @@ void drmm_kfree(struct drm_device *dev, void *data)
free_dr(dr_match);
}
EXPORT_SYMBOL(drmm_kfree);
+
+void __drmm_mutex_release(struct drm_device *dev, void *res)
+{
+ struct mutex *lock = res;
+
+ mutex_destroy(lock);
+}
+EXPORT_SYMBOL(__drmm_mutex_release);
+
+void __drmm_workqueue_release(struct drm_device *device, void *res)
+{
+ struct workqueue_struct *wq = res;
+
+ destroy_workqueue(wq);
+}
+EXPORT_SYMBOL(__drmm_workqueue_release);
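/*
 * [Editorial sketch, not part of the patch.] These two functions are the
 * release actions behind managed-init helpers in <drm/drm_managed.h>,
 * e.g. drmm_mutex_init(); the workqueue counterpart is wired up the same
 * way by the managed workqueue helpers. Hypothetical usage, where
 * struct my_priv and my_init() are made-up driver names:
 */
#include <drm/drm_managed.h>
#include <linux/mutex.h>

struct my_priv {
	struct mutex lock;	/* made-up driver field */
};

static int my_init(struct drm_device *drm, struct my_priv *priv)
{
	/*
	 * mutex_init() runs now; __drmm_mutex_release() then calls
	 * mutex_destroy() automatically at the final drm_dev_put().
	 */
	return drmm_mutex_init(drm, &priv->lock);
}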