author     Thierry Reding <treding@nvidia.com>  2017-02-28 15:46:41 +0100
committer  Thierry Reding <treding@nvidia.com>  2017-02-28 16:16:43 +0100
commit     e6b62714e87c8811d5564b6a0738dcde63a51774 (patch)
tree       295fdcf6c17e379b393de020cd0c0848ba0a872b /drivers
parent     a4a69da06bc11a937a6e417938b1bb698ee1fa46 (diff)
drm: Introduce drm_gem_object_{get,put}()
For consistency with other reference counting APIs in the kernel, add drm_gem_object_get() and drm_gem_object_put(), as well as an unlocked variant of the latter, to reference count GEM buffer objects.

Compatibility aliases are added to keep existing code working. To help speed up the transition, all the instances of the old functions in the DRM core are already replaced in this commit. The existing semantic patch for the DRM subsystem-wide conversion is extended to account for these new helpers.

Reviewed-by: Sean Paul <seanpaul@chromium.org>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170228144643.5668-6-thierry.reding@gmail.com
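The header-side change is not visible in the drivers-only diffstat below, but the commit message describes the new helpers and their compatibility aliases. A minimal sketch of how such aliases typically look, assuming they land in include/drm/drm_gem.h as thin kref wrappers (the exact definitions are not part of this diff):

/*
 * Sketch only: the new reference helpers as thin kref wrappers, with the
 * old names kept as compatibility aliases so unconverted callers still
 * build. The authoritative definitions live in include/drm/drm_gem.h,
 * which is outside the 'drivers' scope of this diffstat.
 */
static inline void drm_gem_object_get(struct drm_gem_object *obj)
{
	kref_get(&obj->refcount);
}

/* Old name kept as an alias during the subsystem-wide conversion. */
static inline void drm_gem_object_reference(struct drm_gem_object *obj)
{
	drm_gem_object_get(obj);
}

/* Likewise for the unlocked put path. */
static inline void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
	drm_gem_object_put_unlocked(obj);
}

With aliases like these in place, the driver-side conversions in the diff below are mechanical one-for-one renames, which is what the extended semantic patch automates.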
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c   | 16
-rw-r--r--  drivers/gpu/drm/drm_gem.c             | 44
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c  | 10
-rw-r--r--  drivers/gpu/drm/drm_prime.c           | 10
4 files changed, 40 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 920bb5764cd6..be6d90664e50 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -102,7 +102,7 @@ void drm_fb_cma_destroy(struct drm_framebuffer *fb)
for (i = 0; i < 4; i++) {
if (fb_cma->obj[i])
- drm_gem_object_unreference_unlocked(&fb_cma->obj[i]->base);
+ drm_gem_object_put_unlocked(&fb_cma->obj[i]->base);
}
drm_framebuffer_cleanup(fb);
@@ -190,7 +190,7 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
if (!obj) {
dev_err(dev->dev, "Failed to lookup GEM object\n");
ret = -ENXIO;
- goto err_gem_object_unreference;
+ goto err_gem_object_put;
}
min_size = (height - 1) * mode_cmd->pitches[i]
@@ -198,9 +198,9 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
+ mode_cmd->offsets[i];
if (obj->size < min_size) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
ret = -EINVAL;
- goto err_gem_object_unreference;
+ goto err_gem_object_put;
}
objs[i] = to_drm_gem_cma_obj(obj);
}
@@ -208,14 +208,14 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i, funcs);
if (IS_ERR(fb_cma)) {
ret = PTR_ERR(fb_cma);
- goto err_gem_object_unreference;
+ goto err_gem_object_put;
}
return &fb_cma->fb;
-err_gem_object_unreference:
+err_gem_object_put:
for (i--; i >= 0; i--)
- drm_gem_object_unreference_unlocked(&objs[i]->base);
+ drm_gem_object_put_unlocked(&objs[i]->base);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
@@ -477,7 +477,7 @@ err_cma_destroy:
err_fb_info_destroy:
drm_fb_helper_fini(helper);
err_gem_free_object:
- drm_gem_object_unreference_unlocked(&obj->base);
+ drm_gem_object_put_unlocked(&obj->base);
return ret;
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index bc93de308673..b1e28c944637 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -218,7 +218,7 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
}
static void
-drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
bool final = false;
@@ -241,7 +241,7 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
mutex_unlock(&dev->object_name_lock);
if (final)
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
}
/*
@@ -262,7 +262,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
- drm_gem_object_handle_unreference_unlocked(obj);
+ drm_gem_object_handle_put_unlocked(obj);
return 0;
}
@@ -352,7 +352,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
WARN_ON(!mutex_is_locked(&dev->object_name_lock));
if (obj->handle_count++ == 0)
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
/*
* Get the user-visible handle using idr. Preload and perform
@@ -392,7 +392,7 @@ err_remove:
idr_remove(&file_priv->object_idr, handle);
spin_unlock(&file_priv->table_lock);
err_unref:
- drm_gem_object_handle_unreference_unlocked(obj);
+ drm_gem_object_handle_put_unlocked(obj);
return ret;
}
@@ -606,7 +606,7 @@ drm_gem_object_lookup(struct drm_file *filp, u32 handle)
/* Check if we currently have a reference on the object */
obj = idr_find(&filp->object_idr, handle);
if (obj)
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
spin_unlock(&filp->table_lock);
@@ -683,7 +683,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
err:
mutex_unlock(&dev->object_name_lock);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -713,7 +713,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->object_name_lock);
obj = idr_find(&dev->object_name_idr, (int) args->name);
if (obj) {
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
} else {
mutex_unlock(&dev->object_name_lock);
return -ENOENT;
@@ -721,7 +721,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
if (ret)
return ret;
@@ -809,16 +809,16 @@ drm_gem_object_free(struct kref *kref)
EXPORT_SYMBOL(drm_gem_object_free);
/**
- * drm_gem_object_unreference_unlocked - release a GEM BO reference
+ * drm_gem_object_put_unlocked - drop a GEM buffer object reference
* @obj: GEM buffer object
*
* This releases a reference to @obj. Callers must not hold the
* &drm_device.struct_mutex lock when calling this function.
*
- * See also __drm_gem_object_unreference().
+ * See also __drm_gem_object_put().
*/
void
-drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
+drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
struct drm_device *dev;
@@ -834,10 +834,10 @@ drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
&dev->struct_mutex))
mutex_unlock(&dev->struct_mutex);
}
-EXPORT_SYMBOL(drm_gem_object_unreference_unlocked);
+EXPORT_SYMBOL(drm_gem_object_put_unlocked);
/**
- * drm_gem_object_unreference - release a GEM BO reference
+ * drm_gem_object_put - release a GEM buffer object reference
* @obj: GEM buffer object
*
* This releases a reference to @obj. Callers must hold the
@@ -845,10 +845,10 @@ EXPORT_SYMBOL(drm_gem_object_unreference_unlocked);
* driver doesn't use &drm_device.struct_mutex for anything.
*
* For drivers not encumbered with legacy locking use
- * drm_gem_object_unreference_unlocked() instead.
+ * drm_gem_object_put_unlocked() instead.
*/
void
-drm_gem_object_unreference(struct drm_gem_object *obj)
+drm_gem_object_put(struct drm_gem_object *obj)
{
if (obj) {
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
@@ -856,7 +856,7 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
kref_put(&obj->refcount, drm_gem_object_free);
}
}
-EXPORT_SYMBOL(drm_gem_object_unreference);
+EXPORT_SYMBOL(drm_gem_object_put);
/**
* drm_gem_vm_open - vma->ops->open implementation for GEM
@@ -869,7 +869,7 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);
@@ -884,7 +884,7 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
@@ -935,7 +935,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
* (which should happen whether the vma was created by this call, or
* by a vm_open due to mremap or partial unmap or whatever).
*/
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
return 0;
}
@@ -992,14 +992,14 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
if (!drm_vma_node_is_allowed(node, priv)) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return -EACCES;
}
ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
vma);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 5cf38a474845..906984d4bec2 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -121,7 +121,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
return cma_obj;
error:
- drm_gem_object_unreference_unlocked(&cma_obj->base);
+ drm_gem_object_put_unlocked(&cma_obj->base);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
@@ -163,7 +163,7 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
*/
ret = drm_gem_handle_create(file_priv, gem_obj, handle);
/* drop reference from allocate - handle holds it now. */
- drm_gem_object_unreference_unlocked(gem_obj);
+ drm_gem_object_put_unlocked(gem_obj);
if (ret)
return ERR_PTR(ret);
@@ -293,7 +293,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
*offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
- drm_gem_object_unreference_unlocked(gem_obj);
+ drm_gem_object_put_unlocked(gem_obj);
return 0;
}
@@ -416,13 +416,13 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
return -EINVAL;
if (!drm_vma_node_is_allowed(node, priv)) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return -EACCES;
}
cma_obj = to_drm_gem_cma_obj(obj);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 25aa4558f1b5..866b294e7c61 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -318,7 +318,7 @@ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
return dma_buf;
drm_dev_ref(dev);
- drm_gem_object_reference(exp_info->priv);
+ drm_gem_object_get(exp_info->priv);
return dma_buf;
}
@@ -339,7 +339,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
struct drm_device *dev = obj->dev;
/* drop the reference on the export fd holds */
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
drm_dev_unref(dev);
}
@@ -585,7 +585,7 @@ out_have_handle:
fail_put_dmabuf:
dma_buf_put(dmabuf);
out:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
out_unlock:
mutex_unlock(&file_priv->prime.lock);
@@ -616,7 +616,7 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
* Importing dmabuf exported from out own gem increases
* refcount on gem itself instead of f_count of dmabuf.
*/
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
return obj;
}
}
@@ -704,7 +704,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
ret = drm_gem_handle_create_tail(file_priv, obj, handle);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
if (ret)
goto out_put;