Diffstat (limited to 'drivers/gpu/drm/vmwgfx/ttm_object.c')
 -rw-r--r--   drivers/gpu/drm/vmwgfx/ttm_object.c | 327
 1 file changed, 100 insertions(+), 227 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 36990b80e790..36d46b79562a 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
- * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2023 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -42,6 +42,22 @@
  */
 
+#define pr_fmt(fmt) "[TTM] " fmt
+
+#include "ttm_object.h"
+#include "vmwgfx_drv.h"
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/hashtable.h>
+
+MODULE_IMPORT_NS("DMA_BUF");
+
+#define VMW_TTM_OBJECT_REF_HT_ORDER 10
+
 /**
  * struct ttm_object_file
  *
@@ -55,49 +71,33 @@
  *
  * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
  * for fast lookup of ref objects given a base object.
+ *
+ * @refcount: reference/usage count
  */
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
-#include <drm/ttm/ttm_module.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/atomic.h>
-#include "ttm_object.h"
-
 struct ttm_object_file {
        struct ttm_object_device *tdev;
        spinlock_t lock;
        struct list_head ref_list;
-       struct drm_open_hash ref_hash[TTM_REF_NUM];
+       DECLARE_HASHTABLE(ref_hash, VMW_TTM_OBJECT_REF_HT_ORDER);
        struct kref refcount;
 };
 
-/**
+/*
  * struct ttm_object_device
  *
- * @object_lock: lock that protects the object_hash hash table.
- *
- * @object_hash: hash table for fast lookup of object global names.
- *
- * @object_count: Per device object count.
+ * @object_lock: lock that protects idr.
  *
  * This is the per-device data structure needed for ttm object management.
  */
 
 struct ttm_object_device {
        spinlock_t object_lock;
-       struct drm_open_hash object_hash;
-       atomic_t object_count;
-       struct ttm_mem_global *mem_glob;
        struct dma_buf_ops ops;
        void (*dmabuf_release)(struct dma_buf *dma_buf);
-       size_t dma_buf_size;
        struct idr idr;
 };
 
-/**
+/*
  * struct ttm_ref_object
  *
  * @hash: Hash entry for the per-file object reference hash.
@@ -120,10 +120,9 @@ struct ttm_object_device {
 
 struct ttm_ref_object {
        struct rcu_head rcu_head;
-       struct drm_hash_item hash;
+       struct vmwgfx_hash_item hash;
        struct list_head head;
        struct kref kref;
-       enum ttm_ref_type ref_type;
        struct ttm_base_object *obj;
        struct ttm_object_file *tfile;
 };
@@ -137,6 +136,36 @@ ttm_object_file_ref(struct ttm_object_file *tfile)
        return tfile;
 }
 
+static int ttm_tfile_find_ref_rcu(struct ttm_object_file *tfile,
+                                  uint64_t key,
+                                  struct vmwgfx_hash_item **p_hash)
+{
+       struct vmwgfx_hash_item *hash;
+
+       hash_for_each_possible_rcu(tfile->ref_hash, hash, head, key) {
+               if (hash->key == key) {
+                       *p_hash = hash;
+                       return 0;
+               }
+       }
+       return -EINVAL;
+}
+
+static int ttm_tfile_find_ref(struct ttm_object_file *tfile,
+                              uint64_t key,
+                              struct vmwgfx_hash_item **p_hash)
+{
+       struct vmwgfx_hash_item *hash;
+
+       hash_for_each_possible(tfile->ref_hash, hash, head, key) {
+               if (hash->key == key) {
+                       *p_hash = hash;
+                       return 0;
+               }
+       }
+       return -EINVAL;
+}
+
 static void ttm_object_file_destroy(struct kref *kref)
 {
        struct ttm_object_file *tfile =
@@ -159,9 +188,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
                         struct ttm_base_object *base,
                         bool shareable,
                         enum ttm_object_type object_type,
-                        void (*refcount_release) (struct ttm_base_object **),
-                        void (*ref_obj_release) (struct ttm_base_object *,
-                                                 enum ttm_ref_type ref_type))
+                        void (*refcount_release) (struct ttm_base_object **))
 {
        struct ttm_object_device *tdev = tfile->tdev;
        int ret;
@@ -169,19 +196,18 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
        base->shareable = shareable;
        base->tfile = ttm_object_file_ref(tfile);
        base->refcount_release = refcount_release;
-       base->ref_obj_release = ref_obj_release;
        base->object_type = object_type;
        kref_init(&base->refcount);
        idr_preload(GFP_KERNEL);
        spin_lock(&tdev->object_lock);
-       ret = idr_alloc(&tdev->idr, base, 0, 0, GFP_NOWAIT);
+       ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT);
        spin_unlock(&tdev->object_lock);
        idr_preload_end();
        if (ret < 0)
                return ret;
        base->handle = ret;
-       ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+       ret = ttm_ref_object_add(tfile, base, NULL, false);
        if (unlikely(ret != 0))
                goto out_err1;
 
@@ -225,64 +251,29 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
        kref_put(&base->refcount, ttm_release_base);
 }
 
-/**
- * ttm_base_object_noref_lookup - look up a base object without reference
- * @tfile: The struct ttm_object_file the object is registered with.
- * @key: The object handle.
- *
- * This function looks up a ttm base object and returns a pointer to it
- * without refcounting the pointer. The returned pointer is only valid
- * until ttm_base_object_noref_release() is called, and the object
- * pointed to by the returned pointer may be doomed. Any persistent usage
- * of the object requires a refcount to be taken using kref_get_unless_zero().
- * Iff this function returns successfully it needs to be paired with
- * ttm_base_object_noref_release() and no sleeping- or scheduling functions
- * may be called inbetween these function callse.
- *
- * Return: A pointer to the object if successful or NULL otherwise.
- */
-struct ttm_base_object *
-ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
-{
-       struct drm_hash_item *hash;
-       struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
-       int ret;
-
-       rcu_read_lock();
-       ret = drm_ht_find_item_rcu(ht, key, &hash);
-       if (ret) {
-               rcu_read_unlock();
-               return NULL;
-       }
-
-       __release(RCU);
-       return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
-}
-EXPORT_SYMBOL(ttm_base_object_noref_lookup);
-
 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
-                                              uint32_t key)
+                                              uint64_t key)
 {
        struct ttm_base_object *base = NULL;
-       struct drm_hash_item *hash;
-       struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+       struct vmwgfx_hash_item *hash;
        int ret;
 
-       rcu_read_lock();
-       ret = drm_ht_find_item_rcu(ht, key, &hash);
+       spin_lock(&tfile->lock);
+       ret = ttm_tfile_find_ref(tfile, key, &hash);
 
        if (likely(ret == 0)) {
-               base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+               base = hlist_entry(hash, struct ttm_ref_object, hash)->obj;
                if (!kref_get_unless_zero(&base->refcount))
                        base = NULL;
        }
-       rcu_read_unlock();
+       spin_unlock(&tfile->lock);
 
        return base;
 }
 
 struct ttm_base_object *
-ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint64_t key)
 {
        struct ttm_base_object *base;
 
@@ -296,64 +287,13 @@ ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
        return base;
 }
 
-/**
- * ttm_ref_object_exists - Check whether a caller has a valid ref object
- * (has opened) a base object.
- *
- * @tfile: Pointer to a struct ttm_object_file identifying the caller.
- * @base: Pointer to a struct base object.
- *
- * Checks wether the caller identified by @tfile has put a valid USAGE
- * reference object on the base object identified by @base.
- */
-bool ttm_ref_object_exists(struct ttm_object_file *tfile,
-                          struct ttm_base_object *base)
-{
-       struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
-       struct drm_hash_item *hash;
-       struct ttm_ref_object *ref;
-
-       rcu_read_lock();
-       if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
-               goto out_false;
-
-       /*
-        * Verify that the ref object is really pointing to our base object.
-        * Our base object could actually be dead, and the ref object pointing
-        * to another base object with the same handle.
-        */
-       ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
-       if (unlikely(base != ref->obj))
-               goto out_false;
-
-       /*
-        * Verify that the ref->obj pointer was actually valid!
-        */
-       rmb();
-       if (unlikely(kref_read(&ref->kref) == 0))
-               goto out_false;
-
-       rcu_read_unlock();
-       return true;
-
- out_false:
-       rcu_read_unlock();
-       return false;
-}
-
 int ttm_ref_object_add(struct ttm_object_file *tfile,
                       struct ttm_base_object *base,
-                      enum ttm_ref_type ref_type, bool *existed,
+                      bool *existed,
                       bool require_existed)
 {
-       struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
-       struct drm_hash_item *hash;
-       struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
-       struct ttm_operation_ctx ctx = {
-               .interruptible = false,
-               .no_wait_gpu = false
-       };
+       struct vmwgfx_hash_item *hash;
        int ret = -EINVAL;
 
        if (base->tfile != tfile && !base->shareable)
@@ -364,10 +304,10 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 
        while (ret == -EINVAL) {
                rcu_read_lock();
-               ret = drm_ht_find_item_rcu(ht, base->handle, &hash);
+               ret = ttm_tfile_find_ref_rcu(tfile, base->handle, &hash);
 
                if (ret == 0) {
-                       ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+                       ref = hlist_entry(hash, struct ttm_ref_object, hash);
                        if (kref_get_unless_zero(&ref->kref)) {
                                rcu_read_unlock();
                                break;
@@ -378,39 +318,25 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
                if (require_existed)
                        return -EPERM;
 
-               ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
-                                          &ctx);
-               if (unlikely(ret != 0))
-                       return ret;
                ref = kmalloc(sizeof(*ref), GFP_KERNEL);
                if (unlikely(ref == NULL)) {
-                       ttm_mem_global_free(mem_glob, sizeof(*ref));
                        return -ENOMEM;
                }
 
                ref->hash.key = base->handle;
                ref->obj = base;
                ref->tfile = tfile;
-               ref->ref_type = ref_type;
                kref_init(&ref->kref);
 
                spin_lock(&tfile->lock);
-               ret = drm_ht_insert_item_rcu(ht, &ref->hash);
-
-               if (likely(ret == 0)) {
-                       list_add_tail(&ref->head, &tfile->ref_list);
-                       kref_get(&base->refcount);
-                       spin_unlock(&tfile->lock);
-                       if (existed != NULL)
-                               *existed = false;
-                       break;
-               }
+               hash_add_rcu(tfile->ref_hash, &ref->hash.head, ref->hash.key);
+               ret = 0;
 
+               list_add_tail(&ref->head, &tfile->ref_list);
+               kref_get(&base->refcount);
                spin_unlock(&tfile->lock);
-               BUG_ON(ret != -EINVAL);
-
-               ttm_mem_global_free(mem_glob, sizeof(*ref));
-               kfree(ref);
+               if (existed != NULL)
+                       *existed = false;
        }
 
        return ret;
@@ -421,40 +347,31 @@ ttm_ref_object_release(struct kref *kref)
 {
        struct ttm_ref_object *ref =
            container_of(kref, struct ttm_ref_object, kref);
-       struct ttm_base_object *base = ref->obj;
        struct ttm_object_file *tfile = ref->tfile;
-       struct drm_open_hash *ht;
-       struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
 
-       ht = &tfile->ref_hash[ref->ref_type];
-       (void)drm_ht_remove_item_rcu(ht, &ref->hash);
+       hash_del_rcu(&ref->hash.head);
        list_del(&ref->head);
        spin_unlock(&tfile->lock);
 
-       if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
-               base->ref_obj_release(base, ref->ref_type);
-
        ttm_base_object_unref(&ref->obj);
-       ttm_mem_global_free(mem_glob, sizeof(*ref));
       kfree_rcu(ref, rcu_head);
        spin_lock(&tfile->lock);
 }
 
 int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
-                             unsigned long key, enum ttm_ref_type ref_type)
+                             unsigned long key)
 {
-       struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
-       struct drm_hash_item *hash;
+       struct vmwgfx_hash_item *hash;
        int ret;
 
        spin_lock(&tfile->lock);
-       ret = drm_ht_find_item(ht, key, &hash);
+       ret = ttm_tfile_find_ref(tfile, key, &hash);
        if (unlikely(ret != 0)) {
                spin_unlock(&tfile->lock);
                return -EINVAL;
        }
-       ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+       ref = hlist_entry(hash, struct ttm_ref_object, hash);
        kref_put(&ref->kref, ttm_ref_object_release);
        spin_unlock(&tfile->lock);
        return 0;
@@ -464,7 +381,6 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
 {
        struct ttm_ref_object *ref;
        struct list_head *list;
-       unsigned int i;
        struct ttm_object_file *tfile = *p_tfile;
 
        *p_tfile = NULL;
@@ -482,19 +398,13 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
        }
 
        spin_unlock(&tfile->lock);
-       for (i = 0; i < TTM_REF_NUM; ++i)
-               drm_ht_remove(&tfile->ref_hash[i]);
 
        ttm_object_file_unref(&tfile);
 }
 
-struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
-                                            unsigned int hash_order)
+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev)
 {
        struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
-       unsigned int i;
-       unsigned int j = 0;
-       int ret;
 
        if (unlikely(tfile == NULL))
                return NULL;
@@ -504,53 +414,34 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
        kref_init(&tfile->refcount);
        INIT_LIST_HEAD(&tfile->ref_list);
 
-       for (i = 0; i < TTM_REF_NUM; ++i) {
-               ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
-               if (ret) {
-                       j = i;
-                       goto out_err;
-               }
-       }
+       hash_init(tfile->ref_hash);
 
        return tfile;
-out_err:
-       for (i = 0; i < j; ++i)
-               drm_ht_remove(&tfile->ref_hash[i]);
-
-       kfree(tfile);
-
-       return NULL;
 }
 
 struct ttm_object_device *
-ttm_object_device_init(struct ttm_mem_global *mem_glob,
-                      unsigned int hash_order,
-                      const struct dma_buf_ops *ops)
+ttm_object_device_init(const struct dma_buf_ops *ops)
 {
        struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
-       int ret;
 
        if (unlikely(tdev == NULL))
                return NULL;
 
-       tdev->mem_glob = mem_glob;
        spin_lock_init(&tdev->object_lock);
-       atomic_set(&tdev->object_count, 0);
-       ret = drm_ht_create(&tdev->object_hash, hash_order);
-       if (ret != 0)
-               goto out_no_object_hash;
 
-       idr_init(&tdev->idr);
+       /*
+        * Our base is at VMWGFX_NUM_MOB + 1 because we want to create
+        * a seperate namespace for GEM handles (which are
+        * 1..VMWGFX_NUM_MOB) and the surface handles. Some ioctl's
+        * can take either handle as an argument so we want to
+        * easily be able to tell whether the handle refers to a
+        * GEM buffer or a surface.
+        */
+       idr_init_base(&tdev->idr, VMWGFX_NUM_MOB + 1);
        tdev->ops = *ops;
        tdev->dmabuf_release = tdev->ops.release;
        tdev->ops.release = ttm_prime_dmabuf_release;
-       tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
-               ttm_round_pot(sizeof(struct file));
 
        return tdev;
-
-out_no_object_hash:
-       kfree(tdev);
-       return NULL;
 }
 
 void ttm_object_device_release(struct ttm_object_device **p_tdev)
@@ -561,7 +452,6 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
 
        WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
        idr_destroy(&tdev->idr);
-       drm_ht_remove(&tdev->object_hash);
 
        kfree(tdev);
 }
@@ -569,7 +459,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
 /**
  * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
  *
- * @dma_buf: Non-refcounted pointer to a struct dma-buf.
+ * @dmabuf: Non-refcounted pointer to a struct dma-buf.
  *
  * Obtain a file reference from a lookup structure that doesn't refcount
  * the file, but synchronizes with its release method to make sure it has
@@ -581,7 +471,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
  */
 static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
 {
-       return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
+       return file_ref_get(&dmabuf->file->f_ref);
 }
 
 /**
@@ -630,7 +520,6 @@ static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
        if (prime->dma_buf == dma_buf)
                prime->dma_buf = NULL;
        mutex_unlock(&prime->mutex);
-       ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
        ttm_base_object_unref(&base);
 }
 
@@ -664,7 +553,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
        prime = (struct ttm_prime_object *) dma_buf->priv;
        base = &prime->base;
        *handle = base->handle;
-       ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+       ret = ttm_ref_object_add(tfile, base, NULL, false);
 
        dma_buf_put(dma_buf);
 
@@ -712,30 +601,18 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
        dma_buf = prime->dma_buf;
        if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
                DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-               struct ttm_operation_ctx ctx = {
-                       .interruptible = true,
-                       .no_wait_gpu = false
-               };
 
                exp_info.ops = &tdev->ops;
                exp_info.size = prime->size;
                exp_info.flags = flags;
                exp_info.priv = prime;
 
                /*
-                * Need to create a new dma_buf, with memory accounting.
+                * Need to create a new dma_buf
                 */
-               ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
-                                          &ctx);
-               if (unlikely(ret != 0)) {
-                       mutex_unlock(&prime->mutex);
-                       goto out_unref;
-               }
 
                dma_buf = dma_buf_export(&exp_info);
                if (IS_ERR(dma_buf)) {
                        ret = PTR_ERR(dma_buf);
-                       ttm_mem_global_free(tdev->mem_glob,
-                                           tdev->dma_buf_size);
                        mutex_unlock(&prime->mutex);
                        goto out_unref;
                }
@@ -767,21 +644,18 @@ out_unref:
  * @tfile: struct ttm_object_file identifying the caller
  * @size: The size of the dma_bufs we export.
  * @prime: The object to be initialized.
- * @shareable: See ttm_base_object_init
  * @type: See ttm_base_object_init
  * @refcount_release: See ttm_base_object_init
- * @ref_obj_release: See ttm_base_object_init
  *
  * Initializes an object which is compatible with the drm_prime model
  * for data sharing between processes and devices.
  */
 int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
-                         struct ttm_prime_object *prime, bool shareable,
+                         struct ttm_prime_object *prime,
                          enum ttm_object_type type,
-                         void (*refcount_release) (struct ttm_base_object **),
-                         void (*ref_obj_release) (struct ttm_base_object *,
-                                                  enum ttm_ref_type ref_type))
+                         void (*refcount_release) (struct ttm_base_object **))
 {
+       bool shareable = !!(type == VMW_RES_SURFACE);
        mutex_init(&prime->mutex);
        prime->size = PAGE_ALIGN(size);
        prime->real_type = type;
@@ -789,6 +663,5 @@ int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
        prime->refcount_release = refcount_release;
        return ttm_base_object_init(tfile, &prime->base, shareable,
                                    ttm_prime_type,
-                                   ttm_prime_refcount_release,
-                                   ref_obj_release);
+                                   ttm_prime_refcount_release);
 }
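
The ref-object bookkeeping above moves from the driver-private drm_open_hash to the generic fixed-size hashtable in <linux/hashtable.h>, keyed on the base object handle. Below is a minimal, self-contained sketch of that pattern; the names (demo_item, demo_ht, demo_find, DEMO_HT_ORDER) are made up for illustration and are not vmwgfx code, but the hash_add/hash_for_each_possible/hash_del calls are the same interfaces the patch uses (in their _rcu variants where lookups run under rcu_read_lock()).

// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Illustrative sketch of the <linux/hashtable.h> pattern; all names are hypothetical. */
#include <linux/module.h>
#include <linux/hashtable.h>
#include <linux/slab.h>

#define DEMO_HT_ORDER 4                 /* 2^4 = 16 buckets */

struct demo_item {
        u64 key;                        /* lookup key, like vmwgfx_hash_item::key */
        struct hlist_node head;         /* bucket linkage, like vmwgfx_hash_item::head */
};

static DEFINE_HASHTABLE(demo_ht, DEMO_HT_ORDER);

static struct demo_item *demo_find(u64 key)
{
        struct demo_item *item;

        /* Different keys can land in the same bucket, so always re-check the key. */
        hash_for_each_possible(demo_ht, item, head, key)
                if (item->key == key)
                        return item;
        return NULL;
}

static int __init demo_init(void)
{
        struct demo_item *item = kzalloc(sizeof(*item), GFP_KERNEL);

        if (!item)
                return -ENOMEM;

        item->key = 42;
        hash_add(demo_ht, &item->head, item->key);
        pr_info("demo: lookup(42) %s\n", demo_find(42) ? "found" : "missing");
        return 0;
}

static void __exit demo_exit(void)
{
        struct demo_item *item = demo_find(42);

        if (item) {
                hash_del(&item->head);
                kfree(item);
        }
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");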
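The comment added to ttm_object_device_init() explains why the idr now starts above VMWGFX_NUM_MOB: surface handles and GEM handles must never overlap, so a handle's numeric range alone tells the ioctl code which kind it is. A small sketch of that behaviour, assuming a hypothetical DEMO_NUM_GEM in place of VMWGFX_NUM_MOB and a made-up demo_surface_idr:

// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Illustrative sketch of idr_init_base() keeping two handle namespaces apart. */
#include <linux/module.h>
#include <linux/idr.h>

#define DEMO_NUM_GEM 16384              /* pretend GEM handles occupy 1..DEMO_NUM_GEM */

static struct idr demo_surface_idr;

static int __init demo_idr_init(void)
{
        int handle;

        /* Every id handed out by this idr starts above the GEM handle range. */
        idr_init_base(&demo_surface_idr, DEMO_NUM_GEM + 1);

        handle = idr_alloc(&demo_surface_idr, (void *)0x1 /* dummy payload */,
                           1, 0, GFP_KERNEL);
        if (handle < 0)
                return handle;

        /* handle >= DEMO_NUM_GEM + 1, so it can never be mistaken for a GEM handle. */
        pr_info("demo: surface handle %d\n", handle);
        return 0;
}

static void __exit demo_idr_exit(void)
{
        idr_destroy(&demo_surface_idr);
}

module_init(demo_idr_init);
module_exit(demo_idr_exit);
MODULE_LICENSE("GPL");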
