Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_execbuf_util.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 96 +++++++-----------------------
1 file changed, 22 insertions(+), 74 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 93860346c426..bc7a83a9fe44 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -26,12 +26,10 @@
*
**************************************************************************/
+#include <linux/export.h>
+
#include <drm/ttm/ttm_execbuf_util.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/module.h>
+#include <drm/ttm/ttm_bo.h>
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
struct ttm_validate_buffer *entry)
@@ -39,17 +37,7 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
list_for_each_entry_continue_reverse(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- reservation_object_unlock(bo->resv);
- }
-}
-
-static void ttm_eu_del_from_lru_locked(struct list_head *list)
-{
- struct ttm_validate_buffer *entry;
-
- list_for_each_entry(entry, list, head) {
- struct ttm_buffer_object *bo = entry->bo;
- ttm_bo_del_from_lru(bo);
+ dma_resv_unlock(bo->base.resv);
}
}
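
[Note: two things happen in this hunk. The unlock call is renamed, since reservation_object became dma_resv and TTM BOs now embed a GEM object (bo->base) whose reservation they share, and the no-longer-used ttm_eu_del_from_lru_locked() helper is dropped. The reverse walk itself is unchanged: starting from the entry that failed to lock (which holds no lock itself), it releases every reservation taken earlier in the list. A condensed, hypothetical caller, only to illustrate the backoff pattern; the real caller is ttm_eu_reserve_buffers() below.]

        /* Illustrative sketch only, not part of the patch. */
        static int reserve_all(struct ww_acquire_ctx *ticket,
                               struct list_head *list)
        {
                struct ttm_validate_buffer *entry;
                int ret;

                list_for_each_entry(entry, list, head) {
                        ret = ttm_bo_reserve(entry->bo, true, false, ticket);
                        if (ret) {
                                /* @entry itself is not locked; unlock
                                 * everything reserved before it. */
                                ttm_eu_backoff_reservation_reverse(list, entry);
                                return ret;
                        }
                }
                return 0;
        }
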
@@ -57,22 +45,16 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
struct list_head *list)
{
struct ttm_validate_buffer *entry;
- struct ttm_bo_global *glob;
if (list_empty(list))
return;
- entry = list_first_entry(list, struct ttm_validate_buffer, head);
- glob = entry->bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- ttm_bo_add_to_lru(bo);
- reservation_object_unlock(bo->resv);
+ ttm_bo_move_to_lru_tail_unlocked(bo);
+ dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&glob->lru_lock);
if (ticket)
ww_acquire_fini(ticket);
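
[Note: the open-coded global-LRU handling disappears here because LRU maintenance moved behind a per-device helper. In recent trees ttm_bo_move_to_lru_tail_unlocked() is roughly the following inline; this is a sketch from memory, the exact body lives in drm/ttm/ttm_bo.h.]

        /* Approximate shape of the helper, per recent ttm_bo.h. */
        static inline void
        ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
        {
                spin_lock(&bo->bdev->lru_lock);  /* per-device, not global */
                ttm_bo_move_to_lru_tail(bo);
                spin_unlock(&bo->bdev->lru_lock);
        }
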
@@ -95,29 +77,21 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
struct list_head *dups)
{
- struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
int ret;
if (list_empty(list))
return 0;
- entry = list_first_entry(list, struct ttm_validate_buffer, head);
- glob = entry->bo->bdev->glob;
-
if (ticket)
ww_acquire_init(ticket, &reservation_ww_class);
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
+ unsigned int num_fences;
- ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
- if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- reservation_object_unlock(bo->resv);
-
- ret = -EBUSY;
-
- } else if (ret == -EALREADY && dups) {
+ ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
+ if (ret == -EALREADY && dups) {
struct ttm_validate_buffer *safe = entry;
entry = list_prev_entry(entry, head);
list_del(&safe->head);
@@ -125,12 +99,10 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
continue;
}
+ num_fences = max(entry->num_shared, 1u);
if (!ret) {
- if (!entry->num_shared)
- continue;
-
- ret = reservation_object_reserve_shared(bo->resv,
- entry->num_shared);
+ ret = dma_resv_reserve_fences(bo->base.resv,
+ num_fences);
if (!ret)
continue;
}
@@ -142,22 +114,14 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
ttm_eu_backoff_reservation_reverse(list, entry);
if (ret == -EDEADLK) {
- if (intr) {
- ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
- ticket);
- } else {
- ww_mutex_lock_slow(&bo->resv->lock, ticket);
- ret = 0;
- }
+ ret = ttm_bo_reserve_slowpath(bo, intr, ticket);
}
- if (!ret && entry->num_shared)
- ret = reservation_object_reserve_shared(bo->resv,
- entry->num_shared);
+ if (!ret)
+ ret = dma_resv_reserve_fences(bo->base.resv,
+ num_fences);
if (unlikely(ret != 0)) {
- if (ret == -EINTR)
- ret = -ERESTARTSYS;
if (ticket) {
ww_acquire_done(ticket);
ww_acquire_fini(ticket);
@@ -172,11 +136,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
list_add(&entry->head, list);
}
- if (ticket)
- ww_acquire_done(ticket);
- spin_lock(&glob->lru_lock);
- ttm_eu_del_from_lru_locked(list);
- spin_unlock(&glob->lru_lock);
return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
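
[Note: three behavioural changes are folded into this function. The -EBUSY bail-out on bo->cpu_writers goes away along with the cpu_writers mechanism itself; the -EINTR to -ERESTARTSYS conversion goes away because ttm_bo_reserve() and ttm_bo_reserve_slowpath() now return -ERESTARTSYS directly; and dma_resv_reserve_fences() is now called even when entry->num_shared is zero, because dma_resv_add_fence() always consumes a preallocated fence slot, hence num_fences = max(entry->num_shared, 1u). A hedged, hypothetical call site; note that passing a non-NULL dups list makes a BO listed twice land on dups (the -EALREADY case above) instead of failing the whole reservation.]

        /* Hypothetical driver-side call, for illustration only. */
        static int my_driver_reserve(struct ww_acquire_ctx *ticket,
                                     struct list_head *validate_list)
        {
                LIST_HEAD(dups);        /* parks duplicate entries */
                int ret;

                /* intr == true: lock waits may return -ERESTARTSYS */
                ret = ttm_eu_reserve_buffers(ticket, validate_list,
                                             true, &dups);
                if (ret)
                        return ret;     /* nothing is left locked on error */

                /* Entries naming an already-reserved BO were moved to
                 * @dups; they are locked via their first occurrence and
                 * must not be unlocked a second time. */
                return 0;
        }
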
@@ -186,29 +145,18 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct dma_fence *fence)
{
struct ttm_validate_buffer *entry;
- struct ttm_buffer_object *bo;
- struct ttm_bo_global *glob;
- struct ttm_bo_device *bdev;
if (list_empty(list))
return;
- bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
- bdev = bo->bdev;
- glob = bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
-
list_for_each_entry(entry, list, head) {
- bo = entry->bo;
- if (entry->num_shared)
- reservation_object_add_shared_fence(bo->resv, fence);
- else
- reservation_object_add_excl_fence(bo->resv, fence);
- ttm_bo_add_to_lru(bo);
- reservation_object_unlock(bo->resv);
+ struct ttm_buffer_object *bo = entry->bo;
+
+ dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?
+ DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
+ ttm_bo_move_to_lru_tail_unlocked(bo);
+ dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&glob->lru_lock);
if (ticket)
ww_acquire_fini(ticket);
}
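
[Note: taken together, the three exported helpers cover the whole execbuf locking lifecycle: reserve every BO under one ww ticket, do the work, then either back off or attach the job's fence and unlock. A minimal, hypothetical submit path under the new API; my_driver_run_job() is a stand-in for job submission returning the job's dma_fence.]

        #include <drm/ttm/ttm_execbuf_util.h>

        static struct dma_fence *my_driver_run_job(struct list_head *list);

        /* Hypothetical submit path, error handling condensed. */
        static int my_driver_submit(struct list_head *validate_list)
        {
                struct ww_acquire_ctx ticket;
                struct dma_fence *fence;
                int ret;

                /* lock every BO on the list, interruptibly */
                ret = ttm_eu_reserve_buffers(&ticket, validate_list,
                                             true, NULL);
                if (ret)
                        return ret;

                fence = my_driver_run_job(validate_list);  /* stand-in */
                if (IS_ERR(fence)) {
                        /* unlock all BOs, bump them on the LRU and
                         * drop the ww ticket */
                        ttm_eu_backoff_reservation(&ticket, validate_list);
                        return PTR_ERR(fence);
                }

                /* attach @fence to every BO (read or write slot chosen
                 * from entry->num_shared), unlock, finish the ticket */
                ttm_eu_fence_buffer_objects(&ticket, validate_list, fence);
                dma_fence_put(fence);
                return 0;
        }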