author     Jani Nikula <jani.nikula@intel.com>    2021-04-08 10:22:14 +0300
committer  Jani Nikula <jani.nikula@intel.com>    2021-04-08 10:22:14 +0300
commit     d527353e4bceae6d59606ad21ad00916401d22ad (patch)
tree       8ad569177aa89f15dcc485ee31d671cc98344ba8 /drivers/gpu/drm/qxl/qxl_release.c
parent     70bfb30743d5da73058b0a2271e9c127a84fb494 (diff)
parent     9c0fed84d5750e1eea6c664e073ffa2534a17743 (diff)
Merge drm/drm-next into drm-intel-next
Sync up with topic/i915-gem-next and drm-intel-gt-next.
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Diffstat (limited to 'drivers/gpu/drm/qxl/qxl_release.c')
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 89
1 file changed, 31 insertions, 58 deletions
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 0fcfc952d5e9..b19f2f00b215 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -58,56 +58,16 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr,
 			   signed long timeout)
 {
 	struct qxl_device *qdev;
-	struct qxl_release *release;
-	int count = 0, sc = 0;
-	bool have_drawable_releases;
 	unsigned long cur, end = jiffies + timeout;
 
 	qdev = container_of(fence->lock, struct qxl_device, release_lock);
-	release = container_of(fence, struct qxl_release, base);
-	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
-
-retry:
-	sc++;
-
-	if (dma_fence_is_signaled(fence))
-		goto signaled;
-
-	qxl_io_notify_oom(qdev);
-
-	for (count = 0; count < 11; count++) {
-		if (!qxl_queue_garbage_collect(qdev, true))
-			break;
 
-		if (dma_fence_is_signaled(fence))
-			goto signaled;
-	}
-
-	if (dma_fence_is_signaled(fence))
-		goto signaled;
-
-	if (have_drawable_releases || sc < 4) {
-		if (sc > 2)
-			/* back off */
-			usleep_range(500, 1000);
-
-		if (time_after(jiffies, end))
-			return 0;
-
-		if (have_drawable_releases && sc > 300) {
-			DMA_FENCE_WARN(fence, "failed to wait on release %llu "
-				       "after spincount %d\n",
-				       fence->context & ~0xf0000000, sc);
-			goto signaled;
-		}
-		goto retry;
-	}
-	/*
-	 * yeah, original sync_obj_wait gave up after 3 spins when
-	 * have_drawable_releases is not set.
-	 */
+	if (!wait_event_timeout(qdev->release_event,
+				(dma_fence_is_signaled(fence) ||
+				 (qxl_io_notify_oom(qdev), 0)),
+				timeout))
+		return 0;
 
-signaled:
 	cur = jiffies;
 	if (time_after(cur, end))
 		return 0;
@@ -196,14 +156,16 @@ qxl_release_free(struct qxl_device *qdev,
 		qxl_release_free_list(release);
 		kfree(release);
 	}
+	atomic_dec(&qdev->release_count);
 }
 
 static int qxl_release_bo_alloc(struct qxl_device *qdev,
-				struct qxl_bo **bo)
+				struct qxl_bo **bo,
+				u32 priority)
 {
 	/* pin releases bo's they are too messy to evict */
 	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
-			     QXL_GEM_DOMAIN_VRAM, NULL, bo);
+			     QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
 }
 
 int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
@@ -321,18 +283,23 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
 			       int type, struct qxl_release **release,
 			       struct qxl_bo **rbo)
 {
-	struct qxl_bo *bo;
+	struct qxl_bo *bo, *free_bo = NULL;
 	int idr_ret;
 	int ret = 0;
 	union qxl_release_info *info;
 	int cur_idx;
+	u32 priority;
 
-	if (type == QXL_RELEASE_DRAWABLE)
+	if (type == QXL_RELEASE_DRAWABLE) {
 		cur_idx = 0;
-	else if (type == QXL_RELEASE_SURFACE_CMD)
+		priority = 0;
+	} else if (type == QXL_RELEASE_SURFACE_CMD) {
 		cur_idx = 1;
-	else if (type == QXL_RELEASE_CURSOR_CMD)
+		priority = 1;
+	} else if (type == QXL_RELEASE_CURSOR_CMD) {
 		cur_idx = 2;
+		priority = 1;
+	}
 	else {
 		DRM_ERROR("got illegal type: %d\n", type);
 		return -EINVAL;
@@ -344,17 +311,22 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
 		*rbo = NULL;
 		return idr_ret;
 	}
+	atomic_inc(&qdev->release_count);
 
 	mutex_lock(&qdev->release_mutex);
 	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
-		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
+		free_bo = qdev->current_release_bo[cur_idx];
 		qdev->current_release_bo_offset[cur_idx] = 0;
 		qdev->current_release_bo[cur_idx] = NULL;
 	}
	if (!qdev->current_release_bo[cur_idx]) {
-		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
+		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
 		if (ret) {
 			mutex_unlock(&qdev->release_mutex);
+			if (free_bo) {
+				qxl_bo_unpin(free_bo);
+				qxl_bo_unref(&free_bo);
+			}
 			qxl_release_free(qdev, *release);
 			return ret;
 		}
@@ -370,6 +342,10 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
 	*rbo = bo;
 
 	mutex_unlock(&qdev->release_mutex);
+	if (free_bo) {
+		qxl_bo_unpin(free_bo);
+		qxl_bo_unref(&free_bo);
+	}
 
 	ret = qxl_release_list_add(*release, bo);
 	qxl_bo_unref(&bo);
@@ -429,7 +405,7 @@ void qxl_release_unmap(struct qxl_device *qdev,
 void qxl_release_fence_buffer_objects(struct qxl_release *release)
 {
 	struct ttm_buffer_object *bo;
-	struct ttm_bo_device *bdev;
+	struct ttm_device *bdev;
 	struct ttm_validate_buffer *entry;
 	struct qxl_device *qdev;
 
@@ -450,16 +426,13 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 		       release->id | 0xf0000000, release->base.seqno);
 	trace_dma_fence_emit(&release->base);
 
-	spin_lock(&ttm_bo_glob.lru_lock);
-
 	list_for_each_entry(entry, &release->bos, head) {
 		bo = entry->bo;
 
 		dma_resv_add_shared_fence(bo->base.resv, &release->base);
 
-		ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
+		ttm_bo_move_to_lru_tail_unlocked(bo);
 		dma_resv_unlock(bo->base.resv);
 	}
-	spin_unlock(&ttm_bo_glob.lru_lock);
 	ww_acquire_fini(&release->ticket);
 }
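A note on the first hunk: the retry/goto loop in qxl_fence_wait() is replaced by a single wait_event_timeout() on qdev->release_event, whose condition uses the C comma operator so that qxl_io_notify_oom() is re-issued every time the condition is re-evaluated while the expression still counts as "not done" until the fence is signaled. The snippet below is a minimal standalone userspace sketch of that condition pattern only, not kernel code; the names fake_fence, notify_oom() and poll_once() are invented for illustration.

/*
 * Sketch of the "signaled || (notify_oom(), 0)" condition pattern.
 * The comma expression performs the side effect and then evaluates
 * to 0, so the || result is false until the fence is signaled.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_fence {
	bool signaled;   /* stands in for dma_fence_is_signaled() */
	int oom_pokes;   /* how many times the "device" was nudged */
};

static void notify_oom(struct fake_fence *f)
{
	f->oom_pokes++;  /* stands in for qxl_io_notify_oom() */
}

/* One evaluation of the wait condition, as wait_event_timeout() would do. */
static bool poll_once(struct fake_fence *f)
{
	return f->signaled || (notify_oom(f), 0);
}

int main(void)
{
	struct fake_fence f = { .signaled = false, .oom_pokes = 0 };
	int i;

	for (i = 0; i < 3; i++)
		poll_once(&f);   /* not signaled yet: each poll pokes OOM */

	f.signaled = true;       /* e.g. the release was processed */
	printf("pokes before signal: %d, done: %d\n",
	       f.oom_pokes, poll_once(&f));
	return 0;
}

In the kernel, wait_event_timeout() only re-runs this condition when the waiter is woken on qdev->release_event or the timeout expires, so the busy retry loop and its usleep_range() back-off become unnecessary.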