Diffstat (limited to 'drivers/gpu/drm/qxl/qxl_release.c')
-rw-r--r-- drivers/gpu/drm/qxl/qxl_release.c | 179
1 file changed, 79 insertions(+), 100 deletions(-)
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 30f85f0130cb..7b3c9a6016db 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -19,9 +19,15 @@
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+
+#include <linux/delay.h>
+
+#include <drm/drm_print.h>
+
+#include <trace/events/dma_fence.h>
+
#include "qxl_drv.h"
#include "qxl_object.h"
-#include <trace/events/dma_fence.h>
/*
* drawable cmd cache - allocate a bunch of VRAM pages, suballocate
@@ -32,10 +38,10 @@
/* manage releaseables */
/* stack them 16 high for now -drawable object is 191 */
#define RELEASE_SIZE 256
-#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
+#define RELEASES_PER_BO (PAGE_SIZE / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
-#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
+#define SURFACE_RELEASES_PER_BO (PAGE_SIZE / SURFACE_RELEASE_SIZE)
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
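The switch from a literal 4096 to PAGE_SIZE keeps these counts in step with qxl_release_bo_alloc() below, which sizes each release BO at exactly PAGE_SIZE; with 4 KiB pages the numbers are unchanged (16 releases, 32 surface releases per BO). A hypothetical compile-time check, not part of the patch, would capture the assumption:

/* hypothetical sanity check, not in the patch: each chunk size must
 * divide the one-page release BO evenly */
static_assert(PAGE_SIZE % RELEASE_SIZE == 0);
static_assert(PAGE_SIZE % SURFACE_RELEASE_SIZE == 0);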
@@ -54,56 +60,16 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr,
signed long timeout)
{
struct qxl_device *qdev;
- struct qxl_release *release;
- int count = 0, sc = 0;
- bool have_drawable_releases;
unsigned long cur, end = jiffies + timeout;
qdev = container_of(fence->lock, struct qxl_device, release_lock);
- release = container_of(fence, struct qxl_release, base);
- have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
-
-retry:
- sc++;
-
- if (dma_fence_is_signaled(fence))
- goto signaled;
-
- qxl_io_notify_oom(qdev);
-
- for (count = 0; count < 11; count++) {
- if (!qxl_queue_garbage_collect(qdev, true))
- break;
- if (dma_fence_is_signaled(fence))
- goto signaled;
- }
-
- if (dma_fence_is_signaled(fence))
- goto signaled;
-
- if (have_drawable_releases || sc < 4) {
- if (sc > 2)
- /* back off */
- usleep_range(500, 1000);
-
- if (time_after(jiffies, end))
- return 0;
-
- if (have_drawable_releases && sc > 300) {
- DMA_FENCE_WARN(fence, "failed to wait on release %llu "
- "after spincount %d\n",
- fence->context & ~0xf0000000, sc);
- goto signaled;
- }
- goto retry;
- }
- /*
- * yeah, original sync_obj_wait gave up after 3 spins when
- * have_drawable_releases is not set.
- */
+ if (!wait_event_timeout(qdev->release_event,
+ (dma_fence_is_signaled(fence) ||
+ (qxl_io_notify_oom(qdev), 0)),
+ timeout))
+ return 0;
-signaled:
cur = jiffies;
if (time_after(cur, end))
return 0;
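The rewritten wait replaces the hand-rolled retry/spin loop (including its magic spincounts of 4, 11 and 300) with a single wait_event_timeout() on the new release_event waitqueue. The condition leans on the C comma operator: (qxl_io_notify_oom(qdev), 0) pokes the host to flush the release ring on every wakeup but always evaluates false, so only a signaled fence or the timeout can end the wait. The same pattern in isolation, with hypothetical names:

/* sketch of the wait_event_timeout() side-effect pattern used above;
 * struct my_dev, my_done() and my_kick() are hypothetical stand-ins */
static long my_wait(struct my_dev *dev, long timeout)
{
	/* the condition re-evaluates on every wakeup of dev->wq; the
	 * ", 0" makes my_kick() a pure side effect that can never
	 * satisfy the wait by itself */
	return wait_event_timeout(dev->wq,
				  my_done(dev) || (my_kick(dev), 0),
				  timeout);	/* 0 means timed out */
}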
@@ -157,13 +123,11 @@ qxl_release_free_list(struct qxl_release *release)
{
while (!list_empty(&release->bos)) {
struct qxl_bo_list *entry;
- struct qxl_bo *bo;
entry = container_of(release->bos.next,
- struct qxl_bo_list, tv.head);
- bo = to_qxl_bo(entry->tv.bo);
- qxl_bo_unref(&bo);
- list_del(&entry->tv.head);
+ struct qxl_bo_list, list);
+ qxl_bo_unref(&entry->bo);
+ list_del(&entry->list);
kfree(entry);
}
release->release_bo = NULL;
@@ -192,22 +156,24 @@ qxl_release_free(struct qxl_device *qdev,
qxl_release_free_list(release);
kfree(release);
}
+ atomic_dec(&qdev->release_count);
}
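qxl_release_free() now decrements a release_count that qxl_alloc_release_reserved() increments further down; presumably this gives teardown or suspend code a way to wait until all outstanding releases are gone. A hypothetical drain loop, not part of this patch, could be as simple as:

/* hypothetical drain of outstanding releases; not in this patch */
while (atomic_read(&qdev->release_count))
	msleep(100);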
static int qxl_release_bo_alloc(struct qxl_device *qdev,
- struct qxl_bo **bo)
+ struct qxl_bo **bo,
+ u32 priority)
{
/* pin releases bo's they are too messy to evict */
return qxl_bo_create(qdev, PAGE_SIZE, false, true,
- QXL_GEM_DOMAIN_VRAM, NULL, bo);
+ QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
}
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
struct qxl_bo_list *entry;
- list_for_each_entry(entry, &release->bos, tv.head) {
- if (entry->tv.bo == &bo->tbo)
+ list_for_each_entry(entry, &release->bos, list) {
+ if (entry->bo == bo)
return 0;
}
@@ -216,9 +182,8 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
return -ENOMEM;
qxl_bo_ref(bo);
- entry->tv.bo = &bo->tbo;
- entry->tv.num_shared = 0;
- list_add_tail(&entry->tv.head, &release->bos);
+ entry->bo = bo;
+ list_add_tail(&entry->list, &release->bos);
return 0;
}
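These hunks replace the embedded struct ttm_validate_buffer (the tv member) with a plain list node plus BO pointer, since the ttm_execbuf_util helpers are on their way out. The converted entry type, declared elsewhere, would look roughly like:

/* assumed shape of the converted list entry, presumably in qxl_drv.h */
struct qxl_bo_list {
	struct qxl_bo		*bo;	/* ref held, dropped in qxl_release_free_list() */
	struct list_head	list;	/* link in qxl_release::bos */
};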
@@ -227,19 +192,19 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
struct ttm_operation_ctx ctx = { true, false };
int ret;
- if (!bo->pin_count) {
- qxl_ttm_placement_from_domain(bo, bo->type, false);
+ if (!bo->tbo.pin_count) {
+ qxl_ttm_placement_from_domain(bo, bo->type);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
return ret;
}
- ret = reservation_object_reserve_shared(bo->tbo.resv, 1);
+ ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
if (ret)
return ret;
/* allocate a surface for reserved + validated buffers */
- ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
+ ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);
if (ret)
return ret;
return 0;
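The bo->gem_base.dev->dev_private chain becomes to_qxl(), reflecting that the GEM object now lives at bo->tbo.base and that qxl_device presumably embeds its drm_device rather than hanging off dev_private. The helper is most likely just a container_of() wrapper along these lines:

/* assumed definition of the to_qxl() helper; the ddev member name is
 * a guess based on common DRM driver layouts */
static inline struct qxl_device *to_qxl(struct drm_device *dev)
{
	return container_of(dev, struct qxl_device, ddev);
}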
@@ -255,21 +220,28 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
if (list_is_singular(&release->bos))
return 0;
- ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
- !no_intr, NULL);
- if (ret)
- return ret;
-
- list_for_each_entry(entry, &release->bos, tv.head) {
- struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
-
- ret = qxl_release_validate_bo(bo);
- if (ret) {
- ttm_eu_backoff_reservation(&release->ticket, &release->bos);
- return ret;
+ drm_exec_init(&release->exec, no_intr ? 0 :
+ DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&release->exec) {
+ list_for_each_entry(entry, &release->bos, list) {
+ ret = drm_exec_prepare_obj(&release->exec,
+ &entry->bo->tbo.base,
+ 1);
+ drm_exec_retry_on_contention(&release->exec);
+ if (ret)
+ goto error;
}
}
+
+ list_for_each_entry(entry, &release->bos, list) {
+ ret = qxl_release_validate_bo(entry->bo);
+ if (ret)
+ goto error;
+ }
return 0;
+error:
+ drm_exec_fini(&release->exec);
+ return ret;
}
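This hunk is the heart of the conversion from ttm_eu_reserve_buffers() to drm_exec. drm_exec_until_all_locked() restarts its body whenever drm_exec_retry_on_contention() sees a ww-mutex contention (after dropping every lock taken so far), so the loop eventually locks all BOs deadlock-free, and drm_exec_prepare_obj() both locks a GEM object and reserves fence slots on its reservation object (one here, matching the single release fence added later). A standalone sketch of the same pattern over a hypothetical object array:

#include <drm/drm_exec.h>

/* generic drm_exec locking pattern, as used above; objs and count
 * are hypothetical inputs */
static int lock_all(struct drm_exec *exec,
		    struct drm_gem_object **objs, unsigned int count)
{
	unsigned int i;
	int ret;

	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT, count);
	drm_exec_until_all_locked(exec) {
		for (i = 0; i < count; i++) {
			/* lock objs[i] and reserve one fence slot */
			ret = drm_exec_prepare_obj(exec, objs[i], 1);
			/* on contention: drop all locks and restart the
			 * drm_exec_until_all_locked() body */
			drm_exec_retry_on_contention(exec);
			if (ret)
				goto error;
		}
	}
	return 0;	/* caller later unlocks via drm_exec_fini() */

error:
	drm_exec_fini(exec);
	return ret;
}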
void qxl_release_backoff_reserve_list(struct qxl_release *release)
@@ -279,7 +251,7 @@ void qxl_release_backoff_reserve_list(struct qxl_release *release)
if (list_is_singular(&release->bos))
return;
- ttm_eu_backoff_reservation(&release->ticket, &release->bos);
+ drm_exec_fini(&release->exec);
}
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
@@ -317,18 +289,23 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
int type, struct qxl_release **release,
struct qxl_bo **rbo)
{
- struct qxl_bo *bo;
+ struct qxl_bo *bo, *free_bo = NULL;
int idr_ret;
int ret = 0;
union qxl_release_info *info;
int cur_idx;
+ u32 priority;
- if (type == QXL_RELEASE_DRAWABLE)
+ if (type == QXL_RELEASE_DRAWABLE) {
cur_idx = 0;
- else if (type == QXL_RELEASE_SURFACE_CMD)
+ priority = 0;
+ } else if (type == QXL_RELEASE_SURFACE_CMD) {
cur_idx = 1;
- else if (type == QXL_RELEASE_CURSOR_CMD)
+ priority = 1;
+ } else if (type == QXL_RELEASE_CURSOR_CMD) {
cur_idx = 2;
+ priority = 1;
+ }
else {
DRM_ERROR("got illegal type: %d\n", type);
return -EINVAL;
@@ -340,17 +317,22 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
*rbo = NULL;
return idr_ret;
}
+ atomic_inc(&qdev->release_count);
mutex_lock(&qdev->release_mutex);
if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
- qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
+ free_bo = qdev->current_release_bo[cur_idx];
qdev->current_release_bo_offset[cur_idx] = 0;
qdev->current_release_bo[cur_idx] = NULL;
}
if (!qdev->current_release_bo[cur_idx]) {
- ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
+ ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
if (ret) {
mutex_unlock(&qdev->release_mutex);
+ if (free_bo) {
+ qxl_bo_unpin(free_bo);
+ qxl_bo_unref(&free_bo);
+ }
qxl_release_free(qdev, *release);
return ret;
}
@@ -366,6 +348,10 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
*rbo = bo;
mutex_unlock(&qdev->release_mutex);
+ if (free_bo) {
+ qxl_bo_unpin(free_bo);
+ qxl_bo_unref(&free_bo);
+ }
ret = qxl_release_list_add(*release, bo);
qxl_bo_unref(&bo);
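Both the error path and the success path now defer the cleanup of the exhausted free_bo until after release_mutex is dropped, and explicitly unpin it first (release BOs are created pinned); presumably this avoids doing TTM teardown work, which can sleep and take reservation locks, while holding the mutex. Schematically:

/* schematic of the new cleanup ordering; details elided */
mutex_lock(&qdev->release_mutex);
/* ... detach the exhausted BO into free_bo, install a fresh one ... */
mutex_unlock(&qdev->release_mutex);
if (free_bo) {
	qxl_bo_unpin(free_bo);	/* release BOs are created pinned */
	qxl_bo_unref(&free_bo);
}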
@@ -424,19 +410,18 @@ void qxl_release_unmap(struct qxl_device *qdev,
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
- struct ttm_buffer_object *bo;
- struct ttm_bo_global *glob;
- struct ttm_bo_device *bdev;
- struct ttm_validate_buffer *entry;
+ struct ttm_device *bdev;
+ struct qxl_bo_list *entry;
struct qxl_device *qdev;
+ struct qxl_bo *bo;
/* if only one object on the release its the release itself
since these objects are pinned no need to reserve */
if (list_is_singular(&release->bos) || list_empty(&release->bos))
return;
- bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
- bdev = bo->bdev;
+ bo = list_first_entry(&release->bos, struct qxl_bo_list, list)->bo;
+ bdev = bo->tbo.bdev;
qdev = container_of(bdev, struct qxl_device, mman.bdev);
/*
@@ -447,18 +432,12 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
- glob = bdev->glob;
-
- spin_lock(&glob->lru_lock);
-
- list_for_each_entry(entry, &release->bos, head) {
+ list_for_each_entry(entry, &release->bos, list) {
bo = entry->bo;
- reservation_object_add_shared_fence(bo->resv, &release->base);
- ttm_bo_add_to_lru(bo);
- reservation_object_unlock(bo->resv);
+ dma_resv_add_fence(bo->tbo.base.resv, &release->base,
+ DMA_RESV_USAGE_READ);
+ ttm_bo_move_to_lru_tail_unlocked(&bo->tbo);
}
- spin_unlock(&glob->lru_lock);
- ww_acquire_fini(&release->ticket);
+ drm_exec_fini(&release->exec);
}
-
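On the fencing side, the manual lru_lock/ww_acquire choreography is gone: dma_resv_add_fence() with DMA_RESV_USAGE_READ attaches the release fence to each BO's reservation object, ttm_bo_move_to_lru_tail_unlocked() takes the LRU lock itself, and drm_exec_fini() unlocks every object and ends the ww-acquire context in one call. Note the pairing with dma_resv_reserve_fences() in qxl_release_validate_bo() above: dma_resv_add_fence() must not fail, so the slot has to be reserved up front while errors can still be handled. A minimal sketch of that contract:

#include <linux/dma-resv.h>

/* minimal reserve-then-add fencing pattern matching the code above;
 * resv must already be locked (here drm_exec holds the locks) */
static int publish_fence(struct dma_resv *resv, struct dma_fence *fence)
{
	int ret;

	/* reserve a slot first: dma_resv_add_fence() never allocates */
	ret = dma_resv_reserve_fences(resv, 1);
	if (ret)
		return ret;

	/* attach as a read access; writers will sync against it */
	dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_READ);
	return 0;
}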