Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 376
 1 file changed, 182 insertions(+), 194 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 7b45393ad98e..388011696941 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -27,9 +28,10 @@
#include <drm/ttm/ttm_placement.h>
-#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
#define VMW_RES_EVICT_ERR_COUNT 10
@@ -39,10 +41,10 @@
*/
void vmw_resource_mob_attach(struct vmw_resource *res)
{
- struct vmw_buffer_object *backup = res->backup;
- struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
+ struct vmw_bo *gbo = res->guest_memory_bo;
+ struct rb_node **new = &gbo->res_tree.rb_node, *parent = NULL;
- dma_resv_assert_held(res->backup->base.base.resv);
+ dma_resv_assert_held(gbo->tbo.base.resv);
res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
res->func->prio;
@@ -51,14 +53,15 @@ void vmw_resource_mob_attach(struct vmw_resource *res)
container_of(*new, struct vmw_resource, mob_node);
parent = *new;
- new = (res->backup_offset < this->backup_offset) ?
+ new = (res->guest_memory_offset < this->guest_memory_offset) ?
&((*new)->rb_left) : &((*new)->rb_right);
}
rb_link_node(&res->mob_node, parent, new);
- rb_insert_color(&res->mob_node, &backup->res_tree);
+ rb_insert_color(&res->mob_node, &gbo->res_tree);
+ vmw_bo_del_detached_resource(gbo, res);
- vmw_bo_prio_add(backup, res->used_prio);
+ vmw_bo_prio_add(gbo, res->used_prio);
}
/**
@@ -67,13 +70,13 @@ void vmw_resource_mob_attach(struct vmw_resource *res)
*/
void vmw_resource_mob_detach(struct vmw_resource *res)
{
- struct vmw_buffer_object *backup = res->backup;
+ struct vmw_bo *gbo = res->guest_memory_bo;
- dma_resv_assert_held(backup->base.base.resv);
+ dma_resv_assert_held(gbo->tbo.base.resv);
if (vmw_resource_mob_attached(res)) {
- rb_erase(&res->mob_node, &backup->res_tree);
+ rb_erase(&res->mob_node, &gbo->res_tree);
RB_CLEAR_NODE(&res->mob_node);
- vmw_bo_prio_del(backup, res->used_prio);
+ vmw_bo_prio_del(gbo, res->used_prio);
}
}
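
For context, vmw_resource_mob_attached(), used in both helpers above, reduces to an rb-node emptiness test; a minimal sketch, assuming the helper as defined earlier in this file:

static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
{
	/* Attached iff the node is linked into the bo's res_tree. */
	return !RB_EMPTY_NODE(&res->mob_node);
}
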
@@ -114,15 +117,17 @@ static void vmw_resource_release(struct kref *kref)
container_of(kref, struct vmw_resource, kref);
struct vmw_private *dev_priv = res->dev_priv;
int id;
+ int ret;
struct idr *idr = &dev_priv->res_idr[res->func->res_type];
spin_lock(&dev_priv->resource_lock);
list_del_init(&res->lru_head);
spin_unlock(&dev_priv->resource_lock);
- if (res->backup) {
- struct ttm_buffer_object *bo = &res->backup->base;
+ if (res->guest_memory_bo) {
+ struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
- ttm_bo_reserve(bo, false, false, NULL);
+ ret = ttm_bo_reserve(bo, false, false, NULL);
+ BUG_ON(ret);
if (vmw_resource_mob_attached(res) &&
res->func->unbind != NULL) {
struct ttm_validate_buffer val_buf;
@@ -131,14 +136,14 @@ static void vmw_resource_release(struct kref *kref)
val_buf.num_shared = 0;
res->func->unbind(res, false, &val_buf);
}
- res->backup_dirty = false;
+ res->guest_memory_dirty = false;
vmw_resource_mob_detach(res);
if (res->dirty)
res->func->dirty_free(res);
if (res->coherent)
- vmw_bo_dirty_release(res->backup);
+ vmw_bo_dirty_release(res->guest_memory_bo);
ttm_bo_unreserve(bo);
- vmw_bo_unreference(&res->backup);
+ vmw_user_bo_unref(&res->guest_memory_bo);
}
if (likely(res->hw_destroy != NULL)) {
@@ -221,9 +226,9 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
INIT_LIST_HEAD(&res->lru_head);
INIT_LIST_HEAD(&res->binding_head);
res->id = -1;
- res->backup = NULL;
- res->backup_offset = 0;
- res->backup_dirty = false;
+ res->guest_memory_bo = NULL;
+ res->guest_memory_offset = 0;
+ res->guest_memory_dirty = false;
res->res_dirty = false;
res->coherent = false;
res->used_prio = 3;
@@ -261,14 +266,14 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
int ret = -EINVAL;
base = ttm_base_object_lookup(tfile, handle);
- if (unlikely(base == NULL))
+ if (unlikely(!base))
return -EINVAL;
if (unlikely(ttm_base_object_type(base) != converter->object_type))
goto out_bad_resource;
res = converter->base_obj_to_res(base);
- kref_get(&res->kref);
+ vmw_resource_reference(res);
*p_res = res;
ret = 0;
@@ -279,100 +284,74 @@ out_bad_resource:
return ret;
}
-/**
- * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
- * TTM user-space handle and perform basic type checks
- *
- * @dev_priv: Pointer to a device private struct
- * @tfile: Pointer to a struct ttm_object_file identifying the caller
- * @handle: The TTM user-space handle
- * @converter: Pointer to an object describing the resource type
- *
- * If the handle can't be found or is associated with an incorrect resource
- * type, -EINVAL will be returned.
- */
-struct vmw_resource *
-vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle,
- const struct vmw_user_resource_conv
- *converter)
-{
- struct ttm_base_object *base;
-
- base = ttm_base_object_noref_lookup(tfile, handle);
- if (!base)
- return ERR_PTR(-ESRCH);
-
- if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
- ttm_base_object_noref_release();
- return ERR_PTR(-EINVAL);
- }
-
- return converter->base_obj_to_res(base);
-}
-
/*
* Helper function that looks up either a surface or a bo.
*
* The surface and buffer pointers in the struct vmw_user_object pointed to
* by @uo need to be NULL on entry.
*/
-int vmw_user_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle,
- struct vmw_surface **out_surf,
- struct vmw_buffer_object **out_buf)
+int vmw_user_object_lookup(struct vmw_private *dev_priv,
+ struct drm_file *filp,
+ u32 handle,
+ struct vmw_user_object *uo)
{
+ struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
struct vmw_resource *res;
int ret;
- BUG_ON(*out_surf || *out_buf);
+ WARN_ON(uo->surface || uo->buffer);
ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
user_surface_converter,
&res);
if (!ret) {
- *out_surf = vmw_res_to_srf(res);
+ uo->surface = vmw_res_to_srf(res);
return 0;
}
- *out_surf = NULL;
- ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
+ uo->surface = NULL;
+ ret = vmw_user_bo_lookup(filp, handle, &uo->buffer);
+ if (!ret && !uo->buffer->is_dumb) {
+ uo->surface = vmw_lookup_surface_for_buffer(dev_priv,
+ uo->buffer,
+ handle);
+ if (uo->surface)
+ vmw_user_bo_unref(&uo->buffer);
+ }
+
return ret;
}
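
For context, a minimal caller sketch for the new lookup (a hypothetical helper, not part of this patch; it assumes struct vmw_surface embeds its vmw_resource as 'res', as elsewhere in vmwgfx). On success exactly one of uo->surface and uo->buffer is set, and the caller owns a reference on whichever it is:

static int example_lookup_put(struct vmw_private *dev_priv,
			      struct drm_file *filp, u32 handle)
{
	struct vmw_user_object uo = {};	/* must start out zeroed */
	int ret;

	ret = vmw_user_object_lookup(dev_priv, filp, handle, &uo);
	if (ret)
		return ret;

	if (uo.surface) {
		/* Resolved to a surface; drop the resource reference. */
		struct vmw_resource *res = &uo.surface->res;

		vmw_resource_unreference(&res);
	} else {
		/* Resolved to a plain buffer object. */
		vmw_user_bo_unref(&uo.buffer);
	}
	return 0;
}
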
/**
- * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
+ * vmw_resource_buf_alloc - Allocate a guest memory buffer for a resource.
*
- * @res: The resource for which to allocate a backup buffer.
+ * @res: The resource for which to allocate a guest memory buffer.
* @interruptible: Whether any sleeps during allocation should be
* performed while interruptible.
*/
static int vmw_resource_buf_alloc(struct vmw_resource *res,
bool interruptible)
{
- unsigned long size =
- (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
- struct vmw_buffer_object *backup;
+ unsigned long size = PFN_ALIGN(res->guest_memory_size);
+ struct vmw_bo *gbo;
+ struct vmw_bo_params bo_params = {
+ .domain = res->func->domain,
+ .busy_domain = res->func->busy_domain,
+ .bo_type = ttm_bo_type_device,
+ .size = res->guest_memory_size,
+ .pin = false
+ };
int ret;
- if (likely(res->backup)) {
- BUG_ON(res->backup->base.base.size < size);
+ if (likely(res->guest_memory_bo)) {
+ BUG_ON(res->guest_memory_bo->tbo.base.size < size);
return 0;
}
- backup = kzalloc(sizeof(*backup), GFP_KERNEL);
- if (unlikely(!backup))
- return -ENOMEM;
-
- ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
- res->func->backup_placement,
- interruptible, false,
- &vmw_bo_bo_free);
+ ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
if (unlikely(ret != 0))
goto out_no_bo;
- res->backup = backup;
+ res->guest_memory_bo = gbo;
out_no_bo:
return ret;
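
PFN_ALIGN() from <linux/pfn.h> replaces the open-coded rounding here. Note that the aligned size now only feeds the BUG_ON() sanity check, while vmw_bo_create() receives the unaligned guest_memory_size (presumably rounding up internally). The equivalence, as a comment sketch:

/*
 * old: (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK
 * new: PFN_ALIGN(res->guest_memory_size)
 *
 * e.g. with PAGE_SIZE == 4096, PFN_ALIGN(5000) == 8192.
 */
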
@@ -404,13 +383,13 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
}
if (func->bind &&
- ((func->needs_backup && !vmw_resource_mob_attached(res) &&
- val_buf->bo != NULL) ||
- (!func->needs_backup && val_buf->bo != NULL))) {
+ ((func->needs_guest_memory && !vmw_resource_mob_attached(res) &&
+ val_buf->bo) ||
+ (!func->needs_guest_memory && val_buf->bo))) {
ret = func->bind(res, val_buf);
if (unlikely(ret != 0))
goto out_bind_failed;
- if (func->needs_backup)
+ if (func->needs_guest_memory)
vmw_resource_mob_attach(res);
}
@@ -420,11 +399,11 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
*/
if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
!res->coherent) {
- if (res->backup->dirty && !res->dirty) {
+ if (res->guest_memory_bo->dirty && !res->dirty) {
ret = func->dirty_alloc(res);
if (ret)
return ret;
- } else if (!res->backup->dirty && res->dirty) {
+ } else if (!res->guest_memory_bo->dirty && res->dirty) {
func->dirty_free(res);
}
}
@@ -435,12 +414,12 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
*/
if (res->dirty) {
if (dirtying && !res->res_dirty) {
- pgoff_t start = res->backup_offset >> PAGE_SHIFT;
+ pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT;
pgoff_t end = __KERNEL_DIV_ROUND_UP
- (res->backup_offset + res->backup_size,
+ (res->guest_memory_offset + res->guest_memory_size,
PAGE_SIZE);
- vmw_bo_dirty_unmap(res->backup, start, end);
+ vmw_bo_dirty_unmap(res->guest_memory_bo, start, end);
}
vmw_bo_dirty_transfer_to_res(res);
@@ -462,10 +441,10 @@ out_bind_failed:
* @res: Pointer to the struct vmw_resource to unreserve.
* @dirty_set: Change dirty status of the resource.
* @dirty: When changing dirty status indicates the new status.
- * @switch_backup: Backup buffer has been switched.
- * @new_backup: Pointer to new backup buffer if command submission
+ * @switch_guest_memory: Guest memory buffer has been switched.
+ * @new_guest_memory_bo: Pointer to the new guest memory buffer if command
* submission switched it. May be NULL.
- * @new_backup_offset: New backup offset if @switch_backup is true.
+ * @new_guest_memory_offset: New guest memory offset if @switch_guest_memory is true.
*
* Currently unreserving a resource means putting it back on the device's
* resource lru list, so that it can be evicted if necessary.
@@ -473,42 +452,42 @@ out_bind_failed:
void vmw_resource_unreserve(struct vmw_resource *res,
bool dirty_set,
bool dirty,
- bool switch_backup,
- struct vmw_buffer_object *new_backup,
- unsigned long new_backup_offset)
+ bool switch_guest_memory,
+ struct vmw_bo *new_guest_memory_bo,
+ unsigned long new_guest_memory_offset)
{
struct vmw_private *dev_priv = res->dev_priv;
if (!list_empty(&res->lru_head))
return;
- if (switch_backup && new_backup != res->backup) {
- if (res->backup) {
+ if (switch_guest_memory && new_guest_memory_bo != res->guest_memory_bo) {
+ if (res->guest_memory_bo) {
vmw_resource_mob_detach(res);
if (res->coherent)
- vmw_bo_dirty_release(res->backup);
- vmw_bo_unreference(&res->backup);
+ vmw_bo_dirty_release(res->guest_memory_bo);
+ vmw_user_bo_unref(&res->guest_memory_bo);
}
- if (new_backup) {
- res->backup = vmw_bo_reference(new_backup);
+ if (new_guest_memory_bo) {
+ res->guest_memory_bo = vmw_user_bo_ref(new_guest_memory_bo);
/*
* The validation code should already have added a
* dirty tracker here.
*/
- WARN_ON(res->coherent && !new_backup->dirty);
+ WARN_ON(res->coherent && !new_guest_memory_bo->dirty);
vmw_resource_mob_attach(res);
} else {
- res->backup = NULL;
+ res->guest_memory_bo = NULL;
}
- } else if (switch_backup && res->coherent) {
- vmw_bo_dirty_release(res->backup);
+ } else if (switch_guest_memory && res->coherent) {
+ vmw_bo_dirty_release(res->guest_memory_bo);
}
- if (switch_backup)
- res->backup_offset = new_backup_offset;
+ if (switch_guest_memory)
+ res->guest_memory_offset = new_guest_memory_offset;
if (dirty_set)
res->res_dirty = dirty;
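
The renamed parameters keep the old calling convention. A sketch of the two common call shapes (new_gbo is a placeholder name; the first form mirrors the call vmw_resource_unpin() makes later in this file):

/* Put the resource back on the LRU with no state changes: */
vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

/* Switch to a new guest memory buffer at offset 0; the function takes
 * its own reference via vmw_user_bo_ref() and drops the reference on
 * the previous buffer, if any: */
vmw_resource_unreserve(res, false, false, true, new_gbo, 0UL);
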
@@ -527,7 +506,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
* for a resource and in that case, allocate
* one, reserve and validate it.
*
- * @ticket: The ww aqcquire context to use, or NULL if trylocking.
+ * @ticket: The ww acquire context to use, or NULL if trylocking.
* @res: The resource for which to allocate a guest memory buffer.
* @interruptible: Whether any sleeps during allocation should be
* performed while interruptible.
@@ -542,30 +521,32 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
{
struct ttm_operation_ctx ctx = { true, false };
struct list_head val_list;
- bool backup_dirty = false;
+ bool guest_memory_dirty = false;
int ret;
- if (unlikely(res->backup == NULL)) {
+ if (unlikely(!res->guest_memory_bo)) {
ret = vmw_resource_buf_alloc(res, interruptible);
if (unlikely(ret != 0))
return ret;
}
INIT_LIST_HEAD(&val_list);
- ttm_bo_get(&res->backup->base);
- val_buf->bo = &res->backup->base;
+ val_buf->bo = &res->guest_memory_bo->tbo;
val_buf->num_shared = 0;
+ drm_gem_object_get(&val_buf->bo->base);
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;
- if (res->func->needs_backup && !vmw_resource_mob_attached(res))
+ if (res->func->needs_guest_memory && !vmw_resource_mob_attached(res))
return 0;
- backup_dirty = res->backup_dirty;
- ret = ttm_bo_validate(&res->backup->base,
- res->func->backup_placement,
+ guest_memory_dirty = res->guest_memory_dirty;
+ vmw_bo_placement_set(res->guest_memory_bo, res->func->domain,
+ res->func->busy_domain);
+ ret = ttm_bo_validate(&res->guest_memory_bo->tbo,
+ &res->guest_memory_bo->placement,
&ctx);
if (unlikely(ret != 0))
@@ -576,10 +557,10 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
out_no_validate:
ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
- ttm_bo_put(val_buf->bo);
+ drm_gem_object_put(&val_buf->bo->base);
val_buf->bo = NULL;
- if (backup_dirty)
- vmw_bo_unreference(&res->backup);
+ if (guest_memory_dirty)
+ vmw_user_bo_unref(&res->guest_memory_bo);
return ret;
}
@@ -590,12 +571,13 @@ out_no_reserve:
* @res: The resource to reserve.
*
* This function takes the resource off the LRU list and makes sure
- * a backup buffer is present for guest-backed resources. However,
- * the buffer may not be bound to the resource at this point.
+ * a guest memory buffer is present for guest-backed resources.
+ * However, the buffer may not be bound to the resource at this
+ * point.
*
*/
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
- bool no_backup)
+ bool no_guest_memory)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
@@ -604,13 +586,13 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
list_del_init(&res->lru_head);
spin_unlock(&dev_priv->resource_lock);
- if (res->func->needs_backup && res->backup == NULL &&
- !no_backup) {
+ if (res->func->needs_guest_memory && !res->guest_memory_bo &&
+ !no_guest_memory) {
ret = vmw_resource_buf_alloc(res, interruptible);
if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to allocate a backup buffer "
+ DRM_ERROR("Failed to allocate a guest memory buffer "
"of size %lu. bytes\n",
- (unsigned long) res->backup_size);
+ (unsigned long) res->guest_memory_size);
return ret;
}
}
@@ -620,10 +602,10 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
/**
* vmw_resource_backoff_reservation - Unreserve and unreference a
- * backup buffer
+ * guest memory buffer
*
* @ticket: The ww acquire ctx used for reservation.
- * @val_buf: Backup buffer information.
+ * @val_buf: Guest memory buffer information.
*/
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
@@ -637,7 +619,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
ttm_eu_backoff_reservation(ticket, &val_list);
- ttm_bo_put(val_buf->bo);
+ drm_gem_object_put(&val_buf->bo->base);
val_buf->bo = NULL;
}
@@ -665,14 +647,14 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
return ret;
if (unlikely(func->unbind != NULL &&
- (!func->needs_backup || vmw_resource_mob_attached(res)))) {
+ (!func->needs_guest_memory || vmw_resource_mob_attached(res)))) {
ret = func->unbind(res, res->res_dirty, &val_buf);
if (unlikely(ret != 0))
goto out_no_unbind;
vmw_resource_mob_detach(res);
}
ret = func->destroy(res);
- res->backup_dirty = true;
+ res->guest_memory_dirty = true;
res->res_dirty = false;
out_no_unbind:
vmw_resource_backoff_reservation(ticket, &val_buf);
@@ -688,7 +670,7 @@ out_no_unbind:
* @intr: Perform waits interruptible if possible.
* @dirtying: Pending GPU operation will dirty the resource
*
- * On succesful return, any backup DMA buffer pointed to by @res->backup will
+ * On successful return, any guest memory buffer pointed to by @res->guest_memory_bo will
* be reserved and validated.
* On hardware resource shortage, this function will repeatedly evict
* resources of the same type until the validation succeeds.
@@ -711,8 +693,8 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr,
val_buf.bo = NULL;
val_buf.num_shared = 0;
- if (res->backup)
- val_buf.bo = &res->backup->base;
+ if (res->guest_memory_bo)
+ val_buf.bo = &res->guest_memory_bo->tbo;
do {
ret = vmw_resource_do_validate(res, &val_buf, dirtying);
if (likely(ret != -EBUSY))
@@ -752,9 +734,9 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr,
if (unlikely(ret != 0))
goto out_no_validate;
- else if (!res->func->needs_backup && res->backup) {
+ else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
WARN_ON_ONCE(vmw_resource_mob_attached(res));
- vmw_bo_unreference(&res->backup);
+ vmw_user_bo_unref(&res->guest_memory_bo);
}
return 0;
@@ -775,14 +757,14 @@ out_no_validate:
* validation code, since resource validation and eviction
* both require the guest memory buffer to be reserved.
*/
-void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
+void vmw_resource_unbind_list(struct vmw_bo *vbo)
{
struct ttm_validate_buffer val_buf = {
- .bo = &vbo->base,
+ .bo = &vbo->tbo,
.num_shared = 0
};
- dma_resv_assert_held(vbo->base.base.resv);
+ dma_resv_assert_held(vbo->tbo.base.resv);
while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
struct rb_node *node = vbo->res_tree.rb_node;
struct vmw_resource *res =
@@ -791,12 +773,12 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
if (!WARN_ON_ONCE(!res->func->unbind))
(void) res->func->unbind(res, res->res_dirty, &val_buf);
- res->backup_dirty = true;
+ res->guest_memory_dirty = true;
res->res_dirty = false;
vmw_resource_mob_detach(res);
}
- (void) ttm_bo_wait(&vbo->base, false, false);
+ (void) ttm_bo_wait(&vbo->tbo, false, false);
}
@@ -806,9 +788,9 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
* @dx_query_mob: Buffer containing the DX query MOB
*
* Read back cached states from the device if they exist. This function
- * assumings binding_mutex is held.
+ * assumes binding_mutex is held.
*/
-int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
+int vmw_query_readback_all(struct vmw_bo *dx_query_mob)
{
struct vmw_resource *dx_query_ctx;
struct vmw_private *dev_priv;
@@ -857,26 +839,24 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *old_mem,
struct ttm_resource *new_mem)
{
- struct vmw_buffer_object *dx_query_mob;
+ struct vmw_bo *dx_query_mob;
struct ttm_device *bdev = bo->bdev;
- struct vmw_private *dev_priv;
-
-
- dev_priv = container_of(bdev, struct vmw_private, bdev);
+ struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
mutex_lock(&dev_priv->binding_mutex);
- dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
- if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
- mutex_unlock(&dev_priv->binding_mutex);
- return;
- }
-
/* If BO is being moved from MOB to system memory */
- if (new_mem->mem_type == TTM_PL_SYSTEM &&
+ if (old_mem &&
+ new_mem->mem_type == TTM_PL_SYSTEM &&
old_mem->mem_type == VMW_PL_MOB) {
struct vmw_fence_obj *fence;
+ dx_query_mob = to_vmw_bo(&bo->base);
+ if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
+ mutex_unlock(&dev_priv->binding_mutex);
+ return;
+ }
+
(void) vmw_query_readback_all(dx_query_mob);
mutex_unlock(&dev_priv->binding_mutex);
@@ -890,7 +870,6 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
(void) ttm_bo_wait(bo, false, false);
} else
mutex_unlock(&dev_priv->binding_mutex);
-
}
/**
@@ -900,7 +879,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
*/
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
- return res->func->needs_backup;
+ return res->func->needs_guest_memory;
}
/**
@@ -996,19 +975,24 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
goto out_no_reserve;
if (res->pin_count == 0) {
- struct vmw_buffer_object *vbo = NULL;
+ struct vmw_bo *vbo = NULL;
- if (res->backup) {
- vbo = res->backup;
+ if (res->guest_memory_bo) {
+ vbo = res->guest_memory_bo;
- ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
- if (!vbo->base.pin_count) {
+ ret = ttm_bo_reserve(&vbo->tbo, interruptible, false, NULL);
+ if (ret)
+ goto out_no_validate;
+ if (!vbo->tbo.pin_count) {
+ vmw_bo_placement_set(vbo,
+ res->func->domain,
+ res->func->busy_domain);
ret = ttm_bo_validate
- (&vbo->base,
- res->func->backup_placement,
+ (&vbo->tbo,
+ &vbo->placement,
&ctx);
if (ret) {
- ttm_bo_unreserve(&vbo->base);
+ ttm_bo_unreserve(&vbo->tbo);
goto out_no_validate;
}
}
@@ -1018,7 +1002,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
}
ret = vmw_resource_validate(res, interruptible, true);
if (vbo)
- ttm_bo_unreserve(&vbo->base);
+ ttm_bo_unreserve(&vbo->tbo);
if (ret)
goto out_no_validate;
}
@@ -1051,12 +1035,12 @@ void vmw_resource_unpin(struct vmw_resource *res)
WARN_ON(ret);
WARN_ON(res->pin_count == 0);
- if (--res->pin_count == 0 && res->backup) {
- struct vmw_buffer_object *vbo = res->backup;
+ if (--res->pin_count == 0 && res->guest_memory_bo) {
+ struct vmw_bo *vbo = res->guest_memory_bo;
- (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
+ (void) ttm_bo_reserve(&vbo->tbo, false, false, NULL);
vmw_bo_pin_reserved(vbo, false);
- ttm_bo_unreserve(&vbo->base);
+ ttm_bo_unreserve(&vbo->tbo);
}
vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
@@ -1089,6 +1073,22 @@ void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
end << PAGE_SHIFT);
}
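+/**
+ * vmw_resource_clean - Flush a dirty resource's contents to the device
+ * @res: The resource to clean.
+ *
+ * If the resource is marked dirty, call its clean() callback and, on
+ * success, clear the res_dirty flag.
+ *
+ * Return: 0 on success or if the resource was not dirty, -EINVAL if the
+ * resource has no clean() callback, or the callback's error code.
+ */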
+int vmw_resource_clean(struct vmw_resource *res)
+{
+ int ret = 0;
+
+ if (res->res_dirty) {
+ if (!res->func->clean)
+ return -EINVAL;
+
+ ret = res->func->clean(res);
+ if (ret)
+ return ret;
+ res->res_dirty = false;
+ }
+ return ret;
+}
+
/**
* vmw_resources_clean - Clean resources intersecting a mob range
* @vbo: The mob buffer object
@@ -1097,7 +1097,7 @@ void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
* @num_prefault: Returns how many pages including the first have been
* cleaned and are ok to prefault
*/
-int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
+int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
pgoff_t end, pgoff_t *num_prefault)
{
struct rb_node *cur = vbo->res_tree.rb_node;
@@ -1105,6 +1105,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
unsigned long res_start = start << PAGE_SHIFT;
unsigned long res_end = end << PAGE_SHIFT;
unsigned long last_cleaned = 0;
+ int ret;
/*
* Find the resource with the lowest guest_memory_offset that intersects the
@@ -1114,9 +1115,9 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
struct vmw_resource *cur_res =
container_of(cur, struct vmw_resource, mob_node);
- if (cur_res->backup_offset >= res_end) {
+ if (cur_res->guest_memory_offset >= res_end) {
cur = cur->rb_left;
- } else if (cur_res->backup_offset + cur_res->backup_size <=
+ } else if (cur_res->guest_memory_offset + cur_res->guest_memory_size <=
res_start) {
cur = cur->rb_right;
} else {
@@ -1127,29 +1128,20 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
}
/*
- * In order of increasing backup_offset, clean dirty resorces
+ * In order of increasing guest_memory_offset, clean dirty resources
* intersecting the range.
*/
while (found) {
- if (found->res_dirty) {
- int ret;
-
- if (!found->func->clean)
- return -EINVAL;
-
- ret = found->func->clean(found);
- if (ret)
- return ret;
-
- found->res_dirty = false;
- }
- last_cleaned = found->backup_offset + found->backup_size;
+ ret = vmw_resource_clean(found);
+ if (ret)
+ return ret;
+ last_cleaned = found->guest_memory_offset + found->guest_memory_size;
cur = rb_next(&found->mob_node);
if (!cur)
break;
found = container_of(cur, struct vmw_resource, mob_node);
- if (found->backup_offset >= res_end)
+ if (found->guest_memory_offset >= res_end)
break;
}
@@ -1158,15 +1150,11 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
*/
*num_prefault = 1;
if (last_cleaned > res_start) {
- struct ttm_buffer_object *bo = &vbo->base;
+ struct ttm_buffer_object *bo = &vbo->tbo;
*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
PAGE_SIZE);
vmw_bo_fence_single(bo, NULL);
- if (bo->moving)
- dma_fence_put(bo->moving);
- bo->moving = dma_fence_get
- (dma_resv_excl_fence(bo->base.resv));
}
return 0;