Diffstat (limited to 'drivers/gpu/drm/i915/i915_vma.h')
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h  458
1 file changed, 281 insertions(+), 177 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 20cf272c97b1..8054047840aa 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -26,158 +26,163 @@
#define __I915_VMA_H__
#include <linux/io-mapping.h>
+#include <linux/rbtree.h>
#include <drm/drm_mm.h>
+#include "gem/i915_gem_object.h"
+#include "gt/intel_ggtt_fencing.h"
+
+#include "i915_active.h"
#include "i915_gem_gtt.h"
-#include "i915_gem_fence_reg.h"
-#include "i915_gem_object.h"
-#include "i915_gem_request.h"
+#include "i915_ptr_util.h"
+#include "i915_request.h"
+#include "i915_vma_resource.h"
+#include "i915_vma_types.h"
+struct i915_vma *
+i915_vma_instance(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_gtt_view *view);
-enum i915_cache_level;
+void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
+#define I915_VMA_RELEASE_MAP BIT(0)
-/**
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
- struct drm_mm_node node;
- struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
- struct drm_i915_fence_reg *fence;
- struct reservation_object *resv; /** Alias of obj->resv */
- struct sg_table *pages;
- void __iomem *iomap;
- u64 size;
- u64 display_alignment;
-
- u32 fence_size;
- u32 fence_alignment;
-
- unsigned int flags;
- /**
- * How many users have pinned this object in GTT space. The following
- * users can each hold at most one reference: pwrite/pread, execbuffer
- * (objects are not allowed multiple times for the same batchbuffer),
- * and the framebuffer code. When switching/pageflipping, the
- * framebuffer code has at most two buffers pinned per crtc.
- *
- * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
- * bits with absolutely no headroom. So use 4 bits.
- */
-#define I915_VMA_PIN_MASK 0xf
-#define I915_VMA_PIN_OVERFLOW BIT(5)
-
- /** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND BIT(6)
-#define I915_VMA_LOCAL_BIND BIT(7)
-#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
-
-#define I915_VMA_GGTT BIT(8)
-#define I915_VMA_CAN_FENCE BIT(9)
-#define I915_VMA_CLOSED BIT(10)
-
- unsigned int active;
- struct i915_gem_active last_read[I915_NUM_ENGINES];
- struct i915_gem_active last_fence;
-
- /**
- * Support different GGTT views into the same object.
- * This means there can be multiple VMA mappings per object and per VM.
- * i915_ggtt_view_type is used to distinguish between those entries.
- * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also
- * assumed in GEM functions which take no ggtt view parameter.
- */
- struct i915_ggtt_view ggtt_view;
-
- /** This object's place on the active/inactive lists */
- struct list_head vm_link;
+static inline bool i915_vma_is_active(const struct i915_vma *vma)
+{
+ return !i915_active_is_idle(&vma->active);
+}
- struct list_head obj_link; /* Link in the object's VMA list */
- struct rb_node obj_node;
- struct hlist_node obj_hash;
+/* do not reserve memory to prevent deadlocks */
+#define __EXEC_OBJECT_NO_RESERVE BIT(31)
+#define __EXEC_OBJECT_NO_REQUEST_AWAIT BIT(30)
- /** This vma's place in the execbuf reservation list */
- struct list_head exec_link;
- struct list_head reloc_link;
+int __must_check _i915_vma_move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ struct dma_fence *fence,
+ unsigned int flags);
+static inline int __must_check
+i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq,
+ unsigned int flags)
+{
+ return _i915_vma_move_to_active(vma, rq, &rq->fence, flags);
+}
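
[Editor's note, not part of the patch: a minimal usage sketch of the wrapper above. It assumes the caller takes the object lock around the call and uses the uapi EXEC_OBJECT_WRITE flag to record a GPU write; the helper name is hypothetical.]

/* Illustrative only: mark @vma as written by request @rq. */
static int mark_vma_written(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);

	return err;
}
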
- /** This vma's place in the eviction list */
- struct list_head evict_link;
+#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter)
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- struct drm_i915_gem_exec_object2 *exec_entry;
- struct hlist_node exec_node;
- u32 exec_handle;
+static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
+{
+ return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
+}
- struct i915_gem_context *ctx;
- struct hlist_node ctx_node;
- u32 ctx_handle;
-};
+static inline bool i915_vma_is_dpt(const struct i915_vma *vma)
+{
+ return i915_is_dpt(vma->vm);
+}
-struct i915_vma *
-i915_vma_instance(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
+static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
+{
+ return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
+}
-void i915_vma_unpin_and_release(struct i915_vma **p_vma);
+static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ set_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
+}
-static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
+static inline bool i915_vma_unset_ggtt_write(struct i915_vma *vma)
{
- return vma->flags & I915_VMA_GGTT;
+ return test_and_clear_bit(I915_VMA_GGTT_WRITE_BIT,
+ __i915_vma_flags(vma));
}
+void i915_vma_flush_writes(struct i915_vma *vma);
+
static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_CAN_FENCE;
+ return test_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
-static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+static inline bool i915_vma_set_userfault(struct i915_vma *vma)
{
- return vma->flags & I915_VMA_CLOSED;
+ GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+ return test_and_set_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}
-static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
+static inline void i915_vma_unset_userfault(struct i915_vma *vma)
{
- return vma->active;
+ return clear_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}
-static inline bool i915_vma_is_active(const struct i915_vma *vma)
+static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
+{
+ return test_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
+}
+
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+{
+ return !list_empty(&vma->closed_link);
+}
+
+/* Internal use only. */
+static inline u64 __i915_vma_size(const struct i915_vma *vma)
{
- return i915_vma_get_active(vma);
+ return vma->node.size - 2 * vma->guard;
}
-static inline void i915_vma_set_active(struct i915_vma *vma,
- unsigned int engine)
+/**
+ * i915_vma_size - Obtain the va range size of the vma
+ * @vma: The vma
+ *
+ * GPU virtual address space may be allocated with padding. This
+ * function returns the effective virtual address range size
+ * with padding subtracted.
+ *
+ * Return: The effective virtual address range size.
+ */
+static inline u64 i915_vma_size(const struct i915_vma *vma)
{
- vma->active |= BIT(engine);
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ return __i915_vma_size(vma);
}
-static inline void i915_vma_clear_active(struct i915_vma *vma,
- unsigned int engine)
+/* Internal use only. */
+static inline u64 __i915_vma_offset(const struct i915_vma *vma)
{
- vma->active &= ~BIT(engine);
+ /* The actual start of the vma->pages is after the guard pages. */
+ return vma->node.start + vma->guard;
}
-static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
- unsigned int engine)
+/**
+ * i915_vma_offset - Obtain the va offset of the vma
+ * @vma: The vma
+ *
+ * GPU virtual address space may be allocated with padding. This
+ * function returns the effective virtual address offset the gpu
+ * should use to access the bound data.
+ *
+ * Return: The effective virtual address offset.
+ */
+static inline u64 i915_vma_offset(const struct i915_vma *vma)
{
- return vma->active & BIT(engine);
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ return __i915_vma_offset(vma);
}
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON(!vma->node.allocated);
- GEM_BUG_ON(upper_32_bits(vma->node.start));
- GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
- return lower_32_bits(vma->node.start);
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ GEM_BUG_ON(upper_32_bits(i915_vma_offset(vma)));
+ GEM_BUG_ON(upper_32_bits(i915_vma_offset(vma) +
+ i915_vma_size(vma) - 1));
+ return lower_32_bits(i915_vma_offset(vma));
+}
+
+static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma)
+{
+ return i915_vm_to_ggtt(vma->vm)->pin_bias;
}
static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
@@ -186,31 +191,34 @@ static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
return vma;
}
-static inline void i915_vma_put(struct i915_vma *vma)
+static inline struct i915_vma *i915_vma_tryget(struct i915_vma *vma)
{
- i915_gem_object_put(vma->obj);
+ if (likely(kref_get_unless_zero(&vma->obj->base.refcount)))
+ return vma;
+
+ return NULL;
}
-static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
+static inline void i915_vma_put(struct i915_vma *vma)
{
- return a - b;
+ i915_gem_object_put(vma->obj);
}
static inline long
i915_vma_compare(struct i915_vma *vma,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+ const struct i915_gtt_view *view)
{
ptrdiff_t cmp;
- GEM_BUG_ON(view && !i915_is_ggtt(vm));
+ GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
cmp = ptrdiff(vma->vm, vm);
if (cmp)
return cmp;
- BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL != 0);
- cmp = vma->ggtt_view.type;
+ BUILD_BUG_ON(I915_GTT_VIEW_NORMAL != 0);
+ cmp = vma->gtt_view.type;
if (!view)
return cmp;
@@ -218,7 +226,9 @@ i915_vma_compare(struct i915_vma *vma,
if (cmp)
return cmp;
- /* ggtt_view.type also encodes its size so that we both distinguish
+ assert_i915_gem_gtt_types();
+
+ /* gtt_view.type also encodes its size so that we both distinguish
* different views using it as a "type" and also use a compact (no
* accessing of uninitialised padding bytes) memcmp without storing
* an extra parameter or adding more code.
@@ -228,47 +238,66 @@ i915_vma_compare(struct i915_vma *vma,
* we assert above that all branches have the same address, and that
* each branch has a unique type/size.
*/
- BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL >= I915_GGTT_VIEW_PARTIAL);
- BUILD_BUG_ON(I915_GGTT_VIEW_PARTIAL >= I915_GGTT_VIEW_ROTATED);
+ BUILD_BUG_ON(I915_GTT_VIEW_NORMAL >= I915_GTT_VIEW_PARTIAL);
+ BUILD_BUG_ON(I915_GTT_VIEW_PARTIAL >= I915_GTT_VIEW_ROTATED);
+ BUILD_BUG_ON(I915_GTT_VIEW_ROTATED >= I915_GTT_VIEW_REMAPPED);
BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
offsetof(typeof(*view), partial));
- return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
+ BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
+ offsetof(typeof(*view), remapped));
+ return memcmp(&vma->gtt_view.partial, &view->partial, view->type);
}
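
[Editor's sketch of the rbtree lookup this comparator is shaped for; it assumes the per-object vma tree (obj->vma.tree) and the obj_node linkage declared in the GEM object and i915_vma_types.h headers, and a caller holding obj->vma.lock.]

/* Illustrative only: find an existing vma for (vm, view) on @obj. */
static struct i915_vma *
lookup_vma(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_gtt_view *view)
{
	struct rb_node *rb = obj->vma.tree.rb_node;

	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp = i915_vma_compare(vma, vm, view);

		if (cmp == 0)
			return vma;

		rb = cmp < 0 ? rb->rb_right : rb->rb_left;
	}

	return NULL;
}
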
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags);
-bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
+struct i915_vma_work *i915_vma_work(void);
+int i915_vma_bind(struct i915_vma *vma,
+ unsigned int pat_index,
+ u32 flags,
+ struct i915_vma_work *work,
+ struct i915_vma_resource *vma_res);
+
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color);
bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
+void i915_vma_revoke_mmap(struct i915_vma *vma);
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb);
+struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async);
+int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
+int __must_check i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm);
+int __must_check i915_vma_unbind_unlocked(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
+void i915_vma_reopen(struct i915_vma *vma);
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags);
-static inline int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
- BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
- BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+void i915_vma_destroy_locked(struct i915_vma *vma);
+void i915_vma_destroy(struct i915_vma *vma);
- /* Pin early to prevent the shrinker/eviction logic from destroying
- * our vma as we insert and bind.
- */
- if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) {
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
- return 0;
- }
+#define assert_vma_held(vma) dma_resv_assert_held((vma)->obj->base.resv)
+
+static inline void i915_vma_lock(struct i915_vma *vma)
+{
+ dma_resv_lock(vma->obj->base.resv, NULL);
+}
- return __i915_vma_do_pin(vma, size, alignment, flags);
+static inline void i915_vma_unlock(struct i915_vma *vma)
+{
+ dma_resv_unlock(vma->obj->base.resv);
}
+int __must_check
+i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ u64 size, u64 alignment, u64 flags);
+
+int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
+
+int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ u32 align, unsigned int flags);
+
static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_PIN_MASK;
+ return atomic_read(&vma->flags) & I915_VMA_PIN_MASK;
}
static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
@@ -278,22 +307,39 @@ static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
static inline void __i915_vma_pin(struct i915_vma *vma)
{
- vma->flags++;
- GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+ atomic_inc(&vma->flags);
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
}
static inline void __i915_vma_unpin(struct i915_vma *vma)
{
- vma->flags--;
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ atomic_dec(&vma->flags);
}
static inline void i915_vma_unpin(struct i915_vma *vma)
{
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
__i915_vma_unpin(vma);
}
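
[Editor's usage sketch, not from the patch: the pin count held in vma->flags keeps the binding resident between i915_vma_pin() and i915_vma_unpin(); PIN_GLOBAL requests a GGTT binding.]

/* Illustrative: keep @vma bound in the GGTT while it is being used. */
static int with_ggtt_binding(struct i915_vma *vma)
{
	int err;

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		return err;

	/* ... the binding cannot be evicted or moved while pinned ... */

	i915_vma_unpin(vma);
	return 0;
}
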
+static inline bool i915_vma_is_bound(const struct i915_vma *vma,
+ unsigned int where)
+{
+ return atomic_read(&vma->flags) & where;
+}
+
+static inline bool i915_node_color_differs(const struct drm_mm_node *node,
+ unsigned long color)
+{
+ return drm_mm_node_allocated(node) && node->color != color;
+}
+
+static inline void __iomem *i915_vma_get_iomap(struct i915_vma *vma)
+{
+ return READ_ONCE(vma->iomap);
+}
+
/**
* i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
* @vma: VMA to iomap
@@ -303,12 +349,9 @@ static inline void i915_vma_unpin(struct i915_vma *vma)
* the caller must call i915_vma_unpin_iomap to relinquish the pinning
* after the iomapping is no longer required.
*
- * Callers must hold the struct_mutex.
- *
* Returns a valid iomapped pointer or ERR_PTR.
*/
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
-#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
/**
* i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap
@@ -316,21 +359,10 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
*
* Unpins the previously iomapped VMA from i915_vma_pin_iomap().
*
- * Callers must hold the struct_mutex. This function is only valid to be
- * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ * This function is only valid to be called on a VMA previously
+ * iomapped by the caller with i915_vma_pin_iomap().
*/
-static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
- GEM_BUG_ON(vma->iomap == NULL);
- i915_vma_unpin(vma);
-}
-
-static inline struct page *i915_vma_first_page(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
- return sg_page(vma->pages->sgl);
-}
+void i915_vma_unpin_iomap(struct i915_vma *vma);
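
[Editor's sketch of the documented pin/unpin pairing for the aperture mapping, assuming a dword-aligned @offset within the binding; the helper name is hypothetical.]

/* Illustrative: write one dword through the GGTT aperture mapping. */
static int poke_through_aperture(struct i915_vma *vma, u32 offset, u32 value)
{
	void __iomem *map;

	map = i915_vma_pin_iomap(vma);
	if (IS_ERR(map))
		return PTR_ERR(map);

	writel(value, map + offset);

	i915_vma_unpin_iomap(vma);
	return 0;
}
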
/**
* i915_vma_pin_fence - pin fencing state
@@ -344,18 +376,17 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
* i915_vma_unpin_fence().
*
* Returns:
- *
* True if the vma has a fence, false otherwise.
*/
-static inline bool
-i915_vma_pin_fence(struct i915_vma *vma)
+int __must_check i915_vma_pin_fence(struct i915_vma *vma);
+void i915_vma_revoke_fence(struct i915_vma *vma);
+
+int __i915_vma_pin_fence(struct i915_vma *vma);
+
+static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
- if (vma->fence) {
- vma->fence->pin_count++;
- return true;
- } else
- return false;
+ GEM_BUG_ON(atomic_read(&vma->fence->pin_count) <= 0);
+ atomic_dec(&vma->fence->pin_count);
}
/**
@@ -369,12 +400,85 @@ i915_vma_pin_fence(struct i915_vma *vma)
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
- if (vma->fence) {
- GEM_BUG_ON(vma->fence->pin_count <= 0);
- vma->fence->pin_count--;
- }
+ if (vma->fence)
+ __i915_vma_unpin_fence(vma);
+}
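
[Editor's illustration, not part of the diff: the pairing the comments above describe. Note that vma->fence may legitimately be NULL after a successful pin when no fence register is required.]

/* Illustrative: hold the fence register across a tiled aperture access. */
static int with_fence(struct i915_vma *vma)
{
	int err;

	err = i915_vma_pin_fence(vma);
	if (err)
		return err;

	if (vma->fence) {
		/* ... detiling through the aperture is now safe ... */
	}

	i915_vma_unpin_fence(vma);
	return 0;
}
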
+
+static inline int i915_vma_fence_id(const struct i915_vma *vma)
+{
+ return vma->fence ? vma->fence->id : -1;
+}
+
+void i915_vma_parked(struct intel_gt *gt);
+
+static inline bool i915_vma_is_scanout(const struct i915_vma *vma)
+{
+ return test_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
+static inline void i915_vma_mark_scanout(struct i915_vma *vma)
+{
+ set_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
+static inline void i915_vma_clear_scanout(struct i915_vma *vma)
+{
+ clear_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
+void i915_ggtt_clear_scanout(struct drm_i915_gem_object *obj);
+
+#define for_each_until(cond) if (cond) break; else
+
+/**
+ * for_each_ggtt_vma - Iterate over the GGTT VMA belonging to an object.
+ * @V: the #i915_vma iterator
+ * @OBJ: the #drm_i915_gem_object
+ *
+ * GGTT VMAs are placed at the start of the object's vma.list, see
+ * vma_create(), so we can stop our walk as soon as we see a ppgtt VMA,
+ * or the list is empty.
+ */
+#define for_each_ggtt_vma(V, OBJ) \
+ list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \
+ for_each_until(!i915_vma_is_ggtt(V))
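
[Editor's sketch of the iterator in use; locking of obj->vma.lock is omitted for brevity and the helper name is hypothetical.]

/* Illustrative: count the object's current GGTT bindings. */
static unsigned int count_ggtt_vma(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	unsigned int count = 0;

	for_each_ggtt_vma(vma, obj)
		count++;

	return count;
}
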
+
+struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
+void i915_vma_make_shrinkable(struct i915_vma *vma);
+void i915_vma_make_purgeable(struct i915_vma *vma);
+
+int i915_vma_wait_for_bind(struct i915_vma *vma);
+
+static inline int i915_vma_sync(struct i915_vma *vma)
+{
+ /* Wait for the asynchronous bindings and pending GPU reads */
+ return i915_active_wait(&vma->active);
+}
+
+/**
+ * i915_vma_get_current_resource - Get the current resource of the vma
+ * @vma: The vma to get the current resource from.
+ *
+ * It's illegal to call this function if the vma is not bound.
+ *
+ * Return: A refcounted pointer to the current vma resource
+ * of the vma, assuming the vma is bound.
+ */
+static inline struct i915_vma_resource *
+i915_vma_get_current_resource(struct i915_vma *vma)
+{
+ return i915_vma_resource_get(vma->resource);
}
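
[Editor's sketch of the get/put pairing, assuming the start field and i915_vma_resource_put() provided by i915_vma_resource.h; only valid while the vma is bound.]

/* Illustrative: sample the bound start address via the current resource. */
static u64 current_resource_start(struct i915_vma *vma)
{
	struct i915_vma_resource *vma_res;
	u64 start;

	vma_res = i915_vma_get_current_resource(vma);
	start = vma_res->start;
	i915_vma_resource_put(vma_res);

	return start;
}
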
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+void i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
+ struct i915_vma *vma);
#endif
+void i915_vma_module_exit(void);
+int i915_vma_module_init(void);
+
+I915_SELFTEST_DECLARE(int i915_vma_get_pages(struct i915_vma *vma));
+I915_SELFTEST_DECLARE(void i915_vma_put_pages(struct i915_vma *vma));
+
+#endif