Diffstat (limited to 'drivers/gpu/drm/i915/i915_vma.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c  16
1 file changed, 7 insertions, 9 deletions
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index b70715b1411d..632e316f8b05 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -26,7 +26,7 @@
#include <linux/dma-fence-array.h>
#include <drm/drm_gem.h>
-#include "display/intel_display.h"
+#include "display/intel_fb.h"
#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object_frontbuffer.h"
@@ -778,8 +778,8 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
* @flags: mask of PIN_* flags to use
*
* First we try to allocate some free space that meets the requirements for
- * the VMA. Failiing that, if the flags permit, it will evict an old VMA,
- * preferrably the oldest idle entry to make room for the new VMA.
+ * the VMA. Failing that, if the flags permit, it will evict an old VMA,
+ * preferably the oldest idle entry to make room for the new VMA.
*
* Returns:
* 0 on success, negative error code otherwise.
@@ -877,7 +877,7 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
* objects which need to be tightly packed into the low 32bits.
*
* Note that we assume that GGTT are limited to 4GiB for the
- * forseeable future. See also i915_ggtt_offset().
+ * foreseeable future. See also i915_ggtt_offset().
*/
if (upper_32_bits(end - 1) &&
vma->page_sizes.sg > I915_GTT_PAGE_SIZE &&
@@ -1001,7 +1001,7 @@ rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
/*
* The DE ignores the PTEs for the padding tiles, the sg entry
- * here is just a conenience to indicate how many padding PTEs
+ * here is just a convenience to indicate how many padding PTEs
* to insert at this spot.
*/
sg_set_page(sg, NULL, left, 0);
@@ -1776,8 +1776,6 @@ static void release_references(struct i915_vma *vma, struct intel_gt *gt,
if (vm_ddestroy)
i915_vm_resv_put(vma->vm);
- /* Wait for async active retire */
- i915_active_wait(&vma->active);
i915_active_fini(&vma->active);
GEM_WARN_ON(vma->resource);
i915_vma_free(vma);
@@ -2159,7 +2157,7 @@ static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
int i915_vma_unbind(struct i915_vma *vma)
{
struct i915_address_space *vm = vma->vm;
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
int err;
assert_object_held_shared(vma->obj);
@@ -2198,7 +2196,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
{
struct drm_i915_gem_object *obj = vma->obj;
struct i915_address_space *vm = vma->vm;
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
struct dma_fence *fence;
int err;