author		Matthew Auld <matthew.auld@intel.com>	2021-04-27 09:54:14 +0100
committer	Matthew Auld <matthew.auld@intel.com>	2021-04-27 16:21:47 +0100
commit		6aed5673f00dcbe86dc40408cab6d85abacd20d4
tree		f39994fc4fbd430180e4448c422482a7951f7e41 /drivers/gpu/drm/i915/gt/intel_gtt.c
parent		529b9ec809a0a073192e5dc757fc383e11926661
drm/i915/gtt/dgfx: place the PD in LMEM
It's a requirement that for dgfx we place all the paging structures in
device local-memory.

v2: use i915_coherent_map_type()
v3: improve the shared dma-resv object comment

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210427085417.120246-4-matthew.auld@intel.com
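For context: this file only adds the LMEM-backed allocator; the hook-up happens outside intel_gtt.c, where the vm's page-table allocator is pointed at alloc_pt_lmem() on discrete parts. A minimal sketch of that selection, assuming the existing vm->alloc_pt_dma hook and the HAS_LMEM() predicate (the helper name below is hypothetical):

static void vm_select_pt_alloc(struct i915_address_space *vm)
{
	/*
	 * Hypothetical helper: on dgfx every paging structure must
	 * live in LMEM, so route allocations to alloc_pt_lmem().
	 */
	if (HAS_LMEM(vm->i915))
		vm->alloc_pt_dma = alloc_pt_lmem;
	else
		vm->alloc_pt_dma = alloc_pt_dma;
}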
Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_gtt.c')
-rw-r--r--	drivers/gpu/drm/i915/gt/intel_gtt.c	30
1 file changed, 27 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index d386b89e2758..061c39d2ad51 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -7,10 +7,26 @@
 
 #include <linux/fault-inject.h>
 
+#include "gem/i915_gem_lmem.h"
 #include "i915_trace.h"
 #include "intel_gt.h"
 #include "intel_gtt.h"
 
+struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
+{
+	struct drm_i915_gem_object *obj;
+
+	obj = i915_gem_object_create_lmem(vm->i915, sz, 0);
+	/*
+	 * Ensure all paging structures for this vm share the same dma-resv
+	 * object underneath, with the idea that one object_lock() will lock
+	 * them all at once.
+	 */
+	if (!IS_ERR(obj))
+		obj->base.resv = &vm->resv;
+	return obj;
+}
+
 struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
 {
 	struct drm_i915_gem_object *obj;
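The comment in alloc_pt_lmem() above is the key invariant: every paging structure allocated through these helpers shares &vm->resv. What that buys callers, as a sketch (touch_vm_page_tables() is a hypothetical illustration, not a call site from this patch):

static void touch_vm_page_tables(struct i915_address_space *vm)
{
	/*
	 * All PT objects point at &vm->resv, so one lock on the vm's
	 * reservation object locks every paging structure at once.
	 */
	dma_resv_lock(&vm->resv, NULL);
	/* ... safely update any of the vm's page tables here ... */
	dma_resv_unlock(&vm->resv);
}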
@@ -19,7 +35,11 @@ struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
 		i915_gem_shrink_all(vm->i915);
 
 	obj = i915_gem_object_create_internal(vm->i915, sz);
-	/* ensure all dma objects have the same reservation class */
+	/*
+	 * Ensure all paging structures for this vm share the same dma-resv
+	 * object underneath, with the idea that one object_lock() will lock
+	 * them all at once.
+	 */
 	if (!IS_ERR(obj))
 		obj->base.resv = &vm->resv;
 	return obj;
@@ -27,9 +47,11 @@ struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
 
 int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
 {
+	enum i915_map_type type;
 	void *vaddr;
 
-	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
+	type = i915_coherent_map_type(vm->i915, obj, true);
+	vaddr = i915_gem_object_pin_map_unlocked(obj, type);
 	if (IS_ERR(vaddr))
 		return PTR_ERR(vaddr);
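The switch from a hard-coded I915_MAP_WB matters because LMEM is not CPU-cacheable: objects there must be mapped write-combined. The policy behind i915_coherent_map_type() is roughly the following sketch (not a verbatim copy of the helper):

static enum i915_map_type
pt_map_type_sketch(struct drm_i915_private *i915,
		   struct drm_i915_gem_object *obj, bool always_coherent)
{
	/* LMEM-backed objects can only be mapped write-combined */
	if (i915_gem_object_is_lmem(obj))
		return I915_MAP_WC;
	/* system memory: WB when the LLC keeps it coherent, or when required */
	if (HAS_LLC(i915) || always_coherent)
		return I915_MAP_WB;
	return I915_MAP_WC;
}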
@@ -39,9 +61,11 @@ int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
 
 int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
 {
+	enum i915_map_type type;
 	void *vaddr;
 
-	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	type = i915_coherent_map_type(vm->i915, obj, true);
+	vaddr = i915_gem_object_pin_map(obj, type);
 	if (IS_ERR(vaddr))
 		return PTR_ERR(vaddr);
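Putting the pieces together, a caller-side view of these helpers might look like the sketch below (setup_one_pt() is hypothetical; error handling trimmed to the essentials):

static int setup_one_pt(struct i915_address_space *vm)
{
	struct drm_i915_gem_object *pt;
	int err;

	/* routes to alloc_pt_lmem() on dgfx, alloc_pt_dma() otherwise */
	pt = vm->alloc_pt_dma(vm, SZ_4K);
	if (IS_ERR(pt))
		return PTR_ERR(pt);

	/* picks WB or WC via i915_coherent_map_type() as above */
	err = map_pt_dma(vm, pt);
	if (err)
		i915_gem_object_put(pt);
	return err;
}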