Diffstat (limited to 'drivers/gpu/drm/i915/gem')
 drivers/gpu/drm/i915/gem/i915_gem_context.c              | 129
 drivers/gpu/drm/i915/gem/i915_gem_context_types.h        |   1
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c               |   2
 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c           |  16
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c                 |  23
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h                 |   2
 drivers/gpu/drm/i915/gem/i915_gem_mman.c                 |  73
 drivers/gpu/drm/i915/gem/i915_gem_object.h               |   1
 drivers/gpu/drm/i915/gem/i915_gem_object_types.h         |   4
 drivers/gpu/drm/i915/gem/i915_gem_pages.c                |   1
 drivers/gpu/drm/i915/gem/i915_gem_pm.c                   |   2
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c                |  59
 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c             |   2
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c               |  12
 drivers/gpu/drm/i915/gem/i915_gem_stolen.h               |   8
 drivers/gpu/drm/i915/gem/i915_gem_tiling.c               |  18
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c                  |  21
 drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c             |   4
 drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c               |   4
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c              |   6
 drivers/gpu/drm/i915/gem/i915_gem_wait.c                 |   2
 drivers/gpu/drm/i915/gem/selftests/huge_pages.c          |  18
 drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c |   8
 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c    |   2
 drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c     |   7
 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c         |   1
 26 files changed, 290 insertions(+), 136 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index dcbfe32fd30c..c0543c35cd6a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -78,6 +78,7 @@
#include "gt/intel_engine_user.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
+#include "gt/shmem_utils.h"
#include "pxp/intel_pxp.h"
@@ -879,6 +880,7 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
struct i915_gem_proto_context *pc,
struct drm_i915_gem_context_param *args)
{
+ struct drm_i915_private *i915 = fpriv->i915;
int ret = 0;
switch (args->param) {
@@ -904,6 +906,13 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
break;
+ case I915_CONTEXT_PARAM_LOW_LATENCY:
+ if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
+ pc->user_flags |= BIT(UCONTEXT_LOW_LATENCY);
+ else
+ ret = -EINVAL;
+ break;
+
case I915_CONTEXT_PARAM_RECOVERABLE:
if (args->size)
ret = -EINVAL;
@@ -949,6 +958,7 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
case I915_CONTEXT_PARAM_NO_ZEROMAP:
case I915_CONTEXT_PARAM_BAN_PERIOD:
case I915_CONTEXT_PARAM_RINGSIZE:
+ case I915_CONTEXT_PARAM_CONTEXT_IMAGE:
default:
ret = -EINVAL;
break;
@@ -992,6 +1002,9 @@ static int intel_context_set_gem(struct intel_context *ce,
if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
ret = intel_context_reconfigure_sseu(ce, sseu);
+ if (test_bit(UCONTEXT_LOW_LATENCY, &ctx->user_flags))
+ __set_bit(CONTEXT_LOW_LATENCY, &ce->flags);
+
return ret;
}
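The new UCONTEXT_LOW_LATENCY bit is a proto-context flag, so it can only be requested at context creation time. A minimal userspace sketch, assuming the uapi names from include/uapi/drm/i915_drm.h (error handling shortened; per set_proto_ctx_param() above, the kernel returns -EINVAL unless GuC submission is active):

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int create_low_latency_ctx(int drm_fd, uint32_t *ctx_id)
{
	struct drm_i915_gem_context_create_ext_setparam p = {
		.base.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
		.param = {
			.param = I915_CONTEXT_PARAM_LOW_LATENCY,
			.value = 1,
		},
	};
	struct drm_i915_gem_context_create_ext create = {
		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
		.extensions = (uint64_t)(uintptr_t)&p,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create))
		return -errno; /* -EINVAL without GuC submission */

	*ctx_id = create.ctx_id;
	return 0;
}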
@@ -1630,6 +1643,9 @@ i915_gem_create_context(struct drm_i915_private *i915,
if (vm)
ctx->vm = vm;
+ /* Assign early so intel_context_set_gem can access these flags */
+ ctx->user_flags = pc->user_flags;
+
mutex_init(&ctx->engines_mutex);
if (pc->num_user_engines >= 0) {
i915_gem_context_set_user_engines(ctx);
@@ -1652,8 +1668,6 @@ i915_gem_create_context(struct drm_i915_private *i915,
* is no remap info, it will be a NOP. */
ctx->remap_slice = ALL_L3_SLICES(i915);
- ctx->user_flags = pc->user_flags;
-
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
@@ -2092,6 +2106,95 @@ static int get_protected(struct i915_gem_context *ctx,
return 0;
}
+static int set_context_image(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
+{
+ struct i915_gem_context_param_context_image user;
+ struct intel_context *ce;
+ struct file *shmem_state;
+ unsigned long lookup;
+ void *state;
+ int ret = 0;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_REPLAY_GPU_HANGS_API))
+ return -EINVAL;
+
+ if (!ctx->i915->params.enable_debug_only_api)
+ return -EINVAL;
+
+ if (args->size < sizeof(user))
+ return -EINVAL;
+
+ if (copy_from_user(&user, u64_to_user_ptr(args->value), sizeof(user)))
+ return -EFAULT;
+
+ if (user.mbz)
+ return -EINVAL;
+
+ if (user.flags & ~(I915_CONTEXT_IMAGE_FLAG_ENGINE_INDEX))
+ return -EINVAL;
+
+ lookup = 0;
+ if (user.flags & I915_CONTEXT_IMAGE_FLAG_ENGINE_INDEX)
+ lookup |= LOOKUP_USER_INDEX;
+
+ ce = lookup_user_engine(ctx, lookup, &user.engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ if (user.size < ce->engine->context_size) {
+ ret = -EINVAL;
+ goto out_ce;
+ }
+
+ if (drm_WARN_ON_ONCE(&ctx->i915->drm,
+ test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
+ /*
+ * This is racy but for a debug only API, if userspace is keen
+ * to create and configure contexts, while simultaneously using
+ * them from a second thread, let them suffer by potentially not
+ * executing with the context image they just raced to apply.
+ */
+ ret = -EBUSY;
+ goto out_ce;
+ }
+
+ state = kmalloc(ce->engine->context_size, GFP_KERNEL);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out_ce;
+ }
+
+ if (copy_from_user(state, u64_to_user_ptr(user.image),
+ ce->engine->context_size)) {
+ ret = -EFAULT;
+ goto out_state;
+ }
+
+ shmem_state = shmem_create_from_data(ce->engine->name,
+ state, ce->engine->context_size);
+ if (IS_ERR(shmem_state)) {
+ ret = PTR_ERR(shmem_state);
+ goto out_state;
+ }
+
+ if (intel_context_set_own_state(ce)) {
+ ret = -EBUSY;
+ fput(shmem_state);
+ goto out_state;
+ }
+
+ ce->default_state = shmem_state;
+
+ args->size = sizeof(user);
+
+out_state:
+ kfree(state);
+out_ce:
+ intel_context_put(ce);
+ return ret;
+}
+
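set_context_image() consumes a struct i915_gem_context_param_context_image passed by pointer through the generic setparam value field. A hedged caller-side sketch, usable only on CONFIG_DRM_I915_REPLAY_GPU_HANGS_API kernels booted with i915.enable_debug_only_api=1 per the checks above (ctx_id, image and image_size are assumed to come from a previously captured context image):

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int replay_context_image(int drm_fd, uint32_t ctx_id,
				const void *image, uint32_t image_size)
{
	struct i915_gem_context_param_context_image user = {
		/* Target the engine by class:instance, not by index. */
		.engine.engine_class = I915_ENGINE_CLASS_RENDER,
		.engine.engine_instance = 0,
		.flags = 0,
		.size = image_size, /* must cover the engine's context size */
		.image = (uint64_t)(uintptr_t)image,
	};
	struct drm_i915_gem_context_param arg = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_CONTEXT_IMAGE,
		.size = sizeof(user),
		.value = (uint64_t)(uintptr_t)&user,
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg) ?
	       -errno : 0;
}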
static int ctx_setparam(struct drm_i915_file_private *fpriv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
@@ -2144,6 +2247,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
ret = set_persistence(ctx, args);
break;
+ case I915_CONTEXT_PARAM_CONTEXT_IMAGE:
+ ret = set_context_image(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
case I915_CONTEXT_PARAM_NO_ZEROMAP:
case I915_CONTEXT_PARAM_BAN_PERIOD:
@@ -2488,6 +2595,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_BAN_PERIOD:
case I915_CONTEXT_PARAM_ENGINES:
case I915_CONTEXT_PARAM_RINGSIZE:
+ case I915_CONTEXT_PARAM_CONTEXT_IMAGE:
default:
ret = -EINVAL;
break;
@@ -2600,5 +2708,22 @@ int __init i915_gem_context_module_init(void)
if (!slab_luts)
return -ENOMEM;
+ if (IS_ENABLED(CONFIG_DRM_I915_REPLAY_GPU_HANGS_API)) {
+ pr_notice("**************************************************************\n");
+ pr_notice("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_notice("** **\n");
+ if (i915_modparams.enable_debug_only_api)
+ pr_notice("** i915.enable_debug_only_api is intended to be set **\n");
+ else
+ pr_notice("** CONFIG_DRM_I915_REPLAY_GPU_HANGS_API builds are intended **\n");
+ pr_notice("** for specific userspace graphics stack developers only! **\n");
+ pr_notice("** **\n");
+ pr_notice("** If you are seeing this message please report this to the **\n");
+ pr_notice("** provider of your kernel build. **\n");
+ pr_notice("** **\n");
+ pr_notice("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_notice("**************************************************************\n");
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 03bc7f9d191b..b6d97da63d1f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -338,6 +338,7 @@ struct i915_gem_context {
#define UCONTEXT_BANNABLE 2
#define UCONTEXT_RECOVERABLE 3
#define UCONTEXT_PERSISTENCE 4
+#define UCONTEXT_LOW_LATENCY 5
/**
* @flags: small set of booleans
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 1df74f7aa3dc..9473050ac842 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -16,7 +16,7 @@
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
I915_SELFTEST_DECLARE(static bool force_different_devices;)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index d3a771afb083..f151640c1d13 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -12,8 +12,6 @@
#include <drm/drm_auth.h>
#include <drm/drm_syncobj.h>
-#include "display/intel_frontbuffer.h"
-
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
@@ -340,7 +338,7 @@ static int eb_create(struct i915_execbuffer *eb)
* Without a 1:1 association between relocation handles and
* the execobject[] index, we instead create a hashtable.
* We size it dynamically based on available memory, starting
- * first with 1:1 assocative hash and scaling back until
+ * first with 1:1 associative hash and scaling back until
* the allocation succeeds.
*
* Later on we use a positive lut_size to indicate we are
@@ -827,7 +825,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
struct i915_gem_context *ctx;
ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
- if (unlikely(IS_ERR(ctx)))
+ if (IS_ERR(ctx))
return PTR_ERR(ctx);
eb->gem_context = ctx;
@@ -917,7 +915,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
*/
if (i915_gem_context_uses_protected_content(eb->gem_context) &&
i915_gem_object_is_protected(obj)) {
- err = intel_pxp_key_check(eb->i915->pxp, obj, true);
+ err = intel_pxp_key_check(eb->i915->pxp, intel_bo_to_drm_bo(obj), true);
if (err) {
i915_gem_object_put(obj);
return ERR_PTR(err);
@@ -1533,7 +1531,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
u64_to_user_ptr(entry->relocs_ptr);
unsigned long remain = entry->relocation_count;
- if (unlikely(remain > N_RELOC(ULONG_MAX)))
+ if (unlikely(remain > N_RELOC(INT_MAX)))
return -EINVAL;
/*
@@ -1641,7 +1639,7 @@ static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
if (size == 0)
return 0;
- if (size > N_RELOC(ULONG_MAX))
+ if (size > N_RELOC(INT_MAX))
return -EINVAL;
addr = u64_to_user_ptr(entry->relocs_ptr);
@@ -2457,7 +2455,7 @@ static int eb_submit(struct i915_execbuffer *eb)
* The engine index is returned.
*/
static unsigned int
-gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
+gen8_dispatch_bsd_engine(struct drm_i915_private *i915,
struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -2465,7 +2463,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_engine < 0)
file_priv->bsd_engine =
- get_random_u32_below(dev_priv->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO]);
+ get_random_u32_below(i915->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO]);
return file_priv->bsd_engine;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 3198b64ad7db..388f90784d8a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -53,29 +53,6 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
}
/**
- * __i915_gem_object_is_lmem - Whether the object is resident in
- * lmem while in the fence signaling critical path.
- * @obj: The object to check.
- *
- * This function is intended to be called from within the fence signaling
- * path where the fence, or a pin, keeps the object from being migrated. For
- * example during gpu reset or similar.
- *
- * Return: Whether the object is resident in lmem.
- */
-bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
-{
- struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
-
-#ifdef CONFIG_LOCKDEP
- GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) &&
- i915_gem_object_evictable(obj));
-#endif
- return mr && (mr->type == INTEL_MEMORY_LOCAL ||
- mr->type == INTEL_MEMORY_STOLEN_LOCAL);
-}
-
-/**
* __i915_gem_object_create_lmem_with_ps - Create lmem object and force the
* minimum page size for the backing pages.
* @i915: The i915 instance.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 5a7a14e85c3f..ecd8f1a633a1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -19,8 +19,6 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
-bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
-
struct drm_i915_gem_object *
i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
const void *data, size_t size);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index a2195e28b625..21274aa9bddd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -252,6 +252,7 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
struct vm_area_struct *area = vmf->vma;
struct i915_mmap_offset *mmo = area->vm_private_data;
struct drm_i915_gem_object *obj = mmo->obj;
+ unsigned long obj_offset;
resource_size_t iomap;
int err;
@@ -273,10 +274,11 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
iomap -= obj->mm.region->region.start;
}
+ obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
/* PTEs are revoked in obj->ops->put_pages() */
err = remap_io_sg(area,
area->vm_start, area->vm_end - area->vm_start,
- obj->mm.pages->sgl, iomap);
+ obj->mm.pages->sgl, obj_offset, iomap);
if (area->vm_flags & VM_WRITE) {
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
@@ -290,6 +292,47 @@ out:
return i915_error_to_vmf_fault(err);
}
+static void set_address_limits(struct vm_area_struct *area,
+ struct i915_vma *vma,
+ unsigned long obj_offset,
+ resource_size_t gmadr_start,
+ unsigned long *start_vaddr,
+ unsigned long *end_vaddr,
+ unsigned long *pfn)
+{
+ unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
+ long start, end; /* memory boundaries */
+
+ /*
+ * Let's move into the ">> PAGE_SHIFT"
+ * domain to be sure not to lose bits
+ */
+ vm_start = area->vm_start >> PAGE_SHIFT;
+ vm_end = area->vm_end >> PAGE_SHIFT;
+ vma_size = vma->size >> PAGE_SHIFT;
+
+ /*
+ * Calculate the memory boundaries by considering the offset
+ * provided by the user during memory mapping and the offset
+ * provided for the partial mapping.
+ */
+ start = vm_start;
+ start -= obj_offset;
+ start += vma->gtt_view.partial.offset;
+ end = start + vma_size;
+
+ start = max_t(long, start, vm_start);
+ end = min_t(long, end, vm_end);
+
+ /* Let's move back into the "<< PAGE_SHIFT" domain */
+ *start_vaddr = (unsigned long)start << PAGE_SHIFT;
+ *end_vaddr = (unsigned long)end << PAGE_SHIFT;
+
+ *pfn = (gmadr_start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
+ *pfn += (*start_vaddr - area->vm_start) >> PAGE_SHIFT;
+ *pfn += obj_offset - vma->gtt_view.partial.offset;
+}
+
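set_address_limits() clamps the faulting user mapping against the partially bound GGTT vma entirely in page units. A worked example with illustrative numbers, assuming 4 KiB pages (PAGE_SHIFT == 12): userspace mmaps object pages [4, 20) at 0x100000, and the fault is served by a partial vma binding object pages [0, 8).

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long vm_start = 0x100UL << PAGE_SHIFT; /* mapping start */
	unsigned long vm_end   = 0x110UL << PAGE_SHIFT; /* 16 pages long */
	unsigned long obj_offset = 4;     /* mapping begins at object page 4 */
	unsigned long partial_offset = 0; /* vma binds from object page 0... */
	unsigned long vma_pages = 8;      /* ...for 8 pages */

	long start = (long)(vm_start >> PAGE_SHIFT) - obj_offset + partial_offset;
	long end = start + vma_pages;

	/* Clamp to the user mapping, exactly as max_t/min_t do above. */
	if (start < (long)(vm_start >> PAGE_SHIFT))
		start = vm_start >> PAGE_SHIFT;
	if (end > (long)(vm_end >> PAGE_SHIFT))
		end = vm_end >> PAGE_SHIFT;

	/*
	 * Only user pages [0x100, 0x104) get remapped: object pages 4..8,
	 * the overlap of the mapping [4, 20) with the bound range [0, 8).
	 * The pfn then starts obj_offset - partial_offset = 4 pages into
	 * the vma's GGTT aperture window, i.e. at object page 4.
	 */
	assert(start == 0x100 && end == 0x104);
	printf("remap %#lx-%#lx\n", (unsigned long)start << PAGE_SHIFT,
	       (unsigned long)end << PAGE_SHIFT);
	return 0;
}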
static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
@@ -302,14 +345,18 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
bool write = area->vm_flags & VM_WRITE;
struct i915_gem_ww_ctx ww;
+ unsigned long obj_offset;
+ unsigned long start, end; /* memory boundaries */
intel_wakeref_t wakeref;
struct i915_vma *vma;
pgoff_t page_offset;
+ unsigned long pfn;
int srcu;
int ret;
- /* We don't use vmf->pgoff since that has the fake offset */
+ obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+ page_offset += obj_offset;
trace_i915_gem_object_fault(obj, page_offset, true, write);
@@ -402,12 +449,16 @@ retry:
if (ret)
goto err_unpin;
+ /*
+ * Dump all the necessary parameters in this function to perform the
+ * arithmetic calculation for the virtual address start and end and
+ * the PFN (Page Frame Number).
+ */
+ set_address_limits(area, vma, obj_offset, ggtt->gmadr.start,
+ &start, &end, &pfn);
+
/* Finally, remap it using the new GTT offset */
- ret = remap_io_mapping(area,
- area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
- min_t(u64, vma->size, area->vm_end - area->vm_start),
- &ggtt->iomap);
+ ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
if (ret)
goto err_fence;
@@ -1030,9 +1081,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
rcu_read_lock();
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
- node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
- vma->vm_pgoff,
- vma_pages(vma));
+ node = drm_vma_offset_lookup_locked(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
if (node && drm_vma_node_is_allowed(node, priv)) {
/*
* Skip 0-refcnted objects as it is in the process of being
@@ -1084,6 +1135,8 @@ int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma
mmo = mmap_offset_attach(obj, mmap_type, NULL);
if (IS_ERR(mmo))
return PTR_ERR(mmo);
+
+ vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node);
}
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 3560a062d287..bb713e096db2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -89,7 +89,6 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
* @handle: userspace handle
*
* Returns:
- *
* A pointer to the object named by the handle if such exists on @filp, NULL
* otherwise. This object is only valid whilst under the RCU read lock, and
* note carefully the object may be in the process of being destroyed.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 0c5cdab278b6..68413c05c812 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -386,7 +386,7 @@ struct drm_i915_gem_object {
* and kernel mode driver for caching policy control after GEN12.
* In the meantime platform specific tables are created to translate
* i915_cache_level into pat index, for more details check the macros
- * defined i915/i915_pci.c, e.g. PVC_CACHELEVEL.
+ * defined i915/i915_pci.c, e.g. TGL_CACHELEVEL.
* For backward compatibility, this field contains values exactly match
* the entries of enum i915_cache_level for pre-GEN12 platforms (See
* LEGACY_CACHELEVEL), so that the PTE encode functions for these
@@ -535,7 +535,7 @@ struct drm_i915_gem_object {
* I915_CACHE_NONE. The only exception is userptr objects, where we
* instead force I915_CACHE_LLC, but we also don't allow userspace to
* ever change the @cache_level for such objects. Another special case
- * is dma-buf, which doesn't rely on @cache_dirty, but there we
+ * is dma-buf, which doesn't rely on @cache_dirty, but there we
* always do a forced flush when acquiring the pages, if there is a
* chance that the pages can be read directly from main memory with
* the GPU.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 0ba955611dfb..8780aa243105 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -5,6 +5,7 @@
*/
#include <drm/drm_cache.h>
+#include <linux/vmalloc.h>
#include "gt/intel_gt.h"
#include "gt/intel_tlb.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 3b27218aabe2..900c08337942 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -13,7 +13,7 @@
#include "i915_driver.h"
#include "i915_drv.h"
-#if defined(CONFIG_X86)
+#if IS_ENABLED(CONFIG_X86)
#include <asm/smp.h>
#else
#define wbinvd_on_all_cpus() \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 38b72d86560f..ae3343c81a64 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -209,8 +209,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
struct address_space *mapping = obj->base.filp->f_mapping;
unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
struct sg_table *st;
- struct sgt_iter sgt_iter;
- struct page *page;
int ret;
/*
@@ -239,9 +237,7 @@ rebuild_st:
* for PAGE_SIZE chunks instead may be helpful.
*/
if (max_segment > PAGE_SIZE) {
- for_each_sgt_page(page, sgt_iter, st)
- put_page(page);
- sg_free_table(st);
+ shmem_sg_free_table(st, mapping, false, false);
kfree(st);
max_segment = PAGE_SIZE;
@@ -424,7 +420,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
struct address_space *mapping = obj->base.filp->f_mapping;
const struct address_space_operations *aops = mapping->a_ops;
char __user *user_data = u64_to_user_ptr(arg->data_ptr);
- u64 remain, offset;
+ u64 remain;
+ loff_t pos;
unsigned int pg;
/* Caller already validated user args */
@@ -457,12 +454,12 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
*/
remain = arg->size;
- offset = arg->offset;
- pg = offset_in_page(offset);
+ pos = arg->offset;
+ pg = offset_in_page(pos);
do {
unsigned int len, unwritten;
- struct page *page;
+ struct folio *folio;
void *data, *vaddr;
int err;
char __maybe_unused c;
@@ -480,21 +477,19 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
if (err)
return err;
- err = aops->write_begin(obj->base.filp, mapping, offset, len,
- &page, &data);
+ err = aops->write_begin(obj->base.filp, mapping, pos, len,
+ &folio, &data);
if (err < 0)
return err;
- vaddr = kmap_local_page(page);
+ vaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));
pagefault_disable();
- unwritten = __copy_from_user_inatomic(vaddr + pg,
- user_data,
- len);
+ unwritten = __copy_from_user_inatomic(vaddr, user_data, len);
pagefault_enable();
kunmap_local(vaddr);
- err = aops->write_end(obj->base.filp, mapping, offset, len,
- len - unwritten, page, data);
+ err = aops->write_end(obj->base.filp, mapping, pos, len,
+ len - unwritten, folio, data);
if (err < 0)
return err;
@@ -504,7 +499,7 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
remain -= len;
user_data += len;
- offset += len;
+ pos += len;
pg = 0;
} while (remain);
@@ -654,17 +649,17 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915,
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
-i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
+i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
const void *data, resource_size_t size)
{
struct drm_i915_gem_object *obj;
struct file *file;
const struct address_space_operations *aops;
- resource_size_t offset;
+ loff_t pos;
int err;
- GEM_WARN_ON(IS_DGFX(dev_priv));
- obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
+ GEM_WARN_ON(IS_DGFX(i915));
+ obj = i915_gem_object_create_shmem(i915, round_up(size, PAGE_SIZE));
if (IS_ERR(obj))
return obj;
@@ -672,29 +667,27 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
file = obj->base.filp;
aops = file->f_mapping->a_ops;
- offset = 0;
+ pos = 0;
do {
unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
- struct page *page;
- void *pgdata, *vaddr;
+ struct folio *folio;
+ void *fsdata;
- err = aops->write_begin(file, file->f_mapping, offset, len,
- &page, &pgdata);
+ err = aops->write_begin(file, file->f_mapping, pos, len,
+ &folio, &fsdata);
if (err < 0)
goto fail;
- vaddr = kmap(page);
- memcpy(vaddr, data, len);
- kunmap(page);
+ memcpy_to_folio(folio, offset_in_folio(folio, pos), data, len);
- err = aops->write_end(file, file->f_mapping, offset, len, len,
- page, pgdata);
+ err = aops->write_end(file, file->f_mapping, pos, len, len,
+ folio, fsdata);
if (err < 0)
goto fail;
size -= len;
data += len;
- offset += len;
+ pos += len;
} while (size);
return obj;
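For reference, a hedged caller-side sketch of the rewritten helper; blob and blob_size are placeholder names for a small kernel buffer to be copied into a new shmem-backed object:

	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_shmem_from_data(i915, blob, blob_size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	/* obj now owns a page-rounded, shmem-backed copy of blob */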
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index d166052eb2ce..9117e9422844 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -117,7 +117,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
},
{ NULL, 0 },
}, *phase;
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
unsigned long count = 0;
unsigned long scanned = 0;
int err = 0, i = 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index ad6dd7f3259b..9d958a6f377e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -8,7 +8,7 @@
#include <linux/mutex.h>
#include <drm/drm_mm.h>
-#include <drm/i915_drm.h>
+#include <drm/intel/i915_drm.h>
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
@@ -457,7 +457,7 @@ static int init_reserved_stolen(struct drm_i915_private *i915)
icl_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
} else if (GRAPHICS_VER(i915) >= 8) {
- if (IS_LP(i915))
+ if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915) || IS_GEMINILAKE(i915))
chv_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
else
@@ -936,8 +936,12 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
} else {
/* Use DSM base address instead for stolen memory */
dsm_base = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
- if (WARN_ON(lmem_size < dsm_base))
- return ERR_PTR(-ENODEV);
+ if (lmem_size < dsm_base) {
+ drm_dbg(&i915->drm,
+ "Disabling stolen memory support due to OOB placement: lmem_size = %pa vs dsm_base = %pa\n",
+ &lmem_size, &dsm_base);
+ return NULL;
+ }
dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index 258381d1c054..dfe0db8bb1b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -14,14 +14,14 @@ struct drm_i915_gem_object;
#define i915_stolen_fb drm_mm_node
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
struct drm_mm_node *node, u64 size,
unsigned alignment);
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
struct drm_mm_node *node, u64 size,
unsigned alignment, u64 start,
u64 end);
-void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
struct drm_mm_node *node);
struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
@@ -31,7 +31,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
u16 instance);
struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+i915_gem_object_create_stolen(struct drm_i915_private *i915,
resource_size_t size);
bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index a049ca0b7980..d9eb84c1d2f1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -343,12 +343,12 @@ int
i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_set_tiling *args = data;
struct drm_i915_gem_object *obj;
int err;
- if (!to_gt(dev_priv)->ggtt->num_fences)
+ if (!to_gt(i915)->ggtt->num_fences)
return -EOPNOTSUPP;
obj = i915_gem_object_lookup(file, args->handle);
@@ -374,9 +374,9 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
args->stride = 0;
} else {
if (args->tiling_mode == I915_TILING_X)
- args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
+ args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_x;
else
- args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
+ args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_y;
/* Hide bit 17 swizzling from the user. This prevents old Mesa
* from aborting the application on sw fallbacks to bit 17,
@@ -427,11 +427,11 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_object *obj;
int err = -ENOENT;
- if (!to_gt(dev_priv)->ggtt->num_fences)
+ if (!to_gt(i915)->ggtt->num_fences)
return -EOPNOTSUPP;
rcu_read_lock();
@@ -447,10 +447,10 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
switch (args->tiling_mode) {
case I915_TILING_X:
- args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
+ args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_x;
break;
case I915_TILING_Y:
- args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
+ args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_y;
break;
default:
case I915_TILING_NONE:
@@ -459,7 +459,7 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
- if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
+ if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
else
args->phys_swizzle_mode = args->swizzle_mode;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index e6f177183c0f..10d8673641f7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -165,7 +165,6 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
obj->mm.region, &places[0], obj->bo_offset,
obj->base.size, flags);
- places[0].flags |= TTM_PL_FLAG_DESIRED;
/* Cache this on object? */
for (i = 0; i < num_allowed; ++i) {
@@ -779,13 +778,16 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
.interruptible = true,
.no_wait_gpu = false,
};
- int real_num_busy;
+ struct ttm_placement initial_placement;
+ struct ttm_place initial_place;
int ret;
/* First try only the requested placement. No eviction. */
- real_num_busy = placement->num_placement;
- placement->num_placement = 1;
- ret = ttm_bo_validate(bo, placement, &ctx);
+ initial_placement.num_placement = 1;
+ memcpy(&initial_place, placement->placement, sizeof(struct ttm_place));
+ initial_place.flags |= TTM_PL_FLAG_DESIRED;
+ initial_placement.placement = &initial_place;
+ ret = ttm_bo_validate(bo, &initial_placement, &ctx);
if (ret) {
ret = i915_ttm_err_to_gem(ret);
/*
@@ -800,14 +802,13 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
* If the initial attempt fails, allow all accepted placements,
* evicting if necessary.
*/
- placement->num_placement = real_num_busy;
ret = ttm_bo_validate(bo, placement, &ctx);
if (ret)
return i915_ttm_err_to_gem(ret);
}
if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
- ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+ ret = ttm_bo_populate(bo, &ctx);
if (ret)
return ret;
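The reworked __i915_ttm_get_pages() no longer mutates the caller's placement list; it builds a one-entry copy for the first pass instead. The same two-pass shape in isolation, as a sketch (error translation and the -ENXIO handling elided; one reading of TTM_PL_FLAG_DESIRED is that it keeps the first attempt best-effort rather than forcing eviction to satisfy it):

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

static int validate_preferred_then_any(struct ttm_buffer_object *bo,
				       struct ttm_placement *placement,
				       struct ttm_operation_ctx *ctx)
{
	struct ttm_place first_place = placement->placement[0];
	struct ttm_placement first = {
		.num_placement = 1,
		.placement = &first_place,
	};
	int ret;

	/* Pass 1: preferred region only, without disturbing other BOs. */
	first_place.flags |= TTM_PL_FLAG_DESIRED;
	ret = ttm_bo_validate(bo, &first, ctx);
	if (!ret)
		return 0;

	/* Pass 2: every accepted placement, evicting if necessary. */
	return ttm_bo_validate(bo, placement, ctx);
}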
@@ -1037,7 +1038,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
struct ttm_buffer_object *bo = area->vm_private_data;
struct drm_device *dev = bo->base.dev;
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
vm_fault_t ret;
int idx;
@@ -1130,7 +1131,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(bo->resource));
}
- if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+ if (wakeref && CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND != 0)
intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
@@ -1194,7 +1195,7 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
assert_object_held_shared(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 7078af2f8f79..041dab543b78 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -155,7 +155,7 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
* @bo: The ttm buffer object.
*
* This function prepares an object for move by removing all GPU bindings,
- * removing all CPU mapings and finally releasing the pages sg-table.
+ * removing all CPU mappings and finally releasing the pages sg-table.
*
* Return: 0 if successful, negative error code on error.
*/
@@ -624,7 +624,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
/* Populate ttm with pages if needed. Typically system memory. */
if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
- ret = ttm_tt_populate(bo->bdev, ttm, ctx);
+ ret = ttm_bo_populate(bo, ctx);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index ad649523d5e0..61596cecce4d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -90,7 +90,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
goto out_no_lock;
backup_bo = i915_gem_to_ttm(backup);
- err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+ err = ttm_bo_populate(backup_bo, &ctx);
if (err)
goto out_no_populate;
@@ -189,7 +189,7 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
if (!backup_bo->resource)
err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
if (!err)
- err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+ err = ttm_bo_populate(backup_bo, &ctx);
if (!err) {
err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
false);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 61abfb505766..09b68713ab32 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -463,13 +463,13 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
struct drm_file *file)
{
static struct lock_class_key __maybe_unused lock_class;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
struct drm_i915_gem_object __maybe_unused *obj;
int __maybe_unused ret;
u32 __maybe_unused handle;
- if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
+ if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) {
/* We cannot support coherent userptr objects on hw without
* LLC and broken snooping.
*/
@@ -501,7 +501,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
* On almost all of the older hw, we cannot tell the GPU that
* a page is readonly.
*/
- if (!to_gt(dev_priv)->vm->has_read_only)
+ if (!to_gt(i915)->vm->has_read_only)
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index d4b918fb11ce..1f55e62044a4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -266,7 +266,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
args->timeout_ns = 0;
- /* Asked to wait beyond the jiffie/scheduler precision? */
+ /* Asked to wait beyond the jiffy/scheduler precision? */
if (ret == -ETIME && args->timeout_ns)
ret = -EAGAIN;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 3ff3d8889c6c..84d41e6ccf05 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -713,7 +713,7 @@ static int igt_ppgtt_huge_fill(void *arg)
{
struct drm_i915_private *i915 = arg;
unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
- bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50);
+ bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55);
struct i915_address_space *vm;
struct i915_gem_context *ctx;
unsigned long max_pages;
@@ -857,7 +857,7 @@ out:
static int igt_ppgtt_64K(void *arg)
{
struct drm_i915_private *i915 = arg;
- bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50);
+ bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55);
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
struct i915_gem_context *ctx;
@@ -1969,19 +1969,19 @@ int i915_gem_huge_page_mock_selftests(void)
SUBTEST(igt_mock_memory_region_huge_pages),
SUBTEST(igt_mock_ppgtt_misaligned_dma),
};
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *i915;
struct i915_ppgtt *ppgtt;
int err;
- dev_priv = mock_gem_device();
- if (!dev_priv)
+ i915 = mock_gem_device();
+ if (!i915)
return -ENOMEM;
/* Pretend to be a device which supports the 48b PPGTT */
- RUNTIME_INFO(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
- RUNTIME_INFO(dev_priv)->ppgtt_size = 48;
+ RUNTIME_INFO(i915)->ppgtt_type = INTEL_PPGTT_FULL;
+ RUNTIME_INFO(i915)->ppgtt_size = 48;
- ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
+ ppgtt = i915_ppgtt_create(to_gt(i915), 0);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
@@ -2005,7 +2005,7 @@ int i915_gem_huge_page_mock_selftests(void)
out_put:
i915_vm_put(&ppgtt->vm);
out_unlock:
- mock_destroy_device(dev_priv);
+ mock_destroy_device(i915);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 10a7847f1b04..bac15196b4d2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -117,7 +117,7 @@ static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
if (gen < 12)
return true;
- if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
return false;
return HAS_DISPLAY(i915);
@@ -166,7 +166,7 @@ static int prepare_blit(const struct tiled_blits *t,
src_pitch = t->width; /* in dwords */
if (src->tiling == CLIENT_TILING_Y) {
src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
- if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))
src_4t = XY_FAST_COPY_BLT_D1_SRC_TILE4;
} else if (src->tiling == CLIENT_TILING_X) {
src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(TILE_X);
@@ -177,7 +177,7 @@ static int prepare_blit(const struct tiled_blits *t,
dst_pitch = t->width; /* in dwords */
if (dst->tiling == CLIENT_TILING_Y) {
dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
- if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))
dst_4t = XY_FAST_COPY_BLT_D1_DST_TILE4;
} else if (dst->tiling == CLIENT_TILING_X) {
dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(TILE_X);
@@ -365,7 +365,7 @@ static u64 tiled_offset(const struct intel_gt *gt,
v += x;
swizzle = gt->ggtt->bit_6_swizzle_x;
- } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
+ } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55)) {
/* Y-major tiling layout is Tile4 for Xe_HP and beyond */
v = linear_x_y_to_ftiled_pos(x_pos, y_pos, stride, 32);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 89d4dc8b60c6..eb0158e43417 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -369,7 +369,7 @@ static int live_parallel_switch(void *arg)
if (!data[n].ce[0])
continue;
- worker = kthread_create_worker(0, "igt/parallel:%s",
+ worker = kthread_run_worker(0, "igt/parallel:%s",
data[n].ce[0]->engine->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index d684a70f2c04..2fda549dd82d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "i915_selftest.h"
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt.h"
#include "mock_context.h"
#include "mock_dmabuf.h"
@@ -155,6 +156,7 @@ static int verify_access(struct drm_i915_private *i915,
struct file *file;
u32 *vaddr;
int err = 0, i;
+ unsigned int mode;
file = mock_file(i915);
if (IS_ERR(file))
@@ -194,7 +196,8 @@ static int verify_access(struct drm_i915_private *i915,
if (err)
goto out_file;
- vaddr = i915_gem_object_pin_map_unlocked(native_obj, I915_MAP_WB);
+ mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, false);
+ vaddr = i915_gem_object_pin_map_unlocked(native_obj, mode);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto out_file;
@@ -503,7 +506,7 @@ static int igt_dmabuf_export_vmap(void *arg)
goto out;
}
- if (memchr_inv(ptr, 0, dmabuf->size)) {
+ if (!mem_is_zero(ptr, dmabuf->size)) {
pr_err("Exported object not initialised to zero!\n");
err = -EINVAL;
goto out;
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index b2a5882b8f81..075657018739 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -4,6 +4,7 @@
* Copyright © 2016 Intel Corporation
*/
+#include <linux/vmalloc.h>
#include "mock_dmabuf.h"
static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,