Diffstat (limited to 'drivers/gpu/drm/i915/gem')
45 files changed, 395 insertions, 295 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index ddda468241ef..6e4d0ce3952f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2014-2016 Intel Corporation
  */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 7d97ea2a653e..c4854c5b4e0f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2016 Intel Corporation
  */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.h b/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
index e6c382973129..9d7ee1579900 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2016 Intel Corporation
  */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index dcbfe32fd30c..15835952352e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2011-2012 Intel Corporation
  */
@@ -78,6 +77,7 @@
 #include "gt/intel_engine_user.h"
 #include "gt/intel_gpu_commands.h"
 #include "gt/intel_ring.h"
+#include "gt/shmem_utils.h"
 
 #include "pxp/intel_pxp.h"
 
@@ -237,7 +237,7 @@ static int proto_context_set_persistence(struct drm_i915_private *i915,
		 *
		 * However, if we cannot reset an engine by itself, we cannot
		 * cleanup a hanging persistent context without causing
-		 * colateral damage, and we should not pretend we can by
+		 * collateral damage, and we should not pretend we can by
		 * exposing the interface.
		 */
		if (!intel_has_reset_engine(to_gt(i915)))
@@ -879,6 +879,7 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
			       struct i915_gem_proto_context *pc,
			       struct drm_i915_gem_context_param *args)
 {
+	struct drm_i915_private *i915 = fpriv->i915;
	int ret = 0;
 
	switch (args->param) {
@@ -904,6 +905,13 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
			pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
		break;
 
+	case I915_CONTEXT_PARAM_LOW_LATENCY:
+		if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
+			pc->user_flags |= BIT(UCONTEXT_LOW_LATENCY);
+		else
+			ret = -EINVAL;
+		break;
+
	case I915_CONTEXT_PARAM_RECOVERABLE:
		if (args->size)
			ret = -EINVAL;
@@ -949,6 +957,7 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_RINGSIZE:
+	case I915_CONTEXT_PARAM_CONTEXT_IMAGE:
	default:
		ret = -EINVAL;
		break;
@@ -992,6 +1001,9 @@ static int intel_context_set_gem(struct intel_context *ce,
	if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
		ret = intel_context_reconfigure_sseu(ce, sseu);
 
+	if (test_bit(UCONTEXT_LOW_LATENCY, &ctx->user_flags))
+		__set_bit(CONTEXT_LOW_LATENCY, &ce->flags);
+
	return ret;
 }
 
@@ -1576,7 +1588,7 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
		 *
		 * However, if we cannot reset an engine by itself, we cannot
		 * cleanup a hanging persistent context without causing
-		 * colateral damage, and we should not pretend we can by
+		 * collateral damage, and we should not pretend we can by
		 * exposing the interface.
		 */
		if (!intel_has_reset_engine(to_gt(ctx->i915)))
@@ -1630,6 +1642,9 @@ i915_gem_create_context(struct drm_i915_private *i915,
	if (vm)
		ctx->vm = vm;
 
+	/* Assign early so intel_context_set_gem can access these flags */
+	ctx->user_flags = pc->user_flags;
+
	mutex_init(&ctx->engines_mutex);
	if (pc->num_user_engines >= 0) {
		i915_gem_context_set_user_engines(ctx);
@@ -1652,8 +1667,6 @@ i915_gem_create_context(struct drm_i915_private *i915,
	 * is no remap info, it will be a NOP.
	 */
	ctx->remap_slice = ALL_L3_SLICES(i915);
 
-	ctx->user_flags = pc->user_flags;
-
	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
 
@@ -2092,6 +2105,95 @@ static int get_protected(struct i915_gem_context *ctx,
	return 0;
 }
 
+static int set_context_image(struct i915_gem_context *ctx,
+			     struct drm_i915_gem_context_param *args)
+{
+	struct i915_gem_context_param_context_image user;
+	struct intel_context *ce;
+	struct file *shmem_state;
+	unsigned long lookup;
+	void *state;
+	int ret = 0;
+
+	if (!IS_ENABLED(CONFIG_DRM_I915_REPLAY_GPU_HANGS_API))
+		return -EINVAL;
+
+	if (!ctx->i915->params.enable_debug_only_api)
+		return -EINVAL;
+
+	if (args->size < sizeof(user))
+		return -EINVAL;
+
+	if (copy_from_user(&user, u64_to_user_ptr(args->value), sizeof(user)))
+		return -EFAULT;
+
+	if (user.mbz)
+		return -EINVAL;
+
+	if (user.flags & ~(I915_CONTEXT_IMAGE_FLAG_ENGINE_INDEX))
+		return -EINVAL;
+
+	lookup = 0;
+	if (user.flags & I915_CONTEXT_IMAGE_FLAG_ENGINE_INDEX)
+		lookup |= LOOKUP_USER_INDEX;
+
+	ce = lookup_user_engine(ctx, lookup, &user.engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	if (user.size < ce->engine->context_size) {
+		ret = -EINVAL;
+		goto out_ce;
+	}
+
+	if (drm_WARN_ON_ONCE(&ctx->i915->drm,
+			     test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
+		/*
+		 * This is racy but for a debug only API, if userspace is keen
+		 * to create and configure contexts, while simultaneously using
+		 * them from a second thread, let them suffer by potentially not
+		 * executing with the context image they just raced to apply.
+		 */
+		ret = -EBUSY;
+		goto out_ce;
+	}
+
+	state = kmalloc(ce->engine->context_size, GFP_KERNEL);
+	if (!state) {
+		ret = -ENOMEM;
+		goto out_ce;
+	}
+
+	if (copy_from_user(state, u64_to_user_ptr(user.image),
+			   ce->engine->context_size)) {
+		ret = -EFAULT;
+		goto out_state;
+	}
+
+	shmem_state = shmem_create_from_data(ce->engine->name,
+					     state, ce->engine->context_size);
+	if (IS_ERR(shmem_state)) {
+		ret = PTR_ERR(shmem_state);
+		goto out_state;
+	}
+
+	if (intel_context_set_own_state(ce)) {
+		ret = -EBUSY;
+		fput(shmem_state);
+		goto out_state;
+	}
+
+	ce->default_state = shmem_state;
+
+	args->size = sizeof(user);
+
+out_state:
+	kfree(state);
+out_ce:
+	intel_context_put(ce);
+	return ret;
+}
+
 static int ctx_setparam(struct drm_i915_file_private *fpriv,
			struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
@@ -2144,6 +2246,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
		ret = set_persistence(ctx, args);
		break;
 
+	case I915_CONTEXT_PARAM_CONTEXT_IMAGE:
+		ret = set_context_image(ctx, args);
+		break;
+
	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
@@ -2221,7 +2327,7 @@ finalize_create_context_locked(struct drm_i915_file_private *file_priv,
 
	/*
	 * One for the xarray and one for the caller. We need to grab
-	 * the reference *prior* to making the ctx visble to userspace
+	 * the reference *prior* to making the ctx visible to userspace
	 * in gem_context_register(), as at any point after that
	 * userspace can try to race us with another thread destroying
	 * the context under our feet.
@@ -2488,6 +2594,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_ENGINES:
	case I915_CONTEXT_PARAM_RINGSIZE:
+	case I915_CONTEXT_PARAM_CONTEXT_IMAGE:
	default:
		ret = -EINVAL;
		break;
@@ -2600,5 +2707,22 @@ int __init i915_gem_context_module_init(void)
	if (!slab_luts)
		return -ENOMEM;
 
+	if (IS_ENABLED(CONFIG_DRM_I915_REPLAY_GPU_HANGS_API)) {
+		pr_notice("**************************************************************\n");
+		pr_notice("**     NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE     **\n");
+		pr_notice("**                                                          **\n");
+		if (i915_modparams.enable_debug_only_api)
+			pr_notice("** i915.enable_debug_only_api is intended to be set        **\n");
+		else
+			pr_notice("** CONFIG_DRM_I915_REPLAY_GPU_HANGS_API builds are intended **\n");
+		pr_notice("** for specific userspace graphics stack developers only!  **\n");
+		pr_notice("**                                                          **\n");
+		pr_notice("** If you are seeing this message please report this to the **\n");
+		pr_notice("** provider of your kernel build.                           **\n");
+		pr_notice("**                                                          **\n");
+		pr_notice("**     NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE     **\n");
+		pr_notice("**************************************************************\n");
+	}
+
	return 0;
 }
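Note that I915_CONTEXT_PARAM_LOW_LATENCY is only handled in set_proto_ctx_param() above, so it must be supplied at context-creation time and is rejected with -EINVAL unless the GT uses GuC submission. A minimal userspace sketch, assuming an i915_drm.h uAPI header new enough to define the parameter (everything else below is standard i915 uAPI):

	#include <errno.h>
	#include <stdint.h>
	#include <sys/ioctl.h>

	#include <drm/i915_drm.h>

	/*
	 * Sketch only: create a GEM context with the low-latency hint.
	 * The kernel returns -EINVAL on platforms without GuC submission.
	 */
	static int create_low_latency_ctx(int drm_fd, uint32_t *ctx_id)
	{
		struct drm_i915_gem_context_create_ext_setparam ext = {
			.base.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
			.param = {
				.param = I915_CONTEXT_PARAM_LOW_LATENCY,
				.value = 1,
			},
		};
		struct drm_i915_gem_context_create_ext create = {
			.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
			.extensions = (uintptr_t)&ext,
		};

		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create))
			return -errno;

		*ctx_id = create.ctx_id;
		return 0;
	}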
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index e5b0f66ea1fe..6e682a6a0574 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2016 Intel Corporation
  */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 03bc7f9d191b..0267c924634b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2019 Intel Corporation
  */
@@ -245,9 +244,9 @@
	 * Execbuf uses the I915_EXEC_RING_MASK as an index into this
	 * array to select which HW context + engine to execute on. For
	 * the default array, the user_ring_map[] is used to translate
-	 * the legacy uABI onto the approprate index (e.g. both
+	 * the legacy uABI onto the appropriate index (e.g. both
	 * I915_EXEC_DEFAULT and I915_EXEC_RENDER select the same
-	 * context, and I915_EXEC_BSD is weird). For a use defined
+	 * context, and I915_EXEC_BSD is weird). For a user defined
	 * array, execbuf uses I915_EXEC_RING_MASK as a plain index.
	 *
	 * User defined by I915_CONTEXT_PARAM_ENGINE (when the
@@ -276,7 +275,7 @@
	 * @vm: unique address space (GTT)
	 *
	 * In full-ppgtt mode, each context has its own address space ensuring
-	 * complete seperation of one client from all others.
+	 * complete separation of one client from all others.
	 *
	 * In other modes, this is a NULL pointer with the expectation that
	 * the caller uses the shared global GTT.
@@ -338,6 +337,7 @@
 #define UCONTEXT_BANNABLE	2
 #define UCONTEXT_RECOVERABLE	3
 #define UCONTEXT_PERSISTENCE	4
+#define UCONTEXT_LOW_LATENCY	5
 
	/**
	 * @flags: small set of booleans
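The set_context_image() handler above consumes a struct i915_gem_context_param_context_image from userspace. A hedged sketch of how a debug tool might replay a saved context image onto the render engine, assuming the struct layout (engine, flags, size, mbz, image) from the matching uAPI header; it only succeeds on kernels built with CONFIG_DRM_I915_REPLAY_GPU_HANGS_API and booted with i915.enable_debug_only_api=1:

	#include <errno.h>
	#include <stdint.h>
	#include <sys/ioctl.h>

	#include <drm/i915_drm.h>

	/* Hypothetical helper: load a previously captured context image. */
	static int load_context_image(int drm_fd, uint32_t ctx_id,
				      const void *image, uint32_t image_size)
	{
		struct i915_gem_context_param_context_image arg = {
			.engine = {
				.engine_class = I915_ENGINE_CLASS_RENDER,
				.engine_instance = 0,
			},
			.size = image_size,		/* must cover engine->context_size */
			.image = (uintptr_t)image,
		};
		struct drm_i915_gem_context_param p = {
			.ctx_id = ctx_id,
			.param = I915_CONTEXT_PARAM_CONTEXT_IMAGE,
			.size = sizeof(arg),
			.value = (uintptr_t)&arg,
		};

		/* -EINVAL unless the debug-only API is built in and enabled. */
		return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p) ?
		       -errno : 0;
	}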
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 19156ba4b9ef..c3e6a325872d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -193,7 +193,7 @@ i915_gem_dumb_create(struct drm_file *file,
	args->pitch = ALIGN(args->width * cpp, 64);
 
	/* align stride to page size so that we can remap */
-	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
+	if (args->pitch > intel_plane_fb_max_stride(dev, format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 1df74f7aa3dc..05e440643aa2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright 2012 Red Hat Inc
  */
@@ -16,7 +15,7 @@
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
 
 I915_SELFTEST_DECLARE(static bool force_different_devices;)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 3770828f2eaf..7a0cc51923b3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2014-2016 Intel Corporation
  */
@@ -18,8 +17,6 @@
 #include "i915_gem_object_frontbuffer.h"
 #include "i915_vma.h"
 
-#define VTD_GUARD (168u * I915_GTT_PAGE_SIZE) /* 168 or tile-row PTE padding */
-
 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 {
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
@@ -276,7 +273,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
	 * For objects created by userspace through GEM_CREATE with pat_index
	 * set by set_pat extension, simply return 0 here without touching
	 * the cache setting, because such objects should have an immutable
-	 * cache setting by desgin and always managed by userspace.
+	 * cache setting by design and always managed by userspace.
	 */
	if (i915_gem_object_has_cache_level(obj, cache_level))
		return 0;
@@ -424,7 +421,7 @@ out:
 struct i915_vma *
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
-				     u32 alignment,
+				     u32 alignment, unsigned int guard,
				     const struct i915_gtt_view *view,
				     unsigned int flags)
 {
@@ -453,15 +450,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
		return ERR_PTR(ret);
 
	/* VT-d may overfetch before/after the vma, so pad with scratch */
-	if (intel_scanout_needs_vtd_wa(i915)) {
-		unsigned int guard = VTD_GUARD;
-
-		if (i915_gem_object_is_tiled(obj))
-			guard = max(guard,
-				    i915_gem_object_get_tile_row_size(obj));
-
-		flags |= PIN_OFFSET_GUARD | guard;
-	}
+	if (guard)
+		flags |= PIN_OFFSET_GUARD | (guard * I915_GTT_PAGE_SIZE);
 
	/*
	 * As the user may map the buffer once pinned in the display plane
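With VTD_GUARD removed from i915_gem_domain.c, computing the scratch padding becomes the caller's job: i915_gem_object_pin_to_display_plane() now takes a guard count in GTT pages and converts it back to bytes via I915_GTT_PAGE_SIZE. A hedged sketch of the caller-side computation this implies — the function name is hypothetical and the real helper lives in the display code, which may shape it differently:

	/*
	 * Sketch only: mirror the removed VTD_GUARD logic, returning the
	 * guard in GTT pages as the new parameter expects.
	 */
	static unsigned int scanout_guard_pages(struct drm_i915_gem_object *obj)
	{
		struct drm_i915_private *i915 = to_i915(obj->base.dev);
		unsigned int guard = 0;

		if (intel_scanout_needs_vtd_wa(i915)) {
			/* 168 PTEs of scratch, as the old VTD_GUARD encoded */
			guard = 168;

			/* tiled scanout wants at least one full tile row of padding */
			if (i915_gem_object_is_tiled(obj))
				guard = max_t(unsigned int, guard,
					      i915_gem_object_get_tile_row_size(obj) /
					      I915_GTT_PAGE_SIZE);
		}

		return guard;
	}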
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index d3a771afb083..ca7e9216934a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2008,2010 Intel Corporation
  */
@@ -12,8 +11,6 @@
 #include <drm/drm_auth.h>
 #include <drm/drm_syncobj.h>
 
-#include "display/intel_frontbuffer.h"
-
 #include "gem/i915_gem_ioctls.h"
 #include "gt/intel_context.h"
 #include "gt/intel_gpu_commands.h"
@@ -305,7 +302,7 @@ struct i915_execbuffer {
	struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */
 
	/**
-	 * Indicate either the size of the hastable used to resolve
+	 * Indicate either the size of the hashtable used to resolve
	 * relocation handles, or if negative that we are using a direct
	 * index into the execobj[].
	 */
@@ -340,7 +337,7 @@ static int eb_create(struct i915_execbuffer *eb)
		 * Without a 1:1 association between relocation handles and
		 * the execobject[] index, we instead create a hashtable.
		 * We size it dynamically based on available memory, starting
-		 * first with 1:1 assocative hash and scaling back until
+		 * first with 1:1 associative hash and scaling back until
		 * the allocation succeeds.
		 *
		 * Later on we use a positive lut_size to indicate we are
@@ -827,7 +824,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
	struct i915_gem_context *ctx;
 
	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
-	if (unlikely(IS_ERR(ctx)))
+	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
 
	eb->gem_context = ctx;
@@ -917,7 +914,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
		 */
		if (i915_gem_context_uses_protected_content(eb->gem_context) &&
		    i915_gem_object_is_protected(obj)) {
-			err = intel_pxp_key_check(eb->i915->pxp, obj, true);
+			err = intel_pxp_key_check(intel_bo_to_drm_bo(obj), true);
			if (err) {
				i915_gem_object_put(obj);
				return ERR_PTR(err);
@@ -1533,7 +1530,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
		u64_to_user_ptr(entry->relocs_ptr);
	unsigned long remain = entry->relocation_count;
 
-	if (unlikely(remain > N_RELOC(ULONG_MAX)))
+	if (unlikely(remain > N_RELOC(INT_MAX)))
		return -EINVAL;
 
	/*
@@ -1641,7 +1638,7 @@ static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
	if (size == 0)
		return 0;
 
-	if (size > N_RELOC(ULONG_MAX))
+	if (size > N_RELOC(INT_MAX))
		return -EINVAL;
 
	addr = u64_to_user_ptr(entry->relocs_ptr);
@@ -2457,7 +2454,7 @@ static int eb_submit(struct i915_execbuffer *eb)
  * The engine index is returned.
  */
 static unsigned int
-gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
+gen8_dispatch_bsd_engine(struct drm_i915_private *i915,
			 struct drm_file *file)
 {
	struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -2465,7 +2462,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine =
-			get_random_u32_below(dev_priv->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO]);
+			get_random_u32_below(i915->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO]);
 
	return file_priv->bsd_engine;
 }
@@ -2545,7 +2542,7 @@ static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce,
 
		/*
		 * Error path, cannot use intel_context_timeline_lock as
-		 * that is user interruptable and this clean up step
+		 * that is user interruptible and this clean up step
		 * must be done.
		 */
		mutex_lock(&ce->timeline->mutex);
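On the N_RELOC(ULONG_MAX) to N_RELOC(INT_MAX) change: N_RELOC(x) is defined in this file as (x) / sizeof(struct drm_i915_gem_relocation_entry), so the new bound keeps the worst-case byte size of the relocation array representable in an int. A self-contained toy (the 32-byte entry mirrors drm_i915_gem_relocation_entry; the count is an assumed example value):

	#include <limits.h>
	#include <stdio.h>

	/* 32 bytes, like struct drm_i915_gem_relocation_entry */
	struct reloc_entry { unsigned long long v[4]; };

	#define N_RELOC(x) ((x) / sizeof(struct reloc_entry))

	int main(void)
	{
		unsigned long remain = 200000000UL;	/* user-supplied count */

		/* INT_MAX / 32 is ~67 million entries, so this is rejected
		 * up front instead of feeding an over-large byte count into
		 * later arithmetic. */
		if (remain > N_RELOC(INT_MAX)) {
			fprintf(stderr, "relocation_count too large\n");
			return 1;
		}
		printf("array fits: %zu bytes\n",
		       remain * sizeof(struct reloc_entry));
		return 0;
	}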
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index ea7561ae6e13..232b984f60b6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2014-2016 Intel Corporation
  */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
index 28d6526e32ab..8044d34707b6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2019 Intel Corporation
  */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 3198b64ad7db..f566191d843b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -48,31 +48,7 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
	    i915_gem_object_evictable(obj))
		assert_object_held(obj);
 #endif
-	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
-		      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
-}
-
-/**
- * __i915_gem_object_is_lmem - Whether the object is resident in
- * lmem while in the fence signaling critical path.
- * @obj: The object to check.
- *
- * This function is intended to be called from within the fence signaling
- * path where the fence, or a pin, keeps the object from being migrated. For
- * example during gpu reset or similar.
- *
- * Return: Whether the object is resident in lmem.
- */
-bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
-{
-	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
-
-#ifdef CONFIG_LOCKDEP
-	GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) &&
-		    i915_gem_object_evictable(obj));
-#endif
-	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
-		      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
+	return mr && intel_memory_type_is_local(mr->type);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 5a7a14e85c3f..ecd8f1a633a1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -19,8 +19,6 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
 
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
-bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
-
 struct drm_i915_gem_object *
 i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
				      const void *data, size_t size);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index a2195e28b625..f6d37dff320d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2014-2016 Intel Corporation
  */
@@ -164,6 +163,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
  * 4 - Support multiple fault handlers per object depending on object's
  *     backing storage (a.k.a. MMAP_OFFSET).
  *
+ * 5 - Support multiple partial mmaps(mmap part of BO + unmap a offset, multiple
+ *     times with different size and offset).
+ *
  * Restrictions:
  *
  *  * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -191,7 +193,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
  */
 int i915_gem_mmap_gtt_version(void)
 {
-	return 4;
+	return 5;
 }
 
 static inline struct i915_gtt_view
@@ -252,6 +254,7 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
+	unsigned long obj_offset;
	resource_size_t iomap;
	int err;
 
@@ -273,10 +276,11 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
		iomap -= obj->mm.region->region.start;
	}
 
+	obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
	/* PTEs are revoked in obj->ops->put_pages() */
	err = remap_io_sg(area,
			  area->vm_start, area->vm_end - area->vm_start,
-			  obj->mm.pages->sgl, iomap);
+			  obj->mm.pages->sgl, obj_offset, iomap);
 
	if (area->vm_flags & VM_WRITE) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
@@ -290,6 +294,47 @@ out:
	return i915_error_to_vmf_fault(err);
 }
 
+static void set_address_limits(struct vm_area_struct *area,
+			       struct i915_vma *vma,
+			       unsigned long obj_offset,
+			       resource_size_t gmadr_start,
+			       unsigned long *start_vaddr,
+			       unsigned long *end_vaddr,
+			       unsigned long *pfn)
+{
+	unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
+	long start, end; /* memory boundaries */
+
+	/*
+	 * Let's move into the ">> PAGE_SHIFT"
+	 * domain to be sure not to lose bits
+	 */
+	vm_start = area->vm_start >> PAGE_SHIFT;
+	vm_end = area->vm_end >> PAGE_SHIFT;
+	vma_size = vma->size >> PAGE_SHIFT;
+
+	/*
+	 * Calculate the memory boundaries by considering the offset
+	 * provided by the user during memory mapping and the offset
+	 * provided for the partial mapping.
+	 */
+	start = vm_start;
+	start -= obj_offset;
+	start += vma->gtt_view.partial.offset;
+	end = start + vma_size;
+
+	start = max_t(long, start, vm_start);
+	end = min_t(long, end, vm_end);
+
+	/* Let's move back into the "<< PAGE_SHIFT" domain */
+	*start_vaddr = (unsigned long)start << PAGE_SHIFT;
+	*end_vaddr = (unsigned long)end << PAGE_SHIFT;
+
+	*pfn = (gmadr_start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
+	*pfn += (*start_vaddr - area->vm_start) >> PAGE_SHIFT;
+	*pfn += obj_offset - vma->gtt_view.partial.offset;
+}
+
 static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 {
 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
@@ -302,14 +347,18 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	bool write = area->vm_flags & VM_WRITE;
	struct i915_gem_ww_ctx ww;
+	unsigned long obj_offset;
+	unsigned long start, end; /* memory boundaries */
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
+	unsigned long pfn;
	int srcu;
	int ret;
 
-	/* We don't use vmf->pgoff since that has the fake offset */
+	obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+	page_offset += obj_offset;
 
	trace_i915_gem_object_fault(obj, page_offset, true, write);
 
@@ -402,12 +451,16 @@ retry:
	if (ret)
		goto err_unpin;
 
+	/*
+	 * Dump all the necessary parameters in this function to perform the
+	 * arithmetic calculation for the virtual address start and end and
+	 * the PFN (Page Frame Number).
+	 */
+	set_address_limits(area, vma, obj_offset, ggtt->gmadr.start,
+			   &start, &end, &pfn);
+
	/* Finally, remap it using the new GTT offset */
-	ret = remap_io_mapping(area,
-			       area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
-			       (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
-			       min_t(u64, vma->size, area->vm_end - area->vm_start),
-			       &ggtt->iomap);
+	ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
	if (ret)
		goto err_fence;
@@ -1030,9 +1083,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 
	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
-	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
-						  vma->vm_pgoff,
-						  vma_pages(vma));
+	node = drm_vma_offset_lookup_locked(dev->vma_offset_manager,
+					    vma->vm_pgoff,
+					    vma_pages(vma));
	if (node && drm_vma_node_is_allowed(node, priv)) {
		/*
		 * Skip 0-refcnted objects as it is in the process of being
@@ -1084,6 +1137,8 @@ int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
		mmo = mmap_offset_attach(obj, mmap_type, NULL);
		if (IS_ERR(mmo))
			return PTR_ERR(mmo);
+
+		vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node);
	}
 
	/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
index 196417fd0f5c..946fb9825eb3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2019 Intel Corporation
  */
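The clamping in set_address_limits() is easier to see with concrete numbers. A stand-alone model of the same page-unit arithmetic, with all values assumed for illustration: userspace maps six pages of a BO starting at BO page 4, and the kernel pins a four-page partial GGTT view starting at BO page 6:

	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		unsigned long vm_start = 0x7f0000000000UL >> PAGE_SHIFT; /* mmap start */
		unsigned long vm_end = vm_start + 6;	/* six pages mapped */
		unsigned long obj_offset = 4;		/* mmap offset into the BO */
		unsigned long partial_offset = 6;	/* first BO page of the view */
		unsigned long vma_size = 4;		/* pages covered by the view */
		long start, end;

		/* Same arithmetic as set_address_limits(), in page units */
		start = (long)vm_start - (long)obj_offset + (long)partial_offset;
		end = start + (long)vma_size;

		/* Clamp to what userspace actually mapped */
		if (start < (long)vm_start)
			start = vm_start;
		if (end > (long)vm_end)
			end = vm_end;

		/* Prints "fault range: pages [2, 6) of the user VMA": the view
		 * covers BO pages 6..10, which are user pages 2..6. */
		printf("fault range: pages [%ld, %ld) of the user VMA\n",
		       start - (long)vm_start, end - (long)vm_start);
		return 0;
	}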
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 58e6c680fe0d..1f38e367c60b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
 /*
  * Copyright © 2017 Intel Corporation
  *
@@ -873,6 +874,30 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
	return lmem_placement;
 }
 
+static int i915_gem_vmap_object(struct drm_gem_object *gem_obj,
+				struct iosys_map *map)
+{
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+	void *vaddr;
+
+	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (IS_ERR(vaddr))
+		return PTR_ERR(vaddr);
+
+	iosys_map_set_vaddr(map, vaddr);
+
+	return 0;
+}
+
+static void i915_gem_vunmap_object(struct drm_gem_object *gem_obj,
+				   struct iosys_map *map)
+{
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+
+	i915_gem_object_flush_map(obj);
+	i915_gem_object_unpin_map(obj);
+}
+
 void i915_gem_init__objects(struct drm_i915_private *i915)
 {
	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
@@ -896,6 +921,8 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = {
	.free = i915_gem_free_object,
	.close = i915_gem_close_object,
	.export = i915_gem_prime_export,
+	.vmap = i915_gem_vmap_object,
+	.vunmap = i915_gem_vunmap_object,
 };
 
 /**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 3560a062d287..c34f41605b46 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2016 Intel Corporation
  */
@@ -89,7 +88,6 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
  * @handle: userspace handle
  *
  * Returns:
- *
  * A pointer to the object named by the handle if such exists on @filp, NULL
  * otherwise. This object is only valid whilst under the RCU read lock, and
  * note carefully the object may be in the process of being destroyed.
@@ -777,7 +775,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
 struct i915_vma * __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
-				     u32 alignment,
+				     u32 alignment, unsigned int guard,
				     const struct i915_gtt_view *view,
				     unsigned int flags);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 0c5cdab278b6..64600aa8227f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2016 Intel Corporation
  */
@@ -386,7 +385,7 @@ struct drm_i915_gem_object {
	 * and kernel mode driver for caching policy control after GEN12.
	 * In the meantime platform specific tables are created to translate
	 * i915_cache_level into pat index, for more details check the macros
-	 * defined i915/i915_pci.c, e.g. PVC_CACHELEVEL.
+	 * defined i915/i915_pci.c, e.g. TGL_CACHELEVEL.
	 * For backward compatibility, this field contains values exactly match
	 * the entries of enum i915_cache_level for pre-GEN12 platforms (See
	 * LEGACY_CACHELEVEL), so that the PTE encode functions for these
@@ -535,7 +534,7 @@ struct drm_i915_gem_object {
	 * I915_CACHE_NONE. The only exception is userptr objects, where we
	 * instead force I915_CACHE_LLC, but we also don't allow userspace to
	 * ever change the @cache_level for such objects. Another special case
-	 * is dma-buf, which doesn't rely on @cache_dirty,  but there we
+	 * is dma-buf, which doesn't rely on @cache_dirty, but there we
	 * always do a forced flush when acquiring the pages, if there is a
	 * chance that the pages can be read directly from main memory with
	 * the GPU.
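Wiring .vmap/.vunmap into i915_gem_object_funcs above means generic DRM helpers (most notably the dma-buf vmap path for exported objects) can now kernel-map i915 GEM objects without knowing about i915_gem_object_pin_map(). A hedged in-kernel sketch of a hypothetical consumer; locking context and error handling are deliberately minimal:

	#include <drm/drm_gem.h>
	#include <linux/iosys-map.h>

	/* Sketch only: read the first dword of a GEM object through the
	 * generic vmap helpers, which dispatch to the .vmap/.vunmap hooks
	 * added above. */
	static int peek_first_dword(struct drm_gem_object *obj, u32 *out)
	{
		struct iosys_map map;
		int err;

		err = drm_gem_vmap_unlocked(obj, &map);	/* invokes .vmap */
		if (err)
			return err;

		*out = iosys_map_rd(&map, 0, u32);

		drm_gem_vunmap_unlocked(obj, &map);	/* invokes .vunmap */
		return 0;
	}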
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 0ba955611dfb..7f83f8bdc8fb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -1,10 +1,10 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2014-2016 Intel Corporation
  */
 
 #include <drm/drm_cache.h>
+#include <linux/vmalloc.h>
 
 #include "gt/intel_gt.h"
 #include "gt/intel_tlb.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index ef85c6dc9fd5..f9e7cab140f8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2014-2016 Intel Corporation
  */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 3b27218aabe2..f0857c5c96df 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2019 Intel Corporation
  */
@@ -13,7 +12,7 @@
 #include "i915_driver.h"
 #include "i915_drv.h"
 
-#if defined(CONFIG_X86)
+#if IS_ENABLED(CONFIG_X86)
 #include <asm/smp.h>
 #else
 #define wbinvd_on_all_cpus() \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
index bedf1e95941a..bd5bd2c5e7f9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2019 Intel Corporation
  */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index b09b74a2448b..636768d0f57e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -82,7 +82,7 @@ __i915_gem_object_create_region(struct intel_memory_region *mem,
 
	/*
	 * Anything smaller than the min_page_size can't be freely inserted into
-	 * the GTT, due to alignemnt restrictions. For such special objects,
+	 * the GTT, due to alignment restrictions. For such special objects,
	 * make sure we force memcpy based suspend-resume. In the future we can
	 * revisit this, either by allowing special mis-aligned objects in the
	 * migration path, or by mapping all of LMEM upfront using cheap 1G
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 38b72d86560f..19a3eb82dc6a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2014-2016 Intel Corporation
  */
@@ -209,8 +208,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
	struct address_space *mapping = obj->base.filp->f_mapping;
	unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
	struct sg_table *st;
-	struct sgt_iter sgt_iter;
-	struct page *page;
	int ret;
 
	/*
@@ -239,9 +236,7 @@ rebuild_st:
	 * for PAGE_SIZE chunks instead may be helpful.
	 */
	if (max_segment > PAGE_SIZE) {
-		for_each_sgt_page(page, sgt_iter, st)
-			put_page(page);
-		sg_free_table(st);
+		shmem_sg_free_table(st, mapping, false, false);
		kfree(st);
 
		max_segment = PAGE_SIZE;
@@ -309,36 +304,20 @@ void __shmem_writeback(size_t size, struct address_space *mapping)
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
-	unsigned long i;
+	struct folio *folio = NULL;
+	int error = 0;
 
	/*
	 * Leave mmapings intact (GTT will have been revoked on unbinding,
-	 * leaving only CPU mmapings around) and add those pages to the LRU
+	 * leaving only CPU mmapings around) and add those folios to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */
-
-	/* Begin writeback on each dirty page */
-	for (i = 0; i < size >> PAGE_SHIFT; i++) {
-		struct page *page;
-
-		page = find_lock_page(mapping, i);
-		if (!page)
-			continue;
-
-		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
-			int ret;
-
-			SetPageReclaim(page);
-			ret = mapping->a_ops->writepage(page, &wbc);
-			if (!PageWriteback(page))
-				ClearPageReclaim(page);
-			if (!ret)
-				goto put;
-		}
-		unlock_page(page);
-put:
-		put_page(page);
+	while ((folio = writeback_iter(mapping, &wbc, folio, &error))) {
+		if (folio_mapped(folio))
+			folio_redirty_for_writepage(&wbc, folio);
+		else
+			error = shmem_writeout(folio, &wbc);
	}
 }
@@ -424,7 +403,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
	struct address_space *mapping = obj->base.filp->f_mapping;
	const struct address_space_operations *aops = mapping->a_ops;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
-	u64 remain, offset;
+	u64 remain;
+	loff_t pos;
	unsigned int pg;
 
	/* Caller already validated user args */
@@ -457,12 +437,12 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
	 */
	remain = arg->size;
-	offset = arg->offset;
-	pg = offset_in_page(offset);
+	pos = arg->offset;
+	pg = offset_in_page(pos);
 
	do {
		unsigned int len, unwritten;
-		struct page *page;
+		struct folio *folio;
		void *data, *vaddr;
		int err;
		char __maybe_unused c;
@@ -480,21 +460,19 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
		if (err)
			return err;
 
-		err = aops->write_begin(obj->base.filp, mapping, offset, len,
-					&page, &data);
+		err = aops->write_begin(obj->base.filp, mapping, pos, len,
+					&folio, &data);
		if (err < 0)
			return err;
 
-		vaddr = kmap_local_page(page);
+		vaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));
		pagefault_disable();
-		unwritten = __copy_from_user_inatomic(vaddr + pg,
-						      user_data,
-						      len);
+		unwritten = __copy_from_user_inatomic(vaddr, user_data, len);
		pagefault_enable();
		kunmap_local(vaddr);
 
-		err = aops->write_end(obj->base.filp, mapping, offset, len,
-				      len - unwritten, page, data);
+		err = aops->write_end(obj->base.filp, mapping, pos, len,
+				      len - unwritten, folio, data);
		if (err < 0)
			return err;
 
@@ -504,7 +482,7 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
 
		remain -= len;
		user_data += len;
-		offset += len;
+		pos += len;
		pg = 0;
	} while (remain);
@@ -654,17 +632,17 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915,
 
 /* Allocate a new GEM object and fill it with the supplied data */
 struct drm_i915_gem_object *
-i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
+i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size)
 {
	struct drm_i915_gem_object *obj;
	struct file *file;
	const struct address_space_operations *aops;
-	resource_size_t offset;
+	loff_t pos;
	int err;
 
-	GEM_WARN_ON(IS_DGFX(dev_priv));
-	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
+	GEM_WARN_ON(IS_DGFX(i915));
+	obj = i915_gem_object_create_shmem(i915, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;
 
@@ -672,29 +650,27 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
	file = obj->base.filp;
	aops = file->f_mapping->a_ops;
-	offset = 0;
+	pos = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
-		struct page *page;
-		void *pgdata, *vaddr;
+		struct folio *folio;
+		void *fsdata;
 
-		err = aops->write_begin(file, file->f_mapping, offset, len,
-					&page, &pgdata);
+		err = aops->write_begin(file, file->f_mapping, pos, len,
+					&folio, &fsdata);
		if (err < 0)
			goto fail;
 
-		vaddr = kmap(page);
-		memcpy(vaddr, data, len);
-		kunmap(page);
+		memcpy_to_folio(folio, offset_in_folio(folio, pos), data, len);
 
-		err = aops->write_end(file, file->f_mapping, offset, len, len,
-				      page, pgdata);
+		err = aops->write_end(file, file->f_mapping, pos, len, len,
+				      folio, fsdata);
		if (err < 0)
			goto fail;
 
		size -= len;
		data += len;
-		offset += len;
+		pos += len;
	} while (size);
 
	return obj;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index d166052eb2ce..b81e67504bbe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2008-2015 Intel Corporation
  */
@@ -25,7 +24,7 @@ static bool swap_available(void)
 
 static bool can_release_pages(struct drm_i915_gem_object *obj)
 {
-	/* Consider only shrinkable ojects. */
+	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;
 
@@ -117,7 +116,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
		},
		{ NULL, 0 },
	}, *phase;
-	intel_wakeref_t wakeref = 0;
+	intel_wakeref_t wakeref = NULL;
	unsigned long count = 0;
	unsigned long scanned = 0;
	int err = 0, i = 0;
@@ -261,7 +260,7 @@ skip:
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @i915: i915 device
 *
- * This is a simple wraper around i915_gem_shrink() to aggressively shrink all
+ * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index ad6dd7f3259b..3380151edfc1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2008-2012 Intel Corporation
  */
@@ -8,7 +7,7 @@
 #include <linux/mutex.h>
 
 #include <drm/drm_mm.h>
-#include <drm/i915_drm.h>
+#include <drm/intel/i915_drm.h>
 
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
@@ -457,7 +456,7 @@ static int init_reserved_stolen(struct drm_i915_private *i915)
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 8) {
-		if (IS_LP(i915))
+		if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915) || IS_GEMINILAKE(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
@@ -936,8 +935,12 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
	} else {
		/* Use DSM base address instead for stolen memory */
		dsm_base = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
-		if (WARN_ON(lmem_size < dsm_base))
-			return ERR_PTR(-ENODEV);
+		if (lmem_size < dsm_base) {
+			drm_dbg(&i915->drm,
+				"Disabling stolen memory support due to OOB placement: lmem_size = %pa vs dsm_base = %pa\n",
+				&lmem_size, &dsm_base);
+			return NULL;
+		}
		dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
	}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index 258381d1c054..dfe0db8bb1b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -14,14 +14,14 @@ struct drm_i915_gem_object;
 
 #define i915_stolen_fb drm_mm_node
 
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment);
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start,
					 u64 end);
-void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node);
 struct intel_memory_region *
 i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
@@ -31,7 +31,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance);
 
 struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size);
 
 bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
index af85d0c28168..8814cbcde5b5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2014-2016 Intel Corporation
  */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index a049ca0b7980..5a296ba3758a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2008 Intel Corporation
  */
@@ -39,7 +38,7 @@
  * Since neither of this applies for new tiling layouts on modern platforms like
  * W, Ys and Yf tiling GEM only allows object tiling to be set to X or Y tiled.
  * Anything else can be handled in userspace entirely without the kernel's
- * invovlement.
+ * involvement.
  */
 
 /**
@@ -343,12 +342,12 @@ int
 i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_set_tiling *args = data;
	struct drm_i915_gem_object *obj;
	int err;
 
-	if (!to_gt(dev_priv)->ggtt->num_fences)
+	if (!to_gt(i915)->ggtt->num_fences)
		return -EOPNOTSUPP;
 
	obj = i915_gem_object_lookup(file, args->handle);
@@ -374,9 +373,9 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
-			args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
+			args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_x;
		else
-			args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
+			args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_y;
 
		/* Hide bit 17 swizzling from the user. This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
@@ -427,11 +426,11 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
 {
	struct drm_i915_gem_get_tiling *args = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_object *obj;
	int err = -ENOENT;
 
-	if (!to_gt(dev_priv)->ggtt->num_fences)
+	if (!to_gt(i915)->ggtt->num_fences)
		return -EOPNOTSUPP;
 
	rcu_read_lock();
@@ -447,10 +446,10 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
 
	switch (args->tiling_mode) {
	case I915_TILING_X:
-		args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
+		args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_x;
		break;
	case I915_TILING_Y:
-		args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
+		args->swizzle_mode = to_gt(i915)->ggtt->bit_6_swizzle_y;
		break;
	default:
	case I915_TILING_NONE:
@@ -459,7 +458,7 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
	}
 
	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
-	if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
+	if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
		args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
	else
		args->phys_swizzle_mode = args->swizzle_mode;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index e6f177183c0f..1f4814968868 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -165,7 +165,6 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
	i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
				   obj->mm.region, &places[0], obj->bo_offset,
				   obj->base.size, flags);
-	places[0].flags |= TTM_PL_FLAG_DESIRED;
 
	/* Cache this on object? */
	for (i = 0; i < num_allowed; ++i) {
@@ -779,13 +778,16 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
		.interruptible = true,
		.no_wait_gpu = false,
	};
-	int real_num_busy;
+	struct ttm_placement initial_placement;
+	struct ttm_place initial_place;
	int ret;
 
	/* First try only the requested placement. No eviction. */
-	real_num_busy = placement->num_placement;
-	placement->num_placement = 1;
-	ret = ttm_bo_validate(bo, placement, &ctx);
+	initial_placement.num_placement = 1;
+	memcpy(&initial_place, placement->placement, sizeof(struct ttm_place));
+	initial_place.flags |= TTM_PL_FLAG_DESIRED;
+	initial_placement.placement = &initial_place;
+	ret = ttm_bo_validate(bo, &initial_placement, &ctx);
	if (ret) {
		ret = i915_ttm_err_to_gem(ret);
		/*
@@ -800,14 +802,13 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
		 * If the initial attempt fails, allow all accepted placements,
		 * evicting if necessary.
		 */
-		placement->num_placement = real_num_busy;
		ret = ttm_bo_validate(bo, placement, &ctx);
		if (ret)
			return i915_ttm_err_to_gem(ret);
	}
 
	if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
-		ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+		ret = ttm_bo_populate(bo, &ctx);
		if (ret)
			return ret;
 
@@ -993,7 +994,7 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
	 * If we need to place an LMEM resource which doesn't need CPU
	 * access then we should try not to victimize mappable objects
	 * first, since we likely end up stealing more of the mappable
-	 * portion. And likewise when we try to find space for a mappble
+	 * portion. And likewise when we try to find space for a mappable
	 * object, we know not to ever victimize objects that don't
	 * occupy any mappable pages.
	 */
@@ -1037,7 +1038,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
	struct ttm_buffer_object *bo = area->vm_private_data;
	struct drm_device *dev = bo->base.dev;
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
-	intel_wakeref_t wakeref = 0;
+	intel_wakeref_t wakeref = NULL;
	vm_fault_t ret;
	int idx;
 
@@ -1130,7 +1131,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
		GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(bo->resource));
	}
 
-	if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+	if (wakeref && CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND != 0)
		intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
 
@@ -1194,7 +1195,7 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
 static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
 {
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
-	intel_wakeref_t wakeref = 0;
+	intel_wakeref_t wakeref = NULL;
 
	assert_object_held_shared(obj);
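The vm_fault_ttm() change from `wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND` to `wakeref && CONFIG_... != 0` is more than cosmetic: intel_wakeref_t is now initialised from NULL, i.e. treated as an opaque handle rather than an integer, and bitwise-ANDing a handle with a millisecond Kconfig value tested arbitrary overlapping bits instead of "holding a wakeref and autosuspend enabled". A toy illustration with assumed values:

	#include <stdio.h>

	int main(void)
	{
		unsigned long wakeref = 0x1000;	/* opaque non-zero cookie */
		int autosuspend_ms = 250;	/* Kconfig value, 0xfa */

		/* 0x1000 & 0xfa == 0: the old test silently skips the
		 * autosuspend path despite holding a wakeref. */
		printf("old test: %lu\n", wakeref & autosuspend_ms);

		/* The new test asks the intended question. */
		printf("new test: %d\n", wakeref && autosuspend_ms != 0);
		return 0;
	}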
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 7078af2f8f79..2f6b33edb9c9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -155,7 +155,7 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
  * @bo: The ttm buffer object.
  *
  * This function prepares an object for move by removing all GPU bindings,
- * removing all CPU mapings and finally releasing the pages sg-table.
+ * removing all CPU mappings and finally releasing the pages sg-table.
  *
  * Return: 0 if successful, negative error code on error.
  */
@@ -603,7 +603,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
	 * sequence, where at the end we can do the move for real.
	 *
	 * The special case here is when the dst_mem is TTM_PL_SYSTEM,
-	 * which doens't require any kind of move, so it should be safe
+	 * which doesn't require any kind of move, so it should be safe
	 * to skip all the below and call ttm_bo_move_null() here, where
	 * the caller in __i915_ttm_get_pages() will take care of the
	 * rest, since we should have a valid ttm_tt.
@@ -624,7 +624,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
 
	/* Populate ttm with pages if needed. Typically system memory. */
	if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
-		ret = ttm_tt_populate(bo->bdev, ttm, ctx);
+		ret = ttm_bo_populate(bo, ctx);
		if (ret)
			return ret;
	}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index ad649523d5e0..61596cecce4d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -90,7 +90,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
		goto out_no_lock;
 
	backup_bo = i915_gem_to_ttm(backup);
-	err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+	err = ttm_bo_populate(backup_bo, &ctx);
	if (err)
		goto out_no_populate;
 
@@ -189,7 +189,7 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
	if (!backup_bo->resource)
		err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
	if (!err)
-		err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+		err = ttm_bo_populate(backup_bo, &ctx);
	if (!err) {
		err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
					    false);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 61abfb505766..307a18eede72 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2012-2014 Intel Corporation
  *
- * Based on amdgpu_mn, which bears the following notice: 
+ * Based on amdgpu_mn, which bears the following notice:
  *
  * Copyright 2014 Advanced Micro Devices, Inc.
  * All Rights Reserved.
@@ -463,13 +462,13 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
		       struct drm_file *file)
 {
	static struct lock_class_key __maybe_unused lock_class;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object __maybe_unused *obj;
	int __maybe_unused ret;
	u32 __maybe_unused handle;
 
-	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
+	if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
@@ -501,7 +500,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
-		if (!to_gt(dev_priv)->vm->has_read_only)
+		if (!to_gt(i915)->vm->has_read_only)
			return -ENODEV;
	}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index d4b918fb11ce..7127e90c1a8f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2016 Intel Corporation
  */
@@ -266,7 +265,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
			args->timeout_ns = 0;
 
-		/* Asked to wait beyond the jiffie/scheduler precision? */
+		/* Asked to wait beyond the jiffy/scheduler precision? */
		if (ret == -ETIME && args->timeout_ns)
			ret = -EAGAIN;
	}
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index 46b9a17d6abc..65d84a93c525 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2017 Intel Corporation
  */
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.h b/drivers/gpu/drm/i915/gem/i915_gemfs.h
index 5d835e44c4f6..16d4333c9a4e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.h
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2017 Intel Corporation
  */
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 3ff3d8889c6c..bd08605a1611 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -713,7 +713,7 @@ static int igt_ppgtt_huge_fill(void *arg)
 {
	struct drm_i915_private *i915 = arg;
	unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
-	bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50);
+	bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55);
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	unsigned long max_pages;
@@ -857,7 +857,7 @@ out:
 static int igt_ppgtt_64K(void *arg)
 {
	struct drm_i915_private *i915 = arg;
-	bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50);
+	bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55);
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
@@ -1781,7 +1781,7 @@ static int igt_tmpfs_fallback(void *arg)
 
	/*
	 * Make sure that we don't burst into a ball of flames upon falling back
-	 * to tmpfs, which we rely on if on the off-chance we encouter a failure
+	 * to tmpfs, which we rely on if on the off-chance we encounter a failure
	 * when setting up gemfs.
	 */
@@ -1969,19 +1969,19 @@ int i915_gem_huge_page_mock_selftests(void)
		SUBTEST(igt_mock_memory_region_huge_pages),
		SUBTEST(igt_mock_ppgtt_misaligned_dma),
	};
-	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *i915;
	struct i915_ppgtt *ppgtt;
	int err;
 
-	dev_priv = mock_gem_device();
-	if (!dev_priv)
+	i915 = mock_gem_device();
+	if (!i915)
		return -ENOMEM;
 
	/* Pretend to be a device which supports the 48b PPGTT */
-	RUNTIME_INFO(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
-	RUNTIME_INFO(dev_priv)->ppgtt_size = 48;
+	RUNTIME_INFO(i915)->ppgtt_type = INTEL_PPGTT_FULL;
+	RUNTIME_INFO(i915)->ppgtt_size = 48;
 
-	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
+	ppgtt = i915_ppgtt_create(to_gt(i915), 0);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
@@ -2005,7 +2005,7 @@ int i915_gem_huge_page_mock_selftests(void)
 out_put:
	i915_vm_put(&ppgtt->vm);
 out_unlock:
-	mock_destroy_device(dev_priv);
+	mock_destroy_device(i915);
 
	return err;
 }
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 10a7847f1b04..bac15196b4d2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -117,7 +117,7 @@ static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
	if (gen < 12)
		return true;
 
-	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
		return false;
 
	return HAS_DISPLAY(i915);
@@ -166,7 +166,7 @@ static int prepare_blit(const struct tiled_blits *t,
	src_pitch = t->width; /* in dwords */
	if (src->tiling == CLIENT_TILING_Y) {
		src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
-		if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50))
+		if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))
			src_4t = XY_FAST_COPY_BLT_D1_SRC_TILE4;
	} else if (src->tiling == CLIENT_TILING_X) {
		src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(TILE_X);
@@ -177,7 +177,7 @@ static int prepare_blit(const struct tiled_blits *t,
	dst_pitch = t->width; /* in dwords */
	if (dst->tiling == CLIENT_TILING_Y) {
		dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
-		if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50))
+		if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))
			dst_4t = XY_FAST_COPY_BLT_D1_DST_TILE4;
	} else if (dst->tiling == CLIENT_TILING_X) {
		dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(TILE_X);
@@ -365,7 +365,7 @@ static u64 tiled_offset(const struct intel_gt *gt,
		v += x;
 
		swizzle = gt->ggtt->bit_6_swizzle_x;
-	} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
+	} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55)) {
		/* Y-major tiling layout is Tile4 for Xe_HP and beyond */
		v = linear_x_y_to_ftiled_pos(x_pos, y_pos, stride, 32);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 89d4dc8b60c6..eb0158e43417 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -369,7 +369,7 @@ static int live_parallel_switch(void *arg)
		if (!data[n].ce[0])
			continue;
 
-		worker = kthread_create_worker(0, "igt/parallel:%s",
+		worker = kthread_run_worker(0, "igt/parallel:%s",
					       data[n].ce[0]->engine->name);
		if (IS_ERR(worker)) {
			err = PTR_ERR(worker);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index d684a70f2c04..2fda549dd82d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -7,6 +7,7 @@
 #include "i915_drv.h"
 #include "i915_selftest.h"
 #include "gem/i915_gem_context.h"
+#include "gt/intel_gt.h"
 
 #include "mock_context.h"
 #include "mock_dmabuf.h"
@@ -155,6 +156,7 @@ static int verify_access(struct drm_i915_private *i915,
	struct file *file;
	u32 *vaddr;
	int err = 0, i;
+	unsigned int mode;
 
	file = mock_file(i915);
	if (IS_ERR(file))
@@ -194,7 +196,8 @@ static int verify_access(struct drm_i915_private *i915,
	if (err)
		goto out_file;
 
-	vaddr = i915_gem_object_pin_map_unlocked(native_obj, I915_MAP_WB);
+	mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, false);
+	vaddr = i915_gem_object_pin_map_unlocked(native_obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_file;
@@ -503,7 +506,7 @@ static int igt_dmabuf_export_vmap(void *arg)
		goto out;
	}
 
-	if (memchr_inv(ptr, 0, dmabuf->size)) {
+	if (!mem_is_zero(ptr, dmabuf->size)) {
		pr_err("Exported object not initialised to zero!\n");
		err = -EINVAL;
		goto out;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 99a9ade73956..9c3f17e51885 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1342,7 +1342,7 @@ static int igt_mmap_migrate(void *arg)
	}
 
	/*
-	 * Allocate in the mappable portion, should be no suprises here.
+	 * Allocate in the mappable portion, should be no surprises here.
	 */
	err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0);
	if (err)
@@ -1837,6 +1837,8 @@ static int igt_mmap_revoke(void *arg)
 
 int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
 {
+	int ret;
+	bool unuse_mm = false;
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_smoke_tiling),
@@ -1848,5 +1850,15 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
		SUBTEST(igt_mmap_gpu),
	};
 
-	return i915_live_subtests(tests, i915);
+	if (!current->mm) {
+		kthread_use_mm(current->active_mm);
+		unuse_mm = true;
+	}
+
+	ret = i915_live_subtests(tests, i915);
+
+	if (unuse_mm)
+		kthread_unuse_mm(current->active_mm);
+
+	return ret;
 }
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 2b0327cc47c2..fd8babb513e5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -108,44 +108,6 @@ err_ctx:
 }
 
 struct i915_gem_context *
-live_context_for_engine(struct intel_engine_cs *engine, struct file *file)
-{
-	struct i915_gem_engines *engines;
-	struct i915_gem_context *ctx;
-	struct intel_sseu null_sseu = {};
-	struct intel_context *ce;
-
-	engines = alloc_engines(1);
-	if (!engines)
-		return ERR_PTR(-ENOMEM);
-
-	ctx = live_context(engine->i915, file);
-	if (IS_ERR(ctx)) {
-		__free_engines(engines, 0);
-		return ctx;
-	}
-
-	ce = intel_context_create(engine);
-	if (IS_ERR(ce)) {
-		__free_engines(engines, 0);
-		return ERR_CAST(ce);
-	}
-
-	intel_context_set_gem(ce, ctx, null_sseu);
-	engines->engines[0] = ce;
-	engines->num_engines = 1;
-
-	mutex_lock(&ctx->engines_mutex);
-	i915_gem_context_set_user_engines(ctx);
-	engines = rcu_replace_pointer(ctx->engines, engines, 1);
-	mutex_unlock(&ctx->engines_mutex);
-
-	engines_idle_release(ctx, engines);
-
-	return ctx;
-}
-
-struct i915_gem_context *
 kernel_context(struct drm_i915_private *i915,
	       struct i915_address_space *vm)
 {
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.h b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
index 7a02fd9b5866..bc8fb37d2d24 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
@@ -23,9 +23,6 @@ void mock_context_close(struct i915_gem_context *ctx);
 struct i915_gem_context *
 live_context(struct drm_i915_private *i915, struct file *file);
 
-struct i915_gem_context *
-live_context_for_engine(struct intel_engine_cs *engine, struct file *file);
-
 struct i915_gem_context *kernel_context(struct drm_i915_private *i915,
					struct i915_address_space *vm);
 void kernel_context_close(struct i915_gem_context *ctx);
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index b2a5882b8f81..5cd58e0f0dcf 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -4,6 +4,7 @@
  * Copyright © 2016 Intel Corporation
  */
 
+#include <linux/vmalloc.h>
 #include "mock_dmabuf.h"
 
 static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
@@ -102,8 +103,7 @@ static struct dma_buf *mock_dmabuf(int npages)
	struct dma_buf *dmabuf;
	int i;
 
-	mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),
-		       GFP_KERNEL);
+	mock = kmalloc(struct_size(mock, pages, npages), GFP_KERNEL);
	if (!mock)
		return ERR_PTR(-ENOMEM);