-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h | 12
-rw-r--r--  drivers/gpu/drm/xe/display/intel_bo.c                  |  2
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.c                             | 98
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.h                             |  5
-rw-r--r--  drivers/gpu/drm/xe/xe_bo_types.h                       |  6
-rw-r--r--  drivers/gpu/drm/xe/xe_exec.c                           |  6
-rw-r--r--  drivers/gpu/drm/xe/xe_pxp.c                            | 90
-rw-r--r--  drivers/gpu/drm/xe/xe_pxp.h                            |  6
-rw-r--r--  drivers/gpu/drm/xe/xe_pxp_types.h                      | 11
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c                             | 46
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.h                             |  2
-rw-r--r--  include/uapi/drm/xe_drm.h                              | 19
12 files changed, 296 insertions, 7 deletions
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h b/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
index 419e8e926f00..d2eb8e1f6c4b 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
@@ -9,6 +9,8 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 
+#include "xe_pxp.h"
+
 struct drm_gem_object;
 struct xe_pxp;
 
@@ -16,7 +18,15 @@ static inline int intel_pxp_key_check(struct xe_pxp *pxp,
 				      struct drm_gem_object *obj,
 				      bool assign)
 {
-	return -ENODEV;
+	/*
+	 * The assign variable is used in i915 to assign the key to the BO at
+	 * first submission time. In Xe the key is instead assigned at BO
+	 * creation time, so the assign variable must always be false.
+	 */
+	if (assign)
+		return -EINVAL;
+
+	return xe_pxp_obj_key_check(pxp, obj);
 }
 
 #endif
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
index b463f5bd4eed..27437c22bd70 100644
--- a/drivers/gpu/drm/xe/display/intel_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_bo.c
@@ -25,7 +25,7 @@ bool intel_bo_is_shmem(struct drm_gem_object *obj)
 
 bool intel_bo_is_protected(struct drm_gem_object *obj)
 {
-	return false;
+	return xe_bo_is_protected(gem_to_xe_bo(obj));
 }
 
 void intel_bo_flush_if_display(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index c32201123d44..6812164e1470 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -6,6 +6,7 @@
 #include "xe_bo.h"
 
 #include <linux/dma-buf.h>
+#include <linux/nospec.h>
 
 #include <drm/drm_drv.h>
 #include <drm/drm_gem_ttm_helper.h>
@@ -26,6 +27,7 @@
 #include "xe_migrate.h"
 #include "xe_pm.h"
 #include "xe_preempt_fence.h"
+#include "xe_pxp.h"
 #include "xe_res_cursor.h"
 #include "xe_trace_bo.h"
 #include "xe_ttm_stolen_mgr.h"
@@ -2155,6 +2157,93 @@ void xe_bo_vunmap(struct xe_bo *bo)
 	__xe_bo_vunmap(bo);
 }
 
+static int gem_create_set_pxp_type(struct xe_device *xe, struct xe_bo *bo, u64 value)
+{
+	if (value == DRM_XE_PXP_TYPE_NONE)
+		return 0;
+
+	/* we only support DRM_XE_PXP_TYPE_HWDRM for now */
+	if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
+		return -EINVAL;
+
+	return xe_pxp_key_assign(xe->pxp, bo);
+}
+
+typedef int (*xe_gem_create_set_property_fn)(struct xe_device *xe,
+					     struct xe_bo *bo,
+					     u64 value);
+
+static const xe_gem_create_set_property_fn gem_create_set_property_funcs[] = {
+	[DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY] = gem_create_set_pxp_type,
+};
+
+static int gem_create_user_ext_set_property(struct xe_device *xe,
+					    struct xe_bo *bo,
+					    u64 extension)
+{
+	u64 __user *address = u64_to_user_ptr(extension);
+	struct drm_xe_ext_set_property ext;
+	int err;
+	u32 idx;
+
+	err = __copy_from_user(&ext, address, sizeof(ext));
+	if (XE_IOCTL_DBG(xe, err))
+		return -EFAULT;
+
+	if (XE_IOCTL_DBG(xe, ext.property >=
+			 ARRAY_SIZE(gem_create_set_property_funcs)) ||
+	    XE_IOCTL_DBG(xe, ext.pad) ||
+	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY))
+		return -EINVAL;
+
+	idx = array_index_nospec(ext.property, ARRAY_SIZE(gem_create_set_property_funcs));
+	if (!gem_create_set_property_funcs[idx])
+		return -EINVAL;
+
+	return gem_create_set_property_funcs[idx](xe, bo, ext.value);
+}
+
+typedef int (*xe_gem_create_user_extension_fn)(struct xe_device *xe,
+					       struct xe_bo *bo,
+					       u64 extension);
+
+static const xe_gem_create_user_extension_fn gem_create_user_extension_funcs[] = {
+	[DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY] = gem_create_user_ext_set_property,
+};
+
+#define MAX_USER_EXTENSIONS	16
+static int gem_create_user_extensions(struct xe_device *xe, struct xe_bo *bo,
+				      u64 extensions, int ext_number)
+{
+	u64 __user *address = u64_to_user_ptr(extensions);
+	struct drm_xe_user_extension ext;
+	int err;
+	u32 idx;
+
+	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
+		return -E2BIG;
+
+	err = __copy_from_user(&ext, address, sizeof(ext));
+	if (XE_IOCTL_DBG(xe, err))
+		return -EFAULT;
+
+	if (XE_IOCTL_DBG(xe, ext.pad) ||
+	    XE_IOCTL_DBG(xe, ext.name >= ARRAY_SIZE(gem_create_user_extension_funcs)))
+		return -EINVAL;
+
+	idx = array_index_nospec(ext.name,
+				 ARRAY_SIZE(gem_create_user_extension_funcs));
+	err = gem_create_user_extension_funcs[idx](xe, bo, extensions);
+	if (XE_IOCTL_DBG(xe, err))
+		return err;
+
+	if (ext.next_extension)
+		return gem_create_user_extensions(xe, bo, ext.next_extension,
+						  ++ext_number);
+
+	return 0;
+}
+
 int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file)
 {
@@ -2167,8 +2256,7 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 	u32 handle;
 	int err;
 
-	if (XE_IOCTL_DBG(xe, args->extensions) ||
-	    XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
+	if (XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
 		return -EINVAL;
 
@@ -2250,6 +2338,12 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 		goto out_vm;
 	}
 
+	if (args->extensions) {
+		err = gem_create_user_extensions(xe, bo, args->extensions, 0);
+		if (err)
+			goto out_bulk;
+	}
+
 	err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
 	if (err)
 		goto out_bulk;
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 04995c5ced32..f09b9315721b 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -186,6 +186,11 @@ static inline bool xe_bo_is_pinned(struct xe_bo *bo)
 	return bo->ttm.pin_count;
 }
 
+static inline bool xe_bo_is_protected(const struct xe_bo *bo)
+{
+	return bo->pxp_key_instance;
+}
+
 static inline void xe_bo_unpin_map_no_vm(struct xe_bo *bo)
 {
 	if (likely(bo)) {
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 46dc9e4e3e46..60c522866500 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -57,6 +57,12 @@ struct xe_bo {
 	 */
 	struct list_head client_link;
 #endif
+	/**
+	 * @pxp_key_instance: PXP key instance this BO was created against. A
+	 * 0 in this variable indicates that the BO does not use PXP encryption.
+	 */
+	u32 pxp_key_instance;
+
 	/** @freed: List node for delayed put. */
 	struct llist_node freed;
 	/** @update_index: Update index if PT BO */
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index df8ce550deb4..b75adfc99fb7 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -262,6 +262,12 @@ retry:
 		goto err_exec;
 	}
 
+	if (xe_exec_queue_uses_pxp(q)) {
+		err = xe_vm_validate_protected(q->vm);
+		if (err)
+			goto err_exec;
+	}
+
 	job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
 				  addresses : &args->address);
 	if (IS_ERR(job)) {
diff --git a/drivers/gpu/drm/xe/xe_pxp.c b/drivers/gpu/drm/xe/xe_pxp.c
index 24aef5c0f04a..8060b4050be8 100644
--- a/drivers/gpu/drm/xe/xe_pxp.c
+++ b/drivers/gpu/drm/xe/xe_pxp.c
@@ -8,6 +8,8 @@
 #include <drm/drm_managed.h>
 #include <uapi/drm/xe_drm.h>
 
+#include "xe_bo.h"
+#include "xe_bo_types.h"
 #include "xe_device_types.h"
 #include "xe_exec_queue.h"
 #include "xe_force_wake.h"
@@ -185,6 +187,9 @@ static void pxp_terminate(struct xe_pxp *pxp)
 
 	pxp_invalidate_queues(pxp);
 
+	if (pxp->status == XE_PXP_ACTIVE)
+		pxp->key_instance++;
+
 	/*
 	 * If we have a termination already in progress, we need to wait for
 	 * it to complete before queueing another one. Once the first
@@ -385,6 +390,8 @@ int xe_pxp_init(struct xe_device *xe)
 	pxp->xe = xe;
 	pxp->gt = gt;
 
+	pxp->key_instance = 1;
+
 	/*
 	 * we'll use the completions to check if there is an action pending,
 	 * so we start them as completed and we reinit it when an action is
@@ -689,3 +696,86 @@ static void pxp_invalidate_queues(struct xe_pxp *pxp)
 
 	spin_unlock_irq(&pxp->queues.lock);
 }
+
+/**
+ * xe_pxp_key_assign - mark a BO as using the current PXP key iteration
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @bo: the BO to mark
+ *
+ * Returns: -ENODEV if PXP is disabled, 0 otherwise.
+ */
+int xe_pxp_key_assign(struct xe_pxp *pxp, struct xe_bo *bo)
+{
+	if (!xe_pxp_is_enabled(pxp))
+		return -ENODEV;
+
+	xe_assert(pxp->xe, !bo->pxp_key_instance);
+
+	/*
+	 * Note that the PXP key handling is inherently racy, because the key
+	 * can theoretically change at any time (although it's unlikely to do
+	 * so without triggers), even right after we copy it. Taking a lock
+	 * wouldn't help because the value might still change as soon as we
+	 * release the lock.
+	 * Userspace needs to handle the fact that their BOs can go invalid at
+	 * any point.
+	 */
+	bo->pxp_key_instance = pxp->key_instance;
+
+	return 0;
+}
+
+/**
+ * xe_pxp_bo_key_check - check if the key used by a xe_bo is valid
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @bo: the BO we want to check
+ *
+ * Checks whether a BO was encrypted with the current key or an obsolete one.
+ *
+ * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the
+ * BO is not using PXP, -ENOEXEC if the key is not valid.
+ */
+int xe_pxp_bo_key_check(struct xe_pxp *pxp, struct xe_bo *bo)
+{
+	if (!xe_pxp_is_enabled(pxp))
+		return -ENODEV;
+
+	if (!xe_bo_is_protected(bo))
+		return -EINVAL;
+
+	xe_assert(pxp->xe, bo->pxp_key_instance);
+
+	/*
+	 * Note that the PXP key handling is inherently racy, because the key
+	 * can theoretically change at any time (although it's unlikely to do
+	 * so without triggers), even right after we check it. Taking a lock
+	 * wouldn't help because the value might still change as soon as we
+	 * release the lock.
+	 * We mitigate the risk by checking the key at multiple points (on each
+	 * submission involving the BO and right before flipping it on the
+	 * display), but there is still a very small chance that we could
+	 * operate on an invalid BO for a single submission or a single frame
+	 * flip. This is a compromise made to protect the encrypted data (which
+	 * is what the key termination is for).
+	 */
+	if (bo->pxp_key_instance != pxp->key_instance)
+		return -ENOEXEC;
+
+	return 0;
+}
+
+/**
+ * xe_pxp_obj_key_check - check if the key used by a drm_gem_obj is valid
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @obj: the drm_gem_obj we want to check
+ *
+ * Checks whether a drm_gem_obj was encrypted with the current key or an
+ * obsolete one.
+ *
+ * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the
+ * obj is not using PXP, -ENOEXEC if the key is not valid.
+ */
+int xe_pxp_obj_key_check(struct xe_pxp *pxp, struct drm_gem_object *obj)
+{
+	return xe_pxp_bo_key_check(pxp, gem_to_xe_bo(obj));
+}
diff --git a/drivers/gpu/drm/xe/xe_pxp.h b/drivers/gpu/drm/xe/xe_pxp.h
index 868813cc84b9..3dd70eac9da6 100644
--- a/drivers/gpu/drm/xe/xe_pxp.h
+++ b/drivers/gpu/drm/xe/xe_pxp.h
@@ -8,6 +8,8 @@
 
 #include <linux/types.h>
 
+struct drm_gem_object;
+struct xe_bo;
 struct xe_device;
 struct xe_exec_queue;
 struct xe_pxp;
@@ -23,4 +25,8 @@ int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 t
 int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q);
 void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q);
 
+int xe_pxp_key_assign(struct xe_pxp *pxp, struct xe_bo *bo);
+int xe_pxp_bo_key_check(struct xe_pxp *pxp, struct xe_bo *bo);
+int xe_pxp_obj_key_check(struct xe_pxp *pxp, struct drm_gem_object *obj);
+
 #endif /* __XE_PXP_H__ */
diff --git a/drivers/gpu/drm/xe/xe_pxp_types.h b/drivers/gpu/drm/xe/xe_pxp_types.h
index bd741720f67d..8e4569f0173d 100644
--- a/drivers/gpu/drm/xe/xe_pxp_types.h
+++ b/drivers/gpu/drm/xe/xe_pxp_types.h
@@ -112,6 +112,17 @@ struct xe_pxp {
 		/** @queues.list: list of exec_queues that use PXP */
 		struct list_head list;
 	} queues;
+
+	/**
+	 * @key_instance: keep track of the current iteration of the PXP key.
+	 * Note that, due to the time needed for PXP termination and re-start
+	 * to complete, the minimum time between 2 subsequent increases of this
+	 * variable is 50ms, and even that only if there is a continuous attack;
+	 * normal behavior is for this to increase much much slower than that.
+	 * This means that we don't expect this to ever wrap and don't implement
+	 * that case in the code.
+	 */
+	u32 key_instance;
 };
 
 #endif /* __XE_PXP_TYPES_H__ */
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index b9270d059e18..d664f2e418b2 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -33,6 +33,7 @@
 #include "xe_pm.h"
 #include "xe_preempt_fence.h"
 #include "xe_pt.h"
+#include "xe_pxp.h"
 #include "xe_res_cursor.h"
 #include "xe_sync.h"
 #include "xe_trace_bo.h"
@@ -2726,7 +2727,8 @@ ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
 	(DRM_XE_VM_BIND_FLAG_READONLY | \
 	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
 	 DRM_XE_VM_BIND_FLAG_NULL | \
-	 DRM_XE_VM_BIND_FLAG_DUMPABLE)
+	 DRM_XE_VM_BIND_FLAG_DUMPABLE | \
+	 DRM_XE_VM_BIND_FLAG_CHECK_PXP)
 
 #ifdef TEST_VM_OPS_ERROR
 #define SUPPORTED_FLAGS	(SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
@@ -2889,7 +2891,7 @@ static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
 
 static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
 					u64 addr, u64 range, u64 obj_offset,
-					u16 pat_index)
+					u16 pat_index, u32 op, u32 bind_flags)
 {
 	u16 coh_mode;
 
@@ -2933,6 +2935,12 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
 		return -EINVAL;
 	}
 
+	/* If a BO is protected it can only be mapped if the key is still valid */
+	if ((bind_flags & DRM_XE_VM_BIND_FLAG_CHECK_PXP) && xe_bo_is_protected(bo) &&
+	    op != DRM_XE_VM_BIND_OP_UNMAP && op != DRM_XE_VM_BIND_OP_UNMAP_ALL)
+		if (XE_IOCTL_DBG(xe, xe_pxp_bo_key_check(xe->pxp, bo) != 0))
+			return -ENOEXEC;
+
 	return 0;
 }
 
@@ -3022,6 +3030,8 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		u32 obj = bind_ops[i].obj;
 		u64 obj_offset = bind_ops[i].obj_offset;
 		u16 pat_index = bind_ops[i].pat_index;
+		u32 op = bind_ops[i].op;
+		u32 bind_flags = bind_ops[i].flags;
 
 		if (!obj)
 			continue;
@@ -3034,7 +3044,8 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		bos[i] = gem_to_xe_bo(gem_obj);
 
 		err = xe_vm_bind_ioctl_validate_bo(xe, bos[i], addr, range,
-						   obj_offset, pat_index);
+						   obj_offset, pat_index, op,
+						   bind_flags);
 		if (err)
 			goto put_obj;
 	}
@@ -3334,6 +3345,35 @@ wait:
 	return ret;
 }
 
+int xe_vm_validate_protected(struct xe_vm *vm)
+{
+	struct drm_gpuva *gpuva;
+	int err = 0;
+
+	if (!vm)
+		return -ENODEV;
+
+	mutex_lock(&vm->snap_mutex);
+
+	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
+		struct xe_vma *vma = gpuva_to_vma(gpuva);
+		struct xe_bo *bo = vma->gpuva.gem.obj ?
+			gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
+
+		if (!bo)
+			continue;
+
+		if (xe_bo_is_protected(bo)) {
+			err = xe_pxp_bo_key_check(vm->xe->pxp, bo);
+			if (err)
+				break;
+		}
+	}
+
+	mutex_unlock(&vm->snap_mutex);
+	return err;
+}
+
 struct xe_vm_snapshot {
 	unsigned long num_snaps;
 	struct {
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 0a2fa6c0815b..f66075f8a6fe 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -215,6 +215,8 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
 
 int xe_vm_invalidate_vma(struct xe_vma *vma);
 
+int xe_vm_validate_protected(struct xe_vm *vm);
+
 static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
 {
 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 112fd27f3c75..892f54d3aa09 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -778,8 +778,23 @@ struct drm_xe_device_query {
  * - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
  * is uncached. Scanout surfaces should likely use this. All objects
 * that can be placed in VRAM must use this.
+ *
+ * This ioctl supports setting the following properties via the
+ * %DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY extension, which uses the
+ * generic @drm_xe_ext_set_property struct:
+ *
+ * - %DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE - set the type of PXP session
+ * this object will be used with. Valid values are listed in enum
+ * drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so
+ * there is no need to explicitly set that. Objects used with sessions of type
+ * %DRM_XE_PXP_TYPE_HWDRM will be marked as invalid if a PXP invalidation
+ * event occurs after their creation. Attempting to flip an invalid object
+ * will cause a black frame to be displayed instead. Submissions with invalid
+ * objects mapped in the VM will be rejected.
 */
struct drm_xe_gem_create {
+#define DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY	0
+#define   DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE	0
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;
@@ -968,6 +983,9 @@ struct drm_xe_vm_destroy {
 *    will only be valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
 *    handle MBZ, and the BO offset MBZ. This flag is intended to
 *    implement VK sparse bindings.
+ * - %DRM_XE_VM_BIND_FLAG_CHECK_PXP - If the object is encrypted via PXP,
+ *   reject the binding if the encryption key is no longer valid. This
+ *   flag has no effect on BOs that are not marked as using PXP.
 */
struct drm_xe_vm_bind_op {
	/** @extensions: Pointer to the first extension struct, if any */
@@ -1058,6 +1076,7 @@ struct drm_xe_vm_bind_op {
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE	(1 << 1)
#define DRM_XE_VM_BIND_FLAG_NULL	(1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE	(1 << 3)
+#define DRM_XE_VM_BIND_FLAG_CHECK_PXP	(1 << 4)
	/** @flags: Bind flags */
	__u32 flags;
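
For illustration, a minimal userspace sketch of the uAPI added above: only the struct fields and #defines come from this patch; the helper names, the drmIoctl()/xf86drm usage, and the error handling are assumptions for the example, not part of the change.

```c
#include <errno.h>
#include <stdint.h>

#include <xf86drm.h>
#include <drm/xe_drm.h>

/*
 * Hypothetical helper: create a BO tied to the current PXP HWDRM key.
 * The key instance is assigned at creation time (gem_create_set_pxp_type()
 * above), so a later PXP termination invalidates the object; -ENODEV from
 * the ioctl means PXP is disabled or unsupported on this device.
 */
static int xe_create_pxp_bo(int fd, uint64_t size, uint32_t placement,
			    uint32_t *handle)
{
	struct drm_xe_ext_set_property ext = {
		.base.name = DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY,
		.property = DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE,
		.value = DRM_XE_PXP_TYPE_HWDRM,
	};
	struct drm_xe_gem_create create = {
		.extensions = (uintptr_t)&ext, /* single extension: next_extension stays 0 */
		.size = size,
		.placement = placement,
		.cpu_caching = DRM_XE_GEM_CPU_CACHING_WC, /* required for VRAM placements */
	};

	if (drmIoctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create))
		return -errno;

	*handle = create.handle;
	return 0;
}
```

At map time, DRM_XE_VM_BIND_FLAG_CHECK_PXP turns a stale key into a bind-time -ENOEXEC instead of a submission-time rejection. A sketch, assuming the VM, GPU address, and PAT index are already set up by the caller:

```c
/* Hypothetical helper: map a PXP BO, failing early if the key went stale. */
static int xe_map_pxp_bo(int fd, uint32_t vm_id, uint32_t handle,
			 uint64_t gpu_addr, uint64_t size, uint16_t pat_index)
{
	struct drm_xe_vm_bind bind = {
		.vm_id = vm_id,
		.num_binds = 1,
		.bind = {
			.obj = handle,
			.range = size,
			.addr = gpu_addr,
			.op = DRM_XE_VM_BIND_OP_MAP,
			.flags = DRM_XE_VM_BIND_FLAG_CHECK_PXP,
			.pat_index = pat_index,
		},
	};

	if (drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, &bind))
		return -errno; /* -ENOEXEC: key invalidated, recreate the BO */

	return 0;
}
```

Without the flag the bind succeeds even with a stale key, and per the documentation above the rejection then only happens at submission (or the frame is replaced with black on flip).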