author     Thomas Hellstrom <thellstrom@vmware.com>	2018-12-12 11:52:08 +0100
committer  Thomas Hellstrom <thellstrom@vmware.com>	2018-12-13 13:04:25 +0100
commit     fd567467753fac9f9f477550065018e7f4e3c8f3 (patch)
tree       110744e9652bfbc00018fa572add924df2ca7ee0 /drivers/gpu
parent     e06d45d8c165a0b1a7aaa1b794c78d2db911c8f6 (diff)
drm/vmwgfx: Protect from excessive execbuf kernel memory allocations v3
With the new validation code, a malicious user-space app could potentially
submit command streams with enough buffer-object and resource references
in them to have the resulting allocated validation nodes and relocations
make the kernel run out of GFP_KERNEL memory. Protect from this by having
the validation code reserve TTM graphics memory when allocating.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
---
v2: Removed leftover debug printouts
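The mechanism is small: each time the validation code needs another page of
GFP_KERNEL memory for validation nodes or relocations, it first charges a
granularity-sized batch (16 pages here) against TTM's global memory
accounting, and the whole charge is returned in a single call when the
context is torn down. Below is a minimal, self-contained userspace sketch of
that pattern; all names in it (val_mem, val_ctx, demo_reserve, LIMIT and so
on) are illustrative stand-ins, not vmwgfx or TTM symbols. In the actual
patch the two callbacks map onto ttm_mem_global_alloc() and
ttm_mem_global_free(), as the diff below shows.

/*
 * Minimal userspace sketch of the reservation pattern (illustrative names
 * only; not the vmwgfx/TTM symbols). demo_reserve() stands in for
 * ttm_mem_global_alloc() and demo_unreserve() for ttm_mem_global_free().
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096
#define GRAN    (16 * PAGE_SZ)          /* mirrors VMWGFX_VALIDATION_MEM_GRAN */
#define LIMIT   (64 * PAGE_SZ)          /* stand-in for the TTM global limit */

struct val_mem {
	int (*reserve)(struct val_mem *m, size_t size);
	void (*unreserve)(struct val_mem *m, size_t size);
	size_t gran;
	size_t used;                    /* stand-in for ttm_mem_global state */
};

static int demo_reserve(struct val_mem *m, size_t size)
{
	if (m->used + size > LIMIT)
		return -ENOMEM;         /* budget exhausted: refuse */
	m->used += size;
	return 0;
}

static void demo_unreserve(struct val_mem *m, size_t size)
{
	m->used -= size;
}

struct val_ctx {
	struct val_mem *vm;
	size_t vm_size_left;            /* reserved but not yet page-backed */
	size_t total_mem;               /* total reserved, freed at teardown */
};

/* Allocate one zeroed page, topping up the reservation in gran-sized batches. */
static void *ctx_alloc_page(struct val_ctx *ctx)
{
	void *p;

	if (ctx->vm && ctx->vm_size_left < PAGE_SZ) {
		if (ctx->vm->reserve(ctx->vm, ctx->vm->gran))
			return NULL;    /* over budget: fail the submission */
		ctx->vm_size_left += ctx->vm->gran;
		ctx->total_mem += ctx->vm->gran;
	}

	p = calloc(1, PAGE_SZ);
	if (p && ctx->vm)
		ctx->vm_size_left -= PAGE_SZ;
	return p;
}

/* Return the whole reservation in one call, as vmw_validation_mem_free() does. */
static void ctx_teardown(struct val_ctx *ctx)
{
	if (ctx->vm && ctx->total_mem) {
		ctx->vm->unreserve(ctx->vm, ctx->total_mem);
		ctx->total_mem = 0;
		ctx->vm_size_left = 0;
	}
}

int main(void)
{
	struct val_mem vm = { demo_reserve, demo_unreserve, GRAN, 0 };
	struct val_ctx ctx = { .vm = &vm };
	void *pages[LIMIT / PAGE_SZ + 1];
	int n = 0;

	while (n < (int)(LIMIT / PAGE_SZ) + 1 && (pages[n] = ctx_alloc_page(&ctx)))
		n++;

	printf("got %d pages before the reservation limit kicked in\n", n);

	while (n--)
		free(pages[n]);
	ctx_teardown(&ctx);
	return 0;
}

The point of the granularity is to keep the number of accounting calls low:
one reserve call covers the next 16 page allocations, so a small submission
pays almost nothing extra, while a pathological one is refused as soon as
the global limit is reached.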
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c         |  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h         |  5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c     |  2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c    | 36
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.c  | 21
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.h  | 37
6 files changed, 103 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 61a84b958d67..d7a2dfb8ee9b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -49,6 +49,8 @@
#define VMWGFX_REPO "In Tree"
+#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
+
/**
* Fully encoded drm commands. Might move to vmw_drm.h
@@ -918,7 +920,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
spin_unlock(&dev_priv->cap_lock);
}
-
+ vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 59f614225bcd..aca974b14b55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -606,6 +606,9 @@ struct vmw_private {
struct vmw_cmdbuf_man *cman;
DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
+
+ /* Validation memory reservation */
+ struct vmw_validation_mem vvm;
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -846,6 +849,8 @@ extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
+extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
+ size_t gran);
/**
* TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 260650bb5560..f2d13a72c05d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3835,6 +3835,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct sync_file *sync_file = NULL;
DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
+ vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
+
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 7b1e5a5cbd2c..f88247046721 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -96,3 +96,39 @@ void vmw_ttm_global_release(struct vmw_private *dev_priv)
drm_global_item_unref(&dev_priv->bo_global_ref.ref);
drm_global_item_unref(&dev_priv->mem_global_ref);
}
+
+/* struct vmw_validation_mem callback */
+static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
+{
+ static struct ttm_operation_ctx ctx = {.interruptible = false,
+ .no_wait_gpu = false};
+ struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+ return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
+}
+
+/* struct vmw_validation_mem callback */
+static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
+{
+ struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+ return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_validation_mem_init_ttm - Interface the validation memory tracker
+ * to ttm.
+ * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw_private
+ * rather than a struct vmw_validation_mem is to make sure the assumption in
+ * the callbacks, that struct vmw_private embeds struct vmw_validation_mem,
+ * holds true.
+ * @gran: The recommended allocation granularity
+ */
+void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
+{
+ struct vmw_validation_mem *vvm = &dev_priv->vvm;
+
+ vvm->reserve_mem = vmw_vmt_reserve;
+ vvm->unreserve_mem = vmw_vmt_unreserve;
+ vvm->gran = gran;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index 184025fa938e..f116f092e00b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -104,11 +104,25 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
return NULL;
if (ctx->mem_size_left < size) {
- struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ struct page *page;
+ if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
+ int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
+
+ if (ret)
+ return NULL;
+
+ ctx->vm_size_left += ctx->vm->gran;
+ ctx->total_mem += ctx->vm->gran;
+ }
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
return NULL;
+ if (ctx->vm)
+ ctx->vm_size_left -= PAGE_SIZE;
+
list_add_tail(&page->lru, &ctx->page_list);
ctx->page_address = page_address(page);
ctx->mem_size_left = PAGE_SIZE;
@@ -138,6 +152,11 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
}
ctx->mem_size_left = 0;
+ if (ctx->vm && ctx->total_mem) {
+ ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
+ ctx->total_mem = 0;
+ ctx->vm_size_left = 0;
+ }
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index b57e3292c386..3b396fea40d7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -34,6 +34,21 @@
#include <drm/ttm/ttm_execbuf_util.h>
/**
+ * struct vmw_validation_mem - Custom interface to provide memory reservations
+ * for the validation code.
+ * @reserve_mem: Callback to reserve memory
+ * @unreserve_mem: Callback to unreserve memory
+ * @gran: Reservation granularity. Contains a hint on how much memory should
+ * be reserved in each call to @reserve_mem(). A slow implementation may want
+ * reservations to be done in large batches.
+ */
+struct vmw_validation_mem {
+ int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
+ void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
+ size_t gran;
+};
+
+/**
* struct vmw_validation_context - Per command submission validation context
* @ht: Hash table used to find resource- or buffer object duplicates
* @resource_list: List head for resource validation metadata
@@ -47,6 +62,10 @@
* buffer objects
* @mem_size_left: Free memory left in the last page in @page_list
* @page_address: Kernel virtual address of the last page in @page_list
+ * @vm: A pointer to the memory reservation interface or NULL if no
+ * memory reservation is needed.
+ * @vm_size_left: Amount of reserved memory that so far has not been allocated.
+ * @total_mem: Amount of reserved memory.
*/
struct vmw_validation_context {
struct drm_open_hash *ht;
@@ -59,6 +78,9 @@ struct vmw_validation_context {
unsigned int merge_dups;
unsigned int mem_size_left;
u8 *page_address;
+ struct vmw_validation_mem *vm;
+ size_t vm_size_left;
+ size_t total_mem;
};
struct vmw_buffer_object;
@@ -102,6 +124,21 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
}
/**
+ * vmw_validation_set_val_mem - Register a validation mem object for
+ * validation memory reservation
+ * @ctx: The validation context
+ * @vm: Pointer to a struct vmw_validation_mem
+ *
+ * Must be set before the first attempt to allocate validation memory.
+ */
+static inline void
+vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
+ struct vmw_validation_mem *vm)
+{
+ ctx->vm = vm;
+}
+
+/**
* vmw_validation_set_ht - Register a hash table for duplicate finding
* @ctx: The validation context
* @ht: Pointer to a hash table to use for duplicate finding