mirror of
https://github.com/torvalds/linux.git
synced 2024-11-13 23:51:39 +00:00
drm/vmwgfx: Protect from excessive execbuf kernel memory allocations v3
With the new validation code, a malicious user-space app could potentially submit command streams with enough buffer-object and resource references in them to have the resulting allocated validation nodes and relocations make the kernel run out of GFP_KERNEL memory. Protect from this by having the validation code reserve TTM graphics memory when allocating. Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com> Reviewed-by: Deepak Rawat <drawat@vmware.com> --- v2: Removed leftover debug printouts
This commit is contained in:
parent
e06d45d8c1
commit
fd56746775
@ -49,6 +49,8 @@
|
||||
|
||||
#define VMWGFX_REPO "In Tree"
|
||||
|
||||
#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
|
||||
|
||||
|
||||
/**
|
||||
* Fully encoded drm commands. Might move to vmw_drm.h
|
||||
@ -918,7 +920,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
||||
spin_unlock(&dev_priv->cap_lock);
|
||||
}
|
||||
|
||||
|
||||
vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
|
||||
ret = vmw_kms_init(dev_priv);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_no_kms;
|
||||
|
@ -606,6 +606,9 @@ struct vmw_private {
|
||||
|
||||
struct vmw_cmdbuf_man *cman;
|
||||
DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
|
||||
|
||||
/* Validation memory reservation */
|
||||
struct vmw_validation_mem vvm;
|
||||
};
|
||||
|
||||
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
|
||||
@ -846,6 +849,8 @@ extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
|
||||
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
|
||||
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
|
||||
|
||||
extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
|
||||
size_t gran);
|
||||
/**
|
||||
* TTM buffer object driver - vmwgfx_ttm_buffer.c
|
||||
*/
|
||||
|
@ -3835,6 +3835,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
|
||||
struct sync_file *sync_file = NULL;
|
||||
DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
|
||||
|
||||
vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
|
||||
|
||||
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
|
||||
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
|
||||
if (out_fence_fd < 0) {
|
||||
|
@ -96,3 +96,39 @@ void vmw_ttm_global_release(struct vmw_private *dev_priv)
|
||||
drm_global_item_unref(&dev_priv->bo_global_ref.ref);
|
||||
drm_global_item_unref(&dev_priv->mem_global_ref);
|
||||
}
|
||||
|
||||
/* struct vmw_validation_mem callback */
|
||||
static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
|
||||
{
|
||||
static struct ttm_operation_ctx ctx = {.interruptible = false,
|
||||
.no_wait_gpu = false};
|
||||
struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
|
||||
|
||||
return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
|
||||
}
|
||||
|
||||
/* struct vmw_validation_mem callback */
|
||||
static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
|
||||
{
|
||||
struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
|
||||
|
||||
return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_validation_mem_init_ttm - Interface the validation memory tracker
|
||||
* to ttm.
|
||||
* @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
|
||||
* rather than a struct vmw_validation_mem is to make sure assumption in the
|
||||
* callbacks that struct vmw_private derives from struct vmw_validation_mem
|
||||
* holds true.
|
||||
* @gran: The recommended allocation granularity
|
||||
*/
|
||||
void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
|
||||
{
|
||||
struct vmw_validation_mem *vvm = &dev_priv->vvm;
|
||||
|
||||
vvm->reserve_mem = vmw_vmt_reserve;
|
||||
vvm->unreserve_mem = vmw_vmt_unreserve;
|
||||
vvm->gran = gran;
|
||||
}
|
||||
|
@ -104,11 +104,25 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
|
||||
return NULL;
|
||||
|
||||
if (ctx->mem_size_left < size) {
|
||||
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
|
||||
struct page *page;
|
||||
|
||||
if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
|
||||
int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
|
||||
|
||||
if (ret)
|
||||
return NULL;
|
||||
|
||||
ctx->vm_size_left += ctx->vm->gran;
|
||||
ctx->total_mem += ctx->vm->gran;
|
||||
}
|
||||
|
||||
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
|
||||
if (!page)
|
||||
return NULL;
|
||||
|
||||
if (ctx->vm)
|
||||
ctx->vm_size_left -= PAGE_SIZE;
|
||||
|
||||
list_add_tail(&page->lru, &ctx->page_list);
|
||||
ctx->page_address = page_address(page);
|
||||
ctx->mem_size_left = PAGE_SIZE;
|
||||
@ -138,6 +152,11 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
|
||||
}
|
||||
|
||||
ctx->mem_size_left = 0;
|
||||
if (ctx->vm && ctx->total_mem) {
|
||||
ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
|
||||
ctx->total_mem = 0;
|
||||
ctx->vm_size_left = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -33,6 +33,21 @@
|
||||
#include <linux/ww_mutex.h>
|
||||
#include <drm/ttm/ttm_execbuf_util.h>
|
||||
|
||||
/**
 * struct vmw_validation_mem - Custom interface to provide memory reservations
 * for the validation code.
 * @reserve_mem: Callback to reserve memory. Returns 0 on success or a
 * negative error code on failure.
 * @unreserve_mem: Callback to unreserve memory previously obtained through
 * @reserve_mem().
 * @gran: Reservation granularity. Contains a hint how much memory should
 * be reserved in each call to @reserve_mem(). A slow implementation may want
 * reservation to be done in large batches.
 */
struct vmw_validation_mem {
	int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
	void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
	size_t gran;
};
|
||||
|
||||
/**
|
||||
* struct vmw_validation_context - Per command submission validation context
|
||||
* @ht: Hash table used to find resource- or buffer object duplicates
|
||||
@ -47,6 +62,10 @@
|
||||
* buffer objects
|
||||
* @mem_size_left: Free memory left in the last page in @page_list
|
||||
* @page_address: Kernel virtual address of the last page in @page_list
|
||||
* @vm: A pointer to the memory reservation interface or NULL if no
|
||||
* memory reservation is needed.
|
||||
* @vm_size_left: Amount of reserved memory that so far has not been allocated.
|
||||
* @total_mem: Amount of reserved memory.
|
||||
*/
|
||||
struct vmw_validation_context {
|
||||
struct drm_open_hash *ht;
|
||||
@ -59,6 +78,9 @@ struct vmw_validation_context {
|
||||
unsigned int merge_dups;
|
||||
unsigned int mem_size_left;
|
||||
u8 *page_address;
|
||||
struct vmw_validation_mem *vm;
|
||||
size_t vm_size_left;
|
||||
size_t total_mem;
|
||||
};
|
||||
|
||||
struct vmw_buffer_object;
|
||||
@ -101,6 +123,21 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
|
||||
return !list_empty(&ctx->bo_list);
|
||||
}
|
||||
|
||||
/**
 * vmw_validation_set_val_mem - Register a validation mem object for
 * validation memory reservation
 * @ctx: The validation context
 * @vm: Pointer to a struct vmw_validation_mem
 *
 * Must be set before the first attempt to allocate validation memory.
 * Only stores the pointer; @vm must outlive @ctx's use of it.
 */
static inline void
vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
			   struct vmw_validation_mem *vm)
{
	ctx->vm = vm;
}
|
||||
|
||||
/**
|
||||
* vmw_validation_set_ht - Register a hash table for duplicate finding
|
||||
* @ctx: The validation context
|
||||
|
Loading…
Reference in New Issue
Block a user