vmwgfx: Implement a proper GMR eviction mechanism
Use Ben's new range manager hooks to implement a manager for GMRs that
manages ids rather than ranges. This means we can use the standard TTM
code for binding, unbinding and eviction.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
commit 135cba0dc3
parent 8f895da57d
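Background note (added for orientation, not part of the original commit text): the core idea is that a TTM memory-type manager does not have to hand out address ranges; it can hand out opaque slot ids. The small, self-contained C program below is only an illustration of that idea and of why eviction then comes "for free" — when the id space is exhausted, the caller evicts a current user of the space and retries, which is what TTM's normal eviction path does once such a manager is plugged in. Every name in it is made up for the sketch.

/* idman_sketch.c -- illustrative only, not kernel code. */
#include <stdio.h>
#include <stdbool.h>

#define MAX_IDS 4                       /* think "max_gmr_ids" */

static bool id_used[MAX_IDS];

/* get_node: hand out any free id, or -1 when the id space is full. */
static int idman_get(void)
{
        for (int i = 0; i < MAX_IDS; i++)
                if (!id_used[i]) {
                        id_used[i] = true;
                        return i;
                }
        return -1;
}

/* put_node: return an id to the pool. */
static void idman_put(int id)
{
        id_used[id] = false;
}

int main(void)
{
        int id_of[8];

        /* Bind more "buffers" than there are ids; evict the oldest on failure. */
        for (int bo = 0; bo < 8; bo++) {
                int id = idman_get();

                if (id < 0) {
                        int victim = bo - MAX_IDS;      /* oldest still bound */

                        idman_put(id_of[victim]);       /* "evict": unbind it */
                        printf("evicted bo %d (freed id %d)\n",
                               victim, id_of[victim]);
                        id = idman_get();               /* retry */
                }
                id_of[bo] = id;
                printf("bo %d bound to id %d\n", bo, id);
        }
        return 0;
}

In the real patch the retry-after-eviction loop lives inside TTM, the id pool is an ida, and the per-slot bind/unbind is the GMR descriptor upload shown in the diff below.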
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
             vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
             vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
-            vmwgfx_overlay.o vmwgfx_fence.o
+            vmwgfx_overlay.o vmwgfx_fence.o vmwgfx_gmrid_manager.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
@@ -39,6 +39,9 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
 static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
         TTM_PL_FLAG_CACHED;
 
+static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
+        TTM_PL_FLAG_CACHED;
+
 struct ttm_placement vmw_vram_placement = {
         .fpfn = 0,
         .lpfn = 0,
@@ -48,6 +51,20 @@ struct ttm_placement vmw_vram_placement = {
         .busy_placement = &vram_placement_flags
 };
 
+static uint32_t vram_gmr_placement_flags[] = {
+        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
+        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+};
+
+struct ttm_placement vmw_vram_gmr_placement = {
+        .fpfn = 0,
+        .lpfn = 0,
+        .num_placement = 2,
+        .placement = vram_gmr_placement_flags,
+        .num_busy_placement = 1,
+        .busy_placement = &gmr_placement_flags
+};
+
 struct ttm_placement vmw_vram_sys_placement = {
         .fpfn = 0,
         .lpfn = 0,
@@ -77,27 +94,52 @@ struct ttm_placement vmw_sys_placement = {
 
 struct vmw_ttm_backend {
         struct ttm_backend backend;
+        struct page **pages;
+        unsigned long num_pages;
+        struct vmw_private *dev_priv;
+        int gmr_id;
 };
 
 static int vmw_ttm_populate(struct ttm_backend *backend,
                             unsigned long num_pages, struct page **pages,
                             struct page *dummy_read_page)
 {
+        struct vmw_ttm_backend *vmw_be =
+            container_of(backend, struct vmw_ttm_backend, backend);
+
+        vmw_be->pages = pages;
+        vmw_be->num_pages = num_pages;
+
         return 0;
 }
 
 static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 {
-        return 0;
+        struct vmw_ttm_backend *vmw_be =
+            container_of(backend, struct vmw_ttm_backend, backend);
+
+        vmw_be->gmr_id = bo_mem->start;
+
+        return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
+                            vmw_be->num_pages, vmw_be->gmr_id);
 }
 
 static int vmw_ttm_unbind(struct ttm_backend *backend)
 {
+        struct vmw_ttm_backend *vmw_be =
+            container_of(backend, struct vmw_ttm_backend, backend);
+
+        vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
         return 0;
 }
 
 static void vmw_ttm_clear(struct ttm_backend *backend)
 {
+        struct vmw_ttm_backend *vmw_be =
+            container_of(backend, struct vmw_ttm_backend, backend);
+
+        vmw_be->pages = NULL;
+        vmw_be->num_pages = 0;
 }
 
 static void vmw_ttm_destroy(struct ttm_backend *backend)
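Note (added for clarity, not in the original commit text): the gmr_id that vmw_ttm_bind() reads from bo_mem->start is the slot id the new id manager stores in mem->start when it allocates a node (see vmwgfx_gmrid_manager.c further down). Because binding and unbinding now go through the standard ttm_backend hooks, TTM's eviction path can unbind a GMR-backed buffer like any other buffer, which is what removes the need for the driver-private GMR LRU code deleted later in this patch.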
@@ -125,6 +167,7 @@ struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
                 return NULL;
 
         vmw_be->backend.func = &vmw_ttm_func;
+        vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
 
         return &vmw_be->backend;
 }
@@ -142,7 +185,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                 /* System memory */
 
                 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-                man->available_caching = TTM_PL_MASK_CACHING;
+                man->available_caching = TTM_PL_FLAG_CACHED;
                 man->default_caching = TTM_PL_FLAG_CACHED;
                 break;
         case TTM_PL_VRAM:
@@ -150,8 +193,20 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                 man->func = &ttm_bo_manager_func;
                 man->gpu_offset = 0;
                 man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
-                man->available_caching = TTM_PL_MASK_CACHING;
-                man->default_caching = TTM_PL_FLAG_WC;
+                man->available_caching = TTM_PL_FLAG_CACHED;
+                man->default_caching = TTM_PL_FLAG_CACHED;
+                break;
+        case VMW_PL_GMR:
+                /*
+                 * "Guest Memory Regions" is an aperture like feature with
+                 * one slot per bo. There is an upper limit of the number of
+                 * slots as well as the bo size.
+                 */
+                man->func = &vmw_gmrid_manager_func;
+                man->gpu_offset = 0;
+                man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
+                man->available_caching = TTM_PL_FLAG_CACHED;
+                man->default_caching = TTM_PL_FLAG_CACHED;
                 break;
         default:
                 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -175,18 +230,6 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
         return 0;
 }
 
-static void vmw_move_notify(struct ttm_buffer_object *bo,
-                            struct ttm_mem_reg *new_mem)
-{
-        if (new_mem->mem_type != TTM_PL_SYSTEM)
-                vmw_dmabuf_gmr_unbind(bo);
-}
-
-static void vmw_swap_notify(struct ttm_buffer_object *bo)
-{
-        vmw_dmabuf_gmr_unbind(bo);
-}
-
 static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -201,7 +244,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
                 return -EINVAL;
         switch (mem->mem_type) {
         case TTM_PL_SYSTEM:
-                /* System memory */
+        case VMW_PL_GMR:
                 return 0;
         case TTM_PL_VRAM:
                 mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -277,8 +320,8 @@ struct ttm_bo_driver vmw_bo_driver = {
         .sync_obj_flush = vmw_sync_obj_flush,
         .sync_obj_unref = vmw_sync_obj_unref,
         .sync_obj_ref = vmw_sync_obj_ref,
-        .move_notify = vmw_move_notify,
-        .swap_notify = vmw_swap_notify,
+        .move_notify = NULL,
+        .swap_notify = NULL,
         .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
         .io_mem_reserve = &vmw_ttm_io_mem_reserve,
         .io_mem_free = &vmw_ttm_io_mem_free,
@@ -260,13 +260,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
         idr_init(&dev_priv->context_idr);
         idr_init(&dev_priv->surface_idr);
         idr_init(&dev_priv->stream_idr);
-        ida_init(&dev_priv->gmr_ida);
         mutex_init(&dev_priv->init_mutex);
         init_waitqueue_head(&dev_priv->fence_queue);
         init_waitqueue_head(&dev_priv->fifo_queue);
         atomic_set(&dev_priv->fence_queue_waiters, 0);
         atomic_set(&dev_priv->fifo_queue_waiters, 0);
-        INIT_LIST_HEAD(&dev_priv->gmr_lru);
 
         dev_priv->io_start = pci_resource_start(dev->pdev, 0);
         dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
@@ -341,6 +339,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                 goto out_err2;
         }
 
+        dev_priv->has_gmr = true;
+        if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+                           dev_priv->max_gmr_ids) != 0) {
+                DRM_INFO("No GMR memory available. "
+                         "Graphics memory resources are very limited.\n");
+                dev_priv->has_gmr = false;
+        }
+
         dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
                                            dev_priv->mmio_size, DRM_MTRR_WC);
 
@@ -440,13 +446,14 @@ out_err4:
 out_err3:
         drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                      dev_priv->mmio_size, DRM_MTRR_WC);
+        if (dev_priv->has_gmr)
+                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
         (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 out_err2:
         (void)ttm_bo_device_release(&dev_priv->bdev);
 out_err1:
         vmw_ttm_global_release(dev_priv);
 out_err0:
-        ida_destroy(&dev_priv->gmr_ida);
         idr_destroy(&dev_priv->surface_idr);
         idr_destroy(&dev_priv->context_idr);
         idr_destroy(&dev_priv->stream_idr);
@@ -478,10 +485,11 @@ static int vmw_driver_unload(struct drm_device *dev)
         iounmap(dev_priv->mmio_virt);
         drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                      dev_priv->mmio_size, DRM_MTRR_WC);
+        if (dev_priv->has_gmr)
+                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
         (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
         (void)ttm_bo_device_release(&dev_priv->bdev);
         vmw_ttm_global_release(dev_priv);
-        ida_destroy(&dev_priv->gmr_ida);
         idr_destroy(&dev_priv->surface_idr);
         idr_destroy(&dev_priv->context_idr);
         idr_destroy(&dev_priv->stream_idr);
@@ -49,6 +49,9 @@
 #define VMWGFX_MAX_GMRS 2048
 #define VMWGFX_MAX_DISPLAYS 16
 
+#define VMW_PL_GMR TTM_PL_PRIV0
+#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
+
 struct vmw_fpriv {
         struct drm_master *locked_master;
         struct ttm_object_file *tfile;
@@ -57,8 +60,6 @@ struct vmw_fpriv {
 struct vmw_dma_buffer {
         struct ttm_buffer_object base;
         struct list_head validate_list;
-        struct list_head gmr_lru;
-        uint32_t gmr_id;
         bool gmr_bound;
         uint32_t cur_validate_node;
         bool on_validate_list;
@@ -184,6 +185,7 @@ struct vmw_private {
         uint32_t capabilities;
         uint32_t max_gmr_descriptors;
         uint32_t max_gmr_ids;
+        bool has_gmr;
         struct mutex hw_mutex;
 
         /*
@@ -265,14 +267,6 @@ struct vmw_private {
         uint32_t val_seq;
         struct mutex cmdbuf_mutex;
 
-        /**
-         * GMR management. Protected by the lru spinlock.
-         */
-
-        struct ida gmr_ida;
-        struct list_head gmr_lru;
-
-
         /**
          * Operating mode.
          */
@@ -334,7 +328,9 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv);
  */
 
 extern int vmw_gmr_bind(struct vmw_private *dev_priv,
-                        struct ttm_buffer_object *bo);
+                        struct page *pages[],
+                        unsigned long num_pages,
+                        int gmr_id);
 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
 
 /**
@@ -383,14 +379,10 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
 extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                                   uint32_t id, struct vmw_dma_buffer **out);
-extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
-extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
-extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
 extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
                                        struct vmw_dma_buffer *bo);
 extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
                                 struct vmw_dma_buffer *bo);
-extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -442,6 +434,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 extern struct ttm_placement vmw_vram_placement;
 extern struct ttm_placement vmw_vram_ne_placement;
 extern struct ttm_placement vmw_vram_sys_placement;
+extern struct ttm_placement vmw_vram_gmr_placement;
 extern struct ttm_placement vmw_sys_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
@@ -543,6 +536,12 @@ int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
 int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
 int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
 
+/**
+ * GMR Id manager
+ */
+
+extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
+
 /**
  * Inline helper functions
  */
@@ -538,8 +538,11 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
                 reloc = &sw_context->relocs[i];
                 validate = &sw_context->val_bufs[reloc->index];
                 bo = validate->bo;
-                reloc->location->offset += bo->offset;
-                reloc->location->gmrId = vmw_dmabuf_gmr(bo);
+                if (bo->mem.mem_type == TTM_PL_VRAM) {
+                        reloc->location->offset += bo->offset;
+                        reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
+                } else
+                        reloc->location->gmrId = bo->mem.start;
         }
         vmw_free_relocations(sw_context);
 }
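Note (added for clarity, not in the original commit text): with the GMR domain managed by TTM, the relocation fix-up no longer asks the dma buffer for a driver-tracked id. A buffer validated into VRAM is addressed through SVGA_GMR_FRAMEBUFFER plus its VRAM offset, while a buffer validated into the GMR domain uses bo->mem.start, which is exactly the id that vmw_gmrid_man_get_node() stored there when the buffer was placed.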
@@ -563,25 +566,14 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 {
         int ret;
 
-        if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
-                return 0;
-
         /**
-         * Put BO in VRAM, only if there is space.
+         * Put BO in VRAM if there is space, otherwise as a GMR.
+         * If there is no space in VRAM and GMR ids are all used up,
+         * start evicting GMRs to make room. If the DMA buffer can't be
+         * used as a GMR, this will return -ENOMEM.
          */
 
-        ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
-        if (unlikely(ret == -ERESTARTSYS))
-                return ret;
-
-        /**
-         * Otherwise, set it up as GMR.
-         */
-
-        if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
-                return 0;
-
-        ret = vmw_gmr_bind(dev_priv, bo);
+        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
         if (likely(ret == 0 || ret == -ERESTARTSYS))
                 return ret;
 
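Note (added for clarity, not in the original commit text): this single ttm_bo_validate() call against vmw_vram_gmr_placement is where the new eviction mechanism actually takes effect. Roughly, TTM first tries to place the buffer in VRAM or a free GMR slot; when neither is available it forces space in the busy placement (the GMR domain) by evicting an existing GMR-backed buffer — unbinding it through vmw_ttm_unbind() — before retrying, which replaces the hand-rolled "bind or fail" logic removed above.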
@@ -590,6 +582,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
          * previous contents.
          */
 
+        DRM_INFO("Falling through to VRAM.\n");
         ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
         return ret;
 }
@@ -612,7 +612,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
 {
         struct ttm_buffer_object *bo = &vmw_bo->base;
         struct ttm_placement ne_placement = vmw_vram_ne_placement;
-        struct drm_mm_node *mm_node;
         int ret = 0;
 
         ne_placement.lpfn = bo->num_pages;
@@ -626,9 +625,9 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
         if (unlikely(ret != 0))
                 goto err_unlock;
 
-        mm_node = bo->mem.mm_node;
         if (bo->mem.mem_type == TTM_PL_VRAM &&
-            mm_node->start < bo->num_pages)
+            bo->mem.start < bo->num_pages &&
+            bo->mem.start > 0)
                 (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
                                        false, false);
 
@@ -146,7 +146,7 @@ static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
  */
 
 static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
-                                                unsigned long num_pages)
+                                               unsigned long num_pages)
 {
         unsigned long prev_pfn = ~(0UL);
         unsigned long pfn;
@@ -163,45 +163,33 @@ static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
 }
 
 int vmw_gmr_bind(struct vmw_private *dev_priv,
-                 struct ttm_buffer_object *bo)
+                 struct page *pages[],
+                 unsigned long num_pages,
+                 int gmr_id)
 {
-        struct ttm_tt *ttm = bo->ttm;
-        unsigned long descriptors;
-        int ret;
-        uint32_t id;
         struct list_head desc_pages;
+        int ret;
 
-        if (!(dev_priv->capabilities & SVGA_CAP_GMR))
+        if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
                 return -EINVAL;
 
-        ret = ttm_tt_populate(ttm);
-        if (unlikely(ret != 0))
-                return ret;
-
-        descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
-        if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
+        if (vmw_gmr_count_descriptors(pages, num_pages) >
+            dev_priv->max_gmr_descriptors)
                 return -EINVAL;
 
         INIT_LIST_HEAD(&desc_pages);
-        ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
-                                        ttm->num_pages);
+
+        ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
         if (unlikely(ret != 0))
                 return ret;
 
-        ret = vmw_gmr_id_alloc(dev_priv, &id);
-        if (unlikely(ret != 0))
-                goto out_no_id;
-
-        vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
+        vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
         vmw_gmr_free_descriptors(&desc_pages);
-        vmw_dmabuf_set_gmr(bo, id);
+
         return 0;
-
-out_no_id:
-        vmw_gmr_free_descriptors(&desc_pages);
-        return ret;
 }
 
 
 void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
 {
         mutex_lock(&dev_priv->hw_mutex);
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c (new file, 137 lines)
@@ -0,0 +1,137 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+
+struct vmwgfx_gmrid_man {
+        spinlock_t lock;
+        struct ida gmr_ida;
+        uint32_t max_gmr_ids;
+};
+
+static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
+                                  struct ttm_buffer_object *bo,
+                                  struct ttm_placement *placement,
+                                  struct ttm_mem_reg *mem)
+{
+        struct vmwgfx_gmrid_man *gman =
+                (struct vmwgfx_gmrid_man *)man->priv;
+        int ret;
+        int id;
+
+        mem->mm_node = NULL;
+
+        do {
+                if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0))
+                        return -ENOMEM;
+
+                spin_lock(&gman->lock);
+                ret = ida_get_new(&gman->gmr_ida, &id);
+
+                if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
+                        ida_remove(&gman->gmr_ida, id);
+                        spin_unlock(&gman->lock);
+                        return 0;
+                }
+
+                spin_unlock(&gman->lock);
+
+        } while (ret == -EAGAIN);
+
+        if (likely(ret == 0)) {
+                mem->mm_node = gman;
+                mem->start = id;
+        }
+
+        return ret;
+}
+
+static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
+                                   struct ttm_mem_reg *mem)
+{
+        struct vmwgfx_gmrid_man *gman =
+                (struct vmwgfx_gmrid_man *)man->priv;
+
+        if (mem->mm_node) {
+                spin_lock(&gman->lock);
+                ida_remove(&gman->gmr_ida, mem->start);
+                spin_unlock(&gman->lock);
+                mem->mm_node = NULL;
+        }
+}
+
+static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
+                              unsigned long p_size)
+{
+        struct vmwgfx_gmrid_man *gman =
+                kzalloc(sizeof(*gman), GFP_KERNEL);
+
+        if (unlikely(gman == NULL))
+                return -ENOMEM;
+
+        spin_lock_init(&gman->lock);
+        ida_init(&gman->gmr_ida);
+        gman->max_gmr_ids = p_size;
+        man->priv = (void *) gman;
+        return 0;
+}
+
+static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
+{
+        struct vmwgfx_gmrid_man *gman =
+                (struct vmwgfx_gmrid_man *)man->priv;
+
+        if (gman) {
+                ida_destroy(&gman->gmr_ida);
+                kfree(gman);
+        }
+        return 0;
+}
+
+static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
+                                const char *prefix)
+{
+        printk(KERN_INFO "%s: No debug info available for the GMR "
+               "id manager.\n", prefix);
+}
+
+const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
+        vmw_gmrid_man_init,
+        vmw_gmrid_man_takedown,
+        vmw_gmrid_man_get_node,
+        vmw_gmrid_man_put_node,
+        vmw_gmrid_man_debug
+};
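Note (added for clarity, not in the original commit text): these five hooks are the "range manager" interface mentioned in the commit message, only backed by an ida instead of a range allocator. As wired up elsewhere in this patch, init runs when vmw_driver_load() calls ttm_bo_init_mm() for VMW_PL_GMR with max_gmr_ids as the size, get_node/put_node run whenever TTM moves a buffer into or out of the GMR domain, and takedown runs from ttm_bo_clean_mm() on unload and in the error path. Returning 0 from get_node while leaving mem->mm_node NULL is how the manager tells TTM that the id space is full without reporting an error, which is what prompts TTM to evict and retry.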
@@ -765,28 +765,11 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
         return bo_user_size + page_array_size;
 }
 
-void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
-{
-        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-        struct ttm_bo_global *glob = bo->glob;
-        struct vmw_private *dev_priv =
-                container_of(bo->bdev, struct vmw_private, bdev);
-
-        if (vmw_bo->gmr_bound) {
-                vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
-                spin_lock(&glob->lru_lock);
-                ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
-                spin_unlock(&glob->lru_lock);
-                vmw_bo->gmr_bound = false;
-        }
-}
-
 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 {
         struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
         struct ttm_bo_global *glob = bo->glob;
 
-        vmw_dmabuf_gmr_unbind(bo);
         ttm_mem_global_free(glob->mem_glob, bo->acc_size);
         kfree(vmw_bo);
 }
@@ -818,10 +801,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 
         memset(vmw_bo, 0, sizeof(*vmw_bo));
 
-        INIT_LIST_HEAD(&vmw_bo->gmr_lru);
         INIT_LIST_HEAD(&vmw_bo->validate_list);
-        vmw_bo->gmr_id = 0;
-        vmw_bo->gmr_bound = false;
 
         ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                           ttm_bo_type_device, placement,
@@ -835,7 +815,6 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
         struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
         struct ttm_bo_global *glob = bo->glob;
 
-        vmw_dmabuf_gmr_unbind(bo);
         ttm_mem_global_free(glob->mem_glob, bo->acc_size);
         kfree(vmw_user_bo);
 }
@@ -938,25 +917,6 @@ void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
         vmw_bo->on_validate_list = false;
 }
 
-uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
-{
-        struct vmw_dma_buffer *vmw_bo;
-
-        if (bo->mem.mem_type == TTM_PL_VRAM)
-                return SVGA_GMR_FRAMEBUFFER;
-
-        vmw_bo = vmw_dma_buffer(bo);
-
-        return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
-}
-
-void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
-{
-        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-        vmw_bo->gmr_bound = true;
-        vmw_bo->gmr_id = id;
-}
-
 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                            uint32_t handle, struct vmw_dma_buffer **out)
 {
@@ -985,41 +945,6 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
         return 0;
 }
 
-/**
- * TODO: Implement a gmr id eviction mechanism. Currently we just fail
- * when we're out of ids, causing GMR space to be allocated
- * out of VRAM.
- */
-
-int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
-{
-        struct ttm_bo_global *glob = dev_priv->bdev.glob;
-        int id;
-        int ret;
-
-        do {
-                if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
-                        return -ENOMEM;
-
-                spin_lock(&glob->lru_lock);
-                ret = ida_get_new(&dev_priv->gmr_ida, &id);
-                spin_unlock(&glob->lru_lock);
-        } while (ret == -EAGAIN);
-
-        if (unlikely(ret != 0))
-                return ret;
-
-        if (unlikely(id >= dev_priv->max_gmr_ids)) {
-                spin_lock(&glob->lru_lock);
-                ida_remove(&dev_priv->gmr_ida, id);
-                spin_unlock(&glob->lru_lock);
-                return -EBUSY;
-        }
-
-        *p_id = (uint32_t) id;
-        return 0;
-}
-
 /*
  * Stream management
  */