drm/ttm: allow fence to be added as shared
This patch adds a new flag to the ttm_validate_buffer list to add the fence as shared to the reservation object.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit ae9c0af2c0
parent c4d922b145
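For context, a minimal sketch of how a driver would use the new flag; the helper name submit_read_only(), the buffer, and the fence are hypothetical, and the ttm_eu_* signatures are those in use as of this series:

#include <drm/ttm/ttm_execbuf_util.h>

/* Hypothetical example: validate a single BO and have its fence
 * added as shared instead of exclusive. */
static int submit_read_only(struct ttm_buffer_object *bo, struct fence *fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	int ret;

	INIT_LIST_HEAD(&list);
	tv.bo = bo;
	tv.shared = true;	/* request a shared fence slot */
	list_add_tail(&tv.head, &list);

	/* With tv.shared set, reservation also preallocates the shared
	 * fence slot, so the fencing step below cannot fail. */
	ret = ttm_eu_reserve_buffers(&ticket, &list, true);
	if (ret)
		return ret;

	/* ... submit the work that 'fence' will signal ... */

	/* Added to the shared slots of bo->resv rather than replacing
	 * the exclusive fence. */
	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
	return 0;
}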
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
@@ -226,6 +226,7 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
 
 	qxl_bo_ref(bo);
 	entry->tv.bo = &bo->tbo;
+	entry->tv.shared = false;
 	list_add_tail(&entry->tv.head, &release->bos);
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -183,6 +183,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 	}
 
 	p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
+	p->relocs[i].tv.shared = false;
 	p->relocs[i].handle = r->handle;
 
 	radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -143,6 +143,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
 	list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
 	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
 	list[0].tv.bo = &vm->page_directory->tbo;
+	list[0].tv.shared = false;
 	list[0].tiling_flags = 0;
 	list[0].handle = 0;
 	list_add(&list[0].tv.head, head);
@@ -156,6 +157,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
 		list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
 		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
 		list[idx].tv.bo = &list[idx].robj->tbo;
+		list[idx].tv.shared = false;
 		list[idx].tiling_flags = 0;
 		list[idx].handle = 0;
 		list_add(&list[idx++].tv.head, head);
@@ -395,6 +397,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
 
 	memset(&tv, 0, sizeof(tv));
 	tv.bo = &bo->tbo;
+	tv.shared = false;
 
 	INIT_LIST_HEAD(&head);
 	list_add(&tv.head, &head);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -119,8 +119,14 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 				ret = -EBUSY;
 		}
 
-		if (!ret)
-			continue;
+		if (!ret) {
+			if (!entry->shared)
+				continue;
+
+			ret = reservation_object_reserve_shared(bo->resv);
+			if (!ret)
+				continue;
+		}
 
 		/* uh oh, we lost out, drop every reservation and try
 		 * to only reserve this buffer, then start over if
@@ -136,6 +142,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			ret = 0;
 		}
 
+		if (!ret && entry->shared)
+			ret = reservation_object_reserve_shared(bo->resv);
+
 		if (unlikely(ret != 0)) {
 			if (ret == -EINTR)
 				ret = -ERESTARTSYS;
@@ -183,7 +192,10 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 
 	list_for_each_entry(entry, list, head) {
 		bo = entry->bo;
-		reservation_object_add_excl_fence(bo->resv, fence);
+		if (entry->shared)
+			reservation_object_add_shared_fence(bo->resv, fence);
+		else
+			reservation_object_add_excl_fence(bo->resv, fence);
 		ttm_bo_add_to_lru(bo);
 		__ttm_bo_unreserve(bo);
 	}
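The ordering in the two ttm_eu_reserve_buffers() hunks above is the important part: reservation_object_reserve_shared() may need to allocate memory and can fail, while ttm_eu_fence_buffer_objects() returns void, so the shared slot has to be reserved while errors can still be propagated. A condensed, illustrative view of the resulting flow (ww_mutex backoff and error paths elided; not the verbatim kernel code):

/* Illustrative only -- condensed from the hunks above. */
list_for_each_entry(entry, list, head) {
	struct ttm_buffer_object *bo = entry->bo;

	ret = ttm_bo_reserve(bo, intr, false, true, ticket);
	if (!ret && entry->shared)
		ret = reservation_object_reserve_shared(bo->resv);
	/* on contention: drop all reservations and retry (not shown) */
}

/* later, after command submission -- cannot fail any more: */
list_for_each_entry(entry, list, head) {
	if (entry->shared)
		reservation_object_add_shared_fence(entry->bo->resv, fence);
	else
		reservation_object_add_excl_fence(entry->bo->resv, fence);
}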
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -346,6 +346,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 		++sw_context->cur_val_buf;
 		val_buf = &vval_buf->base;
 		val_buf->bo = ttm_bo_reference(bo);
+		val_buf->shared = false;
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		vval_buf->validate_as_mob = validate_as_mob;
 	}
@@ -2670,9 +2671,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 	INIT_LIST_HEAD(&validate_list);
 
 	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+	pinned_val.shared = false;
 	list_add_tail(&pinned_val.head, &validate_list);
 
 	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+	query_val.shared = false;
 	list_add_tail(&query_val.head, &validate_list);
 
 	ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -133,6 +133,7 @@ static void vmw_resource_release(struct kref *kref)
 		struct ttm_validate_buffer val_buf;
 
 		val_buf.bo = bo;
+		val_buf.shared = false;
 		res->func->unbind(res, false, &val_buf);
 	}
 	res->backup_dirty = false;
@@ -1219,6 +1220,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 
 	INIT_LIST_HEAD(&val_list);
 	val_buf->bo = ttm_bo_reference(&res->backup->base);
+	val_buf->shared = false;
 	list_add_tail(&val_buf->head, &val_list);
 	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
 	if (unlikely(ret != 0))
@@ -1312,6 +1314,7 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 	BUG_ON(!func->may_evict);
 
 	val_buf.bo = NULL;
+	val_buf.shared = false;
 	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
 	if (unlikely(ret != 0))
 		return ret;
@@ -1357,6 +1360,7 @@ int vmw_resource_validate(struct vmw_resource *res)
 		return 0;
 
 	val_buf.bo = NULL;
+	val_buf.shared = false;
 	if (res->backup)
 		val_buf.bo = &res->backup->base;
 	do {
@@ -1474,6 +1478,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 		struct ttm_validate_buffer val_buf;
 
 		val_buf.bo = bo;
+		val_buf.shared = false;
 
 		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
@@ -39,11 +39,13 @@
  *
  * @head: list head for thread-private list.
  * @bo: refcounted buffer object pointer.
+ * @shared: should the fence be added shared?
  */
 
 struct ttm_validate_buffer {
 	struct list_head head;
 	struct ttm_buffer_object *bo;
+	bool shared;
 };
 
 /**