drm/amdgpu: reserve the PD during unmap and remove

We not only need to protect the mapping tree and the freed list themselves,
but also the items on those lists.
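
Concretely, amdgpu_bo_reserve() on the BO alone no longer covers
amdgpu_vm_bo_rmv(), because the bo_va items it removes live on lists that
the page directory's reservation protects. Below is a minimal sketch of the
locking pattern the patch switches to, built only from the helpers visible
in the diff; the wrapper name and its arguments are illustrative, not part
of the patch:

/* Illustrative only: the real change lands in amdgpu_gem_object_close(). */
static void example_remove_with_pd_held(struct amdgpu_device *adev,
					struct amdgpu_vm *vm,
					struct amdgpu_bo *bo)
{
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	/* The BO we are unmapping/removing ... */
	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	/* ... plus the page directory of the VM it is mapped into. */
	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	/* Reserve everything on the list in one deadlock-avoiding step. */
	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		return;

	/* PD reservation held: bo_va items may be removed safely. */
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0)
		amdgpu_vm_bo_rmv(adev, bo_va);

	ttm_eu_backoff_reservation(&ticket, &list);
}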

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit b5a5ec5504
parent fb29b57c34
Author:    Christian König <christian.koenig@amd.com>
Date:      2016-03-08 17:47:46 +01:00
Committer: Alex Deucher <alexander.deucher@amd.com>

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -140,25 +140,40 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 void amdgpu_gem_object_close(struct drm_gem_object *obj,
 			     struct drm_file *file_priv)
 {
-	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
-	struct amdgpu_device *adev = rbo->adev;
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = bo->adev;
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
+
+	struct amdgpu_bo_list_entry vm_pd;
+	struct list_head list, duplicates;
+	struct ttm_validate_buffer tv;
+	struct ww_acquire_ctx ticket;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-	r = amdgpu_bo_reserve(rbo, true);
+
+	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&duplicates);
+
+	tv.bo = &bo->tbo;
+	tv.shared = true;
+	list_add(&tv.head, &list);
+
+	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
+
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r) {
 		dev_err(adev->dev, "leaking bo va because "
 			"we fail to reserve bo (%d)\n", r);
 		return;
 	}
-	bo_va = amdgpu_vm_bo_find(vm, rbo);
+	bo_va = amdgpu_vm_bo_find(vm, bo);
 	if (bo_va) {
 		if (--bo_va->ref_count == 0) {
 			amdgpu_vm_bo_rmv(adev, bo_va);
 		}
 	}
-	amdgpu_bo_unreserve(rbo);
+	ttm_eu_backoff_reservation(&ticket, &list);
 }
 
 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
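
Note: the single-object amdgpu_bo_reserve()/amdgpu_bo_unreserve() pair is
replaced by ttm_eu_reserve_buffers()/ttm_eu_backoff_reservation(), which
lock a whole validation list under one ww_acquire_ctx ticket. That
wound/wait machinery is what lets the close path hold the BO and page
directory reservations at the same time without deadlocking against other
multi-BO reservers.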
@@ -580,11 +595,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	tv.shared = true;
 	list_add(&tv.head, &list);
 
-	if (args->operation == AMDGPU_VA_OP_MAP) {
-		tv_pd.bo = &fpriv->vm.page_directory->tbo;
-		tv_pd.shared = true;
-		list_add(&tv_pd.head, &list);
-	}
+	tv_pd.bo = &fpriv->vm.page_directory->tbo;
+	tv_pd.shared = true;
+	list_add(&tv_pd.head, &list);
+
 	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r) {
 		drm_gem_object_unreference_unlocked(gobj);
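
The second hunk drops the AMDGPU_VA_OP_MAP check in amdgpu_gem_va_ioctl(),
so the page directory is now reserved for every VA operation, unmap
included, matching the subject line's "reserve the PD during unmap and
remove".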