drm/amdgpu: use xarray for storing pasid in vm
Replace idr with xarray as we actually need hash functionality.
Cleanup code related to vm pasid by adding helper function.

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
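For readers unfamiliar with the xarray calls this patch switches to, the sketch below shows the same store/erase/lookup pattern in isolation. It is illustrative only: the names example_pasids, example_set_pasid and example_lookup are made up and are not the amdgpu structures, but the xarray calls themselves (xa_store_irq, xa_erase_irq, xa_load, xa_lock_irqsave) are the ones used in the diff.

#include <linux/xarray.h>

/* A pasid -> object table. XA_FLAGS_LOCK_IRQ makes the xarray take its
 * internal lock with interrupts disabled, so lookups are safe from
 * interrupt context (as in the fault-handling paths in the diff).
 */
static DEFINE_XARRAY_FLAGS(example_pasids, XA_FLAGS_LOCK_IRQ);

/* Move an object from old_pasid to new_pasid; either may be zero. */
static int example_set_pasid(void *obj, u32 old_pasid, u32 new_pasid)
{
        int r;

        if (old_pasid) {
                /* xa_err() turns an error entry into a negative errno */
                r = xa_err(xa_erase_irq(&example_pasids, old_pasid));
                if (r < 0)
                        return r;
        }

        if (new_pasid) {
                r = xa_err(xa_store_irq(&example_pasids, new_pasid, obj,
                                        GFP_KERNEL));
                if (r < 0)
                        return r;
        }

        return 0;
}

/* Look up an object by pasid while holding the xarray lock. */
static void *example_lookup(u32 pasid)
{
        unsigned long flags;
        void *obj;

        xa_lock_irqsave(&example_pasids, flags);
        obj = xa_load(&example_pasids, pasid);
        xa_unlock_irqrestore(&example_pasids, flags);

        return obj;
}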
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -88,6 +88,46 @@ struct amdgpu_prt_cb {
 	struct dma_fence_cb cb;
 };
 
+/**
+ * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: amdgpu_vm pointer
+ * @pasid: the pasid the VM is using on this GPU
+ *
+ * Set the pasid this VM is using on this GPU, can also be used to remove the
+ * pasid by passing in zero.
+ *
+ */
+int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			u32 pasid)
+{
+	int r;
+
+	if (vm->pasid == pasid)
+		return 0;
+
+	if (vm->pasid) {
+		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
+		if (r < 0)
+			return r;
+
+		vm->pasid = 0;
+	}
+
+	if (pasid) {
+		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
+					GFP_KERNEL));
+		if (r < 0)
+			return r;
+
+		vm->pasid = pasid;
+	}
+
+
+	return 0;
+}
+
 /*
  * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
  * happens while holding this lock anywhere to prevent deadlocks when
@@ -2945,18 +2985,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
 
 	amdgpu_bo_unreserve(vm->root.bo);
 
-	if (pasid) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
-			      GFP_ATOMIC);
-		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-		if (r < 0)
-			goto error_free_root;
-
-		vm->pasid = pasid;
-	}
+	r = amdgpu_vm_set_pasid(adev, vm, pasid);
+	if (r)
+		goto error_free_root;
 
 	INIT_KFIFO(vm->faults);
 
@@ -3044,18 +3075,15 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (r)
 		goto unreserve_bo;
 
-	if (pasid) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
-			      GFP_ATOMIC);
-		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-
-		if (r == -ENOSPC)
-			goto unreserve_bo;
-		r = 0;
-	}
+	/* Free the original amdgpu allocated pasid,
+	 * will be replaced with kfd allocated pasid.
+	 */
+	if (vm->pasid)
+		amdgpu_pasid_free(vm->pasid);
+
+	r = amdgpu_vm_set_pasid(adev, vm, pasid);
+	if (r)
+		goto unreserve_bo;
 
 	/* Check if PD needs to be reinitialized and do it before
 	 * changing any other state, in case it fails.
@@ -3066,7 +3094,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 				       to_amdgpu_bo_vm(vm->root.bo),
 				       false);
 		if (r)
-			goto free_idr;
+			goto free_pasid_entry;
 	}
 
 	/* Update VM state */
@@ -3083,7 +3111,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		r = amdgpu_bo_sync_wait(vm->root.bo,
 					AMDGPU_FENCE_OWNER_UNDEFINED, true);
 		if (r)
-			goto free_idr;
+			goto free_pasid_entry;
 
 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
 	} else {
@@ -3093,36 +3121,13 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	vm->last_update = NULL;
 	vm->is_compute_context = true;
 
-	if (vm->pasid) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
-		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-
-		/* Free the original amdgpu allocated pasid
-		 * Will be replaced with kfd allocated pasid
-		 */
-		amdgpu_pasid_free(vm->pasid);
-		vm->pasid = 0;
-	}
-
 	/* Free the shadow bo for compute VM */
 	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
 
-	if (pasid)
-		vm->pasid = pasid;
-
 	goto unreserve_bo;
 
-free_idr:
-	if (pasid) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-		idr_remove(&adev->vm_manager.pasid_idr, pasid);
-		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-	}
+free_pasid_entry:
+	amdgpu_vm_set_pasid(adev, vm, 0);
 unreserve_bo:
 	amdgpu_bo_unreserve(vm->root.bo);
 	return r;
@@ -3138,14 +3143,7 @@ unreserve_bo:
  */
 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
-	if (vm->pasid) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
-		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-	}
-	vm->pasid = 0;
+	amdgpu_vm_set_pasid(adev, vm, 0);
 	vm->is_compute_context = false;
 }
 
@@ -3169,15 +3167,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
 	root = amdgpu_bo_ref(vm->root.bo);
 	amdgpu_bo_reserve(root, true);
-	if (vm->pasid) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
-		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-		vm->pasid = 0;
-	}
-
+	amdgpu_vm_set_pasid(adev, vm, 0);
 	dma_fence_wait(vm->last_unlocked, false);
 	dma_fence_put(vm->last_unlocked);
 
@@ -3259,8 +3249,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 	adev->vm_manager.vm_update_mode = 0;
 #endif
 
-	idr_init(&adev->vm_manager.pasid_idr);
-	spin_lock_init(&adev->vm_manager.pasid_lock);
+	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
 }
 
 /**
@@ -3272,8 +3261,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
  */
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 {
-	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
-	idr_destroy(&adev->vm_manager.pasid_idr);
+	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
+	xa_destroy(&adev->vm_manager.pasids);
 
 	amdgpu_vmid_mgr_fini(adev);
 }
@@ -3342,13 +3331,13 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
 	struct amdgpu_vm *vm;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
 
-	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+	vm = xa_load(&adev->vm_manager.pasids, pasid);
 	if (vm)
 		*task_info = vm->task_info;
 
-	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
 }
 
 /**
@@ -3390,15 +3379,15 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
 	struct amdgpu_vm *vm;
 	int r;
 
-	spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
-	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
+	vm = xa_load(&adev->vm_manager.pasids, pasid);
 	if (vm) {
 		root = amdgpu_bo_ref(vm->root.bo);
 		is_compute_context = vm->is_compute_context;
 	} else {
 		root = NULL;
 	}
-	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
+	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
 
 	if (!root)
 		return false;
@@ -3416,11 +3405,11 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
 		goto error_unref;
 
 	/* Double check that the VM still exists */
-	spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
-	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
+	vm = xa_load(&adev->vm_manager.pasids, pasid);
 	if (vm && vm->root.bo != root)
 		vm = NULL;
-	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
+	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
 	if (!vm)
 		goto error_unlock;
 
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

@@ -359,8 +359,7 @@ struct amdgpu_vm_manager {
 	/* PASID to VM mapping, will be used in interrupt context to
 	 * look up VM of a page fault
 	 */
-	struct idr pasid_idr;
-	spinlock_t pasid_lock;
+	struct xarray pasids;
 };
 
 struct amdgpu_bo_va_mapping;
@@ -375,6 +374,9 @@ extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
 void amdgpu_vm_manager_init(struct amdgpu_device *adev);
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 
+int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			u32 pasid);
+
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid);
 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid);