drm/amdkfd: Validate queue cwsr area and eop buffer size
When creating a KFD user compute queue, check that the queue eop buffer size, cwsr area size, and ctl stack size are equal to the sizes in the KFD node properties. Check the entire cwsr area, which may be split into multiple svm ranges aligned to the granularity boundary.

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <felix.kuehling@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
517fff221c
commit
629568d25f
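For context on the sizing the patch enforces: the cwsr backing that must be present covers the context save/restore area plus the debug memory area, once per XCC, rounded up to a full page. Below is a minimal standalone sketch of that arithmetic, using made-up values in place of the real KFD node properties and XCC count; ALIGN_UP is a stand-in for the kernel's ALIGN() macro.

#include <stdint.h>
#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two),
 * mirroring the kernel's ALIGN() macro. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

int main(void)
{
	/* Illustrative values only; the real numbers come from the KFD
	 * topology node properties and from pdd->dev->xcc_mask. */
	uint32_t cwsr_size = 0x56000;        /* per-XCC context save/restore area */
	uint32_t debug_memory_size = 0x1000; /* per-XCC debug memory area */
	uint32_t num_xcc = 4;                /* popcount of the XCC mask */
	uint32_t page_size = 4096;

	uint32_t total = (cwsr_size + debug_memory_size) * num_xcc;
	total = ALIGN_UP(total, page_size);

	/* (0x56000 + 0x1000) * 4 = 0x15c000, already page aligned */
	printf("total cwsr size: 0x%x\n", total);
	return 0;
}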
@@ -225,9 +225,15 @@ void kfd_queue_buffer_put(struct amdgpu_vm *vm, struct amdgpu_bo **bo)
 
 int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
 {
+	struct kfd_topology_device *topo_dev;
 	struct amdgpu_vm *vm;
+	u32 total_cwsr_size;
 	int err;
 
+	topo_dev = kfd_topology_device_by_id(pdd->dev->id);
+	if (!topo_dev)
+		return -EINVAL;
+
 	vm = drm_priv_to_vm(pdd->drm_priv);
 	err = amdgpu_bo_reserve(vm->root.bo, false);
 	if (err)
@@ -252,6 +258,12 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties)
 
 	/* EOP buffer is not required for all ASICs */
 	if (properties->eop_ring_buffer_address) {
+		if (properties->eop_ring_buffer_size != topo_dev->node_props.eop_buffer_size) {
+			pr_debug("queue eop bo size 0x%lx not equal to node eop buf size 0x%x\n",
+				properties->eop_buf_bo->tbo.base.size,
+				topo_dev->node_props.eop_buffer_size);
+			goto out_err_unreserve;
+		}
 		err = kfd_queue_buffer_get(vm, (void *)properties->eop_ring_buffer_address,
 					   &properties->eop_buf_bo,
 					   properties->eop_ring_buffer_size);
@@ -259,15 +271,33 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties)
 			goto out_err_unreserve;
 	}
 
+	if (properties->ctl_stack_size != topo_dev->node_props.ctl_stack_size) {
+		pr_debug("queue ctl stack size 0x%x not equal to node ctl stack size 0x%x\n",
+			properties->ctl_stack_size,
+			topo_dev->node_props.ctl_stack_size);
+		goto out_err_unreserve;
+	}
+
+	if (properties->ctx_save_restore_area_size != topo_dev->node_props.cwsr_size) {
+		pr_debug("queue cwsr size 0x%x not equal to node cwsr size 0x%x\n",
+			properties->ctx_save_restore_area_size,
+			topo_dev->node_props.cwsr_size);
+		goto out_err_unreserve;
+	}
+
+	total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
+			  * NUM_XCC(pdd->dev->xcc_mask);
+	total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
+
 	err = kfd_queue_buffer_get(vm, (void *)properties->ctx_save_restore_area_address,
-				   &properties->cwsr_bo, 0);
+				   &properties->cwsr_bo, total_cwsr_size);
 	if (!err)
 		goto out_unreserve;
 
 	amdgpu_bo_unreserve(vm->root.bo);
 
 	err = kfd_queue_buffer_svm_get(pdd, properties->ctx_save_restore_area_address,
-				       properties->ctx_save_restore_area_size);
+				       total_cwsr_size);
 	if (err)
 		goto out_err_release;
 
@@ -286,7 +316,9 @@ out_err_release:
 
 int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
 {
+	struct kfd_topology_device *topo_dev;
 	struct amdgpu_vm *vm;
+	u32 total_cwsr_size;
 	int err;
 
 	vm = drm_priv_to_vm(pdd->drm_priv);
@@ -302,8 +334,14 @@ int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_properties)
 
 	amdgpu_bo_unreserve(vm->root.bo);
 
-	kfd_queue_buffer_svm_put(pdd, properties->ctx_save_restore_area_address,
-				 properties->ctx_save_restore_area_size);
+	topo_dev = kfd_topology_device_by_id(pdd->dev->id);
+	if (!topo_dev)
+		return -EINVAL;
+	total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
+			  * NUM_XCC(pdd->dev->xcc_mask);
+	total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
+
+	kfd_queue_buffer_svm_put(pdd, properties->ctx_save_restore_area_address, total_cwsr_size);
 	return 0;
 }
 
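Why kfd_queue_buffer_svm_get() and kfd_queue_buffer_svm_put() now take total_cwsr_size rather than the user-supplied size: an SVM-backed cwsr area may have been split into several ranges on granularity boundaries, so the whole span has to be walked and every page accounted for. A toy sketch of that kind of walk follows; struct range, lookup(), and area_fully_registered() are illustrative stand-ins, not the kernel's SVM interval-tree code.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for SVM range bookkeeping: each range covers
 * [start, last] in pages, inclusive, as in the kernel's interval tree. */
struct range { uint64_t start, last; };

/* A three-page cwsr area that got split on a granularity boundary. */
static const struct range ranges[] = {
	{ 0x1000, 0x1001 },	/* pages 0x1000-0x1001 */
	{ 0x1002, 0x1002 },	/* page  0x1002 */
};

static const struct range *lookup(uint64_t page)
{
	for (size_t i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++)
		if (page >= ranges[i].start && page <= ranges[i].last)
			return &ranges[i];
	return NULL;
}

/* Walk every range covering [start, last]; fail on the first hole.
 * Same shape as validating the whole cwsr area across split ranges. */
static bool area_fully_registered(uint64_t start, uint64_t last)
{
	while (start <= last) {
		const struct range *r = lookup(start);

		if (!r)
			return false;	/* unregistered hole in the area */
		start = r->last + 1;	/* resume after this range */
	}
	return true;
}

int main(void)
{
	printf("pages 0x1000-0x1002 registered: %d\n",
	       area_fully_registered(0x1000, 0x1002));	/* 1: fully covered */
	printf("pages 0x1000-0x1003 registered: %d\n",
	       area_fully_registered(0x1000, 0x1003));	/* 0: hole at 0x1003 */
	return 0;
}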