drm/amdkfd: Validate user queue buffers

Find the user queue rptr, ring buffer, EOP buffer and CWSR area BOs,
check that each BO is mapped on the GPU with the correct size, and
take a reference on each BO.

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <felix.kuehling@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Philip Yang 2024-06-20 12:21:57 -04:00 committed by Alex Deucher
parent 9c7e69d2e1
commit 68e599db7a
2 changed files with 41 additions and 3 deletions

View File

@ -524,6 +524,10 @@ struct queue_properties {
uint64_t exception_status;
struct amdgpu_bo *wptr_bo;
struct amdgpu_bo *rptr_bo;
struct amdgpu_bo *ring_bo;
struct amdgpu_bo *eop_buf_bo;
struct amdgpu_bo *cwsr_bo;
};
#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \

View File

@ -97,7 +97,8 @@ int kfd_queue_buffer_get(struct amdgpu_vm *vm, void __user *addr, struct amdgpu_
if (!mapping)
goto out_err;
if (user_addr != mapping->start || user_addr + size - 1 != mapping->last) {
if (user_addr != mapping->start ||
(size != 0 && user_addr + size - 1 != mapping->last)) {
pr_debug("expected size 0x%llx not equal to mapping addr 0x%llx size 0x%llx\n",
expected_size, mapping->start << AMDGPU_GPU_PAGE_SHIFT,
(mapping->last - mapping->start + 1) << AMDGPU_GPU_PAGE_SHIFT);
@ -124,18 +125,51 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
err = kfd_queue_buffer_get(vm, properties->write_ptr, &properties->wptr_bo, PAGE_SIZE);
if (err)
goto out_err_unreserve;
err = kfd_queue_buffer_get(vm, properties->read_ptr, &properties->rptr_bo, PAGE_SIZE);
if (err)
goto out_err_unreserve;
err = kfd_queue_buffer_get(vm, (void *)properties->queue_address,
&properties->ring_bo, properties->queue_size);
if (err)
goto out_err_unreserve;
/* only compute queue requires EOP buffer and CWSR area */
if (properties->type != KFD_QUEUE_TYPE_COMPUTE)
goto out_unreserve;
amdgpu_bo_unreserve(vm->root.bo);
return 0;
/* EOP buffer is not required for all ASICs */
if (properties->eop_ring_buffer_address) {
err = kfd_queue_buffer_get(vm, (void *)properties->eop_ring_buffer_address,
&properties->eop_buf_bo,
properties->eop_ring_buffer_size);
if (err)
goto out_err_unreserve;
}
err = kfd_queue_buffer_get(vm, (void *)properties->ctx_save_restore_area_address,
&properties->cwsr_bo, 0);
if (err)
goto out_err_unreserve;
out_unreserve:
amdgpu_bo_unreserve(vm->root.bo);
return 0;
out_err_unreserve:
amdgpu_bo_unreserve(vm->root.bo);
kfd_queue_release_buffers(pdd, properties);
return err;
}
/*
 * Drop the references taken on all user queue buffers (wptr, rptr,
 * ring buffer, EOP buffer and CWSR area). amdgpu_bo_unref() tolerates
 * NULL and clears each pointer, so this is safe to call on a partially
 * acquired set. Always returns 0.
 */
int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
{
	struct amdgpu_bo **queue_bos[] = {
		&properties->wptr_bo,
		&properties->rptr_bo,
		&properties->ring_bo,
		&properties->eop_buf_bo,
		&properties->cwsr_bo,
	};
	unsigned int i;

	for (i = 0; i < sizeof(queue_bos) / sizeof(queue_bos[0]); i++)
		amdgpu_bo_unref(queue_bos[i]);

	return 0;
}