drm/scheduler: use job count instead of peek

The spsc_queue_peek() function accesses queue->head, which belongs to
the consumer thread and therefore must not be accessed by the producer.

This fixes a rare race condition when destroying entities.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Monk.liu@amd.com
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
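
For context, the two helpers differ in which queue fields they read. The
definitions below are sketched from include/drm/spsc_queue.h (paraphrased
and trimmed; consult the header for the exact kernel source):

	struct spsc_queue {
		struct spsc_node *head;  /* consumer-owned: only pop touches it */
		atomic_long_t tail;      /* producer-owned insertion point */
		atomic_t job_count;      /* maintained by both push and pop */
	};

	/* Returns the first queued node by reading head directly. Only the
	 * consumer may call this; the producer has no ordering guarantee
	 * against a concurrent pop rewriting head underneath it.
	 */
	static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
	{
		return queue->head;
	}

	/* Reads the atomic element count, which either side may do safely. */
	static inline int spsc_queue_count(struct spsc_queue *queue)
	{
		return atomic_read(&queue->job_count);
	}

Switching the producer-side checks from spsc_queue_peek() to
spsc_queue_count() therefore removes the cross-thread access to head
without changing what the checks mean: a non-empty queue has a non-zero
count.
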
@@ -95,7 +95,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
 	rmb(); /* for list_empty to work without lock */
 
 	if (list_empty(&entity->list) ||
-	    spsc_queue_peek(&entity->job_queue) == NULL)
+	    spsc_queue_count(&entity->job_queue) == 0)
 		return true;
 
 	return false;
@@ -281,7 +281,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
 	/* Consumption of existing IBs wasn't completed. Forcefully
 	 * remove them here.
 	 */
-	if (spsc_queue_peek(&entity->job_queue)) {
+	if (spsc_queue_count(&entity->job_queue)) {
 		if (sched) {
 			/* Park the kernel for a moment to make sure it isn't processing
 			 * our enity.
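
The ownership rule the patch enforces can be shown in a self-contained
user-space sketch. Everything below (the toy_* names, C11 atomics standing
in for the kernel's atomic_t) is hypothetical illustration, not kernel code:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct toy_node {
		struct toy_node *next;
	};

	struct toy_queue {
		struct toy_node *head;  /* consumer-owned, like spsc_queue's head */
		atomic_int count;       /* incremented on push, decremented on pop */
	};

	/* Safe from the producer: only the atomic counter is read. */
	static bool toy_queue_idle(struct toy_queue *q)
	{
		return atomic_load(&q->count) == 0;
	}

	/* Consumer-only: head may be rewritten by a concurrent pop, so
	 * calling this from the producer is exactly the race the patch
	 * removes.
	 */
	static struct toy_node *toy_queue_peek(struct toy_queue *q)
	{
		return q->head;
	}

	int main(void)
	{
		struct toy_queue q = { .head = NULL, .count = 0 };

		/* Producer-side emptiness check, post-patch style: */
		printf("idle: %s\n", toy_queue_idle(&q) ? "yes" : "no");

		/* Peek stays legal on the consumer side only. */
		(void)toy_queue_peek(&q);
		return 0;
	}

The counter is the only field both threads may read concurrently; head
stays private to the consumer, which is the invariant the patch restores.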