drm/scheduler: rework entity creation
The entity currently keeps a copy of the run_queue list and modifies it in drm_sched_entity_set_priority(). Entities shouldn't modify the run_queue list. Store a drm_gpu_scheduler list instead of a drm_sched_rq list in struct drm_sched_entity. This way a run queue can be selected for a drm scheduler based on the entity/ctx's priority.

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
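For driver authors, the visible change is the drm_sched_entity_init() signature: callers now pass the entity's priority plus an array of drm_gpu_scheduler pointers, and the entity resolves its run queue internally, instead of the caller passing pre-selected drm_sched_rq pointers. Below is a minimal before/after sketch of a driver-side entity setup; my_sched, my_entity and my_driver_entity_init are hypothetical names, while the function signature and the priority constants are taken from the diff that follows.

    #include <drm/gpu_scheduler.h>

    /* Hypothetical driver objects, for illustration only. */
    static struct drm_gpu_scheduler *my_sched;   /* e.g. &ring->sched */
    static struct drm_sched_entity my_entity;

    static int my_driver_entity_init(void)
    {
            /*
             * Before this patch, the caller picked the run queue itself:
             *
             *      struct drm_sched_rq *rq =
             *              &my_sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];
             *      return drm_sched_entity_init(&my_entity, &rq, 1, NULL);
             *
             * After the rework, the entity stores the scheduler list and the
             * priority and selects the run queue internally:
             */
            return drm_sched_entity_init(&my_entity, DRM_SCHED_PRIORITY_NORMAL,
                                         &my_sched, 1, NULL);
    }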
@@ -122,7 +122,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
         for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
                 struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
-                struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
+                struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
                 unsigned num_rings = 0;
                 unsigned num_rqs = 0;
 
@@ -181,12 +181,13 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
                         if (!rings[j]->adev)
                                 continue;
 
-                        rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
+                        sched_list[num_rqs++] = &rings[j]->sched;
                 }
 
                 for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
                         r = drm_sched_entity_init(&ctx->entities[i][j].entity,
-                                                  rqs, num_rqs, &ctx->guilty);
+                                                  priority, sched_list,
+                                                  num_rqs, &ctx->guilty);
                         if (r)
                                 goto error_cleanup_entities;
                 }
@@ -1987,11 +1987,13 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 
         if (enable) {
                 struct amdgpu_ring *ring;
-                struct drm_sched_rq *rq;
+                struct drm_gpu_scheduler *sched;
 
                 ring = adev->mman.buffer_funcs_ring;
-                rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-                r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
+                sched = &ring->sched;
+                r = drm_sched_entity_init(&adev->mman.entity,
+                                          DRM_SCHED_PRIORITY_KERNEL, &sched,
+                                          1, NULL);
                 if (r) {
                         DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
                                   r);
@@ -330,12 +330,13 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
 {
         struct amdgpu_ring *ring;
-        struct drm_sched_rq *rq;
+        struct drm_gpu_scheduler *sched;
         int r;
 
         ring = &adev->uvd.inst[0].ring;
-        rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-        r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+        sched = &ring->sched;
+        r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
+                                  &sched, 1, NULL);
         if (r) {
                 DRM_ERROR("Failed setting up UVD kernel entity.\n");
                 return r;
@@ -240,12 +240,13 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 int amdgpu_vce_entity_init(struct amdgpu_device *adev)
 {
         struct amdgpu_ring *ring;
-        struct drm_sched_rq *rq;
+        struct drm_gpu_scheduler *sched;
         int r;
 
         ring = &adev->vce.ring[0];
-        rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-        r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
+        sched = &ring->sched;
+        r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
+                                  &sched, 1, NULL);
         if (r != 0) {
                 DRM_ERROR("Failed setting up VCE run queue.\n");
                 return r;
@@ -2740,6 +2740,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 {
         struct amdgpu_bo_param bp;
         struct amdgpu_bo *root;
+        struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
         int r, i;
 
         vm->va = RB_ROOT_CACHED;
@@ -2753,14 +2754,19 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
         spin_lock_init(&vm->invalidated_lock);
         INIT_LIST_HEAD(&vm->freed);
 
+        for (i = 0; i < adev->vm_manager.vm_pte_num_rqs; i++)
+                sched_list[i] = adev->vm_manager.vm_pte_rqs[i]->sched;
+
         /* create scheduler entities for page table updates */
-        r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
-                                  adev->vm_manager.vm_pte_num_rqs, NULL);
+        r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
+                                  sched_list, adev->vm_manager.vm_pte_num_rqs,
+                                  NULL);
         if (r)
                 return r;
 
-        r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
-                                  adev->vm_manager.vm_pte_num_rqs, NULL);
+        r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
+                                  sched_list, adev->vm_manager.vm_pte_num_rqs,
+                                  NULL);
         if (r)
                 goto error_free_direct;
 
@@ -65,12 +65,13 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
 
         for (i = 0; i < ETNA_MAX_PIPES; i++) {
                 struct etnaviv_gpu *gpu = priv->gpu[i];
-                struct drm_sched_rq *rq;
+                struct drm_gpu_scheduler *sched;
 
                 if (gpu) {
-                        rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+                        sched = &gpu->sched;
                         drm_sched_entity_init(&ctx->sched_entity[i],
-                                              &rq, 1, NULL);
+                                              DRM_SCHED_PRIORITY_NORMAL, &sched,
+                                              1, NULL);
                 }
         }
 
@@ -159,9 +159,10 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe,
                             struct lima_sched_context *context,
                             atomic_t *guilty)
 {
-        struct drm_sched_rq *rq = pipe->base.sched_rq + DRM_SCHED_PRIORITY_NORMAL;
+        struct drm_gpu_scheduler *sched = &pipe->base;
 
-        return drm_sched_entity_init(&context->base, &rq, 1, guilty);
+        return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
+                                     &sched, 1, guilty);
 }
 
 void lima_sched_context_fini(struct lima_sched_pipe *pipe,
@@ -542,12 +542,14 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
 {
         struct panfrost_device *pfdev = panfrost_priv->pfdev;
         struct panfrost_job_slot *js = pfdev->js;
-        struct drm_sched_rq *rq;
+        struct drm_gpu_scheduler *sched;
         int ret, i;
 
         for (i = 0; i < NUM_JOB_SLOTS; i++) {
-                rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-                ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL);
+                sched = &js->queue[i].sched;
+                ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
+                                            DRM_SCHED_PRIORITY_NORMAL, &sched,
+                                            1, NULL);
                 if (WARN_ON(ret))
                         return ret;
         }
@@ -38,9 +38,10 @@
  * submit to HW ring.
  *
  * @entity: scheduler entity to init
- * @rq_list: the list of run queue on which jobs from this
+ * @priority: priority of the entity
+ * @sched_list: the list of drm scheds on which jobs from this
  *           entity can be submitted
- * @num_rq_list: number of run queue in rq_list
+ * @num_sched_list: number of drm sched in sched_list
  * @guilty: atomic_t set to 1 when a job on this queue
  *          is found to be guilty causing a timeout
  *
@@ -50,32 +51,35 @@
  * Returns 0 on success or a negative error code on failure.
  */
 int drm_sched_entity_init(struct drm_sched_entity *entity,
-                          struct drm_sched_rq **rq_list,
-                          unsigned int num_rq_list,
+                          enum drm_sched_priority priority,
+                          struct drm_gpu_scheduler **sched_list,
+                          unsigned int num_sched_list,
                           atomic_t *guilty)
 {
         int i;
 
-        if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
+        if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
                 return -EINVAL;
 
         memset(entity, 0, sizeof(struct drm_sched_entity));
         INIT_LIST_HEAD(&entity->list);
         entity->rq = NULL;
         entity->guilty = guilty;
-        entity->num_rq_list = num_rq_list;
-        entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
-                                  GFP_KERNEL);
-        if (!entity->rq_list)
+        entity->num_sched_list = num_sched_list;
+        entity->priority = priority;
+        entity->sched_list = kcalloc(num_sched_list,
+                                     sizeof(struct drm_gpu_scheduler *), GFP_KERNEL);
+
+        if(!entity->sched_list)
                 return -ENOMEM;
 
         init_completion(&entity->entity_idle);
 
-        for (i = 0; i < num_rq_list; ++i)
-                entity->rq_list[i] = rq_list[i];
+        for (i = 0; i < num_sched_list; i++)
+                entity->sched_list[i] = sched_list[i];
 
-        if (num_rq_list)
-                entity->rq = rq_list[0];
+        if (num_sched_list)
+                entity->rq = &entity->sched_list[0]->sched_rq[entity->priority];
 
         entity->last_scheduled = NULL;
 
@@ -139,10 +143,10 @@ drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
         unsigned int min_jobs = UINT_MAX, num_jobs;
         int i;
 
-        for (i = 0; i < entity->num_rq_list; ++i) {
-                struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;
+        for (i = 0; i < entity->num_sched_list; ++i) {
+                struct drm_gpu_scheduler *sched = entity->sched_list[i];
 
-                if (!entity->rq_list[i]->sched->ready) {
+                if (!entity->sched_list[i]->ready) {
                         DRM_WARN("sched%s is not ready, skipping", sched->name);
                         continue;
                 }
@@ -150,7 +154,7 @@ drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
                 num_jobs = atomic_read(&sched->num_jobs);
                 if (num_jobs < min_jobs) {
                         min_jobs = num_jobs;
-                        rq = entity->rq_list[i];
+                        rq = &entity->sched_list[i]->sched_rq[entity->priority];
                 }
         }
 
@@ -308,7 +312,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
 
         dma_fence_put(entity->last_scheduled);
         entity->last_scheduled = NULL;
-        kfree(entity->rq_list);
+        kfree(entity->sched_list);
 }
 EXPORT_SYMBOL(drm_sched_entity_fini);
 
@@ -353,15 +357,6 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
         drm_sched_wakeup(entity->rq->sched);
 }
 
-/**
- * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority
- */
-static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
-                                             enum drm_sched_priority priority)
-{
-        *rq = &(*rq)->sched->sched_rq[priority];
-}
-
 /**
  * drm_sched_entity_set_priority - Sets priority of the entity
  *
@@ -373,19 +368,8 @@ static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
 void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
                                    enum drm_sched_priority priority)
 {
-        unsigned int i;
-
         spin_lock(&entity->rq_lock);
+        entity->priority = priority;
-
-        for (i = 0; i < entity->num_rq_list; ++i)
-                drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);
-
-        if (entity->rq) {
-                drm_sched_rq_remove_entity(entity->rq, entity);
-                drm_sched_entity_set_rq_priority(&entity->rq, priority);
-                drm_sched_rq_add_entity(entity->rq, entity);
-        }
-
         spin_unlock(&entity->rq_lock);
 }
 EXPORT_SYMBOL(drm_sched_entity_set_priority);
@@ -490,20 +474,20 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
         struct dma_fence *fence;
         struct drm_sched_rq *rq;
 
-        if (spsc_queue_count(&entity->job_queue) || entity->num_rq_list <= 1)
+        if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
                 return;
 
         fence = READ_ONCE(entity->last_scheduled);
         if (fence && !dma_fence_is_signaled(fence))
                 return;
 
-        rq = drm_sched_entity_get_free_sched(entity);
-        if (rq == entity->rq)
-                return;
-
         spin_lock(&entity->rq_lock);
-        drm_sched_rq_remove_entity(entity->rq, entity);
-        entity->rq = rq;
+        rq = drm_sched_entity_get_free_sched(entity);
+        if (rq != entity->rq) {
+                drm_sched_rq_remove_entity(entity->rq, entity);
+                entity->rq = rq;
+        }
+
         spin_unlock(&entity->rq_lock);
 }
 
@@ -140,7 +140,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 {
         struct v3d_dev *v3d = to_v3d_dev(dev);
         struct v3d_file_priv *v3d_priv;
-        struct drm_sched_rq *rq;
+        struct drm_gpu_scheduler *sched;
         int i;
 
         v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -150,8 +150,10 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
         v3d_priv->v3d = v3d;
 
         for (i = 0; i < V3D_MAX_QUEUES; i++) {
-                rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-                drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
+                sched = &v3d->queue[i].sched;
+                drm_sched_entity_init(&v3d_priv->sched_entity[i],
+                                      DRM_SCHED_PRIORITY_NORMAL, &sched,
+                                      1, NULL);
         }
 
         file->driver_priv = v3d_priv;
@@ -81,8 +81,9 @@ enum drm_sched_priority {
 struct drm_sched_entity {
         struct list_head                list;
         struct drm_sched_rq             *rq;
-        struct drm_sched_rq             **rq_list;
-        unsigned int                    num_rq_list;
+        unsigned int                    num_sched_list;
+        struct drm_gpu_scheduler        **sched_list;
+        enum drm_sched_priority         priority;
         spinlock_t                      rq_lock;
 
         struct spsc_queue               job_queue;
@@ -312,7 +313,8 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
                                 struct drm_sched_entity *entity);
 
 int drm_sched_entity_init(struct drm_sched_entity *entity,
-                          struct drm_sched_rq **rq_list,
+                          enum drm_sched_priority priority,
+                          struct drm_gpu_scheduler **sched_list,
                           unsigned int num_rq_list,
                           atomic_t *guilty);
 long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);