drm/sched: drop entity parameter from drm_sched_push_job
Originally a job was only bound to the queue when we pushed this, but
now that's done in drm_sched_job_init, making that parameter entirely
redundant.

Remove it. The same applies to the context parameter in
lima_sched_context_queue_task, simplify that too.

v2: Rebase on top of msm adopting drm/sched

Reviewed-by: Christian König <christian.koenig@amd.com>
Acked-by: Emma Anholt <emma@anholt.net>
Acked-by: Melissa Wen <mwen@igalia.com>
Reviewed-by: Steven Price <steven.price@arm.com> (v1)
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com> (v1)
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Lucas Stach <l.stach@pengutronix.de>
Cc: Russell King <linux+etnaviv@armlinux.org.uk>
Cc: Christian Gmeiner <christian.gmeiner@gmail.com>
Cc: Qiang Yu <yuq825@gmail.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Cc: Steven Price <steven.price@arm.com>
Cc: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Cc: Emma Anholt <emma@anholt.net>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Nirmoy Das <nirmoy.das@amd.com>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Chen Li <chenli@uniontech.com>
Cc: Lee Jones <lee.jones@linaro.org>
Cc: Deepak R Varma <mh12gx2825@gmail.com>
Cc: Kevin Wang <kevin1.wang@amd.com>
Cc: Luben Tuikov <luben.tuikov@amd.com>
Cc: "Marek Olšák" <marek.olsak@amd.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Cc: Dennis Li <Dennis.Li@amd.com>
Cc: Boris Brezillon <boris.brezillon@collabora.com>
Cc: etnaviv@lists.freedesktop.org
Cc: lima@lists.freedesktop.org
Cc: linux-media@vger.kernel.org
Cc: linaro-mm-sig@lists.linaro.org
Cc: Rob Clark <robdclark@gmail.com>
Cc: Sean Paul <sean@poorly.run>
Cc: Melissa Wen <mwen@igalia.com>
Cc: linux-arm-msm@vger.kernel.org
Cc: freedreno@lists.freedesktop.org
Link: https://patchwork.freedesktop.org/patch/msgid/20210805104705.862416-6-daniel.vetter@ffwll.ch
parent ebd5f74255
commit 0e10e9a1db
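Before the per-driver hunks, here is the shape of the new submission flow from a driver's point of view. This is an illustrative sketch only: the helper name example_submit and its arguments are made up, and real drivers do their own dependency and fence handling around these calls; the three scheduler entry points and their signatures are as used in this patch series.

	#include <drm/gpu_scheduler.h>

	static int example_submit(struct drm_sched_job *job,
				  struct drm_sched_entity *entity, void *owner)
	{
		int ret;

		/* The job is bound to its entity (and run queue) once, here. */
		ret = drm_sched_job_init(job, entity, owner);
		if (ret)
			return ret;

		/* Arm the job so its scheduler fence gets its sequence number. */
		drm_sched_job_arm(job);

		/* ... stash the finished fence, trace, etc. ... */

		/* No entity argument anymore: it is looked up from the job. */
		drm_sched_entity_push_job(job);
		return 0;
	}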
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1261,7 +1261,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
 	trace_amdgpu_cs_ioctl(job);
 	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
-	drm_sched_entity_push_job(&job->base, entity);
+	drm_sched_entity_push_job(&job->base);
 
 	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -174,7 +174,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 
 	*f = dma_fence_get(&job->base.s_fence->finished);
 	amdgpu_job_free_resources(job);
-	drm_sched_entity_push_job(&job->base, entity);
+	drm_sched_entity_push_job(&job->base);
 
 	return 0;
 }
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -178,7 +178,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
 	/* the scheduler holds on to the job now */
 	kref_get(&submit->refcount);
 
-	drm_sched_entity_push_job(&submit->sched_job, sched_entity);
+	drm_sched_entity_push_job(&submit->sched_job);
 
 out_unlock:
 	mutex_unlock(&submit->gpu->fence_lock);
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -359,8 +359,7 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
 		goto err_out2;
 	}
 
-	fence = lima_sched_context_queue_task(
-		submit->ctx->context + submit->pipe, submit->task);
+	fence = lima_sched_context_queue_task(submit->task);
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -177,13 +177,12 @@ void lima_sched_context_fini(struct lima_sched_pipe *pipe,
 	drm_sched_entity_fini(&context->base);
 }
 
-struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
-						struct lima_sched_task *task)
+struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task)
 {
 	struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);
 
 	trace_lima_task_submit(task);
-	drm_sched_entity_push_job(&task->base, &context->base);
+	drm_sched_entity_push_job(&task->base);
 	return fence;
 }
 
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -98,8 +98,7 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe,
 			    atomic_t *guilty);
 void lima_sched_context_fini(struct lima_sched_pipe *pipe,
 			     struct lima_sched_context *context);
-struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
-						struct lima_sched_task *task);
+struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task);
 
 int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name);
 void lima_sched_pipe_fini(struct lima_sched_pipe *pipe);
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -908,7 +908,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	/* The scheduler owns a ref now: */
 	msm_gem_submit_get(submit);
 
-	drm_sched_entity_push_job(&submit->base, &queue->entity);
+	drm_sched_entity_push_job(&submit->base);
 
 	args->fence = submit->fence_id;
 
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -301,7 +301,7 @@ int panfrost_job_push(struct panfrost_job *job)
 
 	kref_get(&job->refcount); /* put by scheduler job completion */
 
-	drm_sched_entity_push_job(&job->base, entity);
+	drm_sched_entity_push_job(&job->base);
 
 	mutex_unlock(&pfdev->sched_lock);
 
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -516,9 +516,7 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
 
 /**
  * drm_sched_entity_push_job - Submit a job to the entity's job queue
- *
  * @sched_job: job to submit
- * @entity: scheduler entity
  *
  * Note: To guarantee that the order of insertion to queue matches the job's
  * fence sequence number this function should be called with drm_sched_job_arm()
@@ -526,9 +524,9 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
  *
  * Returns 0 for success, negative error code otherwise.
  */
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
-			       struct drm_sched_entity *entity)
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 {
+	struct drm_sched_entity *entity = sched_job->entity;
 	bool first;
 
 	trace_drm_sched_job(sched_job, entity);
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -487,7 +487,7 @@ v3d_push_job(struct v3d_file_priv *v3d_priv,
 	/* put by scheduler job completion */
 	kref_get(&job->refcount);
 
-	drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[queue]);
+	drm_sched_entity_push_job(&job->base);
 
 	return 0;
 }
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -409,8 +409,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity);
 void drm_sched_entity_destroy(struct drm_sched_entity *entity);
 void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
 struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
-			       struct drm_sched_entity *entity);
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
 void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
 				   enum drm_sched_priority priority);
 bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
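Worth noting about the scheduler-core hunk above: the entity pointer dropped from the call is not lost, it is read back from the job, since drm_sched_job_init() already binds the job to its entity (that is what the commit message means by the binding now happening at init time, and what the added sched_job->entity line relies on). A toy model of that relationship, using made-up names so it is not mistaken for the kernel definitions:

	/* Toy sketch, not kernel code: why push no longer needs the entity. */
	struct toy_entity;

	struct toy_job {
		struct toy_entity *entity;	/* remembered at init time */
	};

	static void toy_job_init(struct toy_job *job, struct toy_entity *entity)
	{
		job->entity = entity;		/* the binding push used to repeat */
	}

	static void toy_push_job(struct toy_job *job)
	{
		/* counterpart of "entity = sched_job->entity;" in the hunk above */
		struct toy_entity *entity = job->entity;

		(void)entity;			/* queue the job on this entity */
	}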