drm/xe: Add helper to accumulate exec queue runtime

Add a helper to accumulate a client's runtime across all of its
exec queues. It is called every time a sched job is finished.

v2:
  - Use guc_exec_queue_free_job() and execlist_job_free() to accumulate
    runtime when a job is finished, since xe_sched_job_completed() is not
    a notification that the job has finished.
  - Stop trying to update runtime from xe_exec_queue_fini() - that is
    redundant and may happen after xef is closed, leading to a
    use-after-free
  - Do not special case the first timestamp read: the default LRC sets
    CTX_TIMESTAMP to zero, so even the first sample should be a valid
    one.
  - Handle the parallel submission case by multiplying the runtime by
    width.
v3: Update comments
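
As an aside on the width compensation mentioned in v2 above: the helper
accumulates (new_ts - old_ts) * q->width, and the u32 subtraction keeps
the delta correct even across a 32-bit CTX_TIMESTAMP wrap. A minimal
illustrative sketch (not part of the patch; sample values are made up):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical samples either side of a timestamp wrap. */
		uint32_t old_ts = 0xfffffff0;
		uint32_t new_ts = 0x00000010;
		uint32_t width = 2;	/* two LRCs scheduled together */

		/*
		 * Unsigned subtraction wraps modulo 2^32, so the delta is
		 * still the elapsed ticks: 0x20 == 32 here, despite the wrap.
		 */
		uint64_t runtime = (uint64_t)(new_ts - old_ts) * width;

		printf("accumulated %llu ticks\n", (unsigned long long)runtime);
		return 0;
	}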

Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240517204310.88854-6-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
commit 6109f24f87 (parent f2f6b667c6)
Author:    Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
Date:      2024-05-17 13:43:07 -07:00
Committer: Lucas De Marchi <lucas.demarchi@intel.com>
5 files changed, 44 insertions(+), 0 deletions(-)

--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h

@@ -559,6 +559,9 @@ struct xe_file {
 		struct mutex lock;
 	} exec_queue;
 
+	/** @runtime: hw engine class runtime in ticks for this drm client */
+	u64 runtime[XE_ENGINE_CLASS_MAX];
+
 	/** @client: drm client */
 	struct xe_drm_client *client;
 };

--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c

@@ -769,6 +769,43 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
 		q->lrc[0].fence_ctx.next_seqno - 1;
 }
 
+/**
+ * xe_exec_queue_update_runtime() - Update runtime for this exec queue from hw
+ * @q: The exec queue
+ *
+ * Update the timestamp saved by HW for this exec queue and save runtime
+ * calculated by using the delta from last update. On multi-lrc case, only the
+ * first is considered.
+ */
+void xe_exec_queue_update_runtime(struct xe_exec_queue *q)
+{
+	struct xe_file *xef;
+	struct xe_lrc *lrc;
+	u32 old_ts, new_ts;
+
+	/*
+	 * Jobs that are run during driver load may use an exec_queue, but are
+	 * not associated with a user xe file, so avoid accumulating busyness
+	 * for kernel specific work.
+	 */
+	if (!q->vm || !q->vm->xef)
+		return;
+
+	xef = q->vm->xef;
+
+	/*
+	 * Only sample the first LRC. For parallel submission, all of them are
+	 * scheduled together and we compensate that below by multiplying by
+	 * width - this may introduce errors if that premise is not true and
+	 * they don't exit 100% aligned. On the other hand, looping through
+	 * the LRCs and reading them in different time could also introduce
+	 * errors.
+	 */
+	lrc = &q->lrc[0];
+	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
+	xef->runtime[q->class] += (new_ts - old_ts) * q->width;
+}
+
 void xe_exec_queue_kill(struct xe_exec_queue *q)
 {
 	struct xe_exec_queue *eq = q, *next;
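
For context, xe_lrc_update_timestamp() is added earlier in this series and
is not part of this diff. A minimal sketch of its contract, inferred from
the call site above (the body and the xe_lrc_ctx_timestamp() read are
assumptions, not the actual implementation):

	/*
	 * Sketch: return the latest CTX_TIMESTAMP value for the LRC and
	 * report the previously saved value through @old_ts, so the caller
	 * can compute the delta since the last sample. xe_lrc_ctx_timestamp()
	 * is assumed to read the timestamp the HW saved in the LRC state.
	 */
	u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts)
	{
		*old_ts = lrc->ctx_timestamp;
		lrc->ctx_timestamp = xe_lrc_ctx_timestamp(lrc);
		return lrc->ctx_timestamp;
	}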

--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h

@@ -75,5 +75,6 @@ struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *e,
 					       struct xe_vm *vm);
 void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
 				  struct dma_fence *fence);
+void xe_exec_queue_update_runtime(struct xe_exec_queue *q);
 
 #endif

--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c

@@ -306,6 +306,7 @@ static void execlist_job_free(struct drm_sched_job *drm_job)
 {
 	struct xe_sched_job *job = to_xe_sched_job(drm_job);
 
+	xe_exec_queue_update_runtime(job->q);
 	xe_sched_job_put(job);
 }
 

--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c

@@ -749,6 +749,8 @@ static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
 {
 	struct xe_sched_job *job = to_xe_sched_job(drm_job);
 
+	xe_exec_queue_update_runtime(job->q);
+
 	trace_xe_sched_job_free(job);
 	xe_sched_job_put(job);
 }
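
For orientation, both functions patched above serve as the free_job hook of
their scheduler, which is why the timestamp is sampled exactly once per
finished job. A paraphrase of the assumed wiring on the execlist side (the
ops table layout is an assumption; only the free_job name comes from this
diff):

	static const struct drm_sched_backend_ops drm_sched_ops = {
		.run_job  = execlist_run_job,	/* assumed counterpart */
		.free_job = execlist_job_free,	/* samples runtime, drops job */
	};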