mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 06:01:57 +00:00
drm/xe: Add timeout to preempt fences
To adhere to dma fencing rules that fences must signal within a reasonable amount of time, add a 5 second timeout to preempt fences. If this timeout occurs, kill the associated VM as this is fatal to the VM. v2: - Add comment for smp_wmb (Checkpatch) - Fix kernel doc typo (Inspection) - Add comment for killed check (Niranjana) v3: - Drop smp_wmb (Matthew Auld) - Don't take vm->lock in preempt fence worker (Matthew Auld) - Drop RB given changes to patch v4: - Add WRITE/READ_ONCE (Niranjana) - Don't export xe_vm_kill (Niranjana) Cc: Matthew Auld <matthew.auld@intel.com> Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com> Signed-off-by: Matthew Brost <matthew.brost@intel.com> Tested-by: Stuart Summers <stuart.summers@intel.com> Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20240626004137.4060806-1-matthew.brost@intel.com
This commit is contained in:
parent
7c0389c615
commit
627c961d67
@ -172,9 +172,11 @@ struct xe_exec_queue_ops {
|
||||
int (*suspend)(struct xe_exec_queue *q);
|
||||
/**
|
||||
* @suspend_wait: Wait for an exec queue to suspend executing, should be
|
||||
* call after suspend.
|
||||
* call after suspend. In dma-fencing path thus must return within a
|
||||
* reasonable amount of time. -ETIME return shall indicate an error
|
||||
* waiting for suspend resulting in associated VM getting killed.
|
||||
*/
|
||||
void (*suspend_wait)(struct xe_exec_queue *q);
|
||||
int (*suspend_wait)(struct xe_exec_queue *q);
|
||||
/**
|
||||
* @resume: Resume exec queue execution, exec queue must be in a suspended
|
||||
* state and dma fence returned from most recent suspend call must be
|
||||
|
@ -422,10 +422,11 @@ static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
|
||||
static int execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
|
||||
|
||||
{
|
||||
/* NIY */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void execlist_exec_queue_resume(struct xe_exec_queue *q)
|
||||
|
@ -1301,6 +1301,15 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
|
||||
kfree(msg);
|
||||
}
|
||||
|
||||
static void __suspend_fence_signal(struct xe_exec_queue *q)
|
||||
{
|
||||
if (!q->guc->suspend_pending)
|
||||
return;
|
||||
|
||||
WRITE_ONCE(q->guc->suspend_pending, false);
|
||||
wake_up(&q->guc->suspend_wait);
|
||||
}
|
||||
|
||||
static void suspend_fence_signal(struct xe_exec_queue *q)
|
||||
{
|
||||
struct xe_guc *guc = exec_queue_to_guc(q);
|
||||
@ -1310,9 +1319,7 @@ static void suspend_fence_signal(struct xe_exec_queue *q)
|
||||
guc_read_stopped(guc));
|
||||
xe_assert(xe, q->guc->suspend_pending);
|
||||
|
||||
q->guc->suspend_pending = false;
|
||||
smp_wmb();
|
||||
wake_up(&q->guc->suspend_wait);
|
||||
__suspend_fence_signal(q);
|
||||
}
|
||||
|
||||
static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
|
||||
@ -1465,6 +1472,7 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
|
||||
{
|
||||
trace_xe_exec_queue_kill(q);
|
||||
set_exec_queue_killed(q);
|
||||
__suspend_fence_signal(q);
|
||||
xe_guc_exec_queue_trigger_cleanup(q);
|
||||
}
|
||||
|
||||
@ -1561,12 +1569,31 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
|
||||
static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
|
||||
{
|
||||
struct xe_guc *guc = exec_queue_to_guc(q);
|
||||
int ret;
|
||||
|
||||
wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
|
||||
guc_read_stopped(guc));
|
||||
/*
|
||||
* Likely don't need to check exec_queue_killed() as we clear
|
||||
* suspend_pending upon kill but to be paranoid but races in which
|
||||
* suspend_pending is set after kill also check kill here.
|
||||
*/
|
||||
ret = wait_event_timeout(q->guc->suspend_wait,
|
||||
!READ_ONCE(q->guc->suspend_pending) ||
|
||||
exec_queue_killed(q) ||
|
||||
guc_read_stopped(guc),
|
||||
HZ * 5);
|
||||
|
||||
if (!ret) {
|
||||
xe_gt_warn(guc_to_gt(guc),
|
||||
"Suspend fence, guc_id=%d, failed to respond",
|
||||
q->guc->id);
|
||||
/* XXX: Trigger GT reset? */
|
||||
return -ETIME;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void guc_exec_queue_resume(struct xe_exec_queue *q)
|
||||
|
@ -17,10 +17,16 @@ static void preempt_fence_work_func(struct work_struct *w)
|
||||
container_of(w, typeof(*pfence), preempt_work);
|
||||
struct xe_exec_queue *q = pfence->q;
|
||||
|
||||
if (pfence->error)
|
||||
if (pfence->error) {
|
||||
dma_fence_set_error(&pfence->base, pfence->error);
|
||||
else
|
||||
q->ops->suspend_wait(q);
|
||||
} else if (!q->ops->reset_status(q)) {
|
||||
int err = q->ops->suspend_wait(q);
|
||||
|
||||
if (err)
|
||||
dma_fence_set_error(&pfence->base, err);
|
||||
} else {
|
||||
dma_fence_set_error(&pfence->base, -ENOENT);
|
||||
}
|
||||
|
||||
dma_fence_signal(&pfence->base);
|
||||
/*
|
||||
|
@ -133,8 +133,10 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm)
|
||||
if (q->lr.pfence) {
|
||||
long timeout = dma_fence_wait(q->lr.pfence, false);
|
||||
|
||||
if (timeout < 0)
|
||||
/* Only -ETIME on fence indicates VM needs to be killed */
|
||||
if (timeout < 0 || q->lr.pfence->error == -ETIME)
|
||||
return -ETIME;
|
||||
|
||||
dma_fence_put(q->lr.pfence);
|
||||
q->lr.pfence = NULL;
|
||||
}
|
||||
@ -311,6 +313,14 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
|
||||
|
||||
#define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
|
||||
|
||||
/*
|
||||
* xe_vm_kill() - VM Kill
|
||||
* @vm: The VM.
|
||||
* @unlocked: Flag indicates the VM's dma-resv is not held
|
||||
*
|
||||
* Kill the VM by setting banned flag indicated VM is no longer available for
|
||||
* use. If in preempt fence mode, also kill all exec queue attached to the VM.
|
||||
*/
|
||||
static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
|
||||
{
|
||||
struct xe_exec_queue *q;
|
||||
|
Loading…
Reference in New Issue
Block a user