
Merge tag 'drm-xe-next-fixes-2024-07-18' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next

- Xe_exec ioctl minor fix on sync entry cleanup upon error (Ashutosh)
- SRIOV: limit VF LMEM provisioning (Michal)
- Wedge mode fixes (Brost)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/Zpk6CI0FDoTJwkSb@intel.com
Commit: 78e6e468e1
Committed by: Dave Airlie <airlied@redhat.com>, 2024-07-22 11:51:48 +10:00
11 changed files with 107 additions and 25 deletions

--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -854,6 +854,13 @@ u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
 	return address & GENMASK_ULL(xe->info.va_bits - 1, 0);
 }
 
+static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
+{
+	struct xe_device *xe = arg;
+
+	xe_pm_runtime_put(xe);
+}
+
 /**
  * xe_device_declare_wedged - Declare device wedged
  * @xe: xe device instance
@@ -870,11 +877,21 @@ u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
  */
 void xe_device_declare_wedged(struct xe_device *xe)
 {
+	struct xe_gt *gt;
+	u8 id;
+
 	if (xe->wedged.mode == 0) {
 		drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n");
 		return;
 	}
 
+	if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) {
+		drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n");
+		return;
+	}
+
+	xe_pm_runtime_get_noresume(xe);
+
 	if (!atomic_xchg(&xe->wedged.flag, 1)) {
 		xe->needs_flr_on_fini = true;
 		drm_err(&xe->drm,
@@ -883,4 +900,7 @@ void xe_device_declare_wedged(struct xe_device *xe)
 			"Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
 			dev_name(xe->drm.dev));
 	}
+
+	for_each_gt(gt, xe, id)
+		xe_gt_declare_wedged(gt);
 }
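
Note how the hunk above registers the drmm release action before taking the runtime-PM reference, so nothing leaks if registration fails and the reference is guaranteed to be dropped at device teardown. A minimal sketch of that managed-cleanup pattern for a hypothetical driver (my_dev, my_dev_declare_wedged and my_dev_wedged_fini are illustrative names, not part of the xe patch):

#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <drm/drm_device.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>

/* Hypothetical driver state, standing in for struct xe_device. */
struct my_dev {
	struct drm_device drm;
	atomic_t wedged;
};

/* DRMM release action: runs once when the drm_device is torn down. */
static void my_dev_wedged_fini(struct drm_device *drm, void *arg)
{
	struct my_dev *mdev = arg;

	/* Drop the reference taken in my_dev_declare_wedged(). */
	pm_runtime_put(mdev->drm.dev);
}

static void my_dev_declare_wedged(struct my_dev *mdev)
{
	/* Register the clean-up first; bail out without a reference on failure. */
	if (drmm_add_action_or_reset(&mdev->drm, my_dev_wedged_fini, mdev)) {
		drm_err(&mdev->drm, "failed to register wedged clean-up\n");
		return;
	}

	/* Keep the device from runtime suspending, without resuming it here. */
	pm_runtime_get_noresume(mdev->drm.dev);

	atomic_set(&mdev->wedged, 1);
}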

--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -118,7 +118,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
 	struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
 	struct drm_exec *exec = &vm_exec.exec;
-	u32 i, num_syncs = 0, num_ufence = 0;
+	u32 i, num_syncs, num_ufence = 0;
 	struct xe_sched_job *job;
 	struct xe_vm *vm;
 	bool write_locked, skip_retry = false;
@@ -156,15 +156,15 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
 	vm = q->vm;
 
-	for (i = 0; i < args->num_syncs; i++) {
-		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++],
-					  &syncs_user[i], SYNC_PARSE_FLAG_EXEC |
+	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
+		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
+					  &syncs_user[num_syncs], SYNC_PARSE_FLAG_EXEC |
 					  (xe_vm_in_lr_mode(vm) ?
 					   SYNC_PARSE_FLAG_LR_MODE : 0));
 		if (err)
 			goto err_syncs;
 
-		if (xe_sync_is_ufence(&syncs[i]))
+		if (xe_sync_is_ufence(&syncs[num_syncs]))
 			num_ufence++;
 	}
 
@@ -325,8 +325,8 @@ err_unlock_list:
 	if (err == -EAGAIN && !skip_retry)
 		goto retry;
 err_syncs:
-	for (i = 0; i < num_syncs; i++)
-		xe_sync_entry_cleanup(&syncs[i]);
+	while (num_syncs--)
+		xe_sync_entry_cleanup(&syncs[num_syncs]);
 	kfree(syncs);
 err_exec_queue:
 	xe_exec_queue_put(q);
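
The xe_exec change above drops the second counter and unwinds with while (num_syncs--), so the error path cleans exactly the entries that xe_sync_entry_parse() succeeded on, no more and no fewer. A standalone sketch of that unwind-on-partial-failure idiom (item, item_init and item_fini are made-up names for illustration):

#include <stdio.h>

struct item { int initialised; };

/* Pretend-init that fails on the third element. */
static int item_init(struct item *it, unsigned int idx)
{
	if (idx == 2)
		return -1;
	it->initialised = 1;
	return 0;
}

static void item_fini(struct item *it)
{
	it->initialised = 0;
}

int main(void)
{
	struct item items[5] = {0};
	unsigned int n;
	int err;

	for (n = 0; n < 5; n++) {
		err = item_init(&items[n], n);
		if (err)
			goto err_items;
	}
	return 0;

err_items:
	/* Unwind only the slots that were initialised: n counts exactly those. */
	while (n--)
		item_fini(&items[n]);
	printf("cleaned up after failure, err=%d\n", err);
	return 1;
}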

--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -904,3 +904,18 @@ struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt)
 
 	return NULL;
 }
+
+/**
+ * xe_gt_declare_wedged() - Declare GT wedged
+ * @gt: the GT object
+ *
+ * Wedge the GT which stops all submission, saves desired debug state, and
+ * cleans up anything which could timeout.
+ */
+void xe_gt_declare_wedged(struct xe_gt *gt)
+{
+	xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode);
+
+	xe_uc_declare_wedged(&gt->uc);
+	xe_gt_tlb_invalidation_reset(gt);
+}

--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -37,6 +37,7 @@ struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
 int xe_gt_init_hwconfig(struct xe_gt *gt);
 int xe_gt_init_early(struct xe_gt *gt);
 int xe_gt_init(struct xe_gt *gt);
+void xe_gt_declare_wedged(struct xe_gt *gt);
 int xe_gt_record_default_lrcs(struct xe_gt *gt);
 
 /**

--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -1543,6 +1543,7 @@ static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
 	u64 fair;
 
 	fair = div_u64(available, num_vfs);
+	fair = rounddown_pow_of_two(fair);	/* XXX: ttm_vram_mgr & drm_buddy limitation */
 	fair = ALIGN_DOWN(fair, alignment);
 #ifdef MAX_FAIR_LMEM
 	fair = min_t(u64, MAX_FAIR_LMEM, fair);
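
For reference, the provisioning math in pf_estimate_fair_lmem() now runs divide, round down to a power of two (the ttm_vram_mgr/drm_buddy limitation flagged by the XXX comment), then align down. A standalone sketch with purely illustrative numbers; rounddown_pow2() and align_down() below are simplified stand-ins for the kernel's rounddown_pow_of_two() and ALIGN_DOWN():

#include <stdint.h>
#include <stdio.h>

/* Round v down to the nearest power of two (v > 0). */
static uint64_t rounddown_pow2(uint64_t v)
{
	uint64_t p = 1;

	while (p <= v / 2)
		p <<= 1;
	return p;
}

/* Align v down to a multiple of a (a is a power of two). */
static uint64_t align_down(uint64_t v, uint64_t a)
{
	return v & ~(a - 1);
}

int main(void)
{
	/* Illustrative numbers only: 30 GiB spare LMEM, 3 VFs, 64 MiB alignment. */
	const uint64_t gib = 1024ull * 1024 * 1024;
	uint64_t available = 30 * gib;
	unsigned int num_vfs = 3;
	uint64_t alignment = 64ull * 1024 * 1024;
	uint64_t fair;

	fair = available / num_vfs;         /* 10 GiB per VF */
	fair = rounddown_pow2(fair);        /* 8 GiB: buddy-allocator friendly */
	fair = align_down(fair, alignment); /* already aligned in this case */

	printf("fair LMEM per VF: %llu MiB\n",
	       (unsigned long long)(fair / (1024 * 1024)));
	return 0;
}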

--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -1178,3 +1178,19 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
 	xe_guc_ct_print(&guc->ct, p, false);
 	xe_guc_submit_print(guc, p);
 }
+
+/**
+ * xe_guc_declare_wedged() - Declare GuC wedged
+ * @guc: the GuC object
+ *
+ * Wedge the GuC which stops all submission, saves desired debug state, and
+ * cleans up anything which could timeout.
+ */
+void xe_guc_declare_wedged(struct xe_guc *guc)
+{
+	xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);
+
+	xe_guc_reset_prepare(guc);
+	xe_guc_ct_stop(&guc->ct);
+	xe_guc_submit_wedge(guc);
+}

--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -37,6 +37,7 @@ void xe_guc_reset_wait(struct xe_guc *guc);
 void xe_guc_stop_prepare(struct xe_guc *guc);
 void xe_guc_stop(struct xe_guc *guc);
 int xe_guc_start(struct xe_guc *guc);
+void xe_guc_declare_wedged(struct xe_guc *guc);
 
 static inline u16 xe_engine_class_to_guc_class(enum xe_engine_class class)
 {

--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -861,13 +861,40 @@ static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
 	xe_sched_tdr_queue_imm(&q->guc->sched);
 }
 
-static bool guc_submit_hint_wedged(struct xe_guc *guc)
+/**
+ * xe_guc_submit_wedge() - Wedge GuC submission
+ * @guc: the GuC object
+ *
+ * Save exec queue's registered with GuC state by taking a ref to each queue.
+ * Register a DRMM handler to drop refs upon driver unload.
+ */
+void xe_guc_submit_wedge(struct xe_guc *guc)
 {
 	struct xe_device *xe = guc_to_xe(guc);
 	struct xe_exec_queue *q;
 	unsigned long index;
 	int err;
 
+	xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);
+
+	err = drmm_add_action_or_reset(&guc_to_xe(guc)->drm,
+				       guc_submit_wedged_fini, guc);
+	if (err) {
+		drm_err(&xe->drm, "Failed to register xe_guc_submit clean-up on wedged.mode=2. Although device is wedged.\n");
+		return;
+	}
+
+	mutex_lock(&guc->submission_state.lock);
+	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+		if (xe_exec_queue_get_unless_zero(q))
+			set_exec_queue_wedged(q);
+	mutex_unlock(&guc->submission_state.lock);
+}
+
+static bool guc_submit_hint_wedged(struct xe_guc *guc)
+{
+	struct xe_device *xe = guc_to_xe(guc);
+
 	if (xe->wedged.mode != 2)
 		return false;
 
@@ -876,22 +903,6 @@ static bool guc_submit_hint_wedged(struct xe_guc *guc)
 
 	xe_device_declare_wedged(xe);
 
-	xe_guc_submit_reset_prepare(guc);
-	xe_guc_ct_stop(&guc->ct);
-
-	err = drmm_add_action_or_reset(&guc_to_xe(guc)->drm,
-				       guc_submit_wedged_fini, guc);
-	if (err) {
-		drm_err(&xe->drm, "Failed to register xe_guc_submit clean-up on wedged.mode=2. Although device is wedged.\n");
-		return true; /* Device is wedged anyway */
-	}
-
-	mutex_lock(&guc->submission_state.lock);
-	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
-		if (xe_exec_queue_get_unless_zero(q))
-			set_exec_queue_wedged(q);
-	mutex_unlock(&guc->submission_state.lock);
-
 	return true;
 }
 
@@ -1677,7 +1688,8 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc)
 
 void xe_guc_submit_reset_wait(struct xe_guc *guc)
 {
-	wait_event(guc->ct.wq, !guc_read_stopped(guc));
+	wait_event(guc->ct.wq, xe_device_wedged(guc_to_xe(guc)) ||
+		   !guc_read_stopped(guc));
 }
 
 void xe_guc_submit_stop(struct xe_guc *guc)
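
xe_guc_submit_wedge() above pins every exec queue it can still get a reference on and defers the matching puts to a DRMM action, so queue state stays available for debugging until driver unload. A sketch of what such a paired release action could look like for a hypothetical refcounted lookup table (illustrative only; the body of guc_submit_wedged_fini() is not shown in this diff):

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include <drm/drm_device.h>
#include <drm/drm_managed.h>

/* Hypothetical table of refcounted queues, standing in for GuC submission state. */
struct my_queue {
	struct kref ref;
	bool wedged;
};

struct my_table {
	struct drm_device *drm;
	struct mutex lock;
	struct xarray queues;	/* values are struct my_queue * */
};

static void my_queue_release(struct kref *ref)
{
	kfree(container_of(ref, struct my_queue, ref));
}

/* DRMM action: drop the references taken when the table was wedged. */
static void my_table_wedged_fini(struct drm_device *drm, void *arg)
{
	struct my_table *t = arg;
	struct my_queue *q;
	unsigned long index;

	mutex_lock(&t->lock);
	xa_for_each(&t->queues, index, q)
		if (q->wedged)
			kref_put(&q->ref, my_queue_release);
	mutex_unlock(&t->lock);
}

static void my_table_wedge(struct my_table *t)
{
	struct my_queue *q;
	unsigned long index;

	/* Register the release action first, so the refs below cannot leak. */
	if (drmm_add_action_or_reset(t->drm, my_table_wedged_fini, t))
		return;

	mutex_lock(&t->lock);
	xa_for_each(&t->queues, index, q)
		if (kref_get_unless_zero(&q->ref))
			q->wedged = true;
	mutex_unlock(&t->lock);
}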

--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -18,6 +18,7 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc);
 void xe_guc_submit_reset_wait(struct xe_guc *guc);
 void xe_guc_submit_stop(struct xe_guc *guc);
 int xe_guc_submit_start(struct xe_guc *guc);
+void xe_guc_submit_wedge(struct xe_guc *guc);
 
 int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
 int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len);

--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -300,3 +300,17 @@ void xe_uc_remove(struct xe_uc *uc)
 {
 	xe_gsc_remove(&uc->gsc);
 }
+
+/**
+ * xe_uc_declare_wedged() - Declare UC wedged
+ * @uc: the UC object
+ *
+ * Wedge the UC which stops all submission, saves desired debug state, and
+ * cleans up anything which could timeout.
+ */
+void xe_uc_declare_wedged(struct xe_uc *uc)
+{
+	xe_gt_assert(uc_to_gt(uc), uc_to_xe(uc)->wedged.mode);
+
+	xe_guc_declare_wedged(&uc->guc);
+}

--- a/drivers/gpu/drm/xe/xe_uc.h
+++ b/drivers/gpu/drm/xe/xe_uc.h
@@ -21,5 +21,6 @@ int xe_uc_start(struct xe_uc *uc);
 int xe_uc_suspend(struct xe_uc *uc);
 int xe_uc_sanitize_reset(struct xe_uc *uc);
 void xe_uc_remove(struct xe_uc *uc);
+void xe_uc_declare_wedged(struct xe_uc *uc);
 
 #endif