drm/xe: Hold a PM ref when GT TLB invalidations are inflight

Avoid GT TLB invalidation timeouts by holding a PM ref while invalidations
are inflight, so the device cannot be runtime suspended before they complete.

v2:
 - Drop PM ref before signaling fence (CI)
v3:
 - Move invalidation_fence_signal helper in tlb timeout to previous
   patch (Matthew Auld)

Fixes: dd08ebf6c3 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Nirmoy Das <nirmoy.das@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240719172905.1527927-4-matthew.brost@intel.com
commit 0a382f9bc5 (parent 61ac035361)
Author: Matthew Brost <matthew.brost@intel.com>
Date:   2024-07-19 10:29:04 -07:00

4 changed files with 29 additions and 3 deletions
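
Before the diff itself, here is a minimal userspace sketch of the reference-lifetime rule the patch introduces: fence init takes a PM-style reference, and that reference is dropped exactly once, either by an explicit fini on error paths or just before the fence signals (per the v2 note). This is only an illustration of the pattern under those assumptions, not the xe driver's code; every identifier in it (pm_get(), pm_put(), struct toy_fence, submit_invalidation(), and the toy_fence_* helpers) is an invented stand-in.

/*
 * Toy model of the reference lifetime enforced by this patch.  All names
 * here are illustrative stand-ins, not the xe driver's real types or APIs.
 */
#include <stdbool.h>
#include <stdio.h>

static int pm_refcount;	/* stands in for the device runtime PM refcount */

static void pm_get(void) { pm_refcount++; }	/* blocks "suspend" */
static void pm_put(void) { pm_refcount--; }	/* releases the block */

struct toy_fence {
	bool signaled;
};

/* Like xe_gt_tlb_invalidation_fence_init() in the diff: take a PM ref up
 * front so the device cannot suspend while the invalidation is inflight. */
static void toy_fence_init(struct toy_fence *f)
{
	pm_get();
	f->signaled = false;
}

/* Like xe_gt_tlb_invalidation_fence_fini(): drop the ref taken at init. */
static void toy_fence_fini(struct toy_fence *f)
{
	(void)f;
	pm_put();
}

/* Signaling drops the ref first and then completes the fence, mirroring the
 * v2 note "Drop PM ref before signaling fence". */
static void toy_fence_signal(struct toy_fence *f)
{
	toy_fence_fini(f);
	f->signaled = true;
}

/* Caller pattern from the ggtt/vma hunks: if submission fails the fence will
 * never signal, so the caller must call fini itself to balance the ref. */
static int submit_invalidation(struct toy_fence *f, bool fail)
{
	toy_fence_init(f);
	if (fail) {
		toy_fence_fini(f);
		return -1;
	}
	toy_fence_signal(f);	/* normally done by the completion path */
	return 0;
}

int main(void)
{
	struct toy_fence fence;

	submit_invalidation(&fence, false);
	submit_invalidation(&fence, true);
	printf("PM refcount balanced: %s\n", pm_refcount == 0 ? "yes" : "no");
	return 0;
}

Running the sketch leaves the refcount at zero on both the success and the error path, which is exactly the invariant the new fini calls in the hunks below preserve.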

diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c

@@ -13,6 +13,7 @@
 #include "xe_guc.h"
 #include "xe_guc_ct.h"
 #include "xe_mmio.h"
+#include "xe_pm.h"
 #include "xe_sriov.h"
 #include "xe_trace.h"
 #include "regs/xe_guc_regs.h"
@@ -41,6 +42,7 @@ __invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_
 	bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);
 
 	trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
+	xe_gt_tlb_invalidation_fence_fini(fence);
 	dma_fence_signal(&fence->base);
 	if (!stack)
 		dma_fence_put(&fence->base);
@@ -263,8 +265,10 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
 		xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
 		ret = xe_gt_tlb_invalidation_guc(gt, &fence);
-		if (ret < 0)
+		if (ret < 0) {
+			xe_gt_tlb_invalidation_fence_fini(&fence);
 			return ret;
+		}
 
 		xe_gt_tlb_invalidation_fence_wait(&fence);
 	} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
@@ -489,12 +493,15 @@ static const struct dma_fence_ops invalidation_fence_ops = {
  * @fence: TLB invalidation fence to initialize
  * @stack: fence is stack variable
  *
- * Initialize TLB invalidation fence for use
+ * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
+ * must be called if fence is not signaled.
  */
 void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
				       struct xe_gt_tlb_invalidation_fence *fence,
				       bool stack)
 {
+	xe_pm_runtime_get_noresume(gt_to_xe(gt));
+
 	spin_lock_irq(&gt->tlb_invalidation.lock);
 	dma_fence_init(&fence->base, &invalidation_fence_ops,
		       &gt->tlb_invalidation.lock,
@@ -505,4 +512,16 @@ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
 		set_bit(FENCE_STACK_BIT, &fence->base.flags);
 	else
 		dma_fence_get(&fence->base);
+	fence->gt = gt;
 }
+
+/**
+ * xe_gt_tlb_invalidation_fence_fini - Finalize TLB invalidation fence
+ * @fence: TLB invalidation fence to finalize
+ *
+ * Drop PM ref which fence took during init.
+ */
+void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
+{
+	xe_pm_runtime_put(gt_to_xe(fence->gt));
+}

diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h

@@ -28,6 +28,7 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
 void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
				       struct xe_gt_tlb_invalidation_fence *fence,
				       bool stack);
+void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence);
 
 static inline void
 xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)

diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h

@@ -8,6 +8,8 @@
 #include <linux/dma-fence.h>
 
+struct xe_gt;
+
 /**
  * struct xe_gt_tlb_invalidation_fence - XE GT TLB invalidation fence
  *
@@ -17,6 +19,8 @@
 struct xe_gt_tlb_invalidation_fence {
 	/** @base: dma fence base */
 	struct dma_fence base;
+	/** @gt: GT which fence belongs to */
+	struct xe_gt *gt;
 	/** @link: link into list of pending tlb fences */
 	struct list_head link;
 	/** @seqno: seqno of TLB invalidation to signal fence one */

diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c

@@ -3218,8 +3218,10 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
			 */
			ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
							 &fence[id], vma);
-			if (ret < 0)
+			if (ret < 0) {
+				xe_gt_tlb_invalidation_fence_fini(&fence[id]);
				goto wait;
+			}
 
			tile_needs_invalidate |= BIT(id);
		}