drm/i915/gvt: Move request alloc to dispatch_workload path only

Previously, performance was improved by auditing and shadowing the workload
ahead of vGPU scheduling. However, there is a case where another request is
allocated in submit_context before the previous request has been added; the
timeline then holds the later request's seqno, while the pending request
still carries the earlier one.
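
For illustration, a toy model of the seqno bookkeeping (the struct and
helper names below are invented for illustration, not i915 code):
allocating a request reserves the timeline's next seqno, and adding a
request asserts it is still the newest allocation, which is exactly what
a second allocation in between breaks.

	#include <assert.h>

	/* Toy timeline: hands out increasing seqnos at allocation time
	 * and expects requests to be added in allocation order. */
	struct timeline { unsigned int seqno; };
	struct request { unsigned int fence_seqno; };

	static struct request alloc_request(struct timeline *tl)
	{
		return (struct request){ .fence_seqno = ++tl->seqno };
	}

	static void add_request(struct timeline *tl, const struct request *rq)
	{
		/* Models the (timeline->seqno != request->fence.seqno) check. */
		assert(tl->seqno == rq->fence_seqno);
	}

	int main(void)
	{
		struct timeline tl = { 0 };
		struct request a = alloc_request(&tl);	/* reserves seqno 1 */
		struct request b = alloc_request(&tl);	/* reserves seqno 2 */

		add_request(&tl, &a);	/* fires: timeline holds 2, a holds 1 */
		add_request(&tl, &b);
		return 0;
	}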

This patch moves the request allocation into the dispatch_workload
function, the same place where the request is added.

This fixes the kernel BUG triggered by the
(timeline->seqno != request->fence.seqno) check in add_request.
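
The resulting ordering, sketched with the same toy model as above (the
function names below are simplified stand-ins for the real call paths,
not the actual i915/GVT functions):

	/* Before: scan-and-shadow also allocated the request, so shadowing
	 * a second workload could reserve a seqno before the first request
	 * was added. After: shadowing leaves the timeline alone, and the
	 * dispatch path allocates and adds the request back to back. */
	static void scan_and_shadow(void)
	{
		/* audit and shadow the workload; no request, no seqno */
	}

	static void dispatch(struct timeline *tl)
	{
		struct request rq = alloc_request(tl);	/* generate request */
		add_request(tl, &rq);			/* seqno still matches */
	}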

Fixes: 89ea20b930 ("drm/i915/gvt: Factor out scan and shadow from workload dispatch")
Signed-off-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
Signed-off-by: fred gao <fred.gao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
(cherry picked from commit f2880e04f3)
commit c3c80f0736
parent bf3a26b3cb
Author:    fred gao <fred.gao@intel.com>
Committer: Zhenyu Wang <zhenyuw@linux.intel.com>
Date:      2017-11-14 17:09:35 +08:00

 3 files changed, 29 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c

@@ -496,6 +496,12 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
 		goto err_unpin_mm;
 	}
 
+	ret = intel_gvt_generate_request(workload);
+	if (ret) {
+		gvt_vgpu_err("fail to generate request\n");
+		goto err_unpin_mm;
+	}
+
 	ret = prepare_shadow_batch_buffer(workload);
 	if (ret) {
 		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c

@@ -254,7 +254,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
 	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	struct drm_i915_gem_request *rq;
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_ring *ring;
 	int ret;
@@ -300,6 +299,26 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	ret = populate_shadow_context(workload);
 	if (ret)
 		goto err_unpin;
+	workload->shadowed = true;
+	return 0;
+
+err_unpin:
+	engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+	release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+	return ret;
+}
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+	int ring_id = workload->ring_id;
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+	struct drm_i915_gem_request *rq;
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+	int ret;
 
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
@@ -314,14 +333,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	ret = copy_workload_to_ring_buffer(workload);
 	if (ret)
 		goto err_unpin;
-	workload->shadowed = true;
 	return 0;
 
 err_unpin:
 	engine->context_unpin(engine, shadow_ctx);
-err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
 	return ret;
 }

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h

@@ -142,4 +142,7 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
 
 void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload);
+
 #endif