drm/i915/gvt: Fix workload request allocation before request add

In commit 6bb2a2af8b ("drm/i915/gvt: Fix crash after request->hw_context change"),
the workload scan path was not handled for the ELSP handler case, which scans
the workload early at ELSP write time instead of in the gvt submission thread.
As a result, request allocation and request add were split apart, which breaks
the required i915 request flow.

Do a partial revert of that commit: keep the workload request allocation
helper, and make sure shadow state population happens only after request
allocation, so that the target context state buffer is in place.

v3: Also set the workload status in the request allocation error path.
v2: Fix the dispatch_workload() error path: a request that was allocated must still be added, even on failure.
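
To illustrate the invariant this restores, here is a minimal userspace model
(all names below are illustrative stand-ins, not the real i915 API): once a
request has been allocated, every path out of dispatch must still add it.

/*
 * Minimal userspace model of the alloc/add pairing restored by this
 * patch. Types and functions are stand-ins, not i915 code.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_request { bool added; };

static int model_req_alloc(struct model_request *rq)
{
	rq->added = false;
	return 0;
}

/* Simulate a scan/populate step that can fail mid-dispatch. */
static int model_scan_and_shadow(void)
{
	return -22; /* forced failure for the demo */
}

static int model_dispatch(struct model_request *rq)
{
	int ret;

	ret = model_req_alloc(rq);
	if (ret)
		return ret;	/* nothing allocated, nothing to add */

	ret = model_scan_and_shadow();

	/* Even on failure, an allocated request must still be added. */
	rq->added = true;
	return ret;
}

int main(void)
{
	struct model_request rq;
	int ret = model_dispatch(&rq);

	printf("dispatch ret=%d, request added=%s\n",
	       ret, rq.added ? "yes" : "no");
	return 0;
}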

Fixes: 6bb2a2af8b ("drm/i915/gvt: Fix crash after request->hw_context change")
Cc: Bin Yang <bin.yang@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Tested-by: Bin Yang <bin.yang@intel.com>
Reviewed-by: Xiaolin Zhang <xiaolin.zhang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
---
 drivers/gpu/drm/i915/gvt/scheduler.c | 64 ++++++++++++++++++++----------
 drivers/gpu/drm/i915/gvt/scheduler.h |  1 +
 2 files changed, 43 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -356,6 +356,33 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
 	return 0;
 }
 
+static int
+intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+	struct i915_request *rq;
+	int ret = 0;
+
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+	if (workload->req)
+		goto out;
+
+	rq = i915_request_alloc(engine, shadow_ctx);
+	if (IS_ERR(rq)) {
+		gvt_vgpu_err("fail to allocate gem request\n");
+		ret = PTR_ERR(rq);
+		goto out;
+	}
+	workload->req = i915_request_get(rq);
+out:
+	return ret;
+}
+
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
  * shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -372,12 +399,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
 	struct intel_context *ce;
-	struct i915_request *rq;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	if (workload->req)
+	if (workload->shadow)
 		return 0;
 
 	ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
@ -417,22 +443,8 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
goto err_shadow;
}
rq = i915_request_alloc(engine, shadow_ctx);
if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n");
ret = PTR_ERR(rq);
goto err_shadow;
}
workload->req = i915_request_get(rq);
ret = populate_shadow_context(workload);
if (ret)
goto err_req;
workload->shadow = true;
return 0;
err_req:
rq = fetch_and_zero(&workload->req);
i915_request_put(rq);
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
err_unpin:
@@ -671,23 +683,31 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	mutex_lock(&vgpu->vgpu_lock);
 	mutex_lock(&dev_priv->drm.struct_mutex);
 
+	ret = intel_gvt_workload_req_alloc(workload);
+	if (ret)
+		goto err_req;
+
 	ret = intel_gvt_scan_and_shadow_workload(workload);
 	if (ret)
 		goto out;
 
+	ret = populate_shadow_context(workload);
+	if (ret) {
+		release_shadow_wa_ctx(&workload->wa_ctx);
+		goto out;
+	}
+
 	ret = prepare_workload(workload);
-
 out:
-	if (ret)
-		workload->status = ret;
-
 	if (!IS_ERR_OR_NULL(workload->req)) {
 		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
 			      ring_id, workload->req);
 		i915_request_add(workload->req);
 		workload->dispatched = true;
 	}
-
+err_req:
+	if (ret)
+		workload->status = ret;
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	mutex_unlock(&vgpu->vgpu_lock);
 	return ret;

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -83,6 +83,7 @@ struct intel_vgpu_workload {
 	struct i915_request *req;
 	/* if this workload has been dispatched to i915? */
 	bool dispatched;
+	bool shadow; /* if workload has done shadow of guest request */
 	int status;
 
 	struct intel_vgpu_mm *shadow_mm;
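
For context on the new flag, a sketch under stand-in names (not the real
intel_vgpu_workload): with request allocation split out of the scan path,
workload->req can no longer double as the "already shadowed" marker, so the
scan is made idempotent with a dedicated boolean instead.

/*
 * Sketch of the idempotence guard; stand-in types, not gvt code.
 */
#include <stdbool.h>

struct model_workload { bool shadow; };

static int model_scan_and_shadow(struct model_workload *w)
{
	/* The ELSP handler may already have shadowed this workload. */
	if (w->shadow)
		return 0;

	/* ... scan ring buffer, shadow context and wa_ctx ... */

	w->shadow = true;	/* a second call becomes a no-op */
	return 0;
}

int main(void)
{
	struct model_workload w = { .shadow = false };

	model_scan_and_shadow(&w);	/* performs the shadowing */
	return model_scan_and_shadow(&w);	/* no-op, returns 0 */
}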