drm/i915: Only recover active engines
If we issue a reset to a currently idle engine, leave it idle
afterwards. This is useful to excise a linkage between reset and the
shrinker. When waking the engine, we need to pin the default context
image which we use for overwriting a guilty context -- if the engine is
idle we do not need this pinned image! However, this pinning means that
waking the engine acquires FS_RECLAIM, and so may trigger the shrinker.
The shrinker itself may need to wait upon the GPU to unbind an object
and so may require the services of reset; ergo we should avoid the
engine wake up path.

The danger in skipping the recovery for idle engines is that we leave
the engine with no context defined, which may interfere with the
operation of the power context on some older platforms. In practice, we
should only be resetting an active GPU, but it is something to look out
for on Ironlake (if memory serves).

Fixes: 79ffac8599 ("drm/i915: Invert the GEM wakeref hierarchy")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190626154549.10066-2-chris@chris-wilson.co.uk
(cherry picked from commit 18398904ca)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parent b1fa6fd94f
commit 4b9bb9728c
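Before reading the diff below, here is a small, self-contained sketch (plain C,
compilable on its own) of the pattern the patch applies: reset_prepare() takes
an engine power reference only if the engine is already awake, records those
engines in a mask, and reset_finish() drops references for exactly that mask,
so idle engines are left untouched. Every name and type in the sketch (struct
engine, engine_pm_get_if_awake(), NUM_ENGINES, and so on) is a simplified
stand-in for illustration, not the i915 driver's own code.

/*
 * Minimal model of "only touch engines that were already awake".
 * Not i915 code; a toy wakeref counter stands in for intel_engine_pm.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_ENGINES 4

struct engine {
	uint32_t mask;   /* single bit identifying the engine */
	int wakeref;     /* >0 while the engine is awake */
};

/* Take an extra reference only if the engine already holds one. */
static bool engine_pm_get_if_awake(struct engine *e)
{
	if (e->wakeref == 0)
		return false;   /* idle: leave it idle, skip recovery */
	e->wakeref++;
	return true;
}

static void engine_pm_put(struct engine *e)
{
	e->wakeref--;
}

/* Grab only the awake engines and report which ones we grabbed. */
static uint32_t reset_prepare(struct engine *engines)
{
	uint32_t awake = 0;

	for (int i = 0; i < NUM_ENGINES; i++)
		if (engine_pm_get_if_awake(&engines[i]))
			awake |= engines[i].mask;
	return awake;
}

/* Drop the references for exactly the engines we grabbed earlier. */
static void reset_finish(struct engine *engines, uint32_t awake)
{
	for (int i = 0; i < NUM_ENGINES; i++)
		if (awake & engines[i].mask)
			engine_pm_put(&engines[i]);
}

int main(void)
{
	struct engine engines[NUM_ENGINES] = {
		{ .mask = 1u << 0, .wakeref = 1 },   /* active */
		{ .mask = 1u << 1, .wakeref = 0 },   /* idle   */
		{ .mask = 1u << 2, .wakeref = 1 },   /* active */
		{ .mask = 1u << 3, .wakeref = 0 },   /* idle   */
	};

	uint32_t awake = reset_prepare(engines);
	/* ... the reset itself would happen here ... */
	reset_finish(engines, awake);

	printf("awake mask during reset: 0x%x\n", awake);
	for (int i = 0; i < NUM_ENGINES; i++)
		printf("engine %d wakeref after reset: %d\n",
		       i, engines[i].wakeref);
	return 0;
}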
@@ -687,7 +687,6 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
 	 * written to the powercontext is undefined and so we may lose
 	 * GPU state upon resume, i.e. fail to restart after a reset.
 	 */
-	intel_engine_pm_get(engine);
 	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
 	engine->reset.prepare(engine);
 }
@@ -718,16 +717,21 @@ static void revoke_mmaps(struct drm_i915_private *i915)
 	}
 }
 
-static void reset_prepare(struct drm_i915_private *i915)
+static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
+	intel_engine_mask_t awake = 0;
 	enum intel_engine_id id;
 
-	intel_gt_pm_get(i915);
-	for_each_engine(engine, i915, id)
+	for_each_engine(engine, i915, id) {
+		if (intel_engine_pm_get_if_awake(engine))
+			awake |= engine->mask;
 		reset_prepare_engine(engine);
+	}
 
 	intel_uc_reset_prepare(i915);
+
+	return awake;
 }
 
 static void gt_revoke(struct drm_i915_private *i915)
@@ -761,20 +765,22 @@ static int gt_reset(struct drm_i915_private *i915,
 static void reset_finish_engine(struct intel_engine_cs *engine)
 {
 	engine->reset.finish(engine);
-	intel_engine_pm_put(engine);
 	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
+
+	intel_engine_signal_breadcrumbs(engine);
 }
 
-static void reset_finish(struct drm_i915_private *i915)
+static void reset_finish(struct drm_i915_private *i915,
+			 intel_engine_mask_t awake)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
 	for_each_engine(engine, i915, id) {
 		reset_finish_engine(engine);
-		intel_engine_signal_breadcrumbs(engine);
+		if (awake & engine->mask)
+			intel_engine_pm_put(engine);
 	}
-	intel_gt_pm_put(i915);
 }
 
 static void nop_submit_request(struct i915_request *request)
@@ -798,6 +804,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
 {
 	struct i915_gpu_error *error = &i915->gpu_error;
 	struct intel_engine_cs *engine;
+	intel_engine_mask_t awake;
 	enum intel_engine_id id;
 
 	if (test_bit(I915_WEDGED, &error->flags))
@@ -817,7 +824,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
 	 * rolling the global seqno forward (since this would complete requests
 	 * for which we haven't set the fence error to EIO yet).
 	 */
-	reset_prepare(i915);
+	awake = reset_prepare(i915);
 
 	/* Even if the GPU reset fails, it should still stop the engines */
 	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
@@ -841,7 +848,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
 	for_each_engine(engine, i915, id)
 		engine->cancel_requests(engine);
 
-	reset_finish(i915);
+	reset_finish(i915, awake);
 
 	GEM_TRACE("end\n");
 }
@@ -988,6 +995,7 @@ void i915_reset(struct drm_i915_private *i915,
 		const char *reason)
 {
 	struct i915_gpu_error *error = &i915->gpu_error;
+	intel_engine_mask_t awake;
 	int ret;
 
 	GEM_TRACE("flags=%lx\n", error->flags);
@@ -1004,7 +1012,7 @@ void i915_reset(struct drm_i915_private *i915,
 		dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
 	error->reset_count++;
 
-	reset_prepare(i915);
+	awake = reset_prepare(i915);
 
 	if (!intel_has_gpu_reset(i915)) {
 		if (i915_modparams.reset)
@@ -1049,7 +1057,7 @@ void i915_reset(struct drm_i915_private *i915,
 	i915_queue_hangcheck(i915);
 
 finish:
-	reset_finish(i915);
+	reset_finish(i915, awake);
 unlock:
 	mutex_unlock(&error->wedge_mutex);
 	return;
@@ -1100,7 +1108,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
 	GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
 	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
 
-	if (!intel_wakeref_active(&engine->wakeref))
+	if (!intel_engine_pm_get_if_awake(engine))
 		return 0;
 
 	reset_prepare_engine(engine);
@@ -1135,12 +1143,11 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
 	 * process to program RING_MODE, HWSP and re-enable submission.
 	 */
 	ret = engine->resume(engine);
-	if (ret)
-		goto out;
 
 out:
 	intel_engine_cancel_stop_cs(engine);
 	reset_finish_engine(engine);
+	intel_engine_pm_put(engine);
 	return ret;
 }
 
@@ -71,13 +71,16 @@ static int igt_atomic_reset(void *arg)
 		goto unlock;
 
 	for (p = igt_atomic_phases; p->name; p++) {
+		intel_engine_mask_t awake;
+
 		GEM_TRACE("intel_gpu_reset under %s\n", p->name);
 
+		awake = reset_prepare(i915);
 		p->critical_section_begin();
 		reset_prepare(i915);
 		err = intel_gpu_reset(i915, ALL_ENGINES);
-		reset_finish(i915);
 		p->critical_section_end();
+		reset_finish(i915, awake);
 
 		if (err) {
 			pr_err("intel_gpu_reset failed under %s\n", p->name);