Merge tag 'drm-intel-fixes-2018-03-07' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Two fixes: one for perf and one for an execlist submission race.

* tag 'drm-intel-fixes-2018-03-07' of git://anongit.freedesktop.org/drm/drm-intel:
  drm/i915: Suspend submission tasklets around wedging
  drm/i915/perf: fix perf stream opening lock
Committed by Dave Airlie on 2018-03-09 09:22:19 +10:00
commit aa87d62f7e
3 changed files with 24 additions and 29 deletions

--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c

@@ -3205,8 +3205,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
-	for_each_engine(engine, i915, id)
+	for_each_engine(engine, i915, id) {
+		i915_gem_reset_prepare_engine(engine);
 		engine->submit_request = nop_submit_request;
+	}
 
	/*
	 * Make sure no one is running the old callback before we proceed with
@@ -3244,6 +3246,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
		intel_engine_init_global_seqno(engine,
					       intel_engine_last_submit(engine));
		spin_unlock_irqrestore(&engine->timeline->lock, flags);
+
+		i915_gem_reset_finish_engine(engine);
	}
 
	set_bit(I915_WEDGED, &i915->gpu_error.flags);
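
The shape of the fix is easier to see with the two hunks combined. A
condensed sketch of i915_gem_set_wedged() after this change (not the
complete function; the synchronize and -EIO marking steps in between are
elided):

void i915_gem_set_wedged(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Quiesce each engine first: per the commit title, this suspends
	 * the execlists submission tasklet, so nothing can race with us
	 * while we swap in the nop submission callback.
	 */
	for_each_engine(engine, i915, id) {
		i915_gem_reset_prepare_engine(engine);
		engine->submit_request = nop_submit_request;
	}

	/* ... synchronize and mark outstanding requests with -EIO ... */

	for_each_engine(engine, i915, id) {
		/* ... complete everything up to the last submission ... */
		i915_gem_reset_finish_engine(engine);
	}

	set_bit(I915_WEDGED, &i915->gpu_error.flags);
}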

--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c

@@ -1303,9 +1303,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	dev_priv->perf.oa.exclusive_stream = NULL;
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
	free_oa_buffer(dev_priv);
@@ -1756,22 +1755,13 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
  * Note: it's only the RCS/Render context that has any OA state.
  */
 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
-				       const struct i915_oa_config *oa_config,
-				       bool interruptible)
+				       const struct i915_oa_config *oa_config)
 {
	struct i915_gem_context *ctx;
	int ret;
	unsigned int wait_flags = I915_WAIT_LOCKED;
 
-	if (interruptible) {
-		ret = i915_mutex_lock_interruptible(&dev_priv->drm);
-		if (ret)
-			return ret;
-
-		wait_flags |= I915_WAIT_INTERRUPTIBLE;
-	} else {
-		mutex_lock(&dev_priv->drm.struct_mutex);
-	}
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
	/* Switch away from any user context. */
	ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
@@ -1819,8 +1809,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
	}
 
 out:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
	return ret;
 }
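
The lockdep_assert_held() conversion above follows a standard kernel
pattern: instead of a helper that conditionally takes struct_mutex itself
(and therefore can never be called with the lock already held), the helper
now requires its caller to hold the lock. A generic sketch of the pattern,
using hypothetical names (struct foo, foo_update(), foo_update_locked())
rather than the i915 ones:

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct foo {
	struct mutex lock;
	int value;
};

/* Callers must hold foo->lock; lockdep verifies this on debug builds. */
static void foo_update_locked(struct foo *f, int v)
{
	lockdep_assert_held(&f->lock);
	f->value = v;
}

static void foo_update(struct foo *f, int v)
{
	mutex_lock(&f->lock);
	foo_update_locked(f, v);
	mutex_unlock(&f->lock);
}

Here the callers are the perf stream open and destroy paths, which both
take struct_mutex themselves, as the remaining hunks show.
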
@@ -1863,7 +1851,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
	 * to make sure all slices/subslices are ON before writing to NOA
	 * registers.
	 */
-	ret = gen8_configure_all_contexts(dev_priv, oa_config, true);
+	ret = gen8_configure_all_contexts(dev_priv, oa_config);
	if (ret)
		return ret;
@@ -1878,7 +1866,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
 static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
 {
	/* Reset all contexts' slices/subslices configurations. */
-	gen8_configure_all_contexts(dev_priv, NULL, false);
+	gen8_configure_all_contexts(dev_priv, NULL);
 
	I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
				      ~GT_NOA_ENABLE));
@@ -1888,7 +1876,7 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
 static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
 {
	/* Reset all contexts' slices/subslices configurations. */
-	gen8_configure_all_contexts(dev_priv, NULL, false);
+	gen8_configure_all_contexts(dev_priv, NULL);
 
	/* Make sure we disable noa to save power. */
	I915_WRITE(RPM_CONFIG1,
@@ -2138,6 +2126,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
	if (ret)
		goto err_oa_buf_alloc;
 
+	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+	if (ret)
+		goto err_lock;
+
	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
						      stream->oa_config);
	if (ret)
@@ -2145,23 +2137,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 
	stream->ops = &i915_oa_stream_ops;
 
-	/* Lock device for exclusive_stream access late because
-	 * enable_metric_set() might lock as well on gen8+.
-	 */
-	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
-	if (ret)
-		goto err_lock;
-
	dev_priv->perf.oa.exclusive_stream = stream;
 
	mutex_unlock(&dev_priv->drm.struct_mutex);
 
	return 0;
 
-err_lock:
-	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
-
 err_enable:
+	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+
+err_lock:
	free_oa_buffer(dev_priv);
 
 err_oa_buf_alloc:
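
Taken together, the stream-open hunks leave i915_oa_stream_init() with one
consistent ordering: take struct_mutex (interruptibly) first, then enable
the metric set, then publish exclusive_stream, unwinding in reverse on
error. A condensed sketch of the open path after the fix (buffer
allocation and the other setup steps are elided):

	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (ret)
		goto err_lock;

	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
						      stream->oa_config);
	if (ret)
		goto err_enable;

	dev_priv->perf.oa.exclusive_stream = stream;
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;

err_enable:
	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

err_lock:
	free_oa_buffer(dev_priv);

Holding struct_mutex across enable_metric_set() is also what lets
gen8_configure_all_contexts() assert the lock instead of taking it.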

--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c

@@ -719,6 +719,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
	struct rb_node *rb;
	unsigned long flags;
 
+	GEM_TRACE("%s\n", engine->name);
+
	spin_lock_irqsave(&engine->timeline->lock, flags);
 
	/* Cancel the requests on the HW and clear the ELSP tracker. */
@@ -765,6 +767,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
	 */
	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
 
+	/* Mark all CS interrupts as complete */
+	execlists->active = 0;
+
	spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
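
For orientation, a condensed view of execlists_cancel_requests() with the
two additions in place (the request cancellation between lock and unlock
is elided; the execlists local is assumed to be the function's existing
pointer to engine->execlists):

static void execlists_cancel_requests(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	unsigned long flags;

	GEM_TRACE("%s\n", engine->name);

	spin_lock_irqsave(&engine->timeline->lock, flags);

	/* ... cancel the ELSP ports, mark queued requests with -EIO ... */

	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);

	/*
	 * Mark all CS interrupts as complete so that a tasklet run after
	 * wedging does not try to act on stale ELSP state.
	 */
	execlists->active = 0;

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}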