powerpc/spufs: Fix possible scheduling of a context to multiple SPEs
We currently have a race when scheduling a context to a SPE: after we have found a runnable context in spusched_tick, the same context may have been scheduled by spu_activate().

This may result in a panic if we try to unschedule a context that has been freed in the meantime.

This change exits spu_schedule() if the context has already been scheduled, so we don't end up scheduling it twice.

Signed-off-by: Andre Detsch <adetsch@br.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
This commit is contained in:
parent b65fe0356b
commit b2e601d14d
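The fix is the standard re-validation-under-lock pattern for a check-then-act race: the decision made before taking the lock (the context looked runnable) is checked again once the lock is held. As a minimal userspace illustration of the same shape, here is a pthreads sketch; every name in it (ctx_state, schedule_ctx, and so on) is invented for this example and is not the kernel's API:

/* Userspace analogue of the race and the fix, using pthreads.
 * Two paths race to schedule the same context; the loser notices
 * by re-checking the state after taking the mutex, just as the
 * patched spu_schedule() re-checks ctx->state under state_mutex. */
#include <pthread.h>
#include <stdio.h>

enum ctx_state { STATE_SAVED, STATE_RUNNABLE };

struct ctx {
	pthread_mutex_t state_mutex;
	enum ctx_state state;
};

static void schedule_ctx(struct ctx *c, const char *who)
{
	pthread_mutex_lock(&c->state_mutex);
	if (c->state == STATE_SAVED) {
		/* the fix: validate the state under the lock */
		c->state = STATE_RUNNABLE;
		printf("%s scheduled the context\n", who);
	} else {
		printf("%s lost the race; already scheduled\n", who);
	}
	pthread_mutex_unlock(&c->state_mutex);
}

static void *activator(void *arg)
{
	schedule_ctx(arg, "spu_activate-like path");
	return NULL;
}

int main(void)
{
	struct ctx c = { PTHREAD_MUTEX_INITIALIZER, STATE_SAVED };
	pthread_t t;

	pthread_create(&t, NULL, activator, &c);
	schedule_ctx(&c, "spusched_tick-like path"); /* races with the thread */
	pthread_join(&t, NULL);
	return 0;
}

Without the state check, whichever path ran second would "schedule" a context that is already running; in the kernel that second scheduling could act on a context freed in the meantime.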
@@ -728,7 +728,8 @@ static void spu_schedule(struct spu *spu, struct spu_context *ctx)
 	/* not a candidate for interruptible because it's called either
 	   from the scheduler thread or from spu_deactivate */
 	mutex_lock(&ctx->state_mutex);
-	__spu_schedule(spu, ctx);
+	if (ctx->state == SPU_STATE_SAVED)
+		__spu_schedule(spu, ctx);
 	spu_release(ctx);
 }
 
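After the hunk is applied, spu_schedule() reads as follows. This is reconstructed purely from the diff context above, so only the assembly into a whole function is assumed:

static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	/* not a candidate for interruptible because it's called either
	   from the scheduler thread or from spu_deactivate */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state == SPU_STATE_SAVED)
		__spu_schedule(spu, ctx);
	spu_release(ctx);
}

If spu_activate() wins the race and the context is no longer in SPU_STATE_SAVED by the time state_mutex is acquired, the function now simply releases the context instead of scheduling it a second time.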