sched/wait, drivers/drm: Convert wait_on_atomic_t() usage to the new wait_var_event() API

The old wait_on_atomic_t() is going to get removed; use the more
flexible wait_var_event() API instead.

Unlike wake_up_atomic_t(), wake_up_var() issues the wakeup even if the
variable is not 0, which is why the converted call sites guard the
wakeup with atomic_dec_and_test(): waiters are only woken once the
count has actually reached 0.
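
For illustration, a minimal sketch of the resulting pattern (the
struct and function names here are hypothetical, not from this patch;
wait_var_event() and wake_up_var() are declared in <linux/wait_bit.h>,
introduced by the parent commit):

	#include <linux/atomic.h>
	#include <linux/wait_bit.h>	/* wait_var_event(), wake_up_var() */

	struct example {
		atomic_t count;		/* hypothetical use/reference count */
	};

	/* Waiter side: sleep uninterruptibly until the count drops to 0. */
	static void example_wait_idle(struct example *e)
	{
		wait_var_event(&e->count, !atomic_read(&e->count));
	}

	/*
	 * Waker side: wake_up_var() fires unconditionally, so gate it on
	 * the count actually hitting 0 to keep wake_up_atomic_t() behaviour.
	 */
	static void example_put(struct example *e)
	{
		if (atomic_dec_and_test(&e->count))
			wake_up_var(&e->count);
	}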

No change in functionality.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit d224985a5e (parent 6b2bb7265f)
Author:     Peter Zijlstra <peterz@infradead.org>
AuthorDate: 2018-03-15 11:41:39 +0100
Commit:     Ingo Molnar <mingo@kernel.org>

 2 files changed, 11 insertions(+), 16 deletions(-)

--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -177,8 +177,9 @@ static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 	res = pos - iocb->ki_pos;
 	iocb->ki_pos = pos;
 
-	atomic_dec(&aux_dev->usecount);
-	wake_up_atomic_t(&aux_dev->usecount);
+	if (atomic_dec_and_test(&aux_dev->usecount))
+		wake_up_var(&aux_dev->usecount);
+
 	return res;
 }
 
@@ -218,8 +219,9 @@ static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	res = pos - iocb->ki_pos;
 	iocb->ki_pos = pos;
 
-	atomic_dec(&aux_dev->usecount);
-	wake_up_atomic_t(&aux_dev->usecount);
+	if (atomic_dec_and_test(&aux_dev->usecount))
+		wake_up_var(&aux_dev->usecount);
+
 	return res;
 }
 
@@ -277,8 +279,7 @@ void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
 	mutex_unlock(&aux_idr_mutex);
 
 	atomic_dec(&aux_dev->usecount);
-	wait_on_atomic_t(&aux_dev->usecount, atomic_t_wait,
-			 TASK_UNINTERRUPTIBLE);
+	wait_var_event(&aux_dev->usecount, !atomic_read(&aux_dev->usecount));
 
 	minor = aux_dev->index;
 	if (aux_dev->dev)

--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -271,18 +271,13 @@ struct igt_wakeup {
 	u32 seqno;
 };
 
-static int wait_atomic_timeout(atomic_t *p, unsigned int mode)
-{
-	return schedule_timeout(10 * HZ) ? 0 : -ETIMEDOUT;
-}
-
 static bool wait_for_ready(struct igt_wakeup *w)
 {
 	DEFINE_WAIT(ready);
 
 	set_bit(IDLE, &w->flags);
 	if (atomic_dec_and_test(w->done))
-		wake_up_atomic_t(w->done);
+		wake_up_var(w->done);
 
 	if (test_bit(STOP, &w->flags))
 		goto out;
@@ -299,7 +294,7 @@ static bool wait_for_ready(struct igt_wakeup *w)
 out:
 	clear_bit(IDLE, &w->flags);
 	if (atomic_dec_and_test(w->set))
-		wake_up_atomic_t(w->set);
+		wake_up_var(w->set);
 
 	return !test_bit(STOP, &w->flags);
 }
@@ -342,7 +337,7 @@ static void igt_wake_all_sync(atomic_t *ready,
 	atomic_set(ready, 0);
 	wake_up_all(wq);
 
-	wait_on_atomic_t(set, atomic_t_wait, TASK_UNINTERRUPTIBLE);
+	wait_var_event(set, !atomic_read(set));
 	atomic_set(ready, count);
 	atomic_set(done, count);
 }
@@ -350,7 +345,6 @@ static void igt_wake_all_sync(atomic_t *ready,
 static int igt_wakeup(void *arg)
 {
 	I915_RND_STATE(prng);
-	const int state = TASK_UNINTERRUPTIBLE;
 	struct intel_engine_cs *engine = arg;
 	struct igt_wakeup *waiters;
 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
@@ -418,7 +412,7 @@ static int igt_wakeup(void *arg)
 	 * that they are ready for the next test. We wait until all
 	 * threads are complete and waiting for us (i.e. not a seqno).
 	 */
-	err = wait_on_atomic_t(&done, wait_atomic_timeout, state);
+	err = wait_var_event_timeout(&done, !atomic_read(&done), 10 * HZ);
 	if (err) {
 		pr_err("Timed out waiting for %d remaining waiters\n",
 		       atomic_read(&done));
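
A note on the timeout variant used in the last hunk: where the old code
passed a wait_atomic_timeout() callback returning -ETIMEDOUT,
wait_var_event_timeout() follows the wait_event_timeout() convention
and returns 0 on timeout, otherwise the remaining jiffies. A minimal
sketch of that convention (the helper and its 'pending' counter are
hypothetical, not part of this patch):

	#include <linux/atomic.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/wait_bit.h>

	static int example_wait_pending(atomic_t *pending)
	{
		long remaining;

		/* 0 means the 10 second timeout expired with waiters left. */
		remaining = wait_var_event_timeout(pending,
						   !atomic_read(pending),
						   10 * HZ);
		if (!remaining)
			return -ETIMEDOUT;

		return 0;
	}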