aio: Don't use ctx->tail unnecessarily

aio_complete() (arguably) needs to keep its own trusted copy of the tail
pointer, but io_getevents() doesn't have to use it - it's already using
the head pointer from the ring buffer.

So convert it to use the tail from the ring buffer so it touches fewer
cachelines and doesn't contend with the cacheline aio_complete() needs.
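
(Illustration only, not part of the patch; all names below are
hypothetical.) The access pattern being argued for is the classic
single-producer/single-consumer ring: the producer keeps a private
trusted copy of the tail (as aio_complete() does with ctx->tail) and
publishes it to the shared ring, while the consumer reads both indices
from the shared ring, so it never pulls in the cacheline holding the
producer's private state. A minimal user-space sketch in C:

#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 128			/* arbitrary power of two */

struct ring {
	_Atomic unsigned head;		/* consumed up to here */
	_Atomic unsigned tail;		/* published up to here */
	int events[RING_SIZE];
};

struct producer {
	unsigned tail;			/* private trusted copy of the tail */
	struct ring *ring;
};

/* Producer side (the aio_complete() role): trust only the private
 * tail, publish it to the shared ring once the slot is filled. */
static int produce(struct producer *p, int ev)
{
	unsigned next = (p->tail + 1) % RING_SIZE;

	if (next == atomic_load(&p->ring->head))
		return -1;		/* ring full */

	p->ring->events[p->tail] = ev;
	atomic_store(&p->ring->tail, next);
	p->tail = next;
	return 0;
}

/* Consumer side (the io_getevents() role): read head *and* tail from
 * the shared ring; the producer's private copy is never touched. */
static int consume(struct ring *r, int *ev)
{
	unsigned head = atomic_load(&r->head);
	unsigned tail = atomic_load(&r->tail);

	if (head == tail)
		return -1;		/* ring empty */

	*ev = r->events[head];
	atomic_store(&r->head, (head + 1) % RING_SIZE);
	return 0;
}

int main(void)
{
	static struct ring r;
	struct producer p = { .tail = 0, .ring = &r };
	int ev;

	produce(&p, 42);
	produce(&p, 43);
	while (consume(&r, &ev) == 0)
		printf("event %d\n", ev);
	return 0;
}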

Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: Zach Brown <zab@redhat.com>
Cc: Felipe Balbi <balbi@ti.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>

diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -406,7 +406,8 @@ static void free_ioctx(struct work_struct *work)
 	struct kioctx *ctx = container_of(work, struct kioctx, free_work);
 	struct aio_ring *ring;
 	struct kiocb *req;
-	unsigned cpu, head, avail;
+	unsigned cpu, avail;
+	DEFINE_WAIT(wait);
 
 	spin_lock_irq(&ctx->ctx_lock);
@@ -427,22 +428,24 @@ static void free_ioctx(struct work_struct *work)
 		kcpu->reqs_available = 0;
 	}
 
-	ring = kmap_atomic(ctx->ring_pages[0]);
-	head = ring->head;
-	kunmap_atomic(ring);
+	while (1) {
+		prepare_to_wait(&ctx->wait, &wait, TASK_UNINTERRUPTIBLE);
 
-	while (atomic_read(&ctx->reqs_available) < ctx->nr_events - 1) {
-		wait_event(ctx->wait,
-			   (head != ctx->tail) ||
-			   (atomic_read(&ctx->reqs_available) >=
-			    ctx->nr_events - 1));
-
-		avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
+		ring = kmap_atomic(ctx->ring_pages[0]);
+		avail = (ring->head <= ring->tail)
+			? ring->tail - ring->head
+			: ctx->nr_events - ring->head + ring->tail;
 
 		atomic_add(avail, &ctx->reqs_available);
-		head += avail;
-		head %= ctx->nr_events;
+		ring->head = ring->tail;
+		kunmap_atomic(ring);
+
+		if (atomic_read(&ctx->reqs_available) >= ctx->nr_events - 1)
+			break;
+
+		schedule();
 	}
 
+	finish_wait(&ctx->wait, &wait);
+
 	WARN_ON(atomic_read(&ctx->reqs_available) > ctx->nr_events - 1);
@@ -869,7 +872,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
 				 struct io_event __user *event, long nr)
 {
 	struct aio_ring *ring;
-	unsigned head, pos;
+	unsigned head, tail, pos;
 	long ret = 0;
 	int copy_ret;
@@ -877,11 +880,12 @@ static long aio_read_events_ring(struct kioctx *ctx,
 
 	ring = kmap_atomic(ctx->ring_pages[0]);
 	head = ring->head;
+	tail = ring->tail;
 	kunmap_atomic(ring);
 
-	pr_debug("h%u t%u m%u\n", head, ctx->tail, ctx->nr_events);
+	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
 
-	if (head == ctx->tail)
+	if (head == tail)
 		goto out;
 
 	while (ret < nr) {
@@ -889,8 +893,8 @@ static long aio_read_events_ring(struct kioctx *ctx,
 		struct io_event *ev;
 		struct page *page;
 
-		avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
-		if (head == ctx->tail)
+		avail = (head <= tail ? tail : ctx->nr_events) - head;
+		if (head == tail)
 			break;
 
 		avail = min(avail, nr - ret);
@@ -921,7 +925,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
 	kunmap_atomic(ring);
 	flush_dcache_page(ctx->ring_pages[0]);
 
-	pr_debug("%li h%u t%u\n", ret, head, ctx->tail);
+	pr_debug("%li h%u t%u\n", ret, head, tail);
 
 	put_reqs_available(ctx, ret);
 out: