From 21b40200cfe961b1428a529c63c33b1f1e1b4738 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Tue, 7 May 2013 16:18:47 -0700
Subject: [PATCH] aio: use flush_dcache_page()

This wasn't causing problems before because it's not needed on x86, but
it is needed on other architectures.

Signed-off-by: Kent Overstreet
Cc: Zach Brown
Cc: Felipe Balbi
Cc: Greg Kroah-Hartman
Cc: Mark Fasheh
Cc: Joel Becker
Cc: Rusty Russell
Cc: Jens Axboe
Cc: Asai Thambi S P
Cc: Selvan Mani
Cc: Sam Bradshaw
Cc: Jeff Moyer
Cc: Al Viro
Cc: Benjamin LaHaise
Cc: Theodore Ts'o
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/aio.c | 45 +++++++++++++++++----------------------------
 1 file changed, 17 insertions(+), 28 deletions(-)

diff --git a/fs/aio.c b/fs/aio.c
index 3f348a45e23b..aea060d8c1e8 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -211,33 +211,15 @@ static int aio_setup_ring(struct kioctx *ctx)
 	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
 	ring->header_length = sizeof(struct aio_ring);
 	kunmap_atomic(ring);
+	flush_dcache_page(info->ring_pages[0]);
 
 	return 0;
 }
 
-
-/* aio_ring_event: returns a pointer to the event at the given index from
- * kmap_atomic(). Release the pointer with put_aio_ring_event();
- */
 #define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
 #define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
 #define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
 
-#define aio_ring_event(info, nr) ({ \
-	unsigned pos = (nr) + AIO_EVENTS_OFFSET; \
-	struct io_event *__event; \
-	__event = kmap_atomic( \
-			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \
-	__event += pos % AIO_EVENTS_PER_PAGE; \
-	__event; \
-})
-
-#define put_aio_ring_event(event) do { \
-	struct io_event *__event = (event); \
-	(void)__event; \
-	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
-} while(0)
-
 static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
 			struct io_event *res)
 {
@@ -649,9 +631,9 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 	struct kioctx *ctx = iocb->ki_ctx;
 	struct aio_ring_info *info;
 	struct aio_ring *ring;
-	struct io_event *event;
+	struct io_event *ev_page, *event;
 	unsigned long flags;
-	unsigned long tail;
+	unsigned tail, pos;
 
 	/*
 	 * Special case handling for sync iocbs:
@@ -690,19 +672,24 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 	if (kiocbIsCancelled(iocb))
 		goto put_rq;
 
-	ring = kmap_atomic(info->ring_pages[0]);
-
 	tail = info->tail;
-	event = aio_ring_event(info, tail);
+	pos = tail + AIO_EVENTS_OFFSET;
+
 	if (++tail >= info->nr)
 		tail = 0;
 
+	ev_page = kmap_atomic(info->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
+
 	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
 	event->data = iocb->ki_user_data;
 	event->res = res;
 	event->res2 = res2;
 
-	pr_debug("%p[%lu]: %p: %p %Lx %lx %lx\n",
+	kunmap_atomic(ev_page);
+	flush_dcache_page(info->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+
+	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
 		 ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
 		 res, res2);
 
@@ -712,12 +699,13 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 	smp_wmb();	/* make event visible before updating tail */
 
 	info->tail = tail;
+
+	ring = kmap_atomic(info->ring_pages[0]);
 	ring->tail = tail;
-
-	put_aio_ring_event(event);
 	kunmap_atomic(ring);
+	flush_dcache_page(info->ring_pages[0]);
 
-	pr_debug("added to ring %p at [%lu]\n", iocb, tail);
+	pr_debug("added to ring %p at [%u]\n", iocb, tail);
 
 	/*
 	 * Check if the user asked us to deliver the result through an
@@ -807,6 +795,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
 	ring = kmap_atomic(info->ring_pages[0]);
 	ring->head = head;
 	kunmap_atomic(ring);
+	flush_dcache_page(info->ring_pages[0]);
 
 	pr_debug("%li h%u t%u\n", ret, head, info->tail);
 out:
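
For reference, every hunk above applies the same pattern: write to an aio ring page
through a temporary kernel mapping, drop the mapping, then call flush_dcache_page()
so the store becomes visible through the user-space mapping of the same page on
architectures with aliasing data caches (on x86 the call is a no-op, which is why the
omission went unnoticed). Below is a minimal sketch of that pattern under stated
assumptions: the names shared_ring_hdr and ring_publish_tail are illustrative
placeholders, not part of fs/aio.c.

/*
 * Sketch only: shared_ring_hdr and ring_publish_tail are made-up names;
 * fs/aio.c uses struct aio_ring inside aio_complete().  The point is the
 * ordering: kmap_atomic() -> store -> kunmap_atomic() -> flush_dcache_page().
 */
#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic(), flush_dcache_page() */
#include <linux/mm_types.h>	/* struct page */

struct shared_ring_hdr {
	unsigned head;
	unsigned tail;
};

/* Publish a new tail index in a page that user space also has mmap()ed. */
static void ring_publish_tail(struct page *hdr_page, unsigned new_tail)
{
	struct shared_ring_hdr *hdr;

	hdr = kmap_atomic(hdr_page);	/* temporary kernel-side mapping */
	hdr->tail = new_tail;		/* store through the kernel alias */
	kunmap_atomic(hdr);

	/*
	 * No-op on x86; on architectures with aliasing dcaches this pushes
	 * the update out so the user-space alias of the page observes it.
	 */
	flush_dcache_page(hdr_page);
}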