/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

static struct kmem_cache *io_page_cachep, *io_end_cachep;

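/*
 * Set up the slab caches used by this file: one for the per-page
 * ext4_io_page bookkeeping structures and one for the ext4_io_end
 * structures that describe an in-flight range of write I/O.
 */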
int __init ext4_init_pageio(void)
{
	io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
	if (io_page_cachep == NULL)
		return -ENOMEM;
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL) {
		kmem_cache_destroy(io_page_cachep);
		return -ENOMEM;
	}
	return 0;
}

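/*
 * Tear down the slab caches created by ext4_init_pageio().
 */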
void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_page_cachep);
}

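/*
 * Wait until every outstanding io_end for this inode has been freed.
 * i_ioend_count is raised in ext4_init_io_end() and dropped in
 * ext4_free_io_end(), which wakes this waiter once the count hits zero.
 */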
void ext4_ioend_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
}

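/*
 * Drop one reference on an ext4_io_page.  When the last reference goes
 * away, writeback on the page is ended, the page reference taken in
 * ext4_bio_write_page() is released, and the structure is freed.
 */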
static void put_io_page(struct ext4_io_page *io_page)
{
	if (atomic_dec_and_test(&io_page->p_count)) {
		end_page_writeback(io_page->p_page);
		put_page(io_page->p_page);
		kmem_cache_free(io_page_cachep, io_page);
	}
}

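/*
 * Release an io_end structure: drop the page references it still holds,
 * decrement the inode's outstanding io_end count (waking ext4_ioend_wait()
 * if this was the last one), and return it to the slab cache.
 */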
void ext4_free_io_end(ext4_io_end_t *io)
{
	int i;

	BUG_ON(!io);
	if (io->page)
		put_page(io->page);
	for (i = 0; i < io->num_io_pages; i++)
		put_io_page(io->pages[i]);
	io->num_io_pages = 0;
	if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
		wake_up_all(ext4_ioend_wq(io->inode));
	kmem_cache_free(io_end_cachep, io);
}

/*
 * check a range of space and convert unwritten extents to written.
 *
 * Called with inode->i_mutex; we depend on this when we manipulate
 * io->flag, since we could otherwise race with ext4_flush_completed_IO()
 */
int ext4_end_io_nolock(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	ret = ext4_convert_unwritten_extents(inode, offset, size);
	if (ret < 0) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss! "
			 "(inode %lu, offset %llu, size %zd, error %d)",
			 inode->i_ino, offset, size, ret);
	}

	if (io->iocb)
		aio_complete(io->iocb, io->result, 0);

	if (io->flag & EXT4_IO_END_DIRECT)
		inode_dio_done(inode);
	/* Wake up anyone waiting on unwritten extent conversion */
	if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten))
		wake_up_all(ext4_ioend_wq(io->inode));
	return ret;
}

/*
 * Work on completed aio dio IO, to convert unwritten extents to
 * written extents.
 */
static void ext4_end_io_work(struct work_struct *work)
{
	ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
	struct inode *inode = io->inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned long flags;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	if (io->flag & EXT4_IO_END_IN_FSYNC)
		goto requeue;
	if (list_empty(&io->list)) {
		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
		goto free;
	}

	if (!mutex_trylock(&inode->i_mutex)) {
		bool was_queued;
requeue:
		was_queued = !!(io->flag & EXT4_IO_END_QUEUED);
		io->flag |= EXT4_IO_END_QUEUED;
		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
		/*
		 * Requeue the work instead of waiting so that the work
		 * items queued after this can be processed.
		 */
		queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq, &io->work);
		/*
		 * To prevent the ext4-dio-unwritten thread from keeping
		 * requeueing end_io requests and occupying cpu for too long,
		 * yield the cpu if it sees an end_io request that has already
		 * been requeued.
		 */
		if (was_queued)
			yield();
		return;
	}
	list_del_init(&io->list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
	(void) ext4_end_io_nolock(io);
	mutex_unlock(&inode->i_mutex);
free:
	ext4_free_io_end(io);
}

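/*
 * Allocate a zeroed io_end for @inode and bump the inode's count of
 * outstanding io_ends; the structure is released by ext4_free_io_end().
 */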
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
	if (io) {
		atomic_inc(&EXT4_I(inode)->i_ioend_count);
		io->inode = inode;
		INIT_WORK(&io->work, ext4_end_io_work);
		INIT_LIST_HEAD(&io->list);
	}
	return io;
}

/*
 * Print a buffer I/O error message compatible with the one in
 * fs/buffer.c.  This provides compatibility with dmesg scrapers that
 * look for a specific buffer I/O error message.  We really need a
 * unified error reporting structure to userspace ala Digital Unix's
 * uerf system, but it's probably not going to happen in my lifetime,
 * due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
	       bdevname(bh->b_bdev, b),
	       (unsigned long long)bh->b_blocknr);
}

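/*
 * bio completion callback for writes built by io_submit_init().  It drops
 * the per-page references, reports any I/O error, and, if the range still
 * needs unwritten extent conversion (EXT4_IO_END_UNWRITTEN), queues the
 * io_end on the inode's completed-io list for the dio_unwritten
 * workqueue; otherwise the io_end is freed here.
 */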
static void ext4_end_bio(struct bio *bio, int error)
{
	ext4_io_end_t *io_end = bio->bi_private;
	struct workqueue_struct *wq;
	struct inode *inode;
	unsigned long flags;
	int i;
	sector_t bi_sector = bio->bi_sector;

	BUG_ON(!io_end);
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = 0;
	bio_put(bio);

	for (i = 0; i < io_end->num_io_pages; i++) {
		struct page *page = io_end->pages[i]->p_page;
		struct buffer_head *bh, *head;
		loff_t offset;
		loff_t io_end_offset;

		if (error) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			head = page_buffers(page);
			BUG_ON(!head);

			io_end_offset = io_end->offset + io_end->size;

			offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
			bh = head;
			do {
				if ((offset >= io_end->offset) &&
				    (offset+bh->b_size <= io_end_offset))
					buffer_io_error(bh);

				offset += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);
		}

		put_io_page(io_end->pages[i]);
	}
	io_end->num_io_pages = 0;
	inode = io_end->inode;

	if (error) {
		io_end->flag |= EXT4_IO_END_ERROR;
		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
	}

	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
		ext4_free_io_end(io_end);
		return;
	}

	/* Add the io_end to per-inode completed io list */
	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);

	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
	/* queue the work to convert unwritten extents to written */
	queue_work(wq, &io_end->work);
}

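/*
 * Submit the bio currently being assembled in @io (if any) and reset the
 * submit state so the next buffer starts a fresh bio.
 */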
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		bio_get(io->io_bio);
		submit_bio(io->io_op, io->io_bio);
		BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
		bio_put(io->io_bio);
	}
	io->io_bio = NULL;
	io->io_op = 0;
	io->io_end = NULL;
}

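/*
 * Start a new bio for the block mapped by @bh: allocate an io_end and a
 * bio, point the bio at the buffer's block device and starting sector,
 * and record the submit state (bio, write op, next expected block) in @io.
 */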
static int io_submit_init(struct ext4_io_submit *io,
			  struct inode *inode,
			  struct writeback_control *wbc,
			  struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	struct page *page = bh->b_page;
	int nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio *bio;

	io_end = ext4_init_io_end(inode, GFP_NOFS);
	if (!io_end)
		return -ENOMEM;
	bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_private = io->io_end = io_end;
	bio->bi_end_io = ext4_end_bio;

	io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);

	io->io_bio = bio;
	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
	io->io_next_block = bh->b_blocknr;
	return 0;
}

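/*
 * Add one buffer_head to the bio being assembled in @io.  Unmapped or
 * delayed buffers are skipped; a buffer that is not physically contiguous
 * with the previous one, or that does not fit in the current bio, forces
 * the pending bio to be submitted and a new one started.
 */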
static int io_submit_add_bh(struct ext4_io_submit *io,
			    struct ext4_io_page *io_page,
			    struct inode *inode,
			    struct writeback_control *wbc,
			    struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	int ret;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
	}

	if (!buffer_mapped(bh) || buffer_delay(bh)) {
		if (!buffer_mapped(bh))
			clear_buffer_dirty(bh);
		if (io->io_bio)
			ext4_io_submit(io);
		return 0;
	}

	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		ret = io_submit_init(io, inode, wbc, bh);
		if (ret)
			return ret;
	}
	io_end = io->io_end;
	if ((io_end->num_io_pages >= MAX_IO_PAGES) &&
	    (io_end->pages[io_end->num_io_pages-1] != io_page))
		goto submit_and_retry;
	if (buffer_uninit(bh))
		ext4_set_io_unwritten_flag(inode, io_end);
	io->io_end->size += bh->b_size;
	io->io_next_block++;
	ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	if ((io_end->num_io_pages == 0) ||
	    (io_end->pages[io_end->num_io_pages-1] != io_page)) {
		io_end->pages[io_end->num_io_pages++] = io_page;
		atomic_inc(&io_page->p_count);
	}
	return 0;
}

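/*
 * Write out the dirty buffers of a locked page, up to @len bytes: mark
 * the page under writeback, zero the blocks past @len, and feed each
 * remaining buffer to io_submit_add_bh().  The io_page reference taken
 * here is dropped by put_io_page() once submission (or failure) is done.
 */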
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);
	set_page_writeback(page);
	ClearPageError(page);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		block_end = block_start + blocksize;
		if (block_start >= len) {
			/*
			 * Comments copied from block_write_full_page_endio:
			 *
			 * The page straddles i_size.  It must be zeroed out on
			 * each and every writepage invocation because it may
			 * be mmapped.  "A file is mapped in multiples of the
			 * page size.  For a file that is not a multiple of
			 * the page size, the remaining memory is zeroed when
			 * mapped, and writes to that region are not written
			 * out to the file."
			 */
			zero_user_segment(page, block_start, block_end);
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		clear_buffer_dirty(bh);
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	/*
	 * If the page was truncated before we could do the writeback,
	 * or we had a memory allocation error while trying to write
	 * the first buffer head, we won't have submitted any pages for
	 * I/O.  In that case we need to make sure we've cleared the
	 * PageWriteback bit from the page to prevent the system from
	 * wedging later on.
	 */
	put_io_page(io_page);
	return ret;
}