pass iov_iter to ->direct_IO()

unmodified, for now

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

commit d8d3d94b80
parent cb66a7a1f1
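
For orientation, the interface change reads roughly as follows (an illustrative sketch, not part of the diff; the five-argument iov_iter_init() mirrors the call this patch adds in mm/page_io.c below):

	/* old address_space_operations method */
	ssize_t (*direct_IO)(int rw, struct kiocb *iocb, const struct iovec *iov,
			     loff_t offset, unsigned long nr_segs);

	/* new method: the iovec array and segment count travel inside the iterator */
	ssize_t (*direct_IO)(int rw, struct kiocb *iocb, struct iov_iter *iter,
			     loff_t offset);

	/* a caller wraps its iovec array once and hands the iterator down */
	struct iov_iter i;

	iov_iter_init(&i, iov, nr_segs, iov_length(iov, nr_segs), 0);
	ret = mapping->a_ops->direct_IO(rw, iocb, &i, pos);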
@@ -196,8 +196,7 @@ prototypes:
 	void (*invalidatepage) (struct page *, unsigned int, unsigned int);
 	int (*releasepage) (struct page *, int);
 	void (*freepage)(struct page *);
-	int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
-			loff_t offset, unsigned long nr_segs);
+	int (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
 	int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **,
 				unsigned long *);
 	int (*migratepage)(struct address_space *, struct page *, struct page *);
@@ -589,8 +589,7 @@ struct address_space_operations {
 	void (*invalidatepage) (struct page *, unsigned int, unsigned int);
 	int (*releasepage) (struct page *, int);
 	void (*freepage)(struct page *);
-	ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
-			loff_t offset, unsigned long nr_segs);
+	ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
 	struct page* (*get_xip_page)(struct address_space *, sector_t,
 			int);
 	/* migrate the contents of a page to the specified target */
@@ -363,15 +363,14 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
 #define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
 		      ~(DT_MAX_BRW_SIZE - 1))
 static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
-			       const struct iovec *iov, loff_t file_offset,
-			       unsigned long nr_segs)
+			       struct iov_iter *iter, loff_t file_offset)
 {
 	struct lu_env *env;
 	struct cl_io *io;
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 	struct ccc_object *obj = cl_inode2ccc(inode);
-	long count = iov_length(iov, nr_segs);
+	long count = iov_length(iter->iov, iter->nr_segs);
 	long tot_bytes = 0, result = 0;
 	struct ll_inode_info *lli = ll_i2info(inode);
 	unsigned long seg = 0;
@@ -392,9 +391,9 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
 			   MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);
 
 	/* Check that all user buffers are aligned as well */
-	for (seg = 0; seg < nr_segs; seg++) {
-		if (((unsigned long)iov[seg].iov_base & ~CFS_PAGE_MASK) ||
-		    (iov[seg].iov_len & ~CFS_PAGE_MASK))
+	for (seg = 0; seg < iter->nr_segs; seg++) {
+		if (((unsigned long)iter->iov[seg].iov_base & ~CFS_PAGE_MASK) ||
+		    (iter->iov[seg].iov_len & ~CFS_PAGE_MASK))
 			return -EINVAL;
 	}
 
@@ -411,9 +410,9 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
 		mutex_lock(&inode->i_mutex);
 
 	LASSERT(obj->cob_transient_pages == 0);
-	for (seg = 0; seg < nr_segs; seg++) {
-		long iov_left = iov[seg].iov_len;
-		unsigned long user_addr = (unsigned long)iov[seg].iov_base;
+	for (seg = 0; seg < iter->nr_segs; seg++) {
+		long iov_left = iter->iov[seg].iov_len;
+		unsigned long user_addr = (unsigned long)iter->iov[seg].iov_base;
 
 		if (rw == READ) {
 			if (file_offset >= i_size_read(inode))
@@ -259,8 +259,7 @@ static int v9fs_launder_page(struct page *page)
  *
  */
 static ssize_t
-v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-	       loff_t pos, unsigned long nr_segs)
+v9fs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
 	/*
 	 * FIXME
@@ -269,7 +268,7 @@ v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 	 */
 	p9_debug(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) off/no(%lld/%lu) EINVAL\n",
 		 iocb->ki_filp->f_path.dentry->d_name.name,
-		 (long long)pos, nr_segs);
+		 (long long)pos, iter->nr_segs);
 
 	return -EINVAL;
 }
@@ -165,14 +165,15 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
 }
 
 static ssize_t
-blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-			loff_t offset, unsigned long nr_segs)
+blkdev_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+			loff_t offset)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 
-	return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iov, offset,
-				    nr_segs, blkdev_get_block, NULL, NULL, 0);
+	return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iter->iov,
+				    offset, iter->nr_segs, blkdev_get_block,
+				    NULL, NULL, 0);
 }
 
 int __sync_blockdev(struct block_device *bdev, int wait)
@@ -7433,8 +7433,7 @@ out:
 }
 
 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
-			const struct iovec *iov, loff_t offset,
-			unsigned long nr_segs)
+			struct iov_iter *iter, loff_t offset)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
@@ -7444,8 +7443,8 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
 	bool relock = false;
 	ssize_t ret;
 
-	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
-			    offset, nr_segs))
+	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iter->iov,
+			    offset, iter->nr_segs))
 		return 0;
 
 	atomic_inc(&inode->i_dio_count);
@@ -7457,7 +7456,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
 	 * we need to flush the dirty pages again to make absolutely sure
 	 * that any outstanding dirty pages are on disk.
 	 */
-	count = iov_length(iov, nr_segs);
+	count = iov_length(iter->iov, iter->nr_segs);
 	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
 		     &BTRFS_I(inode)->runtime_flags))
 		filemap_fdatawrite_range(inode->i_mapping, offset, count);
@@ -7484,7 +7483,8 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
 
 	ret = __blockdev_direct_IO(rw, iocb, inode,
 			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
-			iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
+			iter->iov, offset, iter->nr_segs,
+			btrfs_get_blocks_direct, NULL,
 			btrfs_submit_direct, flags);
 	if (rw & WRITE) {
 		if (ret < 0 && ret != -EIOCBQUEUED)
@@ -1187,8 +1187,8 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
  * never get called.
  */
 static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
-			      const struct iovec *iov,
-			      loff_t pos, unsigned long nr_segs)
+			      struct iov_iter *iter,
+			      loff_t pos)
 {
 	WARN_ON(1);
 	return -EINVAL;
@@ -3702,8 +3702,8 @@ void cifs_oplock_break(struct work_struct *work)
  * Direct IO is not yet supported in the cached mode.
  */
 static ssize_t
-cifs_direct_io(int rw, struct kiocb *iocb, const struct iovec *iov,
-	       loff_t pos, unsigned long nr_segs)
+cifs_direct_io(int rw, struct kiocb *iocb, struct iov_iter *iter,
+	       loff_t pos)
 {
 	/*
 	 * FIXME
@@ -964,7 +964,7 @@ static void exofs_invalidatepage(struct page *page, unsigned int offset,
 
 /* TODO: Should be easy enough to do proprly */
 static ssize_t exofs_direct_IO(int rw, struct kiocb *iocb,
-		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+		struct iov_iter *iter, loff_t offset)
 {
 	return 0;
 }
@@ -850,18 +850,19 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
 }
 
 static ssize_t
-ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-			loff_t offset, unsigned long nr_segs)
+ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+			loff_t offset)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
 	ssize_t ret;
 
-	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-				 ext2_get_block);
+	ret = blockdev_direct_IO(rw, iocb, inode, iter->iov, offset,
+				 iter->nr_segs, ext2_get_block);
 	if (ret < 0 && (rw & WRITE))
-		ext2_write_failed(mapping, offset + iov_length(iov, nr_segs));
+		ext2_write_failed(mapping, offset +
+				  iov_length(iter->iov, iter->nr_segs));
 	return ret;
 }
 
@@ -1820,8 +1820,7 @@ static int ext3_releasepage(struct page *page, gfp_t wait)
  * VFS code falls back into buffered path in that case so we are safe.
  */
 static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
-			const struct iovec *iov, loff_t offset,
-			unsigned long nr_segs)
+			struct iov_iter *iter, loff_t offset)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
@@ -1829,10 +1828,10 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
 	handle_t *handle;
 	ssize_t ret;
 	int orphan = 0;
-	size_t count = iov_length(iov, nr_segs);
+	size_t count = iov_length(iter->iov, iter->nr_segs);
 	int retries = 0;
 
-	trace_ext3_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
+	trace_ext3_direct_IO_enter(inode, offset, count, rw);
 
 	if (rw == WRITE) {
 		loff_t final_size = offset + count;
@@ -1856,15 +1855,15 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
 	}
 
 retry:
-	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-				 ext3_get_block);
+	ret = blockdev_direct_IO(rw, iocb, inode, iter->iov, offset,
+				 iter->nr_segs, ext3_get_block);
 	/*
 	 * In case of error extending write may have instantiated a few
 	 * blocks outside i_size. Trim these off again.
 	 */
 	if (unlikely((rw & WRITE) && ret < 0)) {
 		loff_t isize = i_size_read(inode);
-		loff_t end = offset + iov_length(iov, nr_segs);
+		loff_t end = offset + count;
 
 		if (end > isize)
 			ext3_truncate_failed_direct_write(inode);
@@ -1909,8 +1908,7 @@ retry:
 			ret = err;
 	}
 out:
-	trace_ext3_direct_IO_exit(inode, offset,
-				  iov_length(iov, nr_segs), rw, ret);
+	trace_ext3_direct_IO_exit(inode, offset, count, rw, ret);
 	return ret;
 }
 
@@ -3222,8 +3222,7 @@ retake_lock:
 }
 
 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
-			      const struct iovec *iov, loff_t offset,
-			      unsigned long nr_segs)
+			      struct iov_iter *iter, loff_t offset)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
@@ -3239,13 +3238,13 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
 	if (ext4_has_inline_data(inode))
 		return 0;
 
-	trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
+	trace_ext4_direct_IO_enter(inode, offset, iov_length(iter->iov, iter->nr_segs), rw);
 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-		ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
+		ret = ext4_ext_direct_IO(rw, iocb, iter->iov, offset, iter->nr_segs);
 	else
-		ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+		ret = ext4_ind_direct_IO(rw, iocb, iter->iov, offset, iter->nr_segs);
 	trace_ext4_direct_IO_exit(inode, offset,
-				  iov_length(iov, nr_segs), rw, ret);
+				  iov_length(iter->iov, iter->nr_segs), rw, ret);
 	return ret;
 }
 
@@ -1010,7 +1010,7 @@ static int check_direct_IO(struct inode *inode, int rw,
 }
 
 static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
-		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+		struct iov_iter *iter, loff_t offset)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
@@ -1019,11 +1019,11 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
 	if (f2fs_has_inline_data(inode))
 		return 0;
 
-	if (check_direct_IO(inode, rw, iov, offset, nr_segs))
+	if (check_direct_IO(inode, rw, iter->iov, offset, iter->nr_segs))
 		return 0;
 
-	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-				  get_data_block);
+	return blockdev_direct_IO(rw, iocb, inode, iter->iov, offset,
+				  iter->nr_segs, get_data_block);
 }
 
 static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
@@ -185,8 +185,8 @@ static int fat_write_end(struct file *file, struct address_space *mapping,
 }
 
 static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
-			     const struct iovec *iov,
-			     loff_t offset, unsigned long nr_segs)
+			     struct iov_iter *iter,
+			     loff_t offset)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
@@ -203,7 +203,7 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
 		 *
 		 * Return 0, and fallback to normal buffered write.
 		 */
-		loff_t size = offset + iov_length(iov, nr_segs);
+		loff_t size = offset + iov_length(iter->iov, iter->nr_segs);
 		if (MSDOS_I(inode)->mmu_private < size)
 			return 0;
 	}
@@ -212,10 +212,11 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
 	 * FAT need to use the DIO_LOCKING for avoiding the race
 	 * condition of fat_get_block() and ->truncate().
 	 */
-	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-				 fat_get_block);
+	ret = blockdev_direct_IO(rw, iocb, inode, iter->iov, offset,
+				 iter->nr_segs, fat_get_block);
 	if (ret < 0 && (rw & WRITE))
-		fat_write_failed(mapping, offset + iov_length(iov, nr_segs));
+		fat_write_failed(mapping, offset +
+				 iov_length(iter->iov, iter->nr_segs));
 
 	return ret;
 }
@@ -2890,8 +2890,8 @@ static inline loff_t fuse_round_up(loff_t off)
 }
 
 static ssize_t
-fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-	       loff_t offset, unsigned long nr_segs)
+fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+	       loff_t offset)
 {
 	ssize_t ret = 0;
 	struct file *file = iocb->ki_filp;
@@ -2900,7 +2900,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 	loff_t pos = 0;
 	struct inode *inode;
 	loff_t i_size;
-	size_t count = iov_length(iov, nr_segs);
+	size_t count = iov_length(iter->iov, iter->nr_segs);
 	struct fuse_io_priv *io;
 
 	pos = offset;
@@ -2944,9 +2944,9 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 		io->async = false;
 
 	if (rw == WRITE)
-		ret = __fuse_direct_write(io, iov, nr_segs, &pos);
+		ret = __fuse_direct_write(io, iter->iov, iter->nr_segs, &pos);
 	else
-		ret = __fuse_direct_read(io, iov, nr_segs, &pos, count);
+		ret = __fuse_direct_read(io, iter->iov, iter->nr_segs, &pos, count);
 
 	if (io->async) {
 		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
@@ -1041,8 +1041,7 @@ static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
 
 
 static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
-			      const struct iovec *iov, loff_t offset,
-			      unsigned long nr_segs)
+			      struct iov_iter *iter, loff_t offset)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
@@ -1082,7 +1081,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
 	 */
 	if (mapping->nrpages) {
 		loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
-		loff_t len = iov_length(iov, nr_segs);
+		loff_t len = iov_length(iter->iov, iter->nr_segs);
 		loff_t end = PAGE_ALIGN(offset + len) - 1;
 
 		rv = 0;
@@ -1097,9 +1096,9 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
 			truncate_inode_pages_range(mapping, lstart, end);
 	}
 
-	rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-				  offset, nr_segs, gfs2_get_block_direct,
-				  NULL, NULL, 0);
+	rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
+				  iter->iov, offset, iter->nr_segs,
+				  gfs2_get_block_direct, NULL, NULL, 0);
 out:
 	gfs2_glock_dq(&gh);
 	gfs2_holder_uninit(&gh);
@@ -125,15 +125,15 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
 }
 
 static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
-		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+		struct iov_iter *iter, loff_t offset)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = file_inode(file)->i_mapping->host;
 	ssize_t ret;
 
-	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-				 hfs_get_block);
+	ret = blockdev_direct_IO(rw, iocb, inode, iter->iov, offset,
+				 iter->nr_segs, hfs_get_block);
 
 	/*
 	 * In case of error extending write may have instantiated a few
@@ -141,7 +141,7 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
 	 */
 	if (unlikely((rw & WRITE) && ret < 0)) {
 		loff_t isize = i_size_read(inode);
-		loff_t end = offset + iov_length(iov, nr_segs);
+		loff_t end = offset + iov_length(iter->iov, iter->nr_segs);
 
 		if (end > isize)
 			hfs_write_failed(mapping, end);
@@ -123,14 +123,14 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
 }
 
 static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
-		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+		struct iov_iter *iter, loff_t offset)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = file_inode(file)->i_mapping->host;
 	ssize_t ret;
 
-	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+	ret = blockdev_direct_IO(rw, iocb, inode, iter->iov, offset, iter->nr_segs,
 				 hfsplus_get_block);
 
 	/*
@@ -139,7 +139,7 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
 	 */
 	if (unlikely((rw & WRITE) && ret < 0)) {
 		loff_t isize = i_size_read(inode);
-		loff_t end = offset + iov_length(iov, nr_segs);
+		loff_t end = offset + iov_length(iter->iov, iter->nr_segs);
 
 		if (end > isize)
 			hfsplus_write_failed(mapping, end);
@@ -331,15 +331,15 @@ static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
 }
 
 static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
-		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+		struct iov_iter *iter, loff_t offset)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = file->f_mapping->host;
 	ssize_t ret;
 
-	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-				 jfs_get_block);
+	ret = blockdev_direct_IO(rw, iocb, inode, iter->iov, offset,
+				 iter->nr_segs, jfs_get_block);
 
 	/*
 	 * In case of error extending write may have instantiated a few
@@ -347,7 +347,7 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
 	 */
 	if (unlikely((rw & WRITE) && ret < 0)) {
 		loff_t isize = i_size_read(inode);
-		loff_t end = offset + iov_length(iov, nr_segs);
+		loff_t end = offset + iov_length(iter->iov, iter->nr_segs);
 
 		if (end > isize)
 			jfs_write_failed(mapping, end);
@@ -121,20 +121,20 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
  * shunt off direct read and write requests before the VFS gets them,
  * so this method is only ever called for swap.
  */
-ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
+ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
 #ifndef CONFIG_NFS_SWAP
 	dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
-		iocb->ki_filp, (long long) pos, nr_segs);
+		iocb->ki_filp, (long long) pos, iter->nr_segs);
 
 	return -EINVAL;
 #else
 	VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE);
 
 	if (rw == READ || rw == KERNEL_READ)
-		return nfs_file_direct_read(iocb, iov, nr_segs, pos,
+		return nfs_file_direct_read(iocb, iter->iov, iter->nr_segs, pos,
 				rw == READ ? true : false);
-	return nfs_file_direct_write(iocb, iov, nr_segs, pos,
+	return nfs_file_direct_write(iocb, iter->iov, iter->nr_segs, pos,
 				rw == WRITE ? true : false);
 #endif /* CONFIG_NFS_SWAP */
 }
@@ -298,8 +298,8 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
 }
 
 static ssize_t
-nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-		loff_t offset, unsigned long nr_segs)
+nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+		loff_t offset)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
@@ -310,8 +310,8 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 		return 0;
 
 	/* Needs synchronization with the cleaner */
-	size = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-				  nilfs_get_block);
+	size = blockdev_direct_IO(rw, iocb, inode, iter->iov, offset,
+				  iter->nr_segs, nilfs_get_block);
 
 	/*
 	 * In case of error extending write may have instantiated a few
@@ -319,7 +319,7 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 	 */
 	if (unlikely((rw & WRITE) && size < 0)) {
 		loff_t isize = i_size_read(inode);
-		loff_t end = offset + iov_length(iov, nr_segs);
+		loff_t end = offset + iov_length(iter->iov, iter->nr_segs);
 
 		if (end > isize)
 			nilfs_write_failed(mapping, end);
@@ -599,9 +599,8 @@ static int ocfs2_releasepage(struct page *page, gfp_t wait)
 
 static ssize_t ocfs2_direct_IO(int rw,
 			       struct kiocb *iocb,
-			       const struct iovec *iov,
-			       loff_t offset,
-			       unsigned long nr_segs)
+			       struct iov_iter *iter,
+			       loff_t offset)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file_inode(file)->i_mapping->host;
@@ -618,7 +617,7 @@ static ssize_t ocfs2_direct_IO(int rw,
 		return 0;
 
 	return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
-				    iov, offset, nr_segs,
+				    iter->iov, offset, iter->nr_segs,
 				    ocfs2_direct_IO_get_blocks,
 				    ocfs2_dio_end_io, NULL, 0);
 }
@@ -3083,15 +3083,14 @@ static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
 /* We thank Mingming Cao for helping us understand in great detail what
    to do in this section of the code. */
 static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
-				  const struct iovec *iov, loff_t offset,
-				  unsigned long nr_segs)
+				  struct iov_iter *iter, loff_t offset)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 	ssize_t ret;
 
-	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-				 reiserfs_get_blocks_direct_io);
+	ret = blockdev_direct_IO(rw, iocb, inode, iter->iov, offset,
+				 iter->nr_segs, reiserfs_get_blocks_direct_io);
 
 	/*
 	 * In case of error extending write may have instantiated a few
@@ -3099,7 +3098,7 @@ static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
 	 */
 	if (unlikely((rw & WRITE) && ret < 0)) {
 		loff_t isize = i_size_read(inode);
-		loff_t end = offset + iov_length(iov, nr_segs);
+		loff_t end = offset + iov_length(iter->iov, iter->nr_segs);
 
 		if ((end > isize) && inode_newsize_ok(inode, isize) == 0) {
 			truncate_setsize(inode, isize);
@@ -119,8 +119,8 @@ static int udf_adinicb_write_end(struct file *file,
 }
 
 static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb,
-				     const struct iovec *iov,
-				     loff_t offset, unsigned long nr_segs)
+				     struct iov_iter *iter,
+				     loff_t offset)
 {
 	/* Fallback to buffered I/O. */
 	return 0;
@@ -217,18 +217,18 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
 }
 
 static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
-			     const struct iovec *iov,
-			     loff_t offset, unsigned long nr_segs)
+			     struct iov_iter *iter,
+			     loff_t offset)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
 	ssize_t ret;
 
-	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+	ret = blockdev_direct_IO(rw, iocb, inode, iter->iov, offset, iter->nr_segs,
 				 udf_get_block);
 	if (unlikely(ret < 0 && (rw & WRITE)))
-		udf_write_failed(mapping, offset + iov_length(iov, nr_segs));
+		udf_write_failed(mapping, offset + iov_length(iter->iov, iter->nr_segs));
 	return ret;
 }
 
@@ -1449,9 +1449,8 @@ STATIC ssize_t
 xfs_vm_direct_IO(
 	int			rw,
 	struct kiocb		*iocb,
-	const struct iovec	*iov,
-	loff_t			offset,
-	unsigned long		nr_segs)
+	struct iov_iter		*iter,
+	loff_t			offset)
 {
 	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
@@ -1459,7 +1458,7 @@ xfs_vm_direct_IO(
 	ssize_t			ret;
 
 	if (rw & WRITE) {
-		size_t size = iov_length(iov, nr_segs);
+		size_t size = iov_length(iter->iov, iter->nr_segs);
 
 		/*
 		 * We cannot preallocate a size update transaction here as we
@@ -1471,16 +1470,16 @@ xfs_vm_direct_IO(
 		if (offset + size > XFS_I(inode)->i_d.di_size)
 			ioend->io_isdirect = 1;
 
-		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
-					   offset, nr_segs,
+		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter->iov,
+					   offset, iter->nr_segs,
 					   xfs_get_blocks_direct,
 					   xfs_end_io_direct_write, NULL,
 					   DIO_ASYNC_EXTEND);
 		if (ret != -EIOCBQUEUED && iocb->private)
 			goto out_destroy_ioend;
 	} else {
-		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
-					   offset, nr_segs,
+		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter->iov,
+					   offset, iter->nr_segs,
 					   xfs_get_blocks_direct,
 					   NULL, NULL, 0);
 	}
@@ -343,8 +343,7 @@ struct address_space_operations {
 	void (*invalidatepage) (struct page *, unsigned int, unsigned int);
 	int (*releasepage) (struct page *, gfp_t);
 	void (*freepage)(struct page *);
-	ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
-			loff_t offset, unsigned long nr_segs);
+	ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
 	int (*get_xip_mem)(struct address_space *, pgoff_t, int,
 						void **, unsigned long *);
 	/*
@@ -459,8 +459,7 @@ extern int nfs3_removexattr (struct dentry *, const char *name);
 /*
  * linux/fs/nfs/direct.c
  */
-extern ssize_t nfs_direct_IO(int, struct kiocb *, const struct iovec *, loff_t,
-			unsigned long);
+extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t);
 extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
 			const struct iovec *iov, unsigned long nr_segs,
 			loff_t pos, bool uio);
@@ -1699,10 +1699,9 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 		size = i_size_read(inode);
 		retval = filemap_write_and_wait_range(mapping, pos,
 					pos + iov_length(iov, nr_segs) - 1);
-		if (!retval) {
-			retval = mapping->a_ops->direct_IO(READ, iocb,
-							   iov, pos, nr_segs);
-		}
+		if (!retval)
+			retval = mapping->a_ops->direct_IO(READ, iocb, &i, pos);
+
 		if (retval > 0) {
 			*ppos = pos + retval;
 			count -= retval;
@@ -2383,7 +2382,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
 		}
 	}
 
-	written = mapping->a_ops->direct_IO(WRITE, iocb, from->iov, pos, from->nr_segs);
+	written = mapping->a_ops->direct_IO(WRITE, iocb, from, pos);
 
 	/*
 	 * Finally, try again to invalidate clean pages which might have been
@@ -263,16 +263,18 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
 		.iov_base = kmap(page),
 		.iov_len = PAGE_SIZE,
 	};
+	struct iov_iter from;
 
 	init_sync_kiocb(&kiocb, swap_file);
 	kiocb.ki_pos = page_file_offset(page);
 	kiocb.ki_nbytes = PAGE_SIZE;
+	iov_iter_init(&from, &iov, 1, PAGE_SIZE, 0);
 
 	set_page_writeback(page);
 	unlock_page(page);
 	ret = mapping->a_ops->direct_IO(KERNEL_WRITE,
-					&kiocb, &iov,
-					kiocb.ki_pos, 1);
+					&kiocb, &from,
+					kiocb.ki_pos);
 	kunmap(page);
 	if (ret == PAGE_SIZE) {
 		count_vm_event(PSWPOUT);
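
As the commit message says, the implementations are left unmodified for now: each ->direct_IO() simply reaches back into iter->iov and iter->nr_segs for the arguments it used before, so the switch to iov_iter primitives can happen in later patches. A minimal sketch of that transitional callee-side pattern, with the hypothetical names foo_direct_IO and foo_get_block standing in for a real filesystem:

	static ssize_t foo_direct_IO(int rw, struct kiocb *iocb,
				     struct iov_iter *iter, loff_t offset)
	{
		struct inode *inode = iocb->ki_filp->f_mapping->host;

		/* transitional: unpack the iterator into the old-style arguments,
		 * exactly as the ext2/f2fs/jfs hunks above do */
		return blockdev_direct_IO(rw, iocb, inode, iter->iov, offset,
					  iter->nr_segs, foo_get_block);
	}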