NFS: Remove nfs_readpage_sync()

It makes no sense to maintain 2 parallel systems for reading in pages.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Author: Trond Myklebust
Date:   2006-12-13 15:23:44 -05:00
Commit: 8e0969f045 (parent c228fd3aee)

5 changed files with 2 additions and 197 deletions
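For context, the "2 parallel systems" the changelog refers to are the synchronous per-page path (nfs_readpage_sync() and the per-version ->read methods deleted below, all built on a blocking rpc_call_sync()) and the asynchronous page-cache path that remains, which completes through rpc_call_ops callbacks. The sketch below only illustrates the difference in submission style; the example_* names are placeholders, not fs/nfs symbols, and rpc_call_async() is used here merely as the simplest asynchronous entry point in the sunrpc API. The surviving fs/nfs/read.c code sets its tasks up through its own helpers.

/*
 * Illustrative only: the two submission styles this commit collapses into
 * one. The example_* names are placeholders, not symbols from fs/nfs.
 */
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>

/* Synchronous style (being removed): the caller sleeps in rpc_call_sync()
 * until the READ reply arrives. */
static int example_read_sync(struct inode *inode, struct rpc_message *msg)
{
	return rpc_call_sync(NFS_CLIENT(inode), msg, 0);
}

/* Asynchronous style (what remains): rpc_call_async() returns as soon as
 * the task is queued; the reply is handled later, in rpciod context,
 * through the rpc_call_ops callbacks. */
static void example_read_done(struct rpc_task *task, void *calldata)
{
	/* decode the reply, mark pages up to date, record errors, ... */
}

static void example_read_release(void *calldata)
{
	/* drop the references taken when the request was set up */
}

static const struct rpc_call_ops example_read_ops = {
	.rpc_call_done	= example_read_done,
	.rpc_release	= example_read_release,
};

static int example_read_async(struct inode *inode, struct rpc_message *msg,
		void *calldata)
{
	return rpc_call_async(NFS_CLIENT(inode), msg, 0,
			&example_read_ops, calldata);
}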


@@ -253,29 +253,6 @@ static int nfs3_proc_readlink(struct inode *inode, struct page *page,
	return status;
}

static int nfs3_proc_read(struct nfs_read_data *rdata)
{
	int flags = rdata->flags;
	struct inode * inode = rdata->inode;
	struct nfs_fattr * fattr = rdata->res.fattr;
	struct rpc_message msg = {
		.rpc_proc = &nfs3_procedures[NFS3PROC_READ],
		.rpc_argp = &rdata->args,
		.rpc_resp = &rdata->res,
		.rpc_cred = rdata->cred,
	};
	int status;

	dprintk("NFS call read %d @ %Ld\n", rdata->args.count,
			(long long) rdata->args.offset);
	nfs_fattr_init(fattr);
	status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
	if (status >= 0)
		nfs_refresh_inode(inode, fattr);
	dprintk("NFS reply read: %d\n", status);
	return status;
}

/*
 * Create a regular file.
 * For now, we don't implement O_EXCL.
@@ -855,7 +832,6 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
	.lookup		= nfs3_proc_lookup,
	.access		= nfs3_proc_access,
	.readlink	= nfs3_proc_readlink,
	.read		= nfs3_proc_read,
	.create		= nfs3_proc_create,
	.remove		= nfs3_proc_remove,
	.unlink_setup	= nfs3_proc_unlink_setup,


@@ -1734,44 +1734,6 @@ static int nfs4_proc_readlink(struct inode *inode, struct page *page,
	return err;
}

static int _nfs4_proc_read(struct nfs_read_data *rdata)
{
	int flags = rdata->flags;
	struct inode *inode = rdata->inode;
	struct nfs_fattr *fattr = rdata->res.fattr;
	struct nfs_server *server = NFS_SERVER(inode);
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ],
		.rpc_argp = &rdata->args,
		.rpc_resp = &rdata->res,
		.rpc_cred = rdata->cred,
	};
	unsigned long timestamp = jiffies;
	int status;

	dprintk("NFS call read %d @ %Ld\n", rdata->args.count,
			(long long) rdata->args.offset);
	nfs_fattr_init(fattr);
	status = rpc_call_sync(server->client, &msg, flags);
	if (!status)
		renew_lease(server, timestamp);
	dprintk("NFS reply read: %d\n", status);
	return status;
}

static int nfs4_proc_read(struct nfs_read_data *rdata)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(NFS_SERVER(rdata->inode),
				_nfs4_proc_read(rdata),
				&exception);
	} while (exception.retry);
	return err;
}

/*
 * Got race?
 * We will need to arrange for the VFS layer to provide an atomic open.
@@ -3643,7 +3605,6 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
	.lookup		= nfs4_proc_lookup,
	.access		= nfs4_proc_access,
	.readlink	= nfs4_proc_readlink,
	.read		= nfs4_proc_read,
	.create		= nfs4_proc_create,
	.remove		= nfs4_proc_remove,
	.unlink_setup	= nfs4_proc_unlink_setup,


@@ -186,35 +186,6 @@ static int nfs_proc_readlink(struct inode *inode, struct page *page,
	return status;
}

static int nfs_proc_read(struct nfs_read_data *rdata)
{
	int flags = rdata->flags;
	struct inode * inode = rdata->inode;
	struct nfs_fattr * fattr = rdata->res.fattr;
	struct rpc_message msg = {
		.rpc_proc = &nfs_procedures[NFSPROC_READ],
		.rpc_argp = &rdata->args,
		.rpc_resp = &rdata->res,
		.rpc_cred = rdata->cred,
	};
	int status;

	dprintk("NFS call read %d @ %Ld\n", rdata->args.count,
			(long long) rdata->args.offset);
	nfs_fattr_init(fattr);
	status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
	if (status >= 0) {
		nfs_refresh_inode(inode, fattr);
		/* Emulate the eof flag, which isn't normally needed in NFSv2
		 * as it is guaranteed to always return the file attributes
		 */
		if (rdata->args.offset + rdata->args.count >= fattr->size)
			rdata->res.eof = 1;
	}
	dprintk("NFS reply read: %d\n", status);
	return status;
}

static int
nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
		int flags, struct nameidata *nd)
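The EOF emulation in the removed NFSv2 helper above is plain arithmetic on the returned attributes: a v2 READ reply always carries fresh file attributes but no eof bit, so the client infers eof once offset + count reaches the file size. A standalone illustration with hypothetical numbers (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long long offset = 0;		/* rdata->args.offset */
	unsigned int count = 4096;		/* rdata->args.count */
	unsigned long long file_size = 3000;	/* fattr->size (hypothetical) */
	int eof;

	/* Same test as the removed nfs_proc_read(): the request reaches or
	 * passes the end of the file, so eof is set. */
	eof = (offset + count >= file_size);
	printf("eof = %d\n", eof);		/* prints: eof = 1 */
	return 0;
}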
@@ -666,7 +637,6 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
	.lookup		= nfs_proc_lookup,
	.access		= NULL,			/* access */
	.readlink	= nfs_proc_readlink,
	.read		= nfs_proc_read,
	.create		= nfs_proc_create,
	.remove		= nfs_proc_remove,
	.unlink_setup	= nfs_proc_unlink_setup,


@@ -5,14 +5,6 @@
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 *
 * We do an ugly hack here in order to return proper error codes to the
 * user program when a read request failed: since generic_file_read
 * only checks the return value of inode->i_op->readpage() which is always 0
 * for async RPC, we set the error bit of the page to 1 when an error occurs,
 * and make nfs_readpage transmit requests synchronously when encountering this.
 * This is only a small problem, though, since we now retry all operations
 * within the RPC code when root squashing is suspected.
 */
#include <linux/time.h>
@@ -122,93 +114,6 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
	}
}

/*
 * Read a page synchronously.
 */
static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{
	unsigned int rsize = NFS_SERVER(inode)->rsize;
	unsigned int count = PAGE_CACHE_SIZE;
	int result = -ENOMEM;
	struct nfs_read_data *rdata;

	rdata = nfs_readdata_alloc(count);
	if (!rdata)
		goto out_unlock;

	memset(rdata, 0, sizeof(*rdata));
	rdata->flags = (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
	rdata->cred = ctx->cred;
	rdata->inode = inode;
	INIT_LIST_HEAD(&rdata->pages);
	rdata->args.fh = NFS_FH(inode);
	rdata->args.context = ctx;
	rdata->args.pages = &page;
	rdata->args.pgbase = 0UL;
	rdata->args.count = rsize;
	rdata->res.fattr = &rdata->fattr;

	dprintk("NFS: nfs_readpage_sync(%p)\n", page);

	/*
	 * This works now because the socket layer never tries to DMA
	 * into this buffer directly.
	 */
	do {
		if (count < rsize)
			rdata->args.count = count;
		rdata->res.count = rdata->args.count;
		rdata->args.offset = page_offset(page) + rdata->args.pgbase;

		dprintk("NFS: nfs_proc_read(%s, (%s/%Ld), %Lu, %u)\n",
			NFS_SERVER(inode)->nfs_client->cl_hostname,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			(unsigned long long)rdata->args.pgbase,
			rdata->args.count);

		lock_kernel();
		result = NFS_PROTO(inode)->read(rdata);
		unlock_kernel();

		/*
		 * Even if we had a partial success we can't mark the page
		 * cache valid.
		 */
		if (result < 0) {
			if (result == -EISDIR)
				result = -EINVAL;
			goto io_error;
		}
		count -= result;
		rdata->args.pgbase += result;
		nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, result);

		/* Note: result == 0 should only happen if we're caching
		 * a write that extends the file and punches a hole.
		 */
		if (rdata->res.eof != 0 || result == 0)
			break;
	} while (count);

	spin_lock(&inode->i_lock);
	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
	spin_unlock(&inode->i_lock);

	if (rdata->res.eof || rdata->res.count == rdata->args.count) {
		SetPageUptodate(page);
		if (rdata->res.eof && count != 0)
			memclear_highpage_flush(page, rdata->args.pgbase, count);
	}
	result = 0;

io_error:
	nfs_readdata_free(rdata);
out_unlock:
	unlock_page(page);
	return result;
}

static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{

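The do/while loop in the removed nfs_readpage_sync() issues the page read in rsize-sized chunks, shrinking count and advancing args.pgbase by the bytes each synchronous RPC returns. A standalone illustration of that bookkeeping with hypothetical sizes (a 4096-byte page and an rsize of 1024, assuming every RPC returns a full chunk):

#include <stdio.h>

int main(void)
{
	unsigned int count = 4096;	/* PAGE_CACHE_SIZE (hypothetical) */
	unsigned int rsize = 1024;	/* server's preferred read size */
	unsigned int pgbase = 0;
	unsigned int rpcs = 0;

	do {
		/* The last chunk may be shorter than rsize. */
		unsigned int chunk = (count < rsize) ? count : rsize;
		/* Pretend the server returned the full chunk. */
		unsigned int result = chunk;

		count -= result;
		pgbase += result;
		rpcs++;
	} while (count);

	printf("%u RPCs, final pgbase %u\n", rpcs, pgbase);	/* 4 RPCs, 4096 */
	return 0;
}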
@@ -621,15 +526,9 @@ int nfs_readpage(struct file *file, struct page *page)
	} else
		ctx = get_nfs_open_context((struct nfs_open_context *)
				file->private_data);
	if (!IS_SYNC(inode)) {
		error = nfs_readpage_async(ctx, inode, page);
		goto out;
	}

	error = nfs_readpage_sync(ctx, inode, page);
	if (error < 0 && IS_SWAPFILE(inode))
		printk("Aiee.. nfs swap-in of page failed!\n");
out:
	error = nfs_readpage_async(ctx, inode, page);
	put_nfs_open_context(ctx);
	return error;

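After this hunk, nfs_readpage() no longer branches on IS_SYNC(inode): every ->readpage() request is funneled through the asynchronous path, and any error from it is returned directly. A simplified sketch of the resulting tail of the function; only the lines visible in the hunk above are taken from the diff, while the declarations and the omitted setup (statistics, nfs_wb_page(), the file == NULL case) are assumptions:

/* Sketch only, not the verbatim kernel function. */
static int nfs_readpage_sketch(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_open_context *ctx;
	int error;

	/* ... obtain ctx as in the context lines above ... */
	ctx = get_nfs_open_context((struct nfs_open_context *)
			file->private_data);

	/* The IS_SYNC()/nfs_readpage_sync() branch is gone: */
	error = nfs_readpage_async(ctx, inode, page);
	put_nfs_open_context(ctx);
	return error;
}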


@@ -784,7 +784,6 @@ struct nfs_rpc_ops {
	int (*access) (struct inode *, struct nfs_access_entry *);
	int (*readlink)(struct inode *, struct page *, unsigned int,
			unsigned int);
	int (*read) (struct nfs_read_data *);
	int (*create) (struct inode *, struct dentry *,
			struct iattr *, int, struct nameidata *);
	int (*remove) (struct inode *, struct qstr *);