/*
 * linux/fs/nfs/nfs4_fs.h
 *
 * Copyright (C) 2005 Trond Myklebust
 *
 * NFSv4-specific filesystem definitions and declarations
 */

#ifndef __LINUX_FS_NFS_NFS4_FS_H
#define __LINUX_FS_NFS_NFS4_FS_H

#ifdef CONFIG_NFS_V4

struct idmap;

enum nfs4_client_state {
	NFS4CLNT_MANAGER_RUNNING  = 0,
	NFS4CLNT_CHECK_LEASE,
	NFS4CLNT_LEASE_EXPIRED,
	NFS4CLNT_RECLAIM_REBOOT,
	NFS4CLNT_RECLAIM_NOGRACE,
	NFS4CLNT_DELEGRETURN,
	NFS4CLNT_SESSION_RESET,
	NFS4CLNT_RECALL_SLOT,
	NFS4CLNT_LEASE_CONFIRM,
	NFS4CLNT_SERVER_SCOPE_MISMATCH,
};

enum nfs4_session_state {
	NFS4_SESSION_INITING,
	NFS4_SESSION_DRAINING,
};

#define NFS4_RENEW_TIMEOUT		0x01
#define NFS4_RENEW_DELEGATION_CB	0x02

struct nfs4_minor_version_ops {
	u32	minor_version;

	int	(*call_sync)(struct rpc_clnt *clnt,
			struct nfs_server *server,
			struct rpc_message *msg,
			struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res,
			int cache_reply);
	bool	(*match_stateid)(const nfs4_stateid *,
			const nfs4_stateid *);
	int	(*find_root_sec)(struct nfs_server *, struct nfs_fh *,
			struct nfs_fsinfo *);
	const struct nfs4_state_recovery_ops *reboot_recovery_ops;
	const struct nfs4_state_recovery_ops *nograce_recovery_ops;
	const struct nfs4_state_maintenance_ops *state_renewal_ops;
};

struct nfs_unique_id {
	struct rb_node rb_node;
	__u64 id;
};

/*
 * NFSv4 state-changing operations (OPEN, CLOSE, LOCK, ...) carry sequence
 * identifiers so that the server can detect replayed or reordered requests,
 * which would otherwise leave its file state out of sync with the client.
 * Rather than serialising these calls with semaphores (which only works for
 * synchronous RPC), each state_owner keeps an rpc_sequence list defining the
 * order in which its calls may be sent: a call is transmitted only once it
 * reaches the head of the list, and on completion it bumps the sequence
 * counter, unlinks itself and wakes the next waiter. Open-owner and
 * lock-owner sequence ids are indexed to the same list, so OPEN, LOCK, ...
 * requests are ordered with respect to each other.
 */
#define NFS_SEQID_CONFIRMED 1
struct nfs_seqid_counter {
	int owner_id;
	int flags;
	u32 counter;
	spinlock_t lock;		/* Protects the list */
	struct list_head list;		/* Defines sequence of RPC calls */
	struct rpc_wait_queue wait;	/* RPC call delay queue */
};

struct nfs_seqid {
	struct nfs_seqid_counter *sequence;
	struct list_head list;
	struct rpc_task *task;
};

static inline void nfs_confirm_seqid(struct nfs_seqid_counter *seqid, int status)
{
	if (seqid_mutating_err(-status))
		seqid->flags |= NFS_SEQID_CONFIRMED;
}
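
/*
 * Illustrative sketch (not kernel code) of how the seqid helpers declared
 * later in this header are expected to be used by a state-changing call;
 * the variable names (sp, task, status) are hypothetical:
 *
 *	seqid = nfs_alloc_seqid(&sp->so_seqid, GFP_KERNEL);
 *	status = nfs_wait_on_sequence(seqid, task);	sleep until the seqid
 *							reaches the head of
 *							the rpc_sequence list
 *	... transmit the OPEN/CLOSE/LOCK RPC, collecting "status" ...
 *	nfs_increment_open_seqid(status, seqid);	bump so_seqid.counter
 *	nfs_free_seqid(seqid);				unlink and wake the
 *							next waiter
 */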

/*
 * NFS4 state_owners and lock_owners are simply labels for ordered
 * sequences of RPC calls. Their sole purpose is to provide once-only
 * semantics by allowing the server to identify replayed requests.
 */

struct nfs4_state_owner {
	struct nfs_server    *so_server;
	/*
	 * When the reference count drops to zero, the owner is not freed
	 * immediately but parked on the nfs_server's free list (so_lru)
	 * with an expiry time (so_expires), so that a later OPEN by the
	 * same credential can reuse it instead of minting a new open
	 * owner on the server. Expired entries are garbage collected
	 * before a new owner is allocated, and the free list is purged
	 * when the server is destroyed.
	 */
	struct list_head     so_lru;
	unsigned long        so_expires;
	struct rb_node	     so_server_node;

	struct rpc_cred	     *so_cred;	 /* Associated cred */

	spinlock_t	     so_lock;
	atomic_t	     so_count;
	unsigned long	     so_flags;
	struct list_head     so_states;
	struct nfs_seqid_counter so_seqid;
};
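
/*
 * Illustrative sketch (not kernel code) of the reuse policy behind so_lru
 * and so_expires described above; the internals shown are assumptions, and
 * only the helpers named here are actually declared later in this header:
 *
 *	nfs4_put_state_owner(sp):
 *		on the final reference drop, record an expiry time in
 *		sp->so_expires and park sp on the server's free list
 *		instead of freeing it.
 *
 *	nfs4_get_state_owner(server, cred, gfp):
 *		reap expired entries from the free list, then look up or
 *		allocate an owner for "cred", reusing a cached one if found.
 *
 *	nfs4_purge_state_owners(server):
 *		drop every cached entry, e.g. when the server is destroyed.
 */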

enum {
	NFS_OWNER_RECLAIM_REBOOT,
	NFS_OWNER_RECLAIM_NOGRACE
};

#define NFS_LOCK_NEW		0
#define NFS_LOCK_RECLAIM	1
#define NFS_LOCK_EXPIRED	2

/*
 * struct nfs4_state maintains the client-side state for a given
 * (state_owner,inode) tuple (OPEN) or state_owner (LOCK).
 *
 * OPEN:
 * In order to know when to OPEN_DOWNGRADE or CLOSE the state on the server,
 * we need to know how many files are open for reading or writing on a
 * given inode. This information too is stored here.
 *
 * LOCK: lock stateids are held in nfs4_lock_state structures chained off
 * the owning nfs4_state (OPEN) via its lock_states list.
 */

struct nfs4_lock_owner {
	unsigned int lo_type;
#define NFS4_ANY_LOCK_TYPE	(0U)
#define NFS4_FLOCK_LOCK_TYPE	(1U << 0)
#define NFS4_POSIX_LOCK_TYPE	(1U << 1)
	union {
		fl_owner_t posix_owner;
		pid_t flock_owner;
	} lo_u;
};

struct nfs4_lock_state {
	struct list_head	ls_locks;	/* Other lock stateids */
	struct nfs4_state *	ls_state;	/* Pointer to open state */
#define NFS_LOCK_INITIALIZED 1
	int			ls_flags;
	struct nfs_seqid_counter ls_seqid;
	nfs4_stateid		ls_stateid;
	atomic_t		ls_count;
	struct nfs4_lock_owner	ls_owner;
};

/* bits for nfs4_state->flags */
enum {
	LK_STATE_IN_USE,
	NFS_DELEGATED_STATE,		/* Current stateid is delegation */
	NFS_O_RDONLY_STATE,		/* OPEN stateid has read-only state */
	NFS_O_WRONLY_STATE,		/* OPEN stateid has write-only state */
	NFS_O_RDWR_STATE,		/* OPEN stateid has read/write state */
	NFS_STATE_RECLAIM_REBOOT,	/* OPEN stateid server rebooted */
	NFS_STATE_RECLAIM_NOGRACE,	/* OPEN stateid needs to recover state */
	NFS_STATE_POSIX_LOCKS,		/* Posix locks are supported */
};

struct nfs4_state {
	struct list_head open_states;	/* List of states for the same state_owner */
	struct list_head inode_states;	/* List of states for the same inode */
	struct list_head lock_states;	/* List of subservient lock stateids */

	struct nfs4_state_owner *owner;	/* Pointer to the open owner */
	struct inode *inode;		/* Pointer to the inode */

	unsigned long flags;		/* Do we hold any locks? */
	spinlock_t state_lock;		/* Protects the lock_states list */

	seqlock_t seqlock;		/* Protects the stateid/open_stateid */
	nfs4_stateid stateid;		/* Current stateid: may be delegation */
	nfs4_stateid open_stateid;	/* OPEN stateid */

	/* The following 3 fields are protected by owner->so_lock */
	unsigned int n_rdonly;		/* Number of read-only references */
	unsigned int n_wronly;		/* Number of write-only references */
	unsigned int n_rdwr;		/* Number of read/write references */
	fmode_t state;			/* State on the server (R,W, or RW) */
	atomic_t count;
};
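
/*
 * Worked example of the open-mode bookkeeping above (illustrative only):
 * with n_rdonly = 2, n_wronly = 0 and n_rdwr = 0, the stateid is still
 * needed for reading, so closing the last write-mode reference lets the
 * client OPEN_DOWNGRADE the stateid to read-only; once all three counters
 * reach zero it can send CLOSE.
 */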

struct nfs4_exception {
	long timeout;
	int retry;
	struct nfs4_state *state;
};

struct nfs4_state_recovery_ops {
	int owner_flag_bit;
	int state_flag_bit;
	int  (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *);
	int  (*recover_lock)(struct nfs4_state *, struct file_lock *);
	int  (*establish_clid)(struct nfs_client *, struct rpc_cred *);
	struct rpc_cred * (*get_clid_cred)(struct nfs_client *);
	int  (*reclaim_complete)(struct nfs_client *);
};

struct nfs4_state_maintenance_ops {
	int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *, unsigned);
	struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *);
	int (*renew_lease)(struct nfs_client *, struct rpc_cred *);
};

extern const struct dentry_operations nfs4_dentry_operations;
extern const struct inode_operations nfs4_dir_inode_operations;

/* nfs4proc.c */
extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
		struct nfs4_fs_locations *fs_locations, struct page *page);
extern void nfs4_release_lockowner(const struct nfs4_lock_state *);
extern const struct xattr_handler *nfs4_xattr_handlers[];

#if defined(CONFIG_NFS_V4_1)
static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
{
	return server->nfs_client->cl_session;
}

extern bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy);
extern int nfs4_setup_sequence(const struct nfs_server *server,
		struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
		struct rpc_task *task);
extern int nfs41_setup_sequence(struct nfs4_session *session,
		struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
		struct rpc_task *task);
extern void nfs4_destroy_session(struct nfs4_session *session);
extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
extern int nfs4_proc_create_session(struct nfs_client *);
extern int nfs4_proc_destroy_session(struct nfs4_session *);
extern int nfs4_init_session(struct nfs_server *server);
extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
		struct nfs_fsinfo *fsinfo);
extern int nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data,
		bool sync);

static inline bool
is_ds_only_client(struct nfs_client *clp)
{
	return (clp->cl_exchange_flags & EXCHGID4_FLAG_MASK_PNFS) ==
		EXCHGID4_FLAG_USE_PNFS_DS;
}

static inline bool
is_ds_client(struct nfs_client *clp)
{
	return clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_DS;
}
#else /* CONFIG_NFS_V4_1 */
static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
{
	return NULL;
}

static inline int nfs4_setup_sequence(const struct nfs_server *server,
		struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
		struct rpc_task *task)
{
	return 0;
}

static inline int nfs4_init_session(struct nfs_server *server)
{
	return 0;
}

static inline bool
is_ds_only_client(struct nfs_client *clp)
{
	return false;
}

static inline bool
is_ds_client(struct nfs_client *clp)
{
	return false;
}
#endif /* CONFIG_NFS_V4_1 */

extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[];

extern const u32 nfs4_fattr_bitmap[2];
extern const u32 nfs4_statfs_bitmap[2];
extern const u32 nfs4_pathconf_bitmap[2];
extern const u32 nfs4_fsinfo_bitmap[3];
extern const u32 nfs4_fs_locations_bitmap[2];

/* nfs4renewd.c */
extern void nfs4_schedule_state_renewal(struct nfs_client *);
extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
extern void nfs4_kill_renewd(struct nfs_client *);
extern void nfs4_renew_state(struct work_struct *);

/* nfs4state.c */
struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp);
struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
#if defined(CONFIG_NFS_V4_1)
struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
extern void nfs4_schedule_session_recovery(struct nfs4_session *);
#else
static inline void nfs4_schedule_session_recovery(struct nfs4_session *session)
{
}
#endif /* CONFIG_NFS_V4_1 */

extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
extern void nfs4_purge_state_owners(struct nfs_server *);
extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
extern void nfs4_put_open_state(struct nfs4_state *);
extern void nfs4_close_state(struct nfs4_state *, fmode_t);
extern void nfs4_close_sync(struct nfs4_state *, fmode_t);
extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
extern void nfs_inode_find_state_and_recover(struct inode *inode,
		const nfs4_stateid *stateid);
extern void nfs4_schedule_lease_recovery(struct nfs_client *);
extern void nfs4_schedule_state_manager(struct nfs_client *);
extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp);
extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
extern void nfs41_handle_recall_slot(struct nfs_client *clp);
extern void nfs41_handle_server_scope(struct nfs_client *,
				      struct server_scope **);
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern void nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t, pid_t);

extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid);
extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid);
extern void nfs_release_seqid(struct nfs_seqid *seqid);
extern void nfs_free_seqid(struct nfs_seqid *seqid);

extern const nfs4_stateid zero_stateid;

/* nfs4xdr.c */
extern struct rpc_procinfo nfs4_procedures[];

struct nfs4_mount_data;

/* callback_xdr.c */
extern struct svc_version nfs4_callback_version1;
extern struct svc_version nfs4_callback_version4;

#else

#define nfs4_close_state(a, b) do { } while (0)
#define nfs4_close_sync(a, b) do { } while (0)

#endif /* CONFIG_NFS_V4 */

#endif /* __LINUX_FS_NFS_NFS4_FS_H */