/*
 * linux/fs/nfs/nfs4_fs.h
 *
 * Copyright (C) 2005 Trond Myklebust
 *
 * NFSv4-specific filesystem definitions and declarations
 */

#ifndef __LINUX_FS_NFS_NFS4_FS_H
#define __LINUX_FS_NFS_NFS4_FS_H

#ifdef CONFIG_NFS_V4

struct idmap;

/*
 * In a seqid-mutating op, this macro controls which error return
 * values trigger incrementation of the seqid.
 *
 * from rfc 3010:
 * The client MUST monotonically increment the sequence number for the
 * CLOSE, LOCK, LOCKU, OPEN, OPEN_CONFIRM, and OPEN_DOWNGRADE
 * operations.  This is true even in the event that the previous
 * operation that used the sequence number received an error.  The only
 * exception to this rule is if the previous operation received one of
 * the following errors: NFSERR_STALE_CLIENTID, NFSERR_STALE_STATEID,
 * NFSERR_BAD_STATEID, NFSERR_BAD_SEQID, NFSERR_BADXDR,
 * NFSERR_RESOURCE, NFSERR_NOFILEHANDLE.
 *
 */
#define seqid_mutating_err(err)       \
(((err) != NFSERR_STALE_CLIENTID) &&  \
 ((err) != NFSERR_STALE_STATEID) &&   \
 ((err) != NFSERR_BAD_STATEID) &&     \
 ((err) != NFSERR_BAD_SEQID) &&       \
 ((err) != NFSERR_BAD_XDR) &&         \
 ((err) != NFSERR_RESOURCE) &&        \
 ((err) != NFSERR_NOFILEHANDLE))
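
/*
 * Illustrative sketch (not part of this header): how a reply to a
 * seqid-mutating operation is typically handled.  seqid_mutating_err()
 * decides whether the counter advances; the helper name below is an
 * assumption for illustration only, the real logic lives in
 * fs/nfs/nfs4state.c.
 */
#if 0
static void example_increment_seqid(int status, struct nfs_seqid_counter *seqid)
{
	/* Only bump the counter for replies that the server also counted. */
	if (status == NFS_OK || seqid_mutating_err(-status))
		seqid->counter++;
}
#endif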

enum nfs4_client_state {
	NFS4CLNT_STATE_RECOVER = 0,
	NFS4CLNT_LEASE_EXPIRED,
};

/*
 * The nfs4_client identifies our client state to the server.
 */
struct nfs4_client {
	struct list_head	cl_servers;	/* Global list of servers */
	struct in_addr		cl_addr;	/* Server identifier */
	u64			cl_clientid;	/* constant */
	nfs4_verifier		cl_confirm;
	unsigned long		cl_state;

	u32			cl_lockowner_id;

	/*
	 * The following rwsem ensures exclusive access to the server
	 * while we recover the state following a lease expiration.
	 */
	struct rw_semaphore	cl_sem;

	struct list_head	cl_delegations;
	struct list_head	cl_state_owners;
	struct list_head	cl_unused;
	int			cl_nunused;
	spinlock_t		cl_lock;
	atomic_t		cl_count;

	struct rpc_clnt *	cl_rpcclient;

	struct list_head	cl_superblocks;	/* List of nfs_server structs */

	unsigned long		cl_lease_time;
	unsigned long		cl_last_renewal;
	struct work_struct	cl_renewd;
	struct work_struct	cl_recoverd;

	struct rpc_wait_queue	cl_rpcwaitq;

	/* used for the setclientid verifier */
	struct timespec		cl_boot_time;

	/* idmapper */
	struct idmap *		cl_idmap;

	/* Our own IP address, as a null-terminated string.
	 * This is used to generate the clientid, and the callback address.
	 */
	char			cl_ipaddr[16];
	unsigned char		cl_id_uniquifier;
};

/*
 * struct rpc_sequence ensures that RPC calls are sent in the exact
 * order that they appear on the list.
 */
struct rpc_sequence {
	struct rpc_wait_queue	wait;	/* RPC call delay queue */
	spinlock_t		lock;	/* Protects the list */
	struct list_head	list;	/* Defines sequence of RPC calls */
};

#define NFS_SEQID_CONFIRMED 1
struct nfs_seqid_counter {
	struct rpc_sequence *sequence;
	int flags;
	u32 counter;
};

struct nfs_seqid {
	struct nfs_seqid_counter *sequence;
	struct list_head list;
};
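
/*
 * Simplified sketch of how the pieces above cooperate (illustration
 * only; the real code is in fs/nfs/nfs4state.c and may differ in
 * detail).  A task may proceed only while its nfs_seqid sits at the
 * head of the owner's rpc_sequence list; otherwise it sleeps on the
 * wait queue until the current holder frees its seqid and wakes the
 * next waiter.
 */
#if 0
static int example_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	int status = 0;

	spin_lock(&sequence->lock);
	if (sequence->list.next != &seqid->list) {
		/* Not our turn yet: park the task on the delay queue. */
		rpc_sleep_on(&sequence->wait, task, NULL, NULL);
		status = -EAGAIN;
	}
	spin_unlock(&sequence->lock);
	return status;
}
#endif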

static inline void nfs_confirm_seqid(struct nfs_seqid_counter *seqid, int status)
{
	if (seqid_mutating_err(-status))
		seqid->flags |= NFS_SEQID_CONFIRMED;
}

/*
 * NFS4 state_owners and lock_owners are simply labels for ordered
 * sequences of RPC calls. Their sole purpose is to provide once-only
 * semantics by allowing the server to identify replayed requests.
 */
struct nfs4_state_owner {
	spinlock_t		so_lock;
	struct list_head	so_list;	/* per-clientid list of state_owners */
	struct nfs4_client	*so_client;
	u32			so_id;		/* 32-bit identifier, unique */
	atomic_t		so_count;

	struct rpc_cred		*so_cred;	/* Associated cred */
	struct list_head	so_states;
	struct list_head	so_delegations;
	struct nfs_seqid_counter so_seqid;
	struct rpc_sequence	so_sequence;
};

/*
 * struct nfs4_state maintains the client-side state for a given
 * (state_owner,inode) tuple (OPEN) or state_owner (LOCK).
 *
 * OPEN:
 * In order to know when to OPEN_DOWNGRADE or CLOSE the state on the server,
 * we need to know how many files are open for reading or writing on a
 * given inode. This information too is stored here.
 *
 * LOCK: one nfs4_state (LOCK) to hold the lock stateid nfs4_state(OPEN)
 */

struct nfs4_lock_state {
	struct list_head	ls_locks;	/* Other lock stateids */
	struct nfs4_state *	ls_state;	/* Pointer to open state */
	fl_owner_t		ls_owner;	/* POSIX lock owner */
#define NFS_LOCK_INITIALIZED 1
	int			ls_flags;
	struct nfs_seqid_counter ls_seqid;
	u32			ls_id;
	nfs4_stateid		ls_stateid;
	atomic_t		ls_count;
};

/* bits for nfs4_state->flags */
enum {
	LK_STATE_IN_USE,
	NFS_DELEGATED_STATE,
};

struct nfs4_state {
	struct list_head open_states;	/* List of states for the same state_owner */
	struct list_head inode_states;	/* List of states for the same inode */
	struct list_head lock_states;	/* List of subservient lock stateids */

	struct nfs4_state_owner *owner;	/* Pointer to the open owner */
	struct inode *inode;		/* Pointer to the inode */

	unsigned long flags;		/* Do we hold any locks? */
	spinlock_t state_lock;		/* Protects the lock_states list */

	nfs4_stateid stateid;

	unsigned int n_rdonly;
	unsigned int n_wronly;
	unsigned int n_rdwr;
	int state;			/* State on the server (R,W, or RW) */
	atomic_t count;
};
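
/*
 * Illustrative sketch (not part of this header): how the n_rdonly,
 * n_wronly and n_rdwr counters above typically decide between CLOSE
 * and OPEN_DOWNGRADE when one opener goes away.  The helper name is an
 * assumption for illustration; the real decision is made in
 * fs/nfs/nfs4state.c.
 */
#if 0
static mode_t example_remaining_share_mode(const struct nfs4_state *state)
{
	mode_t newstate = 0;

	if (state->n_rdwr != 0)
		newstate |= FMODE_READ | FMODE_WRITE;
	if (state->n_rdonly != 0)
		newstate |= FMODE_READ;
	if (state->n_wronly != 0)
		newstate |= FMODE_WRITE;
	/*
	 * newstate == 0            -> send CLOSE
	 * newstate != state->state -> send OPEN_DOWNGRADE
	 * otherwise                -> nothing to do on the wire
	 */
	return newstate;
}
#endif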

struct nfs4_exception {
	long timeout;
	int retry;
};
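
/*
 * Illustrative sketch (not part of this header): struct nfs4_exception
 * carries the retry/backoff state for the "try once, map the error,
 * maybe retry" pattern used by the NFSv4 procedures.  The handler and
 * worker names below are placeholders; the real helpers live in
 * fs/nfs/nfs4proc.c.
 */
#if 0
static int example_proc_wrapper(struct nfs_server *server)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		/* _example_proc_once() issues the RPC exactly once;
		 * example_handle_exception() sleeps or triggers recovery as
		 * needed and sets exception.retry when the call should be
		 * reissued. */
		err = example_handle_exception(server,
					       _example_proc_once(server),
					       &exception);
	} while (exception.retry);
	return err;
}
#endif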

struct nfs4_state_recovery_ops {
	int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *);
	int (*recover_lock)(struct nfs4_state *, struct file_lock *);
};
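
/*
 * Illustrative sketch (not part of this header): during state recovery
 * the client walks every nfs4_state of a state owner and replays it
 * through the recovery ops above (reboot recovery uses reclaim-type
 * OPENs, network-partition recovery re-sends ordinary OPENs).  This is
 * a simplified outline of the loop in fs/nfs/nfs4state.c.
 */
#if 0
static int example_reclaim_open_state(struct nfs4_state_recovery_ops *ops,
				      struct nfs4_state_owner *sp)
{
	struct nfs4_state *state;
	int status = 0;

	list_for_each_entry(state, &sp->so_states, open_states) {
		status = ops->recover_open(sp, state);
		if (status < 0)
			break;
	}
	return status;
}
#endif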

extern struct dentry_operations nfs4_dentry_operations;
extern struct inode_operations nfs4_dir_inode_operations;

/* inode.c */
extern ssize_t nfs4_getxattr(struct dentry *, const char *, void *, size_t);
extern int nfs4_setxattr(struct dentry *, const char *, const void *, size_t, int);
extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);

/* nfs4proc.c */
extern int nfs4_map_errors(int err);
extern int nfs4_proc_setclientid(struct nfs4_client *, u32, unsigned short, struct rpc_cred *);
extern int nfs4_proc_setclientid_confirm(struct nfs4_client *, struct rpc_cred *);
extern int nfs4_proc_async_renew(struct nfs4_client *, struct rpc_cred *);
extern int nfs4_proc_renew(struct nfs4_client *, struct rpc_cred *);
extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state);
extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);

extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops;
extern struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops;

extern const u32 nfs4_fattr_bitmap[2];
extern const u32 nfs4_statfs_bitmap[2];
extern const u32 nfs4_pathconf_bitmap[2];
extern const u32 nfs4_fsinfo_bitmap[2];

/* nfs4renewd.c */
extern void nfs4_schedule_state_renewal(struct nfs4_client *);
extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
extern void nfs4_kill_renewd(struct nfs4_client *);
extern void nfs4_renew_state(void *);

/* nfs4state.c */
extern void init_nfsv4_state(struct nfs_server *);
extern void destroy_nfsv4_state(struct nfs_server *);
extern struct nfs4_client *nfs4_get_client(struct in_addr *);
extern void nfs4_put_client(struct nfs4_client *clp);
extern struct nfs4_client *nfs4_find_client(struct in_addr *);
extern struct rpc_cred *nfs4_get_renew_cred(struct nfs4_client *clp);
extern u32 nfs4_alloc_lockowner_id(struct nfs4_client *);

extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
extern void nfs4_drop_state_owner(struct nfs4_state_owner *);
extern struct nfs4_state *nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
extern void nfs4_put_open_state(struct nfs4_state *);
extern void nfs4_close_state(struct nfs4_state *, mode_t);
extern void nfs4_state_set_mode_locked(struct nfs4_state *, mode_t);
extern void nfs4_schedule_state_recovery(struct nfs4_client *);
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);

extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter);
extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid);
extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid);
extern void nfs_free_seqid(struct nfs_seqid *seqid);
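
/*
 * Illustrative sketch (not part of this header): the caller-side
 * lifecycle of an nfs_seqid for a seqid-mutating operation such as
 * OPEN or CLOSE.  Argument plumbing is elided and the helper name is a
 * placeholder; the real users are in fs/nfs/nfs4proc.c.
 */
#if 0
static int example_seqid_lifecycle(struct nfs4_state_owner *sp,
				   struct rpc_task *task)
{
	struct nfs_seqid *seqid;
	int status;

	seqid = nfs_alloc_seqid(&sp->so_seqid);	/* queue on so_sequence */
	if (seqid == NULL)
		return -ENOMEM;
	if (nfs_wait_on_sequence(seqid, task) != 0)
		return -EAGAIN;			/* task sleeps; retried later */

	status = 0;				/* ... issue OPEN/CLOSE here ... */

	nfs_increment_open_seqid(status, seqid);/* maybe bump so_seqid.counter */
	nfs_confirm_seqid(&sp->so_seqid, status);
	nfs_free_seqid(seqid);			/* dequeue and wake next waiter */
	return status;
}
#endif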

extern const nfs4_stateid zero_stateid;

/* nfs4xdr.c */
extern uint32_t *nfs4_decode_dirent(uint32_t *p, struct nfs_entry *entry, int plus);
extern struct rpc_procinfo nfs4_procedures[];

struct nfs4_mount_data;

/* callback_xdr.c */
extern struct svc_version nfs4_callback_version1;

#else

#define init_nfsv4_state(server)  do { } while (0)
#define destroy_nfsv4_state(server)  do { } while (0)
#define nfs4_put_state_owner(inode, owner)  do { } while (0)
#define nfs4_put_open_state(state)  do { } while (0)
#define nfs4_close_state(a, b)  do { } while (0)

#endif /* CONFIG_NFS_V4 */

#endif /* __LINUX_FS_NFS_NFS4_FS_H */