NFSD 6.8 Release Notes

Merge tag 'nfsd-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux

Pull nfsd updates from Chuck Lever:

"The bulk of the patches for this release are clean-ups and minor bug fixes.
There is one significant revert to mention: support for RDMA Read operations
in the server's RPC-over-RDMA transport implementation has been fixed so it
waits for Read completion in a way that avoids tying up an nfsd thread. This
prevents a possible DoS vector if an RPC-over-RDMA client should become
unresponsive during RDMA Read operations.

As always I am grateful to NFSD contributors, reviewers, and testers"

* tag 'nfsd-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux: (56 commits)
  nfsd: rename nfsd_last_thread() to nfsd_destroy_serv()
  SUNRPC: discard sv_refcnt, and svc_get/svc_put
  svc: don't hold reference for poolstats, only mutex.
  SUNRPC: remove printk when back channel request not found
  svcrdma: Implement multi-stage Read completion again
  svcrdma: Copy construction of svc_rqst::rq_arg to rdma_read_complete()
  svcrdma: Add back svcxprt_rdma::sc_read_complete_q
  svcrdma: Add back svc_rdma_recv_ctxt::rc_pages
  svcrdma: Clean up comment in svc_rdma_accept()
  svcrdma: Remove queue-shortening warnings
  svcrdma: Remove pointer addresses shown in dprintk()
  svcrdma: Optimize svc_rdma_cc_init()
  svcrdma: De-duplicate completion ID initialization helpers
  svcrdma: Move the svc_rdma_cc_init() call
  svcrdma: Remove struct svc_rdma_read_info
  svcrdma: Update the synopsis of svc_rdma_read_special()
  svcrdma: Update the synopsis of svc_rdma_read_call_chunk()
  svcrdma: Update synopsis of svc_rdma_read_multiple_chunks()
  svcrdma: Update synopsis of svc_rdma_copy_inline_range()
  svcrdma: Update the synopsis of svc_rdma_read_data_item()
  ...
commit 49f4810356
@@ -345,10 +345,10 @@ static int lockd_get(void)
serv->sv_maxconn = nlm_max_connections;
error = svc_set_num_threads(serv, NULL, 1);
/* The thread now holds the only reference */
svc_put(serv);
if (error < 0)
if (error < 0) {
svc_destroy(&serv);
return error;
}
nlmsvc_serv = serv;
register_inetaddr_notifier(&lockd_inetaddr_notifier);

@@ -372,11 +372,9 @@ static void lockd_put(void)
unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
#endif
svc_get(nlmsvc_serv);
svc_set_num_threads(nlmsvc_serv, NULL, 0);
svc_put(nlmsvc_serv);
timer_delete_sync(&nlmsvc_retry);
nlmsvc_serv = NULL;
svc_destroy(&nlmsvc_serv);
dprintk("lockd_down: service destroyed\n");
}

@@ -187,7 +187,7 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion)
* Check whether we're already up and running.
*/
if (cb_info->serv)
return svc_get(cb_info->serv);
return cb_info->serv;
/*
* Sanity check: if there's no task,

@@ -245,9 +245,10 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
cb_info->users++;
err_net:
if (!cb_info->users)
cb_info->serv = NULL;
svc_put(serv);
if (!cb_info->users) {
svc_set_num_threads(cb_info->serv, NULL, 0);
svc_destroy(&cb_info->serv);
}
err_create:
mutex_unlock(&nfs_callback_mutex);
return ret;

@@ -271,11 +272,9 @@ void nfs_callback_down(int minorversion, struct net *net)
nfs_callback_down_net(minorversion, serv, net);
cb_info->users--;
if (cb_info->users == 0) {
svc_get(serv);
svc_set_num_threads(serv, NULL, 0);
svc_put(serv);
dprintk("nfs_callback_down: service destroyed\n");
cb_info->serv = NULL;
svc_destroy(&cb_info->serv);
}
mutex_unlock(&nfs_callback_mutex);
}

@@ -19,25 +19,6 @@ enum nfs4_callback_procnum {
CB_COMPOUND = 1,
};
enum nfs4_callback_opnum {
OP_CB_GETATTR = 3,
OP_CB_RECALL = 4,
/* Callback operations new to NFSv4.1 */
OP_CB_LAYOUTRECALL = 5,
OP_CB_NOTIFY = 6,
OP_CB_PUSH_DELEG = 7,
OP_CB_RECALL_ANY = 8,
OP_CB_RECALLABLE_OBJ_AVAIL = 9,
OP_CB_RECALL_SLOT = 10,
OP_CB_SEQUENCE = 11,
OP_CB_WANTS_CANCELLED = 12,
OP_CB_NOTIFY_LOCK = 13,
OP_CB_NOTIFY_DEVICEID = 14,
/* Callback operations new to NFSv4.2 */
OP_CB_OFFLOAD = 15,
OP_CB_ILLEGAL = 10044,
};
struct nfs4_slot;
struct cb_process_state {
__be32 drc_status;

@@ -158,3 +158,19 @@ config NFSD_V4_SECURITY_LABEL
If you do not wish to enable fine-grained security labels SELinux or
Smack policies on NFSv4 files, say N.
config NFSD_LEGACY_CLIENT_TRACKING
bool "Support legacy NFSv4 client tracking methods (DEPRECATED)"
depends on NFSD_V4
default n
help
The NFSv4 server needs to store a small amount of information on
stable storage in order to handle state recovery after reboot. Most
modern deployments upcall to a userland daemon for this (nfsdcld),
but older NFS servers may store information directly in a
recoverydir, or spawn a process directly using a usermodehelper
upcall.
These legacy client tracking methods have proven to be problematic
and will be removed in the future. Say Y here if you need support
for them in the interim.

@@ -717,7 +717,7 @@ nfsd_file_cache_init(void)
return ret;
ret = -ENOMEM;
nfsd_filecache_wq = alloc_workqueue("nfsd_filecache", 0, 0);
nfsd_filecache_wq = alloc_workqueue("nfsd_filecache", WQ_UNBOUND, 0);
if (!nfsd_filecache_wq)
goto out;

@@ -123,14 +123,9 @@ struct nfsd_net {
u32 clientid_counter;
u32 clverifier_counter;
struct svc_serv *nfsd_serv;
/* When a listening socket is added to nfsd, keep_active is set
* and this justifies a reference on nfsd_serv. This stops
* nfsd_serv from being freed. When the number of threads is
* set, keep_active is cleared and the reference is dropped. So
* when the last thread exits, the service will be destroyed.
*/
int keep_active;
struct svc_info nfsd_info;
#define nfsd_serv nfsd_info.serv
/*
* clientid and stateid data for construction of net unique COPY

@@ -31,6 +31,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/nfs4.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc_xprt.h>

@@ -87,31 +88,6 @@ static void encode_bitmap4(struct xdr_stream *xdr, const __u32 *bitmap,
WARN_ON_ONCE(xdr_stream_encode_uint32_array(xdr, bitmap, len) < 0);
}
/*
* nfs_cb_opnum4
*
* enum nfs_cb_opnum4 {
* OP_CB_GETATTR = 3,
* ...
* };
*/
enum nfs_cb_opnum4 {
OP_CB_GETATTR = 3,
OP_CB_RECALL = 4,
OP_CB_LAYOUTRECALL = 5,
OP_CB_NOTIFY = 6,
OP_CB_PUSH_DELEG = 7,
OP_CB_RECALL_ANY = 8,
OP_CB_RECALLABLE_OBJ_AVAIL = 9,
OP_CB_RECALL_SLOT = 10,
OP_CB_SEQUENCE = 11,
OP_CB_WANTS_CANCELLED = 12,
OP_CB_NOTIFY_LOCK = 13,
OP_CB_NOTIFY_DEVICEID = 14,
OP_CB_OFFLOAD = 15,
OP_CB_ILLEGAL = 10044
};
static void encode_nfs_cb_opnum4(struct xdr_stream *xdr, enum nfs_cb_opnum4 op)
{
__be32 *p;

@@ -970,8 +970,11 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* To ensure proper ordering, we therefore turn off zero copy if
* the client wants us to do more in this compound:
*/
if (!nfsd4_last_compound_op(rqstp))
clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
if (!nfsd4_last_compound_op(rqstp)) {
struct nfsd4_compoundargs *argp = rqstp->rq_argp;
argp->splice_ok = false;
}
/* check stateid */
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,

@ -66,6 +66,7 @@ struct nfsd4_client_tracking_ops {
|
||||
static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops;
|
||||
static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops_v2;
|
||||
|
||||
#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
|
||||
/* Globals */
|
||||
static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery";
|
||||
|
||||
@ -720,6 +721,7 @@ static const struct nfsd4_client_tracking_ops nfsd4_legacy_tracking_ops = {
|
||||
.version = 1,
|
||||
.msglen = 0,
|
||||
};
|
||||
#endif /* CONFIG_NFSD_LEGACY_CLIENT_TRACKING */
|
||||
|
||||
/* Globals */
|
||||
#define NFSD_PIPE_DIR "nfsd"
|
||||
@ -731,8 +733,10 @@ struct cld_net {
|
||||
spinlock_t cn_lock;
|
||||
struct list_head cn_list;
|
||||
unsigned int cn_xid;
|
||||
bool cn_has_legacy;
|
||||
struct crypto_shash *cn_tfm;
|
||||
#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
|
||||
bool cn_has_legacy;
|
||||
#endif
|
||||
};
|
||||
|
||||
struct cld_upcall {
|
||||
@ -793,7 +797,6 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
|
||||
uint8_t cmd, princhashlen;
|
||||
struct xdr_netobj name, princhash = { .len = 0, .data = NULL };
|
||||
uint16_t namelen;
|
||||
struct cld_net *cn = nn->cld_net;
|
||||
|
||||
if (get_user(cmd, &cmsg->cm_cmd)) {
|
||||
dprintk("%s: error when copying cmd from userspace", __func__);
|
||||
@ -833,11 +836,15 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
|
||||
return PTR_ERR(name.data);
|
||||
name.len = namelen;
|
||||
}
|
||||
#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
|
||||
if (name.len > 5 && memcmp(name.data, "hash:", 5) == 0) {
|
||||
struct cld_net *cn = nn->cld_net;
|
||||
|
||||
name.len = name.len - 5;
|
||||
memmove(name.data, name.data + 5, name.len);
|
||||
cn->cn_has_legacy = true;
|
||||
}
|
||||
#endif
|
||||
if (!nfs4_client_to_reclaim(name, princhash, nn)) {
|
||||
kfree(name.data);
|
||||
kfree(princhash.data);
|
||||
@ -1010,7 +1017,9 @@ __nfsd4_init_cld_pipe(struct net *net)
|
||||
}
|
||||
|
||||
cn->cn_pipe->dentry = dentry;
|
||||
#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
|
||||
cn->cn_has_legacy = false;
|
||||
#endif
|
||||
nn->cld_net = cn;
|
||||
return 0;
|
||||
|
||||
@ -1282,10 +1291,6 @@ nfsd4_cld_check(struct nfs4_client *clp)
|
||||
{
|
||||
struct nfs4_client_reclaim *crp;
|
||||
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
|
||||
struct cld_net *cn = nn->cld_net;
|
||||
int status;
|
||||
char dname[HEXDIR_LEN];
|
||||
struct xdr_netobj name;
|
||||
|
||||
/* did we already find that this client is stable? */
|
||||
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
|
||||
@ -1296,7 +1301,12 @@ nfsd4_cld_check(struct nfs4_client *clp)
|
||||
if (crp)
|
||||
goto found;
|
||||
|
||||
if (cn->cn_has_legacy) {
|
||||
#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
|
||||
if (nn->cld_net->cn_has_legacy) {
|
||||
int status;
|
||||
char dname[HEXDIR_LEN];
|
||||
struct xdr_netobj name;
|
||||
|
||||
status = nfs4_make_rec_clidname(dname, &clp->cl_name);
|
||||
if (status)
|
||||
return -ENOENT;
|
||||
@ -1314,6 +1324,7 @@ nfsd4_cld_check(struct nfs4_client *clp)
|
||||
goto found;
|
||||
|
||||
}
|
||||
#endif
|
||||
return -ENOENT;
|
||||
found:
|
||||
crp->cr_clp = clp;
|
||||
@ -1327,8 +1338,6 @@ nfsd4_cld_check_v2(struct nfs4_client *clp)
|
||||
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
|
||||
struct cld_net *cn = nn->cld_net;
|
||||
int status;
|
||||
char dname[HEXDIR_LEN];
|
||||
struct xdr_netobj name;
|
||||
struct crypto_shash *tfm = cn->cn_tfm;
|
||||
struct xdr_netobj cksum;
|
||||
char *principal = NULL;
|
||||
@ -1342,7 +1351,11 @@ nfsd4_cld_check_v2(struct nfs4_client *clp)
|
||||
if (crp)
|
||||
goto found;
|
||||
|
||||
#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
|
||||
if (cn->cn_has_legacy) {
|
||||
struct xdr_netobj name;
|
||||
char dname[HEXDIR_LEN];
|
||||
|
||||
status = nfs4_make_rec_clidname(dname, &clp->cl_name);
|
||||
if (status)
|
||||
return -ENOENT;
|
||||
@ -1360,6 +1373,7 @@ nfsd4_cld_check_v2(struct nfs4_client *clp)
|
||||
goto found;
|
||||
|
||||
}
|
||||
#endif
|
||||
return -ENOENT;
|
||||
found:
|
||||
if (crp->cr_princhash.len) {
|
||||
@ -1663,6 +1677,7 @@ static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops_v2 = {
|
||||
.msglen = sizeof(struct cld_msg_v2),
|
||||
};
|
||||
|
||||
#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
|
||||
/* upcall via usermodehelper */
|
||||
static char cltrack_prog[PATH_MAX] = "/sbin/nfsdcltrack";
|
||||
module_param_string(cltrack_prog, cltrack_prog, sizeof(cltrack_prog),
|
||||
@ -2007,12 +2022,46 @@ static const struct nfsd4_client_tracking_ops nfsd4_umh_tracking_ops = {
|
||||
.msglen = 0,
|
||||
};
|
||||
|
||||
static inline int check_for_legacy_methods(int status, struct net *net)
|
||||
{
|
||||
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
|
||||
struct path path;
|
||||
|
||||
/*
|
||||
* Next, try the UMH upcall.
|
||||
*/
|
||||
nn->client_tracking_ops = &nfsd4_umh_tracking_ops;
|
||||
status = nn->client_tracking_ops->init(net);
|
||||
if (!status)
|
||||
return status;
|
||||
|
||||
/*
|
||||
* Finally, See if the recoverydir exists and is a directory.
|
||||
* If it is, then use the legacy ops.
|
||||
*/
|
||||
nn->client_tracking_ops = &nfsd4_legacy_tracking_ops;
|
||||
status = kern_path(nfs4_recoverydir(), LOOKUP_FOLLOW, &path);
|
||||
if (!status) {
|
||||
status = !d_is_dir(path.dentry);
|
||||
path_put(&path);
|
||||
if (status)
|
||||
return -ENOTDIR;
|
||||
status = nn->client_tracking_ops->init(net);
|
||||
}
|
||||
return status;
|
||||
}
|
||||
#else
|
||||
static inline int check_for_legacy_methods(int status, struct net *net)
|
||||
{
|
||||
return status;
|
||||
}
|
||||
#endif /* CONFIG_LEGACY_NFSD_CLIENT_TRACKING */
|
||||
|
||||
int
|
||||
nfsd4_client_tracking_init(struct net *net)
|
||||
{
|
||||
int status;
|
||||
struct path path;
|
||||
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
|
||||
int status;
|
||||
|
||||
/* just run the init if it the method is already decided */
|
||||
if (nn->client_tracking_ops)
|
||||
@ -2030,29 +2079,9 @@ nfsd4_client_tracking_init(struct net *net)
|
||||
return status;
|
||||
}
|
||||
|
||||
/*
|
||||
* Next, try the UMH upcall.
|
||||
*/
|
||||
nn->client_tracking_ops = &nfsd4_umh_tracking_ops;
|
||||
status = nn->client_tracking_ops->init(net);
|
||||
if (!status)
|
||||
return status;
|
||||
|
||||
/*
|
||||
* Finally, See if the recoverydir exists and is a directory.
|
||||
* If it is, then use the legacy ops.
|
||||
*/
|
||||
nn->client_tracking_ops = &nfsd4_legacy_tracking_ops;
|
||||
status = kern_path(nfs4_recoverydir(), LOOKUP_FOLLOW, &path);
|
||||
if (!status) {
|
||||
status = d_is_dir(path.dentry);
|
||||
path_put(&path);
|
||||
if (!status) {
|
||||
status = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
status = check_for_legacy_methods(status, net);
|
||||
if (status)
|
||||
goto out;
|
||||
do_init:
|
||||
status = nn->client_tracking_ops->init(net);
|
||||
out:
|
||||
|
@ -6575,7 +6575,7 @@ unlock:
|
||||
spin_unlock(&nn->s2s_cp_lock);
|
||||
if (!state)
|
||||
return nfserr_bad_stateid;
|
||||
if (!clp && state)
|
||||
if (!clp)
|
||||
*cps = state;
|
||||
return 0;
|
||||
}
|
||||
|
@ -2524,8 +2524,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
|
||||
svc_reserve(argp->rqstp, max_reply + readbytes);
|
||||
argp->rqstp->rq_cachetype = cachethis ? RC_REPLBUFF : RC_NOCACHE;
|
||||
|
||||
argp->splice_ok = nfsd_read_splice_ok(argp->rqstp);
|
||||
if (readcount > 1 || max_reply > PAGE_SIZE - auth_slack)
|
||||
clear_bit(RQ_SPLICE_OK, &argp->rqstp->rq_flags);
|
||||
argp->splice_ok = false;
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -4375,12 +4376,13 @@ static __be32
|
||||
nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
|
||||
union nfsd4_op_u *u)
|
||||
{
|
||||
struct nfsd4_compoundargs *argp = resp->rqstp->rq_argp;
|
||||
struct nfsd4_read *read = &u->read;
|
||||
bool splice_ok = test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags);
|
||||
unsigned long maxcount;
|
||||
struct xdr_stream *xdr = resp->xdr;
|
||||
struct file *file;
|
||||
int starting_len = xdr->buf->len;
|
||||
bool splice_ok = argp->splice_ok;
|
||||
unsigned long maxcount;
|
||||
struct file *file;
|
||||
__be32 *p;
|
||||
|
||||
if (nfserr)
|
||||
@ -5201,9 +5203,10 @@ static __be32
|
||||
nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
|
||||
struct nfsd4_read *read)
|
||||
{
|
||||
bool splice_ok = test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags);
|
||||
struct nfsd4_compoundargs *argp = resp->rqstp->rq_argp;
|
||||
struct file *file = read->rd_nf->nf_file;
|
||||
struct xdr_stream *xdr = resp->xdr;
|
||||
bool splice_ok = argp->splice_ok;
|
||||
unsigned long maxcount;
|
||||
__be32 nfserr, *p;
|
||||
|
||||
|
@ -364,8 +364,6 @@ nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
|
||||
if (freed > sc->nr_to_scan)
|
||||
break;
|
||||
}
|
||||
|
||||
trace_nfsd_drc_gc(nn, freed);
|
||||
return freed;
|
||||
}
|
||||
|
||||
@ -508,7 +506,6 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
|
||||
__wsum csum;
|
||||
struct nfsd_drc_bucket *b;
|
||||
int type = rqstp->rq_cachetype;
|
||||
unsigned long freed;
|
||||
LIST_HEAD(dispose);
|
||||
int rtn = RC_DOIT;
|
||||
|
||||
@ -538,8 +535,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
|
||||
nfsd_prune_bucket_locked(nn, b, 3, &dispose);
|
||||
spin_unlock(&b->cache_lock);
|
||||
|
||||
freed = nfsd_cacherep_dispose(&dispose);
|
||||
trace_nfsd_drc_gc(nn, freed);
|
||||
nfsd_cacherep_dispose(&dispose);
|
||||
|
||||
nfsd_stats_rc_misses_inc();
|
||||
atomic_inc(&nn->num_drc_entries);
|
||||
|
@ -76,7 +76,9 @@ static ssize_t write_maxconn(struct file *file, char *buf, size_t size);
|
||||
#ifdef CONFIG_NFSD_V4
|
||||
static ssize_t write_leasetime(struct file *file, char *buf, size_t size);
|
||||
static ssize_t write_gracetime(struct file *file, char *buf, size_t size);
|
||||
#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
|
||||
static ssize_t write_recoverydir(struct file *file, char *buf, size_t size);
|
||||
#endif
|
||||
static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size);
|
||||
#endif
|
||||
|
||||
@ -93,7 +95,9 @@ static ssize_t (*const write_op[])(struct file *, char *, size_t) = {
|
||||
#ifdef CONFIG_NFSD_V4
|
||||
[NFSD_Leasetime] = write_leasetime,
|
||||
[NFSD_Gracetime] = write_gracetime,
|
||||
#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
|
||||
[NFSD_RecoveryDir] = write_recoverydir,
|
||||
#endif
|
||||
[NFSD_V4EndGrace] = write_v4_end_grace,
|
||||
#endif
|
||||
};
|
||||
@ -179,7 +183,7 @@ static const struct file_operations pool_stats_operations = {
|
||||
.open = nfsd_pool_stats_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = nfsd_pool_stats_release,
|
||||
.release = seq_release,
|
||||
};
|
||||
|
||||
DEFINE_SHOW_ATTRIBUTE(nfsd_reply_cache_stats);
|
||||
@ -707,12 +711,9 @@ static ssize_t __write_ports_addfd(char *buf, struct net *net, const struct cred
|
||||
serv = nn->nfsd_serv;
|
||||
err = svc_addsock(serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
|
||||
|
||||
if (err < 0 && !serv->sv_nrthreads && !nn->keep_active)
|
||||
nfsd_last_thread(net);
|
||||
else if (err >= 0 && !serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
|
||||
svc_get(serv);
|
||||
if (!serv->sv_nrthreads && list_empty(&nn->nfsd_serv->sv_permsocks))
|
||||
nfsd_destroy_serv(net);
|
||||
|
||||
svc_put(serv);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -750,10 +751,6 @@ static ssize_t __write_ports_addxprt(char *buf, struct net *net, const struct cr
|
||||
if (err < 0 && err != -EAFNOSUPPORT)
|
||||
goto out_close;
|
||||
|
||||
if (!serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
|
||||
svc_get(serv);
|
||||
|
||||
svc_put(serv);
|
||||
return 0;
|
||||
out_close:
|
||||
xprt = svc_find_xprt(serv, transport, net, PF_INET, port);
|
||||
@ -762,10 +759,9 @@ out_close:
|
||||
svc_xprt_put(xprt);
|
||||
}
|
||||
out_err:
|
||||
if (!serv->sv_nrthreads && !nn->keep_active)
|
||||
nfsd_last_thread(net);
|
||||
if (!serv->sv_nrthreads && list_empty(&nn->nfsd_serv->sv_permsocks))
|
||||
nfsd_destroy_serv(net);
|
||||
|
||||
svc_put(serv);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1021,6 +1017,7 @@ static ssize_t write_gracetime(struct file *file, char *buf, size_t size)
|
||||
return nfsd4_write_time(file, buf, size, &nn->nfsd4_grace, nn);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
|
||||
static ssize_t __write_recoverydir(struct file *file, char *buf, size_t size,
|
||||
struct nfsd_net *nn)
|
||||
{
|
||||
@ -1081,6 +1078,7 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
|
||||
mutex_unlock(&nfsd_mutex);
|
||||
return rv;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* write_v4_end_grace - release grace period for nfsd's v4.x lock manager
|
||||
|
@@ -148,7 +148,7 @@ int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change);
int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change);
void nfsd_reset_versions(struct nfsd_net *nn);
int nfsd_create_serv(struct net *net);
void nfsd_last_thread(struct net *net);
void nfsd_destroy_serv(struct net *net);
extern int nfsd_max_blksize;

@ -59,15 +59,6 @@ static __be32 nfsd_init_request(struct svc_rqst *,
|
||||
* nfsd_mutex protects nn->nfsd_serv -- both the pointer itself and some members
|
||||
* of the svc_serv struct such as ->sv_temp_socks and ->sv_permsocks.
|
||||
*
|
||||
* If (out side the lock) nn->nfsd_serv is non-NULL, then it must point to a
|
||||
* properly initialised 'struct svc_serv' with ->sv_nrthreads > 0 (unless
|
||||
* nn->keep_active is set). That number of nfsd threads must
|
||||
* exist and each must be listed in ->sp_all_threads in some entry of
|
||||
* ->sv_pools[].
|
||||
*
|
||||
* Each active thread holds a counted reference on nn->nfsd_serv, as does
|
||||
* the nn->keep_active flag and various transient calls to svc_get().
|
||||
*
|
||||
* Finally, the nfsd_mutex also protects some of the global variables that are
|
||||
* accessed when nfsd starts and that are settable via the write_* routines in
|
||||
* nfsctl.c. In particular:
|
||||
@ -359,13 +350,12 @@ static bool nfsd_needs_lockd(struct nfsd_net *nn)
|
||||
*/
|
||||
void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn)
|
||||
{
|
||||
int seq = 0;
|
||||
unsigned int seq;
|
||||
|
||||
do {
|
||||
read_seqbegin_or_lock(&nn->writeverf_lock, &seq);
|
||||
seq = read_seqbegin(&nn->writeverf_lock);
|
||||
memcpy(verf, nn->writeverf, sizeof(nn->writeverf));
|
||||
} while (need_seqretry(&nn->writeverf_lock, seq));
|
||||
done_seqretry(&nn->writeverf_lock, seq);
|
||||
} while (read_seqretry(&nn->writeverf_lock, seq));
|
||||
}
|
||||
|
||||
static void nfsd_reset_write_verifier_locked(struct nfsd_net *nn)
|
||||
@ -542,7 +532,11 @@ static struct notifier_block nfsd_inet6addr_notifier = {
|
||||
/* Only used under nfsd_mutex, so this atomic may be overkill: */
|
||||
static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);
|
||||
|
||||
void nfsd_last_thread(struct net *net)
|
||||
/**
|
||||
* nfsd_destroy_serv - tear down NFSD's svc_serv for a namespace
|
||||
* @net: network namespace the NFS service is associated with
|
||||
*/
|
||||
void nfsd_destroy_serv(struct net *net)
|
||||
{
|
||||
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
|
||||
struct svc_serv *serv = nn->nfsd_serv;
|
||||
@ -564,7 +558,7 @@ void nfsd_last_thread(struct net *net)
|
||||
/*
|
||||
* write_ports can create the server without actually starting
|
||||
* any threads--if we get shut down before any threads are
|
||||
* started, then nfsd_last_thread will be run before any of this
|
||||
* started, then nfsd_destroy_serv will be run before any of this
|
||||
* other initialization has been done except the rpcb information.
|
||||
*/
|
||||
svc_rpcb_cleanup(serv, net);
|
||||
@ -573,6 +567,7 @@ void nfsd_last_thread(struct net *net)
|
||||
|
||||
nfsd_shutdown_net(net);
|
||||
nfsd_export_flush(net);
|
||||
svc_destroy(&serv);
|
||||
}
|
||||
|
||||
void nfsd_reset_versions(struct nfsd_net *nn)
|
||||
@ -647,11 +642,9 @@ void nfsd_shutdown_threads(struct net *net)
|
||||
return;
|
||||
}
|
||||
|
||||
svc_get(serv);
|
||||
/* Kill outstanding nfsd threads */
|
||||
svc_set_num_threads(serv, NULL, 0);
|
||||
nfsd_last_thread(net);
|
||||
svc_put(serv);
|
||||
nfsd_destroy_serv(net);
|
||||
mutex_unlock(&nfsd_mutex);
|
||||
}
|
||||
|
||||
@ -667,10 +660,9 @@ int nfsd_create_serv(struct net *net)
|
||||
struct svc_serv *serv;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&nfsd_mutex));
|
||||
if (nn->nfsd_serv) {
|
||||
svc_get(nn->nfsd_serv);
|
||||
if (nn->nfsd_serv)
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (nfsd_max_blksize == 0)
|
||||
nfsd_max_blksize = nfsd_get_default_max_blksize();
|
||||
nfsd_reset_versions(nn);
|
||||
@ -681,10 +673,11 @@ int nfsd_create_serv(struct net *net)
|
||||
serv->sv_maxconn = nn->max_connections;
|
||||
error = svc_bind(serv, net);
|
||||
if (error < 0) {
|
||||
svc_put(serv);
|
||||
svc_destroy(&serv);
|
||||
return error;
|
||||
}
|
||||
spin_lock(&nfsd_notifier_lock);
|
||||
nn->nfsd_info.mutex = &nfsd_mutex;
|
||||
nn->nfsd_serv = serv;
|
||||
spin_unlock(&nfsd_notifier_lock);
|
||||
|
||||
@ -764,7 +757,6 @@ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
|
||||
nthreads[0] = 1;
|
||||
|
||||
/* apply the new numbers */
|
||||
svc_get(nn->nfsd_serv);
|
||||
for (i = 0; i < n; i++) {
|
||||
err = svc_set_num_threads(nn->nfsd_serv,
|
||||
&nn->nfsd_serv->sv_pools[i],
|
||||
@ -772,7 +764,6 @@ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
|
||||
if (err)
|
||||
break;
|
||||
}
|
||||
svc_put(nn->nfsd_serv);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -814,13 +805,8 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
|
||||
goto out_put;
|
||||
error = serv->sv_nrthreads;
|
||||
out_put:
|
||||
/* Threads now hold service active */
|
||||
if (xchg(&nn->keep_active, 0))
|
||||
svc_put(serv);
|
||||
|
||||
if (serv->sv_nrthreads == 0)
|
||||
nfsd_last_thread(net);
|
||||
svc_put(serv);
|
||||
nfsd_destroy_serv(net);
|
||||
out:
|
||||
mutex_unlock(&nfsd_mutex);
|
||||
return error;
|
||||
@ -1083,28 +1069,7 @@ bool nfssvc_encode_voidres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
|
||||
|
||||
int nfsd_pool_stats_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
int ret;
|
||||
struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
|
||||
|
||||
mutex_lock(&nfsd_mutex);
|
||||
if (nn->nfsd_serv == NULL) {
|
||||
mutex_unlock(&nfsd_mutex);
|
||||
return -ENODEV;
|
||||
}
|
||||
svc_get(nn->nfsd_serv);
|
||||
ret = svc_pool_stats_open(nn->nfsd_serv, file);
|
||||
mutex_unlock(&nfsd_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int nfsd_pool_stats_release(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct seq_file *seq = file->private_data;
|
||||
struct svc_serv *serv = seq->private;
|
||||
int ret = seq_release(inode, file);
|
||||
|
||||
mutex_lock(&nfsd_mutex);
|
||||
svc_put(serv);
|
||||
mutex_unlock(&nfsd_mutex);
|
||||
return ret;
|
||||
return svc_pool_stats_open(&nn->nfsd_info, file);
|
||||
}
|
||||
|
@ -1262,28 +1262,6 @@ TRACE_EVENT(nfsd_drc_mismatch,
|
||||
__entry->ingress)
|
||||
);
|
||||
|
||||
TRACE_EVENT_CONDITION(nfsd_drc_gc,
|
||||
TP_PROTO(
|
||||
const struct nfsd_net *nn,
|
||||
unsigned long freed
|
||||
),
|
||||
TP_ARGS(nn, freed),
|
||||
TP_CONDITION(freed > 0),
|
||||
TP_STRUCT__entry(
|
||||
__field(unsigned long long, boot_time)
|
||||
__field(unsigned long, freed)
|
||||
__field(int, total)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->boot_time = nn->boot_time;
|
||||
__entry->freed = freed;
|
||||
__entry->total = atomic_read(&nn->num_drc_entries);
|
||||
),
|
||||
TP_printk("boot_time=%16llx total=%d freed=%lu",
|
||||
__entry->boot_time, __entry->total, __entry->freed
|
||||
)
|
||||
);
|
||||
|
||||
TRACE_EVENT(nfsd_cb_args,
|
||||
TP_PROTO(
|
||||
const struct nfs4_client *clp,
|
||||
|
@ -1210,6 +1210,30 @@ out_nfserr:
|
||||
return nfserr;
|
||||
}
|
||||
|
||||
/**
|
||||
* nfsd_read_splice_ok - check if spliced reading is supported
|
||||
* @rqstp: RPC transaction context
|
||||
*
|
||||
* Return values:
|
||||
* %true: nfsd_splice_read() may be used
|
||||
* %false: nfsd_splice_read() must not be used
|
||||
*
|
||||
* NFS READ normally uses splice to send data in-place. However the
|
||||
* data in cache can change after the reply's MIC is computed but
|
||||
* before the RPC reply is sent. To prevent the client from
|
||||
* rejecting the server-computed MIC in this somewhat rare case, do
|
||||
* not use splice with the GSS integrity and privacy services.
|
||||
*/
|
||||
bool nfsd_read_splice_ok(struct svc_rqst *rqstp)
|
||||
{
|
||||
switch (svc_auth_flavor(rqstp)) {
|
||||
case RPC_AUTH_GSS_KRB5I:
|
||||
case RPC_AUTH_GSS_KRB5P:
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* nfsd_read - Read data from a file
|
||||
* @rqstp: RPC transaction context
|
||||
@ -1239,7 +1263,7 @@ __be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
|
||||
return err;
|
||||
|
||||
file = nf->nf_file;
|
||||
if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &rqstp->rq_flags))
|
||||
if (file->f_op->splice_read && nfsd_read_splice_ok(rqstp))
|
||||
err = nfsd_splice_read(rqstp, fhp, file, offset, count, eof);
|
||||
else
|
||||
err = nfsd_iter_read(rqstp, fhp, file, offset, count, 0, eof);
|
||||
@ -2103,9 +2127,23 @@ static __be32 nfsd_buffered_readdir(struct file *file, struct svc_fh *fhp,
|
||||
return cdp->err;
|
||||
}
|
||||
|
||||
/*
|
||||
* Read entries from a directory.
|
||||
* The NFSv3/4 verifier we ignore for now.
|
||||
/**
|
||||
* nfsd_readdir - Read entries from a directory
|
||||
* @rqstp: RPC transaction context
|
||||
* @fhp: NFS file handle of directory to be read
|
||||
* @offsetp: OUT: seek offset of final entry that was read
|
||||
* @cdp: OUT: an eof error value
|
||||
* @func: entry filler actor
|
||||
*
|
||||
* This implementation ignores the NFSv3/4 verifier cookie.
|
||||
*
|
||||
* NB: normal system calls hold file->f_pos_lock when calling
|
||||
* ->iterate_shared and ->llseek, but nfsd_readdir() does not.
|
||||
* Because the struct file acquired here is not visible to other
|
||||
* threads, it's internal state does not need mutex protection.
|
||||
*
|
||||
* Returns nfs_ok on success, otherwise an nfsstat code is
|
||||
* returned.
|
||||
*/
|
||||
__be32
|
||||
nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
|
||||
|
@ -114,6 +114,7 @@ __be32 nfsd_iter_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
|
||||
struct file *file, loff_t offset,
|
||||
unsigned long *count, unsigned int base,
|
||||
u32 *eof);
|
||||
bool nfsd_read_splice_ok(struct svc_rqst *rqstp);
|
||||
__be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
|
||||
loff_t offset, unsigned long *count,
|
||||
u32 *eof);
|
||||
|
@ -840,6 +840,7 @@ struct nfsd4_compoundargs {
|
||||
u32 minorversion;
|
||||
u32 client_opcnt;
|
||||
u32 opcnt;
|
||||
bool splice_ok;
|
||||
struct nfsd4_op *ops;
|
||||
struct nfsd4_op iops[8];
|
||||
};
|
||||
|
@ -869,4 +869,26 @@ enum {
|
||||
RCA4_TYPE_MASK_OTHER_LAYOUT_MAX = 15,
|
||||
};
|
||||
|
||||
enum nfs_cb_opnum4 {
|
||||
OP_CB_GETATTR = 3,
|
||||
OP_CB_RECALL = 4,
|
||||
|
||||
/* Callback operations new to NFSv4.1 */
|
||||
OP_CB_LAYOUTRECALL = 5,
|
||||
OP_CB_NOTIFY = 6,
|
||||
OP_CB_PUSH_DELEG = 7,
|
||||
OP_CB_RECALL_ANY = 8,
|
||||
OP_CB_RECALLABLE_OBJ_AVAIL = 9,
|
||||
OP_CB_RECALL_SLOT = 10,
|
||||
OP_CB_SEQUENCE = 11,
|
||||
OP_CB_WANTS_CANCELLED = 12,
|
||||
OP_CB_NOTIFY_LOCK = 13,
|
||||
OP_CB_NOTIFY_DEVICEID = 14,
|
||||
|
||||
/* Callback operations new to NFSv4.2 */
|
||||
OP_CB_OFFLOAD = 15,
|
||||
|
||||
OP_CB_ILLEGAL = 10044,
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@ -69,7 +69,6 @@ struct svc_serv {
|
||||
struct svc_program * sv_program; /* RPC program */
|
||||
struct svc_stat * sv_stats; /* RPC statistics */
|
||||
spinlock_t sv_lock;
|
||||
struct kref sv_refcnt;
|
||||
unsigned int sv_nrthreads; /* # of server threads */
|
||||
unsigned int sv_maxconn; /* max connections allowed or
|
||||
* '0' causing max to be based
|
||||
@ -97,31 +96,13 @@ struct svc_serv {
|
||||
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
|
||||
};
|
||||
|
||||
/**
|
||||
* svc_get() - increment reference count on a SUNRPC serv
|
||||
* @serv: the svc_serv to have count incremented
|
||||
*
|
||||
* Returns: the svc_serv that was passed in.
|
||||
*/
|
||||
static inline struct svc_serv *svc_get(struct svc_serv *serv)
|
||||
{
|
||||
kref_get(&serv->sv_refcnt);
|
||||
return serv;
|
||||
}
|
||||
/* This is used by pool_stats to find and lock an svc */
|
||||
struct svc_info {
|
||||
struct svc_serv *serv;
|
||||
struct mutex *mutex;
|
||||
};
|
||||
|
||||
void svc_destroy(struct kref *);
|
||||
|
||||
/**
|
||||
* svc_put - decrement reference count on a SUNRPC serv
|
||||
* @serv: the svc_serv to have count decremented
|
||||
*
|
||||
* When the reference count reaches zero, svc_destroy()
|
||||
* is called to clean up and free the serv.
|
||||
*/
|
||||
static inline void svc_put(struct svc_serv *serv)
|
||||
{
|
||||
kref_put(&serv->sv_refcnt, svc_destroy);
|
||||
}
|
||||
void svc_destroy(struct svc_serv **svcp);
|
||||
|
||||
/*
|
||||
* Maximum payload size supported by a kernel RPC server.
|
||||
@ -260,8 +241,6 @@ enum {
|
||||
RQ_LOCAL, /* local request */
|
||||
RQ_USEDEFERRAL, /* use deferral */
|
||||
RQ_DROPME, /* drop current reply */
|
||||
RQ_SPLICE_OK, /* turned off in gss privacy to prevent
|
||||
* encrypting page cache pages */
|
||||
RQ_VICTIM, /* Have agreed to shut down */
|
||||
RQ_DATA, /* request has data */
|
||||
};
|
||||
@ -433,7 +412,7 @@ void svc_exit_thread(struct svc_rqst *);
|
||||
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
|
||||
int (*threadfn)(void *data));
|
||||
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
|
||||
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
|
||||
int svc_pool_stats_open(struct svc_info *si, struct file *file);
|
||||
void svc_process(struct svc_rqst *rqstp);
|
||||
void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp);
|
||||
int svc_register(const struct svc_serv *, struct net *, const int,
|
||||
|
@ -65,6 +65,7 @@ extern unsigned int svcrdma_ord;
|
||||
extern unsigned int svcrdma_max_requests;
|
||||
extern unsigned int svcrdma_max_bc_requests;
|
||||
extern unsigned int svcrdma_max_req_size;
|
||||
extern struct workqueue_struct *svcrdma_wq;
|
||||
|
||||
extern struct percpu_counter svcrdma_stat_read;
|
||||
extern struct percpu_counter svcrdma_stat_recv;
|
||||
@ -97,6 +98,7 @@ struct svcxprt_rdma {
|
||||
u32 sc_pending_recvs;
|
||||
u32 sc_recv_batch;
|
||||
struct list_head sc_rq_dto_q;
|
||||
struct list_head sc_read_complete_q;
|
||||
spinlock_t sc_rq_dto_lock;
|
||||
struct ib_qp *sc_qp;
|
||||
struct ib_cq *sc_rq_cq;
|
||||
@ -115,6 +117,13 @@ struct svcxprt_rdma {
|
||||
/* sc_flags */
|
||||
#define RDMAXPRT_CONN_PENDING 3
|
||||
|
||||
static inline struct svcxprt_rdma *svc_rdma_rqst_rdma(struct svc_rqst *rqstp)
|
||||
{
|
||||
struct svc_xprt *xprt = rqstp->rq_xprt;
|
||||
|
||||
return container_of(xprt, struct svcxprt_rdma, sc_xprt);
|
||||
}
|
||||
|
||||
/*
|
||||
* Default connection parameters
|
||||
*/
|
||||
@ -126,6 +135,43 @@ enum {
|
||||
|
||||
#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
|
||||
|
||||
/**
|
||||
* svc_rdma_send_cid_init - Initialize a Receive Queue completion ID
|
||||
* @rdma: controlling transport
|
||||
* @cid: completion ID to initialize
|
||||
*/
|
||||
static inline void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
|
||||
struct rpc_rdma_cid *cid)
|
||||
{
|
||||
cid->ci_queue_id = rdma->sc_rq_cq->res.id;
|
||||
cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
|
||||
}
|
||||
|
||||
/**
|
||||
* svc_rdma_send_cid_init - Initialize a Send Queue completion ID
|
||||
* @rdma: controlling transport
|
||||
* @cid: completion ID to initialize
|
||||
*/
|
||||
static inline void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
|
||||
struct rpc_rdma_cid *cid)
|
||||
{
|
||||
cid->ci_queue_id = rdma->sc_sq_cq->res.id;
|
||||
cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
|
||||
}
|
||||
|
||||
/*
|
||||
* A chunk context tracks all I/O for moving one Read or Write
|
||||
* chunk. This is a set of rdma_rw's that handle data movement
|
||||
* for all segments of one chunk.
|
||||
*/
|
||||
struct svc_rdma_chunk_ctxt {
|
||||
struct rpc_rdma_cid cc_cid;
|
||||
struct ib_cqe cc_cqe;
|
||||
struct list_head cc_rwctxts;
|
||||
ktime_t cc_posttime;
|
||||
int cc_sqecount;
|
||||
};
|
||||
|
||||
struct svc_rdma_recv_ctxt {
|
||||
struct llist_node rc_node;
|
||||
struct list_head rc_list;
|
||||
@ -136,22 +182,33 @@ struct svc_rdma_recv_ctxt {
|
||||
void *rc_recv_buf;
|
||||
struct xdr_stream rc_stream;
|
||||
u32 rc_byte_len;
|
||||
unsigned int rc_page_count;
|
||||
u32 rc_inv_rkey;
|
||||
__be32 rc_msgtype;
|
||||
|
||||
/* State for pulling a Read chunk */
|
||||
unsigned int rc_pageoff;
|
||||
unsigned int rc_curpage;
|
||||
unsigned int rc_readbytes;
|
||||
struct xdr_buf rc_saved_arg;
|
||||
struct svc_rdma_chunk_ctxt rc_cc;
|
||||
|
||||
struct svc_rdma_pcl rc_call_pcl;
|
||||
|
||||
struct svc_rdma_pcl rc_read_pcl;
|
||||
struct svc_rdma_chunk *rc_cur_result_payload;
|
||||
struct svc_rdma_pcl rc_write_pcl;
|
||||
struct svc_rdma_pcl rc_reply_pcl;
|
||||
|
||||
unsigned int rc_page_count;
|
||||
struct page *rc_pages[RPCSVC_MAXPAGES];
|
||||
};
|
||||
|
||||
struct svc_rdma_send_ctxt {
|
||||
struct llist_node sc_node;
|
||||
struct rpc_rdma_cid sc_cid;
|
||||
struct work_struct sc_work;
|
||||
|
||||
struct svcxprt_rdma *sc_rdma;
|
||||
struct ib_send_wr sc_send_wr;
|
||||
struct ib_cqe sc_cqe;
|
||||
struct xdr_buf sc_hdrbuf;
|
||||
@ -180,6 +237,11 @@ extern int svc_rdma_recvfrom(struct svc_rqst *);
|
||||
|
||||
/* svc_rdma_rw.c */
|
||||
extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
|
||||
extern void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
|
||||
struct svc_rdma_chunk_ctxt *cc);
|
||||
extern void svc_rdma_cc_release(struct svcxprt_rdma *rdma,
|
||||
struct svc_rdma_chunk_ctxt *cc,
|
||||
enum dma_data_direction dir);
|
||||
extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
|
||||
const struct svc_rdma_chunk *chunk,
|
||||
const struct xdr_buf *xdr);
|
||||
@ -200,7 +262,8 @@ extern int svc_rdma_send(struct svcxprt_rdma *rdma,
|
||||
struct svc_rdma_send_ctxt *ctxt);
|
||||
extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
|
||||
struct svc_rdma_send_ctxt *sctxt,
|
||||
const struct svc_rdma_recv_ctxt *rctxt,
|
||||
const struct svc_rdma_pcl *write_pcl,
|
||||
const struct svc_rdma_pcl *reply_pcl,
|
||||
const struct xdr_buf *xdr);
|
||||
extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
|
||||
struct svc_rdma_send_ctxt *sctxt,
|
||||
|
@ -131,8 +131,11 @@ enum svc_auth_status {
|
||||
* This call releases a domain.
|
||||
*
|
||||
* set_client()
|
||||
* Givens a pending request (struct svc_rqst), finds and assigns
|
||||
* Given a pending request (struct svc_rqst), finds and assigns
|
||||
* an appropriate 'auth_domain' as the client.
|
||||
*
|
||||
* pseudoflavor()
|
||||
* Returns RPC_AUTH pseudoflavor in use by @rqstp.
|
||||
*/
|
||||
struct auth_ops {
|
||||
char * name;
|
||||
@ -143,11 +146,13 @@ struct auth_ops {
|
||||
int (*release)(struct svc_rqst *rqstp);
|
||||
void (*domain_release)(struct auth_domain *dom);
|
||||
enum svc_auth_status (*set_client)(struct svc_rqst *rqstp);
|
||||
rpc_authflavor_t (*pseudoflavor)(struct svc_rqst *rqstp);
|
||||
};
|
||||
|
||||
struct svc_xprt;
|
||||
|
||||
extern enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp);
|
||||
extern rpc_authflavor_t svc_auth_flavor(struct svc_rqst *rqstp);
|
||||
extern int svc_authorise(struct svc_rqst *rqstp);
|
||||
extern enum svc_auth_status svc_set_client(struct svc_rqst *rqstp);
|
||||
extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops);
|
||||
|
@ -22,6 +22,36 @@
|
||||
** Event classes
|
||||
**/
|
||||
|
||||
DECLARE_EVENT_CLASS(rpcrdma_simple_cid_class,
|
||||
TP_PROTO(
|
||||
const struct rpc_rdma_cid *cid
|
||||
),
|
||||
|
||||
TP_ARGS(cid),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, cq_id)
|
||||
__field(int, completion_id)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->cq_id = cid->ci_queue_id;
|
||||
__entry->completion_id = cid->ci_completion_id;
|
||||
),
|
||||
|
||||
TP_printk("cq.id=%d cid=%d",
|
||||
__entry->cq_id, __entry->completion_id
|
||||
)
|
||||
);
|
||||
|
||||
#define DEFINE_SIMPLE_CID_EVENT(name) \
|
||||
DEFINE_EVENT(rpcrdma_simple_cid_class, name, \
|
||||
TP_PROTO( \
|
||||
const struct rpc_rdma_cid *cid \
|
||||
), \
|
||||
TP_ARGS(cid) \
|
||||
)
|
||||
|
||||
DECLARE_EVENT_CLASS(rpcrdma_completion_class,
|
||||
TP_PROTO(
|
||||
const struct ib_wc *wc,
|
||||
@ -62,37 +92,6 @@ DECLARE_EVENT_CLASS(rpcrdma_completion_class,
|
||||
), \
|
||||
TP_ARGS(wc, cid))
|
||||
|
||||
DECLARE_EVENT_CLASS(rpcrdma_send_completion_class,
|
||||
TP_PROTO(
|
||||
const struct ib_wc *wc,
|
||||
const struct rpc_rdma_cid *cid
|
||||
),
|
||||
|
||||
TP_ARGS(wc, cid),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, cq_id)
|
||||
__field(int, completion_id)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->cq_id = cid->ci_queue_id;
|
||||
__entry->completion_id = cid->ci_completion_id;
|
||||
),
|
||||
|
||||
TP_printk("cq.id=%u cid=%d",
|
||||
__entry->cq_id, __entry->completion_id
|
||||
)
|
||||
);
|
||||
|
||||
#define DEFINE_SEND_COMPLETION_EVENT(name) \
|
||||
DEFINE_EVENT(rpcrdma_send_completion_class, name, \
|
||||
TP_PROTO( \
|
||||
const struct ib_wc *wc, \
|
||||
const struct rpc_rdma_cid *cid \
|
||||
), \
|
||||
TP_ARGS(wc, cid))
|
||||
|
||||
DECLARE_EVENT_CLASS(rpcrdma_send_flush_class,
|
||||
TP_PROTO(
|
||||
const struct ib_wc *wc,
|
||||
@ -978,27 +977,7 @@ TRACE_EVENT(xprtrdma_post_send_err,
|
||||
)
|
||||
);
|
||||
|
||||
TRACE_EVENT(xprtrdma_post_recv,
|
||||
TP_PROTO(
|
||||
const struct rpcrdma_rep *rep
|
||||
),
|
||||
|
||||
TP_ARGS(rep),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, cq_id)
|
||||
__field(int, completion_id)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->cq_id = rep->rr_cid.ci_queue_id;
|
||||
__entry->completion_id = rep->rr_cid.ci_completion_id;
|
||||
),
|
||||
|
||||
TP_printk("cq.id=%d cid=%d",
|
||||
__entry->cq_id, __entry->completion_id
|
||||
)
|
||||
);
|
||||
DEFINE_SIMPLE_CID_EVENT(xprtrdma_post_recv);
|
||||
|
||||
TRACE_EVENT(xprtrdma_post_recvs,
|
||||
TP_PROTO(
|
||||
@ -1783,29 +1762,29 @@ DEFINE_ERROR_EVENT(chunk);
|
||||
|
||||
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
|
||||
TP_PROTO(
|
||||
const struct svcxprt_rdma *rdma,
|
||||
const struct rpc_rdma_cid *cid,
|
||||
u64 dma_addr,
|
||||
u32 length
|
||||
),
|
||||
|
||||
TP_ARGS(rdma, dma_addr, length),
|
||||
TP_ARGS(cid, dma_addr, length),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, cq_id)
|
||||
__field(int, completion_id)
|
||||
__field(u64, dma_addr)
|
||||
__field(u32, length)
|
||||
__string(device, rdma->sc_cm_id->device->name)
|
||||
__string(addr, rdma->sc_xprt.xpt_remotebuf)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->cq_id = cid->ci_queue_id;
|
||||
__entry->completion_id = cid->ci_completion_id;
|
||||
__entry->dma_addr = dma_addr;
|
||||
__entry->length = length;
|
||||
__assign_str(device, rdma->sc_cm_id->device->name);
|
||||
__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
|
||||
),
|
||||
|
||||
TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
|
||||
__get_str(addr), __get_str(device),
|
||||
TP_printk("cq.id=%u cid=%d dma_addr=%llu length=%u",
|
||||
__entry->cq_id, __entry->completion_id,
|
||||
__entry->dma_addr, __entry->length
|
||||
)
|
||||
);
|
||||
@ -1813,11 +1792,12 @@ DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
|
||||
#define DEFINE_SVC_DMA_EVENT(name) \
|
||||
DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \
|
||||
TP_PROTO( \
|
||||
const struct svcxprt_rdma *rdma,\
|
||||
const struct rpc_rdma_cid *cid, \
|
||||
u64 dma_addr, \
|
||||
u32 length \
|
||||
), \
|
||||
TP_ARGS(rdma, dma_addr, length))
|
||||
TP_ARGS(cid, dma_addr, length) \
|
||||
)
|
||||
|
||||
DEFINE_SVC_DMA_EVENT(dma_map_page);
|
||||
DEFINE_SVC_DMA_EVENT(dma_map_err);
|
||||
@ -1826,33 +1806,37 @@ DEFINE_SVC_DMA_EVENT(dma_unmap_page);
|
||||
TRACE_EVENT(svcrdma_dma_map_rw_err,
|
||||
TP_PROTO(
|
||||
const struct svcxprt_rdma *rdma,
|
||||
u64 offset,
|
||||
u32 handle,
|
||||
unsigned int nents,
|
||||
int status
|
||||
),
|
||||
|
||||
TP_ARGS(rdma, nents, status),
|
||||
TP_ARGS(rdma, offset, handle, nents, status),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(int, status)
|
||||
__field(u32, cq_id)
|
||||
__field(u32, handle)
|
||||
__field(u64, offset)
|
||||
__field(unsigned int, nents)
|
||||
__string(device, rdma->sc_cm_id->device->name)
|
||||
__string(addr, rdma->sc_xprt.xpt_remotebuf)
|
||||
__field(int, status)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->status = status;
|
||||
__entry->cq_id = rdma->sc_sq_cq->res.id;
|
||||
__entry->handle = handle;
|
||||
__entry->offset = offset;
|
||||
__entry->nents = nents;
|
||||
__assign_str(device, rdma->sc_cm_id->device->name);
|
||||
__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
|
||||
__entry->status = status;
|
||||
),
|
||||
|
||||
TP_printk("addr=%s device=%s nents=%u status=%d",
|
||||
__get_str(addr), __get_str(device), __entry->nents,
|
||||
__entry->status
|
||||
TP_printk("cq.id=%u 0x%016llx:0x%08x nents=%u status=%d",
|
||||
__entry->cq_id, (unsigned long long)__entry->offset,
|
||||
__entry->handle, __entry->nents, __entry->status
|
||||
)
|
||||
);
|
||||
|
||||
TRACE_EVENT(svcrdma_no_rwctx_err,
|
||||
TRACE_EVENT(svcrdma_rwctx_empty,
|
||||
TP_PROTO(
|
||||
const struct svcxprt_rdma *rdma,
|
||||
unsigned int num_sges
|
||||
@ -1861,79 +1845,75 @@ TRACE_EVENT(svcrdma_no_rwctx_err,
|
||||
TP_ARGS(rdma, num_sges),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, cq_id)
|
||||
__field(unsigned int, num_sges)
|
||||
__string(device, rdma->sc_cm_id->device->name)
|
||||
__string(addr, rdma->sc_xprt.xpt_remotebuf)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->cq_id = rdma->sc_sq_cq->res.id;
|
||||
__entry->num_sges = num_sges;
|
||||
__assign_str(device, rdma->sc_cm_id->device->name);
|
||||
__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
|
||||
),
|
||||
|
||||
TP_printk("addr=%s device=%s num_sges=%d",
|
||||
__get_str(addr), __get_str(device), __entry->num_sges
|
||||
TP_printk("cq.id=%u num_sges=%d",
|
||||
__entry->cq_id, __entry->num_sges
|
||||
)
|
||||
);
|
||||
|
||||
TRACE_EVENT(svcrdma_page_overrun_err,
|
||||
TP_PROTO(
|
||||
const struct svcxprt_rdma *rdma,
|
||||
const struct svc_rqst *rqst,
|
||||
const struct rpc_rdma_cid *cid,
|
||||
unsigned int pageno
|
||||
),
|
||||
|
||||
TP_ARGS(rdma, rqst, pageno),
|
||||
TP_ARGS(cid, pageno),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, cq_id)
|
||||
__field(int, completion_id)
|
||||
__field(unsigned int, pageno)
|
||||
__field(u32, xid)
|
||||
__string(device, rdma->sc_cm_id->device->name)
|
||||
__string(addr, rdma->sc_xprt.xpt_remotebuf)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->cq_id = cid->ci_queue_id;
|
||||
__entry->completion_id = cid->ci_completion_id;
|
||||
__entry->pageno = pageno;
|
||||
__entry->xid = __be32_to_cpu(rqst->rq_xid);
|
||||
__assign_str(device, rdma->sc_cm_id->device->name);
|
||||
__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
|
||||
),
|
||||
|
||||
TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
|
||||
__get_str(device), __entry->xid, __entry->pageno
|
||||
TP_printk("cq.id=%u cid=%d pageno=%u",
|
||||
__entry->cq_id, __entry->completion_id,
|
||||
__entry->pageno
|
||||
)
|
||||
);
|
||||
|
||||
TRACE_EVENT(svcrdma_small_wrch_err,
|
||||
TP_PROTO(
|
||||
const struct svcxprt_rdma *rdma,
|
||||
const struct rpc_rdma_cid *cid,
|
||||
unsigned int remaining,
|
||||
unsigned int seg_no,
|
||||
unsigned int num_segs
|
||||
),
|
||||
|
||||
TP_ARGS(rdma, remaining, seg_no, num_segs),
|
||||
TP_ARGS(cid, remaining, seg_no, num_segs),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, cq_id)
|
||||
__field(int, completion_id)
|
||||
__field(unsigned int, remaining)
|
||||
__field(unsigned int, seg_no)
|
||||
__field(unsigned int, num_segs)
|
||||
__string(device, rdma->sc_cm_id->device->name)
|
||||
__string(addr, rdma->sc_xprt.xpt_remotebuf)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->cq_id = cid->ci_queue_id;
|
||||
__entry->completion_id = cid->ci_completion_id;
|
||||
__entry->remaining = remaining;
|
||||
__entry->seg_no = seg_no;
|
||||
__entry->num_segs = num_segs;
|
||||
__assign_str(device, rdma->sc_cm_id->device->name);
|
||||
__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
|
||||
),
|
||||
|
||||
TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
|
||||
__get_str(addr), __get_str(device), __entry->remaining,
|
||||
__entry->seg_no, __entry->num_segs
|
||||
TP_printk("cq.id=%u cid=%d remaining=%u seg_no=%u num_segs=%u",
|
||||
__entry->cq_id, __entry->completion_id,
|
||||
__entry->remaining, __entry->seg_no, __entry->num_segs
|
||||
)
|
||||
);
|
||||
|
||||
@ -2020,31 +2000,11 @@ TRACE_EVENT(svcrdma_post_send,
|
||||
)
|
||||
);
|
||||
|
||||
DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_send);
|
||||
DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_send);
|
||||
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_flush);
|
||||
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_err);
|
||||
|
||||
TRACE_EVENT(svcrdma_post_recv,
|
||||
TP_PROTO(
|
||||
const struct svc_rdma_recv_ctxt *ctxt
|
||||
),
|
||||
|
||||
TP_ARGS(ctxt),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, cq_id)
|
||||
__field(int, completion_id)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->cq_id = ctxt->rc_cid.ci_queue_id;
|
||||
__entry->completion_id = ctxt->rc_cid.ci_completion_id;
|
||||
),
|
||||
|
||||
TP_printk("cq.id=%d cid=%d",
|
||||
__entry->cq_id, __entry->completion_id
|
||||
)
|
||||
);
|
||||
DEFINE_SIMPLE_CID_EVENT(svcrdma_post_recv);
|
||||
|
||||
DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv);
|
||||
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush);
|
||||
@ -2152,8 +2112,9 @@ TRACE_EVENT(svcrdma_wc_read,
|
||||
|
||||
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_flush);
|
||||
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_err);
|
||||
DEFINE_SIMPLE_CID_EVENT(svcrdma_read_finished);
|
||||
|
||||
DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_write);
|
||||
DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_write);
|
||||
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_flush);
|
||||
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_err);
|
||||
|
||||
@ -2184,65 +2145,74 @@ TRACE_EVENT(svcrdma_qp_error,
|
||||
)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
|
||||
DECLARE_EVENT_CLASS(svcrdma_sendqueue_class,
|
||||
TP_PROTO(
|
||||
const struct svcxprt_rdma *rdma
|
||||
const struct svcxprt_rdma *rdma,
|
||||
const struct rpc_rdma_cid *cid
|
||||
),
|
||||
|
||||
TP_ARGS(rdma),
|
||||
TP_ARGS(rdma, cid),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, cq_id)
|
||||
__field(int, completion_id)
|
||||
__field(int, avail)
|
||||
__field(int, depth)
|
||||
__string(addr, rdma->sc_xprt.xpt_remotebuf)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->cq_id = cid->ci_queue_id;
|
||||
__entry->completion_id = cid->ci_completion_id;
|
||||
__entry->avail = atomic_read(&rdma->sc_sq_avail);
|
||||
__entry->depth = rdma->sc_sq_depth;
|
||||
__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
|
||||
),
|
||||
|
||||
TP_printk("addr=%s sc_sq_avail=%d/%d",
|
||||
__get_str(addr), __entry->avail, __entry->depth
|
||||
TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d",
|
||||
__entry->cq_id, __entry->completion_id,
|
||||
__entry->avail, __entry->depth
|
||||
)
|
||||
);
|
||||
|
||||
#define DEFINE_SQ_EVENT(name) \
|
||||
DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
|
||||
TP_PROTO( \
|
||||
const struct svcxprt_rdma *rdma \
|
||||
), \
|
||||
TP_ARGS(rdma))
|
||||
DEFINE_EVENT(svcrdma_sendqueue_class, name, \
|
||||
TP_PROTO( \
|
||||
const struct svcxprt_rdma *rdma, \
|
||||
const struct rpc_rdma_cid *cid \
|
||||
), \
|
||||
TP_ARGS(rdma, cid) \
|
||||
)
|
||||
|
||||
DEFINE_SQ_EVENT(full);
|
||||
DEFINE_SQ_EVENT(retry);
|
||||
DEFINE_SQ_EVENT(svcrdma_sq_full);
|
||||
DEFINE_SQ_EVENT(svcrdma_sq_retry);
|
||||
|
||||
TRACE_EVENT(svcrdma_sq_post_err,
|
||||
TP_PROTO(
|
||||
const struct svcxprt_rdma *rdma,
|
||||
const struct rpc_rdma_cid *cid,
|
||||
int status
|
||||
),
|
||||
|
||||
TP_ARGS(rdma, status),
|
||||
TP_ARGS(rdma, cid, status),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, cq_id)
|
||||
__field(int, completion_id)
|
||||
__field(int, avail)
|
||||
__field(int, depth)
|
||||
__field(int, status)
|
||||
__string(addr, rdma->sc_xprt.xpt_remotebuf)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->cq_id = cid->ci_queue_id;
|
||||
__entry->completion_id = cid->ci_completion_id;
|
||||
__entry->avail = atomic_read(&rdma->sc_sq_avail);
|
||||
__entry->depth = rdma->sc_sq_depth;
|
||||
__entry->status = status;
|
||||
__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
|
||||
),
|
||||
|
||||
TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
|
||||
__get_str(addr), __entry->avail, __entry->depth,
|
||||
__entry->status
|
||||
TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d status=%d",
|
||||
__entry->cq_id, __entry->completion_id,
|
||||
__entry->avail, __entry->depth, __entry->status
|
||||
)
|
||||
);
|
||||
|
||||
|
@@ -1675,7 +1675,6 @@ DEFINE_SVCXDRBUF_EVENT(sendto);
svc_rqst_flag(LOCAL) \
svc_rqst_flag(USEDEFERRAL) \
svc_rqst_flag(DROPME) \
svc_rqst_flag(SPLICE_OK) \
svc_rqst_flag(VICTIM) \
svc_rqst_flag_end(DATA)

@ -866,14 +866,6 @@ svcauth_gss_unwrap_integ(struct svc_rqst *rqstp, u32 seq, struct gss_ctx *ctx)
|
||||
struct xdr_buf databody_integ;
|
||||
struct xdr_netobj checksum;
|
||||
|
||||
/* NFS READ normally uses splice to send data in-place. However
|
||||
* the data in cache can change after the reply's MIC is computed
|
||||
* but before the RPC reply is sent. To prevent the client from
|
||||
* rejecting the server-computed MIC in this somewhat rare case,
|
||||
* do not use splice with the GSS integrity service.
|
||||
*/
|
||||
clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
|
||||
|
||||
/* Did we already verify the signature on the original pass through? */
|
||||
if (rqstp->rq_deferred)
|
||||
return 0;
|
||||
@ -948,8 +940,6 @@ svcauth_gss_unwrap_priv(struct svc_rqst *rqstp, u32 seq, struct gss_ctx *ctx)
	struct xdr_buf *buf = xdr->buf;
	unsigned int saved_len;

	clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);

	if (xdr_stream_decode_u32(xdr, &len) < 0)
		goto unwrap_failed;
	if (rqstp->rq_deferred) {
@ -2014,6 +2004,11 @@ svcauth_gss_domain_release(struct auth_domain *dom)
	call_rcu(&dom->rcu_head, svcauth_gss_domain_release_rcu);
}

static rpc_authflavor_t svcauth_gss_pseudoflavor(struct svc_rqst *rqstp)
{
	return svcauth_gss_flavor(rqstp->rq_gssclient);
}

static struct auth_ops svcauthops_gss = {
	.name = "rpcsec_gss",
	.owner = THIS_MODULE,
@ -2022,6 +2017,7 @@ static struct auth_ops svcauthops_gss = {
	.release = svcauth_gss_release,
	.domain_release = svcauth_gss_domain_release,
	.set_client = svcauth_gss_set_client,
	.pseudoflavor = svcauth_gss_pseudoflavor,
};

static int rsi_cache_create_net(struct net *net)

@ -463,7 +463,6 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
		return NULL;
	serv->sv_name = prog->pg_name;
	serv->sv_program = prog;
	kref_init(&serv->sv_refcnt);
	serv->sv_stats = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
@ -564,11 +563,13 @@ EXPORT_SYMBOL_GPL(svc_create_pooled);
 * protect sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct kref *ref)
svc_destroy(struct svc_serv **servp)
{
	struct svc_serv *serv = container_of(ref, struct svc_serv, sv_refcnt);
	struct svc_serv *serv = *servp;
	unsigned int i;

	*servp = NULL;

	dprintk("svc: svc_destroy(%s)\n", serv->sv_program->pg_name);
	timer_shutdown_sync(&serv->sv_temptimer);

@ -675,7 +676,6 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
	if (!rqstp)
		return ERR_PTR(-ENOMEM);

	svc_get(serv);
	spin_lock_bh(&serv->sv_lock);
	serv->sv_nrthreads += 1;
	spin_unlock_bh(&serv->sv_lock);
@ -935,11 +935,6 @@ svc_exit_thread(struct svc_rqst *rqstp)

	svc_rqst_free(rqstp);

	svc_put(serv);
	/* That svc_put() cannot be the last, because the thread
	 * waiting for SP_VICTIM_REMAINS to clear must hold
	 * a reference. So it is still safe to access pool.
	 */
	clear_and_wake_up_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);
@ -1305,8 +1300,6 @@ svc_process_common(struct svc_rqst *rqstp)
	int rc;
	__be32 *p;

	/* Will be turned off by GSS integrity and privacy services */
	set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
	/* Will be turned off only when NFSv4 Sessions are used */
	set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
	clear_bit(RQ_DROPME, &rqstp->rq_flags);

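One theme in the svc.c hunks above is replacing kref-based release (svc_get()/svc_put()) with an explicit svc_destroy(&serv) that also clears the caller's pointer. A stand-alone sketch of that "destroy and NULL the pointer" idiom, with invented names (this models the API shape only, not the kernel implementation):

#include <stdio.h>
#include <stdlib.h>

struct example_serv {
	const char *name;
};

/* Free *servp and clear the caller's pointer so it cannot be reused. */
static void example_destroy(struct example_serv **servp)
{
	struct example_serv *serv = *servp;

	*servp = NULL;
	free(serv);
}

int main(void)
{
	struct example_serv *serv = malloc(sizeof(*serv));

	if (!serv)
		return 1;
	serv->name = "nfsd";
	example_destroy(&serv);
	printf("serv is %s after destroy\n", serv ? "still set" : "NULL");
	return 0;
}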
@ -1362,29 +1362,36 @@ int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
}
EXPORT_SYMBOL_GPL(svc_xprt_names);


/*----------------------------------------------------------------------------*/

static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
	unsigned int pidx = (unsigned int)*pos;
	struct svc_serv *serv = m->private;
	struct svc_info *si = m->private;

	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);

	mutex_lock(si->mutex);

	if (!pidx)
		return SEQ_START_TOKEN;
	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
	if (!si->serv)
		return NULL;
	return pidx > si->serv->sv_nrpools ? NULL
		: &si->serv->sv_pools[pidx - 1];
}

static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct svc_pool *pool = p;
	struct svc_serv *serv = m->private;
	struct svc_info *si = m->private;
	struct svc_serv *serv = si->serv;

	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);

	if (p == SEQ_START_TOKEN) {
	if (!serv) {
		pool = NULL;
	} else if (p == SEQ_START_TOKEN) {
		pool = &serv->sv_pools[0];
	} else {
		unsigned int pidx = (pool - &serv->sv_pools[0]);
@ -1399,6 +1406,9 @@ static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)

static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
	struct svc_info *si = m->private;

	mutex_unlock(si->mutex);
}

static int svc_pool_stats_show(struct seq_file *m, void *p)
@ -1426,14 +1436,18 @@ static const struct seq_operations svc_pool_stats_seq_ops = {
	.show = svc_pool_stats_show,
};

int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
int svc_pool_stats_open(struct svc_info *info, struct file *file)
{
	struct seq_file *seq;
	int err;

	err = seq_open(file, &svc_pool_stats_seq_ops);
	if (!err)
		((struct seq_file *) file->private_data)->private = serv;
	return err;
	if (err)
		return err;
	seq = file->private_data;
	seq->private = info;

	return 0;
}
EXPORT_SYMBOL(svc_pool_stats_open);

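The pool-stats changes above drop the svc_serv reference and instead rely on a mutex taken in the seq_file ->start method and released in ->stop, with a NULL si->serv check while iterating. A stand-alone sketch of that "lock for the whole iteration" shape, using pthreads and invented names (this is an illustration of the locking pattern, not the seq_file API):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for struct svc_info: the iterated object may be
 * torn down elsewhere, so iteration holds the mutex and checks for NULL.
 */
struct example_info {
	pthread_mutex_t	*mutex;
	int		*items;		/* NULL once the service is gone */
	unsigned int	nr_items;
};

static int *example_start(struct example_info *si, unsigned int pos)
{
	pthread_mutex_lock(si->mutex);	/* held until example_stop() */
	if (!si->items || pos >= si->nr_items)
		return NULL;
	return &si->items[pos];
}

static void example_stop(struct example_info *si)
{
	pthread_mutex_unlock(si->mutex);
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	int stats[3] = { 10, 20, 30 };
	struct example_info si = { &lock, stats, 3 };
	int *p = example_start(&si, 0);

	for (unsigned int pos = 0; p; p = (++pos < si.nr_items) ? &si.items[pos] : NULL)
		printf("pool %u: %d\n", pos, *p);
	example_stop(&si);
	return 0;
}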
@ -160,6 +160,22 @@ svc_auth_unregister(rpc_authflavor_t flavor)
}
EXPORT_SYMBOL_GPL(svc_auth_unregister);

/**
 * svc_auth_flavor - return RPC transaction's RPC_AUTH flavor
 * @rqstp: RPC transaction context
 *
 * Returns an RPC flavor or GSS pseudoflavor.
 */
rpc_authflavor_t svc_auth_flavor(struct svc_rqst *rqstp)
{
	struct auth_ops *aops = rqstp->rq_authop;

	if (!aops->pseudoflavor)
		return aops->flavour;
	return aops->pseudoflavor(rqstp);
}
EXPORT_SYMBOL_GPL(svc_auth_flavor);

/**************************************************
 * 'auth_domains' are stored in a hash table indexed by name.
 * When the last reference to an 'auth_domain' is dropped,

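svc_auth_flavor() above follows a common ops-table pattern: use the flavor-specific ->pseudoflavor callback when the auth_ops provides one, otherwise fall back to the fixed ->flavour value. A stand-alone sketch of that shape (all names invented for the example; the pseudoflavor number is just a plausible value):

#include <stdio.h>

typedef unsigned int flavor_t;

struct example_auth_ops {
	flavor_t	flavour;			/* fixed flavor number */
	flavor_t	(*pseudoflavor)(void *rqstp);	/* optional override */
};

static flavor_t example_auth_flavor(const struct example_auth_ops *aops, void *rqstp)
{
	if (!aops->pseudoflavor)
		return aops->flavour;
	return aops->pseudoflavor(rqstp);
}

static flavor_t gss_pseudoflavor(void *rqstp)
{
	(void)rqstp;
	return 390003;			/* e.g. a krb5 pseudoflavor */
}

int main(void)
{
	struct example_auth_ops unix_ops = { .flavour = 1 };
	struct example_auth_ops gss_ops = { .flavour = 6, .pseudoflavor = gss_pseudoflavor };

	printf("unix: %u, gss: %u\n",
	       example_auth_flavor(&unix_ops, NULL),
	       example_auth_flavor(&gss_ops, NULL));
	return 0;
}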
@ -1049,18 +1049,14 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
	struct rpc_rqst *req = NULL;
	struct kvec *src, *dst;
	__be32 *p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
	__be32 xid;
	__be32 calldir;

	xid = *p++;
	calldir = *p;
	__be32 xid = *p;

	if (!bc_xprt)
		return -EAGAIN;
	spin_lock(&bc_xprt->queue_lock);
	req = xprt_lookup_rqst(bc_xprt, xid);
	if (!req)
		goto unlock_notfound;
		goto unlock_eagain;

	memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
	/*
@ -1077,12 +1073,6 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
	rqstp->rq_arg.len = 0;
	spin_unlock(&bc_xprt->queue_lock);
	return 0;
unlock_notfound:
	printk(KERN_NOTICE
		"%s: Got unrecognized reply: "
		"calldir 0x%x xpt_bc_xprt %p xid %08x\n",
		__func__, ntohl(calldir),
		bc_xprt, ntohl(xid));
unlock_eagain:
	spin_unlock(&bc_xprt->queue_lock);
	return -EAGAIN;

@ -256,28 +256,44 @@ out_err:
	return rc;
}

struct workqueue_struct *svcrdma_wq;

void svc_rdma_cleanup(void)
{
	dprintk("SVCRDMA Module Removed, deregister RPC RDMA transport\n");
	svc_unreg_xprt_class(&svc_rdma_class);
	svc_rdma_proc_cleanup();
	if (svcrdma_wq) {
		struct workqueue_struct *wq = svcrdma_wq;

		svcrdma_wq = NULL;
		destroy_workqueue(wq);
	}

	dprintk("SVCRDMA Module Removed, deregister RPC RDMA transport\n");
}

int svc_rdma_init(void)
{
	struct workqueue_struct *wq;
	int rc;

	wq = alloc_workqueue("svcrdma", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	rc = svc_rdma_proc_init();
	if (rc) {
		destroy_workqueue(wq);
		return rc;
	}

	svcrdma_wq = wq;
	svc_reg_xprt_class(&svc_rdma_class);

	dprintk("SVCRDMA Module Init, register RPC RDMA transport\n");
	dprintk("\tsvcrdma_ord : %d\n", svcrdma_ord);
	dprintk("\tmax_requests : %u\n", svcrdma_max_requests);
	dprintk("\tmax_bc_requests : %u\n", svcrdma_max_bc_requests);
	dprintk("\tmax_inline : %d\n", svcrdma_max_req_size);

	rc = svc_rdma_proc_init();
	if (rc)
		return rc;

	/* Register RDMA with the SVC transport switch */
	svc_reg_xprt_class(&svc_rdma_class);
	return 0;
}

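The reworked svc_rdma_init()/svc_rdma_cleanup() above allocate the svcrdma workqueue first, unwind it if proc registration fails, and tear everything down in reverse order at module exit, unpublishing the global pointer before destroying the queue. A stand-alone sketch of that setup/unwind ordering (resources are faked with malloc; all names are invented):

#include <stdio.h>
#include <stdlib.h>

static void *example_wq;	/* stands in for svcrdma_wq */
static void *example_proc;	/* stands in for the procfs entries */

static int example_init(void)
{
	void *wq = malloc(64);		/* alloc_workqueue() stand-in */

	if (!wq)
		return -1;
	example_proc = malloc(64);	/* proc registration stand-in */
	if (!example_proc) {
		free(wq);		/* unwind in reverse order */
		return -1;
	}
	example_wq = wq;		/* publish only after setup succeeded */
	return 0;
}

static void example_cleanup(void)
{
	free(example_proc);
	if (example_wq) {
		void *wq = example_wq;

		example_wq = NULL;	/* unpublish before destroying */
		free(wq);
	}
}

int main(void)
{
	if (example_init())
		return 1;
	example_cleanup();
	puts("init and cleanup completed");
	return 0;
}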
@ -76,15 +76,12 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
			      struct rpc_rqst *rqst,
			      struct svc_rdma_send_ctxt *sctxt)
{
	struct svc_rdma_recv_ctxt *rctxt;
	struct svc_rdma_pcl empty_pcl;
	int ret;

	rctxt = svc_rdma_recv_ctxt_get(rdma);
	if (!rctxt)
		return -EIO;

	ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqst->rq_snd_buf);
	svc_rdma_recv_ctxt_put(rdma, rctxt);
	pcl_init(&empty_pcl);
	ret = svc_rdma_map_reply_msg(rdma, sctxt, &empty_pcl, &empty_pcl,
				     &rqst->rq_snd_buf);
	if (ret < 0)
		return -EIO;

@ -115,13 +115,6 @@ svc_rdma_next_recv_ctxt(struct list_head *list)
|
||||
rc_list);
|
||||
}
|
||||
|
||||
static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
|
||||
struct rpc_rdma_cid *cid)
|
||||
{
|
||||
cid->ci_queue_id = rdma->sc_rq_cq->res.id;
|
||||
cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
|
||||
}
|
||||
|
||||
static struct svc_rdma_recv_ctxt *
|
||||
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
|
||||
{
|
||||
@ -130,7 +123,7 @@ svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
|
||||
dma_addr_t addr;
|
||||
void *buffer;
|
||||
|
||||
ctxt = kmalloc_node(sizeof(*ctxt), GFP_KERNEL, node);
|
||||
ctxt = kzalloc_node(sizeof(*ctxt), GFP_KERNEL, node);
|
||||
if (!ctxt)
|
||||
goto fail0;
|
||||
buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
|
||||
@ -156,6 +149,7 @@ svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
|
||||
ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
|
||||
ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
|
||||
ctxt->rc_recv_buf = buffer;
|
||||
svc_rdma_cc_init(rdma, &ctxt->rc_cc);
|
||||
return ctxt;
|
||||
|
||||
fail2:
|
||||
@ -204,18 +198,11 @@ struct svc_rdma_recv_ctxt *svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
|
||||
|
||||
node = llist_del_first(&rdma->sc_recv_ctxts);
|
||||
if (!node)
|
||||
goto out_empty;
|
||||
ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
|
||||
return NULL;
|
||||
|
||||
out:
|
||||
ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
|
||||
ctxt->rc_page_count = 0;
|
||||
return ctxt;
|
||||
|
||||
out_empty:
|
||||
ctxt = svc_rdma_recv_ctxt_alloc(rdma);
|
||||
if (!ctxt)
|
||||
return NULL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -227,6 +214,13 @@ out_empty:
|
||||
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
|
||||
struct svc_rdma_recv_ctxt *ctxt)
|
||||
{
|
||||
svc_rdma_cc_release(rdma, &ctxt->rc_cc, DMA_FROM_DEVICE);
|
||||
|
||||
/* @rc_page_count is normally zero here, but error flows
|
||||
* can leave pages in @rc_pages.
|
||||
*/
|
||||
release_pages(ctxt->rc_pages, ctxt->rc_page_count);
|
||||
|
||||
pcl_free(&ctxt->rc_call_pcl);
|
||||
pcl_free(&ctxt->rc_read_pcl);
|
||||
pcl_free(&ctxt->rc_write_pcl);
|
||||
@ -271,13 +265,13 @@ static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
|
||||
if (!ctxt)
|
||||
break;
|
||||
|
||||
trace_svcrdma_post_recv(ctxt);
|
||||
trace_svcrdma_post_recv(&ctxt->rc_cid);
|
||||
ctxt->rc_recv_wr.next = recv_chain;
|
||||
recv_chain = &ctxt->rc_recv_wr;
|
||||
rdma->sc_pending_recvs++;
|
||||
}
|
||||
if (!recv_chain)
|
||||
return false;
|
||||
return true;
|
||||
|
||||
ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr);
|
||||
if (ret)
|
||||
@ -301,10 +295,27 @@ err_free:
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 * Return values:
 *   %true: Receive Queue initialization successful
 *   %false: memory allocation or DMA error
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	unsigned int total;

	/* For each credit, allocate enough recv_ctxts for one
	 * posted Receive and one RPC in process.
	 */
	total = (rdma->sc_max_requests * 2) + rdma->sc_recv_batch;
	while (total--) {
		struct svc_rdma_recv_ctxt *ctxt;

		ctxt = svc_rdma_recv_ctxt_alloc(rdma);
		if (!ctxt)
			return false;
		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
	}

	return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests);
}
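The sizing comment above amounts to: two receive contexts per credit (one posted Receive plus one RPC in flight), plus a batch's worth of spares. A one-line sanity check of that arithmetic with made-up numbers (the values are examples, not the kernel defaults):

#include <stdio.h>

int main(void)
{
	unsigned int max_requests = 64;	/* example credit limit */
	unsigned int recv_batch = 8;	/* example Receive batch size */
	unsigned int total = (max_requests * 2) + recv_batch;

	printf("recv_ctxts to allocate: %u\n", total);	/* 136 */
	return 0;
}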
|
||||
|
||||
@ -373,6 +384,10 @@ void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
|
||||
{
|
||||
struct svc_rdma_recv_ctxt *ctxt;
|
||||
|
||||
while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
|
||||
list_del(&ctxt->rc_list);
|
||||
svc_rdma_recv_ctxt_put(rdma, ctxt);
|
||||
}
|
||||
while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
|
||||
list_del(&ctxt->rc_list);
|
||||
svc_rdma_recv_ctxt_put(rdma, ctxt);
|
||||
@ -754,6 +769,122 @@ static bool svc_rdma_is_reverse_direction_reply(struct svc_xprt *xprt,
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Finish constructing the RPC Call message in rqstp::rq_arg.
 *
 * The incoming RPC/RDMA message is an RDMA_MSG type message
 * with a single Read chunk (only the upper layer data payload
 * was conveyed via RDMA Read).
 */
static void svc_rdma_read_complete_one(struct svc_rqst *rqstp,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	struct svc_rdma_chunk *chunk = pcl_first_chunk(&ctxt->rc_read_pcl);
	struct xdr_buf *buf = &rqstp->rq_arg;
	unsigned int length;

	/* Split the Receive buffer between the head and tail
	 * buffers at Read chunk's position. XDR roundup of the
	 * chunk is not included in either the pagelist or in
	 * the tail.
	 */
	buf->tail[0].iov_base = buf->head[0].iov_base + chunk->ch_position;
	buf->tail[0].iov_len = buf->head[0].iov_len - chunk->ch_position;
	buf->head[0].iov_len = chunk->ch_position;

	/* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
	 *
	 * If the client already rounded up the chunk length, the
	 * length does not change. Otherwise, the length of the page
	 * list is increased to include XDR round-up.
	 *
	 * Currently these chunks always start at page offset 0,
	 * thus the rounded-up length never crosses a page boundary.
	 */
	buf->pages = &rqstp->rq_pages[0];
	length = xdr_align_size(chunk->ch_length);
	buf->page_len = length;
	buf->len += length;
	buf->buflen += length;
}
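xdr_align_size() used above rounds a length up to the next XDR quad (4-byte) boundary; RFC 8166 requires the Read chunk payload to be padded that way. A quick stand-alone check of the round-up expression (the helper name is invented; the kernel has its own inline):

#include <stdio.h>

/* Round up to a 4-byte XDR boundary. */
static unsigned int example_xdr_align_size(unsigned int len)
{
	return (len + 3) & ~3u;
}

int main(void)
{
	printf("%u %u %u\n",
	       example_xdr_align_size(5),	/* 8 */
	       example_xdr_align_size(8),	/* 8: already aligned */
	       example_xdr_align_size(0));	/* 0 */
	return 0;
}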
|
||||
|
||||
/* Finish constructing the RPC Call message in rqstp::rq_arg.
|
||||
*
|
||||
* The incoming RPC/RDMA message is an RDMA_MSG type message
|
||||
* with payload in multiple Read chunks and no PZRC.
|
||||
*/
|
||||
static void svc_rdma_read_complete_multiple(struct svc_rqst *rqstp,
|
||||
struct svc_rdma_recv_ctxt *ctxt)
|
||||
{
|
||||
struct xdr_buf *buf = &rqstp->rq_arg;
|
||||
|
||||
buf->len += ctxt->rc_readbytes;
|
||||
buf->buflen += ctxt->rc_readbytes;
|
||||
|
||||
buf->head[0].iov_base = page_address(rqstp->rq_pages[0]);
|
||||
buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, ctxt->rc_readbytes);
|
||||
buf->pages = &rqstp->rq_pages[1];
|
||||
buf->page_len = ctxt->rc_readbytes - buf->head[0].iov_len;
|
||||
}
|
||||
|
||||
/* Finish constructing the RPC Call message in rqstp::rq_arg.
|
||||
*
|
||||
* The incoming RPC/RDMA message is an RDMA_NOMSG type message
|
||||
* (the RPC message body was conveyed via RDMA Read).
|
||||
*/
|
||||
static void svc_rdma_read_complete_pzrc(struct svc_rqst *rqstp,
|
||||
struct svc_rdma_recv_ctxt *ctxt)
|
||||
{
|
||||
struct xdr_buf *buf = &rqstp->rq_arg;
|
||||
|
||||
buf->len += ctxt->rc_readbytes;
|
||||
buf->buflen += ctxt->rc_readbytes;
|
||||
|
||||
buf->head[0].iov_base = page_address(rqstp->rq_pages[0]);
|
||||
buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, ctxt->rc_readbytes);
|
||||
buf->pages = &rqstp->rq_pages[1];
|
||||
buf->page_len = ctxt->rc_readbytes - buf->head[0].iov_len;
|
||||
}
|
||||
|
||||
static noinline void svc_rdma_read_complete(struct svc_rqst *rqstp,
|
||||
struct svc_rdma_recv_ctxt *ctxt)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
/* Transfer the Read chunk pages into @rqstp.rq_pages, replacing
|
||||
* the rq_pages that were already allocated for this rqstp.
|
||||
*/
|
||||
release_pages(rqstp->rq_respages, ctxt->rc_page_count);
|
||||
for (i = 0; i < ctxt->rc_page_count; i++)
|
||||
rqstp->rq_pages[i] = ctxt->rc_pages[i];
|
||||
|
||||
/* Update @rqstp's result send buffer to start after the
|
||||
* last page in the RDMA Read payload.
|
||||
*/
|
||||
rqstp->rq_respages = &rqstp->rq_pages[ctxt->rc_page_count];
|
||||
rqstp->rq_next_page = rqstp->rq_respages + 1;
|
||||
|
||||
/* Prevent svc_rdma_recv_ctxt_put() from releasing the
|
||||
* pages in ctxt::rc_pages a second time.
|
||||
*/
|
||||
ctxt->rc_page_count = 0;
|
||||
|
||||
/* Finish constructing the RPC Call message. The exact
|
||||
* procedure for that depends on what kind of RPC/RDMA
|
||||
* chunks were provided by the client.
|
||||
*/
|
||||
rqstp->rq_arg = ctxt->rc_saved_arg;
|
||||
if (pcl_is_empty(&ctxt->rc_call_pcl)) {
|
||||
if (ctxt->rc_read_pcl.cl_count == 1)
|
||||
svc_rdma_read_complete_one(rqstp, ctxt);
|
||||
else
|
||||
svc_rdma_read_complete_multiple(rqstp, ctxt);
|
||||
} else {
|
||||
svc_rdma_read_complete_pzrc(rqstp, ctxt);
|
||||
}
|
||||
|
||||
trace_svcrdma_read_finished(&ctxt->rc_cid);
|
||||
}
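svc_rdma_read_complete() above moves the Read payload pages into rqstp->rq_pages and then zeroes ctxt->rc_page_count so the later svc_rdma_recv_ctxt_put() cannot release the same pages a second time. A stand-alone sketch of that ownership hand-off (heap buffers stand in for pages; all names are invented):

#include <stdio.h>
#include <stdlib.h>

#define NPAGES 4

struct example_ctxt {
	void		*pages[NPAGES];
	unsigned int	page_count;
};

/* Destructor: frees only the pages the context still owns. */
static void example_ctxt_put(struct example_ctxt *ctxt)
{
	for (unsigned int i = 0; i < ctxt->page_count; i++)
		free(ctxt->pages[i]);
}

int main(void)
{
	struct example_ctxt ctxt = { .page_count = NPAGES };
	void *rq_pages[NPAGES];

	for (unsigned int i = 0; i < NPAGES; i++)
		ctxt.pages[i] = malloc(4096);

	/* Hand the pages to the request ... */
	for (unsigned int i = 0; i < ctxt.page_count; i++)
		rq_pages[i] = ctxt.pages[i];
	/* ... and make sure the context no longer frees them. */
	ctxt.page_count = 0;

	example_ctxt_put(&ctxt);		/* frees nothing */
	for (unsigned int i = 0; i < NPAGES; i++)
		free(rq_pages[i]);		/* the new owner frees them */
	puts("no double free");
	return 0;
}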
|
||||
|
||||
/**
|
||||
* svc_rdma_recvfrom - Receive an RPC call
|
||||
* @rqstp: request structure into which to receive an RPC Call
|
||||
@ -798,8 +929,15 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
|
||||
|
||||
rqstp->rq_xprt_ctxt = NULL;
|
||||
|
||||
ctxt = NULL;
|
||||
spin_lock(&rdma_xprt->sc_rq_dto_lock);
|
||||
ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
|
||||
if (ctxt) {
|
||||
list_del(&ctxt->rc_list);
|
||||
spin_unlock(&rdma_xprt->sc_rq_dto_lock);
|
||||
svc_xprt_received(xprt);
|
||||
svc_rdma_read_complete(rqstp, ctxt);
|
||||
goto complete;
|
||||
}
|
||||
ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
|
||||
if (ctxt)
|
||||
list_del(&ctxt->rc_list);
|
||||
@ -831,12 +969,10 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
|
||||
svc_rdma_get_inv_rkey(rdma_xprt, ctxt);
|
||||
|
||||
if (!pcl_is_empty(&ctxt->rc_read_pcl) ||
|
||||
!pcl_is_empty(&ctxt->rc_call_pcl)) {
|
||||
ret = svc_rdma_process_read_list(rdma_xprt, rqstp, ctxt);
|
||||
if (ret < 0)
|
||||
goto out_readfail;
|
||||
}
|
||||
!pcl_is_empty(&ctxt->rc_call_pcl))
|
||||
goto out_readlist;
|
||||
|
||||
complete:
|
||||
rqstp->rq_xprt_ctxt = ctxt;
|
||||
rqstp->rq_prot = IPPROTO_MAX;
|
||||
svc_xprt_copy_addrs(rqstp, xprt);
|
||||
@ -848,12 +984,23 @@ out_err:
|
||||
svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
|
||||
return 0;
|
||||
|
||||
out_readfail:
|
||||
if (ret == -EINVAL)
|
||||
svc_rdma_send_error(rdma_xprt, ctxt, ret);
|
||||
svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
|
||||
svc_xprt_deferred_close(xprt);
|
||||
return -ENOTCONN;
|
||||
out_readlist:
|
||||
/* This @rqstp is about to be recycled. Save the work
|
||||
* already done constructing the Call message in rq_arg
|
||||
* so it can be restored when the RDMA Reads have
|
||||
* completed.
|
||||
*/
|
||||
ctxt->rc_saved_arg = rqstp->rq_arg;
|
||||
|
||||
ret = svc_rdma_process_read_list(rdma_xprt, rqstp, ctxt);
|
||||
if (ret < 0) {
|
||||
if (ret == -EINVAL)
|
||||
svc_rdma_send_error(rdma_xprt, ctxt, ret);
|
||||
svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
|
||||
svc_xprt_deferred_close(xprt);
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
|
||||
out_backchannel:
|
||||
svc_rdma_handle_bc_reply(rqstp, ctxt);
|
||||
|
@ -39,6 +39,7 @@ struct svc_rdma_rw_ctxt {
|
||||
struct list_head rw_list;
|
||||
struct rdma_rw_ctx rw_ctx;
|
||||
unsigned int rw_nents;
|
||||
unsigned int rw_first_sgl_nents;
|
||||
struct sg_table rw_sg_table;
|
||||
struct scatterlist rw_first_sgl[];
|
||||
};
|
||||
@ -53,6 +54,8 @@ svc_rdma_next_ctxt(struct list_head *list)
|
||||
static struct svc_rdma_rw_ctxt *
|
||||
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
|
||||
{
|
||||
struct ib_device *dev = rdma->sc_cm_id->device;
|
||||
unsigned int first_sgl_nents = dev->attrs.max_send_sge;
|
||||
struct svc_rdma_rw_ctxt *ctxt;
|
||||
struct llist_node *node;
|
||||
|
||||
@ -62,32 +65,33 @@ svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
|
||||
if (node) {
|
||||
ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node);
|
||||
} else {
|
||||
ctxt = kmalloc_node(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
|
||||
GFP_KERNEL, ibdev_to_node(rdma->sc_cm_id->device));
|
||||
ctxt = kmalloc_node(struct_size(ctxt, rw_first_sgl, first_sgl_nents),
|
||||
GFP_KERNEL, ibdev_to_node(dev));
|
||||
if (!ctxt)
|
||||
goto out_noctx;
|
||||
|
||||
INIT_LIST_HEAD(&ctxt->rw_list);
|
||||
ctxt->rw_first_sgl_nents = first_sgl_nents;
|
||||
}
|
||||
|
||||
ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
|
||||
if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
|
||||
ctxt->rw_sg_table.sgl,
|
||||
SG_CHUNK_SIZE))
|
||||
first_sgl_nents))
|
||||
goto out_free;
|
||||
return ctxt;
|
||||
|
||||
out_free:
|
||||
kfree(ctxt);
|
||||
out_noctx:
|
||||
trace_svcrdma_no_rwctx_err(rdma, sges);
|
||||
trace_svcrdma_rwctx_empty(rdma, sges);
|
||||
return NULL;
|
||||
}
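svc_rdma_get_rw_ctxt() above now sizes the trailing scatterlist array from the device's max_send_sge instead of the fixed SG_CHUNK_SIZE, and remembers that size in rw_first_sgl_nents for the later sg_free_table_chained() call. A stand-alone sketch of allocating a struct with a flexible array member sized at run time (struct_size() is open-coded; names are invented):

#include <stdio.h>
#include <stdlib.h>

struct example_rw_ctxt {
	unsigned int	first_sgl_nents;	/* remembered for teardown */
	struct { void *page; unsigned int len; } first_sgl[];
};

static struct example_rw_ctxt *example_get_rw_ctxt(unsigned int max_send_sge)
{
	struct example_rw_ctxt *ctxt;

	/* Equivalent of struct_size(ctxt, first_sgl, max_send_sge). */
	ctxt = malloc(sizeof(*ctxt) + max_send_sge * sizeof(ctxt->first_sgl[0]));
	if (!ctxt)
		return NULL;
	ctxt->first_sgl_nents = max_send_sge;
	return ctxt;
}

int main(void)
{
	struct example_rw_ctxt *ctxt = example_get_rw_ctxt(30);

	if (!ctxt)
		return 1;
	printf("inline SGEs available: %u\n", ctxt->first_sgl_nents);
	free(ctxt);
	return 0;
}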
|
||||
|
||||
static void __svc_rdma_put_rw_ctxt(struct svc_rdma_rw_ctxt *ctxt,
|
||||
struct llist_head *list)
|
||||
{
|
||||
sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);
|
||||
sg_free_table_chained(&ctxt->rw_sg_table, ctxt->rw_first_sgl_nents);
|
||||
llist_add(&ctxt->rw_node, list);
|
||||
}
|
||||
|
||||
@ -135,57 +139,40 @@ static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
|
||||
ctxt->rw_sg_table.sgl, ctxt->rw_nents,
|
||||
0, offset, handle, direction);
|
||||
if (unlikely(ret < 0)) {
|
||||
trace_svcrdma_dma_map_rw_err(rdma, offset, handle,
|
||||
ctxt->rw_nents, ret);
|
||||
svc_rdma_put_rw_ctxt(rdma, ctxt);
|
||||
trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* A chunk context tracks all I/O for moving one Read or Write
|
||||
* chunk. This is a set of rdma_rw's that handle data movement
|
||||
* for all segments of one chunk.
|
||||
*
|
||||
* These are small, acquired with a single allocator call, and
|
||||
* no more than one is needed per chunk. They are allocated on
|
||||
* demand, and not cached.
|
||||
/**
|
||||
* svc_rdma_cc_init - Initialize an svc_rdma_chunk_ctxt
|
||||
* @rdma: controlling transport instance
|
||||
* @cc: svc_rdma_chunk_ctxt to be initialized
|
||||
*/
|
||||
struct svc_rdma_chunk_ctxt {
|
||||
struct rpc_rdma_cid cc_cid;
|
||||
struct ib_cqe cc_cqe;
|
||||
struct svcxprt_rdma *cc_rdma;
|
||||
struct list_head cc_rwctxts;
|
||||
ktime_t cc_posttime;
|
||||
int cc_sqecount;
|
||||
enum ib_wc_status cc_status;
|
||||
struct completion cc_done;
|
||||
};
|
||||
|
||||
static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma,
|
||||
struct rpc_rdma_cid *cid)
|
||||
void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
|
||||
struct svc_rdma_chunk_ctxt *cc)
|
||||
{
|
||||
cid->ci_queue_id = rdma->sc_sq_cq->res.id;
|
||||
cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
|
||||
}
|
||||
struct rpc_rdma_cid *cid = &cc->cc_cid;
|
||||
|
||||
static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
|
||||
struct svc_rdma_chunk_ctxt *cc)
|
||||
{
|
||||
svc_rdma_cc_cid_init(rdma, &cc->cc_cid);
|
||||
cc->cc_rdma = rdma;
|
||||
if (unlikely(!cid->ci_completion_id))
|
||||
svc_rdma_send_cid_init(rdma, cid);
|
||||
|
||||
INIT_LIST_HEAD(&cc->cc_rwctxts);
|
||||
cc->cc_sqecount = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* The consumed rw_ctx's are cleaned and placed on a local llist so
|
||||
* that only one atomic llist operation is needed to put them all
|
||||
* back on the free list.
|
||||
/**
|
||||
* svc_rdma_cc_release - Release resources held by a svc_rdma_chunk_ctxt
|
||||
* @rdma: controlling transport instance
|
||||
* @cc: svc_rdma_chunk_ctxt to be released
|
||||
* @dir: DMA direction
|
||||
*/
|
||||
static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
|
||||
enum dma_data_direction dir)
|
||||
void svc_rdma_cc_release(struct svcxprt_rdma *rdma,
|
||||
struct svc_rdma_chunk_ctxt *cc,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct svcxprt_rdma *rdma = cc->cc_rdma;
|
||||
struct llist_node *first, *last;
|
||||
struct svc_rdma_rw_ctxt *ctxt;
|
||||
LLIST_HEAD(free);
|
||||
@ -215,6 +202,8 @@ static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
|
||||
* - Stores arguments for the SGL constructor functions
|
||||
*/
|
||||
struct svc_rdma_write_info {
|
||||
struct svcxprt_rdma *wi_rdma;
|
||||
|
||||
const struct svc_rdma_chunk *wi_chunk;
|
||||
|
||||
/* write state of this chunk */
|
||||
@ -227,6 +216,7 @@ struct svc_rdma_write_info {
|
||||
unsigned int wi_next_off;
|
||||
|
||||
struct svc_rdma_chunk_ctxt wi_cc;
|
||||
struct work_struct wi_work;
|
||||
};
|
||||
|
||||
static struct svc_rdma_write_info *
|
||||
@ -235,23 +225,31 @@ svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma,
|
||||
{
|
||||
struct svc_rdma_write_info *info;
|
||||
|
||||
info = kmalloc_node(sizeof(*info), GFP_KERNEL,
|
||||
info = kzalloc_node(sizeof(*info), GFP_KERNEL,
|
||||
ibdev_to_node(rdma->sc_cm_id->device));
|
||||
if (!info)
|
||||
return info;
|
||||
|
||||
info->wi_rdma = rdma;
|
||||
info->wi_chunk = chunk;
|
||||
info->wi_seg_off = 0;
|
||||
info->wi_seg_no = 0;
|
||||
svc_rdma_cc_init(rdma, &info->wi_cc);
|
||||
info->wi_cc.cc_cqe.done = svc_rdma_write_done;
|
||||
return info;
|
||||
}
|
||||
|
||||
static void svc_rdma_write_info_free_async(struct work_struct *work)
|
||||
{
|
||||
struct svc_rdma_write_info *info;
|
||||
|
||||
info = container_of(work, struct svc_rdma_write_info, wi_work);
|
||||
svc_rdma_cc_release(info->wi_rdma, &info->wi_cc, DMA_TO_DEVICE);
|
||||
kfree(info);
|
||||
}
|
||||
|
||||
static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
|
||||
{
|
||||
svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
|
||||
kfree(info);
|
||||
INIT_WORK(&info->wi_work, svc_rdma_write_info_free_async);
|
||||
queue_work(svcrdma_wq, &info->wi_work);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -263,16 +261,16 @@ static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
|
||||
*/
|
||||
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
|
||||
{
|
||||
struct svcxprt_rdma *rdma = cq->cq_context;
|
||||
struct ib_cqe *cqe = wc->wr_cqe;
|
||||
struct svc_rdma_chunk_ctxt *cc =
|
||||
container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
|
||||
struct svcxprt_rdma *rdma = cc->cc_rdma;
|
||||
struct svc_rdma_write_info *info =
|
||||
container_of(cc, struct svc_rdma_write_info, wi_cc);
|
||||
|
||||
switch (wc->status) {
|
||||
case IB_WC_SUCCESS:
|
||||
trace_svcrdma_wc_write(wc, &cc->cc_cid);
|
||||
trace_svcrdma_wc_write(&cc->cc_cid);
|
||||
break;
|
||||
case IB_WC_WR_FLUSH_ERR:
|
||||
trace_svcrdma_wc_write_flush(wc, &cc->cc_cid);
|
||||
@ -289,39 +287,6 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
|
||||
svc_rdma_write_info_free(info);
|
||||
}
|
||||
|
||||
/* State for pulling a Read chunk.
|
||||
*/
|
||||
struct svc_rdma_read_info {
|
||||
struct svc_rqst *ri_rqst;
|
||||
struct svc_rdma_recv_ctxt *ri_readctxt;
|
||||
unsigned int ri_pageno;
|
||||
unsigned int ri_pageoff;
|
||||
unsigned int ri_totalbytes;
|
||||
|
||||
struct svc_rdma_chunk_ctxt ri_cc;
|
||||
};
|
||||
|
||||
static struct svc_rdma_read_info *
|
||||
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
|
||||
{
|
||||
struct svc_rdma_read_info *info;
|
||||
|
||||
info = kmalloc_node(sizeof(*info), GFP_KERNEL,
|
||||
ibdev_to_node(rdma->sc_cm_id->device));
|
||||
if (!info)
|
||||
return info;
|
||||
|
||||
svc_rdma_cc_init(rdma, &info->ri_cc);
|
||||
info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
|
||||
return info;
|
||||
}
|
||||
|
||||
static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
|
||||
{
|
||||
svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
|
||||
kfree(info);
|
||||
}
|
||||
|
||||
/**
|
||||
* svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
|
||||
* @cq: controlling Completion Queue
|
||||
@ -330,17 +295,27 @@ static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
|
||||
*/
|
||||
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
|
||||
{
|
||||
struct svcxprt_rdma *rdma = cq->cq_context;
|
||||
struct ib_cqe *cqe = wc->wr_cqe;
|
||||
struct svc_rdma_chunk_ctxt *cc =
|
||||
container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
|
||||
struct svc_rdma_read_info *info;
|
||||
struct svc_rdma_recv_ctxt *ctxt;
|
||||
|
||||
svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
|
||||
|
||||
ctxt = container_of(cc, struct svc_rdma_recv_ctxt, rc_cc);
|
||||
switch (wc->status) {
|
||||
case IB_WC_SUCCESS:
|
||||
info = container_of(cc, struct svc_rdma_read_info, ri_cc);
|
||||
trace_svcrdma_wc_read(wc, &cc->cc_cid, info->ri_totalbytes,
|
||||
trace_svcrdma_wc_read(wc, &cc->cc_cid, ctxt->rc_readbytes,
|
||||
cc->cc_posttime);
|
||||
break;
|
||||
|
||||
spin_lock(&rdma->sc_rq_dto_lock);
|
||||
list_add_tail(&ctxt->rc_list, &rdma->sc_read_complete_q);
|
||||
/* the unlock pairs with the smp_rmb in svc_xprt_ready */
|
||||
set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
|
||||
spin_unlock(&rdma->sc_rq_dto_lock);
|
||||
svc_xprt_enqueue(&rdma->sc_xprt);
|
||||
return;
|
||||
case IB_WC_WR_FLUSH_ERR:
|
||||
trace_svcrdma_wc_read_flush(wc, &cc->cc_cid);
|
||||
break;
|
||||
@ -348,10 +323,13 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
|
||||
trace_svcrdma_wc_read_err(wc, &cc->cc_cid);
|
||||
}
|
||||
|
||||
svc_rdma_wake_send_waiters(cc->cc_rdma, cc->cc_sqecount);
|
||||
cc->cc_status = wc->status;
|
||||
complete(&cc->cc_done);
|
||||
return;
|
||||
/* The RDMA Read has flushed, so the incoming RPC message
|
||||
* cannot be constructed and must be dropped. Signal the
|
||||
* loss to the client by closing the connection.
|
||||
*/
|
||||
svc_rdma_cc_release(rdma, cc, DMA_FROM_DEVICE);
|
||||
svc_rdma_recv_ctxt_put(rdma, ctxt);
|
||||
svc_xprt_deferred_close(&rdma->sc_xprt);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -360,9 +338,9 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
|
||||
* even if one or more WRs are flushed. This is true when posting
|
||||
* an rdma_rw_ctx or when posting a single signaled WR.
|
||||
*/
|
||||
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
|
||||
static int svc_rdma_post_chunk_ctxt(struct svcxprt_rdma *rdma,
|
||||
struct svc_rdma_chunk_ctxt *cc)
|
||||
{
|
||||
struct svcxprt_rdma *rdma = cc->cc_rdma;
|
||||
struct ib_send_wr *first_wr;
|
||||
const struct ib_send_wr *bad_wr;
|
||||
struct list_head *tmp;
|
||||
@ -396,14 +374,14 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
|
||||
}
|
||||
|
||||
percpu_counter_inc(&svcrdma_stat_sq_starve);
|
||||
trace_svcrdma_sq_full(rdma);
|
||||
trace_svcrdma_sq_full(rdma, &cc->cc_cid);
|
||||
atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
|
||||
wait_event(rdma->sc_send_wait,
|
||||
atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
|
||||
trace_svcrdma_sq_retry(rdma);
|
||||
trace_svcrdma_sq_retry(rdma, &cc->cc_cid);
|
||||
} while (1);
|
||||
|
||||
trace_svcrdma_sq_post_err(rdma, ret);
|
||||
trace_svcrdma_sq_post_err(rdma, &cc->cc_cid, ret);
|
||||
svc_xprt_deferred_close(&rdma->sc_xprt);
|
||||
|
||||
/* If even one was posted, there will be a completion. */
|
||||
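The loop above implements a simple credit scheme for the Send Queue: subtract the number of SQEs the chunk needs from sc_sq_avail, and if that would go negative, give the credits back and sleep until completions return enough room. A stand-alone sketch of the reserve-or-fail step with C11 atomics (the wait/wakeup side is omitted; names are invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int example_sq_avail;

/* Try to reserve @sqecount Send Queue entries. On failure the credits
 * are returned and the caller would wait for completions, then retry.
 */
static bool example_sq_reserve(int sqecount)
{
	if (atomic_fetch_sub(&example_sq_avail, sqecount) - sqecount < 0) {
		atomic_fetch_add(&example_sq_avail, sqecount);
		return false;		/* "sq_full": wait and retry */
	}
	return true;
}

int main(void)
{
	atomic_store(&example_sq_avail, 4);
	printf("reserve 3: %s\n", example_sq_reserve(3) ? "ok" : "full");
	printf("reserve 3: %s\n", example_sq_reserve(3) ? "ok" : "full");
	return 0;
}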
@ -473,7 +451,7 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
|
||||
unsigned int remaining)
|
||||
{
|
||||
struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
|
||||
struct svcxprt_rdma *rdma = cc->cc_rdma;
|
||||
struct svcxprt_rdma *rdma = info->wi_rdma;
|
||||
const struct svc_rdma_segment *seg;
|
||||
struct svc_rdma_rw_ctxt *ctxt;
|
||||
int ret;
|
||||
@ -516,7 +494,7 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
|
||||
return 0;
|
||||
|
||||
out_overflow:
|
||||
trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no,
|
||||
trace_svcrdma_small_wrch_err(&cc->cc_cid, remaining, info->wi_seg_no,
|
||||
info->wi_chunk->ch_segcount);
|
||||
return -E2BIG;
|
||||
}
|
||||
@ -633,7 +611,7 @@ int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
|
||||
goto out_err;
|
||||
|
||||
trace_svcrdma_post_write_chunk(&cc->cc_cid, cc->cc_sqecount);
|
||||
ret = svc_rdma_post_chunk_ctxt(cc);
|
||||
ret = svc_rdma_post_chunk_ctxt(rdma, cc);
|
||||
if (ret < 0)
|
||||
goto out_err;
|
||||
return xdr->len;
|
||||
@ -680,7 +658,7 @@ int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
|
||||
goto out_err;
|
||||
|
||||
trace_svcrdma_post_reply_chunk(&cc->cc_cid, cc->cc_sqecount);
|
||||
ret = svc_rdma_post_chunk_ctxt(cc);
|
||||
ret = svc_rdma_post_chunk_ctxt(rdma, cc);
|
||||
if (ret < 0)
|
||||
goto out_err;
|
||||
|
||||
@ -693,7 +671,8 @@ out_err:
|
||||
|
||||
/**
|
||||
* svc_rdma_build_read_segment - Build RDMA Read WQEs to pull one RDMA segment
|
||||
* @info: context for ongoing I/O
|
||||
* @rqstp: RPC transaction context
|
||||
* @head: context for ongoing I/O
|
||||
* @segment: co-ordinates of remote memory to be read
|
||||
*
|
||||
* Returns:
|
||||
@ -702,20 +681,20 @@ out_err:
|
||||
* %-ENOMEM: allocating a local resources failed
|
||||
* %-EIO: a DMA mapping error occurred
|
||||
*/
|
||||
static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
|
||||
static int svc_rdma_build_read_segment(struct svc_rqst *rqstp,
|
||||
struct svc_rdma_recv_ctxt *head,
|
||||
const struct svc_rdma_segment *segment)
|
||||
{
|
||||
struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
|
||||
struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
|
||||
struct svc_rqst *rqstp = info->ri_rqst;
|
||||
struct svcxprt_rdma *rdma = svc_rdma_rqst_rdma(rqstp);
|
||||
struct svc_rdma_chunk_ctxt *cc = &head->rc_cc;
|
||||
unsigned int sge_no, seg_len, len;
|
||||
struct svc_rdma_rw_ctxt *ctxt;
|
||||
struct scatterlist *sg;
|
||||
int ret;
|
||||
|
||||
len = segment->rs_length;
|
||||
sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
|
||||
ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
|
||||
sge_no = PAGE_ALIGN(head->rc_pageoff + len) >> PAGE_SHIFT;
|
||||
ctxt = svc_rdma_get_rw_ctxt(rdma, sge_no);
|
||||
if (!ctxt)
|
||||
return -ENOMEM;
|
||||
ctxt->rw_nents = sge_no;
|
||||
@ -723,29 +702,27 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
|
||||
sg = ctxt->rw_sg_table.sgl;
|
||||
for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
|
||||
seg_len = min_t(unsigned int, len,
|
||||
PAGE_SIZE - info->ri_pageoff);
|
||||
PAGE_SIZE - head->rc_pageoff);
|
||||
|
||||
if (!info->ri_pageoff)
|
||||
if (!head->rc_pageoff)
|
||||
head->rc_page_count++;
|
||||
|
||||
sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
|
||||
seg_len, info->ri_pageoff);
|
||||
sg_set_page(sg, rqstp->rq_pages[head->rc_curpage],
|
||||
seg_len, head->rc_pageoff);
|
||||
sg = sg_next(sg);
|
||||
|
||||
info->ri_pageoff += seg_len;
|
||||
if (info->ri_pageoff == PAGE_SIZE) {
|
||||
info->ri_pageno++;
|
||||
info->ri_pageoff = 0;
|
||||
head->rc_pageoff += seg_len;
|
||||
if (head->rc_pageoff == PAGE_SIZE) {
|
||||
head->rc_curpage++;
|
||||
head->rc_pageoff = 0;
|
||||
}
|
||||
len -= seg_len;
|
||||
|
||||
/* Safety check */
|
||||
if (len &&
|
||||
&rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
|
||||
if (len && ((head->rc_curpage + 1) > ARRAY_SIZE(rqstp->rq_pages)))
|
||||
goto out_overrun;
|
||||
}
|
||||
|
||||
ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, segment->rs_offset,
|
||||
ret = svc_rdma_rw_ctx_init(rdma, ctxt, segment->rs_offset,
|
||||
segment->rs_handle, DMA_FROM_DEVICE);
|
||||
if (ret < 0)
|
||||
return -EIO;
|
||||
@ -756,13 +733,14 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
|
||||
return 0;
|
||||
|
||||
out_overrun:
|
||||
trace_svcrdma_page_overrun_err(cc->cc_rdma, rqstp, info->ri_pageno);
|
||||
trace_svcrdma_page_overrun_err(&cc->cc_cid, head->rc_curpage);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/**
|
||||
* svc_rdma_build_read_chunk - Build RDMA Read WQEs to pull one RDMA chunk
|
||||
* @info: context for ongoing I/O
|
||||
* @rqstp: RPC transaction context
|
||||
* @head: context for ongoing I/O
|
||||
* @chunk: Read chunk to pull
|
||||
*
|
||||
* Return values:
|
||||
@ -771,7 +749,8 @@ out_overrun:
|
||||
* %-ENOMEM: allocating local resources failed
|
||||
* %-EIO: a DMA mapping error occurred
|
||||
*/
|
||||
static int svc_rdma_build_read_chunk(struct svc_rdma_read_info *info,
|
||||
static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
|
||||
struct svc_rdma_recv_ctxt *head,
|
||||
const struct svc_rdma_chunk *chunk)
|
||||
{
|
||||
const struct svc_rdma_segment *segment;
|
||||
@ -779,56 +758,56 @@ static int svc_rdma_build_read_chunk(struct svc_rdma_read_info *info,
|
||||
|
||||
ret = -EINVAL;
|
||||
pcl_for_each_segment(segment, chunk) {
|
||||
ret = svc_rdma_build_read_segment(info, segment);
|
||||
ret = svc_rdma_build_read_segment(rqstp, head, segment);
|
||||
if (ret < 0)
|
||||
break;
|
||||
info->ri_totalbytes += segment->rs_length;
|
||||
head->rc_readbytes += segment->rs_length;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* svc_rdma_copy_inline_range - Copy part of the inline content into pages
|
||||
* @info: context for RDMA Reads
|
||||
* @rqstp: RPC transaction context
|
||||
* @head: context for ongoing I/O
|
||||
* @offset: offset into the Receive buffer of region to copy
|
||||
* @remaining: length of region to copy
|
||||
*
|
||||
* Take a page at a time from rqstp->rq_pages and copy the inline
|
||||
* content from the Receive buffer into that page. Update
|
||||
* info->ri_pageno and info->ri_pageoff so that the next RDMA Read
|
||||
* head->rc_curpage and head->rc_pageoff so that the next RDMA Read
|
||||
* result will land contiguously with the copied content.
|
||||
*
|
||||
* Return values:
|
||||
* %0: Inline content was successfully copied
|
||||
* %-EINVAL: offset or length was incorrect
|
||||
*/
|
||||
static int svc_rdma_copy_inline_range(struct svc_rdma_read_info *info,
|
||||
static int svc_rdma_copy_inline_range(struct svc_rqst *rqstp,
|
||||
struct svc_rdma_recv_ctxt *head,
|
||||
unsigned int offset,
|
||||
unsigned int remaining)
|
||||
{
|
||||
struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
|
||||
unsigned char *dst, *src = head->rc_recv_buf;
|
||||
struct svc_rqst *rqstp = info->ri_rqst;
|
||||
unsigned int page_no, numpages;
|
||||
|
||||
numpages = PAGE_ALIGN(info->ri_pageoff + remaining) >> PAGE_SHIFT;
|
||||
numpages = PAGE_ALIGN(head->rc_pageoff + remaining) >> PAGE_SHIFT;
|
||||
for (page_no = 0; page_no < numpages; page_no++) {
|
||||
unsigned int page_len;
|
||||
|
||||
page_len = min_t(unsigned int, remaining,
|
||||
PAGE_SIZE - info->ri_pageoff);
|
||||
PAGE_SIZE - head->rc_pageoff);
|
||||
|
||||
if (!info->ri_pageoff)
|
||||
if (!head->rc_pageoff)
|
||||
head->rc_page_count++;
|
||||
|
||||
dst = page_address(rqstp->rq_pages[info->ri_pageno]);
|
||||
memcpy(dst + info->ri_pageno, src + offset, page_len);
|
||||
dst = page_address(rqstp->rq_pages[head->rc_curpage]);
|
||||
memcpy(dst + head->rc_curpage, src + offset, page_len);
|
||||
|
||||
info->ri_totalbytes += page_len;
|
||||
info->ri_pageoff += page_len;
|
||||
if (info->ri_pageoff == PAGE_SIZE) {
|
||||
info->ri_pageno++;
|
||||
info->ri_pageoff = 0;
|
||||
head->rc_readbytes += page_len;
|
||||
head->rc_pageoff += page_len;
|
||||
if (head->rc_pageoff == PAGE_SIZE) {
|
||||
head->rc_curpage++;
|
||||
head->rc_pageoff = 0;
|
||||
}
|
||||
remaining -= page_len;
|
||||
offset += page_len;
|
||||
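svc_rdma_copy_inline_range() above walks the destination a page at a time: copy up to the end of the current page, advance rc_pageoff, and move to the next page when the offset wraps. A stand-alone sketch of that page-walking copy (fixed 4 KiB "pages" allocated from the heap; names and sizes are invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EX_PAGE_SIZE 4096u

int main(void)
{
	unsigned char src[10000], *pages[4];
	unsigned int curpage = 0, pageoff = 0;
	unsigned int offset = 0, remaining = sizeof(src);

	memset(src, 0xab, sizeof(src));
	for (unsigned int i = 0; i < 4; i++)
		pages[i] = malloc(EX_PAGE_SIZE);

	while (remaining) {
		unsigned int len = remaining;

		if (len > EX_PAGE_SIZE - pageoff)
			len = EX_PAGE_SIZE - pageoff;	/* stop at page end */
		memcpy(pages[curpage] + pageoff, src + offset, len);
		pageoff += len;
		if (pageoff == EX_PAGE_SIZE) {		/* advance to next page */
			curpage++;
			pageoff = 0;
		}
		offset += len;
		remaining -= len;
	}
	printf("ended on page %u at offset %u\n", curpage, pageoff);

	for (unsigned int i = 0; i < 4; i++)
		free(pages[i]);
	return 0;
}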
@ -839,7 +818,8 @@ static int svc_rdma_copy_inline_range(struct svc_rdma_read_info *info,
|
||||
|
||||
/**
|
||||
* svc_rdma_read_multiple_chunks - Construct RDMA Reads to pull data item Read chunks
|
||||
* @info: context for RDMA Reads
|
||||
* @rqstp: RPC transaction context
|
||||
* @head: context for ongoing I/O
|
||||
*
|
||||
* The chunk data lands in rqstp->rq_arg as a series of contiguous pages,
|
||||
* like an incoming TCP call.
|
||||
@ -851,11 +831,11 @@ static int svc_rdma_copy_inline_range(struct svc_rdma_read_info *info,
|
||||
* %-ENOTCONN: posting failed (connection is lost),
|
||||
* %-EIO: rdma_rw initialization failed (DMA mapping, etc).
|
||||
*/
|
||||
static noinline int svc_rdma_read_multiple_chunks(struct svc_rdma_read_info *info)
|
||||
static noinline int
|
||||
svc_rdma_read_multiple_chunks(struct svc_rqst *rqstp,
|
||||
struct svc_rdma_recv_ctxt *head)
|
||||
{
|
||||
struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
|
||||
const struct svc_rdma_pcl *pcl = &head->rc_read_pcl;
|
||||
struct xdr_buf *buf = &info->ri_rqst->rq_arg;
|
||||
struct svc_rdma_chunk *chunk, *next;
|
||||
unsigned int start, length;
|
||||
int ret;
|
||||
@ -863,12 +843,12 @@ static noinline int svc_rdma_read_multiple_chunks(struct svc_rdma_read_info *inf
|
||||
start = 0;
|
||||
chunk = pcl_first_chunk(pcl);
|
||||
length = chunk->ch_position;
|
||||
ret = svc_rdma_copy_inline_range(info, start, length);
|
||||
ret = svc_rdma_copy_inline_range(rqstp, head, start, length);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
pcl_for_each_chunk(chunk, pcl) {
|
||||
ret = svc_rdma_build_read_chunk(info, chunk);
|
||||
ret = svc_rdma_build_read_chunk(rqstp, head, chunk);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@ -877,31 +857,21 @@ static noinline int svc_rdma_read_multiple_chunks(struct svc_rdma_read_info *inf
|
||||
break;
|
||||
|
||||
start += length;
|
||||
length = next->ch_position - info->ri_totalbytes;
|
||||
ret = svc_rdma_copy_inline_range(info, start, length);
|
||||
length = next->ch_position - head->rc_readbytes;
|
||||
ret = svc_rdma_copy_inline_range(rqstp, head, start, length);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
start += length;
|
||||
length = head->rc_byte_len - start;
|
||||
ret = svc_rdma_copy_inline_range(info, start, length);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
buf->len += info->ri_totalbytes;
|
||||
buf->buflen += info->ri_totalbytes;
|
||||
|
||||
buf->head[0].iov_base = page_address(info->ri_rqst->rq_pages[0]);
|
||||
buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, info->ri_totalbytes);
|
||||
buf->pages = &info->ri_rqst->rq_pages[1];
|
||||
buf->page_len = info->ri_totalbytes - buf->head[0].iov_len;
|
||||
return 0;
|
||||
return svc_rdma_copy_inline_range(rqstp, head, start, length);
|
||||
}
|
||||
|
||||
/**
|
||||
* svc_rdma_read_data_item - Construct RDMA Reads to pull data item Read chunks
|
||||
* @info: context for RDMA Reads
|
||||
* @rqstp: RPC transaction context
|
||||
* @head: context for ongoing I/O
|
||||
*
|
||||
* The chunk data lands in the page list of rqstp->rq_arg.pages.
|
||||
*
|
||||
@ -916,50 +886,17 @@ static noinline int svc_rdma_read_multiple_chunks(struct svc_rdma_read_info *inf
|
||||
* %-ENOTCONN: posting failed (connection is lost),
|
||||
* %-EIO: rdma_rw initialization failed (DMA mapping, etc).
|
||||
*/
|
||||
static int svc_rdma_read_data_item(struct svc_rdma_read_info *info)
|
||||
static int svc_rdma_read_data_item(struct svc_rqst *rqstp,
|
||||
struct svc_rdma_recv_ctxt *head)
|
||||
{
|
||||
struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
|
||||
struct xdr_buf *buf = &info->ri_rqst->rq_arg;
|
||||
struct svc_rdma_chunk *chunk;
|
||||
unsigned int length;
|
||||
int ret;
|
||||
|
||||
chunk = pcl_first_chunk(&head->rc_read_pcl);
|
||||
ret = svc_rdma_build_read_chunk(info, chunk);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
/* Split the Receive buffer between the head and tail
|
||||
* buffers at Read chunk's position. XDR roundup of the
|
||||
* chunk is not included in either the pagelist or in
|
||||
* the tail.
|
||||
*/
|
||||
buf->tail[0].iov_base = buf->head[0].iov_base + chunk->ch_position;
|
||||
buf->tail[0].iov_len = buf->head[0].iov_len - chunk->ch_position;
|
||||
buf->head[0].iov_len = chunk->ch_position;
|
||||
|
||||
/* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
|
||||
*
|
||||
* If the client already rounded up the chunk length, the
|
||||
* length does not change. Otherwise, the length of the page
|
||||
* list is increased to include XDR round-up.
|
||||
*
|
||||
* Currently these chunks always start at page offset 0,
|
||||
* thus the rounded-up length never crosses a page boundary.
|
||||
*/
|
||||
buf->pages = &info->ri_rqst->rq_pages[0];
|
||||
length = xdr_align_size(chunk->ch_length);
|
||||
buf->page_len = length;
|
||||
buf->len += length;
|
||||
buf->buflen += length;
|
||||
|
||||
out:
|
||||
return ret;
|
||||
return svc_rdma_build_read_chunk(rqstp, head,
|
||||
pcl_first_chunk(&head->rc_read_pcl));
|
||||
}
|
||||
|
||||
/**
|
||||
* svc_rdma_read_chunk_range - Build RDMA Read WQEs for portion of a chunk
|
||||
* @info: context for RDMA Reads
|
||||
* svc_rdma_read_chunk_range - Build RDMA Read WRs for portion of a chunk
|
||||
* @rqstp: RPC transaction context
|
||||
* @head: context for ongoing I/O
|
||||
* @chunk: parsed Call chunk to pull
|
||||
* @offset: offset of region to pull
|
||||
* @length: length of region to pull
|
||||
@ -971,7 +908,8 @@ out:
|
||||
* %-ENOTCONN: posting failed (connection is lost),
|
||||
* %-EIO: rdma_rw initialization failed (DMA mapping, etc).
|
||||
*/
|
||||
static int svc_rdma_read_chunk_range(struct svc_rdma_read_info *info,
|
||||
static int svc_rdma_read_chunk_range(struct svc_rqst *rqstp,
|
||||
struct svc_rdma_recv_ctxt *head,
|
||||
const struct svc_rdma_chunk *chunk,
|
||||
unsigned int offset, unsigned int length)
|
||||
{
|
||||
@ -991,11 +929,11 @@ static int svc_rdma_read_chunk_range(struct svc_rdma_read_info *info,
|
||||
dummy.rs_length = min_t(u32, length, segment->rs_length) - offset;
|
||||
dummy.rs_offset = segment->rs_offset + offset;
|
||||
|
||||
ret = svc_rdma_build_read_segment(info, &dummy);
|
||||
ret = svc_rdma_build_read_segment(rqstp, head, &dummy);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
info->ri_totalbytes += dummy.rs_length;
|
||||
head->rc_readbytes += dummy.rs_length;
|
||||
length -= dummy.rs_length;
|
||||
offset = 0;
|
||||
}
|
||||
@ -1004,7 +942,8 @@ static int svc_rdma_read_chunk_range(struct svc_rdma_read_info *info,
|
||||
|
||||
/**
|
||||
* svc_rdma_read_call_chunk - Build RDMA Read WQEs to pull a Long Message
|
||||
* @info: context for RDMA Reads
|
||||
* @rqstp: RPC transaction context
|
||||
* @head: context for ongoing I/O
|
||||
*
|
||||
* Return values:
|
||||
* %0: RDMA Read WQEs were successfully built
|
||||
@ -1013,9 +952,9 @@ static int svc_rdma_read_chunk_range(struct svc_rdma_read_info *info,
|
||||
* %-ENOTCONN: posting failed (connection is lost),
|
||||
* %-EIO: rdma_rw initialization failed (DMA mapping, etc).
|
||||
*/
|
||||
static int svc_rdma_read_call_chunk(struct svc_rdma_read_info *info)
|
||||
static int svc_rdma_read_call_chunk(struct svc_rqst *rqstp,
|
||||
struct svc_rdma_recv_ctxt *head)
|
||||
{
|
||||
struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
|
||||
const struct svc_rdma_chunk *call_chunk =
|
||||
pcl_first_chunk(&head->rc_call_pcl);
|
||||
const struct svc_rdma_pcl *pcl = &head->rc_read_pcl;
|
||||
@ -1024,17 +963,18 @@ static int svc_rdma_read_call_chunk(struct svc_rdma_read_info *info)
|
||||
int ret;
|
||||
|
||||
if (pcl_is_empty(pcl))
|
||||
return svc_rdma_build_read_chunk(info, call_chunk);
|
||||
return svc_rdma_build_read_chunk(rqstp, head, call_chunk);
|
||||
|
||||
start = 0;
|
||||
chunk = pcl_first_chunk(pcl);
|
||||
length = chunk->ch_position;
|
||||
ret = svc_rdma_read_chunk_range(info, call_chunk, start, length);
|
||||
ret = svc_rdma_read_chunk_range(rqstp, head, call_chunk,
|
||||
start, length);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
pcl_for_each_chunk(chunk, pcl) {
|
||||
ret = svc_rdma_build_read_chunk(info, chunk);
|
||||
ret = svc_rdma_build_read_chunk(rqstp, head, chunk);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@ -1043,8 +983,8 @@ static int svc_rdma_read_call_chunk(struct svc_rdma_read_info *info)
|
||||
break;
|
||||
|
||||
start += length;
|
||||
length = next->ch_position - info->ri_totalbytes;
|
||||
ret = svc_rdma_read_chunk_range(info, call_chunk,
|
||||
length = next->ch_position - head->rc_readbytes;
|
||||
ret = svc_rdma_read_chunk_range(rqstp, head, call_chunk,
|
||||
start, length);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
@ -1052,12 +992,14 @@ static int svc_rdma_read_call_chunk(struct svc_rdma_read_info *info)
|
||||
|
||||
start += length;
|
||||
length = call_chunk->ch_length - start;
|
||||
return svc_rdma_read_chunk_range(info, call_chunk, start, length);
|
||||
return svc_rdma_read_chunk_range(rqstp, head, call_chunk,
|
||||
start, length);
|
||||
}
|
||||
|
||||
/**
|
||||
* svc_rdma_read_special - Build RDMA Read WQEs to pull a Long Message
|
||||
* @info: context for RDMA Reads
|
||||
* @rqstp: RPC transaction context
|
||||
* @head: context for ongoing I/O
|
||||
*
|
||||
* The start of the data lands in the first page just after the
|
||||
* Transport header, and the rest lands in rqstp->rq_arg.pages.
|
||||
@ -1073,25 +1015,31 @@ static int svc_rdma_read_call_chunk(struct svc_rdma_read_info *info)
|
||||
* %-ENOTCONN: posting failed (connection is lost),
|
||||
* %-EIO: rdma_rw initialization failed (DMA mapping, etc).
|
||||
*/
|
||||
static noinline int svc_rdma_read_special(struct svc_rdma_read_info *info)
|
||||
static noinline int svc_rdma_read_special(struct svc_rqst *rqstp,
|
||||
struct svc_rdma_recv_ctxt *head)
|
||||
{
|
||||
struct xdr_buf *buf = &info->ri_rqst->rq_arg;
|
||||
int ret;
|
||||
return svc_rdma_read_call_chunk(rqstp, head);
|
||||
}
|
||||
|
||||
ret = svc_rdma_read_call_chunk(info);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
/* Pages under I/O have been copied to head->rc_pages. Ensure that
|
||||
* svc_xprt_release() does not put them when svc_rdma_recvfrom()
|
||||
* returns. This has to be done after all Read WRs are constructed
|
||||
* to properly handle a page that happens to be part of I/O on behalf
|
||||
* of two different RDMA segments.
|
||||
*
|
||||
* Note: if the subsequent post_send fails, these pages have already
|
||||
* been moved to head->rc_pages and thus will be cleaned up by
|
||||
* svc_rdma_recv_ctxt_put().
|
||||
*/
|
||||
static void svc_rdma_clear_rqst_pages(struct svc_rqst *rqstp,
|
||||
struct svc_rdma_recv_ctxt *head)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
buf->len += info->ri_totalbytes;
|
||||
buf->buflen += info->ri_totalbytes;
|
||||
|
||||
buf->head[0].iov_base = page_address(info->ri_rqst->rq_pages[0]);
|
||||
buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, info->ri_totalbytes);
|
||||
buf->pages = &info->ri_rqst->rq_pages[1];
|
||||
buf->page_len = info->ri_totalbytes - buf->head[0].iov_len;
|
||||
|
||||
out:
|
||||
return ret;
|
||||
for (i = 0; i < head->rc_page_count; i++) {
|
||||
head->rc_pages[i] = rqstp->rq_pages[i];
|
||||
rqstp->rq_pages[i] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1121,49 +1069,27 @@ int svc_rdma_process_read_list(struct svcxprt_rdma *rdma,
|
||||
struct svc_rqst *rqstp,
|
||||
struct svc_rdma_recv_ctxt *head)
|
||||
{
|
||||
struct svc_rdma_read_info *info;
|
||||
struct svc_rdma_chunk_ctxt *cc;
|
||||
struct svc_rdma_chunk_ctxt *cc = &head->rc_cc;
|
||||
int ret;
|
||||
|
||||
info = svc_rdma_read_info_alloc(rdma);
|
||||
if (!info)
|
||||
return -ENOMEM;
|
||||
cc = &info->ri_cc;
|
||||
info->ri_rqst = rqstp;
|
||||
info->ri_readctxt = head;
|
||||
info->ri_pageno = 0;
|
||||
info->ri_pageoff = 0;
|
||||
info->ri_totalbytes = 0;
|
||||
cc->cc_cqe.done = svc_rdma_wc_read_done;
|
||||
cc->cc_sqecount = 0;
|
||||
head->rc_pageoff = 0;
|
||||
head->rc_curpage = 0;
|
||||
head->rc_readbytes = 0;
|
||||
|
||||
if (pcl_is_empty(&head->rc_call_pcl)) {
|
||||
if (head->rc_read_pcl.cl_count == 1)
|
||||
ret = svc_rdma_read_data_item(info);
|
||||
ret = svc_rdma_read_data_item(rqstp, head);
|
||||
else
|
||||
ret = svc_rdma_read_multiple_chunks(info);
|
||||
ret = svc_rdma_read_multiple_chunks(rqstp, head);
|
||||
} else
|
||||
ret = svc_rdma_read_special(info);
|
||||
ret = svc_rdma_read_special(rqstp, head);
|
||||
svc_rdma_clear_rqst_pages(rqstp, head);
|
||||
if (ret < 0)
|
||||
goto out_err;
|
||||
return ret;
|
||||
|
||||
trace_svcrdma_post_read_chunk(&cc->cc_cid, cc->cc_sqecount);
|
||||
init_completion(&cc->cc_done);
|
||||
ret = svc_rdma_post_chunk_ctxt(cc);
|
||||
if (ret < 0)
|
||||
goto out_err;
|
||||
|
||||
ret = 1;
|
||||
wait_for_completion(&cc->cc_done);
|
||||
if (cc->cc_status != IB_WC_SUCCESS)
|
||||
ret = -EIO;
|
||||
|
||||
/* rq_respages starts after the last arg page */
|
||||
rqstp->rq_respages = &rqstp->rq_pages[head->rc_page_count];
|
||||
rqstp->rq_next_page = rqstp->rq_respages + 1;
|
||||
|
||||
/* Ensure svc_rdma_recv_ctxt_put() does not try to release pages */
|
||||
head->rc_page_count = 0;
|
||||
|
||||
out_err:
|
||||
svc_rdma_read_info_free(info);
|
||||
return ret;
|
||||
ret = svc_rdma_post_chunk_ctxt(rdma, cc);
|
||||
return ret < 0 ? ret : 1;
|
||||
}
|
||||
|
@ -113,13 +113,6 @@
|
||||
|
||||
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);
|
||||
|
||||
static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
|
||||
struct rpc_rdma_cid *cid)
|
||||
{
|
||||
cid->ci_queue_id = rdma->sc_sq_cq->res.id;
|
||||
cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
|
||||
}
|
||||
|
||||
static struct svc_rdma_send_ctxt *
|
||||
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
|
||||
{
|
||||
@ -129,7 +122,7 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
|
||||
void *buffer;
|
||||
int i;
|
||||
|
||||
ctxt = kmalloc_node(struct_size(ctxt, sc_sges, rdma->sc_max_send_sges),
|
||||
ctxt = kzalloc_node(struct_size(ctxt, sc_sges, rdma->sc_max_send_sges),
|
||||
GFP_KERNEL, node);
|
||||
if (!ctxt)
|
||||
goto fail0;
|
||||
@ -143,6 +136,7 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
|
||||
|
||||
svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);
|
||||
|
||||
ctxt->sc_rdma = rdma;
|
||||
ctxt->sc_send_wr.next = NULL;
|
||||
ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
|
||||
ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
|
||||
@ -200,10 +194,11 @@ struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
|
||||
|
||||
spin_lock(&rdma->sc_send_lock);
|
||||
node = llist_del_first(&rdma->sc_send_ctxts);
|
||||
spin_unlock(&rdma->sc_send_lock);
|
||||
if (!node)
|
||||
goto out_empty;
|
||||
|
||||
ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
|
||||
spin_unlock(&rdma->sc_send_lock);
|
||||
|
||||
out:
|
||||
rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
|
||||
@ -216,13 +211,45 @@ out:
|
||||
return ctxt;
|
||||
|
||||
out_empty:
|
||||
spin_unlock(&rdma->sc_send_lock);
|
||||
ctxt = svc_rdma_send_ctxt_alloc(rdma);
|
||||
if (!ctxt)
|
||||
return NULL;
|
||||
goto out;
|
||||
}
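svc_rdma_send_ctxt_get() above pops a context from a lock-protected free list and only falls back to a fresh allocation when the list is empty; either way the caller receives a reset context. A stand-alone sketch of that get-from-cache-or-allocate shape using a plain singly linked list (single-threaded, so no locking; names are invented):

#include <stdio.h>
#include <stdlib.h>

struct example_ctxt {
	struct example_ctxt	*next;
	unsigned int		hdrlen;
};

static struct example_ctxt *example_free_list;

static struct example_ctxt *example_ctxt_get(void)
{
	struct example_ctxt *ctxt = example_free_list;

	if (ctxt)
		example_free_list = ctxt->next;		/* pop from the cache */
	else
		ctxt = calloc(1, sizeof(*ctxt));	/* cache empty: allocate */
	if (ctxt)
		ctxt->hdrlen = 0;			/* reset before reuse */
	return ctxt;
}

static void example_ctxt_put(struct example_ctxt *ctxt)
{
	ctxt->next = example_free_list;			/* return to the cache */
	example_free_list = ctxt;
}

int main(void)
{
	struct example_ctxt *a = example_ctxt_get();	/* allocated */

	example_ctxt_put(a);
	printf("reused: %s\n", example_ctxt_get() == a ? "yes" : "no");
	free(a);
	return 0;
}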
|
||||
|
||||
static void svc_rdma_send_ctxt_release(struct svcxprt_rdma *rdma,
|
||||
struct svc_rdma_send_ctxt *ctxt)
|
||||
{
|
||||
struct ib_device *device = rdma->sc_cm_id->device;
|
||||
unsigned int i;
|
||||
|
||||
if (ctxt->sc_page_count)
|
||||
release_pages(ctxt->sc_pages, ctxt->sc_page_count);
|
||||
|
||||
/* The first SGE contains the transport header, which
|
||||
* remains mapped until @ctxt is destroyed.
|
||||
*/
|
||||
for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
|
||||
trace_svcrdma_dma_unmap_page(&ctxt->sc_cid,
|
||||
ctxt->sc_sges[i].addr,
|
||||
ctxt->sc_sges[i].length);
|
||||
ib_dma_unmap_page(device,
|
||||
ctxt->sc_sges[i].addr,
|
||||
ctxt->sc_sges[i].length,
|
||||
DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
llist_add(&ctxt->sc_node, &rdma->sc_send_ctxts);
|
||||
}
|
||||
|
||||
static void svc_rdma_send_ctxt_put_async(struct work_struct *work)
|
||||
{
|
||||
struct svc_rdma_send_ctxt *ctxt;
|
||||
|
||||
ctxt = container_of(work, struct svc_rdma_send_ctxt, sc_work);
|
||||
svc_rdma_send_ctxt_release(ctxt->sc_rdma, ctxt);
|
||||
}
|
||||
|
||||
/**
|
||||
* svc_rdma_send_ctxt_put - Return send_ctxt to free list
|
||||
* @rdma: controlling svcxprt_rdma
|
||||
@ -233,26 +260,8 @@ out_empty:
|
||||
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
|
||||
struct svc_rdma_send_ctxt *ctxt)
|
||||
{
|
||||
struct ib_device *device = rdma->sc_cm_id->device;
|
||||
unsigned int i;
|
||||
|
||||
if (ctxt->sc_page_count)
|
||||
release_pages(ctxt->sc_pages, ctxt->sc_page_count);
|
||||
|
||||
/* The first SGE contains the transport header, which
|
||||
* remains mapped until @ctxt is destroyed.
|
||||
*/
|
||||
for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
|
||||
ib_dma_unmap_page(device,
|
||||
ctxt->sc_sges[i].addr,
|
||||
ctxt->sc_sges[i].length,
|
||||
DMA_TO_DEVICE);
|
||||
trace_svcrdma_dma_unmap_page(rdma,
|
||||
ctxt->sc_sges[i].addr,
|
||||
ctxt->sc_sges[i].length);
|
||||
}
|
||||
|
||||
llist_add(&ctxt->sc_node, &rdma->sc_send_ctxts);
|
||||
INIT_WORK(&ctxt->sc_work, svc_rdma_send_ctxt_put_async);
|
||||
queue_work(svcrdma_wq, &ctxt->sc_work);
|
||||
}
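In the rewritten svc_rdma_send_ctxt_put() above, the Send completion path no longer unmaps SGEs and releases pages directly; it queues the context to svcrdma_wq, and svc_rdma_send_ctxt_put_async() performs the cleanup from process context. The underlying idiom is the usual work_struct deferral; a small sketch with hypothetical names (my_ctxt, my_ctxt_put), not the svcrdma types:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_ctxt {
	struct work_struct work;
	/* resources that are expensive to release */
};

static void my_ctxt_release_work(struct work_struct *work)
{
	struct my_ctxt *ctxt = container_of(work, struct my_ctxt, work);

	/* Runs later in process context, so it may sleep; this is where
	 * DMA unmapping or page release would go. */
	kfree(ctxt);
}

/* Callable from contexts that must not sleep, such as a CQ handler */
static void my_ctxt_put(struct my_ctxt *ctxt)
{
	INIT_WORK(&ctxt->work, my_ctxt_release_work);
	queue_work(system_wq, &ctxt->work);
}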
/**
@ -289,7 +298,7 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
if (unlikely(wc->status != IB_WC_SUCCESS))
goto flushed;

trace_svcrdma_wc_send(wc, &ctxt->sc_cid);
trace_svcrdma_wc_send(&ctxt->sc_cid);
svc_rdma_send_ctxt_put(rdma, ctxt);
return;

@ -327,13 +336,13 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
while (1) {
if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
percpu_counter_inc(&svcrdma_stat_sq_starve);
trace_svcrdma_sq_full(rdma);
trace_svcrdma_sq_full(rdma, &ctxt->sc_cid);
atomic_inc(&rdma->sc_sq_avail);
wait_event(rdma->sc_send_wait,
atomic_read(&rdma->sc_sq_avail) > 1);
if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
return -ENOTCONN;
trace_svcrdma_sq_retry(rdma);
trace_svcrdma_sq_retry(rdma, &ctxt->sc_cid);
continue;
}

@ -344,7 +353,7 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
return 0;
}

trace_svcrdma_sq_post_err(rdma, ret);
trace_svcrdma_sq_post_err(rdma, &ctxt->sc_cid, ret);
svc_xprt_deferred_close(&rdma->sc_xprt);
wake_up(&rdma->sc_send_wait);
return ret;
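The svc_rdma_send() loop above treats sc_sq_avail as a credit counter: each post takes a credit with atomic_dec_return(), a failed take returns the credit and sleeps on sc_send_wait, and the updated tracepoints record which completion ID starved or retried. A sketch of that credit-limited submit pattern with hypothetical names (my_queue, my_submit); in svcrdma it is the Send completion handler that returns the credit and calls wake_up():

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/errno.h>

struct my_queue {
	atomic_t avail;			/* free send-queue slots */
	wait_queue_head_t wait;
	bool closed;
};

static int my_submit(struct my_queue *q, int (*post)(void *arg), void *arg)
{
	while (1) {
		/* Take a credit; going negative means the queue was full */
		if (atomic_dec_return(&q->avail) < 0) {
			atomic_inc(&q->avail);	/* give the credit back */
			wait_event(q->wait, atomic_read(&q->avail) > 0);
			if (q->closed)
				return -ENOTCONN;
			continue;		/* retry the decrement */
		}
		return post(arg);
	}
}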
@ -534,14 +543,14 @@ static int svc_rdma_page_dma_map(void *data, struct page *page,
if (ib_dma_mapping_error(dev, dma_addr))
goto out_maperr;

trace_svcrdma_dma_map_page(rdma, dma_addr, len);
trace_svcrdma_dma_map_page(&ctxt->sc_cid, dma_addr, len);
ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
ctxt->sc_send_wr.num_sge++;
return 0;

out_maperr:
trace_svcrdma_dma_map_err(rdma, dma_addr, len);
trace_svcrdma_dma_map_err(&ctxt->sc_cid, dma_addr, len);
return -EIO;
}
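svc_rdma_page_dma_map() itself is unchanged here apart from its tracepoints, which now carry the completion ID instead of a transport pointer. For reference, the mapping discipline it relies on is the usual ib_dma_map_page() / ib_dma_mapping_error() pairing, sketched below with hypothetical helper names; every successful mapping is later reversed with ib_dma_unmap_page(), as in svc_rdma_send_ctxt_release() above.

#include <rdma/ib_verbs.h>
#include <linux/errno.h>

/* Map one page of a Send payload (sketch only) */
static int my_map_send_page(struct ib_device *dev, struct page *page,
			    unsigned long offset, unsigned int len,
			    u64 *dma_addr)
{
	*dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, *dma_addr))
		return -EIO;
	return 0;
}

/* The matching teardown for each successfully mapped SGE */
static void my_unmap_send_page(struct ib_device *dev, u64 dma_addr,
			       unsigned int len)
{
	ib_dma_unmap_page(dev, dma_addr, len, DMA_TO_DEVICE);
}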
@ -653,7 +662,7 @@ static int svc_rdma_xb_count_sges(const struct xdr_buf *xdr,
* svc_rdma_pull_up_needed - Determine whether to use pull-up
* @rdma: controlling transport
* @sctxt: send_ctxt for the Send WR
* @rctxt: Write and Reply chunks provided by client
* @write_pcl: Write chunk list provided by client
* @xdr: xdr_buf containing RPC message to transmit
*
* Returns:
@ -662,7 +671,7 @@ static int svc_rdma_xb_count_sges(const struct xdr_buf *xdr,
*/
static bool svc_rdma_pull_up_needed(const struct svcxprt_rdma *rdma,
const struct svc_rdma_send_ctxt *sctxt,
const struct svc_rdma_recv_ctxt *rctxt,
const struct svc_rdma_pcl *write_pcl,
const struct xdr_buf *xdr)
{
/* Resources needed for the transport header */
@ -672,7 +681,7 @@ static bool svc_rdma_pull_up_needed(const struct svcxprt_rdma *rdma,
};
int ret;

ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
ret = pcl_process_nonpayloads(write_pcl, xdr,
svc_rdma_xb_count_sges, &args);
if (ret < 0)
return false;
@ -728,7 +737,7 @@ static int svc_rdma_xb_linearize(const struct xdr_buf *xdr,
* svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
* @rdma: controlling transport
* @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
* @rctxt: Write and Reply chunks provided by client
* @write_pcl: Write chunk list provided by client
* @xdr: prepared xdr_buf containing RPC message
*
* The device is not capable of sending the reply directly.
@ -743,7 +752,7 @@ static int svc_rdma_xb_linearize(const struct xdr_buf *xdr,
*/
static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *sctxt,
const struct svc_rdma_recv_ctxt *rctxt,
const struct svc_rdma_pcl *write_pcl,
const struct xdr_buf *xdr)
{
struct svc_rdma_pullup_data args = {
@ -751,7 +760,7 @@ static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
};
int ret;

ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
ret = pcl_process_nonpayloads(write_pcl, xdr,
svc_rdma_xb_linearize, &args);
if (ret < 0)
return ret;
@ -764,7 +773,8 @@ static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
/* svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
* @rdma: controlling transport
* @sctxt: send_ctxt for the Send WR
* @rctxt: Write and Reply chunks provided by client
* @write_pcl: Write chunk list provided by client
* @reply_pcl: Reply chunk provided by client
* @xdr: prepared xdr_buf containing RPC message
*
* Returns:
@ -776,7 +786,8 @@ static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
*/
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *sctxt,
const struct svc_rdma_recv_ctxt *rctxt,
const struct svc_rdma_pcl *write_pcl,
const struct svc_rdma_pcl *reply_pcl,
const struct xdr_buf *xdr)
{
struct svc_rdma_map_data args = {
@ -789,18 +800,18 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

/* If there is a Reply chunk, nothing follows the transport
* header, and we're done here.
* header, so there is nothing to map.
*/
if (!pcl_is_empty(&rctxt->rc_reply_pcl))
if (!pcl_is_empty(reply_pcl))
return 0;

/* For pull-up, svc_rdma_send() will sync the transport header.
* No additional DMA mapping is necessary.
*/
if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);
if (svc_rdma_pull_up_needed(rdma, sctxt, write_pcl, xdr))
return svc_rdma_pull_up_reply_msg(rdma, sctxt, write_pcl, xdr);

return pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
return pcl_process_nonpayloads(write_pcl, xdr,
svc_rdma_xb_dma_map, &args);
}

@ -848,7 +859,8 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
{
int ret;

ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
ret = svc_rdma_map_reply_msg(rdma, sctxt, &rctxt->rc_write_pcl,
&rctxt->rc_reply_pcl, &rqstp->rq_res);
if (ret < 0)
return ret;
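The theme of the last few hunks is that svc_rdma_map_reply_msg() and its helpers now take the client's Write and Reply chunk lists as explicit svc_rdma_pcl arguments instead of digging them out of the recv_ctxt. A hedged sketch of how a caller-supplied chunk list drives pcl_process_nonpayloads(); the header path and actor signature are inferred from the svc_rdma_xb_* helpers above, and my_xb_count_bytes / my_count_nonpayload_bytes are hypothetical:

#include <linux/sunrpc/svc_rdma_pcl.h>
#include <linux/sunrpc/xdr.h>

/* Hypothetical actor invoked for the non-payload ranges of @xdr */
static int my_xb_count_bytes(const struct xdr_buf *xdr, void *data)
{
	unsigned int *total = data;

	*total += xdr->len;
	return 0;
}

/* The chunk list is passed in by the caller, as the reworked
 * svc_rdma_map_reply_msg() now does with write_pcl. */
static int my_count_nonpayload_bytes(const struct svc_rdma_pcl *write_pcl,
				     const struct xdr_buf *xdr,
				     unsigned int *total)
{
	*total = 0;
	return pcl_process_nonpayloads(write_pcl, xdr,
				       my_xb_count_bytes, total);
}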
@ -125,6 +125,9 @@ static void qp_event_handler(struct ib_event *event, void *context)
static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
struct net *net, int node)
{
static struct lock_class_key svcrdma_rwctx_lock;
static struct lock_class_key svcrdma_sctx_lock;
static struct lock_class_key svcrdma_dto_lock;
struct svcxprt_rdma *cma_xprt;

cma_xprt = kzalloc_node(sizeof(*cma_xprt), GFP_KERNEL, node);
@ -134,6 +137,7 @@ static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
svc_xprt_init(net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
init_llist_head(&cma_xprt->sc_send_ctxts);
init_llist_head(&cma_xprt->sc_recv_ctxts);
init_llist_head(&cma_xprt->sc_rw_ctxts);
@ -141,8 +145,11 @@ static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,

spin_lock_init(&cma_xprt->sc_lock);
spin_lock_init(&cma_xprt->sc_rq_dto_lock);
lockdep_set_class(&cma_xprt->sc_rq_dto_lock, &svcrdma_dto_lock);
spin_lock_init(&cma_xprt->sc_send_lock);
lockdep_set_class(&cma_xprt->sc_send_lock, &svcrdma_sctx_lock);
spin_lock_init(&cma_xprt->sc_rw_ctxt_lock);
lockdep_set_class(&cma_xprt->sc_rw_ctxt_lock, &svcrdma_rwctx_lock);

/*
* Note that this implies that the underlying transport support
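The create_xprt hunk above gives each of the three per-transport spinlocks its own static lock_class_key, so lockdep tracks sc_rq_dto_lock, sc_send_lock, and sc_rw_ctxt_lock as separate classes instead of lumping them together. The idiom, sketched with hypothetical names (my_xprt, my_a_key, my_b_key):

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct my_xprt {
	spinlock_t a_lock;
	spinlock_t b_lock;
};

static void my_xprt_init_locks(struct my_xprt *x)
{
	/* One static key per lock: every instance of my_xprt shares the
	 * same class for a_lock, but a_lock and b_lock stay distinct. */
	static struct lock_class_key my_a_key;
	static struct lock_class_key my_b_key;

	spin_lock_init(&x->a_lock);
	lockdep_set_class(&x->a_lock, &my_a_key);
	spin_lock_init(&x->b_lock);
	lockdep_set_class(&x->b_lock, &my_b_key);
}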
@ -391,37 +398,35 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
dev = newxprt->sc_cm_id->device;
newxprt->sc_port_num = newxprt->sc_cm_id->port_num;

/* Qualify the transport resource defaults with the
* capabilities of this particular device */
newxprt->sc_max_req_size = svcrdma_max_req_size;
newxprt->sc_max_requests = svcrdma_max_requests;
newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
newxprt->sc_recv_batch = RPCRDMA_MAX_RECV_BATCH;
newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests);

/* Qualify the transport's resource defaults with the
* capabilities of this particular device.
*/

/* Transport header, head iovec, tail iovec */
newxprt->sc_max_send_sges = 3;
/* Add one SGE per page list entry */
newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
newxprt->sc_max_req_size = svcrdma_max_req_size;
newxprt->sc_max_requests = svcrdma_max_requests;
newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
newxprt->sc_recv_batch = RPCRDMA_MAX_RECV_BATCH;
rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests +
newxprt->sc_recv_batch;
if (rq_depth > dev->attrs.max_qp_wr) {
pr_warn("svcrdma: reducing receive depth to %d\n",
dev->attrs.max_qp_wr);
rq_depth = dev->attrs.max_qp_wr;
newxprt->sc_recv_batch = 1;
newxprt->sc_max_requests = rq_depth - 2;
newxprt->sc_max_bc_requests = 2;
}
newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests);
ctxts = rdma_rw_mr_factor(dev, newxprt->sc_port_num, RPCSVC_MAXPAGES);
ctxts *= newxprt->sc_max_requests;
newxprt->sc_sq_depth = rq_depth + ctxts;
if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr) {
pr_warn("svcrdma: reducing send depth to %d\n",
dev->attrs.max_qp_wr);
if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr)
newxprt->sc_sq_depth = dev->attrs.max_qp_wr;
}
atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);

newxprt->sc_pd = ib_alloc_pd(dev, 0);
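The accept path above now computes the receive and send queue depths before allocating the completion queues, and it clamps both to the device's max_qp_wr without logging a warning for the send side. To make the arithmetic concrete, a purely hypothetical sizing (none of these numbers come from the patch):

enum {
	MY_MAX_REQUESTS    = 64,	/* assumed sc_max_requests */
	MY_MAX_BC_REQUESTS = 2,		/* assumed sc_max_bc_requests */
	MY_RECV_BATCH      = 7,		/* assumed sc_recv_batch */
	MY_CTXTS_PER_REQ   = 4,		/* assumed rdma_rw_mr_factor() result */
	MY_MAX_QP_WR       = 256,	/* assumed dev->attrs.max_qp_wr */
};

/* rq_depth = 64 + 2 + 7 = 73, which is under MY_MAX_QP_WR, so it stands;
 * sq_depth = 73 + 64 * 4 = 329, which exceeds MY_MAX_QP_WR, so it is
 * clamped to 256 and sc_sq_avail starts at 256. */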
@ -451,8 +456,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
qp_attr.qp_type = IB_QPT_RC;
qp_attr.send_cq = newxprt->sc_sq_cq;
qp_attr.recv_cq = newxprt->sc_rq_cq;
dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n",
newxprt->sc_cm_id, newxprt->sc_pd);
dprintk(" cap.max_send_wr = %d, cap.max_recv_wr = %d\n",
qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr);
dprintk(" cap.max_send_sge = %d, cap.max_recv_sge = %d\n",
@ -506,7 +509,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
dprintk("svcrdma: new connection %p accepted:\n", newxprt);
dprintk("svcrdma: new connection accepted on device %s:\n", dev->name);
sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
dprintk(" local address : %pIS:%u\n", sap, rpc_get_port(sap));
sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
@ -547,6 +550,7 @@ static void __svc_rdma_free(struct work_struct *work)
/* This blocks until the Completion Queues are empty */
if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
ib_drain_qp(rdma->sc_qp);
flush_workqueue(svcrdma_wq);

svc_rdma_flush_recv_queues(rdma);
@ -1364,7 +1364,7 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
}

rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id;
trace_xprtrdma_post_recv(rep);
trace_xprtrdma_post_recv(&rep->rr_cid);
rep->rr_recv_wr.next = wr;
wr = &rep->rr_recv_wr;
--needed;