SUNRPC: Ensure we flush any closed sockets before xs_xprt_free()

We must ensure that all sockets are closed before we call xprt_free()
and release the reference to the net namespace. The problem is that
calling fput() will defer closing the socket until delayed_fput() gets
called.
Let's fix the situation by allowing rpciod and the transport teardown
code (which runs on the system wq) to call __fput_sync(), and directly
close the socket.
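
To make the ordering constraint concrete, here is a minimal illustrative sketch (not part of the patch; the helper name is hypothetical, and error handling is elided):

        /* Hypothetical helper, for illustration only: the socket's
         * struct file must be fully released (dropping its hold on the
         * net namespace) before xprt_free() runs.
         */
        static void example_teardown(struct sock_xprt *transport)
        {
                struct file *filp = transport->file;

                /* fput(filp) would merely queue the final release for
                 * delayed_fput(), so the socket could still pin the
                 * netns when xprt_free() drops the namespace reference.
                 * __fput_sync() closes the socket before returning;
                 * safe here only because the caller runs on a
                 * workqueue (rpciod or the system wq).
                 */
                __fput_sync(filp);

                xprt_free(&transport->xprt); /* now safe to put the netns */
        }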

Reported-by: Felix Fu <foyjog@gmail.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Fixes: a73881c96d ("SUNRPC: Fix an Oops in udp_poll()")
Cc: stable@vger.kernel.org # 5.1.x: 3be232f11a: SUNRPC: Prevent immediate close+reconnect
Cc: stable@vger.kernel.org # 5.1.x: 89f42494f9: SUNRPC: Don't call connect() more than once on a TCP socket
Cc: stable@vger.kernel.org # 5.1.x
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Author: Trond Myklebust <trond.myklebust@hammerspace.com>
Date:   2022-04-03 15:58:11 -04:00
commit f00432063d (parent 830f1111d9)
4 changed files with 15 additions and 10 deletions

fs/file_table.c
@@ -412,6 +412,7 @@ void __fput_sync(struct file *file)
 }
 
 EXPORT_SYMBOL(fput);
+EXPORT_SYMBOL(__fput_sync);
 
 void __init files_init(void)
 {

include/trace/events/sunrpc.h
@@ -1004,7 +1004,6 @@ DEFINE_RPC_XPRT_LIFETIME_EVENT(connect);
 DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto);
 DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done);
 DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force);
-DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_cleanup);
 DEFINE_RPC_XPRT_LIFETIME_EVENT(destroy);
 
 DECLARE_EVENT_CLASS(rpc_xprt_event,

net/sunrpc/xprt.c
@@ -930,12 +930,7 @@ void xprt_connect(struct rpc_task *task)
 	if (!xprt_lock_write(xprt, task))
 		return;
 
-	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
-		trace_xprt_disconnect_cleanup(xprt);
-		xprt->ops->close(xprt);
-	}
-
-	if (!xprt_connected(xprt)) {
+	if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
 		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
 		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
 				xprt_request_timeout(task->tk_rqstp));

net/sunrpc/xprtsock.c
@@ -879,7 +879,7 @@ static int xs_local_send_request(struct rpc_rqst *req)
 
 	/* Close the stream if the previous transmission was incomplete */
 	if (xs_send_request_was_aborted(transport, req)) {
-		xs_close(xprt);
+		xprt_force_disconnect(xprt);
 		return -ENOTCONN;
 	}
 
@@ -915,7 +915,7 @@ static int xs_local_send_request(struct rpc_rqst *req)
 			-status);
 		fallthrough;
 	case -EPIPE:
-		xs_close(xprt);
+		xprt_force_disconnect(xprt);
 		status = -ENOTCONN;
 	}
 
@@ -1185,6 +1185,16 @@ static void xs_reset_transport(struct sock_xprt *transport)
 
 	if (sk == NULL)
 		return;
+	/*
+	 * Make sure we're calling this in a context from which it is safe
+	 * to call __fput_sync(). In practice that means rpciod and the
+	 * system workqueue.
+	 */
+	if (!(current->flags & PF_WQ_WORKER)) {
+		WARN_ON_ONCE(1);
+		set_bit(XPRT_CLOSE_WAIT, &xprt->state);
+		return;
+	}
 
 	if (atomic_read(&transport->xprt.swapper))
 		sk_clear_memalloc(sk);
 
@@ -1208,7 +1218,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
 	mutex_unlock(&transport->recv_mutex);
 
 	trace_rpc_socket_close(xprt, sock);
-	fput(filp);
+	__fput_sync(filp);
 
 	xprt_disconnect_done(xprt);
 }