hv_sock: Add support for delayed close

Currently, hvsock does not implement any delayed or background close
logic. Whenever the hvsock socket is closed, a FIN is sent to the peer,
and the last reference to the socket is dropped, which leads to a call to
.destruct where the socket can hang indefinitely waiting for the peer to
close its side. This can cause the user application to hang in the close()
call.

This change implements a proper STREAM (TCP) closing handshake mechanism
by sending the FIN to the peer and then waiting for the peer's FIN to
arrive within a given timeout. On timeout, it will try to terminate the
connection (i.e. send a RST). This is in line with other socket providers
such as virtio.
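
For illustration only (not part of this patch): with the delayed close in
place, a guest application's close() on an hv_sock (AF_VSOCK) stream socket
should return promptly while the FIN/RST handshake completes in the
background. The sketch below is hypothetical; the service port value and the
presence of a matching host-side listener are assumptions made for the demo.

/* Hypothetical guest-side demo: close() should not block even if the
 * host is slow to acknowledge the shutdown. Assumes a host service is
 * listening on the (made-up) port below.
 */
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

int main(void)
{
        struct sockaddr_vm addr = {
                .svm_family = AF_VSOCK,
                .svm_cid    = VMADDR_CID_HOST,
                .svm_port   = 0x12345678,       /* assumed host service port */
        };
        struct timespec t0, t1;
        int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

        if (fd < 0 || connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                perror("connect");
                return 1;
        }

        clock_gettime(CLOCK_MONOTONIC, &t0);
        close(fd);      /* previously could hang waiting for the peer's FIN */
        clock_gettime(CLOCK_MONOTONIC, &t1);

        printf("close() took %ld ms\n",
               (long)((t1.tv_sec - t0.tv_sec) * 1000 +
                      (t1.tv_nsec - t0.tv_nsec) / 1000000));
        return 0;
}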

This change does not address the hang in vmbus_hvsock_device_unregister(),
where it waits indefinitely for the host to rescind the channel. That
should be taken up as a separate fix.

Signed-off-by: Sunil Muthuswamy <sunilmut@microsoft.com>
Reviewed-by: Dexuan Cui <decui@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit a9eeb998c2 (parent 55c0dd8add)
Author:    Sunil Muthuswamy <sunilmut@microsoft.com>
Date:      2019-05-15 00:56:05 +0000
Committer: David S. Miller <davem@davemloft.net>


@@ -35,6 +35,9 @@
 /* The MTU is 16KB per the host side's design */
 #define HVS_MTU_SIZE (1024 * 16)
 
+/* How long to wait for graceful shutdown of a connection */
+#define HVS_CLOSE_TIMEOUT (8 * HZ)
+
 struct vmpipe_proto_header {
         u32 pkt_type;
         u32 data_size;
@@ -305,19 +308,32 @@ static void hvs_channel_cb(void *ctx)
                 sk->sk_write_space(sk);
 }
 
+static void hvs_do_close_lock_held(struct vsock_sock *vsk,
+                                   bool cancel_timeout)
+{
+        struct sock *sk = sk_vsock(vsk);
+
+        sock_set_flag(sk, SOCK_DONE);
+        vsk->peer_shutdown = SHUTDOWN_MASK;
+        if (vsock_stream_has_data(vsk) <= 0)
+                sk->sk_state = TCP_CLOSING;
+        sk->sk_state_change(sk);
+        if (vsk->close_work_scheduled &&
+            (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
+                vsk->close_work_scheduled = false;
+                vsock_remove_sock(vsk);
+
+                /* Release the reference taken while scheduling the timeout */
+                sock_put(sk);
+        }
+}
+
 static void hvs_close_connection(struct vmbus_channel *chan)
 {
         struct sock *sk = get_per_channel_state(chan);
-        struct vsock_sock *vsk = vsock_sk(sk);
 
         lock_sock(sk);
-
-        sk->sk_state = TCP_CLOSE;
-        sock_set_flag(sk, SOCK_DONE);
-        vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN;
-
-        sk->sk_state_change(sk);
-
+        hvs_do_close_lock_held(vsock_sk(sk), true);
         release_sock(sk);
 }
 
@@ -452,50 +468,80 @@ static int hvs_connect(struct vsock_sock *vsk)
         return vmbus_send_tl_connect_request(&h->vm_srv_id, &h->host_srv_id);
 }
 
+static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
+{
+        struct vmpipe_proto_header hdr;
+
+        if (hvs->fin_sent || !hvs->chan)
+                return;
+
+        /* It can't fail: see hvs_channel_writable_bytes(). */
+        (void)hvs_send_data(hvs->chan, (struct hvs_send_buf *)&hdr, 0);
+        hvs->fin_sent = true;
+}
+
 static int hvs_shutdown(struct vsock_sock *vsk, int mode)
 {
         struct sock *sk = sk_vsock(vsk);
-        struct vmpipe_proto_header hdr;
-        struct hvs_send_buf *send_buf;
-        struct hvsock *hvs;
 
         if (!(mode & SEND_SHUTDOWN))
                 return 0;
 
         lock_sock(sk);
-
-        hvs = vsk->trans;
-        if (hvs->fin_sent)
-                goto out;
-
-        send_buf = (struct hvs_send_buf *)&hdr;
-
-        /* It can't fail: see hvs_channel_writable_bytes(). */
-        (void)hvs_send_data(hvs->chan, send_buf, 0);
-
-        hvs->fin_sent = true;
-out:
+        hvs_shutdown_lock_held(vsk->trans, mode);
         release_sock(sk);
         return 0;
 }
 
+static void hvs_close_timeout(struct work_struct *work)
+{
+        struct vsock_sock *vsk =
+                container_of(work, struct vsock_sock, close_work.work);
+        struct sock *sk = sk_vsock(vsk);
+
+        sock_hold(sk);
+        lock_sock(sk);
+        if (!sock_flag(sk, SOCK_DONE))
+                hvs_do_close_lock_held(vsk, false);
+
+        vsk->close_work_scheduled = false;
+        release_sock(sk);
+        sock_put(sk);
+}
+
+/* Returns true, if it is safe to remove socket; false otherwise */
+static bool hvs_close_lock_held(struct vsock_sock *vsk)
+{
+        struct sock *sk = sk_vsock(vsk);
+
+        if (!(sk->sk_state == TCP_ESTABLISHED ||
+              sk->sk_state == TCP_CLOSING))
+                return true;
+
+        if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
+                hvs_shutdown_lock_held(vsk->trans, SHUTDOWN_MASK);
+
+        if (sock_flag(sk, SOCK_DONE))
+                return true;
+
+        /* This reference will be dropped by the delayed close routine */
+        sock_hold(sk);
+        INIT_DELAYED_WORK(&vsk->close_work, hvs_close_timeout);
+        vsk->close_work_scheduled = true;
+        schedule_delayed_work(&vsk->close_work, HVS_CLOSE_TIMEOUT);
+        return false;
+}
+
 static void hvs_release(struct vsock_sock *vsk)
 {
         struct sock *sk = sk_vsock(vsk);
-        struct hvsock *hvs = vsk->trans;
-        struct vmbus_channel *chan;
+        bool remove_sock;
 
         lock_sock(sk);
-
-        sk->sk_state = TCP_CLOSING;
-        vsock_remove_sock(vsk);
-
+        remove_sock = hvs_close_lock_held(vsk);
         release_sock(sk);
-
-        chan = hvs->chan;
-        if (chan)
-                hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN);
-
+        if (remove_sock)
+                vsock_remove_sock(vsk);
 }
 
 static void hvs_destruct(struct vsock_sock *vsk)
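
The delayed close above relies on the standard kernel workqueue API:
INIT_DELAYED_WORK(), schedule_delayed_work() and cancel_delayed_work().
Below is a stripped-down, hypothetical module sketch of that pattern, kept
only as an illustration; the demo_* names, the module itself and its timeout
message are invented, and only the workqueue calls mirror what the patch does.

// SPDX-License-Identifier: GPL-2.0
/* Minimal sketch of the delayed-work pattern used for the close timeout. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_CLOSE_TIMEOUT (8 * HZ)     /* mirrors HVS_CLOSE_TIMEOUT */

static struct delayed_work demo_work;

static void demo_timeout(struct work_struct *work)
{
        /* In the patch, this is where the connection is forcibly closed
         * if the peer's FIN never arrived.
         */
        pr_info("demo: timeout fired, forcing the close path\n");
}

static int __init demo_init(void)
{
        /* Arm the timer, as hvs_close_lock_held() does at release time. */
        INIT_DELAYED_WORK(&demo_work, demo_timeout);
        schedule_delayed_work(&demo_work, DEMO_CLOSE_TIMEOUT);
        return 0;
}

static void __exit demo_exit(void)
{
        /* If the peer answers first, the real code cancels the timer
         * (hvs_do_close_lock_held() with cancel_timeout == true).
         */
        cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");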