net/smc: smc_poll improvements

Increase the socket refcount during poll wait.
Take the socket lock before checking socket state.
For a listening socket, return a mask independent of state SMC_ACTIVE and
cover errors or closed state as well.
Get rid of the accept_q loop in smc_accept_poll().

Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Ursula Braun 2018-01-26 09:28:47 +01:00 committed by David S. Miller
parent da05bf2981
commit 8dce2786a2

View File

@ -1122,21 +1122,15 @@ out:
static unsigned int smc_accept_poll(struct sock *parent) static unsigned int smc_accept_poll(struct sock *parent)
{ {
struct smc_sock *isk; struct smc_sock *isk = smc_sk(parent);
struct sock *sk; int mask = 0;
lock_sock(parent); spin_lock(&isk->accept_q_lock);
list_for_each_entry(isk, &smc_sk(parent)->accept_q, accept_q) { if (!list_empty(&isk->accept_q))
sk = (struct sock *)isk; mask = POLLIN | POLLRDNORM;
spin_unlock(&isk->accept_q_lock);
if (sk->sk_state == SMC_ACTIVE) { return mask;
release_sock(parent);
return POLLIN | POLLRDNORM;
}
}
release_sock(parent);
return 0;
} }
static unsigned int smc_poll(struct file *file, struct socket *sock, static unsigned int smc_poll(struct file *file, struct socket *sock,
@ -1147,9 +1141,15 @@ static unsigned int smc_poll(struct file *file, struct socket *sock,
struct smc_sock *smc; struct smc_sock *smc;
int rc; int rc;
if (!sk)
return POLLNVAL;
smc = smc_sk(sock->sk); smc = smc_sk(sock->sk);
sock_hold(sk);
lock_sock(sk);
if ((sk->sk_state == SMC_INIT) || smc->use_fallback) { if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
/* delegate to CLC child sock */ /* delegate to CLC child sock */
release_sock(sk);
mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
/* if non-blocking connect finished ... */ /* if non-blocking connect finished ... */
lock_sock(sk); lock_sock(sk);
@ -1161,37 +1161,43 @@ static unsigned int smc_poll(struct file *file, struct socket *sock,
rc = smc_connect_rdma(smc); rc = smc_connect_rdma(smc);
if (rc < 0) if (rc < 0)
mask |= POLLERR; mask |= POLLERR;
else /* success cases including fallback */
/* success cases including fallback */ mask |= POLLOUT | POLLWRNORM;
mask |= POLLOUT | POLLWRNORM;
} }
} }
release_sock(sk);
} else { } else {
sock_poll_wait(file, sk_sleep(sk), wait); if (sk->sk_state != SMC_CLOSED) {
if (sk->sk_state == SMC_LISTEN) release_sock(sk);
/* woken up by sk_data_ready in smc_listen_work() */ sock_poll_wait(file, sk_sleep(sk), wait);
mask |= smc_accept_poll(sk); lock_sock(sk);
}
if (sk->sk_err) if (sk->sk_err)
mask |= POLLERR; mask |= POLLERR;
if (atomic_read(&smc->conn.sndbuf_space) ||
(sk->sk_shutdown & SEND_SHUTDOWN)) {
mask |= POLLOUT | POLLWRNORM;
} else {
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}
if (atomic_read(&smc->conn.bytes_to_rcv))
mask |= POLLIN | POLLRDNORM;
if ((sk->sk_shutdown == SHUTDOWN_MASK) || if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
(sk->sk_state == SMC_CLOSED)) (sk->sk_state == SMC_CLOSED))
mask |= POLLHUP; mask |= POLLHUP;
if (sk->sk_shutdown & RCV_SHUTDOWN) if (sk->sk_state == SMC_LISTEN) {
mask |= POLLIN | POLLRDNORM | POLLRDHUP; /* woken up by sk_data_ready in smc_listen_work() */
if (sk->sk_state == SMC_APPCLOSEWAIT1) mask = smc_accept_poll(sk);
mask |= POLLIN; } else {
if (atomic_read(&smc->conn.sndbuf_space) ||
sk->sk_shutdown & SEND_SHUTDOWN) {
mask |= POLLOUT | POLLWRNORM;
} else {
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}
if (atomic_read(&smc->conn.bytes_to_rcv))
mask |= POLLIN | POLLRDNORM;
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLIN | POLLRDNORM | POLLRDHUP;
if (sk->sk_state == SMC_APPCLOSEWAIT1)
mask |= POLLIN;
}
} }
release_sock(sk);
sock_put(sk);
return mask; return mask;
} }