smc: fix mutex unlocks during link group creation

Link group creation is synchronized with the smc_create_lgr_pending
lock. In smc_listen_work() this mutex is sometimes unlocked even though
it has not been locked beforehand. This issue will surface in the
presence of the SMC rendezvous code.
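
The fix follows the usual pattern for error exits that may or may not hold
a lock: paths that run with smc_create_lgr_pending held get their own
*_unlock labels, which drop the mutex before falling through to the common
cleanup labels. A minimal sketch of that pattern, with generic names and
pthreads standing in for the kernel mutex API (not the actual SMC code):

/*
 * Sketch only: error paths taken while the mutex is held jump to the
 * *_unlock label; earlier error paths jump past it, so the mutex is
 * never unlocked without having been locked.
 */
#include <pthread.h>

static pthread_mutex_t create_pending = PTHREAD_MUTEX_INITIALIZER;

static int setup(int early_ok, int late_ok)
{
	int rc = 0;

	if (!early_ok) {
		rc = -1;
		goto out_err;			/* mutex not taken yet */
	}

	pthread_mutex_lock(&create_pending);

	if (!late_ok) {
		rc = -1;
		goto out_err_unlock;		/* mutex held: drop it first */
	}

	pthread_mutex_unlock(&create_pending);	/* success path */
	return 0;

out_err_unlock:
	pthread_mutex_unlock(&create_pending);
out_err:
	return rc;
}

In the patch below, decline_rdma_unlock and out_err_unlock play that role
for the decline_rdma and out_err cleanup paths.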

Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

@@ -808,7 +808,7 @@ static void smc_listen_work(struct work_struct *work)
 		rc = local_contact;
 		if (rc == -ENOMEM)
 			reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
-		goto decline_rdma;
+		goto decline_rdma_unlock;
 	}
 	link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
@@ -816,7 +816,7 @@ static void smc_listen_work(struct work_struct *work)
 	rc = smc_buf_create(new_smc);
 	if (rc) {
 		reason_code = SMC_CLC_DECL_MEM;
-		goto decline_rdma;
+		goto decline_rdma_unlock;
 	}
 	smc_close_init(new_smc);
@@ -831,7 +831,7 @@ static void smc_listen_work(struct work_struct *work)
 					     buf_desc->mr_rx[SMC_SINGLE_LINK]);
 			if (rc) {
 				reason_code = SMC_CLC_DECL_INTERR;
-				goto decline_rdma;
+				goto decline_rdma_unlock;
 			}
 		}
 	}
@@ -839,15 +839,15 @@ static void smc_listen_work(struct work_struct *work)
 	rc = smc_clc_send_accept(new_smc, local_contact);
 	if (rc)
-		goto out_err;
+		goto out_err_unlock;
 	/* receive SMC Confirm CLC message */
 	reason_code = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
 				       SMC_CLC_CONFIRM);
 	if (reason_code < 0)
-		goto out_err;
+		goto out_err_unlock;
 	if (reason_code > 0)
-		goto decline_rdma;
+		goto decline_rdma_unlock;
 	smc_conn_save_peer_info(new_smc, &cclc);
 	if (local_contact == SMC_FIRST_CONTACT)
 		smc_link_save_peer_info(link, &cclc);
@@ -855,34 +855,34 @@ static void smc_listen_work(struct work_struct *work)
 	rc = smc_rmb_rtoken_handling(&new_smc->conn, &cclc);
 	if (rc) {
 		reason_code = SMC_CLC_DECL_INTERR;
-		goto decline_rdma;
+		goto decline_rdma_unlock;
 	}
 	if (local_contact == SMC_FIRST_CONTACT) {
 		rc = smc_ib_ready_link(link);
 		if (rc) {
 			reason_code = SMC_CLC_DECL_INTERR;
-			goto decline_rdma;
+			goto decline_rdma_unlock;
 		}
 		/* QP confirmation over RoCE fabric */
 		reason_code = smc_serv_conf_first_link(new_smc);
 		if (reason_code < 0) {
 			/* peer is not aware of a problem */
 			rc = reason_code;
-			goto out_err;
+			goto out_err_unlock;
 		}
 		if (reason_code > 0)
-			goto decline_rdma;
+			goto decline_rdma_unlock;
 	}
 	smc_tx_init(new_smc);
+	mutex_unlock(&smc_create_lgr_pending);
 out_connected:
 	sk_refcnt_debug_inc(newsmcsk);
 	if (newsmcsk->sk_state == SMC_INIT)
 		newsmcsk->sk_state = SMC_ACTIVE;
 enqueue:
-	mutex_unlock(&smc_create_lgr_pending);
 	lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
 	if (lsmc->sk.sk_state == SMC_LISTEN) {
 		smc_accept_enqueue(&lsmc->sk, newsmcsk);
@@ -896,6 +896,8 @@ enqueue:
 	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
 	return;
+decline_rdma_unlock:
+	mutex_unlock(&smc_create_lgr_pending);
 decline_rdma:
 	/* RDMA setup failed, switch back to TCP */
 	smc_conn_free(&new_smc->conn);
@@ -907,6 +909,8 @@ decline_rdma:
 	}
 	goto out_connected;
+out_err_unlock:
+	mutex_unlock(&smc_create_lgr_pending);
 out_err:
 	newsmcsk->sk_state = SMC_CLOSED;
 	smc_conn_free(&new_smc->conn);