net/smc: move add link processing for new device into llc layer

When a new ib device is up, smc will send an add link invitation to the
peer if needed. This is currently done with only rudimentary flow
control. Under high workload these add link invitations can disturb
other llc flows because they arrive unexpectedly. Fix this by
integrating the invitations into the normal llc event flow and handling
them as a flow. While at it, check for already assigned requests in the
flow before the new add link request is assigned.

Reviewed-by: Ursula Braun <ubraun@linux.ibm.com>
Fixes: 1f90a05d9f ("net/smc: add smcr_port_add() and smcr_link_up() processing")
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit c48254fa48, parent 2ff0867851
Karsten Graul, 2020-07-18 15:06:14 +02:00; committed by David S. Miller
3 changed files with 58 additions and 80 deletions
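At its core, the change makes the client enqueue a self-built, zero-filled
ADD_LINK request into its own llc event queue (smc_llc_add_link_local()),
which the event handler can tell apart from a real peer request because a
peer always fills in qp_mtu and link_num (smc_llc_is_local_add_link()).
Below is a minimal user-space sketch of that marker logic; the struct
layout is an abbreviated stand-in, not the kernel definition (only the
SMC_LLC_ADD_LINK type value matches the kernel's enum):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* abbreviated stand-in for the kernel's add_link message; only the
 * fields relevant to the local-request marker are modeled */
enum { SMC_LLC_ADD_LINK = 2 };

struct add_link_msg {
        unsigned char type;
        unsigned char length;
        unsigned char link_num;
        unsigned char qp_mtu;   /* a real peer request sets this */
};

/* like smc_llc_add_link_local(): build a zeroed ADD_LINK request;
 * dev and port data is intentionally left empty */
static void build_local_add_link(struct add_link_msg *msg)
{
        memset(msg, 0, sizeof(*msg));
        msg->type = SMC_LLC_ADD_LINK;
        msg->length = sizeof(*msg);
}

/* like smc_llc_is_local_add_link(): only a locally enqueued request
 * carries both qp_mtu and link_num as zero */
static bool is_local_add_link(const struct add_link_msg *msg)
{
        return msg->type == SMC_LLC_ADD_LINK &&
               !msg->qp_mtu && !msg->link_num;
}

int main(void)
{
        struct add_link_msg local;
        struct add_link_msg peer = { .type = SMC_LLC_ADD_LINK,
                                     .link_num = 1, .qp_mtu = 3 };

        build_local_add_link(&local);
        /* prints "local=1 peer=0" */
        printf("local=%d peer=%d\n",
               is_local_add_link(&local), is_local_add_link(&peer));
        return 0;
}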

net/smc/smc_core.c

@@ -45,18 +45,10 @@ static struct smc_lgr_list smc_lgr_list = { /* established link groups */
 static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
 static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);
 
-struct smc_ib_up_work {
-        struct work_struct      work;
-        struct smc_link_group   *lgr;
-        struct smc_ib_device    *smcibdev;
-        u8                      ibport;
-};
-
 static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
                          struct smc_buf_desc *buf_desc);
 static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
 
-static void smc_link_up_work(struct work_struct *work);
 static void smc_link_down_work(struct work_struct *work);
 
 /* return head of link group list and its lock for a given link group */
@@ -1106,67 +1098,23 @@ static void smc_conn_abort_work(struct work_struct *work)
        sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
 }
 
-/* link is up - establish alternate link if applicable */
-static void smcr_link_up(struct smc_link_group *lgr,
-                         struct smc_ib_device *smcibdev, u8 ibport)
-{
-        struct smc_link *link = NULL;
-
-        if (list_empty(&lgr->list) ||
-            lgr->type == SMC_LGR_SYMMETRIC ||
-            lgr->type == SMC_LGR_ASYMMETRIC_PEER)
-                return;
-
-        if (lgr->role == SMC_SERV) {
-                /* trigger local add link processing */
-                link = smc_llc_usable_link(lgr);
-                if (!link)
-                        return;
-                smc_llc_srv_add_link_local(link);
-        } else {
-                /* invite server to start add link processing */
-                u8 gid[SMC_GID_SIZE];
-
-                if (smc_ib_determine_gid(smcibdev, ibport, lgr->vlan_id, gid,
-                                         NULL))
-                        return;
-                if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
-                        /* some other llc task is ongoing */
-                        wait_event_timeout(lgr->llc_flow_waiter,
-                                (list_empty(&lgr->list) ||
-                                 lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
-                                SMC_LLC_WAIT_TIME);
-                }
-                /* lgr or device no longer active? */
-                if (!list_empty(&lgr->list) &&
-                    smc_ib_port_active(smcibdev, ibport))
-                        link = smc_llc_usable_link(lgr);
-                if (link)
-                        smc_llc_send_add_link(link, smcibdev->mac[ibport - 1],
-                                              gid, NULL, SMC_LLC_REQ);
-                wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
-        }
-}
-
 void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
 {
-        struct smc_ib_up_work *ib_work;
        struct smc_link_group *lgr, *n;
 
        list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
+               struct smc_link *link;
+
                if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
                            SMC_MAX_PNETID_LEN) ||
                    lgr->type == SMC_LGR_SYMMETRIC ||
                    lgr->type == SMC_LGR_ASYMMETRIC_PEER)
                        continue;
-               ib_work = kmalloc(sizeof(*ib_work), GFP_KERNEL);
-               if (!ib_work)
-                        continue;
-               INIT_WORK(&ib_work->work, smc_link_up_work);
-               ib_work->lgr = lgr;
-               ib_work->smcibdev = smcibdev;
-               ib_work->ibport = ibport;
-               schedule_work(&ib_work->work);
+
+               /* trigger local add link processing */
+               link = smc_llc_usable_link(lgr);
+               if (link)
+                        smc_llc_add_link_local(link);
        }
 }
@@ -1249,20 +1197,6 @@ void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport)
        }
 }
 
-static void smc_link_up_work(struct work_struct *work)
-{
-        struct smc_ib_up_work *ib_work = container_of(work,
-                                                      struct smc_ib_up_work,
-                                                      work);
-        struct smc_link_group *lgr = ib_work->lgr;
-
-        if (list_empty(&lgr->list))
-                goto out;
-        smcr_link_up(lgr, ib_work->smcibdev, ib_work->ibport);
-out:
-        kfree(ib_work);
-}
-
 static void smc_link_down_work(struct work_struct *work)
 {
        struct smc_link *link = container_of(work, struct smc_link,

net/smc/smc_llc.c

@@ -895,6 +895,36 @@ out:
        return rc;
 }
 
+/* as an SMC client, invite server to start the add_link processing */
+static void smc_llc_cli_add_link_invite(struct smc_link *link,
+                                        struct smc_llc_qentry *qentry)
+{
+        struct smc_link_group *lgr = smc_get_lgr(link);
+        struct smc_init_info ini;
+
+        if (lgr->type == SMC_LGR_SYMMETRIC ||
+            lgr->type == SMC_LGR_ASYMMETRIC_PEER)
+                goto out;
+
+        ini.vlan_id = lgr->vlan_id;
+        smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
+        if (!ini.ib_dev)
+                goto out;
+
+        smc_llc_send_add_link(link, ini.ib_dev->mac[ini.ib_port - 1],
+                              ini.ib_gid, NULL, SMC_LLC_REQ);
+out:
+        kfree(qentry);
+}
+
+static bool smc_llc_is_local_add_link(union smc_llc_msg *llc)
+{
+        if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK &&
+            !llc->add_link.qp_mtu && !llc->add_link.link_num)
+                return true;
+        return false;
+}
+
 static void smc_llc_process_cli_add_link(struct smc_link_group *lgr)
 {
        struct smc_llc_qentry *qentry;
@@ -902,7 +932,10 @@ static void smc_llc_process_cli_add_link(struct smc_link_group *lgr)
        qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
 
        mutex_lock(&lgr->llc_conf_mutex);
-       smc_llc_cli_add_link(qentry->link, qentry);
+       if (smc_llc_is_local_add_link(&qentry->msg))
+               smc_llc_cli_add_link_invite(qentry->link, qentry);
+       else
+               smc_llc_cli_add_link(qentry->link, qentry);
        mutex_unlock(&lgr->llc_conf_mutex);
 }
@@ -1160,14 +1193,14 @@ static void smc_llc_process_srv_add_link(struct smc_link_group *lgr)
        mutex_unlock(&lgr->llc_conf_mutex);
 }
 
-/* enqueue a local add_link req to trigger a new add_link flow, only as SERV */
-void smc_llc_srv_add_link_local(struct smc_link *link)
+/* enqueue a local add_link req to trigger a new add_link flow */
+void smc_llc_add_link_local(struct smc_link *link)
 {
        struct smc_llc_msg_add_link add_llc = {0};
 
        add_llc.hd.length = sizeof(add_llc);
        add_llc.hd.common.type = SMC_LLC_ADD_LINK;
-       /* no dev and port needed, we as server ignore client data anyway */
+       /* no dev and port needed */
        smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc);
 }
@@ -1347,7 +1380,7 @@ static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
 
        if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) {
                /* trigger setup of asymm alt link */
-               smc_llc_srv_add_link_local(lnk);
+               smc_llc_add_link_local(lnk);
        }
 out:
        mutex_unlock(&lgr->llc_conf_mutex);
@@ -1476,7 +1509,18 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
                if (list_empty(&lgr->list))
                        goto out;       /* lgr is terminating */
                if (lgr->role == SMC_CLNT) {
-                       if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK) {
+                       if (smc_llc_is_local_add_link(llc)) {
+                               if (lgr->llc_flow_lcl.type ==
+                                   SMC_LLC_FLOW_ADD_LINK)
+                                       break;  /* add_link in progress */
+                               if (smc_llc_flow_start(&lgr->llc_flow_lcl,
+                                                      qentry)) {
+                                       schedule_work(&lgr->llc_add_link_work);
+                               }
+                               return;
+                       }
+                       if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
+                           !lgr->llc_flow_lcl.qentry) {
                                /* a flow is waiting for this message */
                                smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
                                                        qentry);
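
The event handler hunk above is where the flow control named in the commit
message lives: a local invitation is dropped while an add_link flow is
already running, and a peer message is handed to a waiting flow only if no
request is assigned to it yet. A simplified model of that dispatch
decision follows; all types and helpers here are stand-ins for the
kernel's flow structures, not the real API:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum flow_type { FLOW_NONE, FLOW_ADD_LINK };

/* stand-in for struct smc_llc_flow: the active flow type and the
 * request currently assigned to it */
struct llc_flow {
        enum flow_type type;
        void *qentry;
};

/* like smc_llc_flow_start(): claim the flow only if it is free */
static bool flow_start(struct llc_flow *flow, void *qentry)
{
        if (flow->type != FLOW_NONE)
                return false;
        flow->type = FLOW_ADD_LINK;
        flow->qentry = qentry;
        return true;
}

/* dispatch one client-side ADD_LINK event, mirroring the new logic */
static void dispatch_add_link(struct llc_flow *flow, void *qentry,
                              bool is_local)
{
        if (is_local) {
                if (flow->type == FLOW_ADD_LINK) {
                        puts("drop: add_link flow already running");
                        return;
                }
                if (flow_start(flow, qentry))
                        puts("flow started, add_link work scheduled");
                return;
        }
        /* peer message: only assign if the flow waits and is empty */
        if (flow->type == FLOW_ADD_LINK && !flow->qentry) {
                flow->qentry = qentry;
                puts("message assigned to waiting flow");
        }
}

int main(void)
{
        struct llc_flow flow = { FLOW_NONE, NULL };
        int req1, req2, peer;

        dispatch_add_link(&flow, &req1, true);  /* starts the flow */
        dispatch_add_link(&flow, &req2, true);  /* dropped: in progress */
        flow.qentry = NULL;                     /* worker consumed req1 */
        dispatch_add_link(&flow, &peer, false); /* assigned to flow */
        return 0;
}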

net/smc/smc_llc.h

@@ -103,7 +103,7 @@ void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord,
                                  u32 rsn);
 int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry);
 int smc_llc_srv_add_link(struct smc_link *link);
-void smc_llc_srv_add_link_local(struct smc_link *link);
+void smc_llc_add_link_local(struct smc_link *link);
 int smc_llc_init(void) __init;
 
 #endif /* SMC_LLC_H */