tipc: remove implicit message delivery in node_unlock()

After the most recent changes, all calls accessing a link which
may entail addition of messages to the link's input queue are
followed by an explicit call to tipc_sk_rcv(), using a reference
to the correct queue.

This means that the potentially hazardous implicit delivery, using
tipc_node_unlock() in combination with a binary flag and a cached
queue pointer, now has become redundant.

This commit removes this implicit delivery mechanism both for regular
data messages and for binding table update messages.

Tested-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Jon Paul Maloy 2015-07-30 18:24:24 -04:00 committed by David S. Miller
parent 598411d70f
commit 23d8335d78
4 changed files with 10 additions and 63 deletions

View File

@ -559,8 +559,6 @@ void link_prepare_wakeup(struct tipc_link *l)
break; break;
skb_unlink(skb, &l->wakeupq); skb_unlink(skb, &l->wakeupq);
skb_queue_tail(l->inputq, skb); skb_queue_tail(l->inputq, skb);
l->owner->inputq = l->inputq;
l->owner->action_flags |= TIPC_MSG_EVT;
} }
} }
@ -598,8 +596,6 @@ void tipc_link_purge_queues(struct tipc_link *l_ptr)
void tipc_link_reset(struct tipc_link *l) void tipc_link_reset(struct tipc_link *l)
{ {
struct tipc_node *owner = l->owner;
tipc_link_fsm_evt(l, LINK_RESET_EVT); tipc_link_fsm_evt(l, LINK_RESET_EVT);
/* Link is down, accept any session */ /* Link is down, accept any session */
@ -611,14 +607,10 @@ void tipc_link_reset(struct tipc_link *l)
/* Prepare for renewed mtu size negotiation */ /* Prepare for renewed mtu size negotiation */
l->mtu = l->advertised_mtu; l->mtu = l->advertised_mtu;
/* Clean up all queues, except inputq: */ /* Clean up all queues: */
__skb_queue_purge(&l->transmq); __skb_queue_purge(&l->transmq);
__skb_queue_purge(&l->deferdq); __skb_queue_purge(&l->deferdq);
if (!owner->inputq) skb_queue_splice_init(&l->wakeupq, l->inputq);
owner->inputq = l->inputq;
skb_queue_splice_init(&l->wakeupq, owner->inputq);
if (!skb_queue_empty(owner->inputq))
owner->action_flags |= TIPC_MSG_EVT;
tipc_link_purge_backlog(l); tipc_link_purge_backlog(l);
kfree_skb(l->reasm_buf); kfree_skb(l->reasm_buf);
@ -972,7 +964,6 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
{ {
struct tipc_node *node = link->owner; struct tipc_node *node = link->owner;
struct tipc_msg *msg = buf_msg(skb); struct tipc_msg *msg = buf_msg(skb);
u32 dport = msg_destport(msg);
switch (msg_user(msg)) { switch (msg_user(msg)) {
case TIPC_LOW_IMPORTANCE: case TIPC_LOW_IMPORTANCE:
@ -980,17 +971,11 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
case TIPC_HIGH_IMPORTANCE: case TIPC_HIGH_IMPORTANCE:
case TIPC_CRITICAL_IMPORTANCE: case TIPC_CRITICAL_IMPORTANCE:
case CONN_MANAGER: case CONN_MANAGER:
if (tipc_skb_queue_tail(link->inputq, skb, dport)) { skb_queue_tail(link->inputq, skb);
node->inputq = link->inputq;
node->action_flags |= TIPC_MSG_EVT;
}
return true; return true;
case NAME_DISTRIBUTOR: case NAME_DISTRIBUTOR:
node->bclink.recv_permitted = true; node->bclink.recv_permitted = true;
node->namedq = link->namedq;
skb_queue_tail(link->namedq, skb); skb_queue_tail(link->namedq, skb);
if (skb_queue_len(link->namedq) == 1)
node->action_flags |= TIPC_NAMED_MSG_EVT;
return true; return true;
case MSG_BUNDLER: case MSG_BUNDLER:
case TUNNEL_PROTOCOL: case TUNNEL_PROTOCOL:

View File

@ -862,28 +862,6 @@ static inline struct sk_buff *tipc_skb_dequeue(struct sk_buff_head *list,
return skb; return skb;
} }
/* tipc_skb_queue_tail(): add buffer to tail of list;
* @list: list to be appended to
* @skb: buffer to append. Always appended
* @dport: the destination port of the buffer
* returns true if dport differs from previous destination
*/
static inline bool tipc_skb_queue_tail(struct sk_buff_head *list,
struct sk_buff *skb, u32 dport)
{
struct sk_buff *_skb = NULL;
bool rv = false;
spin_lock_bh(&list->lock);
_skb = skb_peek_tail(list);
if (!_skb || (msg_destport(buf_msg(_skb)) != dport) ||
(skb_queue_len(list) > 32))
rv = true;
__skb_queue_tail(list, skb);
spin_unlock_bh(&list->lock);
return rv;
}
/* tipc_skb_queue_sorted(); sort pkt into list according to sequence number /* tipc_skb_queue_sorted(); sort pkt into list according to sequence number
* @list: list to be appended to * @list: list to be appended to
* @skb: buffer to add * @skb: buffer to add

View File

@ -873,10 +873,8 @@ static void node_lost_contact(struct tipc_node *n_ptr,
SHORT_H_SIZE, 0, tn->own_addr, SHORT_H_SIZE, 0, tn->own_addr,
conn->peer_node, conn->port, conn->peer_node, conn->port,
conn->peer_port, TIPC_ERR_NO_NODE); conn->peer_port, TIPC_ERR_NO_NODE);
if (likely(skb)) { if (likely(skb))
skb_queue_tail(inputq, skb); skb_queue_tail(inputq, skb);
n_ptr->action_flags |= TIPC_MSG_EVT;
}
list_del(&conn->list); list_del(&conn->list);
kfree(conn); kfree(conn);
} }
@ -923,27 +921,20 @@ void tipc_node_unlock(struct tipc_node *node)
u32 flags = node->action_flags; u32 flags = node->action_flags;
u32 link_id = 0; u32 link_id = 0;
struct list_head *publ_list; struct list_head *publ_list;
struct sk_buff_head *inputq = node->inputq;
struct sk_buff_head *namedq;
if (likely(!flags || (flags == TIPC_MSG_EVT))) { if (likely(!flags)) {
node->action_flags = 0;
spin_unlock_bh(&node->lock); spin_unlock_bh(&node->lock);
if (flags == TIPC_MSG_EVT)
tipc_sk_rcv(net, inputq);
return; return;
} }
addr = node->addr; addr = node->addr;
link_id = node->link_id; link_id = node->link_id;
namedq = node->namedq;
publ_list = &node->publ_list; publ_list = &node->publ_list;
node->action_flags &= ~(TIPC_MSG_EVT | node->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP | TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT | TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET); TIPC_BCAST_RESET);
spin_unlock_bh(&node->lock); spin_unlock_bh(&node->lock);
@ -964,12 +955,6 @@ void tipc_node_unlock(struct tipc_node *node)
tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr, tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
link_id, addr); link_id, addr);
if (flags & TIPC_MSG_EVT)
tipc_sk_rcv(net, inputq);
if (flags & TIPC_NAMED_MSG_EVT)
tipc_named_rcv(net, namedq);
if (flags & TIPC_BCAST_MSG_EVT) if (flags & TIPC_BCAST_MSG_EVT)
tipc_bclink_input(net); tipc_bclink_input(net);
@ -1270,6 +1255,9 @@ unlock:
if (unlikely(rc & TIPC_LINK_DOWN_EVT)) if (unlikely(rc & TIPC_LINK_DOWN_EVT))
tipc_node_link_down(n, bearer_id, false); tipc_node_link_down(n, bearer_id, false);
if (unlikely(!skb_queue_empty(&n->bclink.namedq)))
tipc_named_rcv(net, &n->bclink.namedq);
if (!skb_queue_empty(&le->inputq)) if (!skb_queue_empty(&le->inputq))
tipc_sk_rcv(net, &le->inputq); tipc_sk_rcv(net, &le->inputq);

View File

@ -53,13 +53,11 @@
* TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
*/ */
enum { enum {
TIPC_MSG_EVT = 1,
TIPC_NOTIFY_NODE_DOWN = (1 << 3), TIPC_NOTIFY_NODE_DOWN = (1 << 3),
TIPC_NOTIFY_NODE_UP = (1 << 4), TIPC_NOTIFY_NODE_UP = (1 << 4),
TIPC_WAKEUP_BCAST_USERS = (1 << 5), TIPC_WAKEUP_BCAST_USERS = (1 << 5),
TIPC_NOTIFY_LINK_UP = (1 << 6), TIPC_NOTIFY_LINK_UP = (1 << 6),
TIPC_NOTIFY_LINK_DOWN = (1 << 7), TIPC_NOTIFY_LINK_DOWN = (1 << 7),
TIPC_NAMED_MSG_EVT = (1 << 8),
TIPC_BCAST_MSG_EVT = (1 << 9), TIPC_BCAST_MSG_EVT = (1 << 9),
TIPC_BCAST_RESET = (1 << 10) TIPC_BCAST_RESET = (1 << 10)
}; };
@ -124,8 +122,6 @@ struct tipc_node {
spinlock_t lock; spinlock_t lock;
struct net *net; struct net *net;
struct hlist_node hash; struct hlist_node hash;
struct sk_buff_head *inputq;
struct sk_buff_head *namedq;
int active_links[2]; int active_links[2];
struct tipc_link_entry links[MAX_BEARERS]; struct tipc_link_entry links[MAX_BEARERS];
int action_flags; int action_flags;