Merge git://github.com/davem330/net
* git://github.com/davem330/net:
  net: fix typos in Documentation/networking/scaling.txt
  bridge: leave carrier on for empty bridge
  netfilter: Use proper rwlock init function
  tcp: properly update lost_cnt_hint during shifting
  tcp: properly handle md5sig_pool references
  macvlan/macvtap: Fix unicast between macvtap interfaces in bridge mode
commit 3ee72ca992
@@ -27,7 +27,7 @@ applying a filter to each packet that assigns it to one of a small number
 of logical flows. Packets for each flow are steered to a separate receive
 queue, which in turn can be processed by separate CPUs. This mechanism is
 generally known as “Receive-side Scaling” (RSS). The goal of RSS and
-the other scaling techniques to increase performance uniformly.
+the other scaling techniques is to increase performance uniformly.
 Multi-queue distribution can also be used for traffic prioritization, but
 that is not the focus of these techniques.
 
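The scaling.txt text above describes RSS as hashing a packet's flow tuple and mapping the hash through an indirection table to a receive queue. As a rough standalone illustration of that mapping (plain userspace C, with invented names such as toy_flow_hash and indir; this is not kernel code), the idea is:

#include <stdint.h>
#include <stdio.h>

#define RSS_TABLE_SIZE 128              /* entries in the indirection table */
#define NUM_RX_QUEUES  4                /* receive queues (one per CPU, say) */

/* Toy stand-in for the hash a NIC computes over the flow tuple. */
static uint32_t toy_flow_hash(uint32_t saddr, uint32_t daddr,
                              uint16_t sport, uint16_t dport)
{
        uint32_t h = saddr ^ daddr ^ (((uint32_t)sport << 16) | dport);

        h ^= h >> 16;
        h *= 0x45d9f3bu;
        h ^= h >> 16;
        return h;
}

int main(void)
{
        uint8_t indir[RSS_TABLE_SIZE];
        int i;

        /* Indirection table: low-order hash bits index into it and the
         * entry names the receive queue; filling it evenly is what
         * spreads flows across queues/CPUs. */
        for (i = 0; i < RSS_TABLE_SIZE; i++)
                indir[i] = i % NUM_RX_QUEUES;

        uint32_t hash = toy_flow_hash(0x0a000001, 0x0a000002, 40000, 80);
        unsigned int queue = indir[hash % RSS_TABLE_SIZE];

        printf("flow hash %08x -> rx queue %u\n", (unsigned int)hash, queue);
        return 0;
}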
@@ -186,10 +186,10 @@ are steered using plain RPS. Multiple table entries may point to the
 same CPU. Indeed, with many flows and few CPUs, it is very likely that
 a single application thread handles flows with many different flow hashes.
 
-rps_sock_table is a global flow table that contains the *desired* CPU for
-flows: the CPU that is currently processing the flow in userspace. Each
-table value is a CPU index that is updated during calls to recvmsg and
-sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
+rps_sock_flow_table is a global flow table that contains the *desired* CPU
+for flows: the CPU that is currently processing the flow in userspace.
+Each table value is a CPU index that is updated during calls to recvmsg
+and sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
 and tcp_splice_read()).
 
 When the scheduler moves a thread to a new CPU while it has outstanding
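The corrected paragraph describes rps_sock_flow_table as a hash-indexed table whose entries name the CPU where a flow's owning thread last ran, written from the recvmsg/sendmsg paths and consulted on receive. A minimal userspace model of that bookkeeping (made-up names, with sched_getcpu() standing in for the kernel's notion of the current CPU, and ignoring the out-of-order safeguards the document covers later) could look like:

#define _GNU_SOURCE
#include <sched.h>
#include <stdint.h>
#include <stdio.h>

#define FLOW_TABLE_SIZE 4096            /* power of two, like the real table */

/* Toy model of rps_sock_flow_table: ents[hash & mask] remembers the CPU
 * that last handled the flow in userspace -- the "desired" CPU. */
static int flow_ents[FLOW_TABLE_SIZE];

/* Roughly what the recvmsg()/sendmsg() paths do: note which CPU the
 * application thread is running on for this flow hash. */
static void record_flow(uint32_t flow_hash)
{
        flow_ents[flow_hash & (FLOW_TABLE_SIZE - 1)] = sched_getcpu();
}

/* Roughly what the receive path does: look up where the flow "wants" to
 * be processed (the real code adds per-queue rps_dev_flow tables to
 * avoid out-of-order delivery). */
static int desired_cpu(uint32_t flow_hash)
{
        return flow_ents[flow_hash & (FLOW_TABLE_SIZE - 1)];
}

int main(void)
{
        uint32_t hash = 0xdeadbeefu;

        record_flow(hash);              /* as if the app just called recvmsg() */
        printf("flow %08x wants CPU %d\n", (unsigned int)hash, desired_cpu(hash));
        return 0;
}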
@@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
         dest = macvlan_hash_lookup(port, eth->h_dest);
         if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
             /* send to lowerdev first for its network taps */
-            vlan->forward(vlan->lowerdev, skb);
+            dev_forward_skb(vlan->lowerdev, skb);
 
             return NET_XMIT_SUCCESS;
         }
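The changed line hands the frame to the lower device's receive path via dev_forward_skb() instead of going through the macvlan forward op; per the merged fix's subject, that is what makes unicast between macvtap interfaces work in bridge mode, and it also lets packet taps on the lower device see the frame. A small illustrative helper around the same API (hypothetical, not taken from macvlan.c):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper: hand 'peer' a copy of skb through its normal
 * receive path. dev_forward_skb() takes ownership of the skb, resets it
 * as if it had just arrived on 'peer' and feeds it to netif_rx(), so
 * taps (e.g. tcpdump) on 'peer' observe the frame. */
static int toy_forward_copy(struct net_device *peer, struct sk_buff *skb)
{
        struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

        if (!nskb)
                return NET_XMIT_DROP;

        return dev_forward_skb(peer, nskb) == NET_RX_SUCCESS ?
                NET_XMIT_SUCCESS : NET_XMIT_DROP;
}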
@@ -91,7 +91,6 @@ static int br_dev_open(struct net_device *dev)
 {
     struct net_bridge *br = netdev_priv(dev);
 
-    netif_carrier_off(dev);
     netdev_update_features(dev);
     netif_start_queue(dev);
     br_stp_enable_bridge(br);
@@ -108,8 +107,6 @@ static int br_dev_stop(struct net_device *dev)
 {
     struct net_bridge *br = netdev_priv(dev);
 
-    netif_carrier_off(dev);
-
     br_stp_disable_bridge(br);
     br_multicast_stop(br);
 
@@ -1389,9 +1389,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 
     BUG_ON(!pcount);
 
-    /* Tweak before seqno plays */
-    if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint &&
-        !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
+    if (skb == tp->lost_skb_hint)
         tp->lost_cnt_hint += pcount;
 
     TCP_SKB_CB(prev)->end_seq += shifted;
@@ -927,18 +927,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
             }
             sk_nocaps_add(sk, NETIF_F_GSO_MASK);
         }
-        if (tcp_alloc_md5sig_pool(sk) == NULL) {
+
+        md5sig = tp->md5sig_info;
+        if (md5sig->entries4 == 0 &&
+            tcp_alloc_md5sig_pool(sk) == NULL) {
             kfree(newkey);
             return -ENOMEM;
         }
-        md5sig = tp->md5sig_info;
 
         if (md5sig->alloced4 == md5sig->entries4) {
             keys = kmalloc((sizeof(*keys) *
                            (md5sig->entries4 + 1)), GFP_ATOMIC);
             if (!keys) {
                 kfree(newkey);
-                tcp_free_md5sig_pool();
+                if (md5sig->entries4 == 0)
+                    tcp_free_md5sig_pool();
                 return -ENOMEM;
             }
 
@@ -982,6 +985,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
                 kfree(tp->md5sig_info->keys4);
                 tp->md5sig_info->keys4 = NULL;
                 tp->md5sig_info->alloced4 = 0;
+                tcp_free_md5sig_pool();
             } else if (tp->md5sig_info->entries4 != i) {
                 /* Need to do some manipulation */
                 memmove(&tp->md5sig_info->keys4[i],
@@ -989,7 +993,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
                         (tp->md5sig_info->entries4 - i) *
                         sizeof(struct tcp4_md5sig_key));
             }
-            tcp_free_md5sig_pool();
             return 0;
         }
     }
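Taken together, the tcp_ipv4.c hunks rebalance the md5sig pool reference counting: a reference is taken only when the first key for a socket is added (and released on the failure path only if it was actually taken), and dropped only when the last key is deleted. A small standalone C model of that "reference on first entry, release on last" pattern, with made-up names (toy_sock, pool_get, and so on):

#include <assert.h>
#include <stdio.h>

/* Toy stand-in for the shared md5sig pool: just a use count. */
static int pool_users;

static int pool_get(void)
{
        pool_users++;
        return 0;
}

static void pool_put(void)
{
        assert(pool_users > 0);
        pool_users--;
}

struct toy_sock {
        int nkeys;      /* MD5 keys configured on this socket */
};

/* Adding a key: only the 0 -> 1 transition takes a pool reference, so a
 * socket pins the pool exactly once no matter how many keys it holds. */
static int toy_add_key(struct toy_sock *sk)
{
        if (sk->nkeys == 0 && pool_get() != 0)
                return -1;
        sk->nkeys++;
        return 0;
}

/* Deleting a key: only the 1 -> 0 transition drops the reference. */
static void toy_del_key(struct toy_sock *sk)
{
        assert(sk->nkeys > 0);
        if (--sk->nkeys == 0)
                pool_put();
}

int main(void)
{
        struct toy_sock sk = { 0 };

        toy_add_key(&sk);
        toy_add_key(&sk);       /* second key: no extra reference */
        toy_del_key(&sk);
        toy_del_key(&sk);       /* last key gone: reference released */
        printf("pool users after teardown: %d\n", pool_users);
        return 0;
}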
@@ -591,7 +591,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
             }
             sk_nocaps_add(sk, NETIF_F_GSO_MASK);
         }
-        if (tcp_alloc_md5sig_pool(sk) == NULL) {
+        if (tp->md5sig_info->entries6 == 0 &&
+            tcp_alloc_md5sig_pool(sk) == NULL) {
             kfree(newkey);
             return -ENOMEM;
         }
@@ -600,8 +601,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
                            (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
 
             if (!keys) {
-                tcp_free_md5sig_pool();
                 kfree(newkey);
+                if (tp->md5sig_info->entries6 == 0)
+                    tcp_free_md5sig_pool();
                 return -ENOMEM;
             }
 
@@ -647,6 +649,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
                 kfree(tp->md5sig_info->keys6);
                 tp->md5sig_info->keys6 = NULL;
                 tp->md5sig_info->alloced6 = 0;
+                tcp_free_md5sig_pool();
             } else {
                 /* shrink the database */
                 if (tp->md5sig_info->entries6 != i)
@@ -655,7 +658,6 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
                             (tp->md5sig_info->entries6 - i)
                             * sizeof (tp->md5sig_info->keys6[0]));
             }
-            tcp_free_md5sig_pool();
             return 0;
         }
     }
@@ -3679,7 +3679,7 @@ int __net_init ip_vs_control_net_init(struct net *net)
     int idx;
     struct netns_ipvs *ipvs = net_ipvs(net);
 
-    ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);
+    rwlock_init(&ipvs->rs_lock);
 
     /* Initialize rs_table */
     for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
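The ipvs change above replaces an assignment of the static initializer __RW_LOCK_UNLOCKED() with rwlock_init(), the interface intended for locks that are set up at runtime, for example inside dynamically allocated per-namespace state; among other things it registers a proper lockdep class for the lock. A minimal kernel-style sketch of the pattern (toy_ns_state and toy_ns_alloc are made-up names for illustration):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct toy_ns_state {
        rwlock_t lock;
};

static struct toy_ns_state *toy_ns_alloc(void)
{
        struct toy_ns_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

        /* rwlock_init() is the runtime initializer and sets the lock up
         * properly (including its lockdep class); __RW_LOCK_UNLOCKED()
         * is only meant for static definitions, so assigning it to a
         * dynamically allocated lock skips that setup. */
        if (st)
                rwlock_init(&st->lock);
        return st;
}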