tcp: remove false sharing in tcp_rcv_state_process()
Last known hot point during SYNFLOOD attack is the clearing of
rx_opt.saw_tstamp in tcp_rcv_state_process().

It is not needed for a listener, so we move it where it matters.

Performance while a SYNFLOOD hits a single listener socket went from
5 Mpps to 6 Mpps on my test server (24 cores, 8 NIC RX queues).

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 8804b2722d
parent b3d051477c
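For readers who want to see the idea outside the kernel, here is a minimal userspace sketch of the pattern the patch applies. It is not kernel code: fake_sock, rcv_state_process_old/new and the flood threads are invented stand-ins for the listener socket, tcp_rcv_state_process() and per-CPU SYN processing. The point is only that an unconditional store to a field of one shared, read-mostly object turns the hot listener path into cache-line ping-pong, while moving the store to the states that actually consume the flag keeps that path read-only.

/*
 * Userspace sketch only -- every name below is invented for illustration.
 * Build with: gcc -O0 -pthread sketch.c   (higher optimization levels may
 * fold the loops away, since this is a toy, not a benchmark harness).
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct fake_sock {
	int state;        /* read-mostly, like sk->sk_state             */
	int saw_tstamp;   /* stand-in for tp->rx_opt.saw_tstamp         */
};

enum { FAKE_LISTEN, FAKE_SYN_SENT };

static struct fake_sock listener = { .state = FAKE_LISTEN };
static int use_old;

/* Before: every segment dirties the shared cache line, even for a listener. */
static void rcv_state_process_old(struct fake_sock *sk)
{
	sk->saw_tstamp = 0;              /* unconditional store: false sharing */
	if (sk->state == FAKE_LISTEN)
		return;                  /* the listener path never used it    */
	/* ... other states would parse timestamp options here ... */
}

/* After: the store sits next to the states that actually read the flag. */
static void rcv_state_process_new(struct fake_sock *sk)
{
	if (sk->state == FAKE_LISTEN)
		return;                  /* pure read on the hot listener path */
	sk->saw_tstamp = 0;
	/* ... other states would parse timestamp options here ... */
}

/* Hammer the single shared "listener" from several threads, SYNFLOOD-style. */
static void *flood(void *arg)
{
	(void)arg;
	for (long i = 0; i < 10000000L; i++) {
		if (use_old)
			rcv_state_process_old(&listener);
		else
			rcv_state_process_new(&listener);
	}
	return NULL;
}

int main(int argc, char **argv)
{
	pthread_t t[8];

	/* "./a.out old" exercises the pre-patch shape, anything else the new one */
	use_old = (argc > 1 && strcmp(argv[1], "old") == 0);

	for (int i = 0; i < 8; i++)
		pthread_create(&t[i], NULL, flood, NULL);
	for (int i = 0; i < 8; i++)
		pthread_join(t[i], NULL);

	printf("state=%d saw_tstamp=%d\n", listener.state, listener.saw_tstamp);
	return 0;
}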
net/ipv4/tcp_input.c

@@ -5796,8 +5796,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 	int queued = 0;
 	bool acceptable;
 
-	tp->rx_opt.saw_tstamp = 0;
-
 	switch (sk->sk_state) {
 	case TCP_CLOSE:
 		goto discard;
@@ -5838,6 +5836,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		goto discard;
 
 	case TCP_SYN_SENT:
+		tp->rx_opt.saw_tstamp = 0;
 		queued = tcp_rcv_synsent_state_process(sk, skb, th);
 		if (queued >= 0)
 			return queued;
@@ -5849,6 +5848,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		return 0;
 	}
 
+	tp->rx_opt.saw_tstamp = 0;
 	req = tp->fastopen_rsk;
 	if (req) {
 		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&