forked from Minki/linux
tcp: increment sk_drops for listeners
Goal: packets dropped by a listener are accounted for. This adds a tcp_listendrop() helper, and clears sk_drops in sk_clone_lock() so that children do not inherit their parent's drop count. Note that we no longer increment the LINUX_MIB_LISTENDROPS counter when sending a SYNCOOKIE, since the SYN packet generated a SYNACK; we already have a separate LINUX_MIB_SYNCOOKIESSENT counter for that case. Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
532182cd61
commit
9caad86415
@ -1836,4 +1836,17 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
|
||||
tp->data_segs_in += segs_in;
|
||||
}
|
||||
|
||||
/*
|
||||
* TCP listen path runs lockless.
|
||||
* We forced "struct sock" to be const qualified to make sure
|
||||
* we don't modify one of its field by mistake.
|
||||
* Here, we increment sk_drops which is an atomic_t, so we can safely
|
||||
* make sock writable again.
|
||||
*/
|
||||
static inline void tcp_listendrop(const struct sock *sk)
|
||||
{
|
||||
atomic_inc(&((struct sock *)sk)->sk_drops);
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
|
||||
}
|
||||
|
||||
#endif /* _TCP_H */
|
||||
|
@ -1525,6 +1525,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
|
||||
newsk->sk_dst_cache = NULL;
|
||||
newsk->sk_wmem_queued = 0;
|
||||
newsk->sk_forward_alloc = 0;
|
||||
atomic_set(&newsk->sk_drops, 0);
|
||||
newsk->sk_send_head = NULL;
|
||||
newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
|
||||
|
||||
|
@ -6339,8 +6339,10 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
|
||||
inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
|
||||
af_ops->send_synack(sk, dst, &fl, req,
|
||||
&foc, !want_cookie);
|
||||
if (want_cookie)
|
||||
goto drop_and_free;
|
||||
if (want_cookie) {
|
||||
reqsk_free(req);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
reqsk_put(req);
|
||||
return 0;
|
||||
@ -6350,7 +6352,7 @@ drop_and_release:
|
||||
drop_and_free:
|
||||
reqsk_free(req);
|
||||
drop:
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
|
||||
tcp_listendrop(sk);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(tcp_conn_request);
|
||||
|
@ -329,7 +329,7 @@ void tcp_req_err(struct sock *sk, u32 seq, bool abort)
|
||||
* errors returned from accept().
|
||||
*/
|
||||
inet_csk_reqsk_queue_drop(req->rsk_listener, req);
|
||||
NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
|
||||
tcp_listendrop(req->rsk_listener);
|
||||
}
|
||||
reqsk_put(req);
|
||||
}
|
||||
@ -1246,7 +1246,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
|
||||
&tcp_request_sock_ipv4_ops, sk, skb);
|
||||
|
||||
drop:
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
|
||||
tcp_listendrop(sk);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(tcp_v4_conn_request);
|
||||
@ -1348,7 +1348,7 @@ exit_overflow:
|
||||
exit_nonewsk:
|
||||
dst_release(dst);
|
||||
exit:
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
|
||||
tcp_listendrop(sk);
|
||||
return NULL;
|
||||
put_and_exit:
|
||||
inet_csk_prepare_forced_close(newsk);
|
||||
|
@ -964,7 +964,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
|
||||
&tcp_request_sock_ipv6_ops, sk, skb);
|
||||
|
||||
drop:
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
|
||||
tcp_listendrop(sk);
|
||||
return 0; /* don't send reset */
|
||||
}
|
||||
|
||||
@ -1169,7 +1169,7 @@ out_overflow:
|
||||
out_nonewsk:
|
||||
dst_release(dst);
|
||||
out:
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
|
||||
tcp_listendrop(sk);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user