tcp: do not drop syn_recv on all icmp reports
Petr Novopashenniy reported that ICMP redirects on SYN_RECV sockets
were leading to RST.
This is of course incorrect.
Only a specific list of ICMP messages should be able to drop a SYN_RECV.
For instance, a REDIRECT on SYN_RECV shall be ignored, as we do
not hold a dst per SYN_RECV pseudo request.
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=111751
Fixes: 079096f103 ("tcp/dccp: install syn_recv requests into ehash table")
Reported-by: Petr Novopashenniy <pety@rusnet.ru>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 44c3d0c1c0
commit 9cf7490360
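In plain terms, the condition the IPv4 path now passes as the new "abort" argument of tcp_req_err() can be read as a small predicate over the ICMP type and code. The sketch below is a user-space illustration only, not kernel code: the helper name icmp_aborts_syn_recv() is invented for this example, and the ICMP constants come from glibc's <netinet/ip_icmp.h> rather than the kernel's <linux/icmp.h>.

#include <stdio.h>
#include <stdbool.h>
#include <netinet/ip_icmp.h>   /* ICMP_DEST_UNREACH, ICMP_REDIRECT, ... */

/* Hypothetical helper mirroring the condition tcp_v4_err() now passes as
 * the "abort" argument of tcp_req_err(): only parameter problems, TTL
 * expiry and net/host unreachable may drop a SYN_RECV request socket. */
static bool icmp_aborts_syn_recv(int type, int code)
{
    return type == ICMP_PARAMETERPROB ||
           type == ICMP_TIME_EXCEEDED ||
           (type == ICMP_DEST_UNREACH &&
            (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH));
}

int main(void)
{
    /* A redirect, as in the report, is ignored by the predicate... */
    printf("REDIRECT      -> %s\n",
           icmp_aborts_syn_recv(ICMP_REDIRECT, ICMP_REDIR_HOST) ? "abort" : "ignore");
    /* ...while a hard unreachable error still drops the request. */
    printf("HOST_UNREACH  -> %s\n",
           icmp_aborts_syn_recv(ICMP_DEST_UNREACH, ICMP_HOST_UNREACH) ? "abort" : "ignore");
    printf("PORT_UNREACH  -> %s\n",
           icmp_aborts_syn_recv(ICMP_DEST_UNREACH, ICMP_PORT_UNREACH) ? "abort" : "ignore");
    return 0;
}

The IPv6 path does not open-code a similar list: the patch instead moves icmpv6_err_convert() ahead of the TCP_NEW_SYN_RECV check and reuses its "fatal" result as the abort flag, which is why the later standalone call in tcp_v6_err() is removed.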
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -447,7 +447,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 
 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 void tcp_v4_mtu_reduced(struct sock *sk);
-void tcp_req_err(struct sock *sk, u32 seq);
+void tcp_req_err(struct sock *sk, u32 seq, bool abort);
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_create_openreq_child(const struct sock *sk,
                                       struct request_sock *req,
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -311,7 +311,7 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk)
 
 
 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
-void tcp_req_err(struct sock *sk, u32 seq)
+void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 {
         struct request_sock *req = inet_reqsk(sk);
         struct net *net = sock_net(sk);
@@ -323,7 +323,7 @@ void tcp_req_err(struct sock *sk, u32 seq)
 
         if (seq != tcp_rsk(req)->snt_isn) {
                 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-        } else {
+        } else if (abort) {
                 /*
                  * Still in SYN_RECV, just remove it silently.
                  * There is no good way to pass the error to the newly
@@ -383,7 +383,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
         }
         seq = ntohl(th->seq);
         if (sk->sk_state == TCP_NEW_SYN_RECV)
-                return tcp_req_err(sk, seq);
+                return tcp_req_err(sk, seq,
+                                   type == ICMP_PARAMETERPROB ||
+                                   type == ICMP_TIME_EXCEEDED ||
+                                   (type == ICMP_DEST_UNREACH &&
+                                    (code == ICMP_NET_UNREACH ||
+                                     code == ICMP_HOST_UNREACH)));
 
         bh_lock_sock(sk);
         /* If too many ICMPs get dropped on busy
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -327,6 +327,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
         struct tcp_sock *tp;
         __u32 seq, snd_una;
         struct sock *sk;
+        bool fatal;
         int err;
 
         sk = __inet6_lookup_established(net, &tcp_hashinfo,
@@ -345,8 +346,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                 return;
         }
         seq = ntohl(th->seq);
+        fatal = icmpv6_err_convert(type, code, &err);
         if (sk->sk_state == TCP_NEW_SYN_RECV)
-                return tcp_req_err(sk, seq);
+                return tcp_req_err(sk, seq, fatal);
 
         bh_lock_sock(sk);
         if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
@@ -400,7 +402,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                 goto out;
         }
 
-        icmpv6_err_convert(type, code, &err);
 
         /* Might be for an request_sock */
         switch (sk->sk_state) {