forked from Minki/linux
tcp: remove Appropriate Byte Count support
TCP Appropriate Byte Count was added by me, but later disabled. There is no point in maintaining it since it is a potential source of bugs and Linux already implements other better window protection heuristics. Signed-off-by: Stephen Hemminger <stephen@networkplumber.org> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
547472b8e1
commit
ca2eb5679f
@@ -130,17 +130,6 @@ somaxconn - INTEGER
|
||||
Defaults to 128. See also tcp_max_syn_backlog for additional tuning
|
||||
for TCP sockets.
|
||||
|
||||
tcp_abc - INTEGER
|
||||
Controls Appropriate Byte Count (ABC) defined in RFC3465.
|
||||
ABC is a way of increasing congestion window (cwnd) more slowly
|
||||
in response to partial acknowledgments.
|
||||
Possible values are:
|
||||
0 increase cwnd once per acknowledgment (no ABC)
|
||||
1 increase cwnd once per acknowledgment of full sized segment
|
||||
2 allow increase cwnd by two if acknowledgment is
|
||||
of two segments to compensate for delayed acknowledgments.
|
||||
Default: 0 (off)
|
||||
|
||||
tcp_abort_on_overflow - BOOLEAN
|
||||
If listening service is too slow to accept new connections,
|
||||
reset them. Default state is FALSE. It means that if overflow
|
||||
|
@@ -246,7 +246,6 @@ struct tcp_sock {
|
||||
u32 sacked_out; /* SACK'd packets */
|
||||
u32 fackets_out; /* FACK'd packets */
|
||||
u32 tso_deferred;
|
||||
u32 bytes_acked; /* Appropriate Byte Counting - RFC3465 */
|
||||
|
||||
/* from STCP, retrans queue hinting */
|
||||
struct sk_buff* lost_skb_hint;
|
||||
|
@@ -279,7 +279,6 @@ extern int sysctl_tcp_dma_copybreak;
|
||||
extern int sysctl_tcp_nometrics_save;
|
||||
extern int sysctl_tcp_moderate_rcvbuf;
|
||||
extern int sysctl_tcp_tso_win_divisor;
|
||||
extern int sysctl_tcp_abc;
|
||||
extern int sysctl_tcp_mtu_probing;
|
||||
extern int sysctl_tcp_base_mss;
|
||||
extern int sysctl_tcp_workaround_signed_windows;
|
||||
|
@@ -387,7 +387,6 @@ static const struct bin_table bin_net_ipv4_table[] = {
|
||||
{ CTL_INT, NET_TCP_MODERATE_RCVBUF, "tcp_moderate_rcvbuf" },
|
||||
{ CTL_INT, NET_TCP_TSO_WIN_DIVISOR, "tcp_tso_win_divisor" },
|
||||
{ CTL_STR, NET_TCP_CONG_CONTROL, "tcp_congestion_control" },
|
||||
{ CTL_INT, NET_TCP_ABC, "tcp_abc" },
|
||||
{ CTL_INT, NET_TCP_MTU_PROBING, "tcp_mtu_probing" },
|
||||
{ CTL_INT, NET_TCP_BASE_MSS, "tcp_base_mss" },
|
||||
{ CTL_INT, NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, "tcp_workaround_signed_windows" },
|
||||
|
@@ -632,13 +632,6 @@ static struct ctl_table ipv4_table[] = {
|
||||
.maxlen = TCP_CA_NAME_MAX,
|
||||
.proc_handler = proc_tcp_congestion_control,
|
||||
},
|
||||
{
|
||||
.procname = "tcp_abc",
|
||||
.data = &sysctl_tcp_abc,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec,
|
||||
},
|
||||
{
|
||||
.procname = "tcp_mtu_probing",
|
||||
.data = &sysctl_tcp_mtu_probing,
|
||||
|
@@ -2289,7 +2289,6 @@ int tcp_disconnect(struct sock *sk, int flags)
|
||||
tp->packets_out = 0;
|
||||
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
|
||||
tp->snd_cwnd_cnt = 0;
|
||||
tp->bytes_acked = 0;
|
||||
tp->window_clamp = 0;
|
||||
tcp_set_ca_state(sk, TCP_CA_Open);
|
||||
tcp_clear_retrans(tp);
|
||||
|
@@ -317,28 +317,11 @@ void tcp_slow_start(struct tcp_sock *tp)
|
||||
snd_cwnd = 1U;
|
||||
}
|
||||
|
||||
/* RFC3465: ABC Slow start
|
||||
* Increase only after a full MSS of bytes is acked
|
||||
*
|
||||
* TCP sender SHOULD increase cwnd by the number of
|
||||
* previously unacknowledged bytes ACKed by each incoming
|
||||
* acknowledgment, provided the increase is not more than L
|
||||
*/
|
||||
if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
|
||||
return;
|
||||
|
||||
if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
|
||||
cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */
|
||||
else
|
||||
cnt = snd_cwnd; /* exponential increase */
|
||||
|
||||
/* RFC3465: ABC
|
||||
* We MAY increase by 2 if discovered delayed ack
|
||||
*/
|
||||
if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
|
||||
cnt <<= 1;
|
||||
tp->bytes_acked = 0;
|
||||
|
||||
tp->snd_cwnd_cnt += cnt;
|
||||
while (tp->snd_cwnd_cnt >= snd_cwnd) {
|
||||
tp->snd_cwnd_cnt -= snd_cwnd;
|
||||
@@ -378,20 +361,9 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
|
||||
/* In "safe" area, increase. */
|
||||
if (tp->snd_cwnd <= tp->snd_ssthresh)
|
||||
tcp_slow_start(tp);
|
||||
|
||||
/* In dangerous area, increase slowly. */
|
||||
else if (sysctl_tcp_abc) {
|
||||
/* RFC3465: Appropriate Byte Count
|
||||
* increase once for each full cwnd acked
|
||||
*/
|
||||
if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
|
||||
tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
|
||||
if (tp->snd_cwnd < tp->snd_cwnd_clamp)
|
||||
tp->snd_cwnd++;
|
||||
}
|
||||
} else {
|
||||
else
|
||||
tcp_cong_avoid_ai(tp, tp->snd_cwnd);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
|
||||
|
||||
|
@@ -98,7 +98,6 @@ int sysctl_tcp_frto_response __read_mostly;
|
||||
int sysctl_tcp_thin_dupack __read_mostly;
|
||||
|
||||
int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
|
||||
int sysctl_tcp_abc __read_mostly;
|
||||
int sysctl_tcp_early_retrans __read_mostly = 2;
|
||||
|
||||
#define FLAG_DATA 0x01 /* Incoming frame contained data. */
|
||||
@@ -2007,7 +2006,6 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
|
||||
tp->snd_cwnd_cnt = 0;
|
||||
tp->snd_cwnd_stamp = tcp_time_stamp;
|
||||
tp->frto_counter = 0;
|
||||
tp->bytes_acked = 0;
|
||||
|
||||
tp->reordering = min_t(unsigned int, tp->reordering,
|
||||
sysctl_tcp_reordering);
|
||||
@@ -2056,7 +2054,6 @@ void tcp_enter_loss(struct sock *sk, int how)
|
||||
tp->snd_cwnd_cnt = 0;
|
||||
tp->snd_cwnd_stamp = tcp_time_stamp;
|
||||
|
||||
tp->bytes_acked = 0;
|
||||
tcp_clear_retrans_partial(tp);
|
||||
|
||||
if (tcp_is_reno(tp))
|
||||
@@ -2684,7 +2681,6 @@ static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
|
||||
tp->high_seq = tp->snd_nxt;
|
||||
tp->bytes_acked = 0;
|
||||
tp->snd_cwnd_cnt = 0;
|
||||
tp->prior_cwnd = tp->snd_cwnd;
|
||||
tp->prr_delivered = 0;
|
||||
@@ -2735,7 +2731,6 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
|
||||
tp->prior_ssthresh = 0;
|
||||
tp->bytes_acked = 0;
|
||||
if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
|
||||
tp->undo_marker = 0;
|
||||
tcp_init_cwnd_reduction(sk, set_ssthresh);
|
||||
@@ -3417,7 +3412,6 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
|
||||
{
|
||||
tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
|
||||
tp->snd_cwnd_cnt = 0;
|
||||
tp->bytes_acked = 0;
|
||||
TCP_ECN_queue_cwr(tp);
|
||||
tcp_moderate_cwnd(tp);
|
||||
}
|
||||
@@ -3609,15 +3603,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
|
||||
if (after(ack, prior_snd_una))
|
||||
flag |= FLAG_SND_UNA_ADVANCED;
|
||||
|
||||
if (sysctl_tcp_abc) {
|
||||
if (icsk->icsk_ca_state < TCP_CA_CWR)
|
||||
tp->bytes_acked += ack - prior_snd_una;
|
||||
else if (icsk->icsk_ca_state == TCP_CA_Loss)
|
||||
/* we assume just one segment left network */
|
||||
tp->bytes_acked += min(ack - prior_snd_una,
|
||||
tp->mss_cache);
|
||||
}
|
||||
|
||||
prior_fackets = tp->fackets_out;
|
||||
prior_in_flight = tcp_packets_in_flight(tp);
|
||||
|
||||
|
@@ -446,7 +446,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
|
||||
*/
|
||||
newtp->snd_cwnd = TCP_INIT_CWND;
|
||||
newtp->snd_cwnd_cnt = 0;
|
||||
newtp->bytes_acked = 0;
|
||||
|
||||
newtp->frto_counter = 0;
|
||||
newtp->frto_highmark = 0;
|
||||
|
Loading…
Reference in New Issue
Block a user