forked from Minki/linux
tcp: adjust rcv_ssthresh according to sk_reserved_mem
When the user sets the SO_RESERVE_MEM socket option, in order to utilize the reserved memory when in a memory pressure state, we adjust rcv_ssthresh according to the available reserved memory for the socket, instead of always using 4 * advmss.

Signed-off-by: Wei Wang <weiwan@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
ca057051cf
commit
053f368412
@ -1421,6 +1421,17 @@ static inline int tcp_full_space(const struct sock *sk)
|
|||||||
return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
|
return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
|
||||||
|
{
|
||||||
|
int unused_mem = sk_unused_reserved_mem(sk);
|
||||||
|
struct tcp_sock *tp = tcp_sk(sk);
|
||||||
|
|
||||||
|
tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
|
||||||
|
if (unused_mem)
|
||||||
|
tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
|
||||||
|
tcp_win_from_space(sk, unused_mem));
|
||||||
|
}
|
||||||
|
|
||||||
void tcp_cleanup_rbuf(struct sock *sk, int copied);
|
void tcp_cleanup_rbuf(struct sock *sk, int copied);
|
||||||
|
|
||||||
/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
|
/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
|
||||||
|
@ -500,8 +500,11 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
|
|||||||
|
|
||||||
room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
|
room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
|
||||||
|
|
||||||
|
if (room <= 0)
|
||||||
|
return;
|
||||||
|
|
||||||
/* Check #1 */
|
/* Check #1 */
|
||||||
if (room > 0 && !tcp_under_memory_pressure(sk)) {
|
if (!tcp_under_memory_pressure(sk)) {
|
||||||
unsigned int truesize = truesize_adjust(adjust, skb);
|
unsigned int truesize = truesize_adjust(adjust, skb);
|
||||||
int incr;
|
int incr;
|
||||||
|
|
||||||
@ -518,6 +521,11 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
|
|||||||
tp->rcv_ssthresh += min(room, incr);
|
tp->rcv_ssthresh += min(room, incr);
|
||||||
inet_csk(sk)->icsk_ack.quick |= 1;
|
inet_csk(sk)->icsk_ack.quick |= 1;
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
/* Under pressure:
|
||||||
|
* Adjust rcv_ssthresh according to reserved mem
|
||||||
|
*/
|
||||||
|
tcp_adjust_rcv_ssthresh(sk);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -5345,7 +5353,7 @@ static int tcp_prune_queue(struct sock *sk)
|
|||||||
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
|
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
|
||||||
tcp_clamp_window(sk);
|
tcp_clamp_window(sk);
|
||||||
else if (tcp_under_memory_pressure(sk))
|
else if (tcp_under_memory_pressure(sk))
|
||||||
tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
|
tcp_adjust_rcv_ssthresh(sk);
|
||||||
|
|
||||||
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
|
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -2967,8 +2967,7 @@ u32 __tcp_select_window(struct sock *sk)
|
|||||||
icsk->icsk_ack.quick = 0;
|
icsk->icsk_ack.quick = 0;
|
||||||
|
|
||||||
if (tcp_under_memory_pressure(sk))
|
if (tcp_under_memory_pressure(sk))
|
||||||
tp->rcv_ssthresh = min(tp->rcv_ssthresh,
|
tcp_adjust_rcv_ssthresh(sk);
|
||||||
4U * tp->advmss);
|
|
||||||
|
|
||||||
/* free_space might become our new window, make sure we don't
|
/* free_space might become our new window, make sure we don't
|
||||||
* increase it due to wscale.
|
* increase it due to wscale.
|
||||||
|
Loading…
Reference in New Issue
Block a user