
tcp: fix cwnd undo in Reno and HTCP congestion controls

Using ssthresh to revert cwnd is less reliable when ssthresh is
bounded to 2 packets. This patch instead uses the existing TCP
variable "prior_cwnd", which snapshots the cwnd right before entering
fast recovery and RTO recovery in Reno.  This fixes the issue discussed
in netdev thread: "A buggy behavior for Linux TCP Reno and HTCP"
https://www.spinics.net/lists/netdev/msg444955.html
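
As a hedged illustration (not part of the patch; standalone user-space C
with hypothetical values): once ssthresh has been clamped down to the
2-packet floor, the old max(snd_cwnd, snd_ssthresh << 1) undo can only
restore a 4-packet window, while the prior_cwnd snapshot restores the
full pre-loss cwnd.

/*
 * Standalone sketch, not kernel code: contrasts the removed
 * ssthresh-based undo with the prior_cwnd-based undo.  All state
 * values below are made up for the example.
 */
#include <stdio.h>

typedef unsigned int u32;

static u32 max_u32(u32 a, u32 b) { return a > b ? a : b; }

/* Old Reno undo: max(snd_cwnd, snd_ssthresh << 1).  Loses information
 * once ssthresh has been bounded down to 2 packets. */
static u32 undo_via_ssthresh(u32 snd_cwnd, u32 snd_ssthresh)
{
	return max_u32(snd_cwnd, snd_ssthresh << 1);
}

/* New Reno undo: max(snd_cwnd, prior_cwnd), where prior_cwnd was
 * snapshotted right before entering loss recovery. */
static u32 undo_via_prior_cwnd(u32 snd_cwnd, u32 prior_cwnd)
{
	return max_u32(snd_cwnd, prior_cwnd);
}

int main(void)
{
	/* Hypothetical state: cwnd was 20 before a spurious loss episode,
	 * ssthresh has hit the 2-packet floor, cwnd has collapsed to 1. */
	u32 prior_cwnd = 20, snd_ssthresh = 2, snd_cwnd = 1;

	printf("ssthresh-based undo:   %u\n",
	       undo_via_ssthresh(snd_cwnd, snd_ssthresh));	/* 4  */
	printf("prior_cwnd-based undo: %u\n",
	       undo_via_prior_cwnd(snd_cwnd, prior_cwnd));	/* 20 */
	return 0;
}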

Suggested-by: Neal Cardwell <ncardwell@google.com>
Reported-by: Wei Sun <unlcsewsun@gmail.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Authored by Yuchung Cheng on 2017-08-03 20:38:51 -07:00, committed by David S. Miller
parent 10377ba767
commit 4faf783998
4 changed files with 4 additions and 4 deletions

include/linux/tcp.h

@@ -258,7 +258,7 @@ struct tcp_sock {
 	u32	snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
 	u32	snd_cwnd_used;
 	u32	snd_cwnd_stamp;
-	u32	prior_cwnd;	/* Congestion window at start of Recovery. */
+	u32	prior_cwnd;	/* cwnd right before starting loss recovery */
 	u32	prr_delivered;	/* Number of newly delivered packets to
 				 * receiver in Recovery. */
 	u32	prr_out;	/* Total number of pkts sent during Recovery. */

net/ipv4/tcp_cong.c

@@ -456,7 +456,7 @@ u32 tcp_reno_undo_cwnd(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 
-	return max(tp->snd_cwnd, tp->snd_ssthresh << 1);
+	return max(tp->snd_cwnd, tp->prior_cwnd);
 }
 EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);

net/ipv4/tcp_htcp.c

@@ -66,7 +66,6 @@ static inline void htcp_reset(struct htcp *ca)
 
 static u32 htcp_cwnd_undo(struct sock *sk)
 {
-	const struct tcp_sock *tp = tcp_sk(sk);
 	struct htcp *ca = inet_csk_ca(sk);
 
 	if (ca->undo_last_cong) {
@@ -76,7 +75,7 @@ static u32 htcp_cwnd_undo(struct sock *sk)
 		ca->undo_last_cong = 0;
 	}
 
-	return max(tp->snd_cwnd, (tp->snd_ssthresh << 7) / ca->beta);
+	return tcp_reno_undo_cwnd(sk);
 }
 
 static inline void measure_rtt(struct sock *sk, u32 srtt)

net/ipv4/tcp_input.c

@@ -1950,6 +1950,7 @@ void tcp_enter_loss(struct sock *sk)
 	    !after(tp->high_seq, tp->snd_una) ||
 	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
 		tp->prior_ssthresh = tcp_current_ssthresh(sk);
+		tp->prior_cwnd = tp->snd_cwnd;
 		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
 		tcp_ca_event(sk, CA_EVENT_LOSS);
 		tcp_init_undo(tp);