
[TCP]: Fix __tcp_push_pending_frames() 'nonagle' handling.

'nonagle' should be passed to the tcp_snd_test() function
as 'TCP_NAGLE_PUSH' if we are checking an SKB not at the
tail of the write_queue.  This is because Nagle does not
apply to such frames since we cannot possibly tack more
data onto them.

However, while doing this __tcp_push_pending_frames() makes
all of the packets in the write_queue use this modified
'nonagle' value.

Fix the bug and simplify this function by just calling
tcp_write_xmit() directly if sk_send_head is non-NULL.
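Purely as an illustration (nothing below is kernel code: the 'frame' struct, the
may_send() helper and the two loops are invented stand-ins), this toy program
shows how the old per-frame 'nonagle' override leaks into every later frame of
the same pass, and how a per-frame test, like the one tcp_write_xmit() performs
per SKB, avoids that:

/*
 * Toy model of the 'nonagle' leak -- illustration only, not kernel code.
 */
#include <stdio.h>

#define TCP_NAGLE_PUSH 4	/* semantics here: skip the Nagle test */

struct frame {
	int is_tail;	/* last SKB on the write queue?   */
	int is_small;	/* carries less than a full MSS?  */
};

/* Stand-in for the Nagle portion of the send test. */
static int may_send(const struct frame *f, int nonagle)
{
	if (nonagle == TCP_NAGLE_PUSH)
		return 1;		/* Nagle test bypassed            */
	return !f->is_small;		/* Nagle: hold back small frames  */
}

int main(void)
{
	struct frame q[] = {
		{ .is_tail = 0, .is_small = 1 },  /* non-tail: push it    */
		{ .is_tail = 1, .is_small = 1 },  /* tail: Nagle applies  */
	};
	int i, nonagle;

	/* Buggy shape: the override computed for the non-tail frame is
	 * written back and sticks for the rest of the pass, so the small
	 * tail frame is pushed even though Nagle should hold it back.
	 */
	nonagle = 0;				/* Nagle enabled */
	for (i = 0; i < 2; i++) {
		if (!q[i].is_tail)
			nonagle = TCP_NAGLE_PUSH;	/* leaks into i == 1 */
		printf("buggy: frame %d may_send=%d\n", i,
		       may_send(&q[i], nonagle));
	}

	/* Fixed shape: the override is computed per frame and never
	 * written back, so the tail frame still sees the real setting.
	 */
	nonagle = 0;
	for (i = 0; i < 2; i++) {
		int cur = q[i].is_tail ? nonagle : TCP_NAGLE_PUSH;
		printf("fixed: frame %d may_send=%d\n", i,
		       may_send(&q[i], cur));
	}
	return 0;
}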

As a result, we can now make tcp_data_snd_check() just call
tcp_push_pending_frames() instead of the specialized
__tcp_data_snd_check().
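As a rough sketch of the resulting call path (all types and bodies below are
stubs invented for illustration; only the chain tcp_data_snd_check() ->
tcp_push_pending_frames() -> __tcp_push_pending_frames() -> tcp_write_xmit(),
with the probe-timer fallback, mirrors the patched code):

#include <stdio.h>

struct sock { int pending; };		/* stand-in for struct sock     */
struct tcp_sock { int nonagle; };	/* stand-in for struct tcp_sock */

static int tcp_write_xmit(struct sock *sk, unsigned int cur_mss, int nonagle)
{
	printf("tcp_write_xmit(mss=%u, nonagle=%d)\n", cur_mss, nonagle);
	return 0;			/* 0: something was sent        */
}

static void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
{
	printf("arming zero-window probe timer\n");
}

static unsigned int tcp_current_mss(struct sock *sk, int large)
{
	return 1460;			/* placeholder MSS              */
}

/* After the patch: no 'nonagle' rewrite here; just hand the queue to
 * tcp_write_xmit() and fall back to the probe timer if it fails.
 */
static void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
				      unsigned int cur_mss, int nonagle)
{
	if (sk->pending) {
		if (tcp_write_xmit(sk, cur_mss, nonagle))
			tcp_check_probe_timer(sk, tp);
	}
}

static void tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp)
{
	__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
}

/* tcp_data_snd_check() now just pushes pending frames; the real code
 * then also checks for newly opened send space via tcp_check_space().
 */
static void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp)
{
	tcp_push_pending_frames(sk, tp);
}

int main(void)
{
	struct sock sk = { .pending = 1 };
	struct tcp_sock tp = { .nonagle = 0 };

	tcp_data_snd_check(&sk, &tp);
	return 0;
}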

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2005-07-05 15:19:38 -07:00
parent a2e2a59c93
commit 55c97f3e99
3 changed files with 8 additions and 25 deletions

include/net/tcp.h

@@ -848,7 +848,6 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
 
 /* tcp_output.c */
 
-extern void __tcp_data_snd_check(struct sock *sk, struct sk_buff *skb);
 extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
 				       unsigned int cur_mss, int nonagle);
 extern int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp);

net/ipv4/tcp_input.c

@@ -3346,12 +3346,9 @@ static inline void tcp_check_space(struct sock *sk)
 	}
 }
 
-static __inline__ void tcp_data_snd_check(struct sock *sk)
+static __inline__ void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp)
 {
-	struct sk_buff *skb = sk->sk_send_head;
-
-	if (skb != NULL)
-		__tcp_data_snd_check(sk, skb);
+	tcp_push_pending_frames(sk, tp);
 	tcp_check_space(sk);
 }
@@ -3645,7 +3642,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			 */
 			tcp_ack(sk, skb, 0);
 			__kfree_skb(skb);
-			tcp_data_snd_check(sk);
+			tcp_data_snd_check(sk, tp);
 			return 0;
 		} else { /* Header too small */
 			TCP_INC_STATS_BH(TCP_MIB_INERRS);
@@ -3711,7 +3708,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
 				/* Well, only one small jumplet in fast path... */
 				tcp_ack(sk, skb, FLAG_DATA);
-				tcp_data_snd_check(sk);
+				tcp_data_snd_check(sk, tp);
 				if (!tcp_ack_scheduled(tp))
 					goto no_ack;
 			}
@@ -3789,7 +3786,7 @@ step5:
 	/* step 7: process the segment text */
 	tcp_data_queue(sk, skb);
 
-	tcp_data_snd_check(sk);
+	tcp_data_snd_check(sk, tp);
 	tcp_ack_snd_check(sk);
 	return 0;
@@ -4099,7 +4096,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		/* Do step6 onward by hand. */
 		tcp_urg(sk, skb, th);
 		__kfree_skb(skb);
-		tcp_data_snd_check(sk);
+		tcp_data_snd_check(sk, tp);
 		return 0;
 	}
@@ -4290,7 +4287,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
 	/* tcp_data could move socket to TIME-WAIT */
 	if (sk->sk_state != TCP_CLOSE) {
-		tcp_data_snd_check(sk);
+		tcp_data_snd_check(sk, tp);
 		tcp_ack_snd_check(sk);
 	}

net/ipv4/tcp_output.c

@@ -894,24 +894,11 @@ void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
 	struct sk_buff *skb = sk->sk_send_head;
 
 	if (skb) {
-		if (!tcp_skb_is_last(sk, skb))
-			nonagle = TCP_NAGLE_PUSH;
-		if (!tcp_snd_test(sk, skb, cur_mss, nonagle) ||
-		    tcp_write_xmit(sk, cur_mss, nonagle))
+		if (tcp_write_xmit(sk, cur_mss, nonagle))
 			tcp_check_probe_timer(sk, tp);
 	}
 }
 
-void __tcp_data_snd_check(struct sock *sk, struct sk_buff *skb)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd) ||
-	    tcp_packets_in_flight(tp) >= tp->snd_cwnd ||
-	    tcp_write_xmit(sk, tcp_current_mss(sk, 1), tp->nonagle))
-		tcp_check_probe_timer(sk, tp);
-}
-
 /* This function returns the amount that we can raise the
  * usable window based on the following constraints
  *