Skip to content
Snippets Groups Projects
tcp_output.c 91.5 KiB
Newer Older
  • Learn to ignore specific revisions
  • Linus Torvalds's avatar
    Linus Torvalds committed
    
    		ato = min(ato, max_ato);
    	}
    
    	/* Stay within the limit we were given */
    	timeout = jiffies + ato;
    
    	/* Use new timeout only if there wasn't a older one earlier. */
    
    	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		/* If delack timer was blocked or is about to expire,
    		 * send ACK now.
    		 */
    
    		if (icsk->icsk_ack.blocked ||
    		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			tcp_send_ack(sk);
    			return;
    		}
    
    
    		if (!time_before(timeout, icsk->icsk_ack.timeout))
    			timeout = icsk->icsk_ack.timeout;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	}
    
    	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
    	icsk->icsk_ack.timeout = timeout;
    	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    }
    
    /* This routine sends an ack and also updates the window. */
    void tcp_send_ack(struct sock *sk)
    {
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    	/* If we have been reset, we may not send again. */
    	if (sk->sk_state == TCP_CLOSE)
    		return;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    	/* We are not putting this on the write queue, so
    	 * tcp_transmit_skb() will set the ownership to this
    	 * sock.
    	 */
    
    	buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
    
    	if (buff == NULL) {
    		inet_csk_schedule_ack(sk);
    		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
    		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
    					  TCP_DELACK_MAX, TCP_RTO_MAX);
    		return;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	}
    
    
    	/* Reserve space for headers and prepare control bits. */
    	skb_reserve(buff, MAX_TCP_HEADER);
    
    Changli Gao's avatar
    Changli Gao committed
    	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
    
    
    	/* Send it off, this clears delayed acks for us. */
    	TCP_SKB_CB(buff)->when = tcp_time_stamp;
    
    	tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    }
    
    /* This routine sends a packet with an out of date sequence
     * number. It assumes the other end will try to ack it.
     *
     * Question: what should we make while urgent mode?
     * 4.4BSD forces sending single byte of data. We cannot send
     * out of window data, because we have SND.NXT==SND.MAX...
     *
     * Current solution: to send TWO zero-length segments in urgent mode:
     * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
     * out-of-date with SND.UNA-1 to probe window.
     */
    static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    	struct sk_buff *skb;
    
    	/* We don't queue it, tcp_transmit_skb() sets ownership. */
    
    	skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
    
    	if (skb == NULL)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		return -1;
    
    	/* Reserve space for headers and set control bits. */
    	skb_reserve(skb, MAX_TCP_HEADER);
    	/* Use a previous sequence.  This should cause the other
    	 * end to send an ack.  Don't queue or clone SKB, just
    	 * send it.
    	 */
    
    Changli Gao's avatar
    Changli Gao committed
    	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	TCP_SKB_CB(skb)->when = tcp_time_stamp;
    
    	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
    
    void tcp_send_window_probe(struct sock *sk)
    {
    	if (sk->sk_state == TCP_ESTABLISHED) {
    		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
    
    		tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
    
    		tcp_xmit_probe_skb(sk, 0);
    	}
    }
    
    
/* Initiate keepalive or window probe from timer.
 *
 * Returns 0 on successful transmit, the tcp_transmit_skb() error for a
 * failed data transmit, or -1 when the socket is closed or the head skb
 * could not be fragmented to fit the window.
 */
int tcp_write_wakeup(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	/* A reset/closed connection may not transmit anything. */
	if (sk->sk_state == TCP_CLOSE)
		return -1;

	if ((skb = tcp_send_head(sk)) != NULL &&
	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
		/* There is unsent data and at least its first byte fits
		 * inside the peer's window: send real (possibly partial)
		 * data instead of a zero-length probe.
		 */
		int err;
		unsigned int mss = tcp_current_mss(sk);
		/* Bytes of this skb that fit in the announced window. */
		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;

		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;

		/* We are probing the opening of a window
		 * but the window size is != 0
		 * must have been a result SWS avoidance ( sender )
		 */
		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
		    skb->len > mss) {
			/* Only part of the skb fits (or it spans several
			 * segments): trim what we send to the window,
			 * capped at one MSS.
			 */
			seg_size = min(seg_size, mss);
			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
			if (tcp_fragment(sk, skb, seg_size, mss))
				return -1;
		} else if (!tcp_skb_pcount(skb))
			/* pcount not yet set up — initialize TSO segment
			 * accounting before transmit.
			 */
			tcp_set_skb_tso_segs(sk, skb, mss);

		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
		TCP_SKB_CB(skb)->when = tcp_time_stamp;
		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
		if (!err)
			tcp_event_new_data_sent(sk, skb);
		return err;
	} else {
		/* Nothing sendable fits the window: fall back to
		 * zero-length probe(s).  In urgent mode send an extra
		 * segment at SND.UNA first to deliver the urgent pointer
		 * (see the comment above tcp_xmit_probe_skb()).
		 */
		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
			tcp_xmit_probe_skb(sk, 1);
		return tcp_xmit_probe_skb(sk, 0);
	}
}
    
    /* A window probe timeout has occurred.  If window is not closed send
     * a partial packet else a zero probe.
     */
    void tcp_send_probe0(struct sock *sk)
    {
    
    	struct inet_connection_sock *icsk = inet_csk(sk);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	struct tcp_sock *tp = tcp_sk(sk);
    	int err;
    
    	err = tcp_write_wakeup(sk);
    
    
    	if (tp->packets_out || !tcp_send_head(sk)) {
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		/* Cancel probe timer, if it is not required. */
    
    		icsk->icsk_backoff = 0;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		return;
    	}
    
    	if (err <= 0) {
    
    		if (icsk->icsk_backoff < sysctl_tcp_retries2)
    			icsk->icsk_backoff++;
    
    		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
    
    					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
    					  TCP_RTO_MAX);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	} else {
    		/* If packet was not sent due to local congestion,
    
    		 * do not backoff and do not remember icsk_probes_out.
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		 * Let local senders to fight for local resources.
    		 *
    		 * Use accumulated backoff yet.
    		 */
    
    		if (!icsk->icsk_probes_out)
    			icsk->icsk_probes_out = 1;
    
    		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
    
    					  min(icsk->icsk_rto << icsk->icsk_backoff,
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	}
    }