static inline int tcp_head_timedout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	return tp->packets_out &&
	       tcp_skb_timedout(sk, tcp_write_queue_head(sk));
}
    
    /* Linux NewReno/SACK/FACK/ECN state machine.
     * --------------------------------------
     *
     * "Open"	Normal state, no dubious events, fast path.
     * "Disorder"   In all the respects it is "Open",
     *		but requires a bit more attention. It is entered when
     *		we see some SACKs or dupacks. It is split of "Open"
     *		mainly to move some processing from fast path to slow one.
     * "CWR"	CWND was reduced due to some Congestion Notification event.
     *		It can be ECN, ICMP source quench, local device congestion.
     * "Recovery"	CWND was reduced, we are fast-retransmitting.
     * "Loss"	CWND was reduced due to RTO timeout or SACK reneging.
     *
     * tcp_fastretrans_alert() is entered:
 * - on each incoming ACK, if state is not "Open"
 * - when the arriving ACK is unusual, namely:
     *	* SACK
     *	* Duplicate ACK.
     *	* ECN ECE.
     *
     * Counting packets in flight is pretty simple.
     *
     *	in_flight = packets_out - left_out + retrans_out
     *
     *	packets_out is SND.NXT-SND.UNA counted in packets.
     *
     *	retrans_out is number of retransmitted segments.
     *
 *	left_out is the number of segments which left the network,
 *	but are not ACKed yet.
     *
     *		left_out = sacked_out + lost_out
     *
 *     sacked_out: Packets which arrived at the receiver out of order
 *		   and hence were not cumulatively ACKed. With SACKs this
 *		   number is simply the amount of SACKed data. Even without
 *		   SACKs it is easy to give a pretty reliable estimate of
 *		   this number by counting duplicate ACKs.
     *
 *       lost_out: Packets lost by the network. TCP has no explicit
 *		   "loss notification" feedback from the network (for now).
 *		   It means that this number can only be _guessed_.
 *		   Actually, it is the heuristic used to predict losses that
 *		   distinguishes the different algorithms.
     *
 *	F.e. after an RTO, when the whole queue is considered lost,
 *	lost_out = packets_out and in_flight = retrans_out.
     *
 *		Essentially, we now have two algorithms for counting
 *		lost packets.
     *
 *		FACK: It is the simplest heuristic. As soon as we decide
 *		that something is lost, we decide that _all_ not SACKed
 *		packets until the most forward SACK are lost. I.e.
 *		lost_out = fackets_out - sacked_out and left_out = fackets_out.
 *		This estimate is absolutely correct if the network does not
 *		reorder packets. And it loses any connection to reality when
 *		reordering takes place. We use FACK by default until reordering
 *		is suspected on the path to this destination.
     *
 *		NewReno: when Recovery is entered, we assume that one segment
 *		is lost (classic Reno). While we are in Recovery and
 *		a partial ACK arrives, we assume that one more packet
 *		is lost (NewReno). These heuristics are the same in NewReno
 *		and SACK.
     *
 *  Imagine, that's all! Forget about all this shamanism about CWND inflation,
 *  deflation etc. CWND is the real congestion window, never inflated; it
 *  changes only according to classic VJ rules.
     *
 * The really tricky (and carefully tuned) part of the algorithm
 * is hidden in the functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
 * The first determines the moment _when_ we should reduce CWND and,
 * hence, slow down forward transmission. In fact, it determines the moment
 * when we decide that a hole is caused by loss, rather than by reordering.
     *
 * tcp_xmit_retransmit_queue() decides _what_ we should retransmit to fill
 * holes caused by lost packets.
     *
 * And the most logically complicated part of the algorithm is the undo
 * heuristics. We detect false retransmits due to both too early
 * fast retransmit (reordering) and underestimated RTO, analyzing
 * timestamps and D-SACKs. When we detect that some segments were
 * retransmitted by mistake and the CWND reduction was wrong, we undo
 * the window reduction and abort the recovery phase. This logic is hidden
 * inside several functions named tcp_try_undo_<something>.
     */
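
/* To make the accounting above concrete: a minimal sketch of the helpers
 * used throughout this file (in kernels of this vintage they live in
 * include/net/tcp.h; reproduced here for illustration only):
 *
 *	static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
 *	{
 *		return tp->sacked_out + tp->lost_out;
 *	}
 *
 *	static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 *	{
 *		return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
 *	}
 */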
    
/* This function decides when we should leave the Disorder state
 * and enter the Recovery phase, reducing the congestion window.
 *
 * Main question: may we further continue forward transmission
 * with the same cwnd?
 */
    
static int tcp_time_to_recover(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 packets_out;

	/* Do not perform any recovery during F-RTO algorithm */
	if (tp->frto_counter)
		return 0;

	/* Trick#1: The loss is proven. */
	if (tp->lost_out)
		return 1;

	/* Not-A-Trick#2 : Classic rule... */
	if (tcp_dupack_heurestics(tp) > tp->reordering)
		return 1;

	/* Trick#3 : when we use RFC2988 timer restart, fast
	 * retransmit can be triggered by timeout of queue head.
	 */
	if (tcp_is_fack(tp) && tcp_head_timedout(sk))
		return 1;

	/* Trick#4: It is still not OK... But will it be useful to delay
	 * recovery more?
	 */
	packets_out = tp->packets_out;
	if (packets_out <= tp->reordering &&
	    tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
	    !tcp_may_send_now(sk)) {
		/* We have nothing to send. This connection is limited
		 * either by receiver window or by application.
		 */
		return 1;
	}

	return 0;
}
    
    
/* RFC: This is from the original, I doubt that this is necessary at all:
 * clear xmit_retrans hint if seq of this skb is beyond hint. How could we
 * have retransmitted past LOST markings in the first place? I'm not fully
 * sure about undo and end of connection cases, which can cause R without L?
 */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
	if ((tp->retransmit_skb_hint != NULL) &&
	    before(TCP_SKB_CB(skb)->seq,
		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
		tp->retransmit_skb_hint = NULL;
}
    
/* Mark head of queue up as lost. With RFC3517 SACK, the count is
 * against sacked "cnt", otherwise it's against facked "cnt"
 */
    
static void tcp_mark_head_lost(struct sock *sk, int packets)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int cnt, oldcnt;
	int err;
	unsigned int mss;

	BUG_TRAP(packets <= tp->packets_out);
	if (tp->lost_skb_hint) {
		skb = tp->lost_skb_hint;
		cnt = tp->lost_cnt_hint;
	} else {
		skb = tcp_write_queue_head(sk);
		cnt = 0;
	}

	tcp_for_write_queue_from(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		/* TODO: do this better */
		/* this is not the most efficient way to do this... */
		tp->lost_skb_hint = skb;
		tp->lost_cnt_hint = cnt;

		if (after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
			break;

		oldcnt = cnt;
		if (tcp_is_fack(tp) || tcp_is_reno(tp) ||
		    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
			cnt += tcp_skb_pcount(skb);

		if (cnt > packets) {
			if (tcp_is_sack(tp) || (oldcnt >= packets))
				break;

			mss = skb_shinfo(skb)->gso_size;
			err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, mss);
			if (err < 0)
				break;
			cnt = packets;
		}

		if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) {
			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
			tp->lost_out += tcp_skb_pcount(skb);
			tcp_verify_retransmit_hint(tp, skb);
		}
	}
	tcp_verify_left_out(tp);
}
    
/* Account newly detected lost packet(s) */

static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_is_reno(tp)) {
		tcp_mark_head_lost(sk, 1);
	} else if (tcp_is_fack(tp)) {
		int lost = tp->fackets_out - tp->reordering;
		if (lost <= 0)
			lost = 1;
		tcp_mark_head_lost(sk, lost);
	} else {
		int sacked_upto = tp->sacked_out - tp->reordering;
		if (sacked_upto < fast_rexmit)
			sacked_upto = fast_rexmit;
		tcp_mark_head_lost(sk, sacked_upto);
	}
    
	/* New heuristics: it is possible only after we switched
	 * to restart timer each time when something is ACKed.
	 * Hence, we can detect timed out packets during fast
	 * retransmit without falling to slow start.
	 */
	if (tcp_is_fack(tp) && tcp_head_timedout(sk)) {
		struct sk_buff *skb;

		skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
			: tcp_write_queue_head(sk);

		tcp_for_write_queue_from(skb, sk) {
			if (skb == tcp_send_head(sk))
				break;
			if (!tcp_skb_timedout(sk, skb))
				break;

			if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) {
				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
				tp->lost_out += tcp_skb_pcount(skb);
				tcp_verify_retransmit_hint(tp, skb);
			}
		}

		tp->scoreboard_skb_hint = skb;

		tcp_verify_left_out(tp);
	}
}
    
/* CWND moderation, preventing bursts due to too big ACKs
 * in dubious situations.
 */
static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
{
	tp->snd_cwnd = min(tp->snd_cwnd,
			   tcp_packets_in_flight(tp) + tcp_max_burst(tp));
	tp->snd_cwnd_stamp = tcp_time_stamp;
}
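
/* Illustration (not part of the original file, numbers are made up): with
 * 10 packets in flight and a burst allowance of 3 from tcp_max_burst(), an
 * ACK arriving while snd_cwnd == 20 clamps snd_cwnd to 13, so one stretched
 * ACK cannot release an arbitrarily large burst into the network.
 */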
    
    
    /* Lower bound on congestion window is slow start threshold
 * unless congestion avoidance choice decides to override it.
     */
    static inline u32 tcp_cwnd_min(const struct sock *sk)
    {
    	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
    
    	return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh;
    }
    
    
/* Decrease cwnd each second ack. */
static void tcp_cwnd_down(struct sock *sk, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int decr = tp->snd_cwnd_cnt + 1;

	if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) ||
	    (tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) {
		tp->snd_cwnd_cnt = decr & 1;
		decr >>= 1;

		if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
			tp->snd_cwnd -= decr;

		tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
		tp->snd_cwnd_stamp = tcp_time_stamp;
	}
}
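
/* Illustration (not part of the original file): snd_cwnd_cnt alternates
 * 0, 1, 0, 1, ... across qualifying ACKs, so decr >> 1 is 0 on the first
 * ACK and 1 on the second. cwnd therefore drops by one packet for every two
 * ACKs, i.e. it is roughly halved over one round trip (rate halving).
 */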
    
    /* Nothing was retransmitted or returned timestamp is less
     * than timestamp of the first retransmission.
     */
    static inline int tcp_packet_delayed(struct tcp_sock *tp)
    {
    	return !tp->retrans_stamp ||
    		(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
    		 (__s32)(tp->rx_opt.rcv_tsecr - tp->retrans_stamp) < 0);
    }
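
/* Worked example (illustrative numbers only): timestamps are 32-bit and
 * wrap, so the comparison above is done in signed arithmetic. With
 * retrans_stamp == 200 and an echoed rcv_tsecr == 100,
 * (__s32)(100 - 200) < 0: the peer echoed a timestamp older than the first
 * retransmission, so it was the original transmission (not the retransmit)
 * that got through, and undoing the cwnd reduction is safe.
 */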
    
    /* Undo procedures. */
    
    #if FASTRETRANS_DEBUG > 1
    
static void DBGUNDO(struct sock *sk, const char *msg)
{
	struct tcp_sock *tp = tcp_sk(sk);
    	struct inet_sock *inet = inet_sk(sk);
    
    	if (sk->sk_family == AF_INET) {
    		printk(KERN_DEBUG "Undo %s " NIPQUAD_FMT "/%u c%u l%u ss%u/%u p%u\n",
    		       msg,
    		       NIPQUAD(inet->daddr), ntohs(inet->dport),
    		       tp->snd_cwnd, tcp_left_out(tp),
    		       tp->snd_ssthresh, tp->prior_ssthresh,
    		       tp->packets_out);
    	}
    #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
    	else if (sk->sk_family == AF_INET6) {
    		struct ipv6_pinfo *np = inet6_sk(sk);
    		printk(KERN_DEBUG "Undo %s " NIP6_FMT "/%u c%u l%u ss%u/%u p%u\n",
    		       msg,
    		       NIP6(np->daddr), ntohs(inet->dport),
    		       tp->snd_cwnd, tcp_left_out(tp),
    		       tp->snd_ssthresh, tp->prior_ssthresh,
    		       tp->packets_out);
    	}
    #endif
    
    }
    #else
    #define DBGUNDO(x...) do { } while (0)
    #endif
    
    
static void tcp_undo_cwr(struct sock *sk, const int undo)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->prior_ssthresh) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
    
		if (icsk->icsk_ca_ops->undo_cwnd)
			tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
		else
			tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);

    		if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
    			tp->snd_ssthresh = tp->prior_ssthresh;
    			TCP_ECN_withdraw_cwr(tp);
    		}
    	} else {
    		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
    	}
    	tcp_moderate_cwnd(tp);
    	tp->snd_cwnd_stamp = tcp_time_stamp;
    
    
	/* There is something screwy going on with the retrans hints after
	 * an undo */
	tcp_clear_all_retrans_hints(tp);
}
    
    static inline int tcp_may_undo(struct tcp_sock *tp)
{
	return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
}
    
    /* People celebrate: "We love our President!" */
    
static int tcp_try_undo_recovery(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_may_undo(tp)) {
		/* Happy end! We did not retransmit anything
		 * or our original transmission succeeded.
		 */
		DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
		tcp_undo_cwr(sk, 1);
		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
			NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
		else
			NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
		tp->undo_marker = 0;
	}
	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
		/* Hold old state until something *above* high_seq
		 * is ACKed. For Reno it is MUST to prevent false
		 * fast retransmits (RFC2582). SACK TCP is safe. */
		tcp_moderate_cwnd(tp);
		return 1;
	}
	tcp_set_ca_state(sk, TCP_CA_Open);
	return 0;
}
    
/* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
static void tcp_try_undo_dsack(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->undo_marker && !tp->undo_retrans) {
		DBGUNDO(sk, "D-SACK");
		tcp_undo_cwr(sk, 1);
		tp->undo_marker = 0;
		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
	}
}
    
/* Undo during fast recovery after partial ACK. */

static int tcp_try_undo_partial(struct sock *sk, int acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	/* Partial ACK arrived. Force Hoe's retransmit. */
	int failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering);

	if (tcp_may_undo(tp)) {
		/* Plain luck! Hole is filled with delayed
		 * packet, rather than with a retransmit.
		 */
		if (tp->retrans_out == 0)
			tp->retrans_stamp = 0;

		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);

		DBGUNDO(sk, "Hoe");
		tcp_undo_cwr(sk, 0);
		NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);

		/* So... Do not make Hoe's retransmit yet.
		 * If the first packet was delayed, the rest
		 * are most probably delayed as well.
		 */
		failed = 0;
	}
	return failed;
}
    
/* Undo during loss recovery after partial ACK. */
static int tcp_try_undo_loss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_may_undo(tp)) {
		struct sk_buff *skb;

		tcp_for_write_queue(skb, sk) {
			if (skb == tcp_send_head(sk))
				break;
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
		}

		tcp_clear_all_retrans_hints(tp);

		DBGUNDO(sk, "partial loss");
		tp->lost_out = 0;
		tcp_undo_cwr(sk, 1);
		NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
		inet_csk(sk)->icsk_retransmits = 0;
		tp->undo_marker = 0;
		if (tcp_is_sack(tp))
			tcp_set_ca_state(sk, TCP_CA_Open);
		return 1;
	}
	return 0;
}
    
    
static inline void tcp_complete_cwr(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
}
    
static void tcp_try_to_open(struct sock *sk, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_verify_left_out(tp);

	if (tp->retrans_out == 0)
		tp->retrans_stamp = 0;

	if (flag & FLAG_ECE)
		tcp_enter_cwr(sk, 1);

	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
		int state = TCP_CA_Open;

		if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker)
			state = TCP_CA_Disorder;

		if (inet_csk(sk)->icsk_ca_state != state) {
			tcp_set_ca_state(sk, state);
			tp->high_seq = tp->snd_nxt;
		}
		tcp_moderate_cwnd(tp);
	} else {
		tcp_cwnd_down(sk, flag);
	}
}
    
    static void tcp_mtup_probe_failed(struct sock *sk)
    {
    	struct inet_connection_sock *icsk = inet_csk(sk);
    
    	icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
    	icsk->icsk_mtup.probe_size = 0;
    }
    
    static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    	struct inet_connection_sock *icsk = inet_csk(sk);
    
    	/* FIXME: breaks with very large cwnd */
    	tp->prior_ssthresh = tcp_current_ssthresh(sk);
    	tp->snd_cwnd = tp->snd_cwnd *
    		       tcp_mss_to_mtu(sk, tp->mss_cache) /
    		       icsk->icsk_mtup.probe_size;
    	tp->snd_cwnd_cnt = 0;
    	tp->snd_cwnd_stamp = tcp_time_stamp;
    	tp->rcv_ssthresh = tcp_current_ssthresh(sk);
    
    	icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
    	icsk->icsk_mtup.probe_size = 0;
    	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
    }
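
/* Worked example (illustrative numbers only): if snd_cwnd is 12 and the
 * current MSS corresponds to an MTU of 1500 bytes while the successful
 * probe used probe_size == 1800, the new cwnd is 12 * 1500 / 1800 == 10
 * packets: the same volume of outstanding data, re-expressed in larger
 * segments.
 */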
    
    
/* Process an event, which can update packets-in-flight not trivially.
 * Main goal of this function is to calculate new estimate for left_out,
 * taking into account both packets sitting in receiver's buffer and
 * packets lost by network.
 *
 * Besides that it does CWND reduction, when packet loss is detected
 * and changes the state of the machine.
 *
 * It does _not_ decide what to send, that is done in
 * tcp_xmit_retransmit_queue().
 */
    
static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
	int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
				    (tcp_fackets_out(tp) > tp->reordering));
	int fast_rexmit = 0;

	if (WARN_ON(!tp->packets_out && tp->sacked_out))
		tp->sacked_out = 0;
	if (WARN_ON(!tp->sacked_out && tp->fackets_out))
		tp->fackets_out = 0;

	/* Now state machine starts.
	 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
	if (flag & FLAG_ECE)
		tp->prior_ssthresh = 0;

	/* B. In all the states check for reneging SACKs. */
	if (tcp_check_sack_reneging(sk, flag))
		return;

	/* C. Process data loss notification, provided it is valid. */
	if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) &&
	    before(tp->snd_una, tp->high_seq) &&
	    icsk->icsk_ca_state != TCP_CA_Open &&
	    tp->fackets_out > tp->reordering) {
		tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering);
		NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
	}

	/* D. Check consistency of the current state. */
	tcp_verify_left_out(tp);

	/* E. Check state exit conditions. State can be terminated
	 *    when high_seq is ACKed. */
	if (icsk->icsk_ca_state == TCP_CA_Open) {
		BUG_TRAP(tp->retrans_out == 0);
		tp->retrans_stamp = 0;
	} else if (!before(tp->snd_una, tp->high_seq)) {
		switch (icsk->icsk_ca_state) {
		case TCP_CA_Loss:
			icsk->icsk_retransmits = 0;
			if (tcp_try_undo_recovery(sk))
				return;
			break;

		case TCP_CA_CWR:
			/* CWR is to be held something *above* high_seq
			 * is ACKed for CWR bit to reach receiver. */
			if (tp->snd_una != tp->high_seq) {
				tcp_complete_cwr(sk);
				tcp_set_ca_state(sk, TCP_CA_Open);
			}
			break;

		case TCP_CA_Disorder:
			tcp_try_undo_dsack(sk);
			if (!tp->undo_marker ||
			    /* For SACK case do not Open to allow to undo
			     * catching for all duplicate ACKs. */
			    tcp_is_reno(tp) || tp->snd_una != tp->high_seq) {
				tp->undo_marker = 0;
				tcp_set_ca_state(sk, TCP_CA_Open);
			}
			break;

		case TCP_CA_Recovery:
			if (tcp_is_reno(tp))
				tcp_reset_reno_sack(tp);
			if (tcp_try_undo_recovery(sk))
				return;
			tcp_complete_cwr(sk);
			break;
		}
	}

	/* F. Process state. */
	switch (icsk->icsk_ca_state) {
	case TCP_CA_Recovery:
		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
			if (tcp_is_reno(tp) && is_dupack)
				tcp_add_reno_sack(sk);
		} else
			do_lost = tcp_try_undo_partial(sk, pkts_acked);
		break;
	case TCP_CA_Loss:
		if (flag & FLAG_DATA_ACKED)
			icsk->icsk_retransmits = 0;
		if (tcp_is_reno(tp) && flag & FLAG_SND_UNA_ADVANCED)
			tcp_reset_reno_sack(tp);
		if (!tcp_try_undo_loss(sk)) {
			tcp_moderate_cwnd(tp);
			tcp_xmit_retransmit_queue(sk);
			return;
		}
		if (icsk->icsk_ca_state != TCP_CA_Open)
			return;
		/* Loss is undone; fall through to processing in Open state. */
	default:
		if (tcp_is_reno(tp)) {
			if (flag & FLAG_SND_UNA_ADVANCED)
				tcp_reset_reno_sack(tp);
			if (is_dupack)
				tcp_add_reno_sack(sk);
		}

		if (icsk->icsk_ca_state == TCP_CA_Disorder)
			tcp_try_undo_dsack(sk);

		if (!tcp_time_to_recover(sk)) {
			tcp_try_to_open(sk, flag);
			return;
		}

		/* MTU probe failure: don't reduce cwnd */
		if (icsk->icsk_ca_state < TCP_CA_CWR &&
		    icsk->icsk_mtup.probe_size &&
		    tp->snd_una == tp->mtu_probe.probe_seq_start) {
			tcp_mtup_probe_failed(sk);
			/* Restores the reduction we did in tcp_mtup_probe() */
			tp->snd_cwnd++;
			tcp_simple_retransmit(sk);
			return;
		}

		/* Otherwise enter Recovery state */
		if (tcp_is_reno(tp))
			NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY);
		else
			NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY);

		tp->high_seq = tp->snd_nxt;
		tp->prior_ssthresh = 0;
		tp->undo_marker = tp->snd_una;
		tp->undo_retrans = tp->retrans_out;

		if (icsk->icsk_ca_state < TCP_CA_CWR) {
			if (!(flag & FLAG_ECE))
				tp->prior_ssthresh = tcp_current_ssthresh(sk);
			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
			TCP_ECN_queue_cwr(tp);
		}

		tp->bytes_acked = 0;
		tp->snd_cwnd_cnt = 0;
		tcp_set_ca_state(sk, TCP_CA_Recovery);
		fast_rexmit = 1;
	}

	if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
		tcp_update_scoreboard(sk, fast_rexmit);
	tcp_cwnd_down(sk, flag);
	tcp_xmit_retransmit_queue(sk);
}
    
/* Read draft-ietf-tcplw-high-performance before mucking
 * with this code. (Supersedes RFC1323)
 */
    
static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
{
	/* RTTM Rule: A TSecr value received in a segment is used to
	 * update the averaged RTT measurement only if the segment
	 * acknowledges some new data, i.e., only if it advances the
	 * left edge of the send window.
	 *
	 * See draft-ietf-tcplw-high-performance-00, section 3.3.
	 * 1998/04/10 Andrey V. Savochkin <saw@msu.ru>
	 *
	 * Changed: reset backoff as soon as we see the first valid sample.
	 * If we do not, we get strongly overestimated rto. With timestamps
	 * samples are accepted even from very old segments: f.e., when rtt=1
	 * increases to 8, we retransmit 5 times and after 8 seconds delayed
	 * answer arrives rto becomes 120 seconds! If at least one of segments
	 * in window is lost... Voila.	 			--ANK (010210)
	 */
	struct tcp_sock *tp = tcp_sk(sk);
	const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;

	tcp_rtt_estimator(sk, seq_rtt);
	tcp_set_rto(sk);
	inet_csk(sk)->icsk_backoff = 0;
	tcp_bound_rto(sk);
}
static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
{
	/* We don't have a timestamp. Can only use
	 * packets that are not retransmitted to determine
	 * rtt estimates. Also, we must not reset the
	 * backoff for rto until we get a non-retransmitted
	 * packet. This allows us to deal with a situation
	 * where the network delay has increased suddenly.
	 * I.e. Karn's algorithm. (SIGCOMM '87, p5.)
	 */
	if (flag & FLAG_RETRANS_DATA_ACKED)
		return;

	tcp_rtt_estimator(sk, seq_rtt);
	tcp_set_rto(sk);
	inet_csk(sk)->icsk_backoff = 0;
	tcp_bound_rto(sk);
}
static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
				      const s32 seq_rtt)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
		tcp_ack_saw_tstamp(sk, flag);
	else if (seq_rtt >= 0)
		tcp_ack_no_tstamp(sk, seq_rtt, flag);
}
static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight);
	tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
}
    
/* Restart timer after forward progress on connection.
 * RFC2988 recommends to restart timer to now+rto.
 */
static void tcp_rearm_rto(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->packets_out) {
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
	} else {
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
	}
}

/* If we get here, the whole TSO packet has not been acked. */
static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 packets_acked;

	BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una));

	packets_acked = tcp_skb_pcount(skb);
	if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
		return 0;
	packets_acked -= tcp_skb_pcount(skb);

	if (packets_acked) {
		BUG_ON(tcp_skb_pcount(skb) == 0);
		BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq));
	}

	return packets_acked;
}

/* Remove acknowledged frames from the retransmission queue. If our packet
 * is before the ack sequence we can discard it as it's confirmed to have
 * arrived at the other end.
 */
static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb;
	u32 now = tcp_time_stamp;
	int fully_acked = 1;
	int flag = 0;
	u32 pkts_acked = 0;
	u32 reord = tp->packets_out;
	s32 seq_rtt = -1;
	s32 ca_seq_rtt = -1;
	ktime_t last_ackt = net_invalid_timestamp();

	while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		u32 end_seq;
		u32 acked_pcount;
		u8 sacked = scb->sacked;

		/* Determine how many packets and what bytes were acked, tso and else */
		if (after(scb->end_seq, tp->snd_una)) {
			if (tcp_skb_pcount(skb) == 1 ||
			    !after(tp->snd_una, scb->seq))
				break;

			acked_pcount = tcp_tso_acked(sk, skb);
			if (!acked_pcount)
				break;

			fully_acked = 0;
			end_seq = tp->snd_una;
		} else {
			acked_pcount = tcp_skb_pcount(skb);
			end_seq = scb->end_seq;
		}

		/* MTU probing checks */
		if (fully_acked && icsk->icsk_mtup.probe_size &&
		    !after(tp->mtu_probe.probe_seq_end, scb->end_seq)) {
			tcp_mtup_probe_success(sk, skb);
		}

		if (sacked & TCPCB_RETRANS) {
			if (sacked & TCPCB_SACKED_RETRANS)
				tp->retrans_out -= acked_pcount;
			flag |= FLAG_RETRANS_DATA_ACKED;
			ca_seq_rtt = -1;
			seq_rtt = -1;
			if ((flag & FLAG_DATA_ACKED) || (acked_pcount > 1))
				flag |= FLAG_NONHEAD_RETRANS_ACKED;
		} else {
			ca_seq_rtt = now - scb->when;
			last_ackt = skb->tstamp;
			if (seq_rtt < 0)
				seq_rtt = ca_seq_rtt;
			if (!(sacked & TCPCB_SACKED_ACKED))
				reord = min(pkts_acked, reord);
		}

		if (sacked & TCPCB_SACKED_ACKED)
			tp->sacked_out -= acked_pcount;
		if (sacked & TCPCB_LOST)
			tp->lost_out -= acked_pcount;

		if (unlikely(tp->urg_mode && !before(end_seq, tp->snd_up)))
			tp->urg_mode = 0;

		tp->packets_out -= acked_pcount;
		pkts_acked += acked_pcount;

		/* Initial outgoing SYN's get put onto the write_queue
		 * just like anything else we transmit.  It is not
		 * true data, and if we misinform our callers that
		 * this ACK acks real data, we will erroneously exit
		 * connection startup slow start one packet too
		 * quickly.  This is severely frowned upon behavior.
		 */
		if (!(scb->flags & TCPCB_FLAG_SYN)) {
			flag |= FLAG_DATA_ACKED;
		} else {
			flag |= FLAG_SYN_ACKED;
			tp->retrans_stamp = 0;
		}

		if (!fully_acked)
			break;

		tcp_unlink_write_queue(skb, sk);
		sk_wmem_free_skb(sk, skb);
		tcp_clear_all_retrans_hints(tp);
	}

	if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
		flag |= FLAG_SACK_RENEGING;

	if (flag & FLAG_ACKED) {
		const struct tcp_congestion_ops *ca_ops
			= inet_csk(sk)->icsk_ca_ops;

		tcp_ack_update_rtt(sk, flag, seq_rtt);
		tcp_rearm_rto(sk);

		if (tcp_is_reno(tp)) {
			tcp_remove_reno_sacks(sk, pkts_acked);
		} else {
			/* Non-retransmitted hole got filled? That's reordering */
			if (reord < prior_fackets)
				tcp_update_reordering(sk, tp->fackets_out - reord, 0);
		}

		tp->fackets_out -= min(pkts_acked, tp->fackets_out);

		if (ca_ops->pkts_acked) {
			s32 rtt_us = -1;

			/* Is the ACK triggering packet unambiguous? */
			if (!(flag & FLAG_RETRANS_DATA_ACKED)) {
				/* High resolution needed and available? */
				if (ca_ops->flags & TCP_CONG_RTT_STAMP &&
				    !ktime_equal(last_ackt,
						 net_invalid_timestamp()))
					rtt_us = ktime_us_delta(ktime_get_real(),
								last_ackt);
				else if (ca_seq_rtt > 0)
					rtt_us = jiffies_to_usecs(ca_seq_rtt);
			}

			ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
		}
	}

#if FASTRETRANS_DEBUG > 0
	BUG_TRAP((int)tp->sacked_out >= 0);
	BUG_TRAP((int)tp->lost_out >= 0);
	BUG_TRAP((int)tp->retrans_out >= 0);
	if (!tp->packets_out && tcp_is_sack(tp)) {
		icsk = inet_csk(sk);
		if (tp->lost_out) {
			printk(KERN_DEBUG "Leak l=%u %d\n",
			       tp->lost_out, icsk->icsk_ca_state);
			tp->lost_out = 0;
		}
		if (tp->sacked_out) {
			printk(KERN_DEBUG "Leak s=%u %d\n",
			       tp->sacked_out, icsk->icsk_ca_state);
			tp->sacked_out = 0;
		}
		if (tp->retrans_out) {
			printk(KERN_DEBUG "Leak r=%u %d\n",
			       tp->retrans_out, icsk->icsk_ca_state);
			tp->retrans_out = 0;
		}
	}
#endif
	return flag;
}
    
static void tcp_ack_probe(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Was it a usable window open? */

	if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) {
		icsk->icsk_backoff = 0;
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
		/* Socket must be woken up by a subsequent tcp_data_snd_check().
		 * This function is not for random using!
		 */
	} else {
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
					  TCP_RTO_MAX);
	}
}

static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
{
	return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
		inet_csk(sk)->icsk_ca_state != TCP_CA_Open);
}

static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
		!((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR));
}
    
    /* Check that window update is acceptable.
     * The function assumes that snd_una<=ack<=snd_next.
     */
    
    static inline int tcp_may_update_window(const struct tcp_sock *tp,