    /*
     * INET		An implementation of the TCP/IP protocol suite for the LINUX
     *		operating system.  INET is implemented using the  BSD Socket
     *		interface as the means of communication with the user level.
     *
     *		Implementation of the Transmission Control Protocol(TCP).
     *
     * Version:	$Id: tcp_input.c,v 1.243 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
     *		Corey Minyard <wf-rch!minyard@relay.EU.net>
     *		Florian La Roche, <flla@stud.uni-sb.de>
     *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
     *		Linus Torvalds, <torvalds@cs.helsinki.fi>
     *		Alan Cox, <gw4pts@gw4pts.ampr.org>
     *		Matthew Dillon, <dillon@apollo.west.oic.com>
     *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
     *		Jorge Cwik, <jorge@laser.satlink.net>
     */
    
    /*
     * Changes:
     *		Pedro Roque	:	Fast Retransmit/Recovery.
     *					Two receive queues.
     *					Retransmit queue handled by TCP.
     *					Better retransmit timer handling.
     *					New congestion avoidance.
     *					Header prediction.
     *					Variable renaming.
     *
     *		Eric		:	Fast Retransmit.
     *		Randy Scott	:	MSS option defines.
     *		Eric Schenk	:	Fixes to slow start algorithm.
     *		Eric Schenk	:	Yet another double ACK bug.
     *		Eric Schenk	:	Delayed ACK bug fixes.
     *		Eric Schenk	:	Floyd style fast retrans war avoidance.
     *		David S. Miller	:	Don't allow zero congestion window.
     *		Eric Schenk	:	Fix retransmitter so that it sends
     *					next packet on ack of previous packet.
     *		Andi Kleen	:	Moved open_request checking here
     *					and process RSTs for open_requests.
 *		Andi Kleen	:	Better prune_queue, and other fixes.
 *		Andrey Savochkin:	Fix RTT measurements in the presence of
 *					timestamps.
     *		Andrey Savochkin:	Check sequence numbers correctly when
     *					removing SACKs due to in sequence incoming
     *					data segments.
     *		Andi Kleen:		Make sure we never ack data there is not
     *					enough room for. Also make this condition
     *					a fatal error if it might still happen.
     *		Andi Kleen:		Add tcp_measure_rcv_mss to make 
     *					connections with MSS<min(MTU,ann. MSS)
     *					work without delayed acks. 
     *		Andi Kleen:		Process packets with PSH set in the
     *					fast path.
     *		J Hadi Salim:		ECN support
     *	 	Andrei Gurtov,
     *		Pasi Sarolahti,
     *		Panu Kuhlberg:		Experimental audit of TCP (re)transmission
     *					engine. Lots of bugs are found.
     *		Pasi Sarolahti:		F-RTO for dealing with spurious RTOs
     */
    
    #include <linux/mm.h>
    #include <linux/module.h>
    #include <linux/sysctl.h>
    #include <net/tcp.h>
    #include <net/inet_common.h>
    #include <linux/ipsec.h>
    #include <asm/unaligned.h>
    
    #include <net/netdma.h>
    
    
    int sysctl_tcp_timestamps __read_mostly = 1;
    int sysctl_tcp_window_scaling __read_mostly = 1;
    int sysctl_tcp_sack __read_mostly = 1;
    int sysctl_tcp_fack __read_mostly = 1;
    int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
    int sysctl_tcp_ecn __read_mostly;
    int sysctl_tcp_dsack __read_mostly = 1;
    int sysctl_tcp_app_win __read_mostly = 31;
    int sysctl_tcp_adv_win_scale __read_mostly = 2;
    
    
    int sysctl_tcp_stdurg __read_mostly;
    int sysctl_tcp_rfc1337 __read_mostly;
    int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
    int sysctl_tcp_frto __read_mostly;
    int sysctl_tcp_nometrics_save __read_mostly;
    
    
    int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
    int sysctl_tcp_abc __read_mostly;
    
    
    #define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
    #define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
    #define FLAG_DATA_ACKED		0x04 /* This ACK acknowledged new data.		*/
    #define FLAG_RETRANS_DATA_ACKED	0x08 /* "" "" some of which was retransmitted.	*/
    #define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
    #define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
    #define FLAG_ECE		0x40 /* ECE in this ACK				*/
    #define FLAG_DATA_LOST		0x80 /* SACK detected data lossage.		*/
    #define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
    
    #define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
    #define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
    #define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
    #define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)
    
    #define IsReno(tp) ((tp)->rx_opt.sack_ok == 0)
    #define IsFack(tp) ((tp)->rx_opt.sack_ok & 2)
    #define IsDSack(tp) ((tp)->rx_opt.sack_ok & 4)
    
    #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
    
    /* Adapt the MSS value used to make delayed ack decision to the 
     * real world.
     */ 
    
    Stephen Hemminger's avatar
    Stephen Hemminger committed
    static void tcp_measure_rcv_mss(struct sock *sk,
    				const struct sk_buff *skb)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	struct inet_connection_sock *icsk = inet_csk(sk);
    	const unsigned int lss = icsk->icsk_ack.last_seg_size; 
    	unsigned int len;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    	icsk->icsk_ack.last_seg_size = 0; 
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    	/* skb->len may jitter because of SACKs, even if peer
    	 * sends good full-sized frames.
    	 */
    
    	len = skb_shinfo(skb)->gso_size ?: skb->len;
    
    	if (len >= icsk->icsk_ack.rcv_mss) {
    		icsk->icsk_ack.rcv_mss = len;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	} else {
    		/* Otherwise, we make more careful check taking into account,
    		 * that SACKs block is variable.
    		 *
    		 * "len" is invariant segment length, including TCP header.
    		 */
    		len += skb->data - skb->h.raw;
    		if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
    		    /* If PSH is not set, packet should be
    		     * full sized, provided peer TCP is not badly broken.
    		     * This observation (if it is correct 8)) allows
    		     * to handle super-low mtu links fairly.
    		     */
    		    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
    		     !(tcp_flag_word(skb->h.th)&TCP_REMNANT))) {
    			/* Subtract also invariant (if peer is RFC compliant),
    			 * tcp header plus fixed timestamp option length.
    			 * Resulting "len" is MSS free of SACK jitter.
    			 */
    
    			len -= tcp_sk(sk)->tcp_header_len;
    			icsk->icsk_ack.last_seg_size = len;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			if (len == lss) {
    
    				icsk->icsk_ack.rcv_mss = len;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    				return;
    			}
    		}
    
    		if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
    			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
    
    		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
    
    static void tcp_incr_quickack(struct sock *sk)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	struct inet_connection_sock *icsk = inet_csk(sk);
    	unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    	if (quickacks==0)
    		quickacks=2;
    
    	if (quickacks > icsk->icsk_ack.quick)
    		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
    
    void tcp_enter_quickack_mode(struct sock *sk)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	struct inet_connection_sock *icsk = inet_csk(sk);
    	tcp_incr_quickack(sk);
    	icsk->icsk_ack.pingpong = 0;
    	icsk->icsk_ack.ato = TCP_ATO_MIN;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    }
    
    /* Send ACKs quickly, if "quick" count is not exhausted
     * and the session is not interactive.
     */
    
    
    static inline int tcp_in_quickack_mode(const struct sock *sk)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	const struct inet_connection_sock *icsk = inet_csk(sk);
    	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    }
    
    /* Buffer size and advertised window tuning.
     *
     * 1. Tuning sk->sk_sndbuf, when connection enters established state.
     */
    
    static void tcp_fixup_sndbuf(struct sock *sk)
    {
    	int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
    		     sizeof(struct sk_buff);
    
    	if (sk->sk_sndbuf < 3 * sndmem)
    		sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]);
    }
    
    /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
     *
     * All tcp_full_space() is split to two parts: "network" buffer, allocated
     * forward and advertised in receiver window (tp->rcv_wnd) and
     * "application buffer", required to isolate scheduling/application
     * latencies from network.
     * window_clamp is maximal advertised window. It can be less than
     * tcp_full_space(), in this case tcp_full_space() - window_clamp
     * is reserved for "application" buffer. The less window_clamp is
     * the smoother our behaviour from viewpoint of network, but the lower
     * throughput and the higher sensitivity of the connection to losses. 8)
     *
     * rcv_ssthresh is more strict window_clamp used at "slow start"
     * phase to predict further behaviour of this connection.
     * It is used for two goals:
     * - to enforce header prediction at sender, even when application
     *   requires some significant "application buffer". It is check #1.
     * - to prevent pruning of receive queue because of misprediction
     *   of receiver window. Check #2.
     *
     * The scheme does not work when sender sends good segments opening
    
    Stephen Hemminger's avatar
    Stephen Hemminger committed
     * window and then starts to feed us spaghetti. But it should work
    
    Linus Torvalds's avatar
    Linus Torvalds committed
     * in common situations. Otherwise, we have to rely on queue collapsing.
     */
    
    /* Slow part of check#2. */
    
    static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
    			     const struct sk_buff *skb)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	/* Optimize this! */
    	int truesize = tcp_win_from_space(skb->truesize)/2;
    
    	int window = tcp_win_from_space(sysctl_tcp_rmem[2])/2;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    	while (tp->rcv_ssthresh <= window) {
    		if (truesize <= skb->len)
    
    			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    		truesize >>= 1;
    		window >>= 1;
    	}
    	return 0;
    }
    
    
    Stephen Hemminger's avatar
    Stephen Hemminger committed
    static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
    			    struct sk_buff *skb)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	/* Check #1 */
    	if (tp->rcv_ssthresh < tp->window_clamp &&
    	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
    	    !tcp_memory_pressure) {
    		int incr;
    
    		/* Check #2. Increase window, if skb with such overhead
    		 * will fit to rcvbuf in future.
    		 */
    		if (tcp_win_from_space(skb->truesize) <= skb->len)
    			incr = 2*tp->advmss;
    		else
    			incr = __tcp_grow_window(sk, tp, skb);
    
    		if (incr) {
    			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
    
    			inet_csk(sk)->icsk_ack.quick |= 1;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		}
    	}
    }
    
    /* 3. Tuning rcvbuf, when connection enters established state. */
    
    static void tcp_fixup_rcvbuf(struct sock *sk)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    	int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
    
    	/* Try to select rcvbuf so that 4 mss-sized segments
    
    Stephen Hemminger's avatar
    Stephen Hemminger committed
    	 * will fit to window and corresponding skbs will fit to our rcvbuf.
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	 * (was 3; 4 is minimum to allow fast retransmit to work.)
    	 */
    	while (tcp_win_from_space(rcvmem) < tp->advmss)
    		rcvmem += 128;
    	if (sk->sk_rcvbuf < 4 * rcvmem)
    		sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]);
    }
    
    
    Stephen Hemminger's avatar
    Stephen Hemminger committed
/* 4. Try to fixup all. It is made immediately after connection enters
 *    established state.
 */
static void tcp_init_buffer_space(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int maxwin;

	/* Size buffers unless the application pinned them via
	 * SO_RCVBUF / SO_SNDBUF (recorded in sk_userlocks). */
	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
		tcp_fixup_rcvbuf(sk);
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
		tcp_fixup_sndbuf(sk);

	/* Seed the receive-space estimator with the current window. */
	tp->rcvq_space.space = tp->rcv_wnd;

	maxwin = tcp_full_space(sk);

	if (tp->window_clamp >= maxwin) {
		tp->window_clamp = maxwin;

		/* Reserve 1/2^tcp_app_win of the space for the
		 * application, but never clamp below 4 MSS. */
		if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss)
			tp->window_clamp = max(maxwin -
					       (maxwin >> sysctl_tcp_app_win),
					       4 * tp->advmss);
	}

	/* Force reservation of one segment. */
	if (sysctl_tcp_app_win &&
	    tp->window_clamp > 2 * tp->advmss &&
	    tp->window_clamp + tp->advmss > maxwin)
		tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}
    
    /* 5. Recalculate window clamp after socket hit its memory bounds. */
    static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
    {
    
    	struct inet_connection_sock *icsk = inet_csk(sk);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
    	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
    	    !tcp_memory_pressure &&
    	    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
    		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
    				    sysctl_tcp_rmem[2]);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	}
    
    	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss);
    }
    
    
    Stephen Hemminger's avatar
    Stephen Hemminger committed
    
    /* Initialize RCV_MSS value.
     * RCV_MSS is an our guess about MSS used by the peer.
     * We haven't any direct information about the MSS.
     * It's better to underestimate the RCV_MSS rather than overestimate.
     * Overestimations make us ACKing less frequently than needed.
     * Underestimations are more easy to detect and fix by tcp_measure_rcv_mss().
     */
    void tcp_initialize_rcv_mss(struct sock *sk)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
    
    	hint = min(hint, tp->rcv_wnd/2);
    	hint = min(hint, TCP_MIN_RCVMSS);
    	hint = max(hint, TCP_MIN_MSS);
    
    	inet_csk(sk)->icsk_ack.rcv_mss = hint;
    }
    
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    /* Receiver "autotuning" code.
     *
     * The algorithm for RTT estimation w/o timestamps is based on
     * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
     * <http://www.lanl.gov/radiant/website/pubs/drs/lacsi2001.ps>
     *
     * More detail on this code can be found at
     * <http://www.psc.edu/~jheffner/senior_thesis.ps>,
     * though this reference is out of date.  A new paper
     * is pending.
     */
    static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
    {
    	u32 new_sample = tp->rcv_rtt_est.rtt;
    	long m = sample;
    
    	if (m == 0)
    		m = 1;
    
    	if (new_sample != 0) {
    		/* If we sample in larger samples in the non-timestamp
    		 * case, we could grossly overestimate the RTT especially
    		 * with chatty applications or bulk transfer apps which
    		 * are stalled on filesystem I/O.
    		 *
    		 * Also, since we are only going for a minimum in the
    
    		 * non-timestamp case, we do not smooth things out
    
    Stephen Hemminger's avatar
    Stephen Hemminger committed
    		 * else with timestamps disabled convergence takes too
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		 * long.
    		 */
    		if (!win_dep) {
    			m -= (new_sample >> 3);
    			new_sample += m;
    		} else if (m < new_sample)
    			new_sample = m << 3;
    	} else {
    
    Stephen Hemminger's avatar
    Stephen Hemminger committed
    		/* No previous measure. */
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		new_sample = m << 3;
    	}
    
    	if (tp->rcv_rtt_est.rtt != new_sample)
    		tp->rcv_rtt_est.rtt = new_sample;
    }
    
    static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
    {
    	if (tp->rcv_rtt_est.time == 0)
    		goto new_measure;
    	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
    		return;
    	tcp_rcv_rtt_update(tp,
    			   jiffies - tp->rcv_rtt_est.time,
    			   1);
    
    new_measure:
    	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
    	tp->rcv_rtt_est.time = tcp_time_stamp;
    }
    
    
    static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	struct tcp_sock *tp = tcp_sk(sk);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	if (tp->rx_opt.rcv_tsecr &&
    	    (TCP_SKB_CB(skb)->end_seq -
    
    	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
    }
    
    /*
     * This function should be called every time data is copied to user space.
     * It calculates the appropriate TCP receive buffer space.
     */
    void tcp_rcv_space_adjust(struct sock *sk)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    	int time;
    	int space;
    	
    	if (tp->rcvq_space.time == 0)
    		goto new_measure;
    	
    	time = tcp_time_stamp - tp->rcvq_space.time;
    	if (time < (tp->rcv_rtt_est.rtt >> 3) ||
    	    tp->rcv_rtt_est.rtt == 0)
    		return;
    	
    	space = 2 * (tp->copied_seq - tp->rcvq_space.seq);
    
    	space = max(tp->rcvq_space.space, space);
    
    	if (tp->rcvq_space.space != space) {
    		int rcvmem;
    
    		tp->rcvq_space.space = space;
    
    
    		if (sysctl_tcp_moderate_rcvbuf &&
    		    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			int new_clamp = space;
    
    			/* Receive space grows, normalize in order to
    			 * take into account packet headers and sk_buff
    			 * structure overhead.
    			 */
    			space /= tp->advmss;
    			if (!space)
    				space = 1;
    			rcvmem = (tp->advmss + MAX_TCP_HEADER +
    				  16 + sizeof(struct sk_buff));
    			while (tcp_win_from_space(rcvmem) < tp->advmss)
    				rcvmem += 128;
    			space *= rcvmem;
    			space = min(space, sysctl_tcp_rmem[2]);
    			if (space > sk->sk_rcvbuf) {
    				sk->sk_rcvbuf = space;
    
    				/* Make the window clamp follow along.  */
    				tp->window_clamp = new_clamp;
    			}
    		}
    	}
    	
    new_measure:
    	tp->rcvq_space.seq = tp->copied_seq;
    	tp->rcvq_space.time = tcp_time_stamp;
    }
    
    /* There is something which you must keep in mind when you analyze the
     * behavior of the tp->ato delayed ack timeout interval.  When a
     * connection starts up, we want to ack as quickly as possible.  The
     * problem is that "good" TCP's do slow start at the beginning of data
     * transmission.  The means that until we send the first few ACK's the
     * sender will sit on his end and only queue most of his data, because
     * he can only send snd_cwnd unacked packets at any given time.  For
     * each ACK we send, he increments snd_cwnd and transmits more of his
     * queue.  -DaveM
     */
    static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
    {
    
    	struct inet_connection_sock *icsk = inet_csk(sk);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	u32 now;
    
    
    	inet_csk_schedule_ack(sk);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    	tcp_measure_rcv_mss(sk, skb);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    	tcp_rcv_rtt_measure(tp);
    	
    	now = tcp_time_stamp;
    
    
    	if (!icsk->icsk_ack.ato) {
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		/* The _first_ data packet received, initialize
    		 * delayed ACK engine.
    		 */
    
    		tcp_incr_quickack(sk);
    		icsk->icsk_ack.ato = TCP_ATO_MIN;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	} else {
    
    		int m = now - icsk->icsk_ack.lrcvtime;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    		if (m <= TCP_ATO_MIN/2) {
    			/* The fastest case is the first. */
    
    			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
    		} else if (m < icsk->icsk_ack.ato) {
    			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
    			if (icsk->icsk_ack.ato > icsk->icsk_rto)
    				icsk->icsk_ack.ato = icsk->icsk_rto;
    		} else if (m > icsk->icsk_rto) {
    
    Stephen Hemminger's avatar
    Stephen Hemminger committed
    			/* Too long gap. Apparently sender failed to
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			 * restart window, so that we send ACKs quickly.
    			 */
    
    			tcp_incr_quickack(sk);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			sk_stream_mem_reclaim(sk);
    		}
    	}
    
    	icsk->icsk_ack.lrcvtime = now;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    	TCP_ECN_check_ce(tp, skb);
    
    	if (skb->len >= 128)
    		tcp_grow_window(sk, tp, skb);
    }
    
    /* Called to compute a smoothed rtt estimate. The data fed to this
     * routine either comes from timestamps, or from segments that were
     * known _not_ to have been retransmitted [see Karn/Partridge
     * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
     * piece by Van Jacobson.
     * NOTE: the next three routines used to be one big routine.
     * To save cycles in the RFC 1323 implementation it was better to break
     * it up into three procedures. -- erics
     */
    
    static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	struct tcp_sock *tp = tcp_sk(sk);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	long m = mrtt; /* RTT */
    
    	/*	The following amusing code comes from Jacobson's
    	 *	article in SIGCOMM '88.  Note that rtt and mdev
    	 *	are scaled versions of rtt and mean deviation.
    	 *	This is designed to be as fast as possible 
    	 *	m stands for "measurement".
    	 *
    	 *	On a 1990 paper the rto value is changed to:
    	 *	RTO = rtt + 4 * mdev
    	 *
    	 * Funny. This algorithm seems to be very broken.
    	 * These formulae increase RTO, when it should be decreased, increase
    
    	 * too slowly, when it should be increased quickly, decrease too quickly
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	 * etc. I guess in BSD RTO takes ONE value, so that it is absolutely
    	 * does not matter how to _calculate_ it. Seems, it was trap
    	 * that VJ failed to avoid. 8)
    	 */
    	if(m == 0)
    		m = 1;
    	if (tp->srtt != 0) {
    		m -= (tp->srtt >> 3);	/* m is now error in rtt est */
    		tp->srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
    		if (m < 0) {
    			m = -m;		/* m is now abs(error) */
    			m -= (tp->mdev >> 2);   /* similar update on mdev */
    			/* This is similar to one of Eifel findings.
    			 * Eifel blocks mdev updates when rtt decreases.
    			 * This solution is a bit different: we use finer gain
    			 * for mdev in this case (alpha*beta).
    			 * Like Eifel it also prevents growth of rto,
    			 * but also it limits too fast rto decreases,
    			 * happening in pure Eifel.
    			 */
    			if (m > 0)
    				m >>= 3;
    		} else {
    			m -= (tp->mdev >> 2);   /* similar update on mdev */
    		}
    		tp->mdev += m;	    	/* mdev = 3/4 mdev + 1/4 new */
    		if (tp->mdev > tp->mdev_max) {
    			tp->mdev_max = tp->mdev;
    			if (tp->mdev_max > tp->rttvar)
    				tp->rttvar = tp->mdev_max;
    		}
    		if (after(tp->snd_una, tp->rtt_seq)) {
    			if (tp->mdev_max < tp->rttvar)
    				tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
    			tp->rtt_seq = tp->snd_nxt;
    			tp->mdev_max = TCP_RTO_MIN;
    		}
    	} else {
    		/* no previous measure. */
    		tp->srtt = m<<3;	/* take the measured time to be rtt */
    		tp->mdev = m<<1;	/* make sure rto = 3*rtt */
    		tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
    		tp->rtt_seq = tp->snd_nxt;
    	}
    }
    
    /* Calculate rto without backoff.  This is the second half of Van Jacobson's
     * routine referred to above.
     */
    
    static inline void tcp_set_rto(struct sock *sk)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	const struct tcp_sock *tp = tcp_sk(sk);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	/* Old crap is replaced with new one. 8)
    	 *
    	 * More seriously:
    	 * 1. If rtt variance happened to be less 50msec, it is hallucination.
    	 *    It cannot be less due to utterly erratic ACK generation made
    	 *    at least by solaris and freebsd. "Erratic ACKs" has _nothing_
    	 *    to do with delayed acks, because at cwnd>2 true delack timeout
    	 *    is invisible. Actually, Linux-2.4 also generates erratic
    
    Stephen Hemminger's avatar
    Stephen Hemminger committed
    	 *    ACKs in some circumstances.
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	 */
    
    	inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    	/* 2. Fixups made earlier cannot be right.
    	 *    If we do not estimate RTO correctly without them,
    	 *    all the algo is pure shit and should be replaced
    
    Stephen Hemminger's avatar
    Stephen Hemminger committed
    	 *    with correct one. It is exactly, which we pretend to do.
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	 */
    }
    
    /* NOTE: clamping at TCP_RTO_MIN is not required, current algo
     * guarantees that rto is higher.
     */
    
    static inline void tcp_bound_rto(struct sock *sk)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
    		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    }
    
    /* Save metrics learned by this TCP session.
       This function is called only, when TCP finishes successfully
       i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
     */
    void tcp_update_metrics(struct sock *sk)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    	struct dst_entry *dst = __sk_dst_get(sk);
    
    	if (sysctl_tcp_nometrics_save)
    		return;
    
    	dst_confirm(dst);
    
    	if (dst && (dst->flags&DST_HOST)) {
    
    		const struct inet_connection_sock *icsk = inet_csk(sk);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		int m;
    
    
    		if (icsk->icsk_backoff || !tp->srtt) {
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			/* This session failed to estimate rtt. Why?
    			 * Probably, no packets returned in time.
    			 * Reset our results.
    			 */
    			if (!(dst_metric_locked(dst, RTAX_RTT)))
    				dst->metrics[RTAX_RTT-1] = 0;
    			return;
    		}
    
    		m = dst_metric(dst, RTAX_RTT) - tp->srtt;
    
    		/* If newly calculated rtt larger than stored one,
    		 * store new one. Otherwise, use EWMA. Remember,
    		 * rtt overestimation is always better than underestimation.
    		 */
    		if (!(dst_metric_locked(dst, RTAX_RTT))) {
    			if (m <= 0)
    				dst->metrics[RTAX_RTT-1] = tp->srtt;
    			else
    				dst->metrics[RTAX_RTT-1] -= (m>>3);
    		}
    
    		if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
    			if (m < 0)
    				m = -m;
    
    			/* Scale deviation to rttvar fixed point */
    			m >>= 1;
    			if (m < tp->mdev)
    				m = tp->mdev;
    
    			if (m >= dst_metric(dst, RTAX_RTTVAR))
    				dst->metrics[RTAX_RTTVAR-1] = m;
    			else
    				dst->metrics[RTAX_RTTVAR-1] -=
    					(dst->metrics[RTAX_RTTVAR-1] - m)>>2;
    		}
    
    		if (tp->snd_ssthresh >= 0xFFFF) {
    			/* Slow start still did not finish. */
    			if (dst_metric(dst, RTAX_SSTHRESH) &&
    			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
    			    (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
    				dst->metrics[RTAX_SSTHRESH-1] = tp->snd_cwnd >> 1;
    			if (!dst_metric_locked(dst, RTAX_CWND) &&
    			    tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
    				dst->metrics[RTAX_CWND-1] = tp->snd_cwnd;
    		} else if (tp->snd_cwnd > tp->snd_ssthresh &&
    
    			   icsk->icsk_ca_state == TCP_CA_Open) {
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			/* Cong. avoidance phase, cwnd is reliable. */
    			if (!dst_metric_locked(dst, RTAX_SSTHRESH))
    				dst->metrics[RTAX_SSTHRESH-1] =
    					max(tp->snd_cwnd >> 1, tp->snd_ssthresh);
    			if (!dst_metric_locked(dst, RTAX_CWND))
    				dst->metrics[RTAX_CWND-1] = (dst->metrics[RTAX_CWND-1] + tp->snd_cwnd) >> 1;
    		} else {
    			/* Else slow start did not finish, cwnd is non-sense,
    			   ssthresh may be also invalid.
    			 */
    			if (!dst_metric_locked(dst, RTAX_CWND))
    				dst->metrics[RTAX_CWND-1] = (dst->metrics[RTAX_CWND-1] + tp->snd_ssthresh) >> 1;
    			if (dst->metrics[RTAX_SSTHRESH-1] &&
    			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
    			    tp->snd_ssthresh > dst->metrics[RTAX_SSTHRESH-1])
    				dst->metrics[RTAX_SSTHRESH-1] = tp->snd_ssthresh;
    		}
    
    		if (!dst_metric_locked(dst, RTAX_REORDERING)) {
    			if (dst->metrics[RTAX_REORDERING-1] < tp->reordering &&
    			    tp->reordering != sysctl_tcp_reordering)
    				dst->metrics[RTAX_REORDERING-1] = tp->reordering;
    		}
    	}
    }
    
    /* Numbers are taken from RFC2414.  */
    __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
    {
    	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
    
    	if (!cwnd) {
    
    		if (tp->mss_cache > 1460)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			cwnd = 2;
    		else
    
    			cwnd = (tp->mss_cache > 1095) ? 3 : 4;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	}
    	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
    }
    
    
    Stephen Hemminger's avatar
    Stephen Hemminger committed
    /* Set slow start threshold and cwnd not falling to slow start */
    void tcp_enter_cwr(struct sock *sk)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    
    	tp->prior_ssthresh = 0;
    	tp->bytes_acked = 0;
    	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
    		tp->undo_marker = 0;
    		tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
    		tp->snd_cwnd = min(tp->snd_cwnd,
    				   tcp_packets_in_flight(tp) + 1U);
    		tp->snd_cwnd_cnt = 0;
    		tp->high_seq = tp->snd_nxt;
    		tp->snd_cwnd_stamp = tcp_time_stamp;
    		TCP_ECN_queue_cwr(tp);
    
    		tcp_set_ca_state(sk, TCP_CA_CWR);
    	}
    }
    
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    /* Initialize metrics on socket. */
    
    static void tcp_init_metrics(struct sock *sk)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    	struct dst_entry *dst = __sk_dst_get(sk);
    
    	if (dst == NULL)
    		goto reset;
    
    	dst_confirm(dst);
    
    	if (dst_metric_locked(dst, RTAX_CWND))
    		tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
    	if (dst_metric(dst, RTAX_SSTHRESH)) {
    		tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
    		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
    			tp->snd_ssthresh = tp->snd_cwnd_clamp;
    	}
    	if (dst_metric(dst, RTAX_REORDERING) &&
    	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
    		tp->rx_opt.sack_ok &= ~2;
    		tp->reordering = dst_metric(dst, RTAX_REORDERING);
    	}
    
    	if (dst_metric(dst, RTAX_RTT) == 0)
    		goto reset;
    
    	if (!tp->srtt && dst_metric(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
    		goto reset;
    
    	/* Initial rtt is determined from SYN,SYN-ACK.
    	 * The segment is small and rtt may appear much
    	 * less than real one. Use per-dst memory
    	 * to make it more realistic.
    	 *
    	 * A bit of theory. RTT is time passed after "normal" sized packet
    
    Stephen Hemminger's avatar
    Stephen Hemminger committed
    	 * is sent until it is ACKed. In normal circumstances sending small
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	 * packets force peer to delay ACKs and calculation is correct too.
    	 * The algorithm is adaptive and, provided we follow specs, it
    	 * NEVER underestimate RTT. BUT! If peer tries to make some clever
    	 * tricks sort of "quick acks" for time long enough to decrease RTT
    	 * to low value, and then abruptly stops to do it and starts to delay
    	 * ACKs, wait for troubles.
    	 */
    	if (dst_metric(dst, RTAX_RTT) > tp->srtt) {
    		tp->srtt = dst_metric(dst, RTAX_RTT);
    		tp->rtt_seq = tp->snd_nxt;
    	}
    	if (dst_metric(dst, RTAX_RTTVAR) > tp->mdev) {
    		tp->mdev = dst_metric(dst, RTAX_RTTVAR);
    		tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
    	}
    
    	tcp_set_rto(sk);
    	tcp_bound_rto(sk);
    	if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		goto reset;
    	tp->snd_cwnd = tcp_init_cwnd(tp, dst);
    	tp->snd_cwnd_stamp = tcp_time_stamp;
    	return;
    
    reset:
    	/* Play conservative. If timestamps are not
    	 * supported, TCP will fail to recalculate correct
    	 * rtt, if initial rto is too small. FORGET ALL AND RESET!
    	 */
    	if (!tp->rx_opt.saw_tstamp && tp->srtt) {
    		tp->srtt = 0;
    		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
    
    		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
    
    static void tcp_update_reordering(struct sock *sk, const int metric,
    				  const int ts)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	struct tcp_sock *tp = tcp_sk(sk);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	if (metric > tp->reordering) {
    		tp->reordering = min(TCP_MAX_REORDERING, metric);
    
    		/* This exciting event is worth to be remembered. 8) */
    		if (ts)
    			NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
    		else if (IsReno(tp))
    			NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
    		else if (IsFack(tp))
    			NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
    		else
    			NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
    #if FASTRETRANS_DEBUG > 1
    		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
    
    		       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		       tp->reordering,
    		       tp->fackets_out,
    		       tp->sacked_out,
    		       tp->undo_marker ? tp->undo_retrans : 0);
    #endif
    		/* Disable FACK yet. */
    		tp->rx_opt.sack_ok &= ~2;
    	}
    }
    
    /* This procedure tags the retransmission queue when SACKs arrive.
     *
     * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
     * Packets in queue with these bits set are counted in variables
     * sacked_out, retrans_out and lost_out, correspondingly.
     *
     * Valid combinations are:
     * Tag  InFlight	Description
     * 0	1		- orig segment is in flight.
     * S	0		- nothing flies, orig reached receiver.
     * L	0		- nothing flies, orig lost by net.
     * R	2		- both orig and retransmit are in flight.
     * L|R	1		- orig is lost, retransmit is in flight.
     * S|R  1		- orig reached receiver, retrans is still in flight.
     * (L|S|R is logically valid, it could occur when L|R is sacked,
 *  but it is equivalent to plain S and code short-circuits it to S.
     *  L|S is logically invalid, it would mean -1 packet in flight 8))
     *
     * These 6 states form finite state machine, controlled by the following events:
     * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
     * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
     * 3. Loss detection event of one of three flavors:
     *	A. Scoreboard estimator decided the packet is lost.
     *	   A'. Reno "three dupacks" marks head of queue lost.
 *	   A''. Its FACK modification, head until snd.fack is lost.
     *	B. SACK arrives sacking data transmitted after never retransmitted
     *	   hole was sent out.
     *	C. SACK arrives sacking SND.NXT at the moment, when the
     *	   segment was retransmitted.
     * 4. D-SACK added new rule: D-SACK changes any tag to S.
     *
     * It is pleasant to note, that state diagram turns out to be commutative,
     * so that we are allowed not to be bothered by order of our actions,
     * when multiple events arrive simultaneously. (see the function below).
     *
     * Reordering detection.
     * --------------------
     * Reordering metric is maximal distance, which a packet can be displaced
     * in packet stream. With SACKs we can estimate it:
     *
     * 1. SACK fills old hole and the corresponding segment was not
     *    ever retransmitted -> reordering. Alas, we cannot use it
     *    when segment was retransmitted.
     * 2. The last flaw is solved with D-SACK. D-SACK arrives
     *    for retransmitted and already SACKed segment -> reordering..
     * Both of these heuristics are not used in Loss state, when we cannot
     * account for retransmits accurately.
     */
    static int
    tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
    {
    
    	const struct inet_connection_sock *icsk = inet_csk(sk);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	struct tcp_sock *tp = tcp_sk(sk);
    	unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
    
    	struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
    
    	struct sk_buff *cached_skb;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
    	int reord = tp->packets_out;
    	int prior_fackets;
    	u32 lost_retrans = 0;
    	int flag = 0;
    
    	int dup_sack = 0;
    
    	int cached_fack_count;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	int i;
    
    	int first_sack_index;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    	if (!tp->sacked_out)
    		tp->fackets_out = 0;
    	prior_fackets = tp->fackets_out;
    
    
    	/* Check for D-SACK. */
    	if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) {
    		dup_sack = 1;
    		tp->rx_opt.sack_ok |= 4;
    		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
    	} else if (num_sacks > 1 &&
    			!after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) &&
    			!before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) {
    		dup_sack = 1;
    		tp->rx_opt.sack_ok |= 4;
    		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
    	}
    
    	/* D-SACK for already forgotten data...
    	 * Do dumb counting. */
    	if (dup_sack &&
    			!after(ntohl(sp[0].end_seq), prior_snd_una) &&
    			after(ntohl(sp[0].end_seq), tp->undo_marker))
    		tp->undo_retrans--;
    
    	/* Eliminate too old ACKs, but take into
    	 * account more or less fresh ones, they can
    	 * contain valid SACK info.
    	 */
    	if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
    		return 0;
    
    
    	/* SACK fastpath:
    	 * if the only SACK change is the increase of the end_seq of
    	 * the first block then only apply that SACK block
    	 * and use retrans queue hinting otherwise slowpath */
    	flag = 1;
    
    	for (i = 0; i < num_sacks; i++) {
    		__be32 start_seq = sp[i].start_seq;
    		__be32 end_seq = sp[i].end_seq;
    
    		if (i == 0) {
    
    			if (tp->recv_sack_cache[i].start_seq != start_seq)
    				flag = 0;
    		} else {
    			if ((tp->recv_sack_cache[i].start_seq != start_seq) ||
    			    (tp->recv_sack_cache[i].end_seq != end_seq))
    				flag = 0;
    		}
    		tp->recv_sack_cache[i].start_seq = start_seq;
    		tp->recv_sack_cache[i].end_seq = end_seq;
    	}