static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcphdr *th = skb->h.th;
	u32 seq = TCP_SKB_CB(skb)->seq;
	u32 ack = TCP_SKB_CB(skb)->ack_seq;
    
    	return (/* 1. Pure ACK with correct sequence number. */
    		(th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&
    
    		/* 2. ... and duplicate ACK. */
    		ack == tp->snd_una &&
    
    		/* 3. ... and does not update window. */
    		!tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
    
		/* 4. ... and sits in replay window. */
		(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
}

static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb)
{
    	const struct tcp_sock *tp = tcp_sk(sk);
	return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
		get_seconds() < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
		!tcp_disordered_ack(sk, skb));
}
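
/*
 * Illustrative sketch, compiled out of the build: the signed-difference
 * test above survives 32-bit timestamp wraparound.  Standalone userspace
 * C, assuming TCP_PAWS_WINDOW = 1 as the kernel defines it.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define TCP_PAWS_WINDOW 1

/* Nonzero when rcv_tsval is older than ts_recent beyond the window. */
static int paws_reject(uint32_t ts_recent, uint32_t rcv_tsval)
{
	return (int32_t)(ts_recent - rcv_tsval) > TCP_PAWS_WINDOW;
}

int main(void)
{
	printf("%d\n", paws_reject(1000, 900));		/* 1: clearly old */
	printf("%d\n", paws_reject(1000, 1000));	/* 0: equal, accepted */
	printf("%d\n", paws_reject(0xfffffffeu, 5));	/* 0: newer across the wrap */
	return 0;
}
#endif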
    
    /* Check segment sequence number for validity.
     *
 * Segment controls are considered valid if the segment
 * fits within the window after truncation to the window. Acceptability
 * of data (and SYN, FIN, of course) is checked separately.
 * See tcp_data_queue(), for example.
 *
 * Also, controls (RST being the main one) are accepted using RCV.WUP instead
 * of RCV.NXT. The peer may not have advanced his SND.UNA when we
 * delayed our ACK, so his SND.UNA <= our RCV.WUP.
     * (borrowed from freebsd)
     */
    
    static inline int tcp_sequence(struct tcp_sock *tp, u32 seq, u32 end_seq)
    {
    	return	!before(end_seq, tp->rcv_wup) &&
    		!after(seq, tp->rcv_nxt + tcp_receive_window(tp));
    }
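
/*
 * Illustrative sketch, compiled out of the build: the acceptability test
 * above with concrete numbers.  before()/after() are re-derived here as
 * the kernel defines them, via signed sequence-number differences.
 * Assumes RCV.WUP = RCV.NXT = 1000 and a 500-byte receive window.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static int before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

static int seq_ok(uint32_t seq, uint32_t end_seq)
{
	return !before(end_seq, 1000) && !after(seq, 1000 + 500);
}

int main(void)
{
	printf("%d\n", seq_ok(1000, 1100));	/* 1: inside the window */
	printf("%d\n", seq_ok(900, 950));	/* 0: entirely old */
	printf("%d\n", seq_ok(1600, 1700));	/* 0: beyond the right edge */
	printf("%d\n", seq_ok(900, 1000));	/* 1: old data whose end touches RCV.WUP */
	return 0;
}
#endif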
    
    /* When we get a reset we do this. */
    static void tcp_reset(struct sock *sk)
    {
    	/* We want the right error as BSD sees it (and indeed as we do). */
    	switch (sk->sk_state) {
    		case TCP_SYN_SENT:
    			sk->sk_err = ECONNREFUSED;
    			break;
    		case TCP_CLOSE_WAIT:
    			sk->sk_err = EPIPE;
    			break;
    		case TCP_CLOSE:
    			return;
    		default:
    			sk->sk_err = ECONNRESET;
    	}
    
    	if (!sock_flag(sk, SOCK_DEAD))
    		sk->sk_error_report(sk);
    
    	tcp_done(sk);
    }
    
    /*
 * 	Process the FIN bit. This now behaves as it is supposed to:
 *	the FIN takes effect only once it is validly part of the sequence
 *	space, not earlier, while holes remain before it.
     *
     *	If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT
     *	(and thence onto LAST-ACK and finally, CLOSE, we never enter
     *	TIME-WAIT)
     *
     *	If we are in FINWAIT-1, a received FIN indicates simultaneous
     *	close and we go into CLOSING (and later onto TIME-WAIT)
     *
     *	If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
     */
    static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
    {
    	struct tcp_sock *tp = tcp_sk(sk);

	inet_csk_schedule_ack(sk);

    	sk->sk_shutdown |= RCV_SHUTDOWN;
    	sock_set_flag(sk, SOCK_DONE);
    
    	switch (sk->sk_state) {
    		case TCP_SYN_RECV:
    		case TCP_ESTABLISHED:
    			/* Move to CLOSE_WAIT */
    			tcp_set_state(sk, TCP_CLOSE_WAIT);
			inet_csk(sk)->icsk_ack.pingpong = 1;
			break;
    
    		case TCP_CLOSE_WAIT:
    		case TCP_CLOSING:
    			/* Received a retransmission of the FIN, do
    			 * nothing.
    			 */
    			break;
    		case TCP_LAST_ACK:
    			/* RFC793: Remain in the LAST-ACK state. */
    			break;
    
    		case TCP_FIN_WAIT1:
    			/* This case occurs when a simultaneous close
    			 * happens, we must ack the received FIN and
    			 * enter the CLOSING state.
    			 */
    			tcp_send_ack(sk);
    			tcp_set_state(sk, TCP_CLOSING);
    			break;
    		case TCP_FIN_WAIT2:
    			/* Received a FIN -- send ACK and enter TIME_WAIT. */
    			tcp_send_ack(sk);
    			tcp_time_wait(sk, TCP_TIME_WAIT, 0);
    			break;
    		default:
    			/* Only TCP_LISTEN and TCP_CLOSE are left, in these
    			 * cases we should never reach this piece of code.
    			 */
    			printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
    			       __FUNCTION__, sk->sk_state);
    			break;
	}
    
    	/* It _is_ possible, that we have something out-of-order _after_ FIN.
    	 * Probably, we should reset in this case. For now drop them.
    	 */
    	__skb_queue_purge(&tp->out_of_order_queue);
    	if (tp->rx_opt.sack_ok)
    		tcp_sack_reset(&tp->rx_opt);
    	sk_stream_mem_reclaim(sk);
    
    	if (!sock_flag(sk, SOCK_DEAD)) {
    		sk->sk_state_change(sk);
    
    		/* Do not send POLL_HUP for half duplex close. */
    		if (sk->sk_shutdown == SHUTDOWN_MASK ||
    		    sk->sk_state == TCP_CLOSE)
    			sk_wake_async(sk, 1, POLL_HUP);
    		else
    			sk_wake_async(sk, 1, POLL_IN);
    	}
    }
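
/*
 * Illustrative sketch, compiled out of the build: what the wakeups at the
 * end of tcp_fin() look like from userspace.  Once the peer's FIN is in
 * the receive queue, poll() reports readability and read() returns 0;
 * POLLHUP typically appears only after both directions are shut down.
 * Assumes fd is a connected TCP socket whose peer has called close().
 */
#if 0
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

static void observe_fin(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	char buf[1];

	poll(&pfd, 1, -1);	/* woken via sk_state_change()/sk_wake_async() */
	if ((pfd.revents & POLLIN) && read(fd, buf, sizeof(buf)) == 0)
		printf("peer closed (revents=%#x)\n", pfd.revents);	/* EOF: FIN consumed */
}
#endif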
    
static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)
{
    	if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
    		if (before(seq, sp->start_seq))
    			sp->start_seq = seq;
    		if (after(end_seq, sp->end_seq))
    			sp->end_seq = end_seq;
    		return 1;
    	}
    	return 0;
    }
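
/*
 * Illustrative sketch, compiled out of the build: tcp_sack_extend() grows
 * a block only when [seq, end_seq) touches or overlaps it.  Standalone
 * userspace C mirroring the logic above, including the wraparound-safe
 * comparisons.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

struct blk { uint32_t start_seq, end_seq; };

static int before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static int after(uint32_t a, uint32_t b)  { return before(b, a); }

static int sack_extend(struct blk *sp, uint32_t seq, uint32_t end_seq)
{
	if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
		if (before(seq, sp->start_seq))
			sp->start_seq = seq;
		if (after(end_seq, sp->end_seq))
			sp->end_seq = end_seq;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct blk b = { 1000, 2000 };

	sack_extend(&b, 2000, 2500);			/* adjacent on the right: merged */
	sack_extend(&b, 500, 1000);			/* adjacent on the left: merged */
	printf("%u..%u\n", b.start_seq, b.end_seq);	/* 500..2500 */
	printf("%d\n", sack_extend(&b, 3000, 3500));	/* 0: disjoint, untouched */
	return 0;
}
#endif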
    
static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
{
    	if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
    		if (before(seq, tp->rcv_nxt))
    			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT);
    		else
    			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT);
    
    		tp->rx_opt.dsack = 1;
    		tp->duplicate_sack[0].start_seq = seq;
    		tp->duplicate_sack[0].end_seq = end_seq;
    		tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1, 4 - tp->rx_opt.tstamp_ok);
    	}
    }
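
/*
 * The "4 - tp->rx_opt.tstamp_ok" cap above is TCP option-space arithmetic:
 * options may occupy at most 40 bytes.  A SACK option costs 2 bytes of
 * header plus 8 per block (the D-SACK block counted among them), and the
 * timestamp option with its usual 2 bytes of NOP padding takes 12.
 * Without timestamps, 2 + 4*8 = 34 <= 40, so four blocks fit; with them,
 * 12 + 2 + 3*8 = 38 <= 40 leaves room for only three.
 */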
    
static void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq)
{
    	if (!tp->rx_opt.dsack)
    		tcp_dsack_set(tp, seq, end_seq);
    	else
    		tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
    }
    
    static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    
    	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
    	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
    		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
		tcp_enter_quickack_mode(sk);

    		if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
    			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
    
    			if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
    				end_seq = tp->rcv_nxt;
    			tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, end_seq);
    		}
    	}
    
    	tcp_send_ack(sk);
    }
    
    /* These routines update the SACK block as out-of-order packets arrive or
     * in-order packets close up the sequence space.
     */
    static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
    {
    	int this_sack;
    	struct tcp_sack_block *sp = &tp->selective_acks[0];
    	struct tcp_sack_block *swalk = sp+1;
    
    	/* See if the recent change to the first SACK eats into
    	 * or hits the sequence space of other SACK blocks, if so coalesce.
    	 */
    	for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; ) {
    		if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
    			int i;
    
    			/* Zap SWALK, by moving every further SACK up by one slot.
    			 * Decrease num_sacks.
    			 */
    			tp->rx_opt.num_sacks--;
    			tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
    			for(i=this_sack; i < tp->rx_opt.num_sacks; i++)
    				sp[i] = sp[i+1];
    			continue;
    		}
    		this_sack++, swalk++;
    	}
    }
    
static inline void tcp_sack_swap(struct tcp_sack_block *sack1, struct tcp_sack_block *sack2)
{
    	__u32 tmp;
    
    	tmp = sack1->start_seq;
    	sack1->start_seq = sack2->start_seq;
    	sack2->start_seq = tmp;
    
    	tmp = sack1->end_seq;
    	sack1->end_seq = sack2->end_seq;
    	sack2->end_seq = tmp;
    }
    
    static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    	struct tcp_sack_block *sp = &tp->selective_acks[0];
    	int cur_sacks = tp->rx_opt.num_sacks;
    	int this_sack;
    
    	if (!cur_sacks)
    		goto new_sack;
    
    	for (this_sack=0; this_sack<cur_sacks; this_sack++, sp++) {
    		if (tcp_sack_extend(sp, seq, end_seq)) {
    			/* Rotate this_sack to the first one. */
    			for (; this_sack>0; this_sack--, sp--)
    				tcp_sack_swap(sp, sp-1);
    			if (cur_sacks > 1)
    				tcp_sack_maybe_coalesce(tp);
    			return;
    		}
    	}
    
    	/* Could not find an adjacent existing SACK, build a new one,
    	 * put it at the front, and shift everyone else down.  We
    	 * always know there is at least one SACK present already here.
    	 *
    	 * If the sack array is full, forget about the last one.
    	 */
    	if (this_sack >= 4) {
    		this_sack--;
    		tp->rx_opt.num_sacks--;
    		sp--;
    	}
    	for(; this_sack > 0; this_sack--, sp--)
    		*sp = *(sp-1);
    
    new_sack:
    	/* Build the new head SACK, and we're done. */
    	sp->start_seq = seq;
    	sp->end_seq = end_seq;
    	tp->rx_opt.num_sacks++;
    	tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
    }
    
    /* RCV.NXT advances, some SACKs should be eaten. */
    
    static void tcp_sack_remove(struct tcp_sock *tp)
    {
    	struct tcp_sack_block *sp = &tp->selective_acks[0];
    	int num_sacks = tp->rx_opt.num_sacks;
    	int this_sack;
    
    	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
	if (skb_queue_empty(&tp->out_of_order_queue)) {
    		tp->rx_opt.num_sacks = 0;
    		tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
    		return;
    	}
    
    	for(this_sack = 0; this_sack < num_sacks; ) {
    		/* Check if the start of the sack is covered by RCV.NXT. */
    		if (!before(tp->rcv_nxt, sp->start_seq)) {
    			int i;
    
			/* RCV.NXT must cover the whole block! */
    			BUG_TRAP(!before(tp->rcv_nxt, sp->end_seq));
    
    			/* Zap this SACK, by moving forward any other SACKS. */
    			for (i=this_sack+1; i < num_sacks; i++)
    				tp->selective_acks[i-1] = tp->selective_acks[i];
    			num_sacks--;
    			continue;
    		}
    		this_sack++;
    		sp++;
    	}
    	if (num_sacks != tp->rx_opt.num_sacks) {
    		tp->rx_opt.num_sacks = num_sacks;
    		tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
    	}
    }
    
    /* This one checks to see if we can put data from the
     * out_of_order queue into the receive_queue.
     */
    static void tcp_ofo_queue(struct sock *sk)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    	__u32 dsack_high = tp->rcv_nxt;
    	struct sk_buff *skb;
    
    	while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
    		if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
    			break;
    
    		if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
    			__u32 dsack = dsack_high;
    			if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
    				dsack_high = TCP_SKB_CB(skb)->end_seq;
    			tcp_dsack_extend(tp, TCP_SKB_CB(skb)->seq, dsack);
    		}
    
    		if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
    			SOCK_DEBUG(sk, "ofo packet was already received \n");
			__skb_unlink(skb, &tp->out_of_order_queue);
    			__kfree_skb(skb);
    			continue;
    		}
    		SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
    			   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
    			   TCP_SKB_CB(skb)->end_seq);

		__skb_unlink(skb, &tp->out_of_order_queue);
    		__skb_queue_tail(&sk->sk_receive_queue, skb);
    		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
    		if(skb->h.th->fin)
    			tcp_fin(skb, sk, skb->h.th);
    	}
    }
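
/*
 * Worked trace of the loop above: with rcv_nxt = 100 and ofo segments
 * [90,110) and [110,130), the first pass sees seq 90 before rcv_nxt,
 * records a D-SACK for the duplicate range [90,100), requeues the skb
 * and advances rcv_nxt to 110; the second pass requeues [110,130)
 * cleanly, leaving rcv_nxt = 130 and the ofo queue empty.
 */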
    
    static int tcp_prune_queue(struct sock *sk);
    
    static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
    {
    	struct tcphdr *th = skb->h.th;
    	struct tcp_sock *tp = tcp_sk(sk);
    	int eaten = -1;
    
    	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
    		goto drop;
    
    	__skb_pull(skb, th->doff*4);
    
    	TCP_ECN_accept_cwr(tp, skb);
    
    	if (tp->rx_opt.dsack) {
    		tp->rx_opt.dsack = 0;
    		tp->rx_opt.eff_sacks = min_t(unsigned int, tp->rx_opt.num_sacks,
    						    4 - tp->rx_opt.tstamp_ok);
    	}
    
    	/*  Queue data for delivery to the user.
    	 *  Packets in sequence go to the receive queue.
    	 *  Out of sequence packets to the out_of_order_queue.
    	 */
    	if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
    		if (tcp_receive_window(tp) == 0)
    			goto out_of_window;
    
    		/* Ok. In sequence. In window. */
    		if (tp->ucopy.task == current &&
    		    tp->copied_seq == tp->rcv_nxt && tp->ucopy.len &&
    		    sock_owned_by_user(sk) && !tp->urg_data) {
    			int chunk = min_t(unsigned int, skb->len,
    							tp->ucopy.len);
    
    			__set_current_state(TASK_RUNNING);
    
    			local_bh_enable();
    			if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
    				tp->ucopy.len -= chunk;
    				tp->copied_seq += chunk;
    				eaten = (chunk == skb->len && !th->fin);
    				tcp_rcv_space_adjust(sk);
    			}
    			local_bh_disable();
    		}
    
    		if (eaten <= 0) {
    queue_and_out:
    			if (eaten < 0 &&
    			    (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
    			     !sk_stream_rmem_schedule(sk, skb))) {
    				if (tcp_prune_queue(sk) < 0 ||
    				    !sk_stream_rmem_schedule(sk, skb))
    					goto drop;
    			}
    			sk_stream_set_owner_r(skb, sk);
    			__skb_queue_tail(&sk->sk_receive_queue, skb);
    		}
    		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
    		if(skb->len)
    			tcp_event_data_recv(sk, tp, skb);
    		if(th->fin)
    			tcp_fin(skb, sk, th);

		if (!skb_queue_empty(&tp->out_of_order_queue)) {
    			tcp_ofo_queue(sk);
    
    			/* RFC2581. 4.2. SHOULD send immediate ACK, when
    			 * gap in queue is filled.
    			 */
			if (skb_queue_empty(&tp->out_of_order_queue))
				inet_csk(sk)->icsk_ack.pingpong = 0;
		}
    
    		if (tp->rx_opt.num_sacks)
    			tcp_sack_remove(tp);
    
    		tcp_fast_path_check(sk, tp);
    
    		if (eaten > 0)
    			__kfree_skb(skb);
    		else if (!sock_flag(sk, SOCK_DEAD))
    			sk->sk_data_ready(sk, 0);
    		return;
    	}
    
    	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
    		/* A retransmit, 2nd most common case.  Force an immediate ack. */
    		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
    		tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
    
    out_of_window:
		tcp_enter_quickack_mode(sk);
		inet_csk_schedule_ack(sk);
    drop:
    		__kfree_skb(skb);
    		return;
    	}
    
    	/* Out of window. F.e. zero window probe. */
    	if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
    		goto out_of_window;

	tcp_enter_quickack_mode(sk);

    	if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
    		/* Partial packet, seq < rcv_next < end_seq */
    		SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
    			   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
    			   TCP_SKB_CB(skb)->end_seq);
    
    		tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
    
    		/* If window is closed, drop tail of packet. But after
    		 * remembering D-SACK for its head made in previous line.
    		 */
    		if (!tcp_receive_window(tp))
    			goto out_of_window;
    		goto queue_and_out;
    	}
    
    	TCP_ECN_check_ce(tp, skb);
    
    	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
    	    !sk_stream_rmem_schedule(sk, skb)) {
    		if (tcp_prune_queue(sk) < 0 ||
    		    !sk_stream_rmem_schedule(sk, skb))
    			goto drop;
    	}
    
    	/* Disable header prediction. */
    	tp->pred_flags = 0;
    
    	inet_csk_schedule_ack(sk);
    
    	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
    		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
    
    	sk_stream_set_owner_r(skb, sk);
    
    	if (!skb_peek(&tp->out_of_order_queue)) {
    		/* Initial out of order segment, build 1 SACK. */
    		if (tp->rx_opt.sack_ok) {
    			tp->rx_opt.num_sacks = 1;
    			tp->rx_opt.dsack     = 0;
    			tp->rx_opt.eff_sacks = 1;
    			tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
    			tp->selective_acks[0].end_seq =
    						TCP_SKB_CB(skb)->end_seq;
    		}
    		__skb_queue_head(&tp->out_of_order_queue,skb);
    	} else {
    		struct sk_buff *skb1 = tp->out_of_order_queue.prev;
    		u32 seq = TCP_SKB_CB(skb)->seq;
    		u32 end_seq = TCP_SKB_CB(skb)->end_seq;
    
    		if (seq == TCP_SKB_CB(skb1)->end_seq) {
			__skb_append(skb1, skb, &tp->out_of_order_queue);

    			if (!tp->rx_opt.num_sacks ||
    			    tp->selective_acks[0].end_seq != seq)
    				goto add_sack;
    
    			/* Common case: data arrive in order after hole. */
    			tp->selective_acks[0].end_seq = end_seq;
    			return;
    		}
    
    		/* Find place to insert this segment. */
    		do {
    			if (!after(TCP_SKB_CB(skb1)->seq, seq))
    				break;
    		} while ((skb1 = skb1->prev) !=
    			 (struct sk_buff*)&tp->out_of_order_queue);
    
		/* Does the skb overlap the previous one? */
    		if (skb1 != (struct sk_buff*)&tp->out_of_order_queue &&
    		    before(seq, TCP_SKB_CB(skb1)->end_seq)) {
    			if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
    				/* All the bits are present. Drop. */
    				__kfree_skb(skb);
    				tcp_dsack_set(tp, seq, end_seq);
    				goto add_sack;
    			}
    			if (after(seq, TCP_SKB_CB(skb1)->seq)) {
    				/* Partial overlap. */
    				tcp_dsack_set(tp, seq, TCP_SKB_CB(skb1)->end_seq);
    			} else {
    				skb1 = skb1->prev;
    			}
    		}
    		__skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue);
    
		/* And clean up segments that the new one fully covers. */
    		while ((skb1 = skb->next) !=
    		       (struct sk_buff*)&tp->out_of_order_queue &&
    		       after(end_seq, TCP_SKB_CB(skb1)->seq)) {
    		       if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
    			       tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
    			       break;
    		       }
		       __skb_unlink(skb1, &tp->out_of_order_queue);
    		       tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
    		       __kfree_skb(skb1);
    		}
    
    add_sack:
    		if (tp->rx_opt.sack_ok)
    			tcp_sack_new_ofo_skb(sk, seq, end_seq);
    	}
    }
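
/*
 * Worked example of the dispatch above, taking rcv_nxt = 1000 and a
 * 500-byte receive window for each case independently:
 *   [1000,1100): in sequence, queued, rcv_nxt advances to 1100;
 *   [ 800, 900): entirely old, D-SACK [800,900) recorded, dup ACK, dropped;
 *   [1700,1800): past the right edge, treated as out of window, ACKed, dropped;
 *   [ 900,1200): duplicate head, D-SACK [900,1000) recorded, whole skb
 *                queued and rcv_nxt advances to 1200;
 *   [1300,1400): beyond a hole, added to the out_of_order_queue and
 *                announced as SACK block [1300,1400).
 */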
    
    /* Collapse contiguous sequence of skbs head..tail with
     * sequence numbers start..end.
     * Segments with FIN/SYN are not collapsed (only because this
     * simplifies code)
     */
    static void
tcp_collapse(struct sock *sk, struct sk_buff_head *list,
	     struct sk_buff *head, struct sk_buff *tail,
	     u32 start, u32 end)
{
    	struct sk_buff *skb;
    
	/* First, check that queue is collapsible and find
	 * the point where collapsing can be useful. */
    	for (skb = head; skb != tail; ) {
    		/* No new bits? It is possible on ofo queue. */
    		if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
    			struct sk_buff *next = skb->next;
			__skb_unlink(skb, list);
    			__kfree_skb(skb);
    			NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
    			skb = next;
    			continue;
    		}
    
    		/* The first skb to collapse is:
    		 * - not SYN/FIN and
    		 * - bloated or contains data before "start" or
    		 *   overlaps to the next one.
    		 */
    		if (!skb->h.th->syn && !skb->h.th->fin &&
    		    (tcp_win_from_space(skb->truesize) > skb->len ||
    		     before(TCP_SKB_CB(skb)->seq, start) ||
    		     (skb->next != tail &&
    		      TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq)))
    			break;
    
    		/* Decided to skip this, advance start seq. */
    		start = TCP_SKB_CB(skb)->end_seq;
    		skb = skb->next;
    	}
    	if (skb == tail || skb->h.th->syn || skb->h.th->fin)
    		return;
    
    	while (before(start, end)) {
    		struct sk_buff *nskb;
    		int header = skb_headroom(skb);
    		int copy = SKB_MAX_ORDER(header, 0);
    
    		/* Too big header? This can happen with IPv6. */
    		if (copy < 0)
    			return;
    		if (end-start < copy)
    			copy = end-start;
    		nskb = alloc_skb(copy+header, GFP_ATOMIC);
    		if (!nskb)
    			return;
    		skb_reserve(nskb, header);
    		memcpy(nskb->head, skb->head, header);
    		nskb->nh.raw = nskb->head + (skb->nh.raw-skb->head);
    		nskb->h.raw = nskb->head + (skb->h.raw-skb->head);
    		nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head);
    		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
    		TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
		__skb_insert(nskb, skb->prev, skb, list);
    		sk_stream_set_owner_r(nskb, sk);
    
    		/* Copy data, releasing collapsed skbs. */
    		while (copy > 0) {
    			int offset = start - TCP_SKB_CB(skb)->seq;
    			int size = TCP_SKB_CB(skb)->end_seq - start;
    
    			if (size > 0) {
    				size = min(copy, size);
    				if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
    					BUG();
    				TCP_SKB_CB(nskb)->end_seq += size;
    				copy -= size;
    				start += size;
    			}
    			if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
    				struct sk_buff *next = skb->next;
				__skb_unlink(skb, list);
    				__kfree_skb(skb);
    				NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
    				skb = next;
    				if (skb == tail || skb->h.th->syn || skb->h.th->fin)
    					return;
    			}
    		}
    	}
    }
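
/*
 * Worked example of the "bloated" test above: a 1-byte segment typically
 * arrives in an skb whose truesize is on the order of a couple of kbytes
 * (data buffer plus struct sk_buff overhead), so tcp_win_from_space() on
 * its truesize far exceeds skb->len and the skb is a candidate for
 * collapsing, while a full-sized segment whose len is close to its
 * truesize share is left alone.  The numbers are indicative, not exact.
 */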
    
    /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
     * and tcp_collapse() them until all the queue is collapsed.
     */
    static void tcp_collapse_ofo_queue(struct sock *sk)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    	struct sk_buff *skb = skb_peek(&tp->out_of_order_queue);
    	struct sk_buff *head;
    	u32 start, end;
    
    	if (skb == NULL)
    		return;
    
    	start = TCP_SKB_CB(skb)->seq;
    	end = TCP_SKB_CB(skb)->end_seq;
    	head = skb;
    
    	for (;;) {
    		skb = skb->next;
    
		/* The segment ends when we see a gap or when
		 * we reach the end of the queue. */
    		if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
    		    after(TCP_SKB_CB(skb)->seq, end) ||
    		    before(TCP_SKB_CB(skb)->end_seq, start)) {
			tcp_collapse(sk, &tp->out_of_order_queue,
				     head, skb, start, end);
    			head = skb;
    			if (skb == (struct sk_buff *)&tp->out_of_order_queue)
    				break;
    			/* Start new segment */
    			start = TCP_SKB_CB(skb)->seq;
    			end = TCP_SKB_CB(skb)->end_seq;
    		} else {
    			if (before(TCP_SKB_CB(skb)->seq, start))
    				start = TCP_SKB_CB(skb)->seq;
    			if (after(TCP_SKB_CB(skb)->end_seq, end))
    				end = TCP_SKB_CB(skb)->end_seq;
    		}
    	}
    }
    
    /* Reduce allocated memory if we can, trying to get
     * the socket within its memory limits again.
     *
     * Return less than zero if we should start dropping frames
     * until the socket owning process reads some of the data
     * to stabilize the situation.
     */
    static int tcp_prune_queue(struct sock *sk)
    {
	struct tcp_sock *tp = tcp_sk(sk);

    	SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
    
    	NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
    
    	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
    		tcp_clamp_window(sk, tp);
    	else if (tcp_memory_pressure)
    		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
    
    	tcp_collapse_ofo_queue(sk);
	tcp_collapse(sk, &sk->sk_receive_queue,
		     sk->sk_receive_queue.next,
    		     (struct sk_buff*)&sk->sk_receive_queue,
    		     tp->copied_seq, tp->rcv_nxt);
    	sk_stream_mem_reclaim(sk);
    
    	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
    		return 0;
    
    	/* Collapsing did not help, destructive actions follow.
    	 * This must not ever occur. */
    
    	/* First, purge the out_of_order queue. */
    
    	if (!skb_queue_empty(&tp->out_of_order_queue)) {
    		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
    		__skb_queue_purge(&tp->out_of_order_queue);
    
    		/* Reset SACK state.  A conforming SACK implementation will
    		 * do the same at a timeout based retransmit.  When a connection
    		 * is in a sad state like this, we care only about integrity
    		 * of the connection not performance.
    		 */
    		if (tp->rx_opt.sack_ok)
    			tcp_sack_reset(&tp->rx_opt);
    		sk_stream_mem_reclaim(sk);
    	}
    
    	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
    		return 0;
    
    	/* If we are really being abused, tell the caller to silently
    	 * drop receive data on the floor.  It will get retransmitted
    	 * and hopefully then we'll have sufficient space.
    	 */
    	NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED);
    
    	/* Massive buffer overcommit. */
    	tp->pred_flags = 0;
    	return -1;
    }
    
    
    /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
     * As additional protections, we do not touch cwnd in retransmission phases,
     * and if application hit its sndbuf limit recently.
     */
    void tcp_cwnd_application_limited(struct sock *sk)
    {
    	struct tcp_sock *tp = tcp_sk(sk);

	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
    	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
    		/* Limited by application or receiver window. */
		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
		u32 win_used = max(tp->snd_cwnd_used, init_win);
    		if (win_used < tp->snd_cwnd) {
			tp->snd_ssthresh = tcp_current_ssthresh(sk);
    			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
    		}
    		tp->snd_cwnd_used = 0;
    	}
    	tp->snd_cwnd_stamp = tcp_time_stamp;
    }
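
/*
 * Worked example of the averaging above: with snd_cwnd = 40 but only
 * snd_cwnd_used = 10 segments ever in flight during the idle RTO, and an
 * initial window of, say, 4, win_used = max(10, 4) = 10 and the window
 * decays to (40 + 10) / 2 = 25 rather than collapsing outright, while
 * snd_ssthresh remembers the current threshold per RFC 2861.
 */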
    
    
static int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp)
{
    	/* If the user specified a specific send buffer setting, do
    	 * not modify it.
    	 */
    	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
    		return 0;
    
    	/* If we are under global TCP memory pressure, do not expand.  */
    	if (tcp_memory_pressure)
    		return 0;
    
    	/* If we are under soft global TCP memory pressure, do not expand.  */
    	if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
    		return 0;
    
    	/* If we filled the congestion window, do not expand.  */
    	if (tp->packets_out >= tp->snd_cwnd)
    		return 0;
    
    	return 1;
    }
    
/* When an incoming ACK allows some skb to be freed from the write_queue,
 * we remember this event in the SOCK_QUEUE_SHRUNK flag and wake up the
 * socket on exit from the tcp input handler.
     *
     * PROBLEM: sndbuf expansion does not work well with largesend.
     */
    static void tcp_new_space(struct sock *sk)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    
	if (tcp_should_expand_sndbuf(sk, tp)) {
		int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
    			MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
    		    demanded = max_t(unsigned int, tp->snd_cwnd,
    						   tp->reordering + 1);
    		sndmem *= 2*demanded;
    		if (sndmem > sk->sk_sndbuf)
    			sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
    		tp->snd_cwnd_stamp = tcp_time_stamp;
    	}
    
    	sk->sk_write_space(sk);
    }
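
/*
 * Worked example of the sizing above, with indicative numbers: for an
 * MSS of 1460, the per-packet cost is 1460 + MAX_TCP_HEADER + 16 +
 * sizeof(struct sk_buff), roughly 1.9 kbytes.  With snd_cwnd = 20 and
 * reordering + 1 = 4, demanded = max(20, 4) = 20, so the sndbuf target
 * is 2 * 1900 * 20 = 76 kbytes, clamped to sysctl_tcp_wmem[2].
 */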
    
static void tcp_check_space(struct sock *sk)
{
    	if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
    		sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
    		if (sk->sk_socket &&
    		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
    			tcp_new_space(sk);
    	}
    }
    
static inline void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp)
{
	tcp_push_pending_frames(sk, tp);
    	tcp_check_space(sk);
    }
    
    /*
     * Check if sending an ack is needed.
     */
    static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    
	    /* More than one full frame received... */
	if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss
    	     /* ... and right edge of window advances far enough.
    	      * (tcp_recvmsg() will send ACK otherwise). Or...
    	      */
    	     && __tcp_select_window(sk) >= tp->rcv_wnd) ||
    	    /* We ACK each frame or... */
	    tcp_in_quickack_mode(sk) ||
    	    /* We have out of order data. */
    	    (ofo_possible &&
    	     skb_peek(&tp->out_of_order_queue))) {
    		/* Then ack it now */
    		tcp_send_ack(sk);
    	} else {
    		/* Else, send delayed ack. */
    		tcp_send_delayed_ack(sk);
    	}
    }
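
/*
 * Worked example of the decision above: with rcv_mss = 1460 and 2920
 * bytes received since the last window update, rcv_nxt - rcv_wup exceeds
 * one full frame; if the advertised window can also advance, an immediate
 * ACK goes out, matching RFC 2581's "ACK at least every second full-sized
 * segment".  A lone 100-byte segment, with no out-of-order data queued
 * and quickack mode off, falls through to tcp_send_delayed_ack().
 */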
    
static inline void tcp_ack_snd_check(struct sock *sk)
{
	if (!inet_csk_ack_scheduled(sk)) {
    		/* We sent a data segment already. */
    		return;
    	}
    	__tcp_ack_snd_check(sk, 1);
    }
    
    /*
     *	This routine is only called when we have urgent data
 *	signaled. It's the 'slow' part of tcp_urg. It could be
     *	moved inline now as tcp_urg is only called from one
     *	place. We handle URGent data wrong. We have to - as
     *	BSD still doesn't use the correction from RFC961.
     *	For 1003.1g we should support a new option TCP_STDURG to permit
     *	either form (or just set the sysctl tcp_stdurg).
     */
    static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    	u32 ptr = ntohs(th->urg_ptr);
    
    	if (ptr && !sysctl_tcp_stdurg)
    		ptr--;
    	ptr += ntohl(th->seq);
    
    	/* Ignore urgent data that we've already seen and read. */
    	if (after(tp->copied_seq, ptr))
    		return;
    
    	/* Do not replay urg ptr.
    	 *
    	 * NOTE: interesting situation not covered by specs.
	 * A misbehaving sender may send an urg ptr pointing into a segment
	 * which we already have in the ofo queue. We are not able to fetch
	 * such data and will stay in TCP_URG_NOTYET until it is eaten
	 * by recvmsg(). It seems we are not obliged to handle such wicked
	 * situations. But it is worth thinking about the possibility of some
	 * DoSes using some hypothetical application-level deadlock.
    	 */
    	if (before(ptr, tp->rcv_nxt))
    		return;
    
    	/* Do we already have a newer (or duplicate) urgent pointer? */
    	if (tp->urg_data && !after(ptr, tp->urg_seq))
    		return;
    
    	/* Tell the world about our new urgent pointer. */
    	sk_send_sigurg(sk);
    
    	/* We may be adding urgent data when the last byte read was
    	 * urgent. To do this requires some care. We cannot just ignore
    	 * tp->copied_seq since we would read the last urgent byte again
    	 * as data, nor can we alter copied_seq until this data arrives
	 * or we break the semantics of SIOCATMARK (and thus sockatmark())
    	 *
	 * NOTE. Double Dutch. Rendered in plain English: the author of the
	 * comment above did something like send("A", MSG_OOB); send("B", MSG_OOB);
	 * and expected both A and B to disappear from the stream. This is _wrong_.
	 * Though this happens in BSD with high probability, it is only occasional.
	 * Any application relying on it is buggy. Note also that the fix "works"
	 * only in this artificial test. Insert some normal data between A and B
	 * and we will diverge from BSD again. Verdict: it would be better to
	 * remove this and trap the buggy users.
    	 */
    	if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
    	    !sock_flag(sk, SOCK_URGINLINE) &&
    	    tp->copied_seq != tp->rcv_nxt) {
    		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
    		tp->copied_seq++;
    		if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
    			__kfree_skb(skb);
    		}
    	}
    
    	tp->urg_data   = TCP_URG_NOTYET;
    	tp->urg_seq    = ptr;
    
    	/* Disable header prediction. */
    	tp->pred_flags = 0;
    }
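
/*
 * Worked example of the pointer arithmetic above: for a segment with
 * seq = 1000 and urg_ptr = 5, the BSD-compatible interpretation (the
 * default, sysctl_tcp_stdurg = 0) first decrements the offset, so the
 * urgent mark lands on sequence 1004; with tcp_stdurg set, the pointer
 * is taken as-is and the mark lands on 1005.
 */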
    
    /* This is the 'fast' part of urgent handling. */
    static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    
    	/* Check if we get a new urgent pointer - normally not. */
    	if (th->urg)
    		tcp_check_urg(sk,th);
    
    	/* Do we wait for any urgent data? - normally not... */
    	if (tp->urg_data == TCP_URG_NOTYET) {
    		u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
    			  th->syn;
    
		/* Is the urgent pointer pointing into this packet? */
    		if (ptr < skb->len) {
    			u8 tmp;
    			if (skb_copy_bits(skb, ptr, &tmp, 1))
    				BUG();
    			tp->urg_data = TCP_URG_VALID | tmp;
    			if (!sock_flag(sk, SOCK_DEAD))
    				sk->sk_data_ready(sk, 0);
    		}
    	}
    }
    
    static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)