/*
 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL, etc.)
 *		      for the reset?
 *	Answer: if a packet caused an RST, it is not for a socket
 *		existing in our system; if it did match a socket,
 *		it is just a duplicate segment or a bug in the other
 *		side's TCP.  So we build the reply based only on the
 *		parameters that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
    
    static void tcp_v4_send_reset(struct sk_buff *skb)
    {
    	struct tcphdr *th = skb->h.th;
    	struct tcphdr rth;
    	struct ip_reply_arg arg;
    
    	/* Never send a reset in response to a reset. */
    	if (th->rst)
    		return;
    
    	if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
    		return;
    
    	/* Swap the send and the receive. */
    	memset(&rth, 0, sizeof(struct tcphdr));
    	rth.dest   = th->source;
    	rth.source = th->dest;
    	rth.doff   = sizeof(struct tcphdr) / 4;
    	rth.rst    = 1;
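
	/*
	 * RFC 793 rules for the reply: if the offending segment carried
	 * an ACK, the RST takes its sequence number from that ACK and
	 * needs no ACK of its own.  Otherwise rth.seq stays zero (from
	 * the memset above) and we ACK everything the segment occupied:
	 * SYN and FIN each count for one unit of sequence space, plus
	 * the payload length (skb->len minus the th->doff << 2 header
	 * bytes).
	 */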
    
    	if (th->ack) {
    		rth.seq = th->ack_seq;
    	} else {
    		rth.ack = 1;
    		rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
    				    skb->len - (th->doff << 2));
    	}
    
    	memset(&arg, 0, sizeof arg);
    	arg.iov[0].iov_base = (unsigned char *)&rth;
    	arg.iov[0].iov_len  = sizeof rth;
    	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
    				      skb->nh.iph->saddr, /*XXX*/
    				      sizeof(struct tcphdr), IPPROTO_TCP, 0);
    	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
    
    	ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
    
    	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
    	TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
    }
    
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
 * outside of socket context, is certainly ugly. What can I do?
 */
    
    static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
    			    u32 win, u32 ts)
    {
    	struct tcphdr *th = skb->h.th;
    	struct {
    		struct tcphdr th;
    		u32 tsopt[3];
    	} rep;
    	struct ip_reply_arg arg;
    
    	memset(&rep.th, 0, sizeof(struct tcphdr));
    	memset(&arg, 0, sizeof arg);
    
    	arg.iov[0].iov_base = (unsigned char *)&rep;
    	arg.iov[0].iov_len  = sizeof(rep.th);
    	if (ts) {
    		rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
    				     (TCPOPT_TIMESTAMP << 8) |
    				     TCPOLEN_TIMESTAMP);
    		rep.tsopt[1] = htonl(tcp_time_stamp);
    		rep.tsopt[2] = htonl(ts);
    		arg.iov[0].iov_len = sizeof(rep);
    	}
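
	/* The block above pads the 10-byte timestamp option with two
	 * leading NOPs out to 12 bytes, keeping the header length a
	 * multiple of 4; tsopt[1] carries our TSval, tsopt[2] echoes
	 * the peer's timestamp back as TSecr.
	 */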
    
    	/* Swap the send and the receive. */
    	rep.th.dest    = th->source;
    	rep.th.source  = th->dest;
    	rep.th.doff    = arg.iov[0].iov_len / 4;
    	rep.th.seq     = htonl(seq);
    	rep.th.ack_seq = htonl(ack);
    	rep.th.ack     = 1;
    	rep.th.window  = htons(win);
    
    	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
    				      skb->nh.iph->saddr, /*XXX*/
    				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
    	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
    
    	ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
    
    	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
    }
    
    static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
    {
    	struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
    
    	tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
    			tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
    
    	tcp_tw_put(tw);
    }
    
    
static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent);
}
    
static struct dst_entry* tcp_v4_route_req(struct sock *sk,
					  struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options *opt = inet_rsk(req)->opt;
	struct flowi fl = { .oif = sk->sk_bound_dev_if,
			    .nl_u = { .ip4_u =
				      { .daddr = ((opt && opt->srr) ?
						  opt->faddr :
						  ireq->rmt_addr),
					.saddr = ireq->loc_addr,
					.tos = RT_CONN_FLAGS(sk) } },
			    .proto = IPPROTO_TCP,
			    .uli_u = { .ports =
				       { .sport = inet_sk(sk)->sport,
					 .dport = ireq->rmt_port } } };
    
    	if (ip_route_output_flow(&rt, &fl, sk, 0)) {
    		IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
    		return NULL;
    	}
    	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
    		ip_rt_put(rt);
    		IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
    		return NULL;
    	}
    	return &rt->u.dst;
    }
    
/*
 *	Send a SYN-ACK after having received an ACK.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
    {
    
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
    	struct sk_buff * skb;
    
    	/* First, grab a route. */
    	if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
    		goto out;
    
    	skb = tcp_make_synack(sk, dst, req);
    
    	if (skb) {
    		struct tcphdr *th = skb->h.th;
    
		th->check = tcp_v4_check(th, skb->len,
					 ireq->loc_addr,
					 ireq->rmt_addr,
					 csum_partial((char *)th, skb->len,
						      skb->csum));

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
    		if (err == NET_XMIT_CN)
    			err = 0;
    	}
    
    out:
    	dst_release(dst);
    	return err;
    }
    
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	if (inet_rsk(req)->opt)
		kfree(inet_rsk(req)->opt);
}
    
    static inline void syn_flood_warning(struct sk_buff *skb)
    {
    	static unsigned long warntime;
    
    	if (time_after(jiffies, (warntime + HZ * 60))) {
    		warntime = jiffies;
    		printk(KERN_INFO
    		       "possible SYN flooding on port %d. Sending cookies.\n",
    		       ntohs(skb->h.th->dest));
    	}
    }
    
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
    static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
    						     struct sk_buff *skb)
    {
    	struct ip_options *opt = &(IPCB(skb)->opt);
    	struct ip_options *dopt = NULL;
    
    	if (opt && opt->optlen) {
    		int opt_size = optlength(opt);
    		dopt = kmalloc(opt_size, GFP_ATOMIC);
    		if (dopt) {
    			if (ip_options_echo(dopt, skb)) {
    				kfree(dopt);
    				dopt = NULL;
    			}
    		}
    	}
    	return dopt;
    }
    
    
struct request_sock_ops tcp_request_sock_ops = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_send_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
    };
    
    int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
    {
    
	struct inet_request_sock *ireq;
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
    	__u32 saddr = skb->nh.iph->saddr;
    	__u32 daddr = skb->nh.iph->daddr;
    	__u32 isn = TCP_SKB_CB(skb)->when;
    	struct dst_entry *dst = NULL;
    #ifdef CONFIG_SYN_COOKIES
    	int want_cookie = 0;
    #else
    #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
    #endif
    
	/* Never answer SYNs sent to broadcast or multicast addresses. */
    	if (((struct rtable *)skb->dst)->rt_flags &
    	    (RTCF_BROADCAST | RTCF_MULTICAST))
    		goto drop;
    
	/* TW buckets are converted to open requests without
	 * limitation: they conserve resources and the peer is
	 * evidently a real one.
	 */
    	if (tcp_synq_is_full(sk) && !isn) {
    #ifdef CONFIG_SYN_COOKIES
    		if (sysctl_tcp_syncookies) {
    			want_cookie = 1;
    		} else
    #endif
    		goto drop;
    	}
    
	/* The accept backlog is full. If we have already queued enough
	 * warm entries in the syn queue, drop the request. That is better
	 * than clogging the syn queue with openreqs that have exponentially
	 * increasing timeouts.
	 */
    	if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
    		goto drop;
    
    
    	req = reqsk_alloc(&tcp_request_sock_ops);
    
    	if (!req)
    		goto drop;
    
    	tcp_clear_options(&tmp_opt);
    	tmp_opt.mss_clamp = 536;
    	tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;
    
    	tcp_parse_options(skb, &tmp_opt, 0);
    
    	if (want_cookie) {
    		tcp_clear_options(&tmp_opt);
    		tmp_opt.saw_tstamp = 0;
    	}
    
    	if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
		/* Some OSes (unknown ones, but I see them on a web server,
		 * which contains information interesting only for Windows
		 * users) do not send their stamp in the SYN. It is an easy
		 * case: we simply do not advertise TS support.
		 */
    		tmp_opt.saw_tstamp = 0;
    		tmp_opt.tstamp_ok  = 0;
    	}
    	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
    
    	tcp_openreq_init(req, &tmp_opt, skb);
    
    
	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->opt = tcp_v4_save_options(sk, skb);

    	if (!want_cookie)
    		TCP_ECN_create_request(req, skb->h.th);
    
    	if (want_cookie) {
    #ifdef CONFIG_SYN_COOKIES
    		syn_flood_warning(skb);
    #endif
    		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
    	} else if (!isn) {
    		struct inet_peer *peer = NULL;
    
		/* VJ's idea. We save the last timestamp seen
		 * from the destination in the peer table when entering
		 * TIME-WAIT state, and check against it before
		 * accepting a new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so all the necessary checks
		 * are made in the function processing the timewait state.
		 */
    		if (tmp_opt.saw_tstamp &&
    		    sysctl_tcp_tw_recycle &&
    		    (dst = tcp_v4_route_req(sk, req)) != NULL &&
    		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
    		    peer->v4daddr == saddr) {
    			if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
    			    (s32)(peer->tcp_ts - req->ts_recent) >
    							TCP_PAWS_WINDOW) {
    				NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
    				dst_release(dst);
    				goto drop_and_free;
    			}
    		}
		/* Kill the following clause if you dislike this approach. */
    		else if (!sysctl_tcp_syncookies &&
    			 (sysctl_max_syn_backlog - tcp_synq_len(sk) <
    			  (sysctl_max_syn_backlog >> 2)) &&
    			 (!peer || !peer->tcp_ts_stamp) &&
    			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies, the last quarter of the
			 * backlog is reserved for destinations proven
			 * to be alive. This means we keep communicating
			 * with destinations already remembered at the
			 * moment the synflood began.
			 */
    
			LIMIT_NETDEBUG(printk(KERN_DEBUG "TCP: drop open "
					      "request from %u.%u."
					      "%u.%u/%u\n",
					      NIPQUAD(saddr),
					      ntohs(skb->h.th->source)));
    			dst_release(dst);
    			goto drop_and_free;
    		}
    
    		isn = tcp_v4_init_sequence(sk, skb);
    	}
    
    
    	if (tcp_v4_send_synack(sk, req, dst))
    		goto drop_and_free;
    
	if (want_cookie) {
		reqsk_free(req);
	} else {
    		tcp_v4_synq_add(sk, req);
    	}
    	return 0;
    
drop_and_free:
	reqsk_free(req);
drop:
    	TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
    	return 0;
    }
    
    
    /*
 * The three way handshake has completed - we got a valid ACK -
     * now create the new socket.
     */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
    {
    
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
    	struct tcp_sock *newtp;
    	struct sock *newsk;
    
    	if (sk_acceptq_is_full(sk))
    		goto exit_overflow;
    
    	if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
    		goto exit;
    
    	newsk = tcp_create_openreq_child(sk, req, skb);
    	if (!newsk)
    		goto exit;
    
    
	sk_setup_caps(newsk, dst);

    	newtp		      = tcp_sk(newsk);
    	newinet		      = inet_sk(newsk);
    
    	ireq		      = inet_rsk(req);
    	newinet->daddr	      = ireq->rmt_addr;
    	newinet->rcv_saddr    = ireq->loc_addr;
    	newinet->saddr	      = ireq->loc_addr;
    	newinet->opt	      = ireq->opt;
    	ireq->opt	      = NULL;
    
    	newinet->mc_index     = tcp_v4_iif(skb);
    	newinet->mc_ttl	      = skb->nh.iph->ttl;
    	newtp->ext_header_len = 0;
    	if (newinet->opt)
    		newtp->ext_header_len = newinet->opt->optlen;
    	newinet->id = newtp->write_seq ^ jiffies;
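
	/* A note on the line above: xor-ing the initial write sequence
	 * with jiffies helps keep the IP ID counters of back-to-back
	 * connections from starting at the same value.
	 */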
    
    	tcp_sync_mss(newsk, dst_mtu(dst));
    	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
    	tcp_initialize_rcv_mss(newsk);
    
    
	__inet_hash(&tcp_hashinfo, newsk, 0);
	__inet_inherit_port(&tcp_hashinfo, sk, newsk);

    	return newsk;
    
    exit_overflow:
    	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
    exit:
    	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
    	dst_release(dst);
    	return NULL;
    }
    
    static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
    {
    	struct tcphdr *th = skb->h.th;
    	struct iphdr *iph = skb->nh.iph;
    	struct tcp_sock *tp = tcp_sk(sk);
    	struct sock *nsk;
    
	struct request_sock **prev;

	/* Find possible connection requests. */
	struct request_sock *req = tcp_v4_search_req(tp, &prev, th->source,
						     iph->saddr, iph->daddr);
    	if (req)
    		return tcp_check_req(sk, skb, req, prev);
    
    	nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
    					  th->source,
    					  skb->nh.iph->daddr,
    					  ntohs(th->dest),
    					  tcp_v4_iif(skb));
    
    	if (nsk) {
    		if (nsk->sk_state != TCP_TIME_WAIT) {
    			bh_lock_sock(nsk);
    			return nsk;
    		}
    		tcp_tw_put((struct tcp_tw_bucket *)nsk);
    		return NULL;
    	}
    
    #ifdef CONFIG_SYN_COOKIES
    	if (!th->rst && !th->syn && th->ack)
    		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
    #endif
    	return sk;
    }
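
/* Verify or prime the TCP checksum.  Hardware-computed sums are
 * checked immediately; short packets (<= 76 bytes) are cheap enough
 * to verify in full right away; for anything longer we only seed
 * skb->csum with the pseudo-header sum and let
 * tcp_checksum_complete() finish the job later.
 */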
    
    static int tcp_v4_checksum_init(struct sk_buff *skb)
    {
    	if (skb->ip_summed == CHECKSUM_HW) {
    		skb->ip_summed = CHECKSUM_UNNECESSARY;
    		if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
    				  skb->nh.iph->daddr, skb->csum))
    			return 0;
    
    
		LIMIT_NETDEBUG(printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
    		skb->ip_summed = CHECKSUM_NONE;
    	}
    	if (skb->len <= 76) {
    		if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
    				 skb->nh.iph->daddr,
    				 skb_checksum(skb, 0, skb->len, 0)))
    			return -1;
    		skb->ip_summed = CHECKSUM_UNNECESSARY;
    	} else {
    		skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
    					  skb->nh.iph->saddr,
    					  skb->nh.iph->daddr, 0);
    	}
    	return 0;
    }
    
    
/* The socket must have its spinlock held when we get
 * here.
     *
     * We have a potential double-lock case here, so even when
     * doing backlog processing we use the BH locking scheme.
     * This is because we cannot sleep with the original spinlock
     * held.
     */
    int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
    {
    	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
    		TCP_CHECK_TIMER(sk);
    		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
    			goto reset;
    		TCP_CHECK_TIMER(sk);
    		return 0;
    	}
    
    	if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
    		goto csum_err;
    
    	if (sk->sk_state == TCP_LISTEN) {
    		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
    		if (!nsk)
    			goto discard;
    
    		if (nsk != sk) {
    			if (tcp_child_process(sk, nsk, skb))
    				goto reset;
    			return 0;
    		}
    	}
    
    	TCP_CHECK_TIMER(sk);
    	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
    		goto reset;
    	TCP_CHECK_TIMER(sk);
    	return 0;
    
    reset:
    	tcp_v4_send_reset(skb);
    discard:
    	kfree_skb(skb);
    	/* Be careful here. If this function gets more complicated and
    	 * gcc suffers from register pressure on the x86, sk (in %ebx)
    	 * might be destroyed here. This current version compiles correctly,
    	 * but you have been warned.
    	 */
    	return 0;
    
    csum_err:
    	TCP_INC_STATS_BH(TCP_MIB_INERRS);
    	goto discard;
    }
    
    /*
     *	From tcp_input.c
     */
    
    int tcp_v4_rcv(struct sk_buff *skb)
    {
    	struct tcphdr *th;
    	struct sock *sk;
    	int ret;
    
    	if (skb->pkt_type != PACKET_HOST)
    		goto discard_it;
    
    	/* Count it even if it's bad */
    	TCP_INC_STATS_BH(TCP_MIB_INSEGS);
    
    	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
    		goto discard_it;
    
    	th = skb->h.th;
    
    	if (th->doff < sizeof(struct tcphdr) / 4)
    		goto bad_packet;
    	if (!pskb_may_pull(skb, th->doff * 4))
    		goto discard_it;
    
	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided the case of th->doff == 0 is eliminated.
	 * So, we defer the checks. */
    	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
    	     tcp_v4_checksum_init(skb) < 0))
    		goto bad_packet;
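
	/* Cache the interesting header fields in the skb control block.
	 * As in the RST path above, SYN and FIN each consume one unit
	 * of sequence space, so they are counted into end_seq.
	 */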
    
    	th = skb->h.th;
    	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
    	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
    				    skb->len - th->doff * 4);
    	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
    	TCP_SKB_CB(skb)->when	 = 0;
    	TCP_SKB_CB(skb)->flags	 = skb->nh.iph->tos;
    	TCP_SKB_CB(skb)->sacked	 = 0;
    
    	sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
    			     skb->nh.iph->daddr, ntohs(th->dest),
    			     tcp_v4_iif(skb));
    
    	if (!sk)
    		goto no_tcp_socket;
    
    process:
    	if (sk->sk_state == TCP_TIME_WAIT)
    		goto do_time_wait;
    
    	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
    		goto discard_and_relse;
    
    	if (sk_filter(sk, skb, 0))
    		goto discard_and_relse;
    
    	skb->dev = NULL;
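
	/* If a user context holds the socket lock, park the segment on
	 * the backlog; otherwise try handing it to a process sleeping
	 * in recvmsg() via the prequeue, and only process it here in
	 * softirq context as a last resort.
	 */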
    
    	bh_lock_sock(sk);
    	ret = 0;
    	if (!sock_owned_by_user(sk)) {
    		if (!tcp_prequeue(sk, skb))
    			ret = tcp_v4_do_rcv(sk, skb);
    	} else
    		sk_add_backlog(sk, skb);
    	bh_unlock_sock(sk);
    
    	sock_put(sk);
    
    	return ret;
    
    no_tcp_socket:
    	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
    		goto discard_it;
    
    	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
    bad_packet:
    		TCP_INC_STATS_BH(TCP_MIB_INERRS);
    	} else {
    		tcp_v4_send_reset(skb);
    	}
    
    discard_it:
    	/* Discard frame. */
    	kfree_skb(skb);
      	return 0;
    
    discard_and_relse:
    	sock_put(sk);
    	goto discard_it;
    
    do_time_wait:
    	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
    		tcp_tw_put((struct tcp_tw_bucket *) sk);
    		goto discard_it;
    	}
    
    	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
    		TCP_INC_STATS_BH(TCP_MIB_INERRS);
    		tcp_tw_put((struct tcp_tw_bucket *) sk);
    		goto discard_it;
    	}
    	switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
    					   skb, th, skb->len)) {
    	case TCP_TW_SYN: {
    		struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
    							  ntohs(th->dest),
    							  tcp_v4_iif(skb));
    		if (sk2) {
    			tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
    			tcp_tw_put((struct tcp_tw_bucket *)sk);
    			sk = sk2;
    			goto process;
    		}
    		/* Fall through to ACK */
    	}
    	case TCP_TW_ACK:
    		tcp_v4_timewait_ack(sk, skb);
    		break;
    	case TCP_TW_RST:
    		goto no_tcp_socket;
    	case TCP_TW_SUCCESS:;
    	}
    	goto discard_it;
    }
    
    static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
    {
    	struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
    	struct inet_sock *inet = inet_sk(sk);
    
    	sin->sin_family		= AF_INET;
    	sin->sin_addr.s_addr	= inet->daddr;
    	sin->sin_port		= inet->dport;
    }
    
/* VJ's idea. Save the last timestamp seen from this destination
 * and hold it at least for the normal timewait interval, to use for
 * duplicate segment detection in subsequent connections, before they
 * enter the synchronized state.
 */
    
    int tcp_v4_remember_stamp(struct sock *sk)
    {
    	struct inet_sock *inet = inet_sk(sk);
    	struct tcp_sock *tp = tcp_sk(sk);
    	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
    	struct inet_peer *peer = NULL;
    	int release_it = 0;
    
    	if (!rt || rt->rt_dst != inet->daddr) {
    		peer = inet_getpeer(inet->daddr, 1);
    		release_it = 1;
    	} else {
    		if (!rt->peer)
    			rt_bind_peer(rt, 1);
    		peer = rt->peer;
    	}
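
	/* Only let the cached stamp move forward, or replace it outright
	 * when the existing entry is stale: older than TCP_PAWS_MSL
	 * wall-clock seconds and recorded no later than what this
	 * connection itself saw.
	 */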
    
    	if (peer) {
    		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
    		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
    		     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
    			peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
    			peer->tcp_ts = tp->rx_opt.ts_recent;
    		}
    		if (release_it)
    			inet_putpeer(peer);
    		return 1;
    	}
    
    	return 0;
    }
    
    int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
    {
    	struct inet_peer *peer = NULL;
    
    	peer = inet_getpeer(tw->tw_daddr, 1);
    
    	if (peer) {
    		if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
    		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
    		     peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
    			peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
    			peer->tcp_ts = tw->tw_ts_recent;
    		}
    		inet_putpeer(peer);
    		return 1;
    	}
    
    	return 0;
    }
    
    struct tcp_func ipv4_specific = {
    	.queue_xmit	=	ip_queue_xmit,
    	.send_check	=	tcp_v4_send_check,
    
	.rebuild_header	=	inet_sk_rebuild_header,
    	.conn_request	=	tcp_v4_conn_request,
    	.syn_recv_sock	=	tcp_v4_syn_recv_sock,
    	.remember_stamp	=	tcp_v4_remember_stamp,
    	.net_header_len	=	sizeof(struct iphdr),
    	.setsockopt	=	ip_setsockopt,
    	.getsockopt	=	ip_getsockopt,
    	.addr2sockaddr	=	v4_addr2sockaddr,
    	.sockaddr_len	=	sizeof(struct sockaddr_in),
    };
    
/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
    static int tcp_v4_init_sock(struct sock *sk)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    
    	skb_queue_head_init(&tp->out_of_order_queue);
    	tcp_init_xmit_timers(sk);
    	tcp_prequeue_init(tp);
    
    	tp->rto  = TCP_TIMEOUT_INIT;
    	tp->mdev = TCP_TIMEOUT_INIT;
    
    	/* So many TCP implementations out there (incorrectly) count the
    	 * initial SYN frame in their delayed-ACK and congestion control
    	 * algorithms that we must have the following bandaid to talk
    	 * efficiently to them.  -DaveM
    	 */
    	tp->snd_cwnd = 2;
    
    	/* See draft-stevens-tcpca-spec-01 for discussion of the
    	 * initialization of these values.
    	 */
    	tp->snd_ssthresh = 0x7fffffff;	/* Infinity */
    	tp->snd_cwnd_clamp = ~0;
    
    	tp->mss_cache = 536;
    
	tp->reordering = sysctl_tcp_reordering;
	tp->ca_ops = &tcp_init_congestion_ops;

    	sk->sk_state = TCP_CLOSE;
    
    	sk->sk_write_space = sk_stream_write_space;
    	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
    
    	tp->af_specific = &ipv4_specific;
    
    	sk->sk_sndbuf = sysctl_tcp_wmem[1];
    	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
    
    	atomic_inc(&tcp_sockets_allocated);
    
    	return 0;
    }
    
    int tcp_v4_destroy_sock(struct sock *sk)
    {
    	struct tcp_sock *tp = tcp_sk(sk);
    
    	tcp_clear_xmit_timers(sk);
    
    
	/* Clean up the write buffer. */
      	sk_stream_writequeue_purge(sk);
    
    	/* Cleans up our, hopefully empty, out_of_order_queue. */
      	__skb_queue_purge(&tp->out_of_order_queue);
    
	/* Clean the prequeue; it really must be empty. */
    	__skb_queue_purge(&tp->ucopy.prequeue);
    
    	/* Clean up a referenced TCP bind bucket. */
    
	if (inet_sk(sk)->bind_hash)
		inet_put_port(&tcp_hashinfo, sk);
    
    	/*
    	 * If sendmsg cached page exists, toss it.
    	 */
    	if (sk->sk_sndmsg_page) {
    		__free_page(sk->sk_sndmsg_page);
    		sk->sk_sndmsg_page = NULL;
    	}
    
    	atomic_dec(&tcp_sockets_allocated);
    
    	return 0;
    }
    
    EXPORT_SYMBOL(tcp_v4_destroy_sock);
    
    #ifdef CONFIG_PROC_FS
    /* Proc filesystem TCP sock list dumping. */
    
    static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head)
    {
    	return hlist_empty(head) ? NULL :
    		list_entry(head->first, struct tcp_tw_bucket, tw_node);
    }
    
    static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
    {
    	return tw->tw_node.next ?
    		hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
    }
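
/* The /proc iterator below interleaves two states per listening
 * socket: TCP_SEQ_STATE_LISTENING walks the listening hash itself,
 * while TCP_SEQ_STATE_OPENREQ detours through that listener's SYN
 * table of embryonic request_socks before moving on.
 */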
    
    static void *listening_get_next(struct seq_file *seq, void *cur)
    {
    	struct tcp_sock *tp;
    	struct hlist_node *node;
    	struct sock *sk = cur;
    	struct tcp_iter_state* st = seq->private;
    
    	if (!sk) {
    		st->bucket = 0;
    
		sk = sk_head(&tcp_hashinfo.listening_hash[0]);
		goto get_sk;
    	}
    
    	++st->num;
    
    	if (st->state == TCP_SEQ_STATE_OPENREQ) {
    
		struct request_sock *req = cur;

    	       	tp = tcp_sk(st->syn_wait_sk);
    		req = req->dl_next;
    		while (1) {
    			while (req) {
    
				if (req->rsk_ops->family == st->family) {
    					cur = req;
    					goto out;
    				}
    				req = req->dl_next;
    			}
    			if (++st->sbucket >= TCP_SYNQ_HSIZE)
    				break;
    get_req:
    
			req = tp->accept_queue.listen_opt->syn_table[st->sbucket];
    		}
    		sk	  = sk_next(st->syn_wait_sk);
    		st->state = TCP_SEQ_STATE_LISTENING;
    
		read_unlock_bh(&tp->accept_queue.syn_wait_lock);
    	} else {
    	       	tp = tcp_sk(sk);
    
		read_lock_bh(&tp->accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&tp->accept_queue))
    			goto start_req;
    
		read_unlock_bh(&tp->accept_queue.syn_wait_lock);
    		sk = sk_next(sk);
    	}
    get_sk:
    	sk_for_each_from(sk, node) {
    		if (sk->sk_family == st->family) {
    			cur = sk;
    			goto out;
    		}
    	       	tp = tcp_sk(sk);
    
		read_lock_bh(&tp->accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&tp->accept_queue)) {
    start_req:
    			st->uid		= sock_i_uid(sk);
    			st->syn_wait_sk = sk;
    			st->state	= TCP_SEQ_STATE_OPENREQ;
    			st->sbucket	= 0;
    			goto get_req;
    		}
    
		read_unlock_bh(&tp->accept_queue.syn_wait_lock);
    	}
    
    	if (++st->bucket < INET_LHTABLE_SIZE) {
    
		sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
    		goto get_sk;
    	}
    	cur = NULL;
    out:
    	return cur;
    }
    
    static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
    {
    	void *rc = listening_get_next(seq, NULL);
    
    	while (rc && *pos) {
    		rc = listening_get_next(seq, rc);
    		--*pos;
    	}
    	return rc;
    }
    
    static void *established_get_first(struct seq_file *seq)
    {
    	struct tcp_iter_state* st = seq->private;
    	void *rc = NULL;
    
    
	for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
    		struct sock *sk;
    		struct hlist_node *node;
    		struct tcp_tw_bucket *tw;
    
    		/* We can reschedule _before_ having picked the target: */
    		cond_resched_softirq();
    
    
		read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
		sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
    			if (sk->sk_family != st->family) {
    				continue;
    			}
    			rc = sk;
    			goto out;
    		}
    		st->state = TCP_SEQ_STATE_TIME_WAIT;
    		tw_for_each(tw, node,
    
			    &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
    			if (tw->tw_family != st->family) {
    				continue;
    			}
    			rc = tw;
    			goto out;
    		}
    
		read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
    		st->state = TCP_SEQ_STATE_ESTABLISHED;
    	}
    out:
    	return rc;
    }
    
    static void *established_get_next(struct seq_file *seq, void *cur)
    {
    	struct sock *sk = cur;
    	struct tcp_tw_bucket *tw;
    	struct hlist_node *node;
    	struct tcp_iter_state* st = seq->private;
    
    	++st->num;
    
    	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
    		tw = cur;
    		tw = tw_next(tw);
    get_tw: