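/* TCP_MD5SIG setsockopt handling: an empty key deletes the peer's entry,
 * otherwise the key length is validated and a private copy of the key is
 * installed for the destination address.
 */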
struct tcp_md5sig cmd;
struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

if (copy_from_user(&cmd, optval, sizeof(cmd)))
return -EFAULT;
if (sin->sin_family != AF_INET)
return -EINVAL;
if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
if (!tcp_sk(sk)->md5sig_info)
return -ENOENT;
return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
}
if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
return -EINVAL;
if (!tcp_sk(sk)->md5sig_info) {
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_info *p;
p = kzalloc(sizeof(*p), sk->sk_allocation);
if (!p)
return -EINVAL;
tp->md5sig_info = p;
}

newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
if (!newkey)
return -ENOMEM;
return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
newkey, cmd.tcpm_keylen);
}
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
__be32 daddr, __be32 saddr, int nbytes)
{
struct tcp4_pseudohdr *bp;
bp = &hp->md5_blk.ip4;
/*
* 1. the TCP pseudo-header (in the order: source IP address,
* destination IP address, zero-padded protocol number, and
* segment length)
*/
bp->saddr = saddr;
bp->daddr = daddr;
bp->pad = 0;
bp->protocol = IPPROTO_TCP;
bp->len = cpu_to_be16(nbytes);
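/* Hand the pseudo-header to the MD5 transform as a single scatterlist entry. */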
sg_init_one(&sg, bp, sizeof(*bp));
return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
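/* Hash the pseudo-header, the TCP header and the key only (no payload).
 * Used for segments that are not attached to a full socket, such as RSTs
 * and timewait ACKs.
 */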
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
__be32 daddr, __be32 saddr, struct tcphdr *th)
{
struct tcp_md5sig_pool *hp;
struct hash_desc *desc;
hp = tcp_get_md5sig_pool();
if (!hp)
goto clear_hash_noput;
desc = &hp->md5_desc;
if (crypto_hash_init(desc))
goto clear_hash;
if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
goto clear_hash;
if (tcp_md5_hash_header(hp, th))
goto clear_hash;
if (tcp_md5_hash_key(hp, key))
goto clear_hash;
if (crypto_hash_final(desc, md5_hash))
goto clear_hash;
tcp_put_md5sig_pool();
return 0;
clear_hash:
tcp_put_md5sig_pool();
clear_hash_noput:
memset(md5_hash, 0, 16);
return 1;
}

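/* Compute the RFC 2385 MD5 signature over the pseudo-header, the TCP header
 * and the full payload of @skb; the source and destination addresses come
 * from the socket, the request socket, or the packet itself, whichever is
 * available.
 */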
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
struct sock *sk, struct request_sock *req,
struct sk_buff *skb)
{
struct tcp_md5sig_pool *hp;
struct hash_desc *desc;
struct tcphdr *th = tcp_hdr(skb);
__be32 saddr, daddr;
if (sk) {
saddr = inet_sk(sk)->inet_saddr;
daddr = inet_sk(sk)->inet_daddr;
} else if (req) {
saddr = inet_rsk(req)->loc_addr;
daddr = inet_rsk(req)->rmt_addr;
} else {
const struct iphdr *iph = ip_hdr(skb);
saddr = iph->saddr;
daddr = iph->daddr;
}
hp = tcp_get_md5sig_pool();
if (!hp)
goto clear_hash_noput;
desc = &hp->md5_desc;
if (crypto_hash_init(desc))
goto clear_hash;
if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
goto clear_hash;
if (tcp_md5_hash_header(hp, th))
goto clear_hash;
if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
goto clear_hash;
if (tcp_md5_hash_key(hp, key))
goto clear_hash;
if (crypto_hash_final(desc, md5_hash))
goto clear_hash;
tcp_put_md5sig_pool();
return 0;
clear_hash:
tcp_put_md5sig_pool();
clear_hash_noput:
memset(md5_hash, 0, 16);
return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
/*
* This gets called for each TCP segment that arrives
* so we want to be efficient.
* We have 3 drop cases:
* o No MD5 hash and one expected.
* o MD5 hash and we're not expecting one.
* o MD5 hash and it's wrong.
*/
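/* Returns 0 if the segment should be accepted, 1 if the caller must drop it. */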
__u8 *hash_location = NULL;
struct tcp_md5sig_key *hash_expected;
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
int genhash;
unsigned char newhash[16];
hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
hash_location = tcp_parse_md5sig_option(th);
/* We've parsed the options - do we have a hash? */
if (!hash_expected && !hash_location)
return 0;
if (hash_expected && !hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
return 1;
}
if (!hash_expected && hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
return 1;
}
/* Okay, so this is hash_expected and hash_location -
* so we need to calculate the checksum.
*/
genhash = tcp_v4_md5_hash_skb(newhash,
hash_expected,
NULL, NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
if (net_ratelimit()) {
printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
&iph->saddr, ntohs(th->source),
&iph->daddr, ntohs(th->dest),
genhash ? " tcp_v4_calc_md5_hash failed" : "");
}
return 1;
}
return 0;
}
#endif
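/* Operations attached to embryonic (SYN_RECV) request sockets for IPv4. */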
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
.family = PF_INET,
.obj_size = sizeof(struct tcp_request_sock),
.rtx_syn_ack = tcp_v4_rtx_synack,
.send_ack = tcp_v4_reqsk_send_ack,
.destructor = tcp_v4_reqsk_destructor,
.syn_ack_timeout = tcp_syn_ack_timeout,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
.md5_lookup = tcp_v4_reqsk_md5_lookup,
.calc_md5_hash = tcp_v4_md5_hash_skb,
};
#endif
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct tcp_extend_values tmp_ext;
struct tcp_options_received tmp_opt;
u8 *hash_location;
struct request_sock *req;
struct inet_request_sock *ireq;
struct tcp_sock *tp = tcp_sk(sk);
struct dst_entry *dst = NULL;
__be32 saddr = ip_hdr(skb)->saddr;
__be32 daddr = ip_hdr(skb)->daddr;
__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif
/* Never answer SYNs sent to broadcast or multicast addresses */
if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
goto drop;
/* TW buckets are converted to open requests without
* limitations, they conserve resources and peer is
* evidently real one.
*/
if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
if (net_ratelimit())
syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
if (sysctl_tcp_syncookies) {
want_cookie = 1;
} else
#endif
goto drop;
}
/* Accept backlog is full. If we have already queued enough
* of warm entries in syn queue, drop request. It is better than
* clogging syn queue with openreqs with exponentially increasing
* timeout.
*/
if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
goto drop;

req = inet_reqsk_alloc(&tcp_request_sock_ops);
if (!req)
goto drop;
#ifdef CONFIG_TCP_MD5SIG
tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif
tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
tmp_opt.user_mss = tp->rx_opt.user_mss;
tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
if (tmp_opt.cookie_plus > 0 &&
tmp_opt.saw_tstamp &&
!tp->rx_opt.cookie_out_never &&
(sysctl_tcp_cookie_size > 0 ||
(tp->cookie_values != NULL &&
tp->cookie_values->cookie_desired > 0))) {
u8 *c;
u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
goto drop_and_release;
/* Secret recipe starts with IP addresses */
*mess++ ^= (__force u32)daddr;
*mess++ ^= (__force u32)saddr;
/* plus variable length Initiator Cookie */
c = (u8 *)mess;
while (l-- > 0)
*c++ ^= *hash_location++;
#ifdef CONFIG_SYN_COOKIES
want_cookie = 0; /* not our kind of cookie */
#endif
tmp_ext.cookie_out_never = 0; /* false */
tmp_ext.cookie_plus = tmp_opt.cookie_plus;
} else if (!tp->rx_opt.cookie_in_always) {
/* redundant indications, but ensure initialization. */
tmp_ext.cookie_out_never = 1; /* true */
tmp_ext.cookie_plus = 0;
} else {
goto drop_and_release;
}
tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
if (want_cookie && !tmp_opt.saw_tstamp)
tcp_clear_options(&tmp_opt);
tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
tcp_openreq_init(req, &tmp_opt, skb);
ireq = inet_rsk(req);
ireq->loc_addr = daddr;
ireq->rmt_addr = saddr;
ireq->no_srccheck = inet_sk(sk)->transparent;
ireq->opt = tcp_v4_save_options(sk, skb);
if (security_inet_conn_request(sk, skb, req))
goto drop_and_free;
if (!want_cookie || tmp_opt.tstamp_ok)
TCP_ECN_create_request(req, tcp_hdr(skb));
if (want_cookie) {
isn = cookie_v4_init_sequence(sk, skb, &req->mss);
} else if (!isn) {
struct inet_peer *peer = NULL;
/* VJ's idea. We save last timestamp seen
* from the destination in peer table, when entering
* state TIME-WAIT, and check against it before
* accepting new connection request.
*
* If "isn" is not zero, this request hit alive
* timewait bucket, so that all the necessary checks
* are made in the function processing timewait state.
*/
if (tmp_opt.saw_tstamp &&
tcp_death_row.sysctl_tw_recycle &&
(dst = inet_csk_route_req(sk, req)) != NULL &&
(peer = rt_get_peer((struct rtable *)dst)) != NULL &&
peer->daddr.addr.a4 == saddr) {
if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
(s32)(peer->tcp_ts - req->ts_recent) >
TCP_PAWS_WINDOW) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
goto drop_and_release;
}
}
/* Kill the following clause, if you dislike this way. */
else if (!sysctl_tcp_syncookies &&
(sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
(sysctl_max_syn_backlog >> 2)) &&
(!peer || !peer->tcp_ts_stamp) &&
(!dst || !dst_metric(dst, RTAX_RTT))) {
/* Without syncookies last quarter of
* backlog is filled with destinations,
* proven to be alive.
* It means that we continue to communicate
* to destinations, already remembered
* to the moment of synflood.
*/
LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
&saddr, ntohs(tcp_hdr(skb)->source));
goto drop_and_release;
}

isn = tcp_v4_init_sequence(skb);
}
tcp_rsk(req)->snt_isn = isn;
if (tcp_v4_send_synack(sk, dst, req,
(struct request_values *)&tmp_ext) ||
want_cookie)
goto drop_and_free;

inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
return 0;
drop_and_release:
dst_release(dst);
drop_and_free:
reqsk_free(req);
drop:
return 0;
}
/*
* The three way handshake has completed - we got a valid synack -
* now create the new socket.
*/
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
{
struct inet_request_sock *ireq;
struct inet_sock *newinet;
struct tcp_sock *newtp;
struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *key;
#endif
if (sk_acceptq_is_full(sk))
goto exit_overflow;
if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
goto exit;
newsk = tcp_create_openreq_child(sk, req, skb);
if (!newsk)
goto exit_nonewsk;
newsk->sk_gso_type = SKB_GSO_TCPV4;
newtp = tcp_sk(newsk);
newinet = inet_sk(newsk);
ireq = inet_rsk(req);
newinet->inet_daddr = ireq->rmt_addr;
newinet->inet_rcv_saddr = ireq->loc_addr;
newinet->inet_saddr = ireq->loc_addr;
newinet->opt = ireq->opt;
ireq->opt = NULL;
newinet->mc_index = inet_iif(skb);
newinet->mc_ttl = ip_hdr(skb)->ttl;
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (newinet->opt)
inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
newinet->inet_id = newtp->write_seq ^ jiffies;
newtp->advmss = dst_metric_advmss(dst);
if (tcp_sk(sk)->rx_opt.user_mss &&
tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
#ifdef CONFIG_TCP_MD5SIG
/* Copy over the MD5 key from the original socket */
key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
if (key != NULL) {
/*
* We're using one, so create a matching key
* on the newsk structure. If we fail to get
* memory, then we end up not copying the key
* across. Shucks.
*/
char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
if (newkey != NULL)
tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
newkey, key->keylen);
}
#endif

if (__inet_inherit_port(sk, newsk) < 0) {
sock_put(newsk);
goto exit;
}
__inet_hash_nolisten(newsk, NULL);
return newsk;

exit_overflow:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
dst_release(dst);
exit:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
return NULL;
}
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
struct tcphdr *th = tcp_hdr(skb);
const struct iphdr *iph = ip_hdr(skb);
struct sock *nsk;
struct request_sock **prev;
struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
iph->saddr, iph->daddr);
if (req)
return tcp_check_req(sk, skb, req, prev);
nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
th->source, iph->daddr, th->dest, inet_iif(skb));
if (nsk) {
if (nsk->sk_state != TCP_TIME_WAIT) {
bh_lock_sock(nsk);
return nsk;
}
inet_twsk_put(inet_twsk(nsk));
return NULL;
}
#ifdef CONFIG_SYN_COOKIES
if (!th->syn)
sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
return sk;
}
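/* Returns non-zero when the checksum is known to be bad. For CHECKSUM_COMPLETE
 * skbs the hardware sum is verified here; otherwise skb->csum is seeded with
 * the pseudo-header sum and full verification is deferred (short packets are
 * checked immediately).
 */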
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
const struct iphdr *iph = ip_hdr(skb);

if (skb->ip_summed == CHECKSUM_COMPLETE) {
if (!tcp_v4_check(skb->len, iph->saddr,
iph->daddr, skb->csum)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
return 0;
}
}

skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
skb->len, IPPROTO_TCP, 0);

if (skb->len <= 76) {
return __skb_checksum_complete(skb);
}
return 0;
}
/* The socket must have its spinlock held when we get
* here.
*
* We have a potential double-lock case here, so even when
* doing backlog processing we use the BH locking scheme.
* This is because we cannot sleep with the original spinlock
* held.
*/
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
/*
* We really want to reject the packet as early as possible
* if:
* o We're expecting an MD5'd packet and this is no MD5 tcp option
* o There is an MD5 option and we're not expecting one
*/
if (tcp_v4_inbound_md5_hash(sk, skb))
goto discard;
#endif
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
sock_rps_save_rxhash(sk, skb->rxhash);
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk;
goto reset;
}
return 0;
}

if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
goto csum_err;
if (sk->sk_state == TCP_LISTEN) {
struct sock *nsk = tcp_v4_hnd_req(sk, skb);
if (!nsk)
goto discard;
if (nsk != sk) {
if (tcp_child_process(sk, nsk, skb)) {
rsk = nsk;
goto reset;
}
return 0;
}
} else
sock_rps_save_rxhash(sk, skb->rxhash);

if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk;
goto reset;
}
return 0;

reset:
tcp_v4_send_reset(rsk, skb);
discard:
kfree_skb(skb);
/* Be careful here. If this function gets more complicated and
* gcc suffers from register pressure on the x86, sk (in %ebx)
* might be destroyed here. This current version compiles correctly,
* but you have been warned.
*/
return 0;
csum_err:
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
goto discard;
}
/*
* From tcp_input.c
*/
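/* tcp_v4_rcv() is the IPv4 receive entry point: it validates the TCP header
 * and checksum, fills in the skb control block, looks up the owning socket
 * and dispatches to the established, listening or time-wait handling paths.
 */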
int tcp_v4_rcv(struct sk_buff *skb)
{
const struct iphdr *iph;
struct tcphdr *th;
struct sock *sk;
int ret;
struct net *net = dev_net(skb->dev);
if (skb->pkt_type != PACKET_HOST)
goto discard_it;
/* Count it even if it's bad */
TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
goto discard_it;
th = tcp_hdr(skb);
if (th->doff < sizeof(struct tcphdr) / 4)
goto bad_packet;
if (!pskb_may_pull(skb, th->doff * 4))
goto discard_it;
/* An explanation is required here, I think.
* Packet length and doff are validated by header prediction,
* provided case of th->doff==0 is eliminated.
* So, we defer the checks. */
if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
goto bad_packet;
th = tcp_hdr(skb);
iph = ip_hdr(skb);
TCP_SKB_CB(skb)->seq = ntohl(th->seq);
TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
skb->len - th->doff * 4);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
TCP_SKB_CB(skb)->when = 0;
TCP_SKB_CB(skb)->flags = iph->tos;
sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
if (!sk)
goto no_tcp_socket;
process:
if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
goto discard_and_relse;
}
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
nf_reset(skb);

if (sk_filter(sk, skb))
goto discard_and_relse;
skb->dev = NULL;
bh_lock_sock_nested(sk);
ret = 0;
if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
struct tcp_sock *tp = tcp_sk(sk);
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
if (tp->ucopy.dma_chan)
ret = tcp_v4_do_rcv(sk, skb);
else
#endif
{
if (!tcp_prequeue(sk, skb))
ret = tcp_v4_do_rcv(sk, skb);
}
} else if (unlikely(sk_add_backlog(sk, skb))) {
NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
bh_unlock_sock(sk);
goto discard_and_relse;
}
bh_unlock_sock(sk);
sock_put(sk);
return ret;
no_tcp_socket:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;
if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
} else {
tcp_v4_send_reset(NULL, skb);
}
discard_it:
/* Discard frame. */
kfree_skb(skb);
return 0;
discard_and_relse:
sock_put(sk);
goto discard_it;
do_time_wait:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
inet_twsk_put(inet_twsk(sk));
goto discard_it;
}
if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
inet_twsk_put(inet_twsk(sk));
goto discard_it;
}
switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
case TCP_TW_SYN: {
struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
&tcp_hashinfo,
iph->daddr, th->dest,
inet_iif(skb));
if (sk2) {
inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
inet_twsk_put(inet_twsk(sk));
sk = sk2;
goto process;
}
/* Fall through to ACK */
}
case TCP_TW_ACK:
tcp_v4_timewait_ack(sk, skb);
break;
case TCP_TW_RST:
goto no_tcp_socket;
case TCP_TW_SUCCESS:;
}
goto discard_it;
}
struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
{
struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
struct inet_sock *inet = inet_sk(sk);
struct inet_peer *peer;
if (!rt || rt->rt_dst != inet->inet_daddr) {
peer = inet_getpeer_v4(inet->inet_daddr, 1);
*release_it = true;
} else {
if (!rt->peer)
rt_bind_peer(rt, 1);
peer = rt->peer;
*release_it = false;
}

return peer;
}
EXPORT_SYMBOL(tcp_v4_get_peer);
void *tcp_v4_tw_get_peer(struct sock *sk)
{
struct inet_timewait_sock *tw = inet_twsk(sk);

return inet_getpeer_v4(tw->tw_daddr, 1);
}
EXPORT_SYMBOL(tcp_v4_tw_get_peer);
static struct timewait_sock_ops tcp_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct tcp_timewait_sock),
.twsk_unique = tcp_twsk_unique,
.twsk_destructor= tcp_twsk_destructor,
.twsk_getpeer = tcp_v4_tw_get_peer,
};
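/* Address-family-specific connection ops used by IPv4 TCP sockets. */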
const struct inet_connection_sock_af_ops ipv4_specific = {
.queue_xmit = ip_queue_xmit,
.send_check = tcp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
.conn_request = tcp_v4_conn_request,
.syn_recv_sock = tcp_v4_syn_recv_sock,
.get_peer = tcp_v4_get_peer,
.net_header_len = sizeof(struct iphdr),
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
.addr2sockaddr = inet_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in),
.bind_conflict = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ip_setsockopt,
.compat_getsockopt = compat_ip_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
.md5_lookup = tcp_v4_md5_lookup,
.calc_md5_hash = tcp_v4_md5_hash_skb,
.md5_add = tcp_v4_md5_add_func,
.md5_parse = tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
* sk_alloc() so need not be done here.
*/
static int tcp_v4_init_sock(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
skb_queue_head_init(&tp->out_of_order_queue);
tcp_init_xmit_timers(sk);
tcp_prequeue_init(tp);
icsk->icsk_rto = TCP_TIMEOUT_INIT;
tp->mdev = TCP_TIMEOUT_INIT;
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
* algorithms that we must have the following bandaid to talk
* efficiently to them. -DaveM
*/
tp->snd_cwnd = 2;
/* See draft-stevens-tcpca-spec-01 for discussion of the
* initialization of these values.
*/
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->mss_cache = TCP_MSS_DEFAULT;
icsk->icsk_ca_ops = &tcp_init_congestion_ops;
sk->sk_state = TCP_CLOSE;
sk->sk_write_space = sk_stream_write_space;
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
icsk->icsk_af_ops = &ipv4_specific;
icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
tp->af_specific = &tcp_sock_ipv4_specific;
#endif
/* TCP Cookie Transactions */
if (sysctl_tcp_cookie_size > 0) {
/* Default, cookies without s_data_payload. */
tp->cookie_values =
kzalloc(sizeof(*tp->cookie_values),
sk->sk_allocation);
if (tp->cookie_values != NULL)
kref_init(&tp->cookie_values->kref);
}
/* Presumed zeroed, in order of appearance:
* cookie_in_always, cookie_out_never,
* s_data_constant, s_data_in, s_data_out
*/
sk->sk_sndbuf = sysctl_tcp_wmem[1];
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
percpu_counter_inc(&tcp_sockets_allocated);

return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
tcp_clear_xmit_timers(sk);
tcp_cleanup_congestion_control(sk);
tcp_write_queue_purge(sk);
/* Cleans up our, hopefully empty, out_of_order_queue. */
__skb_queue_purge(&tp->out_of_order_queue);
#ifdef CONFIG_TCP_MD5SIG
/* Clean up the MD5 key list, if any */
if (tp->md5sig_info) {
tcp_v4_clear_md5_list(sk);
kfree(tp->md5sig_info);
tp->md5sig_info = NULL;
}
#endif
#ifdef CONFIG_NET_DMA
/* Cleans up our sk_async_wait_queue */
__skb_queue_purge(&sk->sk_async_wait_queue);
#endif
/* Clean prequeue, it must be empty really */
__skb_queue_purge(&tp->ucopy.prequeue);
/* Clean up a referenced TCP bind bucket. */
if (inet_csk(sk)->icsk_bind_hash)
inet_put_port(sk);
/*
* If sendmsg cached page exists, toss it.
*/
if (sk->sk_sndmsg_page) {
__free_page(sk->sk_sndmsg_page);
sk->sk_sndmsg_page = NULL;
}
/* TCP Cookie Transactions */
if (tp->cookie_values != NULL) {
kref_put(&tp->cookie_values->kref,
tcp_cookie_values_release);
tp->cookie_values = NULL;
}
percpu_counter_dec(&tcp_sockets_allocated);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */
static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
return hlist_nulls_empty(head) ? NULL :
list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
return !is_a_nulls(tw->tw_node.next) ?
hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
/*
* Get next listener socket follow cur. If cur is NULL, get first socket
* starting from bucket given in st->bucket; when st->bucket is zero the
* very first socket in the hash table is returned.
*/
static void *listening_get_next(struct seq_file *seq, void *cur)
{
struct inet_connection_sock *icsk;
struct hlist_nulls_node *node;
struct sock *sk = cur;
struct inet_listen_hashbucket *ilb;
struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);

if (!sk) {
ilb = &tcp_hashinfo.listening_hash[st->bucket];
spin_lock_bh(&ilb->lock);
sk = sk_nulls_head(&ilb->head);
goto get_sk;
}
ilb = &tcp_hashinfo.listening_hash[st->bucket];

if (st->state == TCP_SEQ_STATE_OPENREQ) {
struct request_sock *req = cur;
icsk = inet_csk(st->syn_wait_sk);
req = req->dl_next;
while (1) {
while (req) {
if (req->rsk_ops->family == st->family) {
cur = req;
goto out;
}
req = req->dl_next;
}
if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
break;
get_req:
req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
}
read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);