/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Implementation of the Transmission Control Protocol(TCP).
*
* Version: $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
*
* IPv4 specific functions
*
*
* code split from:
* linux/ipv4/tcp.c
* linux/ipv4/tcp_input.c
* linux/ipv4/tcp_output.c
*
* See tcp.c for author information
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/*
* Changes:
* David S. Miller : New socket lookup architecture.
* This code is dedicated to John Dyson.
* David S. Miller : Change semantics of established hash,
* half is devoted to TIME_WAIT sockets
* and the rest go in the other half.
* Andi Kleen : Add support for syncookies and fixed
* some bugs: ip options weren't passed to
* the TCP layer, missed a check for an
* ACK bit.
* Andi Kleen : Implemented fast path mtu discovery.
* Fixed many serious bugs in the
* request_sock handling and moved
* most of it into the af independent code.
* Added tail drop and some other bugfixes.
* Added new listen semantics.
* Mike McLagan : Routing by source
* Juan Jose Ciarlante: ip_dynaddr bits
* Andi Kleen: various fixes.
* Vitaly E. Lavrov : Transparent proxy revived after year
* coma.
* Andi Kleen : Fix new listen.
* Andi Kleen : Fix accept error reporting.
* YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
* Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
* a single port at the same time.
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <net/icmp.h>

#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
extern int sysctl_ip_dynaddr;
int sysctl_tcp_tw_reuse;
int sysctl_tcp_low_latency;
/* Check TCP sequence numbers in ICMP packets. */
#define ICMP_MIN_LENGTH 8
/* Socket used for sending RSTs */
static struct socket *tcp_socket;
void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
struct sk_buff *skb);
struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
.lhash_lock = RW_LOCK_UNLOCKED,
.lhash_users = ATOMIC_INIT(0),
.lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
.portalloc_lock = SPIN_LOCK_UNLOCKED,
};
/*
* This array holds the first and last local port number.
* For high-usage systems, use sysctl to change this to
* 32768-61000
*/
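/* For example (illustrative; assumes the usual procfs path for this
* sysctl): echo "32768 61000" > /proc/sys/net/ipv4/ip_local_port_range
*/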
int sysctl_local_port_range[2] = { 1024, 4999 };
int tcp_port_rover = 1024 - 1;
static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
{
const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk);
struct sock *sk2;
struct hlist_node *node;
int reuse = sk->sk_reuse;
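/* Walk every socket already bound to this port: a conflict exists
* unless the sockets are bound to different devices, both set
* SO_REUSEADDR with the existing socket not listening, or their
* local addresses differ (a wildcard address matches everything).
*/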
sk_for_each_bound(sk2, node, &tb->owners) {
if (sk != sk2 &&
!tcp_v6_ipv6only(sk2) &&
(!sk->sk_bound_dev_if ||
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if (!reuse || !sk2->sk_reuse ||
sk2->sk_state == TCP_LISTEN) {
const u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2);
if (!sk2_rcv_saddr || !sk_rcv_saddr ||
sk2_rcv_saddr == sk_rcv_saddr)
break;
}
}
}
return node != NULL;
}
/* Obtain a reference to a local port for the given sock,
* if snum is zero it means select any available local port.
*/
static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
{
struct inet_bind_hashbucket *head;
struct hlist_node *node;
struct inet_bind_bucket *tb;
int ret;
local_bh_disable();
if (!snum) {
int low = sysctl_local_port_range[0];
int high = sysctl_local_port_range[1];
int remaining = (high - low) + 1;
int rover;
spin_lock(&tcp_portalloc_lock);
if (tcp_port_rover < low)
rover = low;
else
rover = tcp_port_rover;
do {
rover++;
if (rover > high)
rover = low;
head = &tcp_bhash[inet_bhashfn(rover, tcp_bhash_size)];
spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain)
if (tb->port == rover)
goto next;
break;
next:
spin_unlock(&head->lock);
} while (--remaining > 0);
tcp_port_rover = rover;
spin_unlock(&tcp_portalloc_lock);
/* Exhausted local port range during search? It is not
* possible for us to be holding one of the bind hash
* locks if this test triggers, because if 'remaining'
* drops to zero, we broke out of the do/while loop at
* the top level, not from the 'break;' statement.
*/
if (unlikely(remaining <= 0))
goto fail;
/* OK, here is the one we will use. HEAD is
* non-NULL and we hold its lock.
*/
snum = rover;
} else {
head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain)
if (tb->port == snum)
goto tb_found;
}
tb = NULL;
goto tb_not_found;
tb_found:
if (!hlist_empty(&tb->owners)) {
if (sk->sk_reuse > 1)
goto success;
if (tb->fastreuse > 0 &&
sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
goto success;
} else {
ret = 1;
if (tcp_bind_conflict(sk, tb))
goto fail_unlock;
}
}
tb_not_found:
ret = 1;
if (!tb && (tb = inet_bind_bucket_create(tcp_bucket_cachep, head, snum)) == NULL)
goto fail_unlock;
if (hlist_empty(&tb->owners)) {
if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
tb->fastreuse = 1;
else
tb->fastreuse = 0;
} else if (tb->fastreuse &&
(!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
tb->fastreuse = 0;
success:
if (!inet_sk(sk)->bind_hash)
inet_bind_hash(sk, tb, snum);
BUG_TRAP(inet_sk(sk)->bind_hash == tb);
ret = 0;
fail_unlock:
spin_unlock(&head->lock);
fail:
local_bh_enable();
return ret;
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP, but it can be very bad on SMP.
* Look, when several writers sleep and reader wakes them up, all but one
* immediately hit write lock and grab all the cpus. Exclusive sleep solves
* this, _but_ remember, it adds useless work on UP machines (wake up each
* exclusive lock release). It should be ifdefed really.
*/
void tcp_listen_wlock(void)
{
write_lock(&tcp_lhash_lock);
if (atomic_read(&tcp_lhash_users)) {
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait_exclusive(&tcp_lhash_wait,
&wait, TASK_UNINTERRUPTIBLE);
if (!atomic_read(&tcp_lhash_users))
break;
write_unlock_bh(&tcp_lhash_lock);
schedule();
write_lock_bh(&tcp_lhash_lock);
}
finish_wait(&tcp_lhash_wait, &wait);
}
}
static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
{
struct hlist_head *list;
rwlock_t *lock;
BUG_TRAP(sk_unhashed(sk));
if (listen_possible && sk->sk_state == TCP_LISTEN) {
list = &tcp_listening_hash[inet_sk_listen_hashfn(sk)];
lock = &tcp_lhash_lock;
tcp_listen_wlock();
} else {

sk->sk_hashent = inet_sk_ehashfn(sk, tcp_ehash_size);
list = &tcp_ehash[sk->sk_hashent].chain;
lock = &tcp_ehash[sk->sk_hashent].lock;
write_lock(lock);
}
__sk_add_node(sk, list);
sock_prot_inc_use(sk->sk_prot);
write_unlock(lock);
if (listen_possible && sk->sk_state == TCP_LISTEN)
wake_up(&tcp_lhash_wait);
}
static void tcp_v4_hash(struct sock *sk)
{
if (sk->sk_state != TCP_CLOSE) {
local_bh_disable();
__tcp_v4_hash(sk, 1);
local_bh_enable();
}
}
void tcp_unhash(struct sock *sk)
{
rwlock_t *lock;
if (sk_unhashed(sk))
goto ende;
if (sk->sk_state == TCP_LISTEN) {
local_bh_disable();
tcp_listen_wlock();
lock = &tcp_lhash_lock;
} else {
struct inet_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
lock = &head->lock;
write_lock_bh(&head->lock);
}
if (__sk_del_node_init(sk))
sock_prot_dec_use(sk->sk_prot);
write_unlock_bh(lock);
ende:
if (sk->sk_state == TCP_LISTEN)
wake_up(&tcp_lhash_wait);
}
/* Don't inline this cruft. There are some nice properties to
* exploit here. The BSD API does not allow a listening TCP
* to specify the remote port nor the remote address for the
* connection. So always assume those are both wildcarded
* during the search since they can never be otherwise.
*/
static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head,
const u32 daddr,
const unsigned short hnum,
const int dif)
{
struct sock *result = NULL, *sk;
struct hlist_node *node;
int score, hiscore;
hiscore=-1;
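/* Score each candidate: +1 for an AF_INET socket, +2 for a matching
* bound address, +2 for a matching bound device. A score of 5 is a
* perfect match and ends the search early.
*/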
sk_for_each(sk, node, head) {
struct inet_sock *inet = inet_sk(sk);
if (inet->num == hnum && !ipv6_only_sock(sk)) {
__u32 rcv_saddr = inet->rcv_saddr;
score = (sk->sk_family == PF_INET ? 1 : 0);
if (rcv_saddr) {
if (rcv_saddr != daddr)
continue;
score+=2;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
continue;
score+=2;
}
if (score == 5)
return sk;
if (score > hiscore) {
hiscore = score;
result = sk;
}
}
}
return result;
}
/* Optimize the common listener case. */
static inline struct sock *tcp_v4_lookup_listener(const u32 daddr,
const unsigned short hnum,
const int dif)
{
struct sock *sk = NULL;
struct hlist_head *head;
read_lock(&tcp_lhash_lock);
head = &tcp_listening_hash[inet_lhashfn(hnum)];
if (!hlist_empty(head)) {
struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
if (inet->num == hnum && !sk->sk_node.next &&
(!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
(sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
!sk->sk_bound_dev_if)
goto sherry_cache;
sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
}
if (sk) {
sherry_cache:
sock_hold(sk);
}
read_unlock(&tcp_lhash_lock);
return sk;
}
/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
* we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
*
* Local BH must be disabled here.
*/
static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
const u16 sport,
const u32 daddr,
const u16 hnum,
const int dif)
{
struct inet_ehash_bucket *head;
TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
__u32 ports = TCP_COMBINED_PORTS(sport, hnum);
struct sock *sk;
struct hlist_node *node;
/* Optimize here for direct hit, only listening connections can
* have wildcards anyway.
*/

const int hash = inet_ehashfn(daddr, hnum, saddr, sport, tcp_ehash_size);
head = &tcp_ehash[hash];
read_lock(&head->lock);
sk_for_each(sk, node, &head->chain) {
if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
goto hit; /* You sunk my battleship! */
}
/* Must check for a TIME_WAIT'er before going to listener hash. */
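/* TIME_WAIT buckets live in the second half of the established hash:
* chain i holds live connections, chain i + tcp_ehash_size holds
* their TIME_WAIT counterparts (see the changelog note above).
*/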
sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
goto hit;
}
sk = NULL;
out:
read_unlock(&head->lock);
return sk;
hit:
sock_hold(sk);
goto out;
}
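/* Full lookup: try the established (and TIME_WAIT) hash first, then
* fall back to the listening hash. 'sk ? : expr' is the GCC extension
* for 'sk ? sk : expr'.
*/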
static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
u32 daddr, u16 hnum, int dif)
{
struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
daddr, hnum, dif);
return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
}
inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
u16 dport, int dif)
{
struct sock *sk;
local_bh_disable();
sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
local_bh_enable();
return sk;
}
EXPORT_SYMBOL_GPL(tcp_v4_lookup);
static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
{
return secure_tcp_sequence_number(skb->nh.iph->daddr,
skb->nh.iph->saddr,
skb->h.th->dest,
skb->h.th->source);
}
/* called with local bh disabled */
static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
struct tcp_tw_bucket **twp)
{
struct inet_sock *inet = inet_sk(sk);
u32 daddr = inet->rcv_saddr;
u32 saddr = inet->daddr;
int dif = sk->sk_bound_dev_if;
TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
__u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);

const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_ehash_size);
struct inet_ehash_bucket *head = &tcp_ehash[hash];
struct sock *sk2;
struct hlist_node *node;
struct tcp_tw_bucket *tw;
write_lock(&head->lock);
/* Check TIME-WAIT sockets first. */
sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
tw = (struct tcp_tw_bucket *)sk2;
if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
struct tcp_sock *tp = tcp_sk(sk);
/* With PAWS, it is safe from the viewpoint
of data integrity. Even without PAWS it
is safe provided sequence spaces do not
overlap i.e. at data rates <= 80Mbit/sec.
Actually, the idea is close to VJ's one,
only timestamp cache is held not per host,
but per port pair and TW bucket is used
as state holder.
If TW bucket has been already destroyed we
fall back to VJ's scheme and use initial
timestamp retrieved from peer table.
*/
if (tw->tw_ts_recent_stamp &&
(!twp || (sysctl_tcp_tw_reuse &&
xtime.tv_sec -
tw->tw_ts_recent_stamp > 1))) {
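/* Reuse the pair: start the new connection's sequence space well
* past the old one (snd_nxt + 65535 + 2) so the two cannot overlap
* even without PAWS.
*/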
if ((tp->write_seq =
tw->tw_snd_nxt + 65535 + 2) == 0)
tp->write_seq = 1;
tp->rx_opt.ts_recent = tw->tw_ts_recent;
tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
sock_hold(sk2);
goto unique;
} else
goto not_unique;
}
}
tw = NULL;
/* And established part... */
sk_for_each(sk2, node, &head->chain) {
if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
goto not_unique;
}
unique:
/* Must record num and sport now. Otherwise we will see
* a socket with a funny identity in the hash table. */
inet->num = lport;
inet->sport = htons(lport);
sk->sk_hashent = hash;
BUG_TRAP(sk_unhashed(sk));
__sk_add_node(sk, &head->chain);
sock_prot_inc_use(sk->sk_prot);
write_unlock(&head->lock);
if (twp) {
*twp = tw;
NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
} else if (tw) {
/* Silly. Should hash-dance instead... */
tcp_tw_deschedule(tw);
NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
tcp_tw_put(tw);
}
return 0;
not_unique:
write_unlock(&head->lock);
return -EADDRNOTAVAIL;
}
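/* Starting offset for the ephemeral port search, derived from the
* connection's addresses and destination port so that different
* peers probe the port space in different orders.
*/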
static inline u32 connect_port_offset(const struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
return secure_tcp_port_ephemeral(inet->rcv_saddr, inet->daddr,
inet->dport);
}
/*
* Bind a port for a connect operation and hash it.
*/
static inline int tcp_v4_hash_connect(struct sock *sk)
{
const unsigned short snum = inet_sk(sk)->num;
struct inet_bind_hashbucket *head;
struct inet_bind_bucket *tb;
int ret;
if (!snum) {
int low = sysctl_local_port_range[0];
int high = sysctl_local_port_range[1];
int range = high - low;
int i;
int port;
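/* 'hint' remembers how far previous searches advanced; each new
* autobind starts offset by it, so successive connects do not rescan
* the same ports (hint += i below on success).
*/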
static u32 hint;
u32 offset = hint + connect_port_offset(sk);
struct hlist_node *node;
struct tcp_tw_bucket *tw = NULL;
local_bh_disable();
for (i = 1; i <= range; i++) {
port = low + (i + offset) % range;
head = &tcp_bhash[inet_bhashfn(port, tcp_bhash_size)];
spin_lock(&head->lock);
/* Does not bother with rcv_saddr checks,
* because the established check is already
* unique enough.
*/
inet_bind_bucket_for_each(tb, node, &head->chain) {
if (tb->port == port) {
BUG_TRAP(!hlist_empty(&tb->owners));
if (tb->fastreuse >= 0)
goto next_port;
if (!__tcp_v4_check_established(sk,
port,
&tw))
goto ok;
goto next_port;
}
}
tb = inet_bind_bucket_create(tcp_bucket_cachep, head, port);
if (!tb) {
spin_unlock(&head->lock);
break;
}
tb->fastreuse = -1;
goto ok;
next_port:
spin_unlock(&head->lock);
}
local_bh_enable();
return -EADDRNOTAVAIL;
ok:
hint += i;
/* Head lock still held and bh's disabled */
inet_bind_hash(sk, tb, port);
if (sk_unhashed(sk)) {
inet_sk(sk)->sport = htons(port);
__tcp_v4_hash(sk, 0);
}
spin_unlock(&head->lock);
if (tw) {
tcp_tw_deschedule(tw);
tcp_tw_put(tw);
}
ret = 0;
goto out;
}
head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
tb = inet_sk(sk)->bind_hash;
spin_lock_bh(&head->lock);
if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
__tcp_v4_hash(sk, 0);
spin_unlock_bh(&head->lock);
return 0;
} else {
spin_unlock(&head->lock);
/* No definite answer... Walk to established hash table */
ret = __tcp_v4_check_established(sk, snum, NULL);
out:
local_bh_enable();
return ret;
}
}
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
struct rtable *rt;
u32 daddr, nexthop;
int tmp;
int err;
if (addr_len < sizeof(struct sockaddr_in))
return -EINVAL;
if (usin->sin_family != AF_INET)
return -EAFNOSUPPORT;
nexthop = daddr = usin->sin_addr.s_addr;
if (inet->opt && inet->opt->srr) {
if (!daddr)
return -EINVAL;
nexthop = inet->opt->faddr;
}
tmp = ip_route_connect(&rt, nexthop, inet->saddr,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
IPPROTO_TCP,
inet->sport, usin->sin_port, sk);
if (tmp < 0)
return tmp;
if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
ip_rt_put(rt);
return -ENETUNREACH;
}
if (!inet->opt || !inet->opt->srr)
daddr = rt->rt_dst;
if (!inet->saddr)
inet->saddr = rt->rt_src;
inet->rcv_saddr = inet->saddr;
if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
/* Reset inherited state */
tp->rx_opt.ts_recent = 0;
tp->rx_opt.ts_recent_stamp = 0;
tp->write_seq = 0;
}
if (sysctl_tcp_tw_recycle &&
!tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
struct inet_peer *peer = rt_get_peer(rt);
/* VJ's idea. We save the last timestamp seen from
* the destination in the peer table when entering TIME-WAIT,
* and initialize rx_opt.ts_recent from it when trying a new connection.
*/
if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
tp->rx_opt.ts_recent = peer->tcp_ts;
}
}
inet->dport = usin->sin_port;
inet->daddr = daddr;
tp->ext_header_len = 0;
if (inet->opt)
tp->ext_header_len = inet->opt->optlen;
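/* 536 is the RFC 1122 default MSS: the 576-byte minimum reassembly
* buffer minus 40 bytes of IP and TCP headers.
*/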
tp->rx_opt.mss_clamp = 536;
/* Socket identity is still unknown (sport may be zero).
* However we set state to SYN-SENT and, without releasing the socket
* lock, select a source port, enter ourselves into the hash tables and
* complete initialization after this.
*/
tcp_set_state(sk, TCP_SYN_SENT);
err = tcp_v4_hash_connect(sk);
if (err)
goto failure;
err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
if (err)
goto failure;
/* OK, now commit destination to socket. */
sk_setup_caps(sk, &rt->u.dst);
if (!tp->write_seq)
tp->write_seq = secure_tcp_sequence_number(inet->saddr,
inet->daddr,
inet->sport,
usin->sin_port);
inet->id = tp->write_seq ^ jiffies;
err = tcp_connect(sk);
rt = NULL;
if (err)
goto failure;
return 0;
failure:
/* This unhashes the socket and releases the local port, if necessary. */
tcp_set_state(sk, TCP_CLOSE);
ip_rt_put(rt);
sk->sk_route_caps = 0;
inet->dport = 0;
return err;
}
static __inline__ int tcp_v4_iif(struct sk_buff *skb)
{
return ((struct rtable *)skb->dst)->rt_iif;
}
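/* Hash a pending connection request by its remote address and port.
* TCP_SYNQ_HSIZE is a power of two, so the jhash result is simply
* masked down to a table index.
*/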
static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
{
return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
}
static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp,
struct request_sock ***prevp,
__u16 rport, __u32 raddr,
__u32 laddr)
{
struct listen_sock *lopt = tp->accept_queue.listen_opt;
struct request_sock *req, **prev;
for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
(req = *prev) != NULL;
prev = &req->dl_next) {

const struct inet_request_sock *ireq = inet_rsk(req);
if (ireq->rmt_port == rport &&
ireq->rmt_addr == raddr &&
ireq->loc_addr == laddr &&
TCP_INET_FAMILY(req->rsk_ops->family)) {
BUG_TRAP(!req->sk);
*prevp = prev;
break;
}
}
return req;
}
static void tcp_v4_synq_add(struct sock *sk, struct request_sock *req)
{
struct tcp_sock *tp = tcp_sk(sk);
struct listen_sock *lopt = tp->accept_queue.listen_opt;

u32 h = tcp_v4_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT);
tcp_synq_added(sk);
}
/*
* This routine does path mtu discovery as defined in RFC1191.
*/
static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
u32 mtu)
{
struct dst_entry *dst;
struct inet_sock *inet = inet_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
* sent out by Linux are always < 576 bytes, so they should go through
* unfragmented).
*/
if (sk->sk_state == TCP_LISTEN)
return;
/* We don't check in the dst entry whether PMTU discovery is forbidden
* on this route. We just assume that no packet-too-big packets
* are sent back when PMTU discovery is not active.
* There is a small race when the user changes this flag in the
* route, but I think that's acceptable.
*/
if ((dst = __sk_dst_check(sk, 0)) == NULL)
return;
dst->ops->update_pmtu(dst, mtu);
/* Something is about to go wrong... Remember the soft error
* in case this connection is not able to recover.
*/
if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
sk->sk_err_soft = EMSGSIZE;
mtu = dst_mtu(dst);
if (inet->pmtudisc != IP_PMTUDISC_DONT &&
tp->pmtu_cookie > mtu) {
tcp_sync_mss(sk, mtu);
/* Resend the TCP packet because it's
* clear that the old packet has been
* dropped. This is the new "fast" path mtu
* discovery.
*/
tcp_simple_retransmit(sk);
} /* else let the usual retransmit timer handle it */
}
/*
* This routine is called by the ICMP module when it gets some
* sort of error condition. If err < 0 then the socket should
* be closed and the error returned to the user. If err > 0
* it's just the icmp type << 8 | icmp code. After adjustment
* header points to the first 8 bytes of the tcp header. We need
* to find the appropriate port.
*
* The locking strategy used here is very "optimistic". When
* someone else accesses the socket the ICMP is just dropped
* and for some paths there is no check at all.
* A more general error queue to queue errors for later handling
* is probably better.
*
*/
void tcp_v4_err(struct sk_buff *skb, u32 info)
{
struct iphdr *iph = (struct iphdr *)skb->data;
struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
struct tcp_sock *tp;
struct inet_sock *inet;
int type = skb->h.icmph->type;
int code = skb->h.icmph->code;
struct sock *sk;
__u32 seq;
int err;
if (skb->len < (iph->ihl << 2) + 8) {
ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
return;
}
sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
th->source, tcp_v4_iif(skb));
if (!sk) {
ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
return;
}
if (sk->sk_state == TCP_TIME_WAIT) {
tcp_tw_put((struct tcp_tw_bucket *)sk);
return;
}
bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
* servers this needs to be solved differently.
*/
if (sock_owned_by_user(sk))
NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
if (sk->sk_state == TCP_CLOSE)
goto out;
tp = tcp_sk(sk);
seq = ntohl(th->seq);
if (sk->sk_state != TCP_LISTEN &&
!between(seq, tp->snd_una, tp->snd_nxt)) {
NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
switch (type) {
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */
goto out;
case ICMP_PARAMETERPROB:
err = EPROTO;
break;
case ICMP_DEST_UNREACH:
if (code > NR_ICMP_UNREACH)
goto out;
if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
if (!sock_owned_by_user(sk))
do_pmtu_discovery(sk, iph, info);
goto out;
}
err = icmp_err_convert[code].errno;
break;
case ICMP_TIME_EXCEEDED:
err = EHOSTUNREACH;
break;
default:
goto out;
}
switch (sk->sk_state) {
struct request_sock *req, **prev;
case TCP_LISTEN:
if (sock_owned_by_user(sk))
goto out;
req = tcp_v4_search_req(tp, &prev, th->dest,
iph->daddr, iph->saddr);
if (!req)
goto out;
/* ICMPs are not backlogged, hence we cannot get
an established socket here.
*/
BUG_TRAP(!req->sk);

if (seq != tcp_rsk(req)->snt_isn) {
NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
/*
* Still in SYN_RECV, just remove it silently.
* There is no good way to pass the error to the newly
* created socket, and POSIX does not want network
* errors returned from accept().
*/
tcp_synq_drop(sk, req, prev);
goto out;
case TCP_SYN_SENT:
case TCP_SYN_RECV: /* Cannot happen.
It can, for example, happen if SYNs crossed.
*/
if (!sock_owned_by_user(sk)) {
TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
sk->sk_err = err;
sk->sk_error_report(sk);
tcp_done(sk);
} else {
sk->sk_err_soft = err;
}
goto out;