info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
info->tcpi_snd_mss = tp->mss_cache;
info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
info->tcpi_unacked = tp->packets_out;
info->tcpi_sacked = tp->sacked_out;
info->tcpi_lost = tp->lost_out;
info->tcpi_retrans = tp->retrans_out;
info->tcpi_fackets = tp->fackets_out;
info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
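	/* srtt is kept shifted left by 3 and mdev by 2 internally; undo the
	 * fixed-point scaling so the values below are plain microseconds.
	 */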
info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
info->tcpi_snd_ssthresh = tp->snd_ssthresh;
info->tcpi_snd_cwnd = tp->snd_cwnd;
info->tcpi_advmss = tp->advmss;
info->tcpi_reordering = tp->reordering;
info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
info->tcpi_rcv_space = tp->rcvq_space.space;
info->tcpi_total_retrans = tp->total_retrans;
}
EXPORT_SYMBOL_GPL(tcp_get_info);
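/*
 * Illustrative only (not part of the kernel sources): a minimal userspace
 * sketch of how the tcp_info block filled in above can be retrieved with
 * getsockopt(TCP_INFO).  "fd" is assumed to be a connected TCP socket.
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <netinet/tcp.h>
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt %uus cwnd %u retrans %u\n",
 *		       ti.tcpi_rtt, ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
 */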
static int do_tcp_getsockopt(struct sock *sk, int level,
			     int optname, char __user *optval, int __user *optlen)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int val, len;
if (get_user(len, optlen))
return -EFAULT;
len = min_t(unsigned int, len, sizeof(int));
if (len < 0)
return -EINVAL;
switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;
		break;
case TCP_NODELAY:
val = !!(tp->nonagle&TCP_NAGLE_OFF);
break;
case TCP_CORK:
val = !!(tp->nonagle&TCP_NAGLE_CORK);
break;
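	/* Keepalive settings are kept in jiffies; report them in seconds,
	 * falling back to the global sysctls when the socket has not set
	 * its own values.
	 */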
case TCP_KEEPIDLE:
val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
break;
case TCP_KEEPINTVL:
val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
break;
case TCP_KEEPCNT:
val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
break;
	case TCP_SYNCNT:
val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
break;
case TCP_LINGER2:
val = tp->linger2;
if (val >= 0)
val = (val ? : sysctl_tcp_fin_timeout) / HZ;
break;
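	/* rskq_defer_accept holds a retransmission count; convert it back
	 * into the approximate timeout in seconds that userspace asked for.
	 */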
	case TCP_DEFER_ACCEPT:
val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
break;
case TCP_WINDOW_CLAMP:
val = tp->window_clamp;
break;
case TCP_INFO: {
struct tcp_info info;
if (get_user(len, optlen))
return -EFAULT;
tcp_get_info(sk, &info);
len = min_t(unsigned int, len, sizeof(info));
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &info, len))
return -EFAULT;
return 0;
}
	case TCP_QUICKACK:
		val = !icsk->icsk_ack.pingpong;
		break;
case TCP_CONGESTION:
if (get_user(len, optlen))
return -EFAULT;
len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
return -EFAULT;
return 0;
default:
return -ENOPROTOOPT;
};
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
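/*
 * Illustrative only (not kernel code): reading back the congestion control
 * name that the TCP_CONGESTION branch above copies out.  "fd" is assumed
 * to be an open TCP socket; TCP_CONGESTION may require <linux/tcp.h>
 * depending on the libc headers in use.
 *
 *	char name[16];		* TCP_CA_NAME_MAX *
 *	socklen_t len = sizeof(name);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
 *		printf("congestion control: %.*s\n", (int)len, name);
 */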
int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
int __user *optlen)
{
struct inet_connection_sock *icsk = inet_csk(sk);
if (level != SOL_TCP)
return icsk->icsk_af_ops->getsockopt(sk, level, optname,
optval, optlen);
return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
if (level != SOL_TCP)
return inet_csk_compat_getsockopt(sk, level, optname,
optval, optlen);
return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif
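/*
 * Software GSO: tcp_tso_segment() below re-segments an oversized TCP skb
 * into gso_size-sized frames.  Each new segment reuses the original
 * header, so sequence numbers and checksums are patched up incrementally
 * (the ~csum_fold(check + delta) trick) rather than recomputed from
 * scratch, and FIN/PSH are cleared on every segment except the last.
 */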
struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct tcphdr *th;
unsigned thlen;
unsigned int seq;
unsigned int delta;
unsigned int oldlen;
unsigned int len;
if (!pskb_may_pull(skb, sizeof(*th)))
goto out;
th = skb->h.th;
thlen = th->doff * 4;
if (thlen < sizeof(*th))
goto out;
	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);
if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
int mss = skb_shinfo(skb)->gso_size;
skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
segs = NULL;
goto out;
}
	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;
len = skb_shinfo(skb)->gso_size;
delta = htonl(oldlen + (thlen + len));
skb = segs;
th = skb->h.th;
seq = ntohl(th->seq);
do {
th->fin = th->psh = 0;
th->check = ~csum_fold(th->check + delta);
if (skb->ip_summed != CHECKSUM_HW)
th->check = csum_fold(csum_partial(skb->h.raw, thlen,
skb->csum));
seq += len;
skb = skb->next;
th = skb->h.th;
th->seq = htonl(seq);
th->cwr = 0;
} while (skb->next);
delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
th->check = ~csum_fold(th->check + delta);
	if (skb->ip_summed != CHECKSUM_HW)
		th->check = csum_fold(csum_partial(skb->h.raw, thlen,
						   skb->csum));

out:
	return segs;
}
extern struct tcp_congestion_ops tcp_reno;
static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
if (!str)
return 0;
thash_entries = simple_strtoul(str, &str, 0);
return 1;
}
__setup("thash_entries=", set_thash_entries);
void __init tcp_init(void)
{
struct sk_buff *skb = NULL;
unsigned long limit;
int order, i, max_share;
if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
sizeof(skb->cb));
tcp_hashinfo.bind_bucket_cachep =
kmem_cache_create("tcp_bind_bucket",
sizeof(struct inet_bind_bucket), 0,
SLAB_HWCACHE_ALIGN, NULL, NULL);
if (!tcp_hashinfo.bind_bucket_cachep)
panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
/* Size and allocate the main established and bind bucket
* hash tables.
*
* The methodology is similar to that of the buffer cache.
*/
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					HASH_HIGHMEM,
					&tcp_hashinfo.ehash_size,
					NULL,
					0);
	tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
	for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
		rwlock_init(&tcp_hashinfo.ehash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
	}

	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_size,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					HASH_HIGHMEM,
					&tcp_hashinfo.bhash_size,
					NULL,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
spin_lock_init(&tcp_hashinfo.bhash[i].lock);
INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
}
/* Try to be a bit smarter and adjust defaults depending
* on available memory.
*/
	for (order = 0; ((1 << order) << PAGE_SHIFT) <
			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
	     order++)
		;
	if (order >= 4) {
sysctl_local_port_range[0] = 32768;
sysctl_local_port_range[1] = 61000;
tcp_death_row.sysctl_max_tw_buckets = 180000;
sysctl_tcp_max_orphans = 4096 << (order - 4);
sysctl_max_syn_backlog = 1024;
} else if (order < 3) {
sysctl_local_port_range[0] = 1024 * (3 - order);
tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
sysctl_tcp_max_orphans >>= (3 - order);
sysctl_max_syn_backlog = 128;
}
sysctl_tcp_mem[0] = 768 << order;
sysctl_tcp_mem[1] = 1024 << order;
sysctl_tcp_mem[2] = 1536 << order;
limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
max_share = min(4UL*1024*1024, limit);
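	/*
	 * Worked example (assuming order == 4 and 4KB pages): tcp_mem
	 * becomes {12288, 16384, 24576} pages; limit = 16384 << 5 = 512K,
	 * i.e. 1/128th of the tcp_mem[1] pressure threshold expressed in
	 * bytes, so the per-socket rmem/wmem maxima below come out at
	 * 512KB here (and are capped at 4MB on machines with more memory).
	 */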
sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
sysctl_tcp_wmem[1] = 16*1024;
sysctl_tcp_wmem[2] = max(64*1024, max_share);
sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
sysctl_tcp_rmem[1] = 87380;
sysctl_tcp_rmem[2] = max(87380, max_share);
printk(KERN_INFO "TCP: Hash tables configured "
"(established %d bind %d)\n",
tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
tcp_register_congestion_control(&tcp_reno);
}
EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics);