*/
if (flag & FLAG_RETRANS_DATA_ACKED)
return;
tcp_rtt_estimator(tp, seq_rtt);
tcp_set_rto(tp);
tp->backoff = 0;
tcp_bound_rto(tp);
}
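/* Pick the RTT source for this ACK: prefer the echoed timestamp when the
* peer supplied one (it remains usable even when retransmitted data is
* ACKed), otherwise fall back to the sequence-based measurement in
* seq_rtt, which is only usable when non-negative.
*/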
static inline void tcp_ack_update_rtt(struct tcp_sock *tp,
int flag, s32 seq_rtt)
{
/* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
tcp_ack_saw_tstamp(tp, flag);
else if (seq_rtt >= 0)
tcp_ack_no_tstamp(tp, seq_rtt, flag);
}
/*
* Compute congestion window to use.
*
* This is from the implementation of BICTCP in
* Lisong Xu, Khaled Harfoush, and Injong Rhee.
* "Binary Increase Congestion Control for Fast, Long Distance
* Networks" in IEEE INFOCOM 2004.
* Available from:
* http://www.csc.ncsu.edu/faculty/rhee/export/bitcp.pdf
*
* Unless BIC is enabled and congestion window is large
* this behaves the same as the original Reno.
*/
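/* In outline, bictcp_cwnd() returns how many ACKs must be counted
* before snd_cwnd is increased by one segment (see reno_cong_avoid()):
* o below sysctl_tcp_bic_low_window, cnt equals snd_cwnd, i.e. the
* plain Reno increase of roughly one segment per RTT;
* o below the last maximum (last_max_cwnd), growth follows a binary
* search toward that maximum, capped at about BICTCP_MAX_INCREMENT
* segments per RTT while far away;
* o above the last maximum, growth starts slowly and then ramps up
* linearly ("max probing").
*/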
static inline __u32 bictcp_cwnd(struct tcp_sock *tp)
{
/* original Reno behaviour */
if (!tcp_is_bic(tp))
return tp->snd_cwnd;
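/* Reuse the cached count if cwnd is unchanged and no more than HZ/32
* ticks (roughly 31ms at HZ=1000) have elapsed since the last
* recalculation.
*/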
if (tp->bictcp.last_cwnd == tp->snd_cwnd &&
(s32)(tcp_time_stamp - tp->bictcp.last_stamp) <= (HZ>>5))
return tp->bictcp.cnt;
tp->bictcp.last_cwnd = tp->snd_cwnd;
tp->bictcp.last_stamp = tcp_time_stamp;
/* start off normal */
if (tp->snd_cwnd <= sysctl_tcp_bic_low_window)
tp->bictcp.cnt = tp->snd_cwnd;
/* binary increase */
else if (tp->snd_cwnd < tp->bictcp.last_max_cwnd) {
__u32 dist = (tp->bictcp.last_max_cwnd - tp->snd_cwnd)
/ BICTCP_B;
if (dist > BICTCP_MAX_INCREMENT)
/* linear increase */
tp->bictcp.cnt = tp->snd_cwnd / BICTCP_MAX_INCREMENT;
else if (dist <= 1U)
/* binary search increase */
tp->bictcp.cnt = tp->snd_cwnd * BICTCP_FUNC_OF_MIN_INCR
/ BICTCP_B;
else
/* binary search increase */
tp->bictcp.cnt = tp->snd_cwnd / dist;
} else {
/* slow start and linear increase */
if (tp->snd_cwnd < tp->bictcp.last_max_cwnd + BICTCP_B)
/* slow start */
tp->bictcp.cnt = tp->snd_cwnd * BICTCP_FUNC_OF_MIN_INCR
/ BICTCP_B;
else if (tp->snd_cwnd < tp->bictcp.last_max_cwnd
+ BICTCP_MAX_INCREMENT*(BICTCP_B-1))
/* slow start */
tp->bictcp.cnt = tp->snd_cwnd * (BICTCP_B-1)
/ (tp->snd_cwnd-tp->bictcp.last_max_cwnd);
else
/* linear increase */
tp->bictcp.cnt = tp->snd_cwnd / BICTCP_MAX_INCREMENT;
}
return tp->bictcp.cnt;
}
/* This is Jacobson's slow start and congestion avoidance.
* SIGCOMM '88, p. 328.
*/
static inline void reno_cong_avoid(struct tcp_sock *tp)
{
if (tp->snd_cwnd <= tp->snd_ssthresh) {
/* In "safe" area, increase. */
if (tp->snd_cwnd < tp->snd_cwnd_clamp)
tp->snd_cwnd++;
} else {
/* In dangerous area, increase slowly.
* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
*/
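/* bictcp_cwnd() gives the number of ACKs to count before bumping
* cwnd by one segment; with BIC disabled it returns snd_cwnd, which
* yields the classic Reno increase of about one segment per RTT.
*/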
if (tp->snd_cwnd_cnt >= bictcp_cwnd(tp)) {
if (tp->snd_cwnd < tp->snd_cwnd_clamp)
tp->snd_cwnd++;
tp->snd_cwnd_cnt=0;
} else
tp->snd_cwnd_cnt++;
}
tp->snd_cwnd_stamp = tcp_time_stamp;
}
/* This is based on the congestion detection/avoidance scheme described in
* Lawrence S. Brakmo and Larry L. Peterson.
* "TCP Vegas: End to end congestion avoidance on a global internet."
* IEEE Journal on Selected Areas in Communications, 13(8):1465--1480,
* October 1995. Available from:
* ftp://ftp.cs.arizona.edu/xkernel/Papers/jsac.ps
*
* See http://www.cs.arizona.edu/xkernel/ for their implementation.
* The main aspects that distinguish this implementation from the
* Arizona Vegas implementation are:
* o We do not change the loss detection or recovery mechanisms of
* Linux in any way. Linux already recovers from losses quite well,
* using fine-grained timers, NewReno, and FACK.
* o To avoid the performance penalty imposed by increasing cwnd
* only every-other RTT during slow start, we increase during
* every RTT during slow start, just like Reno.
* o Largely to allow continuous cwnd growth during slow start,
* we use the rate at which ACKs come back as the "actual"
* rate, rather than the rate at which data is sent.
* o To speed convergence to the right rate, we set the cwnd
* to achieve the right ("actual") rate when we exit slow start.
* o To filter out the noise caused by delayed ACKs, we use the
* minimum RTT sample observed during the last RTT to calculate
* the actual rate.
* o When the sender re-starts from idle, it waits until it has
* received ACKs for an entire flight of new data before making
* a cwnd adjustment decision. The original Vegas implementation
* assumed senders never went idle.
*/
static void vegas_cong_avoid(struct tcp_sock *tp, u32 ack, u32 seq_rtt)
{
/* The key players are v_beg_snd_una and v_beg_snd_nxt.
*
* These are so named because they represent the approximate values
* of snd_una and snd_nxt at the beginning of the current RTT. More
* precisely, they represent the amount of data sent during the RTT.
* At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
* we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding
* bytes of data have been ACKed during the course of the RTT, giving
* an "actual" rate of:
*
* (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration)
*
* Unfortunately, v_beg_snd_una is not exactly equal to snd_una,
* because delayed ACKs can cover more than one segment, so they
* don't line up nicely with the boundaries of RTTs.
*
* Another unfortunate fact of life is that delayed ACKs delay the
* advance of the left edge of our send window, so that the number
* of bytes we send in an RTT is often less than our cwnd will allow.
* So we keep track of our cwnd separately, in v_beg_snd_cwnd.
*/
if (after(ack, tp->vegas.beg_snd_nxt)) {
/* Do the Vegas once-per-RTT cwnd adjustment. */
u32 old_wnd, old_snd_cwnd;
/* Here old_wnd is essentially the window of data that was
* sent during the previous RTT, and has all
* been acknowledged in the course of the RTT that ended
* with the ACK we just received. Likewise, old_snd_cwnd
* is the cwnd during the previous RTT.
*/
old_wnd = (tp->vegas.beg_snd_nxt - tp->vegas.beg_snd_una) /
tp->mss_cache_std;
old_snd_cwnd = tp->vegas.beg_snd_cwnd;
/* Save the extent of the current window so we can use this
* at the end of the next RTT.
*/
tp->vegas.beg_snd_una = tp->vegas.beg_snd_nxt;
tp->vegas.beg_snd_nxt = tp->snd_nxt;
tp->vegas.beg_snd_cwnd = tp->snd_cwnd;
/* Take into account the current RTT sample too, to
* decrease the impact of delayed acks. This double counts
* this sample since we count it for the next window as well,
* but that's not too awful, since we're taking the min,
* rather than averaging.
*/
vegas_rtt_calc(tp, seq_rtt);
/* We do the Vegas calculations only if we got enough RTT
* samples that we can be reasonably sure that we got
* at least one RTT sample that wasn't from a delayed ACK.
* If we only had 2 samples total,
* then that means we're getting only 1 ACK per RTT, which
* means they're almost certainly delayed ACKs.
* If we have 3 samples, we should be OK.
*/
if (tp->vegas.cntRTT <= 2) {
/* We don't have enough RTT samples to do the Vegas
* calculation, so we'll behave like Reno.
*/
if (tp->snd_cwnd > tp->snd_ssthresh)
tp->snd_cwnd++;
} else {
u32 rtt, target_cwnd, diff;
/* We have enough RTT samples, so, using the Vegas
* algorithm, we determine if we should increase or
* decrease cwnd, and by how much.
*/
/* Pluck out the RTT we are using for the Vegas
* calculations. This is the min RTT seen during the
* last RTT. Taking the min filters out the effects
* of delayed ACKs, at the cost of noticing congestion
* a bit later.
*/
rtt = tp->vegas.minRTT;
/* Calculate the cwnd we should have, if we weren't
* going too fast.
*
* This is:
* (actual rate in segments) * baseRTT
* We keep it as a fixed point number with
* V_PARAM_SHIFT bits to the right of the binary point.
*/
target_cwnd = ((old_wnd * tp->vegas.baseRTT)
<< V_PARAM_SHIFT) / rtt;
/* Calculate the difference between the window we had,
* and the window we would like to have. This quantity
* is the "Diff" from the Arizona Vegas papers.
*
* Again, this is a fixed point number with
* V_PARAM_SHIFT bits to the right of the binary
* point.
*/
diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
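/* Illustrative numbers only: with old_wnd = 20 segments,
* baseRTT = 100 and rtt = 125 (same units), target_cwnd is
* 16 << V_PARAM_SHIFT and diff is 4 << V_PARAM_SHIFT, i.e. about
* four "extra" segments are queued in the network.
*/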
if (tp->snd_cwnd < tp->snd_ssthresh) {
/* Slow start. */
if (diff > sysctl_tcp_vegas_gamma) {
/* Going too fast. Time to slow down
* and switch to congestion avoidance.
*/
tp->snd_ssthresh = 2;
/* Set cwnd to match the actual rate
* exactly:
* cwnd = (actual rate) * baseRTT
* Then we add 1 because the integer
* truncation robs us of full link
* utilization.
*/
tp->snd_cwnd = min(tp->snd_cwnd,
(target_cwnd >>
V_PARAM_SHIFT)+1);
}
} else {
/* Congestion avoidance. */
u32 next_snd_cwnd;
/* Figure out where we would like cwnd
* to be.
*/
if (diff > sysctl_tcp_vegas_beta) {
/* The old window was too fast, so
* we slow down.
*/
next_snd_cwnd = old_snd_cwnd - 1;
} else if (diff < sysctl_tcp_vegas_alpha) {
/* We don't have enough extra packets
* in the network, so speed up.
*/
next_snd_cwnd = old_snd_cwnd + 1;
} else {
/* Sending just as fast as we
* should be.
*/
next_snd_cwnd = old_snd_cwnd;
}
/* Adjust cwnd upward or downward, toward the
* desired value.
*/
if (next_snd_cwnd > tp->snd_cwnd)
tp->snd_cwnd++;
else if (next_snd_cwnd < tp->snd_cwnd)
tp->snd_cwnd--;
}
}
/* Wipe the slate clean for the next RTT. */
tp->vegas.cntRTT = 0;
tp->vegas.minRTT = 0x7fffffff;
}
/* The following code is executed for every ack we receive,
* except for conditions checked in should_advance_cwnd()
* before the call to tcp_cong_avoid(). Mainly this means that
* we only execute this code if the ack actually acked some
* data.
*/
/* If we are in slow start, increase our cwnd in response to this ACK.
* (If we are not in slow start then we are in congestion avoidance,
* and adjust our congestion window only once per RTT. See the code
* above.)
*/
if (tp->snd_cwnd <= tp->snd_ssthresh)
tp->snd_cwnd++;
/* to keep cwnd from growing without bound */
tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
/* Make sure that we are never so timid as to reduce our cwnd below
* 2 MSS.
*
* Going below 2 MSS would risk huge delayed ACKs from our receiver.
*/
tp->snd_cwnd = max(tp->snd_cwnd, 2U);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 seq_rtt)
{
if (tcp_vegas_enabled(tp))
vegas_cong_avoid(tp, ack, seq_rtt);
else
reno_cong_avoid(tp);
}
/* Restart timer after forward progress on connection.
* RFC2988 recommends to restart timer to now+rto.
*/
static inline void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
{
if (!tp->packets_out) {
tcp_clear_xmit_timer(sk, TCP_TIME_RETRANS);
} else {
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
}
}
/* There is one downside to this scheme. Although we keep the
* ACK clock ticking, adjusting packet counters and advancing
* congestion window, we do not liberate socket send buffer
* space.
*
* Mucking with skb->truesize and sk->sk_wmem_alloc et al.
* then making a write space wakeup callback is a possible
* future enhancement. WARNING: it is not trivial to make.
*/
static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
__u32 now, __s32 *seq_rtt)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
__u32 seq = tp->snd_una;
__u32 packets_acked;
int acked = 0;
/* If we get here, the whole TSO packet has not been
* acked.
*/
BUG_ON(!after(scb->end_seq, seq));
packets_acked = tcp_skb_pcount(skb);
if (tcp_trim_head(sk, skb, seq - scb->seq))
return 0;
packets_acked -= tcp_skb_pcount(skb);
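/* packets_acked is now the number of sub-segments that tcp_trim_head()
* removed from the front of this TSO frame, i.e. the part now covered
* by snd_una.
*/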
if (packets_acked) {
__u8 sacked = scb->sacked;
acked |= FLAG_DATA_ACKED;
if (sacked) {
if (sacked & TCPCB_RETRANS) {
if (sacked & TCPCB_SACKED_RETRANS)
tp->retrans_out -= packets_acked;
acked |= FLAG_RETRANS_DATA_ACKED;
*seq_rtt = -1;
} else if (*seq_rtt < 0)
*seq_rtt = now - scb->when;
if (sacked & TCPCB_SACKED_ACKED)
tp->sacked_out -= packets_acked;
if (sacked & TCPCB_LOST)
tp->lost_out -= packets_acked;
if (sacked & TCPCB_URG) {
if (tp->urg_mode &&
!before(seq, tp->snd_up))
tp->urg_mode = 0;
}
} else if (*seq_rtt < 0)
*seq_rtt = now - scb->when;
if (tp->fackets_out) {
__u32 dval = min(tp->fackets_out, packets_acked);
tp->fackets_out -= dval;
}
tp->packets_out -= packets_acked;
BUG_ON(tcp_skb_pcount(skb) == 0);
BUG_ON(!before(scb->seq, scb->end_seq));
}
return acked;
}
/* Remove acknowledged frames from the retransmission queue. */
static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
__u32 now = tcp_time_stamp;
int acked = 0;
__s32 seq_rtt = -1;
while ((skb = skb_peek(&sk->sk_write_queue)) &&
skb != sk->sk_send_head) {
struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
__u8 sacked = scb->sacked;
/* If our packet is before the ack sequence we can
* discard it as it's confirmed to have arrived at
* the other end.
*/
if (after(scb->end_seq, tp->snd_una)) {
if (tcp_skb_pcount(skb) > 1)
acked |= tcp_tso_acked(sk, skb,
now, &seq_rtt);
break;
}
/* Initial outgoing SYNs get put onto the write_queue
* just like anything else we transmit. They are not
* true data, and if we misinform our callers that
* this ACK acks real data, we will erroneously exit
* connection startup slow start one packet too
* quickly. This is severely frowned upon behavior.
*/
if (!(scb->flags & TCPCB_FLAG_SYN)) {
acked |= FLAG_DATA_ACKED;
} else {
acked |= FLAG_SYN_ACKED;
tp->retrans_stamp = 0;
}
if (sacked) {
if (sacked & TCPCB_RETRANS) {
if(sacked & TCPCB_SACKED_RETRANS)
tp->retrans_out -= tcp_skb_pcount(skb);
acked |= FLAG_RETRANS_DATA_ACKED;
seq_rtt = -1;
} else if (seq_rtt < 0)
seq_rtt = now - scb->when;
if (sacked & TCPCB_SACKED_ACKED)
tp->sacked_out -= tcp_skb_pcount(skb);
if (sacked & TCPCB_LOST)
tp->lost_out -= tcp_skb_pcount(skb);
if (sacked & TCPCB_URG) {
if (tp->urg_mode &&
!before(scb->end_seq, tp->snd_up))
tp->urg_mode = 0;
}
} else if (seq_rtt < 0)
seq_rtt = now - scb->when;
tcp_dec_pcount_approx(&tp->fackets_out, skb);
tcp_packets_out_dec(tp, skb);
__skb_unlink(skb, skb->list);
sk_stream_free_skb(sk, skb);
}
if (acked&FLAG_ACKED) {
tcp_ack_update_rtt(tp, acked, seq_rtt);
tcp_ack_packets_out(sk, tp);
}
#if FASTRETRANS_DEBUG > 0
BUG_TRAP((int)tp->sacked_out >= 0);
BUG_TRAP((int)tp->lost_out >= 0);
BUG_TRAP((int)tp->retrans_out >= 0);
if (!tp->packets_out && tp->rx_opt.sack_ok) {
if (tp->lost_out) {
printk(KERN_DEBUG "Leak l=%u %d\n",
tp->lost_out, tp->ca_state);
tp->lost_out = 0;
}
if (tp->sacked_out) {
printk(KERN_DEBUG "Leak s=%u %d\n",
tp->sacked_out, tp->ca_state);
tp->sacked_out = 0;
}
if (tp->retrans_out) {
printk(KERN_DEBUG "Leak r=%u %d\n",
tp->retrans_out, tp->ca_state);
tp->retrans_out = 0;
}
}
#endif
*seq_rtt_p = seq_rtt;
return acked;
}
static void tcp_ack_probe(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
/* Is the window now open enough to send the pending segment? */
if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
tp->snd_una + tp->snd_wnd)) {
tp->backoff = 0;
tcp_clear_xmit_timer(sk, TCP_TIME_PROBE0);
/* The socket must be woken up by a subsequent tcp_data_snd_check().
* This function is not meant for arbitrary use!
*/
} else {
tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0,
min(tp->rto << tp->backoff, TCP_RTO_MAX));
}
}
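/* An ACK is "dubious" if it looks like a duplicate, carries a
* congestion signal (FLAG_CA_ALERT), or arrives while we are not in
* the Open congestion-avoidance state.
*/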
static inline int tcp_ack_is_dubious(struct tcp_sock *tp, int flag)
{
return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
tp->ca_state != TCP_CA_Open);
}
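/* cwnd may be raised only if no ECN echo was seen (or we are still
* below ssthresh) and we are in neither Recovery nor CWR state.
*/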
static inline int tcp_may_raise_cwnd(struct tcp_sock *tp, int flag)
{
return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
!((1<<tp->ca_state)&(TCPF_CA_Recovery|TCPF_CA_CWR));
}
/* Check that window update is acceptable.
* The function assumes that snd_una<=ack<=snd_next.
*/
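/* This mirrors the RFC 793 rule: accept the advertised window if the
* ACK advances snd_una, or if it carries a newer sequence than snd_wl1,
* or the same sequence but a larger window.
*/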
static inline int tcp_may_update_window(struct tcp_sock *tp, u32 ack,
u32 ack_seq, u32 nwin)
{
return (after(ack, tp->snd_una) ||
after(ack_seq, tp->snd_wl1) ||
(ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd));
}
/* Update our send window.
*
* The window update algorithm described in RFC793/RFC1122 (and used in
* linux-2.2 and in FreeBSD; NetBSD's is even worse) is wrong.
*/
static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
struct sk_buff *skb, u32 ack, u32 ack_seq)
{
int flag = 0;
u32 nwin = ntohs(skb->h.th->window);
if (likely(!skb->h.th->syn))
nwin <<= tp->rx_opt.snd_wscale;
if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
flag |= FLAG_WIN_UPDATE;
tcp_update_wl(tp, ack, ack_seq);
if (tp->snd_wnd != nwin) {
tp->snd_wnd = nwin;
/* Note, this is the only place where the
* fast path is re-enabled for the sending side.
*/
tcp_fast_path_check(sk, tp);
if (nwin > tp->max_window) {
tp->max_window = nwin;
tcp_sync_mss(sk, tp->pmtu_cookie);
}
}
}
tp->snd_una = ack;
return flag;
}
static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
{
struct tcp_sock *tp = tcp_sk(sk);
tcp_sync_left_out(tp);
if (tp->snd_una == prior_snd_una ||
!before(tp->snd_una, tp->frto_highmark)) {
/* RTO was caused by loss, start retransmitting in
* go-back-N slow start
*/
tcp_enter_frto_loss(sk);
return;
}
if (tp->frto_counter == 1) {
/* First ACK after RTO advances the window: allow two new
* segments out.
*/
tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
} else {
/* Also the second ACK after RTO advances the window.
* The RTO was likely spurious. Reduce cwnd and continue
* in congestion avoidance
*/
tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
tcp_moderate_cwnd(tp);
}
/* F-RTO affects the two new ACKs following the RTO.
* At the latest on the third ACK, TCP behavior is back to normal.
*/
tp->frto_counter = (tp->frto_counter + 1) % 3;
}
/*
* TCP Westwood+
*/
/*
* @init_westwood
* This function initializes the fields used by TCP Westwood+. We have
* no information about RTTmin at this time, so we simply set it to
* TCP_WESTWOOD_INIT_RTT. This value was deliberately chosen to be
* conservative, so that it is sure to be updated consistently as soon
* as possible, which reasonably happens within the first RTT of the
* connection's lifetime.
*/
static void init_westwood(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
tp->westwood.bw_ns_est = 0;
tp->westwood.bw_est = 0;
tp->westwood.accounted = 0;
tp->westwood.cumul_ack = 0;
tp->westwood.rtt_win_sx = tcp_time_stamp;
tp->westwood.rtt = TCP_WESTWOOD_INIT_RTT;
tp->westwood.rtt_min = TCP_WESTWOOD_INIT_RTT;
tp->westwood.snd_una = tp->snd_una;
}
/*
* @westwood_do_filter
* Low-pass filter. Implemented using constant coefficients:
* new = (7 * old + sample) / 8.
*/
static inline __u32 westwood_do_filter(__u32 a, __u32 b)
{
return (((7 * a) + b) >> 3);
}
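/* westwood_filter() feeds the latest bandwidth sample, bk/delta (bytes
* acked during the just-closed RTT window over its length in jiffies),
* through two cascaded low-pass stages: bw_ns_est is the noisier
* first-stage estimate, bw_est the further smoothed one.
*/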
static void westwood_filter(struct sock *sk, __u32 delta)
{
struct tcp_sock *tp = tcp_sk(sk);
tp->westwood.bw_ns_est =
westwood_do_filter(tp->westwood.bw_ns_est,
tp->westwood.bk / delta);
tp->westwood.bw_est =
westwood_do_filter(tp->westwood.bw_est,
tp->westwood.bw_ns_est);
}
/*
* @westwood_update_rttmin
* It is used to update RTTmin. In this case we MUST NOT use
* WESTWOOD_RTT_MIN minimum bound since we could be on a LAN!
*/
static inline __u32 westwood_update_rttmin(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
__u32 rttmin = tp->westwood.rtt_min;
if (tp->westwood.rtt != 0 &&
(tp->westwood.rtt < tp->westwood.rtt_min || !rttmin))
rttmin = tp->westwood.rtt;
return rttmin;
}
/*
* @westwood_acked
* Evaluate increases for dk.
*/
static inline __u32 westwood_acked(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
return tp->snd_una - tp->westwood.snd_una;
}
/*
* @westwood_new_window
* It evaluates if we are receiving data inside the same RTT window as
* when we started.
* Return value:
* It returns 0 if we are still evaluating samples in the same RTT
* window, 1 if the sample has to be considered in the next window.
*/
static int westwood_new_window(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
__u32 left_bound;
__u32 rtt;
int ret = 0;
left_bound = tp->westwood.rtt_win_sx;
rtt = max(tp->westwood.rtt, (u32) TCP_WESTWOOD_RTT_MIN);
/*
* An RTT window has passed. Be careful: if the RTT is less than
* 50ms we don't filter, but keep 'building the sample'.
* This minimum limit was chosen because estimating over such small
* time intervals is better avoided.
* Obviously, on a LAN we will reasonably always have
* right_bound = left_bound + WESTWOOD_RTT_MIN
*/
if ((left_bound + rtt) < tcp_time_stamp)
ret = 1;
return ret;
}
/*
* @westwood_update_window
* It updates the RTT evaluation window if it is the right moment to
* do so. If so, it calls the filter to evaluate bandwidth.
*/
static void __westwood_update_window(struct sock *sk, __u32 now)
{
struct tcp_sock *tp = tcp_sk(sk);
__u32 delta = now - tp->westwood.rtt_win_sx;
if (delta) {
if (tp->westwood.rtt)
westwood_filter(sk, delta);
tp->westwood.bk = 0;
tp->westwood.rtt_win_sx = tcp_time_stamp;
}
}
static void westwood_update_window(struct sock *sk, __u32 now)
{
if (westwood_new_window(sk))
__westwood_update_window(sk, now);
}
/*
* @__tcp_westwood_fast_bw
* It is called when we are on the fast path, in particular when
* header prediction is successful. In that case the update is
* straightforward and doesn't need any particular care.
*/
static void __tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
westwood_update_window(sk, tcp_time_stamp);
tp->westwood.bk += westwood_acked(sk);
tp->westwood.snd_una = tp->snd_una;
tp->westwood.rtt_min = westwood_update_rttmin(sk);
}
static inline void tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
{
if (tcp_is_westwood(tcp_sk(sk)))
__tcp_westwood_fast_bw(sk, skb);
}
/*
* @westwood_dupack_update
* It updates accounted and cumul_ack when receiving a dupack.
*/
static void westwood_dupack_update(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
tp->westwood.accounted += tp->mss_cache_std;
tp->westwood.cumul_ack = tp->mss_cache_std;
}
static inline int westwood_may_change_cumul(struct tcp_sock *tp)
{
return (tp->westwood.cumul_ack > tp->mss_cache_std);
}
static inline void westwood_partial_update(struct tcp_sock *tp)
{
tp->westwood.accounted -= tp->westwood.cumul_ack;
tp->westwood.cumul_ack = tp->mss_cache_std;
}
static inline void westwood_complete_update(struct tcp_sock *tp)
{
tp->westwood.cumul_ack -= tp->westwood.accounted;
tp->westwood.accounted = 0;
}
/*
* @westwood_acked_count
* This function computes cumul_ack, used to evaluate dk, in the case
* of delayed or partial acks.
*/
static inline __u32 westwood_acked_count(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
tp->westwood.cumul_ack = westwood_acked(sk);
/* If cumul_ack is 0 this is a dupack since it's not moving
* tp->snd_una.
*/
if (!(tp->westwood.cumul_ack))
westwood_dupack_update(sk);
if (westwood_may_change_cumul(tp)) {
/* Partial or delayed ack */
if (tp->westwood.accounted >= tp->westwood.cumul_ack)
westwood_partial_update(tp);
else
westwood_complete_update(tp);
}
tp->westwood.snd_una = tp->snd_una;
return tp->westwood.cumul_ack;
}
/*
* @__tcp_westwood_slow_bw
* It is called when something may be going wrong, even if there is
* no real problem! In fact, a simple delayed packet may trigger a
* dupack. But we need to be careful in such a case.
*/
static void __tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
westwood_update_window(sk, tcp_time_stamp);
tp->westwood.bk += westwood_acked_count(sk);
tp->westwood.rtt_min = westwood_update_rttmin(sk);
}
static inline void tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
{
if (tcp_is_westwood(tcp_sk(sk)))
__tcp_westwood_slow_bw(sk, skb);
}
/* This routine deals with incoming acks, but not outgoing ones. */
static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 prior_snd_una = tp->snd_una;
u32 ack_seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
u32 prior_in_flight;
s32 seq_rtt;
int prior_packets;
/* If the ack is newer than sent or older than previous acks
* then we can probably ignore it.
*/
if (after(ack, tp->snd_nxt))
goto uninteresting_ack;
if (before(ack, prior_snd_una))
goto old_ack;
if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
/* Window is constant, pure forward advance.
* No more checks are required.
* Note, we use the fact that SND.UNA>=SND.WL2.
*/
tcp_update_wl(tp, ack, ack_seq);
tp->snd_una = ack;
tcp_westwood_fast_bw(sk, skb);
flag |= FLAG_WIN_UPDATE;
NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
} else {
if (ack_seq != TCP_SKB_CB(skb)->end_seq)
flag |= FLAG_DATA;
else
NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq);
if (TCP_SKB_CB(skb)->sacked)
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
flag |= FLAG_ECE;
tcp_westwood_slow_bw(sk,skb);
}
/* We passed data and got it acked, remove any soft error
* log. Something worked...
*/
sk->sk_err_soft = 0;
tp->rcv_tstamp = tcp_time_stamp;
prior_packets = tp->packets_out;
if (!prior_packets)
goto no_queue;
prior_in_flight = tcp_packets_in_flight(tp);
/* See if we can take anything off of the retransmit queue. */
flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
if (tp->frto_counter)
tcp_process_frto(sk, prior_snd_una);
if (tcp_ack_is_dubious(tp, flag)) {
/* Advance CWND, if state allows this. */
if ((flag & FLAG_DATA_ACKED) &&
(tcp_vegas_enabled(tp) || prior_in_flight >= tp->snd_cwnd) &&
tcp_may_raise_cwnd(tp, flag))
tcp_cong_avoid(tp, ack, seq_rtt);
tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
} else {
if ((flag & FLAG_DATA_ACKED) &&
(tcp_vegas_enabled(tp) || prior_in_flight >= tp->snd_cwnd))
tcp_cong_avoid(tp, ack, seq_rtt);
}
if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
dst_confirm(sk->sk_dst_cache);
return 1;
no_queue:
tp->probes_out = 0;
/* If this ack opens up a zero window, clear backoff. It was
* being used to time the probes, and is probably far higher than
* it needs to be for normal retransmission.
*/
if (sk->sk_send_head)
tcp_ack_probe(sk);
return 1;
old_ack:
if (TCP_SKB_CB(skb)->sacked)
tcp_sacktag_write_queue(sk, skb, prior_snd_una);
uninteresting_ack:
SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
return 0;
}
/* Look for tcp options. Normally only called on SYN and SYNACK packets.
* But, this can also be called on packets in the established flow when
* the fast version below fails.
*/
void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab)
{
unsigned char *ptr;
struct tcphdr *th = skb->h.th;
int length=(th->doff*4)-sizeof(struct tcphdr);
ptr = (unsigned char *)(th + 1);
opt_rx->saw_tstamp = 0;
while(length>0) {
int opcode=*ptr++;
int opsize;
switch (opcode) {
case TCPOPT_EOL: