quiche/recovery/congestion/cubic.rs

// Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright notice,
//       this list of conditions and the following disclaimer.
//
//     * Redistributions in binary form must reproduce the above copyright
//       notice, this list of conditions and the following disclaimer in the
//       documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

//! CUBIC Congestion Control
//!
//! This implementation is based on the following draft:
//! <https://tools.ietf.org/html/draft-ietf-tcpm-rfc8312bis-02>
//!
//! Note that Slow Start can use HyStart++ when enabled.

use std::cmp;

use std::time::Duration;
use std::time::Instant;

use super::rtt::RttStats;
use super::Acked;
use super::Sent;

use super::reno;
use super::Congestion;
use super::CongestionControlOps;
use crate::recovery::MINIMUM_WINDOW_PACKETS;

pub(crate) static CUBIC: CongestionControlOps = CongestionControlOps {
    on_init,
    on_packet_sent,
    on_packets_acked,
    congestion_event,
    checkpoint,
    rollback,
    has_custom_pacing,
    #[cfg(feature = "qlog")]
    state_str,
    debug_fmt,
};

/// CUBIC Constants.
///
/// These are the recommended values in RFC8312.
const BETA_CUBIC: f64 = 0.7;

const C: f64 = 0.4;

/// Threshold for rolling back state, as a percentage of lost packets
/// relative to cwnd.
const ROLLBACK_THRESHOLD_PERCENT: usize = 20;

/// Minimum threshold for rolling back state, as a number of packets.
const MIN_ROLLBACK_THRESHOLD: usize = 2;

/// Default value of alpha_aimd at the beginning of congestion avoidance.
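/// With BETA_CUBIC = 0.7 this evaluates to 3 * 0.3 / 1.7, roughly 0.53.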
const ALPHA_AIMD: f64 = 3.0 * (1.0 - BETA_CUBIC) / (1.0 + BETA_CUBIC);

/// CUBIC State Variables.
///
/// We need to keep those variables across the connection.
/// k, w_max, w_est are described in the RFC.
#[derive(Debug, Default)]
pub struct State {
    k: f64,

    w_max: f64,

    w_est: f64,

    alpha_aimd: f64,

    // Used in CUBIC fix (see on_packet_sent())
    last_sent_time: Option<Instant>,

    // Store cwnd increment during congestion avoidance.
    cwnd_inc: usize,

    // CUBIC state checkpoint preceding the last congestion event.
    prior: PriorState,
}

/// Stores the CUBIC state from before the last congestion event.
///
/// <https://tools.ietf.org/id/draft-ietf-tcpm-rfc8312bis-00.html#section-4.9>
#[derive(Debug, Default)]
struct PriorState {
    congestion_window: usize,

    ssthresh: usize,

    w_max: f64,

    k: f64,

    epoch_start: Option<Instant>,

    lost_count: usize,
}

/// CUBIC Functions.
///
/// Note that these calculations are based on a count of cwnd as bytes,
/// not packets.
/// The unit of t (duration) and RTT is seconds (f64).
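///
/// Worked example (illustrative only): with w_max at 100 packets and cwnd
/// reduced to 70 packets after a loss, Eq. 2 gives
/// K = cbrt((100 - 70) / 0.4) = cbrt(75), roughly 4.2 seconds. At t = K the
/// cubic term vanishes, so W_cubic(K) = w_max: the window regrows to w_max
/// about 4.2 seconds after the congestion event if no further losses occur.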
impl State {
    // K = cubic_root ((w_max - cwnd) / C) (Eq. 2)
    fn cubic_k(&self, cwnd: usize, max_datagram_size: usize) -> f64 {
        let w_max = self.w_max / max_datagram_size as f64;
        let cwnd = cwnd as f64 / max_datagram_size as f64;

        libm::cbrt((w_max - cwnd) / C)
    }

    // W_cubic(t) = C * (t - K)^3 + w_max (Eq. 1)
    fn w_cubic(&self, t: Duration, max_datagram_size: usize) -> f64 {
        let w_max = self.w_max / max_datagram_size as f64;

        (C * (t.as_secs_f64() - self.k).powi(3) + w_max) *
            max_datagram_size as f64
    }

    // W_est = W_est + alpha_aimd * (segments_acked / cwnd)  (Eq. 4)
    fn w_est_inc(
        &self, acked: usize, cwnd: usize, max_datagram_size: usize,
    ) -> f64 {
        self.alpha_aimd * (acked as f64 / cwnd as f64) * max_datagram_size as f64
    }
}

fn on_init(_r: &mut Congestion) {}

fn on_packet_sent(
    r: &mut Congestion, sent_bytes: usize, bytes_in_flight: usize, now: Instant,
) {
    // Fix for the first transmit when no packets are in flight. See
    // https://github.com/torvalds/linux/commit/30927520dbae297182990bb21d08762bcc35ce1d
    let cubic = &mut r.cubic_state;

    if let Some(last_sent_time) = cubic.last_sent_time {
        if bytes_in_flight == 0 {
            let delta = now - last_sent_time;

            // We were application limited (idle) for a while.
            // Shift the epoch start to keep cwnd growth on the cubic curve.
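            //
            // For example, after a 2 second idle period the recovery start
            // time is moved 2 seconds forward, so the elapsed time t used by
            // W_cubic(t) does not include the idle gap.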
            if let Some(recovery_start_time) = r.congestion_recovery_start_time {
                if delta.as_nanos() > 0 {
                    r.congestion_recovery_start_time =
                        Some(recovery_start_time + delta);
                }
            }
        }
    }

    cubic.last_sent_time = Some(now);

    reno::on_packet_sent(r, sent_bytes, bytes_in_flight, now);
}

fn on_packets_acked(
    r: &mut Congestion, bytes_in_flight: usize, packets: &mut Vec<Acked>,
    now: Instant, rtt_stats: &RttStats,
) {
    for pkt in packets.drain(..) {
        on_packet_acked(r, bytes_in_flight, &pkt, now, rtt_stats);
    }
}

fn on_packet_acked(
    r: &mut Congestion, bytes_in_flight: usize, packet: &Acked, now: Instant,
    rtt_stats: &RttStats,
) {
    let in_congestion_recovery = r.in_congestion_recovery(packet.time_sent);

    if in_congestion_recovery {
        r.prr.on_packet_acked(
            packet.size,
            bytes_in_flight,
            r.ssthresh.get(),
            r.max_datagram_size,
        );

        return;
    }

    if r.app_limited {
        return;
    }

    // Detecting spurious congestion events.
    // <https://tools.ietf.org/id/draft-ietf-tcpm-rfc8312bis-00.html#section-4.9>
    //
    // When a recovery episode ends having lost only a few packets (fewer than
    // cwnd / mss * ROLLBACK_THRESHOLD_PERCENT / 100), the congestion event is
    // considered spurious and the previous state is restored.
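    //
    // For example, with a cwnd of 100 packets the rollback threshold is
    // max(100 * 20 / 100, 2) = 20 packets: if fewer than 20 packets were lost
    // since the last checkpoint(), the state saved there is restored.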
    if r.congestion_recovery_start_time.is_some() {
        let new_lost = r.lost_count - r.cubic_state.prior.lost_count;

        let rollback_threshold = (r.congestion_window / r.max_datagram_size) *
            ROLLBACK_THRESHOLD_PERCENT /
            100;

        let rollback_threshold = rollback_threshold.max(MIN_ROLLBACK_THRESHOLD);

        if new_lost < rollback_threshold {
            let did_rollback = rollback(r);
            if did_rollback {
                return;
            }
        }
    }

    if r.congestion_window < r.ssthresh.get() {
        // In Slow start, bytes_acked_sl is used for counting
        // acknowledged bytes.
        r.bytes_acked_sl += packet.size;

        if r.bytes_acked_sl >= r.max_datagram_size {
            if r.hystart.in_css() {
                r.congestion_window +=
                    r.hystart.css_cwnd_inc(r.max_datagram_size);
            } else {
                r.congestion_window += r.max_datagram_size;
            }

            r.bytes_acked_sl -= r.max_datagram_size;
        }

        if r.hystart.on_packet_acked(packet, rtt_stats.latest_rtt, now) {
            // Exit to congestion avoidance if CSS ends.
            r.ssthresh.update(r.congestion_window, true);
        }
    } else {
        // Congestion avoidance.
        let ca_start_time;

        // In CSS, use css_start_time instead of congestion_recovery_start_time.
        if r.hystart.in_css() {
            ca_start_time = r.hystart.css_start_time().unwrap();

            // Reset w_max and k when CSS starts.
            if r.cubic_state.w_max == 0.0 {
                r.cubic_state.w_max = r.congestion_window as f64;
                r.cubic_state.k = 0.0;

                r.cubic_state.w_est = r.congestion_window as f64;
                r.cubic_state.alpha_aimd = ALPHA_AIMD;
            }
        } else {
            match r.congestion_recovery_start_time {
                Some(t) => ca_start_time = t,
                None => {
                    // When we get here without congestion_event() having been
                    // triggered, initialize congestion_recovery_start_time,
                    // w_max and k.
                    ca_start_time = now;
                    r.congestion_recovery_start_time = Some(now);

                    r.cubic_state.w_max = r.congestion_window as f64;
                    r.cubic_state.k = 0.0;

                    r.cubic_state.w_est = r.congestion_window as f64;
                    r.cubic_state.alpha_aimd = ALPHA_AIMD;
                },
            }
        }

        let t = now.saturating_duration_since(ca_start_time);

        // target = w_cubic(t + rtt)
        let target = r
            .cubic_state
            .w_cubic(t + *rtt_stats.min_rtt, r.max_datagram_size);

        // Clip target to the range [cwnd, 1.5 * cwnd].
        let target = f64::max(target, r.congestion_window as f64);
        let target = f64::min(target, r.congestion_window as f64 * 1.5);

        // Update w_est.
        let w_est_inc = r.cubic_state.w_est_inc(
            packet.size,
            r.congestion_window,
            r.max_datagram_size,
        );
        r.cubic_state.w_est += w_est_inc;

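        // Per the rfc8312bis draft, once w_est has caught up with w_max,
        // alpha_aimd switches from roughly 0.53 to 1.0, after which w_est
        // grows like standard Reno (about one MSS per RTT).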
        if r.cubic_state.w_est >= r.cubic_state.w_max {
            r.cubic_state.alpha_aimd = 1.0;
        }

        let mut cubic_cwnd = r.congestion_window;

        if r.cubic_state.w_cubic(t, r.max_datagram_size) < r.cubic_state.w_est {
            // AIMD-friendly region (W_cubic(t) < W_est).
            cubic_cwnd = cmp::max(cubic_cwnd, r.cubic_state.w_est as usize);
        } else {
            // The concave and convex regions use the same increment.
            let cubic_inc =
                r.max_datagram_size * (target as usize - cubic_cwnd) / cubic_cwnd;

            cubic_cwnd += cubic_inc;
        }

        // Accumulate the increment, and increase cwnd by one MSS once a
        // full MSS worth of increment has accumulated.
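        //
        // For example, if target exceeds a cwnd of 100 packets by 10 packets,
        // each ack adds max_datagram_size / 10 to cwnd_inc, so cwnd grows by
        // one MSS roughly every 10 acks.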
        r.cubic_state.cwnd_inc += cubic_cwnd - r.congestion_window;

        if r.cubic_state.cwnd_inc >= r.max_datagram_size {
            r.congestion_window += r.max_datagram_size;
            r.cubic_state.cwnd_inc -= r.max_datagram_size;
        }
    }
}

fn congestion_event(
    r: &mut Congestion, bytes_in_flight: usize, _lost_bytes: usize,
    largest_lost_pkt: &Sent, now: Instant,
) {
    let time_sent = largest_lost_pkt.time_sent;
    let in_congestion_recovery = r.in_congestion_recovery(time_sent);

    // Start a new congestion event if the packet was sent after the
    // start of the previous congestion recovery period.
    if !in_congestion_recovery {
        r.congestion_recovery_start_time = Some(now);

        // Fast convergence
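        //
        // For example, if w_max was 120 packets but cwnd has only recovered
        // to 100 packets when the next loss occurs, w_max is set to
        // 100 * (1 + 0.7) / 2 = 85 packets rather than 100, so the flow
        // releases bandwidth more quickly (the fast convergence heuristic
        // described in the RFC).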
        if (r.congestion_window as f64) < r.cubic_state.w_max {
            r.cubic_state.w_max =
                r.congestion_window as f64 * (1.0 + BETA_CUBIC) / 2.0;
        } else {
            r.cubic_state.w_max = r.congestion_window as f64;
        }

        let ssthresh = (r.congestion_window as f64 * BETA_CUBIC) as usize;
        let ssthresh =
            cmp::max(ssthresh, r.max_datagram_size * MINIMUM_WINDOW_PACKETS);
        r.ssthresh.update(ssthresh, r.hystart.in_css());
        r.congestion_window = ssthresh;

        r.cubic_state.k = if r.cubic_state.w_max < r.congestion_window as f64 {
            0.0
        } else {
            r.cubic_state
                .cubic_k(r.congestion_window, r.max_datagram_size)
        };

        r.cubic_state.cwnd_inc =
            (r.cubic_state.cwnd_inc as f64 * BETA_CUBIC) as usize;

        r.cubic_state.w_est = r.congestion_window as f64;
        r.cubic_state.alpha_aimd = ALPHA_AIMD;

        if r.hystart.in_css() {
            r.hystart.congestion_event();
        }

        r.prr.congestion_event(bytes_in_flight);
    }
}

fn checkpoint(r: &mut Congestion) {
    r.cubic_state.prior.congestion_window = r.congestion_window;
    r.cubic_state.prior.ssthresh = r.ssthresh.get();
    r.cubic_state.prior.w_max = r.cubic_state.w_max;
    r.cubic_state.prior.k = r.cubic_state.k;
    r.cubic_state.prior.epoch_start = r.congestion_recovery_start_time;
    r.cubic_state.prior.lost_count = r.lost_count;
}

fn rollback(r: &mut Congestion) -> bool {
    // Don't go back to slow start.
    if r.cubic_state.prior.congestion_window < r.cubic_state.prior.ssthresh {
        return false;
    }

    if r.congestion_window >= r.cubic_state.prior.congestion_window {
        return false;
    }

    r.congestion_window = r.cubic_state.prior.congestion_window;
    r.ssthresh.update(r.cubic_state.prior.ssthresh, false);
    r.cubic_state.w_max = r.cubic_state.prior.w_max;
    r.cubic_state.k = r.cubic_state.prior.k;
    r.congestion_recovery_start_time = r.cubic_state.prior.epoch_start;

    true
}

fn has_custom_pacing() -> bool {
    false
}

#[cfg(feature = "qlog")]
fn state_str(r: &Congestion, now: Instant) -> &'static str {
    reno::state_str(r, now)
}

fn debug_fmt(r: &Congestion, f: &mut std::fmt::Formatter) -> std::fmt::Result {
    write!(
        f,
        "cubic={{ k={} w_max={} }} ",
        r.cubic_state.k, r.cubic_state.w_max
    )
}

#[cfg(test)]
mod tests {
    use super::*;

    use crate::CongestionControlAlgorithm;

    use crate::recovery::congestion::hystart;
    use crate::recovery::congestion::recovery::LegacyRecovery;
    use crate::recovery::congestion::test_sender::TestSender;
    use crate::recovery::RecoveryOps;

    fn test_sender() -> TestSender {
        TestSender::new(CongestionControlAlgorithm::CUBIC, false)
    }

    fn hystart_test_sender() -> TestSender {
        TestSender::new(CongestionControlAlgorithm::CUBIC, true)
    }

    #[test]
    fn cubic_init() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(CongestionControlAlgorithm::CUBIC);

        let r = LegacyRecovery::new(&cfg);

        assert!(r.cwnd() > 0);
        assert_eq!(r.bytes_in_flight(), 0);
    }

    #[test]
    fn cubic_slow_start() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        // Send initcwnd worth of full-MSS packets so the sender is no
        // longer app limited.
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }

        let cwnd_prev = sender.congestion_window;

        sender.ack_n_packets(1, size);

        // Check that cwnd increased by the packet size (slow start).
        assert_eq!(sender.congestion_window, cwnd_prev + size);
    }

    #[test]
    fn cubic_slow_start_multi_acks() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        // Send initcwnd worth of full-MSS packets so the sender is no
        // longer app limited.
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }

        let cwnd_prev = sender.congestion_window;

        sender.ack_n_packets(3, size);

        // Acked 3 packets.
        assert_eq!(sender.congestion_window, cwnd_prev + size * 3);
    }

    #[test]
    fn cubic_congestion_event() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        sender.send_packet(size);

        let cwnd_prev = sender.congestion_window;

        sender.lose_n_packets(1, size, None);

        // In CUBIC, after a congestion event, cwnd is reduced by a factor
        // of BETA_CUBIC.
        assert_eq!(
            cwnd_prev as f64 * BETA_CUBIC,
            sender.congestion_window as f64
        );
    }

    #[test]
    fn cubic_congestion_avoidance() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        let prev_cwnd = sender.congestion_window;

        // Send initcwnd worth of full-MSS packets so the sender is no
        // longer app limited.
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }

        // Trigger a congestion event to update ssthresh.
        sender.lose_n_packets(1, size, None);

        // After the congestion event, cwnd will be reduced.
        let cur_cwnd = (prev_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(sender.congestion_window, cur_cwnd);

        // Shift current time by 1 RTT.
        let rtt = Duration::from_millis(100);
        sender.update_rtt(rtt);
        // Exit from recovery.
        sender.advance_time(rtt);

        // During congestion avoidance, it takes 5 ACKs to
        // increase cwnd by 1 MSS.
        for _ in 0..5 {
            sender.ack_n_packets(1, size);
            sender.advance_time(rtt);
        }

        assert_eq!(sender.congestion_window, cur_cwnd + size);
    }

    #[test]
    fn cubic_hystart_css_to_ss() {
        let mut sender = hystart_test_sender();
        let size = sender.max_datagram_size;

        // 1st round.
        let n_rtt_sample = hystart::N_RTT_SAMPLE;

        let rtt_1st = Duration::from_millis(50);

        let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
        sender.hystart.start_round(next_rnd);
        // Send 1st round packets.
        for _ in 0..n_rtt_sample {
            sender.send_packet(size);
        }
        sender.update_app_limited(false);

        // Receiving Acks.
        sender.advance_time(rtt_1st);
        sender.update_rtt(rtt_1st);
        sender.ack_n_packets(n_rtt_sample, size);

        // Not in CSS yet.
        assert!(sender.hystart.css_start_time().is_none());

        // 2nd round.
        let mut rtt_2nd = Duration::from_millis(100);

        sender.advance_time(rtt_2nd);

        let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
        sender.hystart.start_round(next_rnd);
        // Send 2nd round packets.
        for _ in 0..n_rtt_sample {
            sender.send_packet(size);
        }
        sender.update_app_limited(false);

        // Receiving Acks.
        // The last ack will cause an exit to CSS.
        let mut cwnd_prev = sender.congestion_window();

        for _ in 0..n_rtt_sample {
            cwnd_prev = sender.congestion_window();
            sender.update_rtt(rtt_2nd);
            sender.ack_n_packets(1, size);
            // Keep increasing RTT so that hystart exits to CSS.
            rtt_2nd += rtt_2nd.saturating_add(Duration::from_millis(4));
        }

        // Now we are in CSS.
        assert!(sender.hystart.css_start_time().is_some());
        assert_eq!(sender.congestion_window(), cwnd_prev + size);

        // 3rd round, whose RTT is lower than the previous round's, to
        // trigger a return to Slow Start.
        let rtt_3rd = Duration::from_millis(80);
        sender.advance_time(rtt_3rd);
        cwnd_prev = sender.congestion_window();

        let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
        sender.hystart.start_round(next_rnd);
        // Send 3rd round packets.
        for _ in 0..n_rtt_sample {
            sender.send_packet(size);
        }
        sender.update_app_limited(false);

        // Receiving Acks.
        // The last ack will cause a return to Slow Start.
        sender.update_rtt(rtt_3rd);
        sender.ack_n_packets(n_rtt_sample, size);

        // Now we are back in Slow Start.
        assert!(sender.hystart.css_start_time().is_none());
        assert_eq!(
            sender.congestion_window(),
            cwnd_prev +
                size / hystart::CSS_GROWTH_DIVISOR * hystart::N_RTT_SAMPLE
        );
    }

    #[test]
    fn cubic_hystart_css_to_ca() {
        let mut sender = hystart_test_sender();
        let size = sender.max_datagram_size;

        // 1st round.
        let n_rtt_sample = hystart::N_RTT_SAMPLE;

        let rtt_1st = Duration::from_millis(50);

        let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
        sender.hystart.start_round(next_rnd);
        // Send 1st round packets.
        for _ in 0..n_rtt_sample {
            sender.send_packet(size);
        }
        sender.update_app_limited(false);

        // Receiving Acks.
        sender.advance_time(rtt_1st);
        sender.update_rtt(rtt_1st);
        sender.ack_n_packets(n_rtt_sample, size);

        // Not in CSS yet.
        assert!(sender.hystart.css_start_time().is_none());

        // 2nd round.
        let mut rtt_2nd = Duration::from_millis(100);
        sender.advance_time(rtt_2nd);
        // Send 2nd round packets.
        let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
        sender.hystart.start_round(next_rnd);
        for _ in 0..n_rtt_sample {
            sender.send_packet(size);
        }
        sender.update_app_limited(false);

        // Receiving Acks.
        // The last ack will cause an exit to CSS.
        let mut cwnd_prev = sender.congestion_window();

        for _ in 0..n_rtt_sample {
            cwnd_prev = sender.congestion_window();
            sender.update_rtt(rtt_2nd);
            sender.ack_n_packets(1, size);
            // Keep increasing RTT so that hystart exits to CSS.
            rtt_2nd += rtt_2nd.saturating_add(Duration::from_millis(4));
        }

        // Now we are in CSS.
        assert!(sender.hystart.css_start_time().is_some());
        assert_eq!(sender.congestion_window(), cwnd_prev + size);

        // Run CSS_ROUNDS (5) rounds in CSS to exit to congestion avoidance.
        let rtt_css = Duration::from_millis(100);
        sender.advance_time(rtt_css);

        for _ in 0..hystart::CSS_ROUNDS {
            // Send a round of packets.
            let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
            sender.hystart.start_round(next_rnd);
            for _ in 0..n_rtt_sample {
                sender.send_packet(size);
            }
            sender.update_app_limited(false);

            // Receiving Acks.
            sender.update_rtt(rtt_css);
            sender.ack_n_packets(n_rtt_sample, size);
        }
        // Now we are in congestion avoidance.
        assert_eq!(sender.congestion_window(), sender.ssthresh.get());
    }

    #[test]
    fn cubic_spurious_congestion_event() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        let prev_cwnd = sender.congestion_window();

        // Send initcwnd worth of full-MSS packets so the sender is no
        // longer app limited.
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }
        sender.lose_n_packets(1, size, None);

        // After the congestion event, cwnd will be reduced.
        let cur_cwnd = (prev_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(sender.congestion_window(), cur_cwnd);

        // Ack more than cwnd bytes with rtt=100ms.
        let rtt = Duration::from_millis(100);
        sender.update_rtt(rtt);

        let acked = Acked {
            pkt_num: 0,
            // To exit from recovery
            time_sent: sender.time + rtt,
            size,
            delivered: 0,
            delivered_time: sender.time,
            first_sent_time: sender.time,
            is_app_limited: false,
            rtt: Duration::ZERO,
        };

        // Trigger detection of a spurious congestion event.
        sender.inject_ack(acked, sender.time + rtt + Duration::from_millis(5));

        // This is from slow start, so there is no rollback.
        assert_eq!(sender.congestion_window(), cur_cwnd);

        sender.advance_time(rtt);

        let prev_cwnd = sender.congestion_window();

        sender.lose_n_packets(1, size, Some(sender.time));

        // After the congestion event, cwnd will be reduced.
        let cur_cwnd = (cur_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(sender.congestion_window(), cur_cwnd);

        sender.advance_time(rtt + Duration::from_millis(5));

        let acked = Acked {
            pkt_num: 0,
            // To exit from recovery
            time_sent: sender.time + rtt,
            size,
            delivered: 0,
            delivered_time: sender.time,
            first_sent_time: sender.time,
            is_app_limited: false,
            rtt: Duration::ZERO,
        };

        // Trigger detection of a spurious congestion event.
        sender.inject_ack(acked, sender.time + rtt + Duration::from_millis(5));

        // cwnd is rolled back to the previous value.
        assert_eq!(sender.congestion_window(), prev_cwnd);
    }

    #[test]
    fn cubic_fast_convergence() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        let prev_cwnd = sender.congestion_window;

        // Send initcwnd worth of full-MSS packets so the sender is no
        // longer app limited.
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }

        // Trigger a congestion event to update ssthresh.
        sender.lose_n_packets(1, size, None);

        // After the 1st congestion event, cwnd will be reduced.
        let cur_cwnd = (prev_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(sender.congestion_window, cur_cwnd);

        // Shift current time by 1 RTT.
        let rtt = Duration::from_millis(100);
        sender.update_rtt(rtt);
        // Exit from recovery.
        sender.advance_time(rtt);

        // During congestion avoidance, it takes 5 ACKs to
        // increase cwnd by 1 MSS.
        for _ in 0..5 {
            sender.ack_n_packets(1, size);
            sender.advance_time(rtt);
        }

        assert_eq!(sender.congestion_window, cur_cwnd + size);

        let prev_cwnd = sender.congestion_window;

        // Fast convergence: a 2nd congestion event occurs while cwnd has
        // not fully recovered to w_max, so w_max is reduced further.
        sender.lose_n_packets(1, size, None);

        // After the 2nd congestion event, cwnd will be reduced.
        let cur_cwnd = (prev_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(sender.congestion_window, cur_cwnd);

        // w_max is reduced further, to (1 + BETA_CUBIC) / 2 times prev_cwnd
        // rather than prev_cwnd itself.
        assert_eq!(
            sender.cubic_state.w_max,
            prev_cwnd as f64 * (1.0 + BETA_CUBIC) / 2.0
        );
    }
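
    // An extra illustrative check (not part of the original test suite):
    // verify the worked example for Eq. 2, K = cbrt((w_max - cwnd) / C),
    // using a hypothetical 1200-byte datagram size.
    #[test]
    fn cubic_k_worked_example() {
        let max_datagram_size = 1200;

        // w_max at 100 packets, everything else left at its default.
        let state = State {
            w_max: 100.0 * max_datagram_size as f64,
            ..Default::default()
        };

        // cwnd reduced to 70 packets: K = cbrt((100 - 70) / 0.4) = cbrt(75),
        // which is roughly 4.217 seconds.
        let k = state.cubic_k(70 * max_datagram_size, max_datagram_size);

        assert!((k - 4.217).abs() < 0.01);
    }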
}