// quiche/recovery/congestion/cubic.rs

// Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright notice,
//       this list of conditions and the following disclaimer.
//
//     * Redistributions in binary form must reproduce the above copyright
//       notice, this list of conditions and the following disclaimer in the
//       documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

//! CUBIC Congestion Control
//!
//! This implementation is based on the following draft:
//! <https://tools.ietf.org/html/draft-ietf-tcpm-rfc8312bis-02>
//!
//! Note that Slow Start can use HyStart++ when enabled.

34use std::cmp;
35
36use std::time::Duration;
37use std::time::Instant;
38
39use super::rtt::RttStats;
40use super::Acked;
41use super::Sent;
42
43use super::reno;
44use super::Congestion;
45use super::CongestionControlOps;
46use crate::recovery::MINIMUM_WINDOW_PACKETS;
47
/// Function-pointer table registering CUBIC as a congestion control
/// implementation behind the generic `CongestionControlOps` interface.
pub(crate) static CUBIC: CongestionControlOps = CongestionControlOps {
    on_init,
    on_packet_sent,
    on_packets_acked,
    congestion_event,
    checkpoint,
    rollback,
    has_custom_pacing,
    debug_fmt,
};
58
/// CUBIC Constants.
///
/// These are recommended value in RFC8312.
const BETA_CUBIC: f64 = 0.7;

/// CUBIC scaling constant (the `C` in W_cubic(t) = C*(t-K)^3 + w_max).
const C: f64 = 0.4;

/// Threshold for rolling back state, as percentage of lost packets relative to
/// cwnd.
const ROLLBACK_THRESHOLD_PERCENT: usize = 20;

/// Minimum threshold for rolling back state, as number of packets.
const MIN_ROLLBACK_THRESHOLD: usize = 2;

/// Default value of alpha_aimd in the beginning of congestion avoidance.
/// Computed as 3 * (1 - beta_cubic) / (1 + beta_cubic).
const ALPHA_AIMD: f64 = 3.0 * (1.0 - BETA_CUBIC) / (1.0 + BETA_CUBIC);
75
/// CUBIC State Variables.
///
/// We need to keep those variables across the connection.
/// k, w_max, w_est are described in the RFC.
#[derive(Debug, Default)]
pub struct State {
    // K: time period (seconds) for W_cubic to grow back to w_max (Eq. 2).
    k: f64,

    // Window size (bytes) just before the last congestion event.
    w_max: f64,

    // AIMD-friendly window estimate (bytes), used in the friendly region.
    w_est: f64,

    // Additive-increase factor applied when updating w_est (Eq. 4).
    alpha_aimd: f64,

    // Used in CUBIC fix (see on_packet_sent())
    last_sent_time: Option<Instant>,

    // Store cwnd increment during congestion avoidance.
    cwnd_inc: usize,

    // CUBIC state checkpoint preceding the last congestion event.
    prior: PriorState,
}
99
/// Stores the CUBIC state from before the last congestion event.
///
/// Used by `checkpoint()`/`rollback()` to undo a congestion response when the
/// event turns out to be spurious.
///
/// <https://tools.ietf.org/id/draft-ietf-tcpm-rfc8312bis-00.html#section-4.9>
#[derive(Debug, Default)]
struct PriorState {
    congestion_window: usize,

    ssthresh: usize,

    w_max: f64,

    k: f64,

    // congestion_recovery_start_time at checkpoint time.
    epoch_start: Option<Instant>,

    // Total lost packet count at checkpoint time; used to measure how many
    // packets were lost during the recovery episode.
    lost_count: usize,
}
117
118/// CUBIC Functions.
119///
120/// Note that these calculations are based on a count of cwnd as bytes,
121/// not packets.
122/// Unit of t (duration) and RTT are based on seconds (f64).
123impl State {
124    // K = cubic_root ((w_max - cwnd) / C) (Eq. 2)
125    fn cubic_k(&self, cwnd: usize, max_datagram_size: usize) -> f64 {
126        let w_max = self.w_max / max_datagram_size as f64;
127        let cwnd = cwnd as f64 / max_datagram_size as f64;
128
129        libm::cbrt((w_max - cwnd) / C)
130    }
131
132    // W_cubic(t) = C * (t - K)^3 + w_max (Eq. 1)
133    fn w_cubic(&self, t: Duration, max_datagram_size: usize) -> f64 {
134        let w_max = self.w_max / max_datagram_size as f64;
135
136        (C * (t.as_secs_f64() - self.k).powi(3) + w_max) *
137            max_datagram_size as f64
138    }
139
140    // W_est = W_est + alpha_aimd * (segments_acked / cwnd)  (Eq. 4)
141    fn w_est_inc(
142        &self, acked: usize, cwnd: usize, max_datagram_size: usize,
143    ) -> f64 {
144        self.alpha_aimd * (acked as f64 / cwnd as f64) * max_datagram_size as f64
145    }
146}
147
148fn on_init(_r: &mut Congestion) {}
149
150fn on_packet_sent(
151    r: &mut Congestion, sent_bytes: usize, bytes_in_flight: usize, now: Instant,
152) {
153    // See https://github.com/torvalds/linux/commit/30927520dbae297182990bb21d08762bcc35ce1d
154    // First transmit when no packets in flight
155    let cubic = &mut r.cubic_state;
156
157    if let Some(last_sent_time) = cubic.last_sent_time {
158        if bytes_in_flight == 0 {
159            let delta = now - last_sent_time;
160
161            // We were application limited (idle) for a while.
162            // Shift epoch start to keep cwnd growth to cubic curve.
163            if let Some(recovery_start_time) = r.congestion_recovery_start_time {
164                if delta.as_nanos() > 0 {
165                    r.congestion_recovery_start_time =
166                        Some(recovery_start_time + delta);
167                }
168            }
169        }
170    }
171
172    cubic.last_sent_time = Some(now);
173
174    reno::on_packet_sent(r, sent_bytes, bytes_in_flight, now);
175}
176
177fn on_packets_acked(
178    r: &mut Congestion, bytes_in_flight: usize, packets: &mut Vec<Acked>,
179    now: Instant, rtt_stats: &RttStats,
180) {
181    for pkt in packets.drain(..) {
182        on_packet_acked(r, bytes_in_flight, &pkt, now, rtt_stats);
183    }
184}
185
/// Handles a single newly acked packet.
///
/// Order of operations:
///  1. While in congestion recovery, only PRR bookkeeping is done.
///  2. Application-limited acks are ignored (cwnd must not grow).
///  3. A recovery episode that ended with only a few losses is treated as
///     spurious and the pre-event state is rolled back.
///  4. Otherwise grow cwnd: slow start (optionally HyStart++ CSS) below
///     ssthresh, CUBIC congestion avoidance at or above it.
fn on_packet_acked(
    r: &mut Congestion, bytes_in_flight: usize, packet: &Acked, now: Instant,
    rtt_stats: &RttStats,
) {
    let in_congestion_recovery = r.in_congestion_recovery(packet.time_sent);

    if in_congestion_recovery {
        // During recovery, Proportional Rate Reduction governs sending;
        // cwnd itself is not grown here.
        r.prr.on_packet_acked(
            packet.size,
            bytes_in_flight,
            r.ssthresh,
            r.max_datagram_size,
        );

        return;
    }

    if r.app_limited {
        return;
    }

    // Detecting spurious congestion events.
    // <https://tools.ietf.org/id/draft-ietf-tcpm-rfc8312bis-00.html#section-4.9>
    //
    // When the recovery episode ends with recovering
    // a few packets (less than cwnd / mss * ROLLBACK_THRESHOLD_PERCENT(%)), it's
    // considered as spurious and restore to the previous state.
    if r.congestion_recovery_start_time.is_some() {
        // Packets lost since the last checkpoint() snapshot.
        let new_lost = r.lost_count - r.cubic_state.prior.lost_count;

        let rollback_threshold = (r.congestion_window / r.max_datagram_size) *
            ROLLBACK_THRESHOLD_PERCENT /
            100;

        let rollback_threshold = rollback_threshold.max(MIN_ROLLBACK_THRESHOLD);

        if new_lost < rollback_threshold {
            let did_rollback = rollback(r);
            if did_rollback {
                return;
            }
        }
    }

    if r.congestion_window < r.ssthresh {
        // In Slow start, bytes_acked_sl is used for counting
        // acknowledged bytes.
        r.bytes_acked_sl += packet.size;

        if r.bytes_acked_sl >= r.max_datagram_size {
            if r.hystart.in_css() {
                // HyStart++ Conservative Slow Start grows cwnd more slowly
                // than regular slow start.
                r.congestion_window +=
                    r.hystart.css_cwnd_inc(r.max_datagram_size);
            } else {
                r.congestion_window += r.max_datagram_size;
            }

            r.bytes_acked_sl -= r.max_datagram_size;
        }

        if r.hystart.on_packet_acked(packet, rtt_stats.latest_rtt, now) {
            // Exit to congestion avoidance if CSS ends.
            r.ssthresh = r.congestion_window;
        }
    } else {
        // Congestion avoidance.
        let ca_start_time;

        // In CSS, use css_start_time instead of congestion_recovery_start_time.
        if r.hystart.in_css() {
            ca_start_time = r.hystart.css_start_time().unwrap();

            // Reset w_max and k when CSS started.
            if r.cubic_state.w_max == 0.0 {
                r.cubic_state.w_max = r.congestion_window as f64;
                r.cubic_state.k = 0.0;

                r.cubic_state.w_est = r.congestion_window as f64;
                r.cubic_state.alpha_aimd = ALPHA_AIMD;
            }
        } else {
            match r.congestion_recovery_start_time {
                Some(t) => ca_start_time = t,
                None => {
                    // When we come here without congestion_event() triggered,
                    // initialize congestion_recovery_start_time, w_max and k.
                    ca_start_time = now;
                    r.congestion_recovery_start_time = Some(now);

                    r.cubic_state.w_max = r.congestion_window as f64;
                    r.cubic_state.k = 0.0;

                    r.cubic_state.w_est = r.congestion_window as f64;
                    r.cubic_state.alpha_aimd = ALPHA_AIMD;
                },
            }
        }

        // Elapsed time since the start of the congestion-avoidance epoch.
        let t = now.saturating_duration_since(ca_start_time);

        // target = w_cubic(t + rtt)
        let target = r
            .cubic_state
            .w_cubic(t + *rtt_stats.min_rtt, r.max_datagram_size);

        // Clipping target to [cwnd, 1.5 x cwnd]
        let target = f64::max(target, r.congestion_window as f64);
        let target = f64::min(target, r.congestion_window as f64 * 1.5);

        // Update w_est.
        let w_est_inc = r.cubic_state.w_est_inc(
            packet.size,
            r.congestion_window,
            r.max_datagram_size,
        );
        r.cubic_state.w_est += w_est_inc;

        // Once w_est catches up to w_max, switch to plain additive increase.
        if r.cubic_state.w_est >= r.cubic_state.w_max {
            r.cubic_state.alpha_aimd = 1.0;
        }

        let mut cubic_cwnd = r.congestion_window;

        if r.cubic_state.w_cubic(t, r.max_datagram_size) < r.cubic_state.w_est {
            // AIMD friendly region (W_cubic(t) < W_est)
            cubic_cwnd = cmp::max(cubic_cwnd, r.cubic_state.w_est as usize);
        } else {
            // Concave region or convex region use same increment.
            // target >= cubic_cwnd here because target was clipped to at
            // least cwnd above, so the subtraction cannot underflow.
            let cubic_inc =
                r.max_datagram_size * (target as usize - cubic_cwnd) / cubic_cwnd;

            cubic_cwnd += cubic_inc;
        }

        // Update the increment and increase cwnd by MSS.
        // cwnd only grows in whole-MSS steps; fractional progress is
        // accumulated in cwnd_inc.
        r.cubic_state.cwnd_inc += cubic_cwnd - r.congestion_window;

        if r.cubic_state.cwnd_inc >= r.max_datagram_size {
            r.congestion_window += r.max_datagram_size;
            r.cubic_state.cwnd_inc -= r.max_datagram_size;
        }
    }
}
329
/// Reacts to a loss event whose largest lost packet is `largest_lost_pkt`.
///
/// A new recovery episode starts only if that packet was sent after the
/// start of the previous recovery period; otherwise the loss belongs to the
/// episode already in progress and no further reduction is applied.
fn congestion_event(
    r: &mut Congestion, bytes_in_flight: usize, _lost_bytes: usize,
    largest_lost_pkt: &Sent, now: Instant,
) {
    let time_sent = largest_lost_pkt.time_sent;
    let in_congestion_recovery = r.in_congestion_recovery(time_sent);

    // Start a new congestion event if packet was sent after the
    // start of the previous congestion recovery period.
    if !in_congestion_recovery {
        r.congestion_recovery_start_time = Some(now);

        // Fast convergence: if cwnd had not yet recovered to the previous
        // w_max when this loss occurred, reduce w_max further to release
        // bandwidth to newer flows.
        if (r.congestion_window as f64) < r.cubic_state.w_max {
            r.cubic_state.w_max =
                r.congestion_window as f64 * (1.0 + BETA_CUBIC) / 2.0;
        } else {
            r.cubic_state.w_max = r.congestion_window as f64;
        }

        // Multiplicative decrease, floored at the minimum window.
        r.ssthresh = (r.congestion_window as f64 * BETA_CUBIC) as usize;
        r.ssthresh =
            cmp::max(r.ssthresh, r.max_datagram_size * MINIMUM_WINDOW_PACKETS);
        r.congestion_window = r.ssthresh;

        // K would be the cube root of a negative number when w_max < cwnd
        // (possible after the MINIMUM_WINDOW_PACKETS floor), so clamp it to
        // zero in that case.
        r.cubic_state.k = if r.cubic_state.w_max < r.congestion_window as f64 {
            0.0
        } else {
            r.cubic_state
                .cubic_k(r.congestion_window, r.max_datagram_size)
        };

        // Scale down any pending sub-MSS cwnd increment along with cwnd.
        r.cubic_state.cwnd_inc =
            (r.cubic_state.cwnd_inc as f64 * BETA_CUBIC) as usize;

        r.cubic_state.w_est = r.congestion_window as f64;
        r.cubic_state.alpha_aimd = ALPHA_AIMD;

        if r.hystart.in_css() {
            r.hystart.congestion_event();
        }

        r.prr.congestion_event(bytes_in_flight);
    }
}
375
376fn checkpoint(r: &mut Congestion) {
377    r.cubic_state.prior.congestion_window = r.congestion_window;
378    r.cubic_state.prior.ssthresh = r.ssthresh;
379    r.cubic_state.prior.w_max = r.cubic_state.w_max;
380    r.cubic_state.prior.k = r.cubic_state.k;
381    r.cubic_state.prior.epoch_start = r.congestion_recovery_start_time;
382    r.cubic_state.prior.lost_count = r.lost_count;
383}
384
385fn rollback(r: &mut Congestion) -> bool {
386    // Don't go back to slow start.
387    if r.cubic_state.prior.congestion_window < r.cubic_state.prior.ssthresh {
388        return false;
389    }
390
391    if r.congestion_window >= r.cubic_state.prior.congestion_window {
392        return false;
393    }
394
395    r.congestion_window = r.cubic_state.prior.congestion_window;
396    r.ssthresh = r.cubic_state.prior.ssthresh;
397    r.cubic_state.w_max = r.cubic_state.prior.w_max;
398    r.cubic_state.k = r.cubic_state.prior.k;
399    r.congestion_recovery_start_time = r.cubic_state.prior.epoch_start;
400
401    true
402}
403
/// CUBIC does not provide its own pacing rate; the generic pacer is used.
fn has_custom_pacing() -> bool {
    false
}
407
/// Writes a compact debug representation of the CUBIC-specific state (K and
/// w_max) for inclusion in the recovery debug output.
fn debug_fmt(r: &Congestion, f: &mut std::fmt::Formatter) -> std::fmt::Result {
    write!(
        f,
        "cubic={{ k={} w_max={} }} ",
        r.cubic_state.k, r.cubic_state.w_max
    )
}
415
#[cfg(test)]
mod tests {
    //! Unit tests driving CUBIC through a `TestSender` harness: slow start,
    //! congestion events, congestion avoidance, HyStart++ transitions,
    //! spurious-loss rollback and fast convergence.

    use super::*;

    use crate::CongestionControlAlgorithm;

    use crate::recovery::congestion::hystart;
    use crate::recovery::congestion::recovery::LegacyRecovery;
    use crate::recovery::congestion::test_sender::TestSender;
    use crate::recovery::RecoveryOps;

    // CUBIC sender with HyStart++ disabled.
    fn test_sender() -> TestSender {
        TestSender::new(CongestionControlAlgorithm::CUBIC, false)
    }

    // CUBIC sender with HyStart++ enabled.
    fn hystart_test_sender() -> TestSender {
        TestSender::new(CongestionControlAlgorithm::CUBIC, true)
    }

    #[test]
    fn cubic_init() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(CongestionControlAlgorithm::CUBIC);

        let r = LegacyRecovery::new(&cfg);

        assert!(r.cwnd() > 0);
        assert_eq!(r.bytes_in_flight, 0);
    }

    #[test]
    fn cubic_slow_start() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        // Send initcwnd full MSS packets to become no longer app limited
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }

        let cwnd_prev = sender.congestion_window;

        sender.ack_n_packets(1, size);

        // Check if cwnd increased by packet size (slow start)
        assert_eq!(sender.congestion_window, cwnd_prev + size);
    }

    #[test]
    fn cubic_slow_start_multi_acks() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        // Send initcwnd full MSS packets to become no longer app limited
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }

        let cwnd_prev = sender.congestion_window;

        sender.ack_n_packets(3, size);

        // Acked 3 packets.
        assert_eq!(sender.congestion_window, cwnd_prev + size * 3);
    }

    #[test]
    fn cubic_congestion_event() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        sender.send_packet(size);

        let cwnd_prev = sender.congestion_window;

        sender.lose_n_packets(1, size, None);

        // In CUBIC, after congestion event, cwnd will be reduced by (1 -
        // CUBIC_BETA)
        assert_eq!(
            cwnd_prev as f64 * BETA_CUBIC,
            sender.congestion_window as f64
        );
    }

    #[test]
    fn cubic_congestion_avoidance() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        let prev_cwnd = sender.congestion_window;

        // Send initcwnd full MSS packets to become no longer app limited
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }

        // Trigger congestion event to update ssthresh
        sender.lose_n_packets(1, size, None);

        // After congestion event, cwnd will be reduced.
        let cur_cwnd = (prev_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(sender.congestion_window, cur_cwnd);

        // Shift current time by 1 RTT.
        let rtt = Duration::from_millis(100);
        sender.update_rtt(rtt);
        // Exit from the recovery.
        sender.advance_time(rtt);

        // During Congestion Avoidance, it will take
        // 5 ACKs to increase cwnd by 1 MSS.
        for _ in 0..5 {
            sender.ack_n_packets(1, size);
            sender.advance_time(rtt);
        }

        assert_eq!(sender.congestion_window, cur_cwnd + size);
    }

    #[test]
    fn cubic_hystart_css_to_ss() {
        let mut sender = hystart_test_sender();
        let size = sender.max_datagram_size;

        // 1st round.
        let n_rtt_sample = hystart::N_RTT_SAMPLE;

        let rtt_1st = Duration::from_millis(50);

        let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
        sender.hystart.start_round(next_rnd);
        // Send 1st round packets.
        for _ in 0..n_rtt_sample {
            sender.send_packet(size);
        }
        sender.update_app_limited(false);

        // Receiving Acks.
        sender.advance_time(rtt_1st);
        sender.update_rtt(rtt_1st);
        sender.ack_n_packets(n_rtt_sample, size);

        // Not in CSS yet.
        assert!(sender.hystart.css_start_time().is_none());

        // 2nd round.
        let mut rtt_2nd = Duration::from_millis(100);

        sender.advance_time(rtt_2nd);

        let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
        sender.hystart.start_round(next_rnd);
        // Send 2nd round packets.
        for _ in 0..n_rtt_sample {
            sender.send_packet(size);
        }
        sender.update_app_limited(false);

        // Receiving Acks.
        // Last ack will cause to exit to CSS.
        let mut cwnd_prev = sender.congestion_window();

        for _ in 0..n_rtt_sample {
            cwnd_prev = sender.congestion_window();
            sender.update_rtt(rtt_2nd);
            sender.ack_n_packets(1, size);
            // Keep increasing RTT so that hystart exits to CSS.
            // NOTE(review): `x += x.saturating_add(d)` roughly doubles
            // rtt_2nd each iteration rather than adding 4ms; presumably
            // intentional to force RTT growth — confirm.
            rtt_2nd += rtt_2nd.saturating_add(Duration::from_millis(4));
        }

        // Now we are in CSS.
        assert!(sender.hystart.css_start_time().is_some());
        assert_eq!(sender.congestion_window(), cwnd_prev + size);

        // 3rd round, which RTT is less than previous round to
        // trigger back to Slow Start.
        let rtt_3rd = Duration::from_millis(80);
        sender.advance_time(rtt_3rd);
        cwnd_prev = sender.congestion_window();

        let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
        sender.hystart.start_round(next_rnd);
        // Send 3nd round packets.
        for _ in 0..n_rtt_sample {
            sender.send_packet(size);
        }
        sender.update_app_limited(false);

        // Receiving Acks.
        // Last ack will cause to exit to SS.
        sender.update_rtt(rtt_3rd);
        sender.ack_n_packets(n_rtt_sample, size);

        // Now we are back in Slow Start.
        assert!(sender.hystart.css_start_time().is_none());
        assert_eq!(
            sender.congestion_window(),
            cwnd_prev +
                size / hystart::CSS_GROWTH_DIVISOR * hystart::N_RTT_SAMPLE
        );
    }

    #[test]
    fn cubic_hystart_css_to_ca() {
        let mut sender = hystart_test_sender();
        let size = sender.max_datagram_size;

        // 1st round.
        let n_rtt_sample = hystart::N_RTT_SAMPLE;

        let rtt_1st = Duration::from_millis(50);

        let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
        sender.hystart.start_round(next_rnd);
        // Send 1st round packets.
        for _ in 0..n_rtt_sample {
            sender.send_packet(size);
        }
        sender.update_app_limited(false);

        // Receiving Acks.
        sender.advance_time(rtt_1st);
        sender.update_rtt(rtt_1st);
        sender.ack_n_packets(n_rtt_sample, size);

        // Not in CSS yet.
        assert!(sender.hystart.css_start_time().is_none());

        // 2nd round.
        let mut rtt_2nd = Duration::from_millis(100);
        sender.advance_time(rtt_2nd);
        // Send 2nd round packets.
        let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
        sender.hystart.start_round(next_rnd);
        for _ in 0..n_rtt_sample {
            sender.send_packet(size);
        }
        sender.update_app_limited(false);

        // Receiving Acks.
        // Last ack will cause to exit to CSS.
        let mut cwnd_prev = sender.congestion_window();

        for _ in 0..n_rtt_sample {
            cwnd_prev = sender.congestion_window();
            sender.update_rtt(rtt_2nd);
            sender.ack_n_packets(1, size);
            // Keep increasing RTT so that hystart exits to CSS.
            // NOTE(review): same doubling pattern as in
            // cubic_hystart_css_to_ss — confirm it is intentional.
            rtt_2nd += rtt_2nd.saturating_add(Duration::from_millis(4));
        }

        // Now we are in CSS.
        assert!(sender.hystart.css_start_time().is_some());
        assert_eq!(sender.congestion_window(), cwnd_prev + size);

        // Run 5 (CSS_ROUNDS) in CSS, to exit to congestion avoidance.
        let rtt_css = Duration::from_millis(100);
        sender.advance_time(rtt_css);

        for _ in 0..hystart::CSS_ROUNDS {
            // Send a round of packets.
            let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
            sender.hystart.start_round(next_rnd);
            for _ in 0..n_rtt_sample {
                sender.send_packet(size);
            }
            sender.update_app_limited(false);

            // Receiving Acks.
            sender.update_rtt(rtt_css);
            sender.ack_n_packets(n_rtt_sample, size);
        }
        // Now we are in congestion avoidance.
        assert_eq!(sender.congestion_window(), sender.ssthresh);
    }

    #[test]
    fn cubic_spurious_congestion_event() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        let prev_cwnd = sender.congestion_window();

        // Send initcwnd full MSS packets to become no longer app limited
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }
        sender.lose_n_packets(1, size, None);

        // After congestion event, cwnd will be reduced.
        let cur_cwnd = (prev_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(sender.congestion_window(), cur_cwnd);

        // Ack more than cwnd bytes with rtt=100ms
        let rtt = Duration::from_millis(100);
        sender.update_rtt(rtt);

        let acked = Acked {
            pkt_num: 0,
            // To exit from recovery
            time_sent: sender.time + rtt,
            size,
            delivered: 0,
            delivered_time: sender.time,
            first_sent_time: sender.time,
            is_app_limited: false,
            rtt: Duration::ZERO,
        };

        // Trigger detecting spurious congestion event
        sender.inject_ack(acked, sender.time + rtt + Duration::from_millis(5));

        // This is from slow start, no rollback.
        assert_eq!(sender.congestion_window(), cur_cwnd);

        sender.advance_time(rtt);

        let prev_cwnd = sender.congestion_window();

        sender.lose_n_packets(1, size, Some(sender.time));

        // After congestion event, cwnd will be reduced.
        let cur_cwnd = (cur_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(sender.congestion_window(), cur_cwnd);

        sender.advance_time(rtt + Duration::from_millis(5));

        let acked = Acked {
            pkt_num: 0,
            // To exit from recovery
            time_sent: sender.time + rtt,
            size,
            delivered: 0,
            delivered_time: sender.time,
            first_sent_time: sender.time,
            is_app_limited: false,
            rtt: Duration::ZERO,
        };

        // Trigger detecting spurious congestion event.
        sender.inject_ack(acked, sender.time + rtt + Duration::from_millis(5));

        // cwnd is rolled back to the previous one.
        assert_eq!(sender.congestion_window(), prev_cwnd);
    }

    #[test]
    fn cubic_fast_convergence() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        let prev_cwnd = sender.congestion_window;

        // Send initcwnd full MSS packets to become no longer app limited
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }

        // Trigger congestion event to update ssthresh
        sender.lose_n_packets(1, size, None);

        // After 1st congestion event, cwnd will be reduced.
        let cur_cwnd = (prev_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(sender.congestion_window, cur_cwnd);

        // Shift current time by 1 RTT.
        let rtt = Duration::from_millis(100);
        sender.update_rtt(rtt);
        // Exit from the recovery.
        sender.advance_time(rtt);

        // During Congestion Avoidance, it will take
        // 5 ACKs to increase cwnd by 1 MSS.
        for _ in 0..5 {
            sender.ack_n_packets(1, size);
            sender.advance_time(rtt);
        }

        assert_eq!(sender.congestion_window, cur_cwnd + size);

        let prev_cwnd = sender.congestion_window;

        // Fast convergence: now there is 2nd congestion event and
        // cwnd is not fully recovered to w_max, w_max will be
        // further reduced.
        sender.lose_n_packets(1, size, None);

        // After 2nd congestion event, cwnd will be reduced.
        let cur_cwnd = (prev_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(sender.congestion_window, cur_cwnd);

        // w_max will be further reduced, not prev_cwnd
        assert_eq!(
            sender.cubic_state.w_max,
            prev_cwnd as f64 * (1.0 + BETA_CUBIC) / 2.0
        );
    }
}