// quiche/recovery/congestion/cubic.rs
1// Copyright (C) 2019, Cloudflare, Inc.
2// All rights reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8//     * Redistributions of source code must retain the above copyright notice,
9//       this list of conditions and the following disclaimer.
10//
11//     * Redistributions in binary form must reproduce the above copyright
12//       notice, this list of conditions and the following disclaimer in the
13//       documentation and/or other materials provided with the distribution.
14//
15// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
16// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
19// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27//! CUBIC Congestion Control
28//!
29//! This implementation is based on the following draft:
30//! <https://tools.ietf.org/html/draft-ietf-tcpm-rfc8312bis-02>
31//!
32//! Note that Slow Start can use HyStart++ when enabled.
33
34use std::cmp;
35
36use std::time::Duration;
37use std::time::Instant;
38
39use super::rtt::RttStats;
40use super::Acked;
41use super::Sent;
42
43use super::reno;
44use super::Congestion;
45use super::CongestionControlOps;
46use crate::recovery::MINIMUM_WINDOW_PACKETS;
47
/// Function table wiring this CUBIC implementation into the generic
/// congestion-control dispatch (`CongestionControlOps`) used by the
/// recovery module.
pub(crate) static CUBIC: CongestionControlOps = CongestionControlOps {
    on_init,
    on_packet_sent,
    on_packets_acked,
    congestion_event,
    checkpoint,
    rollback,
    // qlog state reporting is only compiled in with the "qlog" feature.
    #[cfg(feature = "qlog")]
    state_str,
    debug_fmt,
};
59
/// CUBIC Constants.
///
/// These are the recommended values in RFC8312.
const BETA_CUBIC: f64 = 0.7;

/// Cubic scaling constant C from the W_cubic equation (Eq. 1).
const C: f64 = 0.4;

/// Threshold for rolling back state, as percentage of lost packets relative to
/// cwnd.
const ROLLBACK_THRESHOLD_PERCENT: usize = 20;

/// Minimum threshold for rolling back state, as number of packets.
const MIN_ROLLBACK_THRESHOLD: usize = 2;

/// Default value of alpha_aimd in the beginning of congestion avoidance:
/// 3 * (1 - beta_cubic) / (1 + beta_cubic).
const ALPHA_AIMD: f64 = 3.0 * (1.0 - BETA_CUBIC) / (1.0 + BETA_CUBIC);
76
77/// CUBIC State Variables.
78///
79/// We need to keep those variables across the connection.
80/// k, w_max, w_est are described in the RFC.
#[derive(Debug, Default)]
pub struct State {
    // Time period (in seconds) the cubic function takes to grow the
    // window back to w_max (Eq. 2); computed by `cubic_k()`.
    k: f64,

    // Window size (in bytes) just before the last congestion event.
    w_max: f64,

    // AIMD-friendly window estimate (in bytes), grown per Eq. 4 and
    // used for the TCP-friendly region check.
    w_est: f64,

    // Additive-increase factor for w_est; starts at ALPHA_AIMD and is
    // set to 1.0 once w_est reaches w_max.
    alpha_aimd: f64,

    // Used in CUBIC fix (see on_packet_sent())
    last_sent_time: Option<Instant>,

    // Tracks when the last ACK was processed, approximating when
    // bytes_in_flight transitioned toward zero. Used to measure the
    // actual idle duration in on_packet_sent() instead of the time
    // since the last send (which inflates the delta by a full RTT).
    last_ack_time: Option<Instant>,

    // Store cwnd increment during congestion avoidance. Accumulates
    // sub-MSS credit; cwnd grows by one MSS each time it fills up.
    cwnd_inc: usize,

    // CUBIC state checkpoint preceding the last congestion event.
    prior: PriorState,
}
106
/// Stores the CUBIC state from before the last congestion event.
///
/// Filled in by `checkpoint()` and restored by `rollback()` when a
/// congestion event is judged spurious.
///
/// <https://tools.ietf.org/id/draft-ietf-tcpm-rfc8312bis-00.html#section-4.9>
#[derive(Debug, Default)]
struct PriorState {
    // cwnd (bytes) at checkpoint time.
    congestion_window: usize,

    // Slow start threshold (bytes) at checkpoint time.
    ssthresh: usize,

    // Saved copy of State::w_max.
    w_max: f64,

    // Saved copy of State::k.
    k: f64,

    // Saved congestion_recovery_start_time (the CA epoch start).
    epoch_start: Option<Instant>,

    // Total lost packet count at checkpoint time; used to measure how
    // many losses the subsequent recovery episode actually saw.
    lost_count: usize,
}
124
125/// CUBIC Functions.
126///
127/// Note that these calculations are based on a count of cwnd as bytes,
128/// not packets.
129/// Unit of t (duration) and RTT are based on seconds (f64).
130impl State {
131    // K = cubic_root ((w_max - cwnd) / C) (Eq. 2)
132    fn cubic_k(&self, cwnd: usize, max_datagram_size: usize) -> f64 {
133        let w_max = self.w_max / max_datagram_size as f64;
134        let cwnd = cwnd as f64 / max_datagram_size as f64;
135
136        libm::cbrt((w_max - cwnd) / C)
137    }
138
139    // W_cubic(t) = C * (t - K)^3 + w_max (Eq. 1)
140    fn w_cubic(&self, t: Duration, max_datagram_size: usize) -> f64 {
141        let w_max = self.w_max / max_datagram_size as f64;
142
143        (C * (t.as_secs_f64() - self.k).powi(3) + w_max) *
144            max_datagram_size as f64
145    }
146
147    // W_est = W_est + alpha_aimd * (segments_acked / cwnd)  (Eq. 4)
148    fn w_est_inc(
149        &self, acked: usize, cwnd: usize, max_datagram_size: usize,
150    ) -> f64 {
151        self.alpha_aimd * (acked as f64 / cwnd as f64) * max_datagram_size as f64
152    }
153}
154
// CUBIC needs no extra per-connection setup at initialization time.
fn on_init(_r: &mut Congestion) {}
156
/// Called on every packet transmission.
///
/// Delegates common bookkeeping to the Reno handler, then applies the
/// CUBIC idle-restart adjustment: after a quiescent period the
/// congestion-avoidance epoch (congestion_recovery_start_time) is
/// shifted forward by the idle duration so cwnd growth resumes on the
/// cubic curve instead of jumping ahead.
fn on_packet_sent(
    r: &mut Congestion, sent_bytes: usize, bytes_in_flight: usize, now: Instant,
) {
    reno::on_packet_sent(r, sent_bytes, bytes_in_flight, now);

    // Don't adjust epoch or track send time for non-data packets
    // (e.g. ACKs). These have in_flight=true but size=0.
    // Skip all the following logic for these packets.
    if sent_bytes == 0 && r.enable_cubic_idle_restart_fix {
        return;
    }

    // See https://github.com/torvalds/linux/commit/30927520dbae297182990bb21d08762bcc35ce1d
    // First transmit when no packets in flight.
    // Shift epoch start to keep cwnd growth on the cubic curve.
    let cubic = &mut r.cubic_state;

    if bytes_in_flight == 0 {
        if let Some(recovery_start_time) = r.congestion_recovery_start_time {
            // Measure idle from the most recent activity: either the
            // last ACK (approximating when bif hit 0) or the last data
            // send, whichever is later. Using last_sent_time alone
            // would inflate the delta by a full RTT when cwnd is small
            // and bif transiently hits 0 between ACK and send.
            // (None compares less than Some, so max() prefers any
            // recorded timestamp.)
            let idle_start = if r.enable_cubic_idle_restart_fix {
                cmp::max(cubic.last_ack_time, cubic.last_sent_time)
            } else {
                cubic.last_sent_time
            };

            if let Some(idle_start) = idle_start {
                if idle_start < now {
                    let delta = now - idle_start;
                    r.congestion_recovery_start_time =
                        Some(recovery_start_time + delta);
                }
            }
        }
    }

    cubic.last_sent_time = Some(now);
}
199
200fn on_packets_acked(
201    r: &mut Congestion, bytes_in_flight: usize, packets: &mut Vec<Acked>,
202    now: Instant, rtt_stats: &RttStats,
203) {
204    r.cubic_state.last_ack_time = Some(now);
205
206    for pkt in packets.drain(..) {
207        on_packet_acked(r, bytes_in_flight, &pkt, now, rtt_stats);
208    }
209}
210
/// Per-packet ACK processing: PRR while in recovery, slow start below
/// ssthresh, and CUBIC window growth in congestion avoidance.
fn on_packet_acked(
    r: &mut Congestion, bytes_in_flight: usize, packet: &Acked, now: Instant,
    rtt_stats: &RttStats,
) {
    let in_congestion_recovery = r.in_congestion_recovery(packet.time_sent);

    if in_congestion_recovery {
        // While in recovery, cwnd does not grow; PRR governs sending.
        r.prr.on_packet_acked(
            packet.size,
            bytes_in_flight,
            r.ssthresh.get(),
            r.max_datagram_size,
        );

        return;
    }

    if r.app_limited {
        // Don't grow cwnd when the application isn't using the
        // available window.
        return;
    }

    // Detecting spurious congestion events.
    // <https://tools.ietf.org/id/draft-ietf-tcpm-rfc8312bis-00.html#section-4.9>
    //
    // When the recovery episode ends having lost only a few packets
    // (less than cwnd / mss * ROLLBACK_THRESHOLD_PERCENT(%)), it is
    // considered spurious and the previous state is restored.
    if r.congestion_recovery_start_time.is_some() {
        let new_lost = r.lost_count - r.cubic_state.prior.lost_count;

        let rollback_threshold = (r.congestion_window / r.max_datagram_size) *
            ROLLBACK_THRESHOLD_PERCENT /
            100;

        let rollback_threshold = rollback_threshold.max(MIN_ROLLBACK_THRESHOLD);

        if new_lost < rollback_threshold {
            let did_rollback = rollback(r);
            if did_rollback {
                return;
            }
        }
    }

    if r.congestion_window < r.ssthresh.get() {
        // In Slow start, bytes_acked_sl is used for counting
        // acknowledged bytes.
        r.bytes_acked_sl += packet.size;

        if r.bytes_acked_sl >= r.max_datagram_size {
            if r.hystart.in_css() {
                // In Conservative Slow Start, use the CSS-specific
                // (smaller) increment.
                r.congestion_window +=
                    r.hystart.css_cwnd_inc(r.max_datagram_size);
            } else {
                r.congestion_window += r.max_datagram_size;
            }

            r.bytes_acked_sl -= r.max_datagram_size;
        }

        if r.hystart.on_packet_acked(packet, rtt_stats.latest_rtt, now) {
            // Exit to congestion avoidance if CSS ends.
            r.ssthresh.update(r.congestion_window, true);
        }
    } else {
        // Congestion avoidance.
        let ca_start_time;

        // In CSS, use css_start_time instead of congestion_recovery_start_time.
        if r.hystart.in_css() {
            ca_start_time = r.hystart.css_start_time().unwrap();

            // Reset w_max and k when CSS started.
            if r.cubic_state.w_max == 0.0 {
                r.cubic_state.w_max = r.congestion_window as f64;
                r.cubic_state.k = 0.0;

                r.cubic_state.w_est = r.congestion_window as f64;
                r.cubic_state.alpha_aimd = ALPHA_AIMD;
            }
        } else {
            match r.congestion_recovery_start_time {
                Some(t) => ca_start_time = t,
                None => {
                    // When we come here without congestion_event() triggered,
                    // initialize congestion_recovery_start_time, w_max and k.
                    ca_start_time = now;
                    r.congestion_recovery_start_time = Some(now);

                    r.cubic_state.w_max = r.congestion_window as f64;
                    r.cubic_state.k = 0.0;

                    r.cubic_state.w_est = r.congestion_window as f64;
                    r.cubic_state.alpha_aimd = ALPHA_AIMD;
                },
            }
        }

        let t = now.saturating_duration_since(ca_start_time);

        // target = w_cubic(t + rtt)
        let target = r
            .cubic_state
            .w_cubic(t + *rtt_stats.min_rtt, r.max_datagram_size);

        // Clipping target to [cwnd, 1.5 x cwnd]
        let target = f64::max(target, r.congestion_window as f64);
        let target = f64::min(target, r.congestion_window as f64 * 1.5);

        // Update w_est.
        let w_est_inc = r.cubic_state.w_est_inc(
            packet.size,
            r.congestion_window,
            r.max_datagram_size,
        );
        r.cubic_state.w_est += w_est_inc;

        if r.cubic_state.w_est >= r.cubic_state.w_max {
            // Once the AIMD estimate catches up with w_max, switch to
            // pure additive increase (alpha = 1).
            r.cubic_state.alpha_aimd = 1.0;
        }

        let mut cubic_cwnd = r.congestion_window;

        if r.cubic_state.w_cubic(t, r.max_datagram_size) < r.cubic_state.w_est {
            // AIMD friendly region (W_cubic(t) < W_est)
            cubic_cwnd = cmp::max(cubic_cwnd, r.cubic_state.w_est as usize);
        } else {
            // Concave region or convex region use same increment.
            // The clipping above guarantees target >= cwnd, so this
            // subtraction cannot underflow.
            let cubic_inc =
                r.max_datagram_size * (target as usize - cubic_cwnd) / cubic_cwnd;

            cubic_cwnd += cubic_inc;
        }

        // Update the increment and increase cwnd by MSS.
        r.cubic_state.cwnd_inc += cubic_cwnd - r.congestion_window;

        if r.cubic_state.cwnd_inc >= r.max_datagram_size {
            r.congestion_window += r.max_datagram_size;
            r.cubic_state.cwnd_inc -= r.max_datagram_size;
        }
    }
}
354
/// Reacts to a loss event: starts a new recovery episode (unless the
/// lost packet predates the current one), applies fast convergence and
/// the multiplicative decrease, and re-arms the cubic epoch variables.
fn congestion_event(
    r: &mut Congestion, bytes_in_flight: usize, _lost_bytes: usize,
    largest_lost_pkt: &Sent, now: Instant,
) {
    let time_sent = largest_lost_pkt.time_sent;
    let in_congestion_recovery = r.in_congestion_recovery(time_sent);

    // Start a new congestion event if packet was sent after the
    // start of the previous congestion recovery period.
    if !in_congestion_recovery {
        r.congestion_recovery_start_time = Some(now);

        // Fast convergence (RFC8312 Section 4.6): if cwnd never
        // recovered to the previous w_max, release bandwidth by
        // shrinking w_max further.
        if (r.congestion_window as f64) < r.cubic_state.w_max {
            r.cubic_state.w_max =
                r.congestion_window as f64 * (1.0 + BETA_CUBIC) / 2.0;
        } else {
            r.cubic_state.w_max = r.congestion_window as f64;
        }

        // Multiplicative decrease, floored at the minimum window.
        let ssthresh = (r.congestion_window as f64 * BETA_CUBIC) as usize;
        let ssthresh =
            cmp::max(ssthresh, r.max_datagram_size * MINIMUM_WINDOW_PACKETS);
        r.ssthresh.update(ssthresh, r.hystart.in_css());
        r.congestion_window = ssthresh;

        // K is 0 when the new cwnd already exceeds w_max; otherwise
        // compute it from Eq. 2 using the reduced cwnd.
        r.cubic_state.k = if r.cubic_state.w_max < r.congestion_window as f64 {
            0.0
        } else {
            r.cubic_state
                .cubic_k(r.congestion_window, r.max_datagram_size)
        };

        // Scale down any pending sub-MSS increment as well.
        r.cubic_state.cwnd_inc =
            (r.cubic_state.cwnd_inc as f64 * BETA_CUBIC) as usize;

        r.cubic_state.w_est = r.congestion_window as f64;
        r.cubic_state.alpha_aimd = ALPHA_AIMD;

        if r.hystart.in_css() {
            r.hystart.congestion_event();
        }

        r.prr.congestion_event(bytes_in_flight);
    }
}
401
402fn checkpoint(r: &mut Congestion) {
403    r.cubic_state.prior.congestion_window = r.congestion_window;
404    r.cubic_state.prior.ssthresh = r.ssthresh.get();
405    r.cubic_state.prior.w_max = r.cubic_state.w_max;
406    r.cubic_state.prior.k = r.cubic_state.k;
407    r.cubic_state.prior.epoch_start = r.congestion_recovery_start_time;
408    r.cubic_state.prior.lost_count = r.lost_count;
409}
410
411fn rollback(r: &mut Congestion) -> bool {
412    // Don't go back to slow start.
413    if r.cubic_state.prior.congestion_window < r.cubic_state.prior.ssthresh {
414        return false;
415    }
416
417    if r.congestion_window >= r.cubic_state.prior.congestion_window {
418        return false;
419    }
420
421    r.congestion_window = r.cubic_state.prior.congestion_window;
422    r.ssthresh.update(r.cubic_state.prior.ssthresh, false);
423    r.cubic_state.w_max = r.cubic_state.prior.w_max;
424    r.cubic_state.k = r.cubic_state.prior.k;
425    r.congestion_recovery_start_time = r.cubic_state.prior.epoch_start;
426
427    true
428}
429
#[cfg(feature = "qlog")]
fn state_str(r: &Congestion, now: Instant) -> &'static str {
    // Delegate qlog state naming to the shared Reno implementation.
    reno::state_str(r, now)
}
434
435fn debug_fmt(r: &Congestion, f: &mut std::fmt::Formatter) -> std::fmt::Result {
436    write!(
437        f,
438        "cubic={{ k={} w_max={} }} ",
439        r.cubic_state.k, r.cubic_state.w_max
440    )
441}
442
443#[cfg(test)]
444mod tests {
445    use super::*;
446
447    use crate::CongestionControlAlgorithm;
448
449    use crate::recovery::congestion::hystart;
450    use crate::recovery::congestion::recovery::LegacyRecovery;
451    use crate::recovery::congestion::test_sender::TestSender;
452    use crate::recovery::RecoveryOps;
453
    // CUBIC sender with the HyStart++ flag disabled (second argument).
    fn test_sender() -> TestSender {
        TestSender::new(CongestionControlAlgorithm::CUBIC, false)
    }
457
    // CUBIC sender with the HyStart++ flag enabled (second argument).
    fn hystart_test_sender() -> TestSender {
        TestSender::new(CongestionControlAlgorithm::CUBIC, true)
    }
461
    // A freshly created recovery using CUBIC starts with a non-zero
    // cwnd and nothing in flight.
    #[test]
    fn cubic_init() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(CongestionControlAlgorithm::CUBIC);

        let r = LegacyRecovery::new(&cfg);

        assert!(r.cwnd() > 0);
        assert_eq!(r.bytes_in_flight(), 0);
    }
472
    // One MSS-sized ACK during slow start grows cwnd by exactly one MSS.
    #[test]
    fn cubic_slow_start() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        // Send initcwnd full MSS packets to become no longer app limited
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }

        let cwnd_prev = sender.congestion_window;

        sender.ack_n_packets(1, size);

        // Check if cwnd increased by packet size (slow start)
        assert_eq!(sender.congestion_window, cwnd_prev + size);
    }
490
    // Slow-start growth scales linearly with the number of ACKed packets.
    #[test]
    fn cubic_slow_start_multi_acks() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        // Send initcwnd full MSS packets to become no longer app limited
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }

        let cwnd_prev = sender.congestion_window;

        sender.ack_n_packets(3, size);

        // Acked 3 packets, so cwnd grows by 3 MSS.
        assert_eq!(sender.congestion_window, cwnd_prev + size * 3);
    }
508
    // A loss event multiplies cwnd by BETA_CUBIC.
    #[test]
    fn cubic_congestion_event() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        sender.send_packet(size);

        let cwnd_prev = sender.congestion_window;

        sender.lose_n_packets(1, size, None);

        // In CUBIC, after a congestion event, cwnd is multiplied by
        // BETA_CUBIC (i.e. reduced by a factor of 1 - BETA_CUBIC).
        assert_eq!(
            cwnd_prev as f64 * BETA_CUBIC,
            sender.congestion_window as f64
        );
    }
527
    // After loss and recovery exit, cwnd grows on the cubic curve: it
    // takes several ACKs (of post-recovery packets) to gain one MSS.
    #[test]
    fn cubic_congestion_avoidance() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        let prev_cwnd = sender.congestion_window;

        // Send initcwnd full MSS packets to become no longer app limited
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }

        let rtt = Duration::from_millis(100);

        sender.advance_time(rtt);
        sender.lose_n_packets(1, size, None);

        // After congestion event, cwnd will be reduced.
        let cur_cwnd = (prev_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(sender.congestion_window, cur_cwnd);

        // Shift current time by 1 RTT.
        sender.update_rtt(rtt);
        // Exit from the recovery.
        sender.advance_time(rtt);

        sender.ack_n_packets(sender.initial_congestion_window_packets - 2, size);
        // Only packets sent after exiting recovery can increase cwnd.
        assert_eq!(sender.congestion_window, cur_cwnd);

        for _ in 0..8 {
            sender.send_packet(size);
        }
        sender.advance_time(rtt);

        sender.ack_n_packets(1, size);
        // Only packets sent after exiting recovery can increase cwnd.
        assert_eq!(sender.congestion_window, cur_cwnd);

        // During Congestion Avoidance, it will take
        // 6 ACKs to increase cwnd by 1 MSS.
        for _ in 0..6 {
            sender.ack_n_packets(1, size);
            sender.advance_time(rtt);
        }

        assert_eq!(sender.congestion_window, cur_cwnd + size);
    }
576
    // HyStart++: rising RTTs move the sender into CSS, and a later
    // round with a lower RTT sends it back to regular Slow Start.
    #[test]
    fn cubic_hystart_css_to_ss() {
        let mut sender = hystart_test_sender();
        let size = sender.max_datagram_size;

        // 1st round.
        let n_rtt_sample = hystart::N_RTT_SAMPLE;

        let rtt_1st = Duration::from_millis(50);

        let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
        sender.hystart.start_round(next_rnd);
        // Send 1st round packets.
        for _ in 0..n_rtt_sample {
            sender.send_packet(size);
        }
        sender.update_app_limited(false);

        // Receiving Acks.
        sender.advance_time(rtt_1st);
        sender.update_rtt(rtt_1st);
        sender.ack_n_packets(n_rtt_sample, size);

        // Not in CSS yet.
        assert!(sender.hystart.css_start_time().is_none());

        // 2nd round.
        let mut rtt_2nd = Duration::from_millis(100);

        sender.advance_time(rtt_2nd);

        let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
        sender.hystart.start_round(next_rnd);
        // Send 2nd round packets.
        for _ in 0..n_rtt_sample {
            sender.send_packet(size);
        }
        sender.update_app_limited(false);

        // Receiving Acks.
        // The last ACK will cause the sender to exit to CSS.
        let mut cwnd_prev = sender.congestion_window();

        for _ in 0..n_rtt_sample {
            cwnd_prev = sender.congestion_window();
            sender.update_rtt(rtt_2nd);
            sender.ack_n_packets(1, size);
            // Keep increasing RTT so that hystart exits to CSS.
            // NOTE(review): this roughly triples rtt_2nd per iteration
            // (x += x + 4ms); confirm this growth rate is intended
            // rather than x += 4ms.
            rtt_2nd += rtt_2nd.saturating_add(Duration::from_millis(4));
        }

        // Now we are in CSS.
        assert!(sender.hystart.css_start_time().is_some());
        assert_eq!(sender.congestion_window(), cwnd_prev + size);

        // 3rd round, whose RTT is less than the previous round, to
        // trigger the fallback to Slow Start.
        let rtt_3rd = Duration::from_millis(80);
        sender.advance_time(rtt_3rd);
        cwnd_prev = sender.congestion_window();

        let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
        sender.hystart.start_round(next_rnd);
        // Send 3rd round packets.
        for _ in 0..n_rtt_sample {
            sender.send_packet(size);
        }
        sender.update_app_limited(false);

        // Receiving Acks.
        // The last ACK will cause the sender to exit back to Slow Start.
        sender.update_rtt(rtt_3rd);
        sender.ack_n_packets(n_rtt_sample, size);

        // Now we are back in Slow Start.
        assert!(sender.hystart.css_start_time().is_none());
        assert_eq!(
            sender.congestion_window(),
            cwnd_prev +
                size / hystart::CSS_GROWTH_DIVISOR * hystart::N_RTT_SAMPLE
        );
    }
659
    // HyStart++: after entering CSS, completing CSS_ROUNDS rounds there
    // exits into congestion avoidance (cwnd == ssthresh).
    #[test]
    fn cubic_hystart_css_to_ca() {
        let mut sender = hystart_test_sender();
        let size = sender.max_datagram_size;

        // 1st round.
        let n_rtt_sample = hystart::N_RTT_SAMPLE;

        let rtt_1st = Duration::from_millis(50);

        let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
        sender.hystart.start_round(next_rnd);
        // Send 1st round packets.
        for _ in 0..n_rtt_sample {
            sender.send_packet(size);
        }
        sender.update_app_limited(false);

        // Receiving Acks.
        sender.advance_time(rtt_1st);
        sender.update_rtt(rtt_1st);
        sender.ack_n_packets(n_rtt_sample, size);

        // Not in CSS yet.
        assert!(sender.hystart.css_start_time().is_none());

        // 2nd round.
        let mut rtt_2nd = Duration::from_millis(100);
        sender.advance_time(rtt_2nd);
        // Send 2nd round packets.
        let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
        sender.hystart.start_round(next_rnd);
        for _ in 0..n_rtt_sample {
            sender.send_packet(size);
        }
        sender.update_app_limited(false);

        // Receiving Acks.
        // The last ACK will cause the sender to exit to CSS.
        let mut cwnd_prev = sender.congestion_window();

        for _ in 0..n_rtt_sample {
            cwnd_prev = sender.congestion_window();
            sender.update_rtt(rtt_2nd);
            sender.ack_n_packets(1, size);
            // Keep increasing RTT so that hystart exits to CSS.
            rtt_2nd += rtt_2nd.saturating_add(Duration::from_millis(4));
        }

        // Now we are in CSS.
        assert!(sender.hystart.css_start_time().is_some());
        assert_eq!(sender.congestion_window(), cwnd_prev + size);

        // Run 5 (CSS_ROUNDS) in CSS, to exit to congestion avoidance.
        let rtt_css = Duration::from_millis(100);
        sender.advance_time(rtt_css);

        for _ in 0..hystart::CSS_ROUNDS {
            // Send a round of packets.
            let next_rnd = sender.next_pkt + n_rtt_sample as u64 - 1;
            sender.hystart.start_round(next_rnd);
            for _ in 0..n_rtt_sample {
                sender.send_packet(size);
            }
            sender.update_app_limited(false);

            // Receiving Acks.
            sender.update_rtt(rtt_css);
            sender.ack_n_packets(n_rtt_sample, size);
        }
        // Now we are in congestion avoidance.
        assert_eq!(sender.congestion_window(), sender.ssthresh.get());
    }
733
    // Spurious-loss detection: a recovery episode from slow start is
    // not rolled back, while one from congestion avoidance restores the
    // checkpointed cwnd.
    #[test]
    fn cubic_spurious_congestion_event() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        let prev_cwnd = sender.congestion_window();

        // Send initcwnd full MSS packets to become no longer app limited
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }
        sender.lose_n_packets(1, size, None);

        // After congestion event, cwnd will be reduced.
        let cur_cwnd = (prev_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(sender.congestion_window(), cur_cwnd);

        // Ack more than cwnd bytes with rtt=100ms
        let rtt = Duration::from_millis(100);
        sender.update_rtt(rtt);

        let acked = Acked {
            pkt_num: 0,
            // To exit from recovery
            time_sent: sender.time + rtt,
            size,
            delivered: 0,
            delivered_time: sender.time,
            first_sent_time: sender.time,
            is_app_limited: false,
            rtt: Duration::ZERO,
        };

        // Trigger detecting spurious congestion event
        sender.inject_ack(acked, sender.time + rtt + Duration::from_millis(5));

        // This is from slow start, no rollback.
        assert_eq!(sender.congestion_window(), cur_cwnd);

        sender.advance_time(rtt);

        let prev_cwnd = sender.congestion_window();

        sender.lose_n_packets(1, size, Some(sender.time));

        // After congestion event, cwnd will be reduced.
        let cur_cwnd = (cur_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(sender.congestion_window(), cur_cwnd);

        sender.advance_time(rtt + Duration::from_millis(5));

        let acked = Acked {
            pkt_num: 0,
            // To exit from recovery
            time_sent: sender.time + rtt,
            size,
            delivered: 0,
            delivered_time: sender.time,
            first_sent_time: sender.time,
            is_app_limited: false,
            rtt: Duration::ZERO,
        };

        // Trigger detecting spurious congestion event.
        sender.inject_ack(acked, sender.time + rtt + Duration::from_millis(5));

        // cwnd is rolled back to the previous one.
        assert_eq!(sender.congestion_window(), prev_cwnd);
    }
803
    // Fast convergence: when a second loss hits before cwnd recovers to
    // w_max, w_max shrinks to cwnd * (1 + BETA_CUBIC) / 2 rather than
    // just cwnd.
    #[test]
    fn cubic_fast_convergence() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        let prev_cwnd = sender.congestion_window;

        // Send initcwnd full MSS packets to become no longer app limited
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }

        // Trigger congestion event to update ssthresh
        sender.lose_n_packets(1, size, None);

        // After 1st congestion event, cwnd will be reduced.
        let cur_cwnd = (prev_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(sender.congestion_window, cur_cwnd);

        // Shift current time by 1 RTT.
        let rtt = Duration::from_millis(100);
        sender.update_rtt(rtt);
        // Exit from the recovery.
        sender.advance_time(rtt);

        // Drain most packets in flight except for 1 to avoid adjustments to
        // cubic.last_sent_time when idle, see on_packet_sent.
        sender.ack_n_packets(sender.initial_congestion_window_packets - 2, size);

        // During Congestion Avoidance, it will take 6 ACKs to
        // increase cwnd by 1 MSS.  But ACKed packets must have been
        // sent after the loss event.
        for _ in 0..7 {
            sender.send_packet(size);
        }
        // ACK the packet that was sent before the loss event.
        sender.ack_n_packets(1, size);
        sender.advance_time(rtt);
        for _ in 0..6 {
            sender.ack_n_packets(1, size);
        }

        assert_eq!(sender.congestion_window, cur_cwnd + size);

        let prev_cwnd = sender.congestion_window;

        // Fast convergence: now there is a 2nd congestion event and
        // cwnd is not fully recovered to w_max, so w_max will be
        // further reduced.
        sender.lose_n_packets(1, size, None);

        // After 2nd congestion event, cwnd will be reduced.
        let cur_cwnd = (prev_cwnd as f64 * BETA_CUBIC) as usize;
        assert_eq!(sender.congestion_window, cur_cwnd);

        // w_max will be further reduced, not prev_cwnd
        assert_eq!(
            sender.cubic_state.w_max,
            prev_cwnd as f64 * (1.0 + BETA_CUBIC) / 2.0
        );
    }
865
    // Regression test for the idle-restart fix in on_packet_sent():
    // without it, cwnd stays trapped at the post-loss minimum.
    #[test]
    fn cubic_perpetual_recovery_trap() {
        // Reproduces the bug where cwnd stays pinned at minimum after a
        // loss event because the idle-time epoch shift pushes
        // congestion_recovery_start_time into the future on every
        // send -> ACK -> send cycle when bif transiently hits 0.
        let mut sender = test_sender();
        let size = sender.max_datagram_size;
        let rtt = Duration::from_millis(100);

        // Fill the pipe.
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }

        sender.update_rtt(rtt);
        sender.advance_time(rtt);

        // Trigger a loss to enter recovery and reduce cwnd.
        let initial_cwnd = sender.congestion_window;
        sender.lose_n_packets(1, size, None);
        let post_loss_cwnd = sender.congestion_window;
        assert_eq!(post_loss_cwnd, (initial_cwnd as f64 * BETA_CUBIC) as usize);
        let initial_recovery_start_time =
            sender.congestion_recovery_start_time.unwrap();
        assert_eq!(initial_recovery_start_time, sender.time);

        // ACK remaining in-flight packets to exit recovery.
        sender.ack_n_packets(sender.initial_congestion_window_packets - 1, size);
        assert_eq!(sender.bytes_in_flight, 0);

        // Now simulate the problematic pattern: send a small burst at
        // minimum cwnd, ACK it (bif drops to 0), advance one RTT, repeat.
        // With the bug, recovery_start_time would advance on every
        // cycle, trapping cwnd.  With the fix, cwnd must grow.
        // Burst size exactly fills the reduced cwnd.
        let packets_per_burst = cmp::max(1, sender.congestion_window / size);

        for _ in 0..20 {
            for _ in 0..packets_per_burst {
                sender.send_packet(size);
            }

            sender.advance_time(rtt);
            sender.ack_n_packets(packets_per_burst, size);
            assert_eq!(sender.bytes_in_flight, 0);
        }

        // cwnd must have grown beyond the post-loss value.
        assert!(
            sender.congestion_window > post_loss_cwnd,
            "cwnd stuck at {} (post-loss {}): perpetual recovery trap",
            sender.congestion_window,
            post_loss_cwnd,
        );
        // Recovery start hasn't changed.
        assert_eq!(
            sender.congestion_recovery_start_time.unwrap() -
                initial_recovery_start_time,
            Duration::ZERO,
            "epoch should not have shifted without idle gap",
        );
    }
928
929    #[test]
930    fn cubic_genuine_idle_epoch_shift() {
931        // After a loss + recovery, a long idle period (seconds) should
932        // shift the epoch so that cwnd growth resumes from where it
933        // left off rather than jumping.
934        let mut sender = test_sender();
935        let test_start = sender.time;
936        let size = sender.max_datagram_size;
937        let rtt = Duration::from_millis(100);
938
939        // Fill the pipe and trigger loss.
940        for _ in 0..sender.initial_congestion_window_packets {
941            sender.send_packet(size);
942        }
943        sender.update_rtt(rtt);
944        sender.advance_time(rtt);
945        sender.lose_n_packets(1, size, None);
946
947        let initial_recovery_start =
948            sender.congestion_recovery_start_time.unwrap();
949        assert_eq!(
950            initial_recovery_start,
951            test_start + rtt,
952            "Recovery start is set",
953        );
954
955        let post_loss_cwnd = sender.congestion_window;
956
957        // ACK remaining to exit recovery.
958        sender.ack_n_packets(sender.initial_congestion_window_packets - 1, size);
959
960        // App-limited like behavior
961        // Long idle: 5 seconds of silence.
962        let idle_duration = Duration::from_secs(5);
963        sender.advance_time(idle_duration);
964
965        // Resume sending and ACKing.
966        let packets_per_burst = cmp::max(1, sender.congestion_window / size);
967
968        for _ in 0..packets_per_burst {
969            sender.send_packet(size);
970        }
971        sender.advance_time(rtt);
972        sender.ack_n_packets(packets_per_burst, size);
973
974        // After one RTT of sending post-idle, cwnd should not have
975        // shrunk.
976        assert_eq!(sender.congestion_window, post_loss_cwnd);
977
978        // Verify recovery_start_time was shifted forward (i.e., the
979        // idle shift logic ran).
980        let recovery_start = sender.congestion_recovery_start_time.unwrap();
981        assert!(
982            recovery_start >
983                sender.cubic_state.last_sent_time.unwrap() - idle_duration,
984            "epoch should have been shifted forward by idle period",
985        );
986        assert_eq!(
987            recovery_start - sender.cubic_state.last_sent_time.unwrap(),
988            Duration::ZERO,
989            "epoch should have been shifted forward by idle period",
990        );
991        assert_eq!(
992            recovery_start - initial_recovery_start,
993            idle_duration,
994            "Recovery start should have moved forward by the idle duration",
995        );
996    }
997
998    #[test]
999    fn cubic_zero_bytes_sent_no_epoch_shift() {
1000        // A zero-byte packet (e.g. PADDING-only) must NOT shift
1001        // congestion_recovery_start_time or update last_sent_time.
1002        let mut sender = test_sender();
1003        let size = sender.max_datagram_size;
1004        let rtt = Duration::from_millis(100);
1005
1006        // Fill pipe, trigger loss to set congestion_recovery_start_time.
1007        for _ in 0..sender.initial_congestion_window_packets {
1008            sender.send_packet(size);
1009        }
1010        sender.update_rtt(rtt);
1011        sender.advance_time(rtt);
1012        sender.lose_n_packets(1, size, None);
1013
1014        // ACK everything → bif = 0.
1015        sender.ack_n_packets(sender.initial_congestion_window_packets - 1, size);
1016        assert_eq!(sender.bytes_in_flight, 0);
1017
1018        // Record state before zero-byte send.
1019        let recovery_start_before = sender.congestion_recovery_start_time;
1020        let last_sent_before = sender.cubic_state.last_sent_time;
1021
1022        // Advance time so any shift would be visible.
1023        sender.advance_time(Duration::from_millis(500));
1024
1025        // Send a zero-byte packet (simulates PADDING-only).
1026        sender.send_packet(0);
1027
1028        // Neither recovery_start_time nor last_sent_time should change.
1029        assert_eq!(
1030            sender.congestion_recovery_start_time, recovery_start_before,
1031            "recovery_start_time must not shift on zero-byte send",
1032        );
1033        assert_eq!(
1034            sender.cubic_state.last_sent_time, last_sent_before,
1035            "last_sent_time must not update on zero-byte send",
1036        );
1037    }
1038}