// quiche/recovery/congestion/reno.rs

// Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright notice,
//       this list of conditions and the following disclaimer.
//
//     * Redistributions in binary form must reproduce the above copyright
//       notice, this list of conditions and the following disclaimer in the
//       documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

//! Reno Congestion Control
//!
//! Note that Slow Start can use HyStart++ when enabled.

31use std::cmp;
32use std::time::Instant;
33
34use super::rtt::RttStats;
35use super::Acked;
36use super::Sent;
37
38use super::Congestion;
39use super::CongestionControlOps;
40use crate::recovery::LOSS_REDUCTION_FACTOR;
41use crate::recovery::MINIMUM_WINDOW_PACKETS;
42
43pub(crate) static RENO: CongestionControlOps = CongestionControlOps {
44    on_init,
45    on_packet_sent,
46    on_packets_acked,
47    congestion_event,
48    checkpoint,
49    rollback,
50    has_custom_pacing,
51    debug_fmt,
52};
53
54pub fn on_init(_r: &mut Congestion) {}
55
56pub fn on_packet_sent(
57    _r: &mut Congestion, _sent_bytes: usize, _bytes_in_flight: usize,
58    _now: Instant,
59) {
60}
61
62fn on_packets_acked(
63    r: &mut Congestion, _bytes_in_flight: usize, packets: &mut Vec<Acked>,
64    now: Instant, rtt_stats: &RttStats,
65) {
66    for pkt in packets.drain(..) {
67        on_packet_acked(r, &pkt, now, rtt_stats);
68    }
69}
70
71fn on_packet_acked(
72    r: &mut Congestion, packet: &Acked, now: Instant, rtt_stats: &RttStats,
73) {
74    if r.in_congestion_recovery(packet.time_sent) {
75        return;
76    }
77
78    if r.app_limited {
79        return;
80    }
81
82    if r.congestion_window < r.ssthresh {
83        // In Slow slart, bytes_acked_sl is used for counting
84        // acknowledged bytes.
85        r.bytes_acked_sl += packet.size;
86
87        if r.hystart.in_css() {
88            r.congestion_window += r.hystart.css_cwnd_inc(r.max_datagram_size);
89        } else {
90            r.congestion_window += r.max_datagram_size;
91        }
92
93        if r.hystart.on_packet_acked(packet, rtt_stats.latest_rtt, now) {
94            // Exit to congestion avoidance if CSS ends.
95            r.ssthresh = r.congestion_window;
96        }
97    } else {
98        // Congestion avoidance.
99        r.bytes_acked_ca += packet.size;
100
101        if r.bytes_acked_ca >= r.congestion_window {
102            r.bytes_acked_ca -= r.congestion_window;
103            r.congestion_window += r.max_datagram_size;
104        }
105    }
106}
107
108fn congestion_event(
109    r: &mut Congestion, _bytes_in_flight: usize, _lost_bytes: usize,
110    largest_lost_pkt: &Sent, now: Instant,
111) {
112    // Start a new congestion event if packet was sent after the
113    // start of the previous congestion recovery period.
114    let time_sent = largest_lost_pkt.time_sent;
115
116    if !r.in_congestion_recovery(time_sent) {
117        r.congestion_recovery_start_time = Some(now);
118
119        r.congestion_window =
120            (r.congestion_window as f64 * LOSS_REDUCTION_FACTOR) as usize;
121
122        r.congestion_window = cmp::max(
123            r.congestion_window,
124            r.max_datagram_size * MINIMUM_WINDOW_PACKETS,
125        );
126
127        r.bytes_acked_ca =
128            (r.congestion_window as f64 * LOSS_REDUCTION_FACTOR) as usize;
129
130        r.ssthresh = r.congestion_window;
131
132        if r.hystart.in_css() {
133            r.hystart.congestion_event();
134        }
135    }
136}
137
138fn checkpoint(_r: &mut Congestion) {}
139
140fn rollback(_r: &mut Congestion) -> bool {
141    true
142}
143
/// Reno relies on the generic pacer rather than providing its own.
fn has_custom_pacing() -> bool {
    false
}

148fn debug_fmt(_r: &Congestion, _f: &mut std::fmt::Formatter) -> std::fmt::Result {
149    Ok(())
150}
151
#[cfg(test)]
mod tests {
    use crate::CongestionControlAlgorithm;

    use super::*;

    use crate::recovery::congestion::recovery::LegacyRecovery;
    use crate::recovery::congestion::test_sender::TestSender;
    use crate::recovery::RecoveryOps;

    use std::time::Duration;

    /// Builds a test sender driven by the Reno controller (HyStart
    /// disabled).
    fn test_sender() -> TestSender {
        TestSender::new(CongestionControlAlgorithm::Reno, false)
    }

    #[test]
    fn reno_init() {
        let mut cfg = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
        cfg.set_cc_algorithm(CongestionControlAlgorithm::Reno);

        let r = LegacyRecovery::new(&cfg);

        // A fresh recovery state has a non-empty window and nothing in
        // flight.
        assert!(r.cwnd() > 0);
        assert_eq!(r.bytes_in_flight, 0);
    }

    #[test]
    fn reno_slow_start() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        // Fill the initial window so the sender stops being app limited.
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }

        let prev_cwnd = sender.congestion_window;

        sender.ack_n_packets(1, size);

        // In slow start, each acked packet grows cwnd by one MSS.
        assert_eq!(sender.congestion_window, prev_cwnd + size);
    }

    #[test]
    fn reno_slow_start_multi_acks() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        // Fill the initial window so the sender stops being app limited.
        for _ in 0..sender.initial_congestion_window_packets {
            sender.send_packet(size);
        }

        let prev_cwnd = sender.congestion_window;

        sender.ack_n_packets(3, size);

        // Three acked packets grow cwnd by three MSS.
        assert_eq!(sender.congestion_window, prev_cwnd + size * 3);
    }

    #[test]
    fn reno_congestion_event() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        let prev_cwnd = sender.congestion_window;

        sender.send_packet(size);
        sender.lose_n_packets(1, size, None);

        // A loss event cuts the Reno window in half.
        assert_eq!(prev_cwnd / 2, sender.congestion_window);
    }

    #[test]
    fn reno_congestion_avoidance() {
        let mut sender = test_sender();
        let size = sender.max_datagram_size;

        // Send several full-MSS packets so the sender is not app limited.
        for _ in 0..14 {
            sender.send_packet(size);
        }

        let prev_cwnd = sender.congestion_window;

        sender.lose_n_packets(1, size, None);

        // The loss shrinks the window by the reduction factor.
        let cur_cwnd = (prev_cwnd as f64 * LOSS_REDUCTION_FACTOR) as usize;
        assert_eq!(sender.congestion_window, cur_cwnd);

        let rtt = Duration::from_millis(100);
        sender.update_rtt(rtt);
        sender.advance_time(2 * rtt);

        // Acking at least a full window's worth of bytes in congestion
        // avoidance grows cwnd by exactly one MSS.
        sender.ack_n_packets(8, size);
        assert_eq!(sender.congestion_window, cur_cwnd + size);
    }
}