quiche/lib.rs
1// Copyright (C) 2018-2019, Cloudflare, Inc.
2// All rights reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// * Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// * Redistributions in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
16// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
19// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27//! 🥧 Savoury implementation of the QUIC transport protocol and HTTP/3.
28//!
29//! [quiche] is an implementation of the QUIC transport protocol and HTTP/3 as
30//! specified by the [IETF]. It provides a low level API for processing QUIC
31//! packets and handling connection state. The application is responsible for
32//! providing I/O (e.g. sockets handling) as well as an event loop with support
33//! for timers.
34//!
35//! [quiche]: https://github.com/cloudflare/quiche/
36//! [ietf]: https://quicwg.org/
37//!
38//! ## Configuring connections
39//!
40//! The first step in establishing a QUIC connection using quiche is creating a
41//! [`Config`] object:
42//!
43//! ```
44//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
45//! config.set_application_protos(&[b"example-proto"]);
46//!
47//! // Additional configuration specific to application and use case...
48//! # Ok::<(), quiche::Error>(())
49//! ```
50//!
51//! The [`Config`] object controls important aspects of the QUIC connection such
52//! as QUIC version, ALPN IDs, flow control, congestion control, idle timeout
53//! and other properties or features.
54//!
55//! QUIC is a general-purpose transport protocol and there are several
56//! configuration properties where there is no reasonable default value. For
57//! example, the permitted number of concurrent streams of any particular type
58//! is dependent on the application running over QUIC, and other use-case
59//! specific concerns.
60//!
//! quiche defaults several properties to zero; applications most likely need
62//! to set these to something else to satisfy their needs using the following:
63//!
64//! - [`set_initial_max_streams_bidi()`]
65//! - [`set_initial_max_streams_uni()`]
66//! - [`set_initial_max_data()`]
67//! - [`set_initial_max_stream_data_bidi_local()`]
68//! - [`set_initial_max_stream_data_bidi_remote()`]
69//! - [`set_initial_max_stream_data_uni()`]
70//!
//! [`Config`] also holds TLS configuration. This can be changed by mutators on
//! an existing object, or by constructing a TLS context manually and
73//! creating a configuration using [`with_boring_ssl_ctx_builder()`].
74//!
75//! A configuration object can be shared among multiple connections.
76//!
77//! ### Connection setup
78//!
79//! On the client-side the [`connect()`] utility function can be used to create
80//! a new connection, while [`accept()`] is for servers:
81//!
82//! ```
83//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
84//! # let server_name = "quic.tech";
85//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
86//! # let peer = "127.0.0.1:1234".parse().unwrap();
87//! # let local = "127.0.0.1:4321".parse().unwrap();
88//! // Client connection.
89//! let conn =
90//! quiche::connect(Some(&server_name), &scid, local, peer, &mut config)?;
91//!
92//! // Server connection.
93//! # let peer = "127.0.0.1:1234".parse().unwrap();
94//! # let local = "127.0.0.1:4321".parse().unwrap();
95//! let conn = quiche::accept(&scid, None, local, peer, &mut config)?;
96//! # Ok::<(), quiche::Error>(())
97//! ```
98//!
99//! In both cases, the application is responsible for generating a new source
100//! connection ID that will be used to identify the new connection.
101//!
//! The application also needs to pass the address of the remote peer of the
103//! connection: in the case of a client that would be the address of the server
104//! it is trying to connect to, and for a server that is the address of the
105//! client that initiated the connection.
106//!
107//! ## Handling incoming packets
108//!
109//! Using the connection's [`recv()`] method the application can process
110//! incoming packets that belong to that connection from the network:
111//!
112//! ```no_run
113//! # let mut buf = [0; 512];
114//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
115//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
116//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
117//! # let peer = "127.0.0.1:1234".parse().unwrap();
118//! # let local = "127.0.0.1:4321".parse().unwrap();
119//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
120//! let to = socket.local_addr().unwrap();
121//!
122//! loop {
123//! let (read, from) = socket.recv_from(&mut buf).unwrap();
124//!
125//! let recv_info = quiche::RecvInfo { from, to };
126//!
127//! let read = match conn.recv(&mut buf[..read], recv_info) {
128//! Ok(v) => v,
129//!
130//! Err(quiche::Error::Done) => {
131//! // Done reading.
132//! break;
133//! },
134//!
135//! Err(e) => {
136//! // An error occurred, handle it.
137//! break;
138//! },
139//! };
140//! }
141//! # Ok::<(), quiche::Error>(())
142//! ```
143//!
144//! The application has to pass a [`RecvInfo`] structure in order to provide
145//! additional information about the received packet (such as the address it
146//! was received from).
147//!
148//! ## Generating outgoing packets
149//!
//! Outgoing packets are generated using the connection's [`send()`] method
151//! instead:
152//!
153//! ```no_run
154//! # let mut out = [0; 512];
155//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
156//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
157//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
158//! # let peer = "127.0.0.1:1234".parse().unwrap();
159//! # let local = "127.0.0.1:4321".parse().unwrap();
160//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
161//! loop {
162//! let (write, send_info) = match conn.send(&mut out) {
163//! Ok(v) => v,
164//!
165//! Err(quiche::Error::Done) => {
166//! // Done writing.
167//! break;
168//! },
169//!
170//! Err(e) => {
171//! // An error occurred, handle it.
172//! break;
173//! },
174//! };
175//!
176//! socket.send_to(&out[..write], &send_info.to).unwrap();
177//! }
178//! # Ok::<(), quiche::Error>(())
179//! ```
180//!
181//! The application will be provided with a [`SendInfo`] structure providing
182//! additional information about the newly created packet (such as the address
183//! the packet should be sent to).
184//!
185//! When packets are sent, the application is responsible for maintaining a
186//! timer to react to time-based connection events. The timer expiration can be
187//! obtained using the connection's [`timeout()`] method.
188//!
189//! ```
190//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
191//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
192//! # let peer = "127.0.0.1:1234".parse().unwrap();
193//! # let local = "127.0.0.1:4321".parse().unwrap();
194//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
195//! let timeout = conn.timeout();
196//! # Ok::<(), quiche::Error>(())
197//! ```
198//!
199//! The application is responsible for providing a timer implementation, which
200//! can be specific to the operating system or networking framework used. When
201//! a timer expires, the connection's [`on_timeout()`] method should be called,
202//! after which additional packets might need to be sent on the network:
203//!
204//! ```no_run
205//! # let mut out = [0; 512];
206//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
207//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
208//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
209//! # let peer = "127.0.0.1:1234".parse().unwrap();
210//! # let local = "127.0.0.1:4321".parse().unwrap();
211//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
212//! // Timeout expired, handle it.
213//! conn.on_timeout();
214//!
215//! // Send more packets as needed after timeout.
216//! loop {
217//! let (write, send_info) = match conn.send(&mut out) {
218//! Ok(v) => v,
219//!
220//! Err(quiche::Error::Done) => {
221//! // Done writing.
222//! break;
223//! },
224//!
225//! Err(e) => {
226//! // An error occurred, handle it.
227//! break;
228//! },
229//! };
230//!
231//! socket.send_to(&out[..write], &send_info.to).unwrap();
232//! }
233//! # Ok::<(), quiche::Error>(())
234//! ```
235//!
236//! ### Pacing
237//!
238//! It is recommended that applications [pace] sending of outgoing packets to
239//! avoid creating packet bursts that could cause short-term congestion and
240//! losses in the network.
241//!
242//! quiche exposes pacing hints for outgoing packets through the [`at`] field
243//! of the [`SendInfo`] structure that is returned by the [`send()`] method.
244//! This field represents the time when a specific packet should be sent into
245//! the network.
246//!
247//! Applications can use these hints by artificially delaying the sending of
248//! packets through platform-specific mechanisms (such as the [`SO_TXTIME`]
249//! socket option on Linux), or custom methods (for example by using user-space
250//! timers).
251//!
252//! [pace]: https://datatracker.ietf.org/doc/html/rfc9002#section-7.7
253//! [`SO_TXTIME`]: https://man7.org/linux/man-pages/man8/tc-etf.8.html
254//!
255//! ## Sending and receiving stream data
256//!
257//! After some back and forth, the connection will complete its handshake and
258//! will be ready for sending or receiving application data.
259//!
260//! Data can be sent on a stream by using the [`stream_send()`] method:
261//!
262//! ```no_run
263//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
264//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
265//! # let peer = "127.0.0.1:1234".parse().unwrap();
266//! # let local = "127.0.0.1:4321".parse().unwrap();
267//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
268//! if conn.is_established() {
269//! // Handshake completed, send some data on stream 0.
270//! conn.stream_send(0, b"hello", true)?;
271//! }
272//! # Ok::<(), quiche::Error>(())
273//! ```
274//!
275//! The application can check whether there are any readable streams by using
276//! the connection's [`readable()`] method, which returns an iterator over all
277//! the streams that have outstanding data to read.
278//!
279//! The [`stream_recv()`] method can then be used to retrieve the application
280//! data from the readable stream:
281//!
282//! ```no_run
283//! # let mut buf = [0; 512];
284//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
285//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
286//! # let peer = "127.0.0.1:1234".parse().unwrap();
287//! # let local = "127.0.0.1:4321".parse().unwrap();
288//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
289//! if conn.is_established() {
290//! // Iterate over readable streams.
291//! for stream_id in conn.readable() {
292//! // Stream is readable, read until there's no more data.
293//! while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
294//! println!("Got {} bytes on stream {}", read, stream_id);
295//! }
296//! }
297//! }
298//! # Ok::<(), quiche::Error>(())
299//! ```
300//!
301//! ## HTTP/3
302//!
303//! The quiche [HTTP/3 module] provides a high level API for sending and
304//! receiving HTTP requests and responses on top of the QUIC transport protocol.
305//!
306//! [`Config`]: https://docs.quic.tech/quiche/struct.Config.html
307//! [`set_initial_max_streams_bidi()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_streams_bidi
308//! [`set_initial_max_streams_uni()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_streams_uni
309//! [`set_initial_max_data()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_data
310//! [`set_initial_max_stream_data_bidi_local()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_bidi_local
311//! [`set_initial_max_stream_data_bidi_remote()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_bidi_remote
312//! [`set_initial_max_stream_data_uni()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_uni
313//! [`with_boring_ssl_ctx_builder()`]: https://docs.quic.tech/quiche/struct.Config.html#method.with_boring_ssl_ctx_builder
314//! [`connect()`]: fn.connect.html
315//! [`accept()`]: fn.accept.html
316//! [`recv()`]: struct.Connection.html#method.recv
317//! [`RecvInfo`]: struct.RecvInfo.html
318//! [`send()`]: struct.Connection.html#method.send
319//! [`SendInfo`]: struct.SendInfo.html
320//! [`at`]: struct.SendInfo.html#structfield.at
321//! [`timeout()`]: struct.Connection.html#method.timeout
322//! [`on_timeout()`]: struct.Connection.html#method.on_timeout
323//! [`stream_send()`]: struct.Connection.html#method.stream_send
324//! [`readable()`]: struct.Connection.html#method.readable
325//! [`stream_recv()`]: struct.Connection.html#method.stream_recv
326//! [HTTP/3 module]: h3/index.html
327//!
328//! ## Congestion Control
329//!
330//! The quiche library provides a high-level API for configuring which
331//! congestion control algorithm to use throughout the QUIC connection.
332//!
333//! When a QUIC connection is created, the application can optionally choose
334//! which CC algorithm to use. See [`CongestionControlAlgorithm`] for currently
335//! available congestion control algorithms.
336//!
337//! For example:
338//!
339//! ```
340//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
341//! config.set_cc_algorithm(quiche::CongestionControlAlgorithm::Reno);
342//! ```
343//!
344//! Alternatively, you can configure the congestion control algorithm to use
345//! by its name.
346//!
347//! ```
348//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
349//! config.set_cc_algorithm_name("reno").unwrap();
350//! ```
351//!
352//! Note that the CC algorithm should be configured before calling [`connect()`]
353//! or [`accept()`]. Otherwise the connection will use a default CC algorithm.
354//!
355//! [`CongestionControlAlgorithm`]: enum.CongestionControlAlgorithm.html
356//!
357//! ## Feature flags
358//!
359//! quiche defines a number of [feature flags] to reduce the amount of compiled
360//! code and dependencies:
361//!
362//! * `boringssl-boring-crate` (default): Use the BoringSSL library provided by
363//! the [boring] crate.
364//!
365//! * `pkg-config-meta`: Generate pkg-config metadata file for libquiche.
366//!
367//! * `ffi`: Build and expose the FFI API.
368//!
369//! * `qlog`: Enable support for the [qlog] logging format.
370//!
371//! * `custom-client-dcid`: Allow clients to supply a custom DCID when
372//! initiating a connection. Dangerous if the DCID does not meet QUIC's
373//! unpredictability and length requirements.
374//!
375//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section
376//! [boring]: https://crates.io/crates/boring
377//! [qlog]: https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema
378
379#![allow(clippy::upper_case_acronyms)]
380#![warn(missing_docs)]
381#![warn(unused_qualifications)]
382#![cfg_attr(docsrs, feature(doc_cfg))]
383
384#[macro_use]
385extern crate log;
386
387use std::cmp;
388
389use std::collections::VecDeque;
390
391use debug_panic::debug_panic;
392
393use std::net::SocketAddr;
394
395use std::str::FromStr;
396
397use std::sync::Arc;
398
399use std::time::Duration;
400use std::time::Instant;
401
402#[cfg(feature = "qlog")]
403use qlog::events::quic::DataMovedAdditionalInfo;
404#[cfg(feature = "qlog")]
405use qlog::events::quic::QuicEventType;
406#[cfg(feature = "qlog")]
407use qlog::events::quic::TransportInitiator;
408#[cfg(feature = "qlog")]
409use qlog::events::DataRecipient;
410#[cfg(feature = "qlog")]
411use qlog::events::Event;
412#[cfg(feature = "qlog")]
413use qlog::events::EventData;
414#[cfg(feature = "qlog")]
415use qlog::events::EventImportance;
416#[cfg(feature = "qlog")]
417use qlog::events::EventType;
418#[cfg(feature = "qlog")]
419use qlog::events::RawInfo;
420
421use smallvec::SmallVec;
422
423use crate::buffers::DefaultBufFactory;
424
425use crate::recovery::OnAckReceivedOutcome;
426use crate::recovery::OnLossDetectionTimeoutOutcome;
427use crate::recovery::RecoveryOps;
428use crate::recovery::ReleaseDecision;
429
430use crate::stream::RecvAction;
431use crate::stream::StreamPriorityKey;
432
/// The current QUIC wire version.
pub const PROTOCOL_VERSION: u32 = PROTOCOL_VERSION_V1;

/// Supported QUIC versions.
const PROTOCOL_VERSION_V1: u32 = 0x0000_0001;

/// The maximum length of a connection ID.
pub const MAX_CONN_ID_LEN: usize = packet::MAX_CID_LEN as usize;

/// The minimum length of Initial packets sent by a client.
pub const MIN_CLIENT_INITIAL_LEN: usize = 1200;

/// The default initial RTT (333 ms).
const DEFAULT_INITIAL_RTT: Duration = Duration::from_millis(333);

// The minimum packet payload length, in bytes.
//
// NOTE(review): presumably required so short packets still carry enough
// bytes for header protection sampling — confirm.
const PAYLOAD_MIN_LEN: usize = 4;

// PATH_CHALLENGE (9 bytes) + AEAD tag (16 bytes).
const MIN_PROBING_SIZE: usize = 25;

// The default anti-amplification limit factor (see
// `set_max_amplification_factor()`).
const MAX_AMPLIFICATION_FACTOR: usize = 3;

// The maximum number of tracked packet number ranges that need to be acked.
//
// This represents more or less how many ack blocks can fit in a typical packet.
const MAX_ACK_RANGES: usize = 68;

// The highest possible stream ID allowed.
const MAX_STREAM_ID: u64 = 1 << 60;

// The default max_datagram_size used in congestion control.
const MAX_SEND_UDP_PAYLOAD_SIZE: usize = 1200;

// The default length of DATAGRAM queues.
const DEFAULT_MAX_DGRAM_QUEUE_LEN: usize = 0;

// The default length of PATH_CHALLENGE receive queue.
const DEFAULT_MAX_PATH_CHALLENGE_RX_QUEUE_LEN: usize = 3;

// The DATAGRAM standard recommends either none or 65536 as maximum DATAGRAM
// frames size. We enforce the recommendation for forward compatibility.
const MAX_DGRAM_FRAME_SIZE: u64 = 65536;

// The length of the payload length field.
const PAYLOAD_LENGTH_LEN: usize = 2;

// The number of undecryptable packets that can be buffered.
const MAX_UNDECRYPTABLE_PACKETS: usize = 10;

// Bit mask used by `is_reserved_version()` to classify reserved version
// numbers (see RFC 9000 Section 15).
const RESERVED_VERSION_MASK: u32 = 0xfafafafa;

// The default size of the receiver connection flow control window.
const DEFAULT_CONNECTION_WINDOW: u64 = 48 * 1024;

// The maximum size of the receiver connection flow control window.
const MAX_CONNECTION_WINDOW: u64 = 24 * 1024 * 1024;

// How much larger the connection flow control window needs to be than
// the stream flow control window.
const CONNECTION_WINDOW_FACTOR: f64 = 1.5;

// How many probing packet timeouts do we tolerate before considering the path
// validation as failed.
const MAX_PROBING_TIMEOUTS: usize = 3;

// The default initial congestion window size in terms of packet count.
const DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS: usize = 10;

// The maximum data offset that can be stored in a crypto stream.
const MAX_CRYPTO_STREAM_OFFSET: u64 = 1 << 16;

// The send capacity factor (see `set_send_capacity_factor()`).
const TX_CAP_FACTOR: f64 = 1.0;
506
/// Ancillary information about incoming packets.
///
/// Passed to the connection's `recv()` method together with the packet
/// buffer.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct RecvInfo {
    /// The remote address the packet was received from.
    pub from: SocketAddr,

    /// The local address the packet was received on.
    pub to: SocketAddr,
}
516
/// Ancillary information about outgoing packets.
///
/// Returned by the connection's `send()` method together with the number of
/// bytes written into the output buffer.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SendInfo {
    /// The local address the packet should be sent from.
    pub from: SocketAddr,

    /// The remote address the packet should be sent to.
    pub to: SocketAddr,

    /// The time to send the packet out.
    ///
    /// See [Pacing] for more details.
    ///
    /// [Pacing]: index.html#pacing
    pub at: Instant,
}
533
/// The side of the stream to be shut down.
///
/// This should be used when calling [`stream_shutdown()`].
///
/// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
// `repr(C)` pins the discriminant values — NOTE(review): presumably relied
// upon by the FFI API; confirm before changing.
#[repr(C)]
#[derive(PartialEq, Eq)]
pub enum Shutdown {
    /// Stop receiving stream data.
    Read = 0,

    /// Stop sending stream data.
    Write = 1,
}
548
/// Qlog logging level.
///
/// Levels are cumulative: each level includes every event logged by the
/// levels below it.
#[repr(C)]
#[cfg(feature = "qlog")]
#[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
pub enum QlogLevel {
    /// Logs any events of Core importance.
    Core = 0,

    /// Logs any events of Core and Base importance.
    Base = 1,

    /// Logs any events of Core, Base and Extra importance.
    Extra = 2,
}
563
/// Stores configuration shared between multiple connections.
pub struct Config {
    // Transport parameters advertised by connections created from this
    // config.
    local_transport_params: TransportParams,

    // QUIC wire version (validated in `with_tls_ctx()`).
    version: u32,

    // TLS context used by connections created from this config.
    tls_ctx: tls::Context,

    // Owned ALPN protocol list (see `set_application_protos()`).
    application_protos: Vec<Vec<u8>>,

    // Whether GREASE values are sent (see `grease()`).
    grease: bool,

    // Congestion control algorithm selection and tuning knobs.
    cc_algorithm: CongestionControlAlgorithm,
    custom_bbr_params: Option<BbrParams>,
    initial_congestion_window_packets: usize,
    enable_relaxed_loss_threshold: bool,
    enable_cubic_idle_restart_fix: bool,
    enable_send_streams_blocked: bool,

    // Path MTU discovery (see `discover_pmtu()` and
    // `set_pmtud_max_probes()`).
    pmtud: bool,
    pmtud_max_probes: u8,

    hystart: bool,

    pacing: bool,
    /// Send rate limit in Mbps
    max_pacing_rate: Option<u64>,

    // Send capacity factor (see `set_send_capacity_factor()`).
    tx_cap_factor: f64,

    // DATAGRAM queue lengths; both default to
    // `DEFAULT_MAX_DGRAM_QUEUE_LEN`.
    dgram_recv_max_queue_len: usize,
    dgram_send_max_queue_len: usize,

    // Defaults to `DEFAULT_MAX_PATH_CHALLENGE_RX_QUEUE_LEN`.
    path_challenge_recv_max_queue_len: usize,

    // Defaults to `MAX_SEND_UDP_PAYLOAD_SIZE`.
    max_send_udp_payload_size: usize,

    // Flow control window caps (defaults: `MAX_CONNECTION_WINDOW` and
    // `stream::MAX_STREAM_WINDOW`).
    max_connection_window: u64,
    max_stream_window: u64,

    // Anti-amplification limit factor (see
    // `set_max_amplification_factor()`).
    max_amplification_factor: usize,

    disable_dcid_reuse: bool,

    // NOTE(review): presumably `Some(n)` enables tracking of unknown
    // transport parameters up to some bound `n` — confirm against the
    // corresponding setter.
    track_unknown_transport_params: Option<usize>,

    // Initial RTT estimate (see `set_initial_rtt()`).
    initial_rtt: Duration,

    /// When true, uses the initial max data (for connection
    /// and stream) as the initial flow control window.
    use_initial_max_data_as_flow_control_win: bool,
}
616
617// See https://quicwg.org/base-drafts/rfc9000.html#section-15
618fn is_reserved_version(version: u32) -> bool {
619 version & RESERVED_VERSION_MASK == version
620}
621
622impl Config {
    /// Creates a config object with the given version.
    ///
    /// Returns [`Error::UnknownVersion`] when `version` is neither a
    /// supported nor a reserved QUIC version.
    ///
    /// ## Examples:
    ///
    /// ```
    /// let config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn new(version: u32) -> Result<Config> {
        // A fresh TLS context is created here; any TLS setup failure is
        // propagated to the caller via `?`.
        Self::with_tls_ctx(version, tls::Context::new()?)
    }
634
    /// Creates a config object with the given version and
    /// [`SslContextBuilder`].
    ///
    /// This is useful for applications that wish to manually configure
    /// [`SslContextBuilder`].
    ///
    /// [`SslContextBuilder`]: https://docs.rs/boring/latest/boring/ssl/struct.SslContextBuilder.html
    #[cfg(feature = "boringssl-boring-crate")]
    #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
    pub fn with_boring_ssl_ctx_builder(
        version: u32, tls_ctx_builder: boring::ssl::SslContextBuilder,
    ) -> Result<Config> {
        // Wrap the caller-provided builder; version validation happens in
        // `with_tls_ctx()`.
        Self::with_tls_ctx(version, tls::Context::from_boring(tls_ctx_builder))
    }
649
    /// Shared constructor: validates the wire version and builds a `Config`
    /// with default settings around the provided TLS context.
    fn with_tls_ctx(version: u32, tls_ctx: tls::Context) -> Result<Config> {
        // Reserved versions (RFC 9000 Section 15) are accepted in addition
        // to the supported versions.
        if !is_reserved_version(version) && !version_is_supported(version) {
            return Err(Error::UnknownVersion);
        }

        Ok(Config {
            local_transport_params: TransportParams::default(),
            version,
            tls_ctx,
            application_protos: Vec::new(),
            grease: true,
            cc_algorithm: CongestionControlAlgorithm::CUBIC,
            custom_bbr_params: None,
            initial_congestion_window_packets:
                DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS,
            enable_relaxed_loss_threshold: false,
            enable_cubic_idle_restart_fix: true,
            enable_send_streams_blocked: false,
            pmtud: false,
            pmtud_max_probes: pmtud::MAX_PROBES_DEFAULT,
            hystart: true,
            pacing: true,
            max_pacing_rate: None,

            tx_cap_factor: TX_CAP_FACTOR,

            dgram_recv_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,
            dgram_send_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,

            path_challenge_recv_max_queue_len:
                DEFAULT_MAX_PATH_CHALLENGE_RX_QUEUE_LEN,

            max_send_udp_payload_size: MAX_SEND_UDP_PAYLOAD_SIZE,

            max_connection_window: MAX_CONNECTION_WINDOW,
            max_stream_window: stream::MAX_STREAM_WINDOW,

            max_amplification_factor: MAX_AMPLIFICATION_FACTOR,

            disable_dcid_reuse: false,

            track_unknown_transport_params: None,
            initial_rtt: DEFAULT_INITIAL_RTT,

            use_initial_max_data_as_flow_control_win: false,
        })
    }
697
    /// Configures the given certificate chain.
    ///
    /// The content of `file` is parsed as a PEM-encoded leaf certificate,
    /// followed by optional intermediate certificates.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_cert_chain_from_pem_file("/path/to/cert.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_cert_chain_from_pem_file(&mut self, file: &str) -> Result<()> {
        // File loading and PEM parsing are delegated to the TLS context.
        self.tls_ctx.use_certificate_chain_file(file)
    }
713
    /// Configures the given private key.
    ///
    /// The content of `file` is parsed as a PEM-encoded private key.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_priv_key_from_pem_file("/path/to/key.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_priv_key_from_pem_file(&mut self, file: &str) -> Result<()> {
        // File loading and PEM parsing are delegated to the TLS context.
        self.tls_ctx.use_privkey_file(file)
    }
728
    /// Specifies a file where trusted CA certificates are stored for the
    /// purposes of certificate verification.
    ///
    /// The content of `file` is parsed as a PEM-encoded certificate chain.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_verify_locations_from_file("/path/to/cert.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_verify_locations_from_file(&mut self, file: &str) -> Result<()> {
        // Trust store handling is delegated to the TLS context.
        self.tls_ctx.load_verify_locations_from_file(file)
    }
744
    /// Specifies a directory where trusted CA certificates are stored for the
    /// purposes of certificate verification.
    ///
    /// The content of `dir` is a set of PEM-encoded certificate chains.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_verify_locations_from_directory("/path/to/certs")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_verify_locations_from_directory(
        &mut self, dir: &str,
    ) -> Result<()> {
        // Trust store handling is delegated to the TLS context.
        self.tls_ctx.load_verify_locations_from_directory(dir)
    }
762
    /// Configures whether to verify the peer's certificate.
    ///
    /// This should usually be `true` for client-side connections and `false`
    /// for server-side ones.
    ///
    /// Note that by default, no verification is performed.
    ///
    /// Also note that on the server-side, enabling verification of the peer
    /// will trigger a certificate request and make authentication errors
    /// fatal, but will still allow anonymous clients (i.e. clients that
    /// don't present a certificate at all). Servers can check whether a
    /// client presented a certificate by calling [`peer_cert()`] if they
    /// need to.
    ///
    /// [`peer_cert()`]: struct.Connection.html#method.peer_cert
    pub fn verify_peer(&mut self, verify: bool) {
        // Verification is enforced by the TLS layer.
        self.tls_ctx.set_verify(verify);
    }
781
    /// Configures whether to do path MTU discovery.
    ///
    /// The default value is `false`.
    ///
    /// See [`set_pmtud_max_probes()`](Self::set_pmtud_max_probes) to tune the
    /// number of probe attempts.
    pub fn discover_pmtu(&mut self, discover: bool) {
        self.pmtud = discover;
    }
788
789 /// Configures the maximum number of PMTUD probe attempts before treating
790 /// a probe size as failed.
791 ///
792 /// Defaults to 3 per [RFC 8899 Section 5.1.2](https://datatracker.ietf.org/doc/html/rfc8899#section-5.1.2).
793 /// If 0 is passed, the default value is used.
794 pub fn set_pmtud_max_probes(&mut self, max_probes: u8) {
795 self.pmtud_max_probes = max_probes;
796 }
797
    /// Configures whether to send GREASE values.
    ///
    /// The default value is `true`.
    pub fn grease(&mut self, grease: bool) {
        // NOTE(review): presumably consumed when connections are created
        // from this config — confirm.
        self.grease = grease;
    }
804
    /// Enables logging of secrets.
    ///
    /// When logging is enabled, the [`set_keylog()`] method must be called on
    /// the connection for its cryptographic secrets to be logged in the
    /// [keylog] format to the specified writer.
    ///
    /// [`set_keylog()`]: struct.Connection.html#method.set_keylog
    /// [keylog]: https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format
    pub fn log_keys(&mut self) {
        // Only enables the capability; the actual writer is supplied
        // per-connection via `set_keylog()`.
        self.tls_ctx.enable_keylog();
    }
816
    /// Configures the session ticket key material.
    ///
    /// On the server this key will be used to encrypt and decrypt session
    /// tickets, used to perform session resumption without server-side state.
    ///
    /// By default a key is generated internally, and rotated regularly, so
    /// applications don't need to call this unless they need to use a
    /// specific key (e.g. in order to support resumption across multiple
    /// servers), in which case the application is also responsible for
    /// rotating the key to provide forward secrecy.
    pub fn set_ticket_key(&mut self, key: &[u8]) -> Result<()> {
        // Key material validation is delegated to the TLS context.
        self.tls_ctx.set_ticket_key(key)
    }
830
    /// Enables sending or receiving early data.
    pub fn enable_early_data(&mut self) {
        // Early data (0-RTT) support is toggled on the TLS context.
        self.tls_ctx.set_early_data_enabled(true);
    }
835
836 /// Configures the list of supported application protocols.
837 ///
838 /// On the client this configures the list of protocols to send to the
839 /// server as part of the ALPN extension.
840 ///
841 /// On the server this configures the list of supported protocols to match
842 /// against the client-supplied list.
843 ///
844 /// Applications must set a value, but no default is provided.
845 ///
846 /// ## Examples:
847 ///
848 /// ```
849 /// # let mut config = quiche::Config::new(0xbabababa)?;
850 /// config.set_application_protos(&[b"http/1.1", b"http/0.9"]);
851 /// # Ok::<(), quiche::Error>(())
852 /// ```
853 pub fn set_application_protos(
854 &mut self, protos_list: &[&[u8]],
855 ) -> Result<()> {
856 self.application_protos =
857 protos_list.iter().map(|s| s.to_vec()).collect();
858
859 self.tls_ctx.set_alpn(protos_list)
860 }
861
862 /// Configures the list of supported application protocols using wire
863 /// format.
864 ///
865 /// The list of protocols `protos` must be a series of non-empty, 8-bit
866 /// length-prefixed strings.
867 ///
868 /// See [`set_application_protos`](Self::set_application_protos) for more
869 /// background about application protocols.
870 ///
871 /// ## Examples:
872 ///
873 /// ```
874 /// # let mut config = quiche::Config::new(0xbabababa)?;
875 /// config.set_application_protos_wire_format(b"\x08http/1.1\x08http/0.9")?;
876 /// # Ok::<(), quiche::Error>(())
877 /// ```
878 pub fn set_application_protos_wire_format(
879 &mut self, protos: &[u8],
880 ) -> Result<()> {
881 let mut b = octets::Octets::with_slice(protos);
882
883 let mut protos_list = Vec::new();
884
885 while let Ok(proto) = b.get_bytes_with_u8_length() {
886 protos_list.push(proto.buf());
887 }
888
889 self.set_application_protos(&protos_list)
890 }
891
    /// Sets the anti-amplification limit factor.
    ///
    /// The anti-amplification budget is the number of bytes received from the
    /// peer multiplied by this factor.
    ///
    /// The default value is `3`.
    pub fn set_max_amplification_factor(&mut self, v: usize) {
        self.max_amplification_factor = v;
    }

    /// Sets the send capacity factor.
    ///
    /// The default value is `1`.
    pub fn set_send_capacity_factor(&mut self, v: f64) {
        self.tx_cap_factor = v;
    }

    /// Sets the connection's initial RTT.
    ///
    /// The default value is `333` milliseconds.
    pub fn set_initial_rtt(&mut self, v: Duration) {
        self.initial_rtt = v;
    }

    /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
    ///
    /// The default value is infinite, that is, no timeout is used.
    pub fn set_max_idle_timeout(&mut self, v: u64) {
        // Clamp to the largest value encodable as a QUIC varint.
        self.local_transport_params.max_idle_timeout =
            cmp::min(v, octets::MAX_VAR_INT);
    }

    /// Sets the `max_udp_payload_size` transport parameter.
    ///
    /// The default value is `65527`.
    pub fn set_max_recv_udp_payload_size(&mut self, v: usize) {
        self.local_transport_params.max_udp_payload_size =
            cmp::min(v as u64, octets::MAX_VAR_INT);
    }

    /// Sets the maximum outgoing UDP payload size.
    ///
    /// The default and minimum value is `1200`.
    pub fn set_max_send_udp_payload_size(&mut self, v: usize) {
        // Values below the minimum are clamped up to it.
        self.max_send_udp_payload_size = cmp::max(v, MAX_SEND_UDP_PAYLOAD_SIZE);
    }
935
    /// Sets the `initial_max_data` transport parameter.
    ///
    /// When set to a non-zero value quiche will only allow at most `v` bytes of
    /// incoming stream data to be buffered for the whole connection (that is,
    /// data that is not yet read by the application) and will allow more data
    /// to be received as the buffer is consumed by the application.
    ///
    /// When set to zero, either explicitly or via the default, quiche will not
    /// give any flow control to the peer, preventing it from sending any stream
    /// data.
    ///
    /// The default value is `0`.
    pub fn set_initial_max_data(&mut self, v: u64) {
        self.local_transport_params.initial_max_data =
            cmp::min(v, octets::MAX_VAR_INT);
    }

    /// Sets the `initial_max_stream_data_bidi_local` transport parameter.
    ///
    /// When set to a non-zero value quiche will only allow at most `v` bytes
    /// of incoming stream data to be buffered for each locally-initiated
    /// bidirectional stream (that is, data that is not yet read by the
    /// application) and will allow more data to be received as the buffer is
    /// consumed by the application.
    ///
    /// When set to zero, either explicitly or via the default, quiche will not
    /// give any flow control to the peer, preventing it from sending any stream
    /// data.
    ///
    /// The default value is `0`.
    pub fn set_initial_max_stream_data_bidi_local(&mut self, v: u64) {
        self.local_transport_params
            .initial_max_stream_data_bidi_local =
            cmp::min(v, octets::MAX_VAR_INT);
    }

    /// Sets the `initial_max_stream_data_bidi_remote` transport parameter.
    ///
    /// When set to a non-zero value quiche will only allow at most `v` bytes
    /// of incoming stream data to be buffered for each remotely-initiated
    /// bidirectional stream (that is, data that is not yet read by the
    /// application) and will allow more data to be received as the buffer is
    /// consumed by the application.
    ///
    /// When set to zero, either explicitly or via the default, quiche will not
    /// give any flow control to the peer, preventing it from sending any stream
    /// data.
    ///
    /// The default value is `0`.
    pub fn set_initial_max_stream_data_bidi_remote(&mut self, v: u64) {
        self.local_transport_params
            .initial_max_stream_data_bidi_remote =
            cmp::min(v, octets::MAX_VAR_INT);
    }

    /// Sets the `initial_max_stream_data_uni` transport parameter.
    ///
    /// When set to a non-zero value quiche will only allow at most `v` bytes
    /// of incoming stream data to be buffered for each unidirectional stream
    /// (that is, data that is not yet read by the application) and will allow
    /// more data to be received as the buffer is consumed by the application.
    ///
    /// When set to zero, either explicitly or via the default, quiche will not
    /// give any flow control to the peer, preventing it from sending any stream
    /// data.
    ///
    /// The default value is `0`.
    pub fn set_initial_max_stream_data_uni(&mut self, v: u64) {
        self.local_transport_params.initial_max_stream_data_uni =
            cmp::min(v, octets::MAX_VAR_INT);
    }

    /// Sets the `initial_max_streams_bidi` transport parameter.
    ///
    /// When set to a non-zero value quiche will only allow `v` number of
    /// concurrent remotely-initiated bidirectional streams to be open at any
    /// given time and will increase the limit automatically as streams are
    /// completed.
    ///
    /// When set to zero, either explicitly or via the default, quiche will
    /// not allow the peer to open any bidirectional streams.
    ///
    /// A bidirectional stream is considered completed when all incoming data
    /// has been read by the application (up to the `fin` offset) or the
    /// stream's read direction has been shutdown, and all outgoing data has
    /// been acked by the peer (up to the `fin` offset) or the stream's write
    /// direction has been shutdown.
    ///
    /// The default value is `0`.
    pub fn set_initial_max_streams_bidi(&mut self, v: u64) {
        self.local_transport_params.initial_max_streams_bidi =
            cmp::min(v, octets::MAX_VAR_INT);
    }

    /// Sets the `initial_max_streams_uni` transport parameter.
    ///
    /// When set to a non-zero value quiche will only allow `v` number of
    /// concurrent remotely-initiated unidirectional streams to be open at any
    /// given time and will increase the limit automatically as streams are
    /// completed.
    ///
    /// When set to zero, either explicitly or via the default, quiche will
    /// not allow the peer to open any unidirectional streams.
    ///
    /// A unidirectional stream is considered completed when all incoming data
    /// has been read by the application (up to the `fin` offset) or the
    /// stream's read direction has been shutdown.
    ///
    /// The default value is `0`.
    pub fn set_initial_max_streams_uni(&mut self, v: u64) {
        self.local_transport_params.initial_max_streams_uni =
            cmp::min(v, octets::MAX_VAR_INT);
    }
1049
    /// Sets the `ack_delay_exponent` transport parameter.
    ///
    /// Values above the RFC 9000 maximum of
    /// [`MAX_ACK_DELAY_EXPONENT`] (20) are clamped to that
    /// maximum.
    ///
    /// The default value is `3`.
    pub fn set_ack_delay_exponent(&mut self, v: u64) {
        self.local_transport_params.ack_delay_exponent =
            cmp::min(v, MAX_ACK_DELAY_EXPONENT);
    }

    /// Sets the `max_ack_delay` transport parameter, in milliseconds.
    ///
    /// The default value is `25`.
    pub fn set_max_ack_delay(&mut self, v: u64) {
        self.local_transport_params.max_ack_delay =
            cmp::min(v, octets::MAX_VAR_INT);
    }

    /// Sets the `active_connection_id_limit` transport parameter.
    ///
    /// The default value is `2`. Lower values will be ignored.
    pub fn set_active_connection_id_limit(&mut self, v: u64) {
        // RFC 9000 requires this transport parameter to be at least 2, so
        // smaller values are silently ignored.
        if v >= 2 {
            self.local_transport_params.active_conn_id_limit =
                cmp::min(v, octets::MAX_VAR_INT);
        }
    }

    /// Sets the `disable_active_migration` transport parameter.
    ///
    /// The default value is `false`.
    pub fn set_disable_active_migration(&mut self, v: bool) {
        self.local_transport_params.disable_active_migration = v;
    }
1086
    /// Sets the congestion control algorithm used.
    ///
    /// The default value is `CongestionControlAlgorithm::CUBIC`.
    pub fn set_cc_algorithm(&mut self, algo: CongestionControlAlgorithm) {
        self.cc_algorithm = algo;
    }

    /// Sets custom BBR settings.
    ///
    /// This API is experimental and will be removed in the future.
    ///
    /// Currently this only applies if `cc_algorithm` is set to
    /// `CongestionControlAlgorithm::Bbr2Gcongestion`.
    ///
    /// The default value is `None`.
    #[cfg(feature = "internal")]
    #[doc(hidden)]
    pub fn set_custom_bbr_params(&mut self, custom_bbr_settings: BbrParams) {
        self.custom_bbr_params = Some(custom_bbr_settings);
    }
1107
1108 /// Sets the congestion control algorithm used by string.
1109 ///
1110 /// The default value is `cubic`. On error `Error::CongestionControl`
1111 /// will be returned.
1112 ///
1113 /// ## Examples:
1114 ///
1115 /// ```
1116 /// # let mut config = quiche::Config::new(0xbabababa)?;
1117 /// config.set_cc_algorithm_name("reno");
1118 /// # Ok::<(), quiche::Error>(())
1119 /// ```
1120 pub fn set_cc_algorithm_name(&mut self, name: &str) -> Result<()> {
1121 self.cc_algorithm = CongestionControlAlgorithm::from_str(name)?;
1122
1123 Ok(())
1124 }
1125
    /// Sets initial congestion window size in terms of packet count.
    ///
    /// The default value is 10.
    pub fn set_initial_congestion_window_packets(&mut self, packets: usize) {
        self.initial_congestion_window_packets = packets;
    }

    /// Configures whether to enable relaxed loss detection on spurious loss.
    ///
    /// The default value is false.
    pub fn set_enable_relaxed_loss_threshold(&mut self, enable: bool) {
        self.enable_relaxed_loss_threshold = enable;
    }

    /// Configures whether to enable the CUBIC idle restart fix.
    ///
    /// When enabled, the epoch shift on idle restart uses the later of
    /// the last ACK time and last send time, avoiding an inflated delta
    /// when bytes-in-flight transiently hits zero.
    ///
    /// The default value is `true`.
    pub fn set_enable_cubic_idle_restart_fix(&mut self, enable: bool) {
        self.enable_cubic_idle_restart_fix = enable;
    }

    /// Configures whether to enable sending STREAMS_BLOCKED frames.
    ///
    /// STREAMS_BLOCKED frames are an optional advisory signal in the QUIC
    /// protocol which SHOULD be sent when the sender wishes to open a stream
    /// but is unable to do so due to the maximum stream limit set by its peer.
    ///
    /// The default value is false.
    pub fn set_enable_send_streams_blocked(&mut self, enable: bool) {
        self.enable_send_streams_blocked = enable;
    }
1161
    /// Configures whether to enable HyStart++.
    ///
    /// The default value is `true`.
    pub fn enable_hystart(&mut self, v: bool) {
        self.hystart = v;
    }

    /// Configures whether to enable pacing.
    ///
    /// The default value is `true`.
    pub fn enable_pacing(&mut self, v: bool) {
        self.pacing = v;
    }

    /// Sets the max value for pacing rate.
    ///
    /// By default pacing rate is not limited.
    // NOTE(review): the unit of `v` is presumably bytes per second — confirm
    // against the recovery/pacing code.
    pub fn set_max_pacing_rate(&mut self, v: u64) {
        self.max_pacing_rate = Some(v);
    }
1182
1183 /// Configures whether to enable receiving DATAGRAM frames.
1184 ///
1185 /// When enabled, the `max_datagram_frame_size` transport parameter is set
1186 /// to 65536 as recommended by draft-ietf-quic-datagram-01.
1187 ///
1188 /// The default is `false`.
1189 pub fn enable_dgram(
1190 &mut self, enabled: bool, recv_queue_len: usize, send_queue_len: usize,
1191 ) {
1192 self.local_transport_params.max_datagram_frame_size = if enabled {
1193 Some(MAX_DGRAM_FRAME_SIZE)
1194 } else {
1195 None
1196 };
1197 self.dgram_recv_max_queue_len = recv_queue_len;
1198 self.dgram_send_max_queue_len = send_queue_len;
1199 }
1200
    /// Configures the max number of queued received PATH_CHALLENGE frames.
    ///
    /// When an endpoint receives a PATH_CHALLENGE frame and the queue is full,
    /// the frame is discarded.
    ///
    /// The default is 3.
    pub fn set_path_challenge_recv_max_queue_len(&mut self, queue_len: usize) {
        self.path_challenge_recv_max_queue_len = queue_len;
    }

    /// Sets the maximum size of the connection window.
    ///
    /// The default value is `MAX_CONNECTION_WINDOW` (24MBytes).
    pub fn set_max_connection_window(&mut self, v: u64) {
        self.max_connection_window = v;
    }

    /// Sets the maximum size of the stream window.
    ///
    /// The default value is `MAX_STREAM_WINDOW` (16MBytes).
    pub fn set_max_stream_window(&mut self, v: u64) {
        self.max_stream_window = v;
    }
1224
    /// Sets the initial stateless reset token.
    ///
    /// This value is only advertised by servers. Setting a stateless reset
    /// token as a client has no effect on the connection.
    ///
    /// The default value is `None`.
    pub fn set_stateless_reset_token(&mut self, v: Option<u128>) {
        self.local_transport_params.stateless_reset_token = v;
    }

    /// Sets whether the QUIC connection should avoid reusing DCIDs over
    /// different paths.
    ///
    /// When set to `true`, it ensures that a destination Connection ID is never
    /// reused on different paths. Such behaviour may lead to connection stall
    /// if the peer performs a non-voluntary migration (e.g., NAT rebinding) and
    /// does not provide additional destination Connection IDs to handle such
    /// event.
    ///
    /// The default value is `false`.
    pub fn set_disable_dcid_reuse(&mut self, v: bool) {
        self.disable_dcid_reuse = v;
    }
1248
    /// Enables tracking unknown transport parameters.
    ///
    /// Specify the maximum number of bytes used to track unknown transport
    /// parameters. The size includes the identifier and its value. If storing a
    /// transport parameter would cause the limit to be exceeded, it is quietly
    /// dropped.
    ///
    /// The default is that the feature is disabled.
    pub fn enable_track_unknown_transport_parameters(&mut self, size: usize) {
        self.track_unknown_transport_params = Some(size);
    }

    /// Sets whether the initial max data value should be used as the initial
    /// flow control window.
    ///
    /// If set to true, the initial flow control window for streams and the
    /// connection itself will be set to the initial max data value for streams
    /// and the connection respectively. If false, the window is set to the
    /// minimum of initial max data and `DEFAULT_STREAM_WINDOW` or
    /// `DEFAULT_CONNECTION_WINDOW`.
    ///
    /// The default is false.
    pub fn set_use_initial_max_data_as_flow_control_win(&mut self, v: bool) {
        self.use_initial_max_data_as_flow_control_win = v;
    }
1274}
1275
/// Tracks the health of the tx_buffered value.
#[derive(Clone, Copy, Debug, Default, PartialEq)]
pub enum TxBufferTrackingState {
    /// The send buffer is in a good state.
    #[default]
    Ok,
    /// The send buffer is in an inconsistent state, which could lead to
    /// connection stalls or excess buffering due to bugs we haven't
    /// tracked down yet.
    Inconsistent,
}
1287
/// Tracks whether the connection has hit the peer's stream limit, and which
/// STREAMS_BLOCKED frames have already been sent.
#[derive(Default)]
struct StreamsBlockedState {
    /// The peer's max_streams limit at which opening new local streams last
    /// became blocked, if any.
    blocked_at: Option<u64>,

    /// The stream limit carried by the most recently sent STREAMS_BLOCKED
    /// frame. While this differs from `blocked_at`, the connection has a
    /// pending STREAMS_BLOCKED frame to send.
    blocked_sent: Option<u64>,
}

impl StreamsBlockedState {
    /// Returns true if there is a STREAMS_BLOCKED frame that needs sending.
    fn has_pending_stream_blocked_frame(&self) -> bool {
        // A frame is pending when we are blocked at a limit that has not yet
        // been advertised; `blocked_sent == None` means nothing was sent yet.
        match (self.blocked_sent, self.blocked_at) {
            (_, None) => false,
            (None, Some(_)) => true,
            (Some(sent), Some(at)) => sent < at,
        }
    }

    /// Records that stream creation was blocked at the given limit.
    fn update_at(&mut self, limit: u64) {
        // The blocked limit only ever moves forward.
        self.blocked_at =
            Some(self.blocked_at.map_or(limit, |at| at.max(limit)));
    }

    /// Clear blocked_sent to force retransmission of the most recently sent
    /// STREAMS_BLOCKED frame.
    fn force_retransmit_sent_limit_eq(&mut self, limit: u64) {
        // Only clear blocked_sent if the lost frame carried the most recently
        // sent limit; stale losses are ignored.
        if let Some(sent) = self.blocked_sent {
            if sent == limit {
                self.blocked_sent = None;
            }
        }
    }
}
1323
/// A QUIC connection.
///
/// The generic parameter `F` is the buffer factory used for stream and
/// DATAGRAM buffers; see [`BufFactory`].
pub struct Connection<F = DefaultBufFactory>
where
    F: BufFactory,
{
    /// QUIC wire version used for the connection.
    version: u32,

    /// Connection Identifiers.
    ids: cid::ConnectionIdentifiers,

    /// Unique opaque ID for the connection that can be used for logging.
    trace_id: String,

    /// Packet number spaces.
    pkt_num_spaces: [packet::PktNumSpace; packet::Epoch::count()],

    /// The crypto context.
    crypto_ctx: [packet::CryptoContext; packet::Epoch::count()],

    /// Next packet number.
    next_pkt_num: u64,

    // TODO
    // combine with `next_pkt_num`
    /// Track the packet skip context.
    pkt_num_manager: packet::PktNumManager,

    /// Peer's transport parameters.
    peer_transport_params: TransportParams,

    /// If tracking unknown transport parameters from a peer, how much space to
    /// use in bytes.
    peer_transport_params_track_unknown: Option<usize>,

    /// Local transport parameters.
    local_transport_params: TransportParams,

    /// TLS handshake state.
    handshake: tls::Handshake,

    /// Serialized TLS session buffer.
    ///
    /// This field is populated when a new session ticket is processed on the
    /// client. On the server this is empty.
    session: Option<Vec<u8>>,

    /// The configuration for recovery.
    recovery_config: recovery::RecoveryConfig,

    /// The path manager.
    paths: path::PathMap,

    /// PATH_CHALLENGE receive queue max length.
    path_challenge_recv_max_queue_len: usize,

    /// Total number of received PATH_CHALLENGE frames.
    path_challenge_rx_count: u64,

    /// List of supported application protocols.
    application_protos: Vec<Vec<u8>>,

    /// Total number of received packets.
    recv_count: usize,

    /// Total number of sent packets.
    sent_count: usize,

    /// Total number of lost packets.
    lost_count: usize,

    /// Total number of lost packets that were later acked.
    spurious_lost_count: usize,

    /// Total number of packets sent with data retransmitted.
    retrans_count: usize,

    /// Total number of sent DATAGRAM frames.
    dgram_sent_count: usize,

    /// Total number of received DATAGRAM frames.
    dgram_recv_count: usize,

    /// Total number of bytes received from the peer.
    rx_data: u64,

    /// Receiver flow controller.
    flow_control: flowcontrol::FlowControl,

    /// Whether a MAX_DATA frame should be sent.
    should_send_max_data: bool,

    /// True if there is a pending MAX_STREAMS_BIDI frame to send.
    should_send_max_streams_bidi: bool,

    /// True if there is a pending MAX_STREAMS_UNI frame to send.
    should_send_max_streams_uni: bool,

    /// Number of stream data bytes that can be buffered.
    tx_cap: usize,

    /// The send capacity factor.
    tx_cap_factor: f64,

    /// Number of bytes buffered in the send buffer.
    tx_buffered: usize,

    /// Tracks the health of tx_buffered.
    tx_buffered_state: TxBufferTrackingState,

    /// Total number of bytes sent to the peer.
    tx_data: u64,

    /// Peer's flow control limit for the connection.
    max_tx_data: u64,

    /// Last tx_data before running a full send() loop.
    last_tx_data: u64,

    /// Total number of bytes retransmitted over the connection.
    /// This counts only STREAM and CRYPTO data.
    stream_retrans_bytes: u64,

    /// Total number of bytes sent over the connection.
    sent_bytes: u64,

    /// Total number of bytes received over the connection.
    recv_bytes: u64,

    /// Total number of bytes sent acked over the connection.
    acked_bytes: u64,

    /// Total number of bytes sent lost over the connection.
    lost_bytes: u64,

    /// Streams map, indexed by stream ID.
    streams: stream::StreamMap<F>,

    /// Peer's original destination connection ID. Used by the client to
    /// validate the server's transport parameter.
    odcid: Option<ConnectionId<'static>>,

    /// Peer's retry source connection ID. Used by the client during stateless
    /// retry to validate the server's transport parameter.
    rscid: Option<ConnectionId<'static>>,

    /// Received address verification token.
    token: Option<Vec<u8>>,

    /// Error code and reason to be sent to the peer in a CONNECTION_CLOSE
    /// frame.
    local_error: Option<ConnectionError>,

    /// Error code and reason received from the peer in a CONNECTION_CLOSE
    /// frame.
    peer_error: Option<ConnectionError>,

    /// The connection-level limit at which send blocking occurred.
    blocked_limit: Option<u64>,

    /// Idle timeout expiration time.
    idle_timer: Option<Instant>,

    /// Draining timeout expiration time.
    draining_timer: Option<Instant>,

    /// List of raw packets that were received before they could be decrypted.
    undecryptable_pkts: VecDeque<(Vec<u8>, RecvInfo)>,

    /// The negotiated ALPN protocol.
    alpn: Vec<u8>,

    /// Whether this is a server-side connection.
    is_server: bool,

    /// Whether the initial secrets have been derived.
    derived_initial_secrets: bool,

    /// Whether a version negotiation packet has already been received. Only
    /// relevant for client connections.
    did_version_negotiation: bool,

    /// Whether stateless retry has been performed.
    did_retry: bool,

    /// Whether the peer already updated its connection ID.
    got_peer_conn_id: bool,

    /// Whether the peer verified our initial address.
    peer_verified_initial_address: bool,

    /// Whether the peer's transport parameters were parsed.
    parsed_peer_transport_params: bool,

    /// Whether the connection handshake has been completed.
    handshake_completed: bool,

    /// Whether the HANDSHAKE_DONE frame has been sent.
    handshake_done_sent: bool,

    /// Whether the HANDSHAKE_DONE frame has been acked.
    handshake_done_acked: bool,

    /// Whether the connection handshake has been confirmed.
    handshake_confirmed: bool,

    /// Key phase bit used for outgoing protected packets.
    key_phase: bool,

    /// Whether an ack-eliciting packet has been sent since last receiving a
    /// packet.
    ack_eliciting_sent: bool,

    /// Whether the connection is closed.
    closed: bool,

    /// Whether the connection was timed out.
    timed_out: bool,

    /// Whether to send GREASE.
    grease: bool,

    /// Whether to send STREAMS_BLOCKED frames when bidi or uni stream quota
    /// exhausted.
    enable_send_streams_blocked: bool,

    /// TLS keylog writer.
    keylog: Option<Box<dyn std::io::Write + Send + Sync>>,

    /// Qlog state (only present when the `qlog` feature is enabled).
    #[cfg(feature = "qlog")]
    qlog: QlogInfo,

    /// DATAGRAM queues.
    dgram_recv_queue: dgram::DatagramQueue<F>,
    dgram_send_queue: dgram::DatagramQueue<F>,

    /// Whether to emit DATAGRAM frames in the next packet.
    emit_dgram: bool,

    /// Whether the connection should prevent from reusing destination
    /// Connection IDs when the peer migrates.
    disable_dcid_reuse: bool,

    /// The number of streams reset by local.
    reset_stream_local_count: u64,

    /// The number of streams stopped by local.
    stopped_stream_local_count: u64,

    /// The number of streams reset by remote.
    reset_stream_remote_count: u64,

    /// The number of streams stopped by remote.
    stopped_stream_remote_count: u64,

    /// The number of DATA_BLOCKED frames sent due to hitting the connection
    /// flow control limit.
    data_blocked_sent_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames sent due to a stream hitting
    /// the stream flow control limit.
    stream_data_blocked_sent_count: u64,

    /// The number of DATA_BLOCKED frames received from the remote endpoint.
    data_blocked_recv_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames received from the remote
    /// endpoint.
    stream_data_blocked_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames received from the remote endpoint
    /// indicating the peer is blocked on opening new bidirectional streams.
    streams_blocked_bidi_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames received from the remote endpoint
    /// indicating the peer is blocked on opening new unidirectional streams.
    streams_blocked_uni_recv_count: u64,

    /// The number of times send() was blocked because the anti-amplification
    /// budget (bytes received × max_amplification_factor) was exhausted.
    amplification_limited_count: u64,

    /// Tracks if the connection hit the peer's bidi or uni stream limit, and if
    /// STREAMS_BLOCKED frames are pending transmission.
    streams_blocked_bidi_state: StreamsBlockedState,
    streams_blocked_uni_state: StreamsBlockedState,

    /// The anti-amplification limit factor.
    max_amplification_factor: usize,
}
1614
/// Creates a new server-side connection.
///
/// The `scid` parameter represents the server's source connection ID, while
/// the optional `odcid` parameter represents the original destination ID the
/// client sent before a Retry packet (this is only required when using the
/// [`retry()`] function). See also the [`accept_with_retry()`] function for
/// more advanced retry cases.
///
/// [`retry()`]: fn.retry.html
/// [`accept_with_retry()`]: fn.accept_with_retry.html
///
/// ## Examples:
///
/// ```no_run
/// # let mut config = quiche::Config::new(0xbabababa)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let local = "127.0.0.1:0".parse().unwrap();
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// let conn = quiche::accept(&scid, None, local, peer, &mut config)?;
/// # Ok::<(), quiche::Error>(())
/// ```
#[inline(always)]
pub fn accept(
    scid: &ConnectionId, odcid: Option<&ConnectionId>, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection> {
    accept_with_buf_factory(scid, odcid, local, peer, config)
}
1642
/// Creates a new server-side connection, with a custom buffer generation
/// method.
///
/// The buffers generated can be anything that can be dereferenced as a byte
/// slice. See [`accept`] and [`BufFactory`] for more info.
#[inline]
pub fn accept_with_buf_factory<F: BufFactory>(
    scid: &ConnectionId, odcid: Option<&ConnectionId>, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    // For connections with `odcid` set, we historically used `retry_source_cid =
    // scid`. Keep this behavior to preserve backwards compatibility.
    // `accept_with_retry` allows the SCIDs to be specified separately.
    let retry_cids = odcid.map(|odcid| RetryConnectionIds {
        original_destination_cid: odcid,
        retry_source_cid: scid,
    });
    Connection::new(scid, retry_cids, None, local, peer, config, true)
}
1662
/// A wrapper for connection IDs used in [`accept_with_retry`].
pub struct RetryConnectionIds<'a> {
    /// The DCID of the first Initial packet received by the server, which
    /// triggered the Retry packet.
    pub original_destination_cid: &'a ConnectionId<'a>,
    /// The SCID of the Retry packet sent by the server. This can be different
    /// from the new connection's SCID.
    pub retry_source_cid: &'a ConnectionId<'a>,
}
1672
/// Creates a new server-side connection after the client responded to a Retry
/// packet.
///
/// To generate a Retry packet in the first place, use the [`retry()`] function.
///
/// The `scid` parameter represents the server's source connection ID, which can
/// be freshly generated after the application has successfully verified the
/// Retry. `retry_cids` is used to tie the new connection to the Initial + Retry
/// exchange that preceded the connection's creation.
///
/// The DCID of the client's Initial packet is inherently untrusted data. It is
/// safe to use the DCID in the `retry_source_cid` field of the
/// `RetryConnectionIds` provided to this function. However, using the Initial's
/// DCID for the `scid` parameter carries risks. Applications are advised to
/// implement their own DCID validation steps before using the DCID in that
/// manner.
///
/// [`retry()`]: fn.retry.html
#[inline]
pub fn accept_with_retry<F: BufFactory>(
    scid: &ConnectionId, retry_cids: RetryConnectionIds, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    Connection::new(scid, Some(retry_cids), None, local, peer, config, true)
}
1696
1697/// Creates a new client-side connection.
1698///
1699/// The `scid` parameter is used as the connection's source connection ID,
1700/// while the optional `server_name` parameter is used to verify the peer's
1701/// certificate.
1702///
1703/// ## Examples:
1704///
1705/// ```no_run
1706/// # let mut config = quiche::Config::new(0xbabababa)?;
1707/// # let server_name = "quic.tech";
1708/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
1709/// # let local = "127.0.0.1:4321".parse().unwrap();
1710/// # let peer = "127.0.0.1:1234".parse().unwrap();
1711/// let conn =
1712/// quiche::connect(Some(&server_name), &scid, local, peer, &mut config)?;
1713/// # Ok::<(), quiche::Error>(())
1714/// ```
1715#[inline]
1716pub fn connect(
1717 server_name: Option<&str>, scid: &ConnectionId, local: SocketAddr,
1718 peer: SocketAddr, config: &mut Config,
1719) -> Result<Connection> {
1720 let mut conn = Connection::new(scid, None, None, local, peer, config, false)?;
1721
1722 if let Some(server_name) = server_name {
1723 conn.handshake.set_host_name(server_name)?;
1724 }
1725
1726 Ok(conn)
1727}
1728
/// Creates a new client-side connection using the given DCID initially.
///
/// Be aware that [RFC 9000] places requirements for unpredictability and length
/// on the client DCID field. This function is dangerous if these requirements
/// are not satisfied.
///
/// The `scid` parameter is used as the connection's source connection ID, while
/// the optional `server_name` parameter is used to verify the peer's
/// certificate.
///
/// [RFC 9000]: <https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3>
#[cfg(feature = "custom-client-dcid")]
#[cfg_attr(docsrs, doc(cfg(feature = "custom-client-dcid")))]
pub fn connect_with_dcid(
    server_name: Option<&str>, scid: &ConnectionId, dcid: &ConnectionId,
    local: SocketAddr, peer: SocketAddr, config: &mut Config,
) -> Result<Connection> {
    let mut conn =
        Connection::new(scid, None, Some(dcid), local, peer, config, false)?;

    // SNI is only set when certificate verification against a name is wanted.
    if let Some(server_name) = server_name {
        conn.handshake.set_host_name(server_name)?;
    }

    Ok(conn)
}
1755
1756/// Creates a new client-side connection, with a custom buffer generation
1757/// method.
1758///
1759/// The buffers generated can be anything that can be drereferenced as a byte
1760/// slice. See [`connect`] and [`BufFactory`] for more info.
1761#[inline]
1762pub fn connect_with_buffer_factory<F: BufFactory>(
1763 server_name: Option<&str>, scid: &ConnectionId, local: SocketAddr,
1764 peer: SocketAddr, config: &mut Config,
1765) -> Result<Connection<F>> {
1766 let mut conn = Connection::new(scid, None, None, local, peer, config, false)?;
1767
1768 if let Some(server_name) = server_name {
1769 conn.handshake.set_host_name(server_name)?;
1770 }
1771
1772 Ok(conn)
1773}
1774
/// Creates a new client-side connection, with a custom buffer generation
/// method, using the given DCID initially.
///
/// Be aware that [RFC 9000] places requirements for unpredictability and
/// length on the client DCID field. This function is dangerous if these
/// requirements are not satisfied.
///
/// The buffers generated can be anything that can be dereferenced as a byte
/// slice. See [`connect`] and [`BufFactory`] for more info.
///
/// [RFC 9000]: <https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3>
#[cfg(feature = "custom-client-dcid")]
#[cfg_attr(docsrs, doc(cfg(feature = "custom-client-dcid")))]
pub fn connect_with_dcid_and_buffer_factory<F: BufFactory>(
    server_name: Option<&str>, scid: &ConnectionId, dcid: &ConnectionId,
    local: SocketAddr, peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    let mut conn =
        Connection::new(scid, None, Some(dcid), local, peer, config, false)?;

    // The SNI is optional; only configure it when the caller supplied one.
    if let Some(server_name) = server_name {
        conn.handshake.set_host_name(server_name)?;
    }

    Ok(conn)
}
1798
/// Writes a version negotiation packet.
///
/// The `scid` and `dcid` parameters are the source connection ID and the
/// destination connection ID extracted from the received client's Initial
/// packet that advertises an unsupported version.
///
/// ## Examples:
///
/// ```no_run
/// # let mut buf = [0; 512];
/// # let mut out = [0; 512];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// let (len, src) = socket.recv_from(&mut buf).unwrap();
///
/// let hdr =
///     quiche::Header::from_slice(&mut buf[..len], quiche::MAX_CONN_ID_LEN)?;
///
/// if hdr.version != quiche::PROTOCOL_VERSION {
///     let len = quiche::negotiate_version(&hdr.scid, &hdr.dcid, &mut out)?;
///     socket.send_to(&out[..len], &src).unwrap();
/// }
/// # Ok::<(), quiche::Error>(())
/// ```
#[inline]
pub fn negotiate_version(
    scid: &ConnectionId, dcid: &ConnectionId, out: &mut [u8],
) -> Result<usize> {
    // Packet construction lives in the packet module; this is a thin public
    // wrapper. Returns the number of bytes written to `out`.
    packet::negotiate_version(scid, dcid, out)
}
1828
/// Writes a stateless retry packet.
///
/// The `scid` and `dcid` parameters are the source connection ID and the
/// destination connection ID extracted from the received client's Initial
/// packet, while `new_scid` is the server's new source connection ID and
/// `token` is the address validation token the client needs to echo back.
///
/// The application is responsible for generating the address validation
/// token to be sent to the client, and verifying tokens sent back by the
/// client. The generated token should include the `dcid` parameter, such
/// that it can be later extracted from the token and passed to the
/// [`accept()`] function as its `odcid` parameter.
///
/// [`accept()`]: fn.accept.html
///
/// ## Examples:
///
/// ```no_run
/// # let mut config = quiche::Config::new(0xbabababa)?;
/// # let mut buf = [0; 512];
/// # let mut out = [0; 512];
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let local = socket.local_addr().unwrap();
/// # fn mint_token(hdr: &quiche::Header, src: &std::net::SocketAddr) -> Vec<u8> {
/// #     vec![]
/// # }
/// # fn validate_token<'a>(src: &std::net::SocketAddr, token: &'a [u8]) -> Option<quiche::ConnectionId<'a>> {
/// #     None
/// # }
/// let (len, peer) = socket.recv_from(&mut buf).unwrap();
///
/// let hdr = quiche::Header::from_slice(&mut buf[..len], quiche::MAX_CONN_ID_LEN)?;
///
/// let token = hdr.token.as_ref().unwrap();
///
/// // No token sent by client, create a new one.
/// if token.is_empty() {
///     let new_token = mint_token(&hdr, &peer);
///
///     let len = quiche::retry(
///         &hdr.scid, &hdr.dcid, &scid, &new_token, hdr.version, &mut out,
///     )?;
///
///     socket.send_to(&out[..len], &peer).unwrap();
///     return Ok(());
/// }
///
/// // Client sent token, validate it.
/// let odcid = validate_token(&peer, token);
///
/// if odcid.is_none() {
///     // Invalid address validation token.
///     return Ok(());
/// }
///
/// let conn = quiche::accept(&scid, odcid.as_ref(), local, peer, &mut config)?;
/// # Ok::<(), quiche::Error>(())
/// ```
#[inline]
pub fn retry(
    scid: &ConnectionId, dcid: &ConnectionId, new_scid: &ConnectionId,
    token: &[u8], version: u32, out: &mut [u8],
) -> Result<usize> {
    // Packet construction lives in the packet module; this is a thin public
    // wrapper. Returns the number of bytes written to `out`.
    packet::retry(scid, dcid, new_scid, token, version, out)
}
1895
1896/// Returns true if the given protocol version is supported.
1897#[inline]
1898pub fn version_is_supported(version: u32) -> bool {
1899 matches!(version, PROTOCOL_VERSION_V1)
1900}
1901
/// Pushes a frame to the output packet if there is enough space.
///
/// Returns `true` on success, `false` otherwise. In case of failure it means
/// there is no room to add the frame in the packet. You may retry to add the
/// frame later.
macro_rules! push_frame_to_pkt {
    ($out:expr, $frames:expr, $frame:expr, $left:expr) => {{
        // Compute the serialized length once; it is needed both for the
        // space check and for the remaining-space bookkeeping.
        let frame_len = $frame.wire_len();

        if frame_len <= $left {
            $left -= frame_len;

            $frame.to_bytes(&mut $out)?;

            $frames.push($frame);

            true
        } else {
            false
        }
    }};
}
1922
/// Executes the provided body if the qlog feature is enabled, quiche has been
/// configured with a log writer, the event's importance is within the
/// configured level.
macro_rules! qlog_with_type {
    ($ty:expr, $qlog:expr, $qlog_streamer_ref:ident, $body:block) => {{
        // When the `qlog` feature is disabled this expands to nothing, so
        // call sites don't need their own cfg guards.
        #[cfg(feature = "qlog")]
        {
            // Filter by importance first; then only run the body if a
            // streamer was actually installed (via `set_qlog()`).
            if EventImportance::from($ty).is_contained_in(&$qlog.level) {
                if let Some($qlog_streamer_ref) = &mut $qlog.streamer {
                    $body
                }
            }
        }
    }};
}
1938
// Pre-built qlog event type constants, used with `qlog_with_type!` to avoid
// re-constructing the event type at every call site.
#[cfg(feature = "qlog")]
const QLOG_PARAMS_SET: EventType =
    EventType::QuicEventType(QuicEventType::ParametersSet);

#[cfg(feature = "qlog")]
const QLOG_PACKET_RX: EventType =
    EventType::QuicEventType(QuicEventType::PacketReceived);

#[cfg(feature = "qlog")]
const QLOG_PACKET_TX: EventType =
    EventType::QuicEventType(QuicEventType::PacketSent);

#[cfg(feature = "qlog")]
const QLOG_DATA_MV: EventType =
    EventType::QuicEventType(QuicEventType::StreamDataMoved);

#[cfg(feature = "qlog")]
const QLOG_METRICS: EventType =
    EventType::QuicEventType(QuicEventType::RecoveryMetricsUpdated);

#[cfg(feature = "qlog")]
const QLOG_CONNECTION_CLOSED: EventType =
    EventType::QuicEventType(QuicEventType::ConnectionClosed);
1962
// Per-connection qlog state.
#[cfg(feature = "qlog")]
struct QlogInfo {
    // Output stream; `None` until `set_qlog()` is called.
    streamer: Option<qlog::streamer::QlogStreamer>,
    // Whether the peer's transport parameters were already logged.
    logged_peer_params: bool,
    // Maximum event importance that gets written out.
    level: EventImportance,
}

#[cfg(feature = "qlog")]
impl Default for QlogInfo {
    fn default() -> Self {
        // No streamer until `set_qlog()` is called; log at `Base` importance
        // by default.
        QlogInfo {
            streamer: None,
            logged_peer_params: false,
            level: EventImportance::Base,
        }
    }
}
1980
1981impl<F: BufFactory> Connection<F> {
1982 fn new(
1983 scid: &ConnectionId, retry_cids: Option<RetryConnectionIds>,
1984 client_dcid: Option<&ConnectionId>, local: SocketAddr, peer: SocketAddr,
1985 config: &mut Config, is_server: bool,
1986 ) -> Result<Connection<F>> {
1987 let tls = config.tls_ctx.new_handshake()?;
1988 Connection::with_tls(
1989 scid,
1990 retry_cids,
1991 client_dcid,
1992 local,
1993 peer,
1994 config,
1995 tls,
1996 is_server,
1997 )
1998 }
1999
2000 #[allow(clippy::too_many_arguments)]
2001 fn with_tls(
2002 scid: &ConnectionId, retry_cids: Option<RetryConnectionIds>,
2003 client_dcid: Option<&ConnectionId>, local: SocketAddr, peer: SocketAddr,
2004 config: &Config, tls: tls::Handshake, is_server: bool,
2005 ) -> Result<Connection<F>> {
2006 if retry_cids.is_some() && client_dcid.is_some() {
2007 // These are exclusive, the caller should only specify one or the
2008 // other.
2009 return Err(Error::InvalidDcidInitialization);
2010 }
2011 #[cfg(feature = "custom-client-dcid")]
2012 if let Some(client_dcid) = client_dcid {
2013 // The Minimum length is 8.
2014 // See https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3
2015 if client_dcid.to_vec().len() < 8 {
2016 return Err(Error::InvalidDcidInitialization);
2017 }
2018 }
2019 #[cfg(not(feature = "custom-client-dcid"))]
2020 if client_dcid.is_some() {
2021 return Err(Error::InvalidDcidInitialization);
2022 }
2023
2024 let max_rx_data = config.local_transport_params.initial_max_data;
2025
2026 let scid_as_hex: Vec<String> =
2027 scid.iter().map(|b| format!("{b:02x}")).collect();
2028
2029 let reset_token = if is_server {
2030 config.local_transport_params.stateless_reset_token
2031 } else {
2032 None
2033 };
2034
2035 let recovery_config = recovery::RecoveryConfig::from_config(config);
2036
2037 let mut path = path::Path::new(
2038 local,
2039 peer,
2040 &recovery_config,
2041 config.path_challenge_recv_max_queue_len,
2042 true,
2043 Some(config),
2044 );
2045
2046 // If we sent a Retry assume the peer's address is verified.
2047 path.verified_peer_address = retry_cids.is_some();
2048 // Assume clients validate the server's address implicitly.
2049 path.peer_verified_local_address = is_server;
2050
2051 // Do not allocate more than the number of active CIDs.
2052 let paths = path::PathMap::new(
2053 path,
2054 config.local_transport_params.active_conn_id_limit as usize,
2055 is_server,
2056 );
2057
2058 let active_path_id = paths.get_active_path_id()?;
2059
2060 let ids = cid::ConnectionIdentifiers::new(
2061 config.local_transport_params.active_conn_id_limit as usize,
2062 scid,
2063 active_path_id,
2064 reset_token,
2065 );
2066
2067 let initial_flow_control_window =
2068 if config.use_initial_max_data_as_flow_control_win {
2069 max_rx_data
2070 } else {
2071 cmp::min(max_rx_data / 2 * 3, DEFAULT_CONNECTION_WINDOW)
2072 };
2073 let mut conn = Connection {
2074 version: config.version,
2075
2076 ids,
2077
2078 trace_id: scid_as_hex.join(""),
2079
2080 pkt_num_spaces: [
2081 packet::PktNumSpace::new(),
2082 packet::PktNumSpace::new(),
2083 packet::PktNumSpace::new(),
2084 ],
2085
2086 crypto_ctx: [
2087 packet::CryptoContext::new(),
2088 packet::CryptoContext::new(),
2089 packet::CryptoContext::new(),
2090 ],
2091
2092 next_pkt_num: 0,
2093
2094 pkt_num_manager: packet::PktNumManager::new(),
2095
2096 peer_transport_params: TransportParams::default(),
2097
2098 peer_transport_params_track_unknown: config
2099 .track_unknown_transport_params,
2100
2101 local_transport_params: config.local_transport_params.clone(),
2102
2103 handshake: tls,
2104
2105 session: None,
2106
2107 recovery_config,
2108
2109 paths,
2110 path_challenge_recv_max_queue_len: config
2111 .path_challenge_recv_max_queue_len,
2112 path_challenge_rx_count: 0,
2113
2114 application_protos: config.application_protos.clone(),
2115
2116 recv_count: 0,
2117 sent_count: 0,
2118 lost_count: 0,
2119 spurious_lost_count: 0,
2120 retrans_count: 0,
2121 dgram_sent_count: 0,
2122 dgram_recv_count: 0,
2123 sent_bytes: 0,
2124 recv_bytes: 0,
2125 acked_bytes: 0,
2126 lost_bytes: 0,
2127
2128 rx_data: 0,
2129 flow_control: flowcontrol::FlowControl::new(
2130 max_rx_data,
2131 initial_flow_control_window,
2132 config.max_connection_window,
2133 ),
2134 should_send_max_data: false,
2135 should_send_max_streams_bidi: false,
2136 should_send_max_streams_uni: false,
2137
2138 tx_cap: 0,
2139 tx_cap_factor: config.tx_cap_factor,
2140
2141 tx_buffered: 0,
2142 tx_buffered_state: TxBufferTrackingState::Ok,
2143
2144 tx_data: 0,
2145 max_tx_data: 0,
2146 last_tx_data: 0,
2147
2148 stream_retrans_bytes: 0,
2149
2150 streams: stream::StreamMap::new(
2151 config.local_transport_params.initial_max_streams_bidi,
2152 config.local_transport_params.initial_max_streams_uni,
2153 config.max_stream_window,
2154 ),
2155
2156 odcid: None,
2157
2158 rscid: None,
2159
2160 token: None,
2161
2162 local_error: None,
2163
2164 peer_error: None,
2165
2166 blocked_limit: None,
2167
2168 idle_timer: None,
2169
2170 draining_timer: None,
2171
2172 undecryptable_pkts: VecDeque::new(),
2173
2174 alpn: Vec::new(),
2175
2176 is_server,
2177
2178 derived_initial_secrets: false,
2179
2180 did_version_negotiation: false,
2181
2182 did_retry: false,
2183
2184 got_peer_conn_id: false,
2185
2186 // Assume clients validate the server's address implicitly.
2187 peer_verified_initial_address: is_server,
2188
2189 parsed_peer_transport_params: false,
2190
2191 handshake_completed: false,
2192
2193 handshake_done_sent: false,
2194 handshake_done_acked: false,
2195
2196 handshake_confirmed: false,
2197
2198 key_phase: false,
2199
2200 ack_eliciting_sent: false,
2201
2202 closed: false,
2203
2204 timed_out: false,
2205
2206 grease: config.grease,
2207
2208 enable_send_streams_blocked: config.enable_send_streams_blocked,
2209
2210 keylog: None,
2211
2212 #[cfg(feature = "qlog")]
2213 qlog: Default::default(),
2214
2215 dgram_recv_queue: dgram::DatagramQueue::new(
2216 config.dgram_recv_max_queue_len,
2217 ),
2218
2219 dgram_send_queue: dgram::DatagramQueue::new(
2220 config.dgram_send_max_queue_len,
2221 ),
2222
2223 emit_dgram: true,
2224
2225 disable_dcid_reuse: config.disable_dcid_reuse,
2226
2227 reset_stream_local_count: 0,
2228 stopped_stream_local_count: 0,
2229 reset_stream_remote_count: 0,
2230 stopped_stream_remote_count: 0,
2231
2232 data_blocked_sent_count: 0,
2233 stream_data_blocked_sent_count: 0,
2234 data_blocked_recv_count: 0,
2235 stream_data_blocked_recv_count: 0,
2236
2237 streams_blocked_bidi_recv_count: 0,
2238 streams_blocked_uni_recv_count: 0,
2239
2240 amplification_limited_count: 0,
2241
2242 streams_blocked_bidi_state: Default::default(),
2243 streams_blocked_uni_state: Default::default(),
2244
2245 max_amplification_factor: config.max_amplification_factor,
2246 };
2247 conn.streams.set_use_initial_max_data_as_flow_control_win(
2248 config.use_initial_max_data_as_flow_control_win,
2249 );
2250
2251 if let Some(retry_cids) = retry_cids {
2252 conn.local_transport_params
2253 .original_destination_connection_id =
2254 Some(retry_cids.original_destination_cid.to_vec().into());
2255
2256 conn.local_transport_params.retry_source_connection_id =
2257 Some(retry_cids.retry_source_cid.to_vec().into());
2258
2259 conn.did_retry = true;
2260 }
2261
2262 conn.local_transport_params.initial_source_connection_id =
2263 Some(conn.ids.get_scid(0)?.cid.to_vec().into());
2264
2265 conn.handshake.init(is_server)?;
2266
2267 conn.handshake
2268 .use_legacy_codepoint(config.version != PROTOCOL_VERSION_V1);
2269
2270 conn.encode_transport_params()?;
2271
2272 if !is_server {
2273 let dcid = if let Some(client_dcid) = client_dcid {
2274 // We already had an dcid generated for us, use it.
2275 client_dcid.to_vec()
2276 } else {
2277 // Derive initial secrets for the client. We can do this here
2278 // because we already generated the random
2279 // destination connection ID.
2280 let mut dcid = [0; 16];
2281 rand::rand_bytes(&mut dcid[..]);
2282 dcid.to_vec()
2283 };
2284
2285 let (aead_open, aead_seal) = crypto::derive_initial_key_material(
2286 &dcid,
2287 conn.version,
2288 conn.is_server,
2289 false,
2290 )?;
2291
2292 let reset_token = conn.peer_transport_params.stateless_reset_token;
2293 conn.set_initial_dcid(
2294 dcid.to_vec().into(),
2295 reset_token,
2296 active_path_id,
2297 )?;
2298
2299 conn.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
2300 conn.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);
2301
2302 conn.derived_initial_secrets = true;
2303 }
2304
2305 Ok(conn)
2306 }
2307
    /// Sets keylog output to the designated [`Writer`].
    ///
    /// This needs to be called as soon as the connection is created, to avoid
    /// missing some early logs.
    ///
    /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
    #[inline]
    pub fn set_keylog(&mut self, writer: Box<dyn std::io::Write + Send + Sync>) {
        // Replaces (and drops) any previously configured keylog writer.
        self.keylog = Some(writer);
    }
2318
    /// Sets qlog output to the designated [`Writer`].
    ///
    /// Only events included in `QlogLevel::Base` are written. The serialization
    /// format is JSON-SEQ.
    ///
    /// This needs to be called as soon as the connection is created, to avoid
    /// missing some early logs.
    ///
    /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
    #[cfg(feature = "qlog")]
    #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
    pub fn set_qlog(
        &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
        description: String,
    ) {
        // Convenience wrapper that defaults to the `Base` importance level.
        self.set_qlog_with_level(writer, title, description, QlogLevel::Base)
    }
2336
    /// Sets qlog output to the designated [`Writer`].
    ///
    /// Only qlog events included in the specified `QlogLevel` are written. The
    /// serialization format is JSON-SEQ.
    ///
    /// This needs to be called as soon as the connection is created, to avoid
    /// missing some early logs.
    ///
    /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
    #[cfg(feature = "qlog")]
    #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
    pub fn set_qlog_with_level(
        &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
        description: String, qlog_level: QlogLevel,
    ) {
        use qlog::events::quic::TransportInitiator;
        use qlog::events::HTTP3_URI;
        use qlog::events::QUIC_URI;
        use qlog::CommonFields;
        use qlog::ReferenceTime;

        // The vantage point records which side of the connection produced
        // the log.
        let vp = if self.is_server {
            qlog::VantagePointType::Server
        } else {
            qlog::VantagePointType::Client
        };

        // Map the public QlogLevel to the internal importance filter.
        let level = match qlog_level {
            QlogLevel::Core => EventImportance::Core,

            QlogLevel::Base => EventImportance::Base,

            QlogLevel::Extra => EventImportance::Extra,
        };

        self.qlog.level = level;

        // Best effort to get Instant::now() and SystemTime::now() as closely
        // together as possible.
        let now = Instant::now();
        let now_wall_clock = std::time::SystemTime::now();
        let common_fields = CommonFields {
            reference_time: ReferenceTime::new_monotonic(Some(now_wall_clock)),
            ..Default::default()
        };
        let trace = qlog::TraceSeq::new(
            Some(title.to_string()),
            Some(description.to_string()),
            Some(common_fields),
            Some(qlog::VantagePoint {
                name: None,
                ty: vp,
                flow: None,
            }),
            vec![QUIC_URI.to_string(), HTTP3_URI.to_string()],
        );

        let mut streamer = qlog::streamer::QlogStreamer::new(
            Some(title),
            Some(description),
            now,
            trace,
            self.qlog.level,
            qlog::streamer::EventTimePrecision::MicroSeconds,
            writer,
        );

        streamer.start_log().ok();

        // Log our own transport parameters immediately so they appear even if
        // the connection fails early.
        let ev_data = self
            .local_transport_params
            .to_qlog(TransportInitiator::Local, self.handshake.cipher());

        // This event occurs very early, so just mark the relative time as 0.0.
        streamer.add_event(Event::with_time(0.0, ev_data)).ok();

        self.qlog.streamer = Some(streamer);
    }
2415
    /// Returns a mutable reference to the QlogStreamer, if it exists.
    #[cfg(feature = "qlog")]
    #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
    pub fn qlog_streamer(&mut self) -> Option<&mut qlog::streamer::QlogStreamer> {
        // `None` until `set_qlog()` / `set_qlog_with_level()` is called.
        self.qlog.streamer.as_mut()
    }
2422
2423 /// Configures the given session for resumption.
2424 ///
2425 /// On the client, this can be used to offer the given serialized session,
2426 /// as returned by [`session()`], for resumption.
2427 ///
2428 /// This must only be called immediately after creating a connection, that
2429 /// is, before any packet is sent or received.
2430 ///
2431 /// [`session()`]: struct.Connection.html#method.session
2432 #[inline]
2433 pub fn set_session(&mut self, session: &[u8]) -> Result<()> {
2434 let mut b = octets::Octets::with_slice(session);
2435
2436 let session_len = b.get_u64()? as usize;
2437 let session_bytes = b.get_bytes(session_len)?;
2438
2439 self.handshake.set_session(session_bytes.as_ref())?;
2440
2441 let raw_params_len = b.get_u64()? as usize;
2442 let raw_params_bytes = b.get_bytes(raw_params_len)?;
2443
2444 let peer_params = TransportParams::decode(
2445 raw_params_bytes.as_ref(),
2446 self.is_server,
2447 self.peer_transport_params_track_unknown,
2448 )?;
2449
2450 self.process_peer_transport_params(peer_params)?;
2451
2452 Ok(())
2453 }
2454
2455 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
2456 ///
2457 /// This must only be called immediately after creating a connection, that
2458 /// is, before any packet is sent or received.
2459 ///
2460 /// The default value is infinite, that is, no timeout is used unless
2461 /// already configured when creating the connection.
2462 pub fn set_max_idle_timeout(&mut self, v: u64) -> Result<()> {
2463 self.local_transport_params.max_idle_timeout =
2464 cmp::min(v, octets::MAX_VAR_INT);
2465
2466 self.encode_transport_params()
2467 }
2468
2469 /// Sets the congestion control algorithm used.
2470 ///
2471 /// This function can only be called inside one of BoringSSL's handshake
2472 /// callbacks, before any packet has been sent. Calling this function any
2473 /// other time will have no effect.
2474 ///
2475 /// See [`Config::set_cc_algorithm()`].
2476 ///
2477 /// [`Config::set_cc_algorithm()`]: struct.Config.html#method.set_cc_algorithm
2478 #[cfg(feature = "boringssl-boring-crate")]
2479 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2480 pub fn set_cc_algorithm_in_handshake(
2481 ssl: &mut boring::ssl::SslRef, algo: CongestionControlAlgorithm,
2482 ) -> Result<()> {
2483 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2484
2485 ex_data.recovery_config.cc_algorithm = algo;
2486
2487 Ok(())
2488 }
2489
2490 /// Sets custom BBR settings.
2491 ///
2492 /// This API is experimental and will be removed in the future.
2493 ///
2494 /// Currently this only applies if cc_algorithm is
2495 /// `CongestionControlAlgorithm::Bbr2Gcongestion` is set.
2496 ///
2497 /// This function can only be called inside one of BoringSSL's handshake
2498 /// callbacks, before any packet has been sent. Calling this function any
2499 /// other time will have no effect.
2500 ///
2501 /// See [`Config::set_custom_bbr_settings()`].
2502 ///
2503 /// [`Config::set_custom_bbr_settings()`]: struct.Config.html#method.set_custom_bbr_settings
2504 #[cfg(all(feature = "boringssl-boring-crate", feature = "internal"))]
2505 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2506 #[doc(hidden)]
2507 pub fn set_custom_bbr_settings_in_handshake(
2508 ssl: &mut boring::ssl::SslRef, custom_bbr_params: BbrParams,
2509 ) -> Result<()> {
2510 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2511
2512 ex_data.recovery_config.custom_bbr_params = Some(custom_bbr_params);
2513
2514 Ok(())
2515 }
2516
2517 /// Sets the congestion control algorithm used by string.
2518 ///
2519 /// This function can only be called inside one of BoringSSL's handshake
2520 /// callbacks, before any packet has been sent. Calling this function any
2521 /// other time will have no effect.
2522 ///
2523 /// See [`Config::set_cc_algorithm_name()`].
2524 ///
2525 /// [`Config::set_cc_algorithm_name()`]: struct.Config.html#method.set_cc_algorithm_name
2526 #[cfg(feature = "boringssl-boring-crate")]
2527 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2528 pub fn set_cc_algorithm_name_in_handshake(
2529 ssl: &mut boring::ssl::SslRef, name: &str,
2530 ) -> Result<()> {
2531 let cc_algo = CongestionControlAlgorithm::from_str(name)?;
2532 Self::set_cc_algorithm_in_handshake(ssl, cc_algo)
2533 }
2534
2535 /// Sets initial congestion window size in terms of packet count.
2536 ///
2537 /// This function can only be called inside one of BoringSSL's handshake
2538 /// callbacks, before any packet has been sent. Calling this function any
2539 /// other time will have no effect.
2540 ///
2541 /// See [`Config::set_initial_congestion_window_packets()`].
2542 ///
2543 /// [`Config::set_initial_congestion_window_packets()`]: struct.Config.html#method.set_initial_congestion_window_packets
2544 #[cfg(feature = "boringssl-boring-crate")]
2545 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2546 pub fn set_initial_congestion_window_packets_in_handshake(
2547 ssl: &mut boring::ssl::SslRef, packets: usize,
2548 ) -> Result<()> {
2549 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2550
2551 ex_data.recovery_config.initial_congestion_window_packets = packets;
2552
2553 Ok(())
2554 }
2555
2556 /// Configure whether to enable relaxed loss detection on spurious loss.
2557 ///
2558 /// This function can only be called inside one of BoringSSL's handshake
2559 /// callbacks, before any packet has been sent. Calling this function any
2560 /// other time will have no effect.
2561 ///
2562 /// See [`Config::set_enable_relaxed_loss_threshold()`].
2563 ///
2564 /// [`Config::set_enable_relaxed_loss_threshold()`]: struct.Config.html#method.set_enable_relaxed_loss_threshold
2565 #[cfg(feature = "boringssl-boring-crate")]
2566 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2567 pub fn set_enable_relaxed_loss_threshold_in_handshake(
2568 ssl: &mut boring::ssl::SslRef, enable: bool,
2569 ) -> Result<()> {
2570 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2571
2572 ex_data.recovery_config.enable_relaxed_loss_threshold = enable;
2573
2574 Ok(())
2575 }
2576
2577 /// Configure whether to enable the CUBIC idle restart fix.
2578 ///
2579 /// This function can only be called inside one of BoringSSL's handshake
2580 /// callbacks, before any packet has been sent. Calling this function any
2581 /// other time will have no effect.
2582 ///
2583 /// See [`Config::set_enable_cubic_idle_restart_fix()`].
2584 ///
2585 /// [`Config::set_enable_cubic_idle_restart_fix()`]: struct.Config.html#method.set_enable_cubic_idle_restart_fix
2586 #[cfg(feature = "boringssl-boring-crate")]
2587 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2588 pub fn set_enable_cubic_idle_restart_fix_in_handshake(
2589 ssl: &mut boring::ssl::SslRef, enable: bool,
2590 ) -> Result<()> {
2591 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2592
2593 ex_data.recovery_config.enable_cubic_idle_restart_fix = enable;
2594
2595 Ok(())
2596 }
2597
2598 /// Configures whether to enable HyStart++.
2599 ///
2600 /// This function can only be called inside one of BoringSSL's handshake
2601 /// callbacks, before any packet has been sent. Calling this function any
2602 /// other time will have no effect.
2603 ///
2604 /// See [`Config::enable_hystart()`].
2605 ///
2606 /// [`Config::enable_hystart()`]: struct.Config.html#method.enable_hystart
2607 #[cfg(feature = "boringssl-boring-crate")]
2608 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2609 pub fn set_hystart_in_handshake(
2610 ssl: &mut boring::ssl::SslRef, v: bool,
2611 ) -> Result<()> {
2612 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2613
2614 ex_data.recovery_config.hystart = v;
2615
2616 Ok(())
2617 }
2618
2619 /// Configures whether to enable pacing.
2620 ///
2621 /// This function can only be called inside one of BoringSSL's handshake
2622 /// callbacks, before any packet has been sent. Calling this function any
2623 /// other time will have no effect.
2624 ///
2625 /// See [`Config::enable_pacing()`].
2626 ///
2627 /// [`Config::enable_pacing()`]: struct.Config.html#method.enable_pacing
2628 #[cfg(feature = "boringssl-boring-crate")]
2629 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2630 pub fn set_pacing_in_handshake(
2631 ssl: &mut boring::ssl::SslRef, v: bool,
2632 ) -> Result<()> {
2633 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2634
2635 ex_data.recovery_config.pacing = v;
2636
2637 Ok(())
2638 }
2639
2640 /// Sets the max value for pacing rate.
2641 ///
2642 /// This function can only be called inside one of BoringSSL's handshake
2643 /// callbacks, before any packet has been sent. Calling this function any
2644 /// other time will have no effect.
2645 ///
2646 /// See [`Config::set_max_pacing_rate()`].
2647 ///
2648 /// [`Config::set_max_pacing_rate()`]: struct.Config.html#method.set_max_pacing_rate
2649 #[cfg(feature = "boringssl-boring-crate")]
2650 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2651 pub fn set_max_pacing_rate_in_handshake(
2652 ssl: &mut boring::ssl::SslRef, v: Option<u64>,
2653 ) -> Result<()> {
2654 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2655
2656 ex_data.recovery_config.max_pacing_rate = v;
2657
2658 Ok(())
2659 }
2660
2661 /// Sets the maximum outgoing UDP payload size.
2662 ///
2663 /// This function can only be called inside one of BoringSSL's handshake
2664 /// callbacks, before any packet has been sent. Calling this function any
2665 /// other time will have no effect.
2666 ///
2667 /// See [`Config::set_max_send_udp_payload_size()`].
2668 ///
2669 /// [`Config::set_max_send_udp_payload_size()`]: struct.Config.html#method.set_max_send_udp_payload_size
2670 #[cfg(feature = "boringssl-boring-crate")]
2671 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2672 pub fn set_max_send_udp_payload_size_in_handshake(
2673 ssl: &mut boring::ssl::SslRef, v: usize,
2674 ) -> Result<()> {
2675 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2676
2677 ex_data.recovery_config.max_send_udp_payload_size = v;
2678
2679 Ok(())
2680 }
2681
2682 /// Sets the send capacity factor.
2683 ///
2684 /// This function can only be called inside one of BoringSSL's handshake
2685 /// callbacks, before any packet has been sent. Calling this function any
2686 /// other time will have no effect.
2687 ///
2688 /// See [`Config::set_send_capacity_factor()`].
2689 ///
2690 /// [`Config::set_max_send_udp_payload_size()`]: struct.Config.html#method.set_send_capacity_factor
2691 #[cfg(feature = "boringssl-boring-crate")]
2692 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2693 pub fn set_send_capacity_factor_in_handshake(
2694 ssl: &mut boring::ssl::SslRef, v: f64,
2695 ) -> Result<()> {
2696 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2697
2698 ex_data.tx_cap_factor = v;
2699
2700 Ok(())
2701 }
2702
2703 /// Configures whether to do path MTU discovery.
2704 ///
2705 /// This function can only be called inside one of BoringSSL's handshake
2706 /// callbacks, before any packet has been sent. Calling this function any
2707 /// other time will have no effect.
2708 ///
2709 /// See [`Config::discover_pmtu()`].
2710 ///
2711 /// [`Config::discover_pmtu()`]: struct.Config.html#method.discover_pmtu
2712 #[cfg(feature = "boringssl-boring-crate")]
2713 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2714 pub fn set_discover_pmtu_in_handshake(
2715 ssl: &mut boring::ssl::SslRef, discover: bool, max_probes: u8,
2716 ) -> Result<()> {
2717 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2718
2719 ex_data.pmtud = Some((discover, max_probes));
2720
2721 Ok(())
2722 }
2723
2724 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
2725 ///
2726 /// This function can only be called inside one of BoringSSL's handshake
2727 /// callbacks, before any packet has been sent. Calling this function any
2728 /// other time will have no effect.
2729 ///
2730 /// See [`Config::set_max_idle_timeout()`].
2731 ///
2732 /// [`Config::set_max_idle_timeout()`]: struct.Config.html#method.set_max_idle_timeout
2733 #[cfg(feature = "boringssl-boring-crate")]
2734 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2735 pub fn set_max_idle_timeout_in_handshake(
2736 ssl: &mut boring::ssl::SslRef, v: u64,
2737 ) -> Result<()> {
2738 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2739
2740 ex_data.local_transport_params.max_idle_timeout = v;
2741
2742 Self::set_transport_parameters_in_hanshake(
2743 ex_data.local_transport_params.clone(),
2744 ex_data.is_server,
2745 ssl,
2746 )
2747 }
2748
2749 /// Sets the `initial_max_streams_bidi` transport parameter.
2750 ///
2751 /// This function can only be called inside one of BoringSSL's handshake
2752 /// callbacks, before any packet has been sent. Calling this function any
2753 /// other time will have no effect.
2754 ///
2755 /// See [`Config::set_initial_max_streams_bidi()`].
2756 ///
2757 /// [`Config::set_initial_max_streams_bidi()`]: struct.Config.html#method.set_initial_max_streams_bidi
2758 #[cfg(feature = "boringssl-boring-crate")]
2759 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2760 pub fn set_initial_max_streams_bidi_in_handshake(
2761 ssl: &mut boring::ssl::SslRef, v: u64,
2762 ) -> Result<()> {
2763 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2764
2765 ex_data.local_transport_params.initial_max_streams_bidi = v;
2766
2767 Self::set_transport_parameters_in_hanshake(
2768 ex_data.local_transport_params.clone(),
2769 ex_data.is_server,
2770 ssl,
2771 )
2772 }
2773
    /// Applies `params` to the TLS handshake state reconstructed from `ssl`.
    ///
    /// Used by the `set_*_in_handshake()` helpers after they mutate a local
    /// transport parameter, so the updated value is re-encoded before the
    /// transport parameters extension is written into a TLS message.
    ///
    /// Returns `Err` if re-encoding the transport parameters fails.
    #[cfg(feature = "boringssl-boring-crate")]
    fn set_transport_parameters_in_hanshake(
        params: TransportParams, is_server: bool, ssl: &mut boring::ssl::SslRef,
    ) -> Result<()> {
        use foreign_types_shared::ForeignTypeRef;

        // In order to apply the new parameter to the TLS state before TPs are
        // written into a TLS message, we need to re-encode all TPs immediately.
        //
        // Since we don't have direct access to the main `Connection` object, we
        // need to re-create the `Handshake` state from the `SslRef`.
        //
        // SAFETY: the `Handshake` object must not be drop()ed, otherwise it
        // would free the underlying BoringSSL structure.
        let mut handshake =
            unsafe { tls::Handshake::from_ptr(ssl.as_ptr() as _) };
        handshake.set_quic_transport_params(&params, is_server)?;

        // Avoid running `drop(handshake)` as that would free the underlying
        // handshake state.
        std::mem::forget(handshake);

        Ok(())
    }
2798
2799 /// Sets the `use_initial_max_data_as_flow_control_win` flag during SSL
2800 /// handshake.
2801 ///
2802 /// This function can only be called inside one of BoringSSL's handshake
2803 /// callbacks, before any packet has been sent. Calling this function any
2804 /// other time will have no effect.
2805 ///
2806 /// See [`Connection::enable_use_initial_max_data_as_flow_control_win()`].
2807 #[cfg(feature = "boringssl-boring-crate")]
2808 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2809 pub fn set_use_initial_max_data_as_flow_control_win_in_handshake(
2810 ssl: &mut boring::ssl::SslRef,
2811 ) -> Result<()> {
2812 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2813
2814 ex_data.use_initial_max_data_as_flow_control_win = true;
2815 Ok(())
2816 }
2817
    /// Processes QUIC packets received from the peer.
    ///
    /// On success the number of bytes processed from the input buffer is
    /// returned. On error the connection will be closed by calling [`close()`]
    /// with the appropriate error code.
    ///
    /// Coalesced packets will be processed as necessary.
    ///
    /// Note that the contents of the input buffer `buf` might be modified by
    /// this function due to, for example, in-place decryption.
    ///
    /// [`close()`]: struct.Connection.html#method.close
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut buf = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// loop {
    ///     let (read, from) = socket.recv_from(&mut buf).unwrap();
    ///
    ///     let recv_info = quiche::RecvInfo {
    ///         from,
    ///         to: local,
    ///     };
    ///
    ///     let read = match conn.recv(&mut buf[..read], recv_info) {
    ///         Ok(v) => v,
    ///
    ///         Err(e) => {
    ///             // An error occurred, handle it.
    ///             break;
    ///         },
    ///     };
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn recv(&mut self, buf: &mut [u8], info: RecvInfo) -> Result<usize> {
        // Total size of the incoming UDP datagram.
        let len = buf.len();

        if len == 0 {
            return Err(Error::BufferTooShort);
        }

        // Map the datagram's 4-tuple to a known path, if any. A server may
        // legitimately see a new 4-tuple here (connection migration).
        let recv_pid = self.paths.path_id_from_addrs(&(info.to, info.from));

        if let Some(recv_pid) = recv_pid {
            let recv_path = self.paths.get_mut(recv_pid)?;

            // Keep track of how many bytes we received from the client, so we
            // can limit bytes sent back before address validation, to a
            // multiple of this. The limit needs to be increased early on, so
            // that if there is an error there is enough credit to send a
            // CONNECTION_CLOSE.
            //
            // It doesn't matter if the packets received were valid or not, we
            // only need to track the total amount of bytes received.
            //
            // Note that we also need to limit the number of bytes we sent on a
            // path if we are not the host that initiated its usage.
            if self.is_server && !recv_path.verified_peer_address {
                recv_path.max_send_bytes += len * self.max_amplification_factor;
            }
        } else if !self.is_server {
            // If a client receives packets from an unknown server address,
            // the client MUST discard these packets.
            trace!(
                "{} client received packet from unknown address {:?}, dropping",
                self.trace_id,
                info,
            );

            // Report the whole datagram as consumed so the caller moves on.
            return Ok(len);
        }

        let mut done = 0;
        let mut left = len;

        // Process coalesced packets. Each iteration consumes one QUIC packet
        // from the front of the remaining `left` bytes.
        while left > 0 {
            let read = match self.recv_single(
                &mut buf[len - left..len],
                &info,
                recv_pid,
            ) {
                Ok(v) => v,

                Err(Error::Done) => {
                    // If the packet can't be processed or decrypted, check if
                    // it's a stateless reset.
                    if self.is_stateless_reset(&buf[len - left..len]) {
                        trace!("{} packet is a stateless reset", self.trace_id);

                        self.mark_closed();
                    }

                    // `Done` means the rest of the datagram should be skipped
                    // entirely, so count everything that's left as consumed.
                    left
                },

                Err(e) => {
                    // In case of error processing the incoming packet, close
                    // the connection.
                    self.close(false, e.to_wire(), b"").ok();
                    return Err(e);
                },
            };

            done += read;
            left -= read;
        }

        // Even though the packet was previously "accepted", it
        // should be safe to forward the error, as it also comes
        // from the `recv()` method.
        self.process_undecrypted_0rtt_packets()?;

        Ok(done)
    }
2941
2942 fn process_undecrypted_0rtt_packets(&mut self) -> Result<()> {
2943 // Process previously undecryptable 0-RTT packets if the decryption key
2944 // is now available.
2945 if self.crypto_ctx[packet::Epoch::Application]
2946 .crypto_0rtt_open
2947 .is_some()
2948 {
2949 while let Some((mut pkt, info)) = self.undecryptable_pkts.pop_front()
2950 {
2951 if let Err(e) = self.recv(&mut pkt, info) {
2952 self.undecryptable_pkts.clear();
2953
2954 return Err(e);
2955 }
2956 }
2957 }
2958 Ok(())
2959 }
2960
2961 /// Returns true if a QUIC packet is a stateless reset.
2962 fn is_stateless_reset(&self, buf: &[u8]) -> bool {
2963 // If the packet is too small, then we just throw it away.
2964 let buf_len = buf.len();
2965 if buf_len < 21 {
2966 return false;
2967 }
2968
2969 // TODO: we should iterate over all active destination connection IDs
2970 // and check against their reset token.
2971 match self.peer_transport_params.stateless_reset_token {
2972 Some(token) => {
2973 let token_len = 16;
2974
2975 crypto::verify_slices_are_equal(
2976 &token.to_be_bytes(),
2977 &buf[buf_len - token_len..buf_len],
2978 )
2979 .is_ok()
2980 },
2981
2982 None => false,
2983 }
2984 }
2985
2986 /// Processes a single QUIC packet received from the peer.
2987 ///
2988 /// On success the number of bytes processed from the input buffer is
2989 /// returned. When the [`Done`] error is returned, processing of the
2990 /// remainder of the incoming UDP datagram should be interrupted.
2991 ///
2992 /// Note that a server might observe a new 4-tuple, preventing to
2993 /// know in advance to which path the incoming packet belongs to (`recv_pid`
2994 /// is `None`). As a client, packets from unknown 4-tuple are dropped
2995 /// beforehand (see `recv()`).
2996 ///
2997 /// On error, an error other than [`Done`] is returned.
2998 ///
2999 /// [`Done`]: enum.Error.html#variant.Done
3000 fn recv_single(
3001 &mut self, buf: &mut [u8], info: &RecvInfo, recv_pid: Option<usize>,
3002 ) -> Result<usize> {
3003 let now = Instant::now();
3004
3005 if buf.is_empty() {
3006 return Err(Error::Done);
3007 }
3008
3009 if self.is_closed() || self.is_draining() {
3010 return Err(Error::Done);
3011 }
3012
3013 let is_closing = self.local_error.is_some();
3014
3015 if is_closing {
3016 return Err(Error::Done);
3017 }
3018
3019 let buf_len = buf.len();
3020
3021 let mut b = octets::OctetsMut::with_slice(buf);
3022
3023 let mut hdr = Header::from_bytes(&mut b, self.source_id().len())
3024 .map_err(|e| {
3025 drop_pkt_on_err(
3026 e,
3027 self.recv_count,
3028 self.is_server,
3029 &self.trace_id,
3030 )
3031 })?;
3032
3033 if hdr.ty == Type::VersionNegotiation {
3034 // Version negotiation packets can only be sent by the server.
3035 if self.is_server {
3036 return Err(Error::Done);
3037 }
3038
3039 // Ignore duplicate version negotiation.
3040 if self.did_version_negotiation {
3041 return Err(Error::Done);
3042 }
3043
3044 // Ignore version negotiation if any other packet has already been
3045 // successfully processed.
3046 if self.recv_count > 0 {
3047 return Err(Error::Done);
3048 }
3049
3050 if hdr.dcid != self.source_id() {
3051 return Err(Error::Done);
3052 }
3053
3054 if hdr.scid != self.destination_id() {
3055 return Err(Error::Done);
3056 }
3057
3058 trace!("{} rx pkt {:?}", self.trace_id, hdr);
3059
3060 let versions = hdr.versions.ok_or(Error::Done)?;
3061
3062 // Ignore version negotiation if the version already selected is
3063 // listed.
3064 if versions.contains(&self.version) {
3065 return Err(Error::Done);
3066 }
3067
3068 let supported_versions =
3069 versions.iter().filter(|&&v| version_is_supported(v));
3070
3071 let mut found_version = false;
3072
3073 for &v in supported_versions {
3074 found_version = true;
3075
3076 // The final version takes precedence over draft ones.
3077 if v == PROTOCOL_VERSION_V1 {
3078 self.version = v;
3079 break;
3080 }
3081
3082 self.version = cmp::max(self.version, v);
3083 }
3084
3085 if !found_version {
3086 // We don't support any of the versions offered.
3087 //
3088 // While a man-in-the-middle attacker might be able to
3089 // inject a version negotiation packet that triggers this
3090 // failure, the window of opportunity is very small and
3091 // this error is quite useful for debugging, so don't just
3092 // ignore the packet.
3093 return Err(Error::UnknownVersion);
3094 }
3095
3096 self.did_version_negotiation = true;
3097
3098 // Derive Initial secrets based on the new version.
3099 let (aead_open, aead_seal) = crypto::derive_initial_key_material(
3100 &self.destination_id(),
3101 self.version,
3102 self.is_server,
3103 true,
3104 )?;
3105
3106 // Reset connection state to force sending another Initial packet.
3107 self.drop_epoch_state(packet::Epoch::Initial, now);
3108 self.got_peer_conn_id = false;
3109 self.handshake.clear()?;
3110
3111 self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
3112 self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);
3113
3114 self.handshake
3115 .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);
3116
3117 // Encode transport parameters again, as the new version might be
3118 // using a different format.
3119 self.encode_transport_params()?;
3120
3121 return Err(Error::Done);
3122 }
3123
3124 if hdr.ty == Type::Retry {
3125 // Retry packets can only be sent by the server.
3126 if self.is_server {
3127 return Err(Error::Done);
3128 }
3129
3130 // Ignore duplicate retry.
3131 if self.did_retry {
3132 return Err(Error::Done);
3133 }
3134
3135 // Check if Retry packet is valid.
3136 if packet::verify_retry_integrity(
3137 &b,
3138 &self.destination_id(),
3139 self.version,
3140 )
3141 .is_err()
3142 {
3143 return Err(Error::Done);
3144 }
3145
3146 trace!("{} rx pkt {:?}", self.trace_id, hdr);
3147
3148 self.token = hdr.token;
3149 self.did_retry = true;
3150
3151 // Remember peer's new connection ID.
3152 self.odcid = Some(self.destination_id().into_owned());
3153
3154 self.set_initial_dcid(
3155 hdr.scid.clone(),
3156 None,
3157 self.paths.get_active_path_id()?,
3158 )?;
3159
3160 self.rscid = Some(self.destination_id().into_owned());
3161
3162 // Derive Initial secrets using the new connection ID.
3163 let (aead_open, aead_seal) = crypto::derive_initial_key_material(
3164 &hdr.scid,
3165 self.version,
3166 self.is_server,
3167 true,
3168 )?;
3169
3170 // Reset connection state to force sending another Initial packet.
3171 self.drop_epoch_state(packet::Epoch::Initial, now);
3172 self.got_peer_conn_id = false;
3173 self.handshake.clear()?;
3174
3175 self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
3176 self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);
3177
3178 return Err(Error::Done);
3179 }
3180
3181 if self.is_server && !self.did_version_negotiation {
3182 if !version_is_supported(hdr.version) {
3183 return Err(Error::UnknownVersion);
3184 }
3185
3186 self.version = hdr.version;
3187 self.did_version_negotiation = true;
3188
3189 self.handshake
3190 .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);
3191
3192 // Encode transport parameters again, as the new version might be
3193 // using a different format.
3194 self.encode_transport_params()?;
3195 }
3196
3197 if hdr.ty != Type::Short && hdr.version != self.version {
3198 // At this point version negotiation was already performed, so
3199 // ignore packets that don't match the connection's version.
3200 return Err(Error::Done);
3201 }
3202
3203 // Long header packets have an explicit payload length, but short
3204 // packets don't so just use the remaining capacity in the buffer.
3205 let payload_len = if hdr.ty == Type::Short {
3206 b.cap()
3207 } else {
3208 b.get_varint().map_err(|e| {
3209 drop_pkt_on_err(
3210 e.into(),
3211 self.recv_count,
3212 self.is_server,
3213 &self.trace_id,
3214 )
3215 })? as usize
3216 };
3217
3218 // Make sure the buffer is same or larger than an explicit
3219 // payload length.
3220 if payload_len > b.cap() {
3221 return Err(drop_pkt_on_err(
3222 Error::InvalidPacket,
3223 self.recv_count,
3224 self.is_server,
3225 &self.trace_id,
3226 ));
3227 }
3228
3229 // Derive initial secrets on the server.
3230 if !self.derived_initial_secrets {
3231 let (aead_open, aead_seal) = crypto::derive_initial_key_material(
3232 &hdr.dcid,
3233 self.version,
3234 self.is_server,
3235 false,
3236 )?;
3237
3238 self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
3239 self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);
3240
3241 self.derived_initial_secrets = true;
3242 }
3243
3244 // Select packet number space epoch based on the received packet's type.
3245 let epoch = hdr.ty.to_epoch()?;
3246
3247 // Select AEAD context used to open incoming packet.
3248 let aead = if hdr.ty == Type::ZeroRTT {
3249 // Only use 0-RTT key if incoming packet is 0-RTT.
3250 self.crypto_ctx[epoch].crypto_0rtt_open.as_ref()
3251 } else {
3252 // Otherwise use the packet number space's main key.
3253 self.crypto_ctx[epoch].crypto_open.as_ref()
3254 };
3255
3256 // Finally, discard packet if no usable key is available.
3257 let mut aead = match aead {
3258 Some(v) => v,
3259
3260 None => {
3261 if hdr.ty == Type::ZeroRTT &&
3262 self.undecryptable_pkts.len() < MAX_UNDECRYPTABLE_PACKETS &&
3263 !self.is_established()
3264 {
3265 // Buffer 0-RTT packets when the required read key is not
3266 // available yet, and process them later.
3267 //
3268 // TODO: in the future we might want to buffer other types
3269 // of undecryptable packets as well.
3270 let pkt_len = b.off() + payload_len;
3271 let pkt = (b.buf()[..pkt_len]).to_vec();
3272
3273 self.undecryptable_pkts.push_back((pkt, *info));
3274 return Ok(pkt_len);
3275 }
3276
3277 let e = drop_pkt_on_err(
3278 Error::CryptoFail,
3279 self.recv_count,
3280 self.is_server,
3281 &self.trace_id,
3282 );
3283
3284 return Err(e);
3285 },
3286 };
3287
3288 let aead_tag_len = aead.alg().tag_len();
3289
3290 packet::decrypt_hdr(&mut b, &mut hdr, aead).map_err(|e| {
3291 drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
3292 })?;
3293
3294 let pn = packet::decode_pkt_num(
3295 self.pkt_num_spaces[epoch].largest_rx_pkt_num,
3296 hdr.pkt_num,
3297 hdr.pkt_num_len,
3298 );
3299
3300 let pn_len = hdr.pkt_num_len;
3301
3302 trace!(
3303 "{} rx pkt {:?} len={} pn={} {}",
3304 self.trace_id,
3305 hdr,
3306 payload_len,
3307 pn,
3308 AddrTupleFmt(info.from, info.to)
3309 );
3310
3311 #[cfg(feature = "qlog")]
3312 let mut qlog_frames = vec![];
3313
3314 // Check for key update.
3315 let mut aead_next = None;
3316
3317 if self.handshake_confirmed &&
3318 hdr.ty != Type::ZeroRTT &&
3319 hdr.key_phase != self.key_phase
3320 {
3321 // Check if this packet arrived before key update.
3322 if let Some(key_update) = self.crypto_ctx[epoch]
3323 .key_update
3324 .as_ref()
3325 .and_then(|key_update| {
3326 (pn < key_update.pn_on_update).then_some(key_update)
3327 })
3328 {
3329 aead = &key_update.crypto_open;
3330 } else {
3331 trace!("{} peer-initiated key update", self.trace_id);
3332
3333 aead_next = Some((
3334 self.crypto_ctx[epoch]
3335 .crypto_open
3336 .as_ref()
3337 .unwrap()
3338 .derive_next_packet_key()?,
3339 self.crypto_ctx[epoch]
3340 .crypto_seal
3341 .as_ref()
3342 .unwrap()
3343 .derive_next_packet_key()?,
3344 ));
3345
3346 // `aead_next` is always `Some()` at this point, so the `unwrap()`
3347 // will never fail.
3348 aead = &aead_next.as_ref().unwrap().0;
3349 }
3350 }
3351
3352 let mut payload = packet::decrypt_pkt(
3353 &mut b,
3354 pn,
3355 pn_len,
3356 payload_len,
3357 aead,
3358 )
3359 .map_err(|e| {
3360 drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
3361 })?;
3362
3363 if self.pkt_num_spaces[epoch].recv_pkt_num.contains(pn) {
3364 trace!("{} ignored duplicate packet {}", self.trace_id, pn);
3365 return Err(Error::Done);
3366 }
3367
3368 // Packets with no frames are invalid.
3369 if payload.cap() == 0 {
3370 return Err(Error::InvalidPacket);
3371 }
3372
3373 // Now that we decrypted the packet, let's see if we can map it to an
3374 // existing path.
3375 let recv_pid = if hdr.ty == Type::Short && self.got_peer_conn_id {
3376 let pkt_dcid = ConnectionId::from_ref(&hdr.dcid);
3377 self.get_or_create_recv_path_id(recv_pid, &pkt_dcid, buf_len, info)?
3378 } else {
3379 // During handshake, we are on the initial path.
3380 self.paths.get_active_path_id()?
3381 };
3382
3383 // The key update is verified once a packet is successfully decrypted
3384 // using the new keys.
3385 if let Some((open_next, seal_next)) = aead_next {
3386 if !self.crypto_ctx[epoch]
3387 .key_update
3388 .as_ref()
3389 .is_none_or(|prev| prev.update_acked)
3390 {
3391 // Peer has updated keys twice without awaiting confirmation.
3392 return Err(Error::KeyUpdate);
3393 }
3394
3395 trace!("{} key update verified", self.trace_id);
3396
3397 let _ = self.crypto_ctx[epoch].crypto_seal.replace(seal_next);
3398
3399 let open_prev = self.crypto_ctx[epoch]
3400 .crypto_open
3401 .replace(open_next)
3402 .unwrap();
3403
3404 let recv_path = self.paths.get_mut(recv_pid)?;
3405
3406 self.crypto_ctx[epoch].key_update = Some(packet::KeyUpdate {
3407 crypto_open: open_prev,
3408 pn_on_update: pn,
3409 update_acked: false,
3410 timer: now + (recv_path.recovery.pto() * 3),
3411 });
3412
3413 self.key_phase = !self.key_phase;
3414
3415 qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
3416 let trigger = Some(
3417 qlog::events::quic::KeyUpdateOrRetiredTrigger::RemoteUpdate,
3418 );
3419
3420 let ev_data_client =
3421 EventData::QuicKeyUpdated(qlog::events::quic::KeyUpdated {
3422 key_type: qlog::events::quic::KeyType::Client1RttSecret,
3423 trigger: trigger.clone(),
3424 ..Default::default()
3425 });
3426
3427 q.add_event_data_with_instant(ev_data_client, now).ok();
3428
3429 let ev_data_server =
3430 EventData::QuicKeyUpdated(qlog::events::quic::KeyUpdated {
3431 key_type: qlog::events::quic::KeyType::Server1RttSecret,
3432 trigger,
3433 ..Default::default()
3434 });
3435
3436 q.add_event_data_with_instant(ev_data_server, now).ok();
3437 });
3438 }
3439
3440 if !self.is_server && !self.got_peer_conn_id {
3441 if self.odcid.is_none() {
3442 self.odcid = Some(self.destination_id().into_owned());
3443 }
3444
3445 // Replace the randomly generated destination connection ID with
3446 // the one supplied by the server.
3447 self.set_initial_dcid(
3448 hdr.scid.clone(),
3449 self.peer_transport_params.stateless_reset_token,
3450 recv_pid,
3451 )?;
3452
3453 self.got_peer_conn_id = true;
3454 }
3455
3456 if self.is_server && !self.got_peer_conn_id {
3457 self.set_initial_dcid(hdr.scid.clone(), None, recv_pid)?;
3458
3459 if !self.did_retry {
3460 self.local_transport_params
3461 .original_destination_connection_id =
3462 Some(hdr.dcid.to_vec().into());
3463
3464 self.encode_transport_params()?;
3465 }
3466
3467 self.got_peer_conn_id = true;
3468 }
3469
3470 // To avoid sending an ACK in response to an ACK-only packet, we need
3471 // to keep track of whether this packet contains any frame other than
3472 // ACK and PADDING.
3473 let mut ack_elicited = false;
3474
3475 // Process packet payload. If a frame cannot be processed, store the
3476 // error and stop further packet processing.
3477 let mut frame_processing_err = None;
3478
3479 // To know if the peer migrated the connection, we need to keep track
3480 // whether this is a non-probing packet.
3481 let mut probing = true;
3482
3483 // Process packet payload.
3484 while payload.cap() > 0 {
3485 let frame = frame::Frame::from_bytes(&mut payload, hdr.ty)?;
3486
3487 qlog_with_type!(QLOG_PACKET_RX, self.qlog, _q, {
3488 qlog_frames.push(frame.to_qlog());
3489 });
3490
3491 if frame.ack_eliciting() {
3492 ack_elicited = true;
3493 }
3494
3495 if !frame.probing() {
3496 probing = false;
3497 }
3498
3499 if let Err(e) = self.process_frame(frame, &hdr, recv_pid, epoch, now)
3500 {
3501 frame_processing_err = Some(e);
3502 break;
3503 }
3504 }
3505
3506 qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
3507 let packet_size = b.len();
3508
3509 let qlog_pkt_hdr = qlog::events::quic::PacketHeader::with_type(
3510 hdr.ty.to_qlog(),
3511 Some(pn),
3512 Some(hdr.version),
3513 Some(&hdr.scid),
3514 Some(&hdr.dcid),
3515 );
3516
3517 let qlog_raw_info = RawInfo {
3518 length: Some(packet_size as u64),
3519 payload_length: Some(payload_len as u64),
3520 data: None,
3521 };
3522
3523 let ev_data = EventData::QuicPacketReceived(
3524 qlog::events::quic::PacketReceived {
3525 header: qlog_pkt_hdr,
3526 frames: Some(qlog_frames),
3527 raw: Some(qlog_raw_info),
3528 ..Default::default()
3529 },
3530 );
3531
3532 q.add_event_data_with_instant(ev_data, now).ok();
3533 });
3534
3535 qlog_with_type!(QLOG_METRICS, self.qlog, q, {
3536 let recv_path = self.paths.get_mut(recv_pid)?;
3537 recv_path.recovery.maybe_qlog(q, now);
3538 });
3539
3540 if let Some(e) = frame_processing_err {
3541 // Any frame error is terminal, so now just return.
3542 return Err(e);
3543 }
3544
3545 // Only log the remote transport parameters once the connection is
3546 // established (i.e. after frames have been fully parsed) and only
3547 // once per connection.
3548 if self.is_established() {
3549 qlog_with_type!(QLOG_PARAMS_SET, self.qlog, q, {
3550 if !self.qlog.logged_peer_params {
3551 let ev_data = self.peer_transport_params.to_qlog(
3552 TransportInitiator::Remote,
3553 self.handshake.cipher(),
3554 );
3555
3556 q.add_event_data_with_instant(ev_data, now).ok();
3557
3558 self.qlog.logged_peer_params = true;
3559 }
3560 });
3561 }
3562
3563 // Process acked frames. Note that several packets from several paths
3564 // might have been acked by the received packet.
3565 for (_, p) in self.paths.iter_mut() {
3566 while let Some(acked) = p.recovery.next_acked_frame(epoch) {
3567 match acked {
3568 frame::Frame::Ping {
3569 mtu_probe: Some(mtu_probe),
3570 } => {
3571 if let Some(pmtud) = p.pmtud.as_mut() {
3572 trace!(
3573 "{} pmtud probe acked; probe size {:?}",
3574 self.trace_id,
3575 mtu_probe
3576 );
3577
3578 // Ensure the probe is within the supported MTU range
3579 // before updating the max datagram size
3580 if let Some(current_mtu) =
3581 pmtud.successful_probe(mtu_probe)
3582 {
3583 qlog_with_type!(
3584 EventType::QuicEventType(
3585 QuicEventType::MtuUpdated
3586 ),
3587 self.qlog,
3588 q,
3589 {
3590 let pmtu_data = EventData::QuicMtuUpdated(
3591 qlog::events::quic::MtuUpdated {
3592 old: Some(
3593 p.recovery.max_datagram_size()
3594 as u32,
3595 ),
3596 new: current_mtu as u32,
3597 done: Some(true),
3598 },
3599 );
3600
3601 q.add_event_data_with_instant(
3602 pmtu_data, now,
3603 )
3604 .ok();
3605 }
3606 );
3607
3608 p.recovery
3609 .pmtud_update_max_datagram_size(current_mtu);
3610 }
3611 }
3612 },
3613
3614 frame::Frame::ACK { ranges, .. } => {
3615 // Stop acknowledging packets less than or equal to the
3616 // largest acknowledged in the sent ACK frame that, in
3617 // turn, got acked.
3618 if let Some(largest_acked) = ranges.last() {
3619 self.pkt_num_spaces[epoch]
3620 .recv_pkt_need_ack
3621 .remove_until(largest_acked);
3622 }
3623 },
3624
3625 frame::Frame::CryptoHeader { offset, length } => {
3626 self.crypto_ctx[epoch]
3627 .crypto_stream
3628 .send
3629 .ack_and_drop(offset, length);
3630 },
3631
3632 frame::Frame::StreamHeader {
3633 stream_id,
3634 offset,
3635 length,
3636 ..
3637 } => {
3638 // Update tx_buffered and emit qlog before checking if the
3639 // stream still exists. The client does need to ACK
3640 // frames that were received after the client sends a
3641 // ResetStream.
3642 self.tx_buffered =
3643 self.tx_buffered.saturating_sub(length);
3644
3645 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
3646 let ev_data = EventData::QuicStreamDataMoved(
3647 qlog::events::quic::StreamDataMoved {
3648 stream_id: Some(stream_id),
3649 offset: Some(offset),
3650 raw: Some(RawInfo {
3651 length: Some(length as u64),
3652 ..Default::default()
3653 }),
3654 from: Some(DataRecipient::Transport),
3655 to: Some(DataRecipient::Dropped),
3656 ..Default::default()
3657 },
3658 );
3659
3660 q.add_event_data_with_instant(ev_data, now).ok();
3661 });
3662
3663 let stream = match self.streams.get_mut(stream_id) {
3664 Some(v) => v,
3665
3666 None => continue,
3667 };
3668
3669 stream.send.ack_and_drop(offset, length);
3670
3671 let priority_key = Arc::clone(&stream.priority_key);
3672
3673 // Only collect the stream if it is complete and not
3674 // readable or writable.
3675 //
3676 // If it is readable, it will get collected when
3677 // stream_recv() is next used.
3678 //
3679 // If it is writable, it might mean that the stream
3680 // has been stopped by the peer (i.e. a STOP_SENDING
3681 // frame is received), in which case before collecting
3682 // the stream we will need to propagate the
3683 // `StreamStopped` error to the application. It will
3684 // instead get collected when one of stream_capacity(),
3685 // stream_writable(), stream_send(), ... is next called.
3686 //
3687 // Note that we can't use `is_writable()` here because
3688 // it returns false if the stream is stopped. Instead,
3689 // since the stream is marked as writable when a
3690 // STOP_SENDING frame is received, we check the writable
3691 // queue directly instead.
3692 let is_writable = priority_key.writable.is_linked() &&
3693 // Ensure that the stream is actually stopped.
3694 stream.send.is_stopped();
3695
3696 let is_complete = stream.is_complete();
3697 let is_readable = stream.is_readable();
3698
3699 if is_complete && !is_readable && !is_writable {
3700 let local = stream.local;
3701 self.streams.collect(stream_id, local);
3702 }
3703 },
3704
3705 frame::Frame::HandshakeDone => {
3706 // Explicitly set this to true, so that if the frame was
3707 // already scheduled for retransmission, it is aborted.
3708 self.handshake_done_sent = true;
3709
3710 self.handshake_done_acked = true;
3711 },
3712
3713 frame::Frame::ResetStream { stream_id, .. } => {
3714 let stream = match self.streams.get_mut(stream_id) {
3715 Some(v) => v,
3716
3717 None => continue,
3718 };
3719
3720 let priority_key = Arc::clone(&stream.priority_key);
3721
3722 // Only collect the stream if it is complete and not
3723 // readable or writable.
3724 //
3725 // If it is readable, it will get collected when
3726 // stream_recv() is next used.
3727 //
3728 // If it is writable, it might mean that the stream
3729 // has been stopped by the peer (i.e. a STOP_SENDING
3730 // frame is received), in which case before collecting
3731 // the stream we will need to propagate the
3732 // `StreamStopped` error to the application. It will
3733 // instead get collected when one of stream_capacity(),
3734 // stream_writable(), stream_send(), ... is next called.
3735 //
3736 // Note that we can't use `is_writable()` here because
3737 // it returns false if the stream is stopped. Instead,
3738 // since the stream is marked as writable when a
3739 // STOP_SENDING frame is received, we check the writable
3740 // queue directly instead.
3741 let is_writable = priority_key.writable.is_linked() &&
3742 // Ensure that the stream is actually stopped.
3743 stream.send.is_stopped();
3744
3745 let is_complete = stream.is_complete();
3746 let is_readable = stream.is_readable();
3747
3748 if is_complete && !is_readable && !is_writable {
3749 let local = stream.local;
3750 self.streams.collect(stream_id, local);
3751 }
3752 },
3753
3754 _ => (),
3755 }
3756 }
3757 }
3758
3759 // Now that we processed all the frames, if there is a path that has no
3760 // Destination CID, try to allocate one.
3761 let no_dcid = self
3762 .paths
3763 .iter_mut()
3764 .filter(|(_, p)| p.active_dcid_seq.is_none());
3765
3766 for (pid, p) in no_dcid {
3767 if self.ids.zero_length_dcid() {
3768 p.active_dcid_seq = Some(0);
3769 continue;
3770 }
3771
3772 let dcid_seq = match self.ids.lowest_available_dcid_seq() {
3773 Some(seq) => seq,
3774 None => break,
3775 };
3776
3777 self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
3778
3779 p.active_dcid_seq = Some(dcid_seq);
3780 }
3781
3782 // We only record the time of arrival of the largest packet number
3783 // that still needs to be acked, to be used for ACK delay calculation.
3784 if self.pkt_num_spaces[epoch].recv_pkt_need_ack.last() < Some(pn) {
3785 self.pkt_num_spaces[epoch].largest_rx_pkt_time = now;
3786 }
3787
3788 self.pkt_num_spaces[epoch].recv_pkt_num.insert(pn);
3789
3790 self.pkt_num_spaces[epoch].recv_pkt_need_ack.push_item(pn);
3791
3792 self.pkt_num_spaces[epoch].ack_elicited =
3793 cmp::max(self.pkt_num_spaces[epoch].ack_elicited, ack_elicited);
3794
3795 self.pkt_num_spaces[epoch].largest_rx_pkt_num =
3796 cmp::max(self.pkt_num_spaces[epoch].largest_rx_pkt_num, pn);
3797
3798 if !probing {
3799 self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num = cmp::max(
3800 self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num,
3801 pn,
3802 );
3803
            // Did the peer migrate to another path?
3805 let active_path_id = self.paths.get_active_path_id()?;
3806
3807 if self.is_server &&
3808 recv_pid != active_path_id &&
3809 self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num == pn
3810 {
3811 self.on_peer_migrated(recv_pid, self.disable_dcid_reuse, now)?;
3812 }
3813 }
3814
3815 if let Some(idle_timeout) = self.idle_timeout() {
3816 self.idle_timer = Some(now + idle_timeout);
3817 }
3818
3819 // Update send capacity.
3820 self.update_tx_cap();
3821
3822 self.recv_count += 1;
3823 self.paths.get_mut(recv_pid)?.recv_count += 1;
3824
3825 let read = b.off() + aead_tag_len;
3826
3827 self.recv_bytes += read as u64;
3828 self.paths.get_mut(recv_pid)?.recv_bytes += read as u64;
3829
        // A Handshake packet has been received from the client and has been
3831 // successfully processed, so we can drop the initial state and consider
3832 // the client's address to be verified.
3833 if self.is_server && hdr.ty == Type::Handshake {
3834 self.drop_epoch_state(packet::Epoch::Initial, now);
3835
3836 self.paths.get_mut(recv_pid)?.verified_peer_address = true;
3837 }
3838
3839 self.ack_eliciting_sent = false;
3840
3841 Ok(read)
3842 }
3843
3844 /// Writes a single QUIC packet to be sent to the peer.
3845 ///
3846 /// On success the number of bytes written to the output buffer is
3847 /// returned, or [`Done`] if there was nothing to write.
3848 ///
3849 /// The application should call `send()` multiple times until [`Done`] is
3850 /// returned, indicating that there are no more packets to send. It is
3851 /// recommended that `send()` be called in the following cases:
3852 ///
3853 /// * When the application receives QUIC packets from the peer (that is,
3854 /// any time [`recv()`] is also called).
3855 ///
3856 /// * When the connection timer expires (that is, any time [`on_timeout()`]
3857 /// is also called).
3858 ///
3859 /// * When the application sends data to the peer (for example, any time
3860 /// [`stream_send()`] or [`stream_shutdown()`] are called).
3861 ///
3862 /// * When the application receives data from the peer (for example any
3863 /// time [`stream_recv()`] is called).
3864 ///
3865 /// Once [`is_draining()`] returns `true`, it is no longer necessary to call
3866 /// `send()` and all calls will return [`Done`].
3867 ///
3868 /// [`Done`]: enum.Error.html#variant.Done
3869 /// [`recv()`]: struct.Connection.html#method.recv
3870 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
3871 /// [`stream_send()`]: struct.Connection.html#method.stream_send
3872 /// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
3873 /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
3874 /// [`is_draining()`]: struct.Connection.html#method.is_draining
3875 ///
3876 /// ## Examples:
3877 ///
3878 /// ```no_run
3879 /// # let mut out = [0; 512];
3880 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
3881 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
3882 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
3883 /// # let peer = "127.0.0.1:1234".parse().unwrap();
3884 /// # let local = socket.local_addr().unwrap();
3885 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
3886 /// loop {
3887 /// let (write, send_info) = match conn.send(&mut out) {
3888 /// Ok(v) => v,
3889 ///
3890 /// Err(quiche::Error::Done) => {
3891 /// // Done writing.
3892 /// break;
3893 /// },
3894 ///
3895 /// Err(e) => {
3896 /// // An error occurred, handle it.
3897 /// break;
3898 /// },
3899 /// };
3900 ///
3901 /// socket.send_to(&out[..write], &send_info.to).unwrap();
3902 /// }
3903 /// # Ok::<(), quiche::Error>(())
3904 /// ```
3905 pub fn send(&mut self, out: &mut [u8]) -> Result<(usize, SendInfo)> {
3906 self.send_on_path(out, None, None)
3907 }
3908
3909 /// Writes a single QUIC packet to be sent to the peer from the specified
3910 /// local address `from` to the destination address `to`.
3911 ///
3912 /// The behavior of this method differs depending on the value of the `from`
3913 /// and `to` parameters:
3914 ///
    /// * If both are `Some`, then the method only considers the 4-tuple
3916 /// (`from`, `to`). Application can monitor the 4-tuple availability,
3917 /// either by monitoring [`path_event_next()`] events or by relying on
3918 /// the [`paths_iter()`] method. If the provided 4-tuple does not exist
3919 /// on the connection (anymore), it returns an [`InvalidState`].
3920 ///
3921 /// * If `from` is `Some` and `to` is `None`, then the method only
3922 /// considers sending packets on paths having `from` as local address.
3923 ///
3924 /// * If `to` is `Some` and `from` is `None`, then the method only
3925 /// considers sending packets on paths having `to` as peer address.
3926 ///
3927 /// * If both are `None`, all available paths are considered.
3928 ///
3929 /// On success the number of bytes written to the output buffer is
3930 /// returned, or [`Done`] if there was nothing to write.
3931 ///
3932 /// The application should call `send_on_path()` multiple times until
3933 /// [`Done`] is returned, indicating that there are no more packets to
3934 /// send. It is recommended that `send_on_path()` be called in the
3935 /// following cases:
3936 ///
3937 /// * When the application receives QUIC packets from the peer (that is,
3938 /// any time [`recv()`] is also called).
3939 ///
3940 /// * When the connection timer expires (that is, any time [`on_timeout()`]
3941 /// is also called).
3942 ///
    /// * When the application sends data to the peer (for example, any time
3944 /// [`stream_send()`] or [`stream_shutdown()`] are called).
3945 ///
3946 /// * When the application receives data from the peer (for example any
3947 /// time [`stream_recv()`] is called).
3948 ///
3949 /// Once [`is_draining()`] returns `true`, it is no longer necessary to call
3950 /// `send_on_path()` and all calls will return [`Done`].
3951 ///
3952 /// [`Done`]: enum.Error.html#variant.Done
3953 /// [`InvalidState`]: enum.Error.html#InvalidState
3954 /// [`recv()`]: struct.Connection.html#method.recv
3955 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
3956 /// [`stream_send()`]: struct.Connection.html#method.stream_send
3957 /// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
3958 /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
3959 /// [`path_event_next()`]: struct.Connection.html#method.path_event_next
3960 /// [`paths_iter()`]: struct.Connection.html#method.paths_iter
3961 /// [`is_draining()`]: struct.Connection.html#method.is_draining
3962 ///
3963 /// ## Examples:
3964 ///
3965 /// ```no_run
3966 /// # let mut out = [0; 512];
3967 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
3968 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
3969 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
3970 /// # let peer = "127.0.0.1:1234".parse().unwrap();
3971 /// # let local = socket.local_addr().unwrap();
3972 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
3973 /// loop {
3974 /// let (write, send_info) = match conn.send_on_path(&mut out, Some(local), Some(peer)) {
3975 /// Ok(v) => v,
3976 ///
3977 /// Err(quiche::Error::Done) => {
3978 /// // Done writing.
3979 /// break;
3980 /// },
3981 ///
3982 /// Err(e) => {
3983 /// // An error occurred, handle it.
3984 /// break;
3985 /// },
3986 /// };
3987 ///
3988 /// socket.send_to(&out[..write], &send_info.to).unwrap();
3989 /// }
3990 /// # Ok::<(), quiche::Error>(())
3991 /// ```
    pub fn send_on_path(
        &mut self, out: &mut [u8], from: Option<SocketAddr>,
        to: Option<SocketAddr>,
    ) -> Result<(usize, SendInfo)> {
        // An empty output buffer cannot hold any packet at all.
        if out.is_empty() {
            return Err(Error::BufferTooShort);
        }

        // Nothing should be sent once the connection is closed or draining.
        if self.is_closed() || self.is_draining() {
            return Err(Error::Done);
        }

        let now = Instant::now();

        // Keep making handshake progress, unless the connection is already
        // in the process of being closed locally.
        if self.local_error.is_none() {
            self.do_handshake(now)?;
        }

        // Forwarding the error value here could confuse
        // applications, as they may not expect getting a `recv()`
        // error when calling `send()`.
        //
        // We simply fall-through to sending packets, which should
        // take care of terminating the connection as needed.
        let _ = self.process_undecrypted_0rtt_packets();

        // There's no point in trying to send a packet if the Initial secrets
        // have not been derived yet, so return early.
        if !self.derived_initial_secrets {
            return Err(Error::Done);
        }

        // Whether an Initial packet ended up in this datagram (drives the
        // minimum-size padding at the end).
        let mut has_initial = false;

        // Total bytes written into `out` so far.
        let mut done = 0;

        // Limit output packet size to respect the sender and receiver's
        // maximum UDP payload size limit.
        let mut left = cmp::min(out.len(), self.max_send_udp_payload_size());

        // Resolve the path to send on: an explicit 4-tuple must map to an
        // existing path, otherwise let the connection pick one matching the
        // provided (possibly partial) constraints.
        let send_pid = match (from, to) {
            (Some(f), Some(t)) => self
                .paths
                .path_id_from_addrs(&(f, t))
                .ok_or(Error::InvalidState)?,

            _ => self.get_send_path_id(from, to)?,
        };

        let send_path = self.paths.get_mut(send_pid)?;

        // Update max datagram size to allow path MTU discovery probe to be sent.
        if let Some(pmtud) = send_path.pmtud.as_mut() {
            if pmtud.should_probe() {
                // Only use the (larger) probe size once the handshake has
                // progressed far enough; before that, stick to the current
                // MTU estimate.
                let size = if self.handshake_confirmed || self.handshake_completed
                {
                    pmtud.get_probe_size()
                } else {
                    pmtud.get_current_mtu()
                };

                send_path.recovery.pmtud_update_max_datagram_size(size);

                left =
                    cmp::min(out.len(), send_path.recovery.max_datagram_size());
            }
        }

        // Limit data sent by the server based on the amount of data received
        // from the client before its address is validated.
        if !send_path.verified_peer_address && self.is_server {
            left = cmp::min(left, send_path.max_send_bytes);
        }

        // Generate coalesced packets: keep appending packets to the same UDP
        // datagram until the buffer is full or a stop condition is hit.
        while left > 0 {
            let (ty, written) = match self.send_single(
                &mut out[done..done + left],
                send_pid,
                has_initial,
                now,
            ) {
                Ok(v) => v,

                // Out of space or nothing more to write: stop coalescing but
                // keep whatever was already written.
                Err(Error::BufferTooShort) | Err(Error::Done) => break,

                Err(e) => return Err(e),
            };

            done += written;
            left -= written;

            match ty {
                Type::Initial => has_initial = true,

                // No more packets can be coalesced after a 1-RTT.
                Type::Short => break,

                _ => (),
            };

            // When sending multiple PTO probes, don't coalesce them together,
            // so they are sent on separate UDP datagrams.
            if let Ok(epoch) = ty.to_epoch() {
                if self.paths.get_mut(send_pid)?.recovery.loss_probes(epoch) > 0 {
                    break;
                }
            }

            // Don't coalesce packets that must go on different paths.
            if !(from.is_some() && to.is_some()) &&
                self.get_send_path_id(from, to)? != send_pid
            {
                break;
            }
        }

        if done == 0 {
            // Nothing was written this round; snapshot tx_data (NOTE(review):
            // presumably consumed by the app-limited/delivery-rate check —
            // confirm against `delivery_rate_check_if_app_limited`).
            self.last_tx_data = self.tx_data;

            return Err(Error::Done);
        }

        // Datagrams carrying Initial packets are padded up to
        // MIN_CLIENT_INITIAL_LEN, as far as the remaining buffer allows.
        if has_initial && left > 0 && done < MIN_CLIENT_INITIAL_LEN {
            let pad_len = cmp::min(left, MIN_CLIENT_INITIAL_LEN - done);

            // Fill padding area with null bytes, to avoid leaking information
            // in case the application reuses the packet buffer.
            out[done..done + pad_len].fill(0);

            done += pad_len;
        }

        // Re-borrow the path immutably to fill in the send metadata.
        let send_path = self.paths.get(send_pid)?;

        let info = SendInfo {
            from: send_path.local_addr(),
            to: send_path.peer_addr(),

            // Pacing: the earliest time the datagram should hit the wire.
            at: send_path.recovery.get_packet_send_time(now),
        };

        Ok((done, info))
    }
4136
4137 fn send_single(
4138 &mut self, out: &mut [u8], send_pid: usize, has_initial: bool,
4139 now: Instant,
4140 ) -> Result<(Type, usize)> {
4141 if out.is_empty() {
4142 return Err(Error::BufferTooShort);
4143 }
4144
4145 if self.is_draining() {
4146 return Err(Error::Done);
4147 }
4148
4149 let is_closing = self.local_error.is_some();
4150
4151 let out_len = out.len();
4152
4153 let mut b = octets::OctetsMut::with_slice(out);
4154
4155 let pkt_type = self.write_pkt_type(send_pid)?;
4156
4157 let max_dgram_len = if !self.dgram_send_queue.is_empty() {
4158 self.dgram_max_writable_len()
4159 } else {
4160 None
4161 };
4162
4163 let epoch = pkt_type.to_epoch()?;
4164 let pkt_space = &mut self.pkt_num_spaces[epoch];
4165 let crypto_ctx = &mut self.crypto_ctx[epoch];
4166
4167 // Process lost frames. There might be several paths having lost frames.
4168 for (_, p) in self.paths.iter_mut() {
4169 while let Some(lost) = p.recovery.next_lost_frame(epoch) {
4170 match lost {
4171 frame::Frame::CryptoHeader { offset, length } => {
4172 crypto_ctx.crypto_stream.send.retransmit(offset, length);
4173
4174 self.stream_retrans_bytes += length as u64;
4175 p.stream_retrans_bytes += length as u64;
4176
4177 self.retrans_count += 1;
4178 p.retrans_count += 1;
4179 },
4180
4181 frame::Frame::StreamHeader {
4182 stream_id,
4183 offset,
4184 length,
4185 fin,
4186 } => {
4187 let stream = match self.streams.get_mut(stream_id) {
4188 // Only retransmit data if the stream is not closed
4189 // or stopped.
4190 Some(v) if !v.send.is_stopped() => v,
4191
4192 // Data on a closed stream will not be retransmitted
4193 // or acked after it is declared lost, so update
4194 // tx_buffered and qlog.
4195 _ => {
4196 self.tx_buffered =
4197 self.tx_buffered.saturating_sub(length);
4198
4199 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
4200 let ev_data = EventData::QuicStreamDataMoved(
4201 qlog::events::quic::StreamDataMoved {
4202 stream_id: Some(stream_id),
4203 offset: Some(offset),
4204 raw: Some(RawInfo {
4205 length: Some(length as u64),
4206 ..Default::default()
4207 }),
4208 from: Some(DataRecipient::Transport),
4209 to: Some(DataRecipient::Dropped),
4210 ..Default::default()
4211 },
4212 );
4213
4214 q.add_event_data_with_instant(ev_data, now)
4215 .ok();
4216 });
4217
4218 continue;
4219 },
4220 };
4221
4222 let was_flushable = stream.is_flushable();
4223
4224 let empty_fin = length == 0 && fin;
4225
4226 stream.send.retransmit(offset, length);
4227
4228 // If the stream is now flushable push it to the
4229 // flushable queue, but only if it wasn't already
4230 // queued.
4231 //
4232 // Consider the stream flushable also when we are
4233 // sending a zero-length frame that has the fin flag
4234 // set.
4235 if (stream.is_flushable() || empty_fin) && !was_flushable
4236 {
4237 let priority_key = Arc::clone(&stream.priority_key);
4238 self.streams.insert_flushable(&priority_key);
4239 }
4240
4241 self.stream_retrans_bytes += length as u64;
4242 p.stream_retrans_bytes += length as u64;
4243
4244 self.retrans_count += 1;
4245 p.retrans_count += 1;
4246 },
4247
4248 frame::Frame::ACK { .. } => {
4249 pkt_space.ack_elicited = true;
4250 },
4251
4252 frame::Frame::ResetStream {
4253 stream_id,
4254 error_code,
4255 final_size,
4256 } => {
4257 self.streams
4258 .insert_reset(stream_id, error_code, final_size);
4259 },
4260
4261 frame::Frame::StopSending {
4262 stream_id,
4263 error_code,
4264 } =>
4265 // We only need to retransmit the STOP_SENDING frame if
4266 // the stream is still active and not FIN'd. Even if the
4267 // packet was lost, if the application has the final
4268 // size at this point there is no need to retransmit.
4269 if let Some(stream) = self.streams.get(stream_id) {
4270 if !stream.recv.is_fin() {
4271 self.streams
4272 .insert_stopped(stream_id, error_code);
4273 }
4274 },
4275
4276 // Retransmit HANDSHAKE_DONE only if it hasn't been acked at
4277 // least once already.
4278 frame::Frame::HandshakeDone =>
4279 if !self.handshake_done_acked {
4280 self.handshake_done_sent = false;
4281 },
4282
4283 frame::Frame::MaxStreamData { stream_id, .. } => {
4284 if self.streams.get(stream_id).is_some() {
4285 self.streams.insert_almost_full(stream_id);
4286 }
4287 },
4288
4289 frame::Frame::MaxData { .. } => {
4290 self.should_send_max_data = true;
4291 },
4292
4293 frame::Frame::MaxStreamsUni { .. } => {
4294 self.should_send_max_streams_uni = true;
4295 },
4296
4297 frame::Frame::MaxStreamsBidi { .. } => {
4298 self.should_send_max_streams_bidi = true;
4299 },
4300
4301 // Retransmit STREAMS_BLOCKED frames if the frame with the
4302 // most recent limit is lost. These are informational
4303 // signals to the peer, reliably sending them
4304 // ensures the signal is used consistently and helps
4305 // debugging.
4306 frame::Frame::StreamsBlockedBidi { limit } => {
4307 self.streams_blocked_bidi_state
4308 .force_retransmit_sent_limit_eq(limit);
4309 },
4310
4311 frame::Frame::StreamsBlockedUni { limit } => {
4312 self.streams_blocked_uni_state
4313 .force_retransmit_sent_limit_eq(limit);
4314 },
4315
4316 frame::Frame::NewConnectionId { seq_num, .. } => {
4317 self.ids.mark_advertise_new_scid_seq(seq_num, true);
4318 },
4319
4320 frame::Frame::RetireConnectionId { seq_num } => {
4321 self.ids.mark_retire_dcid_seq(seq_num, true)?;
4322 },
4323
4324 frame::Frame::Ping { mtu_probe } => {
4325 // Ping frames are not retransmitted.
4326 if let Some(failed_probe) = mtu_probe {
4327 if let Some(pmtud) = p.pmtud.as_mut() {
4328 trace!("pmtud probe dropped: {failed_probe}");
4329 pmtud.failed_probe(failed_probe);
4330 }
4331 }
4332 },
4333
4334 // Sent as StreamHeader frames. Stream frames are never
4335 // generated by quiche.
4336 frame::Frame::Stream { .. } => {
4337 debug_panic!(
4338 "Unexpected frame lost: Stream. quiche should \
4339 have tracked retransmittable stream data as \
4340 StreamHeader frames."
4341 );
4342 },
4343
4344 // Sent as CryptoHeader frames. Crypto frames are never
4345 // generated by quiche.
4346 frame::Frame::Crypto { .. } => {
4347 debug_panic!(
4348 "Unexpected frame lost: Crypto. quiche should \
4349 have tracked retransmittable crypto data as \
4350 CryptoHeader frames."
4351 );
4352 },
4353
4354 // NewToken frames are never sent by quiche; they are not
4355 // implemented.
4356 frame::Frame::NewToken { .. } => {
4357 debug_panic!(
4358 "Unexpected frame lost: NewToken. quiche used to \
4359 not implement NewToken frames, retransmission of \
4360 these frames is not implemented."
4361 );
4362 },
4363
4364 // Data blocked frames are an optional advisory
4365 // signal. We choose to not retransmit them to
4366 // avoid unnecessary network usage.
4367 frame::Frame::DataBlocked { .. } |
4368 frame::Frame::StreamDataBlocked { .. } => (),
4369
4370 // Path challenge and response have their own
4371 // retry logic. They should not be retransmitted
4372 // normally since according to RFC 9000 Section
4373 // 8.2.2: "An endpoint MUST NOT send more than one
4374 // PATH_RESPONSE frame in response to one
4375 // PATH_CHALLENGE frame".
4376 frame::Frame::PathChallenge { .. } |
4377 frame::Frame::PathResponse { .. } => (),
4378
4379 // From RFC 9000 Section 13.3: CONNECTION_CLOSE
4380 // frames, are not sent again when packet loss is
4381 // detected. Resending these signals is described
4382 // in Section 10.
4383 frame::Frame::ConnectionClose { .. } |
4384 frame::Frame::ApplicationClose { .. } => (),
4385
4386 // Padding doesn't require retransmission.
4387 frame::Frame::Padding { .. } => (),
4388
4389 frame::Frame::DatagramHeader { .. } |
4390 frame::Frame::Datagram { .. } => {
4391 // Datagrams do not require retransmission. Just update
4392 // stats.
4393 p.dgram_lost_count = p.dgram_lost_count.saturating_add(1);
4394 },
4395 // IMPORTANT: Do not add an exhaustive catch
4396 // all. We want to add explicit handling for frame
4397 // types that can be safely ignored when lost.
4398 }
4399 }
4400 }
4401 self.check_tx_buffered_invariant();
4402
4403 let is_app_limited = self.delivery_rate_check_if_app_limited();
4404 let n_paths = self.paths.len();
4405 let path = self.paths.get_mut(send_pid)?;
4406 let flow_control = &mut self.flow_control;
4407 let pkt_space = &mut self.pkt_num_spaces[epoch];
4408 let crypto_ctx = &mut self.crypto_ctx[epoch];
4409 let pkt_num_manager = &mut self.pkt_num_manager;
4410
4411 let mut left = if let Some(pmtud) = path.pmtud.as_mut() {
4412 // Limit output buffer size by estimated path MTU.
4413 cmp::min(pmtud.get_current_mtu(), b.cap())
4414 } else {
4415 b.cap()
4416 };
4417
4418 if pkt_num_manager.should_skip_pn(self.handshake_completed) {
4419 pkt_num_manager.set_skip_pn(Some(self.next_pkt_num));
4420 self.next_pkt_num += 1;
4421 };
4422 let pn = self.next_pkt_num;
4423
4424 let largest_acked_pkt =
4425 path.recovery.get_largest_acked_on_epoch(epoch).unwrap_or(0);
4426 let pn_len = packet::pkt_num_len(pn, largest_acked_pkt);
4427
4428 // The AEAD overhead at the current encryption level.
4429 let crypto_overhead = crypto_ctx.crypto_overhead().ok_or(Error::Done)?;
4430
4431 let dcid_seq = path.active_dcid_seq.ok_or(Error::OutOfIdentifiers)?;
4432
4433 let dcid =
4434 ConnectionId::from_ref(self.ids.get_dcid(dcid_seq)?.cid.as_ref());
4435
4436 let scid = if let Some(scid_seq) = path.active_scid_seq {
4437 ConnectionId::from_ref(self.ids.get_scid(scid_seq)?.cid.as_ref())
4438 } else if pkt_type == Type::Short {
4439 ConnectionId::default()
4440 } else {
4441 return Err(Error::InvalidState);
4442 };
4443
4444 let hdr = Header {
4445 ty: pkt_type,
4446
4447 version: self.version,
4448
4449 dcid,
4450 scid,
4451
4452 pkt_num: 0,
4453 pkt_num_len: pn_len,
4454
4455 // Only clone token for Initial packets, as other packets don't have
4456 // this field (Retry doesn't count, as it's not encoded as part of
4457 // this code path).
4458 token: if pkt_type == Type::Initial {
4459 self.token.clone()
4460 } else {
4461 None
4462 },
4463
4464 versions: None,
4465 key_phase: self.key_phase,
4466 };
4467
4468 hdr.to_bytes(&mut b)?;
4469
4470 let hdr_trace = if log::max_level() == log::LevelFilter::Trace {
4471 Some(format!("{hdr:?}"))
4472 } else {
4473 None
4474 };
4475
4476 let hdr_ty = hdr.ty;
4477
4478 #[cfg(feature = "qlog")]
4479 let qlog_pkt_hdr = self.qlog.streamer.as_ref().map(|_q| {
4480 qlog::events::quic::PacketHeader::with_type(
4481 hdr.ty.to_qlog(),
4482 Some(pn),
4483 Some(hdr.version),
4484 Some(&hdr.scid),
4485 Some(&hdr.dcid),
4486 )
4487 });
4488
4489 // Calculate the space required for the packet, including the header
4490 // the payload length, the packet number and the AEAD overhead.
4491 let mut overhead = b.off() + pn_len + crypto_overhead;
4492
4493 // We assume that the payload length, which is only present in long
4494 // header packets, can always be encoded with a 2-byte varint.
4495 if pkt_type != Type::Short {
4496 overhead += PAYLOAD_LENGTH_LEN;
4497 }
4498
4499 // Make sure we have enough space left for the packet overhead.
4500 match left.checked_sub(overhead) {
4501 Some(v) => left = v,
4502
4503 None => {
4504 // We can't send more because there isn't enough space available
4505 // in the output buffer.
4506 //
4507 // This usually happens when we try to send a new packet but
4508 // failed because cwnd is almost full. In such case app_limited
4509 // is set to false here to make cwnd grow when ACK is received.
4510 path.recovery.update_app_limited(false);
4511 return Err(Error::Done);
4512 },
4513 }
4514
4515 // Make sure there is enough space for the minimum payload length.
4516 if left < PAYLOAD_MIN_LEN {
4517 path.recovery.update_app_limited(false);
4518 return Err(Error::Done);
4519 }
4520
4521 let mut frames: SmallVec<[frame::Frame; 1]> = SmallVec::new();
4522
4523 let mut ack_eliciting = false;
4524 let mut in_flight = false;
4525 let mut is_pmtud_probe = false;
4526 let mut has_data = false;
4527
4528 // Whether or not we should explicitly elicit an ACK via PING frame if we
4529 // implicitly elicit one otherwise.
4530 let ack_elicit_required = path.recovery.should_elicit_ack(epoch);
4531
4532 let header_offset = b.off();
4533
4534 // Reserve space for payload length in advance. Since we don't yet know
4535 // what the final length will be, we reserve 2 bytes in all cases.
4536 //
4537 // Only long header packets have an explicit length field.
4538 if pkt_type != Type::Short {
4539 b.skip(PAYLOAD_LENGTH_LEN)?;
4540 }
4541
4542 packet::encode_pkt_num(pn, pn_len, &mut b)?;
4543
4544 let payload_offset = b.off();
4545
4546 let cwnd_available =
4547 path.recovery.cwnd_available().saturating_sub(overhead);
4548
4549 let left_before_packing_ack_frame = left;
4550
4551 // Create ACK frame.
4552 //
4553 // When we need to explicitly elicit an ACK via PING later, go ahead and
4554 // generate an ACK (if there's anything to ACK) since we're going to
4555 // send a packet with PING anyways, even if we haven't received anything
4556 // ACK eliciting.
4557 if pkt_space.recv_pkt_need_ack.len() > 0 &&
4558 (pkt_space.ack_elicited || ack_elicit_required) &&
4559 (!is_closing ||
4560 (pkt_type == Type::Handshake &&
4561 self.local_error
4562 .as_ref()
4563 .is_some_and(|le| le.is_app))) &&
4564 path.active()
4565 {
4566 #[cfg(not(feature = "fuzzing"))]
4567 let ack_delay = pkt_space.largest_rx_pkt_time.elapsed();
4568
4569 #[cfg(not(feature = "fuzzing"))]
4570 let ack_delay = ack_delay.as_micros() as u64 /
4571 2_u64
4572 .pow(self.local_transport_params.ack_delay_exponent as u32);
4573
4574 // pseudo-random reproducible ack delays when fuzzing
4575 #[cfg(feature = "fuzzing")]
4576 let ack_delay = rand::rand_u8() as u64 + 1;
4577
4578 let frame = frame::Frame::ACK {
4579 ack_delay,
4580 ranges: pkt_space.recv_pkt_need_ack.clone(),
4581 ecn_counts: None, // sending ECN is not supported at this time
4582 };
4583
4584 // When a PING frame needs to be sent, avoid sending the ACK if
4585 // there is not enough cwnd available for both (note that PING
4586 // frames are always 1 byte, so we just need to check that the
4587 // ACK's length is lower than cwnd).
4588 if pkt_space.ack_elicited || frame.wire_len() < cwnd_available {
4589 // ACK-only packets are not congestion controlled so ACKs must
4590 // be bundled considering the buffer capacity only, and not the
4591 // available cwnd.
4592 if push_frame_to_pkt!(b, frames, frame, left) {
4593 pkt_space.ack_elicited = false;
4594 }
4595 }
4596 }
4597
4598 // Limit output packet size by congestion window size.
4599 left = cmp::min(
4600 left,
4601 // Bytes consumed by ACK frames.
4602 cwnd_available.saturating_sub(left_before_packing_ack_frame - left),
4603 );
4604
4605 let mut challenge_data = None;
4606
4607 if pkt_type == Type::Short {
4608 // Create PMTUD probe.
4609 //
4610 // In order to send a PMTUD probe the current `left` value, which was
4611 // already limited by the current PMTU measure, needs to be ignored,
4612 // but the outgoing packet still needs to be limited by
4613 // the output buffer size, as well as the congestion
4614 // window.
4615 //
4616 // In addition, the PMTUD probe is only generated when the handshake
4617 // is confirmed, to avoid interfering with the handshake
4618 // (e.g. due to the anti-amplification limits).
4619 if let Ok(active_path) = self.paths.get_active_mut() {
4620 let should_probe_pmtu = active_path.should_send_pmtu_probe(
4621 self.handshake_confirmed,
4622 self.handshake_completed,
4623 out_len,
4624 is_closing,
4625 frames.is_empty(),
4626 );
4627
4628 if should_probe_pmtu {
4629 if let Some(pmtud) = active_path.pmtud.as_mut() {
4630 let probe_size = pmtud.get_probe_size();
4631 trace!(
4632 "{} sending pmtud probe pmtu_probe={} estimated_pmtu={}",
4633 self.trace_id,
4634 probe_size,
4635 pmtud.get_current_mtu(),
4636 );
4637
4638 left = probe_size;
4639
4640 match left.checked_sub(overhead) {
4641 Some(v) => left = v,
4642
4643 None => {
4644 // We can't send more because there isn't enough
4645 // space available in the output buffer.
4646 //
4647 // This usually happens when we try to send a new
4648 // packet but failed because cwnd is almost full.
4649 //
4650 // In such case app_limited is set to false here
4651 // to make cwnd grow when ACK is received.
4652 active_path.recovery.update_app_limited(false);
4653 return Err(Error::Done);
4654 },
4655 }
4656
4657 let frame = frame::Frame::Padding {
4658 len: probe_size - overhead - 1,
4659 };
4660
4661 if push_frame_to_pkt!(b, frames, frame, left) {
4662 let frame = frame::Frame::Ping {
4663 mtu_probe: Some(probe_size),
4664 };
4665
4666 if push_frame_to_pkt!(b, frames, frame, left) {
4667 ack_eliciting = true;
4668 in_flight = true;
4669 }
4670 }
4671
4672 // Reset probe flag after sending to prevent duplicate
4673 // probes in a single flight.
4674 pmtud.set_in_flight(true);
4675 is_pmtud_probe = true;
4676 }
4677 }
4678 }
4679
4680 let path = self.paths.get_mut(send_pid)?;
4681 // Create PATH_RESPONSE frame if needed.
4682 // We do not try to ensure that these are really sent.
4683 while let Some(challenge) = path.pop_received_challenge() {
4684 let frame = frame::Frame::PathResponse { data: challenge };
4685
4686 if push_frame_to_pkt!(b, frames, frame, left) {
4687 ack_eliciting = true;
4688 in_flight = true;
4689 } else {
4690 // If there are other pending PATH_RESPONSE, don't lose them
4691 // now.
4692 break;
4693 }
4694 }
4695
4696 // Create PATH_CHALLENGE frame if needed.
4697 if path.validation_requested() {
4698 // TODO: ensure that data is unique over paths.
4699 let data = rand::rand_u64().to_be_bytes();
4700
4701 let frame = frame::Frame::PathChallenge { data };
4702
4703 if push_frame_to_pkt!(b, frames, frame, left) {
4704 // Let's notify the path once we know the packet size.
4705 challenge_data = Some(data);
4706
4707 ack_eliciting = true;
4708 in_flight = true;
4709 }
4710 }
4711
4712 if let Some(key_update) = crypto_ctx.key_update.as_mut() {
4713 key_update.update_acked = true;
4714 }
4715 }
4716
4717 let path = self.paths.get_mut(send_pid)?;
4718
4719 if pkt_type == Type::Short && !is_closing {
4720 // Create NEW_CONNECTION_ID frames as needed.
4721 while let Some(seq_num) = self.ids.next_advertise_new_scid_seq() {
4722 let frame = self.ids.get_new_connection_id_frame_for(seq_num)?;
4723
4724 if push_frame_to_pkt!(b, frames, frame, left) {
4725 self.ids.mark_advertise_new_scid_seq(seq_num, false);
4726
4727 ack_eliciting = true;
4728 in_flight = true;
4729 } else {
4730 break;
4731 }
4732 }
4733 }
4734
4735 if pkt_type == Type::Short && !is_closing && path.active() {
4736 // Create HANDSHAKE_DONE frame.
4737 // self.should_send_handshake_done() but without the need to borrow
4738 if self.handshake_completed &&
4739 !self.handshake_done_sent &&
4740 self.is_server
4741 {
4742 let frame = frame::Frame::HandshakeDone;
4743
4744 if push_frame_to_pkt!(b, frames, frame, left) {
4745 self.handshake_done_sent = true;
4746
4747 ack_eliciting = true;
4748 in_flight = true;
4749 }
4750 }
4751
4752 // Create MAX_STREAMS_BIDI frame.
4753 if self.streams.should_update_max_streams_bidi() ||
4754 self.should_send_max_streams_bidi
4755 {
4756 let frame = frame::Frame::MaxStreamsBidi {
4757 max: self.streams.max_streams_bidi_next(),
4758 };
4759
4760 if push_frame_to_pkt!(b, frames, frame, left) {
4761 self.streams.update_max_streams_bidi();
4762 self.should_send_max_streams_bidi = false;
4763
4764 ack_eliciting = true;
4765 in_flight = true;
4766 }
4767 }
4768
4769 // Create MAX_STREAMS_UNI frame.
4770 if self.streams.should_update_max_streams_uni() ||
4771 self.should_send_max_streams_uni
4772 {
4773 let frame = frame::Frame::MaxStreamsUni {
4774 max: self.streams.max_streams_uni_next(),
4775 };
4776
4777 if push_frame_to_pkt!(b, frames, frame, left) {
4778 self.streams.update_max_streams_uni();
4779 self.should_send_max_streams_uni = false;
4780
4781 ack_eliciting = true;
4782 in_flight = true;
4783 }
4784 }
4785
4786 // Create DATA_BLOCKED frame.
4787 if let Some(limit) = self.blocked_limit {
4788 let frame = frame::Frame::DataBlocked { limit };
4789
4790 if push_frame_to_pkt!(b, frames, frame, left) {
4791 self.blocked_limit = None;
4792 self.data_blocked_sent_count =
4793 self.data_blocked_sent_count.saturating_add(1);
4794
4795 ack_eliciting = true;
4796 in_flight = true;
4797 }
4798 }
4799
4800 // Create STREAMS_BLOCKED (bidi) frame when the local endpoint has
4801 // exhausted the peer's bidirectional stream count limit.
4802 if self
4803 .streams_blocked_bidi_state
4804 .has_pending_stream_blocked_frame()
4805 {
4806 if let Some(limit) = self.streams_blocked_bidi_state.blocked_at {
4807 let frame = frame::Frame::StreamsBlockedBidi { limit };
4808
4809 if push_frame_to_pkt!(b, frames, frame, left) {
4810 // Record the limit we just notified the peer about so
4811 // that redundant frames for the same limit are
4812 // suppressed.
4813 self.streams_blocked_bidi_state.blocked_sent =
4814 Some(limit);
4815
4816 ack_eliciting = true;
4817 in_flight = true;
4818 }
4819 }
4820 }
4821
4822 // Create STREAMS_BLOCKED (uni) frame when the local endpoint has
4823 // exhausted the peer's unidirectional stream count limit.
4824 if self
4825 .streams_blocked_uni_state
4826 .has_pending_stream_blocked_frame()
4827 {
4828 if let Some(limit) = self.streams_blocked_uni_state.blocked_at {
4829 let frame = frame::Frame::StreamsBlockedUni { limit };
4830
4831 if push_frame_to_pkt!(b, frames, frame, left) {
4832 // Record the limit we just notified the peer about so
4833 // that redundant frames for the same limit are
4834 // suppressed.
4835 self.streams_blocked_uni_state.blocked_sent = Some(limit);
4836
4837 ack_eliciting = true;
4838 in_flight = true;
4839 }
4840 }
4841 }
4842
4843 // Create MAX_STREAM_DATA frames as needed.
4844 for stream_id in self.streams.almost_full() {
4845 let stream = match self.streams.get_mut(stream_id) {
4846 Some(v) => v,
4847
4848 None => {
4849 // The stream doesn't exist anymore, so remove it from
4850 // the almost full set.
4851 self.streams.remove_almost_full(stream_id);
4852 continue;
4853 },
4854 };
4855
4856 // Autotune the stream window size, but only if this is not a
4857 // retransmission (on a retransmit the stream will be in
                // `self.streams.almost_full()` but its `almost_full()`
                // method returns false).
4860 if stream.recv.almost_full() {
4861 stream.recv.autotune_window(now, path.recovery.rtt());
4862 }
4863
4864 let frame = frame::Frame::MaxStreamData {
4865 stream_id,
4866 max: stream.recv.max_data_next(),
4867 };
4868
4869 if push_frame_to_pkt!(b, frames, frame, left) {
4870 let recv_win = stream.recv.window();
4871
4872 stream.recv.update_max_data(now);
4873
4874 self.streams.remove_almost_full(stream_id);
4875
4876 ack_eliciting = true;
4877 in_flight = true;
4878
4879 // Make sure the connection window always has some
4880 // room compared to the stream window.
4881 flow_control.ensure_window_lower_bound(
4882 (recv_win as f64 * CONNECTION_WINDOW_FACTOR) as u64,
4883 );
4884 }
4885 }
4886
4887 // Create MAX_DATA frame as needed.
4888 if flow_control.should_update_max_data() &&
4889 flow_control.max_data() < flow_control.max_data_next()
4890 {
4891 // Autotune the connection window size. We only tune the window
4892 // if we are sending an "organic" update, not on retransmits.
4893 flow_control.autotune_window(now, path.recovery.rtt());
4894 self.should_send_max_data = true;
4895 }
4896
4897 if self.should_send_max_data {
4898 let frame = frame::Frame::MaxData {
4899 max: flow_control.max_data_next(),
4900 };
4901
4902 if push_frame_to_pkt!(b, frames, frame, left) {
4903 self.should_send_max_data = false;
4904
4905 // Commits the new max_rx_data limit.
4906 flow_control.update_max_data(now);
4907
4908 ack_eliciting = true;
4909 in_flight = true;
4910 }
4911 }
4912
4913 // Create STOP_SENDING frames as needed.
4914 for (stream_id, error_code) in self
4915 .streams
4916 .stopped()
4917 .map(|(&k, &v)| (k, v))
4918 .collect::<Vec<(u64, u64)>>()
4919 {
4920 let frame = frame::Frame::StopSending {
4921 stream_id,
4922 error_code,
4923 };
4924
4925 if push_frame_to_pkt!(b, frames, frame, left) {
4926 self.streams.remove_stopped(stream_id);
4927
4928 ack_eliciting = true;
4929 in_flight = true;
4930 }
4931 }
4932
4933 // Create RESET_STREAM frames as needed.
4934 for (stream_id, (error_code, final_size)) in self
4935 .streams
4936 .reset()
4937 .map(|(&k, &v)| (k, v))
4938 .collect::<Vec<(u64, (u64, u64))>>()
4939 {
4940 let frame = frame::Frame::ResetStream {
4941 stream_id,
4942 error_code,
4943 final_size,
4944 };
4945
4946 if push_frame_to_pkt!(b, frames, frame, left) {
4947 self.streams.remove_reset(stream_id);
4948
4949 ack_eliciting = true;
4950 in_flight = true;
4951 }
4952 }
4953
4954 // Create STREAM_DATA_BLOCKED frames as needed.
4955 for (stream_id, limit) in self
4956 .streams
4957 .blocked()
4958 .map(|(&k, &v)| (k, v))
4959 .collect::<Vec<(u64, u64)>>()
4960 {
4961 let frame = frame::Frame::StreamDataBlocked { stream_id, limit };
4962
4963 if push_frame_to_pkt!(b, frames, frame, left) {
4964 self.streams.remove_blocked(stream_id);
4965 self.stream_data_blocked_sent_count =
4966 self.stream_data_blocked_sent_count.saturating_add(1);
4967
4968 ack_eliciting = true;
4969 in_flight = true;
4970 }
4971 }
4972
4973 // Create RETIRE_CONNECTION_ID frames as needed.
4974 let retire_dcid_seqs = self.ids.retire_dcid_seqs();
4975
4976 for seq_num in retire_dcid_seqs {
4977 // The sequence number specified in a RETIRE_CONNECTION_ID frame
4978 // MUST NOT refer to the Destination Connection ID field of the
4979 // packet in which the frame is contained.
4980 let dcid_seq = path.active_dcid_seq.ok_or(Error::InvalidState)?;
4981
4982 if seq_num == dcid_seq {
4983 continue;
4984 }
4985
4986 let frame = frame::Frame::RetireConnectionId { seq_num };
4987
4988 if push_frame_to_pkt!(b, frames, frame, left) {
4989 self.ids.mark_retire_dcid_seq(seq_num, false)?;
4990
4991 ack_eliciting = true;
4992 in_flight = true;
4993 } else {
4994 break;
4995 }
4996 }
4997 }
4998
4999 // Create CONNECTION_CLOSE frame. Try to send this only on the active
5000 // path, unless it is the last one available.
5001 if path.active() || n_paths == 1 {
5002 if let Some(conn_err) = self.local_error.as_ref() {
5003 if conn_err.is_app {
5004 // Create ApplicationClose frame.
5005 if pkt_type == Type::Short {
5006 let frame = frame::Frame::ApplicationClose {
5007 error_code: conn_err.error_code,
5008 reason: conn_err.reason.clone(),
5009 };
5010
5011 if push_frame_to_pkt!(b, frames, frame, left) {
5012 let pto = path.recovery.pto();
5013 self.draining_timer = Some(now + (pto * 3));
5014
5015 ack_eliciting = true;
5016 in_flight = true;
5017 }
5018 }
5019 } else {
5020 // Create ConnectionClose frame.
5021 let frame = frame::Frame::ConnectionClose {
5022 error_code: conn_err.error_code,
5023 frame_type: 0,
5024 reason: conn_err.reason.clone(),
5025 };
5026
5027 if push_frame_to_pkt!(b, frames, frame, left) {
5028 let pto = path.recovery.pto();
5029 self.draining_timer = Some(now + (pto * 3));
5030
5031 ack_eliciting = true;
5032 in_flight = true;
5033 }
5034 }
5035 }
5036 }
5037
5038 // Create CRYPTO frame.
5039 if crypto_ctx.crypto_stream.is_flushable() &&
5040 left > frame::MAX_CRYPTO_OVERHEAD &&
5041 !is_closing &&
5042 path.active()
5043 {
5044 let crypto_off = crypto_ctx.crypto_stream.send.off_front();
5045
5046 // Encode the frame.
5047 //
5048 // Instead of creating a `frame::Frame` object, encode the frame
5049 // directly into the packet buffer.
5050 //
5051 // First we reserve some space in the output buffer for writing the
5052 // frame header (we assume the length field is always a 2-byte
5053 // varint as we don't know the value yet).
5054 //
5055 // Then we emit the data from the crypto stream's send buffer.
5056 //
5057 // Finally we go back and encode the frame header with the now
5058 // available information.
5059 let hdr_off = b.off();
5060 let hdr_len = 1 + // frame type
5061 octets::varint_len(crypto_off) + // offset
5062 2; // length, always encode as 2-byte varint
5063
5064 if let Some(max_len) = left.checked_sub(hdr_len) {
5065 let (mut crypto_hdr, mut crypto_payload) =
5066 b.split_at(hdr_off + hdr_len)?;
5067
5068 // Write stream data into the packet buffer.
5069 let (len, _) = crypto_ctx
5070 .crypto_stream
5071 .send
5072 .emit(&mut crypto_payload.as_mut()[..max_len])?;
5073
5074 // Encode the frame's header.
5075 //
5076 // Due to how `OctetsMut::split_at()` works, `crypto_hdr` starts
5077 // from the initial offset of `b` (rather than the current
5078 // offset), so it needs to be advanced to the
5079 // initial frame offset.
5080 crypto_hdr.skip(hdr_off)?;
5081
5082 frame::encode_crypto_header(
5083 crypto_off,
5084 len as u64,
5085 &mut crypto_hdr,
5086 )?;
5087
5088 // Advance the packet buffer's offset.
5089 b.skip(hdr_len + len)?;
5090
5091 let frame = frame::Frame::CryptoHeader {
5092 offset: crypto_off,
5093 length: len,
5094 };
5095
5096 if push_frame_to_pkt!(b, frames, frame, left) {
5097 ack_eliciting = true;
5098 in_flight = true;
5099 has_data = true;
5100 }
5101 }
5102 }
5103
5104 // The preference of data-bearing frame to include in a packet
5105 // is managed by `self.emit_dgram`. However, whether any frames
5106 // can be sent depends on the state of their buffers. In the case
5107 // where one type is preferred but its buffer is empty, fall back
5108 // to the other type in order not to waste this function call.
5109 let mut dgram_emitted = false;
5110 let dgrams_to_emit = max_dgram_len.is_some();
5111 let stream_to_emit = self.streams.has_flushable();
5112
5113 let mut do_dgram = self.emit_dgram && dgrams_to_emit;
5114 let do_stream = !self.emit_dgram && stream_to_emit;
5115
5116 if !do_stream && dgrams_to_emit {
5117 do_dgram = true;
5118 }
5119
5120 // Create DATAGRAM frame.
5121 if (pkt_type == Type::Short || pkt_type == Type::ZeroRTT) &&
5122 left > frame::MAX_DGRAM_OVERHEAD &&
5123 !is_closing &&
5124 path.active() &&
5125 do_dgram
5126 {
5127 if let Some(max_dgram_payload) = max_dgram_len {
5128 while let Some(len) = self.dgram_send_queue.peek_front_len() {
5129 let hdr_off = b.off();
5130 let hdr_len = 1 + // frame type
5131 2; // length, always encode as 2-byte varint
5132
5133 if (hdr_len + len) <= left {
5134 // Front of the queue fits this packet, send it.
5135 match self.dgram_send_queue.pop() {
5136 Some(data) => {
5137 // Encode the frame.
5138 //
5139 // Instead of creating a `frame::Frame` object,
5140 // encode the frame directly into the packet
5141 // buffer.
5142 //
5143 // First we reserve some space in the output
5144 // buffer for writing the frame header (we
5145 // assume the length field is always a 2-byte
5146 // varint as we don't know the value yet).
5147 //
5148 // Then we emit the data from the DATAGRAM's
5149 // buffer.
5150 //
5151 // Finally we go back and encode the frame
5152 // header with the now available information.
5153 let (mut dgram_hdr, mut dgram_payload) =
5154 b.split_at(hdr_off + hdr_len)?;
5155
5156 dgram_payload.as_mut()[..len]
5157 .copy_from_slice(data.as_ref());
5158
5159 // Encode the frame's header.
5160 //
5161 // Due to how `OctetsMut::split_at()` works,
5162 // `dgram_hdr` starts from the initial offset
5163 // of `b` (rather than the current offset), so
5164 // it needs to be advanced to the initial frame
5165 // offset.
5166 dgram_hdr.skip(hdr_off)?;
5167
5168 frame::encode_dgram_header(
5169 len as u64,
5170 &mut dgram_hdr,
5171 )?;
5172
5173 // Advance the packet buffer's offset.
5174 b.skip(hdr_len + len)?;
5175
5176 let frame =
5177 frame::Frame::DatagramHeader { length: len };
5178
5179 if push_frame_to_pkt!(b, frames, frame, left) {
5180 ack_eliciting = true;
5181 in_flight = true;
5182 dgram_emitted = true;
5183 self.dgram_sent_count =
5184 self.dgram_sent_count.saturating_add(1);
5185 path.dgram_sent_count =
5186 path.dgram_sent_count.saturating_add(1);
5187 }
5188 },
5189
5190 None => continue,
5191 };
5192 } else if len > max_dgram_payload {
5193 // This dgram frame will never fit. Let's purge it.
5194 self.dgram_send_queue.pop();
5195 } else {
5196 break;
5197 }
5198 }
5199 }
5200 }
5201
5202 // Create a single STREAM frame for the first stream that is flushable.
5203 if (pkt_type == Type::Short || pkt_type == Type::ZeroRTT) &&
5204 left > frame::MAX_STREAM_OVERHEAD &&
5205 !is_closing &&
5206 path.active() &&
5207 !dgram_emitted
5208 {
5209 while let Some(priority_key) = self.streams.peek_flushable() {
5210 let stream_id = priority_key.id;
5211 let stream = match self.streams.get_mut(stream_id) {
5212 // Avoid sending frames for streams that were already stopped.
5213 //
5214 // This might happen if stream data was buffered but not yet
5215 // flushed on the wire when a STOP_SENDING frame is received.
5216 Some(v) if !v.send.is_stopped() => v,
5217 _ => {
5218 self.streams.remove_flushable(&priority_key);
5219 continue;
5220 },
5221 };
5222
5223 let stream_off = stream.send.off_front();
5224
5225 // Encode the frame.
5226 //
5227 // Instead of creating a `frame::Frame` object, encode the frame
5228 // directly into the packet buffer.
5229 //
5230 // First we reserve some space in the output buffer for writing
5231 // the frame header (we assume the length field is always a
5232 // 2-byte varint as we don't know the value yet).
5233 //
5234 // Then we emit the data from the stream's send buffer.
5235 //
5236 // Finally we go back and encode the frame header with the now
5237 // available information.
5238 let hdr_off = b.off();
5239 let hdr_len = 1 + // frame type
5240 octets::varint_len(stream_id) + // stream_id
5241 octets::varint_len(stream_off) + // offset
5242 2; // length, always encode as 2-byte varint
5243
5244 let max_len = match left.checked_sub(hdr_len) {
5245 Some(v) => v,
5246 None => {
5247 let priority_key = Arc::clone(&stream.priority_key);
5248 self.streams.remove_flushable(&priority_key);
5249
5250 continue;
5251 },
5252 };
5253
5254 let (mut stream_hdr, mut stream_payload) =
5255 b.split_at(hdr_off + hdr_len)?;
5256
5257 // Write stream data into the packet buffer.
5258 let (len, fin) =
5259 stream.send.emit(&mut stream_payload.as_mut()[..max_len])?;
5260
5261 // Encode the frame's header.
5262 //
5263 // Due to how `OctetsMut::split_at()` works, `stream_hdr` starts
5264 // from the initial offset of `b` (rather than the current
5265 // offset), so it needs to be advanced to the initial frame
5266 // offset.
5267 stream_hdr.skip(hdr_off)?;
5268
5269 frame::encode_stream_header(
5270 stream_id,
5271 stream_off,
5272 len as u64,
5273 fin,
5274 &mut stream_hdr,
5275 )?;
5276
5277 // Advance the packet buffer's offset.
5278 b.skip(hdr_len + len)?;
5279
5280 let frame = frame::Frame::StreamHeader {
5281 stream_id,
5282 offset: stream_off,
5283 length: len,
5284 fin,
5285 };
5286
5287 if push_frame_to_pkt!(b, frames, frame, left) {
5288 ack_eliciting = true;
5289 in_flight = true;
5290 has_data = true;
5291 }
5292
5293 let priority_key = Arc::clone(&stream.priority_key);
5294 // If the stream is no longer flushable, remove it from the queue
5295 if !stream.is_flushable() {
5296 self.streams.remove_flushable(&priority_key);
5297 } else if stream.incremental {
5298 // Shuffle the incremental stream to the back of the
5299 // queue.
5300 self.streams.remove_flushable(&priority_key);
5301 self.streams.insert_flushable(&priority_key);
5302 }
5303
5304 #[cfg(feature = "fuzzing")]
5305 // Coalesce STREAM frames when fuzzing.
5306 if left > frame::MAX_STREAM_OVERHEAD {
5307 continue;
5308 }
5309
5310 break;
5311 }
5312 }
5313
5314 // Alternate trying to send DATAGRAMs next time.
5315 self.emit_dgram = !dgram_emitted;
5316
5317 // If no other ack-eliciting frame is sent, include a PING frame
5318 // - if PTO probe needed; OR
5319 // - if we've sent too many non ack-eliciting packets without having
5320 // sent an ACK eliciting one; OR
5321 // - the application requested an ack-eliciting frame be sent.
5322 if (ack_elicit_required || path.needs_ack_eliciting) &&
5323 !ack_eliciting &&
5324 left >= 1 &&
5325 !is_closing
5326 {
5327 let frame = frame::Frame::Ping { mtu_probe: None };
5328
5329 if push_frame_to_pkt!(b, frames, frame, left) {
5330 ack_eliciting = true;
5331 in_flight = true;
5332 }
5333 }
5334
5335 if ack_eliciting && !is_pmtud_probe {
5336 path.needs_ack_eliciting = false;
5337 path.recovery.ping_sent(epoch);
5338 }
5339
5340 if !has_data &&
5341 !dgram_emitted &&
5342 cwnd_available > frame::MAX_STREAM_OVERHEAD
5343 {
5344 path.recovery.on_app_limited();
5345 }
5346
5347 if frames.is_empty() {
5348 // When we reach this point we are not able to write more, so set
5349 // app_limited to false.
5350 path.recovery.update_app_limited(false);
5351 return Err(Error::Done);
5352 }
5353
5354 // When coalescing a 1-RTT packet, we can't add padding in the UDP
5355 // datagram, so use PADDING frames instead.
5356 //
5357 // This is only needed if
5358 // 1) an Initial packet has already been written to the UDP datagram,
5359 // as Initial always requires padding.
5360 //
5361 // 2) this is a probing packet towards an unvalidated peer address.
5362 if (has_initial || !path.validated()) &&
5363 pkt_type == Type::Short &&
5364 left >= 1
5365 {
5366 let frame = frame::Frame::Padding { len: left };
5367
5368 if push_frame_to_pkt!(b, frames, frame, left) {
5369 in_flight = true;
5370 }
5371 }
5372
5373 // Pad payload so that it's always at least 4 bytes.
5374 if b.off() - payload_offset < PAYLOAD_MIN_LEN {
5375 let payload_len = b.off() - payload_offset;
5376
5377 let frame = frame::Frame::Padding {
5378 len: PAYLOAD_MIN_LEN - payload_len,
5379 };
5380
5381 #[allow(unused_assignments)]
5382 if push_frame_to_pkt!(b, frames, frame, left) {
5383 in_flight = true;
5384 }
5385 }
5386
5387 let payload_len = b.off() - payload_offset;
5388
5389 // Fill in payload length.
5390 if pkt_type != Type::Short {
5391 let len = pn_len + payload_len + crypto_overhead;
5392
5393 let (_, mut payload_with_len) = b.split_at(header_offset)?;
5394 payload_with_len
5395 .put_varint_with_len(len as u64, PAYLOAD_LENGTH_LEN)?;
5396 }
5397
5398 trace!(
5399 "{} tx pkt {} len={} pn={} {}",
5400 self.trace_id,
5401 hdr_trace.unwrap_or_default(),
5402 payload_len,
5403 pn,
5404 AddrTupleFmt(path.local_addr(), path.peer_addr())
5405 );
5406
5407 #[cfg(feature = "qlog")]
5408 let mut qlog_frames: Vec<qlog::events::quic::QuicFrame> =
5409 Vec::with_capacity(frames.len());
5410
5411 for frame in &mut frames {
5412 trace!("{} tx frm {:?}", self.trace_id, frame);
5413
5414 qlog_with_type!(QLOG_PACKET_TX, self.qlog, _q, {
5415 qlog_frames.push(frame.to_qlog());
5416 });
5417 }
5418
5419 qlog_with_type!(QLOG_PACKET_TX, self.qlog, q, {
5420 if let Some(header) = qlog_pkt_hdr {
5421 // Qlog packet raw info described at
5422 // https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema-00#section-5.1
5423 //
5424 // `length` includes packet headers and trailers (AEAD tag).
5425 let length = payload_len + payload_offset + crypto_overhead;
5426 let qlog_raw_info = RawInfo {
5427 length: Some(length as u64),
5428 payload_length: Some(payload_len as u64),
5429 data: None,
5430 };
5431
5432 let send_at_time =
5433 now.duration_since(q.start_time()).as_secs_f64() * 1000.0;
5434
5435 let ev_data =
5436 EventData::QuicPacketSent(qlog::events::quic::PacketSent {
5437 header,
5438 frames: Some(qlog_frames),
5439 raw: Some(qlog_raw_info),
5440 send_at_time: Some(send_at_time),
5441 ..Default::default()
5442 });
5443
5444 q.add_event_data_with_instant(ev_data, now).ok();
5445 }
5446 });
5447
5448 let aead = match crypto_ctx.crypto_seal {
5449 Some(ref mut v) => v,
5450 None => return Err(Error::InvalidState),
5451 };
5452
5453 let written = packet::encrypt_pkt(
5454 &mut b,
5455 pn,
5456 pn_len,
5457 payload_len,
5458 payload_offset,
5459 None,
5460 aead,
5461 )?;
5462
5463 let sent_pkt_has_data = if path.recovery.gcongestion_enabled() {
5464 has_data || dgram_emitted
5465 } else {
5466 has_data
5467 };
5468
5469 let sent_pkt = recovery::Sent {
5470 pkt_num: pn,
5471 frames,
5472 time_sent: now,
5473 time_acked: None,
5474 time_lost: None,
5475 size: if ack_eliciting { written } else { 0 },
5476 ack_eliciting,
5477 in_flight,
5478 delivered: 0,
5479 delivered_time: now,
5480 first_sent_time: now,
5481 is_app_limited: false,
5482 tx_in_flight: 0,
5483 lost: 0,
5484 has_data: sent_pkt_has_data,
5485 is_pmtud_probe,
5486 };
5487
5488 if in_flight && is_app_limited {
5489 path.recovery.delivery_rate_update_app_limited(true);
5490 }
5491
5492 self.next_pkt_num += 1;
5493
5494 let handshake_status = recovery::HandshakeStatus {
5495 has_handshake_keys: self.crypto_ctx[packet::Epoch::Handshake]
5496 .has_keys(),
5497 peer_verified_address: self.peer_verified_initial_address,
5498 completed: self.handshake_completed,
5499 };
5500
5501 self.on_packet_sent(send_pid, sent_pkt, epoch, handshake_status, now)?;
5502
5503 let path = self.paths.get_mut(send_pid)?;
5504 qlog_with_type!(QLOG_METRICS, self.qlog, q, {
5505 path.recovery.maybe_qlog(q, now);
5506 });
5507
5508 // Record sent packet size if we probe the path.
5509 if let Some(data) = challenge_data {
5510 path.add_challenge_sent(data, written, now);
5511 }
5512
5513 self.sent_count += 1;
5514 self.sent_bytes += written as u64;
5515 path.sent_count += 1;
5516 path.sent_bytes += written as u64;
5517
5518 if self.dgram_send_queue.byte_size() > path.recovery.cwnd_available() {
5519 path.recovery.update_app_limited(false);
5520 }
5521
5522 let had_send_budget = path.max_send_bytes > 0;
5523 path.max_send_bytes = path.max_send_bytes.saturating_sub(written);
5524 if self.is_server &&
5525 !path.verified_peer_address &&
5526 had_send_budget &&
5527 path.max_send_bytes == 0
5528 {
5529 self.amplification_limited_count =
5530 self.amplification_limited_count.saturating_add(1);
5531 }
5532
5533 // On the client, drop initial state after sending an Handshake packet.
5534 if !self.is_server && hdr_ty == Type::Handshake {
5535 self.drop_epoch_state(packet::Epoch::Initial, now);
5536 }
5537
5538 // (Re)start the idle timer if we are sending the first ack-eliciting
5539 // packet since last receiving a packet.
5540 if ack_eliciting && !self.ack_eliciting_sent {
5541 if let Some(idle_timeout) = self.idle_timeout() {
5542 self.idle_timer = Some(now + idle_timeout);
5543 }
5544 }
5545
5546 if ack_eliciting {
5547 self.ack_eliciting_sent = true;
5548 }
5549
5550 Ok((pkt_type, written))
5551 }
5552
5553 fn on_packet_sent(
5554 &mut self, send_pid: usize, sent_pkt: recovery::Sent,
5555 epoch: packet::Epoch, handshake_status: recovery::HandshakeStatus,
5556 now: Instant,
5557 ) -> Result<()> {
5558 let path = self.paths.get_mut(send_pid)?;
5559
5560 // It's fine to set the skip counter based on a non-active path's values.
5561 let cwnd = path.recovery.cwnd();
5562 let max_datagram_size = path.recovery.max_datagram_size();
5563 self.pkt_num_spaces[epoch].on_packet_sent(&sent_pkt);
5564 self.pkt_num_manager.on_packet_sent(
5565 cwnd,
5566 max_datagram_size,
5567 self.handshake_completed,
5568 );
5569
5570 path.recovery.on_packet_sent(
5571 sent_pkt,
5572 epoch,
5573 handshake_status,
5574 now,
5575 &self.trace_id,
5576 );
5577
5578 Ok(())
5579 }
5580
5581 /// Returns the desired send time for the next packet.
5582 #[inline]
5583 pub fn get_next_release_time(&self) -> Option<ReleaseDecision> {
5584 Some(
5585 self.paths
5586 .get_active()
5587 .ok()?
5588 .recovery
5589 .get_next_release_time(),
5590 )
5591 }
5592
5593 /// Returns whether gcongestion is enabled.
5594 #[inline]
5595 pub fn gcongestion_enabled(&self) -> Option<bool> {
5596 Some(self.paths.get_active().ok()?.recovery.gcongestion_enabled())
5597 }
5598
5599 /// Returns the maximum pacing into the future.
5600 ///
5601 /// Equals 1/8 of the smoothed RTT, but at least 1ms and not greater than
5602 /// 5ms.
5603 pub fn max_release_into_future(&self) -> Duration {
5604 self.paths
5605 .get_active()
5606 .map(|p| p.recovery.rtt().mul_f64(0.125))
5607 .unwrap_or(Duration::from_millis(1))
5608 .max(Duration::from_millis(1))
5609 .min(Duration::from_millis(5))
5610 }
5611
5612 /// Returns whether pacing is enabled.
5613 #[inline]
5614 pub fn pacing_enabled(&self) -> bool {
5615 self.recovery_config.pacing
5616 }
5617
5618 /// Returns the size of the send quantum, in bytes.
5619 ///
5620 /// This represents the maximum size of a packet burst as determined by the
5621 /// congestion control algorithm in use.
5622 ///
5623 /// Applications can, for example, use it in conjunction with segmentation
5624 /// offloading mechanisms as the maximum limit for outgoing aggregates of
5625 /// multiple packets.
5626 #[inline]
5627 pub fn send_quantum(&self) -> usize {
5628 match self.paths.get_active() {
5629 Ok(p) => p.recovery.send_quantum(),
5630 _ => 0,
5631 }
5632 }
5633
5634 /// Returns the size of the send quantum over the given 4-tuple, in bytes.
5635 ///
5636 /// This represents the maximum size of a packet burst as determined by the
5637 /// congestion control algorithm in use.
5638 ///
5639 /// Applications can, for example, use it in conjunction with segmentation
5640 /// offloading mechanisms as the maximum limit for outgoing aggregates of
5641 /// multiple packets.
5642 ///
5643 /// If the (`local_addr`, peer_addr`) 4-tuple relates to a non-existing
5644 /// path, this method returns 0.
5645 pub fn send_quantum_on_path(
5646 &self, local_addr: SocketAddr, peer_addr: SocketAddr,
5647 ) -> usize {
5648 self.paths
5649 .path_id_from_addrs(&(local_addr, peer_addr))
5650 .and_then(|pid| self.paths.get(pid).ok())
5651 .map(|path| path.recovery.send_quantum())
5652 .unwrap_or(0)
5653 }
5654
5655 /// Reads contiguous data from a stream into the provided slice.
5656 ///
5657 /// The slice must be sized by the caller and will be populated up to its
5658 /// capacity.
5659 ///
5660 /// On success the amount of bytes read and a flag indicating the fin state
5661 /// is returned as a tuple, or [`Done`] if there is no data to read.
5662 ///
5663 /// Reading data from a stream may trigger queueing of control messages
5664 /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
5665 ///
5666 /// [`Done`]: enum.Error.html#variant.Done
5667 /// [`send()`]: struct.Connection.html#method.send
5668 ///
5669 /// ## Examples:
5670 ///
5671 /// ```no_run
5672 /// # let mut buf = [0; 512];
5673 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
5674 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
5675 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
5676 /// # let peer = "127.0.0.1:1234".parse().unwrap();
5677 /// # let local = socket.local_addr().unwrap();
5678 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
5679 /// # let stream_id = 0;
5680 /// while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
5681 /// println!("Got {} bytes on stream {}", read, stream_id);
5682 /// }
5683 /// # Ok::<(), quiche::Error>(())
5684 /// ```
5685 #[inline]
5686 pub fn stream_recv(
5687 &mut self, stream_id: u64, out: &mut [u8],
5688 ) -> Result<(usize, bool)> {
5689 self.stream_recv_buf(stream_id, out)
5690 }
5691
5692 /// Reads contiguous data from a stream into the provided [`bytes::BufMut`].
5693 ///
5694 /// **NOTE**:
5695 /// The BufMut will be populated with all available data up to its capacity.
5696 /// Since some BufMut implementations, e.g., [`Vec<u8>`], dynamically
5697 /// allocate additional memory, the caller may use [`BufMut::limit()`]
5698 /// to limit the maximum amount of data that can be written.
5699 ///
5700 /// On success the amount of bytes read and a flag indicating the fin state
5701 /// is returned as a tuple, or [`Done`] if there is no data to read.
5702 /// [`BufMut::advance_mut()`] will have been called with the same number of
5703 /// total bytes.
5704 ///
5705 /// Reading data from a stream may trigger queueing of control messages
5706 /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
5707 ///
5708 /// [`BufMut::limit()`]: bytes::BufMut::limit
5709 /// [`BufMut::advance_mut()`]: bytes::BufMut::advance_mut
5710 /// [`Done`]: enum.Error.html#variant.Done
5711 /// [`send()`]: struct.Connection.html#method.send
5712 ///
5713 /// ## Examples:
5714 ///
5715 /// ```no_run
5716 /// # use bytes::BufMut as _;
5717 /// # let mut buf = Vec::new().limit(1024); // Read at most 1024 bytes
5718 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
5719 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
5720 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
5721 /// # let peer = "127.0.0.1:1234".parse().unwrap();
5722 /// # let local = socket.local_addr().unwrap();
5723 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
5724 /// # let stream_id = 0;
5725 /// # let mut total_read = 0;
5726 /// while let Ok((read, fin)) = conn.stream_recv_buf(stream_id, &mut buf) {
5727 /// println!("Got {} bytes on stream {}", read, stream_id);
5728 /// total_read += read;
5729 /// assert_eq!(buf.get_ref().len(), total_read);
5730 /// }
5731 /// # Ok::<(), quiche::Error>(())
5732 /// ```
5733 pub fn stream_recv_buf<B: bytes::BufMut>(
5734 &mut self, stream_id: u64, out: B,
5735 ) -> Result<(usize, bool)> {
5736 self.do_stream_recv(stream_id, RecvAction::Emit { out })
5737 }
5738
5739 /// Discard contiguous data from a stream without copying.
5740 ///
5741 /// On success the amount of bytes discarded and a flag indicating the fin
5742 /// state is returned as a tuple, or [`Done`] if there is no data to
5743 /// discard.
5744 ///
5745 /// Discarding data from a stream may trigger queueing of control messages
5746 /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
5747 ///
5748 /// [`Done`]: enum.Error.html#variant.Done
5749 /// [`send()`]: struct.Connection.html#method.send
5750 ///
5751 /// ## Examples:
5752 ///
5753 /// ```no_run
5754 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
5755 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
5756 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
5757 /// # let peer = "127.0.0.1:1234".parse().unwrap();
5758 /// # let local = socket.local_addr().unwrap();
5759 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
5760 /// # let stream_id = 0;
5761 /// while let Ok((read, fin)) = conn.stream_discard(stream_id, 1) {
5762 /// println!("Discarded {} byte(s) on stream {}", read, stream_id);
5763 /// }
5764 /// # Ok::<(), quiche::Error>(())
5765 /// ```
5766 pub fn stream_discard(
5767 &mut self, stream_id: u64, len: usize,
5768 ) -> Result<(usize, bool)> {
5769 // `do_stream_recv()` is generic on the kind of `BufMut` in RecvAction.
5770 // Since we are discarding, it doesn't matter, but the compiler still
5771 // wants to know, so we say `&mut [u8]`.
5772 self.do_stream_recv::<&mut [u8]>(stream_id, RecvAction::Discard { len })
5773 }
5774
    // Reads or discards contiguous data from a stream.
    //
    // Passing an `action` of `StreamRecvAction::Emit` results in a read into
    // the provided slice. It must be sized by the caller and will be populated
    // up to its capacity.
    //
    // Passing an `action` of `StreamRecvAction::Discard` results in discard up
    // to the indicated length.
    //
    // On success the amount of bytes read or discarded, and a flag indicating
    // the fin state, is returned as a tuple, or [`Done`] if there is no data to
    // read or discard.
    //
    // Reading or discarding data from a stream may trigger queueing of control
    // messages (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
    //
    // [`Done`]: enum.Error.html#variant.Done
    // [`send()`]: struct.Connection.html#method.send
    fn do_stream_recv<B: bytes::BufMut>(
        &mut self, stream_id: u64, action: RecvAction<B>,
    ) -> Result<(usize, bool)> {
        // We can't read on our own unidirectional streams.
        if !stream::is_bidi(stream_id) &&
            stream::is_local(stream_id, self.is_server)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        let stream = self
            .streams
            .get_mut(stream_id)
            .ok_or(Error::InvalidStreamState(stream_id))?;

        if !stream.is_readable() {
            return Err(Error::Done);
        }

        // Copied out of `stream` up-front so they remain usable after the
        // mutable borrow of `self.streams` ends (needed for `collect()` and
        // the readable-queue updates below).
        let local = stream.local;
        let priority_key = Arc::clone(&stream.priority_key);

        // Capture the read offset before emitting so the qlog event below
        // reports where this chunk started.
        #[cfg(feature = "qlog")]
        let offset = stream.recv.off_front();

        // The qlog recipient depends on whether the data is handed to the
        // application or dropped.
        #[cfg(feature = "qlog")]
        let to = match action {
            RecvAction::Emit { .. } => Some(DataRecipient::Application),

            RecvAction::Discard { .. } => Some(DataRecipient::Dropped),
        };

        let (read, fin) = match stream.recv.emit_or_discard(action) {
            Ok(v) => v,

            Err(e) => {
                // Collect the stream if it is now complete. This can happen if
                // we got a `StreamReset` error which will now be propagated to
                // the application, so we don't need to keep the stream's state
                // anymore.
                if stream.is_complete() {
                    self.streams.collect(stream_id, local);
                }

                self.streams.remove_readable(&priority_key);
                return Err(e);
            },
        };

        // Account for the consumed bytes at the connection level; this may
        // later trigger a connection-level flow control update.
        self.flow_control.add_consumed(read as u64);

        // Re-evaluate the stream's state *after* the emit/discard, since it
        // may have just been drained or finished.
        let readable = stream.is_readable();

        let complete = stream.is_complete();

        // Schedule a MAX_STREAM_DATA update if the stream's receive window is
        // running low.
        if stream.recv.almost_full() {
            self.streams.insert_almost_full(stream_id);
        }

        if !readable {
            self.streams.remove_readable(&priority_key);
        }

        if complete {
            self.streams.collect(stream_id, local);
        }

        qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
            let ev_data = EventData::QuicStreamDataMoved(
                qlog::events::quic::StreamDataMoved {
                    stream_id: Some(stream_id),
                    offset: Some(offset),
                    raw: Some(RawInfo {
                        length: Some(read as u64),
                        ..Default::default()
                    }),
                    from: Some(DataRecipient::Transport),
                    to,
                    additional_info: fin
                        .then_some(DataMovedAdditionalInfo::FinSet),
                },
            );

            let now = Instant::now();
            q.add_event_data_with_instant(ev_data, now).ok();
        });

        if priority_key.incremental && readable {
            // Shuffle the incremental stream to the back of the queue.
            self.streams.remove_readable(&priority_key);
            self.streams.insert_readable(&priority_key);
        }

        Ok((read, fin))
    }
5888
5889 /// Writes data to a stream.
5890 ///
5891 /// On success the number of bytes written is returned, or [`Done`] if no
5892 /// data was written (e.g. because the stream has no capacity).
5893 ///
5894 /// Applications can provide a 0-length buffer with the fin flag set to
5895 /// true. This will lead to a 0-length FIN STREAM frame being sent at the
5896 /// latest offset. The `Ok(0)` value is only returned when the application
5897 /// provided a 0-length buffer.
5898 ///
5899 /// In addition, if the peer has signalled that it doesn't want to receive
5900 /// any more data from this stream by sending the `STOP_SENDING` frame, the
5901 /// [`StreamStopped`] error will be returned instead of any data.
5902 ///
5903 /// Note that in order to avoid buffering an infinite amount of data in the
5904 /// stream's send buffer, streams are only allowed to buffer outgoing data
5905 /// up to the amount that the peer allows it to send (that is, up to the
5906 /// stream's outgoing flow control capacity).
5907 ///
5908 /// This means that the number of written bytes returned can be lower than
5909 /// the length of the input buffer when the stream doesn't have enough
5910 /// capacity for the operation to complete. The application should retry the
5911 /// operation once the stream is reported as writable again.
5912 ///
5913 /// Applications should call this method only after the handshake is
5914 /// completed (whenever [`is_established()`] returns `true`) or during
5915 /// early data if enabled (whenever [`is_in_early_data()`] returns `true`).
5916 ///
5917 /// [`Done`]: enum.Error.html#variant.Done
5918 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
5919 /// [`is_established()`]: struct.Connection.html#method.is_established
5920 /// [`is_in_early_data()`]: struct.Connection.html#method.is_in_early_data
5921 ///
5922 /// ## Examples:
5923 ///
5924 /// ```no_run
5925 /// # let mut buf = [0; 512];
5926 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
5927 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
5928 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
5929 /// # let peer = "127.0.0.1:1234".parse().unwrap();
5930 /// # let local = "127.0.0.1:4321".parse().unwrap();
5931 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
5932 /// # let stream_id = 0;
5933 /// conn.stream_send(stream_id, b"hello", true)?;
5934 /// # Ok::<(), quiche::Error>(())
5935 /// ```
5936 pub fn stream_send(
5937 &mut self, stream_id: u64, buf: &[u8], fin: bool,
5938 ) -> Result<usize> {
5939 self.stream_do_send(
5940 stream_id,
5941 buf,
5942 fin,
5943 |stream: &mut stream::Stream<F>,
5944 buf: &[u8],
5945 cap: usize,
5946 fin: bool| {
5947 stream.send.write(&buf[..cap], fin).map(|v| (v, v))
5948 },
5949 )
5950 }
5951
5952 /// Writes data to a stream with zero copying, instead, it appends the
5953 /// provided buffer directly to the send queue if the capacity allows
5954 /// it.
5955 ///
5956 /// When a partial write happens (including when [`Error::Done`] is
5957 /// returned) the remaining (unwritten) buffer will also be returned.
5958 /// The application should retry the operation once the stream is
5959 /// reported as writable again.
5960 pub fn stream_send_zc(
5961 &mut self, stream_id: u64, buf: F::Buf, fin: bool,
5962 ) -> Result<(usize, Option<F::Buf>)>
5963 where
5964 F::Buf: BufSplit,
5965 {
5966 self.stream_do_send(
5967 stream_id,
5968 buf,
5969 fin,
5970 |stream: &mut stream::Stream<F>,
5971 buf: F::Buf,
5972 cap: usize,
5973 fin: bool| {
5974 let (sent, remaining) = stream.send.append_buf(buf, cap, fin)?;
5975 Ok((sent, (sent, remaining)))
5976 },
5977 )
5978 }
5979
    // Common implementation backing `stream_send()` and `stream_send_zc()`.
    //
    // `write_fn` performs the actual write into the stream's send buffer: it
    // is given the stream, the caller's buffer, the number of bytes that may
    // be buffered (`cap`) and the effective fin flag, and returns the number
    // of bytes consumed plus the caller-visible return value `R`.
    fn stream_do_send<B, R, SND>(
        &mut self, stream_id: u64, buf: B, fin: bool, write_fn: SND,
    ) -> Result<R>
    where
        B: AsRef<[u8]>,
        SND: FnOnce(&mut stream::Stream<F>, B, usize, bool) -> Result<(usize, R)>,
    {
        // We can't write on the peer's unidirectional streams.
        if !stream::is_bidi(stream_id) &&
            !stream::is_local(stream_id, self.is_server)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        let len = buf.as_ref().len();

        // Mark the connection as blocked if the connection-level flow control
        // limit doesn't let us buffer all the data.
        //
        // Note that this is separate from "send capacity" as that also takes
        // congestion control into consideration.
        if self.max_tx_data - self.tx_data < len as u64 {
            self.blocked_limit = Some(self.max_tx_data);
        }

        // Snapshot of the connection-level send capacity, taken before
        // `self` is mutably borrowed by the stream lookup below.
        let cap = self.tx_cap;

        // Get existing stream or create a new one.
        let stream = match self.get_or_create_stream(stream_id, true) {
            Ok(v) => v,

            Err(Error::StreamLimit) => {
                // If the local endpoint has exhausted the peer's stream count
                // limit, record the current limit so that a STREAMS_BLOCKED
                // frame can be sent.
                if self.enable_send_streams_blocked &&
                    stream::is_local(stream_id, self.is_server)
                {
                    if stream::is_bidi(stream_id) {
                        let limit = self.streams.peer_max_streams_bidi();
                        self.streams_blocked_bidi_state.update_at(limit);
                    } else {
                        let limit = self.streams.peer_max_streams_uni();
                        self.streams_blocked_uni_state.update_at(limit);
                    }
                }

                return Err(Error::StreamLimit);
            },

            Err(e) => return Err(e),
        };

        // Offset at which this write will be appended, for the qlog event.
        #[cfg(feature = "qlog")]
        let offset = stream.send.off_back();

        // Snapshot the stream's state before the write so queue membership
        // can be updated based on what changed.
        let was_writable = stream.is_writable();

        let was_flushable = stream.is_flushable();

        let is_complete = stream.is_complete();
        let is_readable = stream.is_readable();

        let priority_key = Arc::clone(&stream.priority_key);

        // Return early if the stream has been stopped, and collect its state
        // if complete.
        if let Err(Error::StreamStopped(e)) = stream.send.cap() {
            // Only collect the stream if it is complete and not readable.
            // If it is readable, it will get collected when stream_recv()
            // is used.
            //
            // The stream can't be writable if it has been stopped.
            if is_complete && !is_readable {
                let local = stream.local;
                self.streams.collect(stream_id, local);
            }

            return Err(Error::StreamStopped(e));
        };

        // Truncate the input buffer based on the connection's send capacity if
        // necessary.
        //
        // When the cap is zero, the method returns Ok(0) *only* when the passed
        // buffer is empty. We return Error::Done otherwise.
        if cap == 0 && len > 0 {
            if was_writable {
                // When `stream_writable_next()` returns a stream, the writable
                // mark is removed, but because the stream is blocked by the
                // connection-level send capacity it won't be marked as writable
                // again once the capacity increases.
                //
                // Since the stream is writable already, mark it here instead.
                self.streams.insert_writable(&priority_key);
            }

            return Err(Error::Done);
        }

        // Clamp the write to the connection's capacity; when clamped, the fin
        // flag must be withheld since more data is still pending.
        let (cap, fin, blocked_by_cap) = if cap < len {
            (cap, false, true)
        } else {
            (len, fin, false)
        };

        let (sent, ret) = match write_fn(stream, buf, cap, fin) {
            Ok(v) => v,

            Err(e) => {
                self.streams.remove_writable(&priority_key);
                return Err(e);
            },
        };

        let incremental = stream.incremental;
        let priority_key = Arc::clone(&stream.priority_key);

        let flushable = stream.is_flushable();

        let writable = stream.is_writable();

        let empty_fin = len == 0 && fin;

        // A short write means the stream's own send buffer was the limiting
        // factor: record the blocked offset (at most once per offset) so the
        // blocked state can be advertised to the peer.
        if sent < cap {
            let max_off = stream.send.max_off();

            if stream.send.blocked_at() != Some(max_off) {
                stream.send.update_blocked_at(Some(max_off));
                self.streams.insert_blocked(stream_id, max_off);
            }
        } else {
            stream.send.update_blocked_at(None);
            self.streams.remove_blocked(stream_id);
        }

        // If the stream is now flushable push it to the flushable queue, but
        // only if it wasn't already queued.
        //
        // Consider the stream flushable also when we are sending a zero-length
        // frame that has the fin flag set.
        if (flushable || empty_fin) && !was_flushable {
            self.streams.insert_flushable(&priority_key);
        }

        if !writable {
            self.streams.remove_writable(&priority_key);
        } else if was_writable && blocked_by_cap {
            // When `stream_writable_next()` returns a stream, the writable
            // mark is removed, but because the stream is blocked by the
            // connection-level send capacity it won't be marked as writable
            // again once the capacity increases.
            //
            // Since the stream is writable already, mark it here instead.
            self.streams.insert_writable(&priority_key);
        }

        // Account for the buffered bytes against the connection-level send
        // capacity, flow control, and buffered-data bookkeeping.
        self.tx_cap -= sent;

        self.tx_data += sent as u64;

        self.tx_buffered += sent;
        self.check_tx_buffered_invariant();

        qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
            let ev_data = EventData::QuicStreamDataMoved(
                qlog::events::quic::StreamDataMoved {
                    stream_id: Some(stream_id),
                    offset: Some(offset),
                    raw: Some(RawInfo {
                        length: Some(sent as u64),
                        ..Default::default()
                    }),
                    from: Some(DataRecipient::Application),
                    to: Some(DataRecipient::Transport),
                    additional_info: fin
                        .then_some(DataMovedAdditionalInfo::FinSet),
                },
            );

            let now = Instant::now();
            q.add_event_data_with_instant(ev_data, now).ok();
        });

        // Nothing was buffered even though capacity was available: report
        // Done, since Ok(0) is reserved for empty-buffer (fin-only) writes.
        if sent == 0 && cap > 0 {
            return Err(Error::Done);
        }

        if incremental && writable {
            // Shuffle the incremental stream to the back of the queue.
            self.streams.remove_writable(&priority_key);
            self.streams.insert_writable(&priority_key);
        }

        Ok(ret)
    }
6176
6177 /// Sets the priority for a stream.
6178 ///
6179 /// A stream's priority determines the order in which stream data is sent
6180 /// on the wire (streams with lower priority are sent first). Streams are
6181 /// created with a default priority of `127`.
6182 ///
6183 /// The target stream is created if it did not exist before calling this
6184 /// method.
6185 pub fn stream_priority(
6186 &mut self, stream_id: u64, urgency: u8, incremental: bool,
6187 ) -> Result<()> {
6188 // Get existing stream or create a new one, but if the stream
6189 // has already been closed and collected, ignore the prioritization.
6190 let stream = match self.get_or_create_stream(stream_id, true) {
6191 Ok(v) => v,
6192
6193 Err(Error::Done) => return Ok(()),
6194
6195 Err(e) => return Err(e),
6196 };
6197
6198 if stream.urgency == urgency && stream.incremental == incremental {
6199 return Ok(());
6200 }
6201
6202 stream.urgency = urgency;
6203 stream.incremental = incremental;
6204
6205 let new_priority_key = Arc::new(StreamPriorityKey {
6206 urgency: stream.urgency,
6207 incremental: stream.incremental,
6208 id: stream_id,
6209 ..Default::default()
6210 });
6211
6212 let old_priority_key =
6213 std::mem::replace(&mut stream.priority_key, new_priority_key.clone());
6214
6215 self.streams
6216 .update_priority(&old_priority_key, &new_priority_key);
6217
6218 Ok(())
6219 }
6220
    /// Shuts down reading or writing from/to the specified stream.
    ///
    /// When the `direction` argument is set to [`Shutdown::Read`], outstanding
    /// data in the stream's receive buffer is dropped, and no additional data
    /// is added to it. Data received after calling this method is still
    /// validated and acked but not stored, and [`stream_recv()`] will not
    /// return it to the application. In addition, a `STOP_SENDING` frame will
    /// be sent to the peer to signal it to stop sending data.
    ///
    /// When the `direction` argument is set to [`Shutdown::Write`], outstanding
    /// data in the stream's send buffer is dropped, and no additional data is
    /// added to it. Data passed to [`stream_send()`] after calling this method
    /// will be ignored. In addition, a `RESET_STREAM` frame will be sent to the
    /// peer to signal the reset.
    ///
    /// Locally-initiated unidirectional streams can only be closed in the
    /// [`Shutdown::Write`] direction. Remotely-initiated unidirectional streams
    /// can only be closed in the [`Shutdown::Read`] direction. Using an
    /// incorrect direction will return [`InvalidStreamState`].
    ///
    /// [`Shutdown::Read`]: enum.Shutdown.html#variant.Read
    /// [`Shutdown::Write`]: enum.Shutdown.html#variant.Write
    /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
    /// [`stream_send()`]: struct.Connection.html#method.stream_send
    /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
    pub fn stream_shutdown(
        &mut self, stream_id: u64, direction: Shutdown, err: u64,
    ) -> Result<()> {
        // Don't try to stop a local unidirectional stream.
        if direction == Shutdown::Read &&
            stream::is_local(stream_id, self.is_server) &&
            !stream::is_bidi(stream_id)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        // Don't try to reset a remote unidirectional stream.
        if direction == Shutdown::Write &&
            !stream::is_local(stream_id, self.is_server) &&
            !stream::is_bidi(stream_id)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        // Get existing stream.
        let stream = self.streams.get_mut(stream_id).ok_or(Error::Done)?;

        let priority_key = Arc::clone(&stream.priority_key);

        match direction {
            Shutdown::Read => {
                // Drop buffered receive data; it still counts as consumed for
                // connection-level flow control purposes.
                let consumed = stream.recv.shutdown()?;
                self.flow_control.add_consumed(consumed);

                // Only ask the peer to stop (STOP_SENDING) if it could still
                // send more data on this stream.
                if !stream.recv.is_fin() {
                    self.streams.insert_stopped(stream_id, err);
                }

                // Once shutdown, the stream is guaranteed to be non-readable.
                self.streams.remove_readable(&priority_key);

                self.stopped_stream_local_count =
                    self.stopped_stream_local_count.saturating_add(1);
            },

            Shutdown::Write => {
                let (final_size, unsent) = stream.send.shutdown()?;

                // Claw back some flow control allowance from data that was
                // buffered but not actually sent before the stream was reset.
                self.tx_data = self.tx_data.saturating_sub(unsent);

                self.tx_buffered =
                    self.tx_buffered.saturating_sub(unsent as usize);

                // These drops in qlog are a bit weird, but the only way to ensure
                // that all bytes that are moved from App to Transport in
                // stream_do_send are eventually moved from Transport to Dropped.
                // Ideally we would add a Transport to Network transition also as
                // a way to indicate when bytes were transmitted vs dropped
                // without ever being sent.
                qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
                    let ev_data = EventData::QuicStreamDataMoved(
                        qlog::events::quic::StreamDataMoved {
                            stream_id: Some(stream_id),
                            offset: Some(final_size),
                            raw: Some(RawInfo {
                                length: Some(unsent),
                                ..Default::default()
                            }),
                            from: Some(DataRecipient::Transport),
                            to: Some(DataRecipient::Dropped),
                            ..Default::default()
                        },
                    );

                    q.add_event_data_with_instant(ev_data, Instant::now()).ok();
                });

                // Update send capacity.
                self.update_tx_cap();

                // Queue a RESET_STREAM with the chosen error code and final
                // size for transmission to the peer.
                self.streams.insert_reset(stream_id, err, final_size);

                // Once shutdown, the stream is guaranteed to be non-writable.
                self.streams.remove_writable(&priority_key);

                self.reset_stream_local_count =
                    self.reset_stream_local_count.saturating_add(1);
            },
        }

        Ok(())
    }
6335
6336 /// Returns the stream's send capacity in bytes.
6337 ///
6338 /// The returned capacity takes into account the stream's flow control limit
6339 /// as well as connection level flow and congestion control.
6340 ///
6341 /// If the specified stream doesn't exist (including when it has already
6342 /// been completed and closed), the [`InvalidStreamState`] error will be
6343 /// returned.
6344 ///
6345 /// In addition, if the peer has signalled that it doesn't want to receive
6346 /// any more data from this stream by sending the `STOP_SENDING` frame, the
6347 /// [`StreamStopped`] error will be returned.
6348 ///
6349 /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
6350 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
6351 #[inline]
6352 pub fn stream_capacity(&mut self, stream_id: u64) -> Result<usize> {
6353 if let Some(stream) = self.streams.get(stream_id) {
6354 let stream_cap = match stream.send.cap() {
6355 Ok(v) => v,
6356
6357 Err(Error::StreamStopped(e)) => {
6358 // Only collect the stream if it is complete and not
6359 // readable. If it is readable, it will get collected when
6360 // stream_recv() is used.
6361 if stream.is_complete() && !stream.is_readable() {
6362 let local = stream.local;
6363 self.streams.collect(stream_id, local);
6364 }
6365
6366 return Err(Error::StreamStopped(e));
6367 },
6368
6369 Err(e) => return Err(e),
6370 };
6371
6372 let cap = cmp::min(self.tx_cap, stream_cap);
6373 return Ok(cap);
6374 };
6375
6376 Err(Error::InvalidStreamState(stream_id))
6377 }
6378
6379 /// Returns the next stream that has data to read.
6380 ///
6381 /// Note that once returned by this method, a stream ID will not be returned
6382 /// again until it is "re-armed".
6383 ///
6384 /// The application will need to read all of the pending data on the stream,
6385 /// and new data has to be received before the stream is reported again.
6386 ///
6387 /// This is unlike the [`readable()`] method, that returns the same list of
6388 /// readable streams when called multiple times in succession.
6389 ///
6390 /// [`readable()`]: struct.Connection.html#method.readable
6391 pub fn stream_readable_next(&mut self) -> Option<u64> {
6392 let priority_key = self.streams.readable.front().clone_pointer()?;
6393
6394 self.streams.remove_readable(&priority_key);
6395
6396 Some(priority_key.id)
6397 }
6398
6399 /// Returns true if the stream has data that can be read.
6400 pub fn stream_readable(&self, stream_id: u64) -> bool {
6401 let stream = match self.streams.get(stream_id) {
6402 Some(v) => v,
6403
6404 None => return false,
6405 };
6406
6407 stream.is_readable()
6408 }
6409
    /// Returns the next stream that can be written to.
    ///
    /// Note that once returned by this method, a stream ID will not be returned
    /// again until it is "re-armed".
    ///
    /// This is unlike the [`writable()`] method, that returns the same list of
    /// writable streams when called multiple times in succession. It is not
    /// advised to use both `stream_writable_next()` and [`writable()`] on the
    /// same connection, as it may lead to unexpected results.
    ///
    /// The [`stream_writable()`] method can also be used to fine-tune when a
    /// stream is reported as writable again.
    ///
    /// [`stream_writable()`]: struct.Connection.html#method.stream_writable
    /// [`writable()`]: struct.Connection.html#method.writable
    pub fn stream_writable_next(&mut self) -> Option<u64> {
        // If there is not enough connection-level send capacity, none of the
        // streams are writable.
        if self.tx_cap == 0 {
            return None;
        }

        let mut cursor = self.streams.writable.front();

        // Walk the writable queue in order, returning the first stream whose
        // usable capacity reaches its low send watermark.
        while let Some(priority_key) = cursor.clone_pointer() {
            if let Some(stream) = self.streams.get(priority_key.id) {
                let cap = match stream.send.cap() {
                    Ok(v) => v,

                    // Return the stream to the application immediately if it's
                    // stopped.
                    Err(_) =>
                        return {
                            self.streams.remove_writable(&priority_key);

                            Some(priority_key.id)
                        },
                };

                // The usable capacity is bounded by both the stream's and the
                // connection's limits; `send_lowat` is the threshold set via
                // `stream_writable()`.
                if cmp::min(self.tx_cap, cap) >= stream.send_lowat {
                    self.streams.remove_writable(&priority_key);
                    return Some(priority_key.id);
                }
            }

            cursor.move_next();
        }

        None
    }
6460
    /// Returns true if the stream has enough send capacity.
    ///
    /// When `len` more bytes can be buffered into the given stream's send
    /// buffer, `true` will be returned, `false` otherwise.
    ///
    /// In the latter case, if the additional data can't be buffered due to
    /// flow control limits, the peer will also be notified, and a "low send
    /// watermark" will be set for the stream, such that it is not going to be
    /// reported as writable again by [`stream_writable_next()`] until its send
    /// capacity reaches `len`.
    ///
    /// If the specified stream doesn't exist (including when it has already
    /// been completed and closed), the [`InvalidStreamState`] error will be
    /// returned.
    ///
    /// In addition, if the peer has signalled that it doesn't want to receive
    /// any more data from this stream by sending the `STOP_SENDING` frame, the
    /// [`StreamStopped`] error will be returned.
    ///
    /// [`stream_writable_next()`]: struct.Connection.html#method.stream_writable_next
    /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
    /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
    #[inline]
    pub fn stream_writable(
        &mut self, stream_id: u64, len: usize,
    ) -> Result<bool> {
        // Fast path: there is already enough capacity for `len` bytes.
        if self.stream_capacity(stream_id)? >= len {
            return Ok(true);
        }

        let stream = match self.streams.get_mut(stream_id) {
            Some(v) => v,

            None => return Err(Error::InvalidStreamState(stream_id)),
        };

        // Raise the low send watermark so `stream_writable_next()` won't
        // report the stream until `len` bytes fit (minimum of 1 byte).
        stream.send_lowat = cmp::max(1, len);

        let is_writable = stream.is_writable();

        let priority_key = Arc::clone(&stream.priority_key);

        // Mark the connection as blocked if the connection-level flow control
        // limit doesn't allow `len` more bytes to be buffered.
        if self.max_tx_data - self.tx_data < len as u64 {
            self.blocked_limit = Some(self.max_tx_data);
        }

        // The stream's own flow control is the bottleneck: record the blocked
        // offset (at most once per offset) so the peer can be notified.
        if stream.send.cap()? < len {
            let max_off = stream.send.max_off();
            if stream.send.blocked_at() != Some(max_off) {
                stream.send.update_blocked_at(Some(max_off));
                self.streams.insert_blocked(stream_id, max_off);
            }
        } else if is_writable {
            // When `stream_writable_next()` returns a stream, the writable
            // mark is removed, but because the stream is blocked by the
            // connection-level send capacity it won't be marked as writable
            // again once the capacity increases.
            //
            // Since the stream is writable already, mark it here instead.
            self.streams.insert_writable(&priority_key);
        }

        Ok(false)
    }
6525
6526 /// Returns true if all the data has been read from the specified stream.
6527 ///
6528 /// This instructs the application that all the data received from the
6529 /// peer on the stream has been read, and there won't be anymore in the
6530 /// future.
6531 ///
6532 /// Basically this returns true when the peer either set the `fin` flag
6533 /// for the stream, or sent `RESET_STREAM`.
6534 #[inline]
6535 pub fn stream_finished(&self, stream_id: u64) -> bool {
6536 let stream = match self.streams.get(stream_id) {
6537 Some(v) => v,
6538
6539 None => return true,
6540 };
6541
6542 stream.recv.is_fin()
6543 }
6544
6545 /// Returns the number of bidirectional streams that can be created
6546 /// before the peer's stream count limit is reached.
6547 ///
6548 /// This can be useful to know if it's possible to create a bidirectional
6549 /// stream without trying it first.
6550 #[inline]
6551 pub fn peer_streams_left_bidi(&self) -> u64 {
6552 self.streams.peer_streams_left_bidi()
6553 }
6554
6555 /// Returns the number of unidirectional streams that can be created
6556 /// before the peer's stream count limit is reached.
6557 ///
6558 /// This can be useful to know if it's possible to create a unidirectional
6559 /// stream without trying it first.
6560 #[inline]
6561 pub fn peer_streams_left_uni(&self) -> u64 {
6562 self.streams.peer_streams_left_uni()
6563 }
6564
6565 /// Returns an iterator over streams that have outstanding data to read.
6566 ///
6567 /// Note that the iterator will only include streams that were readable at
6568 /// the time the iterator itself was created (i.e. when `readable()` was
6569 /// called). To account for newly readable streams, the iterator needs to
6570 /// be created again.
6571 ///
6572 /// ## Examples:
6573 ///
6574 /// ```no_run
6575 /// # let mut buf = [0; 512];
6576 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6577 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6578 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6579 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6580 /// # let local = socket.local_addr().unwrap();
6581 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6582 /// // Iterate over readable streams.
6583 /// for stream_id in conn.readable() {
6584 /// // Stream is readable, read until there's no more data.
6585 /// while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
6586 /// println!("Got {} bytes on stream {}", read, stream_id);
6587 /// }
6588 /// }
6589 /// # Ok::<(), quiche::Error>(())
6590 /// ```
6591 #[inline]
6592 pub fn readable(&self) -> StreamIter {
6593 self.streams.readable()
6594 }
6595
6596 /// Returns an iterator over streams that can be written in priority order.
6597 ///
6598 /// The priority order is based on RFC 9218 scheduling recommendations.
6599 /// Stream priority can be controlled using [`stream_priority()`]. In order
6600 /// to support fairness requirements, each time this method is called,
6601 /// internal state is updated. Therefore the iterator ordering can change
6602 /// between calls, even if no streams were added or removed.
6603 ///
6604 /// A "writable" stream is a stream that has enough flow control capacity to
6605 /// send data to the peer. To avoid buffering an infinite amount of data,
6606 /// streams are only allowed to buffer outgoing data up to the amount that
6607 /// the peer allows to send.
6608 ///
6609 /// Note that the iterator will only include streams that were writable at
6610 /// the time the iterator itself was created (i.e. when `writable()` was
6611 /// called). To account for newly writable streams, the iterator needs to be
6612 /// created again.
6613 ///
6614 /// ## Examples:
6615 ///
6616 /// ```no_run
6617 /// # let mut buf = [0; 512];
6618 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6619 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6620 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6621 /// # let local = socket.local_addr().unwrap();
6622 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6623 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6624 /// // Iterate over writable streams.
6625 /// for stream_id in conn.writable() {
6626 /// // Stream is writable, write some data.
6627 /// if let Ok(written) = conn.stream_send(stream_id, &buf, false) {
6628 /// println!("Written {} bytes on stream {}", written, stream_id);
6629 /// }
6630 /// }
6631 /// # Ok::<(), quiche::Error>(())
6632 /// ```
6633 /// [`stream_priority()`]: struct.Connection.html#method.stream_priority
6634 #[inline]
6635 pub fn writable(&self) -> StreamIter {
6636 // If there is not enough connection-level send capacity, none of the
6637 // streams are writable, so return an empty iterator.
6638 if self.tx_cap == 0 {
6639 return StreamIter::default();
6640 }
6641
6642 self.streams.writable()
6643 }
6644
6645 /// Returns the maximum possible size of egress UDP payloads.
6646 ///
6647 /// This is the maximum size of UDP payloads that can be sent, and depends
6648 /// on both the configured maximum send payload size of the local endpoint
6649 /// (as configured with [`set_max_send_udp_payload_size()`]), as well as
6650 /// the transport parameter advertised by the remote peer.
6651 ///
6652 /// Note that this value can change during the lifetime of the connection,
6653 /// but should remain stable across consecutive calls to [`send()`].
6654 ///
6655 /// [`set_max_send_udp_payload_size()`]:
6656 /// struct.Config.html#method.set_max_send_udp_payload_size
6657 /// [`send()`]: struct.Connection.html#method.send
6658 pub fn max_send_udp_payload_size(&self) -> usize {
6659 let max_datagram_size = self
6660 .paths
6661 .get_active()
6662 .ok()
6663 .map(|p| p.recovery.max_datagram_size());
6664
6665 if let Some(max_datagram_size) = max_datagram_size {
6666 if self.is_established() {
6667 // We cap the maximum packet size to 16KB or so, so that it can be
6668 // always encoded with a 2-byte varint.
6669 return cmp::min(16383, max_datagram_size);
6670 }
6671 }
6672
6673 // Allow for 1200 bytes (minimum QUIC packet size) during the
6674 // handshake.
6675 MIN_CLIENT_INITIAL_LEN
6676 }
6677
6678 /// Schedule an ack-eliciting packet on the active path.
6679 ///
6680 /// QUIC packets might not contain ack-eliciting frames during normal
6681 /// operating conditions. If the packet would already contain
6682 /// ack-eliciting frames, this method does not change any behavior.
6683 /// However, if the packet would not ordinarily contain ack-eliciting
6684 /// frames, this method ensures that a PING frame sent.
6685 ///
6686 /// Calling this method multiple times before [`send()`] has no effect.
6687 ///
6688 /// [`send()`]: struct.Connection.html#method.send
6689 pub fn send_ack_eliciting(&mut self) -> Result<()> {
6690 if self.is_closed() || self.is_draining() {
6691 return Ok(());
6692 }
6693 self.paths.get_active_mut()?.needs_ack_eliciting = true;
6694 Ok(())
6695 }
6696
6697 /// Schedule an ack-eliciting packet on the specified path.
6698 ///
6699 /// See [`send_ack_eliciting()`] for more detail. [`InvalidState`] is
6700 /// returned if there is no record of the path.
6701 ///
6702 /// [`send_ack_eliciting()`]: struct.Connection.html#method.send_ack_eliciting
6703 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
6704 pub fn send_ack_eliciting_on_path(
6705 &mut self, local: SocketAddr, peer: SocketAddr,
6706 ) -> Result<()> {
6707 if self.is_closed() || self.is_draining() {
6708 return Ok(());
6709 }
6710 let path_id = self
6711 .paths
6712 .path_id_from_addrs(&(local, peer))
6713 .ok_or(Error::InvalidState)?;
6714 self.paths.get_mut(path_id)?.needs_ack_eliciting = true;
6715 Ok(())
6716 }
6717
6718 /// Reads the first received DATAGRAM.
6719 ///
6720 /// On success the DATAGRAM's data is returned along with its size.
6721 ///
6722 /// [`Done`] is returned if there is no data to read.
6723 ///
6724 /// [`BufferTooShort`] is returned if the provided buffer is too small for
6725 /// the DATAGRAM.
6726 ///
6727 /// [`Done`]: enum.Error.html#variant.Done
6728 /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
6729 ///
6730 /// ## Examples:
6731 ///
6732 /// ```no_run
6733 /// # let mut buf = [0; 512];
6734 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6735 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6736 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6737 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6738 /// # let local = socket.local_addr().unwrap();
6739 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6740 /// let mut dgram_buf = [0; 512];
6741 /// while let Ok((len)) = conn.dgram_recv(&mut dgram_buf) {
6742 /// println!("Got {} bytes of DATAGRAM", len);
6743 /// }
6744 /// # Ok::<(), quiche::Error>(())
6745 /// ```
6746 #[inline]
6747 pub fn dgram_recv(&mut self, buf: &mut [u8]) -> Result<usize> {
6748 match self.dgram_recv_queue.pop() {
6749 Some(d) => {
6750 if d.as_ref().len() > buf.len() {
6751 return Err(Error::BufferTooShort);
6752 }
6753 let len = d.as_ref().len();
6754
6755 buf[..len].copy_from_slice(d.as_ref());
6756 Ok(len)
6757 },
6758
6759 None => Err(Error::Done),
6760 }
6761 }
6762
6763 /// Reads the first received DATAGRAM.
6764 ///
6765 /// This is the same as [`dgram_recv()`] but returns the DATAGRAM as an
6766 /// owned buffer instead of copying into the provided buffer.
6767 ///
6768 /// [`dgram_recv()`]: struct.Connection.html#method.dgram_recv
6769 #[inline]
6770 pub fn dgram_recv_buf(&mut self) -> Result<F::DgramBuf> {
6771 self.dgram_recv_queue.pop().ok_or(Error::Done)
6772 }
6773
6774 /// Reads the first received DATAGRAM without removing it from the queue.
6775 ///
6776 /// On success the DATAGRAM's data is returned along with the actual number
6777 /// of bytes peeked. The requested length cannot exceed the DATAGRAM's
6778 /// actual length.
6779 ///
6780 /// [`Done`] is returned if there is no data to read.
6781 ///
6782 /// [`BufferTooShort`] is returned if the provided buffer is smaller the
6783 /// number of bytes to peek.
6784 ///
6785 /// [`Done`]: enum.Error.html#variant.Done
6786 /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
6787 #[inline]
6788 pub fn dgram_recv_peek(&self, buf: &mut [u8], len: usize) -> Result<usize> {
6789 self.dgram_recv_queue.peek_front_bytes(buf, len)
6790 }
6791
6792 /// Returns the length of the first stored DATAGRAM.
6793 #[inline]
6794 pub fn dgram_recv_front_len(&self) -> Option<usize> {
6795 self.dgram_recv_queue.peek_front_len()
6796 }
6797
6798 /// Returns the number of items in the DATAGRAM receive queue.
6799 #[inline]
6800 pub fn dgram_recv_queue_len(&self) -> usize {
6801 self.dgram_recv_queue.len()
6802 }
6803
6804 /// Returns the total size of all items in the DATAGRAM receive queue.
6805 #[inline]
6806 pub fn dgram_recv_queue_byte_size(&self) -> usize {
6807 self.dgram_recv_queue.byte_size()
6808 }
6809
6810 /// Returns the number of items in the DATAGRAM send queue.
6811 #[inline]
6812 pub fn dgram_send_queue_len(&self) -> usize {
6813 self.dgram_send_queue.len()
6814 }
6815
6816 /// Returns the total size of all items in the DATAGRAM send queue.
6817 #[inline]
6818 pub fn dgram_send_queue_byte_size(&self) -> usize {
6819 self.dgram_send_queue.byte_size()
6820 }
6821
6822 /// Returns whether or not the DATAGRAM send queue is full.
6823 #[inline]
6824 pub fn is_dgram_send_queue_full(&self) -> bool {
6825 self.dgram_send_queue.is_full()
6826 }
6827
6828 /// Returns whether or not the DATAGRAM recv queue is full.
6829 #[inline]
6830 pub fn is_dgram_recv_queue_full(&self) -> bool {
6831 self.dgram_recv_queue.is_full()
6832 }
6833
6834 /// Sends data in a DATAGRAM frame.
6835 ///
6836 /// [`Done`] is returned if no data was written.
6837 /// [`InvalidState`] is returned if the peer does not support DATAGRAM.
6838 /// [`BufferTooShort`] is returned if the DATAGRAM frame length is larger
6839 /// than peer's supported DATAGRAM frame length. Use
6840 /// [`dgram_max_writable_len()`] to get the largest supported DATAGRAM
6841 /// frame length.
6842 ///
6843 /// Note that there is no flow control of DATAGRAM frames, so in order to
6844 /// avoid buffering an infinite amount of frames we apply an internal
6845 /// limit.
6846 ///
6847 /// [`Done`]: enum.Error.html#variant.Done
6848 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
6849 /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
6850 /// [`dgram_max_writable_len()`]:
6851 /// struct.Connection.html#method.dgram_max_writable_len
6852 ///
6853 /// ## Examples:
6854 ///
6855 /// ```no_run
6856 /// # let mut buf = [0; 512];
6857 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6858 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6859 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6860 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6861 /// # let local = socket.local_addr().unwrap();
6862 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6863 /// conn.dgram_send(b"hello")?;
6864 /// # Ok::<(), quiche::Error>(())
6865 /// ```
6866 pub fn dgram_send(&mut self, buf: &[u8]) -> Result<()> {
6867 self.dgram_send_buf(F::dgram_buf_from_slice(buf))
6868 }
6869
6870 /// Sends data in a DATAGRAM frame.
6871 ///
6872 /// This is the same as [`dgram_send()`] but takes an owned buffer
6873 /// instead of a slice and avoids copying.
6874 ///
6875 /// [`dgram_send()`]: struct.Connection.html#method.dgram_send
6876 pub fn dgram_send_buf(&mut self, buf: F::DgramBuf) -> Result<()> {
6877 let max_payload_len = match self.dgram_max_writable_len() {
6878 Some(v) => v,
6879
6880 None => return Err(Error::InvalidState),
6881 };
6882
6883 if buf.as_ref().len() > max_payload_len {
6884 return Err(Error::BufferTooShort);
6885 }
6886
6887 self.dgram_send_queue.push(buf)?;
6888
6889 let active_path = self.paths.get_active_mut()?;
6890
6891 if self.dgram_send_queue.byte_size() >
6892 active_path.recovery.cwnd_available()
6893 {
6894 active_path.recovery.update_app_limited(false);
6895 }
6896
6897 Ok(())
6898 }
6899
6900 /// Purges queued outgoing DATAGRAMs matching the predicate.
6901 ///
6902 /// In other words, remove all elements `e` such that `f(&e)` returns true.
6903 ///
6904 /// ## Examples:
6905 /// ```no_run
6906 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6907 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6908 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6909 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6910 /// # let local = socket.local_addr().unwrap();
6911 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6912 /// conn.dgram_send(b"hello")?;
6913 /// conn.dgram_purge_outgoing(&|d: &[u8]| -> bool { d[0] == 0 });
6914 /// # Ok::<(), quiche::Error>(())
6915 /// ```
6916 #[inline]
6917 pub fn dgram_purge_outgoing<FN: Fn(&[u8]) -> bool>(&mut self, f: FN) {
6918 self.dgram_send_queue.purge(f);
6919 }
6920
6921 /// Returns the maximum DATAGRAM payload that can be sent.
6922 ///
6923 /// [`None`] is returned if the peer hasn't advertised a maximum DATAGRAM
6924 /// frame size.
6925 ///
6926 /// ## Examples:
6927 ///
6928 /// ```no_run
6929 /// # let mut buf = [0; 512];
6930 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6931 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6932 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6933 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6934 /// # let local = socket.local_addr().unwrap();
6935 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6936 /// if let Some(payload_size) = conn.dgram_max_writable_len() {
6937 /// if payload_size > 5 {
6938 /// conn.dgram_send(b"hello")?;
6939 /// }
6940 /// }
6941 /// # Ok::<(), quiche::Error>(())
6942 /// ```
6943 #[inline]
6944 pub fn dgram_max_writable_len(&self) -> Option<usize> {
6945 match self.peer_transport_params.max_datagram_frame_size {
6946 None => None,
6947 Some(peer_frame_len) => {
6948 let dcid = self.destination_id();
6949 // Start from the maximum packet size...
6950 let mut max_len = self.max_send_udp_payload_size();
6951 // ...subtract the Short packet header overhead...
6952 // (1 byte of pkt_len + len of dcid)
6953 max_len = max_len.saturating_sub(1 + dcid.len());
6954 // ...subtract the packet number (max len)...
6955 max_len = max_len.saturating_sub(packet::MAX_PKT_NUM_LEN);
6956 // ...subtract the crypto overhead...
6957 max_len = max_len.saturating_sub(
6958 self.crypto_ctx[packet::Epoch::Application]
6959 .crypto_overhead()?,
6960 );
6961 // ...clamp to what peer can support...
6962 max_len = cmp::min(peer_frame_len as usize, max_len);
6963 // ...subtract frame overhead, checked for underflow.
6964 // (1 byte of frame type + len of length )
6965 max_len.checked_sub(1 + frame::MAX_DGRAM_OVERHEAD)
6966 },
6967 }
6968 }
6969
6970 fn dgram_enabled(&self) -> bool {
6971 self.local_transport_params
6972 .max_datagram_frame_size
6973 .is_some()
6974 }
6975
6976 /// Returns when the next timeout event will occur.
6977 ///
6978 /// Once the timeout Instant has been reached, the [`on_timeout()`] method
6979 /// should be called. A timeout of `None` means that the timer should be
6980 /// disarmed.
6981 ///
6982 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
6983 pub fn timeout_instant(&self) -> Option<Instant> {
6984 if self.is_closed() {
6985 return None;
6986 }
6987
6988 if self.is_draining() {
6989 // Draining timer takes precedence over all other timers. If it is
6990 // set it means the connection is closing so there's no point in
6991 // processing the other timers.
6992 self.draining_timer
6993 } else {
6994 // Use the lowest timer value (i.e. "sooner") among idle and loss
6995 // detection timers. If they are both unset (i.e. `None`) then the
6996 // result is `None`, but if at least one of them is set then a
6997 // `Some(...)` value is returned.
6998 let path_timer = self
6999 .paths
7000 .iter()
7001 .filter_map(|(_, p)| p.recovery.loss_detection_timer())
7002 .min();
7003
7004 let key_update_timer = self.crypto_ctx[packet::Epoch::Application]
7005 .key_update
7006 .as_ref()
7007 .map(|key_update| key_update.timer);
7008
7009 let timers = [self.idle_timer, path_timer, key_update_timer];
7010
7011 timers.iter().filter_map(|&x| x).min()
7012 }
7013 }
7014
7015 /// Returns the amount of time until the next timeout event.
7016 ///
7017 /// Once the given duration has elapsed, the [`on_timeout()`] method should
7018 /// be called. A timeout of `None` means that the timer should be disarmed.
7019 ///
7020 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
7021 pub fn timeout(&self) -> Option<Duration> {
7022 self.timeout_instant().map(|timeout| {
7023 let now = Instant::now();
7024
7025 if timeout <= now {
7026 Duration::ZERO
7027 } else {
7028 timeout.duration_since(now)
7029 }
7030 })
7031 }
7032
    /// Processes a timeout event.
    ///
    /// If no timeout has occurred it does nothing.
    ///
    /// Timers are handled in priority order: the draining timer first (it
    /// short-circuits all others), then the idle timer, the key update
    /// timer, and finally the per-path loss detection timers. As a last
    /// step, if the active path has failed validation, a replacement path
    /// is selected or the connection is closed.
    pub fn on_timeout(&mut self) {
        let now = Instant::now();

        if let Some(draining_timer) = self.draining_timer {
            if draining_timer <= now {
                trace!("{} draining timeout expired", self.trace_id);

                self.mark_closed();
            }

            // Draining timer takes precedence over all other timers. If it is
            // set it means the connection is closing so there's no point in
            // processing the other timers.
            return;
        }

        if let Some(timer) = self.idle_timer {
            if timer <= now {
                trace!("{} idle timeout expired", self.trace_id);

                // Idle expiry closes the connection and records that it
                // ended due to a timeout rather than an explicit close.
                self.mark_closed();
                self.timed_out = true;
                return;
            }
        }

        if let Some(timer) = self.crypto_ctx[packet::Epoch::Application]
            .key_update
            .as_ref()
            .map(|key_update| key_update.timer)
        {
            if timer <= now {
                // Discard previous key once key update timer expired.
                let _ = self.crypto_ctx[packet::Epoch::Application]
                    .key_update
                    .take();
            }
        }

        let handshake_status = self.handshake_status();

        // Run loss detection on every path whose timer has fired, and
        // fold the per-path loss counters into the connection totals.
        for (_, p) in self.paths.iter_mut() {
            if let Some(timer) = p.recovery.loss_detection_timer() {
                if timer <= now {
                    trace!("{} loss detection timeout expired", self.trace_id);

                    let OnLossDetectionTimeoutOutcome {
                        lost_packets,
                        lost_bytes,
                    } = p.on_loss_detection_timeout(
                        handshake_status,
                        now,
                        self.is_server,
                        &self.trace_id,
                    );

                    self.lost_count += lost_packets;
                    self.lost_bytes += lost_bytes as u64;

                    qlog_with_type!(QLOG_METRICS, self.qlog, q, {
                        p.recovery.maybe_qlog(q, now);
                    });
                }
            }
        }

        // Notify timeout events to the application.
        self.paths.notify_failed_validations();

        // If the active path failed, try to find a new candidate.
        if self.paths.get_active_path_id().is_err() {
            match self.paths.find_candidate_path() {
                Some(pid) => {
                    if self.set_active_path(pid, now).is_err() {
                        // The connection cannot continue.
                        self.mark_closed();
                    }
                },

                // The connection cannot continue.
                None => {
                    self.mark_closed();
                },
            }
        }
    }
7122
7123 /// Requests the stack to perform path validation of the proposed 4-tuple.
7124 ///
7125 /// Probing new paths requires spare Connection IDs at both the host and the
7126 /// peer sides. If it is not the case, it raises an [`OutOfIdentifiers`].
7127 ///
7128 /// The probing of new addresses can only be done by the client. The server
7129 /// can only probe network paths that were previously advertised by
7130 /// [`PathEvent::New`]. If the server tries to probe such an unseen network
7131 /// path, this call raises an [`InvalidState`].
7132 ///
7133 /// The caller might also want to probe an existing path. In such case, it
7134 /// triggers a PATH_CHALLENGE frame, but it does not require spare CIDs.
7135 ///
7136 /// A server always probes a new path it observes. Calling this method is
7137 /// hence not required to validate a new path. However, a server can still
7138 /// request an additional path validation of the proposed 4-tuple.
7139 ///
7140 /// Calling this method several times before calling [`send()`] or
7141 /// [`send_on_path()`] results in a single probe being generated. An
7142 /// application wanting to send multiple in-flight probes must call this
7143 /// method again after having sent packets.
7144 ///
7145 /// Returns the Destination Connection ID sequence number associated to that
7146 /// path.
7147 ///
7148 /// [`PathEvent::New`]: enum.PathEvent.html#variant.New
7149 /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
7150 /// [`InvalidState`]: enum.Error.html#InvalidState
7151 /// [`send()`]: struct.Connection.html#method.send
7152 /// [`send_on_path()`]: struct.Connection.html#method.send_on_path
7153 pub fn probe_path(
7154 &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
7155 ) -> Result<u64> {
7156 // We may want to probe an existing path.
7157 let pid = match self.paths.path_id_from_addrs(&(local_addr, peer_addr)) {
7158 Some(pid) => pid,
7159 None => self.create_path_on_client(local_addr, peer_addr)?,
7160 };
7161
7162 let path = self.paths.get_mut(pid)?;
7163 path.request_validation();
7164
7165 path.active_dcid_seq.ok_or(Error::InvalidState)
7166 }
7167
7168 /// Migrates the connection to a new local address `local_addr`.
7169 ///
7170 /// The behavior is similar to [`migrate()`], with the nuance that the
7171 /// connection only changes the local address, but not the peer one.
7172 ///
7173 /// See [`migrate()`] for the full specification of this method.
7174 ///
7175 /// [`migrate()`]: struct.Connection.html#method.migrate
7176 pub fn migrate_source(&mut self, local_addr: SocketAddr) -> Result<u64> {
7177 let peer_addr = self.paths.get_active()?.peer_addr();
7178 self.migrate(local_addr, peer_addr)
7179 }
7180
    /// Migrates the connection over the given network path between `local_addr`
    /// and `peer_addr`.
    ///
    /// Connection migration can only be initiated by the client. Calling this
    /// method as a server returns [`InvalidState`].
    ///
    /// To initiate voluntary migration, there should be enough Connection IDs
    /// at both sides. If this requirement is not satisfied, this call returns
    /// [`OutOfIdentifiers`].
    ///
    /// Returns the Destination Connection ID associated to that migrated path.
    ///
    /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
    /// [`InvalidState`]: enum.Error.html#InvalidState
    pub fn migrate(
        &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
    ) -> Result<u64> {
        if self.is_server {
            return Err(Error::InvalidState);
        }

        // If the path already exists, mark it as the active one.
        let (pid, dcid_seq) = if let Some(pid) =
            self.paths.path_id_from_addrs(&(local_addr, peer_addr))
        {
            let path = self.paths.get_mut(pid)?;

            // If it is already active, do nothing.
            if path.active() {
                return path.active_dcid_seq.ok_or(Error::OutOfIdentifiers);
            }

            // Ensures that a Source Connection ID has been dedicated to this
            // path, or a free one is available. This is only required if the
            // host uses non-zero length Source Connection IDs.
            if !self.ids.zero_length_scid() &&
                path.active_scid_seq.is_none() &&
                self.ids.available_scids() == 0
            {
                return Err(Error::OutOfIdentifiers);
            }

            // Ensures that the migrated path has a Destination Connection ID.
            let dcid_seq = if let Some(dcid_seq) = path.active_dcid_seq {
                dcid_seq
            } else {
                // No DCID bound to this path yet: take the lowest unused one
                // and bind it to the path before activating it.
                let dcid_seq = self
                    .ids
                    .lowest_available_dcid_seq()
                    .ok_or(Error::OutOfIdentifiers)?;

                self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
                path.active_dcid_seq = Some(dcid_seq);

                dcid_seq
            };

            (pid, dcid_seq)
        } else {
            // Unknown 4-tuple: create a brand new path (client only); path
            // creation assigns its Destination Connection ID.
            let pid = self.create_path_on_client(local_addr, peer_addr)?;

            let dcid_seq = self
                .paths
                .get(pid)?
                .active_dcid_seq
                .ok_or(Error::InvalidState)?;

            (pid, dcid_seq)
        };

        // Change the active path.
        self.set_active_path(pid, Instant::now())?;

        Ok(dcid_seq)
    }
7256
7257 /// Provides additional source Connection IDs that the peer can use to reach
7258 /// this host.
7259 ///
7260 /// This triggers sending NEW_CONNECTION_ID frames if the provided Source
7261 /// Connection ID is not already present. In the case the caller tries to
7262 /// reuse a Connection ID with a different reset token, this raises an
7263 /// `InvalidState`.
7264 ///
7265 /// At any time, the peer cannot have more Destination Connection IDs than
7266 /// the maximum number of active Connection IDs it negotiated. In such case
7267 /// (i.e., when [`scids_left()`] returns 0), if the host agrees to
7268 /// request the removal of previous connection IDs, it sets the
7269 /// `retire_if_needed` parameter. Otherwise, an [`IdLimit`] is returned.
7270 ///
7271 /// Note that setting `retire_if_needed` does not prevent this function from
7272 /// returning an [`IdLimit`] in the case the caller wants to retire still
7273 /// unannounced Connection IDs.
7274 ///
7275 /// The caller is responsible for ensuring that the provided `scid` is not
7276 /// repeated several times over the connection. quiche ensures that as long
7277 /// as the provided Connection ID is still in use (i.e., not retired), it
7278 /// does not assign a different sequence number.
7279 ///
7280 /// Note that if the host uses zero-length Source Connection IDs, it cannot
7281 /// advertise Source Connection IDs and calling this method returns an
7282 /// [`InvalidState`].
7283 ///
7284 /// Returns the sequence number associated to the provided Connection ID.
7285 ///
7286 /// [`scids_left()`]: struct.Connection.html#method.scids_left
7287 /// [`IdLimit`]: enum.Error.html#IdLimit
7288 /// [`InvalidState`]: enum.Error.html#InvalidState
7289 pub fn new_scid(
7290 &mut self, scid: &ConnectionId, reset_token: u128, retire_if_needed: bool,
7291 ) -> Result<u64> {
7292 self.ids.new_scid(
7293 scid.to_vec().into(),
7294 Some(reset_token),
7295 true,
7296 None,
7297 retire_if_needed,
7298 )
7299 }
7300
7301 /// Returns the number of source Connection IDs that are active. This is
7302 /// only meaningful if the host uses non-zero length Source Connection IDs.
7303 pub fn active_scids(&self) -> usize {
7304 self.ids.active_source_cids()
7305 }
7306
7307 /// Returns the number of additional source Connection IDs that can be
7308 /// provided to the peer without exceeding the limit it advertised.
7309 ///
7310 /// The limit is the minimum of the locally configured active connection
7311 /// ID limit and the one sent by the peer.
7312 ///
7313 /// Returns `0` when the peer's limit is already reached or temporarily
7314 /// exceeded (e.g. during a SCID rotation where a retirement is in
7315 /// flight and `active_scids()` transiently exceeds the advertised
7316 /// limit).
7317 ///
7318 /// To obtain the maximum possible value allowed by the peer an application
7319 /// can instead inspect the [`peer_active_conn_id_limit`] value.
7320 ///
7321 /// [`peer_active_conn_id_limit`]: struct.Stats.html#structfield.peer_active_conn_id_limit
7322 #[inline]
7323 pub fn scids_left(&self) -> usize {
7324 let max_active_source_cids = cmp::min(
7325 self.peer_transport_params.active_conn_id_limit,
7326 self.local_transport_params.active_conn_id_limit,
7327 ) as usize;
7328
7329 max_active_source_cids.saturating_sub(self.active_scids())
7330 }
7331
    /// Requests the retirement of the destination Connection ID used by the
    /// host to reach its peer.
    ///
    /// This triggers sending RETIRE_CONNECTION_ID frames.
    ///
    /// If the application tries to retire a non-existing Destination Connection
    /// ID sequence number, or if it uses zero-length Destination Connection ID,
    /// this method returns an [`InvalidState`].
    ///
    /// At any time, the host must have at least one Destination ID. If the
    /// application tries to retire the last one, or if the caller tries to
    /// retire the destination Connection ID used by the current active path
    /// while having neither spare Destination Connection IDs nor validated
    /// network paths, this method returns an [`OutOfIdentifiers`]. This
    /// behavior prevents the caller from stalling the connection due to the
    /// lack of validated path to send non-probing packets.
    ///
    /// [`InvalidState`]: enum.Error.html#InvalidState
    /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
    pub fn retire_dcid(&mut self, dcid_seq: u64) -> Result<()> {
        if self.ids.zero_length_dcid() {
            return Err(Error::InvalidState);
        }

        let active_path_dcid_seq = self
            .paths
            .get_active()?
            .active_dcid_seq
            .ok_or(Error::InvalidState)?;

        let active_path_id = self.paths.get_active_path_id()?;

        // Refuse to retire the active path's DCID when there is neither a
        // spare DCID to replace it nor another usable path to fall back on,
        // as that would leave the connection unable to send.
        if active_path_dcid_seq == dcid_seq &&
            self.ids.lowest_available_dcid_seq().is_none() &&
            !self
                .paths
                .iter()
                .any(|(pid, p)| pid != active_path_id && p.usable())
        {
            return Err(Error::OutOfIdentifiers);
        }

        if let Some(pid) = self.ids.retire_dcid(dcid_seq)? {
            // The retired Destination CID was associated to a given path. Let's
            // find an available DCID to associate to that path.
            let path = self.paths.get_mut(pid)?;
            let dcid_seq = self.ids.lowest_available_dcid_seq();

            if let Some(dcid_seq) = dcid_seq {
                self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
            }

            // May be `None` when no replacement DCID is available; the path
            // is then left without an active DCID.
            path.active_dcid_seq = dcid_seq;
        }

        Ok(())
    }
7389
7390 /// Processes path-specific events.
7391 ///
7392 /// On success it returns a [`PathEvent`], or `None` when there are no
7393 /// events to report. Please refer to [`PathEvent`] for the exhaustive event
7394 /// list.
7395 ///
7396 /// Note that all events are edge-triggered, meaning that once reported they
7397 /// will not be reported again by calling this method again, until the event
7398 /// is re-armed.
7399 ///
7400 /// [`PathEvent`]: enum.PathEvent.html
7401 pub fn path_event_next(&mut self) -> Option<PathEvent> {
7402 self.paths.pop_event()
7403 }
7404
7405 /// Returns the number of source Connection IDs that are retired.
7406 pub fn retired_scids(&self) -> usize {
7407 self.ids.retired_source_cids()
7408 }
7409
7410 /// Returns a source `ConnectionId` that has been retired.
7411 ///
7412 /// On success it returns a [`ConnectionId`], or `None` when there are no
7413 /// more retired connection IDs.
7414 ///
7415 /// [`ConnectionId`]: struct.ConnectionId.html
7416 pub fn retired_scid_next(&mut self) -> Option<ConnectionId<'static>> {
7417 self.ids.pop_retired_scid()
7418 }
7419
7420 /// Returns the number of spare Destination Connection IDs, i.e.,
7421 /// Destination Connection IDs that are still unused.
7422 ///
7423 /// Note that this function returns 0 if the host uses zero length
7424 /// Destination Connection IDs.
7425 pub fn available_dcids(&self) -> usize {
7426 self.ids.available_dcids()
7427 }
7428
7429 /// Returns an iterator over destination `SockAddr`s whose association
7430 /// with `from` forms a known QUIC path on which packets can be sent to.
7431 ///
7432 /// This function is typically used in combination with [`send_on_path()`].
7433 ///
7434 /// Note that the iterator includes all the possible combination of
7435 /// destination `SockAddr`s, even those whose sending is not required now.
7436 /// In other words, this is another way for the application to recall from
7437 /// past [`PathEvent::New`] events.
7438 ///
7439 /// [`PathEvent::New`]: enum.PathEvent.html#variant.New
7440 /// [`send_on_path()`]: struct.Connection.html#method.send_on_path
7441 ///
7442 /// ## Examples:
7443 ///
7444 /// ```no_run
7445 /// # let mut out = [0; 512];
7446 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
7447 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
7448 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
7449 /// # let local = socket.local_addr().unwrap();
7450 /// # let peer = "127.0.0.1:1234".parse().unwrap();
7451 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
7452 /// // Iterate over possible destinations for the given local `SockAddr`.
7453 /// for dest in conn.paths_iter(local) {
7454 /// loop {
7455 /// let (write, send_info) =
7456 /// match conn.send_on_path(&mut out, Some(local), Some(dest)) {
7457 /// Ok(v) => v,
7458 ///
7459 /// Err(quiche::Error::Done) => {
7460 /// // Done writing for this destination.
7461 /// break;
7462 /// },
7463 ///
7464 /// Err(e) => {
7465 /// // An error occurred, handle it.
7466 /// break;
7467 /// },
7468 /// };
7469 ///
7470 /// socket.send_to(&out[..write], &send_info.to).unwrap();
7471 /// }
7472 /// }
7473 /// # Ok::<(), quiche::Error>(())
7474 /// ```
7475 #[inline]
7476 pub fn paths_iter(&self, from: SocketAddr) -> SocketAddrIter {
7477 // Instead of trying to identify whether packets will be sent on the
7478 // given 4-tuple, simply filter paths that cannot be used.
7479 SocketAddrIter {
7480 sockaddrs: self
7481 .paths
7482 .iter()
7483 .filter(|(_, p)| p.active_dcid_seq.is_some())
7484 .filter(|(_, p)| p.usable() || p.probing_required())
7485 .filter(|(_, p)| p.local_addr() == from)
7486 .map(|(_, p)| p.peer_addr())
7487 .collect(),
7488
7489 index: 0,
7490 }
7491 }
7492
7493 /// Closes the connection with the given error and reason.
7494 ///
7495 /// The `app` parameter specifies whether an application close should be
7496 /// sent to the peer. Otherwise a normal connection close is sent.
7497 ///
7498 /// If `app` is true but the connection is not in a state that is safe to
7499 /// send an application error (not established nor in early data), in
7500 /// accordance with [RFC
7501 /// 9000](https://www.rfc-editor.org/rfc/rfc9000.html#section-10.2.3-3), the
7502 /// error code is changed to APPLICATION_ERROR and the reason phrase is
7503 /// cleared.
7504 ///
7505 /// Returns [`Done`] if the connection had already been closed.
7506 ///
7507 /// Note that the connection will not be closed immediately. An application
7508 /// should continue calling the [`recv()`], [`send()`], [`timeout()`] and
7509 /// [`on_timeout()`] methods as normal, until the [`is_closed()`] method
7510 /// returns `true`.
7511 ///
7512 /// [`Done`]: enum.Error.html#variant.Done
7513 /// [`recv()`]: struct.Connection.html#method.recv
7514 /// [`send()`]: struct.Connection.html#method.send
7515 /// [`timeout()`]: struct.Connection.html#method.timeout
7516 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
7517 /// [`is_closed()`]: struct.Connection.html#method.is_closed
7518 pub fn close(&mut self, app: bool, err: u64, reason: &[u8]) -> Result<()> {
7519 if self.is_closed() || self.is_draining() {
7520 return Err(Error::Done);
7521 }
7522
7523 if self.local_error.is_some() {
7524 return Err(Error::Done);
7525 }
7526
7527 let is_safe_to_send_app_data =
7528 self.is_established() || self.is_in_early_data();
7529
7530 if app && !is_safe_to_send_app_data {
7531 // Clear error information.
7532 self.local_error = Some(ConnectionError {
7533 is_app: false,
7534 error_code: 0x0c,
7535 reason: vec![],
7536 });
7537 } else {
7538 self.local_error = Some(ConnectionError {
7539 is_app: app,
7540 error_code: err,
7541 reason: reason.to_vec(),
7542 });
7543 }
7544
7545 // When no packet was successfully processed close connection immediately.
7546 if self.recv_count == 0 {
7547 self.mark_closed();
7548 }
7549
7550 Ok(())
7551 }
7552
7553 /// Returns a string uniquely representing the connection.
7554 ///
7555 /// This can be used for logging purposes to differentiate between multiple
7556 /// connections.
7557 #[inline]
7558 pub fn trace_id(&self) -> &str {
7559 &self.trace_id
7560 }
7561
7562 /// Returns the negotiated ALPN protocol.
7563 ///
7564 /// If no protocol has been negotiated, the returned value is empty.
7565 #[inline]
7566 pub fn application_proto(&self) -> &[u8] {
7567 self.alpn.as_ref()
7568 }
7569
    /// Returns the server name requested by the client.
    ///
    /// Returns `None` when the peer did not provide one.
    #[inline]
    pub fn server_name(&self) -> Option<&str> {
        self.handshake.server_name()
    }
7575
    /// Returns the peer's leaf certificate (if any) as a DER-encoded buffer.
    ///
    /// Returns `None` when the peer did not present a certificate.
    #[inline]
    pub fn peer_cert(&self) -> Option<&[u8]> {
        self.handshake.peer_cert()
    }
7581
    /// Returns the peer's certificate chain (if any) as a vector of DER-encoded
    /// buffers.
    ///
    /// The certificate at index 0 is the peer's leaf certificate, the other
    /// certificates (if any) are the chain certificate authorities used to
    /// sign the leaf certificate.
    ///
    /// Returns `None` when the peer did not present a certificate chain.
    #[inline]
    pub fn peer_cert_chain(&self) -> Option<Vec<&[u8]>> {
        self.handshake.peer_cert_chain()
    }
7592
    /// Returns the serialized cryptographic session for the connection.
    ///
    /// This can be used by a client to cache a connection's session, and resume
    /// it later using the [`set_session()`] method.
    ///
    /// Returns `None` when no session has been established or captured yet.
    ///
    /// [`set_session()`]: struct.Connection.html#method.set_session
    #[inline]
    pub fn session(&self) -> Option<&[u8]> {
        self.session.as_deref()
    }
7603
7604 /// Returns the source connection ID.
7605 ///
7606 /// When there are multiple IDs, and if there is an active path, the ID used
7607 /// on that path is returned. Otherwise the oldest ID is returned.
7608 ///
7609 /// Note that the value returned can change throughout the connection's
7610 /// lifetime.
7611 #[inline]
7612 pub fn source_id(&self) -> ConnectionId<'_> {
7613 if let Ok(path) = self.paths.get_active() {
7614 if let Some(active_scid_seq) = path.active_scid_seq {
7615 if let Ok(e) = self.ids.get_scid(active_scid_seq) {
7616 return ConnectionId::from_ref(e.cid.as_ref());
7617 }
7618 }
7619 }
7620
7621 let e = self.ids.oldest_scid();
7622 ConnectionId::from_ref(e.cid.as_ref())
7623 }
7624
    /// Returns all active source connection IDs.
    ///
    /// An iterator is returned for all active IDs (i.e. ones that have not
    /// been explicitly retired yet).
    #[inline]
    pub fn source_ids(&self) -> impl Iterator<Item = &ConnectionId<'_>> {
        self.ids.scids_iter()
    }
7633
7634 /// Returns the destination connection ID.
7635 ///
7636 /// Note that the value returned can change throughout the connection's
7637 /// lifetime.
7638 #[inline]
7639 pub fn destination_id(&self) -> ConnectionId<'_> {
7640 if let Ok(path) = self.paths.get_active() {
7641 if let Some(active_dcid_seq) = path.active_dcid_seq {
7642 if let Ok(e) = self.ids.get_dcid(active_dcid_seq) {
7643 return ConnectionId::from_ref(e.cid.as_ref());
7644 }
7645 }
7646 }
7647
7648 let e = self.ids.oldest_dcid();
7649 ConnectionId::from_ref(e.cid.as_ref())
7650 }
7651
7652 /// Returns the PMTU for the active path if it exists.
7653 ///
7654 /// This requires no additonal packets to be sent but simply checks if PMTUD
7655 /// has completed and has found a valid PMTU.
7656 #[inline]
7657 pub fn pmtu(&self) -> Option<usize> {
7658 if let Ok(path) = self.paths.get_active() {
7659 path.pmtud.as_ref().and_then(|pmtud| pmtud.get_pmtu())
7660 } else {
7661 None
7662 }
7663 }
7664
7665 /// Revalidates the PMTU for the active path by sending a new probe packet
7666 /// of PMTU size. If the probe is dropped PMTUD will restart and find a new
7667 /// valid PMTU.
7668 #[inline]
7669 pub fn revalidate_pmtu(&mut self) {
7670 if let Ok(active_path) = self.paths.get_active_mut() {
7671 if let Some(pmtud) = active_path.pmtud.as_mut() {
7672 pmtud.revalidate_pmtu();
7673 }
7674 }
7675 }
7676
    /// Returns true if the connection handshake is complete.
    #[inline]
    pub fn is_established(&self) -> bool {
        self.handshake_completed
    }
7682
    /// Returns true if the connection is resumed from a previous session.
    #[inline]
    pub fn is_resumed(&self) -> bool {
        self.handshake.is_resumed()
    }
7688
    /// Returns true if the connection has a pending handshake that has
    /// progressed enough to send or receive early data.
    #[inline]
    pub fn is_in_early_data(&self) -> bool {
        self.handshake.is_in_early_data()
    }
7695
    /// Returns the early data reason for the connection.
    ///
    /// This status can be useful for logging and debugging. See [BoringSSL]
    /// documentation for a definition of the reasons.
    ///
    /// [BoringSSL]: https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#ssl_early_data_reason_t
    #[inline]
    pub fn early_data_reason(&self) -> u32 {
        self.handshake.early_data_reason()
    }
7706
    /// Returns whether there is stream or DATAGRAM data available to read.
    #[inline]
    pub fn is_readable(&self) -> bool {
        // Either a readable stream or a queued incoming DATAGRAM suffices.
        self.streams.has_readable() || self.dgram_recv_front_len().is_some()
    }
7712
7713 /// Returns whether the network path with local address `from` and remote
7714 /// address `peer` has been validated.
7715 ///
7716 /// If the 4-tuple does not exist over the connection, returns an
7717 /// [`InvalidState`].
7718 ///
7719 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
7720 pub fn is_path_validated(
7721 &self, from: SocketAddr, to: SocketAddr,
7722 ) -> Result<bool> {
7723 let pid = self
7724 .paths
7725 .path_id_from_addrs(&(from, to))
7726 .ok_or(Error::InvalidState)?;
7727
7728 Ok(self.paths.get(pid)?.validated())
7729 }
7730
    /// Returns true if the connection is draining.
    ///
    /// If this returns `true`, the connection object cannot yet be dropped, but
    /// no new application data can be sent or received. An application should
    /// continue calling the [`recv()`], [`timeout()`], and [`on_timeout()`]
    /// methods as normal, until the [`is_closed()`] method returns `true`.
    ///
    /// In contrast, once `is_draining()` returns `true`, calling [`send()`]
    /// is not required because no new outgoing packets will be generated.
    ///
    /// [`recv()`]: struct.Connection.html#method.recv
    /// [`send()`]: struct.Connection.html#method.send
    /// [`timeout()`]: struct.Connection.html#method.timeout
    /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
    /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn is_draining(&self) -> bool {
        // A running draining timer is what defines the draining state.
        self.draining_timer.is_some()
    }
7750
    /// Returns true if the connection is closed.
    ///
    /// If this returns true, the connection object can be dropped.
    #[inline]
    pub fn is_closed(&self) -> bool {
        self.closed
    }
7758
    /// Returns true if the connection was closed due to the idle timeout.
    #[inline]
    pub fn is_timed_out(&self) -> bool {
        self.timed_out
    }
7764
    /// Returns the error received from the peer, if any.
    ///
    /// Note that a `Some` return value does not necessarily imply
    /// [`is_closed()`] or any other connection state.
    ///
    /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn peer_error(&self) -> Option<&ConnectionError> {
        self.peer_error.as_ref()
    }
7775
    /// Returns the error [`close()`] was called with, or internally
    /// created quiche errors, if any.
    ///
    /// Note that a `Some` return value does not necessarily imply
    /// [`is_closed()`] or any other connection state.
    /// `Some` also does not guarantee that the error has been sent to
    /// or received by the peer.
    ///
    /// [`close()`]: struct.Connection.html#method.close
    /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn local_error(&self) -> Option<&ConnectionError> {
        self.local_error.as_ref()
    }
7790
    /// Collects and returns statistics about the connection.
    ///
    /// The returned counters are cumulative over the lifetime of the
    /// connection, except for derived values such as
    /// `bytes_in_flight_duration` which are computed on demand.
    #[inline]
    pub fn stats(&self) -> Stats {
        Stats {
            recv: self.recv_count,
            sent: self.sent_count,
            lost: self.lost_count,
            spurious_lost: self.spurious_lost_count,
            retrans: self.retrans_count,
            sent_bytes: self.sent_bytes,
            recv_bytes: self.recv_bytes,
            acked_bytes: self.acked_bytes,
            lost_bytes: self.lost_bytes,
            stream_retrans_bytes: self.stream_retrans_bytes,
            dgram_recv: self.dgram_recv_count,
            dgram_sent: self.dgram_sent_count,
            paths_count: self.paths.len(),
            reset_stream_count_local: self.reset_stream_local_count,
            stopped_stream_count_local: self.stopped_stream_local_count,
            reset_stream_count_remote: self.reset_stream_remote_count,
            stopped_stream_count_remote: self.stopped_stream_remote_count,
            data_blocked_sent_count: self.data_blocked_sent_count,
            stream_data_blocked_sent_count: self.stream_data_blocked_sent_count,
            data_blocked_recv_count: self.data_blocked_recv_count,
            stream_data_blocked_recv_count: self.stream_data_blocked_recv_count,
            streams_blocked_bidi_recv_count: self.streams_blocked_bidi_recv_count,
            streams_blocked_uni_recv_count: self.streams_blocked_uni_recv_count,
            path_challenge_rx_count: self.path_challenge_rx_count,
            amplification_limited_count: self.amplification_limited_count,
            bytes_in_flight_duration: self.bytes_in_flight_duration(),
            tx_buffered_state: self.tx_buffered_state,
        }
    }
7824
7825 /// Returns the sum of the durations when each path in the
7826 /// connection was actively sending bytes or waiting for acks.
7827 /// Note that this could result in a duration that is longer than
7828 /// the actual connection duration in cases where multiple paths
7829 /// are active for extended periods of time. In practice only 1
7830 /// path is typically active at a time.
7831 /// TODO revisit computation if in the future multiple paths are
7832 /// often active at the same time.
7833 fn bytes_in_flight_duration(&self) -> Duration {
7834 self.paths.iter().fold(Duration::ZERO, |acc, (_, path)| {
7835 acc + path.bytes_in_flight_duration()
7836 })
7837 }
7838
7839 /// Returns reference to peer's transport parameters. Returns `None` if we
7840 /// have not yet processed the peer's transport parameters.
7841 pub fn peer_transport_params(&self) -> Option<&TransportParams> {
7842 if !self.parsed_peer_transport_params {
7843 return None;
7844 }
7845
7846 Some(&self.peer_transport_params)
7847 }
7848
    /// Collects and returns statistics about each known path for the
    /// connection.
    pub fn path_stats(&self) -> impl Iterator<Item = PathStats> + '_ {
        self.paths.iter().map(|(_, p)| p.stats())
    }
7854
    /// Returns whether or not this is a server-side connection.
    pub fn is_server(&self) -> bool {
        self.is_server
    }
7859
    /// Encodes the local transport parameters and hands them to the TLS
    /// handshake so they can be sent to the peer.
    fn encode_transport_params(&mut self) -> Result<()> {
        self.handshake.set_quic_transport_params(
            &self.local_transport_params,
            self.is_server,
        )
    }
7866
    /// Validates and applies the peer's transport parameters.
    ///
    /// Performs the connection ID authentication checks of RFC 9000
    /// Section 7.3 (`initial_source_connection_id`,
    /// `original_destination_connection_id`, `retry_source_connection_id`)
    /// before delegating to `process_peer_transport_params()`, and marks
    /// the parameters as parsed on success.
    ///
    /// Returns `InvalidTransportParam` when any required parameter is
    /// missing or does not match the connection IDs observed on the wire.
    fn parse_peer_transport_params(
        &mut self, peer_params: TransportParams,
    ) -> Result<()> {
        // Validate initial_source_connection_id: it must be present and must
        // match the DCID we are currently sending to.
        match &peer_params.initial_source_connection_id {
            Some(v) if v != &self.destination_id() =>
                return Err(Error::InvalidTransportParam),

            Some(_) => (),

            // initial_source_connection_id must be sent by
            // both endpoints.
            None => return Err(Error::InvalidTransportParam),
        }

        // Validate original_destination_connection_id (only checked when we
        // recorded an original DCID, i.e. on the client).
        if let Some(odcid) = &self.odcid {
            match &peer_params.original_destination_connection_id {
                Some(v) if v != odcid =>
                    return Err(Error::InvalidTransportParam),

                Some(_) => (),

                // original_destination_connection_id must be
                // sent by the server.
                None if !self.is_server =>
                    return Err(Error::InvalidTransportParam),

                None => (),
            }
        }

        // Validate retry_source_connection_id (only checked when a Retry
        // took place and we recorded the retry SCID).
        if let Some(rscid) = &self.rscid {
            match &peer_params.retry_source_connection_id {
                Some(v) if v != rscid =>
                    return Err(Error::InvalidTransportParam),

                Some(_) => (),

                // retry_source_connection_id must be sent by
                // the server.
                None => return Err(Error::InvalidTransportParam),
            }
        }

        self.process_peer_transport_params(peer_params)?;

        self.parsed_peer_transport_params = true;

        Ok(())
    }
7919
    /// Applies the (already validated) peer transport parameters to the
    /// connection state.
    ///
    /// Updates connection-level and stream-level flow control limits, ack
    /// delay configuration, the active path's datagram size, and the source
    /// connection ID limit, then stores the parameters for later reference.
    fn process_peer_transport_params(
        &mut self, peer_params: TransportParams,
    ) -> Result<()> {
        self.max_tx_data = peer_params.initial_max_data;

        // Update send capacity.
        self.update_tx_cap();

        self.streams
            .update_peer_max_streams_bidi(peer_params.initial_max_streams_bidi);
        self.streams
            .update_peer_max_streams_uni(peer_params.initial_max_streams_uni);

        // max_ack_delay is transmitted in milliseconds.
        let max_ack_delay = Duration::from_millis(peer_params.max_ack_delay);

        self.recovery_config.max_ack_delay = max_ack_delay;

        let active_path = self.paths.get_active_mut()?;

        active_path.recovery.update_max_ack_delay(max_ack_delay);

        if active_path
            .pmtud
            .as_ref()
            .map(|pmtud| pmtud.should_probe())
            .unwrap_or(false)
        {
            // PMTUD is probing: cap the probe size by the peer's advertised
            // max_udp_payload_size.
            active_path.recovery.pmtud_update_max_datagram_size(
                active_path
                    .pmtud
                    .as_mut()
                    .expect("PMTUD existence verified above")
                    .get_probe_size()
                    .min(peer_params.max_udp_payload_size as usize),
            );
        } else {
            active_path.recovery.update_max_datagram_size(
                peer_params.max_udp_payload_size as usize,
            );
        }

        // Record the max_active_conn_id parameter advertised by the peer.
        self.ids
            .set_source_conn_id_limit(peer_params.active_conn_id_limit);

        self.peer_transport_params = peer_params;

        Ok(())
    }
7969
7970 /// Continues the handshake.
7971 ///
7972 /// If the connection is already established, it does nothing.
7973 fn do_handshake(&mut self, now: Instant) -> Result<()> {
7974 let mut ex_data = tls::ExData {
7975 application_protos: &self.application_protos,
7976
7977 crypto_ctx: &mut self.crypto_ctx,
7978
7979 session: &mut self.session,
7980
7981 local_error: &mut self.local_error,
7982
7983 keylog: self.keylog.as_mut(),
7984
7985 trace_id: &self.trace_id,
7986
7987 local_transport_params: self.local_transport_params.clone(),
7988
7989 recovery_config: self.recovery_config,
7990
7991 tx_cap_factor: self.tx_cap_factor,
7992
7993 pmtud: None,
7994
7995 is_server: self.is_server,
7996
7997 use_initial_max_data_as_flow_control_win: false,
7998 };
7999
8000 if self.handshake_completed {
8001 return self.handshake.process_post_handshake(&mut ex_data);
8002 }
8003
8004 match self.handshake.do_handshake(&mut ex_data) {
8005 Ok(_) => (),
8006
8007 Err(Error::Done) => {
8008 // Apply in-handshake configuration from callbacks if the path's
8009 // Recovery module can still be reinitilized.
8010 if self
8011 .paths
8012 .get_active()
8013 .map(|p| p.can_reinit_recovery())
8014 .unwrap_or(false)
8015 {
8016 if ex_data.recovery_config != self.recovery_config {
8017 if let Ok(path) = self.paths.get_active_mut() {
8018 self.recovery_config = ex_data.recovery_config;
8019 path.reinit_recovery(&self.recovery_config);
8020 }
8021 }
8022
8023 if ex_data.tx_cap_factor != self.tx_cap_factor {
8024 self.tx_cap_factor = ex_data.tx_cap_factor;
8025 }
8026
8027 if let Some((discover, max_probes)) = ex_data.pmtud {
8028 self.paths.set_discover_pmtu_on_existing_paths(
8029 discover,
8030 self.recovery_config.max_send_udp_payload_size,
8031 max_probes,
8032 );
8033 }
8034
8035 if ex_data.local_transport_params !=
8036 self.local_transport_params
8037 {
8038 self.streams.set_max_streams_bidi(
8039 ex_data
8040 .local_transport_params
8041 .initial_max_streams_bidi,
8042 );
8043
8044 self.local_transport_params =
8045 ex_data.local_transport_params;
8046 }
8047 }
8048
8049 if ex_data.use_initial_max_data_as_flow_control_win {
8050 self.enable_use_initial_max_data_as_flow_control_win();
8051 }
8052
8053 // Try to parse transport parameters as soon as the first flight
8054 // of handshake data is processed.
8055 //
8056 // This is potentially dangerous as the handshake hasn't been
8057 // completed yet, though it's required to be able to send data
8058 // in 0.5 RTT.
8059 let raw_params = self.handshake.quic_transport_params();
8060
8061 if !self.parsed_peer_transport_params && !raw_params.is_empty() {
8062 let peer_params = TransportParams::decode(
8063 raw_params,
8064 self.is_server,
8065 self.peer_transport_params_track_unknown,
8066 )?;
8067
8068 self.parse_peer_transport_params(peer_params)?;
8069 }
8070
8071 return Ok(());
8072 },
8073
8074 Err(e) => return Err(e),
8075 };
8076
8077 self.handshake_completed = self.handshake.is_completed();
8078
8079 self.alpn = self.handshake.alpn_protocol().to_vec();
8080
8081 let raw_params = self.handshake.quic_transport_params();
8082
8083 if !self.parsed_peer_transport_params && !raw_params.is_empty() {
8084 let peer_params = TransportParams::decode(
8085 raw_params,
8086 self.is_server,
8087 self.peer_transport_params_track_unknown,
8088 )?;
8089
8090 self.parse_peer_transport_params(peer_params)?;
8091 }
8092
8093 if self.handshake_completed {
8094 // The handshake is considered confirmed at the server when the
8095 // handshake completes, at which point we can also drop the
8096 // handshake epoch.
8097 if self.is_server {
8098 self.handshake_confirmed = true;
8099
8100 self.drop_epoch_state(packet::Epoch::Handshake, now);
8101 }
8102
8103 // Once the handshake is completed there's no point in processing
8104 // 0-RTT packets anymore, so clear the buffer now.
8105 self.undecryptable_pkts.clear();
8106
8107 trace!("{} connection established: proto={:?} cipher={:?} curve={:?} sigalg={:?} resumed={} {:?}",
8108 &self.trace_id,
8109 std::str::from_utf8(self.application_proto()),
8110 self.handshake.cipher(),
8111 self.handshake.curve(),
8112 self.handshake.sigalg(),
8113 self.handshake.is_resumed(),
8114 self.peer_transport_params);
8115 }
8116
8117 Ok(())
8118 }
8119
    /// Use the value of the initial max_data / initial stream max_data setting
    /// as the initial flow control window for the connection and streams.
    /// The connection-level flow control window will only be changed if it
    /// hasn't been auto tuned yet. For streams: only newly created streams
    /// receive the new setting.
    fn enable_use_initial_max_data_as_flow_control_win(&mut self) {
        self.flow_control.set_window_if_not_tuned_yet(
            self.local_transport_params.initial_max_data,
        );
        self.streams
            .set_use_initial_max_data_as_flow_control_win(true);
    }
8132
    /// Selects the packet type for the next outgoing packet.
    ///
    /// Epochs are considered from Initial to Application; the first one with
    /// pending work (crypto data, ACKs, lost frames, PTO probes) wins.
    /// Application data is only considered once established or in early
    /// data. Returns [`Error::Done`] when nothing needs to be sent on the
    /// path identified by `send_pid`.
    fn write_pkt_type(&self, send_pid: usize) -> Result<Type> {
        // On error send packet in the latest epoch available, but only send
        // 1-RTT ones when the handshake is completed.
        if self
            .local_error
            .as_ref()
            .is_some_and(|conn_err| !conn_err.is_app)
        {
            let epoch = match self.handshake.write_level() {
                crypto::Level::Initial => packet::Epoch::Initial,
                crypto::Level::ZeroRTT => unreachable!(),
                crypto::Level::Handshake => packet::Epoch::Handshake,
                crypto::Level::OneRTT => packet::Epoch::Application,
            };

            if !self.handshake_confirmed {
                match epoch {
                    // Downgrade the epoch to Handshake as the handshake is not
                    // completed yet.
                    packet::Epoch::Application => return Ok(Type::Handshake),

                    // Downgrade the epoch to Initial as the remote peer might
                    // not be able to decrypt handshake packets yet.
                    packet::Epoch::Handshake
                        if self.crypto_ctx[packet::Epoch::Initial].has_keys() =>
                        return Ok(Type::Initial),

                    _ => (),
                };
            }

            return Ok(Type::from_epoch(epoch));
        }

        for &epoch in packet::Epoch::epochs(
            packet::Epoch::Initial..=packet::Epoch::Application,
        ) {
            let crypto_ctx = &self.crypto_ctx[epoch];
            let pkt_space = &self.pkt_num_spaces[epoch];

            // Only send packets in a space when we have the send keys for it.
            if crypto_ctx.crypto_seal.is_none() {
                continue;
            }

            // We are ready to send data for this packet number space.
            if crypto_ctx.data_available() || pkt_space.ready() {
                return Ok(Type::from_epoch(epoch));
            }

            // There are lost frames in this packet number space.
            for (_, p) in self.paths.iter() {
                if p.recovery.has_lost_frames(epoch) {
                    return Ok(Type::from_epoch(epoch));
                }

                // We need to send PTO probe packets.
                if p.recovery.loss_probes(epoch) > 0 {
                    return Ok(Type::from_epoch(epoch));
                }
            }
        }

        // If there are flushable, almost full or blocked streams, use the
        // Application epoch.
        let send_path = self.paths.get(send_pid)?;
        if (self.is_established() || self.is_in_early_data()) &&
            (self.should_send_handshake_done() ||
                self.flow_control.should_update_max_data() ||
                self.should_send_max_data ||
                self.blocked_limit.is_some() ||
                self.streams_blocked_bidi_state
                    .has_pending_stream_blocked_frame() ||
                self.streams_blocked_uni_state
                    .has_pending_stream_blocked_frame() ||
                self.dgram_send_queue.has_pending() ||
                self.local_error
                    .as_ref()
                    .is_some_and(|conn_err| conn_err.is_app) ||
                self.should_send_max_streams_bidi ||
                self.streams.should_update_max_streams_bidi() ||
                self.should_send_max_streams_uni ||
                self.streams.should_update_max_streams_uni() ||
                self.streams.has_flushable() ||
                self.streams.has_almost_full() ||
                self.streams.has_blocked() ||
                self.streams.has_reset() ||
                self.streams.has_stopped() ||
                self.ids.has_new_scids() ||
                self.ids.has_retire_dcids() ||
                send_path
                    .pmtud
                    .as_ref()
                    .is_some_and(|pmtud| pmtud.should_probe()) ||
                send_path.needs_ack_eliciting ||
                send_path.probing_required())
        {
            // Only clients can send 0-RTT packets.
            if !self.is_server && self.is_in_early_data() {
                return Ok(Type::ZeroRTT);
            }

            return Ok(Type::Short);
        }

        Err(Error::Done)
    }
8241
    /// Returns the mutable stream with the given ID if it exists, or creates
    /// a new one otherwise.
    ///
    /// `local` indicates whether the stream is being opened by the local
    /// endpoint (as opposed to being created in response to peer activity).
    fn get_or_create_stream(
        &mut self, id: u64, local: bool,
    ) -> Result<&mut stream::Stream<F>> {
        self.streams.get_or_create(
            id,
            &self.local_transport_params,
            &self.peer_transport_params,
            local,
            self.is_server,
        )
    }
8255
8256 /// Processes an incoming frame.
8257 fn process_frame(
8258 &mut self, frame: frame::Frame, hdr: &Header, recv_path_id: usize,
8259 epoch: packet::Epoch, now: Instant,
8260 ) -> Result<()> {
8261 trace!("{} rx frm {:?}", self.trace_id, frame);
8262
8263 match frame {
8264 frame::Frame::Padding { .. } => (),
8265
8266 frame::Frame::Ping { .. } => (),
8267
8268 frame::Frame::ACK {
8269 ranges, ack_delay, ..
8270 } => {
8271 let ack_delay = ack_delay
8272 .checked_mul(2_u64.pow(
8273 self.peer_transport_params.ack_delay_exponent as u32,
8274 ))
8275 .ok_or(Error::InvalidFrame)?;
8276
8277 if epoch == packet::Epoch::Handshake ||
8278 (epoch == packet::Epoch::Application &&
8279 self.is_established())
8280 {
8281 self.peer_verified_initial_address = true;
8282 }
8283
8284 let handshake_status = self.handshake_status();
8285
8286 let is_app_limited = self.delivery_rate_check_if_app_limited();
8287
8288 let largest_acked = ranges.last().expect(
8289 "ACK frames should always have at least one ack range",
8290 );
8291
8292 for (_, p) in self.paths.iter_mut() {
8293 if self.pkt_num_spaces[epoch]
8294 .largest_tx_pkt_num
8295 .is_some_and(|largest_sent| largest_sent < largest_acked)
8296 {
8297 // https://www.rfc-editor.org/rfc/rfc9000#section-13.1
8298 // An endpoint SHOULD treat receipt of an acknowledgment
8299 // for a packet it did not send as
8300 // a connection error of type PROTOCOL_VIOLATION
8301 return Err(Error::InvalidAckRange);
8302 }
8303
8304 if is_app_limited {
8305 p.recovery.delivery_rate_update_app_limited(true);
8306 }
8307
8308 let OnAckReceivedOutcome {
8309 lost_packets,
8310 lost_bytes,
8311 acked_bytes,
8312 spurious_losses,
8313 } = p.recovery.on_ack_received(
8314 &ranges,
8315 ack_delay,
8316 epoch,
8317 handshake_status,
8318 now,
8319 self.pkt_num_manager.skip_pn(),
8320 &self.trace_id,
8321 )?;
8322
8323 let skip_pn = self.pkt_num_manager.skip_pn();
8324 let largest_acked =
8325 p.recovery.get_largest_acked_on_epoch(epoch);
8326
8327 // Consider the skip_pn validated if the peer has sent an ack
8328 // for a larger pkt number.
8329 if let Some((largest_acked, skip_pn)) =
8330 largest_acked.zip(skip_pn)
8331 {
8332 if largest_acked > skip_pn {
8333 self.pkt_num_manager.set_skip_pn(None);
8334 }
8335 }
8336
8337 self.lost_count += lost_packets;
8338 self.lost_bytes += lost_bytes as u64;
8339 self.acked_bytes += acked_bytes as u64;
8340 self.spurious_lost_count += spurious_losses;
8341 }
8342 },
8343
8344 frame::Frame::ResetStream {
8345 stream_id,
8346 error_code,
8347 final_size,
8348 } => {
8349 // Peer can't send on our unidirectional streams.
8350 if !stream::is_bidi(stream_id) &&
8351 stream::is_local(stream_id, self.is_server)
8352 {
8353 return Err(Error::InvalidStreamState(stream_id));
8354 }
8355
8356 let max_rx_data_left = self.max_rx_data() - self.rx_data;
8357
8358 // Get existing stream or create a new one, but if the stream
8359 // has already been closed and collected, ignore the frame.
8360 //
8361 // This can happen if e.g. an ACK frame is lost, and the peer
8362 // retransmits another frame before it realizes that the stream
8363 // is gone.
8364 //
8365 // Note that it makes it impossible to check if the frame is
8366 // illegal, since we have no state, but since we ignore the
8367 // frame, it should be fine.
8368 let stream = match self.get_or_create_stream(stream_id, false) {
8369 Ok(v) => v,
8370
8371 Err(Error::Done) => return Ok(()),
8372
8373 Err(e) => return Err(e),
8374 };
8375
8376 let was_readable = stream.is_readable();
8377 let priority_key = Arc::clone(&stream.priority_key);
8378
8379 let stream::RecvBufResetReturn {
8380 max_data_delta,
8381 consumed_flowcontrol,
8382 } = stream.recv.reset(error_code, final_size)?;
8383
8384 if max_data_delta > max_rx_data_left {
8385 return Err(Error::FlowControl);
8386 }
8387
8388 if !was_readable && stream.is_readable() {
8389 self.streams.insert_readable(&priority_key);
8390 }
8391
8392 self.rx_data += max_data_delta;
8393 // We dropped the receive buffer, return connection level
8394 // flow-control
8395 self.flow_control.add_consumed(consumed_flowcontrol);
8396
8397 self.reset_stream_remote_count =
8398 self.reset_stream_remote_count.saturating_add(1);
8399 },
8400
8401 frame::Frame::StopSending {
8402 stream_id,
8403 error_code,
8404 } => {
8405 // STOP_SENDING on a receive-only stream is a fatal error.
8406 if !stream::is_local(stream_id, self.is_server) &&
8407 !stream::is_bidi(stream_id)
8408 {
8409 return Err(Error::InvalidStreamState(stream_id));
8410 }
8411
8412 // Get existing stream or create a new one, but if the stream
8413 // has already been closed and collected, ignore the frame.
8414 //
8415 // This can happen if e.g. an ACK frame is lost, and the peer
8416 // retransmits another frame before it realizes that the stream
8417 // is gone.
8418 //
8419 // Note that it makes it impossible to check if the frame is
8420 // illegal, since we have no state, but since we ignore the
8421 // frame, it should be fine.
8422 let stream = match self.get_or_create_stream(stream_id, false) {
8423 Ok(v) => v,
8424
8425 Err(Error::Done) => return Ok(()),
8426
8427 Err(e) => return Err(e),
8428 };
8429
8430 let was_writable = stream.is_writable();
8431
8432 let priority_key = Arc::clone(&stream.priority_key);
8433
8434 // Try stopping the stream.
8435 if let Ok((final_size, unsent)) = stream.send.stop(error_code) {
8436 // Claw back some flow control allowance from data that was
8437 // buffered but not actually sent before the stream was
8438 // reset.
8439 //
8440 // Note that `tx_cap` will be updated later on, so no need
8441 // to touch it here.
8442 self.tx_data = self.tx_data.saturating_sub(unsent);
8443
8444 self.tx_buffered =
8445 self.tx_buffered.saturating_sub(unsent as usize);
8446
8447 // These drops in qlog are a bit weird, but the only way to
8448 // ensure that all bytes that are moved from App to Transport
8449 // in stream_do_send are eventually moved from Transport to
8450 // Dropped. Ideally we would add a Transport to Network
8451 // transition also as a way to indicate when bytes were
8452 // transmitted vs dropped without ever being sent.
8453 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
8454 let ev_data = EventData::QuicStreamDataMoved(
8455 qlog::events::quic::StreamDataMoved {
8456 stream_id: Some(stream_id),
8457 offset: Some(final_size),
8458 raw: Some(RawInfo {
8459 length: Some(unsent),
8460 ..Default::default()
8461 }),
8462 from: Some(DataRecipient::Transport),
8463 to: Some(DataRecipient::Dropped),
8464 ..Default::default()
8465 },
8466 );
8467
8468 q.add_event_data_with_instant(ev_data, now).ok();
8469 });
8470
8471 self.streams.insert_reset(stream_id, error_code, final_size);
8472
8473 if !was_writable {
8474 self.streams.insert_writable(&priority_key);
8475 }
8476
8477 self.stopped_stream_remote_count =
8478 self.stopped_stream_remote_count.saturating_add(1);
8479 self.reset_stream_local_count =
8480 self.reset_stream_local_count.saturating_add(1);
8481 }
8482 },
8483
8484 frame::Frame::Crypto { data } => {
8485 if data.max_off() >= MAX_CRYPTO_STREAM_OFFSET {
8486 return Err(Error::CryptoBufferExceeded);
8487 }
8488
8489 // Push the data to the stream so it can be re-ordered.
8490 self.crypto_ctx[epoch].crypto_stream.recv.write(data)?;
8491
8492 // Feed crypto data to the TLS state, if there's data
8493 // available at the expected offset.
8494 let mut crypto_buf = [0; 512];
8495
8496 let level = crypto::Level::from_epoch(epoch);
8497
8498 let stream = &mut self.crypto_ctx[epoch].crypto_stream;
8499
8500 while let Ok((read, _)) = stream.recv.emit(&mut crypto_buf) {
8501 let recv_buf = &crypto_buf[..read];
8502 self.handshake.provide_data(level, recv_buf)?;
8503 }
8504
8505 self.do_handshake(now)?;
8506 },
8507
8508 frame::Frame::CryptoHeader { .. } => unreachable!(),
8509
8510 // TODO: implement stateless retry
8511 frame::Frame::NewToken { .. } =>
8512 if self.is_server {
8513 return Err(Error::InvalidPacket);
8514 },
8515
8516 frame::Frame::Stream { stream_id, data } => {
8517 // Peer can't send on our unidirectional streams.
8518 if !stream::is_bidi(stream_id) &&
8519 stream::is_local(stream_id, self.is_server)
8520 {
8521 return Err(Error::InvalidStreamState(stream_id));
8522 }
8523
8524 let max_rx_data_left = self.max_rx_data() - self.rx_data;
8525
8526 // Get existing stream or create a new one, but if the stream
8527 // has already been closed and collected, ignore the frame.
8528 //
8529 // This can happen if e.g. an ACK frame is lost, and the peer
8530 // retransmits another frame before it realizes that the stream
8531 // is gone.
8532 //
8533 // Note that it makes it impossible to check if the frame is
8534 // illegal, since we have no state, but since we ignore the
8535 // frame, it should be fine.
8536 let stream = match self.get_or_create_stream(stream_id, false) {
8537 Ok(v) => v,
8538
8539 Err(Error::Done) => return Ok(()),
8540
8541 Err(e) => return Err(e),
8542 };
8543
8544 // Check for the connection-level flow control limit.
8545 let max_off_delta =
8546 data.max_off().saturating_sub(stream.recv.max_off());
8547
8548 if max_off_delta > max_rx_data_left {
8549 return Err(Error::FlowControl);
8550 }
8551
8552 let was_readable = stream.is_readable();
8553 let priority_key = Arc::clone(&stream.priority_key);
8554
8555 let was_draining = stream.recv.is_draining();
8556
8557 stream.recv.write(data)?;
8558
8559 if !was_readable && stream.is_readable() {
8560 self.streams.insert_readable(&priority_key);
8561 }
8562
8563 self.rx_data += max_off_delta;
8564
8565 if was_draining {
8566 // When a stream is in draining state it will not queue
8567 // incoming data for the application to read, so consider
8568 // the received data as consumed, which might trigger a flow
8569 // control update.
8570 self.flow_control.add_consumed(max_off_delta);
8571 }
8572 },
8573
8574 frame::Frame::StreamHeader { .. } => unreachable!(),
8575
8576 frame::Frame::MaxData { max } => {
8577 self.max_tx_data = cmp::max(self.max_tx_data, max);
8578 },
8579
8580 frame::Frame::MaxStreamData { stream_id, max } => {
8581 // Peer can't receive on its own unidirectional streams.
8582 if !stream::is_bidi(stream_id) &&
8583 !stream::is_local(stream_id, self.is_server)
8584 {
8585 return Err(Error::InvalidStreamState(stream_id));
8586 }
8587
8588 // Get existing stream or create a new one, but if the stream
8589 // has already been closed and collected, ignore the frame.
8590 //
8591 // This can happen if e.g. an ACK frame is lost, and the peer
8592 // retransmits another frame before it realizes that the stream
8593 // is gone.
8594 //
8595 // Note that it makes it impossible to check if the frame is
8596 // illegal, since we have no state, but since we ignore the
8597 // frame, it should be fine.
8598 let stream = match self.get_or_create_stream(stream_id, false) {
8599 Ok(v) => v,
8600
8601 Err(Error::Done) => return Ok(()),
8602
8603 Err(e) => return Err(e),
8604 };
8605
8606 let was_flushable = stream.is_flushable();
8607
8608 stream.send.update_max_data(max);
8609
8610 let writable = stream.is_writable();
8611
8612 let priority_key = Arc::clone(&stream.priority_key);
8613
8614 // If the stream is now flushable push it to the flushable queue,
8615 // but only if it wasn't already queued.
8616 if stream.is_flushable() && !was_flushable {
8617 let priority_key = Arc::clone(&stream.priority_key);
8618 self.streams.insert_flushable(&priority_key);
8619 }
8620
8621 if writable {
8622 self.streams.insert_writable(&priority_key);
8623 }
8624 },
8625
8626 frame::Frame::MaxStreamsBidi { max } => {
8627 if max > MAX_STREAM_ID {
8628 return Err(Error::InvalidFrame);
8629 }
8630
8631 self.streams.update_peer_max_streams_bidi(max);
8632 },
8633
8634 frame::Frame::MaxStreamsUni { max } => {
8635 if max > MAX_STREAM_ID {
8636 return Err(Error::InvalidFrame);
8637 }
8638
8639 self.streams.update_peer_max_streams_uni(max);
8640 },
8641
8642 frame::Frame::DataBlocked { .. } => {
8643 self.data_blocked_recv_count =
8644 self.data_blocked_recv_count.saturating_add(1);
8645 },
8646
8647 frame::Frame::StreamDataBlocked { .. } => {
8648 self.stream_data_blocked_recv_count =
8649 self.stream_data_blocked_recv_count.saturating_add(1);
8650 },
8651
8652 frame::Frame::StreamsBlockedBidi { limit } => {
8653 if limit > MAX_STREAM_ID {
8654 return Err(Error::InvalidFrame);
8655 }
8656
8657 self.streams_blocked_bidi_recv_count =
8658 self.streams_blocked_bidi_recv_count.saturating_add(1);
8659 },
8660
8661 frame::Frame::StreamsBlockedUni { limit } => {
8662 if limit > MAX_STREAM_ID {
8663 return Err(Error::InvalidFrame);
8664 }
8665
8666 self.streams_blocked_uni_recv_count =
8667 self.streams_blocked_uni_recv_count.saturating_add(1);
8668 },
8669
8670 frame::Frame::NewConnectionId {
8671 seq_num,
8672 retire_prior_to,
8673 conn_id,
8674 reset_token,
8675 } => {
8676 if self.ids.zero_length_dcid() {
8677 return Err(Error::InvalidState);
8678 }
8679
8680 let mut retired_path_ids = SmallVec::new();
8681
8682 // Retire pending path IDs before propagating the error code to
8683 // make sure retired connection IDs are not in use anymore.
8684 let new_dcid_res = self.ids.new_dcid(
8685 conn_id.into(),
8686 seq_num,
8687 u128::from_be_bytes(reset_token),
8688 retire_prior_to,
8689 &mut retired_path_ids,
8690 );
8691
8692 for (dcid_seq, pid) in retired_path_ids {
8693 let path = self.paths.get_mut(pid)?;
8694
8695 // Maybe the path already switched to another DCID.
8696 if path.active_dcid_seq != Some(dcid_seq) {
8697 continue;
8698 }
8699
8700 if let Some(new_dcid_seq) =
8701 self.ids.lowest_available_dcid_seq()
8702 {
8703 path.active_dcid_seq = Some(new_dcid_seq);
8704
8705 self.ids.link_dcid_to_path_id(new_dcid_seq, pid)?;
8706
8707 trace!(
8708 "{} path ID {} changed DCID: old seq num {} new seq num {}",
8709 self.trace_id, pid, dcid_seq, new_dcid_seq,
8710 );
8711 } else {
8712 // We cannot use this path anymore for now.
8713 path.active_dcid_seq = None;
8714
8715 trace!(
8716 "{} path ID {} cannot be used; DCID seq num {} has been retired",
8717 self.trace_id, pid, dcid_seq,
8718 );
8719 }
8720 }
8721
8722 // Propagate error (if any) now...
8723 new_dcid_res?;
8724 },
8725
8726 frame::Frame::RetireConnectionId { seq_num } => {
8727 if self.ids.zero_length_scid() {
8728 return Err(Error::InvalidState);
8729 }
8730
8731 if let Some(pid) = self.ids.retire_scid(seq_num, &hdr.dcid)? {
8732 let path = self.paths.get_mut(pid)?;
8733
8734 // Maybe we already linked a new SCID to that path.
8735 if path.active_scid_seq == Some(seq_num) {
8736 // XXX: We do not remove unused paths now, we instead
8737 // wait until we need to maintain more paths than the
8738 // host is willing to.
8739 path.active_scid_seq = None;
8740 }
8741 }
8742 },
8743
8744 frame::Frame::PathChallenge { data } => {
8745 self.path_challenge_rx_count += 1;
8746
8747 self.paths
8748 .get_mut(recv_path_id)?
8749 .on_challenge_received(data);
8750 },
8751
8752 frame::Frame::PathResponse { data } => {
8753 self.paths.on_response_received(data)?;
8754 },
8755
8756 frame::Frame::ConnectionClose {
8757 error_code, reason, ..
8758 } => {
8759 self.peer_error = Some(ConnectionError {
8760 is_app: false,
8761 error_code,
8762 reason,
8763 });
8764
8765 let path = self.paths.get_active()?;
8766 self.draining_timer = Some(now + (path.recovery.pto() * 3));
8767 },
8768
8769 frame::Frame::ApplicationClose { error_code, reason } => {
8770 self.peer_error = Some(ConnectionError {
8771 is_app: true,
8772 error_code,
8773 reason,
8774 });
8775
8776 let path = self.paths.get_active()?;
8777 self.draining_timer = Some(now + (path.recovery.pto() * 3));
8778 },
8779
8780 frame::Frame::HandshakeDone => {
8781 if self.is_server {
8782 return Err(Error::InvalidPacket);
8783 }
8784
8785 self.peer_verified_initial_address = true;
8786
8787 self.handshake_confirmed = true;
8788
8789 // Once the handshake is confirmed, we can drop Handshake keys.
8790 self.drop_epoch_state(packet::Epoch::Handshake, now);
8791 },
8792
8793 frame::Frame::Datagram { data } => {
8794 // Close the connection if DATAGRAMs are not enabled.
8795 // quiche always advertises support for 64K sized DATAGRAM
8796 // frames, as recommended by the standard, so we don't need a
8797 // size check.
8798 if !self.dgram_enabled() {
8799 return Err(Error::InvalidState);
8800 }
8801
8802 // If recv queue is full, discard oldest
8803 if self.dgram_recv_queue.is_full() {
8804 self.dgram_recv_queue.pop();
8805 }
8806
8807 self.dgram_recv_queue.push(data.into())?;
8808
8809 self.dgram_recv_count = self.dgram_recv_count.saturating_add(1);
8810
8811 let path = self.paths.get_mut(recv_path_id)?;
8812 path.dgram_recv_count = path.dgram_recv_count.saturating_add(1);
8813 },
8814
8815 frame::Frame::DatagramHeader { .. } => unreachable!(),
8816 }
8817
8818 Ok(())
8819 }
8820
8821 /// Drops the keys and recovery state for the given epoch.
8822 fn drop_epoch_state(&mut self, epoch: packet::Epoch, now: Instant) {
8823 let crypto_ctx = &mut self.crypto_ctx[epoch];
8824 if crypto_ctx.crypto_open.is_none() {
8825 return;
8826 }
8827 crypto_ctx.clear();
8828 self.pkt_num_spaces[epoch].clear();
8829
8830 let handshake_status = self.handshake_status();
8831 for (_, p) in self.paths.iter_mut() {
8832 p.recovery
8833 .on_pkt_num_space_discarded(epoch, handshake_status, now);
8834 }
8835
8836 trace!("{} dropped epoch {} state", self.trace_id, epoch);
8837 }
8838
8839 /// Returns the connection level flow control limit.
8840 fn max_rx_data(&self) -> u64 {
8841 self.flow_control.max_data()
8842 }
8843
8844 /// Returns true if the HANDSHAKE_DONE frame needs to be sent.
8845 fn should_send_handshake_done(&self) -> bool {
8846 self.is_established() && !self.handshake_done_sent && self.is_server
8847 }
8848
8849 /// Returns the idle timeout value.
8850 ///
8851 /// `None` is returned if both end-points disabled the idle timeout.
8852 fn idle_timeout(&self) -> Option<Duration> {
8853 // If the transport parameter is set to 0, then the respective endpoint
8854 // decided to disable the idle timeout. If both are disabled we should
8855 // not set any timeout.
8856 if self.local_transport_params.max_idle_timeout == 0 &&
8857 self.peer_transport_params.max_idle_timeout == 0
8858 {
8859 return None;
8860 }
8861
8862 // If the local endpoint or the peer disabled the idle timeout, use the
8863 // other peer's value, otherwise use the minimum of the two values.
8864 let idle_timeout = if self.local_transport_params.max_idle_timeout == 0 {
8865 self.peer_transport_params.max_idle_timeout
8866 } else if self.peer_transport_params.max_idle_timeout == 0 {
8867 self.local_transport_params.max_idle_timeout
8868 } else {
8869 cmp::min(
8870 self.local_transport_params.max_idle_timeout,
8871 self.peer_transport_params.max_idle_timeout,
8872 )
8873 };
8874
8875 let path_pto = match self.paths.get_active() {
8876 Ok(p) => p.recovery.pto(),
8877 Err(_) => Duration::ZERO,
8878 };
8879
8880 let idle_timeout = Duration::from_millis(idle_timeout);
8881 let idle_timeout = cmp::max(idle_timeout, 3 * path_pto);
8882
8883 Some(idle_timeout)
8884 }
8885
8886 /// Returns the connection's handshake status for use in loss recovery.
8887 fn handshake_status(&self) -> recovery::HandshakeStatus {
8888 recovery::HandshakeStatus {
8889 has_handshake_keys: self.crypto_ctx[packet::Epoch::Handshake]
8890 .has_keys(),
8891
8892 peer_verified_address: self.peer_verified_initial_address,
8893
8894 completed: self.is_established(),
8895 }
8896 }
8897
8898 /// Updates send capacity.
8899 fn update_tx_cap(&mut self) {
8900 let cwin_available = match self.paths.get_active() {
8901 Ok(p) => p.recovery.cwnd_available() as u64,
8902 Err(_) => 0,
8903 };
8904
8905 let cap =
8906 cmp::min(cwin_available, self.max_tx_data - self.tx_data) as usize;
8907 self.tx_cap = (cap as f64 * self.tx_cap_factor).ceil() as usize;
8908 }
8909
8910 fn delivery_rate_check_if_app_limited(&self) -> bool {
8911 // Enter the app-limited phase of delivery rate when these conditions
8912 // are met:
8913 //
8914 // - The remaining capacity is higher than available bytes in cwnd (there
8915 // is more room to send).
8916 // - New data since the last send() is smaller than available bytes in
8917 // cwnd (we queued less than what we can send).
8918 // - There is room to send more data in cwnd.
8919 //
8920 // In application-limited phases the transmission rate is limited by the
8921 // application rather than the congestion control algorithm.
8922 //
8923 // Note that this is equivalent to CheckIfApplicationLimited() from the
8924 // delivery rate draft. This is also separate from `recovery.app_limited`
8925 // and only applies to delivery rate calculation.
8926 let cwin_available = self
8927 .paths
8928 .iter()
8929 .filter(|&(_, p)| p.active())
8930 .map(|(_, p)| p.recovery.cwnd_available())
8931 .sum();
8932
8933 ((self.tx_buffered + self.dgram_send_queue_byte_size()) < cwin_available) &&
8934 (self.tx_data.saturating_sub(self.last_tx_data)) <
8935 cwin_available as u64 &&
8936 cwin_available > 0
8937 }
8938
    /// Checks the `tx_buffered` accounting invariant, flagging the tracking
    /// state as inconsistent when it is violated.
    fn check_tx_buffered_invariant(&mut self) {
        // tx_buffered should track bytes queued in the stream buffers
        // and unacked retransmittable bytes in the network.
        // If tx_buffered > 0, mark tx_buffered_state as inconsistent when
        // there are no flushable streams and no inflight bytes (nothing can
        // account for the buffered bytes).
        //
        // It is normal to have tx_buffered == 0 while there are inflight
        // bytes, since not all QUIC frames are retransmittable; inflight
        // tracks all bytes on the network which are subject to congestion
        // control.
        if self.tx_buffered > 0 &&
            !self.streams.has_flushable() &&
            !self
                .paths
                .iter()
                .any(|(_, p)| p.recovery.bytes_in_flight() > 0)
        {
            self.tx_buffered_state = TxBufferTrackingState::Inconsistent;
        }
    }
8958
8959 fn set_initial_dcid(
8960 &mut self, cid: ConnectionId<'static>, reset_token: Option<u128>,
8961 path_id: usize,
8962 ) -> Result<()> {
8963 self.ids.set_initial_dcid(cid, reset_token, Some(path_id));
8964 self.paths.get_mut(path_id)?.active_dcid_seq = Some(0);
8965
8966 Ok(())
8967 }
8968
    /// Selects the path that the incoming packet belongs to, or creates a new
    /// one if no existing path matches.
    ///
    /// `recv_pid` is the already-known path ID for the packet's 4-tuple (if
    /// any), `dcid` is the destination CID carried by the packet, `buf_len`
    /// is the received datagram's size, and `info` carries the packet's
    /// addressing information.
    fn get_or_create_recv_path_id(
        &mut self, recv_pid: Option<usize>, dcid: &ConnectionId, buf_len: usize,
        info: &RecvInfo,
    ) -> Result<usize> {
        let ids = &mut self.ids;

        // The packet's DCID must match one of our source CIDs.
        let (in_scid_seq, mut in_scid_pid) =
            ids.find_scid_seq(dcid).ok_or(Error::InvalidState)?;

        if let Some(recv_pid) = recv_pid {
            // If the path observes a change of SCID used, note it.
            let recv_path = self.paths.get_mut(recv_pid)?;

            let cid_entry =
                recv_path.active_scid_seq.and_then(|v| ids.get_scid(v).ok());

            if cid_entry.map(|e| &e.cid) != Some(dcid) {
                let incoming_cid_entry = ids.get_scid(in_scid_seq)?;

                let prev_recv_pid =
                    incoming_cid_entry.path_id.unwrap_or(recv_pid);

                if prev_recv_pid != recv_pid {
                    trace!(
                        "{} peer reused CID {:?} from path {} on path {}",
                        self.trace_id,
                        dcid,
                        prev_recv_pid,
                        recv_pid
                    );

                    // TODO: reset congestion control.
                }

                trace!(
                    "{} path ID {} now see SCID with seq num {}",
                    self.trace_id,
                    recv_pid,
                    in_scid_seq
                );

                // Record the SCID now in use on this path.
                recv_path.active_scid_seq = Some(in_scid_seq);
                ids.link_scid_to_path_id(in_scid_seq, recv_pid)?;
            }

            return Ok(recv_pid);
        }

        // This is a new 4-tuple. See if the CID has not been assigned on
        // another path.

        // Ignore this step if we are using zero-length SCID.
        if ids.zero_length_scid() {
            in_scid_pid = None;
        }

        if let Some(in_scid_pid) = in_scid_pid {
            // This CID has been used by another path. If we have the
            // room to do so, create a new `Path` structure holding this
            // new 4-tuple. Otherwise, drop the packet.
            let old_path = self.paths.get_mut(in_scid_pid)?;
            let old_local_addr = old_path.local_addr();
            let old_peer_addr = old_path.peer_addr();

            trace!(
                "{} reused CID seq {} of ({},{}) (path {}) on ({},{})",
                self.trace_id,
                in_scid_seq,
                old_local_addr,
                old_peer_addr,
                in_scid_pid,
                info.to,
                info.from
            );

            // Notify the application.
            self.paths.notify_event(PathEvent::ReusedSourceConnectionId(
                in_scid_seq,
                (old_local_addr, old_peer_addr),
                (info.to, info.from),
            ));
        }

        // This is a new path using an unassigned CID; create it!
        let mut path = path::Path::new(
            info.to,
            info.from,
            &self.recovery_config,
            self.path_challenge_recv_max_queue_len,
            false,
            None,
        );

        // Cap what may be sent on the not-yet-validated path to a multiple
        // of what was received on it.
        path.max_send_bytes = buf_len * self.max_amplification_factor;
        path.active_scid_seq = Some(in_scid_seq);

        // Automatically probes the new path.
        path.request_validation();

        let pid = self.paths.insert_path(path, self.is_server)?;

        // Do not record path reuse.
        if in_scid_pid.is_none() {
            ids.link_scid_to_path_id(in_scid_seq, pid)?;
        }

        Ok(pid)
    }
9079
9080 /// Selects the path on which the next packet must be sent.
9081 fn get_send_path_id(
9082 &self, from: Option<SocketAddr>, to: Option<SocketAddr>,
9083 ) -> Result<usize> {
9084 // A probing packet must be sent, but only if the connection is fully
9085 // established.
9086 if self.is_established() {
9087 let mut probing = self
9088 .paths
9089 .iter()
9090 .filter(|(_, p)| from.is_none() || Some(p.local_addr()) == from)
9091 .filter(|(_, p)| to.is_none() || Some(p.peer_addr()) == to)
9092 .filter(|(_, p)| p.active_dcid_seq.is_some())
9093 .filter(|(_, p)| p.probing_required())
9094 .map(|(pid, _)| pid);
9095
9096 if let Some(pid) = probing.next() {
9097 return Ok(pid);
9098 }
9099 }
9100
9101 if let Some((pid, p)) = self.paths.get_active_with_pid() {
9102 if from.is_some() && Some(p.local_addr()) != from {
9103 return Err(Error::Done);
9104 }
9105
9106 if to.is_some() && Some(p.peer_addr()) != to {
9107 return Err(Error::Done);
9108 }
9109
9110 return Ok(pid);
9111 };
9112
9113 Err(Error::InvalidState)
9114 }
9115
9116 /// Sets the path with identifier 'path_id' to be active.
9117 fn set_active_path(&mut self, path_id: usize, now: Instant) -> Result<()> {
9118 if let Ok(old_active_path) = self.paths.get_active_mut() {
9119 for &e in packet::Epoch::epochs(
9120 packet::Epoch::Initial..=packet::Epoch::Application,
9121 ) {
9122 let (lost_packets, lost_bytes) = old_active_path
9123 .recovery
9124 .on_path_change(e, now, &self.trace_id);
9125
9126 self.lost_count += lost_packets;
9127 self.lost_bytes += lost_bytes as u64;
9128 }
9129 }
9130
9131 self.paths.set_active_path(path_id)
9132 }
9133
9134 /// Handles potential connection migration.
9135 fn on_peer_migrated(
9136 &mut self, new_pid: usize, disable_dcid_reuse: bool, now: Instant,
9137 ) -> Result<()> {
9138 let active_path_id = self.paths.get_active_path_id()?;
9139
9140 if active_path_id == new_pid {
9141 return Ok(());
9142 }
9143
9144 self.set_active_path(new_pid, now)?;
9145
9146 let no_spare_dcid =
9147 self.paths.get_mut(new_pid)?.active_dcid_seq.is_none();
9148
9149 if no_spare_dcid && !disable_dcid_reuse {
9150 self.paths.get_mut(new_pid)?.active_dcid_seq =
9151 self.paths.get_mut(active_path_id)?.active_dcid_seq;
9152 }
9153
9154 Ok(())
9155 }
9156
9157 /// Creates a new client-side path.
9158 fn create_path_on_client(
9159 &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
9160 ) -> Result<usize> {
9161 if self.is_server {
9162 return Err(Error::InvalidState);
9163 }
9164
9165 // If we use zero-length SCID and go over our local active CID limit,
9166 // the `insert_path()` call will raise an error.
9167 if !self.ids.zero_length_scid() && self.ids.available_scids() == 0 {
9168 return Err(Error::OutOfIdentifiers);
9169 }
9170
9171 // Do we have a spare DCID? If we are using zero-length DCID, just use
9172 // the default having sequence 0 (note that if we exceed our local CID
9173 // limit, the `insert_path()` call will raise an error.
9174 let dcid_seq = if self.ids.zero_length_dcid() {
9175 0
9176 } else {
9177 self.ids
9178 .lowest_available_dcid_seq()
9179 .ok_or(Error::OutOfIdentifiers)?
9180 };
9181
9182 let mut path = path::Path::new(
9183 local_addr,
9184 peer_addr,
9185 &self.recovery_config,
9186 self.path_challenge_recv_max_queue_len,
9187 false,
9188 None,
9189 );
9190 path.active_dcid_seq = Some(dcid_seq);
9191
9192 let pid = self
9193 .paths
9194 .insert_path(path, false)
9195 .map_err(|_| Error::OutOfIdentifiers)?;
9196 self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
9197
9198 Ok(pid)
9199 }
9200
    /// Marks the connection as closed and does any related tidy-up, such as
    /// emitting a final qlog `connection_closed` event and shutting down the
    /// qlog streamer.
    fn mark_closed(&mut self) {
        #[cfg(feature = "qlog")]
        {
            // Derive the qlog close event from who closed and why:
            // (established?, timed out?, peer error, local error).
            let cc = match (self.is_established(), self.timed_out, &self.peer_error, &self.local_error) {
                (false, _, _, _) => qlog::events::quic::ConnectionClosed {
                    initiator: Some(TransportInitiator::Local),
                    connection_error: None,
                    application_error: None,
                    error_code: None,
                    internal_code: None,
                    reason: Some("Failed to establish connection".to_string()),
                    trigger: Some(qlog::events::quic::ConnectionClosedTrigger::HandshakeTimeout)
                },

                (true, true, _, _) => qlog::events::quic::ConnectionClosed {
                    initiator: Some(TransportInitiator::Local),
                    connection_error: None,
                    application_error: None,
                    error_code: None,
                    internal_code: None,
                    reason: Some("Idle timeout".to_string()),
                    trigger: Some(qlog::events::quic::ConnectionClosedTrigger::IdleTimeout)
                },

                // Close initiated by the remote peer.
                (true, false, Some(peer_error), None) => {
                    let (connection_code, application_error, trigger) = if peer_error.is_app {
                        (None, Some(qlog::events::ApplicationError::Unknown), None)
                    } else {
                        let trigger = if peer_error.error_code == WireErrorCode::NoError as u64 {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Clean)
                        } else {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Error)
                        };

                        (Some(qlog::events::ConnectionClosedEventError::TransportError(qlog::events::quic::TransportError::Unknown)), None, trigger)
                    };

                    // TODO: select more appropriate connection_code and application_error than unknown.
                    qlog::events::quic::ConnectionClosed {
                        initiator: Some(TransportInitiator::Remote),
                        connection_error: connection_code,
                        application_error,
                        error_code: Some(peer_error.error_code),
                        internal_code: None,
                        reason: Some(String::from_utf8_lossy(&peer_error.reason).to_string()),
                        trigger,
                    }
                },

                // Close initiated by this endpoint.
                (true, false, None, Some(local_error)) => {
                    let (connection_code, application_error, trigger) = if local_error.is_app {
                        (None, Some(qlog::events::ApplicationError::Unknown), None)
                    } else {
                        let trigger = if local_error.error_code == WireErrorCode::NoError as u64 {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Clean)
                        } else {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Error)
                        };

                        (Some(qlog::events::ConnectionClosedEventError::TransportError(qlog::events::quic::TransportError::Unknown)), None, trigger)
                    };

                    // TODO: select more appropriate connection_code and application_error than unknown.
                    qlog::events::quic::ConnectionClosed {
                        initiator: Some(TransportInitiator::Local),
                        connection_error: connection_code,
                        application_error,
                        error_code: Some(local_error.error_code),
                        internal_code: None,
                        reason: Some(String::from_utf8_lossy(&local_error.reason).to_string()),
                        trigger,
                    }
                },

                // Remaining combinations (neither, or both, error fields set)
                // carry no attributable information; emit an empty event.
                _ => qlog::events::quic::ConnectionClosed {
                    initiator: None,
                    connection_error: None,
                    application_error: None,
                    error_code: None,
                    internal_code: None,
                    reason: None,
                    trigger: None,
                },
            };

            qlog_with_type!(QLOG_CONNECTION_CLOSED, self.qlog, q, {
                let ev_data = EventData::QuicConnectionClosed(cc);

                q.add_event_data_now(ev_data).ok();
            });
            self.qlog.streamer = None;
        }
        self.closed = true;
    }
9296}
9297
#[cfg(feature = "boringssl-boring-crate")]
impl<F: BufFactory> AsMut<boring::ssl::SslRef> for Connection<F> {
    /// Exposes the underlying BoringSSL session so callers can adjust TLS
    /// settings directly on the connection.
    fn as_mut(&mut self) -> &mut boring::ssl::SslRef {
        self.handshake.ssl_mut()
    }
}
9304
9305/// Maps an `Error` to `Error::Done`, or itself.
9306///
9307/// When a received packet that hasn't yet been authenticated triggers a failure
9308/// it should, in most cases, be ignored, instead of raising a connection error,
9309/// to avoid potential man-in-the-middle and man-on-the-side attacks.
9310///
9311/// However, if no other packet was previously received, the connection should
9312/// indeed be closed as the received packet might just be network background
9313/// noise, and it shouldn't keep resources occupied indefinitely.
9314///
9315/// This function maps an error to `Error::Done` to ignore a packet failure
9316/// without aborting the connection, except when no other packet was previously
9317/// received, in which case the error itself is returned, but only on the
9318/// server-side as the client will already have armed the idle timer.
9319///
9320/// This must only be used for errors preceding packet authentication. Failures
9321/// happening after a packet has been authenticated should still cause the
9322/// connection to be aborted.
9323fn drop_pkt_on_err(
9324 e: Error, recv_count: usize, is_server: bool, trace_id: &str,
9325) -> Error {
9326 // On the server, if no other packet has been successfully processed, abort
9327 // the connection to avoid keeping the connection open when only junk is
9328 // received.
9329 if is_server && recv_count == 0 {
9330 return e;
9331 }
9332
9333 trace!("{trace_id} dropped invalid packet");
9334
9335 // Ignore other invalid packets that haven't been authenticated to prevent
9336 // man-in-the-middle and man-on-the-side attacks.
9337 Error::Done
9338}
9339
/// Formats a `(source, destination)` address pair as `src:… dst:…`,
/// rendering nothing when either address is unspecified (wildcard).
struct AddrTupleFmt(SocketAddr, SocketAddr);

impl std::fmt::Display for AddrTupleFmt {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let src = &self.0;
        let dst = &self.1;

        // Skip the tuple entirely when either side is a wildcard address.
        if src.ip().is_unspecified() || dst.ip().is_unspecified() {
            return Ok(());
        }

        write!(f, "src:{src} dst:{dst}")
    }
}
9353
/// Statistics about the connection.
///
/// A connection's statistics can be collected using the [`stats()`] method.
///
/// [`stats()`]: struct.Connection.html#method.stats
#[derive(Clone, Default)]
#[non_exhaustive]
pub struct Stats {
    /// The number of QUIC packets received.
    pub recv: usize,

    /// The number of QUIC packets sent.
    pub sent: usize,

    /// The number of QUIC packets that were lost.
    pub lost: usize,

    /// The number of QUIC packets that were marked as lost but later acked.
    pub spurious_lost: usize,

    /// The number of sent QUIC packets with retransmitted data.
    pub retrans: usize,

    /// The number of sent bytes.
    pub sent_bytes: u64,

    /// The number of received bytes.
    pub recv_bytes: u64,

    /// The number of sent bytes that were acked.
    pub acked_bytes: u64,

    /// The number of sent bytes that were declared lost.
    pub lost_bytes: u64,

    /// The number of stream bytes retransmitted.
    pub stream_retrans_bytes: u64,

    /// The number of DATAGRAM frames received.
    pub dgram_recv: usize,

    /// The number of DATAGRAM frames sent.
    pub dgram_sent: usize,

    /// The number of known paths for the connection.
    pub paths_count: usize,

    /// The number of streams reset by local.
    pub reset_stream_count_local: u64,

    /// The number of streams stopped by local.
    pub stopped_stream_count_local: u64,

    /// The number of streams reset by remote.
    pub reset_stream_count_remote: u64,

    /// The number of streams stopped by remote.
    pub stopped_stream_count_remote: u64,

    /// The number of DATA_BLOCKED frames sent due to hitting the connection
    /// flow control limit.
    pub data_blocked_sent_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames sent due to a stream hitting
    /// the stream flow control limit.
    pub stream_data_blocked_sent_count: u64,

    /// The number of DATA_BLOCKED frames received from the remote.
    pub data_blocked_recv_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames received from the remote.
    pub stream_data_blocked_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames for bidirectional streams received
    /// from the remote, indicating the peer is blocked on opening new
    /// bidirectional streams.
    pub streams_blocked_bidi_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames for unidirectional streams received
    /// from the remote, indicating the peer is blocked on opening new
    /// unidirectional streams.
    pub streams_blocked_uni_recv_count: u64,

    /// The total number of PATH_CHALLENGE frames that were received.
    pub path_challenge_rx_count: u64,

    /// The number of times send() was blocked because the anti-amplification
    /// budget (bytes received × max_amplification_factor) was exhausted.
    pub amplification_limited_count: u64,

    /// Total duration during which this side of the connection was
    /// actively sending bytes or waiting for those bytes to be acked.
    pub bytes_in_flight_duration: Duration,

    /// Health state of the connection's tx_buffered accounting.
    pub tx_buffered_state: TxBufferTrackingState,
}
9451
9452impl std::fmt::Debug for Stats {
9453 #[inline]
9454 fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
9455 write!(
9456 f,
9457 "recv={} sent={} lost={} retrans={}",
9458 self.recv, self.sent, self.lost, self.retrans,
9459 )?;
9460
9461 write!(
9462 f,
9463 " sent_bytes={} recv_bytes={} lost_bytes={}",
9464 self.sent_bytes, self.recv_bytes, self.lost_bytes,
9465 )?;
9466
9467 Ok(())
9468 }
9469}
9470
9471#[doc(hidden)]
9472#[cfg(any(test, feature = "internal"))]
9473pub mod test_utils;
9474
9475#[cfg(test)]
9476mod tests;
9477
9478pub use crate::packet::ConnectionId;
9479pub use crate::packet::Header;
9480pub use crate::packet::Type;
9481
9482pub use crate::path::PathEvent;
9483pub use crate::path::PathStats;
9484pub use crate::path::SocketAddrIter;
9485
9486pub use crate::recovery::BbrBwLoReductionStrategy;
9487pub use crate::recovery::BbrParams;
9488pub use crate::recovery::CongestionControlAlgorithm;
9489pub use crate::recovery::StartupExit;
9490pub use crate::recovery::StartupExitReason;
9491
9492pub use crate::stream::StreamIter;
9493
9494pub use crate::transport_params::TransportParams;
9495pub use crate::transport_params::UnknownTransportParameter;
9496pub use crate::transport_params::UnknownTransportParameterIterator;
9497pub use crate::transport_params::UnknownTransportParameters;
9498pub use crate::transport_params::MAX_ACK_DELAY_EXPONENT;
9499
9500pub use crate::buffers::BufFactory;
9501pub use crate::buffers::BufSplit;
9502
9503pub use crate::error::ConnectionError;
9504pub use crate::error::Error;
9505pub use crate::error::Result;
9506pub use crate::error::WireErrorCode;
9507
9508mod buffers;
9509mod cid;
9510mod crypto;
9511mod dgram;
9512mod error;
9513#[cfg(feature = "ffi")]
9514mod ffi;
9515mod flowcontrol;
9516mod frame;
9517pub mod h3;
9518mod minmax;
9519mod packet;
9520mod path;
9521mod pmtud;
9522mod rand;
9523mod range_buf;
9524mod ranges;
9525mod recovery;
9526mod stream;
9527mod tls;
9528mod transport_params;