quiche/lib.rs
1// Copyright (C) 2018-2019, Cloudflare, Inc.
2// All rights reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// * Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// * Redistributions in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
16// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
19// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27//! 🥧 Savoury implementation of the QUIC transport protocol and HTTP/3.
28//!
29//! [quiche] is an implementation of the QUIC transport protocol and HTTP/3 as
30//! specified by the [IETF]. It provides a low level API for processing QUIC
31//! packets and handling connection state. The application is responsible for
32//! providing I/O (e.g. sockets handling) as well as an event loop with support
33//! for timers.
34//!
35//! [quiche]: https://github.com/cloudflare/quiche/
//! [IETF]: https://quicwg.org/
37//!
38//! ## Configuring connections
39//!
40//! The first step in establishing a QUIC connection using quiche is creating a
41//! [`Config`] object:
42//!
43//! ```
44//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
45//! config.set_application_protos(&[b"example-proto"]);
46//!
47//! // Additional configuration specific to application and use case...
48//! # Ok::<(), quiche::Error>(())
49//! ```
50//!
51//! The [`Config`] object controls important aspects of the QUIC connection such
52//! as QUIC version, ALPN IDs, flow control, congestion control, idle timeout
53//! and other properties or features.
54//!
55//! QUIC is a general-purpose transport protocol and there are several
56//! configuration properties where there is no reasonable default value. For
57//! example, the permitted number of concurrent streams of any particular type
58//! is dependent on the application running over QUIC, and other use-case
59//! specific concerns.
60//!
//! quiche defaults several properties to zero; applications most likely need
62//! to set these to something else to satisfy their needs using the following:
63//!
64//! - [`set_initial_max_streams_bidi()`]
65//! - [`set_initial_max_streams_uni()`]
66//! - [`set_initial_max_data()`]
67//! - [`set_initial_max_stream_data_bidi_local()`]
68//! - [`set_initial_max_stream_data_bidi_remote()`]
69//! - [`set_initial_max_stream_data_uni()`]
70//!
71//! [`Config`] also holds TLS configuration. This can be changed by mutators on
//! an existing object, or by constructing a TLS context manually and
73//! creating a configuration using [`with_boring_ssl_ctx_builder()`].
74//!
75//! A configuration object can be shared among multiple connections.
76//!
77//! ### Connection setup
78//!
79//! On the client-side the [`connect()`] utility function can be used to create
80//! a new connection, while [`accept()`] is for servers:
81//!
82//! ```
83//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
84//! # let server_name = "quic.tech";
85//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
86//! # let peer = "127.0.0.1:1234".parse().unwrap();
87//! # let local = "127.0.0.1:4321".parse().unwrap();
88//! // Client connection.
89//! let conn =
90//! quiche::connect(Some(&server_name), &scid, local, peer, &mut config)?;
91//!
92//! // Server connection.
93//! # let peer = "127.0.0.1:1234".parse().unwrap();
94//! # let local = "127.0.0.1:4321".parse().unwrap();
95//! let conn = quiche::accept(&scid, None, local, peer, &mut config)?;
96//! # Ok::<(), quiche::Error>(())
97//! ```
98//!
99//! In both cases, the application is responsible for generating a new source
100//! connection ID that will be used to identify the new connection.
101//!
//! The application also needs to pass the address of the remote peer of the
103//! connection: in the case of a client that would be the address of the server
104//! it is trying to connect to, and for a server that is the address of the
105//! client that initiated the connection.
106//!
107//! ## Handling incoming packets
108//!
109//! Using the connection's [`recv()`] method the application can process
110//! incoming packets that belong to that connection from the network:
111//!
112//! ```no_run
113//! # let mut buf = [0; 512];
114//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
115//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
116//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
117//! # let peer = "127.0.0.1:1234".parse().unwrap();
118//! # let local = "127.0.0.1:4321".parse().unwrap();
119//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
120//! let to = socket.local_addr().unwrap();
121//!
122//! loop {
123//! let (read, from) = socket.recv_from(&mut buf).unwrap();
124//!
125//! let recv_info = quiche::RecvInfo { from, to };
126//!
127//! let read = match conn.recv(&mut buf[..read], recv_info) {
128//! Ok(v) => v,
129//!
130//! Err(quiche::Error::Done) => {
131//! // Done reading.
132//! break;
133//! },
134//!
135//! Err(e) => {
136//! // An error occurred, handle it.
137//! break;
138//! },
139//! };
140//! }
141//! # Ok::<(), quiche::Error>(())
142//! ```
143//!
144//! The application has to pass a [`RecvInfo`] structure in order to provide
145//! additional information about the received packet (such as the address it
146//! was received from).
147//!
148//! ## Generating outgoing packets
149//!
//! Outgoing packets are generated using the connection's [`send()`] method
151//! instead:
152//!
153//! ```no_run
154//! # let mut out = [0; 512];
155//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
156//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
157//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
158//! # let peer = "127.0.0.1:1234".parse().unwrap();
159//! # let local = "127.0.0.1:4321".parse().unwrap();
160//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
161//! loop {
162//! let (write, send_info) = match conn.send(&mut out) {
163//! Ok(v) => v,
164//!
165//! Err(quiche::Error::Done) => {
166//! // Done writing.
167//! break;
168//! },
169//!
170//! Err(e) => {
171//! // An error occurred, handle it.
172//! break;
173//! },
174//! };
175//!
176//! socket.send_to(&out[..write], &send_info.to).unwrap();
177//! }
178//! # Ok::<(), quiche::Error>(())
179//! ```
180//!
181//! The application will be provided with a [`SendInfo`] structure providing
182//! additional information about the newly created packet (such as the address
183//! the packet should be sent to).
184//!
185//! When packets are sent, the application is responsible for maintaining a
186//! timer to react to time-based connection events. The timer expiration can be
187//! obtained using the connection's [`timeout()`] method.
188//!
189//! ```
190//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
191//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
192//! # let peer = "127.0.0.1:1234".parse().unwrap();
193//! # let local = "127.0.0.1:4321".parse().unwrap();
194//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
195//! let timeout = conn.timeout();
196//! # Ok::<(), quiche::Error>(())
197//! ```
198//!
199//! The application is responsible for providing a timer implementation, which
200//! can be specific to the operating system or networking framework used. When
201//! a timer expires, the connection's [`on_timeout()`] method should be called,
202//! after which additional packets might need to be sent on the network:
203//!
204//! ```no_run
205//! # let mut out = [0; 512];
206//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
207//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
208//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
209//! # let peer = "127.0.0.1:1234".parse().unwrap();
210//! # let local = "127.0.0.1:4321".parse().unwrap();
211//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
212//! // Timeout expired, handle it.
213//! conn.on_timeout();
214//!
215//! // Send more packets as needed after timeout.
216//! loop {
217//! let (write, send_info) = match conn.send(&mut out) {
218//! Ok(v) => v,
219//!
220//! Err(quiche::Error::Done) => {
221//! // Done writing.
222//! break;
223//! },
224//!
225//! Err(e) => {
226//! // An error occurred, handle it.
227//! break;
228//! },
229//! };
230//!
231//! socket.send_to(&out[..write], &send_info.to).unwrap();
232//! }
233//! # Ok::<(), quiche::Error>(())
234//! ```
235//!
236//! ### Pacing
237//!
238//! It is recommended that applications [pace] sending of outgoing packets to
239//! avoid creating packet bursts that could cause short-term congestion and
240//! losses in the network.
241//!
242//! quiche exposes pacing hints for outgoing packets through the [`at`] field
243//! of the [`SendInfo`] structure that is returned by the [`send()`] method.
244//! This field represents the time when a specific packet should be sent into
245//! the network.
246//!
247//! Applications can use these hints by artificially delaying the sending of
248//! packets through platform-specific mechanisms (such as the [`SO_TXTIME`]
249//! socket option on Linux), or custom methods (for example by using user-space
250//! timers).
251//!
252//! [pace]: https://datatracker.ietf.org/doc/html/rfc9002#section-7.7
253//! [`SO_TXTIME`]: https://man7.org/linux/man-pages/man8/tc-etf.8.html
254//!
255//! ## Sending and receiving stream data
256//!
257//! After some back and forth, the connection will complete its handshake and
258//! will be ready for sending or receiving application data.
259//!
260//! Data can be sent on a stream by using the [`stream_send()`] method:
261//!
262//! ```no_run
263//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
264//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
265//! # let peer = "127.0.0.1:1234".parse().unwrap();
266//! # let local = "127.0.0.1:4321".parse().unwrap();
267//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
268//! if conn.is_established() {
269//! // Handshake completed, send some data on stream 0.
270//! conn.stream_send(0, b"hello", true)?;
271//! }
272//! # Ok::<(), quiche::Error>(())
273//! ```
274//!
275//! The application can check whether there are any readable streams by using
276//! the connection's [`readable()`] method, which returns an iterator over all
277//! the streams that have outstanding data to read.
278//!
279//! The [`stream_recv()`] method can then be used to retrieve the application
280//! data from the readable stream:
281//!
282//! ```no_run
283//! # let mut buf = [0; 512];
284//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
285//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
286//! # let peer = "127.0.0.1:1234".parse().unwrap();
287//! # let local = "127.0.0.1:4321".parse().unwrap();
288//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
289//! if conn.is_established() {
290//! // Iterate over readable streams.
291//! for stream_id in conn.readable() {
292//! // Stream is readable, read until there's no more data.
293//! while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
294//! println!("Got {} bytes on stream {}", read, stream_id);
295//! }
296//! }
297//! }
298//! # Ok::<(), quiche::Error>(())
299//! ```
300//!
301//! ## HTTP/3
302//!
303//! The quiche [HTTP/3 module] provides a high level API for sending and
304//! receiving HTTP requests and responses on top of the QUIC transport protocol.
305//!
306//! [`Config`]: https://docs.quic.tech/quiche/struct.Config.html
307//! [`set_initial_max_streams_bidi()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_streams_bidi
308//! [`set_initial_max_streams_uni()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_streams_uni
309//! [`set_initial_max_data()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_data
310//! [`set_initial_max_stream_data_bidi_local()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_bidi_local
311//! [`set_initial_max_stream_data_bidi_remote()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_bidi_remote
312//! [`set_initial_max_stream_data_uni()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_uni
313//! [`with_boring_ssl_ctx_builder()`]: https://docs.quic.tech/quiche/struct.Config.html#method.with_boring_ssl_ctx_builder
314//! [`connect()`]: fn.connect.html
315//! [`accept()`]: fn.accept.html
316//! [`recv()`]: struct.Connection.html#method.recv
317//! [`RecvInfo`]: struct.RecvInfo.html
318//! [`send()`]: struct.Connection.html#method.send
319//! [`SendInfo`]: struct.SendInfo.html
320//! [`at`]: struct.SendInfo.html#structfield.at
321//! [`timeout()`]: struct.Connection.html#method.timeout
322//! [`on_timeout()`]: struct.Connection.html#method.on_timeout
323//! [`stream_send()`]: struct.Connection.html#method.stream_send
324//! [`readable()`]: struct.Connection.html#method.readable
325//! [`stream_recv()`]: struct.Connection.html#method.stream_recv
326//! [HTTP/3 module]: h3/index.html
327//!
328//! ## Congestion Control
329//!
330//! The quiche library provides a high-level API for configuring which
331//! congestion control algorithm to use throughout the QUIC connection.
332//!
333//! When a QUIC connection is created, the application can optionally choose
334//! which CC algorithm to use. See [`CongestionControlAlgorithm`] for currently
335//! available congestion control algorithms.
336//!
337//! For example:
338//!
339//! ```
340//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
341//! config.set_cc_algorithm(quiche::CongestionControlAlgorithm::Reno);
342//! ```
343//!
344//! Alternatively, you can configure the congestion control algorithm to use
345//! by its name.
346//!
347//! ```
348//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
349//! config.set_cc_algorithm_name("reno").unwrap();
350//! ```
351//!
352//! Note that the CC algorithm should be configured before calling [`connect()`]
353//! or [`accept()`]. Otherwise the connection will use a default CC algorithm.
354//!
355//! [`CongestionControlAlgorithm`]: enum.CongestionControlAlgorithm.html
356//!
357//! ## Feature flags
358//!
359//! quiche defines a number of [feature flags] to reduce the amount of compiled
360//! code and dependencies:
361//!
362//! * `boringssl-vendored` (default): Build the vendored BoringSSL library.
363//!
364//! * `boringssl-boring-crate`: Use the BoringSSL library provided by the
365//! [boring] crate. It takes precedence over `boringssl-vendored` if both
366//! features are enabled.
367//!
368//! * `pkg-config-meta`: Generate pkg-config metadata file for libquiche.
369//!
370//! * `ffi`: Build and expose the FFI API.
371//!
372//! * `qlog`: Enable support for the [qlog] logging format.
373//!
374//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section
375//! [boring]: https://crates.io/crates/boring
376//! [qlog]: https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema
377
378#![allow(clippy::upper_case_acronyms)]
379#![warn(missing_docs)]
380#![warn(unused_qualifications)]
381#![cfg_attr(docsrs, feature(doc_cfg))]
382
383#[macro_use]
384extern crate log;
385
386use std::cmp;
387
388use std::collections::VecDeque;
389
390use std::net::SocketAddr;
391
392use std::str::FromStr;
393
394use std::sync::Arc;
395
396use std::time::Duration;
397use std::time::Instant;
398
399#[cfg(feature = "qlog")]
400use qlog::events::quic::DataMovedAdditionalInfo;
401#[cfg(feature = "qlog")]
402use qlog::events::quic::QuicEventType;
403#[cfg(feature = "qlog")]
404use qlog::events::quic::TransportInitiator;
405#[cfg(feature = "qlog")]
406use qlog::events::DataRecipient;
407#[cfg(feature = "qlog")]
408use qlog::events::Event;
409#[cfg(feature = "qlog")]
410use qlog::events::EventData;
411#[cfg(feature = "qlog")]
412use qlog::events::EventImportance;
413#[cfg(feature = "qlog")]
414use qlog::events::EventType;
415#[cfg(feature = "qlog")]
416use qlog::events::RawInfo;
417
418use smallvec::SmallVec;
419
420use crate::buffers::DefaultBufFactory;
421
422use crate::recovery::OnAckReceivedOutcome;
423use crate::recovery::OnLossDetectionTimeoutOutcome;
424use crate::recovery::RecoveryOps;
425use crate::recovery::ReleaseDecision;
426
427use crate::stream::RecvAction;
428use crate::stream::StreamPriorityKey;
429
/// The current QUIC wire version.
pub const PROTOCOL_VERSION: u32 = PROTOCOL_VERSION_V1;

/// Supported QUIC versions.
const PROTOCOL_VERSION_V1: u32 = 0x0000_0001;

/// The maximum length of a connection ID.
pub const MAX_CONN_ID_LEN: usize = packet::MAX_CID_LEN as usize;

/// The minimum length of Initial packets sent by a client.
pub const MIN_CLIENT_INITIAL_LEN: usize = 1200;

/// The default initial RTT, used before any RTT sample is available.
const DEFAULT_INITIAL_RTT: Duration = Duration::from_millis(333);

// The minimum length, in bytes, of a packet payload.
const PAYLOAD_MIN_LEN: usize = 4;

// The minimum size of a packet that can carry a path probe:
// PATH_CHALLENGE (9 bytes) + AEAD tag (16 bytes).
const MIN_PROBING_SIZE: usize = 25;

// The default anti-amplification limit factor: before address validation,
// at most this many times the number of received bytes may be sent.
const MAX_AMPLIFICATION_FACTOR: usize = 3;

// The maximum number of tracked packet number ranges that need to be acked.
//
// This represents more or less how many ack blocks can fit in a typical
// packet.
const MAX_ACK_RANGES: usize = 68;

// The highest possible stream ID allowed.
const MAX_STREAM_ID: u64 = 1 << 60;

// The default max_datagram_size used in congestion control.
const MAX_SEND_UDP_PAYLOAD_SIZE: usize = 1200;

// The default length of DATAGRAM queues.
const DEFAULT_MAX_DGRAM_QUEUE_LEN: usize = 0;

// The default length of the PATH_CHALLENGE receive queue.
const DEFAULT_MAX_PATH_CHALLENGE_RX_QUEUE_LEN: usize = 3;

// The DATAGRAM standard recommends either none or 65536 as maximum DATAGRAM
// frame size. We enforce the recommendation for forward compatibility.
const MAX_DGRAM_FRAME_SIZE: u64 = 65536;

// The length of the payload length field.
const PAYLOAD_LENGTH_LEN: usize = 2;

// The maximum number of undecryptable packets that can be buffered.
const MAX_UNDECRYPTABLE_PACKETS: usize = 10;

// Bit mask covering the bit pattern of reserved versions; used by
// `is_reserved_version()`. See RFC 9000 Section 15.
const RESERVED_VERSION_MASK: u32 = 0xfafafafa;

// The default size of the receiver connection flow control window.
const DEFAULT_CONNECTION_WINDOW: u64 = 48 * 1024;

// The maximum size of the receiver connection flow control window.
const MAX_CONNECTION_WINDOW: u64 = 24 * 1024 * 1024;

// How much larger the connection flow control window needs to be, compared
// to the stream flow control window.
const CONNECTION_WINDOW_FACTOR: f64 = 1.5;

// How many probing packet timeouts do we tolerate before considering the
// path validation as failed.
const MAX_PROBING_TIMEOUTS: usize = 3;

// The default initial congestion window size in terms of packet count.
const DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS: usize = 10;

// The maximum data offset that can be stored in a crypto stream.
const MAX_CRYPTO_STREAM_OFFSET: u64 = 1 << 16;

// The default send capacity factor.
const TX_CAP_FACTOR: f64 = 1.0;
503
/// Ancillary information about incoming packets.
///
/// This is passed to the connection's `recv()` method alongside the packet
/// bytes, so the library knows which network path the packet arrived on.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct RecvInfo {
    /// The remote address the packet was received from.
    pub from: SocketAddr,

    /// The local address the packet was received on.
    pub to: SocketAddr,
}
513
/// Ancillary information about outgoing packets.
///
/// Returned by the connection's `send()` method alongside the number of
/// bytes written, telling the application where (and when) to send them.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SendInfo {
    /// The local address the packet should be sent from.
    pub from: SocketAddr,

    /// The remote address the packet should be sent to.
    pub to: SocketAddr,

    /// The time to send the packet out.
    ///
    /// See [Pacing] for more details.
    ///
    /// [Pacing]: index.html#pacing
    pub at: Instant,
}
530
/// The side of the stream to be shut down.
///
/// This should be used when calling [`stream_shutdown()`].
///
/// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
// `#[repr(C)]` with explicit discriminants keeps the values stable for the
// C-compatible FFI API.
#[repr(C)]
#[derive(PartialEq, Eq)]
pub enum Shutdown {
    /// Stop receiving stream data.
    Read = 0,

    /// Stop sending stream data.
    Write = 1,
}
545
/// Qlog logging level.
///
/// Each level is a superset of the previous one (Extra includes Base, which
/// includes Core).
#[repr(C)]
#[cfg(feature = "qlog")]
#[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
pub enum QlogLevel {
    /// Logs any events of Core importance.
    Core = 0,

    /// Logs any events of Core and Base importance.
    Base = 1,

    /// Logs any events of Core, Base and Extra importance.
    Extra = 2,
}
560
/// Stores configuration shared between multiple connections.
pub struct Config {
    // Transport parameters advertised to the peer by the local endpoint.
    local_transport_params: TransportParams,

    // The QUIC wire version, validated in `with_tls_ctx()`.
    version: u32,

    // TLS context shared by connections created from this config.
    tls_ctx: tls::Context,

    // Owned copy of the ALPN IDs set via `set_application_protos()`.
    application_protos: Vec<Vec<u8>>,

    // Whether GREASE values are sent (default `true`).
    grease: bool,

    // Congestion control configuration.
    cc_algorithm: CongestionControlAlgorithm,
    custom_bbr_params: Option<BbrParams>,
    initial_congestion_window_packets: usize,
    enable_relaxed_loss_threshold: bool,
    enable_cubic_idle_restart_fix: bool,
    enable_send_streams_blocked: bool,

    // Whether to perform path MTU discovery (default `false`).
    pmtud: bool,
    // Maximum PMTUD probe attempts before a probe size is considered failed.
    pmtud_max_probes: u8,

    hystart: bool,

    pacing: bool,
    /// Send rate limit in Mbps
    max_pacing_rate: Option<u64>,

    // Send capacity factor, set via `set_send_capacity_factor()`.
    tx_cap_factor: f64,

    // Maximum lengths of the DATAGRAM receive and send queues.
    dgram_recv_max_queue_len: usize,
    dgram_send_max_queue_len: usize,

    // Maximum length of the PATH_CHALLENGE receive queue.
    path_challenge_recv_max_queue_len: usize,

    // Maximum outgoing UDP payload size (clamped to >= 1200 by its setter).
    max_send_udp_payload_size: usize,

    // Upper bounds on the connection-level and per-stream receive windows.
    max_connection_window: u64,
    max_stream_window: u64,

    // Anti-amplification limit factor (default 3).
    max_amplification_factor: usize,

    disable_dcid_reuse: bool,

    // NOTE(review): appears to bound tracking of unknown peer transport
    // parameters when `Some`; setter not visible in this chunk — confirm.
    track_unknown_transport_params: Option<usize>,

    // Initial RTT estimate used before an RTT sample is available.
    initial_rtt: Duration,
}
609
610// See https://quicwg.org/base-drafts/rfc9000.html#section-15
611fn is_reserved_version(version: u32) -> bool {
612 version & RESERVED_VERSION_MASK == version
613}
614
615impl Config {
616 /// Creates a config object with the given version.
617 ///
618 /// ## Examples:
619 ///
620 /// ```
621 /// let config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
622 /// # Ok::<(), quiche::Error>(())
623 /// ```
624 pub fn new(version: u32) -> Result<Config> {
625 Self::with_tls_ctx(version, tls::Context::new()?)
626 }
627
628 /// Creates a config object with the given version and
629 /// [`SslContextBuilder`].
630 ///
631 /// This is useful for applications that wish to manually configure
632 /// [`SslContextBuilder`].
633 ///
634 /// [`SslContextBuilder`]: https://docs.rs/boring/latest/boring/ssl/struct.SslContextBuilder.html
635 #[cfg(feature = "boringssl-boring-crate")]
636 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
637 pub fn with_boring_ssl_ctx_builder(
638 version: u32, tls_ctx_builder: boring::ssl::SslContextBuilder,
639 ) -> Result<Config> {
640 Self::with_tls_ctx(version, tls::Context::from_boring(tls_ctx_builder))
641 }
642
643 fn with_tls_ctx(version: u32, tls_ctx: tls::Context) -> Result<Config> {
644 if !is_reserved_version(version) && !version_is_supported(version) {
645 return Err(Error::UnknownVersion);
646 }
647
648 Ok(Config {
649 local_transport_params: TransportParams::default(),
650 version,
651 tls_ctx,
652 application_protos: Vec::new(),
653 grease: true,
654 cc_algorithm: CongestionControlAlgorithm::CUBIC,
655 custom_bbr_params: None,
656 initial_congestion_window_packets:
657 DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS,
658 enable_relaxed_loss_threshold: false,
659 enable_cubic_idle_restart_fix: true,
660 enable_send_streams_blocked: false,
661 pmtud: false,
662 pmtud_max_probes: pmtud::MAX_PROBES_DEFAULT,
663 hystart: true,
664 pacing: true,
665 max_pacing_rate: None,
666
667 tx_cap_factor: TX_CAP_FACTOR,
668
669 dgram_recv_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,
670 dgram_send_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,
671
672 path_challenge_recv_max_queue_len:
673 DEFAULT_MAX_PATH_CHALLENGE_RX_QUEUE_LEN,
674
675 max_send_udp_payload_size: MAX_SEND_UDP_PAYLOAD_SIZE,
676
677 max_connection_window: MAX_CONNECTION_WINDOW,
678 max_stream_window: stream::MAX_STREAM_WINDOW,
679
680 max_amplification_factor: MAX_AMPLIFICATION_FACTOR,
681
682 disable_dcid_reuse: false,
683
684 track_unknown_transport_params: None,
685 initial_rtt: DEFAULT_INITIAL_RTT,
686 })
687 }
688
    /// Configures the given certificate chain.
    ///
    /// The content of `file` is parsed as a PEM-encoded leaf certificate,
    /// followed by optional intermediate certificates.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_cert_chain_from_pem_file("/path/to/cert.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_cert_chain_from_pem_file(&mut self, file: &str) -> Result<()> {
        // Delegates to the shared TLS context.
        self.tls_ctx.use_certificate_chain_file(file)
    }
704
    /// Configures the given private key.
    ///
    /// The content of `file` is parsed as a PEM-encoded private key.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_priv_key_from_pem_file("/path/to/key.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_priv_key_from_pem_file(&mut self, file: &str) -> Result<()> {
        // Delegates to the shared TLS context.
        self.tls_ctx.use_privkey_file(file)
    }
719
    /// Specifies a file where trusted CA certificates are stored for the
    /// purposes of certificate verification.
    ///
    /// The content of `file` is parsed as a PEM-encoded certificate chain.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_verify_locations_from_file("/path/to/cert.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_verify_locations_from_file(&mut self, file: &str) -> Result<()> {
        // Delegates to the shared TLS context.
        self.tls_ctx.load_verify_locations_from_file(file)
    }
735
    /// Specifies a directory where trusted CA certificates are stored for the
    /// purposes of certificate verification.
    ///
    /// The content of `dir` is a set of PEM-encoded certificate chains.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_verify_locations_from_directory("/path/to/certs")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_verify_locations_from_directory(
        &mut self, dir: &str,
    ) -> Result<()> {
        // Delegates to the shared TLS context.
        self.tls_ctx.load_verify_locations_from_directory(dir)
    }
753
    /// Configures whether to verify the peer's certificate.
    ///
    /// This should usually be `true` for client-side connections and `false`
    /// for server-side ones.
    ///
    /// Note that by default, no verification is performed.
    ///
    /// Also note that on the server-side, enabling verification of the peer
    /// will trigger a certificate request and make authentication errors
    /// fatal, but will still allow anonymous clients (i.e. clients that
    /// don't present a certificate at all). Servers can check whether a
    /// client presented a certificate by calling [`peer_cert()`] if they
    /// need to.
    ///
    /// [`peer_cert()`]: struct.Connection.html#method.peer_cert
    pub fn verify_peer(&mut self, verify: bool) {
        self.tls_ctx.set_verify(verify);
    }
772
    /// Configures whether to do path MTU discovery.
    ///
    /// The default value is `false`.
    pub fn discover_pmtu(&mut self, discover: bool) {
        self.pmtud = discover;
    }
779
780 /// Configures the maximum number of PMTUD probe attempts before treating
781 /// a probe size as failed.
782 ///
783 /// Defaults to 3 per [RFC 8899 Section 5.1.2](https://datatracker.ietf.org/doc/html/rfc8899#section-5.1.2).
784 /// If 0 is passed, the default value is used.
785 pub fn set_pmtud_max_probes(&mut self, max_probes: u8) {
786 self.pmtud_max_probes = max_probes;
787 }
788
    /// Configures whether to send GREASE values.
    ///
    /// The default value is `true`.
    pub fn grease(&mut self, grease: bool) {
        self.grease = grease;
    }
795
    /// Enables logging of secrets.
    ///
    /// When logging is enabled, the [`set_keylog()`] method must be called on
    /// the connection for its cryptographic secrets to be logged in the
    /// [keylog] format to the specified writer.
    ///
    /// [`set_keylog()`]: struct.Connection.html#method.set_keylog
    /// [keylog]: https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format
    pub fn log_keys(&mut self) {
        self.tls_ctx.enable_keylog();
    }
807
    /// Configures the session ticket key material.
    ///
    /// On the server this key will be used to encrypt and decrypt session
    /// tickets, used to perform session resumption without server-side state.
    ///
    /// By default a key is generated internally, and rotated regularly, so
    /// applications don't need to call this unless they need to use a
    /// specific key (e.g. in order to support resumption across multiple
    /// servers), in which case the application is also responsible for
    /// rotating the key to provide forward secrecy.
    pub fn set_ticket_key(&mut self, key: &[u8]) -> Result<()> {
        self.tls_ctx.set_ticket_key(key)
    }
821
    /// Enables sending or receiving early data.
    pub fn enable_early_data(&mut self) {
        // Early data is negotiated at the TLS layer, so this just flips the
        // flag on the shared TLS context.
        self.tls_ctx.set_early_data_enabled(true);
    }
826
827 /// Configures the list of supported application protocols.
828 ///
829 /// On the client this configures the list of protocols to send to the
830 /// server as part of the ALPN extension.
831 ///
832 /// On the server this configures the list of supported protocols to match
833 /// against the client-supplied list.
834 ///
835 /// Applications must set a value, but no default is provided.
836 ///
837 /// ## Examples:
838 ///
839 /// ```
840 /// # let mut config = quiche::Config::new(0xbabababa)?;
841 /// config.set_application_protos(&[b"http/1.1", b"http/0.9"]);
842 /// # Ok::<(), quiche::Error>(())
843 /// ```
844 pub fn set_application_protos(
845 &mut self, protos_list: &[&[u8]],
846 ) -> Result<()> {
847 self.application_protos =
848 protos_list.iter().map(|s| s.to_vec()).collect();
849
850 self.tls_ctx.set_alpn(protos_list)
851 }
852
853 /// Configures the list of supported application protocols using wire
854 /// format.
855 ///
856 /// The list of protocols `protos` must be a series of non-empty, 8-bit
857 /// length-prefixed strings.
858 ///
859 /// See [`set_application_protos`](Self::set_application_protos) for more
860 /// background about application protocols.
861 ///
862 /// ## Examples:
863 ///
864 /// ```
865 /// # let mut config = quiche::Config::new(0xbabababa)?;
866 /// config.set_application_protos_wire_format(b"\x08http/1.1\x08http/0.9")?;
867 /// # Ok::<(), quiche::Error>(())
868 /// ```
869 pub fn set_application_protos_wire_format(
870 &mut self, protos: &[u8],
871 ) -> Result<()> {
872 let mut b = octets::Octets::with_slice(protos);
873
874 let mut protos_list = Vec::new();
875
876 while let Ok(proto) = b.get_bytes_with_u8_length() {
877 protos_list.push(proto.buf());
878 }
879
880 self.set_application_protos(&protos_list)
881 }
882
    /// Sets the anti-amplification limit factor.
    ///
    /// Before the peer's address is validated, the amount of data sent to it
    /// is bounded by this multiple of the bytes received from it.
    ///
    /// The default value is `3`.
    pub fn set_max_amplification_factor(&mut self, v: usize) {
        self.max_amplification_factor = v;
    }
889
    /// Sets the send capacity factor.
    ///
    /// The default value is `1`.
    pub fn set_send_capacity_factor(&mut self, v: f64) {
        // NOTE(review): presumably applied as a multiplier when computing
        // the connection's send capacity (`tx_cap`) — confirm where
        // `tx_cap_factor` is consumed. No validation is performed here.
        self.tx_cap_factor = v;
    }
896
    /// Sets the connection's initial RTT.
    ///
    /// The default value is `333` milliseconds.
    pub fn set_initial_rtt(&mut self, v: Duration) {
        self.initial_rtt = v;
    }
903
904 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
905 ///
906 /// The default value is infinite, that is, no timeout is used.
907 pub fn set_max_idle_timeout(&mut self, v: u64) {
908 self.local_transport_params.max_idle_timeout =
909 cmp::min(v, octets::MAX_VAR_INT);
910 }
911
    /// Sets the `max_udp_payload_size` transport parameter.
    ///
    /// The default value is `65527`.
    pub fn set_max_recv_udp_payload_size(&mut self, v: usize) {
        self.local_transport_params.max_udp_payload_size =
            cmp::min(v as u64, octets::MAX_VAR_INT);
    }
919
920 /// Sets the maximum outgoing UDP payload size.
921 ///
922 /// The default and minimum value is `1200`.
923 pub fn set_max_send_udp_payload_size(&mut self, v: usize) {
924 self.max_send_udp_payload_size = cmp::max(v, MAX_SEND_UDP_PAYLOAD_SIZE);
925 }
926
927 /// Sets the `initial_max_data` transport parameter.
928 ///
929 /// When set to a non-zero value quiche will only allow at most `v` bytes of
930 /// incoming stream data to be buffered for the whole connection (that is,
931 /// data that is not yet read by the application) and will allow more data
932 /// to be received as the buffer is consumed by the application.
933 ///
934 /// When set to zero, either explicitly or via the default, quiche will not
935 /// give any flow control to the peer, preventing it from sending any stream
936 /// data.
937 ///
938 /// The default value is `0`.
939 pub fn set_initial_max_data(&mut self, v: u64) {
940 self.local_transport_params.initial_max_data =
941 cmp::min(v, octets::MAX_VAR_INT);
942 }
943
944 /// Sets the `initial_max_stream_data_bidi_local` transport parameter.
945 ///
946 /// When set to a non-zero value quiche will only allow at most `v` bytes
947 /// of incoming stream data to be buffered for each locally-initiated
948 /// bidirectional stream (that is, data that is not yet read by the
949 /// application) and will allow more data to be received as the buffer is
950 /// consumed by the application.
951 ///
952 /// When set to zero, either explicitly or via the default, quiche will not
953 /// give any flow control to the peer, preventing it from sending any stream
954 /// data.
955 ///
956 /// The default value is `0`.
957 pub fn set_initial_max_stream_data_bidi_local(&mut self, v: u64) {
958 self.local_transport_params
959 .initial_max_stream_data_bidi_local =
960 cmp::min(v, octets::MAX_VAR_INT);
961 }
962
963 /// Sets the `initial_max_stream_data_bidi_remote` transport parameter.
964 ///
965 /// When set to a non-zero value quiche will only allow at most `v` bytes
966 /// of incoming stream data to be buffered for each remotely-initiated
967 /// bidirectional stream (that is, data that is not yet read by the
968 /// application) and will allow more data to be received as the buffer is
969 /// consumed by the application.
970 ///
971 /// When set to zero, either explicitly or via the default, quiche will not
972 /// give any flow control to the peer, preventing it from sending any stream
973 /// data.
974 ///
975 /// The default value is `0`.
976 pub fn set_initial_max_stream_data_bidi_remote(&mut self, v: u64) {
977 self.local_transport_params
978 .initial_max_stream_data_bidi_remote =
979 cmp::min(v, octets::MAX_VAR_INT);
980 }
981
982 /// Sets the `initial_max_stream_data_uni` transport parameter.
983 ///
984 /// When set to a non-zero value quiche will only allow at most `v` bytes
985 /// of incoming stream data to be buffered for each unidirectional stream
986 /// (that is, data that is not yet read by the application) and will allow
987 /// more data to be received as the buffer is consumed by the application.
988 ///
989 /// When set to zero, either explicitly or via the default, quiche will not
990 /// give any flow control to the peer, preventing it from sending any stream
991 /// data.
992 ///
993 /// The default value is `0`.
994 pub fn set_initial_max_stream_data_uni(&mut self, v: u64) {
995 self.local_transport_params.initial_max_stream_data_uni =
996 cmp::min(v, octets::MAX_VAR_INT);
997 }
998
    /// Sets the `initial_max_streams_bidi` transport parameter.
    ///
    /// When set to a non-zero value quiche will only allow `v` number of
    /// concurrent remotely-initiated bidirectional streams to be open at any
    /// given time and will increase the limit automatically as streams are
    /// completed.
    ///
    /// When set to zero, either explicitly or via the default, quiche will
    /// not allow the peer to open any bidirectional streams.
    ///
    /// A bidirectional stream is considered completed when all incoming data
    /// has been read by the application (up to the `fin` offset) or the
    /// stream's read direction has been shutdown, and all outgoing data has
    /// been acked by the peer (up to the `fin` offset) or the stream's write
    /// direction has been shutdown.
    ///
    /// The default value is `0`.
    pub fn set_initial_max_streams_bidi(&mut self, v: u64) {
        self.local_transport_params.initial_max_streams_bidi =
            cmp::min(v, octets::MAX_VAR_INT);
    }
1020
    /// Sets the `initial_max_streams_uni` transport parameter.
    ///
    /// When set to a non-zero value quiche will only allow `v` number of
    /// concurrent remotely-initiated unidirectional streams to be open at any
    /// given time and will increase the limit automatically as streams are
    /// completed.
    ///
    /// When set to zero, either explicitly or via the default, quiche will
    /// not allow the peer to open any unidirectional streams.
    ///
    /// A unidirectional stream is considered completed when all incoming data
    /// has been read by the application (up to the `fin` offset) or the
    /// stream's read direction has been shutdown.
    ///
    /// The default value is `0`.
    pub fn set_initial_max_streams_uni(&mut self, v: u64) {
        self.local_transport_params.initial_max_streams_uni =
            cmp::min(v, octets::MAX_VAR_INT);
    }
1040
1041 /// Sets the `ack_delay_exponent` transport parameter.
1042 ///
1043 /// The default value is `3`.
1044 pub fn set_ack_delay_exponent(&mut self, v: u64) {
1045 self.local_transport_params.ack_delay_exponent =
1046 cmp::min(v, octets::MAX_VAR_INT);
1047 }
1048
    /// Sets the `max_ack_delay` transport parameter, in milliseconds.
    ///
    /// The default value is `25`.
    pub fn set_max_ack_delay(&mut self, v: u64) {
        self.local_transport_params.max_ack_delay =
            cmp::min(v, octets::MAX_VAR_INT);
    }
1056
1057 /// Sets the `active_connection_id_limit` transport parameter.
1058 ///
1059 /// The default value is `2`. Lower values will be ignored.
1060 pub fn set_active_connection_id_limit(&mut self, v: u64) {
1061 if v >= 2 {
1062 self.local_transport_params.active_conn_id_limit =
1063 cmp::min(v, octets::MAX_VAR_INT);
1064 }
1065 }
1066
    /// Sets the `disable_active_migration` transport parameter.
    ///
    /// When `true`, the peer is asked not to actively migrate to a new path.
    ///
    /// The default value is `false`.
    pub fn set_disable_active_migration(&mut self, v: bool) {
        self.local_transport_params.disable_active_migration = v;
    }
1073
    /// Sets the congestion control algorithm used.
    ///
    /// See [`CongestionControlAlgorithm`] for the available algorithms.
    ///
    /// The default value is `CongestionControlAlgorithm::CUBIC`.
    pub fn set_cc_algorithm(&mut self, algo: CongestionControlAlgorithm) {
        self.cc_algorithm = algo;
    }
1080
    /// Sets custom BBR settings.
    ///
    /// This API is experimental and will be removed in the future.
    ///
    /// Currently this only applies if `cc_algorithm` is set to
    /// `CongestionControlAlgorithm::Bbr2Gcongestion`.
    ///
    /// The default value is `None`.
    #[cfg(feature = "internal")]
    #[doc(hidden)]
    pub fn set_custom_bbr_params(&mut self, custom_bbr_settings: BbrParams) {
        self.custom_bbr_params = Some(custom_bbr_settings);
    }
1094
1095 /// Sets the congestion control algorithm used by string.
1096 ///
1097 /// The default value is `cubic`. On error `Error::CongestionControl`
1098 /// will be returned.
1099 ///
1100 /// ## Examples:
1101 ///
1102 /// ```
1103 /// # let mut config = quiche::Config::new(0xbabababa)?;
1104 /// config.set_cc_algorithm_name("reno");
1105 /// # Ok::<(), quiche::Error>(())
1106 /// ```
1107 pub fn set_cc_algorithm_name(&mut self, name: &str) -> Result<()> {
1108 self.cc_algorithm = CongestionControlAlgorithm::from_str(name)?;
1109
1110 Ok(())
1111 }
1112
    /// Sets the initial congestion window size in terms of packet count.
    ///
    /// The default value is `10`.
    pub fn set_initial_congestion_window_packets(&mut self, packets: usize) {
        self.initial_congestion_window_packets = packets;
    }
1119
    /// Configures whether to enable relaxed loss detection on spurious loss.
    ///
    /// The default value is `false`.
    pub fn set_enable_relaxed_loss_threshold(&mut self, enable: bool) {
        self.enable_relaxed_loss_threshold = enable;
    }
1126
    /// Configures whether to enable the CUBIC idle restart fix.
    ///
    /// When enabled, the epoch shift on idle restart uses the later of
    /// the last ACK time and last send time, avoiding an inflated delta
    /// when bytes-in-flight transiently hits zero.
    ///
    /// The default value is `true`.
    pub fn set_enable_cubic_idle_restart_fix(&mut self, enable: bool) {
        self.enable_cubic_idle_restart_fix = enable;
    }
1137
    /// Configures whether to enable sending STREAMS_BLOCKED frames.
    ///
    /// STREAMS_BLOCKED frames are an optional advisory signal in the QUIC
    /// protocol which SHOULD be sent when the sender wishes to open a stream
    /// but is unable to do so due to the maximum stream limit set by its peer.
    ///
    /// The default value is `false`.
    pub fn set_enable_send_streams_blocked(&mut self, enable: bool) {
        self.enable_send_streams_blocked = enable;
    }
1148
    /// Configures whether to enable HyStart++.
    ///
    /// The default value is `true`.
    pub fn enable_hystart(&mut self, v: bool) {
        // Simple flag; no validation is performed.
        self.hystart = v;
    }
1155
    /// Configures whether to enable pacing.
    ///
    /// The default value is `true`.
    pub fn enable_pacing(&mut self, v: bool) {
        // Simple flag; no validation is performed.
        self.pacing = v;
    }
1162
    /// Sets the max value for pacing rate.
    ///
    /// By default pacing rate is not limited.
    pub fn set_max_pacing_rate(&mut self, v: u64) {
        // NOTE(review): there is no setter to restore the unlimited
        // (`None`) default once a limit has been set.
        self.max_pacing_rate = Some(v);
    }
1169
1170 /// Configures whether to enable receiving DATAGRAM frames.
1171 ///
1172 /// When enabled, the `max_datagram_frame_size` transport parameter is set
1173 /// to 65536 as recommended by draft-ietf-quic-datagram-01.
1174 ///
1175 /// The default is `false`.
1176 pub fn enable_dgram(
1177 &mut self, enabled: bool, recv_queue_len: usize, send_queue_len: usize,
1178 ) {
1179 self.local_transport_params.max_datagram_frame_size = if enabled {
1180 Some(MAX_DGRAM_FRAME_SIZE)
1181 } else {
1182 None
1183 };
1184 self.dgram_recv_max_queue_len = recv_queue_len;
1185 self.dgram_send_max_queue_len = send_queue_len;
1186 }
1187
    /// Configures the max number of queued received PATH_CHALLENGE frames.
    ///
    /// When an endpoint receives a PATH_CHALLENGE frame and the queue is full,
    /// the frame is discarded.
    ///
    /// The default is `3`.
    pub fn set_path_challenge_recv_max_queue_len(&mut self, queue_len: usize) {
        self.path_challenge_recv_max_queue_len = queue_len;
    }
1197
    /// Sets the maximum size of the connection window, in bytes.
    ///
    /// The default value is `MAX_CONNECTION_WINDOW` (24MBytes).
    pub fn set_max_connection_window(&mut self, v: u64) {
        self.max_connection_window = v;
    }
1204
    /// Sets the maximum size of the stream window, in bytes.
    ///
    /// The default value is `MAX_STREAM_WINDOW` (16MBytes).
    pub fn set_max_stream_window(&mut self, v: u64) {
        self.max_stream_window = v;
    }
1211
    /// Sets the initial stateless reset token.
    ///
    /// This value is only advertised by servers. Setting a stateless reset
    /// token as a client has no effect on the connection.
    ///
    /// The default value is `None`.
    pub fn set_stateless_reset_token(&mut self, v: Option<u128>) {
        self.local_transport_params.stateless_reset_token = v;
    }
1221
    /// Sets whether the QUIC connection should avoid reusing DCIDs over
    /// different paths.
    ///
    /// When set to `true`, it ensures that a destination Connection ID is
    /// never reused on different paths. Such behaviour may lead to a
    /// connection stall if the peer performs a non-voluntary migration
    /// (e.g., NAT rebinding) and does not provide additional destination
    /// Connection IDs to handle such an event.
    ///
    /// The default value is `false`.
    pub fn set_disable_dcid_reuse(&mut self, v: bool) {
        self.disable_dcid_reuse = v;
    }
1235
    /// Enables tracking unknown transport parameters.
    ///
    /// Specifies the maximum number of bytes used to track unknown transport
    /// parameters. The size includes the identifier and its value. If storing
    /// a transport parameter would cause the limit to be exceeded, it is
    /// quietly dropped.
    ///
    /// The default is that the feature is disabled.
    pub fn enable_track_unknown_transport_parameters(&mut self, size: usize) {
        self.track_unknown_transport_params = Some(size);
    }
1247}
1248
/// Tracks the health of the `tx_buffered` value.
#[derive(Clone, Copy, Debug, Default, PartialEq)]
pub enum TxBufferTrackingState {
    /// The send buffer is in a good state.
    #[default]
    Ok,
    /// The send buffer is in an inconsistent state, which could lead to
    /// connection stalls or excess buffering due to bugs we haven't
    /// tracked down yet.
    Inconsistent,
}
1260
/// Tracks if the connection hit the peer stream limit and which
/// STREAMS_BLOCKED frames have been sent.
#[derive(Default)]
struct StreamsBlockedState {
    /// The peer's max_streams limit at which we last became blocked on
    /// opening new local streams, if any.
    blocked_at: Option<u64>,

    /// The stream limit carried in the most recently sent STREAMS_BLOCKED
    /// frame. When this differs from `blocked_at`, the connection has
    /// pending STREAMS_BLOCKED frames to send.
    blocked_sent: Option<u64>,
}
1274
impl StreamsBlockedState {
    /// Returns true if there is a STREAMS_BLOCKED frame that needs sending.
    fn has_pending_stream_blocked_frame(&self) -> bool {
        // Relies on `Option`'s derived ordering, where `None` sorts before
        // any `Some`: this is true when we are blocked (`blocked_at` is
        // `Some`) and either no STREAMS_BLOCKED frame has been sent yet
        // (`blocked_sent` is `None`) or the last one carried a lower limit.
        self.blocked_sent < self.blocked_at
    }

    /// Update the stream blocked limit.
    fn update_at(&mut self, limit: u64) {
        // `Some(limit)` always compares greater than `None`, so this
        // records the largest limit at which the connection was blocked.
        self.blocked_at = self.blocked_at.max(Some(limit));
    }

    /// Clear blocked_sent to force retransmission of the most recently sent
    /// STREAMS_BLOCKED frame.
    fn force_retransmit_sent_limit_eq(&mut self, limit: u64) {
        // Only clear blocked_sent if the lost frame had the most recently
        // sent limit; losses of older frames are irrelevant.
        if self.blocked_sent == Some(limit) {
            self.blocked_sent = None;
        }
    }
}
1296
/// A QUIC connection.
pub struct Connection<F = DefaultBufFactory>
where
    F: BufFactory,
{
    /// QUIC wire version used for the connection.
    version: u32,

    /// Connection Identifiers.
    ids: cid::ConnectionIdentifiers,

    /// Unique opaque ID for the connection that can be used for logging.
    trace_id: String,

    /// Packet number spaces.
    pkt_num_spaces: [packet::PktNumSpace; packet::Epoch::count()],

    /// The crypto context.
    crypto_ctx: [packet::CryptoContext; packet::Epoch::count()],

    /// Next packet number.
    next_pkt_num: u64,

    // TODO
    // combine with `next_pkt_num`
    /// Tracks the packet skip context.
    pkt_num_manager: packet::PktNumManager,

    /// Peer's transport parameters.
    peer_transport_params: TransportParams,

    /// If tracking unknown transport parameters from a peer, how much space to
    /// use in bytes.
    peer_transport_params_track_unknown: Option<usize>,

    /// Local transport parameters.
    local_transport_params: TransportParams,

    /// TLS handshake state.
    handshake: tls::Handshake,

    /// Serialized TLS session buffer.
    ///
    /// This field is populated when a new session ticket is processed on the
    /// client. On the server this is empty.
    session: Option<Vec<u8>>,

    /// The configuration for recovery.
    recovery_config: recovery::RecoveryConfig,

    /// The path manager.
    paths: path::PathMap,

    /// PATH_CHALLENGE receive queue max length.
    path_challenge_recv_max_queue_len: usize,

    /// Total number of received PATH_CHALLENGE frames.
    path_challenge_rx_count: u64,

    /// List of supported application protocols.
    application_protos: Vec<Vec<u8>>,

    /// Total number of received packets.
    recv_count: usize,

    /// Total number of sent packets.
    sent_count: usize,

    /// Total number of lost packets.
    lost_count: usize,

    /// Total number of lost packets that were later acked.
    spurious_lost_count: usize,

    /// Total number of packets sent with data retransmitted.
    retrans_count: usize,

    /// Total number of sent DATAGRAM frames.
    dgram_sent_count: usize,

    /// Total number of received DATAGRAM frames.
    dgram_recv_count: usize,

    /// Total number of bytes received from the peer.
    rx_data: u64,

    /// Receiver flow controller.
    flow_control: flowcontrol::FlowControl,

    /// Whether a MAX_DATA frame needs to be sent.
    should_send_max_data: bool,

    /// True if there is a pending MAX_STREAMS_BIDI frame to send.
    should_send_max_streams_bidi: bool,

    /// True if there is a pending MAX_STREAMS_UNI frame to send.
    should_send_max_streams_uni: bool,

    /// Number of stream data bytes that can be buffered.
    tx_cap: usize,

    /// The send capacity factor.
    tx_cap_factor: f64,

    /// Number of bytes buffered in the send buffer.
    tx_buffered: usize,

    /// Tracks the health of tx_buffered.
    tx_buffered_state: TxBufferTrackingState,

    /// Total number of bytes sent to the peer.
    tx_data: u64,

    /// Peer's flow control limit for the connection.
    max_tx_data: u64,

    /// Last tx_data before running a full send() loop.
    last_tx_data: u64,

    /// Total number of bytes retransmitted over the connection.
    /// This counts only STREAM and CRYPTO data.
    stream_retrans_bytes: u64,

    /// Total number of bytes sent over the connection.
    sent_bytes: u64,

    /// Total number of bytes received over the connection.
    recv_bytes: u64,

    /// Total number of bytes sent acked over the connection.
    acked_bytes: u64,

    /// Total number of bytes sent lost over the connection.
    lost_bytes: u64,

    /// Streams map, indexed by stream ID.
    streams: stream::StreamMap<F>,

    /// Peer's original destination connection ID. Used by the client to
    /// validate the server's transport parameter.
    odcid: Option<ConnectionId<'static>>,

    /// Peer's retry source connection ID. Used by the client during stateless
    /// retry to validate the server's transport parameter.
    rscid: Option<ConnectionId<'static>>,

    /// Received address verification token.
    token: Option<Vec<u8>>,

    /// Error code and reason to be sent to the peer in a CONNECTION_CLOSE
    /// frame.
    local_error: Option<ConnectionError>,

    /// Error code and reason received from the peer in a CONNECTION_CLOSE
    /// frame.
    peer_error: Option<ConnectionError>,

    /// The connection-level limit at which send blocking occurred.
    blocked_limit: Option<u64>,

    /// Idle timeout expiration time.
    idle_timer: Option<Instant>,

    /// Draining timeout expiration time.
    draining_timer: Option<Instant>,

    /// List of raw packets that were received before they could be decrypted.
    undecryptable_pkts: VecDeque<(Vec<u8>, RecvInfo)>,

    /// The negotiated ALPN protocol.
    alpn: Vec<u8>,

    /// Whether this is a server-side connection.
    is_server: bool,

    /// Whether the initial secrets have been derived.
    derived_initial_secrets: bool,

    /// Whether a version negotiation packet has already been received. Only
    /// relevant for client connections.
    did_version_negotiation: bool,

    /// Whether stateless retry has been performed.
    did_retry: bool,

    /// Whether the peer already updated its connection ID.
    got_peer_conn_id: bool,

    /// Whether the peer verified our initial address.
    peer_verified_initial_address: bool,

    /// Whether the peer's transport parameters were parsed.
    parsed_peer_transport_params: bool,

    /// Whether the connection handshake has been completed.
    handshake_completed: bool,

    /// Whether the HANDSHAKE_DONE frame has been sent.
    handshake_done_sent: bool,

    /// Whether the HANDSHAKE_DONE frame has been acked.
    handshake_done_acked: bool,

    /// Whether the connection handshake has been confirmed.
    handshake_confirmed: bool,

    /// Key phase bit used for outgoing protected packets.
    key_phase: bool,

    /// Whether an ack-eliciting packet has been sent since last receiving a
    /// packet.
    ack_eliciting_sent: bool,

    /// Whether the connection is closed.
    closed: bool,

    /// Whether the connection was timed out.
    timed_out: bool,

    /// Whether to send GREASE.
    grease: bool,

    /// Whether to send STREAMS_BLOCKED frames when bidi or uni stream quota
    /// exhausted.
    enable_send_streams_blocked: bool,

    /// TLS keylog writer.
    keylog: Option<Box<dyn std::io::Write + Send + Sync>>,

    #[cfg(feature = "qlog")]
    qlog: QlogInfo,

    /// DATAGRAM queues.
    dgram_recv_queue: dgram::DatagramQueue,
    dgram_send_queue: dgram::DatagramQueue,

    /// Whether to emit DATAGRAM frames in the next packet.
    emit_dgram: bool,

    /// Whether the connection should avoid reusing destination Connection
    /// IDs when the peer migrates.
    disable_dcid_reuse: bool,

    /// The number of streams reset by local.
    reset_stream_local_count: u64,

    /// The number of streams stopped by local.
    stopped_stream_local_count: u64,

    /// The number of streams reset by remote.
    reset_stream_remote_count: u64,

    /// The number of streams stopped by remote.
    stopped_stream_remote_count: u64,

    /// The number of DATA_BLOCKED frames sent due to hitting the connection
    /// flow control limit.
    data_blocked_sent_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames sent due to a stream hitting
    /// the stream flow control limit.
    stream_data_blocked_sent_count: u64,

    /// The number of DATA_BLOCKED frames received from the remote endpoint.
    data_blocked_recv_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames received from the remote
    /// endpoint.
    stream_data_blocked_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames received from the remote endpoint
    /// indicating the peer is blocked on opening new bidirectional streams.
    streams_blocked_bidi_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames received from the remote endpoint
    /// indicating the peer is blocked on opening new unidirectional streams.
    streams_blocked_uni_recv_count: u64,

    /// Tracks if the connection hit the peer's bidi or uni stream limit, and if
    /// STREAMS_BLOCKED frames are pending transmission.
    streams_blocked_bidi_state: StreamsBlockedState,
    streams_blocked_uni_state: StreamsBlockedState,

    /// The anti-amplification limit factor.
    max_amplification_factor: usize,
}
1583
/// Creates a new server-side connection.
///
/// The `scid` parameter represents the server's source connection ID, while
/// the optional `odcid` parameter represents the original destination ID the
/// client sent before a Retry packet (this is only required when using the
/// [`retry()`] function). See also the [`accept_with_retry()`] function for
/// more advanced retry cases.
///
/// [`retry()`]: fn.retry.html
/// [`accept_with_retry()`]: fn.accept_with_retry.html
///
/// ## Examples:
///
/// ```no_run
/// # let mut config = quiche::Config::new(0xbabababa)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let local = "127.0.0.1:0".parse().unwrap();
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// let conn = quiche::accept(&scid, None, local, peer, &mut config)?;
/// # Ok::<(), quiche::Error>(())
/// ```
#[inline(always)]
pub fn accept(
    scid: &ConnectionId, odcid: Option<&ConnectionId>, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection> {
    accept_with_buf_factory(scid, odcid, local, peer, config)
}
1611
/// Creates a new server-side connection, with a custom buffer generation
/// method.
///
/// The buffers generated can be anything that can be dereferenced as a byte
/// slice. See [`accept`] and [`BufFactory`] for more info.
#[inline]
pub fn accept_with_buf_factory<F: BufFactory>(
    scid: &ConnectionId, odcid: Option<&ConnectionId>, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    // For connections with `odcid` set, we historically used `retry_source_cid =
    // scid`. Keep this behavior to preserve backwards compatibility.
    // `accept_with_retry` allows the SCIDs to be specified separately.
    let retry_cids = odcid.map(|odcid| RetryConnectionIds {
        original_destination_cid: odcid,
        retry_source_cid: scid,
    });
    Connection::new(scid, retry_cids, None, local, peer, config, true)
}
1631
/// A wrapper for the connection IDs involved in an Initial + Retry exchange,
/// used in [`accept_with_retry`].
pub struct RetryConnectionIds<'a> {
    /// The DCID of the first Initial packet received by the server, which
    /// triggered the Retry packet.
    pub original_destination_cid: &'a ConnectionId<'a>,
    /// The SCID of the Retry packet sent by the server. This can be different
    /// from the new connection's SCID.
    pub retry_source_cid: &'a ConnectionId<'a>,
}
1641
/// Creates a new server-side connection after the client responded to a Retry
/// packet.
///
/// To generate a Retry packet in the first place, use the [`retry()`] function.
///
/// The `scid` parameter represents the server's source connection ID, which can
/// be freshly generated after the application has successfully verified the
/// Retry. `retry_cids` is used to tie the new connection to the Initial + Retry
/// exchange that preceded the connection's creation.
///
/// The DCID of the client's Initial packet is inherently untrusted data. It is
/// safe to use the DCID in the `retry_source_cid` field of the
/// `RetryConnectionIds` provided to this function. However, using the Initial's
/// DCID for the `scid` parameter carries risks. Applications are advised to
/// implement their own DCID validation steps before using the DCID in that
/// manner.
#[inline]
pub fn accept_with_retry<F: BufFactory>(
    scid: &ConnectionId, retry_cids: RetryConnectionIds, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    // Same as `accept()`, but with the retry CIDs provided explicitly.
    Connection::new(scid, Some(retry_cids), None, local, peer, config, true)
}
1665
1666/// Creates a new client-side connection.
1667///
1668/// The `scid` parameter is used as the connection's source connection ID,
1669/// while the optional `server_name` parameter is used to verify the peer's
1670/// certificate.
1671///
1672/// ## Examples:
1673///
1674/// ```no_run
1675/// # let mut config = quiche::Config::new(0xbabababa)?;
1676/// # let server_name = "quic.tech";
1677/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
1678/// # let local = "127.0.0.1:4321".parse().unwrap();
1679/// # let peer = "127.0.0.1:1234".parse().unwrap();
1680/// let conn =
1681/// quiche::connect(Some(&server_name), &scid, local, peer, &mut config)?;
1682/// # Ok::<(), quiche::Error>(())
1683/// ```
1684#[inline]
1685pub fn connect(
1686 server_name: Option<&str>, scid: &ConnectionId, local: SocketAddr,
1687 peer: SocketAddr, config: &mut Config,
1688) -> Result<Connection> {
1689 let mut conn = Connection::new(scid, None, None, local, peer, config, false)?;
1690
1691 if let Some(server_name) = server_name {
1692 conn.handshake.set_host_name(server_name)?;
1693 }
1694
1695 Ok(conn)
1696}
1697
/// Creates a new client-side connection using the given DCID initially.
///
/// Be aware that [RFC 9000] places requirements for unpredictability and length
/// on the client DCID field. This function is dangerous if these requirements
/// are not satisfied.
///
/// The `scid` parameter is used as the connection's source connection ID, while
/// the optional `server_name` parameter is used to verify the peer's
/// certificate.
///
/// [RFC 9000]: <https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3>
#[cfg(feature = "custom-client-dcid")]
#[cfg_attr(docsrs, doc(cfg(feature = "custom-client-dcid")))]
pub fn connect_with_dcid(
    server_name: Option<&str>, scid: &ConnectionId, dcid: &ConnectionId,
    local: SocketAddr, peer: SocketAddr, config: &mut Config,
) -> Result<Connection> {
    let mut conn =
        Connection::new(scid, None, Some(dcid), local, peer, config, false)?;

    // The host name is only needed to verify the peer's certificate, so it
    // is optional.
    if let Some(server_name) = server_name {
        conn.handshake.set_host_name(server_name)?;
    }

    Ok(conn)
}
1724
/// Creates a new client-side connection, with a custom buffer generation
/// method.
///
/// The buffers generated can be anything that can be dereferenced as a byte
/// slice. See [`connect`] and [`BufFactory`] for more info.
#[inline]
pub fn connect_with_buffer_factory<F: BufFactory>(
    server_name: Option<&str>, scid: &ConnectionId, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    let mut conn = Connection::new(scid, None, None, local, peer, config, false)?;

    // The host name is only needed to verify the peer's certificate, so it
    // is optional.
    if let Some(server_name) = server_name {
        conn.handshake.set_host_name(server_name)?;
    }

    Ok(conn)
}
1743
/// Creates a new client-side connection, with a custom buffer generation
/// method, using the given DCID initially.
///
/// Be aware that [`RFC9000`] places requirements for unpredictability and
/// length on the client DCID field.
///
/// [`RFC9000`]: https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3
///
/// The buffers generated can be anything that can be dereferenced as a byte
/// slice. See [`connect`] and [`BufFactory`] for more info.
#[cfg(feature = "custom-client-dcid")]
#[cfg_attr(docsrs, doc(cfg(feature = "custom-client-dcid")))]
pub fn connect_with_dcid_and_buffer_factory<F: BufFactory>(
    server_name: Option<&str>, scid: &ConnectionId, dcid: &ConnectionId,
    local: SocketAddr, peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    let mut conn =
        Connection::new(scid, None, Some(dcid), local, peer, config, false)?;

    // The host name is only needed to verify the peer's certificate, so it
    // is optional.
    if let Some(server_name) = server_name {
        conn.handshake.set_host_name(server_name)?;
    }

    Ok(conn)
}
1767
/// Writes a version negotiation packet.
///
/// The `scid` and `dcid` parameters are the source connection ID and the
/// destination connection ID extracted from the received client's Initial
/// packet that advertises an unsupported version.
///
/// On success, returns the number of bytes written to `out`.
///
/// ## Examples:
///
/// ```no_run
/// # let mut buf = [0; 512];
/// # let mut out = [0; 512];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// let (len, src) = socket.recv_from(&mut buf).unwrap();
///
/// let hdr =
///     quiche::Header::from_slice(&mut buf[..len], quiche::MAX_CONN_ID_LEN)?;
///
/// if hdr.version != quiche::PROTOCOL_VERSION {
///     let len = quiche::negotiate_version(&hdr.scid, &hdr.dcid, &mut out)?;
///     socket.send_to(&out[..len], &src).unwrap();
/// }
/// # Ok::<(), quiche::Error>(())
/// ```
#[inline]
pub fn negotiate_version(
    scid: &ConnectionId, dcid: &ConnectionId, out: &mut [u8],
) -> Result<usize> {
    // Thin public wrapper; the wire encoding lives in the internal `packet`
    // module.
    packet::negotiate_version(scid, dcid, out)
}
1797
/// Writes a stateless retry packet.
///
/// The `scid` and `dcid` parameters are the source connection ID and the
/// destination connection ID extracted from the received client's Initial
/// packet, while `new_scid` is the server's new source connection ID and
/// `token` is the address validation token the client needs to echo back.
///
/// The application is responsible for generating the address validation
/// token to be sent to the client, and verifying tokens sent back by the
/// client. The generated token should include the `dcid` parameter, such
/// that it can be later extracted from the token and passed to the
/// [`accept()`] function as its `odcid` parameter.
///
/// On success, returns the number of bytes written to `out`.
///
/// [`accept()`]: fn.accept.html
///
/// ## Examples:
///
/// ```no_run
/// # let mut config = quiche::Config::new(0xbabababa)?;
/// # let mut buf = [0; 512];
/// # let mut out = [0; 512];
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let local = socket.local_addr().unwrap();
/// # fn mint_token(hdr: &quiche::Header, src: &std::net::SocketAddr) -> Vec<u8> {
/// #     vec![]
/// # }
/// # fn validate_token<'a>(src: &std::net::SocketAddr, token: &'a [u8]) -> Option<quiche::ConnectionId<'a>> {
/// #     None
/// # }
/// let (len, peer) = socket.recv_from(&mut buf).unwrap();
///
/// let hdr = quiche::Header::from_slice(&mut buf[..len], quiche::MAX_CONN_ID_LEN)?;
///
/// let token = hdr.token.as_ref().unwrap();
///
/// // No token sent by client, create a new one.
/// if token.is_empty() {
///     let new_token = mint_token(&hdr, &peer);
///
///     let len = quiche::retry(
///         &hdr.scid, &hdr.dcid, &scid, &new_token, hdr.version, &mut out,
///     )?;
///
///     socket.send_to(&out[..len], &peer).unwrap();
///     return Ok(());
/// }
///
/// // Client sent token, validate it.
/// let odcid = validate_token(&peer, token);
///
/// if odcid.is_none() {
///     // Invalid address validation token.
///     return Ok(());
/// }
///
/// let conn = quiche::accept(&scid, odcid.as_ref(), local, peer, &mut config)?;
/// # Ok::<(), quiche::Error>(())
/// ```
#[inline]
pub fn retry(
    scid: &ConnectionId, dcid: &ConnectionId, new_scid: &ConnectionId,
    token: &[u8], version: u32, out: &mut [u8],
) -> Result<usize> {
    // Thin public wrapper; the wire encoding lives in the internal `packet`
    // module.
    packet::retry(scid, dcid, new_scid, token, version, out)
}
1864
1865/// Returns true if the given protocol version is supported.
1866#[inline]
1867pub fn version_is_supported(version: u32) -> bool {
1868 matches!(version, PROTOCOL_VERSION_V1)
1869}
1870
/// Pushes a frame to the output packet if there is enough space.
///
/// Returns `true` on success, `false` otherwise. In case of failure it means
/// there is no room to add the frame in the packet. You may retry to add the
/// frame later.
macro_rules! push_frame_to_pkt {
    ($out:expr, $frames:expr, $frame:expr, $left:expr) => {{
        // Only serialize the frame if its encoded size fits in the remaining
        // space of the packet.
        if $frame.wire_len() <= $left {
            // Deduct the frame's size from the remaining space first.
            $left -= $frame.wire_len();

            $frame.to_bytes(&mut $out)?;

            // Record the frame as part of this packet.
            $frames.push($frame);

            true
        } else {
            false
        }
    }};
}
1891
/// Executes the provided body if the qlog feature is enabled, quiche has been
/// configured with a log writer, the event's importance is within the
/// configured level.
macro_rules! qlog_with_type {
    ($ty:expr, $qlog:expr, $qlog_streamer_ref:ident, $body:block) => {{
        // Without the `qlog` feature this macro expands to nothing.
        #[cfg(feature = "qlog")]
        {
            // Skip the body entirely when the event's importance is filtered
            // out by the configured level.
            if EventImportance::from($ty).is_contained_in(&$qlog.level) {
                // Only run the body when a streamer has been configured via
                // one of the `set_qlog*()` methods.
                if let Some($qlog_streamer_ref) = &mut $qlog.streamer {
                    $body
                }
            }
        }
    }};
}
1907
// Shorthand constants for the qlog event types emitted by the connection
// logic below.
#[cfg(feature = "qlog")]
const QLOG_PARAMS_SET: EventType =
    EventType::QuicEventType(QuicEventType::ParametersSet);

#[cfg(feature = "qlog")]
const QLOG_PACKET_RX: EventType =
    EventType::QuicEventType(QuicEventType::PacketReceived);

#[cfg(feature = "qlog")]
const QLOG_PACKET_TX: EventType =
    EventType::QuicEventType(QuicEventType::PacketSent);

#[cfg(feature = "qlog")]
const QLOG_DATA_MV: EventType =
    EventType::QuicEventType(QuicEventType::StreamDataMoved);

#[cfg(feature = "qlog")]
const QLOG_METRICS: EventType =
    EventType::QuicEventType(QuicEventType::RecoveryMetricsUpdated);

#[cfg(feature = "qlog")]
const QLOG_CONNECTION_CLOSED: EventType =
    EventType::QuicEventType(QuicEventType::ConnectionClosed);
1931
// Per-connection qlog state.
#[cfg(feature = "qlog")]
struct QlogInfo {
    // The active qlog streamer; `None` until qlog output is configured.
    streamer: Option<qlog::streamer::QlogStreamer>,
    // Whether the peer's transport parameters have already been logged.
    logged_peer_params: bool,
    // Importance filter applied to events before they are written.
    level: EventImportance,
}
1938
#[cfg(feature = "qlog")]
impl Default for QlogInfo {
    fn default() -> Self {
        // No streamer until one of the `set_qlog*()` methods installs one;
        // the filtering level starts at `Base`.
        Self {
            streamer: None,
            logged_peer_params: false,
            level: EventImportance::Base,
        }
    }
}
1949
1950impl<F: BufFactory> Connection<F> {
    /// Creates a new connection by constructing a fresh TLS handshake state
    /// from the config's TLS context, then delegating to `with_tls()`.
    fn new(
        scid: &ConnectionId, retry_cids: Option<RetryConnectionIds>,
        client_dcid: Option<&ConnectionId>, local: SocketAddr, peer: SocketAddr,
        config: &mut Config, is_server: bool,
    ) -> Result<Connection<F>> {
        let tls = config.tls_ctx.new_handshake()?;
        Connection::with_tls(
            scid,
            retry_cids,
            client_dcid,
            local,
            peer,
            config,
            tls,
            is_server,
        )
    }
1968
1969 #[allow(clippy::too_many_arguments)]
1970 fn with_tls(
1971 scid: &ConnectionId, retry_cids: Option<RetryConnectionIds>,
1972 client_dcid: Option<&ConnectionId>, local: SocketAddr, peer: SocketAddr,
1973 config: &Config, tls: tls::Handshake, is_server: bool,
1974 ) -> Result<Connection<F>> {
1975 if retry_cids.is_some() && client_dcid.is_some() {
1976 // These are exclusive, the caller should only specify one or the
1977 // other.
1978 return Err(Error::InvalidDcidInitialization);
1979 }
1980 #[cfg(feature = "custom-client-dcid")]
1981 if let Some(client_dcid) = client_dcid {
1982 // The Minimum length is 8.
1983 // See https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3
1984 if client_dcid.to_vec().len() < 8 {
1985 return Err(Error::InvalidDcidInitialization);
1986 }
1987 }
1988 #[cfg(not(feature = "custom-client-dcid"))]
1989 if client_dcid.is_some() {
1990 return Err(Error::InvalidDcidInitialization);
1991 }
1992
1993 let max_rx_data = config.local_transport_params.initial_max_data;
1994
1995 let scid_as_hex: Vec<String> =
1996 scid.iter().map(|b| format!("{b:02x}")).collect();
1997
1998 let reset_token = if is_server {
1999 config.local_transport_params.stateless_reset_token
2000 } else {
2001 None
2002 };
2003
2004 let recovery_config = recovery::RecoveryConfig::from_config(config);
2005
2006 let mut path = path::Path::new(
2007 local,
2008 peer,
2009 &recovery_config,
2010 config.path_challenge_recv_max_queue_len,
2011 true,
2012 Some(config),
2013 );
2014
2015 // If we sent a Retry assume the peer's address is verified.
2016 path.verified_peer_address = retry_cids.is_some();
2017 // Assume clients validate the server's address implicitly.
2018 path.peer_verified_local_address = is_server;
2019
2020 // Do not allocate more than the number of active CIDs.
2021 let paths = path::PathMap::new(
2022 path,
2023 config.local_transport_params.active_conn_id_limit as usize,
2024 is_server,
2025 );
2026
2027 let active_path_id = paths.get_active_path_id()?;
2028
2029 let ids = cid::ConnectionIdentifiers::new(
2030 config.local_transport_params.active_conn_id_limit as usize,
2031 scid,
2032 active_path_id,
2033 reset_token,
2034 );
2035
2036 let mut conn = Connection {
2037 version: config.version,
2038
2039 ids,
2040
2041 trace_id: scid_as_hex.join(""),
2042
2043 pkt_num_spaces: [
2044 packet::PktNumSpace::new(),
2045 packet::PktNumSpace::new(),
2046 packet::PktNumSpace::new(),
2047 ],
2048
2049 crypto_ctx: [
2050 packet::CryptoContext::new(),
2051 packet::CryptoContext::new(),
2052 packet::CryptoContext::new(),
2053 ],
2054
2055 next_pkt_num: 0,
2056
2057 pkt_num_manager: packet::PktNumManager::new(),
2058
2059 peer_transport_params: TransportParams::default(),
2060
2061 peer_transport_params_track_unknown: config
2062 .track_unknown_transport_params,
2063
2064 local_transport_params: config.local_transport_params.clone(),
2065
2066 handshake: tls,
2067
2068 session: None,
2069
2070 recovery_config,
2071
2072 paths,
2073 path_challenge_recv_max_queue_len: config
2074 .path_challenge_recv_max_queue_len,
2075 path_challenge_rx_count: 0,
2076
2077 application_protos: config.application_protos.clone(),
2078
2079 recv_count: 0,
2080 sent_count: 0,
2081 lost_count: 0,
2082 spurious_lost_count: 0,
2083 retrans_count: 0,
2084 dgram_sent_count: 0,
2085 dgram_recv_count: 0,
2086 sent_bytes: 0,
2087 recv_bytes: 0,
2088 acked_bytes: 0,
2089 lost_bytes: 0,
2090
2091 rx_data: 0,
2092 flow_control: flowcontrol::FlowControl::new(
2093 max_rx_data,
2094 cmp::min(max_rx_data / 2 * 3, DEFAULT_CONNECTION_WINDOW),
2095 config.max_connection_window,
2096 ),
2097 should_send_max_data: false,
2098 should_send_max_streams_bidi: false,
2099 should_send_max_streams_uni: false,
2100
2101 tx_cap: 0,
2102 tx_cap_factor: config.tx_cap_factor,
2103
2104 tx_buffered: 0,
2105 tx_buffered_state: TxBufferTrackingState::Ok,
2106
2107 tx_data: 0,
2108 max_tx_data: 0,
2109 last_tx_data: 0,
2110
2111 stream_retrans_bytes: 0,
2112
2113 streams: stream::StreamMap::new(
2114 config.local_transport_params.initial_max_streams_bidi,
2115 config.local_transport_params.initial_max_streams_uni,
2116 config.max_stream_window,
2117 ),
2118
2119 odcid: None,
2120
2121 rscid: None,
2122
2123 token: None,
2124
2125 local_error: None,
2126
2127 peer_error: None,
2128
2129 blocked_limit: None,
2130
2131 idle_timer: None,
2132
2133 draining_timer: None,
2134
2135 undecryptable_pkts: VecDeque::new(),
2136
2137 alpn: Vec::new(),
2138
2139 is_server,
2140
2141 derived_initial_secrets: false,
2142
2143 did_version_negotiation: false,
2144
2145 did_retry: false,
2146
2147 got_peer_conn_id: false,
2148
2149 // Assume clients validate the server's address implicitly.
2150 peer_verified_initial_address: is_server,
2151
2152 parsed_peer_transport_params: false,
2153
2154 handshake_completed: false,
2155
2156 handshake_done_sent: false,
2157 handshake_done_acked: false,
2158
2159 handshake_confirmed: false,
2160
2161 key_phase: false,
2162
2163 ack_eliciting_sent: false,
2164
2165 closed: false,
2166
2167 timed_out: false,
2168
2169 grease: config.grease,
2170
2171 enable_send_streams_blocked: config.enable_send_streams_blocked,
2172
2173 keylog: None,
2174
2175 #[cfg(feature = "qlog")]
2176 qlog: Default::default(),
2177
2178 dgram_recv_queue: dgram::DatagramQueue::new(
2179 config.dgram_recv_max_queue_len,
2180 ),
2181
2182 dgram_send_queue: dgram::DatagramQueue::new(
2183 config.dgram_send_max_queue_len,
2184 ),
2185
2186 emit_dgram: true,
2187
2188 disable_dcid_reuse: config.disable_dcid_reuse,
2189
2190 reset_stream_local_count: 0,
2191 stopped_stream_local_count: 0,
2192 reset_stream_remote_count: 0,
2193 stopped_stream_remote_count: 0,
2194
2195 data_blocked_sent_count: 0,
2196 stream_data_blocked_sent_count: 0,
2197 data_blocked_recv_count: 0,
2198 stream_data_blocked_recv_count: 0,
2199
2200 streams_blocked_bidi_recv_count: 0,
2201 streams_blocked_uni_recv_count: 0,
2202
2203 streams_blocked_bidi_state: Default::default(),
2204 streams_blocked_uni_state: Default::default(),
2205
2206 max_amplification_factor: config.max_amplification_factor,
2207 };
2208
2209 if let Some(retry_cids) = retry_cids {
2210 conn.local_transport_params
2211 .original_destination_connection_id =
2212 Some(retry_cids.original_destination_cid.to_vec().into());
2213
2214 conn.local_transport_params.retry_source_connection_id =
2215 Some(retry_cids.retry_source_cid.to_vec().into());
2216
2217 conn.did_retry = true;
2218 }
2219
2220 conn.local_transport_params.initial_source_connection_id =
2221 Some(conn.ids.get_scid(0)?.cid.to_vec().into());
2222
2223 conn.handshake.init(is_server)?;
2224
2225 conn.handshake
2226 .use_legacy_codepoint(config.version != PROTOCOL_VERSION_V1);
2227
2228 conn.encode_transport_params()?;
2229
2230 if !is_server {
2231 let dcid = if let Some(client_dcid) = client_dcid {
2232 // We already had an dcid generated for us, use it.
2233 client_dcid.to_vec()
2234 } else {
2235 // Derive initial secrets for the client. We can do this here
2236 // because we already generated the random
2237 // destination connection ID.
2238 let mut dcid = [0; 16];
2239 rand::rand_bytes(&mut dcid[..]);
2240 dcid.to_vec()
2241 };
2242
2243 let (aead_open, aead_seal) = crypto::derive_initial_key_material(
2244 &dcid,
2245 conn.version,
2246 conn.is_server,
2247 false,
2248 )?;
2249
2250 let reset_token = conn.peer_transport_params.stateless_reset_token;
2251 conn.set_initial_dcid(
2252 dcid.to_vec().into(),
2253 reset_token,
2254 active_path_id,
2255 )?;
2256
2257 conn.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
2258 conn.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);
2259
2260 conn.derived_initial_secrets = true;
2261 }
2262
2263 Ok(conn)
2264 }
2265
    /// Sets keylog output to the designated [`Writer`].
    ///
    /// This needs to be called as soon as the connection is created, to avoid
    /// missing some early logs.
    ///
    /// Subsequent calls replace any previously configured writer.
    ///
    /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
    #[inline]
    pub fn set_keylog(&mut self, writer: Box<dyn std::io::Write + Send + Sync>) {
        self.keylog = Some(writer);
    }
2276
    /// Sets qlog output to the designated [`Writer`].
    ///
    /// Only events included in `QlogLevel::Base` are written. The serialization
    /// format is JSON-SEQ.
    ///
    /// This is equivalent to calling [`set_qlog_with_level()`] with
    /// `QlogLevel::Base`.
    ///
    /// This needs to be called as soon as the connection is created, to avoid
    /// missing some early logs.
    ///
    /// [`set_qlog_with_level()`]: struct.Connection.html#method.set_qlog_with_level
    /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
    #[cfg(feature = "qlog")]
    #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
    pub fn set_qlog(
        &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
        description: String,
    ) {
        self.set_qlog_with_level(writer, title, description, QlogLevel::Base)
    }
2294
2295 /// Sets qlog output to the designated [`Writer`].
2296 ///
2297 /// Only qlog events included in the specified `QlogLevel` are written. The
2298 /// serialization format is JSON-SEQ.
2299 ///
2300 /// This needs to be called as soon as the connection is created, to avoid
2301 /// missing some early logs.
2302 ///
2303 /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
2304 #[cfg(feature = "qlog")]
2305 #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
2306 pub fn set_qlog_with_level(
2307 &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
2308 description: String, qlog_level: QlogLevel,
2309 ) {
2310 use qlog::events::quic::TransportInitiator;
2311 use qlog::events::HTTP3_URI;
2312 use qlog::events::QUIC_URI;
2313
2314 let vp = if self.is_server {
2315 qlog::VantagePointType::Server
2316 } else {
2317 qlog::VantagePointType::Client
2318 };
2319
2320 let level = match qlog_level {
2321 QlogLevel::Core => EventImportance::Core,
2322
2323 QlogLevel::Base => EventImportance::Base,
2324
2325 QlogLevel::Extra => EventImportance::Extra,
2326 };
2327
2328 self.qlog.level = level;
2329
2330 let trace = qlog::TraceSeq::new(
2331 Some(title.to_string()),
2332 Some(description.to_string()),
2333 None,
2334 Some(qlog::VantagePoint {
2335 name: None,
2336 ty: vp,
2337 flow: None,
2338 }),
2339 vec![QUIC_URI.to_string(), HTTP3_URI.to_string()],
2340 );
2341
2342 let mut streamer = qlog::streamer::QlogStreamer::new(
2343 Some(title),
2344 Some(description),
2345 Instant::now(),
2346 trace,
2347 self.qlog.level,
2348 writer,
2349 );
2350
2351 streamer.start_log().ok();
2352
2353 let ev_data = self
2354 .local_transport_params
2355 .to_qlog(TransportInitiator::Local, self.handshake.cipher());
2356
2357 // This event occurs very early, so just mark the relative time as 0.0.
2358 streamer.add_event(Event::with_time(0.0, ev_data)).ok();
2359
2360 self.qlog.streamer = Some(streamer);
2361 }
2362
    /// Returns a mutable reference to the QlogStreamer, if it exists.
    ///
    /// This is `None` until one of the `set_qlog*()` methods has been called.
    #[cfg(feature = "qlog")]
    #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
    pub fn qlog_streamer(&mut self) -> Option<&mut qlog::streamer::QlogStreamer> {
        self.qlog.streamer.as_mut()
    }
2369
2370 /// Configures the given session for resumption.
2371 ///
2372 /// On the client, this can be used to offer the given serialized session,
2373 /// as returned by [`session()`], for resumption.
2374 ///
2375 /// This must only be called immediately after creating a connection, that
2376 /// is, before any packet is sent or received.
2377 ///
2378 /// [`session()`]: struct.Connection.html#method.session
2379 #[inline]
2380 pub fn set_session(&mut self, session: &[u8]) -> Result<()> {
2381 let mut b = octets::Octets::with_slice(session);
2382
2383 let session_len = b.get_u64()? as usize;
2384 let session_bytes = b.get_bytes(session_len)?;
2385
2386 self.handshake.set_session(session_bytes.as_ref())?;
2387
2388 let raw_params_len = b.get_u64()? as usize;
2389 let raw_params_bytes = b.get_bytes(raw_params_len)?;
2390
2391 let peer_params = TransportParams::decode(
2392 raw_params_bytes.as_ref(),
2393 self.is_server,
2394 self.peer_transport_params_track_unknown,
2395 )?;
2396
2397 self.process_peer_transport_params(peer_params)?;
2398
2399 Ok(())
2400 }
2401
2402 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
2403 ///
2404 /// This must only be called immediately after creating a connection, that
2405 /// is, before any packet is sent or received.
2406 ///
2407 /// The default value is infinite, that is, no timeout is used unless
2408 /// already configured when creating the connection.
2409 pub fn set_max_idle_timeout(&mut self, v: u64) -> Result<()> {
2410 self.local_transport_params.max_idle_timeout =
2411 cmp::min(v, octets::MAX_VAR_INT);
2412
2413 self.encode_transport_params()
2414 }
2415
2416 /// Sets the congestion control algorithm used.
2417 ///
2418 /// This function can only be called inside one of BoringSSL's handshake
2419 /// callbacks, before any packet has been sent. Calling this function any
2420 /// other time will have no effect.
2421 ///
2422 /// See [`Config::set_cc_algorithm()`].
2423 ///
2424 /// [`Config::set_cc_algorithm()`]: struct.Config.html#method.set_cc_algorithm
2425 #[cfg(feature = "boringssl-boring-crate")]
2426 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2427 pub fn set_cc_algorithm_in_handshake(
2428 ssl: &mut boring::ssl::SslRef, algo: CongestionControlAlgorithm,
2429 ) -> Result<()> {
2430 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2431
2432 ex_data.recovery_config.cc_algorithm = algo;
2433
2434 Ok(())
2435 }
2436
    /// Sets custom BBR settings.
    ///
    /// This API is experimental and will be removed in the future.
    ///
    /// Currently this only applies if
    /// `CongestionControlAlgorithm::Bbr2Gcongestion` is set.
    ///
    /// This function can only be called inside one of BoringSSL's handshake
    /// callbacks, before any packet has been sent. Calling this function any
    /// other time will have no effect.
    ///
    /// See [`Config::set_custom_bbr_settings()`].
    ///
    /// [`Config::set_custom_bbr_settings()`]: struct.Config.html#method.set_custom_bbr_settings
    #[cfg(all(feature = "boringssl-boring-crate", feature = "internal"))]
    #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
    #[doc(hidden)]
    pub fn set_custom_bbr_settings_in_handshake(
        ssl: &mut boring::ssl::SslRef, custom_bbr_params: BbrParams,
    ) -> Result<()> {
        // Fails if the connection's ex-data cannot be retrieved from the TLS
        // state.
        let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;

        ex_data.recovery_config.custom_bbr_params = Some(custom_bbr_params);

        Ok(())
    }
2463
2464 /// Sets the congestion control algorithm used by string.
2465 ///
2466 /// This function can only be called inside one of BoringSSL's handshake
2467 /// callbacks, before any packet has been sent. Calling this function any
2468 /// other time will have no effect.
2469 ///
2470 /// See [`Config::set_cc_algorithm_name()`].
2471 ///
2472 /// [`Config::set_cc_algorithm_name()`]: struct.Config.html#method.set_cc_algorithm_name
2473 #[cfg(feature = "boringssl-boring-crate")]
2474 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2475 pub fn set_cc_algorithm_name_in_handshake(
2476 ssl: &mut boring::ssl::SslRef, name: &str,
2477 ) -> Result<()> {
2478 let cc_algo = CongestionControlAlgorithm::from_str(name)?;
2479 Self::set_cc_algorithm_in_handshake(ssl, cc_algo)
2480 }
2481
    /// Sets initial congestion window size in terms of packet count.
    ///
    /// This function can only be called inside one of BoringSSL's handshake
    /// callbacks, before any packet has been sent. Calling this function any
    /// other time will have no effect.
    ///
    /// See [`Config::set_initial_congestion_window_packets()`].
    ///
    /// [`Config::set_initial_congestion_window_packets()`]: struct.Config.html#method.set_initial_congestion_window_packets
    #[cfg(feature = "boringssl-boring-crate")]
    #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
    pub fn set_initial_congestion_window_packets_in_handshake(
        ssl: &mut boring::ssl::SslRef, packets: usize,
    ) -> Result<()> {
        // Fails if the connection's ex-data cannot be retrieved from the TLS
        // state.
        let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;

        ex_data.recovery_config.initial_congestion_window_packets = packets;

        Ok(())
    }
2502
    /// Configure whether to enable relaxed loss detection on spurious loss.
    ///
    /// This function can only be called inside one of BoringSSL's handshake
    /// callbacks, before any packet has been sent. Calling this function any
    /// other time will have no effect.
    ///
    /// See [`Config::set_enable_relaxed_loss_threshold()`].
    ///
    /// [`Config::set_enable_relaxed_loss_threshold()`]: struct.Config.html#method.set_enable_relaxed_loss_threshold
    #[cfg(feature = "boringssl-boring-crate")]
    #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
    pub fn set_enable_relaxed_loss_threshold_in_handshake(
        ssl: &mut boring::ssl::SslRef, enable: bool,
    ) -> Result<()> {
        // Fails if the connection's ex-data cannot be retrieved from the TLS
        // state.
        let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;

        ex_data.recovery_config.enable_relaxed_loss_threshold = enable;

        Ok(())
    }
2523
    /// Configure whether to enable the CUBIC idle restart fix.
    ///
    /// This function can only be called inside one of BoringSSL's handshake
    /// callbacks, before any packet has been sent. Calling this function any
    /// other time will have no effect.
    ///
    /// See [`Config::set_enable_cubic_idle_restart_fix()`].
    ///
    /// [`Config::set_enable_cubic_idle_restart_fix()`]: struct.Config.html#method.set_enable_cubic_idle_restart_fix
    #[cfg(feature = "boringssl-boring-crate")]
    #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
    pub fn set_enable_cubic_idle_restart_fix_in_handshake(
        ssl: &mut boring::ssl::SslRef, enable: bool,
    ) -> Result<()> {
        // Fails if the connection's ex-data cannot be retrieved from the TLS
        // state.
        let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;

        ex_data.recovery_config.enable_cubic_idle_restart_fix = enable;

        Ok(())
    }
2544
2545 /// Configures whether to enable HyStart++.
2546 ///
2547 /// This function can only be called inside one of BoringSSL's handshake
2548 /// callbacks, before any packet has been sent. Calling this function any
2549 /// other time will have no effect.
2550 ///
2551 /// See [`Config::enable_hystart()`].
2552 ///
2553 /// [`Config::enable_hystart()`]: struct.Config.html#method.enable_hystart
2554 #[cfg(feature = "boringssl-boring-crate")]
2555 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2556 pub fn set_hystart_in_handshake(
2557 ssl: &mut boring::ssl::SslRef, v: bool,
2558 ) -> Result<()> {
2559 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2560
2561 ex_data.recovery_config.hystart = v;
2562
2563 Ok(())
2564 }
2565
2566 /// Configures whether to enable pacing.
2567 ///
2568 /// This function can only be called inside one of BoringSSL's handshake
2569 /// callbacks, before any packet has been sent. Calling this function any
2570 /// other time will have no effect.
2571 ///
2572 /// See [`Config::enable_pacing()`].
2573 ///
2574 /// [`Config::enable_pacing()`]: struct.Config.html#method.enable_pacing
2575 #[cfg(feature = "boringssl-boring-crate")]
2576 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2577 pub fn set_pacing_in_handshake(
2578 ssl: &mut boring::ssl::SslRef, v: bool,
2579 ) -> Result<()> {
2580 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2581
2582 ex_data.recovery_config.pacing = v;
2583
2584 Ok(())
2585 }
2586
2587 /// Sets the max value for pacing rate.
2588 ///
2589 /// This function can only be called inside one of BoringSSL's handshake
2590 /// callbacks, before any packet has been sent. Calling this function any
2591 /// other time will have no effect.
2592 ///
2593 /// See [`Config::set_max_pacing_rate()`].
2594 ///
2595 /// [`Config::set_max_pacing_rate()`]: struct.Config.html#method.set_max_pacing_rate
2596 #[cfg(feature = "boringssl-boring-crate")]
2597 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2598 pub fn set_max_pacing_rate_in_handshake(
2599 ssl: &mut boring::ssl::SslRef, v: Option<u64>,
2600 ) -> Result<()> {
2601 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2602
2603 ex_data.recovery_config.max_pacing_rate = v;
2604
2605 Ok(())
2606 }
2607
2608 /// Sets the maximum outgoing UDP payload size.
2609 ///
2610 /// This function can only be called inside one of BoringSSL's handshake
2611 /// callbacks, before any packet has been sent. Calling this function any
2612 /// other time will have no effect.
2613 ///
2614 /// See [`Config::set_max_send_udp_payload_size()`].
2615 ///
2616 /// [`Config::set_max_send_udp_payload_size()`]: struct.Config.html#method.set_max_send_udp_payload_size
2617 #[cfg(feature = "boringssl-boring-crate")]
2618 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2619 pub fn set_max_send_udp_payload_size_in_handshake(
2620 ssl: &mut boring::ssl::SslRef, v: usize,
2621 ) -> Result<()> {
2622 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2623
2624 ex_data.recovery_config.max_send_udp_payload_size = v;
2625
2626 Ok(())
2627 }
2628
2629 /// Sets the send capacity factor.
2630 ///
2631 /// This function can only be called inside one of BoringSSL's handshake
2632 /// callbacks, before any packet has been sent. Calling this function any
2633 /// other time will have no effect.
2634 ///
2635 /// See [`Config::set_send_capacity_factor()`].
2636 ///
2637 /// [`Config::set_max_send_udp_payload_size()`]: struct.Config.html#method.set_send_capacity_factor
2638 #[cfg(feature = "boringssl-boring-crate")]
2639 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2640 pub fn set_send_capacity_factor_in_handshake(
2641 ssl: &mut boring::ssl::SslRef, v: f64,
2642 ) -> Result<()> {
2643 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2644
2645 ex_data.tx_cap_factor = v;
2646
2647 Ok(())
2648 }
2649
2650 /// Configures whether to do path MTU discovery.
2651 ///
2652 /// This function can only be called inside one of BoringSSL's handshake
2653 /// callbacks, before any packet has been sent. Calling this function any
2654 /// other time will have no effect.
2655 ///
2656 /// See [`Config::discover_pmtu()`].
2657 ///
2658 /// [`Config::discover_pmtu()`]: struct.Config.html#method.discover_pmtu
2659 #[cfg(feature = "boringssl-boring-crate")]
2660 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2661 pub fn set_discover_pmtu_in_handshake(
2662 ssl: &mut boring::ssl::SslRef, discover: bool, max_probes: u8,
2663 ) -> Result<()> {
2664 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2665
2666 ex_data.pmtud = Some((discover, max_probes));
2667
2668 Ok(())
2669 }
2670
    /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
    ///
    /// This function can only be called inside one of BoringSSL's handshake
    /// callbacks, before any packet has been sent. Calling this function any
    /// other time will have no effect.
    ///
    /// See [`Config::set_max_idle_timeout()`].
    ///
    /// [`Config::set_max_idle_timeout()`]: struct.Config.html#method.set_max_idle_timeout
    #[cfg(feature = "boringssl-boring-crate")]
    #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
    pub fn set_max_idle_timeout_in_handshake(
        ssl: &mut boring::ssl::SslRef, v: u64,
    ) -> Result<()> {
        let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;

        ex_data.local_transport_params.max_idle_timeout = v;

        // Re-encode all transport parameters so the updated value reaches the
        // TLS state. ("hanshake" in the helper's name is a pre-existing typo.)
        Self::set_transport_parameters_in_hanshake(
            ex_data.local_transport_params.clone(),
            ex_data.is_server,
            ssl,
        )
    }
2695
    /// Sets the `initial_max_streams_bidi` transport parameter.
    ///
    /// This function can only be called inside one of BoringSSL's handshake
    /// callbacks, before any packet has been sent. Calling this function any
    /// other time will have no effect.
    ///
    /// See [`Config::set_initial_max_streams_bidi()`].
    ///
    /// [`Config::set_initial_max_streams_bidi()`]: struct.Config.html#method.set_initial_max_streams_bidi
    #[cfg(feature = "boringssl-boring-crate")]
    #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
    pub fn set_initial_max_streams_bidi_in_handshake(
        ssl: &mut boring::ssl::SslRef, v: u64,
    ) -> Result<()> {
        let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;

        ex_data.local_transport_params.initial_max_streams_bidi = v;

        // Re-encode all transport parameters so the updated value reaches the
        // TLS state. ("hanshake" in the helper's name is a pre-existing typo.)
        Self::set_transport_parameters_in_hanshake(
            ex_data.local_transport_params.clone(),
            ex_data.is_server,
            ssl,
        )
    }
2720
2721 #[cfg(feature = "boringssl-boring-crate")]
2722 fn set_transport_parameters_in_hanshake(
2723 params: TransportParams, is_server: bool, ssl: &mut boring::ssl::SslRef,
2724 ) -> Result<()> {
2725 use foreign_types_shared::ForeignTypeRef;
2726
2727 // In order to apply the new parameter to the TLS state before TPs are
2728 // written into a TLS message, we need to re-encode all TPs immediately.
2729 //
2730 // Since we don't have direct access to the main `Connection` object, we
2731 // need to re-create the `Handshake` state from the `SslRef`.
2732 //
2733 // SAFETY: the `Handshake` object must not be drop()ed, otherwise it
2734 // would free the underlying BoringSSL structure.
2735 let mut handshake =
2736 unsafe { tls::Handshake::from_ptr(ssl.as_ptr() as _) };
2737 handshake.set_quic_transport_params(¶ms, is_server)?;
2738
2739 // Avoid running `drop(handshake)` as that would free the underlying
2740 // handshake state.
2741 std::mem::forget(handshake);
2742
2743 Ok(())
2744 }
2745
    /// Processes QUIC packets received from the peer.
    ///
    /// On success the number of bytes processed from the input buffer is
    /// returned. On error the connection will be closed by calling [`close()`]
    /// with the appropriate error code.
    ///
    /// Coalesced packets will be processed as necessary.
    ///
    /// Note that the contents of the input buffer `buf` might be modified by
    /// this function due to, for example, in-place decryption.
    ///
    /// [`close()`]: struct.Connection.html#method.close
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut buf = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// loop {
    ///     let (read, from) = socket.recv_from(&mut buf).unwrap();
    ///
    ///     let recv_info = quiche::RecvInfo {
    ///         from,
    ///         to: local,
    ///     };
    ///
    ///     let read = match conn.recv(&mut buf[..read], recv_info) {
    ///         Ok(v) => v,
    ///
    ///         Err(e) => {
    ///             // An error occurred, handle it.
    ///             break;
    ///         },
    ///     };
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn recv(&mut self, buf: &mut [u8], info: RecvInfo) -> Result<usize> {
        let len = buf.len();

        if len == 0 {
            return Err(Error::BufferTooShort);
        }

        // Map the datagram's 4-tuple to a known path, if one exists.
        let recv_pid = self.paths.path_id_from_addrs(&(info.to, info.from));

        if let Some(recv_pid) = recv_pid {
            let recv_path = self.paths.get_mut(recv_pid)?;

            // Keep track of how many bytes we received from the client, so we
            // can limit bytes sent back before address validation, to a
            // multiple of this. The limit needs to be increased early on, so
            // that if there is an error there is enough credit to send a
            // CONNECTION_CLOSE.
            //
            // It doesn't matter if the packets received were valid or not, we
            // only need to track the total amount of bytes received.
            //
            // Note that we also need to limit the number of bytes we sent on a
            // path if we are not the host that initiated its usage.
            if self.is_server && !recv_path.verified_peer_address {
                recv_path.max_send_bytes += len * self.max_amplification_factor;
            }
        } else if !self.is_server {
            // If a client receives packets from an unknown server address,
            // the client MUST discard these packets.
            trace!(
                "{} client received packet from unknown address {:?}, dropping",
                self.trace_id,
                info,
            );

            // Report the whole datagram as consumed so the caller doesn't
            // retry it.
            return Ok(len);
        }

        let mut done = 0;
        let mut left = len;

        // Process coalesced packets.
        while left > 0 {
            let read = match self.recv_single(
                &mut buf[len - left..len],
                &info,
                recv_pid,
            ) {
                Ok(v) => v,

                Err(Error::Done) => {
                    // If the packet can't be processed or decrypted, check if
                    // it's a stateless reset.
                    if self.is_stateless_reset(&buf[len - left..len]) {
                        trace!("{} packet is a stateless reset", self.trace_id);

                        self.mark_closed();
                    }

                    // Treat the rest of the datagram as consumed, since
                    // parsing cannot continue past an unprocessable packet.
                    left
                },

                Err(e) => {
                    // In case of error processing the incoming packet, close
                    // the connection.
                    self.close(false, e.to_wire(), b"").ok();
                    return Err(e);
                },
            };

            done += read;
            left -= read;
        }

        // Even though the packet was previously "accepted", it
        // should be safe to forward the error, as it also comes
        // from the `recv()` method.
        self.process_undecrypted_0rtt_packets()?;

        Ok(done)
    }
2869
2870 fn process_undecrypted_0rtt_packets(&mut self) -> Result<()> {
2871 // Process previously undecryptable 0-RTT packets if the decryption key
2872 // is now available.
2873 if self.crypto_ctx[packet::Epoch::Application]
2874 .crypto_0rtt_open
2875 .is_some()
2876 {
2877 while let Some((mut pkt, info)) = self.undecryptable_pkts.pop_front()
2878 {
2879 if let Err(e) = self.recv(&mut pkt, info) {
2880 self.undecryptable_pkts.clear();
2881
2882 return Err(e);
2883 }
2884 }
2885 }
2886 Ok(())
2887 }
2888
2889 /// Returns true if a QUIC packet is a stateless reset.
2890 fn is_stateless_reset(&self, buf: &[u8]) -> bool {
2891 // If the packet is too small, then we just throw it away.
2892 let buf_len = buf.len();
2893 if buf_len < 21 {
2894 return false;
2895 }
2896
2897 // TODO: we should iterate over all active destination connection IDs
2898 // and check against their reset token.
2899 match self.peer_transport_params.stateless_reset_token {
2900 Some(token) => {
2901 let token_len = 16;
2902
2903 crypto::verify_slices_are_equal(
2904 &token.to_be_bytes(),
2905 &buf[buf_len - token_len..buf_len],
2906 )
2907 .is_ok()
2908 },
2909
2910 None => false,
2911 }
2912 }
2913
    /// Processes a single QUIC packet received from the peer.
    ///
    /// On success the number of bytes processed from the input buffer is
    /// returned. When the [`Done`] error is returned, processing of the
    /// remainder of the incoming UDP datagram should be interrupted.
    ///
    /// Note that a server might observe a new 4-tuple, preventing to
    /// know in advance to which path the incoming packet belongs to (`recv_pid`
    /// is `None`). As a client, packets from unknown 4-tuple are dropped
    /// beforehand (see `recv()`).
    ///
    /// On error, an error other than [`Done`] is returned.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    fn recv_single(
        &mut self, buf: &mut [u8], info: &RecvInfo, recv_pid: Option<usize>,
    ) -> Result<usize> {
        let now = Instant::now();

        // An empty buffer cannot contain a packet.
        if buf.is_empty() {
            return Err(Error::Done);
        }

        // Don't process packets once the connection is closed, draining, or
        // locally closing.
        if self.is_closed() || self.is_draining() {
            return Err(Error::Done);
        }

        let is_closing = self.local_error.is_some();

        if is_closing {
            return Err(Error::Done);
        }

        let buf_len = buf.len();

        let mut b = octets::OctetsMut::with_slice(buf);

        let mut hdr = Header::from_bytes(&mut b, self.source_id().len())
            .map_err(|e| {
                drop_pkt_on_err(
                    e,
                    self.recv_count,
                    self.is_server,
                    &self.trace_id,
                )
            })?;

        if hdr.ty == Type::VersionNegotiation {
            // Version negotiation packets can only be sent by the server.
            if self.is_server {
                return Err(Error::Done);
            }

            // Ignore duplicate version negotiation.
            if self.did_version_negotiation {
                return Err(Error::Done);
            }

            // Ignore version negotiation if any other packet has already been
            // successfully processed.
            if self.recv_count > 0 {
                return Err(Error::Done);
            }

            // The connection IDs must match our own, otherwise this packet
            // was not generated in response to our Initial.
            if hdr.dcid != self.source_id() {
                return Err(Error::Done);
            }

            if hdr.scid != self.destination_id() {
                return Err(Error::Done);
            }

            trace!("{} rx pkt {:?}", self.trace_id, hdr);

            let versions = hdr.versions.ok_or(Error::Done)?;

            // Ignore version negotiation if the version already selected is
            // listed.
            if versions.contains(&self.version) {
                return Err(Error::Done);
            }

            let supported_versions =
                versions.iter().filter(|&&v| version_is_supported(v));

            let mut found_version = false;

            for &v in supported_versions {
                found_version = true;

                // The final version takes precedence over draft ones.
                if v == PROTOCOL_VERSION_V1 {
                    self.version = v;
                    break;
                }

                self.version = cmp::max(self.version, v);
            }

            if !found_version {
                // We don't support any of the versions offered.
                //
                // While a man-in-the-middle attacker might be able to
                // inject a version negotiation packet that triggers this
                // failure, the window of opportunity is very small and
                // this error is quite useful for debugging, so don't just
                // ignore the packet.
                return Err(Error::UnknownVersion);
            }

            self.did_version_negotiation = true;

            // Derive Initial secrets based on the new version.
            let (aead_open, aead_seal) = crypto::derive_initial_key_material(
                &self.destination_id(),
                self.version,
                self.is_server,
                true,
            )?;

            // Reset connection state to force sending another Initial packet.
            self.drop_epoch_state(packet::Epoch::Initial, now);
            self.got_peer_conn_id = false;
            self.handshake.clear()?;

            self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
            self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);

            self.handshake
                .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);

            // Encode transport parameters again, as the new version might be
            // using a different format.
            self.encode_transport_params()?;

            return Err(Error::Done);
        }

        if hdr.ty == Type::Retry {
            // Retry packets can only be sent by the server.
            if self.is_server {
                return Err(Error::Done);
            }

            // Ignore duplicate retry.
            if self.did_retry {
                return Err(Error::Done);
            }

            // Check if Retry packet is valid.
            if packet::verify_retry_integrity(
                &b,
                &self.destination_id(),
                self.version,
            )
            .is_err()
            {
                return Err(Error::Done);
            }

            trace!("{} rx pkt {:?}", self.trace_id, hdr);

            self.token = hdr.token;
            self.did_retry = true;

            // Remember peer's new connection ID.
            self.odcid = Some(self.destination_id().into_owned());

            self.set_initial_dcid(
                hdr.scid.clone(),
                None,
                self.paths.get_active_path_id()?,
            )?;

            self.rscid = Some(self.destination_id().into_owned());

            // Derive Initial secrets using the new connection ID.
            let (aead_open, aead_seal) = crypto::derive_initial_key_material(
                &hdr.scid,
                self.version,
                self.is_server,
                true,
            )?;

            // Reset connection state to force sending another Initial packet.
            self.drop_epoch_state(packet::Epoch::Initial, now);
            self.got_peer_conn_id = false;
            self.handshake.clear()?;

            self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
            self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);

            return Err(Error::Done);
        }

        // On the server, lock in the version carried by the first packet.
        if self.is_server && !self.did_version_negotiation {
            if !version_is_supported(hdr.version) {
                return Err(Error::UnknownVersion);
            }

            self.version = hdr.version;
            self.did_version_negotiation = true;

            self.handshake
                .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);

            // Encode transport parameters again, as the new version might be
            // using a different format.
            self.encode_transport_params()?;
        }

        if hdr.ty != Type::Short && hdr.version != self.version {
            // At this point version negotiation was already performed, so
            // ignore packets that don't match the connection's version.
            return Err(Error::Done);
        }

        // Long header packets have an explicit payload length, but short
        // packets don't so just use the remaining capacity in the buffer.
        let payload_len = if hdr.ty == Type::Short {
            b.cap()
        } else {
            b.get_varint().map_err(|e| {
                drop_pkt_on_err(
                    e.into(),
                    self.recv_count,
                    self.is_server,
                    &self.trace_id,
                )
            })? as usize
        };

        // Make sure the buffer is same or larger than an explicit
        // payload length.
        if payload_len > b.cap() {
            return Err(drop_pkt_on_err(
                Error::InvalidPacket,
                self.recv_count,
                self.is_server,
                &self.trace_id,
            ));
        }

        // Derive initial secrets on the server.
        if !self.derived_initial_secrets {
            let (aead_open, aead_seal) = crypto::derive_initial_key_material(
                &hdr.dcid,
                self.version,
                self.is_server,
                false,
            )?;

            self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
            self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);

            self.derived_initial_secrets = true;
        }

        // Select packet number space epoch based on the received packet's type.
        let epoch = hdr.ty.to_epoch()?;

        // Select AEAD context used to open incoming packet.
        let aead = if hdr.ty == Type::ZeroRTT {
            // Only use 0-RTT key if incoming packet is 0-RTT.
            self.crypto_ctx[epoch].crypto_0rtt_open.as_ref()
        } else {
            // Otherwise use the packet number space's main key.
            self.crypto_ctx[epoch].crypto_open.as_ref()
        };

        // Finally, discard packet if no usable key is available.
        let mut aead = match aead {
            Some(v) => v,

            None => {
                if hdr.ty == Type::ZeroRTT &&
                    self.undecryptable_pkts.len() < MAX_UNDECRYPTABLE_PACKETS &&
                    !self.is_established()
                {
                    // Buffer 0-RTT packets when the required read key is not
                    // available yet, and process them later.
                    //
                    // TODO: in the future we might want to buffer other types
                    // of undecryptable packets as well.
                    let pkt_len = b.off() + payload_len;
                    let pkt = (b.buf()[..pkt_len]).to_vec();

                    self.undecryptable_pkts.push_back((pkt, *info));
                    return Ok(pkt_len);
                }

                let e = drop_pkt_on_err(
                    Error::CryptoFail,
                    self.recv_count,
                    self.is_server,
                    &self.trace_id,
                );

                return Err(e);
            },
        };

        // The AEAD tag length is accounted for when computing how many bytes
        // of the buffer this packet consumed (see `read` below).
        let aead_tag_len = aead.alg().tag_len();

        packet::decrypt_hdr(&mut b, &mut hdr, aead).map_err(|e| {
            drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
        })?;

        let pn = packet::decode_pkt_num(
            self.pkt_num_spaces[epoch].largest_rx_pkt_num,
            hdr.pkt_num,
            hdr.pkt_num_len,
        );

        let pn_len = hdr.pkt_num_len;

        trace!(
            "{} rx pkt {:?} len={} pn={} {}",
            self.trace_id,
            hdr,
            payload_len,
            pn,
            AddrTupleFmt(info.from, info.to)
        );

        #[cfg(feature = "qlog")]
        let mut qlog_frames = vec![];

        // Check for key update.
        let mut aead_next = None;

        if self.handshake_confirmed &&
            hdr.ty != Type::ZeroRTT &&
            hdr.key_phase != self.key_phase
        {
            // Check if this packet arrived before key update.
            if let Some(key_update) = self.crypto_ctx[epoch]
                .key_update
                .as_ref()
                .and_then(|key_update| {
                    (pn < key_update.pn_on_update).then_some(key_update)
                })
            {
                // Packet from before the update: open it with the previous
                // key that was retained for this purpose.
                aead = &key_update.crypto_open;
            } else {
                trace!("{} peer-initiated key update", self.trace_id);

                aead_next = Some((
                    self.crypto_ctx[epoch]
                        .crypto_open
                        .as_ref()
                        .unwrap()
                        .derive_next_packet_key()?,
                    self.crypto_ctx[epoch]
                        .crypto_seal
                        .as_ref()
                        .unwrap()
                        .derive_next_packet_key()?,
                ));

                // `aead_next` is always `Some()` at this point, so the `unwrap()`
                // will never fail.
                aead = &aead_next.as_ref().unwrap().0;
            }
        }

        let mut payload = packet::decrypt_pkt(
            &mut b,
            pn,
            pn_len,
            payload_len,
            aead,
        )
        .map_err(|e| {
            drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
        })?;

        if self.pkt_num_spaces[epoch].recv_pkt_num.contains(pn) {
            trace!("{} ignored duplicate packet {}", self.trace_id, pn);
            return Err(Error::Done);
        }

        // Packets with no frames are invalid.
        if payload.cap() == 0 {
            return Err(Error::InvalidPacket);
        }

        // Now that we decrypted the packet, let's see if we can map it to an
        // existing path.
        let recv_pid = if hdr.ty == Type::Short && self.got_peer_conn_id {
            let pkt_dcid = ConnectionId::from_ref(&hdr.dcid);
            self.get_or_create_recv_path_id(recv_pid, &pkt_dcid, buf_len, info)?
        } else {
            // During handshake, we are on the initial path.
            self.paths.get_active_path_id()?
        };

        // The key update is verified once a packet is successfully decrypted
        // using the new keys.
        if let Some((open_next, seal_next)) = aead_next {
            if !self.crypto_ctx[epoch]
                .key_update
                .as_ref()
                .is_none_or(|prev| prev.update_acked)
            {
                // Peer has updated keys twice without awaiting confirmation.
                return Err(Error::KeyUpdate);
            }

            trace!("{} key update verified", self.trace_id);

            let _ = self.crypto_ctx[epoch].crypto_seal.replace(seal_next);

            let open_prev = self.crypto_ctx[epoch]
                .crypto_open
                .replace(open_next)
                .unwrap();

            let recv_path = self.paths.get_mut(recv_pid)?;

            // Retain the previous read key so reordered packets from before
            // the update can still be decrypted until the timer fires.
            self.crypto_ctx[epoch].key_update = Some(packet::KeyUpdate {
                crypto_open: open_prev,
                pn_on_update: pn,
                update_acked: false,
                timer: now + (recv_path.recovery.pto() * 3),
            });

            self.key_phase = !self.key_phase;

            qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
                let trigger = Some(
                    qlog::events::quic::KeyUpdateOrRetiredTrigger::RemoteUpdate,
                );

                let ev_data_client =
                    EventData::QuicKeyUpdated(qlog::events::quic::KeyUpdated {
                        key_type: qlog::events::quic::KeyType::Client1RttSecret,
                        trigger: trigger.clone(),
                        ..Default::default()
                    });

                q.add_event_data_with_instant(ev_data_client, now).ok();

                let ev_data_server =
                    EventData::QuicKeyUpdated(qlog::events::quic::KeyUpdated {
                        key_type: qlog::events::quic::KeyType::Server1RttSecret,
                        trigger,
                        ..Default::default()
                    });

                q.add_event_data_with_instant(ev_data_server, now).ok();
            });
        }

        if !self.is_server && !self.got_peer_conn_id {
            if self.odcid.is_none() {
                self.odcid = Some(self.destination_id().into_owned());
            }

            // Replace the randomly generated destination connection ID with
            // the one supplied by the server.
            self.set_initial_dcid(
                hdr.scid.clone(),
                self.peer_transport_params.stateless_reset_token,
                recv_pid,
            )?;

            self.got_peer_conn_id = true;
        }

        if self.is_server && !self.got_peer_conn_id {
            self.set_initial_dcid(hdr.scid.clone(), None, recv_pid)?;

            if !self.did_retry {
                self.local_transport_params
                    .original_destination_connection_id =
                    Some(hdr.dcid.to_vec().into());

                self.encode_transport_params()?;
            }

            self.got_peer_conn_id = true;
        }

        // To avoid sending an ACK in response to an ACK-only packet, we need
        // to keep track of whether this packet contains any frame other than
        // ACK and PADDING.
        let mut ack_elicited = false;

        // Process packet payload. If a frame cannot be processed, store the
        // error and stop further packet processing.
        let mut frame_processing_err = None;

        // To know if the peer migrated the connection, we need to keep track
        // whether this is a non-probing packet.
        let mut probing = true;

        // Process packet payload.
        while payload.cap() > 0 {
            let frame = frame::Frame::from_bytes(&mut payload, hdr.ty)?;

            qlog_with_type!(QLOG_PACKET_RX, self.qlog, _q, {
                qlog_frames.push(frame.to_qlog());
            });

            if frame.ack_eliciting() {
                ack_elicited = true;
            }

            if !frame.probing() {
                probing = false;
            }

            if let Err(e) = self.process_frame(frame, &hdr, recv_pid, epoch, now)
            {
                frame_processing_err = Some(e);
                break;
            }
        }

        qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
            let packet_size = b.len();

            let qlog_pkt_hdr = qlog::events::quic::PacketHeader::with_type(
                hdr.ty.to_qlog(),
                Some(pn),
                Some(hdr.version),
                Some(&hdr.scid),
                Some(&hdr.dcid),
            );

            let qlog_raw_info = RawInfo {
                length: Some(packet_size as u64),
                payload_length: Some(payload_len as u64),
                data: None,
            };

            let ev_data = EventData::QuicPacketReceived(
                qlog::events::quic::PacketReceived {
                    header: qlog_pkt_hdr,
                    frames: Some(qlog_frames),
                    raw: Some(qlog_raw_info),
                    ..Default::default()
                },
            );

            q.add_event_data_with_instant(ev_data, now).ok();
        });

        qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
            let recv_path = self.paths.get_mut(recv_pid)?;
            recv_path.recovery.maybe_qlog(q, now);
        });

        if let Some(e) = frame_processing_err {
            // Any frame error is terminal, so now just return.
            return Err(e);
        }

        // Only log the remote transport parameters once the connection is
        // established (i.e. after frames have been fully parsed) and only
        // once per connection.
        if self.is_established() {
            qlog_with_type!(QLOG_PARAMS_SET, self.qlog, q, {
                if !self.qlog.logged_peer_params {
                    let ev_data = self.peer_transport_params.to_qlog(
                        TransportInitiator::Remote,
                        self.handshake.cipher(),
                    );

                    q.add_event_data_with_instant(ev_data, now).ok();

                    self.qlog.logged_peer_params = true;
                }
            });
        }

        // Process acked frames. Note that several packets from several paths
        // might have been acked by the received packet.
        for (_, p) in self.paths.iter_mut() {
            while let Some(acked) = p.recovery.next_acked_frame(epoch) {
                match acked {
                    frame::Frame::Ping {
                        mtu_probe: Some(mtu_probe),
                    } => {
                        if let Some(pmtud) = p.pmtud.as_mut() {
                            trace!(
                                "{} pmtud probe acked; probe size {:?}",
                                self.trace_id,
                                mtu_probe
                            );

                            // Ensure the probe is within the supported MTU range
                            // before updating the max datagram size
                            if let Some(current_mtu) =
                                pmtud.successful_probe(mtu_probe)
                            {
                                qlog_with_type!(
                                    EventType::QuicEventType(
                                        QuicEventType::MtuUpdated
                                    ),
                                    self.qlog,
                                    q,
                                    {
                                        let pmtu_data = EventData::QuicMtuUpdated(
                                            qlog::events::quic::MtuUpdated {
                                                old: Some(
                                                    p.recovery.max_datagram_size()
                                                        as u32,
                                                ),
                                                new: current_mtu as u32,
                                                done: Some(true),
                                            },
                                        );

                                        q.add_event_data_with_instant(
                                            pmtu_data, now,
                                        )
                                        .ok();
                                    }
                                );

                                p.recovery
                                    .pmtud_update_max_datagram_size(current_mtu);
                            }
                        }
                    },

                    frame::Frame::ACK { ranges, .. } => {
                        // Stop acknowledging packets less than or equal to the
                        // largest acknowledged in the sent ACK frame that, in
                        // turn, got acked.
                        if let Some(largest_acked) = ranges.last() {
                            self.pkt_num_spaces[epoch]
                                .recv_pkt_need_ack
                                .remove_until(largest_acked);
                        }
                    },

                    frame::Frame::CryptoHeader { offset, length } => {
                        self.crypto_ctx[epoch]
                            .crypto_stream
                            .send
                            .ack_and_drop(offset, length);
                    },

                    frame::Frame::StreamHeader {
                        stream_id,
                        offset,
                        length,
                        ..
                    } => {
                        // Update tx_buffered and emit qlog before checking if the
                        // stream still exists. The client does need to ACK
                        // frames that were received after the client sends a
                        // ResetStream.
                        self.tx_buffered =
                            self.tx_buffered.saturating_sub(length);

                        qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
                            let ev_data = EventData::QuicStreamDataMoved(
                                qlog::events::quic::StreamDataMoved {
                                    stream_id: Some(stream_id),
                                    offset: Some(offset),
                                    raw: Some(RawInfo {
                                        length: Some(length as u64),
                                        ..Default::default()
                                    }),
                                    from: Some(DataRecipient::Transport),
                                    to: Some(DataRecipient::Dropped),
                                    ..Default::default()
                                },
                            );

                            q.add_event_data_with_instant(ev_data, now).ok();
                        });

                        let stream = match self.streams.get_mut(stream_id) {
                            Some(v) => v,

                            None => continue,
                        };

                        stream.send.ack_and_drop(offset, length);

                        let priority_key = Arc::clone(&stream.priority_key);

                        // Only collect the stream if it is complete and not
                        // readable or writable.
                        //
                        // If it is readable, it will get collected when
                        // stream_recv() is next used.
                        //
                        // If it is writable, it might mean that the stream
                        // has been stopped by the peer (i.e. a STOP_SENDING
                        // frame is received), in which case before collecting
                        // the stream we will need to propagate the
                        // `StreamStopped` error to the application. It will
                        // instead get collected when one of stream_capacity(),
                        // stream_writable(), stream_send(), ... is next called.
                        //
                        // Note that we can't use `is_writable()` here because
                        // it returns false if the stream is stopped. Instead,
                        // since the stream is marked as writable when a
                        // STOP_SENDING frame is received, we check the writable
                        // queue directly instead.
                        let is_writable = priority_key.writable.is_linked() &&
                            // Ensure that the stream is actually stopped.
                            stream.send.is_stopped();

                        let is_complete = stream.is_complete();
                        let is_readable = stream.is_readable();

                        if is_complete && !is_readable && !is_writable {
                            let local = stream.local;
                            self.streams.collect(stream_id, local);
                        }
                    },

                    frame::Frame::HandshakeDone => {
                        // Explicitly set this to true, so that if the frame was
                        // already scheduled for retransmission, it is aborted.
                        self.handshake_done_sent = true;

                        self.handshake_done_acked = true;
                    },

                    frame::Frame::ResetStream { stream_id, .. } => {
                        let stream = match self.streams.get_mut(stream_id) {
                            Some(v) => v,

                            None => continue,
                        };

                        let priority_key = Arc::clone(&stream.priority_key);

                        // Only collect the stream if it is complete and not
                        // readable or writable.
                        //
                        // If it is readable, it will get collected when
                        // stream_recv() is next used.
                        //
                        // If it is writable, it might mean that the stream
                        // has been stopped by the peer (i.e. a STOP_SENDING
                        // frame is received), in which case before collecting
                        // the stream we will need to propagate the
                        // `StreamStopped` error to the application. It will
                        // instead get collected when one of stream_capacity(),
                        // stream_writable(), stream_send(), ... is next called.
                        //
                        // Note that we can't use `is_writable()` here because
                        // it returns false if the stream is stopped. Instead,
                        // since the stream is marked as writable when a
                        // STOP_SENDING frame is received, we check the writable
                        // queue directly instead.
                        let is_writable = priority_key.writable.is_linked() &&
                            // Ensure that the stream is actually stopped.
                            stream.send.is_stopped();

                        let is_complete = stream.is_complete();
                        let is_readable = stream.is_readable();

                        if is_complete && !is_readable && !is_writable {
                            let local = stream.local;
                            self.streams.collect(stream_id, local);
                        }
                    },

                    _ => (),
                }
            }
        }

        // Now that we processed all the frames, if there is a path that has no
        // Destination CID, try to allocate one.
        let no_dcid = self
            .paths
            .iter_mut()
            .filter(|(_, p)| p.active_dcid_seq.is_none());

        for (pid, p) in no_dcid {
            if self.ids.zero_length_dcid() {
                p.active_dcid_seq = Some(0);
                continue;
            }

            let dcid_seq = match self.ids.lowest_available_dcid_seq() {
                Some(seq) => seq,
                None => break,
            };

            self.ids.link_dcid_to_path_id(dcid_seq, pid)?;

            p.active_dcid_seq = Some(dcid_seq);
        }

        // We only record the time of arrival of the largest packet number
        // that still needs to be acked, to be used for ACK delay calculation.
        if self.pkt_num_spaces[epoch].recv_pkt_need_ack.last() < Some(pn) {
            self.pkt_num_spaces[epoch].largest_rx_pkt_time = now;
        }

        self.pkt_num_spaces[epoch].recv_pkt_num.insert(pn);

        self.pkt_num_spaces[epoch].recv_pkt_need_ack.push_item(pn);

        self.pkt_num_spaces[epoch].ack_elicited =
            cmp::max(self.pkt_num_spaces[epoch].ack_elicited, ack_elicited);

        self.pkt_num_spaces[epoch].largest_rx_pkt_num =
            cmp::max(self.pkt_num_spaces[epoch].largest_rx_pkt_num, pn);

        if !probing {
            self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num = cmp::max(
                self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num,
                pn,
            );

            // Did the peer migrate to another path?
            let active_path_id = self.paths.get_active_path_id()?;

            if self.is_server &&
                recv_pid != active_path_id &&
                self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num == pn
            {
                self.on_peer_migrated(recv_pid, self.disable_dcid_reuse, now)?;
            }
        }

        // Receiving a packet re-arms the idle timeout.
        if let Some(idle_timeout) = self.idle_timeout() {
            self.idle_timer = Some(now + idle_timeout);
        }

        // Update send capacity.
        self.update_tx_cap();

        self.recv_count += 1;
        self.paths.get_mut(recv_pid)?.recv_count += 1;

        // Total bytes consumed from the buffer, including the AEAD tag.
        let read = b.off() + aead_tag_len;

        self.recv_bytes += read as u64;
        self.paths.get_mut(recv_pid)?.recv_bytes += read as u64;

        // A Handshake packet has been received from the client and has been
        // successfully processed, so we can drop the initial state and consider
        // the client's address to be verified.
        if self.is_server && hdr.ty == Type::Handshake {
            self.drop_epoch_state(packet::Epoch::Initial, now);

            self.paths.get_mut(recv_pid)?.verified_peer_address = true;
        }

        // NOTE(review): cleared on every successfully processed packet —
        // appears to re-arm sending of ack-eliciting packets; confirm against
        // the send-side logic.
        self.ack_eliciting_sent = false;

        Ok(read)
    }
3771
3772 /// Writes a single QUIC packet to be sent to the peer.
3773 ///
3774 /// On success the number of bytes written to the output buffer is
3775 /// returned, or [`Done`] if there was nothing to write.
3776 ///
3777 /// The application should call `send()` multiple times until [`Done`] is
3778 /// returned, indicating that there are no more packets to send. It is
3779 /// recommended that `send()` be called in the following cases:
3780 ///
3781 /// * When the application receives QUIC packets from the peer (that is,
3782 /// any time [`recv()`] is also called).
3783 ///
3784 /// * When the connection timer expires (that is, any time [`on_timeout()`]
3785 /// is also called).
3786 ///
3787 /// * When the application sends data to the peer (for example, any time
3788 /// [`stream_send()`] or [`stream_shutdown()`] are called).
3789 ///
3790 /// * When the application receives data from the peer (for example any
3791 /// time [`stream_recv()`] is called).
3792 ///
3793 /// Once [`is_draining()`] returns `true`, it is no longer necessary to call
3794 /// `send()` and all calls will return [`Done`].
3795 ///
3796 /// [`Done`]: enum.Error.html#variant.Done
3797 /// [`recv()`]: struct.Connection.html#method.recv
3798 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
3799 /// [`stream_send()`]: struct.Connection.html#method.stream_send
3800 /// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
3801 /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
3802 /// [`is_draining()`]: struct.Connection.html#method.is_draining
3803 ///
3804 /// ## Examples:
3805 ///
3806 /// ```no_run
3807 /// # let mut out = [0; 512];
3808 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
3809 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
3810 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
3811 /// # let peer = "127.0.0.1:1234".parse().unwrap();
3812 /// # let local = socket.local_addr().unwrap();
3813 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
3814 /// loop {
3815 /// let (write, send_info) = match conn.send(&mut out) {
3816 /// Ok(v) => v,
3817 ///
3818 /// Err(quiche::Error::Done) => {
3819 /// // Done writing.
3820 /// break;
3821 /// },
3822 ///
3823 /// Err(e) => {
3824 /// // An error occurred, handle it.
3825 /// break;
3826 /// },
3827 /// };
3828 ///
3829 /// socket.send_to(&out[..write], &send_info.to).unwrap();
3830 /// }
3831 /// # Ok::<(), quiche::Error>(())
3832 /// ```
    pub fn send(&mut self, out: &mut [u8]) -> Result<(usize, SendInfo)> {
        // Delegate to the path-aware variant with both addresses unset,
        // i.e. all available paths are considered for sending.
        self.send_on_path(out, None, None)
    }
3836
3837 /// Writes a single QUIC packet to be sent to the peer from the specified
3838 /// local address `from` to the destination address `to`.
3839 ///
3840 /// The behavior of this method differs depending on the value of the `from`
3841 /// and `to` parameters:
3842 ///
    /// * If both are `Some`, then the method only considers the 4-tuple
3844 /// (`from`, `to`). Application can monitor the 4-tuple availability,
3845 /// either by monitoring [`path_event_next()`] events or by relying on
3846 /// the [`paths_iter()`] method. If the provided 4-tuple does not exist
3847 /// on the connection (anymore), it returns an [`InvalidState`].
3848 ///
3849 /// * If `from` is `Some` and `to` is `None`, then the method only
3850 /// considers sending packets on paths having `from` as local address.
3851 ///
3852 /// * If `to` is `Some` and `from` is `None`, then the method only
3853 /// considers sending packets on paths having `to` as peer address.
3854 ///
3855 /// * If both are `None`, all available paths are considered.
3856 ///
3857 /// On success the number of bytes written to the output buffer is
3858 /// returned, or [`Done`] if there was nothing to write.
3859 ///
3860 /// The application should call `send_on_path()` multiple times until
3861 /// [`Done`] is returned, indicating that there are no more packets to
3862 /// send. It is recommended that `send_on_path()` be called in the
3863 /// following cases:
3864 ///
3865 /// * When the application receives QUIC packets from the peer (that is,
3866 /// any time [`recv()`] is also called).
3867 ///
3868 /// * When the connection timer expires (that is, any time [`on_timeout()`]
3869 /// is also called).
3870 ///
    /// * When the application sends data to the peer (for example, any time
3872 /// [`stream_send()`] or [`stream_shutdown()`] are called).
3873 ///
3874 /// * When the application receives data from the peer (for example any
3875 /// time [`stream_recv()`] is called).
3876 ///
3877 /// Once [`is_draining()`] returns `true`, it is no longer necessary to call
3878 /// `send_on_path()` and all calls will return [`Done`].
3879 ///
3880 /// [`Done`]: enum.Error.html#variant.Done
3881 /// [`InvalidState`]: enum.Error.html#InvalidState
3882 /// [`recv()`]: struct.Connection.html#method.recv
3883 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
3884 /// [`stream_send()`]: struct.Connection.html#method.stream_send
3885 /// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
3886 /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
3887 /// [`path_event_next()`]: struct.Connection.html#method.path_event_next
3888 /// [`paths_iter()`]: struct.Connection.html#method.paths_iter
3889 /// [`is_draining()`]: struct.Connection.html#method.is_draining
3890 ///
3891 /// ## Examples:
3892 ///
3893 /// ```no_run
3894 /// # let mut out = [0; 512];
3895 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
3896 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
3897 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
3898 /// # let peer = "127.0.0.1:1234".parse().unwrap();
3899 /// # let local = socket.local_addr().unwrap();
3900 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
3901 /// loop {
3902 /// let (write, send_info) = match conn.send_on_path(&mut out, Some(local), Some(peer)) {
3903 /// Ok(v) => v,
3904 ///
3905 /// Err(quiche::Error::Done) => {
3906 /// // Done writing.
3907 /// break;
3908 /// },
3909 ///
3910 /// Err(e) => {
3911 /// // An error occurred, handle it.
3912 /// break;
3913 /// },
3914 /// };
3915 ///
3916 /// socket.send_to(&out[..write], &send_info.to).unwrap();
3917 /// }
3918 /// # Ok::<(), quiche::Error>(())
3919 /// ```
    pub fn send_on_path(
        &mut self, out: &mut [u8], from: Option<SocketAddr>,
        to: Option<SocketAddr>,
    ) -> Result<(usize, SendInfo)> {
        // An empty output buffer can't hold any packet at all.
        if out.is_empty() {
            return Err(Error::BufferTooShort);
        }

        // Closed or draining connections must not send any more packets.
        if self.is_closed() || self.is_draining() {
            return Err(Error::Done);
        }

        // Single time snapshot used for all timing decisions in this call
        // (handshake progress, pacing, padding, etc.).
        let now = Instant::now();

        // Drive the TLS handshake forward, unless we are already in the
        // process of closing with a local error.
        if self.local_error.is_none() {
            self.do_handshake(now)?;
        }

        // Forwarding the error value here could confuse
        // applications, as they may not expect getting a `recv()`
        // error when calling `send()`.
        //
        // We simply fall-through to sending packets, which should
        // take care of terminating the connection as needed.
        let _ = self.process_undecrypted_0rtt_packets();

        // There's no point in trying to send a packet if the Initial secrets
        // have not been derived yet, so return early.
        if !self.derived_initial_secrets {
            return Err(Error::Done);
        }

        // Whether any packet written in this datagram was an Initial packet
        // (used for the minimum-size padding logic below).
        let mut has_initial = false;

        // Total number of bytes written into `out` so far.
        let mut done = 0;

        // Limit output packet size to respect the sender and receiver's
        // maximum UDP payload size limit.
        let mut left = cmp::min(out.len(), self.max_send_udp_payload_size());

        // Select the path to send on. An explicitly provided 4-tuple must
        // match an existing path, otherwise the path is picked automatically
        // based on whichever of `from`/`to` is set.
        let send_pid = match (from, to) {
            (Some(f), Some(t)) => self
                .paths
                .path_id_from_addrs(&(f, t))
                .ok_or(Error::InvalidState)?,

            _ => self.get_send_path_id(from, to)?,
        };

        let send_path = self.paths.get_mut(send_pid)?;

        // Update max datagram size to allow path MTU discovery probe to be sent.
        if let Some(pmtud) = send_path.pmtud.as_mut() {
            if pmtud.should_probe() {
                // Probe-sized datagrams are only used once the handshake has
                // made enough progress; otherwise stick to the current MTU.
                let size = if self.handshake_confirmed || self.handshake_completed
                {
                    pmtud.get_probe_size()
                } else {
                    pmtud.get_current_mtu()
                };

                send_path.recovery.pmtud_update_max_datagram_size(size);

                left =
                    cmp::min(out.len(), send_path.recovery.max_datagram_size());
            }
        }

        // Limit data sent by the server based on the amount of data received
        // from the client before its address is validated.
        if !send_path.verified_peer_address && self.is_server {
            left = cmp::min(left, send_path.max_send_bytes);
        }

        // Generate coalesced packets.
        while left > 0 {
            let (ty, written) = match self.send_single(
                &mut out[done..done + left],
                send_pid,
                has_initial,
                now,
            ) {
                Ok(v) => v,

                // Out of space or nothing to send: stop coalescing, but keep
                // whatever was already written.
                Err(Error::BufferTooShort) | Err(Error::Done) => break,

                Err(e) => return Err(e),
            };

            done += written;
            left -= written;

            match ty {
                Type::Initial => has_initial = true,

                // No more packets can be coalesced after a 1-RTT.
                Type::Short => break,

                _ => (),
            };

            // When sending multiple PTO probes, don't coalesce them together,
            // so they are sent on separate UDP datagrams.
            if let Ok(epoch) = ty.to_epoch() {
                if self.paths.get_mut(send_pid)?.recovery.loss_probes(epoch) > 0 {
                    break;
                }
            }

            // Don't coalesce packets that must go on different paths.
            if !(from.is_some() && to.is_some()) &&
                self.get_send_path_id(from, to)? != send_pid
            {
                break;
            }
        }

        // Nothing was written at all: record the current tx_data level and
        // report that there is nothing to send.
        if done == 0 {
            self.last_tx_data = self.tx_data;

            return Err(Error::Done);
        }

        // Datagrams carrying an Initial packet are padded up to the minimum
        // client Initial size, as far as the remaining budget allows.
        if has_initial && left > 0 && done < MIN_CLIENT_INITIAL_LEN {
            let pad_len = cmp::min(left, MIN_CLIENT_INITIAL_LEN - done);

            // Fill padding area with null bytes, to avoid leaking information
            // in case the application reuses the packet buffer.
            out[done..done + pad_len].fill(0);

            done += pad_len;
        }

        let send_path = self.paths.get(send_pid)?;

        // Tell the application where the datagram should be sent from/to and
        // at what time (for pacing).
        let info = SendInfo {
            from: send_path.local_addr(),
            to: send_path.peer_addr(),

            at: send_path.recovery.get_packet_send_time(now),
        };

        Ok((done, info))
    }
4064
4065 fn send_single(
4066 &mut self, out: &mut [u8], send_pid: usize, has_initial: bool,
4067 now: Instant,
4068 ) -> Result<(Type, usize)> {
4069 if out.is_empty() {
4070 return Err(Error::BufferTooShort);
4071 }
4072
4073 if self.is_draining() {
4074 return Err(Error::Done);
4075 }
4076
4077 let is_closing = self.local_error.is_some();
4078
4079 let out_len = out.len();
4080
4081 let mut b = octets::OctetsMut::with_slice(out);
4082
4083 let pkt_type = self.write_pkt_type(send_pid)?;
4084
4085 let max_dgram_len = if !self.dgram_send_queue.is_empty() {
4086 self.dgram_max_writable_len()
4087 } else {
4088 None
4089 };
4090
4091 let epoch = pkt_type.to_epoch()?;
4092 let pkt_space = &mut self.pkt_num_spaces[epoch];
4093 let crypto_ctx = &mut self.crypto_ctx[epoch];
4094
4095 // Process lost frames. There might be several paths having lost frames.
4096 for (_, p) in self.paths.iter_mut() {
4097 while let Some(lost) = p.recovery.next_lost_frame(epoch) {
4098 match lost {
4099 frame::Frame::CryptoHeader { offset, length } => {
4100 crypto_ctx.crypto_stream.send.retransmit(offset, length);
4101
4102 self.stream_retrans_bytes += length as u64;
4103 p.stream_retrans_bytes += length as u64;
4104
4105 self.retrans_count += 1;
4106 p.retrans_count += 1;
4107 },
4108
4109 frame::Frame::StreamHeader {
4110 stream_id,
4111 offset,
4112 length,
4113 fin,
4114 } => {
4115 let stream = match self.streams.get_mut(stream_id) {
4116 // Only retransmit data if the stream is not closed
4117 // or stopped.
4118 Some(v) if !v.send.is_stopped() => v,
4119
4120 // Data on a closed stream will not be retransmitted
4121 // or acked after it is declared lost, so update
4122 // tx_buffered and qlog.
4123 _ => {
4124 self.tx_buffered =
4125 self.tx_buffered.saturating_sub(length);
4126
4127 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
4128 let ev_data = EventData::QuicStreamDataMoved(
4129 qlog::events::quic::StreamDataMoved {
4130 stream_id: Some(stream_id),
4131 offset: Some(offset),
4132 raw: Some(RawInfo {
4133 length: Some(length as u64),
4134 ..Default::default()
4135 }),
4136 from: Some(DataRecipient::Transport),
4137 to: Some(DataRecipient::Dropped),
4138 ..Default::default()
4139 },
4140 );
4141
4142 q.add_event_data_with_instant(ev_data, now)
4143 .ok();
4144 });
4145
4146 continue;
4147 },
4148 };
4149
4150 let was_flushable = stream.is_flushable();
4151
4152 let empty_fin = length == 0 && fin;
4153
4154 stream.send.retransmit(offset, length);
4155
4156 // If the stream is now flushable push it to the
4157 // flushable queue, but only if it wasn't already
4158 // queued.
4159 //
4160 // Consider the stream flushable also when we are
4161 // sending a zero-length frame that has the fin flag
4162 // set.
4163 if (stream.is_flushable() || empty_fin) && !was_flushable
4164 {
4165 let priority_key = Arc::clone(&stream.priority_key);
4166 self.streams.insert_flushable(&priority_key);
4167 }
4168
4169 self.stream_retrans_bytes += length as u64;
4170 p.stream_retrans_bytes += length as u64;
4171
4172 self.retrans_count += 1;
4173 p.retrans_count += 1;
4174 },
4175
4176 frame::Frame::ACK { .. } => {
4177 pkt_space.ack_elicited = true;
4178 },
4179
4180 frame::Frame::ResetStream {
4181 stream_id,
4182 error_code,
4183 final_size,
4184 } => {
4185 self.streams
4186 .insert_reset(stream_id, error_code, final_size);
4187 },
4188
4189 frame::Frame::StopSending {
4190 stream_id,
4191 error_code,
4192 } =>
4193 // We only need to retransmit the STOP_SENDING frame if
4194 // the stream is still active and not FIN'd. Even if the
4195 // packet was lost, if the application has the final
4196 // size at this point there is no need to retransmit.
4197 if let Some(stream) = self.streams.get(stream_id) {
4198 if !stream.recv.is_fin() {
4199 self.streams
4200 .insert_stopped(stream_id, error_code);
4201 }
4202 },
4203
4204 // Retransmit HANDSHAKE_DONE only if it hasn't been acked at
4205 // least once already.
4206 frame::Frame::HandshakeDone if !self.handshake_done_acked => {
4207 self.handshake_done_sent = false;
4208 },
4209
4210 frame::Frame::MaxStreamData { stream_id, .. } => {
4211 if self.streams.get(stream_id).is_some() {
4212 self.streams.insert_almost_full(stream_id);
4213 }
4214 },
4215
4216 frame::Frame::MaxData { .. } => {
4217 self.should_send_max_data = true;
4218 },
4219
4220 frame::Frame::MaxStreamsUni { .. } => {
4221 self.should_send_max_streams_uni = true;
4222 },
4223
4224 frame::Frame::MaxStreamsBidi { .. } => {
4225 self.should_send_max_streams_bidi = true;
4226 },
4227
4228 // Retransmit STREAMS_BLOCKED frames if the frame with the
4229 // most recent limit is lost. These are informational
4230 // signals to the peer, reliably sending them
4231 // ensures the signal is used consistently and helps
4232 // debugging.
4233 frame::Frame::StreamsBlockedBidi { limit } => {
4234 self.streams_blocked_bidi_state
4235 .force_retransmit_sent_limit_eq(limit);
4236 },
4237
4238 frame::Frame::StreamsBlockedUni { limit } => {
4239 self.streams_blocked_uni_state
4240 .force_retransmit_sent_limit_eq(limit);
4241 },
4242
4243 frame::Frame::NewConnectionId { seq_num, .. } => {
4244 self.ids.mark_advertise_new_scid_seq(seq_num, true);
4245 },
4246
4247 frame::Frame::RetireConnectionId { seq_num } => {
4248 self.ids.mark_retire_dcid_seq(seq_num, true)?;
4249 },
4250
4251 frame::Frame::Ping {
4252 mtu_probe: Some(failed_probe),
4253 } =>
4254 if let Some(pmtud) = p.pmtud.as_mut() {
4255 trace!("pmtud probe dropped: {failed_probe}");
4256 pmtud.failed_probe(failed_probe);
4257 },
4258
4259 _ => (),
4260 }
4261 }
4262 }
4263 self.check_tx_buffered_invariant();
4264
4265 let is_app_limited = self.delivery_rate_check_if_app_limited();
4266 let n_paths = self.paths.len();
4267 let path = self.paths.get_mut(send_pid)?;
4268 let flow_control = &mut self.flow_control;
4269 let pkt_space = &mut self.pkt_num_spaces[epoch];
4270 let crypto_ctx = &mut self.crypto_ctx[epoch];
4271 let pkt_num_manager = &mut self.pkt_num_manager;
4272
4273 let mut left = if let Some(pmtud) = path.pmtud.as_mut() {
4274 // Limit output buffer size by estimated path MTU.
4275 cmp::min(pmtud.get_current_mtu(), b.cap())
4276 } else {
4277 b.cap()
4278 };
4279
4280 if pkt_num_manager.should_skip_pn(self.handshake_completed) {
4281 pkt_num_manager.set_skip_pn(Some(self.next_pkt_num));
4282 self.next_pkt_num += 1;
4283 };
4284 let pn = self.next_pkt_num;
4285
4286 let largest_acked_pkt =
4287 path.recovery.get_largest_acked_on_epoch(epoch).unwrap_or(0);
4288 let pn_len = packet::pkt_num_len(pn, largest_acked_pkt);
4289
4290 // The AEAD overhead at the current encryption level.
4291 let crypto_overhead = crypto_ctx.crypto_overhead().ok_or(Error::Done)?;
4292
4293 let dcid_seq = path.active_dcid_seq.ok_or(Error::OutOfIdentifiers)?;
4294
4295 let dcid =
4296 ConnectionId::from_ref(self.ids.get_dcid(dcid_seq)?.cid.as_ref());
4297
4298 let scid = if let Some(scid_seq) = path.active_scid_seq {
4299 ConnectionId::from_ref(self.ids.get_scid(scid_seq)?.cid.as_ref())
4300 } else if pkt_type == Type::Short {
4301 ConnectionId::default()
4302 } else {
4303 return Err(Error::InvalidState);
4304 };
4305
4306 let hdr = Header {
4307 ty: pkt_type,
4308
4309 version: self.version,
4310
4311 dcid,
4312 scid,
4313
4314 pkt_num: 0,
4315 pkt_num_len: pn_len,
4316
4317 // Only clone token for Initial packets, as other packets don't have
4318 // this field (Retry doesn't count, as it's not encoded as part of
4319 // this code path).
4320 token: if pkt_type == Type::Initial {
4321 self.token.clone()
4322 } else {
4323 None
4324 },
4325
4326 versions: None,
4327 key_phase: self.key_phase,
4328 };
4329
4330 hdr.to_bytes(&mut b)?;
4331
4332 let hdr_trace = if log::max_level() == log::LevelFilter::Trace {
4333 Some(format!("{hdr:?}"))
4334 } else {
4335 None
4336 };
4337
4338 let hdr_ty = hdr.ty;
4339
4340 #[cfg(feature = "qlog")]
4341 let qlog_pkt_hdr = self.qlog.streamer.as_ref().map(|_q| {
4342 qlog::events::quic::PacketHeader::with_type(
4343 hdr.ty.to_qlog(),
4344 Some(pn),
4345 Some(hdr.version),
4346 Some(&hdr.scid),
4347 Some(&hdr.dcid),
4348 )
4349 });
4350
4351 // Calculate the space required for the packet, including the header
4352 // the payload length, the packet number and the AEAD overhead.
4353 let mut overhead = b.off() + pn_len + crypto_overhead;
4354
4355 // We assume that the payload length, which is only present in long
4356 // header packets, can always be encoded with a 2-byte varint.
4357 if pkt_type != Type::Short {
4358 overhead += PAYLOAD_LENGTH_LEN;
4359 }
4360
4361 // Make sure we have enough space left for the packet overhead.
4362 match left.checked_sub(overhead) {
4363 Some(v) => left = v,
4364
4365 None => {
4366 // We can't send more because there isn't enough space available
4367 // in the output buffer.
4368 //
4369 // This usually happens when we try to send a new packet but
4370 // failed because cwnd is almost full. In such case app_limited
4371 // is set to false here to make cwnd grow when ACK is received.
4372 path.recovery.update_app_limited(false);
4373 return Err(Error::Done);
4374 },
4375 }
4376
4377 // Make sure there is enough space for the minimum payload length.
4378 if left < PAYLOAD_MIN_LEN {
4379 path.recovery.update_app_limited(false);
4380 return Err(Error::Done);
4381 }
4382
4383 let mut frames: SmallVec<[frame::Frame; 1]> = SmallVec::new();
4384
4385 let mut ack_eliciting = false;
4386 let mut in_flight = false;
4387 let mut is_pmtud_probe = false;
4388 let mut has_data = false;
4389
4390 // Whether or not we should explicitly elicit an ACK via PING frame if we
4391 // implicitly elicit one otherwise.
4392 let ack_elicit_required = path.recovery.should_elicit_ack(epoch);
4393
4394 let header_offset = b.off();
4395
4396 // Reserve space for payload length in advance. Since we don't yet know
4397 // what the final length will be, we reserve 2 bytes in all cases.
4398 //
4399 // Only long header packets have an explicit length field.
4400 if pkt_type != Type::Short {
4401 b.skip(PAYLOAD_LENGTH_LEN)?;
4402 }
4403
4404 packet::encode_pkt_num(pn, pn_len, &mut b)?;
4405
4406 let payload_offset = b.off();
4407
4408 let cwnd_available =
4409 path.recovery.cwnd_available().saturating_sub(overhead);
4410
4411 let left_before_packing_ack_frame = left;
4412
4413 // Create ACK frame.
4414 //
4415 // When we need to explicitly elicit an ACK via PING later, go ahead and
4416 // generate an ACK (if there's anything to ACK) since we're going to
4417 // send a packet with PING anyways, even if we haven't received anything
4418 // ACK eliciting.
4419 if pkt_space.recv_pkt_need_ack.len() > 0 &&
4420 (pkt_space.ack_elicited || ack_elicit_required) &&
4421 (!is_closing ||
4422 (pkt_type == Type::Handshake &&
4423 self.local_error
4424 .as_ref()
4425 .is_some_and(|le| le.is_app))) &&
4426 path.active()
4427 {
4428 #[cfg(not(feature = "fuzzing"))]
4429 let ack_delay = pkt_space.largest_rx_pkt_time.elapsed();
4430
4431 #[cfg(not(feature = "fuzzing"))]
4432 let ack_delay = ack_delay.as_micros() as u64 /
4433 2_u64
4434 .pow(self.local_transport_params.ack_delay_exponent as u32);
4435
4436 // pseudo-random reproducible ack delays when fuzzing
4437 #[cfg(feature = "fuzzing")]
4438 let ack_delay = rand::rand_u8() as u64 + 1;
4439
4440 let frame = frame::Frame::ACK {
4441 ack_delay,
4442 ranges: pkt_space.recv_pkt_need_ack.clone(),
4443 ecn_counts: None, // sending ECN is not supported at this time
4444 };
4445
4446 // When a PING frame needs to be sent, avoid sending the ACK if
4447 // there is not enough cwnd available for both (note that PING
4448 // frames are always 1 byte, so we just need to check that the
4449 // ACK's length is lower than cwnd).
4450 if pkt_space.ack_elicited || frame.wire_len() < cwnd_available {
4451 // ACK-only packets are not congestion controlled so ACKs must
4452 // be bundled considering the buffer capacity only, and not the
4453 // available cwnd.
4454 if push_frame_to_pkt!(b, frames, frame, left) {
4455 pkt_space.ack_elicited = false;
4456 }
4457 }
4458 }
4459
4460 // Limit output packet size by congestion window size.
4461 left = cmp::min(
4462 left,
4463 // Bytes consumed by ACK frames.
4464 cwnd_available.saturating_sub(left_before_packing_ack_frame - left),
4465 );
4466
4467 let mut challenge_data = None;
4468
4469 if pkt_type == Type::Short {
4470 // Create PMTUD probe.
4471 //
4472 // In order to send a PMTUD probe the current `left` value, which was
4473 // already limited by the current PMTU measure, needs to be ignored,
4474 // but the outgoing packet still needs to be limited by
4475 // the output buffer size, as well as the congestion
4476 // window.
4477 //
4478 // In addition, the PMTUD probe is only generated when the handshake
4479 // is confirmed, to avoid interfering with the handshake
4480 // (e.g. due to the anti-amplification limits).
4481 if let Ok(active_path) = self.paths.get_active_mut() {
4482 let should_probe_pmtu = active_path.should_send_pmtu_probe(
4483 self.handshake_confirmed,
4484 self.handshake_completed,
4485 out_len,
4486 is_closing,
4487 frames.is_empty(),
4488 );
4489
4490 if should_probe_pmtu {
4491 if let Some(pmtud) = active_path.pmtud.as_mut() {
4492 let probe_size = pmtud.get_probe_size();
4493 trace!(
4494 "{} sending pmtud probe pmtu_probe={} estimated_pmtu={}",
4495 self.trace_id,
4496 probe_size,
4497 pmtud.get_current_mtu(),
4498 );
4499
4500 left = probe_size;
4501
4502 match left.checked_sub(overhead) {
4503 Some(v) => left = v,
4504
4505 None => {
4506 // We can't send more because there isn't enough
4507 // space available in the output buffer.
4508 //
4509 // This usually happens when we try to send a new
4510 // packet but failed because cwnd is almost full.
4511 //
4512 // In such case app_limited is set to false here
4513 // to make cwnd grow when ACK is received.
4514 active_path.recovery.update_app_limited(false);
4515 return Err(Error::Done);
4516 },
4517 }
4518
4519 let frame = frame::Frame::Padding {
4520 len: probe_size - overhead - 1,
4521 };
4522
4523 if push_frame_to_pkt!(b, frames, frame, left) {
4524 let frame = frame::Frame::Ping {
4525 mtu_probe: Some(probe_size),
4526 };
4527
4528 if push_frame_to_pkt!(b, frames, frame, left) {
4529 ack_eliciting = true;
4530 in_flight = true;
4531 }
4532 }
4533
4534 // Reset probe flag after sending to prevent duplicate
4535 // probes in a single flight.
4536 pmtud.set_in_flight(true);
4537 is_pmtud_probe = true;
4538 }
4539 }
4540 }
4541
4542 let path = self.paths.get_mut(send_pid)?;
4543 // Create PATH_RESPONSE frame if needed.
4544 // We do not try to ensure that these are really sent.
4545 while let Some(challenge) = path.pop_received_challenge() {
4546 let frame = frame::Frame::PathResponse { data: challenge };
4547
4548 if push_frame_to_pkt!(b, frames, frame, left) {
4549 ack_eliciting = true;
4550 in_flight = true;
4551 } else {
4552 // If there are other pending PATH_RESPONSE, don't lose them
4553 // now.
4554 break;
4555 }
4556 }
4557
4558 // Create PATH_CHALLENGE frame if needed.
4559 if path.validation_requested() {
4560 // TODO: ensure that data is unique over paths.
4561 let data = rand::rand_u64().to_be_bytes();
4562
4563 let frame = frame::Frame::PathChallenge { data };
4564
4565 if push_frame_to_pkt!(b, frames, frame, left) {
4566 // Let's notify the path once we know the packet size.
4567 challenge_data = Some(data);
4568
4569 ack_eliciting = true;
4570 in_flight = true;
4571 }
4572 }
4573
4574 if let Some(key_update) = crypto_ctx.key_update.as_mut() {
4575 key_update.update_acked = true;
4576 }
4577 }
4578
4579 let path = self.paths.get_mut(send_pid)?;
4580
4581 if pkt_type == Type::Short && !is_closing {
4582 // Create NEW_CONNECTION_ID frames as needed.
4583 while let Some(seq_num) = self.ids.next_advertise_new_scid_seq() {
4584 let frame = self.ids.get_new_connection_id_frame_for(seq_num)?;
4585
4586 if push_frame_to_pkt!(b, frames, frame, left) {
4587 self.ids.mark_advertise_new_scid_seq(seq_num, false);
4588
4589 ack_eliciting = true;
4590 in_flight = true;
4591 } else {
4592 break;
4593 }
4594 }
4595 }
4596
4597 if pkt_type == Type::Short && !is_closing && path.active() {
4598 // Create HANDSHAKE_DONE frame.
4599 // self.should_send_handshake_done() but without the need to borrow
4600 if self.handshake_completed &&
4601 !self.handshake_done_sent &&
4602 self.is_server
4603 {
4604 let frame = frame::Frame::HandshakeDone;
4605
4606 if push_frame_to_pkt!(b, frames, frame, left) {
4607 self.handshake_done_sent = true;
4608
4609 ack_eliciting = true;
4610 in_flight = true;
4611 }
4612 }
4613
4614 // Create MAX_STREAMS_BIDI frame.
4615 if self.streams.should_update_max_streams_bidi() ||
4616 self.should_send_max_streams_bidi
4617 {
4618 let frame = frame::Frame::MaxStreamsBidi {
4619 max: self.streams.max_streams_bidi_next(),
4620 };
4621
4622 if push_frame_to_pkt!(b, frames, frame, left) {
4623 self.streams.update_max_streams_bidi();
4624 self.should_send_max_streams_bidi = false;
4625
4626 ack_eliciting = true;
4627 in_flight = true;
4628 }
4629 }
4630
4631 // Create MAX_STREAMS_UNI frame.
4632 if self.streams.should_update_max_streams_uni() ||
4633 self.should_send_max_streams_uni
4634 {
4635 let frame = frame::Frame::MaxStreamsUni {
4636 max: self.streams.max_streams_uni_next(),
4637 };
4638
4639 if push_frame_to_pkt!(b, frames, frame, left) {
4640 self.streams.update_max_streams_uni();
4641 self.should_send_max_streams_uni = false;
4642
4643 ack_eliciting = true;
4644 in_flight = true;
4645 }
4646 }
4647
4648 // Create DATA_BLOCKED frame.
4649 if let Some(limit) = self.blocked_limit {
4650 let frame = frame::Frame::DataBlocked { limit };
4651
4652 if push_frame_to_pkt!(b, frames, frame, left) {
4653 self.blocked_limit = None;
4654 self.data_blocked_sent_count =
4655 self.data_blocked_sent_count.saturating_add(1);
4656
4657 ack_eliciting = true;
4658 in_flight = true;
4659 }
4660 }
4661
4662 // Create STREAMS_BLOCKED (bidi) frame when the local endpoint has
4663 // exhausted the peer's bidirectional stream count limit.
4664 if self
4665 .streams_blocked_bidi_state
4666 .has_pending_stream_blocked_frame()
4667 {
4668 if let Some(limit) = self.streams_blocked_bidi_state.blocked_at {
4669 let frame = frame::Frame::StreamsBlockedBidi { limit };
4670
4671 if push_frame_to_pkt!(b, frames, frame, left) {
4672 // Record the limit we just notified the peer about so
4673 // that redundant frames for the same limit are
4674 // suppressed.
4675 self.streams_blocked_bidi_state.blocked_sent =
4676 Some(limit);
4677
4678 ack_eliciting = true;
4679 in_flight = true;
4680 }
4681 }
4682 }
4683
4684 // Create STREAMS_BLOCKED (uni) frame when the local endpoint has
4685 // exhausted the peer's unidirectional stream count limit.
4686 if self
4687 .streams_blocked_uni_state
4688 .has_pending_stream_blocked_frame()
4689 {
4690 if let Some(limit) = self.streams_blocked_uni_state.blocked_at {
4691 let frame = frame::Frame::StreamsBlockedUni { limit };
4692
4693 if push_frame_to_pkt!(b, frames, frame, left) {
4694 // Record the limit we just notified the peer about so
4695 // that redundant frames for the same limit are
4696 // suppressed.
4697 self.streams_blocked_uni_state.blocked_sent = Some(limit);
4698
4699 ack_eliciting = true;
4700 in_flight = true;
4701 }
4702 }
4703 }
4704
4705 // Create MAX_STREAM_DATA frames as needed.
4706 for stream_id in self.streams.almost_full() {
4707 let stream = match self.streams.get_mut(stream_id) {
4708 Some(v) => v,
4709
4710 None => {
4711 // The stream doesn't exist anymore, so remove it from
4712 // the almost full set.
4713 self.streams.remove_almost_full(stream_id);
4714 continue;
4715 },
4716 };
4717
4718 // Autotune the stream window size, but only if this is not a
4719 // retransmission (on a retransmit the stream will be in
                // `self.streams.almost_full()` but its `almost_full()`
                // method returns false).
4722 if stream.recv.almost_full() {
4723 stream.recv.autotune_window(now, path.recovery.rtt());
4724 }
4725
4726 let frame = frame::Frame::MaxStreamData {
4727 stream_id,
4728 max: stream.recv.max_data_next(),
4729 };
4730
4731 if push_frame_to_pkt!(b, frames, frame, left) {
4732 let recv_win = stream.recv.window();
4733
4734 stream.recv.update_max_data(now);
4735
4736 self.streams.remove_almost_full(stream_id);
4737
4738 ack_eliciting = true;
4739 in_flight = true;
4740
4741 // Make sure the connection window always has some
4742 // room compared to the stream window.
4743 flow_control.ensure_window_lower_bound(
4744 (recv_win as f64 * CONNECTION_WINDOW_FACTOR) as u64,
4745 );
4746 }
4747 }
4748
4749 // Create MAX_DATA frame as needed.
4750 if flow_control.should_update_max_data() &&
4751 flow_control.max_data() < flow_control.max_data_next()
4752 {
4753 // Autotune the connection window size. We only tune the window
4754 // if we are sending an "organic" update, not on retransmits.
4755 flow_control.autotune_window(now, path.recovery.rtt());
4756 self.should_send_max_data = true;
4757 }
4758
4759 if self.should_send_max_data {
4760 let frame = frame::Frame::MaxData {
4761 max: flow_control.max_data_next(),
4762 };
4763
4764 if push_frame_to_pkt!(b, frames, frame, left) {
4765 self.should_send_max_data = false;
4766
4767 // Commits the new max_rx_data limit.
4768 flow_control.update_max_data(now);
4769
4770 ack_eliciting = true;
4771 in_flight = true;
4772 }
4773 }
4774
4775 // Create STOP_SENDING frames as needed.
4776 for (stream_id, error_code) in self
4777 .streams
4778 .stopped()
4779 .map(|(&k, &v)| (k, v))
4780 .collect::<Vec<(u64, u64)>>()
4781 {
4782 let frame = frame::Frame::StopSending {
4783 stream_id,
4784 error_code,
4785 };
4786
4787 if push_frame_to_pkt!(b, frames, frame, left) {
4788 self.streams.remove_stopped(stream_id);
4789
4790 ack_eliciting = true;
4791 in_flight = true;
4792 }
4793 }
4794
4795 // Create RESET_STREAM frames as needed.
4796 for (stream_id, (error_code, final_size)) in self
4797 .streams
4798 .reset()
4799 .map(|(&k, &v)| (k, v))
4800 .collect::<Vec<(u64, (u64, u64))>>()
4801 {
4802 let frame = frame::Frame::ResetStream {
4803 stream_id,
4804 error_code,
4805 final_size,
4806 };
4807
4808 if push_frame_to_pkt!(b, frames, frame, left) {
4809 self.streams.remove_reset(stream_id);
4810
4811 ack_eliciting = true;
4812 in_flight = true;
4813 }
4814 }
4815
4816 // Create STREAM_DATA_BLOCKED frames as needed.
4817 for (stream_id, limit) in self
4818 .streams
4819 .blocked()
4820 .map(|(&k, &v)| (k, v))
4821 .collect::<Vec<(u64, u64)>>()
4822 {
4823 let frame = frame::Frame::StreamDataBlocked { stream_id, limit };
4824
4825 if push_frame_to_pkt!(b, frames, frame, left) {
4826 self.streams.remove_blocked(stream_id);
4827 self.stream_data_blocked_sent_count =
4828 self.stream_data_blocked_sent_count.saturating_add(1);
4829
4830 ack_eliciting = true;
4831 in_flight = true;
4832 }
4833 }
4834
4835 // Create RETIRE_CONNECTION_ID frames as needed.
4836 let retire_dcid_seqs = self.ids.retire_dcid_seqs();
4837
4838 for seq_num in retire_dcid_seqs {
4839 // The sequence number specified in a RETIRE_CONNECTION_ID frame
4840 // MUST NOT refer to the Destination Connection ID field of the
4841 // packet in which the frame is contained.
4842 let dcid_seq = path.active_dcid_seq.ok_or(Error::InvalidState)?;
4843
4844 if seq_num == dcid_seq {
4845 continue;
4846 }
4847
4848 let frame = frame::Frame::RetireConnectionId { seq_num };
4849
4850 if push_frame_to_pkt!(b, frames, frame, left) {
4851 self.ids.mark_retire_dcid_seq(seq_num, false)?;
4852
4853 ack_eliciting = true;
4854 in_flight = true;
4855 } else {
4856 break;
4857 }
4858 }
4859 }
4860
4861 // Create CONNECTION_CLOSE frame. Try to send this only on the active
4862 // path, unless it is the last one available.
4863 if path.active() || n_paths == 1 {
4864 if let Some(conn_err) = self.local_error.as_ref() {
4865 if conn_err.is_app {
4866 // Create ApplicationClose frame.
4867 if pkt_type == Type::Short {
4868 let frame = frame::Frame::ApplicationClose {
4869 error_code: conn_err.error_code,
4870 reason: conn_err.reason.clone(),
4871 };
4872
4873 if push_frame_to_pkt!(b, frames, frame, left) {
4874 let pto = path.recovery.pto();
4875 self.draining_timer = Some(now + (pto * 3));
4876
4877 ack_eliciting = true;
4878 in_flight = true;
4879 }
4880 }
4881 } else {
4882 // Create ConnectionClose frame.
4883 let frame = frame::Frame::ConnectionClose {
4884 error_code: conn_err.error_code,
4885 frame_type: 0,
4886 reason: conn_err.reason.clone(),
4887 };
4888
4889 if push_frame_to_pkt!(b, frames, frame, left) {
4890 let pto = path.recovery.pto();
4891 self.draining_timer = Some(now + (pto * 3));
4892
4893 ack_eliciting = true;
4894 in_flight = true;
4895 }
4896 }
4897 }
4898 }
4899
4900 // Create CRYPTO frame.
4901 if crypto_ctx.crypto_stream.is_flushable() &&
4902 left > frame::MAX_CRYPTO_OVERHEAD &&
4903 !is_closing &&
4904 path.active()
4905 {
4906 let crypto_off = crypto_ctx.crypto_stream.send.off_front();
4907
4908 // Encode the frame.
4909 //
4910 // Instead of creating a `frame::Frame` object, encode the frame
4911 // directly into the packet buffer.
4912 //
4913 // First we reserve some space in the output buffer for writing the
4914 // frame header (we assume the length field is always a 2-byte
4915 // varint as we don't know the value yet).
4916 //
4917 // Then we emit the data from the crypto stream's send buffer.
4918 //
4919 // Finally we go back and encode the frame header with the now
4920 // available information.
4921 let hdr_off = b.off();
4922 let hdr_len = 1 + // frame type
4923 octets::varint_len(crypto_off) + // offset
4924 2; // length, always encode as 2-byte varint
4925
4926 if let Some(max_len) = left.checked_sub(hdr_len) {
4927 let (mut crypto_hdr, mut crypto_payload) =
4928 b.split_at(hdr_off + hdr_len)?;
4929
4930 // Write stream data into the packet buffer.
4931 let (len, _) = crypto_ctx
4932 .crypto_stream
4933 .send
4934 .emit(&mut crypto_payload.as_mut()[..max_len])?;
4935
4936 // Encode the frame's header.
4937 //
4938 // Due to how `OctetsMut::split_at()` works, `crypto_hdr` starts
4939 // from the initial offset of `b` (rather than the current
4940 // offset), so it needs to be advanced to the
4941 // initial frame offset.
4942 crypto_hdr.skip(hdr_off)?;
4943
4944 frame::encode_crypto_header(
4945 crypto_off,
4946 len as u64,
4947 &mut crypto_hdr,
4948 )?;
4949
4950 // Advance the packet buffer's offset.
4951 b.skip(hdr_len + len)?;
4952
4953 let frame = frame::Frame::CryptoHeader {
4954 offset: crypto_off,
4955 length: len,
4956 };
4957
4958 if push_frame_to_pkt!(b, frames, frame, left) {
4959 ack_eliciting = true;
4960 in_flight = true;
4961 has_data = true;
4962 }
4963 }
4964 }
4965
4966 // The preference of data-bearing frame to include in a packet
4967 // is managed by `self.emit_dgram`. However, whether any frames
4968 // can be sent depends on the state of their buffers. In the case
4969 // where one type is preferred but its buffer is empty, fall back
4970 // to the other type in order not to waste this function call.
4971 let mut dgram_emitted = false;
4972 let dgrams_to_emit = max_dgram_len.is_some();
4973 let stream_to_emit = self.streams.has_flushable();
4974
4975 let mut do_dgram = self.emit_dgram && dgrams_to_emit;
4976 let do_stream = !self.emit_dgram && stream_to_emit;
4977
4978 if !do_stream && dgrams_to_emit {
4979 do_dgram = true;
4980 }
4981
4982 // Create DATAGRAM frame.
4983 if (pkt_type == Type::Short || pkt_type == Type::ZeroRTT) &&
4984 left > frame::MAX_DGRAM_OVERHEAD &&
4985 !is_closing &&
4986 path.active() &&
4987 do_dgram
4988 {
4989 if let Some(max_dgram_payload) = max_dgram_len {
4990 while let Some(len) = self.dgram_send_queue.peek_front_len() {
4991 let hdr_off = b.off();
4992 let hdr_len = 1 + // frame type
4993 2; // length, always encode as 2-byte varint
4994
4995 if (hdr_len + len) <= left {
4996 // Front of the queue fits this packet, send it.
4997 match self.dgram_send_queue.pop() {
4998 Some(data) => {
4999 // Encode the frame.
5000 //
5001 // Instead of creating a `frame::Frame` object,
5002 // encode the frame directly into the packet
5003 // buffer.
5004 //
5005 // First we reserve some space in the output
5006 // buffer for writing the frame header (we
5007 // assume the length field is always a 2-byte
5008 // varint as we don't know the value yet).
5009 //
5010 // Then we emit the data from the DATAGRAM's
5011 // buffer.
5012 //
5013 // Finally we go back and encode the frame
5014 // header with the now available information.
5015 let (mut dgram_hdr, mut dgram_payload) =
5016 b.split_at(hdr_off + hdr_len)?;
5017
5018 dgram_payload.as_mut()[..len]
5019 .copy_from_slice(&data);
5020
5021 // Encode the frame's header.
5022 //
5023 // Due to how `OctetsMut::split_at()` works,
5024 // `dgram_hdr` starts from the initial offset
5025 // of `b` (rather than the current offset), so
5026 // it needs to be advanced to the initial frame
5027 // offset.
5028 dgram_hdr.skip(hdr_off)?;
5029
5030 frame::encode_dgram_header(
5031 len as u64,
5032 &mut dgram_hdr,
5033 )?;
5034
5035 // Advance the packet buffer's offset.
5036 b.skip(hdr_len + len)?;
5037
5038 let frame =
5039 frame::Frame::DatagramHeader { length: len };
5040
5041 if push_frame_to_pkt!(b, frames, frame, left) {
5042 ack_eliciting = true;
5043 in_flight = true;
5044 dgram_emitted = true;
5045 self.dgram_sent_count =
5046 self.dgram_sent_count.saturating_add(1);
5047 path.dgram_sent_count =
5048 path.dgram_sent_count.saturating_add(1);
5049 }
5050 },
5051
5052 None => continue,
5053 };
5054 } else if len > max_dgram_payload {
5055 // This dgram frame will never fit. Let's purge it.
5056 self.dgram_send_queue.pop();
5057 } else {
5058 break;
5059 }
5060 }
5061 }
5062 }
5063
5064 // Create a single STREAM frame for the first stream that is flushable.
5065 if (pkt_type == Type::Short || pkt_type == Type::ZeroRTT) &&
5066 left > frame::MAX_STREAM_OVERHEAD &&
5067 !is_closing &&
5068 path.active() &&
5069 !dgram_emitted
5070 {
5071 while let Some(priority_key) = self.streams.peek_flushable() {
5072 let stream_id = priority_key.id;
5073 let stream = match self.streams.get_mut(stream_id) {
5074 // Avoid sending frames for streams that were already stopped.
5075 //
5076 // This might happen if stream data was buffered but not yet
5077 // flushed on the wire when a STOP_SENDING frame is received.
5078 Some(v) if !v.send.is_stopped() => v,
5079 _ => {
5080 self.streams.remove_flushable(&priority_key);
5081 continue;
5082 },
5083 };
5084
5085 let stream_off = stream.send.off_front();
5086
5087 // Encode the frame.
5088 //
5089 // Instead of creating a `frame::Frame` object, encode the frame
5090 // directly into the packet buffer.
5091 //
5092 // First we reserve some space in the output buffer for writing
5093 // the frame header (we assume the length field is always a
5094 // 2-byte varint as we don't know the value yet).
5095 //
5096 // Then we emit the data from the stream's send buffer.
5097 //
5098 // Finally we go back and encode the frame header with the now
5099 // available information.
5100 let hdr_off = b.off();
5101 let hdr_len = 1 + // frame type
5102 octets::varint_len(stream_id) + // stream_id
5103 octets::varint_len(stream_off) + // offset
5104 2; // length, always encode as 2-byte varint
5105
5106 let max_len = match left.checked_sub(hdr_len) {
5107 Some(v) => v,
5108 None => {
5109 let priority_key = Arc::clone(&stream.priority_key);
5110 self.streams.remove_flushable(&priority_key);
5111
5112 continue;
5113 },
5114 };
5115
5116 let (mut stream_hdr, mut stream_payload) =
5117 b.split_at(hdr_off + hdr_len)?;
5118
5119 // Write stream data into the packet buffer.
5120 let (len, fin) =
5121 stream.send.emit(&mut stream_payload.as_mut()[..max_len])?;
5122
5123 // Encode the frame's header.
5124 //
5125 // Due to how `OctetsMut::split_at()` works, `stream_hdr` starts
5126 // from the initial offset of `b` (rather than the current
5127 // offset), so it needs to be advanced to the initial frame
5128 // offset.
5129 stream_hdr.skip(hdr_off)?;
5130
5131 frame::encode_stream_header(
5132 stream_id,
5133 stream_off,
5134 len as u64,
5135 fin,
5136 &mut stream_hdr,
5137 )?;
5138
5139 // Advance the packet buffer's offset.
5140 b.skip(hdr_len + len)?;
5141
5142 let frame = frame::Frame::StreamHeader {
5143 stream_id,
5144 offset: stream_off,
5145 length: len,
5146 fin,
5147 };
5148
5149 if push_frame_to_pkt!(b, frames, frame, left) {
5150 ack_eliciting = true;
5151 in_flight = true;
5152 has_data = true;
5153 }
5154
5155 let priority_key = Arc::clone(&stream.priority_key);
5156 // If the stream is no longer flushable, remove it from the queue
5157 if !stream.is_flushable() {
5158 self.streams.remove_flushable(&priority_key);
5159 } else if stream.incremental {
5160 // Shuffle the incremental stream to the back of the
5161 // queue.
5162 self.streams.remove_flushable(&priority_key);
5163 self.streams.insert_flushable(&priority_key);
5164 }
5165
5166 #[cfg(feature = "fuzzing")]
5167 // Coalesce STREAM frames when fuzzing.
5168 if left > frame::MAX_STREAM_OVERHEAD {
5169 continue;
5170 }
5171
5172 break;
5173 }
5174 }
5175
5176 // Alternate trying to send DATAGRAMs next time.
5177 self.emit_dgram = !dgram_emitted;
5178
5179 // If no other ack-eliciting frame is sent, include a PING frame
5180 // - if PTO probe needed; OR
5181 // - if we've sent too many non ack-eliciting packets without having
5182 // sent an ACK eliciting one; OR
5183 // - the application requested an ack-eliciting frame be sent.
5184 if (ack_elicit_required || path.needs_ack_eliciting) &&
5185 !ack_eliciting &&
5186 left >= 1 &&
5187 !is_closing
5188 {
5189 let frame = frame::Frame::Ping { mtu_probe: None };
5190
5191 if push_frame_to_pkt!(b, frames, frame, left) {
5192 ack_eliciting = true;
5193 in_flight = true;
5194 }
5195 }
5196
5197 if ack_eliciting && !is_pmtud_probe {
5198 path.needs_ack_eliciting = false;
5199 path.recovery.ping_sent(epoch);
5200 }
5201
5202 if !has_data &&
5203 !dgram_emitted &&
5204 cwnd_available > frame::MAX_STREAM_OVERHEAD
5205 {
5206 path.recovery.on_app_limited();
5207 }
5208
5209 if frames.is_empty() {
5210 // When we reach this point we are not able to write more, so set
5211 // app_limited to false.
5212 path.recovery.update_app_limited(false);
5213 return Err(Error::Done);
5214 }
5215
5216 // When coalescing a 1-RTT packet, we can't add padding in the UDP
5217 // datagram, so use PADDING frames instead.
5218 //
5219 // This is only needed if
5220 // 1) an Initial packet has already been written to the UDP datagram,
5221 // as Initial always requires padding.
5222 //
5223 // 2) this is a probing packet towards an unvalidated peer address.
5224 if (has_initial || !path.validated()) &&
5225 pkt_type == Type::Short &&
5226 left >= 1
5227 {
5228 let frame = frame::Frame::Padding { len: left };
5229
5230 if push_frame_to_pkt!(b, frames, frame, left) {
5231 in_flight = true;
5232 }
5233 }
5234
5235 // Pad payload so that it's always at least 4 bytes.
5236 if b.off() - payload_offset < PAYLOAD_MIN_LEN {
5237 let payload_len = b.off() - payload_offset;
5238
5239 let frame = frame::Frame::Padding {
5240 len: PAYLOAD_MIN_LEN - payload_len,
5241 };
5242
5243 #[allow(unused_assignments)]
5244 if push_frame_to_pkt!(b, frames, frame, left) {
5245 in_flight = true;
5246 }
5247 }
5248
5249 let payload_len = b.off() - payload_offset;
5250
5251 // Fill in payload length.
5252 if pkt_type != Type::Short {
5253 let len = pn_len + payload_len + crypto_overhead;
5254
5255 let (_, mut payload_with_len) = b.split_at(header_offset)?;
5256 payload_with_len
5257 .put_varint_with_len(len as u64, PAYLOAD_LENGTH_LEN)?;
5258 }
5259
5260 trace!(
5261 "{} tx pkt {} len={} pn={} {}",
5262 self.trace_id,
5263 hdr_trace.unwrap_or_default(),
5264 payload_len,
5265 pn,
5266 AddrTupleFmt(path.local_addr(), path.peer_addr())
5267 );
5268
5269 #[cfg(feature = "qlog")]
5270 let mut qlog_frames: SmallVec<
5271 [qlog::events::quic::QuicFrame; 1],
5272 > = SmallVec::with_capacity(frames.len());
5273
5274 for frame in &mut frames {
5275 trace!("{} tx frm {:?}", self.trace_id, frame);
5276
5277 qlog_with_type!(QLOG_PACKET_TX, self.qlog, _q, {
5278 qlog_frames.push(frame.to_qlog());
5279 });
5280 }
5281
5282 qlog_with_type!(QLOG_PACKET_TX, self.qlog, q, {
5283 if let Some(header) = qlog_pkt_hdr {
5284 // Qlog packet raw info described at
5285 // https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema-00#section-5.1
5286 //
5287 // `length` includes packet headers and trailers (AEAD tag).
5288 let length = payload_len + payload_offset + crypto_overhead;
5289 let qlog_raw_info = RawInfo {
5290 length: Some(length as u64),
5291 payload_length: Some(payload_len as u64),
5292 data: None,
5293 };
5294
5295 let send_at_time =
5296 now.duration_since(q.start_time()).as_secs_f64() * 1000.0;
5297
5298 let ev_data =
5299 EventData::QuicPacketSent(qlog::events::quic::PacketSent {
5300 header,
5301 frames: Some(qlog_frames),
5302 raw: Some(qlog_raw_info),
5303 send_at_time: Some(send_at_time),
5304 ..Default::default()
5305 });
5306
5307 q.add_event_data_with_instant(ev_data, now).ok();
5308 }
5309 });
5310
5311 let aead = match crypto_ctx.crypto_seal {
5312 Some(ref mut v) => v,
5313 None => return Err(Error::InvalidState),
5314 };
5315
5316 let written = packet::encrypt_pkt(
5317 &mut b,
5318 pn,
5319 pn_len,
5320 payload_len,
5321 payload_offset,
5322 None,
5323 aead,
5324 )?;
5325
5326 let sent_pkt_has_data = if path.recovery.gcongestion_enabled() {
5327 has_data || dgram_emitted
5328 } else {
5329 has_data
5330 };
5331
5332 let sent_pkt = recovery::Sent {
5333 pkt_num: pn,
5334 frames,
5335 time_sent: now,
5336 time_acked: None,
5337 time_lost: None,
5338 size: if ack_eliciting { written } else { 0 },
5339 ack_eliciting,
5340 in_flight,
5341 delivered: 0,
5342 delivered_time: now,
5343 first_sent_time: now,
5344 is_app_limited: false,
5345 tx_in_flight: 0,
5346 lost: 0,
5347 has_data: sent_pkt_has_data,
5348 is_pmtud_probe,
5349 };
5350
5351 if in_flight && is_app_limited {
5352 path.recovery.delivery_rate_update_app_limited(true);
5353 }
5354
5355 self.next_pkt_num += 1;
5356
5357 let handshake_status = recovery::HandshakeStatus {
5358 has_handshake_keys: self.crypto_ctx[packet::Epoch::Handshake]
5359 .has_keys(),
5360 peer_verified_address: self.peer_verified_initial_address,
5361 completed: self.handshake_completed,
5362 };
5363
5364 self.on_packet_sent(send_pid, sent_pkt, epoch, handshake_status, now)?;
5365
5366 let path = self.paths.get_mut(send_pid)?;
5367 qlog_with_type!(QLOG_METRICS, self.qlog, q, {
5368 path.recovery.maybe_qlog(q, now);
5369 });
5370
5371 // Record sent packet size if we probe the path.
5372 if let Some(data) = challenge_data {
5373 path.add_challenge_sent(data, written, now);
5374 }
5375
5376 self.sent_count += 1;
5377 self.sent_bytes += written as u64;
5378 path.sent_count += 1;
5379 path.sent_bytes += written as u64;
5380
5381 if self.dgram_send_queue.byte_size() > path.recovery.cwnd_available() {
5382 path.recovery.update_app_limited(false);
5383 }
5384
5385 path.max_send_bytes = path.max_send_bytes.saturating_sub(written);
5386
5387 // On the client, drop initial state after sending an Handshake packet.
5388 if !self.is_server && hdr_ty == Type::Handshake {
5389 self.drop_epoch_state(packet::Epoch::Initial, now);
5390 }
5391
5392 // (Re)start the idle timer if we are sending the first ack-eliciting
5393 // packet since last receiving a packet.
5394 if ack_eliciting && !self.ack_eliciting_sent {
5395 if let Some(idle_timeout) = self.idle_timeout() {
5396 self.idle_timer = Some(now + idle_timeout);
5397 }
5398 }
5399
5400 if ack_eliciting {
5401 self.ack_eliciting_sent = true;
5402 }
5403
5404 Ok((pkt_type, written))
5405 }
5406
5407 fn on_packet_sent(
5408 &mut self, send_pid: usize, sent_pkt: recovery::Sent,
5409 epoch: packet::Epoch, handshake_status: recovery::HandshakeStatus,
5410 now: Instant,
5411 ) -> Result<()> {
5412 let path = self.paths.get_mut(send_pid)?;
5413
5414 // It's fine to set the skip counter based on a non-active path's values.
5415 let cwnd = path.recovery.cwnd();
5416 let max_datagram_size = path.recovery.max_datagram_size();
5417 self.pkt_num_spaces[epoch].on_packet_sent(&sent_pkt);
5418 self.pkt_num_manager.on_packet_sent(
5419 cwnd,
5420 max_datagram_size,
5421 self.handshake_completed,
5422 );
5423
5424 path.recovery.on_packet_sent(
5425 sent_pkt,
5426 epoch,
5427 handshake_status,
5428 now,
5429 &self.trace_id,
5430 );
5431
5432 Ok(())
5433 }
5434
5435 /// Returns the desired send time for the next packet.
5436 #[inline]
5437 pub fn get_next_release_time(&self) -> Option<ReleaseDecision> {
5438 Some(
5439 self.paths
5440 .get_active()
5441 .ok()?
5442 .recovery
5443 .get_next_release_time(),
5444 )
5445 }
5446
5447 /// Returns whether gcongestion is enabled.
5448 #[inline]
5449 pub fn gcongestion_enabled(&self) -> Option<bool> {
5450 Some(self.paths.get_active().ok()?.recovery.gcongestion_enabled())
5451 }
5452
5453 /// Returns the maximum pacing into the future.
5454 ///
5455 /// Equals 1/8 of the smoothed RTT, but at least 1ms and not greater than
5456 /// 5ms.
5457 pub fn max_release_into_future(&self) -> Duration {
5458 self.paths
5459 .get_active()
5460 .map(|p| p.recovery.rtt().mul_f64(0.125))
5461 .unwrap_or(Duration::from_millis(1))
5462 .max(Duration::from_millis(1))
5463 .min(Duration::from_millis(5))
5464 }
5465
    /// Returns whether pacing is enabled.
    ///
    /// This reflects the connection-wide recovery configuration, not any
    /// per-path state.
    #[inline]
    pub fn pacing_enabled(&self) -> bool {
        self.recovery_config.pacing
    }
5471
5472 /// Returns the size of the send quantum, in bytes.
5473 ///
5474 /// This represents the maximum size of a packet burst as determined by the
5475 /// congestion control algorithm in use.
5476 ///
5477 /// Applications can, for example, use it in conjunction with segmentation
5478 /// offloading mechanisms as the maximum limit for outgoing aggregates of
5479 /// multiple packets.
5480 #[inline]
5481 pub fn send_quantum(&self) -> usize {
5482 match self.paths.get_active() {
5483 Ok(p) => p.recovery.send_quantum(),
5484 _ => 0,
5485 }
5486 }
5487
5488 /// Returns the size of the send quantum over the given 4-tuple, in bytes.
5489 ///
5490 /// This represents the maximum size of a packet burst as determined by the
5491 /// congestion control algorithm in use.
5492 ///
5493 /// Applications can, for example, use it in conjunction with segmentation
5494 /// offloading mechanisms as the maximum limit for outgoing aggregates of
5495 /// multiple packets.
5496 ///
5497 /// If the (`local_addr`, peer_addr`) 4-tuple relates to a non-existing
5498 /// path, this method returns 0.
5499 pub fn send_quantum_on_path(
5500 &self, local_addr: SocketAddr, peer_addr: SocketAddr,
5501 ) -> usize {
5502 self.paths
5503 .path_id_from_addrs(&(local_addr, peer_addr))
5504 .and_then(|pid| self.paths.get(pid).ok())
5505 .map(|path| path.recovery.send_quantum())
5506 .unwrap_or(0)
5507 }
5508
    /// Reads contiguous data from a stream into the provided slice.
    ///
    /// The slice must be sized by the caller and will be populated up to its
    /// capacity.
    ///
    /// On success the amount of bytes read and a flag indicating the fin state
    /// is returned as a tuple, or [`Done`] if there is no data to read.
    ///
    /// Reading data from a stream may trigger queueing of control messages
    /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`send()`]: struct.Connection.html#method.send
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut buf = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// # let stream_id = 0;
    /// while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
    ///     println!("Got {} bytes on stream {}", read, stream_id);
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn stream_recv(
        &mut self, stream_id: u64, out: &mut [u8],
    ) -> Result<(usize, bool)> {
        // Delegate to the shared read/discard path, emitting bytes into `out`.
        self.do_stream_recv(stream_id, RecvAction::Emit { out })
    }
5544
    /// Discards contiguous data from a stream without copying.
    ///
    /// On success the amount of bytes discarded and a flag indicating the fin
    /// state is returned as a tuple, or [`Done`] if there is no data to
    /// discard.
    ///
    /// Discarding data from a stream may trigger queueing of control messages
    /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`send()`]: struct.Connection.html#method.send
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// # let stream_id = 0;
    /// while let Ok((read, fin)) = conn.stream_discard(stream_id, 1) {
    ///     println!("Discarded {} byte(s) on stream {}", read, stream_id);
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn stream_discard(
        &mut self, stream_id: u64, len: usize,
    ) -> Result<(usize, bool)> {
        // Delegate to the shared read/discard path, dropping up to `len` bytes.
        self.do_stream_recv(stream_id, RecvAction::Discard { len })
    }
5577
    // Reads or discards contiguous data from a stream.
    //
    // Passing an `action` of `RecvAction::Emit` results in a read into
    // the provided slice. It must be sized by the caller and will be populated
    // up to its capacity.
    //
    // Passing an `action` of `RecvAction::Discard` results in discard up
    // to the indicated length.
    //
    // On success the amount of bytes read or discarded, and a flag indicating
    // the fin state, is returned as a tuple, or [`Done`] if there is no data to
    // read or discard.
    //
    // Reading or discarding data from a stream may trigger queueing of control
    // messages (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
    //
    // [`Done`]: enum.Error.html#variant.Done
    // [`send()`]: struct.Connection.html#method.send
    fn do_stream_recv(
        &mut self, stream_id: u64, action: RecvAction,
    ) -> Result<(usize, bool)> {
        // We can't read on our own unidirectional streams.
        if !stream::is_bidi(stream_id) &&
            stream::is_local(stream_id, self.is_server)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        let stream = self
            .streams
            .get_mut(stream_id)
            .ok_or(Error::InvalidStreamState(stream_id))?;

        if !stream.is_readable() {
            return Err(Error::Done);
        }

        // Capture these before `action` is consumed below, as they are needed
        // for stream collection and readable-queue maintenance afterwards.
        let local = stream.local;
        let priority_key = Arc::clone(&stream.priority_key);

        #[cfg(feature = "qlog")]
        let offset = stream.recv.off_front();

        // For qlog, record whether the data went to the application or was
        // dropped, depending on the requested action.
        #[cfg(feature = "qlog")]
        let to = match action {
            RecvAction::Emit { .. } => Some(DataRecipient::Application),

            RecvAction::Discard { .. } => Some(DataRecipient::Dropped),
        };

        let (read, fin) = match stream.recv.emit_or_discard(action) {
            Ok(v) => v,

            Err(e) => {
                // Collect the stream if it is now complete. This can happen if
                // we got a `StreamReset` error which will now be propagated to
                // the application, so we don't need to keep the stream's state
                // anymore.
                if stream.is_complete() {
                    self.streams.collect(stream_id, local);
                }

                self.streams.remove_readable(&priority_key);
                return Err(e);
            },
        };

        // Account the consumed bytes against connection-level flow control.
        self.flow_control.add_consumed(read as u64);

        let readable = stream.is_readable();

        let complete = stream.is_complete();

        // Schedule a MAX_STREAM_DATA update if the stream's receive window is
        // running low.
        if stream.recv.almost_full() {
            self.streams.insert_almost_full(stream_id);
        }

        if !readable {
            self.streams.remove_readable(&priority_key);
        }

        // Garbage-collect the stream's state once fully read and finished.
        if complete {
            self.streams.collect(stream_id, local);
        }

        qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
            let ev_data = EventData::QuicStreamDataMoved(
                qlog::events::quic::StreamDataMoved {
                    stream_id: Some(stream_id),
                    offset: Some(offset),
                    raw: Some(RawInfo {
                        length: Some(read as u64),
                        ..Default::default()
                    }),
                    from: Some(DataRecipient::Transport),
                    to,
                    additional_info: fin
                        .then_some(DataMovedAdditionalInfo::FinSet),
                },
            );

            let now = Instant::now();
            q.add_event_data_with_instant(ev_data, now).ok();
        });

        if priority_key.incremental && readable {
            // Shuffle the incremental stream to the back of the queue.
            self.streams.remove_readable(&priority_key);
            self.streams.insert_readable(&priority_key);
        }

        Ok((read, fin))
    }
5691
5692 /// Writes data to a stream.
5693 ///
5694 /// On success the number of bytes written is returned, or [`Done`] if no
5695 /// data was written (e.g. because the stream has no capacity).
5696 ///
5697 /// Applications can provide a 0-length buffer with the fin flag set to
5698 /// true. This will lead to a 0-length FIN STREAM frame being sent at the
5699 /// latest offset. The `Ok(0)` value is only returned when the application
5700 /// provided a 0-length buffer.
5701 ///
5702 /// In addition, if the peer has signalled that it doesn't want to receive
5703 /// any more data from this stream by sending the `STOP_SENDING` frame, the
5704 /// [`StreamStopped`] error will be returned instead of any data.
5705 ///
5706 /// Note that in order to avoid buffering an infinite amount of data in the
5707 /// stream's send buffer, streams are only allowed to buffer outgoing data
5708 /// up to the amount that the peer allows it to send (that is, up to the
5709 /// stream's outgoing flow control capacity).
5710 ///
5711 /// This means that the number of written bytes returned can be lower than
5712 /// the length of the input buffer when the stream doesn't have enough
5713 /// capacity for the operation to complete. The application should retry the
5714 /// operation once the stream is reported as writable again.
5715 ///
5716 /// Applications should call this method only after the handshake is
5717 /// completed (whenever [`is_established()`] returns `true`) or during
5718 /// early data if enabled (whenever [`is_in_early_data()`] returns `true`).
5719 ///
5720 /// [`Done`]: enum.Error.html#variant.Done
5721 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
5722 /// [`is_established()`]: struct.Connection.html#method.is_established
5723 /// [`is_in_early_data()`]: struct.Connection.html#method.is_in_early_data
5724 ///
5725 /// ## Examples:
5726 ///
5727 /// ```no_run
5728 /// # let mut buf = [0; 512];
5729 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
5730 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
5731 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
5732 /// # let peer = "127.0.0.1:1234".parse().unwrap();
5733 /// # let local = "127.0.0.1:4321".parse().unwrap();
5734 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
5735 /// # let stream_id = 0;
5736 /// conn.stream_send(stream_id, b"hello", true)?;
5737 /// # Ok::<(), quiche::Error>(())
5738 /// ```
5739 pub fn stream_send(
5740 &mut self, stream_id: u64, buf: &[u8], fin: bool,
5741 ) -> Result<usize> {
5742 self.stream_do_send(
5743 stream_id,
5744 buf,
5745 fin,
5746 |stream: &mut stream::Stream<F>,
5747 buf: &[u8],
5748 cap: usize,
5749 fin: bool| {
5750 stream.send.write(&buf[..cap], fin).map(|v| (v, v))
5751 },
5752 )
5753 }
5754
5755 /// Writes data to a stream with zero copying, instead, it appends the
5756 /// provided buffer directly to the send queue if the capacity allows
5757 /// it.
5758 ///
5759 /// When a partial write happens (including when [`Error::Done`] is
5760 /// returned) the remaining (unwritten) buffer will also be returned.
5761 /// The application should retry the operation once the stream is
5762 /// reported as writable again.
5763 pub fn stream_send_zc(
5764 &mut self, stream_id: u64, buf: F::Buf, len: Option<usize>, fin: bool,
5765 ) -> Result<(usize, Option<F::Buf>)>
5766 where
5767 F::Buf: BufSplit,
5768 {
5769 self.stream_do_send(
5770 stream_id,
5771 buf,
5772 fin,
5773 |stream: &mut stream::Stream<F>,
5774 buf: F::Buf,
5775 cap: usize,
5776 fin: bool| {
5777 let len = len.unwrap_or(usize::MAX).min(cap);
5778 let (sent, remaining) = stream.send.append_buf(buf, len, fin)?;
5779 Ok((sent, (sent, remaining)))
5780 },
5781 )
5782 }
5783
    /// Common backend for buffering outgoing stream data.
    ///
    /// Performs the connection-level bookkeeping shared by the public
    /// stream-write methods (e.g. [`stream_send_zc()`]): flow-control
    /// accounting, stream creation, writable/flushable/blocked queue
    /// maintenance and qlog reporting. The actual transfer into the
    /// stream's send buffer is delegated to `write_fn`, which returns the
    /// number of bytes it consumed plus a caller-specific result value
    /// that is passed through on success.
    ///
    /// [`stream_send_zc()`]: struct.Connection.html#method.stream_send_zc
    fn stream_do_send<B, R, SND>(
        &mut self, stream_id: u64, buf: B, fin: bool, write_fn: SND,
    ) -> Result<R>
    where
        B: AsRef<[u8]>,
        SND: FnOnce(&mut stream::Stream<F>, B, usize, bool) -> Result<(usize, R)>,
    {
        // We can't write on the peer's unidirectional streams.
        if !stream::is_bidi(stream_id) &&
            !stream::is_local(stream_id, self.is_server)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        let len = buf.as_ref().len();

        // Mark the connection as blocked if the connection-level flow control
        // limit doesn't let us buffer all the data.
        //
        // Note that this is separate from "send capacity" as that also takes
        // congestion control into consideration.
        if self.max_tx_data - self.tx_data < len as u64 {
            self.blocked_limit = Some(self.max_tx_data);
        }

        let cap = self.tx_cap;

        // Get existing stream or create a new one.
        let stream = match self.get_or_create_stream(stream_id, true) {
            Ok(v) => v,

            Err(Error::StreamLimit) => {
                // If the local endpoint has exhausted the peer's stream count
                // limit, record the current limit so that a STREAMS_BLOCKED
                // frame can be sent.
                if self.enable_send_streams_blocked &&
                    stream::is_local(stream_id, self.is_server)
                {
                    if stream::is_bidi(stream_id) {
                        let limit = self.streams.peer_max_streams_bidi();
                        self.streams_blocked_bidi_state.update_at(limit);
                    } else {
                        let limit = self.streams.peer_max_streams_uni();
                        self.streams_blocked_uni_state.update_at(limit);
                    }
                }

                return Err(Error::StreamLimit);
            },

            Err(e) => return Err(e),
        };

        // Offset of the data about to be buffered, for qlog reporting only.
        #[cfg(feature = "qlog")]
        let offset = stream.send.off_back();

        // Snapshot the stream's queue membership *before* the write so the
        // post-write state transitions below can be detected.
        let was_writable = stream.is_writable();

        let was_flushable = stream.is_flushable();

        let is_complete = stream.is_complete();
        let is_readable = stream.is_readable();

        let priority_key = Arc::clone(&stream.priority_key);

        // Return early if the stream has been stopped, and collect its state
        // if complete.
        if let Err(Error::StreamStopped(e)) = stream.send.cap() {
            // Only collect the stream if it is complete and not readable.
            // If it is readable, it will get collected when stream_recv()
            // is used.
            //
            // The stream can't be writable if it has been stopped.
            if is_complete && !is_readable {
                let local = stream.local;
                self.streams.collect(stream_id, local);
            }

            return Err(Error::StreamStopped(e));
        };

        // Truncate the input buffer based on the connection's send capacity if
        // necessary.
        //
        // When the cap is zero, the method returns Ok(0) *only* when the passed
        // buffer is empty. We return Error::Done otherwise.
        if cap == 0 && len > 0 {
            if was_writable {
                // When `stream_writable_next()` returns a stream, the writable
                // mark is removed, but because the stream is blocked by the
                // connection-level send capacity it won't be marked as writable
                // again once the capacity increases.
                //
                // Since the stream is writable already, mark it here instead.
                self.streams.insert_writable(&priority_key);
            }

            return Err(Error::Done);
        }

        // Truncate the write to the connection-level send capacity. On a
        // truncated write the fin flag is suppressed (the caller hasn't
        // actually finished) and `blocked_by_cap` records that the stream
        // must be re-marked writable later.
        let (cap, fin, blocked_by_cap) = if cap < len {
            (cap, false, true)
        } else {
            (len, fin, false)
        };

        let (sent, ret) = match write_fn(stream, buf, cap, fin) {
            Ok(v) => v,

            Err(e) => {
                self.streams.remove_writable(&priority_key);
                return Err(e);
            },
        };

        let incremental = stream.incremental;
        let priority_key = Arc::clone(&stream.priority_key);

        let flushable = stream.is_flushable();

        let writable = stream.is_writable();

        let empty_fin = len == 0 && fin;

        // A short write means the stream-level send buffer is full: record
        // the blocked offset so the peer can be notified. Otherwise clear
        // any previous blocked state.
        if sent < cap {
            let max_off = stream.send.max_off();

            if stream.send.blocked_at() != Some(max_off) {
                stream.send.update_blocked_at(Some(max_off));
                self.streams.insert_blocked(stream_id, max_off);
            }
        } else {
            stream.send.update_blocked_at(None);
            self.streams.remove_blocked(stream_id);
        }

        // If the stream is now flushable push it to the flushable queue, but
        // only if it wasn't already queued.
        //
        // Consider the stream flushable also when we are sending a zero-length
        // frame that has the fin flag set.
        if (flushable || empty_fin) && !was_flushable {
            self.streams.insert_flushable(&priority_key);
        }

        if !writable {
            self.streams.remove_writable(&priority_key);
        } else if was_writable && blocked_by_cap {
            // When `stream_writable_next()` returns a stream, the writable
            // mark is removed, but because the stream is blocked by the
            // connection-level send capacity it won't be marked as writable
            // again once the capacity increases.
            //
            // Since the stream is writable already, mark it here instead.
            self.streams.insert_writable(&priority_key);
        }

        // Account for the buffered bytes against the connection-level send
        // capacity, flow-control tally and buffered-data counter.
        self.tx_cap -= sent;

        self.tx_data += sent as u64;

        self.tx_buffered += sent;
        self.check_tx_buffered_invariant();

        qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
            let ev_data = EventData::QuicStreamDataMoved(
                qlog::events::quic::StreamDataMoved {
                    stream_id: Some(stream_id),
                    offset: Some(offset),
                    raw: Some(RawInfo {
                        length: Some(sent as u64),
                        ..Default::default()
                    }),
                    from: Some(DataRecipient::Application),
                    to: Some(DataRecipient::Transport),
                    additional_info: fin
                        .then_some(DataMovedAdditionalInfo::FinSet),
                },
            );

            let now = Instant::now();
            q.add_event_data_with_instant(ev_data, now).ok();
        });

        // Nothing was buffered even though capacity was available: report
        // Done rather than a zero-byte success.
        if sent == 0 && cap > 0 {
            return Err(Error::Done);
        }

        if incremental && writable {
            // Shuffle the incremental stream to the back of the queue.
            self.streams.remove_writable(&priority_key);
            self.streams.insert_writable(&priority_key);
        }

        Ok(ret)
    }
5980
5981 /// Sets the priority for a stream.
5982 ///
5983 /// A stream's priority determines the order in which stream data is sent
5984 /// on the wire (streams with lower priority are sent first). Streams are
5985 /// created with a default priority of `127`.
5986 ///
5987 /// The target stream is created if it did not exist before calling this
5988 /// method.
5989 pub fn stream_priority(
5990 &mut self, stream_id: u64, urgency: u8, incremental: bool,
5991 ) -> Result<()> {
5992 // Get existing stream or create a new one, but if the stream
5993 // has already been closed and collected, ignore the prioritization.
5994 let stream = match self.get_or_create_stream(stream_id, true) {
5995 Ok(v) => v,
5996
5997 Err(Error::Done) => return Ok(()),
5998
5999 Err(e) => return Err(e),
6000 };
6001
6002 if stream.urgency == urgency && stream.incremental == incremental {
6003 return Ok(());
6004 }
6005
6006 stream.urgency = urgency;
6007 stream.incremental = incremental;
6008
6009 let new_priority_key = Arc::new(StreamPriorityKey {
6010 urgency: stream.urgency,
6011 incremental: stream.incremental,
6012 id: stream_id,
6013 ..Default::default()
6014 });
6015
6016 let old_priority_key =
6017 std::mem::replace(&mut stream.priority_key, new_priority_key.clone());
6018
6019 self.streams
6020 .update_priority(&old_priority_key, &new_priority_key);
6021
6022 Ok(())
6023 }
6024
    /// Shuts down reading or writing from/to the specified stream.
    ///
    /// When the `direction` argument is set to [`Shutdown::Read`], outstanding
    /// data in the stream's receive buffer is dropped, and no additional data
    /// is added to it. Data received after calling this method is still
    /// validated and acked but not stored, and [`stream_recv()`] will not
    /// return it to the application. In addition, a `STOP_SENDING` frame will
    /// be sent to the peer to signal it to stop sending data.
    ///
    /// When the `direction` argument is set to [`Shutdown::Write`], outstanding
    /// data in the stream's send buffer is dropped, and no additional data is
    /// added to it. Data passed to [`stream_send()`] after calling this method
    /// will be ignored. In addition, a `RESET_STREAM` frame will be sent to the
    /// peer to signal the reset.
    ///
    /// Locally-initiated unidirectional streams can only be closed in the
    /// [`Shutdown::Write`] direction. Remotely-initiated unidirectional streams
    /// can only be closed in the [`Shutdown::Read`] direction. Using an
    /// incorrect direction will return [`InvalidStreamState`].
    ///
    /// [`Shutdown::Read`]: enum.Shutdown.html#variant.Read
    /// [`Shutdown::Write`]: enum.Shutdown.html#variant.Write
    /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
    /// [`stream_send()`]: struct.Connection.html#method.stream_send
    /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
    pub fn stream_shutdown(
        &mut self, stream_id: u64, direction: Shutdown, err: u64,
    ) -> Result<()> {
        // Don't try to stop a local unidirectional stream.
        if direction == Shutdown::Read &&
            stream::is_local(stream_id, self.is_server) &&
            !stream::is_bidi(stream_id)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        // Don't try to reset a remote unidirectional stream.
        if direction == Shutdown::Write &&
            !stream::is_local(stream_id, self.is_server) &&
            !stream::is_bidi(stream_id)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        // Get existing stream.
        let stream = self.streams.get_mut(stream_id).ok_or(Error::Done)?;

        let priority_key = Arc::clone(&stream.priority_key);

        match direction {
            Shutdown::Read => {
                // Drop buffered receive data and credit connection-level flow
                // control for the bytes that were consumed by the shutdown.
                let consumed = stream.recv.shutdown()?;
                self.flow_control.add_consumed(consumed);

                // Only signal STOP_SENDING if the peer may still send data on
                // this stream (i.e. we haven't seen its fin yet).
                if !stream.recv.is_fin() {
                    self.streams.insert_stopped(stream_id, err);
                }

                // Once shutdown, the stream is guaranteed to be non-readable.
                self.streams.remove_readable(&priority_key);

                self.stopped_stream_local_count =
                    self.stopped_stream_local_count.saturating_add(1);
            },

            Shutdown::Write => {
                let (final_size, unsent) = stream.send.shutdown()?;

                // Claw back some flow control allowance from data that was
                // buffered but not actually sent before the stream was reset.
                self.tx_data = self.tx_data.saturating_sub(unsent);

                self.tx_buffered =
                    self.tx_buffered.saturating_sub(unsent as usize);

                // These drops in qlog are a bit weird, but the only way to ensure
                // that all bytes that are moved from App to Transport in
                // stream_do_send are eventually moved from Transport to Dropped.
                // Ideally we would add a Transport to Network transition also as
                // a way to indicate when bytes were transmitted vs dropped
                // without ever being sent.
                qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
                    let ev_data = EventData::QuicStreamDataMoved(
                        qlog::events::quic::StreamDataMoved {
                            stream_id: Some(stream_id),
                            offset: Some(final_size),
                            raw: Some(RawInfo {
                                length: Some(unsent),
                                ..Default::default()
                            }),
                            from: Some(DataRecipient::Transport),
                            to: Some(DataRecipient::Dropped),
                            ..Default::default()
                        },
                    );

                    q.add_event_data_with_instant(ev_data, Instant::now()).ok();
                });

                // Update send capacity.
                self.update_tx_cap();

                // Queue the RESET_STREAM signal carrying the application
                // error code and the stream's final size.
                self.streams.insert_reset(stream_id, err, final_size);

                // Once shutdown, the stream is guaranteed to be non-writable.
                self.streams.remove_writable(&priority_key);

                self.reset_stream_local_count =
                    self.reset_stream_local_count.saturating_add(1);
            },
        }

        Ok(())
    }
6139
6140 /// Returns the stream's send capacity in bytes.
6141 ///
6142 /// If the specified stream doesn't exist (including when it has already
6143 /// been completed and closed), the [`InvalidStreamState`] error will be
6144 /// returned.
6145 ///
6146 /// In addition, if the peer has signalled that it doesn't want to receive
6147 /// any more data from this stream by sending the `STOP_SENDING` frame, the
6148 /// [`StreamStopped`] error will be returned.
6149 ///
6150 /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
6151 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
6152 #[inline]
6153 pub fn stream_capacity(&mut self, stream_id: u64) -> Result<usize> {
6154 if let Some(stream) = self.streams.get(stream_id) {
6155 let stream_cap = match stream.send.cap() {
6156 Ok(v) => v,
6157
6158 Err(Error::StreamStopped(e)) => {
6159 // Only collect the stream if it is complete and not
6160 // readable. If it is readable, it will get collected when
6161 // stream_recv() is used.
6162 if stream.is_complete() && !stream.is_readable() {
6163 let local = stream.local;
6164 self.streams.collect(stream_id, local);
6165 }
6166
6167 return Err(Error::StreamStopped(e));
6168 },
6169
6170 Err(e) => return Err(e),
6171 };
6172
6173 let cap = cmp::min(self.tx_cap, stream_cap);
6174 return Ok(cap);
6175 };
6176
6177 Err(Error::InvalidStreamState(stream_id))
6178 }
6179
6180 /// Returns the next stream that has data to read.
6181 ///
6182 /// Note that once returned by this method, a stream ID will not be returned
6183 /// again until it is "re-armed".
6184 ///
6185 /// The application will need to read all of the pending data on the stream,
6186 /// and new data has to be received before the stream is reported again.
6187 ///
6188 /// This is unlike the [`readable()`] method, that returns the same list of
6189 /// readable streams when called multiple times in succession.
6190 ///
6191 /// [`readable()`]: struct.Connection.html#method.readable
6192 pub fn stream_readable_next(&mut self) -> Option<u64> {
6193 let priority_key = self.streams.readable.front().clone_pointer()?;
6194
6195 self.streams.remove_readable(&priority_key);
6196
6197 Some(priority_key.id)
6198 }
6199
6200 /// Returns true if the stream has data that can be read.
6201 pub fn stream_readable(&self, stream_id: u64) -> bool {
6202 let stream = match self.streams.get(stream_id) {
6203 Some(v) => v,
6204
6205 None => return false,
6206 };
6207
6208 stream.is_readable()
6209 }
6210
    /// Returns the next stream that can be written to.
    ///
    /// Note that once returned by this method, a stream ID will not be returned
    /// again until it is "re-armed".
    ///
    /// This is unlike the [`writable()`] method, that returns the same list of
    /// writable streams when called multiple times in succession. It is not
    /// advised to use both `stream_writable_next()` and [`writable()`] on the
    /// same connection, as it may lead to unexpected results.
    ///
    /// The [`stream_writable()`] method can also be used to fine-tune when a
    /// stream is reported as writable again.
    ///
    /// [`stream_writable()`]: struct.Connection.html#method.stream_writable
    /// [`writable()`]: struct.Connection.html#method.writable
    pub fn stream_writable_next(&mut self) -> Option<u64> {
        // If there is not enough connection-level send capacity, none of the
        // streams are writable.
        if self.tx_cap == 0 {
            return None;
        }

        let mut cursor = self.streams.writable.front();

        // Walk the writable queue in order, returning (and de-arming) the
        // first stream whose effective capacity reaches its low-water mark.
        while let Some(priority_key) = cursor.clone_pointer() {
            if let Some(stream) = self.streams.get(priority_key.id) {
                let cap = match stream.send.cap() {
                    Ok(v) => v,

                    // Return the stream to the application immediately if it's
                    // stopped.
                    Err(_) =>
                        return {
                            self.streams.remove_writable(&priority_key);

                            Some(priority_key.id)
                        },
                };

                // Effective capacity is bounded by both the stream-level and
                // connection-level limits; `send_lowat` defaults to 1 but may
                // be raised via stream_writable().
                if cmp::min(self.tx_cap, cap) >= stream.send_lowat {
                    self.streams.remove_writable(&priority_key);
                    return Some(priority_key.id);
                }
            }

            cursor.move_next();
        }

        None
    }
6261
    /// Returns true if the stream has enough send capacity.
    ///
    /// When `len` more bytes can be buffered into the given stream's send
    /// buffer, `true` will be returned, `false` otherwise.
    ///
    /// In the latter case, if the additional data can't be buffered due to
    /// flow control limits, the peer will also be notified, and a "low send
    /// watermark" will be set for the stream, such that it is not going to be
    /// reported as writable again by [`stream_writable_next()`] until its send
    /// capacity reaches `len`.
    ///
    /// If the specified stream doesn't exist (including when it has already
    /// been completed and closed), the [`InvalidStreamState`] error will be
    /// returned.
    ///
    /// In addition, if the peer has signalled that it doesn't want to receive
    /// any more data from this stream by sending the `STOP_SENDING` frame, the
    /// [`StreamStopped`] error will be returned.
    ///
    /// [`stream_writable_next()`]: struct.Connection.html#method.stream_writable_next
    /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
    /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
    #[inline]
    pub fn stream_writable(
        &mut self, stream_id: u64, len: usize,
    ) -> Result<bool> {
        // Fast path: the combined stream- and connection-level capacity is
        // already sufficient.
        if self.stream_capacity(stream_id)? >= len {
            return Ok(true);
        }

        let stream = match self.streams.get_mut(stream_id) {
            Some(v) => v,

            None => return Err(Error::InvalidStreamState(stream_id)),
        };

        // Raise the low send watermark (minimum 1) so the stream is only
        // reported by stream_writable_next() once `len` bytes fit.
        stream.send_lowat = cmp::max(1, len);

        let is_writable = stream.is_writable();

        let priority_key = Arc::clone(&stream.priority_key);

        // Record that the connection-level flow control limit is what blocks
        // us (same bookkeeping as in stream_do_send).
        if self.max_tx_data - self.tx_data < len as u64 {
            self.blocked_limit = Some(self.max_tx_data);
        }

        // Likewise record stream-level blocking at the current max offset so
        // the peer can be notified.
        if stream.send.cap()? < len {
            let max_off = stream.send.max_off();
            if stream.send.blocked_at() != Some(max_off) {
                stream.send.update_blocked_at(Some(max_off));
                self.streams.insert_blocked(stream_id, max_off);
            }
        } else if is_writable {
            // When `stream_writable_next()` returns a stream, the writable
            // mark is removed, but because the stream is blocked by the
            // connection-level send capacity it won't be marked as writable
            // again once the capacity increases.
            //
            // Since the stream is writable already, mark it here instead.
            self.streams.insert_writable(&priority_key);
        }

        Ok(false)
    }
6326
6327 /// Returns true if all the data has been read from the specified stream.
6328 ///
6329 /// This instructs the application that all the data received from the
6330 /// peer on the stream has been read, and there won't be anymore in the
6331 /// future.
6332 ///
6333 /// Basically this returns true when the peer either set the `fin` flag
6334 /// for the stream, or sent `RESET_STREAM`.
6335 #[inline]
6336 pub fn stream_finished(&self, stream_id: u64) -> bool {
6337 let stream = match self.streams.get(stream_id) {
6338 Some(v) => v,
6339
6340 None => return true,
6341 };
6342
6343 stream.recv.is_fin()
6344 }
6345
    /// Returns the number of bidirectional streams that can be created
    /// before the peer's stream count limit is reached.
    ///
    /// This can be useful to know if it's possible to create a bidirectional
    /// stream without trying it first.
    #[inline]
    pub fn peer_streams_left_bidi(&self) -> u64 {
        // Stream-count accounting lives in the stream map.
        self.streams.peer_streams_left_bidi()
    }
6355
    /// Returns the number of unidirectional streams that can be created
    /// before the peer's stream count limit is reached.
    ///
    /// This can be useful to know if it's possible to create a unidirectional
    /// stream without trying it first.
    #[inline]
    pub fn peer_streams_left_uni(&self) -> u64 {
        // Stream-count accounting lives in the stream map.
        self.streams.peer_streams_left_uni()
    }
6365
    /// Returns an iterator over streams that have outstanding data to read.
    ///
    /// Note that the iterator will only include streams that were readable at
    /// the time the iterator itself was created (i.e. when `readable()` was
    /// called). To account for newly readable streams, the iterator needs to
    /// be created again.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut buf = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// // Iterate over readable streams.
    /// for stream_id in conn.readable() {
    ///     // Stream is readable, read until there's no more data.
    ///     while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
    ///         println!("Got {} bytes on stream {}", read, stream_id);
    ///     }
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    #[inline]
    pub fn readable(&self) -> StreamIter {
        // Snapshot of the streams readable right now; see method docs.
        self.streams.readable()
    }
6396
6397 /// Returns an iterator over streams that can be written in priority order.
6398 ///
6399 /// The priority order is based on RFC 9218 scheduling recommendations.
6400 /// Stream priority can be controlled using [`stream_priority()`]. In order
6401 /// to support fairness requirements, each time this method is called,
6402 /// internal state is updated. Therefore the iterator ordering can change
6403 /// between calls, even if no streams were added or removed.
6404 ///
6405 /// A "writable" stream is a stream that has enough flow control capacity to
6406 /// send data to the peer. To avoid buffering an infinite amount of data,
6407 /// streams are only allowed to buffer outgoing data up to the amount that
6408 /// the peer allows to send.
6409 ///
6410 /// Note that the iterator will only include streams that were writable at
6411 /// the time the iterator itself was created (i.e. when `writable()` was
6412 /// called). To account for newly writable streams, the iterator needs to be
6413 /// created again.
6414 ///
6415 /// ## Examples:
6416 ///
6417 /// ```no_run
6418 /// # let mut buf = [0; 512];
6419 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6420 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6421 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6422 /// # let local = socket.local_addr().unwrap();
6423 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6424 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6425 /// // Iterate over writable streams.
6426 /// for stream_id in conn.writable() {
6427 /// // Stream is writable, write some data.
6428 /// if let Ok(written) = conn.stream_send(stream_id, &buf, false) {
6429 /// println!("Written {} bytes on stream {}", written, stream_id);
6430 /// }
6431 /// }
6432 /// # Ok::<(), quiche::Error>(())
6433 /// ```
6434 /// [`stream_priority()`]: struct.Connection.html#method.stream_priority
6435 #[inline]
6436 pub fn writable(&self) -> StreamIter {
6437 // If there is not enough connection-level send capacity, none of the
6438 // streams are writable, so return an empty iterator.
6439 if self.tx_cap == 0 {
6440 return StreamIter::default();
6441 }
6442
6443 self.streams.writable()
6444 }
6445
6446 /// Returns the maximum possible size of egress UDP payloads.
6447 ///
6448 /// This is the maximum size of UDP payloads that can be sent, and depends
6449 /// on both the configured maximum send payload size of the local endpoint
6450 /// (as configured with [`set_max_send_udp_payload_size()`]), as well as
6451 /// the transport parameter advertised by the remote peer.
6452 ///
6453 /// Note that this value can change during the lifetime of the connection,
6454 /// but should remain stable across consecutive calls to [`send()`].
6455 ///
6456 /// [`set_max_send_udp_payload_size()`]:
6457 /// struct.Config.html#method.set_max_send_udp_payload_size
6458 /// [`send()`]: struct.Connection.html#method.send
6459 pub fn max_send_udp_payload_size(&self) -> usize {
6460 let max_datagram_size = self
6461 .paths
6462 .get_active()
6463 .ok()
6464 .map(|p| p.recovery.max_datagram_size());
6465
6466 if let Some(max_datagram_size) = max_datagram_size {
6467 if self.is_established() {
6468 // We cap the maximum packet size to 16KB or so, so that it can be
6469 // always encoded with a 2-byte varint.
6470 return cmp::min(16383, max_datagram_size);
6471 }
6472 }
6473
6474 // Allow for 1200 bytes (minimum QUIC packet size) during the
6475 // handshake.
6476 MIN_CLIENT_INITIAL_LEN
6477 }
6478
6479 /// Schedule an ack-eliciting packet on the active path.
6480 ///
6481 /// QUIC packets might not contain ack-eliciting frames during normal
6482 /// operating conditions. If the packet would already contain
6483 /// ack-eliciting frames, this method does not change any behavior.
6484 /// However, if the packet would not ordinarily contain ack-eliciting
6485 /// frames, this method ensures that a PING frame sent.
6486 ///
6487 /// Calling this method multiple times before [`send()`] has no effect.
6488 ///
6489 /// [`send()`]: struct.Connection.html#method.send
6490 pub fn send_ack_eliciting(&mut self) -> Result<()> {
6491 if self.is_closed() || self.is_draining() {
6492 return Ok(());
6493 }
6494 self.paths.get_active_mut()?.needs_ack_eliciting = true;
6495 Ok(())
6496 }
6497
6498 /// Schedule an ack-eliciting packet on the specified path.
6499 ///
6500 /// See [`send_ack_eliciting()`] for more detail. [`InvalidState`] is
6501 /// returned if there is no record of the path.
6502 ///
6503 /// [`send_ack_eliciting()`]: struct.Connection.html#method.send_ack_eliciting
6504 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
6505 pub fn send_ack_eliciting_on_path(
6506 &mut self, local: SocketAddr, peer: SocketAddr,
6507 ) -> Result<()> {
6508 if self.is_closed() || self.is_draining() {
6509 return Ok(());
6510 }
6511 let path_id = self
6512 .paths
6513 .path_id_from_addrs(&(local, peer))
6514 .ok_or(Error::InvalidState)?;
6515 self.paths.get_mut(path_id)?.needs_ack_eliciting = true;
6516 Ok(())
6517 }
6518
6519 /// Reads the first received DATAGRAM.
6520 ///
6521 /// On success the DATAGRAM's data is returned along with its size.
6522 ///
6523 /// [`Done`] is returned if there is no data to read.
6524 ///
6525 /// [`BufferTooShort`] is returned if the provided buffer is too small for
6526 /// the DATAGRAM.
6527 ///
6528 /// [`Done`]: enum.Error.html#variant.Done
6529 /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
6530 ///
6531 /// ## Examples:
6532 ///
6533 /// ```no_run
6534 /// # let mut buf = [0; 512];
6535 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6536 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6537 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6538 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6539 /// # let local = socket.local_addr().unwrap();
6540 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6541 /// let mut dgram_buf = [0; 512];
6542 /// while let Ok((len)) = conn.dgram_recv(&mut dgram_buf) {
6543 /// println!("Got {} bytes of DATAGRAM", len);
6544 /// }
6545 /// # Ok::<(), quiche::Error>(())
6546 /// ```
6547 #[inline]
6548 pub fn dgram_recv(&mut self, buf: &mut [u8]) -> Result<usize> {
6549 match self.dgram_recv_queue.pop() {
6550 Some(d) => {
6551 if d.len() > buf.len() {
6552 return Err(Error::BufferTooShort);
6553 }
6554
6555 buf[..d.len()].copy_from_slice(&d);
6556 Ok(d.len())
6557 },
6558
6559 None => Err(Error::Done),
6560 }
6561 }
6562
6563 /// Reads the first received DATAGRAM.
6564 ///
6565 /// This is the same as [`dgram_recv()`] but returns the DATAGRAM as a
6566 /// `Vec<u8>` instead of copying into the provided buffer.
6567 ///
6568 /// [`dgram_recv()`]: struct.Connection.html#method.dgram_recv
6569 #[inline]
6570 pub fn dgram_recv_vec(&mut self) -> Result<Vec<u8>> {
6571 match self.dgram_recv_queue.pop() {
6572 Some(d) => Ok(d),
6573
6574 None => Err(Error::Done),
6575 }
6576 }
6577
    /// Reads the first received DATAGRAM without removing it from the queue.
    ///
    /// On success the DATAGRAM's data is returned along with the actual number
    /// of bytes peeked. The requested length cannot exceed the DATAGRAM's
    /// actual length.
    ///
    /// [`Done`] is returned if there is no data to read.
    ///
    /// [`BufferTooShort`] is returned if the provided buffer is smaller than
    /// the number of bytes to peek.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
    #[inline]
    pub fn dgram_recv_peek(&self, buf: &mut [u8], len: usize) -> Result<usize> {
        // Non-destructive read; the queue performs the length validation.
        self.dgram_recv_queue.peek_front_bytes(buf, len)
    }
6595
    /// Returns the length of the first stored DATAGRAM.
    ///
    /// Returns `None` when the receive queue holds no DATAGRAMs.
    #[inline]
    pub fn dgram_recv_front_len(&self) -> Option<usize> {
        self.dgram_recv_queue.peek_front_len()
    }
6601
    /// Returns the number of items in the DATAGRAM receive queue.
    #[inline]
    pub fn dgram_recv_queue_len(&self) -> usize {
        // The queue tracks its own item count.
        self.dgram_recv_queue.len()
    }
6607
    /// Returns the total size of all items in the DATAGRAM receive queue.
    #[inline]
    pub fn dgram_recv_queue_byte_size(&self) -> usize {
        // The queue tracks its own cumulative byte size.
        self.dgram_recv_queue.byte_size()
    }
6613
    /// Returns the number of items in the DATAGRAM send queue.
    #[inline]
    pub fn dgram_send_queue_len(&self) -> usize {
        // The queue tracks its own item count.
        self.dgram_send_queue.len()
    }
6619
    /// Returns the total size of all items in the DATAGRAM send queue.
    #[inline]
    pub fn dgram_send_queue_byte_size(&self) -> usize {
        // The queue tracks its own cumulative byte size.
        self.dgram_send_queue.byte_size()
    }
6625
    /// Returns whether or not the DATAGRAM send queue is full.
    #[inline]
    pub fn is_dgram_send_queue_full(&self) -> bool {
        // The queue itself knows whether it has reached its limit.
        self.dgram_send_queue.is_full()
    }
6631
    /// Returns whether or not the DATAGRAM recv queue is full.
    #[inline]
    pub fn is_dgram_recv_queue_full(&self) -> bool {
        // The queue itself knows whether it has reached its limit.
        self.dgram_recv_queue.is_full()
    }
6637
6638 /// Sends data in a DATAGRAM frame.
6639 ///
6640 /// [`Done`] is returned if no data was written.
6641 /// [`InvalidState`] is returned if the peer does not support DATAGRAM.
6642 /// [`BufferTooShort`] is returned if the DATAGRAM frame length is larger
6643 /// than peer's supported DATAGRAM frame length. Use
6644 /// [`dgram_max_writable_len()`] to get the largest supported DATAGRAM
6645 /// frame length.
6646 ///
6647 /// Note that there is no flow control of DATAGRAM frames, so in order to
6648 /// avoid buffering an infinite amount of frames we apply an internal
6649 /// limit.
6650 ///
6651 /// [`Done`]: enum.Error.html#variant.Done
6652 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
6653 /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
6654 /// [`dgram_max_writable_len()`]:
6655 /// struct.Connection.html#method.dgram_max_writable_len
6656 ///
6657 /// ## Examples:
6658 ///
6659 /// ```no_run
6660 /// # let mut buf = [0; 512];
6661 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6662 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6663 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6664 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6665 /// # let local = socket.local_addr().unwrap();
6666 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6667 /// conn.dgram_send(b"hello")?;
6668 /// # Ok::<(), quiche::Error>(())
6669 /// ```
6670 pub fn dgram_send(&mut self, buf: &[u8]) -> Result<()> {
6671 let max_payload_len = match self.dgram_max_writable_len() {
6672 Some(v) => v,
6673
6674 None => return Err(Error::InvalidState),
6675 };
6676
6677 if buf.len() > max_payload_len {
6678 return Err(Error::BufferTooShort);
6679 }
6680
6681 self.dgram_send_queue.push(buf.to_vec())?;
6682
6683 let active_path = self.paths.get_active_mut()?;
6684
6685 if self.dgram_send_queue.byte_size() >
6686 active_path.recovery.cwnd_available()
6687 {
6688 active_path.recovery.update_app_limited(false);
6689 }
6690
6691 Ok(())
6692 }
6693
6694 /// Sends data in a DATAGRAM frame.
6695 ///
6696 /// This is the same as [`dgram_send()`] but takes a `Vec<u8>` instead of
6697 /// a slice.
6698 ///
6699 /// [`dgram_send()`]: struct.Connection.html#method.dgram_send
6700 pub fn dgram_send_vec(&mut self, buf: Vec<u8>) -> Result<()> {
6701 let max_payload_len = match self.dgram_max_writable_len() {
6702 Some(v) => v,
6703
6704 None => return Err(Error::InvalidState),
6705 };
6706
6707 if buf.len() > max_payload_len {
6708 return Err(Error::BufferTooShort);
6709 }
6710
6711 self.dgram_send_queue.push(buf)?;
6712
6713 let active_path = self.paths.get_active_mut()?;
6714
6715 if self.dgram_send_queue.byte_size() >
6716 active_path.recovery.cwnd_available()
6717 {
6718 active_path.recovery.update_app_limited(false);
6719 }
6720
6721 Ok(())
6722 }
6723
6724 /// Purges queued outgoing DATAGRAMs matching the predicate.
6725 ///
6726 /// In other words, remove all elements `e` such that `f(&e)` returns true.
6727 ///
6728 /// ## Examples:
6729 /// ```no_run
6730 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6731 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6732 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6733 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6734 /// # let local = socket.local_addr().unwrap();
6735 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6736 /// conn.dgram_send(b"hello")?;
6737 /// conn.dgram_purge_outgoing(&|d: &[u8]| -> bool { d[0] == 0 });
6738 /// # Ok::<(), quiche::Error>(())
6739 /// ```
    #[inline]
    pub fn dgram_purge_outgoing<FN: Fn(&[u8]) -> bool>(&mut self, f: FN) {
        // Drop every queued (not yet sent) DATAGRAM whose payload matches
        // the predicate `f`.
        self.dgram_send_queue.purge(f);
    }
6744
6745 /// Returns the maximum DATAGRAM payload that can be sent.
6746 ///
6747 /// [`None`] is returned if the peer hasn't advertised a maximum DATAGRAM
6748 /// frame size.
6749 ///
6750 /// ## Examples:
6751 ///
6752 /// ```no_run
6753 /// # let mut buf = [0; 512];
6754 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6755 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6756 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6757 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6758 /// # let local = socket.local_addr().unwrap();
6759 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6760 /// if let Some(payload_size) = conn.dgram_max_writable_len() {
6761 /// if payload_size > 5 {
6762 /// conn.dgram_send(b"hello")?;
6763 /// }
6764 /// }
6765 /// # Ok::<(), quiche::Error>(())
6766 /// ```
6767 #[inline]
6768 pub fn dgram_max_writable_len(&self) -> Option<usize> {
6769 match self.peer_transport_params.max_datagram_frame_size {
6770 None => None,
6771 Some(peer_frame_len) => {
6772 let dcid = self.destination_id();
6773 // Start from the maximum packet size...
6774 let mut max_len = self.max_send_udp_payload_size();
6775 // ...subtract the Short packet header overhead...
6776 // (1 byte of pkt_len + len of dcid)
6777 max_len = max_len.saturating_sub(1 + dcid.len());
6778 // ...subtract the packet number (max len)...
6779 max_len = max_len.saturating_sub(packet::MAX_PKT_NUM_LEN);
6780 // ...subtract the crypto overhead...
6781 max_len = max_len.saturating_sub(
6782 self.crypto_ctx[packet::Epoch::Application]
6783 .crypto_overhead()?,
6784 );
6785 // ...clamp to what peer can support...
6786 max_len = cmp::min(peer_frame_len as usize, max_len);
6787 // ...subtract frame overhead, checked for underflow.
6788 // (1 byte of frame type + len of length )
6789 max_len.checked_sub(1 + frame::MAX_DGRAM_OVERHEAD)
6790 },
6791 }
6792 }
6793
6794 fn dgram_enabled(&self) -> bool {
6795 self.local_transport_params
6796 .max_datagram_frame_size
6797 .is_some()
6798 }
6799
6800 /// Returns when the next timeout event will occur.
6801 ///
6802 /// Once the timeout Instant has been reached, the [`on_timeout()`] method
6803 /// should be called. A timeout of `None` means that the timer should be
6804 /// disarmed.
6805 ///
6806 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
6807 pub fn timeout_instant(&self) -> Option<Instant> {
6808 if self.is_closed() {
6809 return None;
6810 }
6811
6812 if self.is_draining() {
6813 // Draining timer takes precedence over all other timers. If it is
6814 // set it means the connection is closing so there's no point in
6815 // processing the other timers.
6816 self.draining_timer
6817 } else {
6818 // Use the lowest timer value (i.e. "sooner") among idle and loss
6819 // detection timers. If they are both unset (i.e. `None`) then the
6820 // result is `None`, but if at least one of them is set then a
6821 // `Some(...)` value is returned.
6822 let path_timer = self
6823 .paths
6824 .iter()
6825 .filter_map(|(_, p)| p.recovery.loss_detection_timer())
6826 .min();
6827
6828 let key_update_timer = self.crypto_ctx[packet::Epoch::Application]
6829 .key_update
6830 .as_ref()
6831 .map(|key_update| key_update.timer);
6832
6833 let timers = [self.idle_timer, path_timer, key_update_timer];
6834
6835 timers.iter().filter_map(|&x| x).min()
6836 }
6837 }
6838
6839 /// Returns the amount of time until the next timeout event.
6840 ///
6841 /// Once the given duration has elapsed, the [`on_timeout()`] method should
6842 /// be called. A timeout of `None` means that the timer should be disarmed.
6843 ///
6844 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
6845 pub fn timeout(&self) -> Option<Duration> {
6846 self.timeout_instant().map(|timeout| {
6847 let now = Instant::now();
6848
6849 if timeout <= now {
6850 Duration::ZERO
6851 } else {
6852 timeout.duration_since(now)
6853 }
6854 })
6855 }
6856
6857 /// Processes a timeout event.
6858 ///
6859 /// If no timeout has occurred it does nothing.
    pub fn on_timeout(&mut self) {
        let now = Instant::now();

        if let Some(draining_timer) = self.draining_timer {
            if draining_timer <= now {
                trace!("{} draining timeout expired", self.trace_id);

                self.mark_closed();
            }

            // Draining timer takes precedence over all other timers. If it is
            // set it means the connection is closing so there's no point in
            // processing the other timers.
            return;
        }

        // Idle timeout: close the connection and flag it as timed out, so
        // `is_timed_out()` reports the cause.
        if let Some(timer) = self.idle_timer {
            if timer <= now {
                trace!("{} idle timeout expired", self.trace_id);

                self.mark_closed();
                self.timed_out = true;
                return;
            }
        }

        // Key update: once its timer expires, the previous key material is
        // dropped by clearing the pending `key_update` state.
        if let Some(timer) = self.crypto_ctx[packet::Epoch::Application]
            .key_update
            .as_ref()
            .map(|key_update| key_update.timer)
        {
            if timer <= now {
                // Discard previous key once key update timer expired.
                let _ = self.crypto_ctx[packet::Epoch::Application]
                    .key_update
                    .take();
            }
        }

        let handshake_status = self.handshake_status();

        // Run loss detection on every path whose timer expired, folding the
        // per-path losses into the connection-wide counters.
        for (_, p) in self.paths.iter_mut() {
            if let Some(timer) = p.recovery.loss_detection_timer() {
                if timer <= now {
                    trace!("{} loss detection timeout expired", self.trace_id);

                    let OnLossDetectionTimeoutOutcome {
                        lost_packets,
                        lost_bytes,
                    } = p.on_loss_detection_timeout(
                        handshake_status,
                        now,
                        self.is_server,
                        &self.trace_id,
                    );

                    self.lost_count += lost_packets;
                    self.lost_bytes += lost_bytes as u64;

                    qlog_with_type!(QLOG_METRICS, self.qlog, q, {
                        p.recovery.maybe_qlog(q, now);
                    });
                }
            }
        }

        // Notify timeout events to the application.
        self.paths.notify_failed_validations();

        // If the active path failed, try to find a new candidate.
        if self.paths.get_active_path_id().is_err() {
            match self.paths.find_candidate_path() {
                Some(pid) => {
                    if self.set_active_path(pid, now).is_err() {
                        // The connection cannot continue.
                        self.mark_closed();
                    }
                },

                // The connection cannot continue.
                None => {
                    self.mark_closed();
                },
            }
        }
    }
6946
6947 /// Requests the stack to perform path validation of the proposed 4-tuple.
6948 ///
6949 /// Probing new paths requires spare Connection IDs at both the host and the
6950 /// peer sides. If it is not the case, it raises an [`OutOfIdentifiers`].
6951 ///
6952 /// The probing of new addresses can only be done by the client. The server
6953 /// can only probe network paths that were previously advertised by
6954 /// [`PathEvent::New`]. If the server tries to probe such an unseen network
6955 /// path, this call raises an [`InvalidState`].
6956 ///
6957 /// The caller might also want to probe an existing path. In such case, it
6958 /// triggers a PATH_CHALLENGE frame, but it does not require spare CIDs.
6959 ///
6960 /// A server always probes a new path it observes. Calling this method is
6961 /// hence not required to validate a new path. However, a server can still
6962 /// request an additional path validation of the proposed 4-tuple.
6963 ///
6964 /// Calling this method several times before calling [`send()`] or
6965 /// [`send_on_path()`] results in a single probe being generated. An
6966 /// application wanting to send multiple in-flight probes must call this
6967 /// method again after having sent packets.
6968 ///
6969 /// Returns the Destination Connection ID sequence number associated to that
6970 /// path.
6971 ///
6972 /// [`PathEvent::New`]: enum.PathEvent.html#variant.New
6973 /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
6974 /// [`InvalidState`]: enum.Error.html#InvalidState
6975 /// [`send()`]: struct.Connection.html#method.send
6976 /// [`send_on_path()`]: struct.Connection.html#method.send_on_path
6977 pub fn probe_path(
6978 &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
6979 ) -> Result<u64> {
6980 // We may want to probe an existing path.
6981 let pid = match self.paths.path_id_from_addrs(&(local_addr, peer_addr)) {
6982 Some(pid) => pid,
6983 None => self.create_path_on_client(local_addr, peer_addr)?,
6984 };
6985
6986 let path = self.paths.get_mut(pid)?;
6987 path.request_validation();
6988
6989 path.active_dcid_seq.ok_or(Error::InvalidState)
6990 }
6991
6992 /// Migrates the connection to a new local address `local_addr`.
6993 ///
6994 /// The behavior is similar to [`migrate()`], with the nuance that the
6995 /// connection only changes the local address, but not the peer one.
6996 ///
6997 /// See [`migrate()`] for the full specification of this method.
6998 ///
6999 /// [`migrate()`]: struct.Connection.html#method.migrate
7000 pub fn migrate_source(&mut self, local_addr: SocketAddr) -> Result<u64> {
7001 let peer_addr = self.paths.get_active()?.peer_addr();
7002 self.migrate(local_addr, peer_addr)
7003 }
7004
7005 /// Migrates the connection over the given network path between `local_addr`
7006 /// and `peer_addr`.
7007 ///
7008 /// Connection migration can only be initiated by the client. Calling this
7009 /// method as a server returns [`InvalidState`].
7010 ///
7011 /// To initiate voluntary migration, there should be enough Connection IDs
7012 /// at both sides. If this requirement is not satisfied, this call returns
7013 /// [`OutOfIdentifiers`].
7014 ///
7015 /// Returns the Destination Connection ID associated to that migrated path.
7016 ///
7017 /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
7018 /// [`InvalidState`]: enum.Error.html#InvalidState
    pub fn migrate(
        &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
    ) -> Result<u64> {
        // Only clients may initiate connection migration.
        if self.is_server {
            return Err(Error::InvalidState);
        }

        // If the path already exists, mark it as the active one.
        let (pid, dcid_seq) = if let Some(pid) =
            self.paths.path_id_from_addrs(&(local_addr, peer_addr))
        {
            let path = self.paths.get_mut(pid)?;

            // If it is already active, do nothing.
            if path.active() {
                return path.active_dcid_seq.ok_or(Error::OutOfIdentifiers);
            }

            // Ensures that a Source Connection ID has been dedicated to this
            // path, or a free one is available. This is only required if the
            // host uses non-zero length Source Connection IDs.
            if !self.ids.zero_length_scid() &&
                path.active_scid_seq.is_none() &&
                self.ids.available_scids() == 0
            {
                return Err(Error::OutOfIdentifiers);
            }

            // Ensures that the migrated path has a Destination Connection ID.
            let dcid_seq = if let Some(dcid_seq) = path.active_dcid_seq {
                dcid_seq
            } else {
                // Grab the lowest unused DCID and bind it to this path.
                let dcid_seq = self
                    .ids
                    .lowest_available_dcid_seq()
                    .ok_or(Error::OutOfIdentifiers)?;

                self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
                path.active_dcid_seq = Some(dcid_seq);

                dcid_seq
            };

            (pid, dcid_seq)
        } else {
            // No existing path for this 4-tuple: create one. The new path is
            // expected to already carry a Destination CID; if not, fail.
            let pid = self.create_path_on_client(local_addr, peer_addr)?;

            let dcid_seq = self
                .paths
                .get(pid)?
                .active_dcid_seq
                .ok_or(Error::InvalidState)?;

            (pid, dcid_seq)
        };

        // Change the active path.
        self.set_active_path(pid, Instant::now())?;

        Ok(dcid_seq)
    }
7080
7081 /// Provides additional source Connection IDs that the peer can use to reach
7082 /// this host.
7083 ///
7084 /// This triggers sending NEW_CONNECTION_ID frames if the provided Source
7085 /// Connection ID is not already present. In the case the caller tries to
7086 /// reuse a Connection ID with a different reset token, this raises an
7087 /// `InvalidState`.
7088 ///
7089 /// At any time, the peer cannot have more Destination Connection IDs than
7090 /// the maximum number of active Connection IDs it negotiated. In such case
7091 /// (i.e., when [`scids_left()`] returns 0), if the host agrees to
7092 /// request the removal of previous connection IDs, it sets the
7093 /// `retire_if_needed` parameter. Otherwise, an [`IdLimit`] is returned.
7094 ///
7095 /// Note that setting `retire_if_needed` does not prevent this function from
7096 /// returning an [`IdLimit`] in the case the caller wants to retire still
7097 /// unannounced Connection IDs.
7098 ///
7099 /// The caller is responsible for ensuring that the provided `scid` is not
7100 /// repeated several times over the connection. quiche ensures that as long
7101 /// as the provided Connection ID is still in use (i.e., not retired), it
7102 /// does not assign a different sequence number.
7103 ///
7104 /// Note that if the host uses zero-length Source Connection IDs, it cannot
7105 /// advertise Source Connection IDs and calling this method returns an
7106 /// [`InvalidState`].
7107 ///
7108 /// Returns the sequence number associated to the provided Connection ID.
7109 ///
7110 /// [`scids_left()`]: struct.Connection.html#method.scids_left
7111 /// [`IdLimit`]: enum.Error.html#IdLimit
7112 /// [`InvalidState`]: enum.Error.html#InvalidState
    pub fn new_scid(
        &mut self, scid: &ConnectionId, reset_token: u128, retire_if_needed: bool,
    ) -> Result<u64> {
        // Delegate to the connection ID set, which assigns (or reuses) the
        // sequence number. The `true`/`None` positional arguments presumably
        // mark the CID as advertisable with no pre-assigned path —
        // NOTE(review): confirm against `ids.new_scid()`'s signature.
        self.ids.new_scid(
            scid.to_vec().into(),
            Some(reset_token),
            true,
            None,
            retire_if_needed,
        )
    }
7124
7125 /// Returns the number of source Connection IDs that are active. This is
7126 /// only meaningful if the host uses non-zero length Source Connection IDs.
    pub fn active_scids(&self) -> usize {
        // The count is maintained by the connection ID set.
        self.ids.active_source_cids()
    }
7130
7131 /// Returns the number of source Connection IDs that should be provided
7132 /// to the peer without exceeding the limit it advertised.
7133 ///
7134 /// This will automatically limit the number of Connection IDs to the
7135 /// minimum between the locally configured active connection ID limit,
7136 /// and the one sent by the peer.
7137 ///
7138 /// To obtain the maximum possible value allowed by the peer an application
7139 /// can instead inspect the [`peer_active_conn_id_limit`] value.
7140 ///
7141 /// [`peer_active_conn_id_limit`]: struct.Stats.html#structfield.peer_active_conn_id_limit
7142 #[inline]
7143 pub fn scids_left(&self) -> usize {
7144 let max_active_source_cids = cmp::min(
7145 self.peer_transport_params.active_conn_id_limit,
7146 self.local_transport_params.active_conn_id_limit,
7147 ) as usize;
7148
7149 max_active_source_cids - self.active_scids()
7150 }
7151
7152 /// Requests the retirement of the destination Connection ID used by the
7153 /// host to reach its peer.
7154 ///
7155 /// This triggers sending RETIRE_CONNECTION_ID frames.
7156 ///
7157 /// If the application tries to retire a non-existing Destination Connection
7158 /// ID sequence number, or if it uses zero-length Destination Connection ID,
7159 /// this method returns an [`InvalidState`].
7160 ///
7161 /// At any time, the host must have at least one Destination ID. If the
7162 /// application tries to retire the last one, or if the caller tries to
7163 /// retire the destination Connection ID used by the current active path
7164 /// while having neither spare Destination Connection IDs nor validated
7165 /// network paths, this method returns an [`OutOfIdentifiers`]. This
7166 /// behavior prevents the caller from stalling the connection due to the
7167 /// lack of validated path to send non-probing packets.
7168 ///
7169 /// [`InvalidState`]: enum.Error.html#InvalidState
7170 /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
    pub fn retire_dcid(&mut self, dcid_seq: u64) -> Result<()> {
        // Zero-length Destination CIDs cannot be retired.
        if self.ids.zero_length_dcid() {
            return Err(Error::InvalidState);
        }

        let active_path_dcid_seq = self
            .paths
            .get_active()?
            .active_dcid_seq
            .ok_or(Error::InvalidState)?;

        let active_path_id = self.paths.get_active_path_id()?;

        // Refuse to retire the DCID used by the active path when there is
        // neither a spare DCID nor another usable path to fall back on:
        // doing so would leave no way to send non-probing packets and
        // stall the connection.
        if active_path_dcid_seq == dcid_seq &&
            self.ids.lowest_available_dcid_seq().is_none() &&
            !self
                .paths
                .iter()
                .any(|(pid, p)| pid != active_path_id && p.usable())
        {
            return Err(Error::OutOfIdentifiers);
        }

        if let Some(pid) = self.ids.retire_dcid(dcid_seq)? {
            // The retired Destination CID was associated to a given path. Let's
            // find an available DCID to associate to that path.
            let path = self.paths.get_mut(pid)?;
            let dcid_seq = self.ids.lowest_available_dcid_seq();

            if let Some(dcid_seq) = dcid_seq {
                self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
            }

            // May be `None` when no replacement DCID is available.
            path.active_dcid_seq = dcid_seq;
        }

        Ok(())
    }
7209
7210 /// Processes path-specific events.
7211 ///
7212 /// On success it returns a [`PathEvent`], or `None` when there are no
7213 /// events to report. Please refer to [`PathEvent`] for the exhaustive event
7214 /// list.
7215 ///
7216 /// Note that all events are edge-triggered, meaning that once reported they
7217 /// will not be reported again by calling this method again, until the event
7218 /// is re-armed.
7219 ///
7220 /// [`PathEvent`]: enum.PathEvent.html
    pub fn path_event_next(&mut self) -> Option<PathEvent> {
        // Events are queued by the path set and drained one at a time.
        self.paths.pop_event()
    }
7224
7225 /// Returns the number of source Connection IDs that are retired.
    pub fn retired_scids(&self) -> usize {
        // The count is maintained by the connection ID set.
        self.ids.retired_source_cids()
    }
7229
7230 /// Returns a source `ConnectionId` that has been retired.
7231 ///
7232 /// On success it returns a [`ConnectionId`], or `None` when there are no
7233 /// more retired connection IDs.
7234 ///
7235 /// [`ConnectionId`]: struct.ConnectionId.html
    pub fn retired_scid_next(&mut self) -> Option<ConnectionId<'static>> {
        // Pops (and thus consumes) one retired Source CID per call.
        self.ids.pop_retired_scid()
    }
7239
7240 /// Returns the number of spare Destination Connection IDs, i.e.,
7241 /// Destination Connection IDs that are still unused.
7242 ///
7243 /// Note that this function returns 0 if the host uses zero length
7244 /// Destination Connection IDs.
    pub fn available_dcids(&self) -> usize {
        // The count is maintained by the connection ID set.
        self.ids.available_dcids()
    }
7248
7249 /// Returns an iterator over destination `SockAddr`s whose association
7250 /// with `from` forms a known QUIC path on which packets can be sent to.
7251 ///
7252 /// This function is typically used in combination with [`send_on_path()`].
7253 ///
7254 /// Note that the iterator includes all the possible combination of
7255 /// destination `SockAddr`s, even those whose sending is not required now.
7256 /// In other words, this is another way for the application to recall from
7257 /// past [`PathEvent::New`] events.
7258 ///
7259 /// [`PathEvent::New`]: enum.PathEvent.html#variant.New
7260 /// [`send_on_path()`]: struct.Connection.html#method.send_on_path
7261 ///
7262 /// ## Examples:
7263 ///
7264 /// ```no_run
7265 /// # let mut out = [0; 512];
7266 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
7267 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
7268 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
7269 /// # let local = socket.local_addr().unwrap();
7270 /// # let peer = "127.0.0.1:1234".parse().unwrap();
7271 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
7272 /// // Iterate over possible destinations for the given local `SockAddr`.
7273 /// for dest in conn.paths_iter(local) {
7274 /// loop {
7275 /// let (write, send_info) =
7276 /// match conn.send_on_path(&mut out, Some(local), Some(dest)) {
7277 /// Ok(v) => v,
7278 ///
7279 /// Err(quiche::Error::Done) => {
7280 /// // Done writing for this destination.
7281 /// break;
7282 /// },
7283 ///
7284 /// Err(e) => {
7285 /// // An error occurred, handle it.
7286 /// break;
7287 /// },
7288 /// };
7289 ///
7290 /// socket.send_to(&out[..write], &send_info.to).unwrap();
7291 /// }
7292 /// }
7293 /// # Ok::<(), quiche::Error>(())
7294 /// ```
7295 #[inline]
7296 pub fn paths_iter(&self, from: SocketAddr) -> SocketAddrIter {
7297 // Instead of trying to identify whether packets will be sent on the
7298 // given 4-tuple, simply filter paths that cannot be used.
7299 SocketAddrIter {
7300 sockaddrs: self
7301 .paths
7302 .iter()
7303 .filter(|(_, p)| p.active_dcid_seq.is_some())
7304 .filter(|(_, p)| p.usable() || p.probing_required())
7305 .filter(|(_, p)| p.local_addr() == from)
7306 .map(|(_, p)| p.peer_addr())
7307 .collect(),
7308
7309 index: 0,
7310 }
7311 }
7312
7313 /// Closes the connection with the given error and reason.
7314 ///
7315 /// The `app` parameter specifies whether an application close should be
7316 /// sent to the peer. Otherwise a normal connection close is sent.
7317 ///
7318 /// If `app` is true but the connection is not in a state that is safe to
7319 /// send an application error (not established nor in early data), in
7320 /// accordance with [RFC
7321 /// 9000](https://www.rfc-editor.org/rfc/rfc9000.html#section-10.2.3-3), the
7322 /// error code is changed to APPLICATION_ERROR and the reason phrase is
7323 /// cleared.
7324 ///
7325 /// Returns [`Done`] if the connection had already been closed.
7326 ///
7327 /// Note that the connection will not be closed immediately. An application
7328 /// should continue calling the [`recv()`], [`send()`], [`timeout()`] and
7329 /// [`on_timeout()`] methods as normal, until the [`is_closed()`] method
7330 /// returns `true`.
7331 ///
7332 /// [`Done`]: enum.Error.html#variant.Done
7333 /// [`recv()`]: struct.Connection.html#method.recv
7334 /// [`send()`]: struct.Connection.html#method.send
7335 /// [`timeout()`]: struct.Connection.html#method.timeout
7336 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
7337 /// [`is_closed()`]: struct.Connection.html#method.is_closed
7338 pub fn close(&mut self, app: bool, err: u64, reason: &[u8]) -> Result<()> {
7339 if self.is_closed() || self.is_draining() {
7340 return Err(Error::Done);
7341 }
7342
7343 if self.local_error.is_some() {
7344 return Err(Error::Done);
7345 }
7346
7347 let is_safe_to_send_app_data =
7348 self.is_established() || self.is_in_early_data();
7349
7350 if app && !is_safe_to_send_app_data {
7351 // Clear error information.
7352 self.local_error = Some(ConnectionError {
7353 is_app: false,
7354 error_code: 0x0c,
7355 reason: vec![],
7356 });
7357 } else {
7358 self.local_error = Some(ConnectionError {
7359 is_app: app,
7360 error_code: err,
7361 reason: reason.to_vec(),
7362 });
7363 }
7364
7365 // When no packet was successfully processed close connection immediately.
7366 if self.recv_count == 0 {
7367 self.mark_closed();
7368 }
7369
7370 Ok(())
7371 }
7372
7373 /// Returns a string uniquely representing the connection.
7374 ///
7375 /// This can be used for logging purposes to differentiate between multiple
7376 /// connections.
    #[inline]
    pub fn trace_id(&self) -> &str {
        // Borrow the identifier stored on the connection.
        &self.trace_id
    }
7381
7382 /// Returns the negotiated ALPN protocol.
7383 ///
7384 /// If no protocol has been negotiated, the returned value is empty.
    #[inline]
    pub fn application_proto(&self) -> &[u8] {
        // `alpn` holds the negotiated protocol bytes (empty if none yet).
        self.alpn.as_ref()
    }
7389
7390 /// Returns the server name requested by the client.
    #[inline]
    pub fn server_name(&self) -> Option<&str> {
        // The SNI value is tracked by the TLS handshake state.
        self.handshake.server_name()
    }
7395
7396 /// Returns the peer's leaf certificate (if any) as a DER-encoded buffer.
    #[inline]
    pub fn peer_cert(&self) -> Option<&[u8]> {
        // Exposed by the TLS handshake state as a DER-encoded buffer.
        self.handshake.peer_cert()
    }
7401
7402 /// Returns the peer's certificate chain (if any) as a vector of DER-encoded
7403 /// buffers.
7404 ///
7405 /// The certificate at index 0 is the peer's leaf certificate, the other
7406 /// certificates (if any) are the chain certificate authorities used to
7407 /// sign the leaf certificate.
    #[inline]
    pub fn peer_cert_chain(&self) -> Option<Vec<&[u8]>> {
        // Exposed by the TLS handshake state, leaf certificate first.
        self.handshake.peer_cert_chain()
    }
7412
7413 /// Returns the serialized cryptographic session for the connection.
7414 ///
7415 /// This can be used by a client to cache a connection's session, and resume
7416 /// it later using the [`set_session()`] method.
7417 ///
7418 /// [`set_session()`]: struct.Connection.html#method.set_session
    #[inline]
    pub fn session(&self) -> Option<&[u8]> {
        // Borrow the cached serialized session, if one was captured.
        self.session.as_deref()
    }
7423
7424 /// Returns the source connection ID.
7425 ///
7426 /// When there are multiple IDs, and if there is an active path, the ID used
7427 /// on that path is returned. Otherwise the oldest ID is returned.
7428 ///
7429 /// Note that the value returned can change throughout the connection's
7430 /// lifetime.
7431 #[inline]
7432 pub fn source_id(&self) -> ConnectionId<'_> {
7433 if let Ok(path) = self.paths.get_active() {
7434 if let Some(active_scid_seq) = path.active_scid_seq {
7435 if let Ok(e) = self.ids.get_scid(active_scid_seq) {
7436 return ConnectionId::from_ref(e.cid.as_ref());
7437 }
7438 }
7439 }
7440
7441 let e = self.ids.oldest_scid();
7442 ConnectionId::from_ref(e.cid.as_ref())
7443 }
7444
7445 /// Returns all active source connection IDs.
7446 ///
7447 /// An iterator is returned for all active IDs (i.e. ones that have not
7448 /// been explicitly retired yet).
    #[inline]
    pub fn source_ids(&self) -> impl Iterator<Item = &ConnectionId<'_>> {
        // Iterates the active (non-retired) Source CIDs in the ID set.
        self.ids.scids_iter()
    }
7453
7454 /// Returns the destination connection ID.
7455 ///
7456 /// Note that the value returned can change throughout the connection's
7457 /// lifetime.
7458 #[inline]
7459 pub fn destination_id(&self) -> ConnectionId<'_> {
7460 if let Ok(path) = self.paths.get_active() {
7461 if let Some(active_dcid_seq) = path.active_dcid_seq {
7462 if let Ok(e) = self.ids.get_dcid(active_dcid_seq) {
7463 return ConnectionId::from_ref(e.cid.as_ref());
7464 }
7465 }
7466 }
7467
7468 let e = self.ids.oldest_dcid();
7469 ConnectionId::from_ref(e.cid.as_ref())
7470 }
7471
7472 /// Returns the PMTU for the active path if it exists.
7473 ///
    /// This requires no additional packets to be sent but simply checks if PMTUD
7475 /// has completed and has found a valid PMTU.
7476 #[inline]
7477 pub fn pmtu(&self) -> Option<usize> {
7478 if let Ok(path) = self.paths.get_active() {
7479 path.pmtud.as_ref().and_then(|pmtud| pmtud.get_pmtu())
7480 } else {
7481 None
7482 }
7483 }
7484
7485 /// Revalidates the PMTU for the active path by sending a new probe packet
7486 /// of PMTU size. If the probe is dropped PMTUD will restart and find a new
7487 /// valid PMTU.
7488 #[inline]
7489 pub fn revalidate_pmtu(&mut self) {
7490 if let Ok(active_path) = self.paths.get_active_mut() {
7491 if let Some(pmtud) = active_path.pmtud.as_mut() {
7492 pmtud.revalidate_pmtu();
7493 }
7494 }
7495 }
7496
7497 /// Returns true if the connection handshake is complete.
    #[inline]
    pub fn is_established(&self) -> bool {
        // Flag set elsewhere once the handshake completes.
        self.handshake_completed
    }
7502
7503 /// Returns true if the connection is resumed.
    #[inline]
    pub fn is_resumed(&self) -> bool {
        // Resumption status is tracked by the TLS handshake state.
        self.handshake.is_resumed()
    }
7508
7509 /// Returns true if the connection has a pending handshake that has
7510 /// progressed enough to send or receive early data.
    #[inline]
    pub fn is_in_early_data(&self) -> bool {
        // Early-data status is tracked by the TLS handshake state.
        self.handshake.is_in_early_data()
    }
7515
7516 /// Returns the early data reason for the connection.
7517 ///
7518 /// This status can be useful for logging and debugging. See [BoringSSL]
7519 /// documentation for a definition of the reasons.
7520 ///
7521 /// [BoringSSL]: https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#ssl_early_data_reason_t
    #[inline]
    pub fn early_data_reason(&self) -> u32 {
        // Raw reason code as reported by the TLS backend.
        self.handshake.early_data_reason()
    }
7526
7527 /// Returns whether there is stream or DATAGRAM data available to read.
    #[inline]
    pub fn is_readable(&self) -> bool {
        // Readable when any stream has pending data, or at least one
        // DATAGRAM is queued for the application.
        self.streams.has_readable() || self.dgram_recv_front_len().is_some()
    }
7532
7533 /// Returns whether the network path with local address `from` and remote
7534 /// address `peer` has been validated.
7535 ///
7536 /// If the 4-tuple does not exist over the connection, returns an
7537 /// [`InvalidState`].
7538 ///
7539 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
7540 pub fn is_path_validated(
7541 &self, from: SocketAddr, to: SocketAddr,
7542 ) -> Result<bool> {
7543 let pid = self
7544 .paths
7545 .path_id_from_addrs(&(from, to))
7546 .ok_or(Error::InvalidState)?;
7547
7548 Ok(self.paths.get(pid)?.validated())
7549 }
7550
7551 /// Returns true if the connection is draining.
7552 ///
7553 /// If this returns `true`, the connection object cannot yet be dropped, but
7554 /// no new application data can be sent or received. An application should
7555 /// continue calling the [`recv()`], [`timeout()`], and [`on_timeout()`]
7556 /// methods as normal, until the [`is_closed()`] method returns `true`.
7557 ///
7558 /// In contrast, once `is_draining()` returns `true`, calling [`send()`]
7559 /// is not required because no new outgoing packets will be generated.
7560 ///
7561 /// [`recv()`]: struct.Connection.html#method.recv
7562 /// [`send()`]: struct.Connection.html#method.send
7563 /// [`timeout()`]: struct.Connection.html#method.timeout
7564 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
7565 /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn is_draining(&self) -> bool {
        // The draining timer is only armed while the connection is
        // draining, so its presence doubles as the state flag.
        self.draining_timer.is_some()
    }
7570
7571 /// Returns true if the connection is closed.
7572 ///
7573 /// If this returns true, the connection object can be dropped.
    #[inline]
    pub fn is_closed(&self) -> bool {
        // Flag set by `mark_closed()`.
        self.closed
    }
7578
7579 /// Returns true if the connection was closed due to the idle timeout.
    #[inline]
    pub fn is_timed_out(&self) -> bool {
        // Flag set when the idle timer expires in `on_timeout()`.
        self.timed_out
    }
7584
7585 /// Returns the error received from the peer, if any.
7586 ///
7587 /// Note that a `Some` return value does not necessarily imply
7588 /// [`is_closed()`] or any other connection state.
7589 ///
7590 /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn peer_error(&self) -> Option<&ConnectionError> {
        // Borrow the recorded peer error, if any.
        self.peer_error.as_ref()
    }
7595
    /// Returns the error [`close()`] was called with, or internally
    /// created quiche errors, if any.
    ///
    /// Note that a `Some` return value does not necessarily imply
    /// [`is_closed()`] or any other connection state.
    /// `Some` also does not guarantee that the error has been sent to
    /// or received by the peer.
    ///
    /// [`close()`]: struct.Connection.html#method.close
    /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn local_error(&self) -> Option<&ConnectionError> {
        // Borrowed view of the locally-generated close reason, if any.
        self.local_error.as_ref()
    }
7610
    /// Collects and returns statistics about the connection.
    #[inline]
    pub fn stats(&self) -> Stats {
        // Plain snapshot of the connection-level counters; per-path
        // statistics are exposed separately via path_stats().
        Stats {
            recv: self.recv_count,
            sent: self.sent_count,
            lost: self.lost_count,
            spurious_lost: self.spurious_lost_count,
            retrans: self.retrans_count,
            sent_bytes: self.sent_bytes,
            recv_bytes: self.recv_bytes,
            acked_bytes: self.acked_bytes,
            lost_bytes: self.lost_bytes,
            stream_retrans_bytes: self.stream_retrans_bytes,
            dgram_recv: self.dgram_recv_count,
            dgram_sent: self.dgram_sent_count,
            paths_count: self.paths.len(),
            reset_stream_count_local: self.reset_stream_local_count,
            stopped_stream_count_local: self.stopped_stream_local_count,
            reset_stream_count_remote: self.reset_stream_remote_count,
            stopped_stream_count_remote: self.stopped_stream_remote_count,
            data_blocked_sent_count: self.data_blocked_sent_count,
            stream_data_blocked_sent_count: self.stream_data_blocked_sent_count,
            data_blocked_recv_count: self.data_blocked_recv_count,
            stream_data_blocked_recv_count: self.stream_data_blocked_recv_count,
            streams_blocked_bidi_recv_count: self.streams_blocked_bidi_recv_count,
            streams_blocked_uni_recv_count: self.streams_blocked_uni_recv_count,
            path_challenge_rx_count: self.path_challenge_rx_count,
            // Aggregated over all paths; see bytes_in_flight_duration().
            bytes_in_flight_duration: self.bytes_in_flight_duration(),
            tx_buffered_state: self.tx_buffered_state,
        }
    }
7643
7644 /// Returns the sum of the durations when each path in the
7645 /// connection was actively sending bytes or waiting for acks.
7646 /// Note that this could result in a duration that is longer than
7647 /// the actual connection duration in cases where multiple paths
7648 /// are active for extended periods of time. In practice only 1
7649 /// path is typically active at a time.
7650 /// TODO revisit computation if in the future multiple paths are
7651 /// often active at the same time.
7652 fn bytes_in_flight_duration(&self) -> Duration {
7653 self.paths.iter().fold(Duration::ZERO, |acc, (_, path)| {
7654 acc + path.bytes_in_flight_duration()
7655 })
7656 }
7657
7658 /// Returns reference to peer's transport parameters. Returns `None` if we
7659 /// have not yet processed the peer's transport parameters.
7660 pub fn peer_transport_params(&self) -> Option<&TransportParams> {
7661 if !self.parsed_peer_transport_params {
7662 return None;
7663 }
7664
7665 Some(&self.peer_transport_params)
7666 }
7667
    /// Collects and returns statistics about each known path for the
    /// connection.
    pub fn path_stats(&self) -> impl Iterator<Item = PathStats> + '_ {
        // Lazily yields one stats snapshot per path; the `'_` lifetime ties
        // the iterator to the borrow of `self`.
        self.paths.iter().map(|(_, p)| p.stats())
    }
7673
    /// Returns whether or not this is a server-side connection.
    pub fn is_server(&self) -> bool {
        // Simple accessor for the role recorded at connection creation.
        self.is_server
    }
7678
    /// Hands the local transport parameters to the TLS handshake state.
    ///
    /// The `is_server` flag is forwarded so the handshake can encode the
    /// parameters appropriately for this endpoint's role.
    fn encode_transport_params(&mut self) -> Result<()> {
        self.handshake.set_quic_transport_params(
            &self.local_transport_params,
            self.is_server,
        )
    }
7685
7686 fn parse_peer_transport_params(
7687 &mut self, peer_params: TransportParams,
7688 ) -> Result<()> {
7689 // Validate initial_source_connection_id.
7690 match &peer_params.initial_source_connection_id {
7691 Some(v) if v != &self.destination_id() =>
7692 return Err(Error::InvalidTransportParam),
7693
7694 Some(_) => (),
7695
7696 // initial_source_connection_id must be sent by
7697 // both endpoints.
7698 None => return Err(Error::InvalidTransportParam),
7699 }
7700
7701 // Validate original_destination_connection_id.
7702 if let Some(odcid) = &self.odcid {
7703 match &peer_params.original_destination_connection_id {
7704 Some(v) if v != odcid =>
7705 return Err(Error::InvalidTransportParam),
7706
7707 Some(_) => (),
7708
7709 // original_destination_connection_id must be
7710 // sent by the server.
7711 None if !self.is_server =>
7712 return Err(Error::InvalidTransportParam),
7713
7714 None => (),
7715 }
7716 }
7717
7718 // Validate retry_source_connection_id.
7719 if let Some(rscid) = &self.rscid {
7720 match &peer_params.retry_source_connection_id {
7721 Some(v) if v != rscid =>
7722 return Err(Error::InvalidTransportParam),
7723
7724 Some(_) => (),
7725
7726 // retry_source_connection_id must be sent by
7727 // the server.
7728 None => return Err(Error::InvalidTransportParam),
7729 }
7730 }
7731
7732 self.process_peer_transport_params(peer_params)?;
7733
7734 self.parsed_peer_transport_params = true;
7735
7736 Ok(())
7737 }
7738
7739 fn process_peer_transport_params(
7740 &mut self, peer_params: TransportParams,
7741 ) -> Result<()> {
7742 self.max_tx_data = peer_params.initial_max_data;
7743
7744 // Update send capacity.
7745 self.update_tx_cap();
7746
7747 self.streams
7748 .update_peer_max_streams_bidi(peer_params.initial_max_streams_bidi);
7749 self.streams
7750 .update_peer_max_streams_uni(peer_params.initial_max_streams_uni);
7751
7752 let max_ack_delay = Duration::from_millis(peer_params.max_ack_delay);
7753
7754 self.recovery_config.max_ack_delay = max_ack_delay;
7755
7756 let active_path = self.paths.get_active_mut()?;
7757
7758 active_path.recovery.update_max_ack_delay(max_ack_delay);
7759
7760 if active_path
7761 .pmtud
7762 .as_ref()
7763 .map(|pmtud| pmtud.should_probe())
7764 .unwrap_or(false)
7765 {
7766 active_path.recovery.pmtud_update_max_datagram_size(
7767 active_path
7768 .pmtud
7769 .as_mut()
7770 .expect("PMTUD existence verified above")
7771 .get_probe_size()
7772 .min(peer_params.max_udp_payload_size as usize),
7773 );
7774 } else {
7775 active_path.recovery.update_max_datagram_size(
7776 peer_params.max_udp_payload_size as usize,
7777 );
7778 }
7779
7780 // Record the max_active_conn_id parameter advertised by the peer.
7781 self.ids
7782 .set_source_conn_id_limit(peer_params.active_conn_id_limit);
7783
7784 self.peer_transport_params = peer_params;
7785
7786 Ok(())
7787 }
7788
    /// Continues the handshake.
    ///
    /// If the connection is already established, it does nothing.
    fn do_handshake(&mut self, now: Instant) -> Result<()> {
        // State shared with the TLS callbacks. The callbacks may modify the
        // copied configuration (transport params, recovery config, tx cap
        // factor, PMTUD settings); those changes are reconciled back into
        // the connection below.
        let mut ex_data = tls::ExData {
            application_protos: &self.application_protos,

            crypto_ctx: &mut self.crypto_ctx,

            session: &mut self.session,

            local_error: &mut self.local_error,

            keylog: self.keylog.as_mut(),

            trace_id: &self.trace_id,

            local_transport_params: self.local_transport_params.clone(),

            recovery_config: self.recovery_config,

            tx_cap_factor: self.tx_cap_factor,

            pmtud: None,

            is_server: self.is_server,
        };

        if self.handshake_completed {
            return self.handshake.process_post_handshake(&mut ex_data);
        }

        match self.handshake.do_handshake(&mut ex_data) {
            Ok(_) => (),

            // `Done` means the handshake made progress but isn't finished.
            Err(Error::Done) => {
                // Apply in-handshake configuration from callbacks if the path's
                // Recovery module can still be reinitialized.
                if self
                    .paths
                    .get_active()
                    .map(|p| p.can_reinit_recovery())
                    .unwrap_or(false)
                {
                    if ex_data.recovery_config != self.recovery_config {
                        if let Ok(path) = self.paths.get_active_mut() {
                            self.recovery_config = ex_data.recovery_config;
                            path.reinit_recovery(&self.recovery_config);
                        }
                    }

                    if ex_data.tx_cap_factor != self.tx_cap_factor {
                        self.tx_cap_factor = ex_data.tx_cap_factor;
                    }

                    if let Some((discover, max_probes)) = ex_data.pmtud {
                        self.paths.set_discover_pmtu_on_existing_paths(
                            discover,
                            self.recovery_config.max_send_udp_payload_size,
                            max_probes,
                        );
                    }

                    if ex_data.local_transport_params !=
                        self.local_transport_params
                    {
                        self.streams.set_max_streams_bidi(
                            ex_data
                                .local_transport_params
                                .initial_max_streams_bidi,
                        );

                        self.local_transport_params =
                            ex_data.local_transport_params;
                    }
                }

                // Try to parse transport parameters as soon as the first flight
                // of handshake data is processed.
                //
                // This is potentially dangerous as the handshake hasn't been
                // completed yet, though it's required to be able to send data
                // in 0.5 RTT.
                let raw_params = self.handshake.quic_transport_params();

                if !self.parsed_peer_transport_params && !raw_params.is_empty() {
                    let peer_params = TransportParams::decode(
                        raw_params,
                        self.is_server,
                        self.peer_transport_params_track_unknown,
                    )?;

                    self.parse_peer_transport_params(peer_params)?;
                }

                return Ok(());
            },

            Err(e) => return Err(e),
        };

        self.handshake_completed = self.handshake.is_completed();

        self.alpn = self.handshake.alpn_protocol().to_vec();

        // Parse the peer's transport parameters if we haven't already (e.g.
        // when the handshake completed in a single pass).
        let raw_params = self.handshake.quic_transport_params();

        if !self.parsed_peer_transport_params && !raw_params.is_empty() {
            let peer_params = TransportParams::decode(
                raw_params,
                self.is_server,
                self.peer_transport_params_track_unknown,
            )?;

            self.parse_peer_transport_params(peer_params)?;
        }

        if self.handshake_completed {
            // The handshake is considered confirmed at the server when the
            // handshake completes, at which point we can also drop the
            // handshake epoch.
            if self.is_server {
                self.handshake_confirmed = true;

                self.drop_epoch_state(packet::Epoch::Handshake, now);
            }

            // Once the handshake is completed there's no point in processing
            // 0-RTT packets anymore, so clear the buffer now.
            self.undecryptable_pkts.clear();

            trace!("{} connection established: proto={:?} cipher={:?} curve={:?} sigalg={:?} resumed={} {:?}",
                   &self.trace_id,
                   std::str::from_utf8(self.application_proto()),
                   self.handshake.cipher(),
                   self.handshake.curve(),
                   self.handshake.sigalg(),
                   self.handshake.is_resumed(),
                   self.peer_transport_params);
        }

        Ok(())
    }
7932
    /// Selects the packet type for the next outgoing packet.
    ///
    /// Priority order: (1) a pending transport-level close is sent in the
    /// latest usable epoch; (2) the lowest epoch with send keys and pending
    /// data, lost frames, or PTO probes; (3) the application epoch (0-RTT or
    /// 1-RTT) when there is application-level work to do. Returns
    /// [`Error::Done`] when there is nothing to send.
    fn write_pkt_type(&self, send_pid: usize) -> Result<Type> {
        // On error send packet in the latest epoch available, but only send
        // 1-RTT ones when the handshake is completed.
        if self
            .local_error
            .as_ref()
            .is_some_and(|conn_err| !conn_err.is_app)
        {
            let epoch = match self.handshake.write_level() {
                crypto::Level::Initial => packet::Epoch::Initial,
                crypto::Level::ZeroRTT => unreachable!(),
                crypto::Level::Handshake => packet::Epoch::Handshake,
                crypto::Level::OneRTT => packet::Epoch::Application,
            };

            if !self.handshake_confirmed {
                match epoch {
                    // Downgrade the epoch to Handshake as the handshake is not
                    // completed yet.
                    packet::Epoch::Application => return Ok(Type::Handshake),

                    // Downgrade the epoch to Initial as the remote peer might
                    // not be able to decrypt handshake packets yet.
                    packet::Epoch::Handshake
                        if self.crypto_ctx[packet::Epoch::Initial].has_keys() =>
                        return Ok(Type::Initial),

                    _ => (),
                };
            }

            return Ok(Type::from_epoch(epoch));
        }

        // Walk the epochs from Initial upwards so handshake data always goes
        // out before application data.
        for &epoch in packet::Epoch::epochs(
            packet::Epoch::Initial..=packet::Epoch::Application,
        ) {
            let crypto_ctx = &self.crypto_ctx[epoch];
            let pkt_space = &self.pkt_num_spaces[epoch];

            // Only send packets in a space when we have the send keys for it.
            if crypto_ctx.crypto_seal.is_none() {
                continue;
            }

            // We are ready to send data for this packet number space.
            if crypto_ctx.data_available() || pkt_space.ready() {
                return Ok(Type::from_epoch(epoch));
            }

            // There are lost frames in this packet number space.
            for (_, p) in self.paths.iter() {
                if p.recovery.has_lost_frames(epoch) {
                    return Ok(Type::from_epoch(epoch));
                }

                // We need to send PTO probe packets.
                if p.recovery.loss_probes(epoch) > 0 {
                    return Ok(Type::from_epoch(epoch));
                }
            }
        }

        // If there are flushable, almost full or blocked streams, use the
        // Application epoch.
        let send_path = self.paths.get(send_pid)?;
        if (self.is_established() || self.is_in_early_data()) &&
            (self.should_send_handshake_done() ||
                self.flow_control.should_update_max_data() ||
                self.should_send_max_data ||
                self.blocked_limit.is_some() ||
                self.streams_blocked_bidi_state
                    .has_pending_stream_blocked_frame() ||
                self.streams_blocked_uni_state
                    .has_pending_stream_blocked_frame() ||
                self.dgram_send_queue.has_pending() ||
                self.local_error
                    .as_ref()
                    .is_some_and(|conn_err| conn_err.is_app) ||
                self.should_send_max_streams_bidi ||
                self.streams.should_update_max_streams_bidi() ||
                self.should_send_max_streams_uni ||
                self.streams.should_update_max_streams_uni() ||
                self.streams.has_flushable() ||
                self.streams.has_almost_full() ||
                self.streams.has_blocked() ||
                self.streams.has_reset() ||
                self.streams.has_stopped() ||
                self.ids.has_new_scids() ||
                self.ids.has_retire_dcids() ||
                send_path
                    .pmtud
                    .as_ref()
                    .is_some_and(|pmtud| pmtud.should_probe()) ||
                send_path.needs_ack_eliciting ||
                send_path.probing_required())
        {
            // Only clients can send 0-RTT packets.
            if !self.is_server && self.is_in_early_data() {
                return Ok(Type::ZeroRTT);
            }

            return Ok(Type::Short);
        }

        // Nothing to send.
        Err(Error::Done)
    }
8041
    /// Returns the mutable stream with the given ID if it exists, or creates
    /// a new one otherwise.
    ///
    /// `local` indicates whether the stream is locally initiated; it is
    /// forwarded to the stream map together with both endpoints' transport
    /// parameters.
    fn get_or_create_stream(
        &mut self, id: u64, local: bool,
    ) -> Result<&mut stream::Stream<F>> {
        self.streams.get_or_create(
            id,
            &self.local_transport_params,
            &self.peer_transport_params,
            local,
            self.is_server,
        )
    }
8055
    /// Processes an incoming frame.
    ///
    /// Dispatches on the frame type and updates the relevant connection,
    /// path, stream and recovery state. Returns an error when the frame is
    /// invalid for the current state (e.g. sent on the wrong stream type, or
    /// violating flow control), which closes the connection.
    fn process_frame(
        &mut self, frame: frame::Frame, hdr: &Header, recv_path_id: usize,
        epoch: packet::Epoch, now: Instant,
    ) -> Result<()> {
        trace!("{} rx frm {:?}", self.trace_id, frame);

        match frame {
            frame::Frame::Padding { .. } => (),

            frame::Frame::Ping { .. } => (),

            frame::Frame::ACK {
                ranges, ack_delay, ..
            } => {
                // Scale the peer's raw ack delay by its advertised exponent;
                // overflow here means the frame is malformed.
                let ack_delay = ack_delay
                    .checked_mul(2_u64.pow(
                        self.peer_transport_params.ack_delay_exponent as u32,
                    ))
                    .ok_or(Error::InvalidFrame)?;

                if epoch == packet::Epoch::Handshake ||
                    (epoch == packet::Epoch::Application &&
                        self.is_established())
                {
                    self.peer_verified_initial_address = true;
                }

                let handshake_status = self.handshake_status();

                let is_app_limited = self.delivery_rate_check_if_app_limited();

                let largest_acked = ranges.last().expect(
                    "ACK frames should always have at least one ack range",
                );

                for (_, p) in self.paths.iter_mut() {
                    if self.pkt_num_spaces[epoch]
                        .largest_tx_pkt_num
                        .is_some_and(|largest_sent| largest_sent < largest_acked)
                    {
                        // https://www.rfc-editor.org/rfc/rfc9000#section-13.1
                        // An endpoint SHOULD treat receipt of an acknowledgment
                        // for a packet it did not send as
                        // a connection error of type PROTOCOL_VIOLATION
                        return Err(Error::InvalidAckRange);
                    }

                    if is_app_limited {
                        p.recovery.delivery_rate_update_app_limited(true);
                    }

                    let OnAckReceivedOutcome {
                        lost_packets,
                        lost_bytes,
                        acked_bytes,
                        spurious_losses,
                    } = p.recovery.on_ack_received(
                        &ranges,
                        ack_delay,
                        epoch,
                        handshake_status,
                        now,
                        self.pkt_num_manager.skip_pn(),
                        &self.trace_id,
                    )?;

                    let skip_pn = self.pkt_num_manager.skip_pn();
                    let largest_acked =
                        p.recovery.get_largest_acked_on_epoch(epoch);

                    // Consider the skip_pn validated if the peer has sent an ack
                    // for a larger pkt number.
                    if let Some((largest_acked, skip_pn)) =
                        largest_acked.zip(skip_pn)
                    {
                        if largest_acked > skip_pn {
                            self.pkt_num_manager.set_skip_pn(None);
                        }
                    }

                    self.lost_count += lost_packets;
                    self.lost_bytes += lost_bytes as u64;
                    self.acked_bytes += acked_bytes as u64;
                    self.spurious_lost_count += spurious_losses;
                }
            },

            frame::Frame::ResetStream {
                stream_id,
                error_code,
                final_size,
            } => {
                // Peer can't send on our unidirectional streams.
                if !stream::is_bidi(stream_id) &&
                    stream::is_local(stream_id, self.is_server)
                {
                    return Err(Error::InvalidStreamState(stream_id));
                }

                let max_rx_data_left = self.max_rx_data() - self.rx_data;

                // Get existing stream or create a new one, but if the stream
                // has already been closed and collected, ignore the frame.
                //
                // This can happen if e.g. an ACK frame is lost, and the peer
                // retransmits another frame before it realizes that the stream
                // is gone.
                //
                // Note that it makes it impossible to check if the frame is
                // illegal, since we have no state, but since we ignore the
                // frame, it should be fine.
                let stream = match self.get_or_create_stream(stream_id, false) {
                    Ok(v) => v,

                    Err(Error::Done) => return Ok(()),

                    Err(e) => return Err(e),
                };

                let was_readable = stream.is_readable();
                let priority_key = Arc::clone(&stream.priority_key);

                let stream::RecvBufResetReturn {
                    max_data_delta,
                    consumed_flowcontrol,
                } = stream.recv.reset(error_code, final_size)?;

                // The reset may advance the stream's final offset; make sure
                // that advance fits within the connection-level limit.
                if max_data_delta > max_rx_data_left {
                    return Err(Error::FlowControl);
                }

                if !was_readable && stream.is_readable() {
                    self.streams.insert_readable(&priority_key);
                }

                self.rx_data += max_data_delta;
                // We dropped the receive buffer, return connection level
                // flow-control
                self.flow_control.add_consumed(consumed_flowcontrol);

                self.reset_stream_remote_count =
                    self.reset_stream_remote_count.saturating_add(1);
            },

            frame::Frame::StopSending {
                stream_id,
                error_code,
            } => {
                // STOP_SENDING on a receive-only stream is a fatal error.
                if !stream::is_local(stream_id, self.is_server) &&
                    !stream::is_bidi(stream_id)
                {
                    return Err(Error::InvalidStreamState(stream_id));
                }

                // Get existing stream or create a new one, but if the stream
                // has already been closed and collected, ignore the frame.
                //
                // This can happen if e.g. an ACK frame is lost, and the peer
                // retransmits another frame before it realizes that the stream
                // is gone.
                //
                // Note that it makes it impossible to check if the frame is
                // illegal, since we have no state, but since we ignore the
                // frame, it should be fine.
                let stream = match self.get_or_create_stream(stream_id, false) {
                    Ok(v) => v,

                    Err(Error::Done) => return Ok(()),

                    Err(e) => return Err(e),
                };

                let was_writable = stream.is_writable();

                let priority_key = Arc::clone(&stream.priority_key);

                // Try stopping the stream.
                if let Ok((final_size, unsent)) = stream.send.stop(error_code) {
                    // Claw back some flow control allowance from data that was
                    // buffered but not actually sent before the stream was
                    // reset.
                    //
                    // Note that `tx_cap` will be updated later on, so no need
                    // to touch it here.
                    self.tx_data = self.tx_data.saturating_sub(unsent);

                    self.tx_buffered =
                        self.tx_buffered.saturating_sub(unsent as usize);

                    // These drops in qlog are a bit weird, but the only way to
                    // ensure that all bytes that are moved from App to Transport
                    // in stream_do_send are eventually moved from Transport to
                    // Dropped. Ideally we would add a Transport to Network
                    // transition also as a way to indicate when bytes were
                    // transmitted vs dropped without ever being sent.
                    qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
                        let ev_data = EventData::QuicStreamDataMoved(
                            qlog::events::quic::StreamDataMoved {
                                stream_id: Some(stream_id),
                                offset: Some(final_size),
                                raw: Some(RawInfo {
                                    length: Some(unsent),
                                    ..Default::default()
                                }),
                                from: Some(DataRecipient::Transport),
                                to: Some(DataRecipient::Dropped),
                                ..Default::default()
                            },
                        );

                        q.add_event_data_with_instant(ev_data, now).ok();
                    });

                    self.streams.insert_reset(stream_id, error_code, final_size);

                    if !was_writable {
                        self.streams.insert_writable(&priority_key);
                    }

                    self.stopped_stream_remote_count =
                        self.stopped_stream_remote_count.saturating_add(1);
                    self.reset_stream_local_count =
                        self.reset_stream_local_count.saturating_add(1);
                }
            },

            frame::Frame::Crypto { data } => {
                if data.max_off() >= MAX_CRYPTO_STREAM_OFFSET {
                    return Err(Error::CryptoBufferExceeded);
                }

                // Push the data to the stream so it can be re-ordered.
                self.crypto_ctx[epoch].crypto_stream.recv.write(data)?;

                // Feed crypto data to the TLS state, if there's data
                // available at the expected offset.
                let mut crypto_buf = [0; 512];

                let level = crypto::Level::from_epoch(epoch);

                let stream = &mut self.crypto_ctx[epoch].crypto_stream;

                while let Ok((read, _)) = stream.recv.emit(&mut crypto_buf) {
                    let recv_buf = &crypto_buf[..read];
                    self.handshake.provide_data(level, recv_buf)?;
                }

                self.do_handshake(now)?;
            },

            frame::Frame::CryptoHeader { .. } => unreachable!(),

            // TODO: implement stateless retry
            frame::Frame::NewToken { .. } =>
                if self.is_server {
                    return Err(Error::InvalidPacket);
                },

            frame::Frame::Stream { stream_id, data } => {
                // Peer can't send on our unidirectional streams.
                if !stream::is_bidi(stream_id) &&
                    stream::is_local(stream_id, self.is_server)
                {
                    return Err(Error::InvalidStreamState(stream_id));
                }

                let max_rx_data_left = self.max_rx_data() - self.rx_data;

                // Get existing stream or create a new one, but if the stream
                // has already been closed and collected, ignore the frame.
                //
                // This can happen if e.g. an ACK frame is lost, and the peer
                // retransmits another frame before it realizes that the stream
                // is gone.
                //
                // Note that it makes it impossible to check if the frame is
                // illegal, since we have no state, but since we ignore the
                // frame, it should be fine.
                let stream = match self.get_or_create_stream(stream_id, false) {
                    Ok(v) => v,

                    Err(Error::Done) => return Ok(()),

                    Err(e) => return Err(e),
                };

                // Check for the connection-level flow control limit.
                let max_off_delta =
                    data.max_off().saturating_sub(stream.recv.max_off());

                if max_off_delta > max_rx_data_left {
                    return Err(Error::FlowControl);
                }

                let was_readable = stream.is_readable();
                let priority_key = Arc::clone(&stream.priority_key);

                let was_draining = stream.recv.is_draining();

                stream.recv.write(data)?;

                if !was_readable && stream.is_readable() {
                    self.streams.insert_readable(&priority_key);
                }

                self.rx_data += max_off_delta;

                if was_draining {
                    // When a stream is in draining state it will not queue
                    // incoming data for the application to read, so consider
                    // the received data as consumed, which might trigger a flow
                    // control update.
                    self.flow_control.add_consumed(max_off_delta);
                }
            },

            frame::Frame::StreamHeader { .. } => unreachable!(),

            frame::Frame::MaxData { max } => {
                // Flow control limits only ever move forward.
                self.max_tx_data = cmp::max(self.max_tx_data, max);
            },

            frame::Frame::MaxStreamData { stream_id, max } => {
                // Peer can't receive on its own unidirectional streams.
                if !stream::is_bidi(stream_id) &&
                    !stream::is_local(stream_id, self.is_server)
                {
                    return Err(Error::InvalidStreamState(stream_id));
                }

                // Get existing stream or create a new one, but if the stream
                // has already been closed and collected, ignore the frame.
                //
                // This can happen if e.g. an ACK frame is lost, and the peer
                // retransmits another frame before it realizes that the stream
                // is gone.
                //
                // Note that it makes it impossible to check if the frame is
                // illegal, since we have no state, but since we ignore the
                // frame, it should be fine.
                let stream = match self.get_or_create_stream(stream_id, false) {
                    Ok(v) => v,

                    Err(Error::Done) => return Ok(()),

                    Err(e) => return Err(e),
                };

                let was_flushable = stream.is_flushable();

                stream.send.update_max_data(max);

                let writable = stream.is_writable();

                let priority_key = Arc::clone(&stream.priority_key);

                // If the stream is now flushable push it to the flushable queue,
                // but only if it wasn't already queued.
                if stream.is_flushable() && !was_flushable {
                    let priority_key = Arc::clone(&stream.priority_key);
                    self.streams.insert_flushable(&priority_key);
                }

                if writable {
                    self.streams.insert_writable(&priority_key);
                }
            },

            frame::Frame::MaxStreamsBidi { max } => {
                if max > MAX_STREAM_ID {
                    return Err(Error::InvalidFrame);
                }

                self.streams.update_peer_max_streams_bidi(max);
            },

            frame::Frame::MaxStreamsUni { max } => {
                if max > MAX_STREAM_ID {
                    return Err(Error::InvalidFrame);
                }

                self.streams.update_peer_max_streams_uni(max);
            },

            frame::Frame::DataBlocked { .. } => {
                self.data_blocked_recv_count =
                    self.data_blocked_recv_count.saturating_add(1);
            },

            frame::Frame::StreamDataBlocked { .. } => {
                self.stream_data_blocked_recv_count =
                    self.stream_data_blocked_recv_count.saturating_add(1);
            },

            frame::Frame::StreamsBlockedBidi { limit } => {
                if limit > MAX_STREAM_ID {
                    return Err(Error::InvalidFrame);
                }

                self.streams_blocked_bidi_recv_count =
                    self.streams_blocked_bidi_recv_count.saturating_add(1);
            },

            frame::Frame::StreamsBlockedUni { limit } => {
                if limit > MAX_STREAM_ID {
                    return Err(Error::InvalidFrame);
                }

                self.streams_blocked_uni_recv_count =
                    self.streams_blocked_uni_recv_count.saturating_add(1);
            },

            frame::Frame::NewConnectionId {
                seq_num,
                retire_prior_to,
                conn_id,
                reset_token,
            } => {
                if self.ids.zero_length_dcid() {
                    return Err(Error::InvalidState);
                }

                let mut retired_path_ids = SmallVec::new();

                // Retire pending path IDs before propagating the error code to
                // make sure retired connection IDs are not in use anymore.
                let new_dcid_res = self.ids.new_dcid(
                    conn_id.into(),
                    seq_num,
                    u128::from_be_bytes(reset_token),
                    retire_prior_to,
                    &mut retired_path_ids,
                );

                for (dcid_seq, pid) in retired_path_ids {
                    let path = self.paths.get_mut(pid)?;

                    // Maybe the path already switched to another DCID.
                    if path.active_dcid_seq != Some(dcid_seq) {
                        continue;
                    }

                    if let Some(new_dcid_seq) =
                        self.ids.lowest_available_dcid_seq()
                    {
                        path.active_dcid_seq = Some(new_dcid_seq);

                        self.ids.link_dcid_to_path_id(new_dcid_seq, pid)?;

                        trace!(
                            "{} path ID {} changed DCID: old seq num {} new seq num {}",
                            self.trace_id, pid, dcid_seq, new_dcid_seq,
                        );
                    } else {
                        // We cannot use this path anymore for now.
                        path.active_dcid_seq = None;

                        trace!(
                            "{} path ID {} cannot be used; DCID seq num {} has been retired",
                            self.trace_id, pid, dcid_seq,
                        );
                    }
                }

                // Propagate error (if any) now...
                new_dcid_res?;
            },

            frame::Frame::RetireConnectionId { seq_num } => {
                if self.ids.zero_length_scid() {
                    return Err(Error::InvalidState);
                }

                if let Some(pid) = self.ids.retire_scid(seq_num, &hdr.dcid)? {
                    let path = self.paths.get_mut(pid)?;

                    // Maybe we already linked a new SCID to that path.
                    if path.active_scid_seq == Some(seq_num) {
                        // XXX: We do not remove unused paths now, we instead
                        // wait until we need to maintain more paths than the
                        // host is willing to.
                        path.active_scid_seq = None;
                    }
                }
            },

            frame::Frame::PathChallenge { data } => {
                self.path_challenge_rx_count += 1;

                self.paths
                    .get_mut(recv_path_id)?
                    .on_challenge_received(data);
            },

            frame::Frame::PathResponse { data } => {
                self.paths.on_response_received(data)?;
            },

            frame::Frame::ConnectionClose {
                error_code, reason, ..
            } => {
                self.peer_error = Some(ConnectionError {
                    is_app: false,
                    error_code,
                    reason,
                });

                // Enter the draining state for 3 PTOs, as per RFC 9000
                // section 10.2.
                let path = self.paths.get_active()?;
                self.draining_timer = Some(now + (path.recovery.pto() * 3));
            },

            frame::Frame::ApplicationClose { error_code, reason } => {
                self.peer_error = Some(ConnectionError {
                    is_app: true,
                    error_code,
                    reason,
                });

                let path = self.paths.get_active()?;
                self.draining_timer = Some(now + (path.recovery.pto() * 3));
            },

            frame::Frame::HandshakeDone => {
                // Only servers send HANDSHAKE_DONE.
                if self.is_server {
                    return Err(Error::InvalidPacket);
                }

                self.peer_verified_initial_address = true;

                self.handshake_confirmed = true;

                // Once the handshake is confirmed, we can drop Handshake keys.
                self.drop_epoch_state(packet::Epoch::Handshake, now);
            },

            frame::Frame::Datagram { data } => {
                // Close the connection if DATAGRAMs are not enabled.
                // quiche always advertises support for 64K sized DATAGRAM
                // frames, as recommended by the standard, so we don't need a
                // size check.
                if !self.dgram_enabled() {
                    return Err(Error::InvalidState);
                }

                // If recv queue is full, discard oldest
                if self.dgram_recv_queue.is_full() {
                    self.dgram_recv_queue.pop();
                }

                self.dgram_recv_queue.push(data)?;

                self.dgram_recv_count = self.dgram_recv_count.saturating_add(1);

                let path = self.paths.get_mut(recv_path_id)?;
                path.dgram_recv_count = path.dgram_recv_count.saturating_add(1);
            },

            frame::Frame::DatagramHeader { .. } => unreachable!(),
        }

        Ok(())
    }
8620
8621 /// Drops the keys and recovery state for the given epoch.
8622 fn drop_epoch_state(&mut self, epoch: packet::Epoch, now: Instant) {
8623 let crypto_ctx = &mut self.crypto_ctx[epoch];
8624 if crypto_ctx.crypto_open.is_none() {
8625 return;
8626 }
8627 crypto_ctx.clear();
8628 self.pkt_num_spaces[epoch].clear();
8629
8630 let handshake_status = self.handshake_status();
8631 for (_, p) in self.paths.iter_mut() {
8632 p.recovery
8633 .on_pkt_num_space_discarded(epoch, handshake_status, now);
8634 }
8635
8636 trace!("{} dropped epoch {} state", self.trace_id, epoch);
8637 }
8638
    /// Returns the connection level flow control limit.
    fn max_rx_data(&self) -> u64 {
        // Delegates to the flow control module's current receive limit.
        self.flow_control.max_data()
    }
8643
8644 /// Returns true if the HANDSHAKE_DONE frame needs to be sent.
8645 fn should_send_handshake_done(&self) -> bool {
8646 self.is_established() && !self.handshake_done_sent && self.is_server
8647 }
8648
8649 /// Returns the idle timeout value.
8650 ///
8651 /// `None` is returned if both end-points disabled the idle timeout.
8652 fn idle_timeout(&self) -> Option<Duration> {
8653 // If the transport parameter is set to 0, then the respective endpoint
8654 // decided to disable the idle timeout. If both are disabled we should
8655 // not set any timeout.
8656 if self.local_transport_params.max_idle_timeout == 0 &&
8657 self.peer_transport_params.max_idle_timeout == 0
8658 {
8659 return None;
8660 }
8661
8662 // If the local endpoint or the peer disabled the idle timeout, use the
8663 // other peer's value, otherwise use the minimum of the two values.
8664 let idle_timeout = if self.local_transport_params.max_idle_timeout == 0 {
8665 self.peer_transport_params.max_idle_timeout
8666 } else if self.peer_transport_params.max_idle_timeout == 0 {
8667 self.local_transport_params.max_idle_timeout
8668 } else {
8669 cmp::min(
8670 self.local_transport_params.max_idle_timeout,
8671 self.peer_transport_params.max_idle_timeout,
8672 )
8673 };
8674
8675 let path_pto = match self.paths.get_active() {
8676 Ok(p) => p.recovery.pto(),
8677 Err(_) => Duration::ZERO,
8678 };
8679
8680 let idle_timeout = Duration::from_millis(idle_timeout);
8681 let idle_timeout = cmp::max(idle_timeout, 3 * path_pto);
8682
8683 Some(idle_timeout)
8684 }
8685
8686 /// Returns the connection's handshake status for use in loss recovery.
8687 fn handshake_status(&self) -> recovery::HandshakeStatus {
8688 recovery::HandshakeStatus {
8689 has_handshake_keys: self.crypto_ctx[packet::Epoch::Handshake]
8690 .has_keys(),
8691
8692 peer_verified_address: self.peer_verified_initial_address,
8693
8694 completed: self.is_established(),
8695 }
8696 }
8697
8698 /// Updates send capacity.
8699 fn update_tx_cap(&mut self) {
8700 let cwin_available = match self.paths.get_active() {
8701 Ok(p) => p.recovery.cwnd_available() as u64,
8702 Err(_) => 0,
8703 };
8704
8705 let cap =
8706 cmp::min(cwin_available, self.max_tx_data - self.tx_data) as usize;
8707 self.tx_cap = (cap as f64 * self.tx_cap_factor).ceil() as usize;
8708 }
8709
8710 fn delivery_rate_check_if_app_limited(&self) -> bool {
8711 // Enter the app-limited phase of delivery rate when these conditions
8712 // are met:
8713 //
8714 // - The remaining capacity is higher than available bytes in cwnd (there
8715 // is more room to send).
8716 // - New data since the last send() is smaller than available bytes in
8717 // cwnd (we queued less than what we can send).
8718 // - There is room to send more data in cwnd.
8719 //
8720 // In application-limited phases the transmission rate is limited by the
8721 // application rather than the congestion control algorithm.
8722 //
8723 // Note that this is equivalent to CheckIfApplicationLimited() from the
8724 // delivery rate draft. This is also separate from `recovery.app_limited`
8725 // and only applies to delivery rate calculation.
8726 let cwin_available = self
8727 .paths
8728 .iter()
8729 .filter(|&(_, p)| p.active())
8730 .map(|(_, p)| p.recovery.cwnd_available())
8731 .sum();
8732
8733 ((self.tx_buffered + self.dgram_send_queue_byte_size()) < cwin_available) &&
8734 (self.tx_data.saturating_sub(self.last_tx_data)) <
8735 cwin_available as u64 &&
8736 cwin_available > 0
8737 }
8738
8739 fn check_tx_buffered_invariant(&mut self) {
8740 // tx_buffered should track bytes queued in the stream buffers
8741 // and unacked retransmitable bytes in the network.
8742 // If tx_buffered > 0 mark the tx_buffered_state if there are no
8743 // flushable streams and there no inflight bytes.
8744 //
8745 // It is normal to have tx_buffered == 0 while there are inflight bytes
8746 // since not QUIC frames are retransmittable; inflight tracks all bytes
8747 // on the network which are subject to congestion control.
8748 if self.tx_buffered > 0 &&
8749 !self.streams.has_flushable() &&
8750 !self
8751 .paths
8752 .iter()
8753 .any(|(_, p)| p.recovery.bytes_in_flight() > 0)
8754 {
8755 self.tx_buffered_state = TxBufferTrackingState::Inconsistent;
8756 }
8757 }
8758
    /// Installs the initial destination CID and binds it to the given path.
    ///
    /// The path's active DCID sequence number is set to 0 (the initial CID
    /// always has sequence number 0). `reset_token` is the stateless reset
    /// token associated with the CID, if any.
    fn set_initial_dcid(
        &mut self, cid: ConnectionId<'static>, reset_token: Option<u128>,
        path_id: usize,
    ) -> Result<()> {
        self.ids.set_initial_dcid(cid, reset_token, Some(path_id));
        self.paths.get_mut(path_id)?.active_dcid_seq = Some(0);

        Ok(())
    }
8768
    /// Selects the path that the incoming packet belongs to, or creates a new
    /// one if no existing path matches.
    ///
    /// `recv_pid` is the identifier of the path already associated with the
    /// packet's 4-tuple, if any. `dcid` is the destination CID carried by
    /// the packet (one of our source CIDs). `buf_len` is the size of the
    /// received datagram, used to credit the new path's anti-amplification
    /// budget. `info` carries the packet's local/peer addresses.
    ///
    /// Returns the identifier of the selected or newly created path, or
    /// `Error::InvalidState` when `dcid` is not one of our source CIDs.
    fn get_or_create_recv_path_id(
        &mut self, recv_pid: Option<usize>, dcid: &ConnectionId, buf_len: usize,
        info: &RecvInfo,
    ) -> Result<usize> {
        let ids = &mut self.ids;

        // Map the packet's DCID back to one of our SCID sequence numbers and
        // (possibly) the path it is already linked to.
        let (in_scid_seq, mut in_scid_pid) =
            ids.find_scid_seq(dcid).ok_or(Error::InvalidState)?;

        if let Some(recv_pid) = recv_pid {
            // Known 4-tuple. If the path observes a change of SCID used,
            // note it and re-link the CID to this path.
            let recv_path = self.paths.get_mut(recv_pid)?;

            let cid_entry =
                recv_path.active_scid_seq.and_then(|v| ids.get_scid(v).ok());

            if cid_entry.map(|e| &e.cid) != Some(dcid) {
                let incoming_cid_entry = ids.get_scid(in_scid_seq)?;

                let prev_recv_pid =
                    incoming_cid_entry.path_id.unwrap_or(recv_pid);

                if prev_recv_pid != recv_pid {
                    trace!(
                        "{} peer reused CID {:?} from path {} on path {}",
                        self.trace_id,
                        dcid,
                        prev_recv_pid,
                        recv_pid
                    );

                    // TODO: reset congestion control.
                }

                trace!(
                    "{} path ID {} now see SCID with seq num {}",
                    self.trace_id,
                    recv_pid,
                    in_scid_seq
                );

                recv_path.active_scid_seq = Some(in_scid_seq);
                ids.link_scid_to_path_id(in_scid_seq, recv_pid)?;
            }

            return Ok(recv_pid);
        }

        // This is a new 4-tuple. See if the CID has not been assigned on
        // another path.

        // Ignore this step if we are using zero-length SCID.
        if ids.zero_length_scid() {
            in_scid_pid = None;
        }

        if let Some(in_scid_pid) = in_scid_pid {
            // This CID has been used by another path. If we have the
            // room to do so, create a new `Path` structure holding this
            // new 4-tuple. Otherwise, drop the packet.
            let old_path = self.paths.get_mut(in_scid_pid)?;
            let old_local_addr = old_path.local_addr();
            let old_peer_addr = old_path.peer_addr();

            trace!(
                "{} reused CID seq {} of ({},{}) (path {}) on ({},{})",
                self.trace_id,
                in_scid_seq,
                old_local_addr,
                old_peer_addr,
                in_scid_pid,
                info.to,
                info.from
            );

            // Notify the application.
            self.paths.notify_event(PathEvent::ReusedSourceConnectionId(
                in_scid_seq,
                (old_local_addr, old_peer_addr),
                (info.to, info.from),
            ));
        }

        // This is a new path using an unassigned CID; create it!
        let mut path = path::Path::new(
            info.to,
            info.from,
            &self.recovery_config,
            self.path_challenge_recv_max_queue_len,
            false,
            None,
        );

        // Anti-amplification: allow sending only a multiple of what was
        // received on this yet-unvalidated path.
        path.max_send_bytes = buf_len * self.max_amplification_factor;
        path.active_scid_seq = Some(in_scid_seq);

        // Automatically probes the new path.
        path.request_validation();

        let pid = self.paths.insert_path(path, self.is_server)?;

        // Only link the CID to the new path when it wasn't already linked to
        // another path above (do not record path reuse).
        if in_scid_pid.is_none() {
            ids.link_scid_to_path_id(in_scid_seq, pid)?;
        }

        Ok(pid)
    }
8879
8880 /// Selects the path on which the next packet must be sent.
8881 fn get_send_path_id(
8882 &self, from: Option<SocketAddr>, to: Option<SocketAddr>,
8883 ) -> Result<usize> {
8884 // A probing packet must be sent, but only if the connection is fully
8885 // established.
8886 if self.is_established() {
8887 let mut probing = self
8888 .paths
8889 .iter()
8890 .filter(|(_, p)| from.is_none() || Some(p.local_addr()) == from)
8891 .filter(|(_, p)| to.is_none() || Some(p.peer_addr()) == to)
8892 .filter(|(_, p)| p.active_dcid_seq.is_some())
8893 .filter(|(_, p)| p.probing_required())
8894 .map(|(pid, _)| pid);
8895
8896 if let Some(pid) = probing.next() {
8897 return Ok(pid);
8898 }
8899 }
8900
8901 if let Some((pid, p)) = self.paths.get_active_with_pid() {
8902 if from.is_some() && Some(p.local_addr()) != from {
8903 return Err(Error::Done);
8904 }
8905
8906 if to.is_some() && Some(p.peer_addr()) != to {
8907 return Err(Error::Done);
8908 }
8909
8910 return Ok(pid);
8911 };
8912
8913 Err(Error::InvalidState)
8914 }
8915
8916 /// Sets the path with identifier 'path_id' to be active.
8917 fn set_active_path(&mut self, path_id: usize, now: Instant) -> Result<()> {
8918 if let Ok(old_active_path) = self.paths.get_active_mut() {
8919 for &e in packet::Epoch::epochs(
8920 packet::Epoch::Initial..=packet::Epoch::Application,
8921 ) {
8922 let (lost_packets, lost_bytes) = old_active_path
8923 .recovery
8924 .on_path_change(e, now, &self.trace_id);
8925
8926 self.lost_count += lost_packets;
8927 self.lost_bytes += lost_bytes as u64;
8928 }
8929 }
8930
8931 self.paths.set_active_path(path_id)
8932 }
8933
8934 /// Handles potential connection migration.
8935 fn on_peer_migrated(
8936 &mut self, new_pid: usize, disable_dcid_reuse: bool, now: Instant,
8937 ) -> Result<()> {
8938 let active_path_id = self.paths.get_active_path_id()?;
8939
8940 if active_path_id == new_pid {
8941 return Ok(());
8942 }
8943
8944 self.set_active_path(new_pid, now)?;
8945
8946 let no_spare_dcid =
8947 self.paths.get_mut(new_pid)?.active_dcid_seq.is_none();
8948
8949 if no_spare_dcid && !disable_dcid_reuse {
8950 self.paths.get_mut(new_pid)?.active_dcid_seq =
8951 self.paths.get_mut(active_path_id)?.active_dcid_seq;
8952 }
8953
8954 Ok(())
8955 }
8956
8957 /// Creates a new client-side path.
8958 fn create_path_on_client(
8959 &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
8960 ) -> Result<usize> {
8961 if self.is_server {
8962 return Err(Error::InvalidState);
8963 }
8964
8965 // If we use zero-length SCID and go over our local active CID limit,
8966 // the `insert_path()` call will raise an error.
8967 if !self.ids.zero_length_scid() && self.ids.available_scids() == 0 {
8968 return Err(Error::OutOfIdentifiers);
8969 }
8970
8971 // Do we have a spare DCID? If we are using zero-length DCID, just use
8972 // the default having sequence 0 (note that if we exceed our local CID
8973 // limit, the `insert_path()` call will raise an error.
8974 let dcid_seq = if self.ids.zero_length_dcid() {
8975 0
8976 } else {
8977 self.ids
8978 .lowest_available_dcid_seq()
8979 .ok_or(Error::OutOfIdentifiers)?
8980 };
8981
8982 let mut path = path::Path::new(
8983 local_addr,
8984 peer_addr,
8985 &self.recovery_config,
8986 self.path_challenge_recv_max_queue_len,
8987 false,
8988 None,
8989 );
8990 path.active_dcid_seq = Some(dcid_seq);
8991
8992 let pid = self
8993 .paths
8994 .insert_path(path, false)
8995 .map_err(|_| Error::OutOfIdentifiers)?;
8996 self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
8997
8998 Ok(pid)
8999 }
9000
    // Marks the connection as closed and does any related tidy-up, such as
    // emitting a final qlog `connection_closed` event and shutting down the
    // qlog streamer.
    fn mark_closed(&mut self) {
        #[cfg(feature = "qlog")]
        {
            // Build the qlog event from how the connection ended: handshake
            // failure, idle timeout, peer-initiated close, local close, or
            // unknown (catch-all).
            let cc = match (self.is_established(), self.timed_out, &self.peer_error, &self.local_error) {
                // Never established: the handshake failed.
                (false, _, _, _) => qlog::events::quic::ConnectionClosed {
                    initiator: Some(TransportInitiator::Local),
                    connection_error: None,
                    application_error: None,
                    error_code: None,
                    internal_code: None,
                    reason: Some("Failed to establish connection".to_string()),
                    trigger: Some(qlog::events::quic::ConnectionClosedTrigger::HandshakeTimeout)
                },

                // Established but the idle timer fired.
                (true, true, _, _) => qlog::events::quic::ConnectionClosed {
                    initiator: Some(TransportInitiator::Local),
                    connection_error: None,
                    application_error: None,
                    error_code: None,
                    internal_code: None,
                    reason: Some("Idle timeout".to_string()),
                    trigger: Some(qlog::events::quic::ConnectionClosedTrigger::IdleTimeout)
                },

                // The peer closed the connection.
                (true, false, Some(peer_error), None) => {
                    let (connection_code, application_error, trigger) = if peer_error.is_app {
                        (None, Some(qlog::events::ApplicationError::Unknown), None)
                    } else {
                        let trigger = if peer_error.error_code == WireErrorCode::NoError as u64 {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Clean)
                        } else {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Error)
                        };

                        (Some(qlog::events::ConnectionClosedEventError::TransportError(qlog::events::quic::TransportError::Unknown)), None, trigger)
                    };

                    // TODO: select more appropriate connection_code and application_error than unknown.
                    qlog::events::quic::ConnectionClosed {
                        initiator: Some(TransportInitiator::Remote),
                        connection_error: connection_code,
                        application_error,
                        error_code: Some(peer_error.error_code),
                        internal_code: None,
                        reason: Some(String::from_utf8_lossy(&peer_error.reason).to_string()),
                        trigger,
                    }
                },

                // We closed the connection locally.
                (true, false, None, Some(local_error)) => {
                    let (connection_code, application_error, trigger) = if local_error.is_app {
                        (None, Some(qlog::events::ApplicationError::Unknown), None)
                    } else {
                        let trigger = if local_error.error_code == WireErrorCode::NoError as u64 {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Clean)
                        } else {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Error)
                        };

                        (Some(qlog::events::ConnectionClosedEventError::TransportError(qlog::events::quic::TransportError::Unknown)), None, trigger)
                    };

                    // TODO: select more appropriate connection_code and application_error than unknown.
                    qlog::events::quic::ConnectionClosed {
                        initiator: Some(TransportInitiator::Local),
                        connection_error: connection_code,
                        application_error,
                        error_code: Some(local_error.error_code),
                        internal_code: None,
                        reason: Some(String::from_utf8_lossy(&local_error.reason).to_string()),
                        trigger,
                    }
                },

                // Anything else (e.g. both peer and local errors set).
                _ => qlog::events::quic::ConnectionClosed {
                    initiator: None,
                    connection_error: None,
                    application_error: None,
                    error_code: None,
                    internal_code: None,
                    reason: None,
                    trigger: None,
                },
            };

            qlog_with_type!(QLOG_CONNECTION_CLOSED, self.qlog, q, {
                let ev_data = EventData::QuicConnectionClosed(cc);

                q.add_event_data_now(ev_data).ok();
            });
            // No further qlog events can be emitted after close.
            self.qlog.streamer = None;
        }
        self.closed = true;
    }
9096}
9097
#[cfg(feature = "boringssl-boring-crate")]
impl<F: BufFactory> AsMut<boring::ssl::SslRef> for Connection<F> {
    /// Returns a mutable reference to the BoringSSL `SslRef` backing this
    /// connection's TLS handshake.
    fn as_mut(&mut self) -> &mut boring::ssl::SslRef {
        self.handshake.ssl_mut()
    }
}
9104
9105/// Maps an `Error` to `Error::Done`, or itself.
9106///
9107/// When a received packet that hasn't yet been authenticated triggers a failure
9108/// it should, in most cases, be ignored, instead of raising a connection error,
9109/// to avoid potential man-in-the-middle and man-on-the-side attacks.
9110///
9111/// However, if no other packet was previously received, the connection should
9112/// indeed be closed as the received packet might just be network background
9113/// noise, and it shouldn't keep resources occupied indefinitely.
9114///
9115/// This function maps an error to `Error::Done` to ignore a packet failure
9116/// without aborting the connection, except when no other packet was previously
9117/// received, in which case the error itself is returned, but only on the
9118/// server-side as the client will already have armed the idle timer.
9119///
9120/// This must only be used for errors preceding packet authentication. Failures
9121/// happening after a packet has been authenticated should still cause the
9122/// connection to be aborted.
9123fn drop_pkt_on_err(
9124 e: Error, recv_count: usize, is_server: bool, trace_id: &str,
9125) -> Error {
9126 // On the server, if no other packet has been successfully processed, abort
9127 // the connection to avoid keeping the connection open when only junk is
9128 // received.
9129 if is_server && recv_count == 0 {
9130 return e;
9131 }
9132
9133 trace!("{trace_id} dropped invalid packet");
9134
9135 // Ignore other invalid packets that haven't been authenticated to prevent
9136 // man-in-the-middle and man-on-the-side attacks.
9137 Error::Done
9138}
9139
/// Formats a `(source, destination)` socket address pair, printing nothing
/// when either address is unspecified (wildcard).
struct AddrTupleFmt(SocketAddr, SocketAddr);

impl std::fmt::Display for AddrTupleFmt {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let (src, dst) = (&self.0, &self.1);

        // Wildcard addresses carry no useful information; emit nothing.
        if src.ip().is_unspecified() || dst.ip().is_unspecified() {
            return Ok(());
        }

        write!(f, "src:{src} dst:{dst}")
    }
}
9153
/// Statistics about the connection.
///
/// A connection's statistics can be collected using the [`stats()`] method.
///
/// [`stats()`]: struct.Connection.html#method.stats
#[derive(Clone, Default)]
pub struct Stats {
    /// The number of QUIC packets received.
    pub recv: usize,

    /// The number of QUIC packets sent.
    pub sent: usize,

    /// The number of QUIC packets that were lost.
    pub lost: usize,

    /// The number of QUIC packets that were marked as lost but later acked.
    pub spurious_lost: usize,

    /// The number of sent QUIC packets with retransmitted data.
    pub retrans: usize,

    /// The number of sent bytes.
    pub sent_bytes: u64,

    /// The number of received bytes.
    pub recv_bytes: u64,

    /// The number of sent bytes that were acked.
    pub acked_bytes: u64,

    /// The number of sent bytes that were declared lost.
    pub lost_bytes: u64,

    /// The number of stream bytes retransmitted.
    pub stream_retrans_bytes: u64,

    /// The number of DATAGRAM frames received.
    pub dgram_recv: usize,

    /// The number of DATAGRAM frames sent.
    pub dgram_sent: usize,

    /// The number of known paths for the connection.
    pub paths_count: usize,

    /// The number of streams reset by local.
    pub reset_stream_count_local: u64,

    /// The number of streams stopped by local.
    pub stopped_stream_count_local: u64,

    /// The number of streams reset by remote.
    pub reset_stream_count_remote: u64,

    /// The number of streams stopped by remote.
    pub stopped_stream_count_remote: u64,

    /// The number of DATA_BLOCKED frames sent due to hitting the connection
    /// flow control limit.
    pub data_blocked_sent_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames sent due to a stream hitting
    /// the stream flow control limit.
    pub stream_data_blocked_sent_count: u64,

    /// The number of DATA_BLOCKED frames received from the remote.
    pub data_blocked_recv_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames received from the remote.
    pub stream_data_blocked_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames for bidirectional streams received
    /// from the remote, indicating the peer is blocked on opening new
    /// bidirectional streams.
    pub streams_blocked_bidi_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames for unidirectional streams received
    /// from the remote, indicating the peer is blocked on opening new
    /// unidirectional streams.
    pub streams_blocked_uni_recv_count: u64,

    /// The total number of PATH_CHALLENGE frames that were received.
    pub path_challenge_rx_count: u64,

    /// Total duration during which this side of the connection was
    /// actively sending bytes or waiting for those bytes to be acked.
    pub bytes_in_flight_duration: Duration,

    /// Health state of the connection's `tx_buffered` accounting (whether the
    /// buffered-bytes counter is still consistent).
    pub tx_buffered_state: TxBufferTrackingState,
}
9246
9247impl std::fmt::Debug for Stats {
9248 #[inline]
9249 fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
9250 write!(
9251 f,
9252 "recv={} sent={} lost={} retrans={}",
9253 self.recv, self.sent, self.lost, self.retrans,
9254 )?;
9255
9256 write!(
9257 f,
9258 " sent_bytes={} recv_bytes={} lost_bytes={}",
9259 self.sent_bytes, self.recv_bytes, self.lost_bytes,
9260 )?;
9261
9262 Ok(())
9263 }
9264}
9265
9266#[doc(hidden)]
9267#[cfg(any(test, feature = "internal"))]
9268pub mod test_utils;
9269
9270#[cfg(test)]
9271mod tests;
9272
9273pub use crate::packet::ConnectionId;
9274pub use crate::packet::Header;
9275pub use crate::packet::Type;
9276
9277pub use crate::path::PathEvent;
9278pub use crate::path::PathStats;
9279pub use crate::path::SocketAddrIter;
9280
9281pub use crate::recovery::BbrBwLoReductionStrategy;
9282pub use crate::recovery::BbrParams;
9283pub use crate::recovery::CongestionControlAlgorithm;
9284pub use crate::recovery::StartupExit;
9285pub use crate::recovery::StartupExitReason;
9286
9287pub use crate::stream::StreamIter;
9288
9289pub use crate::transport_params::TransportParams;
9290pub use crate::transport_params::UnknownTransportParameter;
9291pub use crate::transport_params::UnknownTransportParameterIterator;
9292pub use crate::transport_params::UnknownTransportParameters;
9293
9294pub use crate::buffers::BufFactory;
9295pub use crate::buffers::BufSplit;
9296
9297pub use crate::error::ConnectionError;
9298pub use crate::error::Error;
9299pub use crate::error::Result;
9300pub use crate::error::WireErrorCode;
9301
9302mod buffers;
9303mod cid;
9304mod crypto;
9305mod dgram;
9306mod error;
9307#[cfg(feature = "ffi")]
9308mod ffi;
9309mod flowcontrol;
9310mod frame;
9311pub mod h3;
9312mod minmax;
9313mod packet;
9314mod path;
9315mod pmtud;
9316mod rand;
9317mod range_buf;
9318mod ranges;
9319mod recovery;
9320mod stream;
9321mod tls;
9322mod transport_params;