quiche/lib.rs
1// Copyright (C) 2018-2019, Cloudflare, Inc.
2// All rights reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// * Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// * Redistributions in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
16// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
19// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27//! 🥧 Savoury implementation of the QUIC transport protocol and HTTP/3.
28//!
29//! [quiche] is an implementation of the QUIC transport protocol and HTTP/3 as
30//! specified by the [IETF]. It provides a low level API for processing QUIC
31//! packets and handling connection state. The application is responsible for
32//! providing I/O (e.g. sockets handling) as well as an event loop with support
33//! for timers.
34//!
35//! [quiche]: https://github.com/cloudflare/quiche/
36//! [ietf]: https://quicwg.org/
37//!
38//! ## Configuring connections
39//!
40//! The first step in establishing a QUIC connection using quiche is creating a
41//! [`Config`] object:
42//!
43//! ```
44//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
//! config.set_application_protos(&[b"example-proto"])?;
46//!
47//! // Additional configuration specific to application and use case...
48//! # Ok::<(), quiche::Error>(())
49//! ```
50//!
51//! The [`Config`] object controls important aspects of the QUIC connection such
52//! as QUIC version, ALPN IDs, flow control, congestion control, idle timeout
53//! and other properties or features.
54//!
55//! QUIC is a general-purpose transport protocol and there are several
56//! configuration properties where there is no reasonable default value. For
57//! example, the permitted number of concurrent streams of any particular type
58//! is dependent on the application running over QUIC, and other use-case
59//! specific concerns.
60//!
61//! quiche defaults several properties to zero, applications most likely need
62//! to set these to something else to satisfy their needs using the following:
63//!
64//! - [`set_initial_max_streams_bidi()`]
65//! - [`set_initial_max_streams_uni()`]
66//! - [`set_initial_max_data()`]
67//! - [`set_initial_max_stream_data_bidi_local()`]
68//! - [`set_initial_max_stream_data_bidi_remote()`]
69//! - [`set_initial_max_stream_data_uni()`]
70//!
71//! [`Config`] also holds TLS configuration. This can be changed by mutators on
//! an existing object, or by constructing a TLS context manually and
73//! creating a configuration using [`with_boring_ssl_ctx_builder()`].
74//!
75//! A configuration object can be shared among multiple connections.
76//!
77//! ### Connection setup
78//!
79//! On the client-side the [`connect()`] utility function can be used to create
80//! a new connection, while [`accept()`] is for servers:
81//!
82//! ```
83//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
84//! # let server_name = "quic.tech";
85//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
86//! # let peer = "127.0.0.1:1234".parse().unwrap();
87//! # let local = "127.0.0.1:4321".parse().unwrap();
88//! // Client connection.
89//! let conn =
90//! quiche::connect(Some(&server_name), &scid, local, peer, &mut config)?;
91//!
92//! // Server connection.
93//! # let peer = "127.0.0.1:1234".parse().unwrap();
94//! # let local = "127.0.0.1:4321".parse().unwrap();
95//! let conn = quiche::accept(&scid, None, local, peer, &mut config)?;
96//! # Ok::<(), quiche::Error>(())
97//! ```
98//!
99//! In both cases, the application is responsible for generating a new source
100//! connection ID that will be used to identify the new connection.
101//!
//! The application also needs to pass the address of the remote peer of the
103//! connection: in the case of a client that would be the address of the server
104//! it is trying to connect to, and for a server that is the address of the
105//! client that initiated the connection.
106//!
107//! ## Handling incoming packets
108//!
109//! Using the connection's [`recv()`] method the application can process
110//! incoming packets that belong to that connection from the network:
111//!
112//! ```no_run
113//! # let mut buf = [0; 512];
114//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
115//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
116//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
117//! # let peer = "127.0.0.1:1234".parse().unwrap();
118//! # let local = "127.0.0.1:4321".parse().unwrap();
119//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
120//! let to = socket.local_addr().unwrap();
121//!
122//! loop {
123//! let (read, from) = socket.recv_from(&mut buf).unwrap();
124//!
125//! let recv_info = quiche::RecvInfo { from, to };
126//!
127//! let read = match conn.recv(&mut buf[..read], recv_info) {
128//! Ok(v) => v,
129//!
130//! Err(quiche::Error::Done) => {
131//! // Done reading.
132//! break;
133//! },
134//!
135//! Err(e) => {
136//! // An error occurred, handle it.
137//! break;
138//! },
139//! };
140//! }
141//! # Ok::<(), quiche::Error>(())
142//! ```
143//!
144//! The application has to pass a [`RecvInfo`] structure in order to provide
145//! additional information about the received packet (such as the address it
146//! was received from).
147//!
148//! ## Generating outgoing packets
149//!
//! Outgoing packets are generated using the connection's [`send()`] method
151//! instead:
152//!
153//! ```no_run
154//! # let mut out = [0; 512];
155//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
156//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
157//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
158//! # let peer = "127.0.0.1:1234".parse().unwrap();
159//! # let local = "127.0.0.1:4321".parse().unwrap();
160//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
161//! loop {
162//! let (write, send_info) = match conn.send(&mut out) {
163//! Ok(v) => v,
164//!
165//! Err(quiche::Error::Done) => {
166//! // Done writing.
167//! break;
168//! },
169//!
170//! Err(e) => {
171//! // An error occurred, handle it.
172//! break;
173//! },
174//! };
175//!
176//! socket.send_to(&out[..write], &send_info.to).unwrap();
177//! }
178//! # Ok::<(), quiche::Error>(())
179//! ```
180//!
181//! The application will be provided with a [`SendInfo`] structure providing
182//! additional information about the newly created packet (such as the address
183//! the packet should be sent to).
184//!
185//! When packets are sent, the application is responsible for maintaining a
186//! timer to react to time-based connection events. The timer expiration can be
187//! obtained using the connection's [`timeout()`] method.
188//!
189//! ```
190//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
191//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
192//! # let peer = "127.0.0.1:1234".parse().unwrap();
193//! # let local = "127.0.0.1:4321".parse().unwrap();
194//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
195//! let timeout = conn.timeout();
196//! # Ok::<(), quiche::Error>(())
197//! ```
198//!
199//! The application is responsible for providing a timer implementation, which
200//! can be specific to the operating system or networking framework used. When
201//! a timer expires, the connection's [`on_timeout()`] method should be called,
202//! after which additional packets might need to be sent on the network:
203//!
204//! ```no_run
205//! # let mut out = [0; 512];
206//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
207//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
208//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
209//! # let peer = "127.0.0.1:1234".parse().unwrap();
210//! # let local = "127.0.0.1:4321".parse().unwrap();
211//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
212//! // Timeout expired, handle it.
213//! conn.on_timeout();
214//!
215//! // Send more packets as needed after timeout.
216//! loop {
217//! let (write, send_info) = match conn.send(&mut out) {
218//! Ok(v) => v,
219//!
220//! Err(quiche::Error::Done) => {
221//! // Done writing.
222//! break;
223//! },
224//!
225//! Err(e) => {
226//! // An error occurred, handle it.
227//! break;
228//! },
229//! };
230//!
231//! socket.send_to(&out[..write], &send_info.to).unwrap();
232//! }
233//! # Ok::<(), quiche::Error>(())
234//! ```
235//!
236//! ### Pacing
237//!
238//! It is recommended that applications [pace] sending of outgoing packets to
239//! avoid creating packet bursts that could cause short-term congestion and
240//! losses in the network.
241//!
242//! quiche exposes pacing hints for outgoing packets through the [`at`] field
243//! of the [`SendInfo`] structure that is returned by the [`send()`] method.
244//! This field represents the time when a specific packet should be sent into
245//! the network.
246//!
247//! Applications can use these hints by artificially delaying the sending of
248//! packets through platform-specific mechanisms (such as the [`SO_TXTIME`]
249//! socket option on Linux), or custom methods (for example by using user-space
250//! timers).
251//!
252//! [pace]: https://datatracker.ietf.org/doc/html/rfc9002#section-7.7
253//! [`SO_TXTIME`]: https://man7.org/linux/man-pages/man8/tc-etf.8.html
254//!
255//! ## Sending and receiving stream data
256//!
257//! After some back and forth, the connection will complete its handshake and
258//! will be ready for sending or receiving application data.
259//!
260//! Data can be sent on a stream by using the [`stream_send()`] method:
261//!
262//! ```no_run
263//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
264//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
265//! # let peer = "127.0.0.1:1234".parse().unwrap();
266//! # let local = "127.0.0.1:4321".parse().unwrap();
267//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
268//! if conn.is_established() {
269//! // Handshake completed, send some data on stream 0.
270//! conn.stream_send(0, b"hello", true)?;
271//! }
272//! # Ok::<(), quiche::Error>(())
273//! ```
274//!
275//! The application can check whether there are any readable streams by using
276//! the connection's [`readable()`] method, which returns an iterator over all
277//! the streams that have outstanding data to read.
278//!
279//! The [`stream_recv()`] method can then be used to retrieve the application
280//! data from the readable stream:
281//!
282//! ```no_run
283//! # let mut buf = [0; 512];
284//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
285//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
286//! # let peer = "127.0.0.1:1234".parse().unwrap();
287//! # let local = "127.0.0.1:4321".parse().unwrap();
288//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
289//! if conn.is_established() {
290//! // Iterate over readable streams.
291//! for stream_id in conn.readable() {
292//! // Stream is readable, read until there's no more data.
293//! while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
294//! println!("Got {} bytes on stream {}", read, stream_id);
295//! }
296//! }
297//! }
298//! # Ok::<(), quiche::Error>(())
299//! ```
300//!
301//! ## HTTP/3
302//!
303//! The quiche [HTTP/3 module] provides a high level API for sending and
304//! receiving HTTP requests and responses on top of the QUIC transport protocol.
305//!
306//! [`Config`]: https://docs.quic.tech/quiche/struct.Config.html
307//! [`set_initial_max_streams_bidi()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_streams_bidi
308//! [`set_initial_max_streams_uni()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_streams_uni
309//! [`set_initial_max_data()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_data
310//! [`set_initial_max_stream_data_bidi_local()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_bidi_local
311//! [`set_initial_max_stream_data_bidi_remote()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_bidi_remote
312//! [`set_initial_max_stream_data_uni()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_uni
313//! [`with_boring_ssl_ctx_builder()`]: https://docs.quic.tech/quiche/struct.Config.html#method.with_boring_ssl_ctx_builder
314//! [`connect()`]: fn.connect.html
315//! [`accept()`]: fn.accept.html
316//! [`recv()`]: struct.Connection.html#method.recv
317//! [`RecvInfo`]: struct.RecvInfo.html
318//! [`send()`]: struct.Connection.html#method.send
319//! [`SendInfo`]: struct.SendInfo.html
320//! [`at`]: struct.SendInfo.html#structfield.at
321//! [`timeout()`]: struct.Connection.html#method.timeout
322//! [`on_timeout()`]: struct.Connection.html#method.on_timeout
323//! [`stream_send()`]: struct.Connection.html#method.stream_send
324//! [`readable()`]: struct.Connection.html#method.readable
325//! [`stream_recv()`]: struct.Connection.html#method.stream_recv
326//! [HTTP/3 module]: h3/index.html
327//!
328//! ## Congestion Control
329//!
330//! The quiche library provides a high-level API for configuring which
331//! congestion control algorithm to use throughout the QUIC connection.
332//!
333//! When a QUIC connection is created, the application can optionally choose
334//! which CC algorithm to use. See [`CongestionControlAlgorithm`] for currently
335//! available congestion control algorithms.
336//!
337//! For example:
338//!
339//! ```
340//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
341//! config.set_cc_algorithm(quiche::CongestionControlAlgorithm::Reno);
342//! ```
343//!
344//! Alternatively, you can configure the congestion control algorithm to use
345//! by its name.
346//!
347//! ```
348//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
349//! config.set_cc_algorithm_name("reno").unwrap();
350//! ```
351//!
352//! Note that the CC algorithm should be configured before calling [`connect()`]
353//! or [`accept()`]. Otherwise the connection will use a default CC algorithm.
354//!
355//! [`CongestionControlAlgorithm`]: enum.CongestionControlAlgorithm.html
356//!
357//! ## Feature flags
358//!
359//! quiche defines a number of [feature flags] to reduce the amount of compiled
360//! code and dependencies:
361//!
362//! * `boringssl-vendored` (default): Build the vendored BoringSSL library.
363//!
364//! * `boringssl-boring-crate`: Use the BoringSSL library provided by the
365//! [boring] crate. It takes precedence over `boringssl-vendored` if both
366//! features are enabled.
367//!
368//! * `pkg-config-meta`: Generate pkg-config metadata file for libquiche.
369//!
370//! * `ffi`: Build and expose the FFI API.
371//!
372//! * `qlog`: Enable support for the [qlog] logging format.
373//!
374//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section
375//! [boring]: https://crates.io/crates/boring
376//! [qlog]: https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema
377
378#![allow(clippy::upper_case_acronyms)]
379#![warn(missing_docs)]
380#![warn(unused_qualifications)]
381#![cfg_attr(docsrs, feature(doc_cfg))]
382
383#[macro_use]
384extern crate log;
385
386use std::cmp;
387
388use std::collections::VecDeque;
389
390use std::net::SocketAddr;
391
392use std::str::FromStr;
393
394use std::sync::Arc;
395
396use std::time::Duration;
397use std::time::Instant;
398
399#[cfg(feature = "qlog")]
400use qlog::events::quic::DataMovedAdditionalInfo;
401#[cfg(feature = "qlog")]
402use qlog::events::quic::QuicEventType;
403#[cfg(feature = "qlog")]
404use qlog::events::quic::TransportInitiator;
405#[cfg(feature = "qlog")]
406use qlog::events::DataRecipient;
407#[cfg(feature = "qlog")]
408use qlog::events::Event;
409#[cfg(feature = "qlog")]
410use qlog::events::EventData;
411#[cfg(feature = "qlog")]
412use qlog::events::EventImportance;
413#[cfg(feature = "qlog")]
414use qlog::events::EventType;
415#[cfg(feature = "qlog")]
416use qlog::events::RawInfo;
417
418use smallvec::SmallVec;
419
420use crate::buffers::DefaultBufFactory;
421
422use crate::recovery::OnAckReceivedOutcome;
423use crate::recovery::OnLossDetectionTimeoutOutcome;
424use crate::recovery::RecoveryOps;
425use crate::recovery::ReleaseDecision;
426
427use crate::stream::RecvAction;
428use crate::stream::StreamPriorityKey;
429
/// The current QUIC wire version.
pub const PROTOCOL_VERSION: u32 = PROTOCOL_VERSION_V1;

/// Supported QUIC versions.
const PROTOCOL_VERSION_V1: u32 = 0x0000_0001;

/// The maximum length of a connection ID.
pub const MAX_CONN_ID_LEN: usize = packet::MAX_CID_LEN as usize;

/// The minimum length of Initial packets sent by a client.
///
/// Per RFC 9000 Section 14.1, clients must pad UDP datagrams carrying
/// Initial packets to at least 1200 bytes.
pub const MIN_CLIENT_INITIAL_LEN: usize = 1200;

/// The default initial RTT, used before any RTT sample is available.
///
/// Can be overridden per-config via `Config::set_initial_rtt()`.
const DEFAULT_INITIAL_RTT: Duration = Duration::from_millis(333);

// The minimum packet payload length, in bytes (enforced elsewhere in this
// file, outside this chunk).
const PAYLOAD_MIN_LEN: usize = 4;

// PATH_CHALLENGE (9 bytes) + AEAD tag (16 bytes).
const MIN_PROBING_SIZE: usize = 25;

// Default anti-amplification factor: how many bytes may be sent per byte
// received from a not-yet-validated peer address. Overridable via
// `Config::set_max_amplification_factor()`.
const MAX_AMPLIFICATION_FACTOR: usize = 3;

// The maximum number of tracked packet number ranges that need to be acked.
//
// This represents more or less how many ack blocks can fit in a typical packet.
const MAX_ACK_RANGES: usize = 68;

// The highest possible stream ID allowed.
const MAX_STREAM_ID: u64 = 1 << 60;

// The default max_datagram_size used in congestion control.
const MAX_SEND_UDP_PAYLOAD_SIZE: usize = 1200;

// The default length of DATAGRAM queues.
const DEFAULT_MAX_DGRAM_QUEUE_LEN: usize = 0;

// The default length of PATH_CHALLENGE receive queue.
const DEFAULT_MAX_PATH_CHALLENGE_RX_QUEUE_LEN: usize = 3;

// The DATAGRAM standard recommends either none or 65536 as maximum DATAGRAM
// frames size. We enforce the recommendation for forward compatibility.
const MAX_DGRAM_FRAME_SIZE: u64 = 65536;

// The length of the payload length field.
const PAYLOAD_LENGTH_LEN: usize = 2;

// The number of undecryptable packets that can be buffered.
const MAX_UNDECRYPTABLE_PACKETS: usize = 10;

// Bit mask used by `is_reserved_version()` below to classify version numbers
// reserved for greasing (see RFC 9000 Section 15).
const RESERVED_VERSION_MASK: u32 = 0xfafafafa;

// The default size of the receiver connection flow control window.
const DEFAULT_CONNECTION_WINDOW: u64 = 48 * 1024;

// The maximum size of the receiver connection flow control window.
const MAX_CONNECTION_WINDOW: u64 = 24 * 1024 * 1024;

// How much larger the connection flow control window need to be larger than
// the stream flow control window.
const CONNECTION_WINDOW_FACTOR: f64 = 1.5;

// How many probing packet timeouts do we tolerate before considering the path
// validation as failed.
const MAX_PROBING_TIMEOUTS: usize = 3;

// The default initial congestion window size in terms of packet count.
const DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS: usize = 10;

// The maximum data offset that can be stored in a crypto stream.
const MAX_CRYPTO_STREAM_OFFSET: u64 = 1 << 16;

// The send capacity factor.
const TX_CAP_FACTOR: f64 = 1.0;
503
/// Ancillary information about incoming packets.
///
/// An instance of this is passed to a connection's `recv()` method along
/// with the packet buffer, so that quiche knows which network path the
/// packet arrived on.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct RecvInfo {
    /// The remote address the packet was received from.
    pub from: SocketAddr,

    /// The local address the packet was received on.
    pub to: SocketAddr,
}
513
/// Ancillary information about outgoing packets.
///
/// An instance of this is returned by a connection's `send()` method,
/// telling the application where (and, for pacing, when) the generated
/// packet should be sent.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SendInfo {
    /// The local address the packet should be sent from.
    pub from: SocketAddr,

    /// The remote address the packet should be sent to.
    pub to: SocketAddr,

    /// The time to send the packet out.
    ///
    /// See [Pacing] for more details.
    ///
    /// [Pacing]: index.html#pacing
    pub at: Instant,
}
530
/// The side of the stream to be shut down.
///
/// This should be used when calling [`stream_shutdown()`].
///
/// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
// Public API type: derive the common traits (Clone, Copy, Debug) in addition
// to the comparison traits, per Rust API guidelines. All derives are additive
// and do not affect the C-compatible representation.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Shutdown {
    /// Stop receiving stream data.
    Read = 0,

    /// Stop sending stream data.
    Write = 1,
}
545
/// Qlog logging level.
///
/// Selects which qlog events are recorded by importance; each level also
/// includes all events of the more important levels above it.
#[repr(C)]
#[cfg(feature = "qlog")]
#[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
pub enum QlogLevel {
    /// Logs any events of Core importance.
    Core = 0,

    /// Logs any events of Core and Base importance.
    Base = 1,

    /// Logs any events of Core, Base and Extra importance.
    Extra = 2,
}
560
/// Stores configuration shared between multiple connections.
pub struct Config {
    /// QUIC transport parameters advertised by the local endpoint (e.g.
    /// `max_idle_timeout`, `max_udp_payload_size`; see the setters below).
    local_transport_params: TransportParams,

    /// The QUIC wire version, validated in `with_tls_ctx()`.
    version: u32,

    /// TLS context holding certificates, keys, ALPN, keylog and session
    /// ticket configuration shared by connections created from this config.
    tls_ctx: tls::Context,

    /// Owned copy of the ALPN protocol list set via
    /// `set_application_protos()`.
    application_protos: Vec<Vec<u8>>,

    /// Whether to send GREASE values (default `true`).
    grease: bool,

    /// The congestion control algorithm to use (default CUBIC).
    cc_algorithm: CongestionControlAlgorithm,
    /// Optional custom BBR tuning parameters.
    custom_bbr_params: Option<BbrParams>,
    /// The initial congestion window, in packets.
    initial_congestion_window_packets: usize,
    // NOTE(review): the three flags below gate loss-recovery/CC behaviors
    // implemented outside this chunk — see the recovery module for their
    // exact semantics.
    enable_relaxed_loss_threshold: bool,
    enable_cubic_idle_restart_fix: bool,
    enable_send_streams_blocked: bool,

    /// Whether to perform path MTU discovery (default `false`).
    pmtud: bool,
    /// Maximum number of PMTUD probe attempts per probe size.
    pmtud_max_probes: u8,

    /// Whether HyStart is enabled — presumably HyStart++ slow start;
    /// confirm in the congestion control code.
    hystart: bool,

    /// Whether packet pacing is enabled.
    pacing: bool,
    /// Send rate limit in Mbps
    max_pacing_rate: Option<u64>,

    /// Send capacity factor (see `set_send_capacity_factor()`).
    tx_cap_factor: f64,

    /// Maximum lengths of the DATAGRAM receive and send queues.
    dgram_recv_max_queue_len: usize,
    dgram_send_max_queue_len: usize,

    /// Maximum length of the PATH_CHALLENGE receive queue.
    path_challenge_recv_max_queue_len: usize,

    /// Maximum size of outgoing UDP payloads.
    max_send_udp_payload_size: usize,

    /// Upper bounds for the connection and per-stream flow control windows.
    max_connection_window: u64,
    max_stream_window: u64,

    /// Anti-amplification limit factor applied before address validation.
    max_amplification_factor: usize,

    /// Whether reuse of destination connection IDs is disabled.
    disable_dcid_reuse: bool,

    /// When `Some`, unknown peer transport parameters are tracked up to the
    /// given size — TODO(review): confirm semantics at the point of use.
    track_unknown_transport_params: Option<usize>,

    /// Initial RTT estimate used before RTT samples are available.
    initial_rtt: Duration,

    /// When true, uses the initial max data (for connection
    /// and stream) as the initial flow control window.
    use_initial_max_data_as_flow_control_win: bool,
}
613
614// See https://quicwg.org/base-drafts/rfc9000.html#section-15
615fn is_reserved_version(version: u32) -> bool {
616 version & RESERVED_VERSION_MASK == version
617}
618
619impl Config {
620 /// Creates a config object with the given version.
621 ///
622 /// ## Examples:
623 ///
624 /// ```
625 /// let config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
626 /// # Ok::<(), quiche::Error>(())
627 /// ```
628 pub fn new(version: u32) -> Result<Config> {
629 Self::with_tls_ctx(version, tls::Context::new()?)
630 }
631
632 /// Creates a config object with the given version and
633 /// [`SslContextBuilder`].
634 ///
635 /// This is useful for applications that wish to manually configure
636 /// [`SslContextBuilder`].
637 ///
638 /// [`SslContextBuilder`]: https://docs.rs/boring/latest/boring/ssl/struct.SslContextBuilder.html
639 #[cfg(feature = "boringssl-boring-crate")]
640 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
641 pub fn with_boring_ssl_ctx_builder(
642 version: u32, tls_ctx_builder: boring::ssl::SslContextBuilder,
643 ) -> Result<Config> {
644 Self::with_tls_ctx(version, tls::Context::from_boring(tls_ctx_builder))
645 }
646
647 fn with_tls_ctx(version: u32, tls_ctx: tls::Context) -> Result<Config> {
648 if !is_reserved_version(version) && !version_is_supported(version) {
649 return Err(Error::UnknownVersion);
650 }
651
652 Ok(Config {
653 local_transport_params: TransportParams::default(),
654 version,
655 tls_ctx,
656 application_protos: Vec::new(),
657 grease: true,
658 cc_algorithm: CongestionControlAlgorithm::CUBIC,
659 custom_bbr_params: None,
660 initial_congestion_window_packets:
661 DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS,
662 enable_relaxed_loss_threshold: false,
663 enable_cubic_idle_restart_fix: true,
664 enable_send_streams_blocked: false,
665 pmtud: false,
666 pmtud_max_probes: pmtud::MAX_PROBES_DEFAULT,
667 hystart: true,
668 pacing: true,
669 max_pacing_rate: None,
670
671 tx_cap_factor: TX_CAP_FACTOR,
672
673 dgram_recv_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,
674 dgram_send_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,
675
676 path_challenge_recv_max_queue_len:
677 DEFAULT_MAX_PATH_CHALLENGE_RX_QUEUE_LEN,
678
679 max_send_udp_payload_size: MAX_SEND_UDP_PAYLOAD_SIZE,
680
681 max_connection_window: MAX_CONNECTION_WINDOW,
682 max_stream_window: stream::MAX_STREAM_WINDOW,
683
684 max_amplification_factor: MAX_AMPLIFICATION_FACTOR,
685
686 disable_dcid_reuse: false,
687
688 track_unknown_transport_params: None,
689 initial_rtt: DEFAULT_INITIAL_RTT,
690
691 use_initial_max_data_as_flow_control_win: false,
692 })
693 }
694
695 /// Configures the given certificate chain.
696 ///
697 /// The content of `file` is parsed as a PEM-encoded leaf certificate,
698 /// followed by optional intermediate certificates.
699 ///
700 /// ## Examples:
701 ///
702 /// ```no_run
703 /// # let mut config = quiche::Config::new(0xbabababa)?;
704 /// config.load_cert_chain_from_pem_file("/path/to/cert.pem")?;
705 /// # Ok::<(), quiche::Error>(())
706 /// ```
707 pub fn load_cert_chain_from_pem_file(&mut self, file: &str) -> Result<()> {
708 self.tls_ctx.use_certificate_chain_file(file)
709 }
710
711 /// Configures the given private key.
712 ///
713 /// The content of `file` is parsed as a PEM-encoded private key.
714 ///
715 /// ## Examples:
716 ///
717 /// ```no_run
718 /// # let mut config = quiche::Config::new(0xbabababa)?;
719 /// config.load_priv_key_from_pem_file("/path/to/key.pem")?;
720 /// # Ok::<(), quiche::Error>(())
721 /// ```
722 pub fn load_priv_key_from_pem_file(&mut self, file: &str) -> Result<()> {
723 self.tls_ctx.use_privkey_file(file)
724 }
725
726 /// Specifies a file where trusted CA certificates are stored for the
727 /// purposes of certificate verification.
728 ///
729 /// The content of `file` is parsed as a PEM-encoded certificate chain.
730 ///
731 /// ## Examples:
732 ///
733 /// ```no_run
734 /// # let mut config = quiche::Config::new(0xbabababa)?;
735 /// config.load_verify_locations_from_file("/path/to/cert.pem")?;
736 /// # Ok::<(), quiche::Error>(())
737 /// ```
738 pub fn load_verify_locations_from_file(&mut self, file: &str) -> Result<()> {
739 self.tls_ctx.load_verify_locations_from_file(file)
740 }
741
742 /// Specifies a directory where trusted CA certificates are stored for the
743 /// purposes of certificate verification.
744 ///
745 /// The content of `dir` a set of PEM-encoded certificate chains.
746 ///
747 /// ## Examples:
748 ///
749 /// ```no_run
750 /// # let mut config = quiche::Config::new(0xbabababa)?;
751 /// config.load_verify_locations_from_directory("/path/to/certs")?;
752 /// # Ok::<(), quiche::Error>(())
753 /// ```
754 pub fn load_verify_locations_from_directory(
755 &mut self, dir: &str,
756 ) -> Result<()> {
757 self.tls_ctx.load_verify_locations_from_directory(dir)
758 }
759
760 /// Configures whether to verify the peer's certificate.
761 ///
762 /// This should usually be `true` for client-side connections and `false`
763 /// for server-side ones.
764 ///
765 /// Note that by default, no verification is performed.
766 ///
767 /// Also note that on the server-side, enabling verification of the peer
768 /// will trigger a certificate request and make authentication errors
769 /// fatal, but will still allow anonymous clients (i.e. clients that
770 /// don't present a certificate at all). Servers can check whether a
771 /// client presented a certificate by calling [`peer_cert()`] if they
772 /// need to.
773 ///
774 /// [`peer_cert()`]: struct.Connection.html#method.peer_cert
775 pub fn verify_peer(&mut self, verify: bool) {
776 self.tls_ctx.set_verify(verify);
777 }
778
779 /// Configures whether to do path MTU discovery.
780 ///
781 /// The default value is `false`.
782 pub fn discover_pmtu(&mut self, discover: bool) {
783 self.pmtud = discover;
784 }
785
    /// Configures the maximum number of PMTUD probe attempts before treating
    /// a probe size as failed.
    ///
    /// Defaults to 3 per [RFC 8899 Section 5.1.2](https://datatracker.ietf.org/doc/html/rfc8899#section-5.1.2).
    /// If 0 is passed, the default value is used.
    //
    // NOTE(review): the value is stored verbatim here; the documented
    // "0 means default" substitution is presumably applied where the field
    // is consumed — confirm at the point of use.
    pub fn set_pmtud_max_probes(&mut self, max_probes: u8) {
        self.pmtud_max_probes = max_probes;
    }
794
795 /// Configures whether to send GREASE values.
796 ///
797 /// The default value is `true`.
798 pub fn grease(&mut self, grease: bool) {
799 self.grease = grease;
800 }
801
802 /// Enables logging of secrets.
803 ///
804 /// When logging is enabled, the [`set_keylog()`] method must be called on
805 /// the connection for its cryptographic secrets to be logged in the
806 /// [keylog] format to the specified writer.
807 ///
808 /// [`set_keylog()`]: struct.Connection.html#method.set_keylog
809 /// [keylog]: https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format
810 pub fn log_keys(&mut self) {
811 self.tls_ctx.enable_keylog();
812 }
813
814 /// Configures the session ticket key material.
815 ///
816 /// On the server this key will be used to encrypt and decrypt session
817 /// tickets, used to perform session resumption without server-side state.
818 ///
819 /// By default a key is generated internally, and rotated regularly, so
820 /// applications don't need to call this unless they need to use a
821 /// specific key (e.g. in order to support resumption across multiple
822 /// servers), in which case the application is also responsible for
823 /// rotating the key to provide forward secrecy.
824 pub fn set_ticket_key(&mut self, key: &[u8]) -> Result<()> {
825 self.tls_ctx.set_ticket_key(key)
826 }
827
828 /// Enables sending or receiving early data.
829 pub fn enable_early_data(&mut self) {
830 self.tls_ctx.set_early_data_enabled(true);
831 }
832
833 /// Configures the list of supported application protocols.
834 ///
835 /// On the client this configures the list of protocols to send to the
836 /// server as part of the ALPN extension.
837 ///
838 /// On the server this configures the list of supported protocols to match
839 /// against the client-supplied list.
840 ///
841 /// Applications must set a value, but no default is provided.
842 ///
843 /// ## Examples:
844 ///
845 /// ```
846 /// # let mut config = quiche::Config::new(0xbabababa)?;
847 /// config.set_application_protos(&[b"http/1.1", b"http/0.9"]);
848 /// # Ok::<(), quiche::Error>(())
849 /// ```
850 pub fn set_application_protos(
851 &mut self, protos_list: &[&[u8]],
852 ) -> Result<()> {
853 self.application_protos =
854 protos_list.iter().map(|s| s.to_vec()).collect();
855
856 self.tls_ctx.set_alpn(protos_list)
857 }
858
859 /// Configures the list of supported application protocols using wire
860 /// format.
861 ///
862 /// The list of protocols `protos` must be a series of non-empty, 8-bit
863 /// length-prefixed strings.
864 ///
865 /// See [`set_application_protos`](Self::set_application_protos) for more
866 /// background about application protocols.
867 ///
868 /// ## Examples:
869 ///
870 /// ```
871 /// # let mut config = quiche::Config::new(0xbabababa)?;
872 /// config.set_application_protos_wire_format(b"\x08http/1.1\x08http/0.9")?;
873 /// # Ok::<(), quiche::Error>(())
874 /// ```
875 pub fn set_application_protos_wire_format(
876 &mut self, protos: &[u8],
877 ) -> Result<()> {
878 let mut b = octets::Octets::with_slice(protos);
879
880 let mut protos_list = Vec::new();
881
882 while let Ok(proto) = b.get_bytes_with_u8_length() {
883 protos_list.push(proto.buf());
884 }
885
886 self.set_application_protos(&protos_list)
887 }
888
    /// Sets the anti-amplification limit factor.
    ///
    /// Before the peer's address is validated, a server may send no more than
    /// this factor times the number of bytes received from the client.
    ///
    /// The default value is `3`.
    pub fn set_max_amplification_factor(&mut self, v: usize) {
        self.max_amplification_factor = v;
    }
895
    /// Sets the send capacity factor.
    ///
    /// The default value is `1`.
    pub fn set_send_capacity_factor(&mut self, v: f64) {
        // Copied onto the connection (see `Connection::tx_cap_factor`) where
        // it scales the computed send capacity.
        self.tx_cap_factor = v;
    }
902
    /// Sets the connection's initial RTT.
    ///
    /// The default value is `333` milliseconds.
    pub fn set_initial_rtt(&mut self, v: Duration) {
        self.initial_rtt = v;
    }
909
910 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
911 ///
912 /// The default value is infinite, that is, no timeout is used.
913 pub fn set_max_idle_timeout(&mut self, v: u64) {
914 self.local_transport_params.max_idle_timeout =
915 cmp::min(v, octets::MAX_VAR_INT);
916 }
917
918 /// Sets the `max_udp_payload_size transport` parameter.
919 ///
920 /// The default value is `65527`.
921 pub fn set_max_recv_udp_payload_size(&mut self, v: usize) {
922 self.local_transport_params.max_udp_payload_size =
923 cmp::min(v as u64, octets::MAX_VAR_INT);
924 }
925
926 /// Sets the maximum outgoing UDP payload size.
927 ///
928 /// The default and minimum value is `1200`.
929 pub fn set_max_send_udp_payload_size(&mut self, v: usize) {
930 self.max_send_udp_payload_size = cmp::max(v, MAX_SEND_UDP_PAYLOAD_SIZE);
931 }
932
933 /// Sets the `initial_max_data` transport parameter.
934 ///
935 /// When set to a non-zero value quiche will only allow at most `v` bytes of
936 /// incoming stream data to be buffered for the whole connection (that is,
937 /// data that is not yet read by the application) and will allow more data
938 /// to be received as the buffer is consumed by the application.
939 ///
940 /// When set to zero, either explicitly or via the default, quiche will not
941 /// give any flow control to the peer, preventing it from sending any stream
942 /// data.
943 ///
944 /// The default value is `0`.
945 pub fn set_initial_max_data(&mut self, v: u64) {
946 self.local_transport_params.initial_max_data =
947 cmp::min(v, octets::MAX_VAR_INT);
948 }
949
950 /// Sets the `initial_max_stream_data_bidi_local` transport parameter.
951 ///
952 /// When set to a non-zero value quiche will only allow at most `v` bytes
953 /// of incoming stream data to be buffered for each locally-initiated
954 /// bidirectional stream (that is, data that is not yet read by the
955 /// application) and will allow more data to be received as the buffer is
956 /// consumed by the application.
957 ///
958 /// When set to zero, either explicitly or via the default, quiche will not
959 /// give any flow control to the peer, preventing it from sending any stream
960 /// data.
961 ///
962 /// The default value is `0`.
963 pub fn set_initial_max_stream_data_bidi_local(&mut self, v: u64) {
964 self.local_transport_params
965 .initial_max_stream_data_bidi_local =
966 cmp::min(v, octets::MAX_VAR_INT);
967 }
968
969 /// Sets the `initial_max_stream_data_bidi_remote` transport parameter.
970 ///
971 /// When set to a non-zero value quiche will only allow at most `v` bytes
972 /// of incoming stream data to be buffered for each remotely-initiated
973 /// bidirectional stream (that is, data that is not yet read by the
974 /// application) and will allow more data to be received as the buffer is
975 /// consumed by the application.
976 ///
977 /// When set to zero, either explicitly or via the default, quiche will not
978 /// give any flow control to the peer, preventing it from sending any stream
979 /// data.
980 ///
981 /// The default value is `0`.
982 pub fn set_initial_max_stream_data_bidi_remote(&mut self, v: u64) {
983 self.local_transport_params
984 .initial_max_stream_data_bidi_remote =
985 cmp::min(v, octets::MAX_VAR_INT);
986 }
987
988 /// Sets the `initial_max_stream_data_uni` transport parameter.
989 ///
990 /// When set to a non-zero value quiche will only allow at most `v` bytes
991 /// of incoming stream data to be buffered for each unidirectional stream
992 /// (that is, data that is not yet read by the application) and will allow
993 /// more data to be received as the buffer is consumed by the application.
994 ///
995 /// When set to zero, either explicitly or via the default, quiche will not
996 /// give any flow control to the peer, preventing it from sending any stream
997 /// data.
998 ///
999 /// The default value is `0`.
1000 pub fn set_initial_max_stream_data_uni(&mut self, v: u64) {
1001 self.local_transport_params.initial_max_stream_data_uni =
1002 cmp::min(v, octets::MAX_VAR_INT);
1003 }
1004
1005 /// Sets the `initial_max_streams_bidi` transport parameter.
1006 ///
1007 /// When set to a non-zero value quiche will only allow `v` number of
1008 /// concurrent remotely-initiated bidirectional streams to be open at any
1009 /// given time and will increase the limit automatically as streams are
1010 /// completed.
1011 ///
1012 /// When set to zero, either explicitly or via the default, quiche will not
1013 /// not allow the peer to open any bidirectional streams.
1014 ///
1015 /// A bidirectional stream is considered completed when all incoming data
1016 /// has been read by the application (up to the `fin` offset) or the
1017 /// stream's read direction has been shutdown, and all outgoing data has
1018 /// been acked by the peer (up to the `fin` offset) or the stream's write
1019 /// direction has been shutdown.
1020 ///
1021 /// The default value is `0`.
1022 pub fn set_initial_max_streams_bidi(&mut self, v: u64) {
1023 self.local_transport_params.initial_max_streams_bidi =
1024 cmp::min(v, octets::MAX_VAR_INT);
1025 }
1026
1027 /// Sets the `initial_max_streams_uni` transport parameter.
1028 ///
1029 /// When set to a non-zero value quiche will only allow `v` number of
1030 /// concurrent remotely-initiated unidirectional streams to be open at any
1031 /// given time and will increase the limit automatically as streams are
1032 /// completed.
1033 ///
1034 /// When set to zero, either explicitly or via the default, quiche will not
1035 /// not allow the peer to open any unidirectional streams.
1036 ///
1037 /// A unidirectional stream is considered completed when all incoming data
1038 /// has been read by the application (up to the `fin` offset) or the
1039 /// stream's read direction has been shutdown.
1040 ///
1041 /// The default value is `0`.
1042 pub fn set_initial_max_streams_uni(&mut self, v: u64) {
1043 self.local_transport_params.initial_max_streams_uni =
1044 cmp::min(v, octets::MAX_VAR_INT);
1045 }
1046
1047 /// Sets the `ack_delay_exponent` transport parameter.
1048 ///
1049 /// The default value is `3`.
1050 pub fn set_ack_delay_exponent(&mut self, v: u64) {
1051 self.local_transport_params.ack_delay_exponent =
1052 cmp::min(v, octets::MAX_VAR_INT);
1053 }
1054
1055 /// Sets the `max_ack_delay` transport parameter.
1056 ///
1057 /// The default value is `25`.
1058 pub fn set_max_ack_delay(&mut self, v: u64) {
1059 self.local_transport_params.max_ack_delay =
1060 cmp::min(v, octets::MAX_VAR_INT);
1061 }
1062
1063 /// Sets the `active_connection_id_limit` transport parameter.
1064 ///
1065 /// The default value is `2`. Lower values will be ignored.
1066 pub fn set_active_connection_id_limit(&mut self, v: u64) {
1067 if v >= 2 {
1068 self.local_transport_params.active_conn_id_limit =
1069 cmp::min(v, octets::MAX_VAR_INT);
1070 }
1071 }
1072
    /// Sets the `disable_active_migration` transport parameter.
    ///
    /// When set to `true` the peer is asked not to actively migrate the
    /// connection to a new address.
    ///
    /// The default value is `false`.
    pub fn set_disable_active_migration(&mut self, v: bool) {
        self.local_transport_params.disable_active_migration = v;
    }
1079
    /// Sets the congestion control algorithm used.
    ///
    /// See also [`set_cc_algorithm_name`](Self::set_cc_algorithm_name) to
    /// select the algorithm by string.
    ///
    /// The default value is `CongestionControlAlgorithm::CUBIC`.
    pub fn set_cc_algorithm(&mut self, algo: CongestionControlAlgorithm) {
        self.cc_algorithm = algo;
    }
1086
    /// Sets custom BBR settings.
    ///
    /// This API is experimental and will be removed in the future.
    ///
    /// Currently this only applies when `cc_algorithm` is set to
    /// `CongestionControlAlgorithm::Bbr2Gcongestion`.
    ///
    /// The default value is `None`.
    #[cfg(feature = "internal")]
    #[doc(hidden)]
    pub fn set_custom_bbr_params(&mut self, custom_bbr_settings: BbrParams) {
        self.custom_bbr_params = Some(custom_bbr_settings);
    }
1100
1101 /// Sets the congestion control algorithm used by string.
1102 ///
1103 /// The default value is `cubic`. On error `Error::CongestionControl`
1104 /// will be returned.
1105 ///
1106 /// ## Examples:
1107 ///
1108 /// ```
1109 /// # let mut config = quiche::Config::new(0xbabababa)?;
1110 /// config.set_cc_algorithm_name("reno");
1111 /// # Ok::<(), quiche::Error>(())
1112 /// ```
1113 pub fn set_cc_algorithm_name(&mut self, name: &str) -> Result<()> {
1114 self.cc_algorithm = CongestionControlAlgorithm::from_str(name)?;
1115
1116 Ok(())
1117 }
1118
    /// Sets initial congestion window size in terms of packet count.
    ///
    /// The default value is `10`.
    pub fn set_initial_congestion_window_packets(&mut self, packets: usize) {
        self.initial_congestion_window_packets = packets;
    }
1125
    /// Configures whether to enable relaxed loss detection on spurious loss.
    ///
    /// The default value is `false`.
    pub fn set_enable_relaxed_loss_threshold(&mut self, enable: bool) {
        self.enable_relaxed_loss_threshold = enable;
    }
1132
    /// Configures whether to enable the CUBIC idle restart fix.
    ///
    /// When enabled, the epoch shift on idle restart uses the later of
    /// the last ACK time and last send time, avoiding an inflated delta
    /// when bytes-in-flight transiently hits zero.
    ///
    /// The default value is `true`.
    pub fn set_enable_cubic_idle_restart_fix(&mut self, enable: bool) {
        self.enable_cubic_idle_restart_fix = enable;
    }
1143
    /// Configures whether to enable sending STREAMS_BLOCKED frames.
    ///
    /// STREAMS_BLOCKED frames are an optional advisory signal in the QUIC
    /// protocol which SHOULD be sent when the sender wishes to open a stream
    /// but is unable to do so due to the maximum stream limit set by its peer.
    ///
    /// The default value is `false`.
    pub fn set_enable_send_streams_blocked(&mut self, enable: bool) {
        self.enable_send_streams_blocked = enable;
    }
1154
    /// Configures whether to enable HyStart++.
    ///
    /// HyStart++ ([RFC 9406]) is a slow start enhancement used by the
    /// congestion controller.
    ///
    /// The default value is `true`.
    ///
    /// [RFC 9406]: https://datatracker.ietf.org/doc/html/rfc9406
    pub fn enable_hystart(&mut self, v: bool) {
        self.hystart = v;
    }
1161
    /// Configures whether to enable pacing.
    ///
    /// See also [`set_max_pacing_rate`](Self::set_max_pacing_rate) to cap the
    /// pacing rate.
    ///
    /// The default value is `true`.
    pub fn enable_pacing(&mut self, v: bool) {
        self.pacing = v;
    }
1168
    /// Sets the max value for pacing rate.
    ///
    /// By default pacing rate is not limited.
    ///
    /// See also [`enable_pacing`](Self::enable_pacing).
    pub fn set_max_pacing_rate(&mut self, v: u64) {
        self.max_pacing_rate = Some(v);
    }
1175
1176 /// Configures whether to enable receiving DATAGRAM frames.
1177 ///
1178 /// When enabled, the `max_datagram_frame_size` transport parameter is set
1179 /// to 65536 as recommended by draft-ietf-quic-datagram-01.
1180 ///
1181 /// The default is `false`.
1182 pub fn enable_dgram(
1183 &mut self, enabled: bool, recv_queue_len: usize, send_queue_len: usize,
1184 ) {
1185 self.local_transport_params.max_datagram_frame_size = if enabled {
1186 Some(MAX_DGRAM_FRAME_SIZE)
1187 } else {
1188 None
1189 };
1190 self.dgram_recv_max_queue_len = recv_queue_len;
1191 self.dgram_send_max_queue_len = send_queue_len;
1192 }
1193
    /// Configures the max number of queued received PATH_CHALLENGE frames.
    ///
    /// When an endpoint receives a PATH_CHALLENGE frame and the queue is full,
    /// the frame is discarded.
    ///
    /// The default is `3`.
    pub fn set_path_challenge_recv_max_queue_len(&mut self, queue_len: usize) {
        self.path_challenge_recv_max_queue_len = queue_len;
    }
1203
    /// Sets the maximum size of the connection window.
    ///
    /// The default value is `MAX_CONNECTION_WINDOW` (24MBytes).
    pub fn set_max_connection_window(&mut self, v: u64) {
        self.max_connection_window = v;
    }
1210
    /// Sets the maximum size of the stream window.
    ///
    /// The default value is `MAX_STREAM_WINDOW` (16MBytes).
    pub fn set_max_stream_window(&mut self, v: u64) {
        self.max_stream_window = v;
    }
1217
    /// Sets the initial stateless reset token.
    ///
    /// This value is only advertised by servers. Setting a stateless reset
    /// token as a client has no effect on the connection.
    ///
    /// The default value is `None`.
    pub fn set_stateless_reset_token(&mut self, v: Option<u128>) {
        self.local_transport_params.stateless_reset_token = v;
    }
1227
    /// Sets whether the QUIC connection should avoid reusing DCIDs over
    /// different paths.
    ///
    /// When set to `true`, it ensures that a destination Connection ID is never
    /// reused on different paths. Such behaviour may lead to connection stall
    /// if the peer performs a non-voluntary migration (e.g., NAT rebinding) and
    /// does not provide additional destination Connection IDs to handle such an
    /// event.
    ///
    /// The default value is `false`.
    pub fn set_disable_dcid_reuse(&mut self, v: bool) {
        self.disable_dcid_reuse = v;
    }
1241
    /// Enables tracking unknown transport parameters.
    ///
    /// `size` is the maximum number of bytes used to track unknown transport
    /// parameters. The size includes the identifier and its value. If storing a
    /// transport parameter would cause the limit to be exceeded, it is quietly
    /// dropped.
    ///
    /// The default is that the feature is disabled.
    pub fn enable_track_unknown_transport_parameters(&mut self, size: usize) {
        self.track_unknown_transport_params = Some(size);
    }
1253
    /// Sets whether the initial max data value should be used as the initial
    /// flow control window.
    ///
    /// If set to true, the initial flow control window for streams and the
    /// connection itself will be set to the initial max data value for streams
    /// and the connection respectively. If false, the window is set to the
    /// minimum of initial max data and `DEFAULT_STREAM_WINDOW` or
    /// `DEFAULT_CONNECTION_WINDOW`.
    ///
    /// The default is `false`.
    pub fn set_use_initial_max_data_as_flow_control_win(&mut self, v: bool) {
        self.use_initial_max_data_as_flow_control_win = v;
    }
1267}
1268
/// Tracks the health of the tx_buffered value.
// `Eq` is derived alongside `PartialEq` since the comparison is total for a
// fieldless enum (clippy: derive_partial_eq_without_eq); this is a
// backward-compatible API addition.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum TxBufferTrackingState {
    /// The send buffer is in a good state.
    #[default]
    Ok,
    /// The send buffer is in an inconsistent state, which could lead to
    /// connection stalls or excess buffering due to bugs we haven't
    /// tracked down yet.
    Inconsistent,
}
1280
/// Tracks if the connection hit the peer stream limit and which
/// STREAMS_BLOCKED frames have been sent.
#[derive(Default)]
struct StreamsBlockedState {
    /// The peer's max_streams limit at which we last became blocked on
    /// opening new local streams, if any.
    blocked_at: Option<u64>,

    /// The stream limit sent on the most recently sent STREAMS_BLOCKED
    /// frame. If != to blocked_at, the connection has pending STREAMS_BLOCKED
    /// frames to send.
    blocked_sent: Option<u64>,
}

impl StreamsBlockedState {
    /// Returns true if there is a STREAMS_BLOCKED frame that needs sending.
    fn has_pending_stream_blocked_frame(&self) -> bool {
        match self.blocked_at {
            // A frame is pending when nothing has been sent yet, or the limit
            // sent last is older than the limit we are currently blocked at.
            Some(at) => self.blocked_sent.map_or(true, |sent| sent < at),
            None => false,
        }
    }

    /// Update the stream blocked limit.
    fn update_at(&mut self, limit: u64) {
        // Only ever move the blocked limit forward (`None` counts as the
        // smallest value).
        if self.blocked_at < Some(limit) {
            self.blocked_at = Some(limit);
        }
    }

    /// Clear blocked_sent to force retransmission of the most recently sent
    /// STREAMS_BLOCKED frame.
    fn force_retransmit_sent_limit_eq(&mut self, limit: u64) {
        // Only clear blocked_sent if the lost frame had the most recently
        // sent limit.
        if self.blocked_sent == Some(limit) {
            self.blocked_sent = None;
        }
    }
}
1316
/// A QUIC connection.
pub struct Connection<F = DefaultBufFactory>
where
    F: BufFactory,
{
    /// QUIC wire version used for the connection.
    version: u32,

    /// Connection Identifiers.
    ids: cid::ConnectionIdentifiers,

    /// Unique opaque ID for the connection that can be used for logging.
    trace_id: String,

    /// Packet number spaces.
    pkt_num_spaces: [packet::PktNumSpace; packet::Epoch::count()],

    /// The crypto context.
    crypto_ctx: [packet::CryptoContext; packet::Epoch::count()],

    /// Next packet number.
    next_pkt_num: u64,

    // TODO
    // combine with `next_pkt_num`
    /// Track the packet skip context
    pkt_num_manager: packet::PktNumManager,

    /// Peer's transport parameters.
    peer_transport_params: TransportParams,

    /// If tracking unknown transport parameters from a peer, how much space to
    /// use in bytes.
    peer_transport_params_track_unknown: Option<usize>,

    /// Local transport parameters.
    local_transport_params: TransportParams,

    /// TLS handshake state.
    handshake: tls::Handshake,

    /// Serialized TLS session buffer.
    ///
    /// This field is populated when a new session ticket is processed on the
    /// client. On the server this is empty.
    session: Option<Vec<u8>>,

    /// The configuration for recovery.
    recovery_config: recovery::RecoveryConfig,

    /// The path manager.
    paths: path::PathMap,

    /// PATH_CHALLENGE receive queue max length.
    path_challenge_recv_max_queue_len: usize,

    /// Total number of received PATH_CHALLENGE frames.
    path_challenge_rx_count: u64,

    /// List of supported application protocols.
    application_protos: Vec<Vec<u8>>,

    /// Total number of received packets.
    recv_count: usize,

    /// Total number of sent packets.
    sent_count: usize,

    /// Total number of lost packets.
    lost_count: usize,

    /// Total number of lost packets that were later acked.
    spurious_lost_count: usize,

    /// Total number of packets sent with data retransmitted.
    retrans_count: usize,

    /// Total number of sent DATAGRAM frames.
    dgram_sent_count: usize,

    /// Total number of received DATAGRAM frames.
    dgram_recv_count: usize,

    /// Total number of bytes received from the peer.
    rx_data: u64,

    /// Receiver flow controller.
    flow_control: flowcontrol::FlowControl,

    /// Whether a MAX_DATA frame should be sent.
    should_send_max_data: bool,

    /// True if there is a pending MAX_STREAMS_BIDI frame to send.
    should_send_max_streams_bidi: bool,

    /// True if there is a pending MAX_STREAMS_UNI frame to send.
    should_send_max_streams_uni: bool,

    /// Number of stream data bytes that can be buffered.
    tx_cap: usize,

    /// The send capacity factor.
    tx_cap_factor: f64,

    /// Number of bytes buffered in the send buffer.
    tx_buffered: usize,

    /// Tracks the health of tx_buffered.
    tx_buffered_state: TxBufferTrackingState,

    /// Total number of bytes sent to the peer.
    tx_data: u64,

    /// Peer's flow control limit for the connection.
    max_tx_data: u64,

    /// Last tx_data before running a full send() loop.
    last_tx_data: u64,

    /// Total number of bytes retransmitted over the connection.
    /// This counts only STREAM and CRYPTO data.
    stream_retrans_bytes: u64,

    /// Total number of bytes sent over the connection.
    sent_bytes: u64,

    /// Total number of bytes received over the connection.
    recv_bytes: u64,

    /// Total number of bytes sent acked over the connection.
    acked_bytes: u64,

    /// Total number of bytes sent lost over the connection.
    lost_bytes: u64,

    /// Streams map, indexed by stream ID.
    streams: stream::StreamMap<F>,

    /// Peer's original destination connection ID. Used by the client to
    /// validate the server's transport parameter.
    odcid: Option<ConnectionId<'static>>,

    /// Peer's retry source connection ID. Used by the client during stateless
    /// retry to validate the server's transport parameter.
    rscid: Option<ConnectionId<'static>>,

    /// Received address verification token.
    token: Option<Vec<u8>>,

    /// Error code and reason to be sent to the peer in a CONNECTION_CLOSE
    /// frame.
    local_error: Option<ConnectionError>,

    /// Error code and reason received from the peer in a CONNECTION_CLOSE
    /// frame.
    peer_error: Option<ConnectionError>,

    /// The connection-level limit at which send blocking occurred.
    blocked_limit: Option<u64>,

    /// Idle timeout expiration time.
    idle_timer: Option<Instant>,

    /// Draining timeout expiration time.
    draining_timer: Option<Instant>,

    /// List of raw packets that were received before they could be decrypted.
    undecryptable_pkts: VecDeque<(Vec<u8>, RecvInfo)>,

    /// The negotiated ALPN protocol.
    alpn: Vec<u8>,

    /// Whether this is a server-side connection.
    is_server: bool,

    /// Whether the initial secrets have been derived.
    derived_initial_secrets: bool,

    /// Whether a version negotiation packet has already been received. Only
    /// relevant for client connections.
    did_version_negotiation: bool,

    /// Whether stateless retry has been performed.
    did_retry: bool,

    /// Whether the peer already updated its connection ID.
    got_peer_conn_id: bool,

    /// Whether the peer verified our initial address.
    peer_verified_initial_address: bool,

    /// Whether the peer's transport parameters were parsed.
    parsed_peer_transport_params: bool,

    /// Whether the connection handshake has been completed.
    handshake_completed: bool,

    /// Whether the HANDSHAKE_DONE frame has been sent.
    handshake_done_sent: bool,

    /// Whether the HANDSHAKE_DONE frame has been acked.
    handshake_done_acked: bool,

    /// Whether the connection handshake has been confirmed.
    handshake_confirmed: bool,

    /// Key phase bit used for outgoing protected packets.
    key_phase: bool,

    /// Whether an ack-eliciting packet has been sent since last receiving a
    /// packet.
    ack_eliciting_sent: bool,

    /// Whether the connection is closed.
    closed: bool,

    /// Whether the connection was timed out.
    timed_out: bool,

    /// Whether to send GREASE.
    grease: bool,

    /// Whether to send STREAMS_BLOCKED frames when bidi or uni stream quota
    /// exhausted.
    enable_send_streams_blocked: bool,

    /// TLS keylog writer.
    keylog: Option<Box<dyn std::io::Write + Send + Sync>>,

    #[cfg(feature = "qlog")]
    qlog: QlogInfo,

    /// DATAGRAM queues.
    dgram_recv_queue: dgram::DatagramQueue<F>,
    dgram_send_queue: dgram::DatagramQueue<F>,

    /// Whether to emit DATAGRAM frames in the next packet.
    emit_dgram: bool,

    /// Whether the connection should prevent from reusing destination
    /// Connection IDs when the peer migrates.
    disable_dcid_reuse: bool,

    /// The number of streams reset by local.
    reset_stream_local_count: u64,

    /// The number of streams stopped by local.
    stopped_stream_local_count: u64,

    /// The number of streams reset by remote.
    reset_stream_remote_count: u64,

    /// The number of streams stopped by remote.
    stopped_stream_remote_count: u64,

    /// The number of DATA_BLOCKED frames sent due to hitting the connection
    /// flow control limit.
    data_blocked_sent_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames sent due to a stream hitting
    /// the stream flow control limit.
    stream_data_blocked_sent_count: u64,

    /// The number of DATA_BLOCKED frames received from the remote endpoint.
    data_blocked_recv_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames received from the remote
    /// endpoint.
    stream_data_blocked_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames received from the remote endpoint
    /// indicating the peer is blocked on opening new bidirectional streams.
    streams_blocked_bidi_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames received from the remote endpoint
    /// indicating the peer is blocked on opening new unidirectional streams.
    streams_blocked_uni_recv_count: u64,

    /// Tracks if the connection hit the peer's bidi or uni stream limit, and if
    /// STREAMS_BLOCKED frames are pending transmission.
    streams_blocked_bidi_state: StreamsBlockedState,
    streams_blocked_uni_state: StreamsBlockedState,

    /// The anti-amplification limit factor.
    max_amplification_factor: usize,
}
1603
/// Creates a new server-side connection.
///
/// The `scid` parameter represents the server's source connection ID, while
/// the optional `odcid` parameter represents the original destination ID the
/// client sent before a Retry packet (this is only required when using the
/// [`retry()`] function). See also the [`accept_with_retry()`] function for
/// more advanced retry cases.
///
/// [`retry()`]: fn.retry.html
///
/// ## Examples:
///
/// ```no_run
/// # let mut config = quiche::Config::new(0xbabababa)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let local = "127.0.0.1:0".parse().unwrap();
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// let conn = quiche::accept(&scid, None, local, peer, &mut config)?;
/// # Ok::<(), quiche::Error>(())
/// ```
#[inline(always)]
pub fn accept(
    scid: &ConnectionId, odcid: Option<&ConnectionId>, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection> {
    // Thin wrapper over the generic variant, using the default buffer factory.
    accept_with_buf_factory(scid, odcid, local, peer, config)
}
1631
/// Creates a new server-side connection, with a custom buffer generation
/// method.
///
/// The buffers generated can be anything that can be dereferenced as a byte
/// slice. See [`accept`] and [`BufFactory`] for more info.
#[inline]
pub fn accept_with_buf_factory<F: BufFactory>(
    scid: &ConnectionId, odcid: Option<&ConnectionId>, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    // For connections with `odcid` set, we historically used `retry_source_cid =
    // scid`. Keep this behavior to preserve backwards compatibility.
    // `accept_with_retry` allows the SCIDs to be specified separately.
    let retry_cids = odcid.map(|odcid| RetryConnectionIds {
        original_destination_cid: odcid,
        retry_source_cid: scid,
    });
    Connection::new(scid, retry_cids, None, local, peer, config, true)
}
1651
/// A wrapper for connection IDs used in [`accept_with_retry`].
pub struct RetryConnectionIds<'a> {
    /// The DCID of the first Initial packet received by the server (i.e. the
    /// client's original destination CID), which triggered the Retry packet.
    pub original_destination_cid: &'a ConnectionId<'a>,
    /// The SCID of the Retry packet sent by the server. This can be different
    /// from the new connection's SCID.
    pub retry_source_cid: &'a ConnectionId<'a>,
}
1661
/// Creates a new server-side connection after the client responded to a Retry
/// packet.
///
/// To generate a Retry packet in the first place, use the [`retry()`] function.
///
/// The `scid` parameter represents the server's source connection ID, which can
/// be freshly generated after the application has successfully verified the
/// Retry. `retry_cids` is used to tie the new connection to the Initial + Retry
/// exchange that preceded the connection's creation.
///
/// The DCID of the client's Initial packet is inherently untrusted data. It is
/// safe to use the DCID in the `retry_source_cid` field of the
/// `RetryConnectionIds` provided to this function. However, using the Initial's
/// DCID for the `scid` parameter carries risks. Applications are advised to
/// implement their own DCID validation steps before using the DCID in that
/// manner.
#[inline]
pub fn accept_with_retry<F: BufFactory>(
    scid: &ConnectionId, retry_cids: RetryConnectionIds, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    // The final `true` marks this as a server-side connection.
    Connection::new(scid, Some(retry_cids), None, local, peer, config, true)
}
1685
1686/// Creates a new client-side connection.
1687///
1688/// The `scid` parameter is used as the connection's source connection ID,
1689/// while the optional `server_name` parameter is used to verify the peer's
1690/// certificate.
1691///
1692/// ## Examples:
1693///
1694/// ```no_run
1695/// # let mut config = quiche::Config::new(0xbabababa)?;
1696/// # let server_name = "quic.tech";
1697/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
1698/// # let local = "127.0.0.1:4321".parse().unwrap();
1699/// # let peer = "127.0.0.1:1234".parse().unwrap();
1700/// let conn =
1701/// quiche::connect(Some(&server_name), &scid, local, peer, &mut config)?;
1702/// # Ok::<(), quiche::Error>(())
1703/// ```
1704#[inline]
1705pub fn connect(
1706 server_name: Option<&str>, scid: &ConnectionId, local: SocketAddr,
1707 peer: SocketAddr, config: &mut Config,
1708) -> Result<Connection> {
1709 let mut conn = Connection::new(scid, None, None, local, peer, config, false)?;
1710
1711 if let Some(server_name) = server_name {
1712 conn.handshake.set_host_name(server_name)?;
1713 }
1714
1715 Ok(conn)
1716}
1717
/// Creates a new client-side connection using the given DCID initially.
///
/// Be aware that [RFC 9000] places requirements for unpredictability and length
/// on the client DCID field. This function is dangerous if these requirements
/// are not satisfied.
///
/// The `scid` parameter is used as the connection's source connection ID, while
/// the optional `server_name` parameter is used to verify the peer's
/// certificate.
///
/// [RFC 9000]: <https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3>
#[cfg(feature = "custom-client-dcid")]
#[cfg_attr(docsrs, doc(cfg(feature = "custom-client-dcid")))]
pub fn connect_with_dcid(
    server_name: Option<&str>, scid: &ConnectionId, dcid: &ConnectionId,
    local: SocketAddr, peer: SocketAddr, config: &mut Config,
) -> Result<Connection> {
    // Pass the caller-supplied DCID through to the connection constructor.
    let mut conn =
        Connection::new(scid, None, Some(dcid), local, peer, config, false)?;

    if let Some(server_name) = server_name {
        conn.handshake.set_host_name(server_name)?;
    }

    Ok(conn)
}
1744
1745/// Creates a new client-side connection, with a custom buffer generation
1746/// method.
1747///
/// The buffers generated can be anything that can be dereferenced as a byte
1749/// slice. See [`connect`] and [`BufFactory`] for more info.
1750#[inline]
1751pub fn connect_with_buffer_factory<F: BufFactory>(
1752 server_name: Option<&str>, scid: &ConnectionId, local: SocketAddr,
1753 peer: SocketAddr, config: &mut Config,
1754) -> Result<Connection<F>> {
1755 let mut conn = Connection::new(scid, None, None, local, peer, config, false)?;
1756
1757 if let Some(server_name) = server_name {
1758 conn.handshake.set_host_name(server_name)?;
1759 }
1760
1761 Ok(conn)
1762}
1763
1764/// Creates a new client-side connection, with a custom buffer generation
1765/// method using the given dcid initially.
1766/// Be aware the RFC places requirements for unpredictability and length
1767/// on the client DCID field.
1768/// [`RFC9000`]: https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3
1769///
1770/// The buffers generated can be anything that can be drereferenced as a byte
1771/// slice. See [`connect`] and [`BufFactory`] for more info.
#[cfg(feature = "custom-client-dcid")]
#[cfg_attr(docsrs, doc(cfg(feature = "custom-client-dcid")))]
pub fn connect_with_dcid_and_buffer_factory<F: BufFactory>(
    server_name: Option<&str>, scid: &ConnectionId, dcid: &ConnectionId,
    local: SocketAddr, peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    // Combination of `connect_with_dcid()` and
    // `connect_with_buffer_factory()`: caller-provided DCID plus a custom
    // buffer factory type parameter `F`.
    let mut connection: Connection<F> =
        Connection::new(scid, None, Some(dcid), local, peer, config, false)?;

    if let Some(host) = server_name {
        connection.handshake.set_host_name(host)?;
    }

    Ok(connection)
}
1787
1788/// Writes a version negotiation packet.
1789///
1790/// The `scid` and `dcid` parameters are the source connection ID and the
1791/// destination connection ID extracted from the received client's Initial
1792/// packet that advertises an unsupported version.
1793///
1794/// ## Examples:
1795///
1796/// ```no_run
1797/// # let mut buf = [0; 512];
1798/// # let mut out = [0; 512];
1799/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
1800/// let (len, src) = socket.recv_from(&mut buf).unwrap();
1801///
1802/// let hdr =
1803/// quiche::Header::from_slice(&mut buf[..len], quiche::MAX_CONN_ID_LEN)?;
1804///
1805/// if hdr.version != quiche::PROTOCOL_VERSION {
1806/// let len = quiche::negotiate_version(&hdr.scid, &hdr.dcid, &mut out)?;
1807/// socket.send_to(&out[..len], &src).unwrap();
1808/// }
1809/// # Ok::<(), quiche::Error>(())
1810/// ```
#[inline]
pub fn negotiate_version(
    scid: &ConnectionId, dcid: &ConnectionId, out: &mut [u8],
) -> Result<usize> {
    // Thin wrapper: the packet module encodes the version negotiation
    // packet into `out` and returns the number of bytes written.
    packet::negotiate_version(scid, dcid, out)
}
1817
1818/// Writes a stateless retry packet.
1819///
1820/// The `scid` and `dcid` parameters are the source connection ID and the
1821/// destination connection ID extracted from the received client's Initial
1822/// packet, while `new_scid` is the server's new source connection ID and
1823/// `token` is the address validation token the client needs to echo back.
1824///
1825/// The application is responsible for generating the address validation
1826/// token to be sent to the client, and verifying tokens sent back by the
1827/// client. The generated token should include the `dcid` parameter, such
1828/// that it can be later extracted from the token and passed to the
1829/// [`accept()`] function as its `odcid` parameter.
1830///
1831/// [`accept()`]: fn.accept.html
1832///
1833/// ## Examples:
1834///
1835/// ```no_run
1836/// # let mut config = quiche::Config::new(0xbabababa)?;
1837/// # let mut buf = [0; 512];
1838/// # let mut out = [0; 512];
1839/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
1840/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
1841/// # let local = socket.local_addr().unwrap();
1842/// # fn mint_token(hdr: &quiche::Header, src: &std::net::SocketAddr) -> Vec<u8> {
1843/// # vec![]
1844/// # }
1845/// # fn validate_token<'a>(src: &std::net::SocketAddr, token: &'a [u8]) -> Option<quiche::ConnectionId<'a>> {
1846/// # None
1847/// # }
1848/// let (len, peer) = socket.recv_from(&mut buf).unwrap();
1849///
1850/// let hdr = quiche::Header::from_slice(&mut buf[..len], quiche::MAX_CONN_ID_LEN)?;
1851///
1852/// let token = hdr.token.as_ref().unwrap();
1853///
1854/// // No token sent by client, create a new one.
1855/// if token.is_empty() {
1856/// let new_token = mint_token(&hdr, &peer);
1857///
1858/// let len = quiche::retry(
1859/// &hdr.scid, &hdr.dcid, &scid, &new_token, hdr.version, &mut out,
1860/// )?;
1861///
1862/// socket.send_to(&out[..len], &peer).unwrap();
1863/// return Ok(());
1864/// }
1865///
1866/// // Client sent token, validate it.
1867/// let odcid = validate_token(&peer, token);
1868///
1869/// if odcid.is_none() {
1870/// // Invalid address validation token.
1871/// return Ok(());
1872/// }
1873///
1874/// let conn = quiche::accept(&scid, odcid.as_ref(), local, peer, &mut config)?;
1875/// # Ok::<(), quiche::Error>(())
1876/// ```
#[inline]
pub fn retry(
    scid: &ConnectionId, dcid: &ConnectionId, new_scid: &ConnectionId,
    token: &[u8], version: u32, out: &mut [u8],
) -> Result<usize> {
    // Thin wrapper: the packet module encodes the Retry packet into `out`
    // and returns the number of bytes written.
    packet::retry(scid, dcid, new_scid, token, version, out)
}
1884
1885/// Returns true if the given protocol version is supported.
1886#[inline]
1887pub fn version_is_supported(version: u32) -> bool {
1888 matches!(version, PROTOCOL_VERSION_V1)
1889}
1890
1891/// Pushes a frame to the output packet if there is enough space.
1892///
1893/// Returns `true` on success, `false` otherwise. In case of failure it means
1894/// there is no room to add the frame in the packet. You may retry to add the
1895/// frame later.
macro_rules! push_frame_to_pkt {
    ($out:expr, $frames:expr, $frame:expr, $left:expr) => {{
        // Only push the frame if its wire encoding fits in the space that
        // is still available in the packet payload.
        if $frame.wire_len() <= $left {
            // Account for the bytes this frame will consume.
            $left -= $frame.wire_len();

            // Serialize the frame into the output buffer.
            $frame.to_bytes(&mut $out)?;

            // Record the frame as part of the packet being built.
            $frames.push($frame);

            true
        } else {
            false
        }
    }};
}
1911
/// Executes the provided body if the qlog feature is enabled, quiche has been
/// configured with a log writer, and the event's importance is within the
/// configured level.
macro_rules! qlog_with_type {
    ($ty:expr, $qlog:expr, $qlog_streamer_ref:ident, $body:block) => {{
        // The whole body is compiled out unless the `qlog` feature is on.
        #[cfg(feature = "qlog")]
        {
            // Only emit the event when its importance is within the
            // configured qlog level.
            if EventImportance::from($ty).is_contained_in(&$qlog.level) {
                // A streamer is only present once a writer was configured
                // via `set_qlog()`.
                if let Some($qlog_streamer_ref) = &mut $qlog.streamer {
                    $body
                }
            }
        }
    }};
}
1927
// Shorthand constants for the qlog event types emitted by the connection
// code below.
#[cfg(feature = "qlog")]
const QLOG_PARAMS_SET: EventType =
    EventType::QuicEventType(QuicEventType::ParametersSet);

#[cfg(feature = "qlog")]
const QLOG_PACKET_RX: EventType =
    EventType::QuicEventType(QuicEventType::PacketReceived);

#[cfg(feature = "qlog")]
const QLOG_PACKET_TX: EventType =
    EventType::QuicEventType(QuicEventType::PacketSent);

#[cfg(feature = "qlog")]
const QLOG_DATA_MV: EventType =
    EventType::QuicEventType(QuicEventType::StreamDataMoved);

#[cfg(feature = "qlog")]
const QLOG_METRICS: EventType =
    EventType::QuicEventType(QuicEventType::RecoveryMetricsUpdated);

#[cfg(feature = "qlog")]
const QLOG_CONNECTION_CLOSED: EventType =
    EventType::QuicEventType(QuicEventType::ConnectionClosed);
1951
// Per-connection qlog state.
#[cfg(feature = "qlog")]
struct QlogInfo {
    // Active qlog output stream; only present once a writer was configured
    // via `set_qlog()` / `set_qlog_with_level()`.
    streamer: Option<qlog::streamer::QlogStreamer>,
    // Whether the peer's transport parameters were already logged, so they
    // are emitted only once.
    logged_peer_params: bool,
    // Maximum event importance that gets written out.
    level: EventImportance,
}
1958
#[cfg(feature = "qlog")]
impl Default for QlogInfo {
    // Start with no streamer attached, nothing logged yet, and the `Base`
    // importance level.
    fn default() -> Self {
        Self {
            streamer: None,
            logged_peer_params: false,
            level: EventImportance::Base,
        }
    }
}
1969
1970impl<F: BufFactory> Connection<F> {
    /// Creates a new connection, deriving a fresh TLS handshake context
    /// from `config`'s TLS context and delegating the rest of the setup to
    /// `with_tls()`.
    fn new(
        scid: &ConnectionId, retry_cids: Option<RetryConnectionIds>,
        client_dcid: Option<&ConnectionId>, local: SocketAddr, peer: SocketAddr,
        config: &mut Config, is_server: bool,
    ) -> Result<Connection<F>> {
        let tls = config.tls_ctx.new_handshake()?;
        Connection::with_tls(
            scid,
            retry_cids,
            client_dcid,
            local,
            peer,
            config,
            tls,
            is_server,
        )
    }
1988
    /// Builds a `Connection` around an existing TLS handshake object.
    ///
    /// `retry_cids` (server-side, after sending a Retry) and `client_dcid`
    /// (client-side, `custom-client-dcid` feature) are mutually exclusive.
    #[allow(clippy::too_many_arguments)]
    fn with_tls(
        scid: &ConnectionId, retry_cids: Option<RetryConnectionIds>,
        client_dcid: Option<&ConnectionId>, local: SocketAddr, peer: SocketAddr,
        config: &Config, tls: tls::Handshake, is_server: bool,
    ) -> Result<Connection<F>> {
        if retry_cids.is_some() && client_dcid.is_some() {
            // These are exclusive, the caller should only specify one or the
            // other.
            return Err(Error::InvalidDcidInitialization);
        }
        #[cfg(feature = "custom-client-dcid")]
        if let Some(client_dcid) = client_dcid {
            // The minimum allowed client DCID length is 8 bytes.
            // See https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3
            if client_dcid.to_vec().len() < 8 {
                return Err(Error::InvalidDcidInitialization);
            }
        }
        // Without the feature, a caller-provided DCID is always rejected.
        #[cfg(not(feature = "custom-client-dcid"))]
        if client_dcid.is_some() {
            return Err(Error::InvalidDcidInitialization);
        }

        let max_rx_data = config.local_transport_params.initial_max_data;

        // The trace id is the hex-encoded SCID.
        let scid_as_hex: Vec<String> =
            scid.iter().map(|b| format!("{b:02x}")).collect();

        // Only servers advertise a stateless reset token.
        let reset_token = if is_server {
            config.local_transport_params.stateless_reset_token
        } else {
            None
        };

        let recovery_config = recovery::RecoveryConfig::from_config(config);

        let mut path = path::Path::new(
            local,
            peer,
            &recovery_config,
            config.path_challenge_recv_max_queue_len,
            true,
            Some(config),
        );

        // If we sent a Retry assume the peer's address is verified.
        path.verified_peer_address = retry_cids.is_some();
        // Assume clients validate the server's address implicitly.
        path.peer_verified_local_address = is_server;

        // Do not allocate more than the number of active CIDs.
        let paths = path::PathMap::new(
            path,
            config.local_transport_params.active_conn_id_limit as usize,
            is_server,
        );

        let active_path_id = paths.get_active_path_id()?;

        let ids = cid::ConnectionIdentifiers::new(
            config.local_transport_params.active_conn_id_limit as usize,
            scid,
            active_path_id,
            reset_token,
        );

        let initial_flow_control_window =
            if config.use_initial_max_data_as_flow_control_win {
                max_rx_data
            } else {
                cmp::min(max_rx_data / 2 * 3, DEFAULT_CONNECTION_WINDOW)
            };
        let mut conn = Connection {
            version: config.version,

            ids,

            trace_id: scid_as_hex.join(""),

            pkt_num_spaces: [
                packet::PktNumSpace::new(),
                packet::PktNumSpace::new(),
                packet::PktNumSpace::new(),
            ],

            crypto_ctx: [
                packet::CryptoContext::new(),
                packet::CryptoContext::new(),
                packet::CryptoContext::new(),
            ],

            next_pkt_num: 0,

            pkt_num_manager: packet::PktNumManager::new(),

            peer_transport_params: TransportParams::default(),

            peer_transport_params_track_unknown: config
                .track_unknown_transport_params,

            local_transport_params: config.local_transport_params.clone(),

            handshake: tls,

            session: None,

            recovery_config,

            paths,
            path_challenge_recv_max_queue_len: config
                .path_challenge_recv_max_queue_len,
            path_challenge_rx_count: 0,

            application_protos: config.application_protos.clone(),

            recv_count: 0,
            sent_count: 0,
            lost_count: 0,
            spurious_lost_count: 0,
            retrans_count: 0,
            dgram_sent_count: 0,
            dgram_recv_count: 0,
            sent_bytes: 0,
            recv_bytes: 0,
            acked_bytes: 0,
            lost_bytes: 0,

            rx_data: 0,
            flow_control: flowcontrol::FlowControl::new(
                max_rx_data,
                initial_flow_control_window,
                config.max_connection_window,
            ),
            should_send_max_data: false,
            should_send_max_streams_bidi: false,
            should_send_max_streams_uni: false,

            tx_cap: 0,
            tx_cap_factor: config.tx_cap_factor,

            tx_buffered: 0,
            tx_buffered_state: TxBufferTrackingState::Ok,

            tx_data: 0,
            max_tx_data: 0,
            last_tx_data: 0,

            stream_retrans_bytes: 0,

            streams: stream::StreamMap::new(
                config.local_transport_params.initial_max_streams_bidi,
                config.local_transport_params.initial_max_streams_uni,
                config.max_stream_window,
            ),

            odcid: None,

            rscid: None,

            token: None,

            local_error: None,

            peer_error: None,

            blocked_limit: None,

            idle_timer: None,

            draining_timer: None,

            undecryptable_pkts: VecDeque::new(),

            alpn: Vec::new(),

            is_server,

            derived_initial_secrets: false,

            did_version_negotiation: false,

            did_retry: false,

            got_peer_conn_id: false,

            // Assume clients validate the server's address implicitly.
            peer_verified_initial_address: is_server,

            parsed_peer_transport_params: false,

            handshake_completed: false,

            handshake_done_sent: false,
            handshake_done_acked: false,

            handshake_confirmed: false,

            key_phase: false,

            ack_eliciting_sent: false,

            closed: false,

            timed_out: false,

            grease: config.grease,

            enable_send_streams_blocked: config.enable_send_streams_blocked,

            keylog: None,

            #[cfg(feature = "qlog")]
            qlog: Default::default(),

            dgram_recv_queue: dgram::DatagramQueue::new(
                config.dgram_recv_max_queue_len,
            ),

            dgram_send_queue: dgram::DatagramQueue::new(
                config.dgram_send_max_queue_len,
            ),

            emit_dgram: true,

            disable_dcid_reuse: config.disable_dcid_reuse,

            reset_stream_local_count: 0,
            stopped_stream_local_count: 0,
            reset_stream_remote_count: 0,
            stopped_stream_remote_count: 0,

            data_blocked_sent_count: 0,
            stream_data_blocked_sent_count: 0,
            data_blocked_recv_count: 0,
            stream_data_blocked_recv_count: 0,

            streams_blocked_bidi_recv_count: 0,
            streams_blocked_uni_recv_count: 0,

            streams_blocked_bidi_state: Default::default(),
            streams_blocked_uni_state: Default::default(),

            max_amplification_factor: config.max_amplification_factor,
        };
        conn.streams.set_use_initial_max_data_as_flow_control_win(
            config.use_initial_max_data_as_flow_control_win,
        );

        // After a Retry, echo the original and retry CIDs in our transport
        // parameters so the client can validate them.
        if let Some(retry_cids) = retry_cids {
            conn.local_transport_params
                .original_destination_connection_id =
                Some(retry_cids.original_destination_cid.to_vec().into());

            conn.local_transport_params.retry_source_connection_id =
                Some(retry_cids.retry_source_cid.to_vec().into());

            conn.did_retry = true;
        }

        conn.local_transport_params.initial_source_connection_id =
            Some(conn.ids.get_scid(0)?.cid.to_vec().into());

        conn.handshake.init(is_server)?;

        conn.handshake
            .use_legacy_codepoint(config.version != PROTOCOL_VERSION_V1);

        conn.encode_transport_params()?;

        if !is_server {
            let dcid = if let Some(client_dcid) = client_dcid {
                // We already had a DCID generated for us, use it.
                client_dcid.to_vec()
            } else {
                // Generate a random 16-byte destination connection ID.
                let mut dcid = [0; 16];
                rand::rand_bytes(&mut dcid[..]);
                dcid.to_vec()
            };

            // Derive initial secrets for the client. We can do this here
            // because the destination connection ID is already known.
            let (aead_open, aead_seal) = crypto::derive_initial_key_material(
                &dcid,
                conn.version,
                conn.is_server,
                false,
            )?;

            let reset_token = conn.peer_transport_params.stateless_reset_token;
            conn.set_initial_dcid(
                dcid.to_vec().into(),
                reset_token,
                active_path_id,
            )?;

            conn.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
            conn.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);

            conn.derived_initial_secrets = true;
        }

        Ok(conn)
    }
2294
2295 /// Sets keylog output to the designated [`Writer`].
2296 ///
2297 /// This needs to be called as soon as the connection is created, to avoid
2298 /// missing some early logs.
2299 ///
2300 /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
    #[inline]
    pub fn set_keylog(&mut self, writer: Box<dyn std::io::Write + Send + Sync>) {
        // Replaces any previously configured keylog writer.
        self.keylog = Some(writer);
    }
2305
2306 /// Sets qlog output to the designated [`Writer`].
2307 ///
2308 /// Only events included in `QlogLevel::Base` are written. The serialization
2309 /// format is JSON-SEQ.
2310 ///
2311 /// This needs to be called as soon as the connection is created, to avoid
2312 /// missing some early logs.
2313 ///
2314 /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
    #[cfg(feature = "qlog")]
    #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
    pub fn set_qlog(
        &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
        description: String,
    ) {
        // Convenience wrapper that defaults to the `Base` qlog level.
        self.set_qlog_with_level(writer, title, description, QlogLevel::Base)
    }
2323
2324 /// Sets qlog output to the designated [`Writer`].
2325 ///
2326 /// Only qlog events included in the specified `QlogLevel` are written. The
2327 /// serialization format is JSON-SEQ.
2328 ///
2329 /// This needs to be called as soon as the connection is created, to avoid
2330 /// missing some early logs.
2331 ///
2332 /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
2333 #[cfg(feature = "qlog")]
2334 #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
2335 pub fn set_qlog_with_level(
2336 &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
2337 description: String, qlog_level: QlogLevel,
2338 ) {
2339 use qlog::events::quic::TransportInitiator;
2340 use qlog::events::HTTP3_URI;
2341 use qlog::events::QUIC_URI;
2342 use qlog::CommonFields;
2343 use qlog::ReferenceTime;
2344
2345 let vp = if self.is_server {
2346 qlog::VantagePointType::Server
2347 } else {
2348 qlog::VantagePointType::Client
2349 };
2350
2351 let level = match qlog_level {
2352 QlogLevel::Core => EventImportance::Core,
2353
2354 QlogLevel::Base => EventImportance::Base,
2355
2356 QlogLevel::Extra => EventImportance::Extra,
2357 };
2358
2359 self.qlog.level = level;
2360
2361 // Best effort to get Instant::now() and SystemTime::now() as closely
2362 // together as possible.
2363 let now = Instant::now();
2364 let now_wall_clock = std::time::SystemTime::now();
2365 let common_fields = CommonFields {
2366 reference_time: ReferenceTime::new_monotonic(Some(now_wall_clock)),
2367 ..Default::default()
2368 };
2369 let trace = qlog::TraceSeq::new(
2370 Some(title.to_string()),
2371 Some(description.to_string()),
2372 Some(common_fields),
2373 Some(qlog::VantagePoint {
2374 name: None,
2375 ty: vp,
2376 flow: None,
2377 }),
2378 vec![QUIC_URI.to_string(), HTTP3_URI.to_string()],
2379 );
2380
2381 let mut streamer = qlog::streamer::QlogStreamer::new(
2382 Some(title),
2383 Some(description),
2384 now,
2385 trace,
2386 self.qlog.level,
2387 writer,
2388 );
2389
2390 streamer.start_log().ok();
2391
2392 let ev_data = self
2393 .local_transport_params
2394 .to_qlog(TransportInitiator::Local, self.handshake.cipher());
2395
2396 // This event occurs very early, so just mark the relative time as 0.0.
2397 streamer.add_event(Event::with_time(0.0, ev_data)).ok();
2398
2399 self.qlog.streamer = Some(streamer);
2400 }
2401
2402 /// Returns a mutable reference to the QlogStreamer, if it exists.
    #[cfg(feature = "qlog")]
    #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
    pub fn qlog_streamer(&mut self) -> Option<&mut qlog::streamer::QlogStreamer> {
        // Only present when qlog output was configured via `set_qlog()`.
        self.qlog.streamer.as_mut()
    }
2408
2409 /// Configures the given session for resumption.
2410 ///
2411 /// On the client, this can be used to offer the given serialized session,
2412 /// as returned by [`session()`], for resumption.
2413 ///
2414 /// This must only be called immediately after creating a connection, that
2415 /// is, before any packet is sent or received.
2416 ///
2417 /// [`session()`]: struct.Connection.html#method.session
2418 #[inline]
2419 pub fn set_session(&mut self, session: &[u8]) -> Result<()> {
2420 let mut b = octets::Octets::with_slice(session);
2421
2422 let session_len = b.get_u64()? as usize;
2423 let session_bytes = b.get_bytes(session_len)?;
2424
2425 self.handshake.set_session(session_bytes.as_ref())?;
2426
2427 let raw_params_len = b.get_u64()? as usize;
2428 let raw_params_bytes = b.get_bytes(raw_params_len)?;
2429
2430 let peer_params = TransportParams::decode(
2431 raw_params_bytes.as_ref(),
2432 self.is_server,
2433 self.peer_transport_params_track_unknown,
2434 )?;
2435
2436 self.process_peer_transport_params(peer_params)?;
2437
2438 Ok(())
2439 }
2440
2441 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
2442 ///
2443 /// This must only be called immediately after creating a connection, that
2444 /// is, before any packet is sent or received.
2445 ///
2446 /// The default value is infinite, that is, no timeout is used unless
2447 /// already configured when creating the connection.
2448 pub fn set_max_idle_timeout(&mut self, v: u64) -> Result<()> {
2449 self.local_transport_params.max_idle_timeout =
2450 cmp::min(v, octets::MAX_VAR_INT);
2451
2452 self.encode_transport_params()
2453 }
2454
2455 /// Sets the congestion control algorithm used.
2456 ///
2457 /// This function can only be called inside one of BoringSSL's handshake
2458 /// callbacks, before any packet has been sent. Calling this function any
2459 /// other time will have no effect.
2460 ///
2461 /// See [`Config::set_cc_algorithm()`].
2462 ///
2463 /// [`Config::set_cc_algorithm()`]: struct.Config.html#method.set_cc_algorithm
2464 #[cfg(feature = "boringssl-boring-crate")]
2465 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2466 pub fn set_cc_algorithm_in_handshake(
2467 ssl: &mut boring::ssl::SslRef, algo: CongestionControlAlgorithm,
2468 ) -> Result<()> {
2469 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2470
2471 ex_data.recovery_config.cc_algorithm = algo;
2472
2473 Ok(())
2474 }
2475
2476 /// Sets custom BBR settings.
2477 ///
2478 /// This API is experimental and will be removed in the future.
2479 ///
2480 /// Currently this only applies if cc_algorithm is
2481 /// `CongestionControlAlgorithm::Bbr2Gcongestion` is set.
2482 ///
2483 /// This function can only be called inside one of BoringSSL's handshake
2484 /// callbacks, before any packet has been sent. Calling this function any
2485 /// other time will have no effect.
2486 ///
2487 /// See [`Config::set_custom_bbr_settings()`].
2488 ///
2489 /// [`Config::set_custom_bbr_settings()`]: struct.Config.html#method.set_custom_bbr_settings
2490 #[cfg(all(feature = "boringssl-boring-crate", feature = "internal"))]
2491 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2492 #[doc(hidden)]
2493 pub fn set_custom_bbr_settings_in_handshake(
2494 ssl: &mut boring::ssl::SslRef, custom_bbr_params: BbrParams,
2495 ) -> Result<()> {
2496 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2497
2498 ex_data.recovery_config.custom_bbr_params = Some(custom_bbr_params);
2499
2500 Ok(())
2501 }
2502
2503 /// Sets the congestion control algorithm used by string.
2504 ///
2505 /// This function can only be called inside one of BoringSSL's handshake
2506 /// callbacks, before any packet has been sent. Calling this function any
2507 /// other time will have no effect.
2508 ///
2509 /// See [`Config::set_cc_algorithm_name()`].
2510 ///
2511 /// [`Config::set_cc_algorithm_name()`]: struct.Config.html#method.set_cc_algorithm_name
2512 #[cfg(feature = "boringssl-boring-crate")]
2513 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2514 pub fn set_cc_algorithm_name_in_handshake(
2515 ssl: &mut boring::ssl::SslRef, name: &str,
2516 ) -> Result<()> {
2517 let cc_algo = CongestionControlAlgorithm::from_str(name)?;
2518 Self::set_cc_algorithm_in_handshake(ssl, cc_algo)
2519 }
2520
2521 /// Sets initial congestion window size in terms of packet count.
2522 ///
2523 /// This function can only be called inside one of BoringSSL's handshake
2524 /// callbacks, before any packet has been sent. Calling this function any
2525 /// other time will have no effect.
2526 ///
2527 /// See [`Config::set_initial_congestion_window_packets()`].
2528 ///
2529 /// [`Config::set_initial_congestion_window_packets()`]: struct.Config.html#method.set_initial_congestion_window_packets
2530 #[cfg(feature = "boringssl-boring-crate")]
2531 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2532 pub fn set_initial_congestion_window_packets_in_handshake(
2533 ssl: &mut boring::ssl::SslRef, packets: usize,
2534 ) -> Result<()> {
2535 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2536
2537 ex_data.recovery_config.initial_congestion_window_packets = packets;
2538
2539 Ok(())
2540 }
2541
2542 /// Configure whether to enable relaxed loss detection on spurious loss.
2543 ///
2544 /// This function can only be called inside one of BoringSSL's handshake
2545 /// callbacks, before any packet has been sent. Calling this function any
2546 /// other time will have no effect.
2547 ///
2548 /// See [`Config::set_enable_relaxed_loss_threshold()`].
2549 ///
2550 /// [`Config::set_enable_relaxed_loss_threshold()`]: struct.Config.html#method.set_enable_relaxed_loss_threshold
2551 #[cfg(feature = "boringssl-boring-crate")]
2552 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2553 pub fn set_enable_relaxed_loss_threshold_in_handshake(
2554 ssl: &mut boring::ssl::SslRef, enable: bool,
2555 ) -> Result<()> {
2556 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2557
2558 ex_data.recovery_config.enable_relaxed_loss_threshold = enable;
2559
2560 Ok(())
2561 }
2562
2563 /// Configure whether to enable the CUBIC idle restart fix.
2564 ///
2565 /// This function can only be called inside one of BoringSSL's handshake
2566 /// callbacks, before any packet has been sent. Calling this function any
2567 /// other time will have no effect.
2568 ///
2569 /// See [`Config::set_enable_cubic_idle_restart_fix()`].
2570 ///
2571 /// [`Config::set_enable_cubic_idle_restart_fix()`]: struct.Config.html#method.set_enable_cubic_idle_restart_fix
2572 #[cfg(feature = "boringssl-boring-crate")]
2573 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2574 pub fn set_enable_cubic_idle_restart_fix_in_handshake(
2575 ssl: &mut boring::ssl::SslRef, enable: bool,
2576 ) -> Result<()> {
2577 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2578
2579 ex_data.recovery_config.enable_cubic_idle_restart_fix = enable;
2580
2581 Ok(())
2582 }
2583
2584 /// Configures whether to enable HyStart++.
2585 ///
2586 /// This function can only be called inside one of BoringSSL's handshake
2587 /// callbacks, before any packet has been sent. Calling this function any
2588 /// other time will have no effect.
2589 ///
2590 /// See [`Config::enable_hystart()`].
2591 ///
2592 /// [`Config::enable_hystart()`]: struct.Config.html#method.enable_hystart
2593 #[cfg(feature = "boringssl-boring-crate")]
2594 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2595 pub fn set_hystart_in_handshake(
2596 ssl: &mut boring::ssl::SslRef, v: bool,
2597 ) -> Result<()> {
2598 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2599
2600 ex_data.recovery_config.hystart = v;
2601
2602 Ok(())
2603 }
2604
2605 /// Configures whether to enable pacing.
2606 ///
2607 /// This function can only be called inside one of BoringSSL's handshake
2608 /// callbacks, before any packet has been sent. Calling this function any
2609 /// other time will have no effect.
2610 ///
2611 /// See [`Config::enable_pacing()`].
2612 ///
2613 /// [`Config::enable_pacing()`]: struct.Config.html#method.enable_pacing
2614 #[cfg(feature = "boringssl-boring-crate")]
2615 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2616 pub fn set_pacing_in_handshake(
2617 ssl: &mut boring::ssl::SslRef, v: bool,
2618 ) -> Result<()> {
2619 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2620
2621 ex_data.recovery_config.pacing = v;
2622
2623 Ok(())
2624 }
2625
2626 /// Sets the max value for pacing rate.
2627 ///
2628 /// This function can only be called inside one of BoringSSL's handshake
2629 /// callbacks, before any packet has been sent. Calling this function any
2630 /// other time will have no effect.
2631 ///
2632 /// See [`Config::set_max_pacing_rate()`].
2633 ///
2634 /// [`Config::set_max_pacing_rate()`]: struct.Config.html#method.set_max_pacing_rate
2635 #[cfg(feature = "boringssl-boring-crate")]
2636 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2637 pub fn set_max_pacing_rate_in_handshake(
2638 ssl: &mut boring::ssl::SslRef, v: Option<u64>,
2639 ) -> Result<()> {
2640 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2641
2642 ex_data.recovery_config.max_pacing_rate = v;
2643
2644 Ok(())
2645 }
2646
2647 /// Sets the maximum outgoing UDP payload size.
2648 ///
2649 /// This function can only be called inside one of BoringSSL's handshake
2650 /// callbacks, before any packet has been sent. Calling this function any
2651 /// other time will have no effect.
2652 ///
2653 /// See [`Config::set_max_send_udp_payload_size()`].
2654 ///
2655 /// [`Config::set_max_send_udp_payload_size()`]: struct.Config.html#method.set_max_send_udp_payload_size
2656 #[cfg(feature = "boringssl-boring-crate")]
2657 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2658 pub fn set_max_send_udp_payload_size_in_handshake(
2659 ssl: &mut boring::ssl::SslRef, v: usize,
2660 ) -> Result<()> {
2661 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2662
2663 ex_data.recovery_config.max_send_udp_payload_size = v;
2664
2665 Ok(())
2666 }
2667
2668 /// Sets the send capacity factor.
2669 ///
2670 /// This function can only be called inside one of BoringSSL's handshake
2671 /// callbacks, before any packet has been sent. Calling this function any
2672 /// other time will have no effect.
2673 ///
2674 /// See [`Config::set_send_capacity_factor()`].
2675 ///
    /// [`Config::set_send_capacity_factor()`]: struct.Config.html#method.set_send_capacity_factor
2677 #[cfg(feature = "boringssl-boring-crate")]
2678 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2679 pub fn set_send_capacity_factor_in_handshake(
2680 ssl: &mut boring::ssl::SslRef, v: f64,
2681 ) -> Result<()> {
2682 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2683
2684 ex_data.tx_cap_factor = v;
2685
2686 Ok(())
2687 }
2688
2689 /// Configures whether to do path MTU discovery.
2690 ///
2691 /// This function can only be called inside one of BoringSSL's handshake
2692 /// callbacks, before any packet has been sent. Calling this function any
2693 /// other time will have no effect.
2694 ///
2695 /// See [`Config::discover_pmtu()`].
2696 ///
2697 /// [`Config::discover_pmtu()`]: struct.Config.html#method.discover_pmtu
2698 #[cfg(feature = "boringssl-boring-crate")]
2699 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2700 pub fn set_discover_pmtu_in_handshake(
2701 ssl: &mut boring::ssl::SslRef, discover: bool, max_probes: u8,
2702 ) -> Result<()> {
2703 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2704
2705 ex_data.pmtud = Some((discover, max_probes));
2706
2707 Ok(())
2708 }
2709
2710 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
2711 ///
2712 /// This function can only be called inside one of BoringSSL's handshake
2713 /// callbacks, before any packet has been sent. Calling this function any
2714 /// other time will have no effect.
2715 ///
2716 /// See [`Config::set_max_idle_timeout()`].
2717 ///
2718 /// [`Config::set_max_idle_timeout()`]: struct.Config.html#method.set_max_idle_timeout
2719 #[cfg(feature = "boringssl-boring-crate")]
2720 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2721 pub fn set_max_idle_timeout_in_handshake(
2722 ssl: &mut boring::ssl::SslRef, v: u64,
2723 ) -> Result<()> {
2724 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2725
2726 ex_data.local_transport_params.max_idle_timeout = v;
2727
2728 Self::set_transport_parameters_in_hanshake(
2729 ex_data.local_transport_params.clone(),
2730 ex_data.is_server,
2731 ssl,
2732 )
2733 }
2734
2735 /// Sets the `initial_max_streams_bidi` transport parameter.
2736 ///
2737 /// This function can only be called inside one of BoringSSL's handshake
2738 /// callbacks, before any packet has been sent. Calling this function any
2739 /// other time will have no effect.
2740 ///
2741 /// See [`Config::set_initial_max_streams_bidi()`].
2742 ///
2743 /// [`Config::set_initial_max_streams_bidi()`]: struct.Config.html#method.set_initial_max_streams_bidi
2744 #[cfg(feature = "boringssl-boring-crate")]
2745 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2746 pub fn set_initial_max_streams_bidi_in_handshake(
2747 ssl: &mut boring::ssl::SslRef, v: u64,
2748 ) -> Result<()> {
2749 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2750
2751 ex_data.local_transport_params.initial_max_streams_bidi = v;
2752
2753 Self::set_transport_parameters_in_hanshake(
2754 ex_data.local_transport_params.clone(),
2755 ex_data.is_server,
2756 ssl,
2757 )
2758 }
2759
2760 #[cfg(feature = "boringssl-boring-crate")]
2761 fn set_transport_parameters_in_hanshake(
2762 params: TransportParams, is_server: bool, ssl: &mut boring::ssl::SslRef,
2763 ) -> Result<()> {
2764 use foreign_types_shared::ForeignTypeRef;
2765
2766 // In order to apply the new parameter to the TLS state before TPs are
2767 // written into a TLS message, we need to re-encode all TPs immediately.
2768 //
2769 // Since we don't have direct access to the main `Connection` object, we
2770 // need to re-create the `Handshake` state from the `SslRef`.
2771 //
2772 // SAFETY: the `Handshake` object must not be drop()ed, otherwise it
2773 // would free the underlying BoringSSL structure.
2774 let mut handshake =
2775 unsafe { tls::Handshake::from_ptr(ssl.as_ptr() as _) };
2776 handshake.set_quic_transport_params(¶ms, is_server)?;
2777
2778 // Avoid running `drop(handshake)` as that would free the underlying
2779 // handshake state.
2780 std::mem::forget(handshake);
2781
2782 Ok(())
2783 }
2784
2785 /// Sets the `use_initial_max_data_as_flow_control_win` flag during SSL
2786 /// handshake.
2787 ///
2788 /// This function can only be called inside one of BoringSSL's handshake
2789 /// callbacks, before any packet has been sent. Calling this function any
2790 /// other time will have no effect.
2791 ///
2792 /// See [`Connection::enable_use_initial_max_data_as_flow_control_win()`].
2793 #[cfg(feature = "boringssl-boring-crate")]
2794 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2795 pub fn set_use_initial_max_data_as_flow_control_win_in_handshake(
2796 ssl: &mut boring::ssl::SslRef,
2797 ) -> Result<()> {
2798 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2799
2800 ex_data.use_initial_max_data_as_flow_control_win = true;
2801 Ok(())
2802 }
2803
2804 /// Processes QUIC packets received from the peer.
2805 ///
2806 /// On success the number of bytes processed from the input buffer is
2807 /// returned. On error the connection will be closed by calling [`close()`]
2808 /// with the appropriate error code.
2809 ///
2810 /// Coalesced packets will be processed as necessary.
2811 ///
2812 /// Note that the contents of the input buffer `buf` might be modified by
2813 /// this function due to, for example, in-place decryption.
2814 ///
2815 /// [`close()`]: struct.Connection.html#method.close
2816 ///
2817 /// ## Examples:
2818 ///
2819 /// ```no_run
2820 /// # let mut buf = [0; 512];
2821 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
2822 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
2823 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
2824 /// # let peer = "127.0.0.1:1234".parse().unwrap();
2825 /// # let local = socket.local_addr().unwrap();
2826 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
2827 /// loop {
2828 /// let (read, from) = socket.recv_from(&mut buf).unwrap();
2829 ///
2830 /// let recv_info = quiche::RecvInfo {
2831 /// from,
2832 /// to: local,
2833 /// };
2834 ///
2835 /// let read = match conn.recv(&mut buf[..read], recv_info) {
2836 /// Ok(v) => v,
2837 ///
2838 /// Err(e) => {
2839 /// // An error occurred, handle it.
2840 /// break;
2841 /// },
2842 /// };
2843 /// }
2844 /// # Ok::<(), quiche::Error>(())
2845 /// ```
    pub fn recv(&mut self, buf: &mut [u8], info: RecvInfo) -> Result<usize> {
        let len = buf.len();

        if len == 0 {
            return Err(Error::BufferTooShort);
        }

        // Map the 4-tuple to an existing path, if any.
        let recv_pid = self.paths.path_id_from_addrs(&(info.to, info.from));

        if let Some(recv_pid) = recv_pid {
            let recv_path = self.paths.get_mut(recv_pid)?;

            // Keep track of how many bytes we received from the client, so we
            // can limit bytes sent back before address validation, to a
            // multiple of this. The limit needs to be increased early on, so
            // that if there is an error there is enough credit to send a
            // CONNECTION_CLOSE.
            //
            // It doesn't matter if the packets received were valid or not, we
            // only need to track the total amount of bytes received.
            //
            // Note that we also need to limit the number of bytes we sent on a
            // path if we are not the host that initiated its usage.
            if self.is_server && !recv_path.verified_peer_address {
                recv_path.max_send_bytes += len * self.max_amplification_factor;
            }
        } else if !self.is_server {
            // If a client receives packets from an unknown server address,
            // the client MUST discard these packets.
            trace!(
                "{} client received packet from unknown address {:?}, dropping",
                self.trace_id,
                info,
            );

            return Ok(len);
        }

        // `done` counts bytes successfully processed; `left` counts bytes of
        // the datagram still to be examined.
        let mut done = 0;
        let mut left = len;

        // Process coalesced packets.
        while left > 0 {
            let read = match self.recv_single(
                &mut buf[len - left..len],
                &info,
                recv_pid,
            ) {
                Ok(v) => v,

                Err(Error::Done) => {
                    // If the packet can't be processed or decrypted, check if
                    // it's a stateless reset.
                    if self.is_stateless_reset(&buf[len - left..len]) {
                        trace!("{} packet is a stateless reset", self.trace_id);

                        self.mark_closed();
                    }

                    // Consume the rest of the datagram; nothing further can
                    // be parsed from it.
                    left
                },

                Err(e) => {
                    // In case of error processing the incoming packet, close
                    // the connection.
                    self.close(false, e.to_wire(), b"").ok();
                    return Err(e);
                },
            };

            done += read;
            left -= read;
        }

        // Even though the packet was previously "accepted", it
        // should be safe to forward the error, as it also comes
        // from the `recv()` method.
        self.process_undecrypted_0rtt_packets()?;

        Ok(done)
    }
2927
2928 fn process_undecrypted_0rtt_packets(&mut self) -> Result<()> {
2929 // Process previously undecryptable 0-RTT packets if the decryption key
2930 // is now available.
2931 if self.crypto_ctx[packet::Epoch::Application]
2932 .crypto_0rtt_open
2933 .is_some()
2934 {
2935 while let Some((mut pkt, info)) = self.undecryptable_pkts.pop_front()
2936 {
2937 if let Err(e) = self.recv(&mut pkt, info) {
2938 self.undecryptable_pkts.clear();
2939
2940 return Err(e);
2941 }
2942 }
2943 }
2944 Ok(())
2945 }
2946
2947 /// Returns true if a QUIC packet is a stateless reset.
2948 fn is_stateless_reset(&self, buf: &[u8]) -> bool {
2949 // If the packet is too small, then we just throw it away.
2950 let buf_len = buf.len();
2951 if buf_len < 21 {
2952 return false;
2953 }
2954
2955 // TODO: we should iterate over all active destination connection IDs
2956 // and check against their reset token.
2957 match self.peer_transport_params.stateless_reset_token {
2958 Some(token) => {
2959 let token_len = 16;
2960
2961 crypto::verify_slices_are_equal(
2962 &token.to_be_bytes(),
2963 &buf[buf_len - token_len..buf_len],
2964 )
2965 .is_ok()
2966 },
2967
2968 None => false,
2969 }
2970 }
2971
2972 /// Processes a single QUIC packet received from the peer.
2973 ///
2974 /// On success the number of bytes processed from the input buffer is
2975 /// returned. When the [`Done`] error is returned, processing of the
2976 /// remainder of the incoming UDP datagram should be interrupted.
2977 ///
2978 /// Note that a server might observe a new 4-tuple, preventing to
2979 /// know in advance to which path the incoming packet belongs to (`recv_pid`
2980 /// is `None`). As a client, packets from unknown 4-tuple are dropped
2981 /// beforehand (see `recv()`).
2982 ///
2983 /// On error, an error other than [`Done`] is returned.
2984 ///
2985 /// [`Done`]: enum.Error.html#variant.Done
    fn recv_single(
        &mut self, buf: &mut [u8], info: &RecvInfo, recv_pid: Option<usize>,
    ) -> Result<usize> {
        let now = Instant::now();

        if buf.is_empty() {
            return Err(Error::Done);
        }

        if self.is_closed() || self.is_draining() {
            return Err(Error::Done);
        }

        let is_closing = self.local_error.is_some();

        if is_closing {
            return Err(Error::Done);
        }

        // Keep the original input length around; it is needed later for path
        // bookkeeping once `b` has borrowed the buffer.
        let buf_len = buf.len();

        let mut b = octets::OctetsMut::with_slice(buf);

        let mut hdr = Header::from_bytes(&mut b, self.source_id().len())
            .map_err(|e| {
                drop_pkt_on_err(
                    e,
                    self.recv_count,
                    self.is_server,
                    &self.trace_id,
                )
            })?;

        if hdr.ty == Type::VersionNegotiation {
            // Version negotiation packets can only be sent by the server.
            if self.is_server {
                return Err(Error::Done);
            }

            // Ignore duplicate version negotiation.
            if self.did_version_negotiation {
                return Err(Error::Done);
            }

            // Ignore version negotiation if any other packet has already been
            // successfully processed.
            if self.recv_count > 0 {
                return Err(Error::Done);
            }

            if hdr.dcid != self.source_id() {
                return Err(Error::Done);
            }

            if hdr.scid != self.destination_id() {
                return Err(Error::Done);
            }

            trace!("{} rx pkt {:?}", self.trace_id, hdr);

            let versions = hdr.versions.ok_or(Error::Done)?;

            // Ignore version negotiation if the version already selected is
            // listed.
            if versions.contains(&self.version) {
                return Err(Error::Done);
            }

            let supported_versions =
                versions.iter().filter(|&&v| version_is_supported(v));

            let mut found_version = false;

            for &v in supported_versions {
                found_version = true;

                // The final version takes precedence over draft ones.
                if v == PROTOCOL_VERSION_V1 {
                    self.version = v;
                    break;
                }

                self.version = cmp::max(self.version, v);
            }

            if !found_version {
                // We don't support any of the versions offered.
                //
                // While a man-in-the-middle attacker might be able to
                // inject a version negotiation packet that triggers this
                // failure, the window of opportunity is very small and
                // this error is quite useful for debugging, so don't just
                // ignore the packet.
                return Err(Error::UnknownVersion);
            }

            self.did_version_negotiation = true;

            // Derive Initial secrets based on the new version.
            let (aead_open, aead_seal) = crypto::derive_initial_key_material(
                &self.destination_id(),
                self.version,
                self.is_server,
                true,
            )?;

            // Reset connection state to force sending another Initial packet.
            self.drop_epoch_state(packet::Epoch::Initial, now);
            self.got_peer_conn_id = false;
            self.handshake.clear()?;

            self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
            self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);

            self.handshake
                .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);

            // Encode transport parameters again, as the new version might be
            // using a different format.
            self.encode_transport_params()?;

            return Err(Error::Done);
        }

        if hdr.ty == Type::Retry {
            // Retry packets can only be sent by the server.
            if self.is_server {
                return Err(Error::Done);
            }

            // Ignore duplicate retry.
            if self.did_retry {
                return Err(Error::Done);
            }

            // Check if Retry packet is valid.
            if packet::verify_retry_integrity(
                &b,
                &self.destination_id(),
                self.version,
            )
            .is_err()
            {
                return Err(Error::Done);
            }

            trace!("{} rx pkt {:?}", self.trace_id, hdr);

            self.token = hdr.token;
            self.did_retry = true;

            // Remember peer's new connection ID.
            self.odcid = Some(self.destination_id().into_owned());

            self.set_initial_dcid(
                hdr.scid.clone(),
                None,
                self.paths.get_active_path_id()?,
            )?;

            self.rscid = Some(self.destination_id().into_owned());

            // Derive Initial secrets using the new connection ID.
            let (aead_open, aead_seal) = crypto::derive_initial_key_material(
                &hdr.scid,
                self.version,
                self.is_server,
                true,
            )?;

            // Reset connection state to force sending another Initial packet.
            self.drop_epoch_state(packet::Epoch::Initial, now);
            self.got_peer_conn_id = false;
            self.handshake.clear()?;

            self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
            self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);

            return Err(Error::Done);
        }

        if self.is_server && !self.did_version_negotiation {
            if !version_is_supported(hdr.version) {
                return Err(Error::UnknownVersion);
            }

            self.version = hdr.version;
            self.did_version_negotiation = true;

            self.handshake
                .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);

            // Encode transport parameters again, as the new version might be
            // using a different format.
            self.encode_transport_params()?;
        }

        if hdr.ty != Type::Short && hdr.version != self.version {
            // At this point version negotiation was already performed, so
            // ignore packets that don't match the connection's version.
            return Err(Error::Done);
        }

        // Long header packets have an explicit payload length, but short
        // packets don't so just use the remaining capacity in the buffer.
        let payload_len = if hdr.ty == Type::Short {
            b.cap()
        } else {
            b.get_varint().map_err(|e| {
                drop_pkt_on_err(
                    e.into(),
                    self.recv_count,
                    self.is_server,
                    &self.trace_id,
                )
            })? as usize
        };

        // Make sure the buffer is same or larger than an explicit
        // payload length.
        if payload_len > b.cap() {
            return Err(drop_pkt_on_err(
                Error::InvalidPacket,
                self.recv_count,
                self.is_server,
                &self.trace_id,
            ));
        }

        // Derive initial secrets on the server.
        if !self.derived_initial_secrets {
            let (aead_open, aead_seal) = crypto::derive_initial_key_material(
                &hdr.dcid,
                self.version,
                self.is_server,
                false,
            )?;

            self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
            self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);

            self.derived_initial_secrets = true;
        }

        // Select packet number space epoch based on the received packet's type.
        let epoch = hdr.ty.to_epoch()?;

        // Select AEAD context used to open incoming packet.
        let aead = if hdr.ty == Type::ZeroRTT {
            // Only use 0-RTT key if incoming packet is 0-RTT.
            self.crypto_ctx[epoch].crypto_0rtt_open.as_ref()
        } else {
            // Otherwise use the packet number space's main key.
            self.crypto_ctx[epoch].crypto_open.as_ref()
        };

        // Finally, discard packet if no usable key is available.
        let mut aead = match aead {
            Some(v) => v,

            None => {
                if hdr.ty == Type::ZeroRTT &&
                    self.undecryptable_pkts.len() < MAX_UNDECRYPTABLE_PACKETS &&
                    !self.is_established()
                {
                    // Buffer 0-RTT packets when the required read key is not
                    // available yet, and process them later.
                    //
                    // TODO: in the future we might want to buffer other types
                    // of undecryptable packets as well.
                    let pkt_len = b.off() + payload_len;
                    let pkt = (b.buf()[..pkt_len]).to_vec();

                    self.undecryptable_pkts.push_back((pkt, *info));
                    return Ok(pkt_len);
                }

                let e = drop_pkt_on_err(
                    Error::CryptoFail,
                    self.recv_count,
                    self.is_server,
                    &self.trace_id,
                );

                return Err(e);
            },
        };

        let aead_tag_len = aead.alg().tag_len();

        packet::decrypt_hdr(&mut b, &mut hdr, aead).map_err(|e| {
            drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
        })?;

        let pn = packet::decode_pkt_num(
            self.pkt_num_spaces[epoch].largest_rx_pkt_num,
            hdr.pkt_num,
            hdr.pkt_num_len,
        );

        let pn_len = hdr.pkt_num_len;

        trace!(
            "{} rx pkt {:?} len={} pn={} {}",
            self.trace_id,
            hdr,
            payload_len,
            pn,
            AddrTupleFmt(info.from, info.to)
        );

        #[cfg(feature = "qlog")]
        let mut qlog_frames = vec![];

        // Check for key update.
        let mut aead_next = None;

        if self.handshake_confirmed &&
            hdr.ty != Type::ZeroRTT &&
            hdr.key_phase != self.key_phase
        {
            // Check if this packet arrived before key update.
            if let Some(key_update) = self.crypto_ctx[epoch]
                .key_update
                .as_ref()
                .and_then(|key_update| {
                    (pn < key_update.pn_on_update).then_some(key_update)
                })
            {
                aead = &key_update.crypto_open;
            } else {
                trace!("{} peer-initiated key update", self.trace_id);

                aead_next = Some((
                    self.crypto_ctx[epoch]
                        .crypto_open
                        .as_ref()
                        .unwrap()
                        .derive_next_packet_key()?,
                    self.crypto_ctx[epoch]
                        .crypto_seal
                        .as_ref()
                        .unwrap()
                        .derive_next_packet_key()?,
                ));

                // `aead_next` is always `Some()` at this point, so the `unwrap()`
                // will never fail.
                aead = &aead_next.as_ref().unwrap().0;
            }
        }

        let mut payload = packet::decrypt_pkt(
            &mut b,
            pn,
            pn_len,
            payload_len,
            aead,
        )
        .map_err(|e| {
            drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
        })?;

        if self.pkt_num_spaces[epoch].recv_pkt_num.contains(pn) {
            trace!("{} ignored duplicate packet {}", self.trace_id, pn);
            return Err(Error::Done);
        }

        // Packets with no frames are invalid.
        if payload.cap() == 0 {
            return Err(Error::InvalidPacket);
        }

        // Now that we decrypted the packet, let's see if we can map it to an
        // existing path.
        let recv_pid = if hdr.ty == Type::Short && self.got_peer_conn_id {
            let pkt_dcid = ConnectionId::from_ref(&hdr.dcid);
            self.get_or_create_recv_path_id(recv_pid, &pkt_dcid, buf_len, info)?
        } else {
            // During handshake, we are on the initial path.
            self.paths.get_active_path_id()?
        };

        // The key update is verified once a packet is successfully decrypted
        // using the new keys.
        if let Some((open_next, seal_next)) = aead_next {
            if !self.crypto_ctx[epoch]
                .key_update
                .as_ref()
                .is_none_or(|prev| prev.update_acked)
            {
                // Peer has updated keys twice without awaiting confirmation.
                return Err(Error::KeyUpdate);
            }

            trace!("{} key update verified", self.trace_id);

            let _ = self.crypto_ctx[epoch].crypto_seal.replace(seal_next);

            let open_prev = self.crypto_ctx[epoch]
                .crypto_open
                .replace(open_next)
                .unwrap();

            let recv_path = self.paths.get_mut(recv_pid)?;

            // Keep the previous read key around until the update is confirmed,
            // so reordered packets from before the update can still be opened.
            self.crypto_ctx[epoch].key_update = Some(packet::KeyUpdate {
                crypto_open: open_prev,
                pn_on_update: pn,
                update_acked: false,
                timer: now + (recv_path.recovery.pto() * 3),
            });

            self.key_phase = !self.key_phase;

            qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
                let trigger = Some(
                    qlog::events::quic::KeyUpdateOrRetiredTrigger::RemoteUpdate,
                );

                let ev_data_client =
                    EventData::QuicKeyUpdated(qlog::events::quic::KeyUpdated {
                        key_type: qlog::events::quic::KeyType::Client1RttSecret,
                        trigger: trigger.clone(),
                        ..Default::default()
                    });

                q.add_event_data_with_instant(ev_data_client, now).ok();

                let ev_data_server =
                    EventData::QuicKeyUpdated(qlog::events::quic::KeyUpdated {
                        key_type: qlog::events::quic::KeyType::Server1RttSecret,
                        trigger,
                        ..Default::default()
                    });

                q.add_event_data_with_instant(ev_data_server, now).ok();
            });
        }

        if !self.is_server && !self.got_peer_conn_id {
            if self.odcid.is_none() {
                self.odcid = Some(self.destination_id().into_owned());
            }

            // Replace the randomly generated destination connection ID with
            // the one supplied by the server.
            self.set_initial_dcid(
                hdr.scid.clone(),
                self.peer_transport_params.stateless_reset_token,
                recv_pid,
            )?;

            self.got_peer_conn_id = true;
        }

        if self.is_server && !self.got_peer_conn_id {
            self.set_initial_dcid(hdr.scid.clone(), None, recv_pid)?;

            if !self.did_retry {
                self.local_transport_params
                    .original_destination_connection_id =
                    Some(hdr.dcid.to_vec().into());

                self.encode_transport_params()?;
            }

            self.got_peer_conn_id = true;
        }

        // To avoid sending an ACK in response to an ACK-only packet, we need
        // to keep track of whether this packet contains any frame other than
        // ACK and PADDING.
        let mut ack_elicited = false;

        // Process packet payload. If a frame cannot be processed, store the
        // error and stop further packet processing.
        let mut frame_processing_err = None;

        // To know if the peer migrated the connection, we need to keep track
        // whether this is a non-probing packet.
        let mut probing = true;

        // Process packet payload.
        while payload.cap() > 0 {
            let frame = frame::Frame::from_bytes(&mut payload, hdr.ty)?;

            qlog_with_type!(QLOG_PACKET_RX, self.qlog, _q, {
                qlog_frames.push(frame.to_qlog());
            });

            if frame.ack_eliciting() {
                ack_elicited = true;
            }

            if !frame.probing() {
                probing = false;
            }

            if let Err(e) = self.process_frame(frame, &hdr, recv_pid, epoch, now)
            {
                frame_processing_err = Some(e);
                break;
            }
        }

        qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
            let packet_size = b.len();

            let qlog_pkt_hdr = qlog::events::quic::PacketHeader::with_type(
                hdr.ty.to_qlog(),
                Some(pn),
                Some(hdr.version),
                Some(&hdr.scid),
                Some(&hdr.dcid),
            );

            let qlog_raw_info = RawInfo {
                length: Some(packet_size as u64),
                payload_length: Some(payload_len as u64),
                data: None,
            };

            let ev_data = EventData::QuicPacketReceived(
                qlog::events::quic::PacketReceived {
                    header: qlog_pkt_hdr,
                    frames: Some(qlog_frames),
                    raw: Some(qlog_raw_info),
                    ..Default::default()
                },
            );

            q.add_event_data_with_instant(ev_data, now).ok();
        });

        qlog_with_type!(QLOG_METRICS, self.qlog, q, {
            let recv_path = self.paths.get_mut(recv_pid)?;
            recv_path.recovery.maybe_qlog(q, now);
        });

        if let Some(e) = frame_processing_err {
            // Any frame error is terminal, so now just return.
            return Err(e);
        }

        // Only log the remote transport parameters once the connection is
        // established (i.e. after frames have been fully parsed) and only
        // once per connection.
        if self.is_established() {
            qlog_with_type!(QLOG_PARAMS_SET, self.qlog, q, {
                if !self.qlog.logged_peer_params {
                    let ev_data = self.peer_transport_params.to_qlog(
                        TransportInitiator::Remote,
                        self.handshake.cipher(),
                    );

                    q.add_event_data_with_instant(ev_data, now).ok();

                    self.qlog.logged_peer_params = true;
                }
            });
        }

        // Process acked frames. Note that several packets from several paths
        // might have been acked by the received packet.
        for (_, p) in self.paths.iter_mut() {
            while let Some(acked) = p.recovery.next_acked_frame(epoch) {
                match acked {
                    frame::Frame::Ping {
                        mtu_probe: Some(mtu_probe),
                    } => {
                        if let Some(pmtud) = p.pmtud.as_mut() {
                            trace!(
                                "{} pmtud probe acked; probe size {:?}",
                                self.trace_id,
                                mtu_probe
                            );

                            // Ensure the probe is within the supported MTU range
                            // before updating the max datagram size
                            if let Some(current_mtu) =
                                pmtud.successful_probe(mtu_probe)
                            {
                                qlog_with_type!(
                                    EventType::QuicEventType(
                                        QuicEventType::MtuUpdated
                                    ),
                                    self.qlog,
                                    q,
                                    {
                                        let pmtu_data = EventData::QuicMtuUpdated(
                                            qlog::events::quic::MtuUpdated {
                                                old: Some(
                                                    p.recovery.max_datagram_size()
                                                        as u32,
                                                ),
                                                new: current_mtu as u32,
                                                done: Some(true),
                                            },
                                        );

                                        q.add_event_data_with_instant(
                                            pmtu_data, now,
                                        )
                                        .ok();
                                    }
                                );

                                p.recovery
                                    .pmtud_update_max_datagram_size(current_mtu);
                            }
                        }
                    },

                    frame::Frame::ACK { ranges, .. } => {
                        // Stop acknowledging packets less than or equal to the
                        // largest acknowledged in the sent ACK frame that, in
                        // turn, got acked.
                        if let Some(largest_acked) = ranges.last() {
                            self.pkt_num_spaces[epoch]
                                .recv_pkt_need_ack
                                .remove_until(largest_acked);
                        }
                    },

                    frame::Frame::CryptoHeader { offset, length } => {
                        self.crypto_ctx[epoch]
                            .crypto_stream
                            .send
                            .ack_and_drop(offset, length);
                    },

                    frame::Frame::StreamHeader {
                        stream_id,
                        offset,
                        length,
                        ..
                    } => {
                        // Update tx_buffered and emit qlog before checking if the
                        // stream still exists. The client does need to ACK
                        // frames that were received after the client sends a
                        // ResetStream.
                        self.tx_buffered =
                            self.tx_buffered.saturating_sub(length);

                        qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
                            let ev_data = EventData::QuicStreamDataMoved(
                                qlog::events::quic::StreamDataMoved {
                                    stream_id: Some(stream_id),
                                    offset: Some(offset),
                                    raw: Some(RawInfo {
                                        length: Some(length as u64),
                                        ..Default::default()
                                    }),
                                    from: Some(DataRecipient::Transport),
                                    to: Some(DataRecipient::Dropped),
                                    ..Default::default()
                                },
                            );

                            q.add_event_data_with_instant(ev_data, now).ok();
                        });

                        let stream = match self.streams.get_mut(stream_id) {
                            Some(v) => v,

                            None => continue,
                        };

                        stream.send.ack_and_drop(offset, length);

                        let priority_key = Arc::clone(&stream.priority_key);

                        // Only collect the stream if it is complete and not
                        // readable or writable.
                        //
                        // If it is readable, it will get collected when
                        // stream_recv() is next used.
                        //
                        // If it is writable, it might mean that the stream
                        // has been stopped by the peer (i.e. a STOP_SENDING
                        // frame is received), in which case before collecting
                        // the stream we will need to propagate the
                        // `StreamStopped` error to the application. It will
                        // instead get collected when one of stream_capacity(),
                        // stream_writable(), stream_send(), ... is next called.
                        //
                        // Note that we can't use `is_writable()` here because
                        // it returns false if the stream is stopped. Instead,
                        // since the stream is marked as writable when a
                        // STOP_SENDING frame is received, we check the writable
                        // queue directly instead.
                        let is_writable = priority_key.writable.is_linked() &&
                            // Ensure that the stream is actually stopped.
                            stream.send.is_stopped();

                        let is_complete = stream.is_complete();
                        let is_readable = stream.is_readable();

                        if is_complete && !is_readable && !is_writable {
                            let local = stream.local;
                            self.streams.collect(stream_id, local);
                        }
                    },

                    frame::Frame::HandshakeDone => {
                        // Explicitly set this to true, so that if the frame was
                        // already scheduled for retransmission, it is aborted.
                        self.handshake_done_sent = true;

                        self.handshake_done_acked = true;
                    },

                    frame::Frame::ResetStream { stream_id, .. } => {
                        let stream = match self.streams.get_mut(stream_id) {
                            Some(v) => v,

                            None => continue,
                        };

                        let priority_key = Arc::clone(&stream.priority_key);

                        // Only collect the stream if it is complete and not
                        // readable or writable.
                        //
                        // If it is readable, it will get collected when
                        // stream_recv() is next used.
                        //
                        // If it is writable, it might mean that the stream
                        // has been stopped by the peer (i.e. a STOP_SENDING
                        // frame is received), in which case before collecting
                        // the stream we will need to propagate the
                        // `StreamStopped` error to the application. It will
                        // instead get collected when one of stream_capacity(),
                        // stream_writable(), stream_send(), ... is next called.
                        //
                        // Note that we can't use `is_writable()` here because
                        // it returns false if the stream is stopped. Instead,
                        // since the stream is marked as writable when a
                        // STOP_SENDING frame is received, we check the writable
                        // queue directly instead.
                        let is_writable = priority_key.writable.is_linked() &&
                            // Ensure that the stream is actually stopped.
                            stream.send.is_stopped();

                        let is_complete = stream.is_complete();
                        let is_readable = stream.is_readable();

                        if is_complete && !is_readable && !is_writable {
                            let local = stream.local;
                            self.streams.collect(stream_id, local);
                        }
                    },

                    _ => (),
                }
            }
        }

        // Now that we processed all the frames, if there is a path that has no
        // Destination CID, try to allocate one.
        let no_dcid = self
            .paths
            .iter_mut()
            .filter(|(_, p)| p.active_dcid_seq.is_none());

        for (pid, p) in no_dcid {
            if self.ids.zero_length_dcid() {
                p.active_dcid_seq = Some(0);
                continue;
            }

            let dcid_seq = match self.ids.lowest_available_dcid_seq() {
                Some(seq) => seq,
                None => break,
            };

            self.ids.link_dcid_to_path_id(dcid_seq, pid)?;

            p.active_dcid_seq = Some(dcid_seq);
        }

        // We only record the time of arrival of the largest packet number
        // that still needs to be acked, to be used for ACK delay calculation.
        if self.pkt_num_spaces[epoch].recv_pkt_need_ack.last() < Some(pn) {
            self.pkt_num_spaces[epoch].largest_rx_pkt_time = now;
        }

        self.pkt_num_spaces[epoch].recv_pkt_num.insert(pn);

        self.pkt_num_spaces[epoch].recv_pkt_need_ack.push_item(pn);

        self.pkt_num_spaces[epoch].ack_elicited =
            cmp::max(self.pkt_num_spaces[epoch].ack_elicited, ack_elicited);

        self.pkt_num_spaces[epoch].largest_rx_pkt_num =
            cmp::max(self.pkt_num_spaces[epoch].largest_rx_pkt_num, pn);

        if !probing {
            self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num = cmp::max(
                self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num,
                pn,
            );

            // Did the peer migrated to another path?
            let active_path_id = self.paths.get_active_path_id()?;

            if self.is_server &&
                recv_pid != active_path_id &&
                self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num == pn
            {
                self.on_peer_migrated(recv_pid, self.disable_dcid_reuse, now)?;
            }
        }

        // A successfully processed packet restarts the idle timer.
        if let Some(idle_timeout) = self.idle_timeout() {
            self.idle_timer = Some(now + idle_timeout);
        }

        // Update send capacity.
        self.update_tx_cap();

        self.recv_count += 1;
        self.paths.get_mut(recv_pid)?.recv_count += 1;

        // Bytes consumed from the input: parsed header and payload offset
        // plus the AEAD tag.
        let read = b.off() + aead_tag_len;

        self.recv_bytes += read as u64;
        self.paths.get_mut(recv_pid)?.recv_bytes += read as u64;

        // An Handshake packet has been received from the client and has been
        // successfully processed, so we can drop the initial state and consider
        // the client's address to be verified.
        if self.is_server && hdr.ty == Type::Handshake {
            self.drop_epoch_state(packet::Epoch::Initial, now);

            self.paths.get_mut(recv_pid)?.verified_peer_address = true;
        }

        self.ack_eliciting_sent = false;

        Ok(read)
    }
3829
3830 /// Writes a single QUIC packet to be sent to the peer.
3831 ///
3832 /// On success the number of bytes written to the output buffer is
3833 /// returned, or [`Done`] if there was nothing to write.
3834 ///
3835 /// The application should call `send()` multiple times until [`Done`] is
3836 /// returned, indicating that there are no more packets to send. It is
3837 /// recommended that `send()` be called in the following cases:
3838 ///
3839 /// * When the application receives QUIC packets from the peer (that is,
3840 /// any time [`recv()`] is also called).
3841 ///
3842 /// * When the connection timer expires (that is, any time [`on_timeout()`]
3843 /// is also called).
3844 ///
3845 /// * When the application sends data to the peer (for example, any time
3846 /// [`stream_send()`] or [`stream_shutdown()`] are called).
3847 ///
3848 /// * When the application receives data from the peer (for example any
3849 /// time [`stream_recv()`] is called).
3850 ///
3851 /// Once [`is_draining()`] returns `true`, it is no longer necessary to call
3852 /// `send()` and all calls will return [`Done`].
3853 ///
3854 /// [`Done`]: enum.Error.html#variant.Done
3855 /// [`recv()`]: struct.Connection.html#method.recv
3856 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
3857 /// [`stream_send()`]: struct.Connection.html#method.stream_send
3858 /// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
3859 /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
3860 /// [`is_draining()`]: struct.Connection.html#method.is_draining
3861 ///
3862 /// ## Examples:
3863 ///
3864 /// ```no_run
3865 /// # let mut out = [0; 512];
3866 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
3867 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
3868 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
3869 /// # let peer = "127.0.0.1:1234".parse().unwrap();
3870 /// # let local = socket.local_addr().unwrap();
3871 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
3872 /// loop {
3873 /// let (write, send_info) = match conn.send(&mut out) {
3874 /// Ok(v) => v,
3875 ///
3876 /// Err(quiche::Error::Done) => {
3877 /// // Done writing.
3878 /// break;
3879 /// },
3880 ///
3881 /// Err(e) => {
3882 /// // An error occurred, handle it.
3883 /// break;
3884 /// },
3885 /// };
3886 ///
3887 /// socket.send_to(&out[..write], &send_info.to).unwrap();
3888 /// }
3889 /// # Ok::<(), quiche::Error>(())
3890 /// ```
3891 pub fn send(&mut self, out: &mut [u8]) -> Result<(usize, SendInfo)> {
3892 self.send_on_path(out, None, None)
3893 }
3894
    /// Writes a single QUIC packet to be sent to the peer from the specified
    /// local address `from` to the destination address `to`.
    ///
    /// The behavior of this method differs depending on the value of the `from`
    /// and `to` parameters:
    ///
    /// * If both are `Some`, then the method only considers the 4-tuple
    ///   (`from`, `to`). Application can monitor the 4-tuple availability,
    ///   either by monitoring [`path_event_next()`] events or by relying on
    ///   the [`paths_iter()`] method. If the provided 4-tuple does not exist
    ///   on the connection (anymore), it returns an [`InvalidState`].
    ///
    /// * If `from` is `Some` and `to` is `None`, then the method only
    ///   considers sending packets on paths having `from` as local address.
    ///
    /// * If `to` is `Some` and `from` is `None`, then the method only
    ///   considers sending packets on paths having `to` as peer address.
    ///
    /// * If both are `None`, all available paths are considered.
    ///
    /// On success the number of bytes written to the output buffer is
    /// returned, or [`Done`] if there was nothing to write.
    ///
    /// The application should call `send_on_path()` multiple times until
    /// [`Done`] is returned, indicating that there are no more packets to
    /// send. It is recommended that `send_on_path()` be called in the
    /// following cases:
    ///
    /// * When the application receives QUIC packets from the peer (that is,
    ///   any time [`recv()`] is also called).
    ///
    /// * When the connection timer expires (that is, any time [`on_timeout()`]
    ///   is also called).
    ///
    /// * When the application sends data to the peer (for example, any time
    ///   [`stream_send()`] or [`stream_shutdown()`] are called).
    ///
    /// * When the application receives data from the peer (for example any
    ///   time [`stream_recv()`] is called).
    ///
    /// Once [`is_draining()`] returns `true`, it is no longer necessary to call
    /// `send_on_path()` and all calls will return [`Done`].
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`InvalidState`]: enum.Error.html#variant.InvalidState
    /// [`recv()`]: struct.Connection.html#method.recv
    /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
    /// [`stream_send()`]: struct.Connection.html#method.stream_send
    /// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
    /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
    /// [`path_event_next()`]: struct.Connection.html#method.path_event_next
    /// [`paths_iter()`]: struct.Connection.html#method.paths_iter
    /// [`is_draining()`]: struct.Connection.html#method.is_draining
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut out = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// loop {
    ///     let (write, send_info) = match conn.send_on_path(&mut out, Some(local), Some(peer)) {
    ///         Ok(v) => v,
    ///
    ///         Err(quiche::Error::Done) => {
    ///             // Done writing.
    ///             break;
    ///         },
    ///
    ///         Err(e) => {
    ///             // An error occurred, handle it.
    ///             break;
    ///         },
    ///     };
    ///
    ///     socket.send_to(&out[..write], &send_info.to).unwrap();
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn send_on_path(
        &mut self, out: &mut [u8], from: Option<SocketAddr>,
        to: Option<SocketAddr>,
    ) -> Result<(usize, SendInfo)> {
        if out.is_empty() {
            return Err(Error::BufferTooShort);
        }

        if self.is_closed() || self.is_draining() {
            return Err(Error::Done);
        }

        // A single timestamp is shared by every packet written during this
        // call (handshake progress, pacing, and per-packet send time).
        let now = Instant::now();

        // Drive the handshake forward, unless the connection is being closed
        // locally (i.e. a local error has already been recorded).
        if self.local_error.is_none() {
            self.do_handshake(now)?;
        }

        // Forwarding the error value here could confuse
        // applications, as they may not expect getting a `recv()`
        // error when calling `send()`.
        //
        // We simply fall-through to sending packets, which should
        // take care of terminating the connection as needed.
        let _ = self.process_undecrypted_0rtt_packets();

        // There's no point in trying to send a packet if the Initial secrets
        // have not been derived yet, so return early.
        if !self.derived_initial_secrets {
            return Err(Error::Done);
        }

        // Whether an Initial packet was coalesced into the datagram; if so,
        // the datagram may need to be padded before returning (see below).
        let mut has_initial = false;

        // Total number of bytes written to `out` so far.
        let mut done = 0;

        // Limit output packet size to respect the sender and receiver's
        // maximum UDP payload size limit.
        let mut left = cmp::min(out.len(), self.max_send_udp_payload_size());

        // Resolve the path to send on: an explicit (`from`, `to`) 4-tuple
        // must match an existing path, otherwise select a path matching the
        // provided (partial) constraints.
        let send_pid = match (from, to) {
            (Some(f), Some(t)) => self
                .paths
                .path_id_from_addrs(&(f, t))
                .ok_or(Error::InvalidState)?,

            _ => self.get_send_path_id(from, to)?,
        };

        let send_path = self.paths.get_mut(send_pid)?;

        // Update max datagram size to allow path MTU discovery probe to be sent.
        if let Some(pmtud) = send_path.pmtud.as_mut() {
            if pmtud.should_probe() {
                let size = if self.handshake_confirmed || self.handshake_completed
                {
                    pmtud.get_probe_size()
                } else {
                    pmtud.get_current_mtu()
                };

                send_path.recovery.pmtud_update_max_datagram_size(size);

                left =
                    cmp::min(out.len(), send_path.recovery.max_datagram_size());
            }
        }

        // Limit data sent by the server based on the amount of data received
        // from the client before its address is validated.
        if !send_path.verified_peer_address && self.is_server {
            left = cmp::min(left, send_path.max_send_bytes);
        }

        // Generate coalesced packets.
        while left > 0 {
            let (ty, written) = match self.send_single(
                &mut out[done..done + left],
                send_pid,
                has_initial,
                now,
            ) {
                Ok(v) => v,

                // Running out of buffer space, or having nothing left to
                // send, simply ends the coalescing loop.
                Err(Error::BufferTooShort) | Err(Error::Done) => break,

                Err(e) => return Err(e),
            };

            done += written;
            left -= written;

            match ty {
                Type::Initial => has_initial = true,

                // No more packets can be coalesced after a 1-RTT.
                Type::Short => break,

                _ => (),
            };

            // When sending multiple PTO probes, don't coalesce them together,
            // so they are sent on separate UDP datagrams.
            if let Ok(epoch) = ty.to_epoch() {
                if self.paths.get_mut(send_pid)?.recovery.loss_probes(epoch) > 0 {
                    break;
                }
            }

            // Don't coalesce packets that must go on different paths.
            if !(from.is_some() && to.is_some()) &&
                self.get_send_path_id(from, to)? != send_pid
            {
                break;
            }
        }

        // Nothing was written at all.
        if done == 0 {
            self.last_tx_data = self.tx_data;

            return Err(Error::Done);
        }

        // Pad datagrams that carry an Initial packet up to
        // MIN_CLIENT_INITIAL_LEN, as far as the remaining buffer space
        // allows.
        if has_initial && left > 0 && done < MIN_CLIENT_INITIAL_LEN {
            let pad_len = cmp::min(left, MIN_CLIENT_INITIAL_LEN - done);

            // Fill padding area with null bytes, to avoid leaking information
            // in case the application reuses the packet buffer.
            out[done..done + pad_len].fill(0);

            done += pad_len;
        }

        let send_path = self.paths.get(send_pid)?;

        let info = SendInfo {
            from: send_path.local_addr(),
            to: send_path.peer_addr(),

            at: send_path.recovery.get_packet_send_time(now),
        };

        Ok((done, info))
    }
4122
4123 fn send_single(
4124 &mut self, out: &mut [u8], send_pid: usize, has_initial: bool,
4125 now: Instant,
4126 ) -> Result<(Type, usize)> {
4127 if out.is_empty() {
4128 return Err(Error::BufferTooShort);
4129 }
4130
4131 if self.is_draining() {
4132 return Err(Error::Done);
4133 }
4134
4135 let is_closing = self.local_error.is_some();
4136
4137 let out_len = out.len();
4138
4139 let mut b = octets::OctetsMut::with_slice(out);
4140
4141 let pkt_type = self.write_pkt_type(send_pid)?;
4142
4143 let max_dgram_len = if !self.dgram_send_queue.is_empty() {
4144 self.dgram_max_writable_len()
4145 } else {
4146 None
4147 };
4148
4149 let epoch = pkt_type.to_epoch()?;
4150 let pkt_space = &mut self.pkt_num_spaces[epoch];
4151 let crypto_ctx = &mut self.crypto_ctx[epoch];
4152
4153 // Process lost frames. There might be several paths having lost frames.
4154 for (_, p) in self.paths.iter_mut() {
4155 while let Some(lost) = p.recovery.next_lost_frame(epoch) {
4156 match lost {
4157 frame::Frame::CryptoHeader { offset, length } => {
4158 crypto_ctx.crypto_stream.send.retransmit(offset, length);
4159
4160 self.stream_retrans_bytes += length as u64;
4161 p.stream_retrans_bytes += length as u64;
4162
4163 self.retrans_count += 1;
4164 p.retrans_count += 1;
4165 },
4166
4167 frame::Frame::StreamHeader {
4168 stream_id,
4169 offset,
4170 length,
4171 fin,
4172 } => {
4173 let stream = match self.streams.get_mut(stream_id) {
4174 // Only retransmit data if the stream is not closed
4175 // or stopped.
4176 Some(v) if !v.send.is_stopped() => v,
4177
4178 // Data on a closed stream will not be retransmitted
4179 // or acked after it is declared lost, so update
4180 // tx_buffered and qlog.
4181 _ => {
4182 self.tx_buffered =
4183 self.tx_buffered.saturating_sub(length);
4184
4185 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
4186 let ev_data = EventData::QuicStreamDataMoved(
4187 qlog::events::quic::StreamDataMoved {
4188 stream_id: Some(stream_id),
4189 offset: Some(offset),
4190 raw: Some(RawInfo {
4191 length: Some(length as u64),
4192 ..Default::default()
4193 }),
4194 from: Some(DataRecipient::Transport),
4195 to: Some(DataRecipient::Dropped),
4196 ..Default::default()
4197 },
4198 );
4199
4200 q.add_event_data_with_instant(ev_data, now)
4201 .ok();
4202 });
4203
4204 continue;
4205 },
4206 };
4207
4208 let was_flushable = stream.is_flushable();
4209
4210 let empty_fin = length == 0 && fin;
4211
4212 stream.send.retransmit(offset, length);
4213
4214 // If the stream is now flushable push it to the
4215 // flushable queue, but only if it wasn't already
4216 // queued.
4217 //
4218 // Consider the stream flushable also when we are
4219 // sending a zero-length frame that has the fin flag
4220 // set.
4221 if (stream.is_flushable() || empty_fin) && !was_flushable
4222 {
4223 let priority_key = Arc::clone(&stream.priority_key);
4224 self.streams.insert_flushable(&priority_key);
4225 }
4226
4227 self.stream_retrans_bytes += length as u64;
4228 p.stream_retrans_bytes += length as u64;
4229
4230 self.retrans_count += 1;
4231 p.retrans_count += 1;
4232 },
4233
4234 frame::Frame::ACK { .. } => {
4235 pkt_space.ack_elicited = true;
4236 },
4237
4238 frame::Frame::ResetStream {
4239 stream_id,
4240 error_code,
4241 final_size,
4242 } => {
4243 self.streams
4244 .insert_reset(stream_id, error_code, final_size);
4245 },
4246
4247 frame::Frame::StopSending {
4248 stream_id,
4249 error_code,
4250 } =>
4251 // We only need to retransmit the STOP_SENDING frame if
4252 // the stream is still active and not FIN'd. Even if the
4253 // packet was lost, if the application has the final
4254 // size at this point there is no need to retransmit.
4255 if let Some(stream) = self.streams.get(stream_id) {
4256 if !stream.recv.is_fin() {
4257 self.streams
4258 .insert_stopped(stream_id, error_code);
4259 }
4260 },
4261
4262 // Retransmit HANDSHAKE_DONE only if it hasn't been acked at
4263 // least once already.
4264 frame::Frame::HandshakeDone if !self.handshake_done_acked => {
4265 self.handshake_done_sent = false;
4266 },
4267
4268 frame::Frame::MaxStreamData { stream_id, .. } => {
4269 if self.streams.get(stream_id).is_some() {
4270 self.streams.insert_almost_full(stream_id);
4271 }
4272 },
4273
4274 frame::Frame::MaxData { .. } => {
4275 self.should_send_max_data = true;
4276 },
4277
4278 frame::Frame::MaxStreamsUni { .. } => {
4279 self.should_send_max_streams_uni = true;
4280 },
4281
4282 frame::Frame::MaxStreamsBidi { .. } => {
4283 self.should_send_max_streams_bidi = true;
4284 },
4285
4286 // Retransmit STREAMS_BLOCKED frames if the frame with the
4287 // most recent limit is lost. These are informational
4288 // signals to the peer, reliably sending them
4289 // ensures the signal is used consistently and helps
4290 // debugging.
4291 frame::Frame::StreamsBlockedBidi { limit } => {
4292 self.streams_blocked_bidi_state
4293 .force_retransmit_sent_limit_eq(limit);
4294 },
4295
4296 frame::Frame::StreamsBlockedUni { limit } => {
4297 self.streams_blocked_uni_state
4298 .force_retransmit_sent_limit_eq(limit);
4299 },
4300
4301 frame::Frame::NewConnectionId { seq_num, .. } => {
4302 self.ids.mark_advertise_new_scid_seq(seq_num, true);
4303 },
4304
4305 frame::Frame::RetireConnectionId { seq_num } => {
4306 self.ids.mark_retire_dcid_seq(seq_num, true)?;
4307 },
4308
4309 frame::Frame::Ping {
4310 mtu_probe: Some(failed_probe),
4311 } =>
4312 if let Some(pmtud) = p.pmtud.as_mut() {
4313 trace!("pmtud probe dropped: {failed_probe}");
4314 pmtud.failed_probe(failed_probe);
4315 },
4316
4317 _ => (),
4318 }
4319 }
4320 }
4321 self.check_tx_buffered_invariant();
4322
4323 let is_app_limited = self.delivery_rate_check_if_app_limited();
4324 let n_paths = self.paths.len();
4325 let path = self.paths.get_mut(send_pid)?;
4326 let flow_control = &mut self.flow_control;
4327 let pkt_space = &mut self.pkt_num_spaces[epoch];
4328 let crypto_ctx = &mut self.crypto_ctx[epoch];
4329 let pkt_num_manager = &mut self.pkt_num_manager;
4330
4331 let mut left = if let Some(pmtud) = path.pmtud.as_mut() {
4332 // Limit output buffer size by estimated path MTU.
4333 cmp::min(pmtud.get_current_mtu(), b.cap())
4334 } else {
4335 b.cap()
4336 };
4337
4338 if pkt_num_manager.should_skip_pn(self.handshake_completed) {
4339 pkt_num_manager.set_skip_pn(Some(self.next_pkt_num));
4340 self.next_pkt_num += 1;
4341 };
4342 let pn = self.next_pkt_num;
4343
4344 let largest_acked_pkt =
4345 path.recovery.get_largest_acked_on_epoch(epoch).unwrap_or(0);
4346 let pn_len = packet::pkt_num_len(pn, largest_acked_pkt);
4347
4348 // The AEAD overhead at the current encryption level.
4349 let crypto_overhead = crypto_ctx.crypto_overhead().ok_or(Error::Done)?;
4350
4351 let dcid_seq = path.active_dcid_seq.ok_or(Error::OutOfIdentifiers)?;
4352
4353 let dcid =
4354 ConnectionId::from_ref(self.ids.get_dcid(dcid_seq)?.cid.as_ref());
4355
4356 let scid = if let Some(scid_seq) = path.active_scid_seq {
4357 ConnectionId::from_ref(self.ids.get_scid(scid_seq)?.cid.as_ref())
4358 } else if pkt_type == Type::Short {
4359 ConnectionId::default()
4360 } else {
4361 return Err(Error::InvalidState);
4362 };
4363
4364 let hdr = Header {
4365 ty: pkt_type,
4366
4367 version: self.version,
4368
4369 dcid,
4370 scid,
4371
4372 pkt_num: 0,
4373 pkt_num_len: pn_len,
4374
4375 // Only clone token for Initial packets, as other packets don't have
4376 // this field (Retry doesn't count, as it's not encoded as part of
4377 // this code path).
4378 token: if pkt_type == Type::Initial {
4379 self.token.clone()
4380 } else {
4381 None
4382 },
4383
4384 versions: None,
4385 key_phase: self.key_phase,
4386 };
4387
4388 hdr.to_bytes(&mut b)?;
4389
4390 let hdr_trace = if log::max_level() == log::LevelFilter::Trace {
4391 Some(format!("{hdr:?}"))
4392 } else {
4393 None
4394 };
4395
4396 let hdr_ty = hdr.ty;
4397
4398 #[cfg(feature = "qlog")]
4399 let qlog_pkt_hdr = self.qlog.streamer.as_ref().map(|_q| {
4400 qlog::events::quic::PacketHeader::with_type(
4401 hdr.ty.to_qlog(),
4402 Some(pn),
4403 Some(hdr.version),
4404 Some(&hdr.scid),
4405 Some(&hdr.dcid),
4406 )
4407 });
4408
4409 // Calculate the space required for the packet, including the header
4410 // the payload length, the packet number and the AEAD overhead.
4411 let mut overhead = b.off() + pn_len + crypto_overhead;
4412
4413 // We assume that the payload length, which is only present in long
4414 // header packets, can always be encoded with a 2-byte varint.
4415 if pkt_type != Type::Short {
4416 overhead += PAYLOAD_LENGTH_LEN;
4417 }
4418
4419 // Make sure we have enough space left for the packet overhead.
4420 match left.checked_sub(overhead) {
4421 Some(v) => left = v,
4422
4423 None => {
4424 // We can't send more because there isn't enough space available
4425 // in the output buffer.
4426 //
4427 // This usually happens when we try to send a new packet but
4428 // failed because cwnd is almost full. In such case app_limited
4429 // is set to false here to make cwnd grow when ACK is received.
4430 path.recovery.update_app_limited(false);
4431 return Err(Error::Done);
4432 },
4433 }
4434
4435 // Make sure there is enough space for the minimum payload length.
4436 if left < PAYLOAD_MIN_LEN {
4437 path.recovery.update_app_limited(false);
4438 return Err(Error::Done);
4439 }
4440
4441 let mut frames: SmallVec<[frame::Frame; 1]> = SmallVec::new();
4442
4443 let mut ack_eliciting = false;
4444 let mut in_flight = false;
4445 let mut is_pmtud_probe = false;
4446 let mut has_data = false;
4447
4448 // Whether or not we should explicitly elicit an ACK via PING frame if we
4449 // implicitly elicit one otherwise.
4450 let ack_elicit_required = path.recovery.should_elicit_ack(epoch);
4451
4452 let header_offset = b.off();
4453
4454 // Reserve space for payload length in advance. Since we don't yet know
4455 // what the final length will be, we reserve 2 bytes in all cases.
4456 //
4457 // Only long header packets have an explicit length field.
4458 if pkt_type != Type::Short {
4459 b.skip(PAYLOAD_LENGTH_LEN)?;
4460 }
4461
4462 packet::encode_pkt_num(pn, pn_len, &mut b)?;
4463
4464 let payload_offset = b.off();
4465
4466 let cwnd_available =
4467 path.recovery.cwnd_available().saturating_sub(overhead);
4468
4469 let left_before_packing_ack_frame = left;
4470
4471 // Create ACK frame.
4472 //
4473 // When we need to explicitly elicit an ACK via PING later, go ahead and
4474 // generate an ACK (if there's anything to ACK) since we're going to
4475 // send a packet with PING anyways, even if we haven't received anything
4476 // ACK eliciting.
4477 if pkt_space.recv_pkt_need_ack.len() > 0 &&
4478 (pkt_space.ack_elicited || ack_elicit_required) &&
4479 (!is_closing ||
4480 (pkt_type == Type::Handshake &&
4481 self.local_error
4482 .as_ref()
4483 .is_some_and(|le| le.is_app))) &&
4484 path.active()
4485 {
4486 #[cfg(not(feature = "fuzzing"))]
4487 let ack_delay = pkt_space.largest_rx_pkt_time.elapsed();
4488
4489 #[cfg(not(feature = "fuzzing"))]
4490 let ack_delay = ack_delay.as_micros() as u64 /
4491 2_u64
4492 .pow(self.local_transport_params.ack_delay_exponent as u32);
4493
4494 // pseudo-random reproducible ack delays when fuzzing
4495 #[cfg(feature = "fuzzing")]
4496 let ack_delay = rand::rand_u8() as u64 + 1;
4497
4498 let frame = frame::Frame::ACK {
4499 ack_delay,
4500 ranges: pkt_space.recv_pkt_need_ack.clone(),
4501 ecn_counts: None, // sending ECN is not supported at this time
4502 };
4503
4504 // When a PING frame needs to be sent, avoid sending the ACK if
4505 // there is not enough cwnd available for both (note that PING
4506 // frames are always 1 byte, so we just need to check that the
4507 // ACK's length is lower than cwnd).
4508 if pkt_space.ack_elicited || frame.wire_len() < cwnd_available {
4509 // ACK-only packets are not congestion controlled so ACKs must
4510 // be bundled considering the buffer capacity only, and not the
4511 // available cwnd.
4512 if push_frame_to_pkt!(b, frames, frame, left) {
4513 pkt_space.ack_elicited = false;
4514 }
4515 }
4516 }
4517
4518 // Limit output packet size by congestion window size.
4519 left = cmp::min(
4520 left,
4521 // Bytes consumed by ACK frames.
4522 cwnd_available.saturating_sub(left_before_packing_ack_frame - left),
4523 );
4524
4525 let mut challenge_data = None;
4526
4527 if pkt_type == Type::Short {
4528 // Create PMTUD probe.
4529 //
4530 // In order to send a PMTUD probe the current `left` value, which was
4531 // already limited by the current PMTU measure, needs to be ignored,
4532 // but the outgoing packet still needs to be limited by
4533 // the output buffer size, as well as the congestion
4534 // window.
4535 //
4536 // In addition, the PMTUD probe is only generated when the handshake
4537 // is confirmed, to avoid interfering with the handshake
4538 // (e.g. due to the anti-amplification limits).
4539 if let Ok(active_path) = self.paths.get_active_mut() {
4540 let should_probe_pmtu = active_path.should_send_pmtu_probe(
4541 self.handshake_confirmed,
4542 self.handshake_completed,
4543 out_len,
4544 is_closing,
4545 frames.is_empty(),
4546 );
4547
4548 if should_probe_pmtu {
4549 if let Some(pmtud) = active_path.pmtud.as_mut() {
4550 let probe_size = pmtud.get_probe_size();
4551 trace!(
4552 "{} sending pmtud probe pmtu_probe={} estimated_pmtu={}",
4553 self.trace_id,
4554 probe_size,
4555 pmtud.get_current_mtu(),
4556 );
4557
4558 left = probe_size;
4559
4560 match left.checked_sub(overhead) {
4561 Some(v) => left = v,
4562
4563 None => {
4564 // We can't send more because there isn't enough
4565 // space available in the output buffer.
4566 //
4567 // This usually happens when we try to send a new
4568 // packet but failed because cwnd is almost full.
4569 //
4570 // In such case app_limited is set to false here
4571 // to make cwnd grow when ACK is received.
4572 active_path.recovery.update_app_limited(false);
4573 return Err(Error::Done);
4574 },
4575 }
4576
4577 let frame = frame::Frame::Padding {
4578 len: probe_size - overhead - 1,
4579 };
4580
4581 if push_frame_to_pkt!(b, frames, frame, left) {
4582 let frame = frame::Frame::Ping {
4583 mtu_probe: Some(probe_size),
4584 };
4585
4586 if push_frame_to_pkt!(b, frames, frame, left) {
4587 ack_eliciting = true;
4588 in_flight = true;
4589 }
4590 }
4591
4592 // Reset probe flag after sending to prevent duplicate
4593 // probes in a single flight.
4594 pmtud.set_in_flight(true);
4595 is_pmtud_probe = true;
4596 }
4597 }
4598 }
4599
4600 let path = self.paths.get_mut(send_pid)?;
4601 // Create PATH_RESPONSE frame if needed.
4602 // We do not try to ensure that these are really sent.
4603 while let Some(challenge) = path.pop_received_challenge() {
4604 let frame = frame::Frame::PathResponse { data: challenge };
4605
4606 if push_frame_to_pkt!(b, frames, frame, left) {
4607 ack_eliciting = true;
4608 in_flight = true;
4609 } else {
4610 // If there are other pending PATH_RESPONSE, don't lose them
4611 // now.
4612 break;
4613 }
4614 }
4615
4616 // Create PATH_CHALLENGE frame if needed.
4617 if path.validation_requested() {
4618 // TODO: ensure that data is unique over paths.
4619 let data = rand::rand_u64().to_be_bytes();
4620
4621 let frame = frame::Frame::PathChallenge { data };
4622
4623 if push_frame_to_pkt!(b, frames, frame, left) {
4624 // Let's notify the path once we know the packet size.
4625 challenge_data = Some(data);
4626
4627 ack_eliciting = true;
4628 in_flight = true;
4629 }
4630 }
4631
4632 if let Some(key_update) = crypto_ctx.key_update.as_mut() {
4633 key_update.update_acked = true;
4634 }
4635 }
4636
4637 let path = self.paths.get_mut(send_pid)?;
4638
4639 if pkt_type == Type::Short && !is_closing {
4640 // Create NEW_CONNECTION_ID frames as needed.
4641 while let Some(seq_num) = self.ids.next_advertise_new_scid_seq() {
4642 let frame = self.ids.get_new_connection_id_frame_for(seq_num)?;
4643
4644 if push_frame_to_pkt!(b, frames, frame, left) {
4645 self.ids.mark_advertise_new_scid_seq(seq_num, false);
4646
4647 ack_eliciting = true;
4648 in_flight = true;
4649 } else {
4650 break;
4651 }
4652 }
4653 }
4654
4655 if pkt_type == Type::Short && !is_closing && path.active() {
4656 // Create HANDSHAKE_DONE frame.
4657 // self.should_send_handshake_done() but without the need to borrow
4658 if self.handshake_completed &&
4659 !self.handshake_done_sent &&
4660 self.is_server
4661 {
4662 let frame = frame::Frame::HandshakeDone;
4663
4664 if push_frame_to_pkt!(b, frames, frame, left) {
4665 self.handshake_done_sent = true;
4666
4667 ack_eliciting = true;
4668 in_flight = true;
4669 }
4670 }
4671
4672 // Create MAX_STREAMS_BIDI frame.
4673 if self.streams.should_update_max_streams_bidi() ||
4674 self.should_send_max_streams_bidi
4675 {
4676 let frame = frame::Frame::MaxStreamsBidi {
4677 max: self.streams.max_streams_bidi_next(),
4678 };
4679
4680 if push_frame_to_pkt!(b, frames, frame, left) {
4681 self.streams.update_max_streams_bidi();
4682 self.should_send_max_streams_bidi = false;
4683
4684 ack_eliciting = true;
4685 in_flight = true;
4686 }
4687 }
4688
4689 // Create MAX_STREAMS_UNI frame.
4690 if self.streams.should_update_max_streams_uni() ||
4691 self.should_send_max_streams_uni
4692 {
4693 let frame = frame::Frame::MaxStreamsUni {
4694 max: self.streams.max_streams_uni_next(),
4695 };
4696
4697 if push_frame_to_pkt!(b, frames, frame, left) {
4698 self.streams.update_max_streams_uni();
4699 self.should_send_max_streams_uni = false;
4700
4701 ack_eliciting = true;
4702 in_flight = true;
4703 }
4704 }
4705
4706 // Create DATA_BLOCKED frame.
4707 if let Some(limit) = self.blocked_limit {
4708 let frame = frame::Frame::DataBlocked { limit };
4709
4710 if push_frame_to_pkt!(b, frames, frame, left) {
4711 self.blocked_limit = None;
4712 self.data_blocked_sent_count =
4713 self.data_blocked_sent_count.saturating_add(1);
4714
4715 ack_eliciting = true;
4716 in_flight = true;
4717 }
4718 }
4719
4720 // Create STREAMS_BLOCKED (bidi) frame when the local endpoint has
4721 // exhausted the peer's bidirectional stream count limit.
4722 if self
4723 .streams_blocked_bidi_state
4724 .has_pending_stream_blocked_frame()
4725 {
4726 if let Some(limit) = self.streams_blocked_bidi_state.blocked_at {
4727 let frame = frame::Frame::StreamsBlockedBidi { limit };
4728
4729 if push_frame_to_pkt!(b, frames, frame, left) {
4730 // Record the limit we just notified the peer about so
4731 // that redundant frames for the same limit are
4732 // suppressed.
4733 self.streams_blocked_bidi_state.blocked_sent =
4734 Some(limit);
4735
4736 ack_eliciting = true;
4737 in_flight = true;
4738 }
4739 }
4740 }
4741
4742 // Create STREAMS_BLOCKED (uni) frame when the local endpoint has
4743 // exhausted the peer's unidirectional stream count limit.
4744 if self
4745 .streams_blocked_uni_state
4746 .has_pending_stream_blocked_frame()
4747 {
4748 if let Some(limit) = self.streams_blocked_uni_state.blocked_at {
4749 let frame = frame::Frame::StreamsBlockedUni { limit };
4750
4751 if push_frame_to_pkt!(b, frames, frame, left) {
4752 // Record the limit we just notified the peer about so
4753 // that redundant frames for the same limit are
4754 // suppressed.
4755 self.streams_blocked_uni_state.blocked_sent = Some(limit);
4756
4757 ack_eliciting = true;
4758 in_flight = true;
4759 }
4760 }
4761 }
4762
4763 // Create MAX_STREAM_DATA frames as needed.
4764 for stream_id in self.streams.almost_full() {
4765 let stream = match self.streams.get_mut(stream_id) {
4766 Some(v) => v,
4767
4768 None => {
4769 // The stream doesn't exist anymore, so remove it from
4770 // the almost full set.
4771 self.streams.remove_almost_full(stream_id);
4772 continue;
4773 },
4774 };
4775
4776 // Autotune the stream window size, but only if this is not a
4777 // retransmission (on a retransmit the stream will be in
4778 // `self.streams.almost_full()` but it's `almost_full()`
4779 // method returns false.
4780 if stream.recv.almost_full() {
4781 stream.recv.autotune_window(now, path.recovery.rtt());
4782 }
4783
4784 let frame = frame::Frame::MaxStreamData {
4785 stream_id,
4786 max: stream.recv.max_data_next(),
4787 };
4788
4789 if push_frame_to_pkt!(b, frames, frame, left) {
4790 let recv_win = stream.recv.window();
4791
4792 stream.recv.update_max_data(now);
4793
4794 self.streams.remove_almost_full(stream_id);
4795
4796 ack_eliciting = true;
4797 in_flight = true;
4798
4799 // Make sure the connection window always has some
4800 // room compared to the stream window.
4801 flow_control.ensure_window_lower_bound(
4802 (recv_win as f64 * CONNECTION_WINDOW_FACTOR) as u64,
4803 );
4804 }
4805 }
4806
4807 // Create MAX_DATA frame as needed.
4808 if flow_control.should_update_max_data() &&
4809 flow_control.max_data() < flow_control.max_data_next()
4810 {
4811 // Autotune the connection window size. We only tune the window
4812 // if we are sending an "organic" update, not on retransmits.
4813 flow_control.autotune_window(now, path.recovery.rtt());
4814 self.should_send_max_data = true;
4815 }
4816
4817 if self.should_send_max_data {
4818 let frame = frame::Frame::MaxData {
4819 max: flow_control.max_data_next(),
4820 };
4821
4822 if push_frame_to_pkt!(b, frames, frame, left) {
4823 self.should_send_max_data = false;
4824
4825 // Commits the new max_rx_data limit.
4826 flow_control.update_max_data(now);
4827
4828 ack_eliciting = true;
4829 in_flight = true;
4830 }
4831 }
4832
4833 // Create STOP_SENDING frames as needed.
4834 for (stream_id, error_code) in self
4835 .streams
4836 .stopped()
4837 .map(|(&k, &v)| (k, v))
4838 .collect::<Vec<(u64, u64)>>()
4839 {
4840 let frame = frame::Frame::StopSending {
4841 stream_id,
4842 error_code,
4843 };
4844
4845 if push_frame_to_pkt!(b, frames, frame, left) {
4846 self.streams.remove_stopped(stream_id);
4847
4848 ack_eliciting = true;
4849 in_flight = true;
4850 }
4851 }
4852
4853 // Create RESET_STREAM frames as needed.
4854 for (stream_id, (error_code, final_size)) in self
4855 .streams
4856 .reset()
4857 .map(|(&k, &v)| (k, v))
4858 .collect::<Vec<(u64, (u64, u64))>>()
4859 {
4860 let frame = frame::Frame::ResetStream {
4861 stream_id,
4862 error_code,
4863 final_size,
4864 };
4865
4866 if push_frame_to_pkt!(b, frames, frame, left) {
4867 self.streams.remove_reset(stream_id);
4868
4869 ack_eliciting = true;
4870 in_flight = true;
4871 }
4872 }
4873
4874 // Create STREAM_DATA_BLOCKED frames as needed.
4875 for (stream_id, limit) in self
4876 .streams
4877 .blocked()
4878 .map(|(&k, &v)| (k, v))
4879 .collect::<Vec<(u64, u64)>>()
4880 {
4881 let frame = frame::Frame::StreamDataBlocked { stream_id, limit };
4882
4883 if push_frame_to_pkt!(b, frames, frame, left) {
4884 self.streams.remove_blocked(stream_id);
4885 self.stream_data_blocked_sent_count =
4886 self.stream_data_blocked_sent_count.saturating_add(1);
4887
4888 ack_eliciting = true;
4889 in_flight = true;
4890 }
4891 }
4892
4893 // Create RETIRE_CONNECTION_ID frames as needed.
4894 let retire_dcid_seqs = self.ids.retire_dcid_seqs();
4895
4896 for seq_num in retire_dcid_seqs {
4897 // The sequence number specified in a RETIRE_CONNECTION_ID frame
4898 // MUST NOT refer to the Destination Connection ID field of the
4899 // packet in which the frame is contained.
4900 let dcid_seq = path.active_dcid_seq.ok_or(Error::InvalidState)?;
4901
4902 if seq_num == dcid_seq {
4903 continue;
4904 }
4905
4906 let frame = frame::Frame::RetireConnectionId { seq_num };
4907
4908 if push_frame_to_pkt!(b, frames, frame, left) {
4909 self.ids.mark_retire_dcid_seq(seq_num, false)?;
4910
4911 ack_eliciting = true;
4912 in_flight = true;
4913 } else {
4914 break;
4915 }
4916 }
4917 }
4918
4919 // Create CONNECTION_CLOSE frame. Try to send this only on the active
4920 // path, unless it is the last one available.
4921 if path.active() || n_paths == 1 {
4922 if let Some(conn_err) = self.local_error.as_ref() {
4923 if conn_err.is_app {
4924 // Create ApplicationClose frame.
4925 if pkt_type == Type::Short {
4926 let frame = frame::Frame::ApplicationClose {
4927 error_code: conn_err.error_code,
4928 reason: conn_err.reason.clone(),
4929 };
4930
4931 if push_frame_to_pkt!(b, frames, frame, left) {
4932 let pto = path.recovery.pto();
4933 self.draining_timer = Some(now + (pto * 3));
4934
4935 ack_eliciting = true;
4936 in_flight = true;
4937 }
4938 }
4939 } else {
4940 // Create ConnectionClose frame.
4941 let frame = frame::Frame::ConnectionClose {
4942 error_code: conn_err.error_code,
4943 frame_type: 0,
4944 reason: conn_err.reason.clone(),
4945 };
4946
4947 if push_frame_to_pkt!(b, frames, frame, left) {
4948 let pto = path.recovery.pto();
4949 self.draining_timer = Some(now + (pto * 3));
4950
4951 ack_eliciting = true;
4952 in_flight = true;
4953 }
4954 }
4955 }
4956 }
4957
4958 // Create CRYPTO frame.
4959 if crypto_ctx.crypto_stream.is_flushable() &&
4960 left > frame::MAX_CRYPTO_OVERHEAD &&
4961 !is_closing &&
4962 path.active()
4963 {
4964 let crypto_off = crypto_ctx.crypto_stream.send.off_front();
4965
4966 // Encode the frame.
4967 //
4968 // Instead of creating a `frame::Frame` object, encode the frame
4969 // directly into the packet buffer.
4970 //
4971 // First we reserve some space in the output buffer for writing the
4972 // frame header (we assume the length field is always a 2-byte
4973 // varint as we don't know the value yet).
4974 //
4975 // Then we emit the data from the crypto stream's send buffer.
4976 //
4977 // Finally we go back and encode the frame header with the now
4978 // available information.
4979 let hdr_off = b.off();
4980 let hdr_len = 1 + // frame type
4981 octets::varint_len(crypto_off) + // offset
4982 2; // length, always encode as 2-byte varint
4983
4984 if let Some(max_len) = left.checked_sub(hdr_len) {
4985 let (mut crypto_hdr, mut crypto_payload) =
4986 b.split_at(hdr_off + hdr_len)?;
4987
4988 // Write stream data into the packet buffer.
4989 let (len, _) = crypto_ctx
4990 .crypto_stream
4991 .send
4992 .emit(&mut crypto_payload.as_mut()[..max_len])?;
4993
4994 // Encode the frame's header.
4995 //
4996 // Due to how `OctetsMut::split_at()` works, `crypto_hdr` starts
4997 // from the initial offset of `b` (rather than the current
4998 // offset), so it needs to be advanced to the
4999 // initial frame offset.
5000 crypto_hdr.skip(hdr_off)?;
5001
5002 frame::encode_crypto_header(
5003 crypto_off,
5004 len as u64,
5005 &mut crypto_hdr,
5006 )?;
5007
5008 // Advance the packet buffer's offset.
5009 b.skip(hdr_len + len)?;
5010
5011 let frame = frame::Frame::CryptoHeader {
5012 offset: crypto_off,
5013 length: len,
5014 };
5015
5016 if push_frame_to_pkt!(b, frames, frame, left) {
5017 ack_eliciting = true;
5018 in_flight = true;
5019 has_data = true;
5020 }
5021 }
5022 }
5023
5024 // The preference of data-bearing frame to include in a packet
5025 // is managed by `self.emit_dgram`. However, whether any frames
5026 // can be sent depends on the state of their buffers. In the case
5027 // where one type is preferred but its buffer is empty, fall back
5028 // to the other type in order not to waste this function call.
5029 let mut dgram_emitted = false;
5030 let dgrams_to_emit = max_dgram_len.is_some();
5031 let stream_to_emit = self.streams.has_flushable();
5032
5033 let mut do_dgram = self.emit_dgram && dgrams_to_emit;
5034 let do_stream = !self.emit_dgram && stream_to_emit;
5035
5036 if !do_stream && dgrams_to_emit {
5037 do_dgram = true;
5038 }
5039
5040 // Create DATAGRAM frame.
5041 if (pkt_type == Type::Short || pkt_type == Type::ZeroRTT) &&
5042 left > frame::MAX_DGRAM_OVERHEAD &&
5043 !is_closing &&
5044 path.active() &&
5045 do_dgram
5046 {
5047 if let Some(max_dgram_payload) = max_dgram_len {
5048 while let Some(len) = self.dgram_send_queue.peek_front_len() {
5049 let hdr_off = b.off();
5050 let hdr_len = 1 + // frame type
5051 2; // length, always encode as 2-byte varint
5052
5053 if (hdr_len + len) <= left {
5054 // Front of the queue fits this packet, send it.
5055 match self.dgram_send_queue.pop() {
5056 Some(data) => {
5057 // Encode the frame.
5058 //
5059 // Instead of creating a `frame::Frame` object,
5060 // encode the frame directly into the packet
5061 // buffer.
5062 //
5063 // First we reserve some space in the output
5064 // buffer for writing the frame header (we
5065 // assume the length field is always a 2-byte
5066 // varint as we don't know the value yet).
5067 //
5068 // Then we emit the data from the DATAGRAM's
5069 // buffer.
5070 //
5071 // Finally we go back and encode the frame
5072 // header with the now available information.
5073 let (mut dgram_hdr, mut dgram_payload) =
5074 b.split_at(hdr_off + hdr_len)?;
5075
5076 dgram_payload.as_mut()[..len]
5077 .copy_from_slice(data.as_ref());
5078
5079 // Encode the frame's header.
5080 //
5081 // Due to how `OctetsMut::split_at()` works,
5082 // `dgram_hdr` starts from the initial offset
5083 // of `b` (rather than the current offset), so
5084 // it needs to be advanced to the initial frame
5085 // offset.
5086 dgram_hdr.skip(hdr_off)?;
5087
5088 frame::encode_dgram_header(
5089 len as u64,
5090 &mut dgram_hdr,
5091 )?;
5092
5093 // Advance the packet buffer's offset.
5094 b.skip(hdr_len + len)?;
5095
5096 let frame =
5097 frame::Frame::DatagramHeader { length: len };
5098
5099 if push_frame_to_pkt!(b, frames, frame, left) {
5100 ack_eliciting = true;
5101 in_flight = true;
5102 dgram_emitted = true;
5103 self.dgram_sent_count =
5104 self.dgram_sent_count.saturating_add(1);
5105 path.dgram_sent_count =
5106 path.dgram_sent_count.saturating_add(1);
5107 }
5108 },
5109
5110 None => continue,
5111 };
5112 } else if len > max_dgram_payload {
5113 // This dgram frame will never fit. Let's purge it.
5114 self.dgram_send_queue.pop();
5115 } else {
5116 break;
5117 }
5118 }
5119 }
5120 }
5121
5122 // Create a single STREAM frame for the first stream that is flushable.
5123 if (pkt_type == Type::Short || pkt_type == Type::ZeroRTT) &&
5124 left > frame::MAX_STREAM_OVERHEAD &&
5125 !is_closing &&
5126 path.active() &&
5127 !dgram_emitted
5128 {
5129 while let Some(priority_key) = self.streams.peek_flushable() {
5130 let stream_id = priority_key.id;
5131 let stream = match self.streams.get_mut(stream_id) {
5132 // Avoid sending frames for streams that were already stopped.
5133 //
5134 // This might happen if stream data was buffered but not yet
5135 // flushed on the wire when a STOP_SENDING frame is received.
5136 Some(v) if !v.send.is_stopped() => v,
5137 _ => {
5138 self.streams.remove_flushable(&priority_key);
5139 continue;
5140 },
5141 };
5142
5143 let stream_off = stream.send.off_front();
5144
5145 // Encode the frame.
5146 //
5147 // Instead of creating a `frame::Frame` object, encode the frame
5148 // directly into the packet buffer.
5149 //
5150 // First we reserve some space in the output buffer for writing
5151 // the frame header (we assume the length field is always a
5152 // 2-byte varint as we don't know the value yet).
5153 //
5154 // Then we emit the data from the stream's send buffer.
5155 //
5156 // Finally we go back and encode the frame header with the now
5157 // available information.
5158 let hdr_off = b.off();
5159 let hdr_len = 1 + // frame type
5160 octets::varint_len(stream_id) + // stream_id
5161 octets::varint_len(stream_off) + // offset
5162 2; // length, always encode as 2-byte varint
5163
5164 let max_len = match left.checked_sub(hdr_len) {
5165 Some(v) => v,
5166 None => {
5167 let priority_key = Arc::clone(&stream.priority_key);
5168 self.streams.remove_flushable(&priority_key);
5169
5170 continue;
5171 },
5172 };
5173
5174 let (mut stream_hdr, mut stream_payload) =
5175 b.split_at(hdr_off + hdr_len)?;
5176
5177 // Write stream data into the packet buffer.
5178 let (len, fin) =
5179 stream.send.emit(&mut stream_payload.as_mut()[..max_len])?;
5180
5181 // Encode the frame's header.
5182 //
5183 // Due to how `OctetsMut::split_at()` works, `stream_hdr` starts
5184 // from the initial offset of `b` (rather than the current
5185 // offset), so it needs to be advanced to the initial frame
5186 // offset.
5187 stream_hdr.skip(hdr_off)?;
5188
5189 frame::encode_stream_header(
5190 stream_id,
5191 stream_off,
5192 len as u64,
5193 fin,
5194 &mut stream_hdr,
5195 )?;
5196
5197 // Advance the packet buffer's offset.
5198 b.skip(hdr_len + len)?;
5199
5200 let frame = frame::Frame::StreamHeader {
5201 stream_id,
5202 offset: stream_off,
5203 length: len,
5204 fin,
5205 };
5206
5207 if push_frame_to_pkt!(b, frames, frame, left) {
5208 ack_eliciting = true;
5209 in_flight = true;
5210 has_data = true;
5211 }
5212
5213 let priority_key = Arc::clone(&stream.priority_key);
5214 // If the stream is no longer flushable, remove it from the queue
5215 if !stream.is_flushable() {
5216 self.streams.remove_flushable(&priority_key);
5217 } else if stream.incremental {
5218 // Shuffle the incremental stream to the back of the
5219 // queue.
5220 self.streams.remove_flushable(&priority_key);
5221 self.streams.insert_flushable(&priority_key);
5222 }
5223
5224 #[cfg(feature = "fuzzing")]
5225 // Coalesce STREAM frames when fuzzing.
5226 if left > frame::MAX_STREAM_OVERHEAD {
5227 continue;
5228 }
5229
5230 break;
5231 }
5232 }
5233
5234 // Alternate trying to send DATAGRAMs next time.
5235 self.emit_dgram = !dgram_emitted;
5236
5237 // If no other ack-eliciting frame is sent, include a PING frame
5238 // - if PTO probe needed; OR
5239 // - if we've sent too many non ack-eliciting packets without having
5240 // sent an ACK eliciting one; OR
5241 // - the application requested an ack-eliciting frame be sent.
5242 if (ack_elicit_required || path.needs_ack_eliciting) &&
5243 !ack_eliciting &&
5244 left >= 1 &&
5245 !is_closing
5246 {
5247 let frame = frame::Frame::Ping { mtu_probe: None };
5248
5249 if push_frame_to_pkt!(b, frames, frame, left) {
5250 ack_eliciting = true;
5251 in_flight = true;
5252 }
5253 }
5254
5255 if ack_eliciting && !is_pmtud_probe {
5256 path.needs_ack_eliciting = false;
5257 path.recovery.ping_sent(epoch);
5258 }
5259
5260 if !has_data &&
5261 !dgram_emitted &&
5262 cwnd_available > frame::MAX_STREAM_OVERHEAD
5263 {
5264 path.recovery.on_app_limited();
5265 }
5266
5267 if frames.is_empty() {
5268 // When we reach this point we are not able to write more, so set
5269 // app_limited to false.
5270 path.recovery.update_app_limited(false);
5271 return Err(Error::Done);
5272 }
5273
5274 // When coalescing a 1-RTT packet, we can't add padding in the UDP
5275 // datagram, so use PADDING frames instead.
5276 //
5277 // This is only needed if
5278 // 1) an Initial packet has already been written to the UDP datagram,
5279 // as Initial always requires padding.
5280 //
5281 // 2) this is a probing packet towards an unvalidated peer address.
5282 if (has_initial || !path.validated()) &&
5283 pkt_type == Type::Short &&
5284 left >= 1
5285 {
5286 let frame = frame::Frame::Padding { len: left };
5287
5288 if push_frame_to_pkt!(b, frames, frame, left) {
5289 in_flight = true;
5290 }
5291 }
5292
5293 // Pad payload so that it's always at least 4 bytes.
5294 if b.off() - payload_offset < PAYLOAD_MIN_LEN {
5295 let payload_len = b.off() - payload_offset;
5296
5297 let frame = frame::Frame::Padding {
5298 len: PAYLOAD_MIN_LEN - payload_len,
5299 };
5300
5301 #[allow(unused_assignments)]
5302 if push_frame_to_pkt!(b, frames, frame, left) {
5303 in_flight = true;
5304 }
5305 }
5306
5307 let payload_len = b.off() - payload_offset;
5308
5309 // Fill in payload length.
5310 if pkt_type != Type::Short {
5311 let len = pn_len + payload_len + crypto_overhead;
5312
5313 let (_, mut payload_with_len) = b.split_at(header_offset)?;
5314 payload_with_len
5315 .put_varint_with_len(len as u64, PAYLOAD_LENGTH_LEN)?;
5316 }
5317
5318 trace!(
5319 "{} tx pkt {} len={} pn={} {}",
5320 self.trace_id,
5321 hdr_trace.unwrap_or_default(),
5322 payload_len,
5323 pn,
5324 AddrTupleFmt(path.local_addr(), path.peer_addr())
5325 );
5326
5327 #[cfg(feature = "qlog")]
5328 let mut qlog_frames: Vec<qlog::events::quic::QuicFrame> =
5329 Vec::with_capacity(frames.len());
5330
5331 for frame in &mut frames {
5332 trace!("{} tx frm {:?}", self.trace_id, frame);
5333
5334 qlog_with_type!(QLOG_PACKET_TX, self.qlog, _q, {
5335 qlog_frames.push(frame.to_qlog());
5336 });
5337 }
5338
5339 qlog_with_type!(QLOG_PACKET_TX, self.qlog, q, {
5340 if let Some(header) = qlog_pkt_hdr {
5341 // Qlog packet raw info described at
5342 // https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema-00#section-5.1
5343 //
5344 // `length` includes packet headers and trailers (AEAD tag).
5345 let length = payload_len + payload_offset + crypto_overhead;
5346 let qlog_raw_info = RawInfo {
5347 length: Some(length as u64),
5348 payload_length: Some(payload_len as u64),
5349 data: None,
5350 };
5351
5352 let send_at_time =
5353 now.duration_since(q.start_time()).as_secs_f64() * 1000.0;
5354
5355 let ev_data =
5356 EventData::QuicPacketSent(qlog::events::quic::PacketSent {
5357 header,
5358 frames: Some(qlog_frames),
5359 raw: Some(qlog_raw_info),
5360 send_at_time: Some(send_at_time),
5361 ..Default::default()
5362 });
5363
5364 q.add_event_data_with_instant(ev_data, now).ok();
5365 }
5366 });
5367
5368 let aead = match crypto_ctx.crypto_seal {
5369 Some(ref mut v) => v,
5370 None => return Err(Error::InvalidState),
5371 };
5372
5373 let written = packet::encrypt_pkt(
5374 &mut b,
5375 pn,
5376 pn_len,
5377 payload_len,
5378 payload_offset,
5379 None,
5380 aead,
5381 )?;
5382
5383 let sent_pkt_has_data = if path.recovery.gcongestion_enabled() {
5384 has_data || dgram_emitted
5385 } else {
5386 has_data
5387 };
5388
5389 let sent_pkt = recovery::Sent {
5390 pkt_num: pn,
5391 frames,
5392 time_sent: now,
5393 time_acked: None,
5394 time_lost: None,
5395 size: if ack_eliciting { written } else { 0 },
5396 ack_eliciting,
5397 in_flight,
5398 delivered: 0,
5399 delivered_time: now,
5400 first_sent_time: now,
5401 is_app_limited: false,
5402 tx_in_flight: 0,
5403 lost: 0,
5404 has_data: sent_pkt_has_data,
5405 is_pmtud_probe,
5406 };
5407
5408 if in_flight && is_app_limited {
5409 path.recovery.delivery_rate_update_app_limited(true);
5410 }
5411
5412 self.next_pkt_num += 1;
5413
5414 let handshake_status = recovery::HandshakeStatus {
5415 has_handshake_keys: self.crypto_ctx[packet::Epoch::Handshake]
5416 .has_keys(),
5417 peer_verified_address: self.peer_verified_initial_address,
5418 completed: self.handshake_completed,
5419 };
5420
5421 self.on_packet_sent(send_pid, sent_pkt, epoch, handshake_status, now)?;
5422
5423 let path = self.paths.get_mut(send_pid)?;
5424 qlog_with_type!(QLOG_METRICS, self.qlog, q, {
5425 path.recovery.maybe_qlog(q, now);
5426 });
5427
5428 // Record sent packet size if we probe the path.
5429 if let Some(data) = challenge_data {
5430 path.add_challenge_sent(data, written, now);
5431 }
5432
5433 self.sent_count += 1;
5434 self.sent_bytes += written as u64;
5435 path.sent_count += 1;
5436 path.sent_bytes += written as u64;
5437
5438 if self.dgram_send_queue.byte_size() > path.recovery.cwnd_available() {
5439 path.recovery.update_app_limited(false);
5440 }
5441
5442 path.max_send_bytes = path.max_send_bytes.saturating_sub(written);
5443
5444 // On the client, drop initial state after sending an Handshake packet.
5445 if !self.is_server && hdr_ty == Type::Handshake {
5446 self.drop_epoch_state(packet::Epoch::Initial, now);
5447 }
5448
5449 // (Re)start the idle timer if we are sending the first ack-eliciting
5450 // packet since last receiving a packet.
5451 if ack_eliciting && !self.ack_eliciting_sent {
5452 if let Some(idle_timeout) = self.idle_timeout() {
5453 self.idle_timer = Some(now + idle_timeout);
5454 }
5455 }
5456
5457 if ack_eliciting {
5458 self.ack_eliciting_sent = true;
5459 }
5460
5461 Ok((pkt_type, written))
5462 }
5463
5464 fn on_packet_sent(
5465 &mut self, send_pid: usize, sent_pkt: recovery::Sent,
5466 epoch: packet::Epoch, handshake_status: recovery::HandshakeStatus,
5467 now: Instant,
5468 ) -> Result<()> {
5469 let path = self.paths.get_mut(send_pid)?;
5470
5471 // It's fine to set the skip counter based on a non-active path's values.
5472 let cwnd = path.recovery.cwnd();
5473 let max_datagram_size = path.recovery.max_datagram_size();
5474 self.pkt_num_spaces[epoch].on_packet_sent(&sent_pkt);
5475 self.pkt_num_manager.on_packet_sent(
5476 cwnd,
5477 max_datagram_size,
5478 self.handshake_completed,
5479 );
5480
5481 path.recovery.on_packet_sent(
5482 sent_pkt,
5483 epoch,
5484 handshake_status,
5485 now,
5486 &self.trace_id,
5487 );
5488
5489 Ok(())
5490 }
5491
5492 /// Returns the desired send time for the next packet.
5493 #[inline]
5494 pub fn get_next_release_time(&self) -> Option<ReleaseDecision> {
5495 Some(
5496 self.paths
5497 .get_active()
5498 .ok()?
5499 .recovery
5500 .get_next_release_time(),
5501 )
5502 }
5503
5504 /// Returns whether gcongestion is enabled.
5505 #[inline]
5506 pub fn gcongestion_enabled(&self) -> Option<bool> {
5507 Some(self.paths.get_active().ok()?.recovery.gcongestion_enabled())
5508 }
5509
5510 /// Returns the maximum pacing into the future.
5511 ///
5512 /// Equals 1/8 of the smoothed RTT, but at least 1ms and not greater than
5513 /// 5ms.
5514 pub fn max_release_into_future(&self) -> Duration {
5515 self.paths
5516 .get_active()
5517 .map(|p| p.recovery.rtt().mul_f64(0.125))
5518 .unwrap_or(Duration::from_millis(1))
5519 .max(Duration::from_millis(1))
5520 .min(Duration::from_millis(5))
5521 }
5522
    /// Returns whether pacing is enabled.
    ///
    /// This is the value of the pacing flag stored in the connection's
    /// recovery configuration.
    #[inline]
    pub fn pacing_enabled(&self) -> bool {
        self.recovery_config.pacing
    }
5528
5529 /// Returns the size of the send quantum, in bytes.
5530 ///
5531 /// This represents the maximum size of a packet burst as determined by the
5532 /// congestion control algorithm in use.
5533 ///
5534 /// Applications can, for example, use it in conjunction with segmentation
5535 /// offloading mechanisms as the maximum limit for outgoing aggregates of
5536 /// multiple packets.
5537 #[inline]
5538 pub fn send_quantum(&self) -> usize {
5539 match self.paths.get_active() {
5540 Ok(p) => p.recovery.send_quantum(),
5541 _ => 0,
5542 }
5543 }
5544
5545 /// Returns the size of the send quantum over the given 4-tuple, in bytes.
5546 ///
5547 /// This represents the maximum size of a packet burst as determined by the
5548 /// congestion control algorithm in use.
5549 ///
5550 /// Applications can, for example, use it in conjunction with segmentation
5551 /// offloading mechanisms as the maximum limit for outgoing aggregates of
5552 /// multiple packets.
5553 ///
5554 /// If the (`local_addr`, peer_addr`) 4-tuple relates to a non-existing
5555 /// path, this method returns 0.
5556 pub fn send_quantum_on_path(
5557 &self, local_addr: SocketAddr, peer_addr: SocketAddr,
5558 ) -> usize {
5559 self.paths
5560 .path_id_from_addrs(&(local_addr, peer_addr))
5561 .and_then(|pid| self.paths.get(pid).ok())
5562 .map(|path| path.recovery.send_quantum())
5563 .unwrap_or(0)
5564 }
5565
    /// Reads contiguous data from a stream into the provided slice.
    ///
    /// The slice must be sized by the caller and will be populated up to its
    /// capacity.
    ///
    /// On success the amount of bytes read and a flag indicating the fin state
    /// is returned as a tuple, or [`Done`] if there is no data to read.
    ///
    /// Reading data from a stream may trigger queueing of control messages
    /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
    ///
    /// This is a convenience wrapper that delegates to [`stream_recv_buf()`]
    /// (a `&mut [u8]` slice implements `bytes::BufMut`).
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`send()`]: struct.Connection.html#method.send
    /// [`stream_recv_buf()`]: struct.Connection.html#method.stream_recv_buf
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut buf = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// # let stream_id = 0;
    /// while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
    ///     println!("Got {} bytes on stream {}", read, stream_id);
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    #[inline]
    pub fn stream_recv(
        &mut self, stream_id: u64, out: &mut [u8],
    ) -> Result<(usize, bool)> {
        self.stream_recv_buf(stream_id, out)
    }
5602
    /// Reads contiguous data from a stream into the provided [`bytes::BufMut`].
    ///
    /// **NOTE**:
    /// The BufMut will be populated with all available data up to its capacity.
    /// Since some BufMut implementations, e.g., [`Vec<u8>`], dynamically
    /// allocate additional memory, the caller may use [`BufMut::limit()`]
    /// to limit the maximum amount of data that can be written.
    ///
    /// On success the amount of bytes read and a flag indicating the fin state
    /// is returned as a tuple, or [`Done`] if there is no data to read.
    /// [`BufMut::advance_mut()`] will have been called with the same number of
    /// total bytes.
    ///
    /// Reading data from a stream may trigger queueing of control messages
    /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
    ///
    /// [`BufMut::limit()`]: bytes::BufMut::limit
    /// [`BufMut::advance_mut()`]: bytes::BufMut::advance_mut
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`send()`]: struct.Connection.html#method.send
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # use bytes::BufMut as _;
    /// # let mut buf = Vec::new().limit(1024); // Read at most 1024 bytes
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// # let stream_id = 0;
    /// # let mut total_read = 0;
    /// while let Ok((read, fin)) = conn.stream_recv_buf(stream_id, &mut buf) {
    ///     println!("Got {} bytes on stream {}", read, stream_id);
    ///     total_read += read;
    ///     assert_eq!(buf.get_ref().len(), total_read);
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn stream_recv_buf<B: bytes::BufMut>(
        &mut self, stream_id: u64, out: B,
    ) -> Result<(usize, bool)> {
        // Drive the shared read/discard code path in "emit" mode, copying
        // received data into the caller's buffer.
        self.do_stream_recv(stream_id, RecvAction::Emit { out })
    }
5649
    /// Discard contiguous data from a stream without copying.
    ///
    /// On success the amount of bytes discarded and a flag indicating the fin
    /// state is returned as a tuple, or [`Done`] if there is no data to
    /// discard.
    ///
    /// Discarding data from a stream may trigger queueing of control messages
    /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`send()`]: struct.Connection.html#method.send
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// # let stream_id = 0;
    /// while let Ok((read, fin)) = conn.stream_discard(stream_id, 1) {
    ///     println!("Discarded {} byte(s) on stream {}", read, stream_id);
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn stream_discard(
        &mut self, stream_id: u64, len: usize,
    ) -> Result<(usize, bool)> {
        // `do_stream_recv()` is generic over the `BufMut` type carried by
        // `RecvAction`. No buffer is involved when discarding, but the
        // compiler still needs a concrete type, so `&mut [u8]` is named
        // arbitrarily.
        self.do_stream_recv::<&mut [u8]>(stream_id, RecvAction::Discard { len })
    }
5685
    // Reads or discards contiguous data from a stream.
    //
    // Passing an `action` of `RecvAction::Emit` results in a read into the
    // provided buffer, which will be populated up to its capacity.
    //
    // Passing an `action` of `RecvAction::Discard` results in discard of up
    // to the indicated length.
    //
    // On success the amount of bytes read or discarded, and a flag indicating
    // the fin state, is returned as a tuple, or [`Done`] if there is no data to
    // read or discard.
    //
    // Reading or discarding data from a stream may trigger queueing of control
    // messages (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
    //
    // [`Done`]: enum.Error.html#variant.Done
    // [`send()`]: struct.Connection.html#method.send
    fn do_stream_recv<B: bytes::BufMut>(
        &mut self, stream_id: u64, action: RecvAction<B>,
    ) -> Result<(usize, bool)> {
        // We can't read on our own unidirectional streams.
        if !stream::is_bidi(stream_id) &&
            stream::is_local(stream_id, self.is_server)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        let stream = self
            .streams
            .get_mut(stream_id)
            .ok_or(Error::InvalidStreamState(stream_id))?;

        if !stream.is_readable() {
            return Err(Error::Done);
        }

        // Copy out the values needed after the `stream` borrow ends.
        let local = stream.local;
        let priority_key = Arc::clone(&stream.priority_key);

        #[cfg(feature = "qlog")]
        let offset = stream.recv.off_front();

        // For qlog reporting only: record where the data ends up. This must
        // be determined before `action` is consumed below.
        #[cfg(feature = "qlog")]
        let to = match action {
            RecvAction::Emit { .. } => Some(DataRecipient::Application),

            RecvAction::Discard { .. } => Some(DataRecipient::Dropped),
        };

        let (read, fin) = match stream.recv.emit_or_discard(action) {
            Ok(v) => v,

            Err(e) => {
                // Collect the stream if it is now complete. This can happen if
                // we got a `StreamReset` error which will now be propagated to
                // the application, so we don't need to keep the stream's state
                // anymore.
                if stream.is_complete() {
                    self.streams.collect(stream_id, local);
                }

                self.streams.remove_readable(&priority_key);
                return Err(e);
            },
        };

        // Bytes were consumed (read or dropped), so account for them in
        // connection-level flow control.
        self.flow_control.add_consumed(read as u64);

        let readable = stream.is_readable();

        let complete = stream.is_complete();

        // Track the stream as "almost full" so the send path can emit a
        // MAX_STREAM_DATA frame for it.
        if stream.recv.almost_full() {
            self.streams.insert_almost_full(stream_id);
        }

        if !readable {
            self.streams.remove_readable(&priority_key);
        }

        if complete {
            self.streams.collect(stream_id, local);
        }

        qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
            let ev_data = EventData::QuicStreamDataMoved(
                qlog::events::quic::StreamDataMoved {
                    stream_id: Some(stream_id),
                    offset: Some(offset),
                    raw: Some(RawInfo {
                        length: Some(read as u64),
                        ..Default::default()
                    }),
                    from: Some(DataRecipient::Transport),
                    to,
                    additional_info: fin
                        .then_some(DataMovedAdditionalInfo::FinSet),
                },
            );

            let now = Instant::now();
            q.add_event_data_with_instant(ev_data, now).ok();
        });

        if priority_key.incremental && readable {
            // Shuffle the incremental stream to the back of the queue.
            self.streams.remove_readable(&priority_key);
            self.streams.insert_readable(&priority_key);
        }

        Ok((read, fin))
    }
5799
5800 /// Writes data to a stream.
5801 ///
5802 /// On success the number of bytes written is returned, or [`Done`] if no
5803 /// data was written (e.g. because the stream has no capacity).
5804 ///
5805 /// Applications can provide a 0-length buffer with the fin flag set to
5806 /// true. This will lead to a 0-length FIN STREAM frame being sent at the
5807 /// latest offset. The `Ok(0)` value is only returned when the application
5808 /// provided a 0-length buffer.
5809 ///
5810 /// In addition, if the peer has signalled that it doesn't want to receive
5811 /// any more data from this stream by sending the `STOP_SENDING` frame, the
5812 /// [`StreamStopped`] error will be returned instead of any data.
5813 ///
5814 /// Note that in order to avoid buffering an infinite amount of data in the
5815 /// stream's send buffer, streams are only allowed to buffer outgoing data
5816 /// up to the amount that the peer allows it to send (that is, up to the
5817 /// stream's outgoing flow control capacity).
5818 ///
5819 /// This means that the number of written bytes returned can be lower than
5820 /// the length of the input buffer when the stream doesn't have enough
5821 /// capacity for the operation to complete. The application should retry the
5822 /// operation once the stream is reported as writable again.
5823 ///
5824 /// Applications should call this method only after the handshake is
5825 /// completed (whenever [`is_established()`] returns `true`) or during
5826 /// early data if enabled (whenever [`is_in_early_data()`] returns `true`).
5827 ///
5828 /// [`Done`]: enum.Error.html#variant.Done
5829 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
5830 /// [`is_established()`]: struct.Connection.html#method.is_established
5831 /// [`is_in_early_data()`]: struct.Connection.html#method.is_in_early_data
5832 ///
5833 /// ## Examples:
5834 ///
5835 /// ```no_run
5836 /// # let mut buf = [0; 512];
5837 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
5838 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
5839 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
5840 /// # let peer = "127.0.0.1:1234".parse().unwrap();
5841 /// # let local = "127.0.0.1:4321".parse().unwrap();
5842 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
5843 /// # let stream_id = 0;
5844 /// conn.stream_send(stream_id, b"hello", true)?;
5845 /// # Ok::<(), quiche::Error>(())
5846 /// ```
5847 pub fn stream_send(
5848 &mut self, stream_id: u64, buf: &[u8], fin: bool,
5849 ) -> Result<usize> {
5850 self.stream_do_send(
5851 stream_id,
5852 buf,
5853 fin,
5854 |stream: &mut stream::Stream<F>,
5855 buf: &[u8],
5856 cap: usize,
5857 fin: bool| {
5858 stream.send.write(&buf[..cap], fin).map(|v| (v, v))
5859 },
5860 )
5861 }
5862
5863 /// Writes data to a stream with zero copying, instead, it appends the
5864 /// provided buffer directly to the send queue if the capacity allows
5865 /// it.
5866 ///
5867 /// When a partial write happens (including when [`Error::Done`] is
5868 /// returned) the remaining (unwritten) buffer will also be returned.
5869 /// The application should retry the operation once the stream is
5870 /// reported as writable again.
5871 pub fn stream_send_zc(
5872 &mut self, stream_id: u64, buf: F::Buf, len: Option<usize>, fin: bool,
5873 ) -> Result<(usize, Option<F::Buf>)>
5874 where
5875 F::Buf: BufSplit,
5876 {
5877 self.stream_do_send(
5878 stream_id,
5879 buf,
5880 fin,
5881 |stream: &mut stream::Stream<F>,
5882 buf: F::Buf,
5883 cap: usize,
5884 fin: bool| {
5885 let len = len.unwrap_or(usize::MAX).min(cap);
5886 let (sent, remaining) = stream.send.append_buf(buf, len, fin)?;
5887 Ok((sent, (sent, remaining)))
5888 },
5889 )
5890 }
5891
    /// Common implementation backing [`stream_send()`] and
    /// [`stream_send_zc()`].
    ///
    /// `write_fn` performs the actual transfer of (at most `cap` bytes of)
    /// `buf` into the stream's send buffer, and returns the number of bytes
    /// buffered together with the value to hand back to the caller.
    ///
    /// [`stream_send()`]: struct.Connection.html#method.stream_send
    /// [`stream_send_zc()`]: struct.Connection.html#method.stream_send_zc
    fn stream_do_send<B, R, SND>(
        &mut self, stream_id: u64, buf: B, fin: bool, write_fn: SND,
    ) -> Result<R>
    where
        B: AsRef<[u8]>,
        SND: FnOnce(&mut stream::Stream<F>, B, usize, bool) -> Result<(usize, R)>,
    {
        // We can't write on the peer's unidirectional streams.
        if !stream::is_bidi(stream_id) &&
            !stream::is_local(stream_id, self.is_server)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        let len = buf.as_ref().len();

        // Mark the connection as blocked if the connection-level flow control
        // limit doesn't let us buffer all the data.
        //
        // Note that this is separate from "send capacity" as that also takes
        // congestion control into consideration.
        if self.max_tx_data - self.tx_data < len as u64 {
            self.blocked_limit = Some(self.max_tx_data);
        }

        // Snapshot the connection-level send capacity before (re-)borrowing
        // the stream below.
        let cap = self.tx_cap;

        // Get existing stream or create a new one.
        let stream = match self.get_or_create_stream(stream_id, true) {
            Ok(v) => v,

            Err(Error::StreamLimit) => {
                // If the local endpoint has exhausted the peer's stream count
                // limit, record the current limit so that a STREAMS_BLOCKED
                // frame can be sent.
                if self.enable_send_streams_blocked &&
                    stream::is_local(stream_id, self.is_server)
                {
                    if stream::is_bidi(stream_id) {
                        let limit = self.streams.peer_max_streams_bidi();
                        self.streams_blocked_bidi_state.update_at(limit);
                    } else {
                        let limit = self.streams.peer_max_streams_uni();
                        self.streams_blocked_uni_state.update_at(limit);
                    }
                }

                return Err(Error::StreamLimit);
            },

            Err(e) => return Err(e),
        };

        #[cfg(feature = "qlog")]
        let offset = stream.send.off_back();

        // Capture the stream's state before the write so queue membership
        // can be updated afterwards.
        let was_writable = stream.is_writable();

        let was_flushable = stream.is_flushable();

        let is_complete = stream.is_complete();
        let is_readable = stream.is_readable();

        let priority_key = Arc::clone(&stream.priority_key);

        // Return early if the stream has been stopped, and collect its state
        // if complete.
        if let Err(Error::StreamStopped(e)) = stream.send.cap() {
            // Only collect the stream if it is complete and not readable.
            // If it is readable, it will get collected when stream_recv()
            // is used.
            //
            // The stream can't be writable if it has been stopped.
            if is_complete && !is_readable {
                let local = stream.local;
                self.streams.collect(stream_id, local);
            }

            return Err(Error::StreamStopped(e));
        };

        // Truncate the input buffer based on the connection's send capacity if
        // necessary.
        //
        // When the cap is zero, the method returns Ok(0) *only* when the passed
        // buffer is empty. We return Error::Done otherwise.
        if cap == 0 && len > 0 {
            if was_writable {
                // When `stream_writable_next()` returns a stream, the writable
                // mark is removed, but because the stream is blocked by the
                // connection-level send capacity it won't be marked as writable
                // again once the capacity increases.
                //
                // Since the stream is writable already, mark it here instead.
                self.streams.insert_writable(&priority_key);
            }

            return Err(Error::Done);
        }

        // When the write is truncated by connection capacity, defer the fin
        // flag (only part of the data is being buffered) and remember that
        // the truncation happened.
        let (cap, fin, blocked_by_cap) = if cap < len {
            (cap, false, true)
        } else {
            (len, fin, false)
        };

        let (sent, ret) = match write_fn(stream, buf, cap, fin) {
            Ok(v) => v,

            Err(e) => {
                self.streams.remove_writable(&priority_key);
                return Err(e);
            },
        };

        let incremental = stream.incremental;
        let priority_key = Arc::clone(&stream.priority_key);

        let flushable = stream.is_flushable();

        let writable = stream.is_writable();

        let empty_fin = len == 0 && fin;

        if sent < cap {
            // A short write means stream-level flow control got in the way:
            // record the blocked offset (once per offset) so the peer can be
            // notified.
            let max_off = stream.send.max_off();

            if stream.send.blocked_at() != Some(max_off) {
                stream.send.update_blocked_at(Some(max_off));
                self.streams.insert_blocked(stream_id, max_off);
            }
        } else {
            stream.send.update_blocked_at(None);
            self.streams.remove_blocked(stream_id);
        }

        // If the stream is now flushable push it to the flushable queue, but
        // only if it wasn't already queued.
        //
        // Consider the stream flushable also when we are sending a zero-length
        // frame that has the fin flag set.
        if (flushable || empty_fin) && !was_flushable {
            self.streams.insert_flushable(&priority_key);
        }

        if !writable {
            self.streams.remove_writable(&priority_key);
        } else if was_writable && blocked_by_cap {
            // When `stream_writable_next()` returns a stream, the writable
            // mark is removed, but because the stream is blocked by the
            // connection-level send capacity it won't be marked as writable
            // again once the capacity increases.
            //
            // Since the stream is writable already, mark it here instead.
            self.streams.insert_writable(&priority_key);
        }

        // Account for the newly buffered bytes at the connection level.
        self.tx_cap -= sent;

        self.tx_data += sent as u64;

        self.tx_buffered += sent;
        self.check_tx_buffered_invariant();

        qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
            let ev_data = EventData::QuicStreamDataMoved(
                qlog::events::quic::StreamDataMoved {
                    stream_id: Some(stream_id),
                    offset: Some(offset),
                    raw: Some(RawInfo {
                        length: Some(sent as u64),
                        ..Default::default()
                    }),
                    from: Some(DataRecipient::Application),
                    to: Some(DataRecipient::Transport),
                    additional_info: fin
                        .then_some(DataMovedAdditionalInfo::FinSet),
                },
            );

            let now = Instant::now();
            q.add_event_data_with_instant(ev_data, now).ok();
        });

        // Nothing was buffered even though capacity was available: surface
        // Done so the caller can retry later.
        if sent == 0 && cap > 0 {
            return Err(Error::Done);
        }

        if incremental && writable {
            // Shuffle the incremental stream to the back of the queue.
            self.streams.remove_writable(&priority_key);
            self.streams.insert_writable(&priority_key);
        }

        Ok(ret)
    }
6088
6089 /// Sets the priority for a stream.
6090 ///
6091 /// A stream's priority determines the order in which stream data is sent
6092 /// on the wire (streams with lower priority are sent first). Streams are
6093 /// created with a default priority of `127`.
6094 ///
6095 /// The target stream is created if it did not exist before calling this
6096 /// method.
6097 pub fn stream_priority(
6098 &mut self, stream_id: u64, urgency: u8, incremental: bool,
6099 ) -> Result<()> {
6100 // Get existing stream or create a new one, but if the stream
6101 // has already been closed and collected, ignore the prioritization.
6102 let stream = match self.get_or_create_stream(stream_id, true) {
6103 Ok(v) => v,
6104
6105 Err(Error::Done) => return Ok(()),
6106
6107 Err(e) => return Err(e),
6108 };
6109
6110 if stream.urgency == urgency && stream.incremental == incremental {
6111 return Ok(());
6112 }
6113
6114 stream.urgency = urgency;
6115 stream.incremental = incremental;
6116
6117 let new_priority_key = Arc::new(StreamPriorityKey {
6118 urgency: stream.urgency,
6119 incremental: stream.incremental,
6120 id: stream_id,
6121 ..Default::default()
6122 });
6123
6124 let old_priority_key =
6125 std::mem::replace(&mut stream.priority_key, new_priority_key.clone());
6126
6127 self.streams
6128 .update_priority(&old_priority_key, &new_priority_key);
6129
6130 Ok(())
6131 }
6132
    /// Shuts down reading or writing from/to the specified stream.
    ///
    /// When the `direction` argument is set to [`Shutdown::Read`], outstanding
    /// data in the stream's receive buffer is dropped, and no additional data
    /// is added to it. Data received after calling this method is still
    /// validated and acked but not stored, and [`stream_recv()`] will not
    /// return it to the application. In addition, a `STOP_SENDING` frame will
    /// be sent to the peer to signal it to stop sending data.
    ///
    /// When the `direction` argument is set to [`Shutdown::Write`], outstanding
    /// data in the stream's send buffer is dropped, and no additional data is
    /// added to it. Data passed to [`stream_send()`] after calling this method
    /// will be ignored. In addition, a `RESET_STREAM` frame will be sent to the
    /// peer to signal the reset.
    ///
    /// Locally-initiated unidirectional streams can only be closed in the
    /// [`Shutdown::Write`] direction. Remotely-initiated unidirectional streams
    /// can only be closed in the [`Shutdown::Read`] direction. Using an
    /// incorrect direction will return [`InvalidStreamState`].
    ///
    /// [`Shutdown::Read`]: enum.Shutdown.html#variant.Read
    /// [`Shutdown::Write`]: enum.Shutdown.html#variant.Write
    /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
    /// [`stream_send()`]: struct.Connection.html#method.stream_send
    /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
    pub fn stream_shutdown(
        &mut self, stream_id: u64, direction: Shutdown, err: u64,
    ) -> Result<()> {
        // Don't try to stop a local unidirectional stream.
        if direction == Shutdown::Read &&
            stream::is_local(stream_id, self.is_server) &&
            !stream::is_bidi(stream_id)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        // Don't try to reset a remote unidirectional stream.
        if direction == Shutdown::Write &&
            !stream::is_local(stream_id, self.is_server) &&
            !stream::is_bidi(stream_id)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        // Get existing stream.
        let stream = self.streams.get_mut(stream_id).ok_or(Error::Done)?;

        let priority_key = Arc::clone(&stream.priority_key);

        match direction {
            Shutdown::Read => {
                // Drop buffered receive data; the dropped bytes still count
                // as consumed for connection-level flow control.
                let consumed = stream.recv.shutdown()?;
                self.flow_control.add_consumed(consumed);

                // Only signal STOP_SENDING if the peer hasn't already
                // finished sending on this stream.
                if !stream.recv.is_fin() {
                    self.streams.insert_stopped(stream_id, err);
                }

                // Once shutdown, the stream is guaranteed to be non-readable.
                self.streams.remove_readable(&priority_key);

                // Track how many streams we stopped locally (saturating, so
                // the counter can never wrap).
                self.stopped_stream_local_count =
                    self.stopped_stream_local_count.saturating_add(1);
            },

            Shutdown::Write => {
                let (final_size, unsent) = stream.send.shutdown()?;

                // Claw back some flow control allowance from data that was
                // buffered but not actually sent before the stream was reset.
                self.tx_data = self.tx_data.saturating_sub(unsent);

                self.tx_buffered =
                    self.tx_buffered.saturating_sub(unsent as usize);

                // These drops in qlog are a bit weird, but the only way to ensure
                // that all bytes that are moved from App to Transport in
                // stream_do_send are eventually moved from Transport to Dropped.
                // Ideally we would add a Transport to Network transition also as
                // a way to indicate when bytes were transmitted vs dropped
                // without ever being sent.
                qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
                    let ev_data = EventData::QuicStreamDataMoved(
                        qlog::events::quic::StreamDataMoved {
                            stream_id: Some(stream_id),
                            offset: Some(final_size),
                            raw: Some(RawInfo {
                                length: Some(unsent),
                                ..Default::default()
                            }),
                            from: Some(DataRecipient::Transport),
                            to: Some(DataRecipient::Dropped),
                            ..Default::default()
                        },
                    );

                    q.add_event_data_with_instant(ev_data, Instant::now()).ok();
                });

                // Update send capacity.
                self.update_tx_cap();

                self.streams.insert_reset(stream_id, err, final_size);

                // Once shutdown, the stream is guaranteed to be non-writable.
                self.streams.remove_writable(&priority_key);

                // Track how many streams we reset locally (saturating, so
                // the counter can never wrap).
                self.reset_stream_local_count =
                    self.reset_stream_local_count.saturating_add(1);
            },
        }

        Ok(())
    }
6247
6248 /// Returns the stream's send capacity in bytes.
6249 ///
6250 /// If the specified stream doesn't exist (including when it has already
6251 /// been completed and closed), the [`InvalidStreamState`] error will be
6252 /// returned.
6253 ///
6254 /// In addition, if the peer has signalled that it doesn't want to receive
6255 /// any more data from this stream by sending the `STOP_SENDING` frame, the
6256 /// [`StreamStopped`] error will be returned.
6257 ///
6258 /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
6259 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
6260 #[inline]
6261 pub fn stream_capacity(&mut self, stream_id: u64) -> Result<usize> {
6262 if let Some(stream) = self.streams.get(stream_id) {
6263 let stream_cap = match stream.send.cap() {
6264 Ok(v) => v,
6265
6266 Err(Error::StreamStopped(e)) => {
6267 // Only collect the stream if it is complete and not
6268 // readable. If it is readable, it will get collected when
6269 // stream_recv() is used.
6270 if stream.is_complete() && !stream.is_readable() {
6271 let local = stream.local;
6272 self.streams.collect(stream_id, local);
6273 }
6274
6275 return Err(Error::StreamStopped(e));
6276 },
6277
6278 Err(e) => return Err(e),
6279 };
6280
6281 let cap = cmp::min(self.tx_cap, stream_cap);
6282 return Ok(cap);
6283 };
6284
6285 Err(Error::InvalidStreamState(stream_id))
6286 }
6287
6288 /// Returns the next stream that has data to read.
6289 ///
6290 /// Note that once returned by this method, a stream ID will not be returned
6291 /// again until it is "re-armed".
6292 ///
6293 /// The application will need to read all of the pending data on the stream,
6294 /// and new data has to be received before the stream is reported again.
6295 ///
6296 /// This is unlike the [`readable()`] method, that returns the same list of
6297 /// readable streams when called multiple times in succession.
6298 ///
6299 /// [`readable()`]: struct.Connection.html#method.readable
6300 pub fn stream_readable_next(&mut self) -> Option<u64> {
6301 let priority_key = self.streams.readable.front().clone_pointer()?;
6302
6303 self.streams.remove_readable(&priority_key);
6304
6305 Some(priority_key.id)
6306 }
6307
6308 /// Returns true if the stream has data that can be read.
6309 pub fn stream_readable(&self, stream_id: u64) -> bool {
6310 let stream = match self.streams.get(stream_id) {
6311 Some(v) => v,
6312
6313 None => return false,
6314 };
6315
6316 stream.is_readable()
6317 }
6318
6319 /// Returns the next stream that can be written to.
6320 ///
6321 /// Note that once returned by this method, a stream ID will not be returned
6322 /// again until it is "re-armed".
6323 ///
6324 /// This is unlike the [`writable()`] method, that returns the same list of
6325 /// writable streams when called multiple times in succession. It is not
6326 /// advised to use both `stream_writable_next()` and [`writable()`] on the
6327 /// same connection, as it may lead to unexpected results.
6328 ///
6329 /// The [`stream_writable()`] method can also be used to fine-tune when a
6330 /// stream is reported as writable again.
6331 ///
6332 /// [`stream_writable()`]: struct.Connection.html#method.stream_writable
6333 /// [`writable()`]: struct.Connection.html#method.writable
6334 pub fn stream_writable_next(&mut self) -> Option<u64> {
6335 // If there is not enough connection-level send capacity, none of the
6336 // streams are writable.
6337 if self.tx_cap == 0 {
6338 return None;
6339 }
6340
6341 let mut cursor = self.streams.writable.front();
6342
6343 while let Some(priority_key) = cursor.clone_pointer() {
6344 if let Some(stream) = self.streams.get(priority_key.id) {
6345 let cap = match stream.send.cap() {
6346 Ok(v) => v,
6347
6348 // Return the stream to the application immediately if it's
6349 // stopped.
6350 Err(_) =>
6351 return {
6352 self.streams.remove_writable(&priority_key);
6353
6354 Some(priority_key.id)
6355 },
6356 };
6357
6358 if cmp::min(self.tx_cap, cap) >= stream.send_lowat {
6359 self.streams.remove_writable(&priority_key);
6360 return Some(priority_key.id);
6361 }
6362 }
6363
6364 cursor.move_next();
6365 }
6366
6367 None
6368 }
6369
    /// Returns true if the stream has enough send capacity.
    ///
    /// When `len` more bytes can be buffered into the given stream's send
    /// buffer, `true` will be returned, `false` otherwise.
    ///
    /// In the latter case, if the additional data can't be buffered due to
    /// flow control limits, the peer will also be notified, and a "low send
    /// watermark" will be set for the stream, such that it is not going to be
    /// reported as writable again by [`stream_writable_next()`] until its send
    /// capacity reaches `len`.
    ///
    /// If the specified stream doesn't exist (including when it has already
    /// been completed and closed), the [`InvalidStreamState`] error will be
    /// returned.
    ///
    /// In addition, if the peer has signalled that it doesn't want to receive
    /// any more data from this stream by sending the `STOP_SENDING` frame, the
    /// [`StreamStopped`] error will be returned.
    ///
    /// [`stream_writable_next()`]: struct.Connection.html#method.stream_writable_next
    /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
    /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
    #[inline]
    pub fn stream_writable(
        &mut self, stream_id: u64, len: usize,
    ) -> Result<bool> {
        // Fast path: the stream already has enough capacity for `len` bytes.
        if self.stream_capacity(stream_id)? >= len {
            return Ok(true);
        }

        let stream = match self.streams.get_mut(stream_id) {
            Some(v) => v,

            None => return Err(Error::InvalidStreamState(stream_id)),
        };

        // Remember the requested amount (at least 1 byte) so that
        // `stream_writable_next()` only reports this stream once `len` bytes
        // can actually be buffered.
        stream.send_lowat = cmp::max(1, len);

        let is_writable = stream.is_writable();

        let priority_key = Arc::clone(&stream.priority_key);

        // Record that the connection-level flow control limit is what
        // prevents buffering `len` more bytes, so the peer can be notified.
        if self.max_tx_data - self.tx_data < len as u64 {
            self.blocked_limit = Some(self.max_tx_data);
        }

        if stream.send.cap()? < len {
            // Stream-level flow control is the bottleneck: record the
            // blocked offset, but only once per offset.
            let max_off = stream.send.max_off();
            if stream.send.blocked_at() != Some(max_off) {
                stream.send.update_blocked_at(Some(max_off));
                self.streams.insert_blocked(stream_id, max_off);
            }
        } else if is_writable {
            // When `stream_writable_next()` returns a stream, the writable
            // mark is removed, but because the stream is blocked by the
            // connection-level send capacity it won't be marked as writable
            // again once the capacity increases.
            //
            // Since the stream is writable already, mark it here instead.
            self.streams.insert_writable(&priority_key);
        }

        Ok(false)
    }
6434
6435 /// Returns true if all the data has been read from the specified stream.
6436 ///
6437 /// This instructs the application that all the data received from the
6438 /// peer on the stream has been read, and there won't be anymore in the
6439 /// future.
6440 ///
6441 /// Basically this returns true when the peer either set the `fin` flag
6442 /// for the stream, or sent `RESET_STREAM`.
6443 #[inline]
6444 pub fn stream_finished(&self, stream_id: u64) -> bool {
6445 let stream = match self.streams.get(stream_id) {
6446 Some(v) => v,
6447
6448 None => return true,
6449 };
6450
6451 stream.recv.is_fin()
6452 }
6453
    /// Returns the number of bidirectional streams that can be created
    /// before the peer's stream count limit is reached.
    ///
    /// This can be useful to know if it's possible to create a bidirectional
    /// stream without trying it first. A return value of zero means no more
    /// bidirectional streams can currently be created.
    #[inline]
    pub fn peer_streams_left_bidi(&self) -> u64 {
        self.streams.peer_streams_left_bidi()
    }
6463
    /// Returns the number of unidirectional streams that can be created
    /// before the peer's stream count limit is reached.
    ///
    /// This can be useful to know if it's possible to create a unidirectional
    /// stream without trying it first. A return value of zero means no more
    /// unidirectional streams can currently be created.
    #[inline]
    pub fn peer_streams_left_uni(&self) -> u64 {
        self.streams.peer_streams_left_uni()
    }
6473
    /// Returns an iterator over streams that have outstanding data to read.
    ///
    /// Note that the iterator will only include streams that were readable at
    /// the time the iterator itself was created (i.e. when `readable()` was
    /// called). To account for newly readable streams, the iterator needs to
    /// be created again.
    ///
    /// Alternatively, [`stream_readable_next()`] can be used to drain the
    /// readable streams one at a time.
    ///
    /// [`stream_readable_next()`]: struct.Connection.html#method.stream_readable_next
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut buf = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// // Iterate over readable streams.
    /// for stream_id in conn.readable() {
    ///     // Stream is readable, read until there's no more data.
    ///     while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
    ///         println!("Got {} bytes on stream {}", read, stream_id);
    ///     }
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    #[inline]
    pub fn readable(&self) -> StreamIter {
        self.streams.readable()
    }
6504
6505 /// Returns an iterator over streams that can be written in priority order.
6506 ///
6507 /// The priority order is based on RFC 9218 scheduling recommendations.
6508 /// Stream priority can be controlled using [`stream_priority()`]. In order
6509 /// to support fairness requirements, each time this method is called,
6510 /// internal state is updated. Therefore the iterator ordering can change
6511 /// between calls, even if no streams were added or removed.
6512 ///
6513 /// A "writable" stream is a stream that has enough flow control capacity to
6514 /// send data to the peer. To avoid buffering an infinite amount of data,
6515 /// streams are only allowed to buffer outgoing data up to the amount that
6516 /// the peer allows to send.
6517 ///
6518 /// Note that the iterator will only include streams that were writable at
6519 /// the time the iterator itself was created (i.e. when `writable()` was
6520 /// called). To account for newly writable streams, the iterator needs to be
6521 /// created again.
6522 ///
6523 /// ## Examples:
6524 ///
6525 /// ```no_run
6526 /// # let mut buf = [0; 512];
6527 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6528 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6529 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6530 /// # let local = socket.local_addr().unwrap();
6531 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6532 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6533 /// // Iterate over writable streams.
6534 /// for stream_id in conn.writable() {
6535 /// // Stream is writable, write some data.
6536 /// if let Ok(written) = conn.stream_send(stream_id, &buf, false) {
6537 /// println!("Written {} bytes on stream {}", written, stream_id);
6538 /// }
6539 /// }
6540 /// # Ok::<(), quiche::Error>(())
6541 /// ```
6542 /// [`stream_priority()`]: struct.Connection.html#method.stream_priority
6543 #[inline]
6544 pub fn writable(&self) -> StreamIter {
6545 // If there is not enough connection-level send capacity, none of the
6546 // streams are writable, so return an empty iterator.
6547 if self.tx_cap == 0 {
6548 return StreamIter::default();
6549 }
6550
6551 self.streams.writable()
6552 }
6553
6554 /// Returns the maximum possible size of egress UDP payloads.
6555 ///
6556 /// This is the maximum size of UDP payloads that can be sent, and depends
6557 /// on both the configured maximum send payload size of the local endpoint
6558 /// (as configured with [`set_max_send_udp_payload_size()`]), as well as
6559 /// the transport parameter advertised by the remote peer.
6560 ///
6561 /// Note that this value can change during the lifetime of the connection,
6562 /// but should remain stable across consecutive calls to [`send()`].
6563 ///
6564 /// [`set_max_send_udp_payload_size()`]:
6565 /// struct.Config.html#method.set_max_send_udp_payload_size
6566 /// [`send()`]: struct.Connection.html#method.send
6567 pub fn max_send_udp_payload_size(&self) -> usize {
6568 let max_datagram_size = self
6569 .paths
6570 .get_active()
6571 .ok()
6572 .map(|p| p.recovery.max_datagram_size());
6573
6574 if let Some(max_datagram_size) = max_datagram_size {
6575 if self.is_established() {
6576 // We cap the maximum packet size to 16KB or so, so that it can be
6577 // always encoded with a 2-byte varint.
6578 return cmp::min(16383, max_datagram_size);
6579 }
6580 }
6581
6582 // Allow for 1200 bytes (minimum QUIC packet size) during the
6583 // handshake.
6584 MIN_CLIENT_INITIAL_LEN
6585 }
6586
6587 /// Schedule an ack-eliciting packet on the active path.
6588 ///
6589 /// QUIC packets might not contain ack-eliciting frames during normal
6590 /// operating conditions. If the packet would already contain
6591 /// ack-eliciting frames, this method does not change any behavior.
6592 /// However, if the packet would not ordinarily contain ack-eliciting
6593 /// frames, this method ensures that a PING frame sent.
6594 ///
6595 /// Calling this method multiple times before [`send()`] has no effect.
6596 ///
6597 /// [`send()`]: struct.Connection.html#method.send
6598 pub fn send_ack_eliciting(&mut self) -> Result<()> {
6599 if self.is_closed() || self.is_draining() {
6600 return Ok(());
6601 }
6602 self.paths.get_active_mut()?.needs_ack_eliciting = true;
6603 Ok(())
6604 }
6605
6606 /// Schedule an ack-eliciting packet on the specified path.
6607 ///
6608 /// See [`send_ack_eliciting()`] for more detail. [`InvalidState`] is
6609 /// returned if there is no record of the path.
6610 ///
6611 /// [`send_ack_eliciting()`]: struct.Connection.html#method.send_ack_eliciting
6612 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
6613 pub fn send_ack_eliciting_on_path(
6614 &mut self, local: SocketAddr, peer: SocketAddr,
6615 ) -> Result<()> {
6616 if self.is_closed() || self.is_draining() {
6617 return Ok(());
6618 }
6619 let path_id = self
6620 .paths
6621 .path_id_from_addrs(&(local, peer))
6622 .ok_or(Error::InvalidState)?;
6623 self.paths.get_mut(path_id)?.needs_ack_eliciting = true;
6624 Ok(())
6625 }
6626
6627 /// Reads the first received DATAGRAM.
6628 ///
6629 /// On success the DATAGRAM's data is returned along with its size.
6630 ///
6631 /// [`Done`] is returned if there is no data to read.
6632 ///
6633 /// [`BufferTooShort`] is returned if the provided buffer is too small for
6634 /// the DATAGRAM.
6635 ///
6636 /// [`Done`]: enum.Error.html#variant.Done
6637 /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
6638 ///
6639 /// ## Examples:
6640 ///
6641 /// ```no_run
6642 /// # let mut buf = [0; 512];
6643 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6644 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6645 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6646 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6647 /// # let local = socket.local_addr().unwrap();
6648 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6649 /// let mut dgram_buf = [0; 512];
6650 /// while let Ok((len)) = conn.dgram_recv(&mut dgram_buf) {
6651 /// println!("Got {} bytes of DATAGRAM", len);
6652 /// }
6653 /// # Ok::<(), quiche::Error>(())
6654 /// ```
6655 #[inline]
6656 pub fn dgram_recv(&mut self, buf: &mut [u8]) -> Result<usize> {
6657 match self.dgram_recv_queue.pop() {
6658 Some(d) => {
6659 if d.as_ref().len() > buf.len() {
6660 return Err(Error::BufferTooShort);
6661 }
6662 let len = d.as_ref().len();
6663
6664 buf[..len].copy_from_slice(d.as_ref());
6665 Ok(len)
6666 },
6667
6668 None => Err(Error::Done),
6669 }
6670 }
6671
6672 /// Reads the first received DATAGRAM.
6673 ///
6674 /// This is the same as [`dgram_recv()`] but returns the DATAGRAM as an
6675 /// owned buffer instead of copying into the provided buffer.
6676 ///
6677 /// [`dgram_recv()`]: struct.Connection.html#method.dgram_recv
6678 #[inline]
6679 pub fn dgram_recv_buf(&mut self) -> Result<F::DgramBuf> {
6680 self.dgram_recv_queue.pop().ok_or(Error::Done)
6681 }
6682
    /// Reads the first received DATAGRAM without removing it from the queue.
    ///
    /// On success the DATAGRAM's data is returned along with the actual number
    /// of bytes peeked. The requested length cannot exceed the DATAGRAM's
    /// actual length.
    ///
    /// [`Done`] is returned if there is no data to read.
    ///
    /// [`BufferTooShort`] is returned if the provided buffer is smaller than
    /// the number of bytes to peek.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
    #[inline]
    pub fn dgram_recv_peek(&self, buf: &mut [u8], len: usize) -> Result<usize> {
        // Non-destructive read delegated to the receive queue.
        self.dgram_recv_queue.peek_front_bytes(buf, len)
    }
6700
    /// Returns the length of the first stored DATAGRAM.
    ///
    /// Returns `None` when there is no DATAGRAM queued for reading.
    #[inline]
    pub fn dgram_recv_front_len(&self) -> Option<usize> {
        self.dgram_recv_queue.peek_front_len()
    }
6706
    /// Returns the number of items in the DATAGRAM receive queue.
    ///
    /// These are DATAGRAMs that the application has not yet read via
    /// [`dgram_recv()`].
    ///
    /// [`dgram_recv()`]: struct.Connection.html#method.dgram_recv
    #[inline]
    pub fn dgram_recv_queue_len(&self) -> usize {
        self.dgram_recv_queue.len()
    }
6712
    /// Returns the total size of all items in the DATAGRAM receive queue.
    ///
    /// See also [`dgram_recv_queue_len()`] for the number of queued items.
    ///
    /// [`dgram_recv_queue_len()`]: struct.Connection.html#method.dgram_recv_queue_len
    #[inline]
    pub fn dgram_recv_queue_byte_size(&self) -> usize {
        self.dgram_recv_queue.byte_size()
    }
6718
    /// Returns the number of items in the DATAGRAM send queue.
    ///
    /// These are DATAGRAMs queued by [`dgram_send()`] that have not yet been
    /// flushed to the network.
    ///
    /// [`dgram_send()`]: struct.Connection.html#method.dgram_send
    #[inline]
    pub fn dgram_send_queue_len(&self) -> usize {
        self.dgram_send_queue.len()
    }
6724
    /// Returns the total size of all items in the DATAGRAM send queue.
    ///
    /// See also [`dgram_send_queue_len()`] for the number of queued items.
    ///
    /// [`dgram_send_queue_len()`]: struct.Connection.html#method.dgram_send_queue_len
    #[inline]
    pub fn dgram_send_queue_byte_size(&self) -> usize {
        self.dgram_send_queue.byte_size()
    }
6730
    /// Returns whether or not the DATAGRAM send queue is full.
    #[inline]
    pub fn is_dgram_send_queue_full(&self) -> bool {
        // Delegates to the send queue's capacity check.
        self.dgram_send_queue.is_full()
    }
6736
    /// Returns whether or not the DATAGRAM recv queue is full.
    #[inline]
    pub fn is_dgram_recv_queue_full(&self) -> bool {
        // Delegates to the receive queue's capacity check.
        self.dgram_recv_queue.is_full()
    }
6742
    /// Sends data in a DATAGRAM frame.
    ///
    /// [`Done`] is returned if no data was written.
    /// [`InvalidState`] is returned if the peer does not support DATAGRAM.
    /// [`BufferTooShort`] is returned if the DATAGRAM frame length is larger
    /// than peer's supported DATAGRAM frame length. Use
    /// [`dgram_max_writable_len()`] to get the largest supported DATAGRAM
    /// frame length.
    ///
    /// Note that there is no flow control of DATAGRAM frames, so in order to
    /// avoid buffering an infinite amount of frames we apply an internal
    /// limit.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`InvalidState`]: enum.Error.html#variant.InvalidState
    /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
    /// [`dgram_max_writable_len()`]:
    /// struct.Connection.html#method.dgram_max_writable_len
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut buf = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// conn.dgram_send(b"hello")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn dgram_send(&mut self, buf: &[u8]) -> Result<()> {
        // Convert the slice into the factory's owned DATAGRAM buffer type
        // and delegate to the owned-buffer variant.
        self.dgram_send_buf(F::dgram_buf_from_slice(buf))
    }
6778
6779 /// Sends data in a DATAGRAM frame.
6780 ///
6781 /// This is the same as [`dgram_send()`] but takes an owned buffer
6782 /// instead of a slice and avoids copying.
6783 ///
6784 /// [`dgram_send()`]: struct.Connection.html#method.dgram_send
6785 pub fn dgram_send_buf(&mut self, buf: F::DgramBuf) -> Result<()> {
6786 let max_payload_len = match self.dgram_max_writable_len() {
6787 Some(v) => v,
6788
6789 None => return Err(Error::InvalidState),
6790 };
6791
6792 if buf.as_ref().len() > max_payload_len {
6793 return Err(Error::BufferTooShort);
6794 }
6795
6796 self.dgram_send_queue.push(buf)?;
6797
6798 let active_path = self.paths.get_active_mut()?;
6799
6800 if self.dgram_send_queue.byte_size() >
6801 active_path.recovery.cwnd_available()
6802 {
6803 active_path.recovery.update_app_limited(false);
6804 }
6805
6806 Ok(())
6807 }
6808
    /// Purges queued outgoing DATAGRAMs matching the predicate.
    ///
    /// In other words, remove all elements `e` such that `f(&e)` returns true.
    ///
    /// ## Examples:
    /// ```no_run
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// conn.dgram_send(b"hello")?;
    /// conn.dgram_purge_outgoing(&|d: &[u8]| -> bool { d[0] == 0 });
    /// # Ok::<(), quiche::Error>(())
    /// ```
    #[inline]
    pub fn dgram_purge_outgoing<FN: Fn(&[u8]) -> bool>(&mut self, f: FN) {
        // Drop every queued outgoing DATAGRAM whose payload matches `f`.
        self.dgram_send_queue.purge(f);
    }
6829
6830 /// Returns the maximum DATAGRAM payload that can be sent.
6831 ///
6832 /// [`None`] is returned if the peer hasn't advertised a maximum DATAGRAM
6833 /// frame size.
6834 ///
6835 /// ## Examples:
6836 ///
6837 /// ```no_run
6838 /// # let mut buf = [0; 512];
6839 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6840 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6841 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6842 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6843 /// # let local = socket.local_addr().unwrap();
6844 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6845 /// if let Some(payload_size) = conn.dgram_max_writable_len() {
6846 /// if payload_size > 5 {
6847 /// conn.dgram_send(b"hello")?;
6848 /// }
6849 /// }
6850 /// # Ok::<(), quiche::Error>(())
6851 /// ```
6852 #[inline]
6853 pub fn dgram_max_writable_len(&self) -> Option<usize> {
6854 match self.peer_transport_params.max_datagram_frame_size {
6855 None => None,
6856 Some(peer_frame_len) => {
6857 let dcid = self.destination_id();
6858 // Start from the maximum packet size...
6859 let mut max_len = self.max_send_udp_payload_size();
6860 // ...subtract the Short packet header overhead...
6861 // (1 byte of pkt_len + len of dcid)
6862 max_len = max_len.saturating_sub(1 + dcid.len());
6863 // ...subtract the packet number (max len)...
6864 max_len = max_len.saturating_sub(packet::MAX_PKT_NUM_LEN);
6865 // ...subtract the crypto overhead...
6866 max_len = max_len.saturating_sub(
6867 self.crypto_ctx[packet::Epoch::Application]
6868 .crypto_overhead()?,
6869 );
6870 // ...clamp to what peer can support...
6871 max_len = cmp::min(peer_frame_len as usize, max_len);
6872 // ...subtract frame overhead, checked for underflow.
6873 // (1 byte of frame type + len of length )
6874 max_len.checked_sub(1 + frame::MAX_DGRAM_OVERHEAD)
6875 },
6876 }
6877 }
6878
    /// Returns whether this endpoint advertised DATAGRAM support to the peer,
    /// i.e. whether a local `max_datagram_frame_size` transport parameter is
    /// configured.
    fn dgram_enabled(&self) -> bool {
        self.local_transport_params
            .max_datagram_frame_size
            .is_some()
    }
6884
6885 /// Returns when the next timeout event will occur.
6886 ///
6887 /// Once the timeout Instant has been reached, the [`on_timeout()`] method
6888 /// should be called. A timeout of `None` means that the timer should be
6889 /// disarmed.
6890 ///
6891 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
6892 pub fn timeout_instant(&self) -> Option<Instant> {
6893 if self.is_closed() {
6894 return None;
6895 }
6896
6897 if self.is_draining() {
6898 // Draining timer takes precedence over all other timers. If it is
6899 // set it means the connection is closing so there's no point in
6900 // processing the other timers.
6901 self.draining_timer
6902 } else {
6903 // Use the lowest timer value (i.e. "sooner") among idle and loss
6904 // detection timers. If they are both unset (i.e. `None`) then the
6905 // result is `None`, but if at least one of them is set then a
6906 // `Some(...)` value is returned.
6907 let path_timer = self
6908 .paths
6909 .iter()
6910 .filter_map(|(_, p)| p.recovery.loss_detection_timer())
6911 .min();
6912
6913 let key_update_timer = self.crypto_ctx[packet::Epoch::Application]
6914 .key_update
6915 .as_ref()
6916 .map(|key_update| key_update.timer);
6917
6918 let timers = [self.idle_timer, path_timer, key_update_timer];
6919
6920 timers.iter().filter_map(|&x| x).min()
6921 }
6922 }
6923
6924 /// Returns the amount of time until the next timeout event.
6925 ///
6926 /// Once the given duration has elapsed, the [`on_timeout()`] method should
6927 /// be called. A timeout of `None` means that the timer should be disarmed.
6928 ///
6929 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
6930 pub fn timeout(&self) -> Option<Duration> {
6931 self.timeout_instant().map(|timeout| {
6932 let now = Instant::now();
6933
6934 if timeout <= now {
6935 Duration::ZERO
6936 } else {
6937 timeout.duration_since(now)
6938 }
6939 })
6940 }
6941
    /// Processes a timeout event.
    ///
    /// If no timeout has occurred it does nothing.
    ///
    /// Timers are checked in strict priority order: draining, idle,
    /// key-update discard, then per-path loss detection; finally failed
    /// path validations are surfaced and a replacement active path is
    /// selected if needed.
    pub fn on_timeout(&mut self) {
        let now = Instant::now();

        if let Some(draining_timer) = self.draining_timer {
            if draining_timer <= now {
                trace!("{} draining timeout expired", self.trace_id);

                self.mark_closed();
            }

            // Draining timer takes precedence over all other timers. If it is
            // set it means the connection is closing so there's no point in
            // processing the other timers.
            return;
        }

        // Idle timeout: close silently and record that the connection
        // timed out rather than being closed explicitly.
        if let Some(timer) = self.idle_timer {
            if timer <= now {
                trace!("{} idle timeout expired", self.trace_id);

                self.mark_closed();
                self.timed_out = true;
                return;
            }
        }

        if let Some(timer) = self.crypto_ctx[packet::Epoch::Application]
            .key_update
            .as_ref()
            .map(|key_update| key_update.timer)
        {
            if timer <= now {
                // Discard previous key once key update timer expired.
                let _ = self.crypto_ctx[packet::Epoch::Application]
                    .key_update
                    .take();
            }
        }

        let handshake_status = self.handshake_status();

        // Run loss detection on every path whose timer expired and fold the
        // per-path loss counts into the connection-wide statistics.
        for (_, p) in self.paths.iter_mut() {
            if let Some(timer) = p.recovery.loss_detection_timer() {
                if timer <= now {
                    trace!("{} loss detection timeout expired", self.trace_id);

                    let OnLossDetectionTimeoutOutcome {
                        lost_packets,
                        lost_bytes,
                    } = p.on_loss_detection_timeout(
                        handshake_status,
                        now,
                        self.is_server,
                        &self.trace_id,
                    );

                    self.lost_count += lost_packets;
                    self.lost_bytes += lost_bytes as u64;

                    qlog_with_type!(QLOG_METRICS, self.qlog, q, {
                        p.recovery.maybe_qlog(q, now);
                    });
                }
            }
        }

        // Notify timeout events to the application.
        self.paths.notify_failed_validations();

        // If the active path failed, try to find a new candidate.
        if self.paths.get_active_path_id().is_err() {
            match self.paths.find_candidate_path() {
                Some(pid) => {
                    if self.set_active_path(pid, now).is_err() {
                        // The connection cannot continue.
                        self.mark_closed();
                    }
                },

                // The connection cannot continue.
                None => {
                    self.mark_closed();
                },
            }
        }
    }
7031
7032 /// Requests the stack to perform path validation of the proposed 4-tuple.
7033 ///
7034 /// Probing new paths requires spare Connection IDs at both the host and the
7035 /// peer sides. If it is not the case, it raises an [`OutOfIdentifiers`].
7036 ///
7037 /// The probing of new addresses can only be done by the client. The server
7038 /// can only probe network paths that were previously advertised by
7039 /// [`PathEvent::New`]. If the server tries to probe such an unseen network
7040 /// path, this call raises an [`InvalidState`].
7041 ///
7042 /// The caller might also want to probe an existing path. In such case, it
7043 /// triggers a PATH_CHALLENGE frame, but it does not require spare CIDs.
7044 ///
7045 /// A server always probes a new path it observes. Calling this method is
7046 /// hence not required to validate a new path. However, a server can still
7047 /// request an additional path validation of the proposed 4-tuple.
7048 ///
7049 /// Calling this method several times before calling [`send()`] or
7050 /// [`send_on_path()`] results in a single probe being generated. An
7051 /// application wanting to send multiple in-flight probes must call this
7052 /// method again after having sent packets.
7053 ///
7054 /// Returns the Destination Connection ID sequence number associated to that
7055 /// path.
7056 ///
7057 /// [`PathEvent::New`]: enum.PathEvent.html#variant.New
7058 /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
7059 /// [`InvalidState`]: enum.Error.html#InvalidState
7060 /// [`send()`]: struct.Connection.html#method.send
7061 /// [`send_on_path()`]: struct.Connection.html#method.send_on_path
7062 pub fn probe_path(
7063 &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
7064 ) -> Result<u64> {
7065 // We may want to probe an existing path.
7066 let pid = match self.paths.path_id_from_addrs(&(local_addr, peer_addr)) {
7067 Some(pid) => pid,
7068 None => self.create_path_on_client(local_addr, peer_addr)?,
7069 };
7070
7071 let path = self.paths.get_mut(pid)?;
7072 path.request_validation();
7073
7074 path.active_dcid_seq.ok_or(Error::InvalidState)
7075 }
7076
7077 /// Migrates the connection to a new local address `local_addr`.
7078 ///
7079 /// The behavior is similar to [`migrate()`], with the nuance that the
7080 /// connection only changes the local address, but not the peer one.
7081 ///
7082 /// See [`migrate()`] for the full specification of this method.
7083 ///
7084 /// [`migrate()`]: struct.Connection.html#method.migrate
7085 pub fn migrate_source(&mut self, local_addr: SocketAddr) -> Result<u64> {
7086 let peer_addr = self.paths.get_active()?.peer_addr();
7087 self.migrate(local_addr, peer_addr)
7088 }
7089
    /// Migrates the connection over the given network path between `local_addr`
    /// and `peer_addr`.
    ///
    /// Connection migration can only be initiated by the client. Calling this
    /// method as a server returns [`InvalidState`].
    ///
    /// To initiate voluntary migration, there should be enough Connection IDs
    /// at both sides. If this requirement is not satisfied, this call returns
    /// [`OutOfIdentifiers`].
    ///
    /// Returns the Destination Connection ID associated to that migrated path.
    ///
    /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
    /// [`InvalidState`]: enum.Error.html#InvalidState
    pub fn migrate(
        &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
    ) -> Result<u64> {
        // Only clients may initiate migration.
        if self.is_server {
            return Err(Error::InvalidState);
        }

        // If the path already exists, mark it as the active one.
        let (pid, dcid_seq) = if let Some(pid) =
            self.paths.path_id_from_addrs(&(local_addr, peer_addr))
        {
            let path = self.paths.get_mut(pid)?;

            // If it is already active, do nothing.
            if path.active() {
                return path.active_dcid_seq.ok_or(Error::OutOfIdentifiers);
            }

            // Ensures that a Source Connection ID has been dedicated to this
            // path, or a free one is available. This is only required if the
            // host uses non-zero length Source Connection IDs.
            if !self.ids.zero_length_scid() &&
                path.active_scid_seq.is_none() &&
                self.ids.available_scids() == 0
            {
                return Err(Error::OutOfIdentifiers);
            }

            // Ensures that the migrated path has a Destination Connection ID.
            let dcid_seq = if let Some(dcid_seq) = path.active_dcid_seq {
                dcid_seq
            } else {
                // Take the lowest unused DCID and bind it to this path.
                let dcid_seq = self
                    .ids
                    .lowest_available_dcid_seq()
                    .ok_or(Error::OutOfIdentifiers)?;

                self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
                path.active_dcid_seq = Some(dcid_seq);

                dcid_seq
            };

            (pid, dcid_seq)
        } else {
            // Unknown 4-tuple: create a brand-new path (client-side only),
            // which comes with a DCID already assigned.
            let pid = self.create_path_on_client(local_addr, peer_addr)?;

            let dcid_seq = self
                .paths
                .get(pid)?
                .active_dcid_seq
                .ok_or(Error::InvalidState)?;

            (pid, dcid_seq)
        };

        // Change the active path.
        self.set_active_path(pid, Instant::now())?;

        Ok(dcid_seq)
    }
7165
    /// Provides additional source Connection IDs that the peer can use to reach
    /// this host.
    ///
    /// This triggers sending NEW_CONNECTION_ID frames if the provided Source
    /// Connection ID is not already present. In the case the caller tries to
    /// reuse a Connection ID with a different reset token, this raises an
    /// `InvalidState`.
    ///
    /// At any time, the peer cannot have more Destination Connection IDs than
    /// the maximum number of active Connection IDs it negotiated. In such case
    /// (i.e., when [`scids_left()`] returns 0), if the host agrees to
    /// request the removal of previous connection IDs, it sets the
    /// `retire_if_needed` parameter. Otherwise, an [`IdLimit`] is returned.
    ///
    /// Note that setting `retire_if_needed` does not prevent this function from
    /// returning an [`IdLimit`] in the case the caller wants to retire still
    /// unannounced Connection IDs.
    ///
    /// The caller is responsible for ensuring that the provided `scid` is not
    /// repeated several times over the connection. quiche ensures that as long
    /// as the provided Connection ID is still in use (i.e., not retired), it
    /// does not assign a different sequence number.
    ///
    /// Note that if the host uses zero-length Source Connection IDs, it cannot
    /// advertise Source Connection IDs and calling this method returns an
    /// [`InvalidState`].
    ///
    /// Returns the sequence number associated to the provided Connection ID.
    ///
    /// [`scids_left()`]: struct.Connection.html#method.scids_left
    /// [`IdLimit`]: enum.Error.html#IdLimit
    /// [`InvalidState`]: enum.Error.html#InvalidState
    pub fn new_scid(
        &mut self, scid: &ConnectionId, reset_token: u128, retire_if_needed: bool,
    ) -> Result<u64> {
        self.ids.new_scid(
            // Owned copy of the caller's CID.
            scid.to_vec().into(),
            Some(reset_token),
            // NOTE(review): the remaining positional arguments appear to
            // control advertisement and path association — confirm their
            // exact meaning against `ids.new_scid`'s signature.
            true,
            None,
            retire_if_needed,
        )
    }
7209
    /// Returns the number of source Connection IDs that are active. This is
    /// only meaningful if the host uses non-zero length Source Connection IDs.
    pub fn active_scids(&self) -> usize {
        // Delegates to the connection ID state tracker.
        self.ids.active_source_cids()
    }
7215
7216 /// Returns the number of source Connection IDs that should be provided
7217 /// to the peer without exceeding the limit it advertised.
7218 ///
7219 /// This will automatically limit the number of Connection IDs to the
7220 /// minimum between the locally configured active connection ID limit,
7221 /// and the one sent by the peer.
7222 ///
7223 /// To obtain the maximum possible value allowed by the peer an application
7224 /// can instead inspect the [`peer_active_conn_id_limit`] value.
7225 ///
7226 /// [`peer_active_conn_id_limit`]: struct.Stats.html#structfield.peer_active_conn_id_limit
7227 #[inline]
7228 pub fn scids_left(&self) -> usize {
7229 let max_active_source_cids = cmp::min(
7230 self.peer_transport_params.active_conn_id_limit,
7231 self.local_transport_params.active_conn_id_limit,
7232 ) as usize;
7233
7234 max_active_source_cids - self.active_scids()
7235 }
7236
    /// Requests the retirement of the destination Connection ID used by the
    /// host to reach its peer.
    ///
    /// This triggers sending RETIRE_CONNECTION_ID frames.
    ///
    /// If the application tries to retire a non-existing Destination Connection
    /// ID sequence number, or if it uses zero-length Destination Connection ID,
    /// this method returns an [`InvalidState`].
    ///
    /// At any time, the host must have at least one Destination ID. If the
    /// application tries to retire the last one, or if the caller tries to
    /// retire the destination Connection ID used by the current active path
    /// while having neither spare Destination Connection IDs nor validated
    /// network paths, this method returns an [`OutOfIdentifiers`]. This
    /// behavior prevents the caller from stalling the connection due to the
    /// lack of validated path to send non-probing packets.
    ///
    /// [`InvalidState`]: enum.Error.html#InvalidState
    /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
    pub fn retire_dcid(&mut self, dcid_seq: u64) -> Result<()> {
        // Zero-length DCIDs cannot be retired.
        if self.ids.zero_length_dcid() {
            return Err(Error::InvalidState);
        }

        let active_path_dcid_seq = self
            .paths
            .get_active()?
            .active_dcid_seq
            .ok_or(Error::InvalidState)?;

        let active_path_id = self.paths.get_active_path_id()?;

        // Refuse to retire the active path's DCID when there is neither a
        // spare DCID to replace it nor another usable path, as doing so
        // would stall the connection.
        if active_path_dcid_seq == dcid_seq &&
            self.ids.lowest_available_dcid_seq().is_none() &&
            !self
                .paths
                .iter()
                .any(|(pid, p)| pid != active_path_id && p.usable())
        {
            return Err(Error::OutOfIdentifiers);
        }

        if let Some(pid) = self.ids.retire_dcid(dcid_seq)? {
            // The retired Destination CID was associated to a given path. Let's
            // find an available DCID to associate to that path.
            let path = self.paths.get_mut(pid)?;
            let dcid_seq = self.ids.lowest_available_dcid_seq();

            if let Some(dcid_seq) = dcid_seq {
                self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
            }

            // May be `None` when no replacement DCID is available.
            path.active_dcid_seq = dcid_seq;
        }

        Ok(())
    }
7294
    /// Processes path-specific events.
    ///
    /// On success it returns a [`PathEvent`], or `None` when there are no
    /// events to report. Please refer to [`PathEvent`] for the exhaustive event
    /// list.
    ///
    /// Note that all events are edge-triggered, meaning that once reported they
    /// will not be reported again by calling this method again, until the event
    /// is re-armed.
    ///
    /// [`PathEvent`]: enum.PathEvent.html
    pub fn path_event_next(&mut self) -> Option<PathEvent> {
        // Pop (and thereby consume) the next queued path event.
        self.paths.pop_event()
    }
7309
    /// Returns the number of source Connection IDs that are retired.
    ///
    /// See [`retired_scid_next()`] to drain them one by one.
    ///
    /// [`retired_scid_next()`]: struct.Connection.html#method.retired_scid_next
    pub fn retired_scids(&self) -> usize {
        self.ids.retired_source_cids()
    }
7314
    /// Returns a source `ConnectionId` that has been retired.
    ///
    /// On success it returns a [`ConnectionId`], or `None` when there are no
    /// more retired connection IDs.
    ///
    /// [`ConnectionId`]: struct.ConnectionId.html
    pub fn retired_scid_next(&mut self) -> Option<ConnectionId<'static>> {
        // Pop one retired SCID; repeated calls drain the retired set.
        self.ids.pop_retired_scid()
    }
7324
    /// Returns the number of spare Destination Connection IDs, i.e.,
    /// Destination Connection IDs that are still unused.
    ///
    /// Note that this function returns 0 if the host uses zero length
    /// Destination Connection IDs.
    pub fn available_dcids(&self) -> usize {
        // Delegates to the connection ID state tracker.
        self.ids.available_dcids()
    }
7333
7334 /// Returns an iterator over destination `SockAddr`s whose association
7335 /// with `from` forms a known QUIC path on which packets can be sent to.
7336 ///
7337 /// This function is typically used in combination with [`send_on_path()`].
7338 ///
7339 /// Note that the iterator includes all the possible combination of
7340 /// destination `SockAddr`s, even those whose sending is not required now.
7341 /// In other words, this is another way for the application to recall from
7342 /// past [`PathEvent::New`] events.
7343 ///
7344 /// [`PathEvent::New`]: enum.PathEvent.html#variant.New
7345 /// [`send_on_path()`]: struct.Connection.html#method.send_on_path
7346 ///
7347 /// ## Examples:
7348 ///
7349 /// ```no_run
7350 /// # let mut out = [0; 512];
7351 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
7352 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
7353 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
7354 /// # let local = socket.local_addr().unwrap();
7355 /// # let peer = "127.0.0.1:1234".parse().unwrap();
7356 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
7357 /// // Iterate over possible destinations for the given local `SockAddr`.
7358 /// for dest in conn.paths_iter(local) {
7359 /// loop {
7360 /// let (write, send_info) =
7361 /// match conn.send_on_path(&mut out, Some(local), Some(dest)) {
7362 /// Ok(v) => v,
7363 ///
7364 /// Err(quiche::Error::Done) => {
7365 /// // Done writing for this destination.
7366 /// break;
7367 /// },
7368 ///
7369 /// Err(e) => {
7370 /// // An error occurred, handle it.
7371 /// break;
7372 /// },
7373 /// };
7374 ///
7375 /// socket.send_to(&out[..write], &send_info.to).unwrap();
7376 /// }
7377 /// }
7378 /// # Ok::<(), quiche::Error>(())
7379 /// ```
7380 #[inline]
7381 pub fn paths_iter(&self, from: SocketAddr) -> SocketAddrIter {
7382 // Instead of trying to identify whether packets will be sent on the
7383 // given 4-tuple, simply filter paths that cannot be used.
7384 SocketAddrIter {
7385 sockaddrs: self
7386 .paths
7387 .iter()
7388 .filter(|(_, p)| p.active_dcid_seq.is_some())
7389 .filter(|(_, p)| p.usable() || p.probing_required())
7390 .filter(|(_, p)| p.local_addr() == from)
7391 .map(|(_, p)| p.peer_addr())
7392 .collect(),
7393
7394 index: 0,
7395 }
7396 }
7397
7398 /// Closes the connection with the given error and reason.
7399 ///
7400 /// The `app` parameter specifies whether an application close should be
7401 /// sent to the peer. Otherwise a normal connection close is sent.
7402 ///
7403 /// If `app` is true but the connection is not in a state that is safe to
7404 /// send an application error (not established nor in early data), in
7405 /// accordance with [RFC
7406 /// 9000](https://www.rfc-editor.org/rfc/rfc9000.html#section-10.2.3-3), the
7407 /// error code is changed to APPLICATION_ERROR and the reason phrase is
7408 /// cleared.
7409 ///
7410 /// Returns [`Done`] if the connection had already been closed.
7411 ///
7412 /// Note that the connection will not be closed immediately. An application
7413 /// should continue calling the [`recv()`], [`send()`], [`timeout()`] and
7414 /// [`on_timeout()`] methods as normal, until the [`is_closed()`] method
7415 /// returns `true`.
7416 ///
7417 /// [`Done`]: enum.Error.html#variant.Done
7418 /// [`recv()`]: struct.Connection.html#method.recv
7419 /// [`send()`]: struct.Connection.html#method.send
7420 /// [`timeout()`]: struct.Connection.html#method.timeout
7421 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
7422 /// [`is_closed()`]: struct.Connection.html#method.is_closed
7423 pub fn close(&mut self, app: bool, err: u64, reason: &[u8]) -> Result<()> {
7424 if self.is_closed() || self.is_draining() {
7425 return Err(Error::Done);
7426 }
7427
7428 if self.local_error.is_some() {
7429 return Err(Error::Done);
7430 }
7431
7432 let is_safe_to_send_app_data =
7433 self.is_established() || self.is_in_early_data();
7434
7435 if app && !is_safe_to_send_app_data {
7436 // Clear error information.
7437 self.local_error = Some(ConnectionError {
7438 is_app: false,
7439 error_code: 0x0c,
7440 reason: vec![],
7441 });
7442 } else {
7443 self.local_error = Some(ConnectionError {
7444 is_app: app,
7445 error_code: err,
7446 reason: reason.to_vec(),
7447 });
7448 }
7449
7450 // When no packet was successfully processed close connection immediately.
7451 if self.recv_count == 0 {
7452 self.mark_closed();
7453 }
7454
7455 Ok(())
7456 }
7457
    /// Returns a string uniquely representing the connection.
    ///
    /// This can be used for logging purposes to differentiate between multiple
    /// connections.
    #[inline]
    pub fn trace_id(&self) -> &str {
        // Borrow the connection's owned trace ID string.
        &self.trace_id
    }
7466
    /// Returns the negotiated ALPN protocol.
    ///
    /// If no protocol has been negotiated, the returned value is empty.
    #[inline]
    pub fn application_proto(&self) -> &[u8] {
        self.alpn.as_ref()
    }
7474
    /// Returns the server name requested by the client.
    ///
    /// Returns `None` when the TLS handshake state has no server name (SNI).
    #[inline]
    pub fn server_name(&self) -> Option<&str> {
        self.handshake.server_name()
    }
7480
    /// Returns the peer's leaf certificate (if any) as a DER-encoded buffer.
    #[inline]
    pub fn peer_cert(&self) -> Option<&[u8]> {
        // Delegates to the TLS handshake state.
        self.handshake.peer_cert()
    }
7486
    /// Returns the peer's certificate chain (if any) as a vector of DER-encoded
    /// buffers.
    ///
    /// The certificate at index 0 is the peer's leaf certificate, the other
    /// certificates (if any) are the chain certificate authorities used to
    /// sign the leaf certificate.
    #[inline]
    pub fn peer_cert_chain(&self) -> Option<Vec<&[u8]>> {
        // Delegates to the TLS handshake state.
        self.handshake.peer_cert_chain()
    }
7497
    /// Returns the serialized cryptographic session for the connection.
    ///
    /// This can be used by a client to cache a connection's session, and resume
    /// it later using the [`set_session()`] method.
    ///
    /// Returns `None` when no serialized session has been stored.
    ///
    /// [`set_session()`]: struct.Connection.html#method.set_session
    #[inline]
    pub fn session(&self) -> Option<&[u8]> {
        self.session.as_deref()
    }
7508
7509 /// Returns the source connection ID.
7510 ///
7511 /// When there are multiple IDs, and if there is an active path, the ID used
7512 /// on that path is returned. Otherwise the oldest ID is returned.
7513 ///
7514 /// Note that the value returned can change throughout the connection's
7515 /// lifetime.
7516 #[inline]
7517 pub fn source_id(&self) -> ConnectionId<'_> {
7518 if let Ok(path) = self.paths.get_active() {
7519 if let Some(active_scid_seq) = path.active_scid_seq {
7520 if let Ok(e) = self.ids.get_scid(active_scid_seq) {
7521 return ConnectionId::from_ref(e.cid.as_ref());
7522 }
7523 }
7524 }
7525
7526 let e = self.ids.oldest_scid();
7527 ConnectionId::from_ref(e.cid.as_ref())
7528 }
7529
    /// Returns all active source connection IDs.
    ///
    /// An iterator is returned for all active IDs (i.e. ones that have not
    /// been explicitly retired yet). The iterator borrows from the
    /// connection's ID state.
    #[inline]
    pub fn source_ids(&self) -> impl Iterator<Item = &ConnectionId<'_>> {
        self.ids.scids_iter()
    }
7538
7539 /// Returns the destination connection ID.
7540 ///
7541 /// Note that the value returned can change throughout the connection's
7542 /// lifetime.
7543 #[inline]
7544 pub fn destination_id(&self) -> ConnectionId<'_> {
7545 if let Ok(path) = self.paths.get_active() {
7546 if let Some(active_dcid_seq) = path.active_dcid_seq {
7547 if let Ok(e) = self.ids.get_dcid(active_dcid_seq) {
7548 return ConnectionId::from_ref(e.cid.as_ref());
7549 }
7550 }
7551 }
7552
7553 let e = self.ids.oldest_dcid();
7554 ConnectionId::from_ref(e.cid.as_ref())
7555 }
7556
7557 /// Returns the PMTU for the active path if it exists.
7558 ///
7559 /// This requires no additonal packets to be sent but simply checks if PMTUD
7560 /// has completed and has found a valid PMTU.
7561 #[inline]
7562 pub fn pmtu(&self) -> Option<usize> {
7563 if let Ok(path) = self.paths.get_active() {
7564 path.pmtud.as_ref().and_then(|pmtud| pmtud.get_pmtu())
7565 } else {
7566 None
7567 }
7568 }
7569
7570 /// Revalidates the PMTU for the active path by sending a new probe packet
7571 /// of PMTU size. If the probe is dropped PMTUD will restart and find a new
7572 /// valid PMTU.
7573 #[inline]
7574 pub fn revalidate_pmtu(&mut self) {
7575 if let Ok(active_path) = self.paths.get_active_mut() {
7576 if let Some(pmtud) = active_path.pmtud.as_mut() {
7577 pmtud.revalidate_pmtu();
7578 }
7579 }
7580 }
7581
    /// Returns true if the connection handshake is complete.
    #[inline]
    pub fn is_established(&self) -> bool {
        // Set once the TLS handshake reports completion in `do_handshake()`.
        self.handshake_completed
    }
7587
    /// Returns true if the connection is resumed.
    #[inline]
    pub fn is_resumed(&self) -> bool {
        // Resumption state is tracked by the TLS handshake layer.
        self.handshake.is_resumed()
    }
7593
    /// Returns true if the connection has a pending handshake that has
    /// progressed enough to send or receive early data.
    #[inline]
    pub fn is_in_early_data(&self) -> bool {
        // Early-data (0-RTT) readiness is determined by the TLS layer.
        self.handshake.is_in_early_data()
    }
7600
    /// Returns the early data reason for the connection.
    ///
    /// This status can be useful for logging and debugging. See [BoringSSL]
    /// documentation for a definition of the reasons.
    ///
    /// [BoringSSL]: https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#ssl_early_data_reason_t
    #[inline]
    pub fn early_data_reason(&self) -> u32 {
        // Raw reason code as reported by the TLS implementation.
        self.handshake.early_data_reason()
    }
7611
    /// Returns whether there is stream or DATAGRAM data available to read.
    #[inline]
    pub fn is_readable(&self) -> bool {
        // Readable if any stream has buffered data, or the DATAGRAM receive
        // queue has at least one entry.
        self.streams.has_readable() || self.dgram_recv_front_len().is_some()
    }
7617
7618 /// Returns whether the network path with local address `from` and remote
7619 /// address `peer` has been validated.
7620 ///
7621 /// If the 4-tuple does not exist over the connection, returns an
7622 /// [`InvalidState`].
7623 ///
7624 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
7625 pub fn is_path_validated(
7626 &self, from: SocketAddr, to: SocketAddr,
7627 ) -> Result<bool> {
7628 let pid = self
7629 .paths
7630 .path_id_from_addrs(&(from, to))
7631 .ok_or(Error::InvalidState)?;
7632
7633 Ok(self.paths.get(pid)?.validated())
7634 }
7635
    /// Returns true if the connection is draining.
    ///
    /// If this returns `true`, the connection object cannot yet be dropped, but
    /// no new application data can be sent or received. An application should
    /// continue calling the [`recv()`], [`timeout()`], and [`on_timeout()`]
    /// methods as normal, until the [`is_closed()`] method returns `true`.
    ///
    /// In contrast, once `is_draining()` returns `true`, calling [`send()`]
    /// is not required because no new outgoing packets will be generated.
    ///
    /// [`recv()`]: struct.Connection.html#method.recv
    /// [`send()`]: struct.Connection.html#method.send
    /// [`timeout()`]: struct.Connection.html#method.timeout
    /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
    /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn is_draining(&self) -> bool {
        // The draining timer is armed only while the connection is draining.
        self.draining_timer.is_some()
    }
7655
    /// Returns true if the connection is closed.
    ///
    /// If this returns true, the connection object can be dropped.
    #[inline]
    pub fn is_closed(&self) -> bool {
        self.closed
    }
7663
    /// Returns true if the connection was closed due to the idle timeout.
    #[inline]
    pub fn is_timed_out(&self) -> bool {
        self.timed_out
    }
7669
    /// Returns the error received from the peer, if any.
    ///
    /// Note that a `Some` return value does not necessarily imply
    /// [`is_closed()`] or any other connection state.
    ///
    /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn peer_error(&self) -> Option<&ConnectionError> {
        // Borrow the stored peer error rather than cloning it.
        self.peer_error.as_ref()
    }
7680
    /// Returns the error [`close()`] was called with, or internally
    /// created quiche errors, if any.
    ///
    /// Note that a `Some` return value does not necessarily imply
    /// [`is_closed()`] or any other connection state.
    /// `Some` also does not guarantee that the error has been sent to
    /// or received by the peer.
    ///
    /// [`close()`]: struct.Connection.html#method.close
    /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn local_error(&self) -> Option<&ConnectionError> {
        // Borrow the stored local error rather than cloning it.
        self.local_error.as_ref()
    }
7695
    /// Collects and returns statistics about the connection.
    #[inline]
    pub fn stats(&self) -> Stats {
        // Snapshot the connection-level counters into a plain data struct.
        Stats {
            recv: self.recv_count,
            sent: self.sent_count,
            lost: self.lost_count,
            spurious_lost: self.spurious_lost_count,
            retrans: self.retrans_count,
            sent_bytes: self.sent_bytes,
            recv_bytes: self.recv_bytes,
            acked_bytes: self.acked_bytes,
            lost_bytes: self.lost_bytes,
            stream_retrans_bytes: self.stream_retrans_bytes,
            dgram_recv: self.dgram_recv_count,
            dgram_sent: self.dgram_sent_count,
            paths_count: self.paths.len(),
            reset_stream_count_local: self.reset_stream_local_count,
            stopped_stream_count_local: self.stopped_stream_local_count,
            reset_stream_count_remote: self.reset_stream_remote_count,
            stopped_stream_count_remote: self.stopped_stream_remote_count,
            data_blocked_sent_count: self.data_blocked_sent_count,
            stream_data_blocked_sent_count: self.stream_data_blocked_sent_count,
            data_blocked_recv_count: self.data_blocked_recv_count,
            stream_data_blocked_recv_count: self.stream_data_blocked_recv_count,
            streams_blocked_bidi_recv_count: self.streams_blocked_bidi_recv_count,
            streams_blocked_uni_recv_count: self.streams_blocked_uni_recv_count,
            path_challenge_rx_count: self.path_challenge_rx_count,
            // Derived value, computed across all known paths.
            bytes_in_flight_duration: self.bytes_in_flight_duration(),
            tx_buffered_state: self.tx_buffered_state,
        }
    }
7728
7729 /// Returns the sum of the durations when each path in the
7730 /// connection was actively sending bytes or waiting for acks.
7731 /// Note that this could result in a duration that is longer than
7732 /// the actual connection duration in cases where multiple paths
7733 /// are active for extended periods of time. In practice only 1
7734 /// path is typically active at a time.
7735 /// TODO revisit computation if in the future multiple paths are
7736 /// often active at the same time.
7737 fn bytes_in_flight_duration(&self) -> Duration {
7738 self.paths.iter().fold(Duration::ZERO, |acc, (_, path)| {
7739 acc + path.bytes_in_flight_duration()
7740 })
7741 }
7742
7743 /// Returns reference to peer's transport parameters. Returns `None` if we
7744 /// have not yet processed the peer's transport parameters.
7745 pub fn peer_transport_params(&self) -> Option<&TransportParams> {
7746 if !self.parsed_peer_transport_params {
7747 return None;
7748 }
7749
7750 Some(&self.peer_transport_params)
7751 }
7752
    /// Collects and returns statistics about each known path for the
    /// connection.
    pub fn path_stats(&self) -> impl Iterator<Item = PathStats> + '_ {
        // One `PathStats` snapshot per known path, in tracker order.
        self.paths.iter().map(|(_, p)| p.stats())
    }
7758
    /// Returns whether or not this is a server-side connection.
    pub fn is_server(&self) -> bool {
        self.is_server
    }
7763
    /// Encodes the local transport parameters and hands them to the TLS
    /// handshake so they can be sent in the appropriate extension.
    fn encode_transport_params(&mut self) -> Result<()> {
        self.handshake.set_quic_transport_params(
            &self.local_transport_params,
            self.is_server,
        )
    }
7770
    /// Validates and applies the peer's transport parameters.
    ///
    /// Validates the authenticating connection ID parameters
    /// (`initial_source_connection_id`, `original_destination_connection_id`
    /// and `retry_source_connection_id`) against the connection's own view,
    /// then applies the remaining parameters via
    /// `process_peer_transport_params()`. Any mismatch or missing mandatory
    /// parameter results in [`Error::InvalidTransportParam`].
    fn parse_peer_transport_params(
        &mut self, peer_params: TransportParams,
    ) -> Result<()> {
        // Validate initial_source_connection_id.
        match &peer_params.initial_source_connection_id {
            Some(v) if v != &self.destination_id() =>
                return Err(Error::InvalidTransportParam),

            Some(_) => (),

            // initial_source_connection_id must be sent by
            // both endpoints.
            None => return Err(Error::InvalidTransportParam),
        }

        // Validate original_destination_connection_id.
        if let Some(odcid) = &self.odcid {
            match &peer_params.original_destination_connection_id {
                Some(v) if v != odcid =>
                    return Err(Error::InvalidTransportParam),

                Some(_) => (),

                // original_destination_connection_id must be
                // sent by the server.
                None if !self.is_server =>
                    return Err(Error::InvalidTransportParam),

                None => (),
            }
        }

        // Validate retry_source_connection_id.
        if let Some(rscid) = &self.rscid {
            match &peer_params.retry_source_connection_id {
                Some(v) if v != rscid =>
                    return Err(Error::InvalidTransportParam),

                Some(_) => (),

                // retry_source_connection_id must be sent by
                // the server.
                None => return Err(Error::InvalidTransportParam),
            }
        }

        self.process_peer_transport_params(peer_params)?;

        // Only mark the parameters as parsed once they have been fully
        // validated and applied.
        self.parsed_peer_transport_params = true;

        Ok(())
    }
7823
7824 fn process_peer_transport_params(
7825 &mut self, peer_params: TransportParams,
7826 ) -> Result<()> {
7827 self.max_tx_data = peer_params.initial_max_data;
7828
7829 // Update send capacity.
7830 self.update_tx_cap();
7831
7832 self.streams
7833 .update_peer_max_streams_bidi(peer_params.initial_max_streams_bidi);
7834 self.streams
7835 .update_peer_max_streams_uni(peer_params.initial_max_streams_uni);
7836
7837 let max_ack_delay = Duration::from_millis(peer_params.max_ack_delay);
7838
7839 self.recovery_config.max_ack_delay = max_ack_delay;
7840
7841 let active_path = self.paths.get_active_mut()?;
7842
7843 active_path.recovery.update_max_ack_delay(max_ack_delay);
7844
7845 if active_path
7846 .pmtud
7847 .as_ref()
7848 .map(|pmtud| pmtud.should_probe())
7849 .unwrap_or(false)
7850 {
7851 active_path.recovery.pmtud_update_max_datagram_size(
7852 active_path
7853 .pmtud
7854 .as_mut()
7855 .expect("PMTUD existence verified above")
7856 .get_probe_size()
7857 .min(peer_params.max_udp_payload_size as usize),
7858 );
7859 } else {
7860 active_path.recovery.update_max_datagram_size(
7861 peer_params.max_udp_payload_size as usize,
7862 );
7863 }
7864
7865 // Record the max_active_conn_id parameter advertised by the peer.
7866 self.ids
7867 .set_source_conn_id_limit(peer_params.active_conn_id_limit);
7868
7869 self.peer_transport_params = peer_params;
7870
7871 Ok(())
7872 }
7873
    /// Continues the handshake.
    ///
    /// If the connection is already established, it does nothing.
    ///
    /// Drives the TLS state machine, applies any configuration changes made
    /// by TLS callbacks (via `ExData`), and parses the peer's transport
    /// parameters as soon as they become available — possibly before the
    /// handshake completes, to enable 0.5-RTT data.
    fn do_handshake(&mut self, now: Instant) -> Result<()> {
        // Bundle the mutable connection state that TLS callbacks may need to
        // read or update while the handshake progresses.
        let mut ex_data = tls::ExData {
            application_protos: &self.application_protos,

            crypto_ctx: &mut self.crypto_ctx,

            session: &mut self.session,

            local_error: &mut self.local_error,

            keylog: self.keylog.as_mut(),

            trace_id: &self.trace_id,

            local_transport_params: self.local_transport_params.clone(),

            recovery_config: self.recovery_config,

            tx_cap_factor: self.tx_cap_factor,

            pmtud: None,

            is_server: self.is_server,

            use_initial_max_data_as_flow_control_win: false,
        };

        // After completion only post-handshake messages (e.g. session
        // tickets, key updates) need processing.
        if self.handshake_completed {
            return self.handshake.process_post_handshake(&mut ex_data);
        }

        match self.handshake.do_handshake(&mut ex_data) {
            Ok(_) => (),

            // `Done` means the handshake is still in progress: apply any
            // callback-driven configuration and return early.
            Err(Error::Done) => {
                // Apply in-handshake configuration from callbacks if the path's
                // Recovery module can still be reinitialized.
                if self
                    .paths
                    .get_active()
                    .map(|p| p.can_reinit_recovery())
                    .unwrap_or(false)
                {
                    if ex_data.recovery_config != self.recovery_config {
                        if let Ok(path) = self.paths.get_active_mut() {
                            self.recovery_config = ex_data.recovery_config;
                            path.reinit_recovery(&self.recovery_config);
                        }
                    }

                    if ex_data.tx_cap_factor != self.tx_cap_factor {
                        self.tx_cap_factor = ex_data.tx_cap_factor;
                    }

                    if let Some((discover, max_probes)) = ex_data.pmtud {
                        self.paths.set_discover_pmtu_on_existing_paths(
                            discover,
                            self.recovery_config.max_send_udp_payload_size,
                            max_probes,
                        );
                    }

                    if ex_data.local_transport_params !=
                        self.local_transport_params
                    {
                        self.streams.set_max_streams_bidi(
                            ex_data
                                .local_transport_params
                                .initial_max_streams_bidi,
                        );

                        self.local_transport_params =
                            ex_data.local_transport_params;
                    }
                }

                if ex_data.use_initial_max_data_as_flow_control_win {
                    self.enable_use_initial_max_data_as_flow_control_win();
                }

                // Try to parse transport parameters as soon as the first flight
                // of handshake data is processed.
                //
                // This is potentially dangerous as the handshake hasn't been
                // completed yet, though it's required to be able to send data
                // in 0.5 RTT.
                let raw_params = self.handshake.quic_transport_params();

                if !self.parsed_peer_transport_params && !raw_params.is_empty() {
                    let peer_params = TransportParams::decode(
                        raw_params,
                        self.is_server,
                        self.peer_transport_params_track_unknown,
                    )?;

                    self.parse_peer_transport_params(peer_params)?;
                }

                return Ok(());
            },

            Err(e) => return Err(e),
        };

        self.handshake_completed = self.handshake.is_completed();

        self.alpn = self.handshake.alpn_protocol().to_vec();

        // Parse transport parameters now if they were not already processed
        // during an earlier (in-progress) invocation.
        let raw_params = self.handshake.quic_transport_params();

        if !self.parsed_peer_transport_params && !raw_params.is_empty() {
            let peer_params = TransportParams::decode(
                raw_params,
                self.is_server,
                self.peer_transport_params_track_unknown,
            )?;

            self.parse_peer_transport_params(peer_params)?;
        }

        if self.handshake_completed {
            // The handshake is considered confirmed at the server when the
            // handshake completes, at which point we can also drop the
            // handshake epoch.
            if self.is_server {
                self.handshake_confirmed = true;

                self.drop_epoch_state(packet::Epoch::Handshake, now);
            }

            // Once the handshake is completed there's no point in processing
            // 0-RTT packets anymore, so clear the buffer now.
            self.undecryptable_pkts.clear();

            trace!("{} connection established: proto={:?} cipher={:?} curve={:?} sigalg={:?} resumed={} {:?}",
                   &self.trace_id,
                   std::str::from_utf8(self.application_proto()),
                   self.handshake.cipher(),
                   self.handshake.curve(),
                   self.handshake.sigalg(),
                   self.handshake.is_resumed(),
                   self.peer_transport_params);
        }

        Ok(())
    }
8023
8024 /// Use the value of the intial max_data / initial stream max_data setting
8025 /// as the initial flow control window for the connection and streams.
8026 /// The connection-level flow control window will only be changed if it
8027 /// hasn't been auto tuned yet. For streams: only newly created streams
8028 /// receive the new setting.
8029 fn enable_use_initial_max_data_as_flow_control_win(&mut self) {
8030 self.flow_control.set_window_if_not_tuned_yet(
8031 self.local_transport_params.initial_max_data,
8032 );
8033 self.streams
8034 .set_use_initial_max_data_as_flow_control_win(true);
8035 }
8036
    /// Selects the packet type for the next outgoing packet.
    ///
    /// Walks the packet number spaces in epoch order (Initial, Handshake,
    /// Application) and returns the packet type for the first epoch that has
    /// send keys and something to transmit. Returns [`Error::Done`] when
    /// there is nothing to send on the path identified by `send_pid`.
    fn write_pkt_type(&self, send_pid: usize) -> Result<Type> {
        // On error send packet in the latest epoch available, but only send
        // 1-RTT ones when the handshake is completed.
        if self
            .local_error
            .as_ref()
            .is_some_and(|conn_err| !conn_err.is_app)
        {
            let epoch = match self.handshake.write_level() {
                crypto::Level::Initial => packet::Epoch::Initial,
                crypto::Level::ZeroRTT => unreachable!(),
                crypto::Level::Handshake => packet::Epoch::Handshake,
                crypto::Level::OneRTT => packet::Epoch::Application,
            };

            if !self.handshake_confirmed {
                match epoch {
                    // Downgrade the epoch to Handshake as the handshake is not
                    // completed yet.
                    packet::Epoch::Application => return Ok(Type::Handshake),

                    // Downgrade the epoch to Initial as the remote peer might
                    // not be able to decrypt handshake packets yet.
                    packet::Epoch::Handshake
                        if self.crypto_ctx[packet::Epoch::Initial].has_keys() =>
                        return Ok(Type::Initial),

                    _ => (),
                };
            }

            return Ok(Type::from_epoch(epoch));
        }

        for &epoch in packet::Epoch::epochs(
            packet::Epoch::Initial..=packet::Epoch::Application,
        ) {
            let crypto_ctx = &self.crypto_ctx[epoch];
            let pkt_space = &self.pkt_num_spaces[epoch];

            // Only send packets in a space when we have the send keys for it.
            if crypto_ctx.crypto_seal.is_none() {
                continue;
            }

            // We are ready to send data for this packet number space.
            if crypto_ctx.data_available() || pkt_space.ready() {
                return Ok(Type::from_epoch(epoch));
            }

            // There are lost frames in this packet number space.
            for (_, p) in self.paths.iter() {
                if p.recovery.has_lost_frames(epoch) {
                    return Ok(Type::from_epoch(epoch));
                }

                // We need to send PTO probe packets.
                if p.recovery.loss_probes(epoch) > 0 {
                    return Ok(Type::from_epoch(epoch));
                }
            }
        }

        // If there are flushable, almost full or blocked streams, use the
        // Application epoch.
        let send_path = self.paths.get(send_pid)?;
        if (self.is_established() || self.is_in_early_data()) &&
            (self.should_send_handshake_done() ||
                self.flow_control.should_update_max_data() ||
                self.should_send_max_data ||
                self.blocked_limit.is_some() ||
                self.streams_blocked_bidi_state
                    .has_pending_stream_blocked_frame() ||
                self.streams_blocked_uni_state
                    .has_pending_stream_blocked_frame() ||
                self.dgram_send_queue.has_pending() ||
                self.local_error
                    .as_ref()
                    .is_some_and(|conn_err| conn_err.is_app) ||
                self.should_send_max_streams_bidi ||
                self.streams.should_update_max_streams_bidi() ||
                self.should_send_max_streams_uni ||
                self.streams.should_update_max_streams_uni() ||
                self.streams.has_flushable() ||
                self.streams.has_almost_full() ||
                self.streams.has_blocked() ||
                self.streams.has_reset() ||
                self.streams.has_stopped() ||
                self.ids.has_new_scids() ||
                self.ids.has_retire_dcids() ||
                send_path
                    .pmtud
                    .as_ref()
                    .is_some_and(|pmtud| pmtud.should_probe()) ||
                send_path.needs_ack_eliciting ||
                send_path.probing_required())
        {
            // Only clients can send 0-RTT packets.
            if !self.is_server && self.is_in_early_data() {
                return Ok(Type::ZeroRTT);
            }

            return Ok(Type::Short);
        }

        Err(Error::Done)
    }
8145
    /// Returns the mutable stream with the given ID if it exists, or creates
    /// a new one otherwise.
    ///
    /// `local` indicates whether the stream is being opened by this endpoint,
    /// as opposed to being created in response to a peer-initiated stream.
    fn get_or_create_stream(
        &mut self, id: u64, local: bool,
    ) -> Result<&mut stream::Stream<F>> {
        // Stream limits and initial flow-control windows are derived from
        // both endpoints' transport parameters.
        self.streams.get_or_create(
            id,
            &self.local_transport_params,
            &self.peer_transport_params,
            local,
            self.is_server,
        )
    }
8159
8160 /// Processes an incoming frame.
8161 fn process_frame(
8162 &mut self, frame: frame::Frame, hdr: &Header, recv_path_id: usize,
8163 epoch: packet::Epoch, now: Instant,
8164 ) -> Result<()> {
8165 trace!("{} rx frm {:?}", self.trace_id, frame);
8166
8167 match frame {
8168 frame::Frame::Padding { .. } => (),
8169
8170 frame::Frame::Ping { .. } => (),
8171
8172 frame::Frame::ACK {
8173 ranges, ack_delay, ..
8174 } => {
8175 let ack_delay = ack_delay
8176 .checked_mul(2_u64.pow(
8177 self.peer_transport_params.ack_delay_exponent as u32,
8178 ))
8179 .ok_or(Error::InvalidFrame)?;
8180
8181 if epoch == packet::Epoch::Handshake ||
8182 (epoch == packet::Epoch::Application &&
8183 self.is_established())
8184 {
8185 self.peer_verified_initial_address = true;
8186 }
8187
8188 let handshake_status = self.handshake_status();
8189
8190 let is_app_limited = self.delivery_rate_check_if_app_limited();
8191
8192 let largest_acked = ranges.last().expect(
8193 "ACK frames should always have at least one ack range",
8194 );
8195
8196 for (_, p) in self.paths.iter_mut() {
8197 if self.pkt_num_spaces[epoch]
8198 .largest_tx_pkt_num
8199 .is_some_and(|largest_sent| largest_sent < largest_acked)
8200 {
8201 // https://www.rfc-editor.org/rfc/rfc9000#section-13.1
8202 // An endpoint SHOULD treat receipt of an acknowledgment
8203 // for a packet it did not send as
8204 // a connection error of type PROTOCOL_VIOLATION
8205 return Err(Error::InvalidAckRange);
8206 }
8207
8208 if is_app_limited {
8209 p.recovery.delivery_rate_update_app_limited(true);
8210 }
8211
8212 let OnAckReceivedOutcome {
8213 lost_packets,
8214 lost_bytes,
8215 acked_bytes,
8216 spurious_losses,
8217 } = p.recovery.on_ack_received(
8218 &ranges,
8219 ack_delay,
8220 epoch,
8221 handshake_status,
8222 now,
8223 self.pkt_num_manager.skip_pn(),
8224 &self.trace_id,
8225 )?;
8226
8227 let skip_pn = self.pkt_num_manager.skip_pn();
8228 let largest_acked =
8229 p.recovery.get_largest_acked_on_epoch(epoch);
8230
8231 // Consider the skip_pn validated if the peer has sent an ack
8232 // for a larger pkt number.
8233 if let Some((largest_acked, skip_pn)) =
8234 largest_acked.zip(skip_pn)
8235 {
8236 if largest_acked > skip_pn {
8237 self.pkt_num_manager.set_skip_pn(None);
8238 }
8239 }
8240
8241 self.lost_count += lost_packets;
8242 self.lost_bytes += lost_bytes as u64;
8243 self.acked_bytes += acked_bytes as u64;
8244 self.spurious_lost_count += spurious_losses;
8245 }
8246 },
8247
8248 frame::Frame::ResetStream {
8249 stream_id,
8250 error_code,
8251 final_size,
8252 } => {
8253 // Peer can't send on our unidirectional streams.
8254 if !stream::is_bidi(stream_id) &&
8255 stream::is_local(stream_id, self.is_server)
8256 {
8257 return Err(Error::InvalidStreamState(stream_id));
8258 }
8259
8260 let max_rx_data_left = self.max_rx_data() - self.rx_data;
8261
8262 // Get existing stream or create a new one, but if the stream
8263 // has already been closed and collected, ignore the frame.
8264 //
8265 // This can happen if e.g. an ACK frame is lost, and the peer
8266 // retransmits another frame before it realizes that the stream
8267 // is gone.
8268 //
8269 // Note that it makes it impossible to check if the frame is
8270 // illegal, since we have no state, but since we ignore the
8271 // frame, it should be fine.
8272 let stream = match self.get_or_create_stream(stream_id, false) {
8273 Ok(v) => v,
8274
8275 Err(Error::Done) => return Ok(()),
8276
8277 Err(e) => return Err(e),
8278 };
8279
8280 let was_readable = stream.is_readable();
8281 let priority_key = Arc::clone(&stream.priority_key);
8282
8283 let stream::RecvBufResetReturn {
8284 max_data_delta,
8285 consumed_flowcontrol,
8286 } = stream.recv.reset(error_code, final_size)?;
8287
8288 if max_data_delta > max_rx_data_left {
8289 return Err(Error::FlowControl);
8290 }
8291
8292 if !was_readable && stream.is_readable() {
8293 self.streams.insert_readable(&priority_key);
8294 }
8295
8296 self.rx_data += max_data_delta;
8297 // We dropped the receive buffer, return connection level
8298 // flow-control
8299 self.flow_control.add_consumed(consumed_flowcontrol);
8300
8301 self.reset_stream_remote_count =
8302 self.reset_stream_remote_count.saturating_add(1);
8303 },
8304
8305 frame::Frame::StopSending {
8306 stream_id,
8307 error_code,
8308 } => {
8309 // STOP_SENDING on a receive-only stream is a fatal error.
8310 if !stream::is_local(stream_id, self.is_server) &&
8311 !stream::is_bidi(stream_id)
8312 {
8313 return Err(Error::InvalidStreamState(stream_id));
8314 }
8315
8316 // Get existing stream or create a new one, but if the stream
8317 // has already been closed and collected, ignore the frame.
8318 //
8319 // This can happen if e.g. an ACK frame is lost, and the peer
8320 // retransmits another frame before it realizes that the stream
8321 // is gone.
8322 //
8323 // Note that it makes it impossible to check if the frame is
8324 // illegal, since we have no state, but since we ignore the
8325 // frame, it should be fine.
8326 let stream = match self.get_or_create_stream(stream_id, false) {
8327 Ok(v) => v,
8328
8329 Err(Error::Done) => return Ok(()),
8330
8331 Err(e) => return Err(e),
8332 };
8333
8334 let was_writable = stream.is_writable();
8335
8336 let priority_key = Arc::clone(&stream.priority_key);
8337
8338 // Try stopping the stream.
8339 if let Ok((final_size, unsent)) = stream.send.stop(error_code) {
8340 // Claw back some flow control allowance from data that was
8341 // buffered but not actually sent before the stream was
8342 // reset.
8343 //
8344 // Note that `tx_cap` will be updated later on, so no need
8345 // to touch it here.
8346 self.tx_data = self.tx_data.saturating_sub(unsent);
8347
8348 self.tx_buffered =
8349 self.tx_buffered.saturating_sub(unsent as usize);
8350
8351 // These drops in qlog are a bit weird, but the only way to
8352 // ensure that all bytes that are moved from App to Transport
8353 // in stream_do_send are eventually moved from Transport to
8354 // Dropped. Ideally we would add a Transport to Network
8355 // transition also as a way to indicate when bytes were
8356 // transmitted vs dropped without ever being sent.
8357 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
8358 let ev_data = EventData::QuicStreamDataMoved(
8359 qlog::events::quic::StreamDataMoved {
8360 stream_id: Some(stream_id),
8361 offset: Some(final_size),
8362 raw: Some(RawInfo {
8363 length: Some(unsent),
8364 ..Default::default()
8365 }),
8366 from: Some(DataRecipient::Transport),
8367 to: Some(DataRecipient::Dropped),
8368 ..Default::default()
8369 },
8370 );
8371
8372 q.add_event_data_with_instant(ev_data, now).ok();
8373 });
8374
8375 self.streams.insert_reset(stream_id, error_code, final_size);
8376
8377 if !was_writable {
8378 self.streams.insert_writable(&priority_key);
8379 }
8380
8381 self.stopped_stream_remote_count =
8382 self.stopped_stream_remote_count.saturating_add(1);
8383 self.reset_stream_local_count =
8384 self.reset_stream_local_count.saturating_add(1);
8385 }
8386 },
8387
8388 frame::Frame::Crypto { data } => {
8389 if data.max_off() >= MAX_CRYPTO_STREAM_OFFSET {
8390 return Err(Error::CryptoBufferExceeded);
8391 }
8392
8393 // Push the data to the stream so it can be re-ordered.
8394 self.crypto_ctx[epoch].crypto_stream.recv.write(data)?;
8395
8396 // Feed crypto data to the TLS state, if there's data
8397 // available at the expected offset.
8398 let mut crypto_buf = [0; 512];
8399
8400 let level = crypto::Level::from_epoch(epoch);
8401
8402 let stream = &mut self.crypto_ctx[epoch].crypto_stream;
8403
8404 while let Ok((read, _)) = stream.recv.emit(&mut crypto_buf) {
8405 let recv_buf = &crypto_buf[..read];
8406 self.handshake.provide_data(level, recv_buf)?;
8407 }
8408
8409 self.do_handshake(now)?;
8410 },
8411
8412 frame::Frame::CryptoHeader { .. } => unreachable!(),
8413
8414 // TODO: implement stateless retry
8415 frame::Frame::NewToken { .. } =>
8416 if self.is_server {
8417 return Err(Error::InvalidPacket);
8418 },
8419
8420 frame::Frame::Stream { stream_id, data } => {
8421 // Peer can't send on our unidirectional streams.
8422 if !stream::is_bidi(stream_id) &&
8423 stream::is_local(stream_id, self.is_server)
8424 {
8425 return Err(Error::InvalidStreamState(stream_id));
8426 }
8427
8428 let max_rx_data_left = self.max_rx_data() - self.rx_data;
8429
8430 // Get existing stream or create a new one, but if the stream
8431 // has already been closed and collected, ignore the frame.
8432 //
8433 // This can happen if e.g. an ACK frame is lost, and the peer
8434 // retransmits another frame before it realizes that the stream
8435 // is gone.
8436 //
8437 // Note that it makes it impossible to check if the frame is
8438 // illegal, since we have no state, but since we ignore the
8439 // frame, it should be fine.
8440 let stream = match self.get_or_create_stream(stream_id, false) {
8441 Ok(v) => v,
8442
8443 Err(Error::Done) => return Ok(()),
8444
8445 Err(e) => return Err(e),
8446 };
8447
8448 // Check for the connection-level flow control limit.
8449 let max_off_delta =
8450 data.max_off().saturating_sub(stream.recv.max_off());
8451
8452 if max_off_delta > max_rx_data_left {
8453 return Err(Error::FlowControl);
8454 }
8455
8456 let was_readable = stream.is_readable();
8457 let priority_key = Arc::clone(&stream.priority_key);
8458
8459 let was_draining = stream.recv.is_draining();
8460
8461 stream.recv.write(data)?;
8462
8463 if !was_readable && stream.is_readable() {
8464 self.streams.insert_readable(&priority_key);
8465 }
8466
8467 self.rx_data += max_off_delta;
8468
8469 if was_draining {
8470 // When a stream is in draining state it will not queue
8471 // incoming data for the application to read, so consider
8472 // the received data as consumed, which might trigger a flow
8473 // control update.
8474 self.flow_control.add_consumed(max_off_delta);
8475 }
8476 },
8477
8478 frame::Frame::StreamHeader { .. } => unreachable!(),
8479
8480 frame::Frame::MaxData { max } => {
8481 self.max_tx_data = cmp::max(self.max_tx_data, max);
8482 },
8483
8484 frame::Frame::MaxStreamData { stream_id, max } => {
8485 // Peer can't receive on its own unidirectional streams.
8486 if !stream::is_bidi(stream_id) &&
8487 !stream::is_local(stream_id, self.is_server)
8488 {
8489 return Err(Error::InvalidStreamState(stream_id));
8490 }
8491
8492 // Get existing stream or create a new one, but if the stream
8493 // has already been closed and collected, ignore the frame.
8494 //
8495 // This can happen if e.g. an ACK frame is lost, and the peer
8496 // retransmits another frame before it realizes that the stream
8497 // is gone.
8498 //
8499 // Note that it makes it impossible to check if the frame is
8500 // illegal, since we have no state, but since we ignore the
8501 // frame, it should be fine.
8502 let stream = match self.get_or_create_stream(stream_id, false) {
8503 Ok(v) => v,
8504
8505 Err(Error::Done) => return Ok(()),
8506
8507 Err(e) => return Err(e),
8508 };
8509
8510 let was_flushable = stream.is_flushable();
8511
8512 stream.send.update_max_data(max);
8513
8514 let writable = stream.is_writable();
8515
8516 let priority_key = Arc::clone(&stream.priority_key);
8517
8518 // If the stream is now flushable push it to the flushable queue,
8519 // but only if it wasn't already queued.
8520 if stream.is_flushable() && !was_flushable {
8521 let priority_key = Arc::clone(&stream.priority_key);
8522 self.streams.insert_flushable(&priority_key);
8523 }
8524
8525 if writable {
8526 self.streams.insert_writable(&priority_key);
8527 }
8528 },
8529
8530 frame::Frame::MaxStreamsBidi { max } => {
8531 if max > MAX_STREAM_ID {
8532 return Err(Error::InvalidFrame);
8533 }
8534
8535 self.streams.update_peer_max_streams_bidi(max);
8536 },
8537
8538 frame::Frame::MaxStreamsUni { max } => {
8539 if max > MAX_STREAM_ID {
8540 return Err(Error::InvalidFrame);
8541 }
8542
8543 self.streams.update_peer_max_streams_uni(max);
8544 },
8545
8546 frame::Frame::DataBlocked { .. } => {
8547 self.data_blocked_recv_count =
8548 self.data_blocked_recv_count.saturating_add(1);
8549 },
8550
8551 frame::Frame::StreamDataBlocked { .. } => {
8552 self.stream_data_blocked_recv_count =
8553 self.stream_data_blocked_recv_count.saturating_add(1);
8554 },
8555
8556 frame::Frame::StreamsBlockedBidi { limit } => {
8557 if limit > MAX_STREAM_ID {
8558 return Err(Error::InvalidFrame);
8559 }
8560
8561 self.streams_blocked_bidi_recv_count =
8562 self.streams_blocked_bidi_recv_count.saturating_add(1);
8563 },
8564
8565 frame::Frame::StreamsBlockedUni { limit } => {
8566 if limit > MAX_STREAM_ID {
8567 return Err(Error::InvalidFrame);
8568 }
8569
8570 self.streams_blocked_uni_recv_count =
8571 self.streams_blocked_uni_recv_count.saturating_add(1);
8572 },
8573
8574 frame::Frame::NewConnectionId {
8575 seq_num,
8576 retire_prior_to,
8577 conn_id,
8578 reset_token,
8579 } => {
8580 if self.ids.zero_length_dcid() {
8581 return Err(Error::InvalidState);
8582 }
8583
8584 let mut retired_path_ids = SmallVec::new();
8585
8586 // Retire pending path IDs before propagating the error code to
8587 // make sure retired connection IDs are not in use anymore.
8588 let new_dcid_res = self.ids.new_dcid(
8589 conn_id.into(),
8590 seq_num,
8591 u128::from_be_bytes(reset_token),
8592 retire_prior_to,
8593 &mut retired_path_ids,
8594 );
8595
8596 for (dcid_seq, pid) in retired_path_ids {
8597 let path = self.paths.get_mut(pid)?;
8598
8599 // Maybe the path already switched to another DCID.
8600 if path.active_dcid_seq != Some(dcid_seq) {
8601 continue;
8602 }
8603
8604 if let Some(new_dcid_seq) =
8605 self.ids.lowest_available_dcid_seq()
8606 {
8607 path.active_dcid_seq = Some(new_dcid_seq);
8608
8609 self.ids.link_dcid_to_path_id(new_dcid_seq, pid)?;
8610
8611 trace!(
8612 "{} path ID {} changed DCID: old seq num {} new seq num {}",
8613 self.trace_id, pid, dcid_seq, new_dcid_seq,
8614 );
8615 } else {
8616 // We cannot use this path anymore for now.
8617 path.active_dcid_seq = None;
8618
8619 trace!(
8620 "{} path ID {} cannot be used; DCID seq num {} has been retired",
8621 self.trace_id, pid, dcid_seq,
8622 );
8623 }
8624 }
8625
8626 // Propagate error (if any) now...
8627 new_dcid_res?;
8628 },
8629
8630 frame::Frame::RetireConnectionId { seq_num } => {
8631 if self.ids.zero_length_scid() {
8632 return Err(Error::InvalidState);
8633 }
8634
8635 if let Some(pid) = self.ids.retire_scid(seq_num, &hdr.dcid)? {
8636 let path = self.paths.get_mut(pid)?;
8637
8638 // Maybe we already linked a new SCID to that path.
8639 if path.active_scid_seq == Some(seq_num) {
8640 // XXX: We do not remove unused paths now, we instead
8641 // wait until we need to maintain more paths than the
8642 // host is willing to.
8643 path.active_scid_seq = None;
8644 }
8645 }
8646 },
8647
8648 frame::Frame::PathChallenge { data } => {
8649 self.path_challenge_rx_count += 1;
8650
8651 self.paths
8652 .get_mut(recv_path_id)?
8653 .on_challenge_received(data);
8654 },
8655
8656 frame::Frame::PathResponse { data } => {
8657 self.paths.on_response_received(data)?;
8658 },
8659
8660 frame::Frame::ConnectionClose {
8661 error_code, reason, ..
8662 } => {
8663 self.peer_error = Some(ConnectionError {
8664 is_app: false,
8665 error_code,
8666 reason,
8667 });
8668
8669 let path = self.paths.get_active()?;
8670 self.draining_timer = Some(now + (path.recovery.pto() * 3));
8671 },
8672
8673 frame::Frame::ApplicationClose { error_code, reason } => {
8674 self.peer_error = Some(ConnectionError {
8675 is_app: true,
8676 error_code,
8677 reason,
8678 });
8679
8680 let path = self.paths.get_active()?;
8681 self.draining_timer = Some(now + (path.recovery.pto() * 3));
8682 },
8683
8684 frame::Frame::HandshakeDone => {
8685 if self.is_server {
8686 return Err(Error::InvalidPacket);
8687 }
8688
8689 self.peer_verified_initial_address = true;
8690
8691 self.handshake_confirmed = true;
8692
8693 // Once the handshake is confirmed, we can drop Handshake keys.
8694 self.drop_epoch_state(packet::Epoch::Handshake, now);
8695 },
8696
8697 frame::Frame::Datagram { data } => {
8698 // Close the connection if DATAGRAMs are not enabled.
8699 // quiche always advertises support for 64K sized DATAGRAM
8700 // frames, as recommended by the standard, so we don't need a
8701 // size check.
8702 if !self.dgram_enabled() {
8703 return Err(Error::InvalidState);
8704 }
8705
8706 // If recv queue is full, discard oldest
8707 if self.dgram_recv_queue.is_full() {
8708 self.dgram_recv_queue.pop();
8709 }
8710
8711 self.dgram_recv_queue.push(data.into())?;
8712
8713 self.dgram_recv_count = self.dgram_recv_count.saturating_add(1);
8714
8715 let path = self.paths.get_mut(recv_path_id)?;
8716 path.dgram_recv_count = path.dgram_recv_count.saturating_add(1);
8717 },
8718
8719 frame::Frame::DatagramHeader { .. } => unreachable!(),
8720 }
8721
8722 Ok(())
8723 }
8724
8725 /// Drops the keys and recovery state for the given epoch.
8726 fn drop_epoch_state(&mut self, epoch: packet::Epoch, now: Instant) {
8727 let crypto_ctx = &mut self.crypto_ctx[epoch];
8728 if crypto_ctx.crypto_open.is_none() {
8729 return;
8730 }
8731 crypto_ctx.clear();
8732 self.pkt_num_spaces[epoch].clear();
8733
8734 let handshake_status = self.handshake_status();
8735 for (_, p) in self.paths.iter_mut() {
8736 p.recovery
8737 .on_pkt_num_space_discarded(epoch, handshake_status, now);
8738 }
8739
8740 trace!("{} dropped epoch {} state", self.trace_id, epoch);
8741 }
8742
    /// Returns the connection level flow control limit.
    ///
    /// This is the current connection-wide limit on incoming data, as
    /// tracked by the flow controller.
    fn max_rx_data(&self) -> u64 {
        self.flow_control.max_data()
    }
8747
8748 /// Returns true if the HANDSHAKE_DONE frame needs to be sent.
8749 fn should_send_handshake_done(&self) -> bool {
8750 self.is_established() && !self.handshake_done_sent && self.is_server
8751 }
8752
8753 /// Returns the idle timeout value.
8754 ///
8755 /// `None` is returned if both end-points disabled the idle timeout.
8756 fn idle_timeout(&self) -> Option<Duration> {
8757 // If the transport parameter is set to 0, then the respective endpoint
8758 // decided to disable the idle timeout. If both are disabled we should
8759 // not set any timeout.
8760 if self.local_transport_params.max_idle_timeout == 0 &&
8761 self.peer_transport_params.max_idle_timeout == 0
8762 {
8763 return None;
8764 }
8765
8766 // If the local endpoint or the peer disabled the idle timeout, use the
8767 // other peer's value, otherwise use the minimum of the two values.
8768 let idle_timeout = if self.local_transport_params.max_idle_timeout == 0 {
8769 self.peer_transport_params.max_idle_timeout
8770 } else if self.peer_transport_params.max_idle_timeout == 0 {
8771 self.local_transport_params.max_idle_timeout
8772 } else {
8773 cmp::min(
8774 self.local_transport_params.max_idle_timeout,
8775 self.peer_transport_params.max_idle_timeout,
8776 )
8777 };
8778
8779 let path_pto = match self.paths.get_active() {
8780 Ok(p) => p.recovery.pto(),
8781 Err(_) => Duration::ZERO,
8782 };
8783
8784 let idle_timeout = Duration::from_millis(idle_timeout);
8785 let idle_timeout = cmp::max(idle_timeout, 3 * path_pto);
8786
8787 Some(idle_timeout)
8788 }
8789
8790 /// Returns the connection's handshake status for use in loss recovery.
8791 fn handshake_status(&self) -> recovery::HandshakeStatus {
8792 recovery::HandshakeStatus {
8793 has_handshake_keys: self.crypto_ctx[packet::Epoch::Handshake]
8794 .has_keys(),
8795
8796 peer_verified_address: self.peer_verified_initial_address,
8797
8798 completed: self.is_established(),
8799 }
8800 }
8801
8802 /// Updates send capacity.
8803 fn update_tx_cap(&mut self) {
8804 let cwin_available = match self.paths.get_active() {
8805 Ok(p) => p.recovery.cwnd_available() as u64,
8806 Err(_) => 0,
8807 };
8808
8809 let cap =
8810 cmp::min(cwin_available, self.max_tx_data - self.tx_data) as usize;
8811 self.tx_cap = (cap as f64 * self.tx_cap_factor).ceil() as usize;
8812 }
8813
    /// Returns whether delivery rate estimation should treat the connection
    /// as application-limited right now.
    fn delivery_rate_check_if_app_limited(&self) -> bool {
        // Enter the app-limited phase of delivery rate when these conditions
        // are met:
        //
        // - The remaining capacity is higher than available bytes in cwnd (there
        //   is more room to send).
        // - New data since the last send() is smaller than available bytes in
        //   cwnd (we queued less than what we can send).
        // - There is room to send more data in cwnd.
        //
        // In application-limited phases the transmission rate is limited by the
        // application rather than the congestion control algorithm.
        //
        // Note that this is equivalent to CheckIfApplicationLimited() from the
        // delivery rate draft. This is also separate from `recovery.app_limited`
        // and only applies to delivery rate calculation.
        //
        // The available congestion window is summed over all active paths.
        let cwin_available = self
            .paths
            .iter()
            .filter(|&(_, p)| p.active())
            .map(|(_, p)| p.recovery.cwnd_available())
            .sum();

        ((self.tx_buffered + self.dgram_send_queue_byte_size()) < cwin_available) &&
            (self.tx_data.saturating_sub(self.last_tx_data)) <
                cwin_available as u64 &&
            cwin_available > 0
    }
8842
    /// Checks the `tx_buffered` accounting invariant and records an
    /// inconsistency when it is violated.
    fn check_tx_buffered_invariant(&mut self) {
        // `tx_buffered` should track bytes queued in the stream buffers and
        // unacked retransmittable bytes in the network. If `tx_buffered` > 0
        // while there are no flushable streams and no bytes in flight on any
        // path, the counter has drifted, so mark the tracking state as
        // inconsistent.
        //
        // The converse is normal: `tx_buffered` may be 0 while there are
        // inflight bytes, since not all QUIC frames are retransmittable;
        // inflight tracks all bytes on the network which are subject to
        // congestion control.
        if self.tx_buffered > 0 &&
            !self.streams.has_flushable() &&
            !self
                .paths
                .iter()
                .any(|(_, p)| p.recovery.bytes_in_flight() > 0)
        {
            self.tx_buffered_state = TxBufferTrackingState::Inconsistent;
        }
    }
8862
8863 fn set_initial_dcid(
8864 &mut self, cid: ConnectionId<'static>, reset_token: Option<u128>,
8865 path_id: usize,
8866 ) -> Result<()> {
8867 self.ids.set_initial_dcid(cid, reset_token, Some(path_id));
8868 self.paths.get_mut(path_id)?.active_dcid_seq = Some(0);
8869
8870 Ok(())
8871 }
8872
    /// Selects the path that the incoming packet belongs to, or creates a new
    /// one if no existing path matches.
    ///
    /// `recv_pid` is the path the packet arrived on, when already known;
    /// `dcid` is the destination CID carried by the packet; `buf_len` is the
    /// received datagram's size, used for the amplification limit.
    fn get_or_create_recv_path_id(
        &mut self, recv_pid: Option<usize>, dcid: &ConnectionId, buf_len: usize,
        info: &RecvInfo,
    ) -> Result<usize> {
        let ids = &mut self.ids;

        // The packet's DCID must match one of our source CIDs.
        let (in_scid_seq, mut in_scid_pid) =
            ids.find_scid_seq(dcid).ok_or(Error::InvalidState)?;

        if let Some(recv_pid) = recv_pid {
            // If the path observes a change of SCID used, note it.
            let recv_path = self.paths.get_mut(recv_pid)?;

            let cid_entry =
                recv_path.active_scid_seq.and_then(|v| ids.get_scid(v).ok());

            if cid_entry.map(|e| &e.cid) != Some(dcid) {
                let incoming_cid_entry = ids.get_scid(in_scid_seq)?;

                // Path this CID was previously seen on, if any.
                let prev_recv_pid =
                    incoming_cid_entry.path_id.unwrap_or(recv_pid);

                if prev_recv_pid != recv_pid {
                    trace!(
                        "{} peer reused CID {:?} from path {} on path {}",
                        self.trace_id,
                        dcid,
                        prev_recv_pid,
                        recv_pid
                    );

                    // TODO: reset congestion control.
                }

                trace!(
                    "{} path ID {} now see SCID with seq num {}",
                    self.trace_id,
                    recv_pid,
                    in_scid_seq
                );

                // Record the SCID now in use on this path.
                recv_path.active_scid_seq = Some(in_scid_seq);
                ids.link_scid_to_path_id(in_scid_seq, recv_pid)?;
            }

            return Ok(recv_pid);
        }

        // This is a new 4-tuple. See if the CID has not been assigned on
        // another path.

        // Ignore this step if we are using zero-length SCID.
        if ids.zero_length_scid() {
            in_scid_pid = None;
        }

        if let Some(in_scid_pid) = in_scid_pid {
            // This CID has been used by another path. If we have the
            // room to do so, create a new `Path` structure holding this
            // new 4-tuple. Otherwise, drop the packet.
            let old_path = self.paths.get_mut(in_scid_pid)?;
            let old_local_addr = old_path.local_addr();
            let old_peer_addr = old_path.peer_addr();

            trace!(
                "{} reused CID seq {} of ({},{}) (path {}) on ({},{})",
                self.trace_id,
                in_scid_seq,
                old_local_addr,
                old_peer_addr,
                in_scid_pid,
                info.to,
                info.from
            );

            // Notify the application.
            self.paths.notify_event(PathEvent::ReusedSourceConnectionId(
                in_scid_seq,
                (old_local_addr, old_peer_addr),
                (info.to, info.from),
            ));
        }

        // This is a new path using an unassigned CID; create it!
        let mut path = path::Path::new(
            info.to,
            info.from,
            &self.recovery_config,
            self.path_challenge_recv_max_queue_len,
            false,
            None,
        );

        // Cap what we may send on the unvalidated path, scaled by the size
        // of the datagram that created it (anti-amplification).
        path.max_send_bytes = buf_len * self.max_amplification_factor;
        path.active_scid_seq = Some(in_scid_seq);

        // Automatically probes the new path.
        path.request_validation();

        let pid = self.paths.insert_path(path, self.is_server)?;

        // Do not record path reuse.
        if in_scid_pid.is_none() {
            ids.link_scid_to_path_id(in_scid_seq, pid)?;
        }

        Ok(pid)
    }
8983
8984 /// Selects the path on which the next packet must be sent.
8985 fn get_send_path_id(
8986 &self, from: Option<SocketAddr>, to: Option<SocketAddr>,
8987 ) -> Result<usize> {
8988 // A probing packet must be sent, but only if the connection is fully
8989 // established.
8990 if self.is_established() {
8991 let mut probing = self
8992 .paths
8993 .iter()
8994 .filter(|(_, p)| from.is_none() || Some(p.local_addr()) == from)
8995 .filter(|(_, p)| to.is_none() || Some(p.peer_addr()) == to)
8996 .filter(|(_, p)| p.active_dcid_seq.is_some())
8997 .filter(|(_, p)| p.probing_required())
8998 .map(|(pid, _)| pid);
8999
9000 if let Some(pid) = probing.next() {
9001 return Ok(pid);
9002 }
9003 }
9004
9005 if let Some((pid, p)) = self.paths.get_active_with_pid() {
9006 if from.is_some() && Some(p.local_addr()) != from {
9007 return Err(Error::Done);
9008 }
9009
9010 if to.is_some() && Some(p.peer_addr()) != to {
9011 return Err(Error::Done);
9012 }
9013
9014 return Ok(pid);
9015 };
9016
9017 Err(Error::InvalidState)
9018 }
9019
9020 /// Sets the path with identifier 'path_id' to be active.
9021 fn set_active_path(&mut self, path_id: usize, now: Instant) -> Result<()> {
9022 if let Ok(old_active_path) = self.paths.get_active_mut() {
9023 for &e in packet::Epoch::epochs(
9024 packet::Epoch::Initial..=packet::Epoch::Application,
9025 ) {
9026 let (lost_packets, lost_bytes) = old_active_path
9027 .recovery
9028 .on_path_change(e, now, &self.trace_id);
9029
9030 self.lost_count += lost_packets;
9031 self.lost_bytes += lost_bytes as u64;
9032 }
9033 }
9034
9035 self.paths.set_active_path(path_id)
9036 }
9037
9038 /// Handles potential connection migration.
9039 fn on_peer_migrated(
9040 &mut self, new_pid: usize, disable_dcid_reuse: bool, now: Instant,
9041 ) -> Result<()> {
9042 let active_path_id = self.paths.get_active_path_id()?;
9043
9044 if active_path_id == new_pid {
9045 return Ok(());
9046 }
9047
9048 self.set_active_path(new_pid, now)?;
9049
9050 let no_spare_dcid =
9051 self.paths.get_mut(new_pid)?.active_dcid_seq.is_none();
9052
9053 if no_spare_dcid && !disable_dcid_reuse {
9054 self.paths.get_mut(new_pid)?.active_dcid_seq =
9055 self.paths.get_mut(active_path_id)?.active_dcid_seq;
9056 }
9057
9058 Ok(())
9059 }
9060
9061 /// Creates a new client-side path.
9062 fn create_path_on_client(
9063 &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
9064 ) -> Result<usize> {
9065 if self.is_server {
9066 return Err(Error::InvalidState);
9067 }
9068
9069 // If we use zero-length SCID and go over our local active CID limit,
9070 // the `insert_path()` call will raise an error.
9071 if !self.ids.zero_length_scid() && self.ids.available_scids() == 0 {
9072 return Err(Error::OutOfIdentifiers);
9073 }
9074
9075 // Do we have a spare DCID? If we are using zero-length DCID, just use
9076 // the default having sequence 0 (note that if we exceed our local CID
9077 // limit, the `insert_path()` call will raise an error.
9078 let dcid_seq = if self.ids.zero_length_dcid() {
9079 0
9080 } else {
9081 self.ids
9082 .lowest_available_dcid_seq()
9083 .ok_or(Error::OutOfIdentifiers)?
9084 };
9085
9086 let mut path = path::Path::new(
9087 local_addr,
9088 peer_addr,
9089 &self.recovery_config,
9090 self.path_challenge_recv_max_queue_len,
9091 false,
9092 None,
9093 );
9094 path.active_dcid_seq = Some(dcid_seq);
9095
9096 let pid = self
9097 .paths
9098 .insert_path(path, false)
9099 .map_err(|_| Error::OutOfIdentifiers)?;
9100 self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
9101
9102 Ok(pid)
9103 }
9104
    // Marks the connection as closed and does any related tidyup.
    fn mark_closed(&mut self) {
        #[cfg(feature = "qlog")]
        {
            // Derive the qlog ConnectionClosed event from how the connection
            // ended: handshake never completed, idle timeout, peer-initiated
            // close, locally-initiated close, or (fallback) unknown.
            let cc = match (self.is_established(), self.timed_out, &self.peer_error, &self.local_error) {
                (false, _, _, _) => qlog::events::quic::ConnectionClosed {
                    initiator: Some(TransportInitiator::Local),
                    connection_error: None,
                    application_error: None,
                    error_code: None,
                    internal_code: None,
                    reason: Some("Failed to establish connection".to_string()),
                    trigger: Some(qlog::events::quic::ConnectionClosedTrigger::HandshakeTimeout)
                },

                (true, true, _, _) => qlog::events::quic::ConnectionClosed {
                    initiator: Some(TransportInitiator::Local),
                    connection_error: None,
                    application_error: None,
                    error_code: None,
                    internal_code: None,
                    reason: Some("Idle timeout".to_string()),
                    trigger: Some(qlog::events::quic::ConnectionClosedTrigger::IdleTimeout)
                },

                (true, false, Some(peer_error), None) => {
                    let (connection_code, application_error, trigger) = if peer_error.is_app {
                        (None, Some(qlog::events::ApplicationError::Unknown), None)
                    } else {
                        // A transport-level NO_ERROR close is a clean close.
                        let trigger = if peer_error.error_code == WireErrorCode::NoError as u64 {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Clean)
                        } else {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Error)
                        };

                        (Some(qlog::events::ConnectionClosedEventError::TransportError(qlog::events::quic::TransportError::Unknown)), None, trigger)
                    };

                    // TODO: select more appropriate connection_code and application_error than unknown.
                    qlog::events::quic::ConnectionClosed {
                        initiator: Some(TransportInitiator::Remote),
                        connection_error: connection_code,
                        application_error,
                        error_code: Some(peer_error.error_code),
                        internal_code: None,
                        reason: Some(String::from_utf8_lossy(&peer_error.reason).to_string()),
                        trigger,
                    }
                },

                (true, false, None, Some(local_error)) => {
                    let (connection_code, application_error, trigger) = if local_error.is_app {
                        (None, Some(qlog::events::ApplicationError::Unknown), None)
                    } else {
                        // A transport-level NO_ERROR close is a clean close.
                        let trigger = if local_error.error_code == WireErrorCode::NoError as u64 {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Clean)
                        } else {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Error)
                        };

                        (Some(qlog::events::ConnectionClosedEventError::TransportError(qlog::events::quic::TransportError::Unknown)), None, trigger)
                    };

                    // TODO: select more appropriate connection_code and application_error than unknown.
                    qlog::events::quic::ConnectionClosed {
                        initiator: Some(TransportInitiator::Local),
                        connection_error: connection_code,
                        application_error,
                        error_code: Some(local_error.error_code),
                        internal_code: None,
                        reason: Some(String::from_utf8_lossy(&local_error.reason).to_string()),
                        trigger,
                    }
                },

                // Remaining combinations (e.g. both a local and a peer error
                // recorded) cannot be attributed to a single cause; emit an
                // empty event.
                _ => qlog::events::quic::ConnectionClosed {
                    initiator: None,
                    connection_error: None,
                    application_error: None,
                    error_code: None,
                    internal_code: None,
                    reason: None,
                    trigger: None,
                },
            };

            qlog_with_type!(QLOG_CONNECTION_CLOSED, self.qlog, q, {
                let ev_data = EventData::QuicConnectionClosed(cc);

                q.add_event_data_now(ev_data).ok();
            });
            // Stop qlog streaming once the close event has been recorded.
            self.qlog.streamer = None;
        }
        self.closed = true;
    }
9200}
9201
#[cfg(feature = "boringssl-boring-crate")]
impl<F: BufFactory> AsMut<boring::ssl::SslRef> for Connection<F> {
    /// Returns a mutable reference to the connection's BoringSSL handle.
    fn as_mut(&mut self) -> &mut boring::ssl::SslRef {
        self.handshake.ssl_mut()
    }
}
9208
9209/// Maps an `Error` to `Error::Done`, or itself.
9210///
9211/// When a received packet that hasn't yet been authenticated triggers a failure
9212/// it should, in most cases, be ignored, instead of raising a connection error,
9213/// to avoid potential man-in-the-middle and man-on-the-side attacks.
9214///
9215/// However, if no other packet was previously received, the connection should
9216/// indeed be closed as the received packet might just be network background
9217/// noise, and it shouldn't keep resources occupied indefinitely.
9218///
9219/// This function maps an error to `Error::Done` to ignore a packet failure
9220/// without aborting the connection, except when no other packet was previously
9221/// received, in which case the error itself is returned, but only on the
9222/// server-side as the client will already have armed the idle timer.
9223///
9224/// This must only be used for errors preceding packet authentication. Failures
9225/// happening after a packet has been authenticated should still cause the
9226/// connection to be aborted.
9227fn drop_pkt_on_err(
9228 e: Error, recv_count: usize, is_server: bool, trace_id: &str,
9229) -> Error {
9230 // On the server, if no other packet has been successfully processed, abort
9231 // the connection to avoid keeping the connection open when only junk is
9232 // received.
9233 if is_server && recv_count == 0 {
9234 return e;
9235 }
9236
9237 trace!("{trace_id} dropped invalid packet");
9238
9239 // Ignore other invalid packets that haven't been authenticated to prevent
9240 // man-in-the-middle and man-on-the-side attacks.
9241 Error::Done
9242}
9243
/// Formatting helper for a `(source, destination)` socket address pair.
///
/// Produces no output when either address is unspecified (wildcard), so log
/// lines don't carry meaningless addresses.
struct AddrTupleFmt(SocketAddr, SocketAddr);

impl std::fmt::Display for AddrTupleFmt {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let src = &self.0;
        let dst = &self.1;

        // Render nothing at all for wildcard addresses.
        if src.ip().is_unspecified() || dst.ip().is_unspecified() {
            return Ok(());
        }

        write!(f, "src:{src} dst:{dst}")
    }
}
9257
/// Statistics about the connection.
///
/// A connection's statistics can be collected using the [`stats()`] method.
///
/// [`stats()`]: struct.Connection.html#method.stats
#[derive(Clone, Default)]
pub struct Stats {
    /// The number of QUIC packets received.
    pub recv: usize,

    /// The number of QUIC packets sent.
    pub sent: usize,

    /// The number of QUIC packets that were lost.
    pub lost: usize,

    /// The number of QUIC packets that were marked as lost but later acked.
    pub spurious_lost: usize,

    /// The number of sent QUIC packets with retransmitted data.
    pub retrans: usize,

    /// The number of sent bytes.
    pub sent_bytes: u64,

    /// The number of received bytes.
    pub recv_bytes: u64,

    /// The number of sent bytes that were acked.
    pub acked_bytes: u64,

    /// The number of sent bytes that were declared lost.
    pub lost_bytes: u64,

    /// The number of stream bytes retransmitted.
    pub stream_retrans_bytes: u64,

    /// The number of DATAGRAM frames received.
    pub dgram_recv: usize,

    /// The number of DATAGRAM frames sent.
    pub dgram_sent: usize,

    /// The number of known paths for the connection.
    pub paths_count: usize,

    /// The number of streams reset by local.
    pub reset_stream_count_local: u64,

    /// The number of streams stopped by local.
    pub stopped_stream_count_local: u64,

    /// The number of streams reset by remote.
    pub reset_stream_count_remote: u64,

    /// The number of streams stopped by remote.
    pub stopped_stream_count_remote: u64,

    /// The number of DATA_BLOCKED frames sent due to hitting the connection
    /// flow control limit.
    pub data_blocked_sent_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames sent due to a stream hitting
    /// the stream flow control limit.
    pub stream_data_blocked_sent_count: u64,

    /// The number of DATA_BLOCKED frames received from the remote.
    pub data_blocked_recv_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames received from the remote.
    pub stream_data_blocked_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames for bidirectional streams received
    /// from the remote, indicating the peer is blocked on opening new
    /// bidirectional streams.
    pub streams_blocked_bidi_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames for unidirectional streams received
    /// from the remote, indicating the peer is blocked on opening new
    /// unidirectional streams.
    pub streams_blocked_uni_recv_count: u64,

    /// The total number of PATH_CHALLENGE frames that were received.
    pub path_challenge_rx_count: u64,

    /// Total duration during which this side of the connection was
    /// actively sending bytes or waiting for those bytes to be acked.
    pub bytes_in_flight_duration: Duration,

    /// Health state of the connection's `tx_buffered` accounting.
    pub tx_buffered_state: TxBufferTrackingState,
}
9350
9351impl std::fmt::Debug for Stats {
9352 #[inline]
9353 fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
9354 write!(
9355 f,
9356 "recv={} sent={} lost={} retrans={}",
9357 self.recv, self.sent, self.lost, self.retrans,
9358 )?;
9359
9360 write!(
9361 f,
9362 " sent_bytes={} recv_bytes={} lost_bytes={}",
9363 self.sent_bytes, self.recv_bytes, self.lost_bytes,
9364 )?;
9365
9366 Ok(())
9367 }
9368}
9369
9370#[doc(hidden)]
9371#[cfg(any(test, feature = "internal"))]
9372pub mod test_utils;
9373
9374#[cfg(test)]
9375mod tests;
9376
9377pub use crate::packet::ConnectionId;
9378pub use crate::packet::Header;
9379pub use crate::packet::Type;
9380
9381pub use crate::path::PathEvent;
9382pub use crate::path::PathStats;
9383pub use crate::path::SocketAddrIter;
9384
9385pub use crate::recovery::BbrBwLoReductionStrategy;
9386pub use crate::recovery::BbrParams;
9387pub use crate::recovery::CongestionControlAlgorithm;
9388pub use crate::recovery::StartupExit;
9389pub use crate::recovery::StartupExitReason;
9390
9391pub use crate::stream::StreamIter;
9392
9393pub use crate::transport_params::TransportParams;
9394pub use crate::transport_params::UnknownTransportParameter;
9395pub use crate::transport_params::UnknownTransportParameterIterator;
9396pub use crate::transport_params::UnknownTransportParameters;
9397
9398pub use crate::buffers::BufFactory;
9399pub use crate::buffers::BufSplit;
9400
9401pub use crate::error::ConnectionError;
9402pub use crate::error::Error;
9403pub use crate::error::Result;
9404pub use crate::error::WireErrorCode;
9405
9406mod buffers;
9407mod cid;
9408mod crypto;
9409mod dgram;
9410mod error;
9411#[cfg(feature = "ffi")]
9412mod ffi;
9413mod flowcontrol;
9414mod frame;
9415pub mod h3;
9416mod minmax;
9417mod packet;
9418mod path;
9419mod pmtud;
9420mod rand;
9421mod range_buf;
9422mod ranges;
9423mod recovery;
9424mod stream;
9425mod tls;
9426mod transport_params;