quiche/lib.rs
1// Copyright (C) 2018-2019, Cloudflare, Inc.
2// All rights reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// * Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// * Redistributions in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
16// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
19// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27//! 🥧 Savoury implementation of the QUIC transport protocol and HTTP/3.
28//!
29//! [quiche] is an implementation of the QUIC transport protocol and HTTP/3 as
30//! specified by the [IETF]. It provides a low level API for processing QUIC
31//! packets and handling connection state. The application is responsible for
32//! providing I/O (e.g. sockets handling) as well as an event loop with support
33//! for timers.
34//!
35//! [quiche]: https://github.com/cloudflare/quiche/
//! [IETF]: https://quicwg.org/
37//!
38//! ## Configuring connections
39//!
40//! The first step in establishing a QUIC connection using quiche is creating a
41//! [`Config`] object:
42//!
43//! ```
44//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
45//! config.set_application_protos(&[b"example-proto"]);
46//!
47//! // Additional configuration specific to application and use case...
48//! # Ok::<(), quiche::Error>(())
49//! ```
50//!
51//! The [`Config`] object controls important aspects of the QUIC connection such
52//! as QUIC version, ALPN IDs, flow control, congestion control, idle timeout
53//! and other properties or features.
54//!
55//! QUIC is a general-purpose transport protocol and there are several
56//! configuration properties where there is no reasonable default value. For
57//! example, the permitted number of concurrent streams of any particular type
58//! is dependent on the application running over QUIC, and other use-case
59//! specific concerns.
60//!
61//! quiche defaults several properties to zero, applications most likely need
62//! to set these to something else to satisfy their needs using the following:
63//!
64//! - [`set_initial_max_streams_bidi()`]
65//! - [`set_initial_max_streams_uni()`]
66//! - [`set_initial_max_data()`]
67//! - [`set_initial_max_stream_data_bidi_local()`]
68//! - [`set_initial_max_stream_data_bidi_remote()`]
69//! - [`set_initial_max_stream_data_uni()`]
70//!
71//! [`Config`] also holds TLS configuration. This can be changed by mutators on
//! an existing object, or by constructing a TLS context manually and
73//! creating a configuration using [`with_boring_ssl_ctx_builder()`].
74//!
75//! A configuration object can be shared among multiple connections.
76//!
77//! ### Connection setup
78//!
79//! On the client-side the [`connect()`] utility function can be used to create
80//! a new connection, while [`accept()`] is for servers:
81//!
82//! ```
83//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
84//! # let server_name = "quic.tech";
85//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
86//! # let peer = "127.0.0.1:1234".parse().unwrap();
87//! # let local = "127.0.0.1:4321".parse().unwrap();
88//! // Client connection.
89//! let conn =
90//! quiche::connect(Some(&server_name), &scid, local, peer, &mut config)?;
91//!
92//! // Server connection.
93//! # let peer = "127.0.0.1:1234".parse().unwrap();
94//! # let local = "127.0.0.1:4321".parse().unwrap();
95//! let conn = quiche::accept(&scid, None, local, peer, &mut config)?;
96//! # Ok::<(), quiche::Error>(())
97//! ```
98//!
99//! In both cases, the application is responsible for generating a new source
100//! connection ID that will be used to identify the new connection.
101//!
//! The application also needs to pass the address of the remote peer of the
103//! connection: in the case of a client that would be the address of the server
104//! it is trying to connect to, and for a server that is the address of the
105//! client that initiated the connection.
106//!
107//! ## Handling incoming packets
108//!
109//! Using the connection's [`recv()`] method the application can process
110//! incoming packets that belong to that connection from the network:
111//!
112//! ```no_run
113//! # let mut buf = [0; 512];
114//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
115//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
116//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
117//! # let peer = "127.0.0.1:1234".parse().unwrap();
118//! # let local = "127.0.0.1:4321".parse().unwrap();
119//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
120//! let to = socket.local_addr().unwrap();
121//!
122//! loop {
123//! let (read, from) = socket.recv_from(&mut buf).unwrap();
124//!
125//! let recv_info = quiche::RecvInfo { from, to };
126//!
127//! let read = match conn.recv(&mut buf[..read], recv_info) {
128//! Ok(v) => v,
129//!
130//! Err(quiche::Error::Done) => {
131//! // Done reading.
132//! break;
133//! },
134//!
135//! Err(e) => {
136//! // An error occurred, handle it.
137//! break;
138//! },
139//! };
140//! }
141//! # Ok::<(), quiche::Error>(())
142//! ```
143//!
144//! The application has to pass a [`RecvInfo`] structure in order to provide
145//! additional information about the received packet (such as the address it
146//! was received from).
147//!
148//! ## Generating outgoing packets
149//!
//! Outgoing packets are generated using the connection's [`send()`] method
151//! instead:
152//!
153//! ```no_run
154//! # let mut out = [0; 512];
155//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
156//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
157//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
158//! # let peer = "127.0.0.1:1234".parse().unwrap();
159//! # let local = "127.0.0.1:4321".parse().unwrap();
160//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
161//! loop {
162//! let (write, send_info) = match conn.send(&mut out) {
163//! Ok(v) => v,
164//!
165//! Err(quiche::Error::Done) => {
166//! // Done writing.
167//! break;
168//! },
169//!
170//! Err(e) => {
171//! // An error occurred, handle it.
172//! break;
173//! },
174//! };
175//!
176//! socket.send_to(&out[..write], &send_info.to).unwrap();
177//! }
178//! # Ok::<(), quiche::Error>(())
179//! ```
180//!
181//! The application will be provided with a [`SendInfo`] structure providing
182//! additional information about the newly created packet (such as the address
183//! the packet should be sent to).
184//!
185//! When packets are sent, the application is responsible for maintaining a
186//! timer to react to time-based connection events. The timer expiration can be
187//! obtained using the connection's [`timeout()`] method.
188//!
189//! ```
190//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
191//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
192//! # let peer = "127.0.0.1:1234".parse().unwrap();
193//! # let local = "127.0.0.1:4321".parse().unwrap();
194//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
195//! let timeout = conn.timeout();
196//! # Ok::<(), quiche::Error>(())
197//! ```
198//!
199//! The application is responsible for providing a timer implementation, which
200//! can be specific to the operating system or networking framework used. When
201//! a timer expires, the connection's [`on_timeout()`] method should be called,
202//! after which additional packets might need to be sent on the network:
203//!
204//! ```no_run
205//! # let mut out = [0; 512];
206//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
207//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
208//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
209//! # let peer = "127.0.0.1:1234".parse().unwrap();
210//! # let local = "127.0.0.1:4321".parse().unwrap();
211//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
212//! // Timeout expired, handle it.
213//! conn.on_timeout();
214//!
215//! // Send more packets as needed after timeout.
216//! loop {
217//! let (write, send_info) = match conn.send(&mut out) {
218//! Ok(v) => v,
219//!
220//! Err(quiche::Error::Done) => {
221//! // Done writing.
222//! break;
223//! },
224//!
225//! Err(e) => {
226//! // An error occurred, handle it.
227//! break;
228//! },
229//! };
230//!
231//! socket.send_to(&out[..write], &send_info.to).unwrap();
232//! }
233//! # Ok::<(), quiche::Error>(())
234//! ```
235//!
236//! ### Pacing
237//!
238//! It is recommended that applications [pace] sending of outgoing packets to
239//! avoid creating packet bursts that could cause short-term congestion and
240//! losses in the network.
241//!
242//! quiche exposes pacing hints for outgoing packets through the [`at`] field
243//! of the [`SendInfo`] structure that is returned by the [`send()`] method.
244//! This field represents the time when a specific packet should be sent into
245//! the network.
246//!
247//! Applications can use these hints by artificially delaying the sending of
248//! packets through platform-specific mechanisms (such as the [`SO_TXTIME`]
249//! socket option on Linux), or custom methods (for example by using user-space
250//! timers).
251//!
252//! [pace]: https://datatracker.ietf.org/doc/html/rfc9002#section-7.7
253//! [`SO_TXTIME`]: https://man7.org/linux/man-pages/man8/tc-etf.8.html
254//!
255//! ## Sending and receiving stream data
256//!
257//! After some back and forth, the connection will complete its handshake and
258//! will be ready for sending or receiving application data.
259//!
260//! Data can be sent on a stream by using the [`stream_send()`] method:
261//!
262//! ```no_run
263//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
264//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
265//! # let peer = "127.0.0.1:1234".parse().unwrap();
266//! # let local = "127.0.0.1:4321".parse().unwrap();
267//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
268//! if conn.is_established() {
269//! // Handshake completed, send some data on stream 0.
270//! conn.stream_send(0, b"hello", true)?;
271//! }
272//! # Ok::<(), quiche::Error>(())
273//! ```
274//!
275//! The application can check whether there are any readable streams by using
276//! the connection's [`readable()`] method, which returns an iterator over all
277//! the streams that have outstanding data to read.
278//!
279//! The [`stream_recv()`] method can then be used to retrieve the application
280//! data from the readable stream:
281//!
282//! ```no_run
283//! # let mut buf = [0; 512];
284//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
285//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
286//! # let peer = "127.0.0.1:1234".parse().unwrap();
287//! # let local = "127.0.0.1:4321".parse().unwrap();
288//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
289//! if conn.is_established() {
290//! // Iterate over readable streams.
291//! for stream_id in conn.readable() {
292//! // Stream is readable, read until there's no more data.
293//! while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
294//! println!("Got {} bytes on stream {}", read, stream_id);
295//! }
296//! }
297//! }
298//! # Ok::<(), quiche::Error>(())
299//! ```
300//!
301//! ## HTTP/3
302//!
303//! The quiche [HTTP/3 module] provides a high level API for sending and
304//! receiving HTTP requests and responses on top of the QUIC transport protocol.
305//!
306//! [`Config`]: https://docs.quic.tech/quiche/struct.Config.html
307//! [`set_initial_max_streams_bidi()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_streams_bidi
308//! [`set_initial_max_streams_uni()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_streams_uni
309//! [`set_initial_max_data()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_data
310//! [`set_initial_max_stream_data_bidi_local()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_bidi_local
311//! [`set_initial_max_stream_data_bidi_remote()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_bidi_remote
312//! [`set_initial_max_stream_data_uni()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_uni
313//! [`with_boring_ssl_ctx_builder()`]: https://docs.quic.tech/quiche/struct.Config.html#method.with_boring_ssl_ctx_builder
314//! [`connect()`]: fn.connect.html
315//! [`accept()`]: fn.accept.html
316//! [`recv()`]: struct.Connection.html#method.recv
317//! [`RecvInfo`]: struct.RecvInfo.html
318//! [`send()`]: struct.Connection.html#method.send
319//! [`SendInfo`]: struct.SendInfo.html
320//! [`at`]: struct.SendInfo.html#structfield.at
321//! [`timeout()`]: struct.Connection.html#method.timeout
322//! [`on_timeout()`]: struct.Connection.html#method.on_timeout
323//! [`stream_send()`]: struct.Connection.html#method.stream_send
324//! [`readable()`]: struct.Connection.html#method.readable
325//! [`stream_recv()`]: struct.Connection.html#method.stream_recv
326//! [HTTP/3 module]: h3/index.html
327//!
328//! ## Congestion Control
329//!
330//! The quiche library provides a high-level API for configuring which
331//! congestion control algorithm to use throughout the QUIC connection.
332//!
333//! When a QUIC connection is created, the application can optionally choose
334//! which CC algorithm to use. See [`CongestionControlAlgorithm`] for currently
335//! available congestion control algorithms.
336//!
337//! For example:
338//!
339//! ```
340//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
341//! config.set_cc_algorithm(quiche::CongestionControlAlgorithm::Reno);
342//! ```
343//!
344//! Alternatively, you can configure the congestion control algorithm to use
345//! by its name.
346//!
347//! ```
348//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
349//! config.set_cc_algorithm_name("reno").unwrap();
350//! ```
351//!
352//! Note that the CC algorithm should be configured before calling [`connect()`]
353//! or [`accept()`]. Otherwise the connection will use a default CC algorithm.
354//!
355//! [`CongestionControlAlgorithm`]: enum.CongestionControlAlgorithm.html
356//!
357//! ## Feature flags
358//!
359//! quiche defines a number of [feature flags] to reduce the amount of compiled
360//! code and dependencies:
361//!
362//! * `boringssl-vendored` (default): Build the vendored BoringSSL library.
363//!
364//! * `boringssl-boring-crate`: Use the BoringSSL library provided by the
365//! [boring] crate. It takes precedence over `boringssl-vendored` if both
366//! features are enabled.
367//!
368//! * `pkg-config-meta`: Generate pkg-config metadata file for libquiche.
369//!
370//! * `ffi`: Build and expose the FFI API.
371//!
372//! * `qlog`: Enable support for the [qlog] logging format.
373//!
374//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section
375//! [boring]: https://crates.io/crates/boring
376//! [qlog]: https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema
377
378#![allow(clippy::upper_case_acronyms)]
379#![warn(missing_docs)]
380#![warn(unused_qualifications)]
381#![cfg_attr(docsrs, feature(doc_cfg))]
382
383#[macro_use]
384extern crate log;
385
386use std::cmp;
387
388use std::collections::VecDeque;
389
390use debug_panic::debug_panic;
391
392use std::net::SocketAddr;
393
394use std::str::FromStr;
395
396use std::sync::Arc;
397
398use std::time::Duration;
399use std::time::Instant;
400
401#[cfg(feature = "qlog")]
402use qlog::events::quic::DataMovedAdditionalInfo;
403#[cfg(feature = "qlog")]
404use qlog::events::quic::QuicEventType;
405#[cfg(feature = "qlog")]
406use qlog::events::quic::TransportInitiator;
407#[cfg(feature = "qlog")]
408use qlog::events::DataRecipient;
409#[cfg(feature = "qlog")]
410use qlog::events::Event;
411#[cfg(feature = "qlog")]
412use qlog::events::EventData;
413#[cfg(feature = "qlog")]
414use qlog::events::EventImportance;
415#[cfg(feature = "qlog")]
416use qlog::events::EventType;
417#[cfg(feature = "qlog")]
418use qlog::events::RawInfo;
419
420use smallvec::SmallVec;
421
422use crate::buffers::DefaultBufFactory;
423
424use crate::recovery::OnAckReceivedOutcome;
425use crate::recovery::OnLossDetectionTimeoutOutcome;
426use crate::recovery::RecoveryOps;
427use crate::recovery::ReleaseDecision;
428
429use crate::stream::RecvAction;
430use crate::stream::StreamPriorityKey;
431
/// The current QUIC wire version.
pub const PROTOCOL_VERSION: u32 = PROTOCOL_VERSION_V1;

/// Supported QUIC versions.
const PROTOCOL_VERSION_V1: u32 = 0x0000_0001;

/// The maximum length of a connection ID.
pub const MAX_CONN_ID_LEN: usize = packet::MAX_CID_LEN as usize;

/// The minimum length of Initial packets sent by a client.
pub const MIN_CLIENT_INITIAL_LEN: usize = 1200;

/// The default initial RTT, used before an actual RTT sample is available
/// (see `Config::set_initial_rtt()`).
const DEFAULT_INITIAL_RTT: Duration = Duration::from_millis(333);

// The minimum length of a packet payload.
const PAYLOAD_MIN_LEN: usize = 4;

// Minimum size of a probing packet:
// PATH_CHALLENGE (9 bytes) + AEAD tag (16 bytes).
const MIN_PROBING_SIZE: usize = 25;

// The default anti-amplification limit factor (see
// `Config::set_max_amplification_factor()`).
const MAX_AMPLIFICATION_FACTOR: usize = 3;

// The maximum number of tracked packet number ranges that need to be acked.
//
// This represents more or less how many ack blocks can fit in a typical
// packet.
const MAX_ACK_RANGES: usize = 68;

// The highest possible stream ID allowed.
const MAX_STREAM_ID: u64 = 1 << 60;

// The default max_datagram_size used in congestion control.
const MAX_SEND_UDP_PAYLOAD_SIZE: usize = 1200;

// The default length of DATAGRAM queues.
const DEFAULT_MAX_DGRAM_QUEUE_LEN: usize = 0;

// The default length of the PATH_CHALLENGE receive queue.
const DEFAULT_MAX_PATH_CHALLENGE_RX_QUEUE_LEN: usize = 3;

// The DATAGRAM standard recommends either none or 65536 as maximum DATAGRAM
// frames size. We enforce the recommendation for forward compatibility.
const MAX_DGRAM_FRAME_SIZE: u64 = 65536;

// The length of the payload length field.
const PAYLOAD_LENGTH_LEN: usize = 2;

// The number of undecryptable packets that can be buffered.
const MAX_UNDECRYPTABLE_PACKETS: usize = 10;

// Mask describing reserved (greasing) versions; see RFC 9000 Section 15 and
// `is_reserved_version()`.
const RESERVED_VERSION_MASK: u32 = 0xfafafafa;

// The default size of the receiver connection flow control window.
const DEFAULT_CONNECTION_WINDOW: u64 = 48 * 1024;

// The maximum size of the receiver connection flow control window.
const MAX_CONNECTION_WINDOW: u64 = 24 * 1024 * 1024;

// How much larger the connection flow control window needs to be compared to
// the stream flow control window.
const CONNECTION_WINDOW_FACTOR: f64 = 1.5;

// How many probing packet timeouts we tolerate before considering the path
// validation as failed.
const MAX_PROBING_TIMEOUTS: usize = 3;

// The default initial congestion window size in terms of packet count.
const DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS: usize = 10;

// The maximum data offset that can be stored in a crypto stream.
const MAX_CRYPTO_STREAM_OFFSET: u64 = 1 << 16;

// The send capacity factor (see `Config::set_send_capacity_factor()`).
const TX_CAP_FACTOR: f64 = 1.0;
505
/// Ancillary information about incoming packets.
///
/// This is passed to the connection's [`recv()`] method together with the
/// packet bytes.
///
/// [`recv()`]: struct.Connection.html#method.recv
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct RecvInfo {
    /// The remote address the packet was received from.
    pub from: SocketAddr,

    /// The local address the packet was received on.
    pub to: SocketAddr,
}
515
/// Ancillary information about outgoing packets.
///
/// This is returned by the connection's [`send()`] method together with the
/// number of bytes written.
///
/// [`send()`]: struct.Connection.html#method.send
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SendInfo {
    /// The local address the packet should be sent from.
    pub from: SocketAddr,

    /// The remote address the packet should be sent to.
    pub to: SocketAddr,

    /// The time to send the packet out.
    ///
    /// See [Pacing] for more details.
    ///
    /// [Pacing]: index.html#pacing
    pub at: Instant,
}
532
/// The side of the stream to be shut down.
///
/// This should be used when calling [`stream_shutdown()`].
///
/// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
//
// `repr(C)` with explicit discriminants keeps the values stable, e.g. for
// the FFI API.
#[repr(C)]
#[derive(PartialEq, Eq)]
pub enum Shutdown {
    /// Stop receiving stream data.
    Read = 0,

    /// Stop sending stream data.
    Write = 1,
}
547
/// Qlog logging level.
///
/// Levels are cumulative: each one includes all events logged by the levels
/// below it.
#[repr(C)]
#[cfg(feature = "qlog")]
#[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
pub enum QlogLevel {
    /// Logs any events of Core importance.
    Core = 0,

    /// Logs any events of Core and Base importance.
    Base = 1,

    /// Logs any events of Core, Base and Extra importance.
    Extra = 2,
}
562
/// Stores configuration shared between multiple connections.
pub struct Config {
    // Transport parameters for the local endpoint (e.g. `max_idle_timeout`,
    // `max_udp_payload_size`; see the various setters below).
    local_transport_params: TransportParams,

    // QUIC wire version, validated in `with_tls_ctx()`.
    version: u32,

    // TLS configuration (certificates, keys, ALPN, keylog, early data, ...).
    tls_ctx: tls::Context,

    // Owned copies of the ALPN protocols passed to
    // `set_application_protos()`.
    application_protos: Vec<Vec<u8>>,

    // Whether to send GREASE values (see `grease()`).
    grease: bool,

    // Congestion control configuration.
    cc_algorithm: CongestionControlAlgorithm,
    custom_bbr_params: Option<BbrParams>,
    initial_congestion_window_packets: usize,
    enable_relaxed_loss_threshold: bool,
    enable_cubic_idle_restart_fix: bool,
    enable_send_streams_blocked: bool,

    // Path MTU discovery (see `discover_pmtu()` and
    // `set_pmtud_max_probes()`).
    pmtud: bool,
    pmtud_max_probes: u8,

    hystart: bool,

    pacing: bool,
    /// Send rate limit in Mbps
    max_pacing_rate: Option<u64>,

    // Send capacity factor (see `set_send_capacity_factor()`).
    tx_cap_factor: f64,

    // Maximum lengths of the DATAGRAM receive/send queues.
    dgram_recv_max_queue_len: usize,
    dgram_send_max_queue_len: usize,

    // Maximum length of the PATH_CHALLENGE receive queue.
    path_challenge_recv_max_queue_len: usize,

    // Maximum size of outgoing UDP payloads.
    max_send_udp_payload_size: usize,

    // Upper bounds for the receiver flow control windows.
    max_connection_window: u64,
    max_stream_window: u64,

    // Anti-amplification limit factor (see
    // `set_max_amplification_factor()`).
    max_amplification_factor: usize,

    disable_dcid_reuse: bool,

    // NOTE(review): presumably a size limit for tracking unknown transport
    // parameters — confirm against its setter (not visible in this chunk).
    track_unknown_transport_params: Option<usize>,

    // Initial RTT used before an RTT sample is taken (see
    // `set_initial_rtt()`).
    initial_rtt: Duration,

    /// When true, uses the initial max data (for connection
    /// and stream) as the initial flow control window.
    use_initial_max_data_as_flow_control_win: bool,
}
615
616// See https://quicwg.org/base-drafts/rfc9000.html#section-15
617fn is_reserved_version(version: u32) -> bool {
618 version & RESERVED_VERSION_MASK == version
619}
620
621impl Config {
    /// Creates a config object with the given version.
    ///
    /// The `version` must be either a supported protocol version or a
    /// reserved (greasing) version, otherwise [`Error::UnknownVersion`] is
    /// returned.
    ///
    /// ## Examples:
    ///
    /// ```
    /// let config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn new(version: u32) -> Result<Config> {
        // Use a freshly created TLS context; version validation happens in
        // `with_tls_ctx()`.
        Self::with_tls_ctx(version, tls::Context::new()?)
    }
633
    /// Creates a config object with the given version and
    /// [`SslContextBuilder`].
    ///
    /// This is useful for applications that wish to manually configure
    /// [`SslContextBuilder`].
    ///
    /// [`SslContextBuilder`]: https://docs.rs/boring/latest/boring/ssl/struct.SslContextBuilder.html
    #[cfg(feature = "boringssl-boring-crate")]
    #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
    pub fn with_boring_ssl_ctx_builder(
        version: u32, tls_ctx_builder: boring::ssl::SslContextBuilder,
    ) -> Result<Config> {
        // Wrap the caller-supplied builder; version validation happens in
        // `with_tls_ctx()`.
        Self::with_tls_ctx(version, tls::Context::from_boring(tls_ctx_builder))
    }
648
    /// Creates a config object from an already-built TLS context.
    ///
    /// The requested wire `version` must either be a reserved (greasing)
    /// version or one supported by this implementation; anything else fails
    /// with [`Error::UnknownVersion`]. All other settings start from the
    /// library defaults and can be changed through the setter methods.
    fn with_tls_ctx(version: u32, tls_ctx: tls::Context) -> Result<Config> {
        if !is_reserved_version(version) && !version_is_supported(version) {
            return Err(Error::UnknownVersion);
        }

        Ok(Config {
            local_transport_params: TransportParams::default(),
            version,
            tls_ctx,
            application_protos: Vec::new(),
            grease: true,
            cc_algorithm: CongestionControlAlgorithm::CUBIC,
            custom_bbr_params: None,
            initial_congestion_window_packets:
                DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS,
            enable_relaxed_loss_threshold: false,
            enable_cubic_idle_restart_fix: true,
            enable_send_streams_blocked: false,
            pmtud: false,
            pmtud_max_probes: pmtud::MAX_PROBES_DEFAULT,
            hystart: true,
            pacing: true,
            max_pacing_rate: None,

            tx_cap_factor: TX_CAP_FACTOR,

            dgram_recv_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,
            dgram_send_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,

            path_challenge_recv_max_queue_len:
                DEFAULT_MAX_PATH_CHALLENGE_RX_QUEUE_LEN,

            max_send_udp_payload_size: MAX_SEND_UDP_PAYLOAD_SIZE,

            max_connection_window: MAX_CONNECTION_WINDOW,
            max_stream_window: stream::MAX_STREAM_WINDOW,

            max_amplification_factor: MAX_AMPLIFICATION_FACTOR,

            disable_dcid_reuse: false,

            track_unknown_transport_params: None,
            initial_rtt: DEFAULT_INITIAL_RTT,

            use_initial_max_data_as_flow_control_win: false,
        })
    }
696
    /// Configures the given certificate chain.
    ///
    /// The content of `file` is parsed as a PEM-encoded leaf certificate,
    /// followed by optional intermediate certificates.
    ///
    /// Any error reported by the underlying TLS library while loading the
    /// file is propagated to the caller.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_cert_chain_from_pem_file("/path/to/cert.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_cert_chain_from_pem_file(&mut self, file: &str) -> Result<()> {
        self.tls_ctx.use_certificate_chain_file(file)
    }
712
    /// Configures the given private key.
    ///
    /// The content of `file` is parsed as a PEM-encoded private key.
    ///
    /// Any error reported by the underlying TLS library while loading the
    /// file is propagated to the caller.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_priv_key_from_pem_file("/path/to/key.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_priv_key_from_pem_file(&mut self, file: &str) -> Result<()> {
        self.tls_ctx.use_privkey_file(file)
    }
727
    /// Specifies a file where trusted CA certificates are stored for the
    /// purposes of certificate verification.
    ///
    /// The content of `file` is parsed as a PEM-encoded certificate chain.
    ///
    /// Any error reported by the underlying TLS library while loading the
    /// file is propagated to the caller.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_verify_locations_from_file("/path/to/cert.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_verify_locations_from_file(&mut self, file: &str) -> Result<()> {
        self.tls_ctx.load_verify_locations_from_file(file)
    }
743
    /// Specifies a directory where trusted CA certificates are stored for the
    /// purposes of certificate verification.
    ///
    /// The content of `dir` is a set of PEM-encoded certificate chains.
    ///
    /// Any error reported by the underlying TLS library while loading the
    /// directory is propagated to the caller.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_verify_locations_from_directory("/path/to/certs")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_verify_locations_from_directory(
        &mut self, dir: &str,
    ) -> Result<()> {
        self.tls_ctx.load_verify_locations_from_directory(dir)
    }
761
    /// Configures whether to verify the peer's certificate.
    ///
    /// This should usually be `true` for client-side connections and `false`
    /// for server-side ones.
    ///
    /// Note that by default, no verification is performed.
    ///
    /// Also note that on the server-side, enabling verification of the peer
    /// will trigger a certificate request and make authentication errors
    /// fatal, but will still allow anonymous clients (i.e. clients that
    /// don't present a certificate at all). Servers can check whether a
    /// client presented a certificate by calling [`peer_cert()`] if they
    /// need to.
    ///
    /// [`peer_cert()`]: struct.Connection.html#method.peer_cert
    pub fn verify_peer(&mut self, verify: bool) {
        // Delegates to the TLS context's verification flag.
        self.tls_ctx.set_verify(verify);
    }
780
    /// Configures whether to do path MTU discovery.
    ///
    /// The default value is `false`.
    pub fn discover_pmtu(&mut self, discover: bool) {
        self.pmtud = discover;
    }
787
788 /// Configures the maximum number of PMTUD probe attempts before treating
789 /// a probe size as failed.
790 ///
791 /// Defaults to 3 per [RFC 8899 Section 5.1.2](https://datatracker.ietf.org/doc/html/rfc8899#section-5.1.2).
792 /// If 0 is passed, the default value is used.
793 pub fn set_pmtud_max_probes(&mut self, max_probes: u8) {
794 self.pmtud_max_probes = max_probes;
795 }
796
    /// Configures whether to send GREASE values.
    ///
    /// The default value is `true`.
    pub fn grease(&mut self, grease: bool) {
        self.grease = grease;
    }
803
    /// Enables logging of secrets.
    ///
    /// When logging is enabled, the [`set_keylog()`] method must be called on
    /// the connection for its cryptographic secrets to be logged in the
    /// [keylog] format to the specified writer.
    ///
    /// [`set_keylog()`]: struct.Connection.html#method.set_keylog
    /// [keylog]: https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format
    pub fn log_keys(&mut self) {
        // One-way switch: there is no corresponding disable call here.
        self.tls_ctx.enable_keylog();
    }
815
    /// Configures the session ticket key material.
    ///
    /// On the server this key will be used to encrypt and decrypt session
    /// tickets, used to perform session resumption without server-side state.
    ///
    /// By default a key is generated internally, and rotated regularly, so
    /// applications don't need to call this unless they need to use a
    /// specific key (e.g. in order to support resumption across multiple
    /// servers), in which case the application is also responsible for
    /// rotating the key to provide forward secrecy.
    ///
    /// Any error reported by the underlying TLS library is propagated to the
    /// caller.
    pub fn set_ticket_key(&mut self, key: &[u8]) -> Result<()> {
        self.tls_ctx.set_ticket_key(key)
    }
829
    /// Enables sending or receiving early data.
    pub fn enable_early_data(&mut self) {
        // One-way switch: early data cannot be disabled again through this
        // API.
        self.tls_ctx.set_early_data_enabled(true);
    }
834
835 /// Configures the list of supported application protocols.
836 ///
837 /// On the client this configures the list of protocols to send to the
838 /// server as part of the ALPN extension.
839 ///
840 /// On the server this configures the list of supported protocols to match
841 /// against the client-supplied list.
842 ///
843 /// Applications must set a value, but no default is provided.
844 ///
845 /// ## Examples:
846 ///
847 /// ```
848 /// # let mut config = quiche::Config::new(0xbabababa)?;
849 /// config.set_application_protos(&[b"http/1.1", b"http/0.9"]);
850 /// # Ok::<(), quiche::Error>(())
851 /// ```
852 pub fn set_application_protos(
853 &mut self, protos_list: &[&[u8]],
854 ) -> Result<()> {
855 self.application_protos =
856 protos_list.iter().map(|s| s.to_vec()).collect();
857
858 self.tls_ctx.set_alpn(protos_list)
859 }
860
861 /// Configures the list of supported application protocols using wire
862 /// format.
863 ///
864 /// The list of protocols `protos` must be a series of non-empty, 8-bit
865 /// length-prefixed strings.
866 ///
867 /// See [`set_application_protos`](Self::set_application_protos) for more
868 /// background about application protocols.
869 ///
870 /// ## Examples:
871 ///
872 /// ```
873 /// # let mut config = quiche::Config::new(0xbabababa)?;
874 /// config.set_application_protos_wire_format(b"\x08http/1.1\x08http/0.9")?;
875 /// # Ok::<(), quiche::Error>(())
876 /// ```
877 pub fn set_application_protos_wire_format(
878 &mut self, protos: &[u8],
879 ) -> Result<()> {
880 let mut b = octets::Octets::with_slice(protos);
881
882 let mut protos_list = Vec::new();
883
884 while let Ok(proto) = b.get_bytes_with_u8_length() {
885 protos_list.push(proto.buf());
886 }
887
888 self.set_application_protos(&protos_list)
889 }
890
    /// Sets the anti-amplification limit factor.
    ///
    /// Before the client's address is validated, a server may send at most
    /// this factor times the number of bytes received from the client.
    ///
    /// The default value is `3`.
    pub fn set_max_amplification_factor(&mut self, v: usize) {
        self.max_amplification_factor = v;
    }
897
    /// Sets the send capacity factor.
    ///
    /// The factor is applied when computing the connection's send capacity
    /// (`tx_cap`).
    ///
    /// The default value is `1`.
    pub fn set_send_capacity_factor(&mut self, v: f64) {
        self.tx_cap_factor = v;
    }
904
    /// Sets the connection's initial RTT.
    ///
    /// The default value is `333` milliseconds, matching RFC 9002's
    /// recommended initial RTT.
    pub fn set_initial_rtt(&mut self, v: Duration) {
        self.initial_rtt = v;
    }
911
912 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
913 ///
914 /// The default value is infinite, that is, no timeout is used.
915 pub fn set_max_idle_timeout(&mut self, v: u64) {
916 self.local_transport_params.max_idle_timeout =
917 cmp::min(v, octets::MAX_VAR_INT);
918 }
919
920 /// Sets the `max_udp_payload_size transport` parameter.
921 ///
922 /// The default value is `65527`.
923 pub fn set_max_recv_udp_payload_size(&mut self, v: usize) {
924 self.local_transport_params.max_udp_payload_size =
925 cmp::min(v as u64, octets::MAX_VAR_INT);
926 }
927
928 /// Sets the maximum outgoing UDP payload size.
929 ///
930 /// The default and minimum value is `1200`.
931 pub fn set_max_send_udp_payload_size(&mut self, v: usize) {
932 self.max_send_udp_payload_size = cmp::max(v, MAX_SEND_UDP_PAYLOAD_SIZE);
933 }
934
935 /// Sets the `initial_max_data` transport parameter.
936 ///
937 /// When set to a non-zero value quiche will only allow at most `v` bytes of
938 /// incoming stream data to be buffered for the whole connection (that is,
939 /// data that is not yet read by the application) and will allow more data
940 /// to be received as the buffer is consumed by the application.
941 ///
942 /// When set to zero, either explicitly or via the default, quiche will not
943 /// give any flow control to the peer, preventing it from sending any stream
944 /// data.
945 ///
946 /// The default value is `0`.
947 pub fn set_initial_max_data(&mut self, v: u64) {
948 self.local_transport_params.initial_max_data =
949 cmp::min(v, octets::MAX_VAR_INT);
950 }
951
952 /// Sets the `initial_max_stream_data_bidi_local` transport parameter.
953 ///
954 /// When set to a non-zero value quiche will only allow at most `v` bytes
955 /// of incoming stream data to be buffered for each locally-initiated
956 /// bidirectional stream (that is, data that is not yet read by the
957 /// application) and will allow more data to be received as the buffer is
958 /// consumed by the application.
959 ///
960 /// When set to zero, either explicitly or via the default, quiche will not
961 /// give any flow control to the peer, preventing it from sending any stream
962 /// data.
963 ///
964 /// The default value is `0`.
965 pub fn set_initial_max_stream_data_bidi_local(&mut self, v: u64) {
966 self.local_transport_params
967 .initial_max_stream_data_bidi_local =
968 cmp::min(v, octets::MAX_VAR_INT);
969 }
970
971 /// Sets the `initial_max_stream_data_bidi_remote` transport parameter.
972 ///
973 /// When set to a non-zero value quiche will only allow at most `v` bytes
974 /// of incoming stream data to be buffered for each remotely-initiated
975 /// bidirectional stream (that is, data that is not yet read by the
976 /// application) and will allow more data to be received as the buffer is
977 /// consumed by the application.
978 ///
979 /// When set to zero, either explicitly or via the default, quiche will not
980 /// give any flow control to the peer, preventing it from sending any stream
981 /// data.
982 ///
983 /// The default value is `0`.
984 pub fn set_initial_max_stream_data_bidi_remote(&mut self, v: u64) {
985 self.local_transport_params
986 .initial_max_stream_data_bidi_remote =
987 cmp::min(v, octets::MAX_VAR_INT);
988 }
989
990 /// Sets the `initial_max_stream_data_uni` transport parameter.
991 ///
992 /// When set to a non-zero value quiche will only allow at most `v` bytes
993 /// of incoming stream data to be buffered for each unidirectional stream
994 /// (that is, data that is not yet read by the application) and will allow
995 /// more data to be received as the buffer is consumed by the application.
996 ///
997 /// When set to zero, either explicitly or via the default, quiche will not
998 /// give any flow control to the peer, preventing it from sending any stream
999 /// data.
1000 ///
1001 /// The default value is `0`.
1002 pub fn set_initial_max_stream_data_uni(&mut self, v: u64) {
1003 self.local_transport_params.initial_max_stream_data_uni =
1004 cmp::min(v, octets::MAX_VAR_INT);
1005 }
1006
1007 /// Sets the `initial_max_streams_bidi` transport parameter.
1008 ///
1009 /// When set to a non-zero value quiche will only allow `v` number of
1010 /// concurrent remotely-initiated bidirectional streams to be open at any
1011 /// given time and will increase the limit automatically as streams are
1012 /// completed.
1013 ///
1014 /// When set to zero, either explicitly or via the default, quiche will not
1015 /// not allow the peer to open any bidirectional streams.
1016 ///
1017 /// A bidirectional stream is considered completed when all incoming data
1018 /// has been read by the application (up to the `fin` offset) or the
1019 /// stream's read direction has been shutdown, and all outgoing data has
1020 /// been acked by the peer (up to the `fin` offset) or the stream's write
1021 /// direction has been shutdown.
1022 ///
1023 /// The default value is `0`.
1024 pub fn set_initial_max_streams_bidi(&mut self, v: u64) {
1025 self.local_transport_params.initial_max_streams_bidi =
1026 cmp::min(v, octets::MAX_VAR_INT);
1027 }
1028
1029 /// Sets the `initial_max_streams_uni` transport parameter.
1030 ///
1031 /// When set to a non-zero value quiche will only allow `v` number of
1032 /// concurrent remotely-initiated unidirectional streams to be open at any
1033 /// given time and will increase the limit automatically as streams are
1034 /// completed.
1035 ///
1036 /// When set to zero, either explicitly or via the default, quiche will not
1037 /// not allow the peer to open any unidirectional streams.
1038 ///
1039 /// A unidirectional stream is considered completed when all incoming data
1040 /// has been read by the application (up to the `fin` offset) or the
1041 /// stream's read direction has been shutdown.
1042 ///
1043 /// The default value is `0`.
1044 pub fn set_initial_max_streams_uni(&mut self, v: u64) {
1045 self.local_transport_params.initial_max_streams_uni =
1046 cmp::min(v, octets::MAX_VAR_INT);
1047 }
1048
1049 /// Sets the `ack_delay_exponent` transport parameter.
1050 ///
1051 /// The default value is `3`.
1052 pub fn set_ack_delay_exponent(&mut self, v: u64) {
1053 self.local_transport_params.ack_delay_exponent =
1054 cmp::min(v, octets::MAX_VAR_INT);
1055 }
1056
1057 /// Sets the `max_ack_delay` transport parameter.
1058 ///
1059 /// The default value is `25`.
1060 pub fn set_max_ack_delay(&mut self, v: u64) {
1061 self.local_transport_params.max_ack_delay =
1062 cmp::min(v, octets::MAX_VAR_INT);
1063 }
1064
1065 /// Sets the `active_connection_id_limit` transport parameter.
1066 ///
1067 /// The default value is `2`. Lower values will be ignored.
1068 pub fn set_active_connection_id_limit(&mut self, v: u64) {
1069 if v >= 2 {
1070 self.local_transport_params.active_conn_id_limit =
1071 cmp::min(v, octets::MAX_VAR_INT);
1072 }
1073 }
1074
    /// Sets the `disable_active_migration` transport parameter.
    ///
    /// When `true`, the peer is asked not to actively migrate the connection
    /// to a different local address.
    ///
    /// The default value is `false`.
    pub fn set_disable_active_migration(&mut self, v: bool) {
        self.local_transport_params.disable_active_migration = v;
    }
1081
    /// Sets the congestion control algorithm used.
    ///
    /// See [`CongestionControlAlgorithm`] for the available algorithms.
    ///
    /// The default value is `CongestionControlAlgorithm::CUBIC`.
    pub fn set_cc_algorithm(&mut self, algo: CongestionControlAlgorithm) {
        self.cc_algorithm = algo;
    }
1088
    /// Sets custom BBR settings.
    ///
    /// This API is experimental and will be removed in the future.
    ///
    /// Currently this only applies when `cc_algorithm` is set to
    /// `CongestionControlAlgorithm::Bbr2Gcongestion`.
    ///
    /// The default value is `None`.
    #[cfg(feature = "internal")]
    #[doc(hidden)]
    pub fn set_custom_bbr_params(&mut self, custom_bbr_settings: BbrParams) {
        self.custom_bbr_params = Some(custom_bbr_settings);
    }
1102
1103 /// Sets the congestion control algorithm used by string.
1104 ///
1105 /// The default value is `cubic`. On error `Error::CongestionControl`
1106 /// will be returned.
1107 ///
1108 /// ## Examples:
1109 ///
1110 /// ```
1111 /// # let mut config = quiche::Config::new(0xbabababa)?;
1112 /// config.set_cc_algorithm_name("reno");
1113 /// # Ok::<(), quiche::Error>(())
1114 /// ```
1115 pub fn set_cc_algorithm_name(&mut self, name: &str) -> Result<()> {
1116 self.cc_algorithm = CongestionControlAlgorithm::from_str(name)?;
1117
1118 Ok(())
1119 }
1120
    /// Sets initial congestion window size in terms of packet count.
    ///
    /// The default value is `10`.
    pub fn set_initial_congestion_window_packets(&mut self, packets: usize) {
        self.initial_congestion_window_packets = packets;
    }
1127
    /// Configure whether to enable relaxed loss detection on spurious loss.
    ///
    /// The default value is `false`.
    pub fn set_enable_relaxed_loss_threshold(&mut self, enable: bool) {
        self.enable_relaxed_loss_threshold = enable;
    }
1134
    /// Configure whether to enable the CUBIC idle restart fix.
    ///
    /// When enabled, the epoch shift on idle restart uses the later of
    /// the last ACK time and last send time, avoiding an inflated delta
    /// when bytes-in-flight transiently hits zero.
    ///
    /// The default value is `true`.
    pub fn set_enable_cubic_idle_restart_fix(&mut self, enable: bool) {
        self.enable_cubic_idle_restart_fix = enable;
    }
1145
    /// Configure whether to enable sending STREAMS_BLOCKED frames.
    ///
    /// STREAMS_BLOCKED frames are an optional advisory signal in the QUIC
    /// protocol which SHOULD be sent when the sender wishes to open a stream
    /// but is unable to do so due to the maximum stream limit set by its peer.
    ///
    /// The default value is `false`.
    pub fn set_enable_send_streams_blocked(&mut self, enable: bool) {
        self.enable_send_streams_blocked = enable;
    }
1156
    /// Configures whether to enable HyStart++.
    ///
    /// The default value is `true`.
    pub fn enable_hystart(&mut self, v: bool) {
        self.hystart = v;
    }
1163
    /// Configures whether to enable pacing.
    ///
    /// The default value is `true`.
    pub fn enable_pacing(&mut self, v: bool) {
        self.pacing = v;
    }
1170
    /// Sets the max value for pacing rate.
    ///
    /// By default pacing rate is not limited.
    pub fn set_max_pacing_rate(&mut self, v: u64) {
        self.max_pacing_rate = Some(v);
    }
1177
1178 /// Configures whether to enable receiving DATAGRAM frames.
1179 ///
1180 /// When enabled, the `max_datagram_frame_size` transport parameter is set
1181 /// to 65536 as recommended by draft-ietf-quic-datagram-01.
1182 ///
1183 /// The default is `false`.
1184 pub fn enable_dgram(
1185 &mut self, enabled: bool, recv_queue_len: usize, send_queue_len: usize,
1186 ) {
1187 self.local_transport_params.max_datagram_frame_size = if enabled {
1188 Some(MAX_DGRAM_FRAME_SIZE)
1189 } else {
1190 None
1191 };
1192 self.dgram_recv_max_queue_len = recv_queue_len;
1193 self.dgram_send_max_queue_len = send_queue_len;
1194 }
1195
    /// Configures the max number of queued received PATH_CHALLENGE frames.
    ///
    /// When an endpoint receives a PATH_CHALLENGE frame and the queue is full,
    /// the frame is discarded.
    ///
    /// The default is `3`.
    pub fn set_path_challenge_recv_max_queue_len(&mut self, queue_len: usize) {
        self.path_challenge_recv_max_queue_len = queue_len;
    }
1205
    /// Sets the maximum size of the connection window.
    ///
    /// The default value is `MAX_CONNECTION_WINDOW` (24MBytes).
    pub fn set_max_connection_window(&mut self, v: u64) {
        self.max_connection_window = v;
    }
1212
    /// Sets the maximum size of the stream window.
    ///
    /// The default value is `MAX_STREAM_WINDOW` (16MBytes).
    pub fn set_max_stream_window(&mut self, v: u64) {
        self.max_stream_window = v;
    }
1219
    /// Sets the initial stateless reset token.
    ///
    /// This value is only advertised by servers. Setting a stateless reset
    /// token as a client has no effect on the connection.
    ///
    /// The default value is `None`.
    pub fn set_stateless_reset_token(&mut self, v: Option<u128>) {
        self.local_transport_params.stateless_reset_token = v;
    }
1229
    /// Sets whether the QUIC connection should avoid reusing DCIDs over
    /// different paths.
    ///
    /// When set to `true`, it ensures that a destination Connection ID is never
    /// reused on different paths. Such behaviour may lead to connection stall
    /// if the peer performs a non-voluntary migration (e.g., NAT rebinding) and
    /// does not provide additional destination Connection IDs to handle such
    /// event.
    ///
    /// The default value is `false`.
    pub fn set_disable_dcid_reuse(&mut self, v: bool) {
        self.disable_dcid_reuse = v;
    }
1243
    /// Enables tracking unknown transport parameters.
    ///
    /// Specify the maximum number of bytes used to track unknown transport
    /// parameters. The size includes the identifier and its value. If storing a
    /// transport parameter would cause the limit to be exceeded, it is quietly
    /// dropped.
    ///
    /// The default is that the feature is disabled.
    pub fn enable_track_unknown_transport_parameters(&mut self, size: usize) {
        self.track_unknown_transport_params = Some(size);
    }
1255
    /// Sets whether the initial max data value should be used as the initial
    /// flow control window.
    ///
    /// If set to true, the initial flow control window for streams and the
    /// connection itself will be set to the initial max data value for streams
    /// and the connection respectively. If false, the window is set to the
    /// minimum of initial max data and `DEFAULT_STREAM_WINDOW` or
    /// `DEFAULT_CONNECTION_WINDOW`.
    ///
    /// The default is `false`.
    pub fn set_use_initial_max_data_as_flow_control_win(&mut self, v: bool) {
        self.use_initial_max_data_as_flow_control_win = v;
    }
1269}
1270
/// Tracks the health of the tx_buffered value.
#[derive(Clone, Copy, Debug, Default, PartialEq)]
pub enum TxBufferTrackingState {
    /// The send buffer is in a good state.
    #[default]
    Ok,
    /// The send buffer is in an inconsistent state, which could lead to
    /// connection stalls or excess buffering due to bugs we haven't
    /// tracked down yet.
    Inconsistent,
}
1282
/// Tracks if the connection hit the peer stream limit and which
/// STREAMS_BLOCKED frames have been sent.
#[derive(Default)]
struct StreamsBlockedState {
    /// The peer's max_streams limit at which we last became blocked on
    /// opening new local streams, if any.
    blocked_at: Option<u64>,

    /// The stream limit sent on the most recently sent STREAMS_BLOCKED
    /// frame. If != to blocked_at, the connection has pending STREAMS_BLOCKED
    /// frames to send.
    blocked_sent: Option<u64>,
}

impl StreamsBlockedState {
    /// Returns true if there is a STREAMS_BLOCKED frame that needs sending.
    fn has_pending_stream_blocked_frame(&self) -> bool {
        // Equivalent to `blocked_sent < blocked_at` under `Option` ordering,
        // where `None` compares smaller than any `Some`: nothing is pending
        // unless we are blocked, and a frame is pending when none was sent
        // yet or the sent limit lags the blocked limit.
        match (self.blocked_sent, self.blocked_at) {
            (_, None) => false,

            (None, Some(_)) => true,

            (Some(sent), Some(at)) => sent < at,
        }
    }

    /// Update the stream blocked limit.
    fn update_at(&mut self, limit: u64) {
        // Keep the highest limit observed so far (i.e. `max` semantics).
        if self.blocked_at.map_or(true, |cur| cur < limit) {
            self.blocked_at = Some(limit);
        }
    }

    /// Clear blocked_sent to force retransmission of the most recently sent
    /// STREAMS_BLOCKED frame.
    fn force_retransmit_sent_limit_eq(&mut self, limit: u64) {
        // Only clear `blocked_sent` if the lost frame carried the most
        // recently sent limit; losses of older frames are superseded.
        match self.blocked_sent {
            Some(sent) if sent == limit => self.blocked_sent = None,

            _ => (),
        }
    }
}
1318
/// A QUIC connection.
pub struct Connection<F = DefaultBufFactory>
where
    F: BufFactory,
{
    /// QUIC wire version used for the connection.
    version: u32,

    /// Connection Identifiers.
    ids: cid::ConnectionIdentifiers,

    /// Unique opaque ID for the connection that can be used for logging.
    trace_id: String,

    /// Packet number spaces.
    pkt_num_spaces: [packet::PktNumSpace; packet::Epoch::count()],

    /// The crypto context.
    crypto_ctx: [packet::CryptoContext; packet::Epoch::count()],

    /// Next packet number.
    next_pkt_num: u64,

    // TODO: combine with `next_pkt_num`.
    /// Track the packet skip context.
    pkt_num_manager: packet::PktNumManager,

    /// Peer's transport parameters.
    peer_transport_params: TransportParams,

    /// If tracking unknown transport parameters from a peer, how much space to
    /// use in bytes.
    peer_transport_params_track_unknown: Option<usize>,

    /// Local transport parameters.
    local_transport_params: TransportParams,

    /// TLS handshake state.
    handshake: tls::Handshake,

    /// Serialized TLS session buffer.
    ///
    /// This field is populated when a new session ticket is processed on the
    /// client. On the server this is empty.
    session: Option<Vec<u8>>,

    /// The configuration for recovery.
    recovery_config: recovery::RecoveryConfig,

    /// The path manager.
    paths: path::PathMap,

    /// PATH_CHALLENGE receive queue max length.
    path_challenge_recv_max_queue_len: usize,

    /// Total number of received PATH_CHALLENGE frames.
    path_challenge_rx_count: u64,

    /// List of supported application protocols.
    application_protos: Vec<Vec<u8>>,

    /// Total number of received packets.
    recv_count: usize,

    /// Total number of sent packets.
    sent_count: usize,

    /// Total number of lost packets.
    lost_count: usize,

    /// Total number of lost packets that were later acked.
    spurious_lost_count: usize,

    /// Total number of packets sent with data retransmitted.
    retrans_count: usize,

    /// Total number of sent DATAGRAM frames.
    dgram_sent_count: usize,

    /// Total number of received DATAGRAM frames.
    dgram_recv_count: usize,

    /// Total number of bytes received from the peer.
    rx_data: u64,

    /// Receiver flow controller.
    flow_control: flowcontrol::FlowControl,

    /// Whether we send MAX_DATA frame.
    should_send_max_data: bool,

    /// True if there is a pending MAX_STREAMS_BIDI frame to send.
    should_send_max_streams_bidi: bool,

    /// True if there is a pending MAX_STREAMS_UNI frame to send.
    should_send_max_streams_uni: bool,

    /// Number of stream data bytes that can be buffered.
    tx_cap: usize,

    /// The send capacity factor.
    tx_cap_factor: f64,

    /// Number of bytes buffered in the send buffer.
    tx_buffered: usize,

    /// Tracks the health of tx_buffered.
    tx_buffered_state: TxBufferTrackingState,

    /// Total number of bytes sent to the peer.
    tx_data: u64,

    /// Peer's flow control limit for the connection.
    max_tx_data: u64,

    /// Last tx_data before running a full send() loop.
    last_tx_data: u64,

    /// Total number of bytes retransmitted over the connection.
    /// This counts only STREAM and CRYPTO data.
    stream_retrans_bytes: u64,

    /// Total number of bytes sent over the connection.
    sent_bytes: u64,

    /// Total number of bytes received over the connection.
    recv_bytes: u64,

    /// Total number of bytes sent acked over the connection.
    acked_bytes: u64,

    /// Total number of bytes sent lost over the connection.
    lost_bytes: u64,

    /// Streams map, indexed by stream ID.
    streams: stream::StreamMap<F>,

    /// Peer's original destination connection ID. Used by the client to
    /// validate the server's transport parameter.
    odcid: Option<ConnectionId<'static>>,

    /// Peer's retry source connection ID. Used by the client during stateless
    /// retry to validate the server's transport parameter.
    rscid: Option<ConnectionId<'static>>,

    /// Received address verification token.
    token: Option<Vec<u8>>,

    /// Error code and reason to be sent to the peer in a CONNECTION_CLOSE
    /// frame.
    local_error: Option<ConnectionError>,

    /// Error code and reason received from the peer in a CONNECTION_CLOSE
    /// frame.
    peer_error: Option<ConnectionError>,

    /// The connection-level limit at which send blocking occurred.
    blocked_limit: Option<u64>,

    /// Idle timeout expiration time.
    idle_timer: Option<Instant>,

    /// Draining timeout expiration time.
    draining_timer: Option<Instant>,

    /// List of raw packets that were received before they could be decrypted.
    undecryptable_pkts: VecDeque<(Vec<u8>, RecvInfo)>,

    /// The negotiated ALPN protocol.
    alpn: Vec<u8>,

    /// Whether this is a server-side connection.
    is_server: bool,

    /// Whether the initial secrets have been derived.
    derived_initial_secrets: bool,

    /// Whether a version negotiation packet has already been received. Only
    /// relevant for client connections.
    did_version_negotiation: bool,

    /// Whether stateless retry has been performed.
    did_retry: bool,

    /// Whether the peer already updated its connection ID.
    got_peer_conn_id: bool,

    /// Whether the peer verified our initial address.
    peer_verified_initial_address: bool,

    /// Whether the peer's transport parameters were parsed.
    parsed_peer_transport_params: bool,

    /// Whether the connection handshake has been completed.
    handshake_completed: bool,

    /// Whether the HANDSHAKE_DONE frame has been sent.
    handshake_done_sent: bool,

    /// Whether the HANDSHAKE_DONE frame has been acked.
    handshake_done_acked: bool,

    /// Whether the connection handshake has been confirmed.
    handshake_confirmed: bool,

    /// Key phase bit used for outgoing protected packets.
    key_phase: bool,

    /// Whether an ack-eliciting packet has been sent since last receiving a
    /// packet.
    ack_eliciting_sent: bool,

    /// Whether the connection is closed.
    closed: bool,

    /// Whether the connection was timed out.
    timed_out: bool,

    /// Whether to send GREASE.
    grease: bool,

    /// Whether to send STREAMS_BLOCKED frames when bidi or uni stream quota
    /// exhausted.
    enable_send_streams_blocked: bool,

    /// TLS keylog writer.
    keylog: Option<Box<dyn std::io::Write + Send + Sync>>,

    /// Qlog logging state (only compiled in with the `qlog` feature).
    #[cfg(feature = "qlog")]
    qlog: QlogInfo,

    /// Receive queue for DATAGRAM frames.
    dgram_recv_queue: dgram::DatagramQueue<F>,
    /// Send queue for DATAGRAM frames.
    dgram_send_queue: dgram::DatagramQueue<F>,

    /// Whether to emit DATAGRAM frames in the next packet.
    emit_dgram: bool,

    /// Whether the connection should prevent from reusing destination
    /// Connection IDs when the peer migrates.
    disable_dcid_reuse: bool,

    /// The number of streams reset by local.
    reset_stream_local_count: u64,

    /// The number of streams stopped by local.
    stopped_stream_local_count: u64,

    /// The number of streams reset by remote.
    reset_stream_remote_count: u64,

    /// The number of streams stopped by remote.
    stopped_stream_remote_count: u64,

    /// The number of DATA_BLOCKED frames sent due to hitting the connection
    /// flow control limit.
    data_blocked_sent_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames sent due to a stream hitting
    /// the stream flow control limit.
    stream_data_blocked_sent_count: u64,

    /// The number of DATA_BLOCKED frames received from the remote endpoint.
    data_blocked_recv_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames received from the remote
    /// endpoint.
    stream_data_blocked_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames received from the remote endpoint
    /// indicating the peer is blocked on opening new bidirectional streams.
    streams_blocked_bidi_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames received from the remote endpoint
    /// indicating the peer is blocked on opening new unidirectional streams.
    streams_blocked_uni_recv_count: u64,

    /// The number of times send() was blocked because the anti-amplification
    /// budget (bytes received × max_amplification_factor) was exhausted.
    amplification_limited_count: u64,

    /// Tracks if the connection hit the peer's bidi or uni stream limit, and if
    /// STREAMS_BLOCKED frames are pending transmission.
    streams_blocked_bidi_state: StreamsBlockedState,
    streams_blocked_uni_state: StreamsBlockedState,

    /// The anti-amplification limit factor.
    max_amplification_factor: usize,
}
1609
1610/// Creates a new server-side connection.
1611///
1612/// The `scid` parameter represents the server's source connection ID, while
1613/// the optional `odcid` parameter represents the original destination ID the
1614/// client sent before a Retry packet (this is only required when using the
1615/// [`retry()`] function). See also the [`accept_with_retry()`] function for
1616/// more advanced retry cases.
1617///
1618/// [`retry()`]: fn.retry.html
1619///
1620/// ## Examples:
1621///
1622/// ```no_run
1623/// # let mut config = quiche::Config::new(0xbabababa)?;
1624/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
1625/// # let local = "127.0.0.1:0".parse().unwrap();
1626/// # let peer = "127.0.0.1:1234".parse().unwrap();
1627/// let conn = quiche::accept(&scid, None, local, peer, &mut config)?;
1628/// # Ok::<(), quiche::Error>(())
1629/// ```
1630#[inline(always)]
1631pub fn accept(
1632 scid: &ConnectionId, odcid: Option<&ConnectionId>, local: SocketAddr,
1633 peer: SocketAddr, config: &mut Config,
1634) -> Result<Connection> {
1635 accept_with_buf_factory(scid, odcid, local, peer, config)
1636}
1637
1638/// Creates a new server-side connection, with a custom buffer generation
1639/// method.
1640///
1641/// The buffers generated can be anything that can be drereferenced as a byte
1642/// slice. See [`accept`] and [`BufFactory`] for more info.
1643#[inline]
1644pub fn accept_with_buf_factory<F: BufFactory>(
1645 scid: &ConnectionId, odcid: Option<&ConnectionId>, local: SocketAddr,
1646 peer: SocketAddr, config: &mut Config,
1647) -> Result<Connection<F>> {
1648 // For connections with `odcid` set, we historically used `retry_source_cid =
1649 // scid`. Keep this behavior to preserve backwards compatibility.
1650 // `accept_with_retry` allows the SCIDs to be specified separately.
1651 let retry_cids = odcid.map(|odcid| RetryConnectionIds {
1652 original_destination_cid: odcid,
1653 retry_source_cid: scid,
1654 });
1655 Connection::new(scid, retry_cids, None, local, peer, config, true)
1656}
1657
/// A wrapper for connection IDs used in [`accept_with_retry`].
pub struct RetryConnectionIds<'a> {
    /// The DCID of the first Initial packet received by the server, which
    /// triggered the Retry packet.
    pub original_destination_cid: &'a ConnectionId<'a>,
    /// The SCID of the Retry packet sent by the server. This can be different
    /// from the new connection's SCID.
    pub retry_source_cid: &'a ConnectionId<'a>,
}
1667
1668/// Creates a new server-side connection after the client responded to a Retry
1669/// packet.
1670///
1671/// To generate a Retry packet in the first place, use the [`retry()`] function.
1672///
1673/// The `scid` parameter represents the server's source connection ID, which can
1674/// be freshly generated after the application has successfully verified the
1675/// Retry. `retry_cids` is used to tie the new connection to the Initial + Retry
1676/// exchange that preceded the connection's creation.
1677///
1678/// The DCID of the client's Initial packet is inherently untrusted data. It is
1679/// safe to use the DCID in the `retry_source_cid` field of the
1680/// `RetryConnectionIds` provided to this function. However, using the Initial's
1681/// DCID for the `scid` parameter carries risks. Applications are advised to
1682/// implement their own DCID validation steps before using the DCID in that
1683/// manner.
1684#[inline]
1685pub fn accept_with_retry<F: BufFactory>(
1686 scid: &ConnectionId, retry_cids: RetryConnectionIds, local: SocketAddr,
1687 peer: SocketAddr, config: &mut Config,
1688) -> Result<Connection<F>> {
1689 Connection::new(scid, Some(retry_cids), None, local, peer, config, true)
1690}
1691
1692/// Creates a new client-side connection.
1693///
1694/// The `scid` parameter is used as the connection's source connection ID,
1695/// while the optional `server_name` parameter is used to verify the peer's
1696/// certificate.
1697///
1698/// ## Examples:
1699///
1700/// ```no_run
1701/// # let mut config = quiche::Config::new(0xbabababa)?;
1702/// # let server_name = "quic.tech";
1703/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
1704/// # let local = "127.0.0.1:4321".parse().unwrap();
1705/// # let peer = "127.0.0.1:1234".parse().unwrap();
1706/// let conn =
1707/// quiche::connect(Some(&server_name), &scid, local, peer, &mut config)?;
1708/// # Ok::<(), quiche::Error>(())
1709/// ```
1710#[inline]
1711pub fn connect(
1712 server_name: Option<&str>, scid: &ConnectionId, local: SocketAddr,
1713 peer: SocketAddr, config: &mut Config,
1714) -> Result<Connection> {
1715 let mut conn = Connection::new(scid, None, None, local, peer, config, false)?;
1716
1717 if let Some(server_name) = server_name {
1718 conn.handshake.set_host_name(server_name)?;
1719 }
1720
1721 Ok(conn)
1722}
1723
/// Creates a new client-side connection using the given DCID initially.
///
/// Be aware that [RFC 9000] places requirements for unpredictability and length
/// on the client DCID field. This function is dangerous if these requirements
/// are not satisfied.
///
/// The `scid` parameter is used as the connection's source connection ID, while
/// the optional `server_name` parameter is used to verify the peer's
/// certificate.
///
/// [RFC 9000]: <https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3>
#[cfg(feature = "custom-client-dcid")]
#[cfg_attr(docsrs, doc(cfg(feature = "custom-client-dcid")))]
pub fn connect_with_dcid(
    server_name: Option<&str>, scid: &ConnectionId, dcid: &ConnectionId,
    local: SocketAddr, peer: SocketAddr, config: &mut Config,
) -> Result<Connection> {
    let mut conn =
        Connection::new(scid, None, Some(dcid), local, peer, config, false)?;

    // Configure the name used to verify the server's certificate, if given.
    if let Some(name) = server_name {
        conn.handshake.set_host_name(name)?;
    }

    Ok(conn)
}
1750
1751/// Creates a new client-side connection, with a custom buffer generation
1752/// method.
1753///
1754/// The buffers generated can be anything that can be drereferenced as a byte
1755/// slice. See [`connect`] and [`BufFactory`] for more info.
1756#[inline]
1757pub fn connect_with_buffer_factory<F: BufFactory>(
1758 server_name: Option<&str>, scid: &ConnectionId, local: SocketAddr,
1759 peer: SocketAddr, config: &mut Config,
1760) -> Result<Connection<F>> {
1761 let mut conn = Connection::new(scid, None, None, local, peer, config, false)?;
1762
1763 if let Some(server_name) = server_name {
1764 conn.handshake.set_host_name(server_name)?;
1765 }
1766
1767 Ok(conn)
1768}
1769
/// Creates a new client-side connection, with a custom buffer generation
/// method, using the given DCID initially.
///
/// Be aware that [RFC 9000] places requirements for unpredictability and
/// length on the client DCID field.
///
/// The buffers generated can be anything that can be dereferenced as a byte
/// slice. See [`connect`] and [`BufFactory`] for more info.
///
/// [RFC 9000]: <https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3>
#[cfg(feature = "custom-client-dcid")]
#[cfg_attr(docsrs, doc(cfg(feature = "custom-client-dcid")))]
pub fn connect_with_dcid_and_buffer_factory<F: BufFactory>(
    server_name: Option<&str>, scid: &ConnectionId, dcid: &ConnectionId,
    local: SocketAddr, peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    // Same as `connect_with_dcid()`, but generic over the buffer factory `F`.
    let mut conn =
        Connection::new(scid, None, Some(dcid), local, peer, config, false)?;

    // Attach the name used for SNI and peer certificate verification, if any.
    if let Some(name) = server_name {
        conn.handshake.set_host_name(name)?;
    }

    Ok(conn)
}
1793
/// Writes a version negotiation packet.
///
/// The `scid` and `dcid` parameters are the source connection ID and the
/// destination connection ID extracted from the received client's Initial
/// packet that advertises an unsupported version.
///
/// ## Examples:
///
/// ```no_run
/// # let mut buf = [0; 512];
/// # let mut out = [0; 512];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// let (len, src) = socket.recv_from(&mut buf).unwrap();
///
/// let hdr =
///     quiche::Header::from_slice(&mut buf[..len], quiche::MAX_CONN_ID_LEN)?;
///
/// if hdr.version != quiche::PROTOCOL_VERSION {
///     let len = quiche::negotiate_version(&hdr.scid, &hdr.dcid, &mut out)?;
///     socket.send_to(&out[..len], &src).unwrap();
/// }
/// # Ok::<(), quiche::Error>(())
/// ```
#[inline]
pub fn negotiate_version(
    scid: &ConnectionId, dcid: &ConnectionId, out: &mut [u8],
) -> Result<usize> {
    // Thin wrapper: the wire encoding lives in the packet module. Returns the
    // number of bytes written into `out`.
    packet::negotiate_version(scid, dcid, out)
}
1823
/// Writes a stateless retry packet.
///
/// The `scid` and `dcid` parameters are the source connection ID and the
/// destination connection ID extracted from the received client's Initial
/// packet, while `new_scid` is the server's new source connection ID and
/// `token` is the address validation token the client needs to echo back.
///
/// The application is responsible for generating the address validation
/// token to be sent to the client, and verifying tokens sent back by the
/// client. The generated token should include the `dcid` parameter, such
/// that it can be later extracted from the token and passed to the
/// [`accept()`] function as its `odcid` parameter.
///
/// [`accept()`]: fn.accept.html
///
/// ## Examples:
///
/// ```no_run
/// # let mut config = quiche::Config::new(0xbabababa)?;
/// # let mut buf = [0; 512];
/// # let mut out = [0; 512];
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let local = socket.local_addr().unwrap();
/// # fn mint_token(hdr: &quiche::Header, src: &std::net::SocketAddr) -> Vec<u8> {
/// #     vec![]
/// # }
/// # fn validate_token<'a>(src: &std::net::SocketAddr, token: &'a [u8]) -> Option<quiche::ConnectionId<'a>> {
/// #     None
/// # }
/// let (len, peer) = socket.recv_from(&mut buf).unwrap();
///
/// let hdr = quiche::Header::from_slice(&mut buf[..len], quiche::MAX_CONN_ID_LEN)?;
///
/// let token = hdr.token.as_ref().unwrap();
///
/// // No token sent by client, create a new one.
/// if token.is_empty() {
///     let new_token = mint_token(&hdr, &peer);
///
///     let len = quiche::retry(
///         &hdr.scid, &hdr.dcid, &scid, &new_token, hdr.version, &mut out,
///     )?;
///
///     socket.send_to(&out[..len], &peer).unwrap();
///     return Ok(());
/// }
///
/// // Client sent token, validate it.
/// let odcid = validate_token(&peer, token);
///
/// if odcid.is_none() {
///     // Invalid address validation token.
///     return Ok(());
/// }
///
/// let conn = quiche::accept(&scid, odcid.as_ref(), local, peer, &mut config)?;
/// # Ok::<(), quiche::Error>(())
/// ```
#[inline]
pub fn retry(
    scid: &ConnectionId, dcid: &ConnectionId, new_scid: &ConnectionId,
    token: &[u8], version: u32, out: &mut [u8],
) -> Result<usize> {
    // Thin wrapper: the wire encoding lives in the packet module. Returns the
    // number of bytes written into `out`.
    packet::retry(scid, dcid, new_scid, token, version, out)
}
1890
1891/// Returns true if the given protocol version is supported.
1892#[inline]
1893pub fn version_is_supported(version: u32) -> bool {
1894 matches!(version, PROTOCOL_VERSION_V1)
1895}
1896
/// Pushes a frame to the output packet if there is enough space.
///
/// Returns `true` on success, `false` otherwise. In case of failure it means
/// there is no room to add the frame in the packet. You may retry to add the
/// frame later.
macro_rules! push_frame_to_pkt {
    ($out:expr, $frames:expr, $frame:expr, $left:expr) => {{
        if $frame.wire_len() <= $left {
            // Charge the frame's wire size against the remaining budget
            // before serializing it.
            $left -= $frame.wire_len();

            // Serialization errors propagate to the enclosing function via
            // `?`, so this macro can only be used in fallible contexts.
            $frame.to_bytes(&mut $out)?;

            // Record the frame in the caller's list of frames placed in this
            // packet.
            $frames.push($frame);

            true
        } else {
            false
        }
    }};
}
1917
/// Executes the provided body if the qlog feature is enabled, quiche has been
/// configured with a log writer, the event's importance is within the
/// configured level.
macro_rules! qlog_with_type {
    ($ty:expr, $qlog:expr, $qlog_streamer_ref:ident, $body:block) => {{
        // With the qlog feature disabled this expands to nothing, so call
        // sites carry zero cost.
        #[cfg(feature = "qlog")]
        {
            // Only run the body when the event type's importance falls within
            // the configured level AND a streamer has actually been set up.
            if EventImportance::from($ty).is_contained_in(&$qlog.level) {
                if let Some($qlog_streamer_ref) = &mut $qlog.streamer {
                    $body
                }
            }
        }
    }};
}
1933
// Shorthand aliases for the qlog event types emitted throughout this module.

// Transport parameters were set (local or remote).
#[cfg(feature = "qlog")]
const QLOG_PARAMS_SET: EventType =
    EventType::QuicEventType(QuicEventType::ParametersSet);

// A QUIC packet was received.
#[cfg(feature = "qlog")]
const QLOG_PACKET_RX: EventType =
    EventType::QuicEventType(QuicEventType::PacketReceived);

// A QUIC packet was sent.
#[cfg(feature = "qlog")]
const QLOG_PACKET_TX: EventType =
    EventType::QuicEventType(QuicEventType::PacketSent);

// Stream data moved between application and transport.
#[cfg(feature = "qlog")]
const QLOG_DATA_MV: EventType =
    EventType::QuicEventType(QuicEventType::StreamDataMoved);

// Recovery metrics (e.g. congestion/RTT state) were updated.
#[cfg(feature = "qlog")]
const QLOG_METRICS: EventType =
    EventType::QuicEventType(QuicEventType::RecoveryMetricsUpdated);

// The connection was closed.
#[cfg(feature = "qlog")]
const QLOG_CONNECTION_CLOSED: EventType =
    EventType::QuicEventType(QuicEventType::ConnectionClosed);
1957
/// Per-connection qlog state.
#[cfg(feature = "qlog")]
struct QlogInfo {
    /// The active qlog output stream; `None` until `set_qlog()` /
    /// `set_qlog_with_level()` is called.
    streamer: Option<qlog::streamer::QlogStreamer>,
    /// Whether the peer's transport parameters have already been logged, so
    /// they are only emitted once.
    logged_peer_params: bool,
    /// Minimum event importance that gets written to the stream.
    level: EventImportance,
}
1964
#[cfg(feature = "qlog")]
impl Default for QlogInfo {
    /// No streamer attached, nothing logged yet, and `Base` importance until
    /// the application configures qlog explicitly.
    fn default() -> Self {
        Self {
            streamer: None,
            logged_peer_params: false,
            level: EventImportance::Base,
        }
    }
}
1975
1976impl<F: BufFactory> Connection<F> {
1977 fn new(
1978 scid: &ConnectionId, retry_cids: Option<RetryConnectionIds>,
1979 client_dcid: Option<&ConnectionId>, local: SocketAddr, peer: SocketAddr,
1980 config: &mut Config, is_server: bool,
1981 ) -> Result<Connection<F>> {
1982 let tls = config.tls_ctx.new_handshake()?;
1983 Connection::with_tls(
1984 scid,
1985 retry_cids,
1986 client_dcid,
1987 local,
1988 peer,
1989 config,
1990 tls,
1991 is_server,
1992 )
1993 }
1994
    /// Creates a new connection around an already-constructed TLS handshake.
    ///
    /// `retry_cids` (server side, after a Retry was sent) and `client_dcid`
    /// (client side, custom initial DCID) are mutually exclusive; passing
    /// both is an error. The custom DCID path is only available with the
    /// `custom-client-dcid` feature.
    ///
    /// For clients, this also derives the Initial packet protection keys
    /// from the chosen destination connection ID.
    #[allow(clippy::too_many_arguments)]
    fn with_tls(
        scid: &ConnectionId, retry_cids: Option<RetryConnectionIds>,
        client_dcid: Option<&ConnectionId>, local: SocketAddr, peer: SocketAddr,
        config: &Config, tls: tls::Handshake, is_server: bool,
    ) -> Result<Connection<F>> {
        if retry_cids.is_some() && client_dcid.is_some() {
            // These are exclusive, the caller should only specify one or the
            // other.
            return Err(Error::InvalidDcidInitialization);
        }
        #[cfg(feature = "custom-client-dcid")]
        if let Some(client_dcid) = client_dcid {
            // The Minimum length is 8.
            // See https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3
            if client_dcid.to_vec().len() < 8 {
                return Err(Error::InvalidDcidInitialization);
            }
        }
        // Without the feature, a custom client DCID is always rejected.
        #[cfg(not(feature = "custom-client-dcid"))]
        if client_dcid.is_some() {
            return Err(Error::InvalidDcidInitialization);
        }

        let max_rx_data = config.local_transport_params.initial_max_data;

        // The SCID's hex form doubles as the connection's trace id.
        let scid_as_hex: Vec<String> =
            scid.iter().map(|b| format!("{b:02x}")).collect();

        // Only servers advertise a stateless reset token for the initial CID.
        let reset_token = if is_server {
            config.local_transport_params.stateless_reset_token
        } else {
            None
        };

        let recovery_config = recovery::RecoveryConfig::from_config(config);

        let mut path = path::Path::new(
            local,
            peer,
            &recovery_config,
            config.path_challenge_recv_max_queue_len,
            true,
            Some(config),
        );

        // If we sent a Retry assume the peer's address is verified.
        path.verified_peer_address = retry_cids.is_some();
        // Assume clients validate the server's address implicitly.
        path.peer_verified_local_address = is_server;

        // Do not allocate more than the number of active CIDs.
        let paths = path::PathMap::new(
            path,
            config.local_transport_params.active_conn_id_limit as usize,
            is_server,
        );

        let active_path_id = paths.get_active_path_id()?;

        let ids = cid::ConnectionIdentifiers::new(
            config.local_transport_params.active_conn_id_limit as usize,
            scid,
            active_path_id,
            reset_token,
        );

        // Initial connection-level flow control window: either exactly the
        // advertised initial_max_data, or 1.5x it capped at the default
        // connection window, depending on configuration.
        let initial_flow_control_window =
            if config.use_initial_max_data_as_flow_control_win {
                max_rx_data
            } else {
                cmp::min(max_rx_data / 2 * 3, DEFAULT_CONNECTION_WINDOW)
            };

        let mut conn = Connection {
            version: config.version,

            ids,

            trace_id: scid_as_hex.join(""),

            // One packet number space / crypto context per packet-number
            // epoch (Initial, Handshake, Application).
            pkt_num_spaces: [
                packet::PktNumSpace::new(),
                packet::PktNumSpace::new(),
                packet::PktNumSpace::new(),
            ],

            crypto_ctx: [
                packet::CryptoContext::new(),
                packet::CryptoContext::new(),
                packet::CryptoContext::new(),
            ],

            next_pkt_num: 0,

            pkt_num_manager: packet::PktNumManager::new(),

            // Peer's parameters are placeholders until the handshake (or a
            // resumed session) provides the real ones.
            peer_transport_params: TransportParams::default(),

            peer_transport_params_track_unknown: config
                .track_unknown_transport_params,

            local_transport_params: config.local_transport_params.clone(),

            handshake: tls,

            session: None,

            recovery_config,

            paths,
            path_challenge_recv_max_queue_len: config
                .path_challenge_recv_max_queue_len,
            path_challenge_rx_count: 0,

            application_protos: config.application_protos.clone(),

            // Packet/byte counters all start at zero.
            recv_count: 0,
            sent_count: 0,
            lost_count: 0,
            spurious_lost_count: 0,
            retrans_count: 0,
            dgram_sent_count: 0,
            dgram_recv_count: 0,
            sent_bytes: 0,
            recv_bytes: 0,
            acked_bytes: 0,
            lost_bytes: 0,

            rx_data: 0,
            flow_control: flowcontrol::FlowControl::new(
                max_rx_data,
                initial_flow_control_window,
                config.max_connection_window,
            ),
            should_send_max_data: false,
            should_send_max_streams_bidi: false,
            should_send_max_streams_uni: false,

            tx_cap: 0,
            tx_cap_factor: config.tx_cap_factor,

            tx_buffered: 0,
            tx_buffered_state: TxBufferTrackingState::Ok,

            tx_data: 0,
            max_tx_data: 0,
            last_tx_data: 0,

            stream_retrans_bytes: 0,

            streams: stream::StreamMap::new(
                config.local_transport_params.initial_max_streams_bidi,
                config.local_transport_params.initial_max_streams_uni,
                config.max_stream_window,
            ),

            odcid: None,

            rscid: None,

            token: None,

            local_error: None,

            peer_error: None,

            blocked_limit: None,

            idle_timer: None,

            draining_timer: None,

            undecryptable_pkts: VecDeque::new(),

            alpn: Vec::new(),

            is_server,

            derived_initial_secrets: false,

            did_version_negotiation: false,

            did_retry: false,

            got_peer_conn_id: false,

            // Assume clients validate the server's address implicitly.
            peer_verified_initial_address: is_server,

            parsed_peer_transport_params: false,

            handshake_completed: false,

            handshake_done_sent: false,
            handshake_done_acked: false,

            handshake_confirmed: false,

            key_phase: false,

            ack_eliciting_sent: false,

            closed: false,

            timed_out: false,

            grease: config.grease,

            enable_send_streams_blocked: config.enable_send_streams_blocked,

            keylog: None,

            #[cfg(feature = "qlog")]
            qlog: Default::default(),

            dgram_recv_queue: dgram::DatagramQueue::new(
                config.dgram_recv_max_queue_len,
            ),

            dgram_send_queue: dgram::DatagramQueue::new(
                config.dgram_send_max_queue_len,
            ),

            emit_dgram: true,

            disable_dcid_reuse: config.disable_dcid_reuse,

            reset_stream_local_count: 0,
            stopped_stream_local_count: 0,
            reset_stream_remote_count: 0,
            stopped_stream_remote_count: 0,

            data_blocked_sent_count: 0,
            stream_data_blocked_sent_count: 0,
            data_blocked_recv_count: 0,
            stream_data_blocked_recv_count: 0,

            streams_blocked_bidi_recv_count: 0,
            streams_blocked_uni_recv_count: 0,

            amplification_limited_count: 0,

            streams_blocked_bidi_state: Default::default(),
            streams_blocked_uni_state: Default::default(),

            max_amplification_factor: config.max_amplification_factor,
        };

        conn.streams.set_use_initial_max_data_as_flow_control_win(
            config.use_initial_max_data_as_flow_control_win,
        );

        // After a Retry the server must echo the original destination CID and
        // its Retry SCID in its transport parameters (RFC 9000 §7.3).
        if let Some(retry_cids) = retry_cids {
            conn.local_transport_params
                .original_destination_connection_id =
                Some(retry_cids.original_destination_cid.to_vec().into());

            conn.local_transport_params.retry_source_connection_id =
                Some(retry_cids.retry_source_cid.to_vec().into());

            conn.did_retry = true;
        }

        conn.local_transport_params.initial_source_connection_id =
            Some(conn.ids.get_scid(0)?.cid.to_vec().into());

        conn.handshake.init(is_server)?;

        // Pre-v1 versions used the draft-era transport parameters codepoint.
        conn.handshake
            .use_legacy_codepoint(config.version != PROTOCOL_VERSION_V1);

        conn.encode_transport_params()?;

        if !is_server {
            let dcid = if let Some(client_dcid) = client_dcid {
                // We already had an dcid generated for us, use it.
                client_dcid.to_vec()
            } else {
                // Derive initial secrets for the client. We can do this here
                // because we already generated the random
                // destination connection ID.
                let mut dcid = [0; 16];
                rand::rand_bytes(&mut dcid[..]);
                dcid.to_vec()
            };

            let (aead_open, aead_seal) = crypto::derive_initial_key_material(
                &dcid,
                conn.version,
                conn.is_server,
                false,
            )?;

            let reset_token = conn.peer_transport_params.stateless_reset_token;
            conn.set_initial_dcid(
                dcid.to_vec().into(),
                reset_token,
                active_path_id,
            )?;

            conn.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
            conn.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);

            conn.derived_initial_secrets = true;
        }

        Ok(conn)
    }
2302
    /// Sets keylog output to the designated [`Writer`].
    ///
    /// This needs to be called as soon as the connection is created, to avoid
    /// missing some early logs.
    ///
    /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
    #[inline]
    pub fn set_keylog(&mut self, writer: Box<dyn std::io::Write + Send + Sync>) {
        // Replaces any previously configured keylog writer.
        self.keylog = Some(writer);
    }
2313
    /// Sets qlog output to the designated [`Writer`].
    ///
    /// Only events included in `QlogLevel::Base` are written. The serialization
    /// format is JSON-SEQ.
    ///
    /// This needs to be called as soon as the connection is created, to avoid
    /// missing some early logs.
    ///
    /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
    #[cfg(feature = "qlog")]
    #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
    pub fn set_qlog(
        &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
        description: String,
    ) {
        // Convenience wrapper that defaults the level to `Base`.
        self.set_qlog_with_level(writer, title, description, QlogLevel::Base)
    }
2331
    /// Sets qlog output to the designated [`Writer`].
    ///
    /// Only qlog events included in the specified `QlogLevel` are written. The
    /// serialization format is JSON-SEQ.
    ///
    /// This needs to be called as soon as the connection is created, to avoid
    /// missing some early logs.
    ///
    /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
    #[cfg(feature = "qlog")]
    #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
    pub fn set_qlog_with_level(
        &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
        description: String, qlog_level: QlogLevel,
    ) {
        use qlog::events::quic::TransportInitiator;
        use qlog::events::HTTP3_URI;
        use qlog::events::QUIC_URI;
        use qlog::CommonFields;
        use qlog::ReferenceTime;

        // Record which side of the connection this trace is taken from.
        let vp = if self.is_server {
            qlog::VantagePointType::Server
        } else {
            qlog::VantagePointType::Client
        };

        // Map the public QlogLevel to the internal importance filter.
        let level = match qlog_level {
            QlogLevel::Core => EventImportance::Core,

            QlogLevel::Base => EventImportance::Base,

            QlogLevel::Extra => EventImportance::Extra,
        };

        self.qlog.level = level;

        // Best effort to get Instant::now() and SystemTime::now() as closely
        // together as possible.
        let now = Instant::now();
        let now_wall_clock = std::time::SystemTime::now();
        let common_fields = CommonFields {
            reference_time: ReferenceTime::new_monotonic(Some(now_wall_clock)),
            ..Default::default()
        };
        let trace = qlog::TraceSeq::new(
            Some(title.to_string()),
            Some(description.to_string()),
            Some(common_fields),
            Some(qlog::VantagePoint {
                name: None,
                ty: vp,
                flow: None,
            }),
            vec![QUIC_URI.to_string(), HTTP3_URI.to_string()],
        );

        let mut streamer = qlog::streamer::QlogStreamer::new(
            Some(title),
            Some(description),
            now,
            trace,
            self.qlog.level,
            qlog::streamer::EventTimePrecision::MicroSeconds,
            writer,
        );

        // qlog is best-effort: I/O failures while logging are ignored.
        streamer.start_log().ok();

        let ev_data = self
            .local_transport_params
            .to_qlog(TransportInitiator::Local, self.handshake.cipher());

        // This event occurs very early, so just mark the relative time as 0.0.
        streamer.add_event(Event::with_time(0.0, ev_data)).ok();

        self.qlog.streamer = Some(streamer);
    }
2410
    /// Returns a mutable reference to the QlogStreamer, if it exists.
    ///
    /// Returns `None` until qlog has been configured via `set_qlog()` /
    /// `set_qlog_with_level()`.
    #[cfg(feature = "qlog")]
    #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
    pub fn qlog_streamer(&mut self) -> Option<&mut qlog::streamer::QlogStreamer> {
        self.qlog.streamer.as_mut()
    }
2417
2418 /// Configures the given session for resumption.
2419 ///
2420 /// On the client, this can be used to offer the given serialized session,
2421 /// as returned by [`session()`], for resumption.
2422 ///
2423 /// This must only be called immediately after creating a connection, that
2424 /// is, before any packet is sent or received.
2425 ///
2426 /// [`session()`]: struct.Connection.html#method.session
2427 #[inline]
2428 pub fn set_session(&mut self, session: &[u8]) -> Result<()> {
2429 let mut b = octets::Octets::with_slice(session);
2430
2431 let session_len = b.get_u64()? as usize;
2432 let session_bytes = b.get_bytes(session_len)?;
2433
2434 self.handshake.set_session(session_bytes.as_ref())?;
2435
2436 let raw_params_len = b.get_u64()? as usize;
2437 let raw_params_bytes = b.get_bytes(raw_params_len)?;
2438
2439 let peer_params = TransportParams::decode(
2440 raw_params_bytes.as_ref(),
2441 self.is_server,
2442 self.peer_transport_params_track_unknown,
2443 )?;
2444
2445 self.process_peer_transport_params(peer_params)?;
2446
2447 Ok(())
2448 }
2449
2450 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
2451 ///
2452 /// This must only be called immediately after creating a connection, that
2453 /// is, before any packet is sent or received.
2454 ///
2455 /// The default value is infinite, that is, no timeout is used unless
2456 /// already configured when creating the connection.
2457 pub fn set_max_idle_timeout(&mut self, v: u64) -> Result<()> {
2458 self.local_transport_params.max_idle_timeout =
2459 cmp::min(v, octets::MAX_VAR_INT);
2460
2461 self.encode_transport_params()
2462 }
2463
2464 /// Sets the congestion control algorithm used.
2465 ///
2466 /// This function can only be called inside one of BoringSSL's handshake
2467 /// callbacks, before any packet has been sent. Calling this function any
2468 /// other time will have no effect.
2469 ///
2470 /// See [`Config::set_cc_algorithm()`].
2471 ///
2472 /// [`Config::set_cc_algorithm()`]: struct.Config.html#method.set_cc_algorithm
2473 #[cfg(feature = "boringssl-boring-crate")]
2474 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2475 pub fn set_cc_algorithm_in_handshake(
2476 ssl: &mut boring::ssl::SslRef, algo: CongestionControlAlgorithm,
2477 ) -> Result<()> {
2478 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2479
2480 ex_data.recovery_config.cc_algorithm = algo;
2481
2482 Ok(())
2483 }
2484
2485 /// Sets custom BBR settings.
2486 ///
2487 /// This API is experimental and will be removed in the future.
2488 ///
2489 /// Currently this only applies if cc_algorithm is
2490 /// `CongestionControlAlgorithm::Bbr2Gcongestion` is set.
2491 ///
2492 /// This function can only be called inside one of BoringSSL's handshake
2493 /// callbacks, before any packet has been sent. Calling this function any
2494 /// other time will have no effect.
2495 ///
2496 /// See [`Config::set_custom_bbr_settings()`].
2497 ///
2498 /// [`Config::set_custom_bbr_settings()`]: struct.Config.html#method.set_custom_bbr_settings
2499 #[cfg(all(feature = "boringssl-boring-crate", feature = "internal"))]
2500 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2501 #[doc(hidden)]
2502 pub fn set_custom_bbr_settings_in_handshake(
2503 ssl: &mut boring::ssl::SslRef, custom_bbr_params: BbrParams,
2504 ) -> Result<()> {
2505 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2506
2507 ex_data.recovery_config.custom_bbr_params = Some(custom_bbr_params);
2508
2509 Ok(())
2510 }
2511
2512 /// Sets the congestion control algorithm used by string.
2513 ///
2514 /// This function can only be called inside one of BoringSSL's handshake
2515 /// callbacks, before any packet has been sent. Calling this function any
2516 /// other time will have no effect.
2517 ///
2518 /// See [`Config::set_cc_algorithm_name()`].
2519 ///
2520 /// [`Config::set_cc_algorithm_name()`]: struct.Config.html#method.set_cc_algorithm_name
2521 #[cfg(feature = "boringssl-boring-crate")]
2522 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2523 pub fn set_cc_algorithm_name_in_handshake(
2524 ssl: &mut boring::ssl::SslRef, name: &str,
2525 ) -> Result<()> {
2526 let cc_algo = CongestionControlAlgorithm::from_str(name)?;
2527 Self::set_cc_algorithm_in_handshake(ssl, cc_algo)
2528 }
2529
2530 /// Sets initial congestion window size in terms of packet count.
2531 ///
2532 /// This function can only be called inside one of BoringSSL's handshake
2533 /// callbacks, before any packet has been sent. Calling this function any
2534 /// other time will have no effect.
2535 ///
2536 /// See [`Config::set_initial_congestion_window_packets()`].
2537 ///
2538 /// [`Config::set_initial_congestion_window_packets()`]: struct.Config.html#method.set_initial_congestion_window_packets
2539 #[cfg(feature = "boringssl-boring-crate")]
2540 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2541 pub fn set_initial_congestion_window_packets_in_handshake(
2542 ssl: &mut boring::ssl::SslRef, packets: usize,
2543 ) -> Result<()> {
2544 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2545
2546 ex_data.recovery_config.initial_congestion_window_packets = packets;
2547
2548 Ok(())
2549 }
2550
2551 /// Configure whether to enable relaxed loss detection on spurious loss.
2552 ///
2553 /// This function can only be called inside one of BoringSSL's handshake
2554 /// callbacks, before any packet has been sent. Calling this function any
2555 /// other time will have no effect.
2556 ///
2557 /// See [`Config::set_enable_relaxed_loss_threshold()`].
2558 ///
2559 /// [`Config::set_enable_relaxed_loss_threshold()`]: struct.Config.html#method.set_enable_relaxed_loss_threshold
2560 #[cfg(feature = "boringssl-boring-crate")]
2561 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2562 pub fn set_enable_relaxed_loss_threshold_in_handshake(
2563 ssl: &mut boring::ssl::SslRef, enable: bool,
2564 ) -> Result<()> {
2565 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2566
2567 ex_data.recovery_config.enable_relaxed_loss_threshold = enable;
2568
2569 Ok(())
2570 }
2571
2572 /// Configure whether to enable the CUBIC idle restart fix.
2573 ///
2574 /// This function can only be called inside one of BoringSSL's handshake
2575 /// callbacks, before any packet has been sent. Calling this function any
2576 /// other time will have no effect.
2577 ///
2578 /// See [`Config::set_enable_cubic_idle_restart_fix()`].
2579 ///
2580 /// [`Config::set_enable_cubic_idle_restart_fix()`]: struct.Config.html#method.set_enable_cubic_idle_restart_fix
2581 #[cfg(feature = "boringssl-boring-crate")]
2582 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2583 pub fn set_enable_cubic_idle_restart_fix_in_handshake(
2584 ssl: &mut boring::ssl::SslRef, enable: bool,
2585 ) -> Result<()> {
2586 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2587
2588 ex_data.recovery_config.enable_cubic_idle_restart_fix = enable;
2589
2590 Ok(())
2591 }
2592
2593 /// Configures whether to enable HyStart++.
2594 ///
2595 /// This function can only be called inside one of BoringSSL's handshake
2596 /// callbacks, before any packet has been sent. Calling this function any
2597 /// other time will have no effect.
2598 ///
2599 /// See [`Config::enable_hystart()`].
2600 ///
2601 /// [`Config::enable_hystart()`]: struct.Config.html#method.enable_hystart
2602 #[cfg(feature = "boringssl-boring-crate")]
2603 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2604 pub fn set_hystart_in_handshake(
2605 ssl: &mut boring::ssl::SslRef, v: bool,
2606 ) -> Result<()> {
2607 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2608
2609 ex_data.recovery_config.hystart = v;
2610
2611 Ok(())
2612 }
2613
2614 /// Configures whether to enable pacing.
2615 ///
2616 /// This function can only be called inside one of BoringSSL's handshake
2617 /// callbacks, before any packet has been sent. Calling this function any
2618 /// other time will have no effect.
2619 ///
2620 /// See [`Config::enable_pacing()`].
2621 ///
2622 /// [`Config::enable_pacing()`]: struct.Config.html#method.enable_pacing
2623 #[cfg(feature = "boringssl-boring-crate")]
2624 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2625 pub fn set_pacing_in_handshake(
2626 ssl: &mut boring::ssl::SslRef, v: bool,
2627 ) -> Result<()> {
2628 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2629
2630 ex_data.recovery_config.pacing = v;
2631
2632 Ok(())
2633 }
2634
2635 /// Sets the max value for pacing rate.
2636 ///
2637 /// This function can only be called inside one of BoringSSL's handshake
2638 /// callbacks, before any packet has been sent. Calling this function any
2639 /// other time will have no effect.
2640 ///
2641 /// See [`Config::set_max_pacing_rate()`].
2642 ///
2643 /// [`Config::set_max_pacing_rate()`]: struct.Config.html#method.set_max_pacing_rate
2644 #[cfg(feature = "boringssl-boring-crate")]
2645 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2646 pub fn set_max_pacing_rate_in_handshake(
2647 ssl: &mut boring::ssl::SslRef, v: Option<u64>,
2648 ) -> Result<()> {
2649 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2650
2651 ex_data.recovery_config.max_pacing_rate = v;
2652
2653 Ok(())
2654 }
2655
2656 /// Sets the maximum outgoing UDP payload size.
2657 ///
2658 /// This function can only be called inside one of BoringSSL's handshake
2659 /// callbacks, before any packet has been sent. Calling this function any
2660 /// other time will have no effect.
2661 ///
2662 /// See [`Config::set_max_send_udp_payload_size()`].
2663 ///
2664 /// [`Config::set_max_send_udp_payload_size()`]: struct.Config.html#method.set_max_send_udp_payload_size
2665 #[cfg(feature = "boringssl-boring-crate")]
2666 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2667 pub fn set_max_send_udp_payload_size_in_handshake(
2668 ssl: &mut boring::ssl::SslRef, v: usize,
2669 ) -> Result<()> {
2670 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2671
2672 ex_data.recovery_config.max_send_udp_payload_size = v;
2673
2674 Ok(())
2675 }
2676
2677 /// Sets the send capacity factor.
2678 ///
2679 /// This function can only be called inside one of BoringSSL's handshake
2680 /// callbacks, before any packet has been sent. Calling this function any
2681 /// other time will have no effect.
2682 ///
2683 /// See [`Config::set_send_capacity_factor()`].
2684 ///
2685 /// [`Config::set_max_send_udp_payload_size()`]: struct.Config.html#method.set_send_capacity_factor
2686 #[cfg(feature = "boringssl-boring-crate")]
2687 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2688 pub fn set_send_capacity_factor_in_handshake(
2689 ssl: &mut boring::ssl::SslRef, v: f64,
2690 ) -> Result<()> {
2691 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2692
2693 ex_data.tx_cap_factor = v;
2694
2695 Ok(())
2696 }
2697
2698 /// Configures whether to do path MTU discovery.
2699 ///
2700 /// This function can only be called inside one of BoringSSL's handshake
2701 /// callbacks, before any packet has been sent. Calling this function any
2702 /// other time will have no effect.
2703 ///
2704 /// See [`Config::discover_pmtu()`].
2705 ///
2706 /// [`Config::discover_pmtu()`]: struct.Config.html#method.discover_pmtu
2707 #[cfg(feature = "boringssl-boring-crate")]
2708 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2709 pub fn set_discover_pmtu_in_handshake(
2710 ssl: &mut boring::ssl::SslRef, discover: bool, max_probes: u8,
2711 ) -> Result<()> {
2712 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2713
2714 ex_data.pmtud = Some((discover, max_probes));
2715
2716 Ok(())
2717 }
2718
2719 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
2720 ///
2721 /// This function can only be called inside one of BoringSSL's handshake
2722 /// callbacks, before any packet has been sent. Calling this function any
2723 /// other time will have no effect.
2724 ///
2725 /// See [`Config::set_max_idle_timeout()`].
2726 ///
2727 /// [`Config::set_max_idle_timeout()`]: struct.Config.html#method.set_max_idle_timeout
2728 #[cfg(feature = "boringssl-boring-crate")]
2729 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2730 pub fn set_max_idle_timeout_in_handshake(
2731 ssl: &mut boring::ssl::SslRef, v: u64,
2732 ) -> Result<()> {
2733 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2734
2735 ex_data.local_transport_params.max_idle_timeout = v;
2736
2737 Self::set_transport_parameters_in_hanshake(
2738 ex_data.local_transport_params.clone(),
2739 ex_data.is_server,
2740 ssl,
2741 )
2742 }
2743
2744 /// Sets the `initial_max_streams_bidi` transport parameter.
2745 ///
2746 /// This function can only be called inside one of BoringSSL's handshake
2747 /// callbacks, before any packet has been sent. Calling this function any
2748 /// other time will have no effect.
2749 ///
2750 /// See [`Config::set_initial_max_streams_bidi()`].
2751 ///
2752 /// [`Config::set_initial_max_streams_bidi()`]: struct.Config.html#method.set_initial_max_streams_bidi
2753 #[cfg(feature = "boringssl-boring-crate")]
2754 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2755 pub fn set_initial_max_streams_bidi_in_handshake(
2756 ssl: &mut boring::ssl::SslRef, v: u64,
2757 ) -> Result<()> {
2758 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2759
2760 ex_data.local_transport_params.initial_max_streams_bidi = v;
2761
2762 Self::set_transport_parameters_in_hanshake(
2763 ex_data.local_transport_params.clone(),
2764 ex_data.is_server,
2765 ssl,
2766 )
2767 }
2768
2769 #[cfg(feature = "boringssl-boring-crate")]
2770 fn set_transport_parameters_in_hanshake(
2771 params: TransportParams, is_server: bool, ssl: &mut boring::ssl::SslRef,
2772 ) -> Result<()> {
2773 use foreign_types_shared::ForeignTypeRef;
2774
2775 // In order to apply the new parameter to the TLS state before TPs are
2776 // written into a TLS message, we need to re-encode all TPs immediately.
2777 //
2778 // Since we don't have direct access to the main `Connection` object, we
2779 // need to re-create the `Handshake` state from the `SslRef`.
2780 //
2781 // SAFETY: the `Handshake` object must not be drop()ed, otherwise it
2782 // would free the underlying BoringSSL structure.
2783 let mut handshake =
2784 unsafe { tls::Handshake::from_ptr(ssl.as_ptr() as _) };
2785 handshake.set_quic_transport_params(¶ms, is_server)?;
2786
2787 // Avoid running `drop(handshake)` as that would free the underlying
2788 // handshake state.
2789 std::mem::forget(handshake);
2790
2791 Ok(())
2792 }
2793
2794 /// Sets the `use_initial_max_data_as_flow_control_win` flag during SSL
2795 /// handshake.
2796 ///
2797 /// This function can only be called inside one of BoringSSL's handshake
2798 /// callbacks, before any packet has been sent. Calling this function any
2799 /// other time will have no effect.
2800 ///
2801 /// See [`Connection::enable_use_initial_max_data_as_flow_control_win()`].
2802 #[cfg(feature = "boringssl-boring-crate")]
2803 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2804 pub fn set_use_initial_max_data_as_flow_control_win_in_handshake(
2805 ssl: &mut boring::ssl::SslRef,
2806 ) -> Result<()> {
2807 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2808
2809 ex_data.use_initial_max_data_as_flow_control_win = true;
2810 Ok(())
2811 }
2812
    /// Processes QUIC packets received from the peer.
    ///
    /// On success the number of bytes processed from the input buffer is
    /// returned. On error the connection will be closed by calling [`close()`]
    /// with the appropriate error code.
    ///
    /// Coalesced packets will be processed as necessary.
    ///
    /// Note that the contents of the input buffer `buf` might be modified by
    /// this function due to, for example, in-place decryption.
    ///
    /// [`close()`]: struct.Connection.html#method.close
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut buf = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// loop {
    ///     let (read, from) = socket.recv_from(&mut buf).unwrap();
    ///
    ///     let recv_info = quiche::RecvInfo {
    ///         from,
    ///         to: local,
    ///     };
    ///
    ///     let read = match conn.recv(&mut buf[..read], recv_info) {
    ///         Ok(v) => v,
    ///
    ///         Err(e) => {
    ///             // An error occurred, handle it.
    ///             break;
    ///         },
    ///     };
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn recv(&mut self, buf: &mut [u8], info: RecvInfo) -> Result<usize> {
        let len = buf.len();

        // An empty datagram carries no packet to process.
        if len == 0 {
            return Err(Error::BufferTooShort);
        }

        // Map the datagram's 4-tuple to an existing path, if any.
        let recv_pid = self.paths.path_id_from_addrs(&(info.to, info.from));

        if let Some(recv_pid) = recv_pid {
            let recv_path = self.paths.get_mut(recv_pid)?;

            // Keep track of how many bytes we received from the client, so we
            // can limit bytes sent back before address validation, to a
            // multiple of this. The limit needs to be increased early on, so
            // that if there is an error there is enough credit to send a
            // CONNECTION_CLOSE.
            //
            // It doesn't matter if the packets received were valid or not, we
            // only need to track the total amount of bytes received.
            //
            // Note that we also need to limit the number of bytes we sent on a
            // path if we are not the host that initiated its usage.
            if self.is_server && !recv_path.verified_peer_address {
                recv_path.max_send_bytes += len * self.max_amplification_factor;
            }
        } else if !self.is_server {
            // If a client receives packets from an unknown server address,
            // the client MUST discard these packets.
            trace!(
                "{} client received packet from unknown address {:?}, dropping",
                self.trace_id,
                info,
            );

            return Ok(len);
        }

        let mut done = 0;
        let mut left = len;

        // Process coalesced packets.
        while left > 0 {
            let read = match self.recv_single(
                &mut buf[len - left..len],
                &info,
                recv_pid,
            ) {
                Ok(v) => v,

                Err(Error::Done) => {
                    // If the packet can't be processed or decrypted, check if
                    // it's a stateless reset.
                    if self.is_stateless_reset(&buf[len - left..len]) {
                        trace!("{} packet is a stateless reset", self.trace_id);

                        self.mark_closed();
                    }

                    // Consume the remainder of the datagram: an unprocessable
                    // packet also terminates coalescing.
                    left
                },

                Err(e) => {
                    // In case of error processing the incoming packet, close
                    // the connection.
                    self.close(false, e.to_wire(), b"").ok();
                    return Err(e);
                },
            };

            done += read;
            left -= read;
        }

        // Even though the packet was previously "accepted", it
        // should be safe to forward the error, as it also comes
        // from the `recv()` method.
        self.process_undecrypted_0rtt_packets()?;

        Ok(done)
    }
2936
2937 fn process_undecrypted_0rtt_packets(&mut self) -> Result<()> {
2938 // Process previously undecryptable 0-RTT packets if the decryption key
2939 // is now available.
2940 if self.crypto_ctx[packet::Epoch::Application]
2941 .crypto_0rtt_open
2942 .is_some()
2943 {
2944 while let Some((mut pkt, info)) = self.undecryptable_pkts.pop_front()
2945 {
2946 if let Err(e) = self.recv(&mut pkt, info) {
2947 self.undecryptable_pkts.clear();
2948
2949 return Err(e);
2950 }
2951 }
2952 }
2953 Ok(())
2954 }
2955
2956 /// Returns true if a QUIC packet is a stateless reset.
2957 fn is_stateless_reset(&self, buf: &[u8]) -> bool {
2958 // If the packet is too small, then we just throw it away.
2959 let buf_len = buf.len();
2960 if buf_len < 21 {
2961 return false;
2962 }
2963
2964 // TODO: we should iterate over all active destination connection IDs
2965 // and check against their reset token.
2966 match self.peer_transport_params.stateless_reset_token {
2967 Some(token) => {
2968 let token_len = 16;
2969
2970 crypto::verify_slices_are_equal(
2971 &token.to_be_bytes(),
2972 &buf[buf_len - token_len..buf_len],
2973 )
2974 .is_ok()
2975 },
2976
2977 None => false,
2978 }
2979 }
2980
2981 /// Processes a single QUIC packet received from the peer.
2982 ///
2983 /// On success the number of bytes processed from the input buffer is
2984 /// returned. When the [`Done`] error is returned, processing of the
2985 /// remainder of the incoming UDP datagram should be interrupted.
2986 ///
2987 /// Note that a server might observe a new 4-tuple, preventing to
2988 /// know in advance to which path the incoming packet belongs to (`recv_pid`
2989 /// is `None`). As a client, packets from unknown 4-tuple are dropped
2990 /// beforehand (see `recv()`).
2991 ///
2992 /// On error, an error other than [`Done`] is returned.
2993 ///
2994 /// [`Done`]: enum.Error.html#variant.Done
2995 fn recv_single(
2996 &mut self, buf: &mut [u8], info: &RecvInfo, recv_pid: Option<usize>,
2997 ) -> Result<usize> {
2998 let now = Instant::now();
2999
3000 if buf.is_empty() {
3001 return Err(Error::Done);
3002 }
3003
3004 if self.is_closed() || self.is_draining() {
3005 return Err(Error::Done);
3006 }
3007
3008 let is_closing = self.local_error.is_some();
3009
3010 if is_closing {
3011 return Err(Error::Done);
3012 }
3013
3014 let buf_len = buf.len();
3015
3016 let mut b = octets::OctetsMut::with_slice(buf);
3017
3018 let mut hdr = Header::from_bytes(&mut b, self.source_id().len())
3019 .map_err(|e| {
3020 drop_pkt_on_err(
3021 e,
3022 self.recv_count,
3023 self.is_server,
3024 &self.trace_id,
3025 )
3026 })?;
3027
3028 if hdr.ty == Type::VersionNegotiation {
3029 // Version negotiation packets can only be sent by the server.
3030 if self.is_server {
3031 return Err(Error::Done);
3032 }
3033
3034 // Ignore duplicate version negotiation.
3035 if self.did_version_negotiation {
3036 return Err(Error::Done);
3037 }
3038
3039 // Ignore version negotiation if any other packet has already been
3040 // successfully processed.
3041 if self.recv_count > 0 {
3042 return Err(Error::Done);
3043 }
3044
3045 if hdr.dcid != self.source_id() {
3046 return Err(Error::Done);
3047 }
3048
3049 if hdr.scid != self.destination_id() {
3050 return Err(Error::Done);
3051 }
3052
3053 trace!("{} rx pkt {:?}", self.trace_id, hdr);
3054
3055 let versions = hdr.versions.ok_or(Error::Done)?;
3056
3057 // Ignore version negotiation if the version already selected is
3058 // listed.
3059 if versions.contains(&self.version) {
3060 return Err(Error::Done);
3061 }
3062
3063 let supported_versions =
3064 versions.iter().filter(|&&v| version_is_supported(v));
3065
3066 let mut found_version = false;
3067
3068 for &v in supported_versions {
3069 found_version = true;
3070
3071 // The final version takes precedence over draft ones.
3072 if v == PROTOCOL_VERSION_V1 {
3073 self.version = v;
3074 break;
3075 }
3076
3077 self.version = cmp::max(self.version, v);
3078 }
3079
3080 if !found_version {
3081 // We don't support any of the versions offered.
3082 //
3083 // While a man-in-the-middle attacker might be able to
3084 // inject a version negotiation packet that triggers this
3085 // failure, the window of opportunity is very small and
3086 // this error is quite useful for debugging, so don't just
3087 // ignore the packet.
3088 return Err(Error::UnknownVersion);
3089 }
3090
3091 self.did_version_negotiation = true;
3092
3093 // Derive Initial secrets based on the new version.
3094 let (aead_open, aead_seal) = crypto::derive_initial_key_material(
3095 &self.destination_id(),
3096 self.version,
3097 self.is_server,
3098 true,
3099 )?;
3100
3101 // Reset connection state to force sending another Initial packet.
3102 self.drop_epoch_state(packet::Epoch::Initial, now);
3103 self.got_peer_conn_id = false;
3104 self.handshake.clear()?;
3105
3106 self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
3107 self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);
3108
3109 self.handshake
3110 .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);
3111
3112 // Encode transport parameters again, as the new version might be
3113 // using a different format.
3114 self.encode_transport_params()?;
3115
3116 return Err(Error::Done);
3117 }
3118
3119 if hdr.ty == Type::Retry {
3120 // Retry packets can only be sent by the server.
3121 if self.is_server {
3122 return Err(Error::Done);
3123 }
3124
3125 // Ignore duplicate retry.
3126 if self.did_retry {
3127 return Err(Error::Done);
3128 }
3129
3130 // Check if Retry packet is valid.
3131 if packet::verify_retry_integrity(
3132 &b,
3133 &self.destination_id(),
3134 self.version,
3135 )
3136 .is_err()
3137 {
3138 return Err(Error::Done);
3139 }
3140
3141 trace!("{} rx pkt {:?}", self.trace_id, hdr);
3142
3143 self.token = hdr.token;
3144 self.did_retry = true;
3145
3146 // Remember peer's new connection ID.
3147 self.odcid = Some(self.destination_id().into_owned());
3148
3149 self.set_initial_dcid(
3150 hdr.scid.clone(),
3151 None,
3152 self.paths.get_active_path_id()?,
3153 )?;
3154
3155 self.rscid = Some(self.destination_id().into_owned());
3156
3157 // Derive Initial secrets using the new connection ID.
3158 let (aead_open, aead_seal) = crypto::derive_initial_key_material(
3159 &hdr.scid,
3160 self.version,
3161 self.is_server,
3162 true,
3163 )?;
3164
3165 // Reset connection state to force sending another Initial packet.
3166 self.drop_epoch_state(packet::Epoch::Initial, now);
3167 self.got_peer_conn_id = false;
3168 self.handshake.clear()?;
3169
3170 self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
3171 self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);
3172
3173 return Err(Error::Done);
3174 }
3175
3176 if self.is_server && !self.did_version_negotiation {
3177 if !version_is_supported(hdr.version) {
3178 return Err(Error::UnknownVersion);
3179 }
3180
3181 self.version = hdr.version;
3182 self.did_version_negotiation = true;
3183
3184 self.handshake
3185 .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);
3186
3187 // Encode transport parameters again, as the new version might be
3188 // using a different format.
3189 self.encode_transport_params()?;
3190 }
3191
3192 if hdr.ty != Type::Short && hdr.version != self.version {
3193 // At this point version negotiation was already performed, so
3194 // ignore packets that don't match the connection's version.
3195 return Err(Error::Done);
3196 }
3197
3198 // Long header packets have an explicit payload length, but short
3199 // packets don't so just use the remaining capacity in the buffer.
3200 let payload_len = if hdr.ty == Type::Short {
3201 b.cap()
3202 } else {
3203 b.get_varint().map_err(|e| {
3204 drop_pkt_on_err(
3205 e.into(),
3206 self.recv_count,
3207 self.is_server,
3208 &self.trace_id,
3209 )
3210 })? as usize
3211 };
3212
3213 // Make sure the buffer is same or larger than an explicit
3214 // payload length.
3215 if payload_len > b.cap() {
3216 return Err(drop_pkt_on_err(
3217 Error::InvalidPacket,
3218 self.recv_count,
3219 self.is_server,
3220 &self.trace_id,
3221 ));
3222 }
3223
3224 // Derive initial secrets on the server.
3225 if !self.derived_initial_secrets {
3226 let (aead_open, aead_seal) = crypto::derive_initial_key_material(
3227 &hdr.dcid,
3228 self.version,
3229 self.is_server,
3230 false,
3231 )?;
3232
3233 self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
3234 self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);
3235
3236 self.derived_initial_secrets = true;
3237 }
3238
3239 // Select packet number space epoch based on the received packet's type.
3240 let epoch = hdr.ty.to_epoch()?;
3241
3242 // Select AEAD context used to open incoming packet.
3243 let aead = if hdr.ty == Type::ZeroRTT {
3244 // Only use 0-RTT key if incoming packet is 0-RTT.
3245 self.crypto_ctx[epoch].crypto_0rtt_open.as_ref()
3246 } else {
3247 // Otherwise use the packet number space's main key.
3248 self.crypto_ctx[epoch].crypto_open.as_ref()
3249 };
3250
3251 // Finally, discard packet if no usable key is available.
3252 let mut aead = match aead {
3253 Some(v) => v,
3254
3255 None => {
3256 if hdr.ty == Type::ZeroRTT &&
3257 self.undecryptable_pkts.len() < MAX_UNDECRYPTABLE_PACKETS &&
3258 !self.is_established()
3259 {
3260 // Buffer 0-RTT packets when the required read key is not
3261 // available yet, and process them later.
3262 //
3263 // TODO: in the future we might want to buffer other types
3264 // of undecryptable packets as well.
3265 let pkt_len = b.off() + payload_len;
3266 let pkt = (b.buf()[..pkt_len]).to_vec();
3267
3268 self.undecryptable_pkts.push_back((pkt, *info));
3269 return Ok(pkt_len);
3270 }
3271
3272 let e = drop_pkt_on_err(
3273 Error::CryptoFail,
3274 self.recv_count,
3275 self.is_server,
3276 &self.trace_id,
3277 );
3278
3279 return Err(e);
3280 },
3281 };
3282
3283 let aead_tag_len = aead.alg().tag_len();
3284
3285 packet::decrypt_hdr(&mut b, &mut hdr, aead).map_err(|e| {
3286 drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
3287 })?;
3288
3289 let pn = packet::decode_pkt_num(
3290 self.pkt_num_spaces[epoch].largest_rx_pkt_num,
3291 hdr.pkt_num,
3292 hdr.pkt_num_len,
3293 );
3294
3295 let pn_len = hdr.pkt_num_len;
3296
3297 trace!(
3298 "{} rx pkt {:?} len={} pn={} {}",
3299 self.trace_id,
3300 hdr,
3301 payload_len,
3302 pn,
3303 AddrTupleFmt(info.from, info.to)
3304 );
3305
3306 #[cfg(feature = "qlog")]
3307 let mut qlog_frames = vec![];
3308
3309 // Check for key update.
3310 let mut aead_next = None;
3311
3312 if self.handshake_confirmed &&
3313 hdr.ty != Type::ZeroRTT &&
3314 hdr.key_phase != self.key_phase
3315 {
3316 // Check if this packet arrived before key update.
3317 if let Some(key_update) = self.crypto_ctx[epoch]
3318 .key_update
3319 .as_ref()
3320 .and_then(|key_update| {
3321 (pn < key_update.pn_on_update).then_some(key_update)
3322 })
3323 {
3324 aead = &key_update.crypto_open;
3325 } else {
3326 trace!("{} peer-initiated key update", self.trace_id);
3327
3328 aead_next = Some((
3329 self.crypto_ctx[epoch]
3330 .crypto_open
3331 .as_ref()
3332 .unwrap()
3333 .derive_next_packet_key()?,
3334 self.crypto_ctx[epoch]
3335 .crypto_seal
3336 .as_ref()
3337 .unwrap()
3338 .derive_next_packet_key()?,
3339 ));
3340
3341 // `aead_next` is always `Some()` at this point, so the `unwrap()`
3342 // will never fail.
3343 aead = &aead_next.as_ref().unwrap().0;
3344 }
3345 }
3346
3347 let mut payload = packet::decrypt_pkt(
3348 &mut b,
3349 pn,
3350 pn_len,
3351 payload_len,
3352 aead,
3353 )
3354 .map_err(|e| {
3355 drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
3356 })?;
3357
3358 if self.pkt_num_spaces[epoch].recv_pkt_num.contains(pn) {
3359 trace!("{} ignored duplicate packet {}", self.trace_id, pn);
3360 return Err(Error::Done);
3361 }
3362
3363 // Packets with no frames are invalid.
3364 if payload.cap() == 0 {
3365 return Err(Error::InvalidPacket);
3366 }
3367
3368 // Now that we decrypted the packet, let's see if we can map it to an
3369 // existing path.
3370 let recv_pid = if hdr.ty == Type::Short && self.got_peer_conn_id {
3371 let pkt_dcid = ConnectionId::from_ref(&hdr.dcid);
3372 self.get_or_create_recv_path_id(recv_pid, &pkt_dcid, buf_len, info)?
3373 } else {
3374 // During handshake, we are on the initial path.
3375 self.paths.get_active_path_id()?
3376 };
3377
3378 // The key update is verified once a packet is successfully decrypted
3379 // using the new keys.
3380 if let Some((open_next, seal_next)) = aead_next {
3381 if !self.crypto_ctx[epoch]
3382 .key_update
3383 .as_ref()
3384 .is_none_or(|prev| prev.update_acked)
3385 {
3386 // Peer has updated keys twice without awaiting confirmation.
3387 return Err(Error::KeyUpdate);
3388 }
3389
3390 trace!("{} key update verified", self.trace_id);
3391
3392 let _ = self.crypto_ctx[epoch].crypto_seal.replace(seal_next);
3393
3394 let open_prev = self.crypto_ctx[epoch]
3395 .crypto_open
3396 .replace(open_next)
3397 .unwrap();
3398
3399 let recv_path = self.paths.get_mut(recv_pid)?;
3400
3401 self.crypto_ctx[epoch].key_update = Some(packet::KeyUpdate {
3402 crypto_open: open_prev,
3403 pn_on_update: pn,
3404 update_acked: false,
3405 timer: now + (recv_path.recovery.pto() * 3),
3406 });
3407
3408 self.key_phase = !self.key_phase;
3409
3410 qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
3411 let trigger = Some(
3412 qlog::events::quic::KeyUpdateOrRetiredTrigger::RemoteUpdate,
3413 );
3414
3415 let ev_data_client =
3416 EventData::QuicKeyUpdated(qlog::events::quic::KeyUpdated {
3417 key_type: qlog::events::quic::KeyType::Client1RttSecret,
3418 trigger: trigger.clone(),
3419 ..Default::default()
3420 });
3421
3422 q.add_event_data_with_instant(ev_data_client, now).ok();
3423
3424 let ev_data_server =
3425 EventData::QuicKeyUpdated(qlog::events::quic::KeyUpdated {
3426 key_type: qlog::events::quic::KeyType::Server1RttSecret,
3427 trigger,
3428 ..Default::default()
3429 });
3430
3431 q.add_event_data_with_instant(ev_data_server, now).ok();
3432 });
3433 }
3434
3435 if !self.is_server && !self.got_peer_conn_id {
3436 if self.odcid.is_none() {
3437 self.odcid = Some(self.destination_id().into_owned());
3438 }
3439
3440 // Replace the randomly generated destination connection ID with
3441 // the one supplied by the server.
3442 self.set_initial_dcid(
3443 hdr.scid.clone(),
3444 self.peer_transport_params.stateless_reset_token,
3445 recv_pid,
3446 )?;
3447
3448 self.got_peer_conn_id = true;
3449 }
3450
3451 if self.is_server && !self.got_peer_conn_id {
3452 self.set_initial_dcid(hdr.scid.clone(), None, recv_pid)?;
3453
3454 if !self.did_retry {
3455 self.local_transport_params
3456 .original_destination_connection_id =
3457 Some(hdr.dcid.to_vec().into());
3458
3459 self.encode_transport_params()?;
3460 }
3461
3462 self.got_peer_conn_id = true;
3463 }
3464
3465 // To avoid sending an ACK in response to an ACK-only packet, we need
3466 // to keep track of whether this packet contains any frame other than
3467 // ACK and PADDING.
3468 let mut ack_elicited = false;
3469
3470 // Process packet payload. If a frame cannot be processed, store the
3471 // error and stop further packet processing.
3472 let mut frame_processing_err = None;
3473
3474 // To know if the peer migrated the connection, we need to keep track
3475 // whether this is a non-probing packet.
3476 let mut probing = true;
3477
3478 // Process packet payload.
3479 while payload.cap() > 0 {
3480 let frame = frame::Frame::from_bytes(&mut payload, hdr.ty)?;
3481
3482 qlog_with_type!(QLOG_PACKET_RX, self.qlog, _q, {
3483 qlog_frames.push(frame.to_qlog());
3484 });
3485
3486 if frame.ack_eliciting() {
3487 ack_elicited = true;
3488 }
3489
3490 if !frame.probing() {
3491 probing = false;
3492 }
3493
3494 if let Err(e) = self.process_frame(frame, &hdr, recv_pid, epoch, now)
3495 {
3496 frame_processing_err = Some(e);
3497 break;
3498 }
3499 }
3500
3501 qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
3502 let packet_size = b.len();
3503
3504 let qlog_pkt_hdr = qlog::events::quic::PacketHeader::with_type(
3505 hdr.ty.to_qlog(),
3506 Some(pn),
3507 Some(hdr.version),
3508 Some(&hdr.scid),
3509 Some(&hdr.dcid),
3510 );
3511
3512 let qlog_raw_info = RawInfo {
3513 length: Some(packet_size as u64),
3514 payload_length: Some(payload_len as u64),
3515 data: None,
3516 };
3517
3518 let ev_data = EventData::QuicPacketReceived(
3519 qlog::events::quic::PacketReceived {
3520 header: qlog_pkt_hdr,
3521 frames: Some(qlog_frames),
3522 raw: Some(qlog_raw_info),
3523 ..Default::default()
3524 },
3525 );
3526
3527 q.add_event_data_with_instant(ev_data, now).ok();
3528 });
3529
3530 qlog_with_type!(QLOG_METRICS, self.qlog, q, {
3531 let recv_path = self.paths.get_mut(recv_pid)?;
3532 recv_path.recovery.maybe_qlog(q, now);
3533 });
3534
3535 if let Some(e) = frame_processing_err {
3536 // Any frame error is terminal, so now just return.
3537 return Err(e);
3538 }
3539
3540 // Only log the remote transport parameters once the connection is
3541 // established (i.e. after frames have been fully parsed) and only
3542 // once per connection.
3543 if self.is_established() {
3544 qlog_with_type!(QLOG_PARAMS_SET, self.qlog, q, {
3545 if !self.qlog.logged_peer_params {
3546 let ev_data = self.peer_transport_params.to_qlog(
3547 TransportInitiator::Remote,
3548 self.handshake.cipher(),
3549 );
3550
3551 q.add_event_data_with_instant(ev_data, now).ok();
3552
3553 self.qlog.logged_peer_params = true;
3554 }
3555 });
3556 }
3557
3558 // Process acked frames. Note that several packets from several paths
3559 // might have been acked by the received packet.
3560 for (_, p) in self.paths.iter_mut() {
3561 while let Some(acked) = p.recovery.next_acked_frame(epoch) {
3562 match acked {
3563 frame::Frame::Ping {
3564 mtu_probe: Some(mtu_probe),
3565 } => {
3566 if let Some(pmtud) = p.pmtud.as_mut() {
3567 trace!(
3568 "{} pmtud probe acked; probe size {:?}",
3569 self.trace_id,
3570 mtu_probe
3571 );
3572
3573 // Ensure the probe is within the supported MTU range
3574 // before updating the max datagram size
3575 if let Some(current_mtu) =
3576 pmtud.successful_probe(mtu_probe)
3577 {
3578 qlog_with_type!(
3579 EventType::QuicEventType(
3580 QuicEventType::MtuUpdated
3581 ),
3582 self.qlog,
3583 q,
3584 {
3585 let pmtu_data = EventData::QuicMtuUpdated(
3586 qlog::events::quic::MtuUpdated {
3587 old: Some(
3588 p.recovery.max_datagram_size()
3589 as u32,
3590 ),
3591 new: current_mtu as u32,
3592 done: Some(true),
3593 },
3594 );
3595
3596 q.add_event_data_with_instant(
3597 pmtu_data, now,
3598 )
3599 .ok();
3600 }
3601 );
3602
3603 p.recovery
3604 .pmtud_update_max_datagram_size(current_mtu);
3605 }
3606 }
3607 },
3608
3609 frame::Frame::ACK { ranges, .. } => {
3610 // Stop acknowledging packets less than or equal to the
3611 // largest acknowledged in the sent ACK frame that, in
3612 // turn, got acked.
3613 if let Some(largest_acked) = ranges.last() {
3614 self.pkt_num_spaces[epoch]
3615 .recv_pkt_need_ack
3616 .remove_until(largest_acked);
3617 }
3618 },
3619
3620 frame::Frame::CryptoHeader { offset, length } => {
3621 self.crypto_ctx[epoch]
3622 .crypto_stream
3623 .send
3624 .ack_and_drop(offset, length);
3625 },
3626
3627 frame::Frame::StreamHeader {
3628 stream_id,
3629 offset,
3630 length,
3631 ..
3632 } => {
3633 // Update tx_buffered and emit qlog before checking if the
3634 // stream still exists. The client does need to ACK
3635 // frames that were received after the client sends a
3636 // ResetStream.
3637 self.tx_buffered =
3638 self.tx_buffered.saturating_sub(length);
3639
3640 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
3641 let ev_data = EventData::QuicStreamDataMoved(
3642 qlog::events::quic::StreamDataMoved {
3643 stream_id: Some(stream_id),
3644 offset: Some(offset),
3645 raw: Some(RawInfo {
3646 length: Some(length as u64),
3647 ..Default::default()
3648 }),
3649 from: Some(DataRecipient::Transport),
3650 to: Some(DataRecipient::Dropped),
3651 ..Default::default()
3652 },
3653 );
3654
3655 q.add_event_data_with_instant(ev_data, now).ok();
3656 });
3657
3658 let stream = match self.streams.get_mut(stream_id) {
3659 Some(v) => v,
3660
3661 None => continue,
3662 };
3663
3664 stream.send.ack_and_drop(offset, length);
3665
3666 let priority_key = Arc::clone(&stream.priority_key);
3667
3668 // Only collect the stream if it is complete and not
3669 // readable or writable.
3670 //
3671 // If it is readable, it will get collected when
3672 // stream_recv() is next used.
3673 //
3674 // If it is writable, it might mean that the stream
3675 // has been stopped by the peer (i.e. a STOP_SENDING
3676 // frame is received), in which case before collecting
3677 // the stream we will need to propagate the
3678 // `StreamStopped` error to the application. It will
3679 // instead get collected when one of stream_capacity(),
3680 // stream_writable(), stream_send(), ... is next called.
3681 //
3682 // Note that we can't use `is_writable()` here because
3683 // it returns false if the stream is stopped. Instead,
3684 // since the stream is marked as writable when a
3685 // STOP_SENDING frame is received, we check the writable
3686 // queue directly instead.
3687 let is_writable = priority_key.writable.is_linked() &&
3688 // Ensure that the stream is actually stopped.
3689 stream.send.is_stopped();
3690
3691 let is_complete = stream.is_complete();
3692 let is_readable = stream.is_readable();
3693
3694 if is_complete && !is_readable && !is_writable {
3695 let local = stream.local;
3696 self.streams.collect(stream_id, local);
3697 }
3698 },
3699
3700 frame::Frame::HandshakeDone => {
3701 // Explicitly set this to true, so that if the frame was
3702 // already scheduled for retransmission, it is aborted.
3703 self.handshake_done_sent = true;
3704
3705 self.handshake_done_acked = true;
3706 },
3707
3708 frame::Frame::ResetStream { stream_id, .. } => {
3709 let stream = match self.streams.get_mut(stream_id) {
3710 Some(v) => v,
3711
3712 None => continue,
3713 };
3714
3715 let priority_key = Arc::clone(&stream.priority_key);
3716
3717 // Only collect the stream if it is complete and not
3718 // readable or writable.
3719 //
3720 // If it is readable, it will get collected when
3721 // stream_recv() is next used.
3722 //
3723 // If it is writable, it might mean that the stream
3724 // has been stopped by the peer (i.e. a STOP_SENDING
3725 // frame is received), in which case before collecting
3726 // the stream we will need to propagate the
3727 // `StreamStopped` error to the application. It will
3728 // instead get collected when one of stream_capacity(),
3729 // stream_writable(), stream_send(), ... is next called.
3730 //
3731 // Note that we can't use `is_writable()` here because
3732 // it returns false if the stream is stopped. Instead,
3733 // since the stream is marked as writable when a
3734 // STOP_SENDING frame is received, we check the writable
3735 // queue directly instead.
3736 let is_writable = priority_key.writable.is_linked() &&
3737 // Ensure that the stream is actually stopped.
3738 stream.send.is_stopped();
3739
3740 let is_complete = stream.is_complete();
3741 let is_readable = stream.is_readable();
3742
3743 if is_complete && !is_readable && !is_writable {
3744 let local = stream.local;
3745 self.streams.collect(stream_id, local);
3746 }
3747 },
3748
3749 _ => (),
3750 }
3751 }
3752 }
3753
3754 // Now that we processed all the frames, if there is a path that has no
3755 // Destination CID, try to allocate one.
3756 let no_dcid = self
3757 .paths
3758 .iter_mut()
3759 .filter(|(_, p)| p.active_dcid_seq.is_none());
3760
3761 for (pid, p) in no_dcid {
3762 if self.ids.zero_length_dcid() {
3763 p.active_dcid_seq = Some(0);
3764 continue;
3765 }
3766
3767 let dcid_seq = match self.ids.lowest_available_dcid_seq() {
3768 Some(seq) => seq,
3769 None => break,
3770 };
3771
3772 self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
3773
3774 p.active_dcid_seq = Some(dcid_seq);
3775 }
3776
3777 // We only record the time of arrival of the largest packet number
3778 // that still needs to be acked, to be used for ACK delay calculation.
3779 if self.pkt_num_spaces[epoch].recv_pkt_need_ack.last() < Some(pn) {
3780 self.pkt_num_spaces[epoch].largest_rx_pkt_time = now;
3781 }
3782
3783 self.pkt_num_spaces[epoch].recv_pkt_num.insert(pn);
3784
3785 self.pkt_num_spaces[epoch].recv_pkt_need_ack.push_item(pn);
3786
3787 self.pkt_num_spaces[epoch].ack_elicited =
3788 cmp::max(self.pkt_num_spaces[epoch].ack_elicited, ack_elicited);
3789
3790 self.pkt_num_spaces[epoch].largest_rx_pkt_num =
3791 cmp::max(self.pkt_num_spaces[epoch].largest_rx_pkt_num, pn);
3792
3793 if !probing {
3794 self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num = cmp::max(
3795 self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num,
3796 pn,
3797 );
3798
3799 // Did the peer migrated to another path?
3800 let active_path_id = self.paths.get_active_path_id()?;
3801
3802 if self.is_server &&
3803 recv_pid != active_path_id &&
3804 self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num == pn
3805 {
3806 self.on_peer_migrated(recv_pid, self.disable_dcid_reuse, now)?;
3807 }
3808 }
3809
3810 if let Some(idle_timeout) = self.idle_timeout() {
3811 self.idle_timer = Some(now + idle_timeout);
3812 }
3813
3814 // Update send capacity.
3815 self.update_tx_cap();
3816
3817 self.recv_count += 1;
3818 self.paths.get_mut(recv_pid)?.recv_count += 1;
3819
3820 let read = b.off() + aead_tag_len;
3821
3822 self.recv_bytes += read as u64;
3823 self.paths.get_mut(recv_pid)?.recv_bytes += read as u64;
3824
3825 // An Handshake packet has been received from the client and has been
3826 // successfully processed, so we can drop the initial state and consider
3827 // the client's address to be verified.
3828 if self.is_server && hdr.ty == Type::Handshake {
3829 self.drop_epoch_state(packet::Epoch::Initial, now);
3830
3831 self.paths.get_mut(recv_pid)?.verified_peer_address = true;
3832 }
3833
3834 self.ack_eliciting_sent = false;
3835
3836 Ok(read)
3837 }
3838
3839 /// Writes a single QUIC packet to be sent to the peer.
3840 ///
3841 /// On success the number of bytes written to the output buffer is
3842 /// returned, or [`Done`] if there was nothing to write.
3843 ///
3844 /// The application should call `send()` multiple times until [`Done`] is
3845 /// returned, indicating that there are no more packets to send. It is
3846 /// recommended that `send()` be called in the following cases:
3847 ///
3848 /// * When the application receives QUIC packets from the peer (that is,
3849 /// any time [`recv()`] is also called).
3850 ///
3851 /// * When the connection timer expires (that is, any time [`on_timeout()`]
3852 /// is also called).
3853 ///
3854 /// * When the application sends data to the peer (for example, any time
3855 /// [`stream_send()`] or [`stream_shutdown()`] are called).
3856 ///
3857 /// * When the application receives data from the peer (for example any
3858 /// time [`stream_recv()`] is called).
3859 ///
3860 /// Once [`is_draining()`] returns `true`, it is no longer necessary to call
3861 /// `send()` and all calls will return [`Done`].
3862 ///
3863 /// [`Done`]: enum.Error.html#variant.Done
3864 /// [`recv()`]: struct.Connection.html#method.recv
3865 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
3866 /// [`stream_send()`]: struct.Connection.html#method.stream_send
3867 /// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
3868 /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
3869 /// [`is_draining()`]: struct.Connection.html#method.is_draining
3870 ///
3871 /// ## Examples:
3872 ///
3873 /// ```no_run
3874 /// # let mut out = [0; 512];
3875 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
3876 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
3877 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
3878 /// # let peer = "127.0.0.1:1234".parse().unwrap();
3879 /// # let local = socket.local_addr().unwrap();
3880 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
3881 /// loop {
3882 /// let (write, send_info) = match conn.send(&mut out) {
3883 /// Ok(v) => v,
3884 ///
3885 /// Err(quiche::Error::Done) => {
3886 /// // Done writing.
3887 /// break;
3888 /// },
3889 ///
3890 /// Err(e) => {
3891 /// // An error occurred, handle it.
3892 /// break;
3893 /// },
3894 /// };
3895 ///
3896 /// socket.send_to(&out[..write], &send_info.to).unwrap();
3897 /// }
3898 /// # Ok::<(), quiche::Error>(())
3899 /// ```
3900 pub fn send(&mut self, out: &mut [u8]) -> Result<(usize, SendInfo)> {
3901 self.send_on_path(out, None, None)
3902 }
3903
    /// Writes a single QUIC packet to be sent to the peer from the specified
    /// local address `from` to the destination address `to`.
    ///
    /// The behavior of this method differs depending on the value of the `from`
    /// and `to` parameters:
    ///
    /// * If both are `Some`, then the method only considers the 4-tuple
    ///   (`from`, `to`). Application can monitor the 4-tuple availability,
    ///   either by monitoring [`path_event_next()`] events or by relying on
    ///   the [`paths_iter()`] method. If the provided 4-tuple does not exist
    ///   on the connection (anymore), it returns an [`InvalidState`].
    ///
    /// * If `from` is `Some` and `to` is `None`, then the method only
    ///   considers sending packets on paths having `from` as local address.
    ///
    /// * If `to` is `Some` and `from` is `None`, then the method only
    ///   considers sending packets on paths having `to` as peer address.
    ///
    /// * If both are `None`, all available paths are considered.
    ///
    /// On success the number of bytes written to the output buffer is
    /// returned, or [`Done`] if there was nothing to write.
    ///
    /// The application should call `send_on_path()` multiple times until
    /// [`Done`] is returned, indicating that there are no more packets to
    /// send. It is recommended that `send_on_path()` be called in the
    /// following cases:
    ///
    /// * When the application receives QUIC packets from the peer (that is,
    ///   any time [`recv()`] is also called).
    ///
    /// * When the connection timer expires (that is, any time [`on_timeout()`]
    ///   is also called).
    ///
    /// * When the application sends data to the peer (for example, any time
    ///   [`stream_send()`] or [`stream_shutdown()`] are called).
    ///
    /// * When the application receives data from the peer (for example any
    ///   time [`stream_recv()`] is called).
    ///
    /// Once [`is_draining()`] returns `true`, it is no longer necessary to call
    /// `send_on_path()` and all calls will return [`Done`].
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`InvalidState`]: enum.Error.html#variant.InvalidState
    /// [`recv()`]: struct.Connection.html#method.recv
    /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
    /// [`stream_send()`]: struct.Connection.html#method.stream_send
    /// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
    /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
    /// [`path_event_next()`]: struct.Connection.html#method.path_event_next
    /// [`paths_iter()`]: struct.Connection.html#method.paths_iter
    /// [`is_draining()`]: struct.Connection.html#method.is_draining
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut out = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// loop {
    ///     let (write, send_info) = match conn.send_on_path(&mut out, Some(local), Some(peer)) {
    ///         Ok(v) => v,
    ///
    ///         Err(quiche::Error::Done) => {
    ///             // Done writing.
    ///             break;
    ///         },
    ///
    ///         Err(e) => {
    ///             // An error occurred, handle it.
    ///             break;
    ///         },
    ///     };
    ///
    ///     socket.send_to(&out[..write], &send_info.to).unwrap();
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn send_on_path(
        &mut self, out: &mut [u8], from: Option<SocketAddr>,
        to: Option<SocketAddr>,
    ) -> Result<(usize, SendInfo)> {
        if out.is_empty() {
            return Err(Error::BufferTooShort);
        }

        // Nothing more is ever sent on a closed or draining connection.
        if self.is_closed() || self.is_draining() {
            return Err(Error::Done);
        }

        let now = Instant::now();

        // Only progress the handshake while no local error is pending.
        if self.local_error.is_none() {
            self.do_handshake(now)?;
        }

        // Forwarding the error value here could confuse
        // applications, as they may not expect getting a `recv()`
        // error when calling `send()`.
        //
        // We simply fall-through to sending packets, which should
        // take care of terminating the connection as needed.
        let _ = self.process_undecrypted_0rtt_packets();

        // There's no point in trying to send a packet if the Initial secrets
        // have not been derived yet, so return early.
        if !self.derived_initial_secrets {
            return Err(Error::Done);
        }

        // Whether an Initial packet was written to `out` (used below to decide
        // whether the resulting datagram needs to be padded).
        let mut has_initial = false;

        // Total number of bytes written to `out` so far.
        let mut done = 0;

        // Limit output packet size to respect the sender and receiver's
        // maximum UDP payload size limit.
        let mut left = cmp::min(out.len(), self.max_send_udp_payload_size());

        // Resolve the path to send on, per the rules documented above: an
        // explicit 4-tuple must match an existing path, otherwise pick one
        // from the (possibly filtered) available paths.
        let send_pid = match (from, to) {
            (Some(f), Some(t)) => self
                .paths
                .path_id_from_addrs(&(f, t))
                .ok_or(Error::InvalidState)?,

            _ => self.get_send_path_id(from, to)?,
        };

        let send_path = self.paths.get_mut(send_pid)?;

        // Update max datagram size to allow path MTU discovery probe to be sent.
        if let Some(pmtud) = send_path.pmtud.as_mut() {
            if pmtud.should_probe() {
                let size = if self.handshake_confirmed || self.handshake_completed
                {
                    pmtud.get_probe_size()
                } else {
                    pmtud.get_current_mtu()
                };

                send_path.recovery.pmtud_update_max_datagram_size(size);

                left =
                    cmp::min(out.len(), send_path.recovery.max_datagram_size());
            }
        }

        // Limit data sent by the server based on the amount of data received
        // from the client before its address is validated.
        if !send_path.verified_peer_address && self.is_server {
            left = cmp::min(left, send_path.max_send_bytes);
        }

        // Generate coalesced packets: keep appending packets to `out` until
        // the buffer (or a stop condition below) is exhausted.
        while left > 0 {
            let (ty, written) = match self.send_single(
                &mut out[done..done + left],
                send_pid,
                has_initial,
                now,
            ) {
                Ok(v) => v,

                // Not fatal: it just means no further packet fits.
                Err(Error::BufferTooShort) | Err(Error::Done) => break,

                Err(e) => return Err(e),
            };

            done += written;
            left -= written;

            match ty {
                Type::Initial => has_initial = true,

                // No more packets can be coalesced after a 1-RTT.
                Type::Short => break,

                _ => (),
            };

            // When sending multiple PTO probes, don't coalesce them together,
            // so they are sent on separate UDP datagrams.
            if let Ok(epoch) = ty.to_epoch() {
                if self.paths.get_mut(send_pid)?.recovery.loss_probes(epoch) > 0 {
                    break;
                }
            }

            // Don't coalesce packets that must go on different paths.
            if !(from.is_some() && to.is_some()) &&
                self.get_send_path_id(from, to)? != send_pid
            {
                break;
            }
        }

        // Nothing was written at all: report `Done` to the caller.
        if done == 0 {
            self.last_tx_data = self.tx_data;

            return Err(Error::Done);
        }

        // Datagrams that carry an Initial packet are padded up to
        // MIN_CLIENT_INITIAL_LEN (as far as the remaining buffer allows).
        if has_initial && left > 0 && done < MIN_CLIENT_INITIAL_LEN {
            let pad_len = cmp::min(left, MIN_CLIENT_INITIAL_LEN - done);

            // Fill padding area with null bytes, to avoid leaking information
            // in case the application reuses the packet buffer.
            out[done..done + pad_len].fill(0);

            done += pad_len;
        }

        let send_path = self.paths.get(send_pid)?;

        // Tell the caller where the datagram should be sent from/to, and the
        // earliest time it should be sent (for pacing).
        let info = SendInfo {
            from: send_path.local_addr(),
            to: send_path.peer_addr(),

            at: send_path.recovery.get_packet_send_time(now),
        };

        Ok((done, info))
    }
4131
4132 fn send_single(
4133 &mut self, out: &mut [u8], send_pid: usize, has_initial: bool,
4134 now: Instant,
4135 ) -> Result<(Type, usize)> {
4136 if out.is_empty() {
4137 return Err(Error::BufferTooShort);
4138 }
4139
4140 if self.is_draining() {
4141 return Err(Error::Done);
4142 }
4143
4144 let is_closing = self.local_error.is_some();
4145
4146 let out_len = out.len();
4147
4148 let mut b = octets::OctetsMut::with_slice(out);
4149
4150 let pkt_type = self.write_pkt_type(send_pid)?;
4151
4152 let max_dgram_len = if !self.dgram_send_queue.is_empty() {
4153 self.dgram_max_writable_len()
4154 } else {
4155 None
4156 };
4157
4158 let epoch = pkt_type.to_epoch()?;
4159 let pkt_space = &mut self.pkt_num_spaces[epoch];
4160 let crypto_ctx = &mut self.crypto_ctx[epoch];
4161
4162 // Process lost frames. There might be several paths having lost frames.
4163 for (_, p) in self.paths.iter_mut() {
4164 while let Some(lost) = p.recovery.next_lost_frame(epoch) {
4165 match lost {
4166 frame::Frame::CryptoHeader { offset, length } => {
4167 crypto_ctx.crypto_stream.send.retransmit(offset, length);
4168
4169 self.stream_retrans_bytes += length as u64;
4170 p.stream_retrans_bytes += length as u64;
4171
4172 self.retrans_count += 1;
4173 p.retrans_count += 1;
4174 },
4175
4176 frame::Frame::StreamHeader {
4177 stream_id,
4178 offset,
4179 length,
4180 fin,
4181 } => {
4182 let stream = match self.streams.get_mut(stream_id) {
4183 // Only retransmit data if the stream is not closed
4184 // or stopped.
4185 Some(v) if !v.send.is_stopped() => v,
4186
4187 // Data on a closed stream will not be retransmitted
4188 // or acked after it is declared lost, so update
4189 // tx_buffered and qlog.
4190 _ => {
4191 self.tx_buffered =
4192 self.tx_buffered.saturating_sub(length);
4193
4194 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
4195 let ev_data = EventData::QuicStreamDataMoved(
4196 qlog::events::quic::StreamDataMoved {
4197 stream_id: Some(stream_id),
4198 offset: Some(offset),
4199 raw: Some(RawInfo {
4200 length: Some(length as u64),
4201 ..Default::default()
4202 }),
4203 from: Some(DataRecipient::Transport),
4204 to: Some(DataRecipient::Dropped),
4205 ..Default::default()
4206 },
4207 );
4208
4209 q.add_event_data_with_instant(ev_data, now)
4210 .ok();
4211 });
4212
4213 continue;
4214 },
4215 };
4216
4217 let was_flushable = stream.is_flushable();
4218
4219 let empty_fin = length == 0 && fin;
4220
4221 stream.send.retransmit(offset, length);
4222
4223 // If the stream is now flushable push it to the
4224 // flushable queue, but only if it wasn't already
4225 // queued.
4226 //
4227 // Consider the stream flushable also when we are
4228 // sending a zero-length frame that has the fin flag
4229 // set.
4230 if (stream.is_flushable() || empty_fin) && !was_flushable
4231 {
4232 let priority_key = Arc::clone(&stream.priority_key);
4233 self.streams.insert_flushable(&priority_key);
4234 }
4235
4236 self.stream_retrans_bytes += length as u64;
4237 p.stream_retrans_bytes += length as u64;
4238
4239 self.retrans_count += 1;
4240 p.retrans_count += 1;
4241 },
4242
4243 frame::Frame::ACK { .. } => {
4244 pkt_space.ack_elicited = true;
4245 },
4246
4247 frame::Frame::ResetStream {
4248 stream_id,
4249 error_code,
4250 final_size,
4251 } => {
4252 self.streams
4253 .insert_reset(stream_id, error_code, final_size);
4254 },
4255
4256 frame::Frame::StopSending {
4257 stream_id,
4258 error_code,
4259 } =>
4260 // We only need to retransmit the STOP_SENDING frame if
4261 // the stream is still active and not FIN'd. Even if the
4262 // packet was lost, if the application has the final
4263 // size at this point there is no need to retransmit.
4264 if let Some(stream) = self.streams.get(stream_id) {
4265 if !stream.recv.is_fin() {
4266 self.streams
4267 .insert_stopped(stream_id, error_code);
4268 }
4269 },
4270
4271 // Retransmit HANDSHAKE_DONE only if it hasn't been acked at
4272 // least once already.
4273 frame::Frame::HandshakeDone =>
4274 if !self.handshake_done_acked {
4275 self.handshake_done_sent = false;
4276 },
4277
4278 frame::Frame::MaxStreamData { stream_id, .. } => {
4279 if self.streams.get(stream_id).is_some() {
4280 self.streams.insert_almost_full(stream_id);
4281 }
4282 },
4283
4284 frame::Frame::MaxData { .. } => {
4285 self.should_send_max_data = true;
4286 },
4287
4288 frame::Frame::MaxStreamsUni { .. } => {
4289 self.should_send_max_streams_uni = true;
4290 },
4291
4292 frame::Frame::MaxStreamsBidi { .. } => {
4293 self.should_send_max_streams_bidi = true;
4294 },
4295
4296 // Retransmit STREAMS_BLOCKED frames if the frame with the
4297 // most recent limit is lost. These are informational
4298 // signals to the peer, reliably sending them
4299 // ensures the signal is used consistently and helps
4300 // debugging.
4301 frame::Frame::StreamsBlockedBidi { limit } => {
4302 self.streams_blocked_bidi_state
4303 .force_retransmit_sent_limit_eq(limit);
4304 },
4305
4306 frame::Frame::StreamsBlockedUni { limit } => {
4307 self.streams_blocked_uni_state
4308 .force_retransmit_sent_limit_eq(limit);
4309 },
4310
4311 frame::Frame::NewConnectionId { seq_num, .. } => {
4312 self.ids.mark_advertise_new_scid_seq(seq_num, true);
4313 },
4314
4315 frame::Frame::RetireConnectionId { seq_num } => {
4316 self.ids.mark_retire_dcid_seq(seq_num, true)?;
4317 },
4318
4319 frame::Frame::Ping { mtu_probe } => {
4320 // Ping frames are not retransmitted.
4321 if let Some(failed_probe) = mtu_probe {
4322 if let Some(pmtud) = p.pmtud.as_mut() {
4323 trace!("pmtud probe dropped: {failed_probe}");
4324 pmtud.failed_probe(failed_probe);
4325 }
4326 }
4327 },
4328
4329 // Sent as StreamHeader frames. Stream frames are never
4330 // generated by quiche.
4331 frame::Frame::Stream { .. } => {
4332 debug_panic!(
4333 "Unexpected frame lost: Stream. quiche should \
4334 have tracked retransmittable stream data as \
4335 StreamHeader frames."
4336 );
4337 },
4338
4339 // Sent as CryptoHeader frames. Crypto frames are never
4340 // generated by quiche.
4341 frame::Frame::Crypto { .. } => {
4342 debug_panic!(
4343 "Unexpected frame lost: Crypto. quiche should \
4344 have tracked retransmittable crypto data as \
4345 CryptoHeader frames."
4346 );
4347 },
4348
4349 // NewToken frames are never sent by quiche; they are not
4350 // implemented.
4351 frame::Frame::NewToken { .. } => {
4352 debug_panic!(
4353 "Unexpected frame lost: NewToken. quiche used to \
4354 not implement NewToken frames, retransmission of \
4355 these frames is not implemented."
4356 );
4357 },
4358
4359 // Data blocked frames are an optional advisory
4360 // signal. We choose to not retransmit them to
4361 // avoid unnecessary network usage.
4362 frame::Frame::DataBlocked { .. } |
4363 frame::Frame::StreamDataBlocked { .. } => (),
4364
4365 // Path challenge and response have their own
4366 // retry logic. They should not be retransmitted
4367 // normally since according to RFC 9000 Section
4368 // 8.2.2: "An endpoint MUST NOT send more than one
4369 // PATH_RESPONSE frame in response to one
4370 // PATH_CHALLENGE frame".
4371 frame::Frame::PathChallenge { .. } |
4372 frame::Frame::PathResponse { .. } => (),
4373
4374 // From RFC 9000 Section 13.3: CONNECTION_CLOSE
4375 // frames, are not sent again when packet loss is
4376 // detected. Resending these signals is described
4377 // in Section 10.
4378 frame::Frame::ConnectionClose { .. } |
4379 frame::Frame::ApplicationClose { .. } => (),
4380
4381 // Padding doesn't require retransmission.
4382 frame::Frame::Padding { .. } => (),
4383
4384 frame::Frame::DatagramHeader { .. } |
4385 frame::Frame::Datagram { .. } => {
4386 // Datagrams do not require retransmission. Just update
4387 // stats.
4388 p.dgram_lost_count = p.dgram_lost_count.saturating_add(1);
4389 },
4390 // IMPORTANT: Do not add an exhaustive catch
4391 // all. We want to add explicit handling for frame
4392 // types that can be safely ignored when lost.
4393 }
4394 }
4395 }
4396 self.check_tx_buffered_invariant();
4397
4398 let is_app_limited = self.delivery_rate_check_if_app_limited();
4399 let n_paths = self.paths.len();
4400 let path = self.paths.get_mut(send_pid)?;
4401 let flow_control = &mut self.flow_control;
4402 let pkt_space = &mut self.pkt_num_spaces[epoch];
4403 let crypto_ctx = &mut self.crypto_ctx[epoch];
4404 let pkt_num_manager = &mut self.pkt_num_manager;
4405
4406 let mut left = if let Some(pmtud) = path.pmtud.as_mut() {
4407 // Limit output buffer size by estimated path MTU.
4408 cmp::min(pmtud.get_current_mtu(), b.cap())
4409 } else {
4410 b.cap()
4411 };
4412
4413 if pkt_num_manager.should_skip_pn(self.handshake_completed) {
4414 pkt_num_manager.set_skip_pn(Some(self.next_pkt_num));
4415 self.next_pkt_num += 1;
4416 };
4417 let pn = self.next_pkt_num;
4418
4419 let largest_acked_pkt =
4420 path.recovery.get_largest_acked_on_epoch(epoch).unwrap_or(0);
4421 let pn_len = packet::pkt_num_len(pn, largest_acked_pkt);
4422
4423 // The AEAD overhead at the current encryption level.
4424 let crypto_overhead = crypto_ctx.crypto_overhead().ok_or(Error::Done)?;
4425
4426 let dcid_seq = path.active_dcid_seq.ok_or(Error::OutOfIdentifiers)?;
4427
4428 let dcid =
4429 ConnectionId::from_ref(self.ids.get_dcid(dcid_seq)?.cid.as_ref());
4430
4431 let scid = if let Some(scid_seq) = path.active_scid_seq {
4432 ConnectionId::from_ref(self.ids.get_scid(scid_seq)?.cid.as_ref())
4433 } else if pkt_type == Type::Short {
4434 ConnectionId::default()
4435 } else {
4436 return Err(Error::InvalidState);
4437 };
4438
4439 let hdr = Header {
4440 ty: pkt_type,
4441
4442 version: self.version,
4443
4444 dcid,
4445 scid,
4446
4447 pkt_num: 0,
4448 pkt_num_len: pn_len,
4449
4450 // Only clone token for Initial packets, as other packets don't have
4451 // this field (Retry doesn't count, as it's not encoded as part of
4452 // this code path).
4453 token: if pkt_type == Type::Initial {
4454 self.token.clone()
4455 } else {
4456 None
4457 },
4458
4459 versions: None,
4460 key_phase: self.key_phase,
4461 };
4462
4463 hdr.to_bytes(&mut b)?;
4464
4465 let hdr_trace = if log::max_level() == log::LevelFilter::Trace {
4466 Some(format!("{hdr:?}"))
4467 } else {
4468 None
4469 };
4470
4471 let hdr_ty = hdr.ty;
4472
4473 #[cfg(feature = "qlog")]
4474 let qlog_pkt_hdr = self.qlog.streamer.as_ref().map(|_q| {
4475 qlog::events::quic::PacketHeader::with_type(
4476 hdr.ty.to_qlog(),
4477 Some(pn),
4478 Some(hdr.version),
4479 Some(&hdr.scid),
4480 Some(&hdr.dcid),
4481 )
4482 });
4483
4484 // Calculate the space required for the packet, including the header
4485 // the payload length, the packet number and the AEAD overhead.
4486 let mut overhead = b.off() + pn_len + crypto_overhead;
4487
4488 // We assume that the payload length, which is only present in long
4489 // header packets, can always be encoded with a 2-byte varint.
4490 if pkt_type != Type::Short {
4491 overhead += PAYLOAD_LENGTH_LEN;
4492 }
4493
4494 // Make sure we have enough space left for the packet overhead.
4495 match left.checked_sub(overhead) {
4496 Some(v) => left = v,
4497
4498 None => {
4499 // We can't send more because there isn't enough space available
4500 // in the output buffer.
4501 //
4502 // This usually happens when we try to send a new packet but
4503 // failed because cwnd is almost full. In such case app_limited
4504 // is set to false here to make cwnd grow when ACK is received.
4505 path.recovery.update_app_limited(false);
4506 return Err(Error::Done);
4507 },
4508 }
4509
4510 // Make sure there is enough space for the minimum payload length.
4511 if left < PAYLOAD_MIN_LEN {
4512 path.recovery.update_app_limited(false);
4513 return Err(Error::Done);
4514 }
4515
4516 let mut frames: SmallVec<[frame::Frame; 1]> = SmallVec::new();
4517
4518 let mut ack_eliciting = false;
4519 let mut in_flight = false;
4520 let mut is_pmtud_probe = false;
4521 let mut has_data = false;
4522
4523 // Whether or not we should explicitly elicit an ACK via PING frame if we
4524 // implicitly elicit one otherwise.
4525 let ack_elicit_required = path.recovery.should_elicit_ack(epoch);
4526
4527 let header_offset = b.off();
4528
4529 // Reserve space for payload length in advance. Since we don't yet know
4530 // what the final length will be, we reserve 2 bytes in all cases.
4531 //
4532 // Only long header packets have an explicit length field.
4533 if pkt_type != Type::Short {
4534 b.skip(PAYLOAD_LENGTH_LEN)?;
4535 }
4536
4537 packet::encode_pkt_num(pn, pn_len, &mut b)?;
4538
4539 let payload_offset = b.off();
4540
4541 let cwnd_available =
4542 path.recovery.cwnd_available().saturating_sub(overhead);
4543
4544 let left_before_packing_ack_frame = left;
4545
4546 // Create ACK frame.
4547 //
4548 // When we need to explicitly elicit an ACK via PING later, go ahead and
4549 // generate an ACK (if there's anything to ACK) since we're going to
4550 // send a packet with PING anyways, even if we haven't received anything
4551 // ACK eliciting.
4552 if pkt_space.recv_pkt_need_ack.len() > 0 &&
4553 (pkt_space.ack_elicited || ack_elicit_required) &&
4554 (!is_closing ||
4555 (pkt_type == Type::Handshake &&
4556 self.local_error
4557 .as_ref()
4558 .is_some_and(|le| le.is_app))) &&
4559 path.active()
4560 {
4561 #[cfg(not(feature = "fuzzing"))]
4562 let ack_delay = pkt_space.largest_rx_pkt_time.elapsed();
4563
4564 #[cfg(not(feature = "fuzzing"))]
4565 let ack_delay = ack_delay.as_micros() as u64 /
4566 2_u64
4567 .pow(self.local_transport_params.ack_delay_exponent as u32);
4568
4569 // pseudo-random reproducible ack delays when fuzzing
4570 #[cfg(feature = "fuzzing")]
4571 let ack_delay = rand::rand_u8() as u64 + 1;
4572
4573 let frame = frame::Frame::ACK {
4574 ack_delay,
4575 ranges: pkt_space.recv_pkt_need_ack.clone(),
4576 ecn_counts: None, // sending ECN is not supported at this time
4577 };
4578
4579 // When a PING frame needs to be sent, avoid sending the ACK if
4580 // there is not enough cwnd available for both (note that PING
4581 // frames are always 1 byte, so we just need to check that the
4582 // ACK's length is lower than cwnd).
4583 if pkt_space.ack_elicited || frame.wire_len() < cwnd_available {
4584 // ACK-only packets are not congestion controlled so ACKs must
4585 // be bundled considering the buffer capacity only, and not the
4586 // available cwnd.
4587 if push_frame_to_pkt!(b, frames, frame, left) {
4588 pkt_space.ack_elicited = false;
4589 }
4590 }
4591 }
4592
4593 // Limit output packet size by congestion window size.
4594 left = cmp::min(
4595 left,
4596 // Bytes consumed by ACK frames.
4597 cwnd_available.saturating_sub(left_before_packing_ack_frame - left),
4598 );
4599
4600 let mut challenge_data = None;
4601
4602 if pkt_type == Type::Short {
4603 // Create PMTUD probe.
4604 //
4605 // In order to send a PMTUD probe the current `left` value, which was
4606 // already limited by the current PMTU measure, needs to be ignored,
4607 // but the outgoing packet still needs to be limited by
4608 // the output buffer size, as well as the congestion
4609 // window.
4610 //
4611 // In addition, the PMTUD probe is only generated when the handshake
4612 // is confirmed, to avoid interfering with the handshake
4613 // (e.g. due to the anti-amplification limits).
4614 if let Ok(active_path) = self.paths.get_active_mut() {
4615 let should_probe_pmtu = active_path.should_send_pmtu_probe(
4616 self.handshake_confirmed,
4617 self.handshake_completed,
4618 out_len,
4619 is_closing,
4620 frames.is_empty(),
4621 );
4622
4623 if should_probe_pmtu {
4624 if let Some(pmtud) = active_path.pmtud.as_mut() {
4625 let probe_size = pmtud.get_probe_size();
4626 trace!(
4627 "{} sending pmtud probe pmtu_probe={} estimated_pmtu={}",
4628 self.trace_id,
4629 probe_size,
4630 pmtud.get_current_mtu(),
4631 );
4632
4633 left = probe_size;
4634
4635 match left.checked_sub(overhead) {
4636 Some(v) => left = v,
4637
4638 None => {
4639 // We can't send more because there isn't enough
4640 // space available in the output buffer.
4641 //
4642 // This usually happens when we try to send a new
4643 // packet but failed because cwnd is almost full.
4644 //
4645 // In such case app_limited is set to false here
4646 // to make cwnd grow when ACK is received.
4647 active_path.recovery.update_app_limited(false);
4648 return Err(Error::Done);
4649 },
4650 }
4651
4652 let frame = frame::Frame::Padding {
4653 len: probe_size - overhead - 1,
4654 };
4655
4656 if push_frame_to_pkt!(b, frames, frame, left) {
4657 let frame = frame::Frame::Ping {
4658 mtu_probe: Some(probe_size),
4659 };
4660
4661 if push_frame_to_pkt!(b, frames, frame, left) {
4662 ack_eliciting = true;
4663 in_flight = true;
4664 }
4665 }
4666
4667 // Reset probe flag after sending to prevent duplicate
4668 // probes in a single flight.
4669 pmtud.set_in_flight(true);
4670 is_pmtud_probe = true;
4671 }
4672 }
4673 }
4674
4675 let path = self.paths.get_mut(send_pid)?;
4676 // Create PATH_RESPONSE frame if needed.
4677 // We do not try to ensure that these are really sent.
4678 while let Some(challenge) = path.pop_received_challenge() {
4679 let frame = frame::Frame::PathResponse { data: challenge };
4680
4681 if push_frame_to_pkt!(b, frames, frame, left) {
4682 ack_eliciting = true;
4683 in_flight = true;
4684 } else {
4685 // If there are other pending PATH_RESPONSE, don't lose them
4686 // now.
4687 break;
4688 }
4689 }
4690
4691 // Create PATH_CHALLENGE frame if needed.
4692 if path.validation_requested() {
4693 // TODO: ensure that data is unique over paths.
4694 let data = rand::rand_u64().to_be_bytes();
4695
4696 let frame = frame::Frame::PathChallenge { data };
4697
4698 if push_frame_to_pkt!(b, frames, frame, left) {
4699 // Let's notify the path once we know the packet size.
4700 challenge_data = Some(data);
4701
4702 ack_eliciting = true;
4703 in_flight = true;
4704 }
4705 }
4706
4707 if let Some(key_update) = crypto_ctx.key_update.as_mut() {
4708 key_update.update_acked = true;
4709 }
4710 }
4711
4712 let path = self.paths.get_mut(send_pid)?;
4713
4714 if pkt_type == Type::Short && !is_closing {
4715 // Create NEW_CONNECTION_ID frames as needed.
4716 while let Some(seq_num) = self.ids.next_advertise_new_scid_seq() {
4717 let frame = self.ids.get_new_connection_id_frame_for(seq_num)?;
4718
4719 if push_frame_to_pkt!(b, frames, frame, left) {
4720 self.ids.mark_advertise_new_scid_seq(seq_num, false);
4721
4722 ack_eliciting = true;
4723 in_flight = true;
4724 } else {
4725 break;
4726 }
4727 }
4728 }
4729
4730 if pkt_type == Type::Short && !is_closing && path.active() {
4731 // Create HANDSHAKE_DONE frame.
4732 // self.should_send_handshake_done() but without the need to borrow
4733 if self.handshake_completed &&
4734 !self.handshake_done_sent &&
4735 self.is_server
4736 {
4737 let frame = frame::Frame::HandshakeDone;
4738
4739 if push_frame_to_pkt!(b, frames, frame, left) {
4740 self.handshake_done_sent = true;
4741
4742 ack_eliciting = true;
4743 in_flight = true;
4744 }
4745 }
4746
4747 // Create MAX_STREAMS_BIDI frame.
4748 if self.streams.should_update_max_streams_bidi() ||
4749 self.should_send_max_streams_bidi
4750 {
4751 let frame = frame::Frame::MaxStreamsBidi {
4752 max: self.streams.max_streams_bidi_next(),
4753 };
4754
4755 if push_frame_to_pkt!(b, frames, frame, left) {
4756 self.streams.update_max_streams_bidi();
4757 self.should_send_max_streams_bidi = false;
4758
4759 ack_eliciting = true;
4760 in_flight = true;
4761 }
4762 }
4763
4764 // Create MAX_STREAMS_UNI frame.
4765 if self.streams.should_update_max_streams_uni() ||
4766 self.should_send_max_streams_uni
4767 {
4768 let frame = frame::Frame::MaxStreamsUni {
4769 max: self.streams.max_streams_uni_next(),
4770 };
4771
4772 if push_frame_to_pkt!(b, frames, frame, left) {
4773 self.streams.update_max_streams_uni();
4774 self.should_send_max_streams_uni = false;
4775
4776 ack_eliciting = true;
4777 in_flight = true;
4778 }
4779 }
4780
4781 // Create DATA_BLOCKED frame.
4782 if let Some(limit) = self.blocked_limit {
4783 let frame = frame::Frame::DataBlocked { limit };
4784
4785 if push_frame_to_pkt!(b, frames, frame, left) {
4786 self.blocked_limit = None;
4787 self.data_blocked_sent_count =
4788 self.data_blocked_sent_count.saturating_add(1);
4789
4790 ack_eliciting = true;
4791 in_flight = true;
4792 }
4793 }
4794
4795 // Create STREAMS_BLOCKED (bidi) frame when the local endpoint has
4796 // exhausted the peer's bidirectional stream count limit.
4797 if self
4798 .streams_blocked_bidi_state
4799 .has_pending_stream_blocked_frame()
4800 {
4801 if let Some(limit) = self.streams_blocked_bidi_state.blocked_at {
4802 let frame = frame::Frame::StreamsBlockedBidi { limit };
4803
4804 if push_frame_to_pkt!(b, frames, frame, left) {
4805 // Record the limit we just notified the peer about so
4806 // that redundant frames for the same limit are
4807 // suppressed.
4808 self.streams_blocked_bidi_state.blocked_sent =
4809 Some(limit);
4810
4811 ack_eliciting = true;
4812 in_flight = true;
4813 }
4814 }
4815 }
4816
4817 // Create STREAMS_BLOCKED (uni) frame when the local endpoint has
4818 // exhausted the peer's unidirectional stream count limit.
4819 if self
4820 .streams_blocked_uni_state
4821 .has_pending_stream_blocked_frame()
4822 {
4823 if let Some(limit) = self.streams_blocked_uni_state.blocked_at {
4824 let frame = frame::Frame::StreamsBlockedUni { limit };
4825
4826 if push_frame_to_pkt!(b, frames, frame, left) {
4827 // Record the limit we just notified the peer about so
4828 // that redundant frames for the same limit are
4829 // suppressed.
4830 self.streams_blocked_uni_state.blocked_sent = Some(limit);
4831
4832 ack_eliciting = true;
4833 in_flight = true;
4834 }
4835 }
4836 }
4837
4838 // Create MAX_STREAM_DATA frames as needed.
4839 for stream_id in self.streams.almost_full() {
4840 let stream = match self.streams.get_mut(stream_id) {
4841 Some(v) => v,
4842
4843 None => {
4844 // The stream doesn't exist anymore, so remove it from
4845 // the almost full set.
4846 self.streams.remove_almost_full(stream_id);
4847 continue;
4848 },
4849 };
4850
            // Autotune the stream window size, but only if this is not a
            // retransmission (on a retransmit the stream will be in
            // `self.streams.almost_full()` but its `almost_full()`
            // method returns false).
4855 if stream.recv.almost_full() {
4856 stream.recv.autotune_window(now, path.recovery.rtt());
4857 }
4858
4859 let frame = frame::Frame::MaxStreamData {
4860 stream_id,
4861 max: stream.recv.max_data_next(),
4862 };
4863
4864 if push_frame_to_pkt!(b, frames, frame, left) {
4865 let recv_win = stream.recv.window();
4866
4867 stream.recv.update_max_data(now);
4868
4869 self.streams.remove_almost_full(stream_id);
4870
4871 ack_eliciting = true;
4872 in_flight = true;
4873
4874 // Make sure the connection window always has some
4875 // room compared to the stream window.
4876 flow_control.ensure_window_lower_bound(
4877 (recv_win as f64 * CONNECTION_WINDOW_FACTOR) as u64,
4878 );
4879 }
4880 }
4881
4882 // Create MAX_DATA frame as needed.
4883 if flow_control.should_update_max_data() &&
4884 flow_control.max_data() < flow_control.max_data_next()
4885 {
4886 // Autotune the connection window size. We only tune the window
4887 // if we are sending an "organic" update, not on retransmits.
4888 flow_control.autotune_window(now, path.recovery.rtt());
4889 self.should_send_max_data = true;
4890 }
4891
4892 if self.should_send_max_data {
4893 let frame = frame::Frame::MaxData {
4894 max: flow_control.max_data_next(),
4895 };
4896
4897 if push_frame_to_pkt!(b, frames, frame, left) {
4898 self.should_send_max_data = false;
4899
4900 // Commits the new max_rx_data limit.
4901 flow_control.update_max_data(now);
4902
4903 ack_eliciting = true;
4904 in_flight = true;
4905 }
4906 }
4907
4908 // Create STOP_SENDING frames as needed.
4909 for (stream_id, error_code) in self
4910 .streams
4911 .stopped()
4912 .map(|(&k, &v)| (k, v))
4913 .collect::<Vec<(u64, u64)>>()
4914 {
4915 let frame = frame::Frame::StopSending {
4916 stream_id,
4917 error_code,
4918 };
4919
4920 if push_frame_to_pkt!(b, frames, frame, left) {
4921 self.streams.remove_stopped(stream_id);
4922
4923 ack_eliciting = true;
4924 in_flight = true;
4925 }
4926 }
4927
4928 // Create RESET_STREAM frames as needed.
4929 for (stream_id, (error_code, final_size)) in self
4930 .streams
4931 .reset()
4932 .map(|(&k, &v)| (k, v))
4933 .collect::<Vec<(u64, (u64, u64))>>()
4934 {
4935 let frame = frame::Frame::ResetStream {
4936 stream_id,
4937 error_code,
4938 final_size,
4939 };
4940
4941 if push_frame_to_pkt!(b, frames, frame, left) {
4942 self.streams.remove_reset(stream_id);
4943
4944 ack_eliciting = true;
4945 in_flight = true;
4946 }
4947 }
4948
4949 // Create STREAM_DATA_BLOCKED frames as needed.
4950 for (stream_id, limit) in self
4951 .streams
4952 .blocked()
4953 .map(|(&k, &v)| (k, v))
4954 .collect::<Vec<(u64, u64)>>()
4955 {
4956 let frame = frame::Frame::StreamDataBlocked { stream_id, limit };
4957
4958 if push_frame_to_pkt!(b, frames, frame, left) {
4959 self.streams.remove_blocked(stream_id);
4960 self.stream_data_blocked_sent_count =
4961 self.stream_data_blocked_sent_count.saturating_add(1);
4962
4963 ack_eliciting = true;
4964 in_flight = true;
4965 }
4966 }
4967
4968 // Create RETIRE_CONNECTION_ID frames as needed.
4969 let retire_dcid_seqs = self.ids.retire_dcid_seqs();
4970
4971 for seq_num in retire_dcid_seqs {
4972 // The sequence number specified in a RETIRE_CONNECTION_ID frame
4973 // MUST NOT refer to the Destination Connection ID field of the
4974 // packet in which the frame is contained.
4975 let dcid_seq = path.active_dcid_seq.ok_or(Error::InvalidState)?;
4976
4977 if seq_num == dcid_seq {
4978 continue;
4979 }
4980
4981 let frame = frame::Frame::RetireConnectionId { seq_num };
4982
4983 if push_frame_to_pkt!(b, frames, frame, left) {
4984 self.ids.mark_retire_dcid_seq(seq_num, false)?;
4985
4986 ack_eliciting = true;
4987 in_flight = true;
4988 } else {
4989 break;
4990 }
4991 }
4992 }
4993
4994 // Create CONNECTION_CLOSE frame. Try to send this only on the active
4995 // path, unless it is the last one available.
4996 if path.active() || n_paths == 1 {
4997 if let Some(conn_err) = self.local_error.as_ref() {
4998 if conn_err.is_app {
4999 // Create ApplicationClose frame.
5000 if pkt_type == Type::Short {
5001 let frame = frame::Frame::ApplicationClose {
5002 error_code: conn_err.error_code,
5003 reason: conn_err.reason.clone(),
5004 };
5005
5006 if push_frame_to_pkt!(b, frames, frame, left) {
5007 let pto = path.recovery.pto();
5008 self.draining_timer = Some(now + (pto * 3));
5009
5010 ack_eliciting = true;
5011 in_flight = true;
5012 }
5013 }
5014 } else {
5015 // Create ConnectionClose frame.
5016 let frame = frame::Frame::ConnectionClose {
5017 error_code: conn_err.error_code,
5018 frame_type: 0,
5019 reason: conn_err.reason.clone(),
5020 };
5021
5022 if push_frame_to_pkt!(b, frames, frame, left) {
5023 let pto = path.recovery.pto();
5024 self.draining_timer = Some(now + (pto * 3));
5025
5026 ack_eliciting = true;
5027 in_flight = true;
5028 }
5029 }
5030 }
5031 }
5032
5033 // Create CRYPTO frame.
5034 if crypto_ctx.crypto_stream.is_flushable() &&
5035 left > frame::MAX_CRYPTO_OVERHEAD &&
5036 !is_closing &&
5037 path.active()
5038 {
5039 let crypto_off = crypto_ctx.crypto_stream.send.off_front();
5040
5041 // Encode the frame.
5042 //
5043 // Instead of creating a `frame::Frame` object, encode the frame
5044 // directly into the packet buffer.
5045 //
5046 // First we reserve some space in the output buffer for writing the
5047 // frame header (we assume the length field is always a 2-byte
5048 // varint as we don't know the value yet).
5049 //
5050 // Then we emit the data from the crypto stream's send buffer.
5051 //
5052 // Finally we go back and encode the frame header with the now
5053 // available information.
5054 let hdr_off = b.off();
5055 let hdr_len = 1 + // frame type
5056 octets::varint_len(crypto_off) + // offset
5057 2; // length, always encode as 2-byte varint
5058
5059 if let Some(max_len) = left.checked_sub(hdr_len) {
5060 let (mut crypto_hdr, mut crypto_payload) =
5061 b.split_at(hdr_off + hdr_len)?;
5062
5063 // Write stream data into the packet buffer.
5064 let (len, _) = crypto_ctx
5065 .crypto_stream
5066 .send
5067 .emit(&mut crypto_payload.as_mut()[..max_len])?;
5068
5069 // Encode the frame's header.
5070 //
5071 // Due to how `OctetsMut::split_at()` works, `crypto_hdr` starts
5072 // from the initial offset of `b` (rather than the current
5073 // offset), so it needs to be advanced to the
5074 // initial frame offset.
5075 crypto_hdr.skip(hdr_off)?;
5076
5077 frame::encode_crypto_header(
5078 crypto_off,
5079 len as u64,
5080 &mut crypto_hdr,
5081 )?;
5082
5083 // Advance the packet buffer's offset.
5084 b.skip(hdr_len + len)?;
5085
5086 let frame = frame::Frame::CryptoHeader {
5087 offset: crypto_off,
5088 length: len,
5089 };
5090
5091 if push_frame_to_pkt!(b, frames, frame, left) {
5092 ack_eliciting = true;
5093 in_flight = true;
5094 has_data = true;
5095 }
5096 }
5097 }
5098
5099 // The preference of data-bearing frame to include in a packet
5100 // is managed by `self.emit_dgram`. However, whether any frames
5101 // can be sent depends on the state of their buffers. In the case
5102 // where one type is preferred but its buffer is empty, fall back
5103 // to the other type in order not to waste this function call.
5104 let mut dgram_emitted = false;
5105 let dgrams_to_emit = max_dgram_len.is_some();
5106 let stream_to_emit = self.streams.has_flushable();
5107
5108 let mut do_dgram = self.emit_dgram && dgrams_to_emit;
5109 let do_stream = !self.emit_dgram && stream_to_emit;
5110
5111 if !do_stream && dgrams_to_emit {
5112 do_dgram = true;
5113 }
5114
5115 // Create DATAGRAM frame.
5116 if (pkt_type == Type::Short || pkt_type == Type::ZeroRTT) &&
5117 left > frame::MAX_DGRAM_OVERHEAD &&
5118 !is_closing &&
5119 path.active() &&
5120 do_dgram
5121 {
5122 if let Some(max_dgram_payload) = max_dgram_len {
5123 while let Some(len) = self.dgram_send_queue.peek_front_len() {
5124 let hdr_off = b.off();
5125 let hdr_len = 1 + // frame type
5126 2; // length, always encode as 2-byte varint
5127
5128 if (hdr_len + len) <= left {
5129 // Front of the queue fits this packet, send it.
5130 match self.dgram_send_queue.pop() {
5131 Some(data) => {
5132 // Encode the frame.
5133 //
5134 // Instead of creating a `frame::Frame` object,
5135 // encode the frame directly into the packet
5136 // buffer.
5137 //
5138 // First we reserve some space in the output
5139 // buffer for writing the frame header (we
5140 // assume the length field is always a 2-byte
5141 // varint as we don't know the value yet).
5142 //
5143 // Then we emit the data from the DATAGRAM's
5144 // buffer.
5145 //
5146 // Finally we go back and encode the frame
5147 // header with the now available information.
5148 let (mut dgram_hdr, mut dgram_payload) =
5149 b.split_at(hdr_off + hdr_len)?;
5150
5151 dgram_payload.as_mut()[..len]
5152 .copy_from_slice(data.as_ref());
5153
5154 // Encode the frame's header.
5155 //
5156 // Due to how `OctetsMut::split_at()` works,
5157 // `dgram_hdr` starts from the initial offset
5158 // of `b` (rather than the current offset), so
5159 // it needs to be advanced to the initial frame
5160 // offset.
5161 dgram_hdr.skip(hdr_off)?;
5162
5163 frame::encode_dgram_header(
5164 len as u64,
5165 &mut dgram_hdr,
5166 )?;
5167
5168 // Advance the packet buffer's offset.
5169 b.skip(hdr_len + len)?;
5170
5171 let frame =
5172 frame::Frame::DatagramHeader { length: len };
5173
5174 if push_frame_to_pkt!(b, frames, frame, left) {
5175 ack_eliciting = true;
5176 in_flight = true;
5177 dgram_emitted = true;
5178 self.dgram_sent_count =
5179 self.dgram_sent_count.saturating_add(1);
5180 path.dgram_sent_count =
5181 path.dgram_sent_count.saturating_add(1);
5182 }
5183 },
5184
5185 None => continue,
5186 };
5187 } else if len > max_dgram_payload {
5188 // This dgram frame will never fit. Let's purge it.
5189 self.dgram_send_queue.pop();
5190 } else {
5191 break;
5192 }
5193 }
5194 }
5195 }
5196
5197 // Create a single STREAM frame for the first stream that is flushable.
5198 if (pkt_type == Type::Short || pkt_type == Type::ZeroRTT) &&
5199 left > frame::MAX_STREAM_OVERHEAD &&
5200 !is_closing &&
5201 path.active() &&
5202 !dgram_emitted
5203 {
5204 while let Some(priority_key) = self.streams.peek_flushable() {
5205 let stream_id = priority_key.id;
5206 let stream = match self.streams.get_mut(stream_id) {
5207 // Avoid sending frames for streams that were already stopped.
5208 //
5209 // This might happen if stream data was buffered but not yet
5210 // flushed on the wire when a STOP_SENDING frame is received.
5211 Some(v) if !v.send.is_stopped() => v,
5212 _ => {
5213 self.streams.remove_flushable(&priority_key);
5214 continue;
5215 },
5216 };
5217
5218 let stream_off = stream.send.off_front();
5219
5220 // Encode the frame.
5221 //
5222 // Instead of creating a `frame::Frame` object, encode the frame
5223 // directly into the packet buffer.
5224 //
5225 // First we reserve some space in the output buffer for writing
5226 // the frame header (we assume the length field is always a
5227 // 2-byte varint as we don't know the value yet).
5228 //
5229 // Then we emit the data from the stream's send buffer.
5230 //
5231 // Finally we go back and encode the frame header with the now
5232 // available information.
5233 let hdr_off = b.off();
5234 let hdr_len = 1 + // frame type
5235 octets::varint_len(stream_id) + // stream_id
5236 octets::varint_len(stream_off) + // offset
5237 2; // length, always encode as 2-byte varint
5238
5239 let max_len = match left.checked_sub(hdr_len) {
5240 Some(v) => v,
5241 None => {
5242 let priority_key = Arc::clone(&stream.priority_key);
5243 self.streams.remove_flushable(&priority_key);
5244
5245 continue;
5246 },
5247 };
5248
5249 let (mut stream_hdr, mut stream_payload) =
5250 b.split_at(hdr_off + hdr_len)?;
5251
5252 // Write stream data into the packet buffer.
5253 let (len, fin) =
5254 stream.send.emit(&mut stream_payload.as_mut()[..max_len])?;
5255
5256 // Encode the frame's header.
5257 //
5258 // Due to how `OctetsMut::split_at()` works, `stream_hdr` starts
5259 // from the initial offset of `b` (rather than the current
5260 // offset), so it needs to be advanced to the initial frame
5261 // offset.
5262 stream_hdr.skip(hdr_off)?;
5263
5264 frame::encode_stream_header(
5265 stream_id,
5266 stream_off,
5267 len as u64,
5268 fin,
5269 &mut stream_hdr,
5270 )?;
5271
5272 // Advance the packet buffer's offset.
5273 b.skip(hdr_len + len)?;
5274
5275 let frame = frame::Frame::StreamHeader {
5276 stream_id,
5277 offset: stream_off,
5278 length: len,
5279 fin,
5280 };
5281
5282 if push_frame_to_pkt!(b, frames, frame, left) {
5283 ack_eliciting = true;
5284 in_flight = true;
5285 has_data = true;
5286 }
5287
5288 let priority_key = Arc::clone(&stream.priority_key);
5289 // If the stream is no longer flushable, remove it from the queue
5290 if !stream.is_flushable() {
5291 self.streams.remove_flushable(&priority_key);
5292 } else if stream.incremental {
5293 // Shuffle the incremental stream to the back of the
5294 // queue.
5295 self.streams.remove_flushable(&priority_key);
5296 self.streams.insert_flushable(&priority_key);
5297 }
5298
5299 #[cfg(feature = "fuzzing")]
5300 // Coalesce STREAM frames when fuzzing.
5301 if left > frame::MAX_STREAM_OVERHEAD {
5302 continue;
5303 }
5304
5305 break;
5306 }
5307 }
5308
5309 // Alternate trying to send DATAGRAMs next time.
5310 self.emit_dgram = !dgram_emitted;
5311
5312 // If no other ack-eliciting frame is sent, include a PING frame
5313 // - if PTO probe needed; OR
5314 // - if we've sent too many non ack-eliciting packets without having
5315 // sent an ACK eliciting one; OR
5316 // - the application requested an ack-eliciting frame be sent.
5317 if (ack_elicit_required || path.needs_ack_eliciting) &&
5318 !ack_eliciting &&
5319 left >= 1 &&
5320 !is_closing
5321 {
5322 let frame = frame::Frame::Ping { mtu_probe: None };
5323
5324 if push_frame_to_pkt!(b, frames, frame, left) {
5325 ack_eliciting = true;
5326 in_flight = true;
5327 }
5328 }
5329
5330 if ack_eliciting && !is_pmtud_probe {
5331 path.needs_ack_eliciting = false;
5332 path.recovery.ping_sent(epoch);
5333 }
5334
5335 if !has_data &&
5336 !dgram_emitted &&
5337 cwnd_available > frame::MAX_STREAM_OVERHEAD
5338 {
5339 path.recovery.on_app_limited();
5340 }
5341
5342 if frames.is_empty() {
5343 // When we reach this point we are not able to write more, so set
5344 // app_limited to false.
5345 path.recovery.update_app_limited(false);
5346 return Err(Error::Done);
5347 }
5348
5349 // When coalescing a 1-RTT packet, we can't add padding in the UDP
5350 // datagram, so use PADDING frames instead.
5351 //
5352 // This is only needed if
5353 // 1) an Initial packet has already been written to the UDP datagram,
5354 // as Initial always requires padding.
5355 //
5356 // 2) this is a probing packet towards an unvalidated peer address.
5357 if (has_initial || !path.validated()) &&
5358 pkt_type == Type::Short &&
5359 left >= 1
5360 {
5361 let frame = frame::Frame::Padding { len: left };
5362
5363 if push_frame_to_pkt!(b, frames, frame, left) {
5364 in_flight = true;
5365 }
5366 }
5367
5368 // Pad payload so that it's always at least 4 bytes.
5369 if b.off() - payload_offset < PAYLOAD_MIN_LEN {
5370 let payload_len = b.off() - payload_offset;
5371
5372 let frame = frame::Frame::Padding {
5373 len: PAYLOAD_MIN_LEN - payload_len,
5374 };
5375
5376 #[allow(unused_assignments)]
5377 if push_frame_to_pkt!(b, frames, frame, left) {
5378 in_flight = true;
5379 }
5380 }
5381
5382 let payload_len = b.off() - payload_offset;
5383
5384 // Fill in payload length.
5385 if pkt_type != Type::Short {
5386 let len = pn_len + payload_len + crypto_overhead;
5387
5388 let (_, mut payload_with_len) = b.split_at(header_offset)?;
5389 payload_with_len
5390 .put_varint_with_len(len as u64, PAYLOAD_LENGTH_LEN)?;
5391 }
5392
5393 trace!(
5394 "{} tx pkt {} len={} pn={} {}",
5395 self.trace_id,
5396 hdr_trace.unwrap_or_default(),
5397 payload_len,
5398 pn,
5399 AddrTupleFmt(path.local_addr(), path.peer_addr())
5400 );
5401
5402 #[cfg(feature = "qlog")]
5403 let mut qlog_frames: Vec<qlog::events::quic::QuicFrame> =
5404 Vec::with_capacity(frames.len());
5405
5406 for frame in &mut frames {
5407 trace!("{} tx frm {:?}", self.trace_id, frame);
5408
5409 qlog_with_type!(QLOG_PACKET_TX, self.qlog, _q, {
5410 qlog_frames.push(frame.to_qlog());
5411 });
5412 }
5413
5414 qlog_with_type!(QLOG_PACKET_TX, self.qlog, q, {
5415 if let Some(header) = qlog_pkt_hdr {
5416 // Qlog packet raw info described at
5417 // https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema-00#section-5.1
5418 //
5419 // `length` includes packet headers and trailers (AEAD tag).
5420 let length = payload_len + payload_offset + crypto_overhead;
5421 let qlog_raw_info = RawInfo {
5422 length: Some(length as u64),
5423 payload_length: Some(payload_len as u64),
5424 data: None,
5425 };
5426
5427 let send_at_time =
5428 now.duration_since(q.start_time()).as_secs_f64() * 1000.0;
5429
5430 let ev_data =
5431 EventData::QuicPacketSent(qlog::events::quic::PacketSent {
5432 header,
5433 frames: Some(qlog_frames),
5434 raw: Some(qlog_raw_info),
5435 send_at_time: Some(send_at_time),
5436 ..Default::default()
5437 });
5438
5439 q.add_event_data_with_instant(ev_data, now).ok();
5440 }
5441 });
5442
5443 let aead = match crypto_ctx.crypto_seal {
5444 Some(ref mut v) => v,
5445 None => return Err(Error::InvalidState),
5446 };
5447
5448 let written = packet::encrypt_pkt(
5449 &mut b,
5450 pn,
5451 pn_len,
5452 payload_len,
5453 payload_offset,
5454 None,
5455 aead,
5456 )?;
5457
5458 let sent_pkt_has_data = if path.recovery.gcongestion_enabled() {
5459 has_data || dgram_emitted
5460 } else {
5461 has_data
5462 };
5463
5464 let sent_pkt = recovery::Sent {
5465 pkt_num: pn,
5466 frames,
5467 time_sent: now,
5468 time_acked: None,
5469 time_lost: None,
5470 size: if ack_eliciting { written } else { 0 },
5471 ack_eliciting,
5472 in_flight,
5473 delivered: 0,
5474 delivered_time: now,
5475 first_sent_time: now,
5476 is_app_limited: false,
5477 tx_in_flight: 0,
5478 lost: 0,
5479 has_data: sent_pkt_has_data,
5480 is_pmtud_probe,
5481 };
5482
5483 if in_flight && is_app_limited {
5484 path.recovery.delivery_rate_update_app_limited(true);
5485 }
5486
5487 self.next_pkt_num += 1;
5488
5489 let handshake_status = recovery::HandshakeStatus {
5490 has_handshake_keys: self.crypto_ctx[packet::Epoch::Handshake]
5491 .has_keys(),
5492 peer_verified_address: self.peer_verified_initial_address,
5493 completed: self.handshake_completed,
5494 };
5495
5496 self.on_packet_sent(send_pid, sent_pkt, epoch, handshake_status, now)?;
5497
5498 let path = self.paths.get_mut(send_pid)?;
5499 qlog_with_type!(QLOG_METRICS, self.qlog, q, {
5500 path.recovery.maybe_qlog(q, now);
5501 });
5502
5503 // Record sent packet size if we probe the path.
5504 if let Some(data) = challenge_data {
5505 path.add_challenge_sent(data, written, now);
5506 }
5507
5508 self.sent_count += 1;
5509 self.sent_bytes += written as u64;
5510 path.sent_count += 1;
5511 path.sent_bytes += written as u64;
5512
5513 if self.dgram_send_queue.byte_size() > path.recovery.cwnd_available() {
5514 path.recovery.update_app_limited(false);
5515 }
5516
5517 let had_send_budget = path.max_send_bytes > 0;
5518 path.max_send_bytes = path.max_send_bytes.saturating_sub(written);
5519 if self.is_server &&
5520 !path.verified_peer_address &&
5521 had_send_budget &&
5522 path.max_send_bytes == 0
5523 {
5524 self.amplification_limited_count =
5525 self.amplification_limited_count.saturating_add(1);
5526 }
5527
        // On the client, drop initial state after sending a Handshake packet.
5529 if !self.is_server && hdr_ty == Type::Handshake {
5530 self.drop_epoch_state(packet::Epoch::Initial, now);
5531 }
5532
5533 // (Re)start the idle timer if we are sending the first ack-eliciting
5534 // packet since last receiving a packet.
5535 if ack_eliciting && !self.ack_eliciting_sent {
5536 if let Some(idle_timeout) = self.idle_timeout() {
5537 self.idle_timer = Some(now + idle_timeout);
5538 }
5539 }
5540
5541 if ack_eliciting {
5542 self.ack_eliciting_sent = true;
5543 }
5544
5545 Ok((pkt_type, written))
5546 }
5547
5548 fn on_packet_sent(
5549 &mut self, send_pid: usize, sent_pkt: recovery::Sent,
5550 epoch: packet::Epoch, handshake_status: recovery::HandshakeStatus,
5551 now: Instant,
5552 ) -> Result<()> {
5553 let path = self.paths.get_mut(send_pid)?;
5554
5555 // It's fine to set the skip counter based on a non-active path's values.
5556 let cwnd = path.recovery.cwnd();
5557 let max_datagram_size = path.recovery.max_datagram_size();
5558 self.pkt_num_spaces[epoch].on_packet_sent(&sent_pkt);
5559 self.pkt_num_manager.on_packet_sent(
5560 cwnd,
5561 max_datagram_size,
5562 self.handshake_completed,
5563 );
5564
5565 path.recovery.on_packet_sent(
5566 sent_pkt,
5567 epoch,
5568 handshake_status,
5569 now,
5570 &self.trace_id,
5571 );
5572
5573 Ok(())
5574 }
5575
5576 /// Returns the desired send time for the next packet.
5577 #[inline]
5578 pub fn get_next_release_time(&self) -> Option<ReleaseDecision> {
5579 Some(
5580 self.paths
5581 .get_active()
5582 .ok()?
5583 .recovery
5584 .get_next_release_time(),
5585 )
5586 }
5587
5588 /// Returns whether gcongestion is enabled.
5589 #[inline]
5590 pub fn gcongestion_enabled(&self) -> Option<bool> {
5591 Some(self.paths.get_active().ok()?.recovery.gcongestion_enabled())
5592 }
5593
5594 /// Returns the maximum pacing into the future.
5595 ///
5596 /// Equals 1/8 of the smoothed RTT, but at least 1ms and not greater than
5597 /// 5ms.
5598 pub fn max_release_into_future(&self) -> Duration {
5599 self.paths
5600 .get_active()
5601 .map(|p| p.recovery.rtt().mul_f64(0.125))
5602 .unwrap_or(Duration::from_millis(1))
5603 .max(Duration::from_millis(1))
5604 .min(Duration::from_millis(5))
5605 }
5606
    /// Returns whether pacing is enabled.
    #[inline]
    pub fn pacing_enabled(&self) -> bool {
        // Pacing is a connection-wide setting read from the shared recovery
        // configuration, not from any individual path.
        self.recovery_config.pacing
    }
5612
5613 /// Returns the size of the send quantum, in bytes.
5614 ///
5615 /// This represents the maximum size of a packet burst as determined by the
5616 /// congestion control algorithm in use.
5617 ///
5618 /// Applications can, for example, use it in conjunction with segmentation
5619 /// offloading mechanisms as the maximum limit for outgoing aggregates of
5620 /// multiple packets.
5621 #[inline]
5622 pub fn send_quantum(&self) -> usize {
5623 match self.paths.get_active() {
5624 Ok(p) => p.recovery.send_quantum(),
5625 _ => 0,
5626 }
5627 }
5628
5629 /// Returns the size of the send quantum over the given 4-tuple, in bytes.
5630 ///
5631 /// This represents the maximum size of a packet burst as determined by the
5632 /// congestion control algorithm in use.
5633 ///
5634 /// Applications can, for example, use it in conjunction with segmentation
5635 /// offloading mechanisms as the maximum limit for outgoing aggregates of
5636 /// multiple packets.
5637 ///
5638 /// If the (`local_addr`, peer_addr`) 4-tuple relates to a non-existing
5639 /// path, this method returns 0.
5640 pub fn send_quantum_on_path(
5641 &self, local_addr: SocketAddr, peer_addr: SocketAddr,
5642 ) -> usize {
5643 self.paths
5644 .path_id_from_addrs(&(local_addr, peer_addr))
5645 .and_then(|pid| self.paths.get(pid).ok())
5646 .map(|path| path.recovery.send_quantum())
5647 .unwrap_or(0)
5648 }
5649
    /// Reads contiguous data from a stream into the provided slice.
    ///
    /// The slice must be sized by the caller and will be populated up to its
    /// capacity.
    ///
    /// On success the amount of bytes read and a flag indicating the fin state
    /// is returned as a tuple, or [`Done`] if there is no data to read.
    ///
    /// Reading data from a stream may trigger queueing of control messages
    /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`send()`]: struct.Connection.html#method.send
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut buf = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// # let stream_id = 0;
    /// while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
    ///     println!("Got {} bytes on stream {}", read, stream_id);
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    #[inline]
    pub fn stream_recv(
        &mut self, stream_id: u64, out: &mut [u8],
    ) -> Result<(usize, bool)> {
        // `&mut [u8]` implements `bytes::BufMut`, so this is a thin wrapper
        // over the `BufMut`-based receive path.
        self.stream_recv_buf(stream_id, out)
    }
5686
    /// Reads contiguous data from a stream into the provided [`bytes::BufMut`].
    ///
    /// **NOTE**:
    /// The BufMut will be populated with all available data up to its capacity.
    /// Since some BufMut implementations, e.g., [`Vec<u8>`], dynamically
    /// allocate additional memory, the caller may use [`BufMut::limit()`]
    /// to limit the maximum amount of data that can be written.
    ///
    /// On success the amount of bytes read and a flag indicating the fin state
    /// is returned as a tuple, or [`Done`] if there is no data to read.
    /// [`BufMut::advance_mut()`] will have been called with the same number of
    /// total bytes.
    ///
    /// Reading data from a stream may trigger queueing of control messages
    /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
    ///
    /// [`BufMut::limit()`]: bytes::BufMut::limit
    /// [`BufMut::advance_mut()`]: bytes::BufMut::advance_mut
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`send()`]: struct.Connection.html#method.send
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # use bytes::BufMut as _;
    /// # let mut buf = Vec::new().limit(1024); // Read at most 1024 bytes
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// # let stream_id = 0;
    /// # let mut total_read = 0;
    /// while let Ok((read, fin)) = conn.stream_recv_buf(stream_id, &mut buf) {
    ///     println!("Got {} bytes on stream {}", read, stream_id);
    ///     total_read += read;
    ///     assert_eq!(buf.get_ref().len(), total_read);
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn stream_recv_buf<B: bytes::BufMut>(
        &mut self, stream_id: u64, out: B,
    ) -> Result<(usize, bool)> {
        // Delegate to the shared receive path, emitting bytes into `out`.
        self.do_stream_recv(stream_id, RecvAction::Emit { out })
    }
5733
    /// Discard contiguous data from a stream without copying.
    ///
    /// On success the amount of bytes discarded and a flag indicating the fin
    /// state is returned as a tuple, or [`Done`] if there is no data to
    /// discard.
    ///
    /// Discarding data from a stream may trigger queueing of control messages
    /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`send()`]: struct.Connection.html#method.send
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// # let stream_id = 0;
    /// while let Ok((read, fin)) = conn.stream_discard(stream_id, 1) {
    ///     println!("Discarded {} byte(s) on stream {}", read, stream_id);
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn stream_discard(
        &mut self, stream_id: u64, len: usize,
    ) -> Result<(usize, bool)> {
        // Discards up to `len` bytes from the front of the receive buffer.
        //
        // `do_stream_recv()` is generic on the kind of `BufMut` in RecvAction.
        // Since we are discarding, it doesn't matter, but the compiler still
        // wants to know, so we say `&mut [u8]`.
        self.do_stream_recv::<&mut [u8]>(stream_id, RecvAction::Discard { len })
    }
5769
5770 // Reads or discards contiguous data from a stream.
5771 //
5772 // Passing an `action` of `StreamRecvAction::Emit` results in a read into
5773 // the provided slice. It must be sized by the caller and will be populated
5774 // up to its capacity.
5775 //
5776 // Passing an `action` of `StreamRecvAction::Discard` results in discard up
5777 // to the indicated length.
5778 //
5779 // On success the amount of bytes read or discarded, and a flag indicating
5780 // the fin state, is returned as a tuple, or [`Done`] if there is no data to
5781 // read or discard.
5782 //
5783 // Reading or discarding data from a stream may trigger queueing of control
5784 // messages (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
5785 //
5786 // [`Done`]: enum.Error.html#variant.Done
5787 // [`send()`]: struct.Connection.html#method.send
    // Reads or discards contiguous data from a stream, backing both
    // `stream_recv()` and `stream_discard()`.
    fn do_stream_recv<B: bytes::BufMut>(
        &mut self, stream_id: u64, action: RecvAction<B>,
    ) -> Result<(usize, bool)> {
        // We can't read on our own unidirectional streams.
        if !stream::is_bidi(stream_id) &&
            stream::is_local(stream_id, self.is_server)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        let stream = self
            .streams
            .get_mut(stream_id)
            .ok_or(Error::InvalidStreamState(stream_id))?;

        if !stream.is_readable() {
            return Err(Error::Done);
        }

        // Copy these out now: `local` is needed to collect the stream after
        // the `stream` borrow is spent, and the priority key is needed for
        // readable-queue updates below.
        let local = stream.local;
        let priority_key = Arc::clone(&stream.priority_key);

        // Snapshot the read offset before emitting, for the qlog event.
        #[cfg(feature = "qlog")]
        let offset = stream.recv.off_front();

        // Record in qlog where the bytes went: to the application on a read,
        // or dropped on a discard.
        #[cfg(feature = "qlog")]
        let to = match action {
            RecvAction::Emit { .. } => Some(DataRecipient::Application),

            RecvAction::Discard { .. } => Some(DataRecipient::Dropped),
        };

        let (read, fin) = match stream.recv.emit_or_discard(action) {
            Ok(v) => v,

            Err(e) => {
                // Collect the stream if it is now complete. This can happen if
                // we got a `StreamReset` error which will now be propagated to
                // the application, so we don't need to keep the stream's state
                // anymore.
                if stream.is_complete() {
                    self.streams.collect(stream_id, local);
                }

                self.streams.remove_readable(&priority_key);
                return Err(e);
            },
        };

        // Credit the consumed bytes to connection-level flow control so a
        // MAX_DATA update can eventually be sent.
        self.flow_control.add_consumed(read as u64);

        let readable = stream.is_readable();

        let complete = stream.is_complete();

        // Stream-level flow control: queue a MAX_STREAM_DATA update if the
        // receive window is nearly exhausted.
        if stream.recv.almost_full() {
            self.streams.insert_almost_full(stream_id);
        }

        if !readable {
            self.streams.remove_readable(&priority_key);
        }

        if complete {
            self.streams.collect(stream_id, local);
        }

        qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
            let ev_data = EventData::QuicStreamDataMoved(
                qlog::events::quic::StreamDataMoved {
                    stream_id: Some(stream_id),
                    offset: Some(offset),
                    raw: Some(RawInfo {
                        length: Some(read as u64),
                        ..Default::default()
                    }),
                    from: Some(DataRecipient::Transport),
                    to,
                    additional_info: fin
                        .then_some(DataMovedAdditionalInfo::FinSet),
                },
            );

            let now = Instant::now();
            q.add_event_data_with_instant(ev_data, now).ok();
        });

        if priority_key.incremental && readable {
            // Shuffle the incremental stream to the back of the queue.
            self.streams.remove_readable(&priority_key);
            self.streams.insert_readable(&priority_key);
        }

        Ok((read, fin))
    }
5883
5884 /// Writes data to a stream.
5885 ///
5886 /// On success the number of bytes written is returned, or [`Done`] if no
5887 /// data was written (e.g. because the stream has no capacity).
5888 ///
5889 /// Applications can provide a 0-length buffer with the fin flag set to
5890 /// true. This will lead to a 0-length FIN STREAM frame being sent at the
5891 /// latest offset. The `Ok(0)` value is only returned when the application
5892 /// provided a 0-length buffer.
5893 ///
5894 /// In addition, if the peer has signalled that it doesn't want to receive
5895 /// any more data from this stream by sending the `STOP_SENDING` frame, the
5896 /// [`StreamStopped`] error will be returned instead of any data.
5897 ///
5898 /// Note that in order to avoid buffering an infinite amount of data in the
5899 /// stream's send buffer, streams are only allowed to buffer outgoing data
5900 /// up to the amount that the peer allows it to send (that is, up to the
5901 /// stream's outgoing flow control capacity).
5902 ///
5903 /// This means that the number of written bytes returned can be lower than
5904 /// the length of the input buffer when the stream doesn't have enough
5905 /// capacity for the operation to complete. The application should retry the
5906 /// operation once the stream is reported as writable again.
5907 ///
5908 /// Applications should call this method only after the handshake is
5909 /// completed (whenever [`is_established()`] returns `true`) or during
5910 /// early data if enabled (whenever [`is_in_early_data()`] returns `true`).
5911 ///
5912 /// [`Done`]: enum.Error.html#variant.Done
5913 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
5914 /// [`is_established()`]: struct.Connection.html#method.is_established
5915 /// [`is_in_early_data()`]: struct.Connection.html#method.is_in_early_data
5916 ///
5917 /// ## Examples:
5918 ///
5919 /// ```no_run
5920 /// # let mut buf = [0; 512];
5921 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
5922 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
5923 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
5924 /// # let peer = "127.0.0.1:1234".parse().unwrap();
5925 /// # let local = "127.0.0.1:4321".parse().unwrap();
5926 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
5927 /// # let stream_id = 0;
5928 /// conn.stream_send(stream_id, b"hello", true)?;
5929 /// # Ok::<(), quiche::Error>(())
5930 /// ```
5931 pub fn stream_send(
5932 &mut self, stream_id: u64, buf: &[u8], fin: bool,
5933 ) -> Result<usize> {
5934 self.stream_do_send(
5935 stream_id,
5936 buf,
5937 fin,
5938 |stream: &mut stream::Stream<F>,
5939 buf: &[u8],
5940 cap: usize,
5941 fin: bool| {
5942 stream.send.write(&buf[..cap], fin).map(|v| (v, v))
5943 },
5944 )
5945 }
5946
5947 /// Writes data to a stream with zero copying, instead, it appends the
5948 /// provided buffer directly to the send queue if the capacity allows
5949 /// it.
5950 ///
5951 /// When a partial write happens (including when [`Error::Done`] is
5952 /// returned) the remaining (unwritten) buffer will also be returned.
5953 /// The application should retry the operation once the stream is
5954 /// reported as writable again.
5955 pub fn stream_send_zc(
5956 &mut self, stream_id: u64, buf: F::Buf, len: Option<usize>, fin: bool,
5957 ) -> Result<(usize, Option<F::Buf>)>
5958 where
5959 F::Buf: BufSplit,
5960 {
5961 self.stream_do_send(
5962 stream_id,
5963 buf,
5964 fin,
5965 |stream: &mut stream::Stream<F>,
5966 buf: F::Buf,
5967 cap: usize,
5968 fin: bool| {
5969 let len = len.unwrap_or(usize::MAX).min(cap);
5970 let (sent, remaining) = stream.send.append_buf(buf, len, fin)?;
5971 Ok((sent, (sent, remaining)))
5972 },
5973 )
5974 }
5975
    // Common implementation behind `stream_send()` and `stream_send_zc()`.
    //
    // `write_fn` performs the actual buffering into the stream's send queue
    // (either by copy or by appending an owned buffer) and returns the number
    // of bytes buffered plus the caller-visible result `R`. This method owns
    // all the surrounding bookkeeping: stream creation, flow-control and
    // blocked-state accounting, flushable/writable queue maintenance and
    // qlog reporting.
    fn stream_do_send<B, R, SND>(
        &mut self, stream_id: u64, buf: B, fin: bool, write_fn: SND,
    ) -> Result<R>
    where
        B: AsRef<[u8]>,
        SND: FnOnce(&mut stream::Stream<F>, B, usize, bool) -> Result<(usize, R)>,
    {
        // We can't write on the peer's unidirectional streams.
        if !stream::is_bidi(stream_id) &&
            !stream::is_local(stream_id, self.is_server)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        let len = buf.as_ref().len();

        // Mark the connection as blocked if the connection-level flow control
        // limit doesn't let us buffer all the data.
        //
        // Note that this is separate from "send capacity" as that also takes
        // congestion control into consideration.
        if self.max_tx_data - self.tx_data < len as u64 {
            self.blocked_limit = Some(self.max_tx_data);
        }

        // Connection-level send capacity, captured before `self` is
        // re-borrowed for the stream below.
        let cap = self.tx_cap;

        // Get existing stream or create a new one.
        let stream = match self.get_or_create_stream(stream_id, true) {
            Ok(v) => v,

            Err(Error::StreamLimit) => {
                // If the local endpoint has exhausted the peer's stream count
                // limit, record the current limit so that a STREAMS_BLOCKED
                // frame can be sent.
                if self.enable_send_streams_blocked &&
                    stream::is_local(stream_id, self.is_server)
                {
                    if stream::is_bidi(stream_id) {
                        let limit = self.streams.peer_max_streams_bidi();
                        self.streams_blocked_bidi_state.update_at(limit);
                    } else {
                        let limit = self.streams.peer_max_streams_uni();
                        self.streams_blocked_uni_state.update_at(limit);
                    }
                }

                return Err(Error::StreamLimit);
            },

            Err(e) => return Err(e),
        };

        // Snapshot the append offset before writing, for the qlog event.
        #[cfg(feature = "qlog")]
        let offset = stream.send.off_back();

        // Capture pre-write state so queue membership is only changed when
        // it actually transitions.
        let was_writable = stream.is_writable();

        let was_flushable = stream.is_flushable();

        let is_complete = stream.is_complete();
        let is_readable = stream.is_readable();

        let priority_key = Arc::clone(&stream.priority_key);

        // Return early if the stream has been stopped, and collect its state
        // if complete.
        if let Err(Error::StreamStopped(e)) = stream.send.cap() {
            // Only collect the stream if it is complete and not readable.
            // If it is readable, it will get collected when stream_recv()
            // is used.
            //
            // The stream can't be writable if it has been stopped.
            if is_complete && !is_readable {
                let local = stream.local;
                self.streams.collect(stream_id, local);
            }

            return Err(Error::StreamStopped(e));
        };

        // Truncate the input buffer based on the connection's send capacity if
        // necessary.
        //
        // When the cap is zero, the method returns Ok(0) *only* when the passed
        // buffer is empty. We return Error::Done otherwise.
        if cap == 0 && len > 0 {
            if was_writable {
                // When `stream_writable_next()` returns a stream, the writable
                // mark is removed, but because the stream is blocked by the
                // connection-level send capacity it won't be marked as writable
                // again once the capacity increases.
                //
                // Since the stream is writable already, mark it here instead.
                self.streams.insert_writable(&priority_key);
            }

            return Err(Error::Done);
        }

        // If the connection cap truncates the write, suppress `fin` (more
        // data is still pending) and remember that the cap was the limiter.
        let (cap, fin, blocked_by_cap) = if cap < len {
            (cap, false, true)
        } else {
            (len, fin, false)
        };

        let (sent, ret) = match write_fn(stream, buf, cap, fin) {
            Ok(v) => v,

            Err(e) => {
                self.streams.remove_writable(&priority_key);
                return Err(e);
            },
        };

        let incremental = stream.incremental;
        let priority_key = Arc::clone(&stream.priority_key);

        let flushable = stream.is_flushable();

        let writable = stream.is_writable();

        let empty_fin = len == 0 && fin;

        // A short write means stream-level flow control got in the way:
        // record the blocked offset so STREAM_DATA_BLOCKED can be sent
        // (at most once per offset). A full write clears any blocked state.
        if sent < cap {
            let max_off = stream.send.max_off();

            if stream.send.blocked_at() != Some(max_off) {
                stream.send.update_blocked_at(Some(max_off));
                self.streams.insert_blocked(stream_id, max_off);
            }
        } else {
            stream.send.update_blocked_at(None);
            self.streams.remove_blocked(stream_id);
        }

        // If the stream is now flushable push it to the flushable queue, but
        // only if it wasn't already queued.
        //
        // Consider the stream flushable also when we are sending a zero-length
        // frame that has the fin flag set.
        if (flushable || empty_fin) && !was_flushable {
            self.streams.insert_flushable(&priority_key);
        }

        if !writable {
            self.streams.remove_writable(&priority_key);
        } else if was_writable && blocked_by_cap {
            // When `stream_writable_next()` returns a stream, the writable
            // mark is removed, but because the stream is blocked by the
            // connection-level send capacity it won't be marked as writable
            // again once the capacity increases.
            //
            // Since the stream is writable already, mark it here instead.
            self.streams.insert_writable(&priority_key);
        }

        // Charge the buffered bytes against connection-level capacity and
        // flow-control accounting.
        self.tx_cap -= sent;

        self.tx_data += sent as u64;

        self.tx_buffered += sent;
        self.check_tx_buffered_invariant();

        qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
            let ev_data = EventData::QuicStreamDataMoved(
                qlog::events::quic::StreamDataMoved {
                    stream_id: Some(stream_id),
                    offset: Some(offset),
                    raw: Some(RawInfo {
                        length: Some(sent as u64),
                        ..Default::default()
                    }),
                    from: Some(DataRecipient::Application),
                    to: Some(DataRecipient::Transport),
                    additional_info: fin
                        .then_some(DataMovedAdditionalInfo::FinSet),
                },
            );

            let now = Instant::now();
            q.add_event_data_with_instant(ev_data, now).ok();
        });

        // Nothing was buffered even though there was capacity: report Done
        // (Ok(0) is reserved for zero-length input, per the public docs).
        if sent == 0 && cap > 0 {
            return Err(Error::Done);
        }

        if incremental && writable {
            // Shuffle the incremental stream to the back of the queue.
            self.streams.remove_writable(&priority_key);
            self.streams.insert_writable(&priority_key);
        }

        Ok(ret)
    }
6172
6173 /// Sets the priority for a stream.
6174 ///
6175 /// A stream's priority determines the order in which stream data is sent
6176 /// on the wire (streams with lower priority are sent first). Streams are
6177 /// created with a default priority of `127`.
6178 ///
6179 /// The target stream is created if it did not exist before calling this
6180 /// method.
6181 pub fn stream_priority(
6182 &mut self, stream_id: u64, urgency: u8, incremental: bool,
6183 ) -> Result<()> {
6184 // Get existing stream or create a new one, but if the stream
6185 // has already been closed and collected, ignore the prioritization.
6186 let stream = match self.get_or_create_stream(stream_id, true) {
6187 Ok(v) => v,
6188
6189 Err(Error::Done) => return Ok(()),
6190
6191 Err(e) => return Err(e),
6192 };
6193
6194 if stream.urgency == urgency && stream.incremental == incremental {
6195 return Ok(());
6196 }
6197
6198 stream.urgency = urgency;
6199 stream.incremental = incremental;
6200
6201 let new_priority_key = Arc::new(StreamPriorityKey {
6202 urgency: stream.urgency,
6203 incremental: stream.incremental,
6204 id: stream_id,
6205 ..Default::default()
6206 });
6207
6208 let old_priority_key =
6209 std::mem::replace(&mut stream.priority_key, new_priority_key.clone());
6210
6211 self.streams
6212 .update_priority(&old_priority_key, &new_priority_key);
6213
6214 Ok(())
6215 }
6216
6217 /// Shuts down reading or writing from/to the specified stream.
6218 ///
6219 /// When the `direction` argument is set to [`Shutdown::Read`], outstanding
6220 /// data in the stream's receive buffer is dropped, and no additional data
6221 /// is added to it. Data received after calling this method is still
6222 /// validated and acked but not stored, and [`stream_recv()`] will not
6223 /// return it to the application. In addition, a `STOP_SENDING` frame will
6224 /// be sent to the peer to signal it to stop sending data.
6225 ///
6226 /// When the `direction` argument is set to [`Shutdown::Write`], outstanding
6227 /// data in the stream's send buffer is dropped, and no additional data is
6228 /// added to it. Data passed to [`stream_send()`] after calling this method
6229 /// will be ignored. In addition, a `RESET_STREAM` frame will be sent to the
6230 /// peer to signal the reset.
6231 ///
6232 /// Locally-initiated unidirectional streams can only be closed in the
6233 /// [`Shutdown::Write`] direction. Remotely-initiated unidirectional streams
6234 /// can only be closed in the [`Shutdown::Read`] direction. Using an
6235 /// incorrect direction will return [`InvalidStreamState`].
6236 ///
6237 /// [`Shutdown::Read`]: enum.Shutdown.html#variant.Read
6238 /// [`Shutdown::Write`]: enum.Shutdown.html#variant.Write
6239 /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
6240 /// [`stream_send()`]: struct.Connection.html#method.stream_send
6241 /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
    pub fn stream_shutdown(
        &mut self, stream_id: u64, direction: Shutdown, err: u64,
    ) -> Result<()> {
        // Don't try to stop a local unidirectional stream.
        if direction == Shutdown::Read &&
            stream::is_local(stream_id, self.is_server) &&
            !stream::is_bidi(stream_id)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        // Don't try to reset a remote unidirectional stream.
        if direction == Shutdown::Write &&
            !stream::is_local(stream_id, self.is_server) &&
            !stream::is_bidi(stream_id)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        // Get existing stream.
        let stream = self.streams.get_mut(stream_id).ok_or(Error::Done)?;

        let priority_key = Arc::clone(&stream.priority_key);

        match direction {
            Shutdown::Read => {
                // Drop buffered receive data; the dropped bytes are credited
                // to connection-level flow control as if consumed.
                let consumed = stream.recv.shutdown()?;
                self.flow_control.add_consumed(consumed);

                // If the peer hasn't finished sending yet, queue a
                // STOP_SENDING frame to tell it to stop.
                if !stream.recv.is_fin() {
                    self.streams.insert_stopped(stream_id, err);
                }

                // Once shutdown, the stream is guaranteed to be non-readable.
                self.streams.remove_readable(&priority_key);

                self.stopped_stream_local_count =
                    self.stopped_stream_local_count.saturating_add(1);
            },

            Shutdown::Write => {
                let (final_size, unsent) = stream.send.shutdown()?;

                // Claw back some flow control allowance from data that was
                // buffered but not actually sent before the stream was reset.
                self.tx_data = self.tx_data.saturating_sub(unsent);

                self.tx_buffered =
                    self.tx_buffered.saturating_sub(unsent as usize);

                // These drops in qlog are a bit weird, but the only way to ensure
                // that all bytes that are moved from App to Transport in
                // stream_do_send are eventually moved from Transport to Dropped.
                // Ideally we would add a Transport to Network transition also as
                // a way to indicate when bytes were transmitted vs dropped
                // without ever being sent.
                qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
                    let ev_data = EventData::QuicStreamDataMoved(
                        qlog::events::quic::StreamDataMoved {
                            stream_id: Some(stream_id),
                            offset: Some(final_size),
                            raw: Some(RawInfo {
                                length: Some(unsent),
                                ..Default::default()
                            }),
                            from: Some(DataRecipient::Transport),
                            to: Some(DataRecipient::Dropped),
                            ..Default::default()
                        },
                    );

                    q.add_event_data_with_instant(ev_data, Instant::now()).ok();
                });

                // Update send capacity.
                self.update_tx_cap();

                // Queue a RESET_STREAM frame carrying the application error
                // code and the stream's final size.
                self.streams.insert_reset(stream_id, err, final_size);

                // Once shutdown, the stream is guaranteed to be non-writable.
                self.streams.remove_writable(&priority_key);

                self.reset_stream_local_count =
                    self.reset_stream_local_count.saturating_add(1);
            },
        }

        Ok(())
    }
6331
6332 /// Returns the stream's send capacity in bytes.
6333 ///
6334 /// If the specified stream doesn't exist (including when it has already
6335 /// been completed and closed), the [`InvalidStreamState`] error will be
6336 /// returned.
6337 ///
6338 /// In addition, if the peer has signalled that it doesn't want to receive
6339 /// any more data from this stream by sending the `STOP_SENDING` frame, the
6340 /// [`StreamStopped`] error will be returned.
6341 ///
6342 /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
6343 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
6344 #[inline]
6345 pub fn stream_capacity(&mut self, stream_id: u64) -> Result<usize> {
6346 if let Some(stream) = self.streams.get(stream_id) {
6347 let stream_cap = match stream.send.cap() {
6348 Ok(v) => v,
6349
6350 Err(Error::StreamStopped(e)) => {
6351 // Only collect the stream if it is complete and not
6352 // readable. If it is readable, it will get collected when
6353 // stream_recv() is used.
6354 if stream.is_complete() && !stream.is_readable() {
6355 let local = stream.local;
6356 self.streams.collect(stream_id, local);
6357 }
6358
6359 return Err(Error::StreamStopped(e));
6360 },
6361
6362 Err(e) => return Err(e),
6363 };
6364
6365 let cap = cmp::min(self.tx_cap, stream_cap);
6366 return Ok(cap);
6367 };
6368
6369 Err(Error::InvalidStreamState(stream_id))
6370 }
6371
6372 /// Returns the next stream that has data to read.
6373 ///
6374 /// Note that once returned by this method, a stream ID will not be returned
6375 /// again until it is "re-armed".
6376 ///
6377 /// The application will need to read all of the pending data on the stream,
6378 /// and new data has to be received before the stream is reported again.
6379 ///
6380 /// This is unlike the [`readable()`] method, that returns the same list of
6381 /// readable streams when called multiple times in succession.
6382 ///
6383 /// [`readable()`]: struct.Connection.html#method.readable
6384 pub fn stream_readable_next(&mut self) -> Option<u64> {
6385 let priority_key = self.streams.readable.front().clone_pointer()?;
6386
6387 self.streams.remove_readable(&priority_key);
6388
6389 Some(priority_key.id)
6390 }
6391
6392 /// Returns true if the stream has data that can be read.
6393 pub fn stream_readable(&self, stream_id: u64) -> bool {
6394 let stream = match self.streams.get(stream_id) {
6395 Some(v) => v,
6396
6397 None => return false,
6398 };
6399
6400 stream.is_readable()
6401 }
6402
6403 /// Returns the next stream that can be written to.
6404 ///
6405 /// Note that once returned by this method, a stream ID will not be returned
6406 /// again until it is "re-armed".
6407 ///
6408 /// This is unlike the [`writable()`] method, that returns the same list of
6409 /// writable streams when called multiple times in succession. It is not
6410 /// advised to use both `stream_writable_next()` and [`writable()`] on the
6411 /// same connection, as it may lead to unexpected results.
6412 ///
6413 /// The [`stream_writable()`] method can also be used to fine-tune when a
6414 /// stream is reported as writable again.
6415 ///
6416 /// [`stream_writable()`]: struct.Connection.html#method.stream_writable
6417 /// [`writable()`]: struct.Connection.html#method.writable
6418 pub fn stream_writable_next(&mut self) -> Option<u64> {
6419 // If there is not enough connection-level send capacity, none of the
6420 // streams are writable.
6421 if self.tx_cap == 0 {
6422 return None;
6423 }
6424
6425 let mut cursor = self.streams.writable.front();
6426
6427 while let Some(priority_key) = cursor.clone_pointer() {
6428 if let Some(stream) = self.streams.get(priority_key.id) {
6429 let cap = match stream.send.cap() {
6430 Ok(v) => v,
6431
6432 // Return the stream to the application immediately if it's
6433 // stopped.
6434 Err(_) =>
6435 return {
6436 self.streams.remove_writable(&priority_key);
6437
6438 Some(priority_key.id)
6439 },
6440 };
6441
6442 if cmp::min(self.tx_cap, cap) >= stream.send_lowat {
6443 self.streams.remove_writable(&priority_key);
6444 return Some(priority_key.id);
6445 }
6446 }
6447
6448 cursor.move_next();
6449 }
6450
6451 None
6452 }
6453
6454 /// Returns true if the stream has enough send capacity.
6455 ///
6456 /// When `len` more bytes can be buffered into the given stream's send
6457 /// buffer, `true` will be returned, `false` otherwise.
6458 ///
6459 /// In the latter case, if the additional data can't be buffered due to
6460 /// flow control limits, the peer will also be notified, and a "low send
6461 /// watermark" will be set for the stream, such that it is not going to be
6462 /// reported as writable again by [`stream_writable_next()`] until its send
6463 /// capacity reaches `len`.
6464 ///
6465 /// If the specified stream doesn't exist (including when it has already
6466 /// been completed and closed), the [`InvalidStreamState`] error will be
6467 /// returned.
6468 ///
6469 /// In addition, if the peer has signalled that it doesn't want to receive
6470 /// any more data from this stream by sending the `STOP_SENDING` frame, the
6471 /// [`StreamStopped`] error will be returned.
6472 ///
6473 /// [`stream_writable_next()`]: struct.Connection.html#method.stream_writable_next
6474 /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
6475 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
    #[inline]
    pub fn stream_writable(
        &mut self, stream_id: u64, len: usize,
    ) -> Result<bool> {
        // Fast path: enough combined stream + connection capacity already.
        // Note that `stream_capacity()` also propagates StreamStopped and
        // InvalidStreamState errors via `?`.
        if self.stream_capacity(stream_id)? >= len {
            return Ok(true);
        }

        let stream = match self.streams.get_mut(stream_id) {
            Some(v) => v,

            None => return Err(Error::InvalidStreamState(stream_id)),
        };

        // Raise the stream's low watermark so `stream_writable_next()` won't
        // report it until at least `len` bytes of capacity are available
        // (minimum of 1 so the stream can still become writable).
        stream.send_lowat = cmp::max(1, len);

        let is_writable = stream.is_writable();

        let priority_key = Arc::clone(&stream.priority_key);

        // If the connection-level flow control limit is what's in the way,
        // record it so a DATA_BLOCKED frame can be sent to the peer.
        if self.max_tx_data - self.tx_data < len as u64 {
            self.blocked_limit = Some(self.max_tx_data);
        }

        // If stream-level flow control is what's in the way, record the
        // blocked offset so STREAM_DATA_BLOCKED can be sent (at most once
        // per offset).
        if stream.send.cap()? < len {
            let max_off = stream.send.max_off();
            if stream.send.blocked_at() != Some(max_off) {
                stream.send.update_blocked_at(Some(max_off));
                self.streams.insert_blocked(stream_id, max_off);
            }
        } else if is_writable {
            // When `stream_writable_next()` returns a stream, the writable
            // mark is removed, but because the stream is blocked by the
            // connection-level send capacity it won't be marked as writable
            // again once the capacity increases.
            //
            // Since the stream is writable already, mark it here instead.
            self.streams.insert_writable(&priority_key);
        }

        Ok(false)
    }
6518
6519 /// Returns true if all the data has been read from the specified stream.
6520 ///
6521 /// This instructs the application that all the data received from the
6522 /// peer on the stream has been read, and there won't be anymore in the
6523 /// future.
6524 ///
6525 /// Basically this returns true when the peer either set the `fin` flag
6526 /// for the stream, or sent `RESET_STREAM`.
6527 #[inline]
6528 pub fn stream_finished(&self, stream_id: u64) -> bool {
6529 let stream = match self.streams.get(stream_id) {
6530 Some(v) => v,
6531
6532 None => return true,
6533 };
6534
6535 stream.recv.is_fin()
6536 }
6537
6538 /// Returns the number of bidirectional streams that can be created
6539 /// before the peer's stream count limit is reached.
6540 ///
6541 /// This can be useful to know if it's possible to create a bidirectional
6542 /// stream without trying it first.
    #[inline]
    pub fn peer_streams_left_bidi(&self) -> u64 {
        // Pure delegation; the stream map owns the stream-count accounting.
        self.streams.peer_streams_left_bidi()
    }
6547
6548 /// Returns the number of unidirectional streams that can be created
6549 /// before the peer's stream count limit is reached.
6550 ///
6551 /// This can be useful to know if it's possible to create a unidirectional
6552 /// stream without trying it first.
    #[inline]
    pub fn peer_streams_left_uni(&self) -> u64 {
        // Pure delegation; the stream map owns the stream-count accounting.
        self.streams.peer_streams_left_uni()
    }
6557
6558 /// Returns an iterator over streams that have outstanding data to read.
6559 ///
6560 /// Note that the iterator will only include streams that were readable at
6561 /// the time the iterator itself was created (i.e. when `readable()` was
6562 /// called). To account for newly readable streams, the iterator needs to
6563 /// be created again.
6564 ///
6565 /// ## Examples:
6566 ///
6567 /// ```no_run
6568 /// # let mut buf = [0; 512];
6569 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6570 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6571 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6572 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6573 /// # let local = socket.local_addr().unwrap();
6574 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6575 /// // Iterate over readable streams.
6576 /// for stream_id in conn.readable() {
6577 /// // Stream is readable, read until there's no more data.
6578 /// while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
6579 /// println!("Got {} bytes on stream {}", read, stream_id);
6580 /// }
6581 /// }
6582 /// # Ok::<(), quiche::Error>(())
6583 /// ```
    #[inline]
    pub fn readable(&self) -> StreamIter {
        // The stream map owns the readable set; this returns an iterator
        // over the streams readable at the time of the call.
        self.streams.readable()
    }
6588
6589 /// Returns an iterator over streams that can be written in priority order.
6590 ///
6591 /// The priority order is based on RFC 9218 scheduling recommendations.
6592 /// Stream priority can be controlled using [`stream_priority()`]. In order
6593 /// to support fairness requirements, each time this method is called,
6594 /// internal state is updated. Therefore the iterator ordering can change
6595 /// between calls, even if no streams were added or removed.
6596 ///
6597 /// A "writable" stream is a stream that has enough flow control capacity to
6598 /// send data to the peer. To avoid buffering an infinite amount of data,
6599 /// streams are only allowed to buffer outgoing data up to the amount that
6600 /// the peer allows to send.
6601 ///
6602 /// Note that the iterator will only include streams that were writable at
6603 /// the time the iterator itself was created (i.e. when `writable()` was
6604 /// called). To account for newly writable streams, the iterator needs to be
6605 /// created again.
6606 ///
6607 /// ## Examples:
6608 ///
6609 /// ```no_run
6610 /// # let mut buf = [0; 512];
6611 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6612 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6613 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6614 /// # let local = socket.local_addr().unwrap();
6615 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6616 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6617 /// // Iterate over writable streams.
6618 /// for stream_id in conn.writable() {
6619 /// // Stream is writable, write some data.
6620 /// if let Ok(written) = conn.stream_send(stream_id, &buf, false) {
6621 /// println!("Written {} bytes on stream {}", written, stream_id);
6622 /// }
6623 /// }
6624 /// # Ok::<(), quiche::Error>(())
6625 /// ```
6626 /// [`stream_priority()`]: struct.Connection.html#method.stream_priority
6627 #[inline]
6628 pub fn writable(&self) -> StreamIter {
6629 // If there is not enough connection-level send capacity, none of the
6630 // streams are writable, so return an empty iterator.
6631 if self.tx_cap == 0 {
6632 return StreamIter::default();
6633 }
6634
6635 self.streams.writable()
6636 }
6637
6638 /// Returns the maximum possible size of egress UDP payloads.
6639 ///
6640 /// This is the maximum size of UDP payloads that can be sent, and depends
6641 /// on both the configured maximum send payload size of the local endpoint
6642 /// (as configured with [`set_max_send_udp_payload_size()`]), as well as
6643 /// the transport parameter advertised by the remote peer.
6644 ///
6645 /// Note that this value can change during the lifetime of the connection,
6646 /// but should remain stable across consecutive calls to [`send()`].
6647 ///
6648 /// [`set_max_send_udp_payload_size()`]:
6649 /// struct.Config.html#method.set_max_send_udp_payload_size
6650 /// [`send()`]: struct.Connection.html#method.send
6651 pub fn max_send_udp_payload_size(&self) -> usize {
6652 let max_datagram_size = self
6653 .paths
6654 .get_active()
6655 .ok()
6656 .map(|p| p.recovery.max_datagram_size());
6657
6658 if let Some(max_datagram_size) = max_datagram_size {
6659 if self.is_established() {
6660 // We cap the maximum packet size to 16KB or so, so that it can be
6661 // always encoded with a 2-byte varint.
6662 return cmp::min(16383, max_datagram_size);
6663 }
6664 }
6665
6666 // Allow for 1200 bytes (minimum QUIC packet size) during the
6667 // handshake.
6668 MIN_CLIENT_INITIAL_LEN
6669 }
6670
6671 /// Schedule an ack-eliciting packet on the active path.
6672 ///
6673 /// QUIC packets might not contain ack-eliciting frames during normal
6674 /// operating conditions. If the packet would already contain
6675 /// ack-eliciting frames, this method does not change any behavior.
6676 /// However, if the packet would not ordinarily contain ack-eliciting
6677 /// frames, this method ensures that a PING frame sent.
6678 ///
6679 /// Calling this method multiple times before [`send()`] has no effect.
6680 ///
6681 /// [`send()`]: struct.Connection.html#method.send
6682 pub fn send_ack_eliciting(&mut self) -> Result<()> {
6683 if self.is_closed() || self.is_draining() {
6684 return Ok(());
6685 }
6686 self.paths.get_active_mut()?.needs_ack_eliciting = true;
6687 Ok(())
6688 }
6689
6690 /// Schedule an ack-eliciting packet on the specified path.
6691 ///
6692 /// See [`send_ack_eliciting()`] for more detail. [`InvalidState`] is
6693 /// returned if there is no record of the path.
6694 ///
6695 /// [`send_ack_eliciting()`]: struct.Connection.html#method.send_ack_eliciting
6696 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
6697 pub fn send_ack_eliciting_on_path(
6698 &mut self, local: SocketAddr, peer: SocketAddr,
6699 ) -> Result<()> {
6700 if self.is_closed() || self.is_draining() {
6701 return Ok(());
6702 }
6703 let path_id = self
6704 .paths
6705 .path_id_from_addrs(&(local, peer))
6706 .ok_or(Error::InvalidState)?;
6707 self.paths.get_mut(path_id)?.needs_ack_eliciting = true;
6708 Ok(())
6709 }
6710
6711 /// Reads the first received DATAGRAM.
6712 ///
6713 /// On success the DATAGRAM's data is returned along with its size.
6714 ///
6715 /// [`Done`] is returned if there is no data to read.
6716 ///
6717 /// [`BufferTooShort`] is returned if the provided buffer is too small for
6718 /// the DATAGRAM.
6719 ///
6720 /// [`Done`]: enum.Error.html#variant.Done
6721 /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
6722 ///
6723 /// ## Examples:
6724 ///
6725 /// ```no_run
6726 /// # let mut buf = [0; 512];
6727 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6728 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6729 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6730 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6731 /// # let local = socket.local_addr().unwrap();
6732 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6733 /// let mut dgram_buf = [0; 512];
6734 /// while let Ok((len)) = conn.dgram_recv(&mut dgram_buf) {
6735 /// println!("Got {} bytes of DATAGRAM", len);
6736 /// }
6737 /// # Ok::<(), quiche::Error>(())
6738 /// ```
6739 #[inline]
6740 pub fn dgram_recv(&mut self, buf: &mut [u8]) -> Result<usize> {
6741 match self.dgram_recv_queue.pop() {
6742 Some(d) => {
6743 if d.as_ref().len() > buf.len() {
6744 return Err(Error::BufferTooShort);
6745 }
6746 let len = d.as_ref().len();
6747
6748 buf[..len].copy_from_slice(d.as_ref());
6749 Ok(len)
6750 },
6751
6752 None => Err(Error::Done),
6753 }
6754 }
6755
6756 /// Reads the first received DATAGRAM.
6757 ///
6758 /// This is the same as [`dgram_recv()`] but returns the DATAGRAM as an
6759 /// owned buffer instead of copying into the provided buffer.
6760 ///
6761 /// [`dgram_recv()`]: struct.Connection.html#method.dgram_recv
6762 #[inline]
6763 pub fn dgram_recv_buf(&mut self) -> Result<F::DgramBuf> {
6764 self.dgram_recv_queue.pop().ok_or(Error::Done)
6765 }
6766
    /// Reads the first received DATAGRAM without removing it from the queue.
    ///
    /// On success the DATAGRAM's data is returned along with the actual number
    /// of bytes peeked. The requested length cannot exceed the DATAGRAM's
    /// actual length.
    ///
    /// [`Done`] is returned if there is no data to read.
    ///
    /// [`BufferTooShort`] is returned if the provided buffer is smaller than
    /// the number of bytes to peek.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
    #[inline]
    pub fn dgram_recv_peek(&self, buf: &mut [u8], len: usize) -> Result<usize> {
        self.dgram_recv_queue.peek_front_bytes(buf, len)
    }
6784
6785 /// Returns the length of the first stored DATAGRAM.
6786 #[inline]
6787 pub fn dgram_recv_front_len(&self) -> Option<usize> {
6788 self.dgram_recv_queue.peek_front_len()
6789 }
6790
6791 /// Returns the number of items in the DATAGRAM receive queue.
6792 #[inline]
6793 pub fn dgram_recv_queue_len(&self) -> usize {
6794 self.dgram_recv_queue.len()
6795 }
6796
6797 /// Returns the total size of all items in the DATAGRAM receive queue.
6798 #[inline]
6799 pub fn dgram_recv_queue_byte_size(&self) -> usize {
6800 self.dgram_recv_queue.byte_size()
6801 }
6802
6803 /// Returns the number of items in the DATAGRAM send queue.
6804 #[inline]
6805 pub fn dgram_send_queue_len(&self) -> usize {
6806 self.dgram_send_queue.len()
6807 }
6808
6809 /// Returns the total size of all items in the DATAGRAM send queue.
6810 #[inline]
6811 pub fn dgram_send_queue_byte_size(&self) -> usize {
6812 self.dgram_send_queue.byte_size()
6813 }
6814
6815 /// Returns whether or not the DATAGRAM send queue is full.
6816 #[inline]
6817 pub fn is_dgram_send_queue_full(&self) -> bool {
6818 self.dgram_send_queue.is_full()
6819 }
6820
6821 /// Returns whether or not the DATAGRAM recv queue is full.
6822 #[inline]
6823 pub fn is_dgram_recv_queue_full(&self) -> bool {
6824 self.dgram_recv_queue.is_full()
6825 }
6826
6827 /// Sends data in a DATAGRAM frame.
6828 ///
6829 /// [`Done`] is returned if no data was written.
6830 /// [`InvalidState`] is returned if the peer does not support DATAGRAM.
6831 /// [`BufferTooShort`] is returned if the DATAGRAM frame length is larger
6832 /// than peer's supported DATAGRAM frame length. Use
6833 /// [`dgram_max_writable_len()`] to get the largest supported DATAGRAM
6834 /// frame length.
6835 ///
6836 /// Note that there is no flow control of DATAGRAM frames, so in order to
6837 /// avoid buffering an infinite amount of frames we apply an internal
6838 /// limit.
6839 ///
6840 /// [`Done`]: enum.Error.html#variant.Done
6841 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
6842 /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
6843 /// [`dgram_max_writable_len()`]:
6844 /// struct.Connection.html#method.dgram_max_writable_len
6845 ///
6846 /// ## Examples:
6847 ///
6848 /// ```no_run
6849 /// # let mut buf = [0; 512];
6850 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6851 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6852 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6853 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6854 /// # let local = socket.local_addr().unwrap();
6855 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6856 /// conn.dgram_send(b"hello")?;
6857 /// # Ok::<(), quiche::Error>(())
6858 /// ```
6859 pub fn dgram_send(&mut self, buf: &[u8]) -> Result<()> {
6860 self.dgram_send_buf(F::dgram_buf_from_slice(buf))
6861 }
6862
6863 /// Sends data in a DATAGRAM frame.
6864 ///
6865 /// This is the same as [`dgram_send()`] but takes an owned buffer
6866 /// instead of a slice and avoids copying.
6867 ///
6868 /// [`dgram_send()`]: struct.Connection.html#method.dgram_send
6869 pub fn dgram_send_buf(&mut self, buf: F::DgramBuf) -> Result<()> {
6870 let max_payload_len = match self.dgram_max_writable_len() {
6871 Some(v) => v,
6872
6873 None => return Err(Error::InvalidState),
6874 };
6875
6876 if buf.as_ref().len() > max_payload_len {
6877 return Err(Error::BufferTooShort);
6878 }
6879
6880 self.dgram_send_queue.push(buf)?;
6881
6882 let active_path = self.paths.get_active_mut()?;
6883
6884 if self.dgram_send_queue.byte_size() >
6885 active_path.recovery.cwnd_available()
6886 {
6887 active_path.recovery.update_app_limited(false);
6888 }
6889
6890 Ok(())
6891 }
6892
6893 /// Purges queued outgoing DATAGRAMs matching the predicate.
6894 ///
6895 /// In other words, remove all elements `e` such that `f(&e)` returns true.
6896 ///
6897 /// ## Examples:
6898 /// ```no_run
6899 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6900 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6901 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6902 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6903 /// # let local = socket.local_addr().unwrap();
6904 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6905 /// conn.dgram_send(b"hello")?;
6906 /// conn.dgram_purge_outgoing(&|d: &[u8]| -> bool { d[0] == 0 });
6907 /// # Ok::<(), quiche::Error>(())
6908 /// ```
6909 #[inline]
6910 pub fn dgram_purge_outgoing<FN: Fn(&[u8]) -> bool>(&mut self, f: FN) {
6911 self.dgram_send_queue.purge(f);
6912 }
6913
6914 /// Returns the maximum DATAGRAM payload that can be sent.
6915 ///
6916 /// [`None`] is returned if the peer hasn't advertised a maximum DATAGRAM
6917 /// frame size.
6918 ///
6919 /// ## Examples:
6920 ///
6921 /// ```no_run
6922 /// # let mut buf = [0; 512];
6923 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6924 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6925 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6926 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6927 /// # let local = socket.local_addr().unwrap();
6928 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6929 /// if let Some(payload_size) = conn.dgram_max_writable_len() {
6930 /// if payload_size > 5 {
6931 /// conn.dgram_send(b"hello")?;
6932 /// }
6933 /// }
6934 /// # Ok::<(), quiche::Error>(())
6935 /// ```
6936 #[inline]
6937 pub fn dgram_max_writable_len(&self) -> Option<usize> {
6938 match self.peer_transport_params.max_datagram_frame_size {
6939 None => None,
6940 Some(peer_frame_len) => {
6941 let dcid = self.destination_id();
6942 // Start from the maximum packet size...
6943 let mut max_len = self.max_send_udp_payload_size();
6944 // ...subtract the Short packet header overhead...
6945 // (1 byte of pkt_len + len of dcid)
6946 max_len = max_len.saturating_sub(1 + dcid.len());
6947 // ...subtract the packet number (max len)...
6948 max_len = max_len.saturating_sub(packet::MAX_PKT_NUM_LEN);
6949 // ...subtract the crypto overhead...
6950 max_len = max_len.saturating_sub(
6951 self.crypto_ctx[packet::Epoch::Application]
6952 .crypto_overhead()?,
6953 );
6954 // ...clamp to what peer can support...
6955 max_len = cmp::min(peer_frame_len as usize, max_len);
6956 // ...subtract frame overhead, checked for underflow.
6957 // (1 byte of frame type + len of length )
6958 max_len.checked_sub(1 + frame::MAX_DGRAM_OVERHEAD)
6959 },
6960 }
6961 }
6962
6963 fn dgram_enabled(&self) -> bool {
6964 self.local_transport_params
6965 .max_datagram_frame_size
6966 .is_some()
6967 }
6968
6969 /// Returns when the next timeout event will occur.
6970 ///
6971 /// Once the timeout Instant has been reached, the [`on_timeout()`] method
6972 /// should be called. A timeout of `None` means that the timer should be
6973 /// disarmed.
6974 ///
6975 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
6976 pub fn timeout_instant(&self) -> Option<Instant> {
6977 if self.is_closed() {
6978 return None;
6979 }
6980
6981 if self.is_draining() {
6982 // Draining timer takes precedence over all other timers. If it is
6983 // set it means the connection is closing so there's no point in
6984 // processing the other timers.
6985 self.draining_timer
6986 } else {
6987 // Use the lowest timer value (i.e. "sooner") among idle and loss
6988 // detection timers. If they are both unset (i.e. `None`) then the
6989 // result is `None`, but if at least one of them is set then a
6990 // `Some(...)` value is returned.
6991 let path_timer = self
6992 .paths
6993 .iter()
6994 .filter_map(|(_, p)| p.recovery.loss_detection_timer())
6995 .min();
6996
6997 let key_update_timer = self.crypto_ctx[packet::Epoch::Application]
6998 .key_update
6999 .as_ref()
7000 .map(|key_update| key_update.timer);
7001
7002 let timers = [self.idle_timer, path_timer, key_update_timer];
7003
7004 timers.iter().filter_map(|&x| x).min()
7005 }
7006 }
7007
7008 /// Returns the amount of time until the next timeout event.
7009 ///
7010 /// Once the given duration has elapsed, the [`on_timeout()`] method should
7011 /// be called. A timeout of `None` means that the timer should be disarmed.
7012 ///
7013 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
7014 pub fn timeout(&self) -> Option<Duration> {
7015 self.timeout_instant().map(|timeout| {
7016 let now = Instant::now();
7017
7018 if timeout <= now {
7019 Duration::ZERO
7020 } else {
7021 timeout.duration_since(now)
7022 }
7023 })
7024 }
7025
    /// Processes a timeout event.
    ///
    /// If no timeout has occurred it does nothing.
    ///
    /// Timers are checked in priority order: draining, idle, key update
    /// discard, then per-path loss detection. Finally, failed path
    /// validations are notified and, if the active path failed, a
    /// replacement path is selected (or the connection is closed).
    pub fn on_timeout(&mut self) {
        let now = Instant::now();

        if let Some(draining_timer) = self.draining_timer {
            if draining_timer <= now {
                trace!("{} draining timeout expired", self.trace_id);

                self.mark_closed();
            }

            // Draining timer takes precedence over all other timers. If it is
            // set it means the connection is closing so there's no point in
            // processing the other timers.
            return;
        }

        if let Some(timer) = self.idle_timer {
            if timer <= now {
                trace!("{} idle timeout expired", self.trace_id);

                // An expired idle timer terminates the connection silently;
                // `timed_out` lets the application distinguish this from a
                // regular close.
                self.mark_closed();
                self.timed_out = true;
                return;
            }
        }

        if let Some(timer) = self.crypto_ctx[packet::Epoch::Application]
            .key_update
            .as_ref()
            .map(|key_update| key_update.timer)
        {
            if timer <= now {
                // Discard previous key once key update timer expired.
                let _ = self.crypto_ctx[packet::Epoch::Application]
                    .key_update
                    .take();
            }
        }

        let handshake_status = self.handshake_status();

        // Run loss detection on every path whose timer has fired, and
        // accumulate loss statistics into the connection-wide counters.
        for (_, p) in self.paths.iter_mut() {
            if let Some(timer) = p.recovery.loss_detection_timer() {
                if timer <= now {
                    trace!("{} loss detection timeout expired", self.trace_id);

                    let OnLossDetectionTimeoutOutcome {
                        lost_packets,
                        lost_bytes,
                    } = p.on_loss_detection_timeout(
                        handshake_status,
                        now,
                        self.is_server,
                        &self.trace_id,
                    );

                    self.lost_count += lost_packets;
                    self.lost_bytes += lost_bytes as u64;

                    qlog_with_type!(QLOG_METRICS, self.qlog, q, {
                        p.recovery.maybe_qlog(q, now);
                    });
                }
            }
        }

        // Notify timeout events to the application.
        self.paths.notify_failed_validations();

        // If the active path failed, try to find a new candidate.
        if self.paths.get_active_path_id().is_err() {
            match self.paths.find_candidate_path() {
                Some(pid) => {
                    if self.set_active_path(pid, now).is_err() {
                        // The connection cannot continue.
                        self.mark_closed();
                    }
                },

                // The connection cannot continue.
                None => {
                    self.mark_closed();
                },
            }
        }
    }
7115
7116 /// Requests the stack to perform path validation of the proposed 4-tuple.
7117 ///
7118 /// Probing new paths requires spare Connection IDs at both the host and the
7119 /// peer sides. If it is not the case, it raises an [`OutOfIdentifiers`].
7120 ///
7121 /// The probing of new addresses can only be done by the client. The server
7122 /// can only probe network paths that were previously advertised by
7123 /// [`PathEvent::New`]. If the server tries to probe such an unseen network
7124 /// path, this call raises an [`InvalidState`].
7125 ///
7126 /// The caller might also want to probe an existing path. In such case, it
7127 /// triggers a PATH_CHALLENGE frame, but it does not require spare CIDs.
7128 ///
7129 /// A server always probes a new path it observes. Calling this method is
7130 /// hence not required to validate a new path. However, a server can still
7131 /// request an additional path validation of the proposed 4-tuple.
7132 ///
7133 /// Calling this method several times before calling [`send()`] or
7134 /// [`send_on_path()`] results in a single probe being generated. An
7135 /// application wanting to send multiple in-flight probes must call this
7136 /// method again after having sent packets.
7137 ///
7138 /// Returns the Destination Connection ID sequence number associated to that
7139 /// path.
7140 ///
7141 /// [`PathEvent::New`]: enum.PathEvent.html#variant.New
7142 /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
7143 /// [`InvalidState`]: enum.Error.html#InvalidState
7144 /// [`send()`]: struct.Connection.html#method.send
7145 /// [`send_on_path()`]: struct.Connection.html#method.send_on_path
7146 pub fn probe_path(
7147 &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
7148 ) -> Result<u64> {
7149 // We may want to probe an existing path.
7150 let pid = match self.paths.path_id_from_addrs(&(local_addr, peer_addr)) {
7151 Some(pid) => pid,
7152 None => self.create_path_on_client(local_addr, peer_addr)?,
7153 };
7154
7155 let path = self.paths.get_mut(pid)?;
7156 path.request_validation();
7157
7158 path.active_dcid_seq.ok_or(Error::InvalidState)
7159 }
7160
7161 /// Migrates the connection to a new local address `local_addr`.
7162 ///
7163 /// The behavior is similar to [`migrate()`], with the nuance that the
7164 /// connection only changes the local address, but not the peer one.
7165 ///
7166 /// See [`migrate()`] for the full specification of this method.
7167 ///
7168 /// [`migrate()`]: struct.Connection.html#method.migrate
7169 pub fn migrate_source(&mut self, local_addr: SocketAddr) -> Result<u64> {
7170 let peer_addr = self.paths.get_active()?.peer_addr();
7171 self.migrate(local_addr, peer_addr)
7172 }
7173
    /// Migrates the connection over the given network path between `local_addr`
    /// and `peer_addr`.
    ///
    /// Connection migration can only be initiated by the client. Calling this
    /// method as a server returns [`InvalidState`].
    ///
    /// To initiate voluntary migration, there should be enough Connection IDs
    /// at both sides. If this requirement is not satisfied, this call returns
    /// [`OutOfIdentifiers`].
    ///
    /// Returns the Destination Connection ID associated to that migrated path.
    ///
    /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
    /// [`InvalidState`]: enum.Error.html#InvalidState
    pub fn migrate(
        &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
    ) -> Result<u64> {
        if self.is_server {
            return Err(Error::InvalidState);
        }

        // If the path already exists, mark it as the active one.
        let (pid, dcid_seq) = if let Some(pid) =
            self.paths.path_id_from_addrs(&(local_addr, peer_addr))
        {
            let path = self.paths.get_mut(pid)?;

            // If it is already active, do nothing.
            if path.active() {
                return path.active_dcid_seq.ok_or(Error::OutOfIdentifiers);
            }

            // Ensures that a Source Connection ID has been dedicated to this
            // path, or a free one is available. This is only required if the
            // host uses non-zero length Source Connection IDs.
            if !self.ids.zero_length_scid() &&
                path.active_scid_seq.is_none() &&
                self.ids.available_scids() == 0
            {
                return Err(Error::OutOfIdentifiers);
            }

            // Ensures that the migrated path has a Destination Connection ID.
            let dcid_seq = if let Some(dcid_seq) = path.active_dcid_seq {
                dcid_seq
            } else {
                // No DCID assigned yet: claim the lowest unused one and
                // link it to this path before activating it.
                let dcid_seq = self
                    .ids
                    .lowest_available_dcid_seq()
                    .ok_or(Error::OutOfIdentifiers)?;

                self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
                path.active_dcid_seq = Some(dcid_seq);

                dcid_seq
            };

            (pid, dcid_seq)
        } else {
            // Unknown 4-tuple: create a brand new path; creation already
            // assigns it a DCID, so its absence here is an internal error.
            let pid = self.create_path_on_client(local_addr, peer_addr)?;

            let dcid_seq = self
                .paths
                .get(pid)?
                .active_dcid_seq
                .ok_or(Error::InvalidState)?;

            (pid, dcid_seq)
        };

        // Change the active path.
        self.set_active_path(pid, Instant::now())?;

        Ok(dcid_seq)
    }
7249
7250 /// Provides additional source Connection IDs that the peer can use to reach
7251 /// this host.
7252 ///
7253 /// This triggers sending NEW_CONNECTION_ID frames if the provided Source
7254 /// Connection ID is not already present. In the case the caller tries to
7255 /// reuse a Connection ID with a different reset token, this raises an
7256 /// `InvalidState`.
7257 ///
7258 /// At any time, the peer cannot have more Destination Connection IDs than
7259 /// the maximum number of active Connection IDs it negotiated. In such case
7260 /// (i.e., when [`scids_left()`] returns 0), if the host agrees to
7261 /// request the removal of previous connection IDs, it sets the
7262 /// `retire_if_needed` parameter. Otherwise, an [`IdLimit`] is returned.
7263 ///
7264 /// Note that setting `retire_if_needed` does not prevent this function from
7265 /// returning an [`IdLimit`] in the case the caller wants to retire still
7266 /// unannounced Connection IDs.
7267 ///
7268 /// The caller is responsible for ensuring that the provided `scid` is not
7269 /// repeated several times over the connection. quiche ensures that as long
7270 /// as the provided Connection ID is still in use (i.e., not retired), it
7271 /// does not assign a different sequence number.
7272 ///
7273 /// Note that if the host uses zero-length Source Connection IDs, it cannot
7274 /// advertise Source Connection IDs and calling this method returns an
7275 /// [`InvalidState`].
7276 ///
7277 /// Returns the sequence number associated to the provided Connection ID.
7278 ///
7279 /// [`scids_left()`]: struct.Connection.html#method.scids_left
7280 /// [`IdLimit`]: enum.Error.html#IdLimit
7281 /// [`InvalidState`]: enum.Error.html#InvalidState
7282 pub fn new_scid(
7283 &mut self, scid: &ConnectionId, reset_token: u128, retire_if_needed: bool,
7284 ) -> Result<u64> {
7285 self.ids.new_scid(
7286 scid.to_vec().into(),
7287 Some(reset_token),
7288 true,
7289 None,
7290 retire_if_needed,
7291 )
7292 }
7293
7294 /// Returns the number of source Connection IDs that are active. This is
7295 /// only meaningful if the host uses non-zero length Source Connection IDs.
7296 pub fn active_scids(&self) -> usize {
7297 self.ids.active_source_cids()
7298 }
7299
7300 /// Returns the number of source Connection IDs that should be provided
7301 /// to the peer without exceeding the limit it advertised.
7302 ///
7303 /// This will automatically limit the number of Connection IDs to the
7304 /// minimum between the locally configured active connection ID limit,
7305 /// and the one sent by the peer.
7306 ///
7307 /// To obtain the maximum possible value allowed by the peer an application
7308 /// can instead inspect the [`peer_active_conn_id_limit`] value.
7309 ///
7310 /// [`peer_active_conn_id_limit`]: struct.Stats.html#structfield.peer_active_conn_id_limit
7311 #[inline]
7312 pub fn scids_left(&self) -> usize {
7313 let max_active_source_cids = cmp::min(
7314 self.peer_transport_params.active_conn_id_limit,
7315 self.local_transport_params.active_conn_id_limit,
7316 ) as usize;
7317
7318 max_active_source_cids - self.active_scids()
7319 }
7320
    /// Requests the retirement of the destination Connection ID used by the
    /// host to reach its peer.
    ///
    /// This triggers sending RETIRE_CONNECTION_ID frames.
    ///
    /// If the application tries to retire a non-existing Destination Connection
    /// ID sequence number, or if it uses zero-length Destination Connection ID,
    /// this method returns an [`InvalidState`].
    ///
    /// At any time, the host must have at least one Destination ID. If the
    /// application tries to retire the last one, or if the caller tries to
    /// retire the destination Connection ID used by the current active path
    /// while having neither spare Destination Connection IDs nor validated
    /// network paths, this method returns an [`OutOfIdentifiers`]. This
    /// behavior prevents the caller from stalling the connection due to the
    /// lack of validated path to send non-probing packets.
    ///
    /// [`InvalidState`]: enum.Error.html#InvalidState
    /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
    pub fn retire_dcid(&mut self, dcid_seq: u64) -> Result<()> {
        if self.ids.zero_length_dcid() {
            return Err(Error::InvalidState);
        }

        let active_path_dcid_seq = self
            .paths
            .get_active()?
            .active_dcid_seq
            .ok_or(Error::InvalidState)?;

        let active_path_id = self.paths.get_active_path_id()?;

        // Refuse to retire the active path's DCID when there is neither a
        // spare DCID to replace it nor another usable path to fall back on,
        // as that would leave the connection unable to send.
        if active_path_dcid_seq == dcid_seq &&
            self.ids.lowest_available_dcid_seq().is_none() &&
            !self
                .paths
                .iter()
                .any(|(pid, p)| pid != active_path_id && p.usable())
        {
            return Err(Error::OutOfIdentifiers);
        }

        if let Some(pid) = self.ids.retire_dcid(dcid_seq)? {
            // The retired Destination CID was associated to a given path. Let's
            // find an available DCID to associate to that path.
            let path = self.paths.get_mut(pid)?;
            let dcid_seq = self.ids.lowest_available_dcid_seq();

            if let Some(dcid_seq) = dcid_seq {
                self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
            }

            // May be `None`, leaving the path without an active DCID until
            // a new one becomes available.
            path.active_dcid_seq = dcid_seq;
        }

        Ok(())
    }
7378
7379 /// Processes path-specific events.
7380 ///
7381 /// On success it returns a [`PathEvent`], or `None` when there are no
7382 /// events to report. Please refer to [`PathEvent`] for the exhaustive event
7383 /// list.
7384 ///
7385 /// Note that all events are edge-triggered, meaning that once reported they
7386 /// will not be reported again by calling this method again, until the event
7387 /// is re-armed.
7388 ///
7389 /// [`PathEvent`]: enum.PathEvent.html
7390 pub fn path_event_next(&mut self) -> Option<PathEvent> {
7391 self.paths.pop_event()
7392 }
7393
7394 /// Returns the number of source Connection IDs that are retired.
7395 pub fn retired_scids(&self) -> usize {
7396 self.ids.retired_source_cids()
7397 }
7398
7399 /// Returns a source `ConnectionId` that has been retired.
7400 ///
7401 /// On success it returns a [`ConnectionId`], or `None` when there are no
7402 /// more retired connection IDs.
7403 ///
7404 /// [`ConnectionId`]: struct.ConnectionId.html
7405 pub fn retired_scid_next(&mut self) -> Option<ConnectionId<'static>> {
7406 self.ids.pop_retired_scid()
7407 }
7408
7409 /// Returns the number of spare Destination Connection IDs, i.e.,
7410 /// Destination Connection IDs that are still unused.
7411 ///
7412 /// Note that this function returns 0 if the host uses zero length
7413 /// Destination Connection IDs.
7414 pub fn available_dcids(&self) -> usize {
7415 self.ids.available_dcids()
7416 }
7417
7418 /// Returns an iterator over destination `SockAddr`s whose association
7419 /// with `from` forms a known QUIC path on which packets can be sent to.
7420 ///
7421 /// This function is typically used in combination with [`send_on_path()`].
7422 ///
7423 /// Note that the iterator includes all the possible combination of
7424 /// destination `SockAddr`s, even those whose sending is not required now.
7425 /// In other words, this is another way for the application to recall from
7426 /// past [`PathEvent::New`] events.
7427 ///
7428 /// [`PathEvent::New`]: enum.PathEvent.html#variant.New
7429 /// [`send_on_path()`]: struct.Connection.html#method.send_on_path
7430 ///
7431 /// ## Examples:
7432 ///
7433 /// ```no_run
7434 /// # let mut out = [0; 512];
7435 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
7436 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
7437 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
7438 /// # let local = socket.local_addr().unwrap();
7439 /// # let peer = "127.0.0.1:1234".parse().unwrap();
7440 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
7441 /// // Iterate over possible destinations for the given local `SockAddr`.
7442 /// for dest in conn.paths_iter(local) {
7443 /// loop {
7444 /// let (write, send_info) =
7445 /// match conn.send_on_path(&mut out, Some(local), Some(dest)) {
7446 /// Ok(v) => v,
7447 ///
7448 /// Err(quiche::Error::Done) => {
7449 /// // Done writing for this destination.
7450 /// break;
7451 /// },
7452 ///
7453 /// Err(e) => {
7454 /// // An error occurred, handle it.
7455 /// break;
7456 /// },
7457 /// };
7458 ///
7459 /// socket.send_to(&out[..write], &send_info.to).unwrap();
7460 /// }
7461 /// }
7462 /// # Ok::<(), quiche::Error>(())
7463 /// ```
7464 #[inline]
7465 pub fn paths_iter(&self, from: SocketAddr) -> SocketAddrIter {
7466 // Instead of trying to identify whether packets will be sent on the
7467 // given 4-tuple, simply filter paths that cannot be used.
7468 SocketAddrIter {
7469 sockaddrs: self
7470 .paths
7471 .iter()
7472 .filter(|(_, p)| p.active_dcid_seq.is_some())
7473 .filter(|(_, p)| p.usable() || p.probing_required())
7474 .filter(|(_, p)| p.local_addr() == from)
7475 .map(|(_, p)| p.peer_addr())
7476 .collect(),
7477
7478 index: 0,
7479 }
7480 }
7481
7482 /// Closes the connection with the given error and reason.
7483 ///
7484 /// The `app` parameter specifies whether an application close should be
7485 /// sent to the peer. Otherwise a normal connection close is sent.
7486 ///
7487 /// If `app` is true but the connection is not in a state that is safe to
7488 /// send an application error (not established nor in early data), in
7489 /// accordance with [RFC
7490 /// 9000](https://www.rfc-editor.org/rfc/rfc9000.html#section-10.2.3-3), the
7491 /// error code is changed to APPLICATION_ERROR and the reason phrase is
7492 /// cleared.
7493 ///
7494 /// Returns [`Done`] if the connection had already been closed.
7495 ///
7496 /// Note that the connection will not be closed immediately. An application
7497 /// should continue calling the [`recv()`], [`send()`], [`timeout()`] and
7498 /// [`on_timeout()`] methods as normal, until the [`is_closed()`] method
7499 /// returns `true`.
7500 ///
7501 /// [`Done`]: enum.Error.html#variant.Done
7502 /// [`recv()`]: struct.Connection.html#method.recv
7503 /// [`send()`]: struct.Connection.html#method.send
7504 /// [`timeout()`]: struct.Connection.html#method.timeout
7505 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
7506 /// [`is_closed()`]: struct.Connection.html#method.is_closed
7507 pub fn close(&mut self, app: bool, err: u64, reason: &[u8]) -> Result<()> {
7508 if self.is_closed() || self.is_draining() {
7509 return Err(Error::Done);
7510 }
7511
7512 if self.local_error.is_some() {
7513 return Err(Error::Done);
7514 }
7515
7516 let is_safe_to_send_app_data =
7517 self.is_established() || self.is_in_early_data();
7518
7519 if app && !is_safe_to_send_app_data {
7520 // Clear error information.
7521 self.local_error = Some(ConnectionError {
7522 is_app: false,
7523 error_code: 0x0c,
7524 reason: vec![],
7525 });
7526 } else {
7527 self.local_error = Some(ConnectionError {
7528 is_app: app,
7529 error_code: err,
7530 reason: reason.to_vec(),
7531 });
7532 }
7533
7534 // When no packet was successfully processed close connection immediately.
7535 if self.recv_count == 0 {
7536 self.mark_closed();
7537 }
7538
7539 Ok(())
7540 }
7541
7542 /// Returns a string uniquely representing the connection.
7543 ///
7544 /// This can be used for logging purposes to differentiate between multiple
7545 /// connections.
7546 #[inline]
7547 pub fn trace_id(&self) -> &str {
7548 &self.trace_id
7549 }
7550
7551 /// Returns the negotiated ALPN protocol.
7552 ///
7553 /// If no protocol has been negotiated, the returned value is empty.
7554 #[inline]
7555 pub fn application_proto(&self) -> &[u8] {
7556 self.alpn.as_ref()
7557 }
7558
    /// Returns the server name requested by the client.
    ///
    /// Delegates to the TLS handshake state; `None` when the client did not
    /// provide a server name.
    #[inline]
    pub fn server_name(&self) -> Option<&str> {
        self.handshake.server_name()
    }
7564
    /// Returns the peer's leaf certificate (if any) as a DER-encoded buffer.
    ///
    /// Delegates to the TLS handshake state; `None` when the peer presented
    /// no certificate.
    #[inline]
    pub fn peer_cert(&self) -> Option<&[u8]> {
        self.handshake.peer_cert()
    }
7570
    /// Returns the peer's certificate chain (if any) as a vector of DER-encoded
    /// buffers.
    ///
    /// The certificate at index 0 is the peer's leaf certificate, the other
    /// certificates (if any) are the chain certificate authorities used to
    /// sign the leaf certificate.
    #[inline]
    pub fn peer_cert_chain(&self) -> Option<Vec<&[u8]>> {
        self.handshake.peer_cert_chain()
    }
7581
    /// Returns the serialized cryptographic session for the connection.
    ///
    /// This can be used by a client to cache a connection's session, and resume
    /// it later using the [`set_session()`] method.
    ///
    /// Returns `None` until a session ticket has been received/stored.
    ///
    /// [`set_session()`]: struct.Connection.html#method.set_session
    #[inline]
    pub fn session(&self) -> Option<&[u8]> {
        self.session.as_deref()
    }
7592
7593 /// Returns the source connection ID.
7594 ///
7595 /// When there are multiple IDs, and if there is an active path, the ID used
7596 /// on that path is returned. Otherwise the oldest ID is returned.
7597 ///
7598 /// Note that the value returned can change throughout the connection's
7599 /// lifetime.
7600 #[inline]
7601 pub fn source_id(&self) -> ConnectionId<'_> {
7602 if let Ok(path) = self.paths.get_active() {
7603 if let Some(active_scid_seq) = path.active_scid_seq {
7604 if let Ok(e) = self.ids.get_scid(active_scid_seq) {
7605 return ConnectionId::from_ref(e.cid.as_ref());
7606 }
7607 }
7608 }
7609
7610 let e = self.ids.oldest_scid();
7611 ConnectionId::from_ref(e.cid.as_ref())
7612 }
7613
    /// Returns all active source connection IDs.
    ///
    /// An iterator is returned for all active IDs (i.e. ones that have not
    /// been explicitly retired yet).
    #[inline]
    pub fn source_ids(&self) -> impl Iterator<Item = &ConnectionId<'_>> {
        self.ids.scids_iter()
    }
7622
7623 /// Returns the destination connection ID.
7624 ///
7625 /// Note that the value returned can change throughout the connection's
7626 /// lifetime.
7627 #[inline]
7628 pub fn destination_id(&self) -> ConnectionId<'_> {
7629 if let Ok(path) = self.paths.get_active() {
7630 if let Some(active_dcid_seq) = path.active_dcid_seq {
7631 if let Ok(e) = self.ids.get_dcid(active_dcid_seq) {
7632 return ConnectionId::from_ref(e.cid.as_ref());
7633 }
7634 }
7635 }
7636
7637 let e = self.ids.oldest_dcid();
7638 ConnectionId::from_ref(e.cid.as_ref())
7639 }
7640
7641 /// Returns the PMTU for the active path if it exists.
7642 ///
7643 /// This requires no additonal packets to be sent but simply checks if PMTUD
7644 /// has completed and has found a valid PMTU.
7645 #[inline]
7646 pub fn pmtu(&self) -> Option<usize> {
7647 if let Ok(path) = self.paths.get_active() {
7648 path.pmtud.as_ref().and_then(|pmtud| pmtud.get_pmtu())
7649 } else {
7650 None
7651 }
7652 }
7653
7654 /// Revalidates the PMTU for the active path by sending a new probe packet
7655 /// of PMTU size. If the probe is dropped PMTUD will restart and find a new
7656 /// valid PMTU.
7657 #[inline]
7658 pub fn revalidate_pmtu(&mut self) {
7659 if let Ok(active_path) = self.paths.get_active_mut() {
7660 if let Some(pmtud) = active_path.pmtud.as_mut() {
7661 pmtud.revalidate_pmtu();
7662 }
7663 }
7664 }
7665
    /// Returns true if the connection handshake is complete.
    #[inline]
    pub fn is_established(&self) -> bool {
        self.handshake_completed
    }
7671
    /// Returns true if the connection is resumed.
    ///
    /// Delegates to the TLS handshake state (session resumption).
    #[inline]
    pub fn is_resumed(&self) -> bool {
        self.handshake.is_resumed()
    }
7677
    /// Returns true if the connection has a pending handshake that has
    /// progressed enough to send or receive early data.
    #[inline]
    pub fn is_in_early_data(&self) -> bool {
        self.handshake.is_in_early_data()
    }
7684
    /// Returns the early data reason for the connection.
    ///
    /// This status can be useful for logging and debugging. See [BoringSSL]
    /// documentation for a definition of the reasons.
    ///
    /// [BoringSSL]: https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#ssl_early_data_reason_t
    #[inline]
    pub fn early_data_reason(&self) -> u32 {
        self.handshake.early_data_reason()
    }
7695
7696 /// Returns whether there is stream or DATAGRAM data available to read.
7697 #[inline]
7698 pub fn is_readable(&self) -> bool {
7699 self.streams.has_readable() || self.dgram_recv_front_len().is_some()
7700 }
7701
7702 /// Returns whether the network path with local address `from` and remote
7703 /// address `peer` has been validated.
7704 ///
7705 /// If the 4-tuple does not exist over the connection, returns an
7706 /// [`InvalidState`].
7707 ///
7708 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
7709 pub fn is_path_validated(
7710 &self, from: SocketAddr, to: SocketAddr,
7711 ) -> Result<bool> {
7712 let pid = self
7713 .paths
7714 .path_id_from_addrs(&(from, to))
7715 .ok_or(Error::InvalidState)?;
7716
7717 Ok(self.paths.get(pid)?.validated())
7718 }
7719
    /// Returns true if the connection is draining.
    ///
    /// If this returns `true`, the connection object cannot yet be dropped, but
    /// no new application data can be sent or received. An application should
    /// continue calling the [`recv()`], [`timeout()`], and [`on_timeout()`]
    /// methods as normal, until the [`is_closed()`] method returns `true`.
    ///
    /// In contrast, once `is_draining()` returns `true`, calling [`send()`]
    /// is not required because no new outgoing packets will be generated.
    ///
    /// [`recv()`]: struct.Connection.html#method.recv
    /// [`send()`]: struct.Connection.html#method.send
    /// [`timeout()`]: struct.Connection.html#method.timeout
    /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
    /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn is_draining(&self) -> bool {
        // Draining state is tracked by the presence of the draining timer.
        self.draining_timer.is_some()
    }
7739
    /// Returns true if the connection is closed.
    ///
    /// If this returns true, the connection object can be dropped.
    #[inline]
    pub fn is_closed(&self) -> bool {
        self.closed
    }
7747
    /// Returns true if the connection was closed due to the idle timeout.
    #[inline]
    pub fn is_timed_out(&self) -> bool {
        self.timed_out
    }
7753
    /// Returns the error received from the peer, if any.
    ///
    /// Note that a `Some` return value does not necessarily imply
    /// [`is_closed()`] or any other connection state.
    ///
    /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn peer_error(&self) -> Option<&ConnectionError> {
        self.peer_error.as_ref()
    }
7764
    /// Returns the error [`close()`] was called with, or internally
    /// created quiche errors, if any.
    ///
    /// Note that a `Some` return value does not necessarily imply
    /// [`is_closed()`] or any other connection state.
    /// `Some` also does not guarantee that the error has been sent to
    /// or received by the peer.
    ///
    /// [`close()`]: struct.Connection.html#method.close
    /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn local_error(&self) -> Option<&ConnectionError> {
        self.local_error.as_ref()
    }
7779
    /// Collects and returns statistics about the connection.
    ///
    /// This is a snapshot of the connection-level counters; per-path
    /// statistics are available via [`path_stats()`].
    ///
    /// [`path_stats()`]: struct.Connection.html#method.path_stats
    #[inline]
    pub fn stats(&self) -> Stats {
        Stats {
            recv: self.recv_count,
            sent: self.sent_count,
            lost: self.lost_count,
            spurious_lost: self.spurious_lost_count,
            retrans: self.retrans_count,
            sent_bytes: self.sent_bytes,
            recv_bytes: self.recv_bytes,
            acked_bytes: self.acked_bytes,
            lost_bytes: self.lost_bytes,
            stream_retrans_bytes: self.stream_retrans_bytes,
            dgram_recv: self.dgram_recv_count,
            dgram_sent: self.dgram_sent_count,
            paths_count: self.paths.len(),
            reset_stream_count_local: self.reset_stream_local_count,
            stopped_stream_count_local: self.stopped_stream_local_count,
            reset_stream_count_remote: self.reset_stream_remote_count,
            stopped_stream_count_remote: self.stopped_stream_remote_count,
            data_blocked_sent_count: self.data_blocked_sent_count,
            stream_data_blocked_sent_count: self.stream_data_blocked_sent_count,
            data_blocked_recv_count: self.data_blocked_recv_count,
            stream_data_blocked_recv_count: self.stream_data_blocked_recv_count,
            streams_blocked_bidi_recv_count: self.streams_blocked_bidi_recv_count,
            streams_blocked_uni_recv_count: self.streams_blocked_uni_recv_count,
            path_challenge_rx_count: self.path_challenge_rx_count,
            amplification_limited_count: self.amplification_limited_count,
            bytes_in_flight_duration: self.bytes_in_flight_duration(),
            tx_buffered_state: self.tx_buffered_state,
        }
    }
7813
7814 /// Returns the sum of the durations when each path in the
7815 /// connection was actively sending bytes or waiting for acks.
7816 /// Note that this could result in a duration that is longer than
7817 /// the actual connection duration in cases where multiple paths
7818 /// are active for extended periods of time. In practice only 1
7819 /// path is typically active at a time.
7820 /// TODO revisit computation if in the future multiple paths are
7821 /// often active at the same time.
7822 fn bytes_in_flight_duration(&self) -> Duration {
7823 self.paths.iter().fold(Duration::ZERO, |acc, (_, path)| {
7824 acc + path.bytes_in_flight_duration()
7825 })
7826 }
7827
7828 /// Returns reference to peer's transport parameters. Returns `None` if we
7829 /// have not yet processed the peer's transport parameters.
7830 pub fn peer_transport_params(&self) -> Option<&TransportParams> {
7831 if !self.parsed_peer_transport_params {
7832 return None;
7833 }
7834
7835 Some(&self.peer_transport_params)
7836 }
7837
    /// Collects and returns statistics about each known path for the
    /// connection.
    ///
    /// One [`PathStats`] snapshot is yielded per known path.
    pub fn path_stats(&self) -> impl Iterator<Item = PathStats> + '_ {
        self.paths.iter().map(|(_, p)| p.stats())
    }
7843
    /// Returns whether or not this is a server-side connection.
    pub fn is_server(&self) -> bool {
        self.is_server
    }
7848
    /// Encodes the local transport parameters and hands them to the TLS
    /// handshake so they can be sent to the peer.
    fn encode_transport_params(&mut self) -> Result<()> {
        self.handshake.set_quic_transport_params(
            &self.local_transport_params,
            self.is_server,
        )
    }
7855
7856 fn parse_peer_transport_params(
7857 &mut self, peer_params: TransportParams,
7858 ) -> Result<()> {
7859 // Validate initial_source_connection_id.
7860 match &peer_params.initial_source_connection_id {
7861 Some(v) if v != &self.destination_id() =>
7862 return Err(Error::InvalidTransportParam),
7863
7864 Some(_) => (),
7865
7866 // initial_source_connection_id must be sent by
7867 // both endpoints.
7868 None => return Err(Error::InvalidTransportParam),
7869 }
7870
7871 // Validate original_destination_connection_id.
7872 if let Some(odcid) = &self.odcid {
7873 match &peer_params.original_destination_connection_id {
7874 Some(v) if v != odcid =>
7875 return Err(Error::InvalidTransportParam),
7876
7877 Some(_) => (),
7878
7879 // original_destination_connection_id must be
7880 // sent by the server.
7881 None if !self.is_server =>
7882 return Err(Error::InvalidTransportParam),
7883
7884 None => (),
7885 }
7886 }
7887
7888 // Validate retry_source_connection_id.
7889 if let Some(rscid) = &self.rscid {
7890 match &peer_params.retry_source_connection_id {
7891 Some(v) if v != rscid =>
7892 return Err(Error::InvalidTransportParam),
7893
7894 Some(_) => (),
7895
7896 // retry_source_connection_id must be sent by
7897 // the server.
7898 None => return Err(Error::InvalidTransportParam),
7899 }
7900 }
7901
7902 self.process_peer_transport_params(peer_params)?;
7903
7904 self.parsed_peer_transport_params = true;
7905
7906 Ok(())
7907 }
7908
7909 fn process_peer_transport_params(
7910 &mut self, peer_params: TransportParams,
7911 ) -> Result<()> {
7912 self.max_tx_data = peer_params.initial_max_data;
7913
7914 // Update send capacity.
7915 self.update_tx_cap();
7916
7917 self.streams
7918 .update_peer_max_streams_bidi(peer_params.initial_max_streams_bidi);
7919 self.streams
7920 .update_peer_max_streams_uni(peer_params.initial_max_streams_uni);
7921
7922 let max_ack_delay = Duration::from_millis(peer_params.max_ack_delay);
7923
7924 self.recovery_config.max_ack_delay = max_ack_delay;
7925
7926 let active_path = self.paths.get_active_mut()?;
7927
7928 active_path.recovery.update_max_ack_delay(max_ack_delay);
7929
7930 if active_path
7931 .pmtud
7932 .as_ref()
7933 .map(|pmtud| pmtud.should_probe())
7934 .unwrap_or(false)
7935 {
7936 active_path.recovery.pmtud_update_max_datagram_size(
7937 active_path
7938 .pmtud
7939 .as_mut()
7940 .expect("PMTUD existence verified above")
7941 .get_probe_size()
7942 .min(peer_params.max_udp_payload_size as usize),
7943 );
7944 } else {
7945 active_path.recovery.update_max_datagram_size(
7946 peer_params.max_udp_payload_size as usize,
7947 );
7948 }
7949
7950 // Record the max_active_conn_id parameter advertised by the peer.
7951 self.ids
7952 .set_source_conn_id_limit(peer_params.active_conn_id_limit);
7953
7954 self.peer_transport_params = peer_params;
7955
7956 Ok(())
7957 }
7958
    /// Continues the handshake.
    ///
    /// If the connection is already established, it does nothing.
    ///
    /// Drives the TLS state machine, applies any in-handshake configuration
    /// changed by TLS callbacks, and parses the peer's transport parameters
    /// as soon as they are available (possibly before handshake completion,
    /// to allow 0.5-RTT data).
    fn do_handshake(&mut self, now: Instant) -> Result<()> {
        // Extra data handed to the TLS callbacks; any changes they make are
        // read back out of `ex_data` after the handshake call returns.
        let mut ex_data = tls::ExData {
            application_protos: &self.application_protos,

            crypto_ctx: &mut self.crypto_ctx,

            session: &mut self.session,

            local_error: &mut self.local_error,

            keylog: self.keylog.as_mut(),

            trace_id: &self.trace_id,

            local_transport_params: self.local_transport_params.clone(),

            recovery_config: self.recovery_config,

            tx_cap_factor: self.tx_cap_factor,

            pmtud: None,

            is_server: self.is_server,

            use_initial_max_data_as_flow_control_win: false,
        };

        if self.handshake_completed {
            return self.handshake.process_post_handshake(&mut ex_data);
        }

        match self.handshake.do_handshake(&mut ex_data) {
            Ok(_) => (),

            // `Done` means the handshake needs more data before it can make
            // further progress, not that it failed.
            Err(Error::Done) => {
                // Apply in-handshake configuration from callbacks if the path's
                // Recovery module can still be reinitialized.
                if self
                    .paths
                    .get_active()
                    .map(|p| p.can_reinit_recovery())
                    .unwrap_or(false)
                {
                    if ex_data.recovery_config != self.recovery_config {
                        if let Ok(path) = self.paths.get_active_mut() {
                            self.recovery_config = ex_data.recovery_config;
                            path.reinit_recovery(&self.recovery_config);
                        }
                    }

                    if ex_data.tx_cap_factor != self.tx_cap_factor {
                        self.tx_cap_factor = ex_data.tx_cap_factor;
                    }

                    if let Some((discover, max_probes)) = ex_data.pmtud {
                        self.paths.set_discover_pmtu_on_existing_paths(
                            discover,
                            self.recovery_config.max_send_udp_payload_size,
                            max_probes,
                        );
                    }

                    if ex_data.local_transport_params !=
                        self.local_transport_params
                    {
                        self.streams.set_max_streams_bidi(
                            ex_data
                                .local_transport_params
                                .initial_max_streams_bidi,
                        );

                        self.local_transport_params =
                            ex_data.local_transport_params;
                    }
                }

                if ex_data.use_initial_max_data_as_flow_control_win {
                    self.enable_use_initial_max_data_as_flow_control_win();
                }

                // Try to parse transport parameters as soon as the first flight
                // of handshake data is processed.
                //
                // This is potentially dangerous as the handshake hasn't been
                // completed yet, though it's required to be able to send data
                // in 0.5 RTT.
                let raw_params = self.handshake.quic_transport_params();

                if !self.parsed_peer_transport_params && !raw_params.is_empty() {
                    let peer_params = TransportParams::decode(
                        raw_params,
                        self.is_server,
                        self.peer_transport_params_track_unknown,
                    )?;

                    self.parse_peer_transport_params(peer_params)?;
                }

                return Ok(());
            },

            Err(e) => return Err(e),
        };

        self.handshake_completed = self.handshake.is_completed();

        self.alpn = self.handshake.alpn_protocol().to_vec();

        // Parse transport parameters now, in case they weren't available
        // before the handshake call returned successfully.
        let raw_params = self.handshake.quic_transport_params();

        if !self.parsed_peer_transport_params && !raw_params.is_empty() {
            let peer_params = TransportParams::decode(
                raw_params,
                self.is_server,
                self.peer_transport_params_track_unknown,
            )?;

            self.parse_peer_transport_params(peer_params)?;
        }

        if self.handshake_completed {
            // The handshake is considered confirmed at the server when the
            // handshake completes, at which point we can also drop the
            // handshake epoch.
            if self.is_server {
                self.handshake_confirmed = true;

                self.drop_epoch_state(packet::Epoch::Handshake, now);
            }

            // Once the handshake is completed there's no point in processing
            // 0-RTT packets anymore, so clear the buffer now.
            self.undecryptable_pkts.clear();

            trace!("{} connection established: proto={:?} cipher={:?} curve={:?} sigalg={:?} resumed={} {:?}",
                   &self.trace_id,
                   std::str::from_utf8(self.application_proto()),
                   self.handshake.cipher(),
                   self.handshake.curve(),
                   self.handshake.sigalg(),
                   self.handshake.is_resumed(),
                   self.peer_transport_params);
        }

        Ok(())
    }
8108
    /// Use the value of the initial max_data / initial stream max_data setting
    /// as the initial flow control window for the connection and streams.
    /// The connection-level flow control window will only be changed if it
    /// hasn't been auto tuned yet. For streams: only newly created streams
    /// receive the new setting.
    fn enable_use_initial_max_data_as_flow_control_win(&mut self) {
        self.flow_control.set_window_if_not_tuned_yet(
            self.local_transport_params.initial_max_data,
        );
        self.streams
            .set_use_initial_max_data_as_flow_control_win(true);
    }
8121
    /// Selects the packet type for the next outgoing packet.
    ///
    /// Returns [`Error::Done`] when there is nothing to send for the path
    /// identified by `send_pid`.
    fn write_pkt_type(&self, send_pid: usize) -> Result<Type> {
        // On error send packet in the latest epoch available, but only send
        // 1-RTT ones when the handshake is completed.
        if self
            .local_error
            .as_ref()
            .is_some_and(|conn_err| !conn_err.is_app)
        {
            let epoch = match self.handshake.write_level() {
                crypto::Level::Initial => packet::Epoch::Initial,
                // CONNECTION_CLOSE is never sent in 0-RTT packets.
                crypto::Level::ZeroRTT => unreachable!(),
                crypto::Level::Handshake => packet::Epoch::Handshake,
                crypto::Level::OneRTT => packet::Epoch::Application,
            };

            if !self.handshake_confirmed {
                match epoch {
                    // Downgrade the epoch to Handshake as the handshake is not
                    // completed yet.
                    packet::Epoch::Application => return Ok(Type::Handshake),

                    // Downgrade the epoch to Initial as the remote peer might
                    // not be able to decrypt handshake packets yet.
                    packet::Epoch::Handshake
                        if self.crypto_ctx[packet::Epoch::Initial].has_keys() =>
                        return Ok(Type::Initial),

                    _ => (),
                };
            }

            return Ok(Type::from_epoch(epoch));
        }

        // Walk the epochs in order (Initial, Handshake, Application) and pick
        // the first one that has something to send.
        for &epoch in packet::Epoch::epochs(
            packet::Epoch::Initial..=packet::Epoch::Application,
        ) {
            let crypto_ctx = &self.crypto_ctx[epoch];
            let pkt_space = &self.pkt_num_spaces[epoch];

            // Only send packets in a space when we have the send keys for it.
            if crypto_ctx.crypto_seal.is_none() {
                continue;
            }

            // We are ready to send data for this packet number space.
            if crypto_ctx.data_available() || pkt_space.ready() {
                return Ok(Type::from_epoch(epoch));
            }

            // There are lost frames in this packet number space.
            for (_, p) in self.paths.iter() {
                if p.recovery.has_lost_frames(epoch) {
                    return Ok(Type::from_epoch(epoch));
                }

                // We need to send PTO probe packets.
                if p.recovery.loss_probes(epoch) > 0 {
                    return Ok(Type::from_epoch(epoch));
                }
            }
        }

        // If there are flushable, almost full or blocked streams, use the
        // Application epoch.
        let send_path = self.paths.get(send_pid)?;
        if (self.is_established() || self.is_in_early_data()) &&
            (self.should_send_handshake_done() ||
                self.flow_control.should_update_max_data() ||
                self.should_send_max_data ||
                self.blocked_limit.is_some() ||
                self.streams_blocked_bidi_state
                    .has_pending_stream_blocked_frame() ||
                self.streams_blocked_uni_state
                    .has_pending_stream_blocked_frame() ||
                self.dgram_send_queue.has_pending() ||
                self.local_error
                    .as_ref()
                    .is_some_and(|conn_err| conn_err.is_app) ||
                self.should_send_max_streams_bidi ||
                self.streams.should_update_max_streams_bidi() ||
                self.should_send_max_streams_uni ||
                self.streams.should_update_max_streams_uni() ||
                self.streams.has_flushable() ||
                self.streams.has_almost_full() ||
                self.streams.has_blocked() ||
                self.streams.has_reset() ||
                self.streams.has_stopped() ||
                self.ids.has_new_scids() ||
                self.ids.has_retire_dcids() ||
                send_path
                    .pmtud
                    .as_ref()
                    .is_some_and(|pmtud| pmtud.should_probe()) ||
                send_path.needs_ack_eliciting ||
                send_path.probing_required())
        {
            // Only clients can send 0-RTT packets.
            if !self.is_server && self.is_in_early_data() {
                return Ok(Type::ZeroRTT);
            }

            return Ok(Type::Short);
        }

        Err(Error::Done)
    }
8230
    /// Returns the mutable stream with the given ID if it exists, or creates
    /// a new one otherwise.
    ///
    /// `local` indicates whether the stream is locally initiated. Stream
    /// creation honors both endpoints' transport parameters (delegated to
    /// the stream map).
    fn get_or_create_stream(
        &mut self, id: u64, local: bool,
    ) -> Result<&mut stream::Stream<F>> {
        self.streams.get_or_create(
            id,
            &self.local_transport_params,
            &self.peer_transport_params,
            local,
            self.is_server,
        )
    }
8244
8245 /// Processes an incoming frame.
8246 fn process_frame(
8247 &mut self, frame: frame::Frame, hdr: &Header, recv_path_id: usize,
8248 epoch: packet::Epoch, now: Instant,
8249 ) -> Result<()> {
8250 trace!("{} rx frm {:?}", self.trace_id, frame);
8251
8252 match frame {
8253 frame::Frame::Padding { .. } => (),
8254
8255 frame::Frame::Ping { .. } => (),
8256
8257 frame::Frame::ACK {
8258 ranges, ack_delay, ..
8259 } => {
8260 let ack_delay = ack_delay
8261 .checked_mul(2_u64.pow(
8262 self.peer_transport_params.ack_delay_exponent as u32,
8263 ))
8264 .ok_or(Error::InvalidFrame)?;
8265
8266 if epoch == packet::Epoch::Handshake ||
8267 (epoch == packet::Epoch::Application &&
8268 self.is_established())
8269 {
8270 self.peer_verified_initial_address = true;
8271 }
8272
8273 let handshake_status = self.handshake_status();
8274
8275 let is_app_limited = self.delivery_rate_check_if_app_limited();
8276
8277 let largest_acked = ranges.last().expect(
8278 "ACK frames should always have at least one ack range",
8279 );
8280
8281 for (_, p) in self.paths.iter_mut() {
8282 if self.pkt_num_spaces[epoch]
8283 .largest_tx_pkt_num
8284 .is_some_and(|largest_sent| largest_sent < largest_acked)
8285 {
8286 // https://www.rfc-editor.org/rfc/rfc9000#section-13.1
8287 // An endpoint SHOULD treat receipt of an acknowledgment
8288 // for a packet it did not send as
8289 // a connection error of type PROTOCOL_VIOLATION
8290 return Err(Error::InvalidAckRange);
8291 }
8292
8293 if is_app_limited {
8294 p.recovery.delivery_rate_update_app_limited(true);
8295 }
8296
8297 let OnAckReceivedOutcome {
8298 lost_packets,
8299 lost_bytes,
8300 acked_bytes,
8301 spurious_losses,
8302 } = p.recovery.on_ack_received(
8303 &ranges,
8304 ack_delay,
8305 epoch,
8306 handshake_status,
8307 now,
8308 self.pkt_num_manager.skip_pn(),
8309 &self.trace_id,
8310 )?;
8311
8312 let skip_pn = self.pkt_num_manager.skip_pn();
8313 let largest_acked =
8314 p.recovery.get_largest_acked_on_epoch(epoch);
8315
8316 // Consider the skip_pn validated if the peer has sent an ack
8317 // for a larger pkt number.
8318 if let Some((largest_acked, skip_pn)) =
8319 largest_acked.zip(skip_pn)
8320 {
8321 if largest_acked > skip_pn {
8322 self.pkt_num_manager.set_skip_pn(None);
8323 }
8324 }
8325
8326 self.lost_count += lost_packets;
8327 self.lost_bytes += lost_bytes as u64;
8328 self.acked_bytes += acked_bytes as u64;
8329 self.spurious_lost_count += spurious_losses;
8330 }
8331 },
8332
8333 frame::Frame::ResetStream {
8334 stream_id,
8335 error_code,
8336 final_size,
8337 } => {
8338 // Peer can't send on our unidirectional streams.
8339 if !stream::is_bidi(stream_id) &&
8340 stream::is_local(stream_id, self.is_server)
8341 {
8342 return Err(Error::InvalidStreamState(stream_id));
8343 }
8344
8345 let max_rx_data_left = self.max_rx_data() - self.rx_data;
8346
8347 // Get existing stream or create a new one, but if the stream
8348 // has already been closed and collected, ignore the frame.
8349 //
8350 // This can happen if e.g. an ACK frame is lost, and the peer
8351 // retransmits another frame before it realizes that the stream
8352 // is gone.
8353 //
8354 // Note that it makes it impossible to check if the frame is
8355 // illegal, since we have no state, but since we ignore the
8356 // frame, it should be fine.
8357 let stream = match self.get_or_create_stream(stream_id, false) {
8358 Ok(v) => v,
8359
8360 Err(Error::Done) => return Ok(()),
8361
8362 Err(e) => return Err(e),
8363 };
8364
8365 let was_readable = stream.is_readable();
8366 let priority_key = Arc::clone(&stream.priority_key);
8367
8368 let stream::RecvBufResetReturn {
8369 max_data_delta,
8370 consumed_flowcontrol,
8371 } = stream.recv.reset(error_code, final_size)?;
8372
8373 if max_data_delta > max_rx_data_left {
8374 return Err(Error::FlowControl);
8375 }
8376
8377 if !was_readable && stream.is_readable() {
8378 self.streams.insert_readable(&priority_key);
8379 }
8380
8381 self.rx_data += max_data_delta;
8382 // We dropped the receive buffer, return connection level
8383 // flow-control
8384 self.flow_control.add_consumed(consumed_flowcontrol);
8385
8386 self.reset_stream_remote_count =
8387 self.reset_stream_remote_count.saturating_add(1);
8388 },
8389
8390 frame::Frame::StopSending {
8391 stream_id,
8392 error_code,
8393 } => {
8394 // STOP_SENDING on a receive-only stream is a fatal error.
8395 if !stream::is_local(stream_id, self.is_server) &&
8396 !stream::is_bidi(stream_id)
8397 {
8398 return Err(Error::InvalidStreamState(stream_id));
8399 }
8400
8401 // Get existing stream or create a new one, but if the stream
8402 // has already been closed and collected, ignore the frame.
8403 //
8404 // This can happen if e.g. an ACK frame is lost, and the peer
8405 // retransmits another frame before it realizes that the stream
8406 // is gone.
8407 //
8408 // Note that it makes it impossible to check if the frame is
8409 // illegal, since we have no state, but since we ignore the
8410 // frame, it should be fine.
8411 let stream = match self.get_or_create_stream(stream_id, false) {
8412 Ok(v) => v,
8413
8414 Err(Error::Done) => return Ok(()),
8415
8416 Err(e) => return Err(e),
8417 };
8418
8419 let was_writable = stream.is_writable();
8420
8421 let priority_key = Arc::clone(&stream.priority_key);
8422
8423 // Try stopping the stream.
8424 if let Ok((final_size, unsent)) = stream.send.stop(error_code) {
8425 // Claw back some flow control allowance from data that was
8426 // buffered but not actually sent before the stream was
8427 // reset.
8428 //
8429 // Note that `tx_cap` will be updated later on, so no need
8430 // to touch it here.
8431 self.tx_data = self.tx_data.saturating_sub(unsent);
8432
8433 self.tx_buffered =
8434 self.tx_buffered.saturating_sub(unsent as usize);
8435
8436 // These drops in qlog are a bit weird, but the only way to
8437 // ensure that all bytes that are moved from App to Transport
8438 // in stream_do_send are eventually moved from Transport to
8439 // Dropped. Ideally we would add a Transport to Network
8440 // transition also as a way to indicate when bytes were
8441 // transmitted vs dropped without ever being sent.
8442 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
8443 let ev_data = EventData::QuicStreamDataMoved(
8444 qlog::events::quic::StreamDataMoved {
8445 stream_id: Some(stream_id),
8446 offset: Some(final_size),
8447 raw: Some(RawInfo {
8448 length: Some(unsent),
8449 ..Default::default()
8450 }),
8451 from: Some(DataRecipient::Transport),
8452 to: Some(DataRecipient::Dropped),
8453 ..Default::default()
8454 },
8455 );
8456
8457 q.add_event_data_with_instant(ev_data, now).ok();
8458 });
8459
8460 self.streams.insert_reset(stream_id, error_code, final_size);
8461
8462 if !was_writable {
8463 self.streams.insert_writable(&priority_key);
8464 }
8465
8466 self.stopped_stream_remote_count =
8467 self.stopped_stream_remote_count.saturating_add(1);
8468 self.reset_stream_local_count =
8469 self.reset_stream_local_count.saturating_add(1);
8470 }
8471 },
8472
8473 frame::Frame::Crypto { data } => {
8474 if data.max_off() >= MAX_CRYPTO_STREAM_OFFSET {
8475 return Err(Error::CryptoBufferExceeded);
8476 }
8477
8478 // Push the data to the stream so it can be re-ordered.
8479 self.crypto_ctx[epoch].crypto_stream.recv.write(data)?;
8480
8481 // Feed crypto data to the TLS state, if there's data
8482 // available at the expected offset.
8483 let mut crypto_buf = [0; 512];
8484
8485 let level = crypto::Level::from_epoch(epoch);
8486
8487 let stream = &mut self.crypto_ctx[epoch].crypto_stream;
8488
8489 while let Ok((read, _)) = stream.recv.emit(&mut crypto_buf) {
8490 let recv_buf = &crypto_buf[..read];
8491 self.handshake.provide_data(level, recv_buf)?;
8492 }
8493
8494 self.do_handshake(now)?;
8495 },
8496
8497 frame::Frame::CryptoHeader { .. } => unreachable!(),
8498
8499 // TODO: implement stateless retry
8500 frame::Frame::NewToken { .. } =>
8501 if self.is_server {
8502 return Err(Error::InvalidPacket);
8503 },
8504
8505 frame::Frame::Stream { stream_id, data } => {
8506 // Peer can't send on our unidirectional streams.
8507 if !stream::is_bidi(stream_id) &&
8508 stream::is_local(stream_id, self.is_server)
8509 {
8510 return Err(Error::InvalidStreamState(stream_id));
8511 }
8512
8513 let max_rx_data_left = self.max_rx_data() - self.rx_data;
8514
8515 // Get existing stream or create a new one, but if the stream
8516 // has already been closed and collected, ignore the frame.
8517 //
8518 // This can happen if e.g. an ACK frame is lost, and the peer
8519 // retransmits another frame before it realizes that the stream
8520 // is gone.
8521 //
8522 // Note that it makes it impossible to check if the frame is
8523 // illegal, since we have no state, but since we ignore the
8524 // frame, it should be fine.
8525 let stream = match self.get_or_create_stream(stream_id, false) {
8526 Ok(v) => v,
8527
8528 Err(Error::Done) => return Ok(()),
8529
8530 Err(e) => return Err(e),
8531 };
8532
8533 // Check for the connection-level flow control limit.
8534 let max_off_delta =
8535 data.max_off().saturating_sub(stream.recv.max_off());
8536
8537 if max_off_delta > max_rx_data_left {
8538 return Err(Error::FlowControl);
8539 }
8540
8541 let was_readable = stream.is_readable();
8542 let priority_key = Arc::clone(&stream.priority_key);
8543
8544 let was_draining = stream.recv.is_draining();
8545
8546 stream.recv.write(data)?;
8547
8548 if !was_readable && stream.is_readable() {
8549 self.streams.insert_readable(&priority_key);
8550 }
8551
8552 self.rx_data += max_off_delta;
8553
8554 if was_draining {
8555 // When a stream is in draining state it will not queue
8556 // incoming data for the application to read, so consider
8557 // the received data as consumed, which might trigger a flow
8558 // control update.
8559 self.flow_control.add_consumed(max_off_delta);
8560 }
8561 },
8562
8563 frame::Frame::StreamHeader { .. } => unreachable!(),
8564
8565 frame::Frame::MaxData { max } => {
8566 self.max_tx_data = cmp::max(self.max_tx_data, max);
8567 },
8568
8569 frame::Frame::MaxStreamData { stream_id, max } => {
8570 // Peer can't receive on its own unidirectional streams.
8571 if !stream::is_bidi(stream_id) &&
8572 !stream::is_local(stream_id, self.is_server)
8573 {
8574 return Err(Error::InvalidStreamState(stream_id));
8575 }
8576
8577 // Get existing stream or create a new one, but if the stream
8578 // has already been closed and collected, ignore the frame.
8579 //
8580 // This can happen if e.g. an ACK frame is lost, and the peer
8581 // retransmits another frame before it realizes that the stream
8582 // is gone.
8583 //
8584 // Note that it makes it impossible to check if the frame is
8585 // illegal, since we have no state, but since we ignore the
8586 // frame, it should be fine.
8587 let stream = match self.get_or_create_stream(stream_id, false) {
8588 Ok(v) => v,
8589
8590 Err(Error::Done) => return Ok(()),
8591
8592 Err(e) => return Err(e),
8593 };
8594
8595 let was_flushable = stream.is_flushable();
8596
8597 stream.send.update_max_data(max);
8598
8599 let writable = stream.is_writable();
8600
8601 let priority_key = Arc::clone(&stream.priority_key);
8602
8603 // If the stream is now flushable push it to the flushable queue,
8604 // but only if it wasn't already queued.
8605 if stream.is_flushable() && !was_flushable {
8606 let priority_key = Arc::clone(&stream.priority_key);
8607 self.streams.insert_flushable(&priority_key);
8608 }
8609
8610 if writable {
8611 self.streams.insert_writable(&priority_key);
8612 }
8613 },
8614
8615 frame::Frame::MaxStreamsBidi { max } => {
8616 if max > MAX_STREAM_ID {
8617 return Err(Error::InvalidFrame);
8618 }
8619
8620 self.streams.update_peer_max_streams_bidi(max);
8621 },
8622
8623 frame::Frame::MaxStreamsUni { max } => {
8624 if max > MAX_STREAM_ID {
8625 return Err(Error::InvalidFrame);
8626 }
8627
8628 self.streams.update_peer_max_streams_uni(max);
8629 },
8630
8631 frame::Frame::DataBlocked { .. } => {
8632 self.data_blocked_recv_count =
8633 self.data_blocked_recv_count.saturating_add(1);
8634 },
8635
8636 frame::Frame::StreamDataBlocked { .. } => {
8637 self.stream_data_blocked_recv_count =
8638 self.stream_data_blocked_recv_count.saturating_add(1);
8639 },
8640
8641 frame::Frame::StreamsBlockedBidi { limit } => {
8642 if limit > MAX_STREAM_ID {
8643 return Err(Error::InvalidFrame);
8644 }
8645
8646 self.streams_blocked_bidi_recv_count =
8647 self.streams_blocked_bidi_recv_count.saturating_add(1);
8648 },
8649
8650 frame::Frame::StreamsBlockedUni { limit } => {
8651 if limit > MAX_STREAM_ID {
8652 return Err(Error::InvalidFrame);
8653 }
8654
8655 self.streams_blocked_uni_recv_count =
8656 self.streams_blocked_uni_recv_count.saturating_add(1);
8657 },
8658
8659 frame::Frame::NewConnectionId {
8660 seq_num,
8661 retire_prior_to,
8662 conn_id,
8663 reset_token,
8664 } => {
8665 if self.ids.zero_length_dcid() {
8666 return Err(Error::InvalidState);
8667 }
8668
8669 let mut retired_path_ids = SmallVec::new();
8670
8671 // Retire pending path IDs before propagating the error code to
8672 // make sure retired connection IDs are not in use anymore.
8673 let new_dcid_res = self.ids.new_dcid(
8674 conn_id.into(),
8675 seq_num,
8676 u128::from_be_bytes(reset_token),
8677 retire_prior_to,
8678 &mut retired_path_ids,
8679 );
8680
8681 for (dcid_seq, pid) in retired_path_ids {
8682 let path = self.paths.get_mut(pid)?;
8683
8684 // Maybe the path already switched to another DCID.
8685 if path.active_dcid_seq != Some(dcid_seq) {
8686 continue;
8687 }
8688
8689 if let Some(new_dcid_seq) =
8690 self.ids.lowest_available_dcid_seq()
8691 {
8692 path.active_dcid_seq = Some(new_dcid_seq);
8693
8694 self.ids.link_dcid_to_path_id(new_dcid_seq, pid)?;
8695
8696 trace!(
8697 "{} path ID {} changed DCID: old seq num {} new seq num {}",
8698 self.trace_id, pid, dcid_seq, new_dcid_seq,
8699 );
8700 } else {
8701 // We cannot use this path anymore for now.
8702 path.active_dcid_seq = None;
8703
8704 trace!(
8705 "{} path ID {} cannot be used; DCID seq num {} has been retired",
8706 self.trace_id, pid, dcid_seq,
8707 );
8708 }
8709 }
8710
8711 // Propagate error (if any) now...
8712 new_dcid_res?;
8713 },
8714
8715 frame::Frame::RetireConnectionId { seq_num } => {
8716 if self.ids.zero_length_scid() {
8717 return Err(Error::InvalidState);
8718 }
8719
8720 if let Some(pid) = self.ids.retire_scid(seq_num, &hdr.dcid)? {
8721 let path = self.paths.get_mut(pid)?;
8722
8723 // Maybe we already linked a new SCID to that path.
8724 if path.active_scid_seq == Some(seq_num) {
8725 // XXX: We do not remove unused paths now, we instead
8726 // wait until we need to maintain more paths than the
8727 // host is willing to.
8728 path.active_scid_seq = None;
8729 }
8730 }
8731 },
8732
8733 frame::Frame::PathChallenge { data } => {
8734 self.path_challenge_rx_count += 1;
8735
8736 self.paths
8737 .get_mut(recv_path_id)?
8738 .on_challenge_received(data);
8739 },
8740
8741 frame::Frame::PathResponse { data } => {
8742 self.paths.on_response_received(data)?;
8743 },
8744
8745 frame::Frame::ConnectionClose {
8746 error_code, reason, ..
8747 } => {
8748 self.peer_error = Some(ConnectionError {
8749 is_app: false,
8750 error_code,
8751 reason,
8752 });
8753
8754 let path = self.paths.get_active()?;
8755 self.draining_timer = Some(now + (path.recovery.pto() * 3));
8756 },
8757
8758 frame::Frame::ApplicationClose { error_code, reason } => {
8759 self.peer_error = Some(ConnectionError {
8760 is_app: true,
8761 error_code,
8762 reason,
8763 });
8764
8765 let path = self.paths.get_active()?;
8766 self.draining_timer = Some(now + (path.recovery.pto() * 3));
8767 },
8768
8769 frame::Frame::HandshakeDone => {
8770 if self.is_server {
8771 return Err(Error::InvalidPacket);
8772 }
8773
8774 self.peer_verified_initial_address = true;
8775
8776 self.handshake_confirmed = true;
8777
8778 // Once the handshake is confirmed, we can drop Handshake keys.
8779 self.drop_epoch_state(packet::Epoch::Handshake, now);
8780 },
8781
8782 frame::Frame::Datagram { data } => {
8783 // Close the connection if DATAGRAMs are not enabled.
8784 // quiche always advertises support for 64K sized DATAGRAM
8785 // frames, as recommended by the standard, so we don't need a
8786 // size check.
8787 if !self.dgram_enabled() {
8788 return Err(Error::InvalidState);
8789 }
8790
8791 // If recv queue is full, discard oldest
8792 if self.dgram_recv_queue.is_full() {
8793 self.dgram_recv_queue.pop();
8794 }
8795
8796 self.dgram_recv_queue.push(data.into())?;
8797
8798 self.dgram_recv_count = self.dgram_recv_count.saturating_add(1);
8799
8800 let path = self.paths.get_mut(recv_path_id)?;
8801 path.dgram_recv_count = path.dgram_recv_count.saturating_add(1);
8802 },
8803
8804 frame::Frame::DatagramHeader { .. } => unreachable!(),
8805 }
8806
8807 Ok(())
8808 }
8809
8810 /// Drops the keys and recovery state for the given epoch.
8811 fn drop_epoch_state(&mut self, epoch: packet::Epoch, now: Instant) {
8812 let crypto_ctx = &mut self.crypto_ctx[epoch];
8813 if crypto_ctx.crypto_open.is_none() {
8814 return;
8815 }
8816 crypto_ctx.clear();
8817 self.pkt_num_spaces[epoch].clear();
8818
8819 let handshake_status = self.handshake_status();
8820 for (_, p) in self.paths.iter_mut() {
8821 p.recovery
8822 .on_pkt_num_space_discarded(epoch, handshake_status, now);
8823 }
8824
8825 trace!("{} dropped epoch {} state", self.trace_id, epoch);
8826 }
8827
    /// Returns the connection level flow control limit.
    ///
    /// This is the maximum receive offset, in bytes, currently allowed
    /// across all streams, as tracked by the connection-level flow
    /// controller.
    fn max_rx_data(&self) -> u64 {
        self.flow_control.max_data()
    }
8832
8833 /// Returns true if the HANDSHAKE_DONE frame needs to be sent.
8834 fn should_send_handshake_done(&self) -> bool {
8835 self.is_established() && !self.handshake_done_sent && self.is_server
8836 }
8837
8838 /// Returns the idle timeout value.
8839 ///
8840 /// `None` is returned if both end-points disabled the idle timeout.
8841 fn idle_timeout(&self) -> Option<Duration> {
8842 // If the transport parameter is set to 0, then the respective endpoint
8843 // decided to disable the idle timeout. If both are disabled we should
8844 // not set any timeout.
8845 if self.local_transport_params.max_idle_timeout == 0 &&
8846 self.peer_transport_params.max_idle_timeout == 0
8847 {
8848 return None;
8849 }
8850
8851 // If the local endpoint or the peer disabled the idle timeout, use the
8852 // other peer's value, otherwise use the minimum of the two values.
8853 let idle_timeout = if self.local_transport_params.max_idle_timeout == 0 {
8854 self.peer_transport_params.max_idle_timeout
8855 } else if self.peer_transport_params.max_idle_timeout == 0 {
8856 self.local_transport_params.max_idle_timeout
8857 } else {
8858 cmp::min(
8859 self.local_transport_params.max_idle_timeout,
8860 self.peer_transport_params.max_idle_timeout,
8861 )
8862 };
8863
8864 let path_pto = match self.paths.get_active() {
8865 Ok(p) => p.recovery.pto(),
8866 Err(_) => Duration::ZERO,
8867 };
8868
8869 let idle_timeout = Duration::from_millis(idle_timeout);
8870 let idle_timeout = cmp::max(idle_timeout, 3 * path_pto);
8871
8872 Some(idle_timeout)
8873 }
8874
    /// Returns the connection's handshake status for use in loss recovery.
    fn handshake_status(&self) -> recovery::HandshakeStatus {
        recovery::HandshakeStatus {
            // Whether Handshake-epoch keys are currently installed.
            has_handshake_keys: self.crypto_ctx[packet::Epoch::Handshake]
                .has_keys(),

            // Set when the peer is known to have validated our address
            // (e.g. on the client, after receiving HANDSHAKE_DONE).
            peer_verified_address: self.peer_verified_initial_address,

            // Whether the TLS handshake has completed.
            completed: self.is_established(),
        }
    }
8886
8887 /// Updates send capacity.
8888 fn update_tx_cap(&mut self) {
8889 let cwin_available = match self.paths.get_active() {
8890 Ok(p) => p.recovery.cwnd_available() as u64,
8891 Err(_) => 0,
8892 };
8893
8894 let cap =
8895 cmp::min(cwin_available, self.max_tx_data - self.tx_data) as usize;
8896 self.tx_cap = (cap as f64 * self.tx_cap_factor).ceil() as usize;
8897 }
8898
8899 fn delivery_rate_check_if_app_limited(&self) -> bool {
8900 // Enter the app-limited phase of delivery rate when these conditions
8901 // are met:
8902 //
8903 // - The remaining capacity is higher than available bytes in cwnd (there
8904 // is more room to send).
8905 // - New data since the last send() is smaller than available bytes in
8906 // cwnd (we queued less than what we can send).
8907 // - There is room to send more data in cwnd.
8908 //
8909 // In application-limited phases the transmission rate is limited by the
8910 // application rather than the congestion control algorithm.
8911 //
8912 // Note that this is equivalent to CheckIfApplicationLimited() from the
8913 // delivery rate draft. This is also separate from `recovery.app_limited`
8914 // and only applies to delivery rate calculation.
8915 let cwin_available = self
8916 .paths
8917 .iter()
8918 .filter(|&(_, p)| p.active())
8919 .map(|(_, p)| p.recovery.cwnd_available())
8920 .sum();
8921
8922 ((self.tx_buffered + self.dgram_send_queue_byte_size()) < cwin_available) &&
8923 (self.tx_data.saturating_sub(self.last_tx_data)) <
8924 cwin_available as u64 &&
8925 cwin_available > 0
8926 }
8927
8928 fn check_tx_buffered_invariant(&mut self) {
8929 // tx_buffered should track bytes queued in the stream buffers
8930 // and unacked retransmitable bytes in the network.
8931 // If tx_buffered > 0 mark the tx_buffered_state if there are no
8932 // flushable streams and there no inflight bytes.
8933 //
8934 // It is normal to have tx_buffered == 0 while there are inflight bytes
8935 // since not QUIC frames are retransmittable; inflight tracks all bytes
8936 // on the network which are subject to congestion control.
8937 if self.tx_buffered > 0 &&
8938 !self.streams.has_flushable() &&
8939 !self
8940 .paths
8941 .iter()
8942 .any(|(_, p)| p.recovery.bytes_in_flight() > 0)
8943 {
8944 self.tx_buffered_state = TxBufferTrackingState::Inconsistent;
8945 }
8946 }
8947
8948 fn set_initial_dcid(
8949 &mut self, cid: ConnectionId<'static>, reset_token: Option<u128>,
8950 path_id: usize,
8951 ) -> Result<()> {
8952 self.ids.set_initial_dcid(cid, reset_token, Some(path_id));
8953 self.paths.get_mut(path_id)?.active_dcid_seq = Some(0);
8954
8955 Ok(())
8956 }
8957
    /// Selects the path that the incoming packet belongs to, or creates a new
    /// one if no existing path matches.
    ///
    /// `recv_pid` is the path already known for the packet's 4-tuple, if
    /// any. `dcid` is the connection ID the packet was addressed to.
    /// `buf_len` is the received packet's size, used to seed the new
    /// path's anti-amplification budget. `info` carries the local/peer
    /// socket addresses the packet was received on.
    fn get_or_create_recv_path_id(
        &mut self, recv_pid: Option<usize>, dcid: &ConnectionId, buf_len: usize,
        info: &RecvInfo,
    ) -> Result<usize> {
        let ids = &mut self.ids;

        // The DCID must map to one of our own SCIDs, otherwise the packet
        // cannot belong to this connection.
        let (in_scid_seq, mut in_scid_pid) =
            ids.find_scid_seq(dcid).ok_or(Error::InvalidState)?;

        if let Some(recv_pid) = recv_pid {
            // If the path observes a change of SCID used, note it.
            let recv_path = self.paths.get_mut(recv_pid)?;

            let cid_entry =
                recv_path.active_scid_seq.and_then(|v| ids.get_scid(v).ok());

            if cid_entry.map(|e| &e.cid) != Some(dcid) {
                let incoming_cid_entry = ids.get_scid(in_scid_seq)?;

                // Path the incoming CID was previously linked to, if any.
                let prev_recv_pid =
                    incoming_cid_entry.path_id.unwrap_or(recv_pid);

                if prev_recv_pid != recv_pid {
                    trace!(
                        "{} peer reused CID {:?} from path {} on path {}",
                        self.trace_id,
                        dcid,
                        prev_recv_pid,
                        recv_pid
                    );

                    // TODO: reset congestion control.
                }

                trace!(
                    "{} path ID {} now see SCID with seq num {}",
                    self.trace_id,
                    recv_pid,
                    in_scid_seq
                );

                // Record the SCID switch on the existing path.
                recv_path.active_scid_seq = Some(in_scid_seq);
                ids.link_scid_to_path_id(in_scid_seq, recv_pid)?;
            }

            return Ok(recv_pid);
        }

        // This is a new 4-tuple. See if the CID has not been assigned on
        // another path.

        // Ignore this step if we are using a zero-length SCID, since all
        // paths then share the same (empty) CID.
        if ids.zero_length_scid() {
            in_scid_pid = None;
        }

        if let Some(in_scid_pid) = in_scid_pid {
            // This CID has been used by another path. If we have the
            // room to do so, create a new `Path` structure holding this
            // new 4-tuple. Otherwise, drop the packet.
            let old_path = self.paths.get_mut(in_scid_pid)?;
            let old_local_addr = old_path.local_addr();
            let old_peer_addr = old_path.peer_addr();

            trace!(
                "{} reused CID seq {} of ({},{}) (path {}) on ({},{})",
                self.trace_id,
                in_scid_seq,
                old_local_addr,
                old_peer_addr,
                in_scid_pid,
                info.to,
                info.from
            );

            // Notify the application.
            self.paths.notify_event(PathEvent::ReusedSourceConnectionId(
                in_scid_seq,
                (old_local_addr, old_peer_addr),
                (info.to, info.from),
            ));
        }

        // This is a new path using an unassigned CID; create it!
        let mut path = path::Path::new(
            info.to,
            info.from,
            &self.recovery_config,
            self.path_challenge_recv_max_queue_len,
            false,
            None,
        );

        // Limit what can be sent on the unvalidated path to a multiple of
        // what was received, per the anti-amplification rules.
        path.max_send_bytes = buf_len * self.max_amplification_factor;
        path.active_scid_seq = Some(in_scid_seq);

        // Automatically probes the new path.
        path.request_validation();

        let pid = self.paths.insert_path(path, self.is_server)?;

        // Do not record path reuse.
        if in_scid_pid.is_none() {
            ids.link_scid_to_path_id(in_scid_seq, pid)?;
        }

        Ok(pid)
    }
9068
9069 /// Selects the path on which the next packet must be sent.
9070 fn get_send_path_id(
9071 &self, from: Option<SocketAddr>, to: Option<SocketAddr>,
9072 ) -> Result<usize> {
9073 // A probing packet must be sent, but only if the connection is fully
9074 // established.
9075 if self.is_established() {
9076 let mut probing = self
9077 .paths
9078 .iter()
9079 .filter(|(_, p)| from.is_none() || Some(p.local_addr()) == from)
9080 .filter(|(_, p)| to.is_none() || Some(p.peer_addr()) == to)
9081 .filter(|(_, p)| p.active_dcid_seq.is_some())
9082 .filter(|(_, p)| p.probing_required())
9083 .map(|(pid, _)| pid);
9084
9085 if let Some(pid) = probing.next() {
9086 return Ok(pid);
9087 }
9088 }
9089
9090 if let Some((pid, p)) = self.paths.get_active_with_pid() {
9091 if from.is_some() && Some(p.local_addr()) != from {
9092 return Err(Error::Done);
9093 }
9094
9095 if to.is_some() && Some(p.peer_addr()) != to {
9096 return Err(Error::Done);
9097 }
9098
9099 return Ok(pid);
9100 };
9101
9102 Err(Error::InvalidState)
9103 }
9104
9105 /// Sets the path with identifier 'path_id' to be active.
9106 fn set_active_path(&mut self, path_id: usize, now: Instant) -> Result<()> {
9107 if let Ok(old_active_path) = self.paths.get_active_mut() {
9108 for &e in packet::Epoch::epochs(
9109 packet::Epoch::Initial..=packet::Epoch::Application,
9110 ) {
9111 let (lost_packets, lost_bytes) = old_active_path
9112 .recovery
9113 .on_path_change(e, now, &self.trace_id);
9114
9115 self.lost_count += lost_packets;
9116 self.lost_bytes += lost_bytes as u64;
9117 }
9118 }
9119
9120 self.paths.set_active_path(path_id)
9121 }
9122
9123 /// Handles potential connection migration.
9124 fn on_peer_migrated(
9125 &mut self, new_pid: usize, disable_dcid_reuse: bool, now: Instant,
9126 ) -> Result<()> {
9127 let active_path_id = self.paths.get_active_path_id()?;
9128
9129 if active_path_id == new_pid {
9130 return Ok(());
9131 }
9132
9133 self.set_active_path(new_pid, now)?;
9134
9135 let no_spare_dcid =
9136 self.paths.get_mut(new_pid)?.active_dcid_seq.is_none();
9137
9138 if no_spare_dcid && !disable_dcid_reuse {
9139 self.paths.get_mut(new_pid)?.active_dcid_seq =
9140 self.paths.get_mut(active_path_id)?.active_dcid_seq;
9141 }
9142
9143 Ok(())
9144 }
9145
9146 /// Creates a new client-side path.
9147 fn create_path_on_client(
9148 &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
9149 ) -> Result<usize> {
9150 if self.is_server {
9151 return Err(Error::InvalidState);
9152 }
9153
9154 // If we use zero-length SCID and go over our local active CID limit,
9155 // the `insert_path()` call will raise an error.
9156 if !self.ids.zero_length_scid() && self.ids.available_scids() == 0 {
9157 return Err(Error::OutOfIdentifiers);
9158 }
9159
9160 // Do we have a spare DCID? If we are using zero-length DCID, just use
9161 // the default having sequence 0 (note that if we exceed our local CID
9162 // limit, the `insert_path()` call will raise an error.
9163 let dcid_seq = if self.ids.zero_length_dcid() {
9164 0
9165 } else {
9166 self.ids
9167 .lowest_available_dcid_seq()
9168 .ok_or(Error::OutOfIdentifiers)?
9169 };
9170
9171 let mut path = path::Path::new(
9172 local_addr,
9173 peer_addr,
9174 &self.recovery_config,
9175 self.path_challenge_recv_max_queue_len,
9176 false,
9177 None,
9178 );
9179 path.active_dcid_seq = Some(dcid_seq);
9180
9181 let pid = self
9182 .paths
9183 .insert_path(path, false)
9184 .map_err(|_| Error::OutOfIdentifiers)?;
9185 self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
9186
9187 Ok(pid)
9188 }
9189
    // Marks the connection as closed and does any related tidyup.
    //
    // When the `qlog` feature is enabled, this also emits a final
    // `ConnectionClosed` qlog event describing why the connection ended
    // (failed handshake, idle timeout, peer-initiated close or a locally
    // recorded error) and then shuts down the qlog streamer.
    fn mark_closed(&mut self) {
        #[cfg(feature = "qlog")]
        {
            let cc = match (self.is_established(), self.timed_out, &self.peer_error, &self.local_error) {
                // The connection closed before the handshake completed.
                (false, _, _, _) => qlog::events::quic::ConnectionClosed {
                    initiator: Some(TransportInitiator::Local),
                    connection_error: None,
                    application_error: None,
                    error_code: None,
                    internal_code: None,
                    reason: Some("Failed to establish connection".to_string()),
                    trigger: Some(qlog::events::quic::ConnectionClosedTrigger::HandshakeTimeout)
                },

                // An established connection hit the idle timeout.
                (true, true, _, _) => qlog::events::quic::ConnectionClosed {
                    initiator: Some(TransportInitiator::Local),
                    connection_error: None,
                    application_error: None,
                    error_code: None,
                    internal_code: None,
                    reason: Some("Idle timeout".to_string()),
                    trigger: Some(qlog::events::quic::ConnectionClosedTrigger::IdleTimeout)
                },

                // The peer closed the connection (a CONNECTION_CLOSE or
                // APPLICATION_CLOSE was received).
                (true, false, Some(peer_error), None) => {
                    let (connection_code, application_error, trigger) = if peer_error.is_app {
                        (None, Some(qlog::events::ApplicationError::Unknown), None)
                    } else {
                        let trigger = if peer_error.error_code == WireErrorCode::NoError as u64 {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Clean)
                        } else {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Error)
                        };

                        (Some(qlog::events::ConnectionClosedEventError::TransportError(qlog::events::quic::TransportError::Unknown)), None, trigger)
                    };

                    // TODO: select more appropriate connection_code and application_error than unknown.
                    qlog::events::quic::ConnectionClosed {
                        initiator: Some(TransportInitiator::Remote),
                        connection_error: connection_code,
                        application_error,
                        error_code: Some(peer_error.error_code),
                        internal_code: None,
                        reason: Some(String::from_utf8_lossy(&peer_error.reason).to_string()),
                        trigger,
                    }
                },

                // A local error was recorded (we initiated the close).
                (true, false, None, Some(local_error)) => {
                    let (connection_code, application_error, trigger) = if local_error.is_app {
                        (None, Some(qlog::events::ApplicationError::Unknown), None)
                    } else {
                        let trigger = if local_error.error_code == WireErrorCode::NoError as u64 {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Clean)
                        } else {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Error)
                        };

                        (Some(qlog::events::ConnectionClosedEventError::TransportError(qlog::events::quic::TransportError::Unknown)), None, trigger)
                    };

                    // TODO: select more appropriate connection_code and application_error than unknown.
                    qlog::events::quic::ConnectionClosed {
                        initiator: Some(TransportInitiator::Local),
                        connection_error: connection_code,
                        application_error,
                        error_code: Some(local_error.error_code),
                        internal_code: None,
                        reason: Some(String::from_utf8_lossy(&local_error.reason).to_string()),
                        trigger,
                    }
                },

                // Any other combination (e.g. both or neither of the
                // peer/local errors set): emit an event with no details.
                _ => qlog::events::quic::ConnectionClosed {
                    initiator: None,
                    connection_error: None,
                    application_error: None,
                    error_code: None,
                    internal_code: None,
                    reason: None,
                    trigger: None,
                },
            };

            qlog_with_type!(QLOG_CONNECTION_CLOSED, self.qlog, q, {
                let ev_data = EventData::QuicConnectionClosed(cc);

                q.add_event_data_now(ev_data).ok();
            });
            self.qlog.streamer = None;
        }
        self.closed = true;
    }
9285}
9286
#[cfg(feature = "boringssl-boring-crate")]
impl<F: BufFactory> AsMut<boring::ssl::SslRef> for Connection<F> {
    /// Returns a mutable reference to the BoringSSL `SslRef` backing this
    /// connection's TLS handshake state.
    fn as_mut(&mut self) -> &mut boring::ssl::SslRef {
        self.handshake.ssl_mut()
    }
}
9293
9294/// Maps an `Error` to `Error::Done`, or itself.
9295///
9296/// When a received packet that hasn't yet been authenticated triggers a failure
9297/// it should, in most cases, be ignored, instead of raising a connection error,
9298/// to avoid potential man-in-the-middle and man-on-the-side attacks.
9299///
9300/// However, if no other packet was previously received, the connection should
9301/// indeed be closed as the received packet might just be network background
9302/// noise, and it shouldn't keep resources occupied indefinitely.
9303///
9304/// This function maps an error to `Error::Done` to ignore a packet failure
9305/// without aborting the connection, except when no other packet was previously
9306/// received, in which case the error itself is returned, but only on the
9307/// server-side as the client will already have armed the idle timer.
9308///
9309/// This must only be used for errors preceding packet authentication. Failures
9310/// happening after a packet has been authenticated should still cause the
9311/// connection to be aborted.
9312fn drop_pkt_on_err(
9313 e: Error, recv_count: usize, is_server: bool, trace_id: &str,
9314) -> Error {
9315 // On the server, if no other packet has been successfully processed, abort
9316 // the connection to avoid keeping the connection open when only junk is
9317 // received.
9318 if is_server && recv_count == 0 {
9319 return e;
9320 }
9321
9322 trace!("{trace_id} dropped invalid packet");
9323
9324 // Ignore other invalid packets that haven't been authenticated to prevent
9325 // man-in-the-middle and man-on-the-side attacks.
9326 Error::Done
9327}
9328
/// Formats a `(source, destination)` socket address pair for trace output.
///
/// Renders nothing at all when either address is unspecified (e.g.
/// `0.0.0.0`), since such a tuple carries no useful information.
struct AddrTupleFmt(SocketAddr, SocketAddr);

impl std::fmt::Display for AddrTupleFmt {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let (src, dst) = (&self.0, &self.1);

        if !src.ip().is_unspecified() && !dst.ip().is_unspecified() {
            f.write_fmt(format_args!("src:{src} dst:{dst}"))?;
        }

        Ok(())
    }
}
9342
/// Statistics about the connection.
///
/// A connection's statistics can be collected using the [`stats()`] method.
///
/// [`stats()`]: struct.Connection.html#method.stats
#[derive(Clone, Default)]
pub struct Stats {
    /// The number of QUIC packets received.
    pub recv: usize,

    /// The number of QUIC packets sent.
    pub sent: usize,

    /// The number of QUIC packets that were lost.
    pub lost: usize,

    /// The number of QUIC packets that were marked as lost but later acked.
    pub spurious_lost: usize,

    /// The number of sent QUIC packets with retransmitted data.
    pub retrans: usize,

    /// The number of sent bytes.
    pub sent_bytes: u64,

    /// The number of received bytes.
    pub recv_bytes: u64,

    /// The number of sent bytes that were acked.
    pub acked_bytes: u64,

    /// The number of sent bytes that were declared lost.
    pub lost_bytes: u64,

    /// The number of stream bytes retransmitted.
    pub stream_retrans_bytes: u64,

    /// The number of DATAGRAM frames received.
    pub dgram_recv: usize,

    /// The number of DATAGRAM frames sent.
    pub dgram_sent: usize,

    /// The number of known paths for the connection.
    pub paths_count: usize,

    /// The number of streams reset by local.
    pub reset_stream_count_local: u64,

    /// The number of streams stopped by local.
    pub stopped_stream_count_local: u64,

    /// The number of streams reset by remote.
    pub reset_stream_count_remote: u64,

    /// The number of streams stopped by remote.
    pub stopped_stream_count_remote: u64,

    /// The number of DATA_BLOCKED frames sent due to hitting the connection
    /// flow control limit.
    pub data_blocked_sent_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames sent due to a stream hitting
    /// the stream flow control limit.
    pub stream_data_blocked_sent_count: u64,

    /// The number of DATA_BLOCKED frames received from the remote.
    pub data_blocked_recv_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames received from the remote.
    pub stream_data_blocked_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames for bidirectional streams received
    /// from the remote, indicating the peer is blocked on opening new
    /// bidirectional streams.
    pub streams_blocked_bidi_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames for unidirectional streams received
    /// from the remote, indicating the peer is blocked on opening new
    /// unidirectional streams.
    pub streams_blocked_uni_recv_count: u64,

    /// The total number of PATH_CHALLENGE frames that were received.
    pub path_challenge_rx_count: u64,

    /// The number of times send() was blocked because the anti-amplification
    /// budget (bytes received × max_amplification_factor) was exhausted.
    pub amplification_limited_count: u64,

    /// Total duration during which this side of the connection was
    /// actively sending bytes or waiting for those bytes to be acked.
    pub bytes_in_flight_duration: Duration,

    /// Health state of the connection's tx_buffered accounting.
    pub tx_buffered_state: TxBufferTrackingState,
}
9439
impl std::fmt::Debug for Stats {
    #[inline]
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // Packet-level counters first.
        write!(
            f,
            "recv={} sent={} lost={} retrans={}",
            self.recv, self.sent, self.lost, self.retrans,
        )?;

        // Byte-level counters; the leading space continues the same line.
        write!(
            f,
            " sent_bytes={} recv_bytes={} lost_bytes={}",
            self.sent_bytes, self.recv_bytes, self.lost_bytes,
        )?;

        Ok(())
    }
}
9458
9459#[doc(hidden)]
9460#[cfg(any(test, feature = "internal"))]
9461pub mod test_utils;
9462
9463#[cfg(test)]
9464mod tests;
9465
9466pub use crate::packet::ConnectionId;
9467pub use crate::packet::Header;
9468pub use crate::packet::Type;
9469
9470pub use crate::path::PathEvent;
9471pub use crate::path::PathStats;
9472pub use crate::path::SocketAddrIter;
9473
9474pub use crate::recovery::BbrBwLoReductionStrategy;
9475pub use crate::recovery::BbrParams;
9476pub use crate::recovery::CongestionControlAlgorithm;
9477pub use crate::recovery::StartupExit;
9478pub use crate::recovery::StartupExitReason;
9479
9480pub use crate::stream::StreamIter;
9481
9482pub use crate::transport_params::TransportParams;
9483pub use crate::transport_params::UnknownTransportParameter;
9484pub use crate::transport_params::UnknownTransportParameterIterator;
9485pub use crate::transport_params::UnknownTransportParameters;
9486
9487pub use crate::buffers::BufFactory;
9488pub use crate::buffers::BufSplit;
9489
9490pub use crate::error::ConnectionError;
9491pub use crate::error::Error;
9492pub use crate::error::Result;
9493pub use crate::error::WireErrorCode;
9494
9495mod buffers;
9496mod cid;
9497mod crypto;
9498mod dgram;
9499mod error;
9500#[cfg(feature = "ffi")]
9501mod ffi;
9502mod flowcontrol;
9503mod frame;
9504pub mod h3;
9505mod minmax;
9506mod packet;
9507mod path;
9508mod pmtud;
9509mod rand;
9510mod range_buf;
9511mod ranges;
9512mod recovery;
9513mod stream;
9514mod tls;
9515mod transport_params;