// quiche/lib.rs
1// Copyright (C) 2018-2019, Cloudflare, Inc.
2// All rights reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// * Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// * Redistributions in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
16// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
19// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27//! 🥧 Savoury implementation of the QUIC transport protocol and HTTP/3.
28//!
29//! [quiche] is an implementation of the QUIC transport protocol and HTTP/3 as
30//! specified by the [IETF]. It provides a low level API for processing QUIC
31//! packets and handling connection state. The application is responsible for
32//! providing I/O (e.g. sockets handling) as well as an event loop with support
33//! for timers.
34//!
35//! [quiche]: https://github.com/cloudflare/quiche/
//! [IETF]: https://quicwg.org/
37//!
38//! ## Configuring connections
39//!
40//! The first step in establishing a QUIC connection using quiche is creating a
41//! [`Config`] object:
42//!
43//! ```
44//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
45//! config.set_application_protos(&[b"example-proto"]);
46//!
47//! // Additional configuration specific to application and use case...
48//! # Ok::<(), quiche::Error>(())
49//! ```
50//!
51//! The [`Config`] object controls important aspects of the QUIC connection such
52//! as QUIC version, ALPN IDs, flow control, congestion control, idle timeout
53//! and other properties or features.
54//!
55//! QUIC is a general-purpose transport protocol and there are several
56//! configuration properties where there is no reasonable default value. For
57//! example, the permitted number of concurrent streams of any particular type
58//! is dependent on the application running over QUIC, and other use-case
59//! specific concerns.
60//!
61//! quiche defaults several properties to zero, applications most likely need
62//! to set these to something else to satisfy their needs using the following:
63//!
64//! - [`set_initial_max_streams_bidi()`]
65//! - [`set_initial_max_streams_uni()`]
66//! - [`set_initial_max_data()`]
67//! - [`set_initial_max_stream_data_bidi_local()`]
68//! - [`set_initial_max_stream_data_bidi_remote()`]
69//! - [`set_initial_max_stream_data_uni()`]
70//!
71//! [`Config`] also holds TLS configuration. This can be changed by mutators on
//! an existing object, or by constructing a TLS context manually and
73//! creating a configuration using [`with_boring_ssl_ctx_builder()`].
74//!
75//! A configuration object can be shared among multiple connections.
76//!
77//! ### Connection setup
78//!
79//! On the client-side the [`connect()`] utility function can be used to create
80//! a new connection, while [`accept()`] is for servers:
81//!
82//! ```
83//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
84//! # let server_name = "quic.tech";
85//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
86//! # let peer = "127.0.0.1:1234".parse().unwrap();
87//! # let local = "127.0.0.1:4321".parse().unwrap();
88//! // Client connection.
89//! let conn =
90//! quiche::connect(Some(&server_name), &scid, local, peer, &mut config)?;
91//!
92//! // Server connection.
93//! # let peer = "127.0.0.1:1234".parse().unwrap();
94//! # let local = "127.0.0.1:4321".parse().unwrap();
95//! let conn = quiche::accept(&scid, None, local, peer, &mut config)?;
96//! # Ok::<(), quiche::Error>(())
97//! ```
98//!
99//! In both cases, the application is responsible for generating a new source
100//! connection ID that will be used to identify the new connection.
101//!
//! The application also needs to pass the address of the remote peer of the
103//! connection: in the case of a client that would be the address of the server
104//! it is trying to connect to, and for a server that is the address of the
105//! client that initiated the connection.
106//!
107//! ## Handling incoming packets
108//!
109//! Using the connection's [`recv()`] method the application can process
110//! incoming packets that belong to that connection from the network:
111//!
112//! ```no_run
113//! # let mut buf = [0; 512];
114//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
115//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
116//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
117//! # let peer = "127.0.0.1:1234".parse().unwrap();
118//! # let local = "127.0.0.1:4321".parse().unwrap();
119//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
120//! let to = socket.local_addr().unwrap();
121//!
122//! loop {
123//! let (read, from) = socket.recv_from(&mut buf).unwrap();
124//!
125//! let recv_info = quiche::RecvInfo { from, to };
126//!
127//! let read = match conn.recv(&mut buf[..read], recv_info) {
128//! Ok(v) => v,
129//!
130//! Err(quiche::Error::Done) => {
131//! // Done reading.
132//! break;
133//! },
134//!
135//! Err(e) => {
136//! // An error occurred, handle it.
137//! break;
138//! },
139//! };
140//! }
141//! # Ok::<(), quiche::Error>(())
142//! ```
143//!
144//! The application has to pass a [`RecvInfo`] structure in order to provide
145//! additional information about the received packet (such as the address it
146//! was received from).
147//!
148//! ## Generating outgoing packets
149//!
//! Outgoing packets are generated using the connection's [`send()`] method
151//! instead:
152//!
153//! ```no_run
154//! # let mut out = [0; 512];
155//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
156//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
157//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
158//! # let peer = "127.0.0.1:1234".parse().unwrap();
159//! # let local = "127.0.0.1:4321".parse().unwrap();
160//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
161//! loop {
162//! let (write, send_info) = match conn.send(&mut out) {
163//! Ok(v) => v,
164//!
165//! Err(quiche::Error::Done) => {
166//! // Done writing.
167//! break;
168//! },
169//!
170//! Err(e) => {
171//! // An error occurred, handle it.
172//! break;
173//! },
174//! };
175//!
176//! socket.send_to(&out[..write], &send_info.to).unwrap();
177//! }
178//! # Ok::<(), quiche::Error>(())
179//! ```
180//!
181//! The application will be provided with a [`SendInfo`] structure providing
182//! additional information about the newly created packet (such as the address
183//! the packet should be sent to).
184//!
185//! When packets are sent, the application is responsible for maintaining a
186//! timer to react to time-based connection events. The timer expiration can be
187//! obtained using the connection's [`timeout()`] method.
188//!
189//! ```
190//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
191//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
192//! # let peer = "127.0.0.1:1234".parse().unwrap();
193//! # let local = "127.0.0.1:4321".parse().unwrap();
194//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
195//! let timeout = conn.timeout();
196//! # Ok::<(), quiche::Error>(())
197//! ```
198//!
199//! The application is responsible for providing a timer implementation, which
200//! can be specific to the operating system or networking framework used. When
201//! a timer expires, the connection's [`on_timeout()`] method should be called,
202//! after which additional packets might need to be sent on the network:
203//!
204//! ```no_run
205//! # let mut out = [0; 512];
206//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
207//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
208//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
209//! # let peer = "127.0.0.1:1234".parse().unwrap();
210//! # let local = "127.0.0.1:4321".parse().unwrap();
211//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
212//! // Timeout expired, handle it.
213//! conn.on_timeout();
214//!
215//! // Send more packets as needed after timeout.
216//! loop {
217//! let (write, send_info) = match conn.send(&mut out) {
218//! Ok(v) => v,
219//!
220//! Err(quiche::Error::Done) => {
221//! // Done writing.
222//! break;
223//! },
224//!
225//! Err(e) => {
226//! // An error occurred, handle it.
227//! break;
228//! },
229//! };
230//!
231//! socket.send_to(&out[..write], &send_info.to).unwrap();
232//! }
233//! # Ok::<(), quiche::Error>(())
234//! ```
235//!
236//! ### Pacing
237//!
238//! It is recommended that applications [pace] sending of outgoing packets to
239//! avoid creating packet bursts that could cause short-term congestion and
240//! losses in the network.
241//!
242//! quiche exposes pacing hints for outgoing packets through the [`at`] field
243//! of the [`SendInfo`] structure that is returned by the [`send()`] method.
244//! This field represents the time when a specific packet should be sent into
245//! the network.
246//!
247//! Applications can use these hints by artificially delaying the sending of
248//! packets through platform-specific mechanisms (such as the [`SO_TXTIME`]
249//! socket option on Linux), or custom methods (for example by using user-space
250//! timers).
251//!
252//! [pace]: https://datatracker.ietf.org/doc/html/rfc9002#section-7.7
253//! [`SO_TXTIME`]: https://man7.org/linux/man-pages/man8/tc-etf.8.html
254//!
255//! ## Sending and receiving stream data
256//!
257//! After some back and forth, the connection will complete its handshake and
258//! will be ready for sending or receiving application data.
259//!
260//! Data can be sent on a stream by using the [`stream_send()`] method:
261//!
262//! ```no_run
263//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
264//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
265//! # let peer = "127.0.0.1:1234".parse().unwrap();
266//! # let local = "127.0.0.1:4321".parse().unwrap();
267//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
268//! if conn.is_established() {
269//! // Handshake completed, send some data on stream 0.
270//! conn.stream_send(0, b"hello", true)?;
271//! }
272//! # Ok::<(), quiche::Error>(())
273//! ```
274//!
275//! The application can check whether there are any readable streams by using
276//! the connection's [`readable()`] method, which returns an iterator over all
277//! the streams that have outstanding data to read.
278//!
279//! The [`stream_recv()`] method can then be used to retrieve the application
280//! data from the readable stream:
281//!
282//! ```no_run
283//! # let mut buf = [0; 512];
284//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
285//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
286//! # let peer = "127.0.0.1:1234".parse().unwrap();
287//! # let local = "127.0.0.1:4321".parse().unwrap();
288//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
289//! if conn.is_established() {
290//! // Iterate over readable streams.
291//! for stream_id in conn.readable() {
292//! // Stream is readable, read until there's no more data.
293//! while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
294//! println!("Got {} bytes on stream {}", read, stream_id);
295//! }
296//! }
297//! }
298//! # Ok::<(), quiche::Error>(())
299//! ```
300//!
301//! ## HTTP/3
302//!
303//! The quiche [HTTP/3 module] provides a high level API for sending and
304//! receiving HTTP requests and responses on top of the QUIC transport protocol.
305//!
306//! [`Config`]: https://docs.quic.tech/quiche/struct.Config.html
307//! [`set_initial_max_streams_bidi()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_streams_bidi
308//! [`set_initial_max_streams_uni()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_streams_uni
309//! [`set_initial_max_data()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_data
310//! [`set_initial_max_stream_data_bidi_local()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_bidi_local
311//! [`set_initial_max_stream_data_bidi_remote()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_bidi_remote
312//! [`set_initial_max_stream_data_uni()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_uni
313//! [`with_boring_ssl_ctx_builder()`]: https://docs.quic.tech/quiche/struct.Config.html#method.with_boring_ssl_ctx_builder
314//! [`connect()`]: fn.connect.html
315//! [`accept()`]: fn.accept.html
316//! [`recv()`]: struct.Connection.html#method.recv
317//! [`RecvInfo`]: struct.RecvInfo.html
318//! [`send()`]: struct.Connection.html#method.send
319//! [`SendInfo`]: struct.SendInfo.html
320//! [`at`]: struct.SendInfo.html#structfield.at
321//! [`timeout()`]: struct.Connection.html#method.timeout
322//! [`on_timeout()`]: struct.Connection.html#method.on_timeout
323//! [`stream_send()`]: struct.Connection.html#method.stream_send
324//! [`readable()`]: struct.Connection.html#method.readable
325//! [`stream_recv()`]: struct.Connection.html#method.stream_recv
326//! [HTTP/3 module]: h3/index.html
327//!
328//! ## Congestion Control
329//!
330//! The quiche library provides a high-level API for configuring which
331//! congestion control algorithm to use throughout the QUIC connection.
332//!
333//! When a QUIC connection is created, the application can optionally choose
334//! which CC algorithm to use. See [`CongestionControlAlgorithm`] for currently
335//! available congestion control algorithms.
336//!
337//! For example:
338//!
339//! ```
340//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
341//! config.set_cc_algorithm(quiche::CongestionControlAlgorithm::Reno);
342//! ```
343//!
344//! Alternatively, you can configure the congestion control algorithm to use
345//! by its name.
346//!
347//! ```
348//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
349//! config.set_cc_algorithm_name("reno").unwrap();
350//! ```
351//!
352//! Note that the CC algorithm should be configured before calling [`connect()`]
353//! or [`accept()`]. Otherwise the connection will use a default CC algorithm.
354//!
355//! [`CongestionControlAlgorithm`]: enum.CongestionControlAlgorithm.html
356//!
357//! ## Feature flags
358//!
359//! quiche defines a number of [feature flags] to reduce the amount of compiled
360//! code and dependencies:
361//!
362//! * `boringssl-vendored` (default): Build the vendored BoringSSL library.
363//!
364//! * `boringssl-boring-crate`: Use the BoringSSL library provided by the
365//! [boring] crate. It takes precedence over `boringssl-vendored` if both
366//! features are enabled.
367//!
368//! * `pkg-config-meta`: Generate pkg-config metadata file for libquiche.
369//!
370//! * `ffi`: Build and expose the FFI API.
371//!
372//! * `qlog`: Enable support for the [qlog] logging format.
373//!
374//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section
375//! [boring]: https://crates.io/crates/boring
376//! [qlog]: https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema
377
378#![allow(clippy::upper_case_acronyms)]
379#![warn(missing_docs)]
380#![warn(unused_qualifications)]
381#![cfg_attr(docsrs, feature(doc_cfg))]
382
383#[macro_use]
384extern crate log;
385
386use std::cmp;
387
388use std::collections::VecDeque;
389
390use debug_panic::debug_panic;
391
392use std::net::SocketAddr;
393
394use std::str::FromStr;
395
396use std::sync::Arc;
397
398use std::time::Duration;
399use std::time::Instant;
400
401#[cfg(feature = "qlog")]
402use qlog::events::quic::DataMovedAdditionalInfo;
403#[cfg(feature = "qlog")]
404use qlog::events::quic::QuicEventType;
405#[cfg(feature = "qlog")]
406use qlog::events::quic::TransportInitiator;
407#[cfg(feature = "qlog")]
408use qlog::events::DataRecipient;
409#[cfg(feature = "qlog")]
410use qlog::events::Event;
411#[cfg(feature = "qlog")]
412use qlog::events::EventData;
413#[cfg(feature = "qlog")]
414use qlog::events::EventImportance;
415#[cfg(feature = "qlog")]
416use qlog::events::EventType;
417#[cfg(feature = "qlog")]
418use qlog::events::RawInfo;
419
420use smallvec::SmallVec;
421
422use crate::buffers::DefaultBufFactory;
423
424use crate::recovery::OnAckReceivedOutcome;
425use crate::recovery::OnLossDetectionTimeoutOutcome;
426use crate::recovery::RecoveryOps;
427use crate::recovery::ReleaseDecision;
428
429use crate::stream::RecvAction;
430use crate::stream::StreamPriorityKey;
431
/// The current QUIC wire version.
pub const PROTOCOL_VERSION: u32 = PROTOCOL_VERSION_V1;

/// Supported QUIC versions.
const PROTOCOL_VERSION_V1: u32 = 0x0000_0001;

/// The maximum length of a connection ID.
pub const MAX_CONN_ID_LEN: usize = packet::MAX_CID_LEN as usize;

/// The minimum length of Initial packets sent by a client.
pub const MIN_CLIENT_INITIAL_LEN: usize = 1200;

/// The default initial RTT.
///
/// See `Config::set_initial_rtt()` to override it.
const DEFAULT_INITIAL_RTT: Duration = Duration::from_millis(333);

// The minimum allowed packet payload length.
//
// NOTE(review): usage is not visible in this chunk — confirm the exact
// meaning before relying on it.
const PAYLOAD_MIN_LEN: usize = 4;

// PATH_CHALLENGE (9 bytes) + AEAD tag (16 bytes).
const MIN_PROBING_SIZE: usize = 25;

// The default anti-amplification limit factor (see
// `Config::set_max_amplification_factor()`).
const MAX_AMPLIFICATION_FACTOR: usize = 3;

// The maximum number of tracked packet number ranges that need to be acked.
//
// This represents more or less how many ack blocks can fit in a typical packet.
const MAX_ACK_RANGES: usize = 68;

// The highest possible stream ID allowed.
const MAX_STREAM_ID: u64 = 1 << 60;

// The default max_datagram_size used in congestion control.
const MAX_SEND_UDP_PAYLOAD_SIZE: usize = 1200;

// The default length of DATAGRAM queues.
const DEFAULT_MAX_DGRAM_QUEUE_LEN: usize = 0;

// The default length of PATH_CHALLENGE receive queue.
const DEFAULT_MAX_PATH_CHALLENGE_RX_QUEUE_LEN: usize = 3;

// The DATAGRAM standard recommends either none or 65536 as maximum DATAGRAM
// frame size. We enforce the recommendation for forward compatibility.
const MAX_DGRAM_FRAME_SIZE: u64 = 65536;

// The length of the payload length field.
const PAYLOAD_LENGTH_LEN: usize = 2;

// The number of undecryptable packets that can be buffered.
const MAX_UNDECRYPTABLE_PACKETS: usize = 10;

// Bit mask used by `is_reserved_version()` to classify reserved QUIC
// versions (see RFC 9000 Section 15).
const RESERVED_VERSION_MASK: u32 = 0xfafafafa;

// The default size of the receiver connection flow control window.
const DEFAULT_CONNECTION_WINDOW: u64 = 48 * 1024;

// The maximum size of the receiver connection flow control window.
const MAX_CONNECTION_WINDOW: u64 = 24 * 1024 * 1024;

// How much larger the connection flow control window needs to be than
// the stream flow control window.
const CONNECTION_WINDOW_FACTOR: f64 = 1.5;

// How many probing packet timeouts do we tolerate before considering the path
// validation as failed.
const MAX_PROBING_TIMEOUTS: usize = 3;

// The default initial congestion window size in terms of packet count.
const DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS: usize = 10;

// The maximum data offset that can be stored in a crypto stream.
const MAX_CRYPTO_STREAM_OFFSET: u64 = 1 << 16;

// The default send capacity factor (see
// `Config::set_send_capacity_factor()`).
const TX_CAP_FACTOR: f64 = 1.0;
505
/// Ancillary information about incoming packets.
///
/// This is passed to the connection's `recv()` method to describe where a
/// packet came from and on which local address it arrived.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct RecvInfo {
    /// The remote address the packet was received from.
    pub from: SocketAddr,

    /// The local address the packet was received on.
    pub to: SocketAddr,
}
515
/// Ancillary information about outgoing packets.
///
/// This is returned by the connection's `send()` method alongside the number
/// of bytes written.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SendInfo {
    /// The local address the packet should be sent from.
    pub from: SocketAddr,

    /// The remote address the packet should be sent to.
    pub to: SocketAddr,

    /// The time to send the packet out.
    ///
    /// See [Pacing] for more details.
    ///
    /// [Pacing]: index.html#pacing
    pub at: Instant,
}
532
/// The side of the stream to be shut down.
///
/// This should be used when calling [`stream_shutdown()`].
///
/// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
//
// NOTE(review): `repr(C)` with explicit discriminants — presumably so the
// values stay stable across the FFI API; confirm before changing them.
#[repr(C)]
#[derive(PartialEq, Eq)]
pub enum Shutdown {
    /// Stop receiving stream data.
    Read = 0,

    /// Stop sending stream data.
    Write = 1,
}
547
/// Qlog logging level.
///
/// Each level is a superset of the previous one: `Base` includes `Core`
/// events, and `Extra` includes both.
//
// NOTE(review): `repr(C)` — presumably for the FFI API; confirm before
// changing discriminant values.
#[repr(C)]
#[cfg(feature = "qlog")]
#[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
pub enum QlogLevel {
    /// Logs any events of Core importance.
    Core = 0,

    /// Logs any events of Core and Base importance.
    Base = 1,

    /// Logs any events of Core, Base and Extra importance.
    Extra = 2,
}
562
/// Stores configuration shared between multiple connections.
pub struct Config {
    /// QUIC transport parameters advertised by this endpoint
    /// (populated via the `set_initial_max_*` and related setters).
    local_transport_params: TransportParams,

    /// Wire version selected when the config was created.
    version: u32,

    /// TLS settings (certificates, keys, ALPN, keylog, session tickets).
    tls_ctx: tls::Context,

    /// ALPN protocol IDs; also configured on `tls_ctx` by
    /// `set_application_protos()`.
    application_protos: Vec<Vec<u8>>,

    /// Whether to send GREASE values (see `grease()`).
    grease: bool,

    // Congestion control and loss recovery tuning.
    cc_algorithm: CongestionControlAlgorithm,
    custom_bbr_params: Option<BbrParams>,
    initial_congestion_window_packets: usize,
    enable_relaxed_loss_threshold: bool,
    enable_cubic_idle_restart_fix: bool,
    enable_send_streams_blocked: bool,

    // Path MTU discovery: feature toggle and per-size probe attempt limit.
    pmtud: bool,
    pmtud_max_probes: u8,

    /// Whether HyStart slow start is enabled.
    hystart: bool,

    /// Whether pacing of outgoing packets is enabled.
    pacing: bool,
    /// Send rate limit in Mbps
    max_pacing_rate: Option<u64>,

    /// Send capacity factor (see `set_send_capacity_factor()`).
    tx_cap_factor: f64,

    // Maximum lengths of the DATAGRAM receive and send queues.
    dgram_recv_max_queue_len: usize,
    dgram_send_max_queue_len: usize,

    /// Maximum length of the PATH_CHALLENGE receive queue.
    path_challenge_recv_max_queue_len: usize,

    /// Maximum size of outgoing UDP payloads.
    max_send_udp_payload_size: usize,

    // Upper bounds for the connection and per-stream flow control windows.
    max_connection_window: u64,
    max_stream_window: u64,

    /// Anti-amplification limit factor (see
    /// `set_max_amplification_factor()`).
    max_amplification_factor: usize,

    /// Whether reuse of destination connection IDs is disabled.
    disable_dcid_reuse: bool,

    /// When set, unknown transport parameters are tracked.
    /// NOTE(review): the `usize` presumably bounds how much is kept —
    /// confirm against the consuming code.
    track_unknown_transport_params: Option<usize>,

    /// Initial RTT used for new connections (see `set_initial_rtt()`).
    initial_rtt: Duration,

    /// When true, uses the initial max data (for connection
    /// and stream) as the initial flow control window.
    use_initial_max_data_as_flow_control_win: bool,
}
615
616// See https://quicwg.org/base-drafts/rfc9000.html#section-15
617fn is_reserved_version(version: u32) -> bool {
618 version & RESERVED_VERSION_MASK == version
619}
620
621impl Config {
622 /// Creates a config object with the given version.
623 ///
624 /// ## Examples:
625 ///
626 /// ```
627 /// let config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
628 /// # Ok::<(), quiche::Error>(())
629 /// ```
630 pub fn new(version: u32) -> Result<Config> {
631 Self::with_tls_ctx(version, tls::Context::new()?)
632 }
633
634 /// Creates a config object with the given version and
635 /// [`SslContextBuilder`].
636 ///
637 /// This is useful for applications that wish to manually configure
638 /// [`SslContextBuilder`].
639 ///
640 /// [`SslContextBuilder`]: https://docs.rs/boring/latest/boring/ssl/struct.SslContextBuilder.html
641 #[cfg(feature = "boringssl-boring-crate")]
642 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
643 pub fn with_boring_ssl_ctx_builder(
644 version: u32, tls_ctx_builder: boring::ssl::SslContextBuilder,
645 ) -> Result<Config> {
646 Self::with_tls_ctx(version, tls::Context::from_boring(tls_ctx_builder))
647 }
648
649 fn with_tls_ctx(version: u32, tls_ctx: tls::Context) -> Result<Config> {
650 if !is_reserved_version(version) && !version_is_supported(version) {
651 return Err(Error::UnknownVersion);
652 }
653
654 Ok(Config {
655 local_transport_params: TransportParams::default(),
656 version,
657 tls_ctx,
658 application_protos: Vec::new(),
659 grease: true,
660 cc_algorithm: CongestionControlAlgorithm::CUBIC,
661 custom_bbr_params: None,
662 initial_congestion_window_packets:
663 DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS,
664 enable_relaxed_loss_threshold: false,
665 enable_cubic_idle_restart_fix: true,
666 enable_send_streams_blocked: false,
667 pmtud: false,
668 pmtud_max_probes: pmtud::MAX_PROBES_DEFAULT,
669 hystart: true,
670 pacing: true,
671 max_pacing_rate: None,
672
673 tx_cap_factor: TX_CAP_FACTOR,
674
675 dgram_recv_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,
676 dgram_send_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,
677
678 path_challenge_recv_max_queue_len:
679 DEFAULT_MAX_PATH_CHALLENGE_RX_QUEUE_LEN,
680
681 max_send_udp_payload_size: MAX_SEND_UDP_PAYLOAD_SIZE,
682
683 max_connection_window: MAX_CONNECTION_WINDOW,
684 max_stream_window: stream::MAX_STREAM_WINDOW,
685
686 max_amplification_factor: MAX_AMPLIFICATION_FACTOR,
687
688 disable_dcid_reuse: false,
689
690 track_unknown_transport_params: None,
691 initial_rtt: DEFAULT_INITIAL_RTT,
692
693 use_initial_max_data_as_flow_control_win: false,
694 })
695 }
696
    /// Configures the given certificate chain.
    ///
    /// The content of `file` is parsed as a PEM-encoded leaf certificate,
    /// followed by optional intermediate certificates.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_cert_chain_from_pem_file("/path/to/cert.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_cert_chain_from_pem_file(&mut self, file: &str) -> Result<()> {
        // Certificate state is owned by the TLS context; delegate to it.
        self.tls_ctx.use_certificate_chain_file(file)
    }
712
    /// Configures the given private key.
    ///
    /// The content of `file` is parsed as a PEM-encoded private key.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_priv_key_from_pem_file("/path/to/key.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_priv_key_from_pem_file(&mut self, file: &str) -> Result<()> {
        // Key material is owned by the TLS context; delegate to it.
        self.tls_ctx.use_privkey_file(file)
    }
727
    /// Specifies a file where trusted CA certificates are stored for the
    /// purposes of certificate verification.
    ///
    /// The content of `file` is parsed as a PEM-encoded certificate chain.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_verify_locations_from_file("/path/to/cert.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_verify_locations_from_file(&mut self, file: &str) -> Result<()> {
        // Trust store handling is owned by the TLS context; delegate to it.
        self.tls_ctx.load_verify_locations_from_file(file)
    }
743
    /// Specifies a directory where trusted CA certificates are stored for the
    /// purposes of certificate verification.
    ///
    /// The content of `dir` is a set of PEM-encoded certificate chains.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_verify_locations_from_directory("/path/to/certs")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_verify_locations_from_directory(
        &mut self, dir: &str,
    ) -> Result<()> {
        // Trust store handling is owned by the TLS context; delegate to it.
        self.tls_ctx.load_verify_locations_from_directory(dir)
    }
761
    /// Configures whether to verify the peer's certificate.
    ///
    /// This should usually be `true` for client-side connections and `false`
    /// for server-side ones.
    ///
    /// Note that by default, no verification is performed.
    ///
    /// Also note that on the server-side, enabling verification of the peer
    /// will trigger a certificate request and make authentication errors
    /// fatal, but will still allow anonymous clients (i.e. clients that
    /// don't present a certificate at all). Servers can check whether a
    /// client presented a certificate by calling [`peer_cert()`] if they
    /// need to.
    ///
    /// [`peer_cert()`]: struct.Connection.html#method.peer_cert
    pub fn verify_peer(&mut self, verify: bool) {
        // The verification policy lives on the TLS context.
        self.tls_ctx.set_verify(verify);
    }
780
    /// Configures whether to do path MTU discovery.
    ///
    /// The default value is `false`.
    ///
    /// See also `set_pmtud_max_probes()` for tuning probe attempts.
    pub fn discover_pmtu(&mut self, discover: bool) {
        self.pmtud = discover;
    }
787
788 /// Configures the maximum number of PMTUD probe attempts before treating
789 /// a probe size as failed.
790 ///
791 /// Defaults to 3 per [RFC 8899 Section 5.1.2](https://datatracker.ietf.org/doc/html/rfc8899#section-5.1.2).
792 /// If 0 is passed, the default value is used.
793 pub fn set_pmtud_max_probes(&mut self, max_probes: u8) {
794 self.pmtud_max_probes = max_probes;
795 }
796
    /// Configures whether to send GREASE values.
    ///
    /// The default value is `true`.
    pub fn grease(&mut self, grease: bool) {
        // Only records the preference; GREASE values are injected elsewhere
        // (not visible in this chunk).
        self.grease = grease;
    }
803
    /// Enables logging of secrets.
    ///
    /// When logging is enabled, the [`set_keylog()`] method must be called on
    /// the connection for its cryptographic secrets to be logged in the
    /// [keylog] format to the specified writer.
    ///
    /// [`set_keylog()`]: struct.Connection.html#method.set_keylog
    /// [keylog]: https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format
    pub fn log_keys(&mut self) {
        // Only enables keylog capture here; the output writer is provided
        // per-connection via `set_keylog()`.
        self.tls_ctx.enable_keylog();
    }
815
    /// Configures the session ticket key material.
    ///
    /// On the server this key will be used to encrypt and decrypt session
    /// tickets, used to perform session resumption without server-side state.
    ///
    /// By default a key is generated internally, and rotated regularly, so
    /// applications don't need to call this unless they need to use a
    /// specific key (e.g. in order to support resumption across multiple
    /// servers), in which case the application is also responsible for
    /// rotating the key to provide forward secrecy.
    pub fn set_ticket_key(&mut self, key: &[u8]) -> Result<()> {
        // Ticket key handling is owned by the TLS context; delegate to it.
        self.tls_ctx.set_ticket_key(key)
    }
829
    /// Enables sending or receiving early data.
    pub fn enable_early_data(&mut self) {
        // One-way toggle: this API offers no way to disable early data again.
        self.tls_ctx.set_early_data_enabled(true);
    }
834
835 /// Configures the list of supported application protocols.
836 ///
837 /// On the client this configures the list of protocols to send to the
838 /// server as part of the ALPN extension.
839 ///
840 /// On the server this configures the list of supported protocols to match
841 /// against the client-supplied list.
842 ///
843 /// Applications must set a value, but no default is provided.
844 ///
845 /// ## Examples:
846 ///
847 /// ```
848 /// # let mut config = quiche::Config::new(0xbabababa)?;
849 /// config.set_application_protos(&[b"http/1.1", b"http/0.9"]);
850 /// # Ok::<(), quiche::Error>(())
851 /// ```
852 pub fn set_application_protos(
853 &mut self, protos_list: &[&[u8]],
854 ) -> Result<()> {
855 self.application_protos =
856 protos_list.iter().map(|s| s.to_vec()).collect();
857
858 self.tls_ctx.set_alpn(protos_list)
859 }
860
861 /// Configures the list of supported application protocols using wire
862 /// format.
863 ///
864 /// The list of protocols `protos` must be a series of non-empty, 8-bit
865 /// length-prefixed strings.
866 ///
867 /// See [`set_application_protos`](Self::set_application_protos) for more
868 /// background about application protocols.
869 ///
870 /// ## Examples:
871 ///
872 /// ```
873 /// # let mut config = quiche::Config::new(0xbabababa)?;
874 /// config.set_application_protos_wire_format(b"\x08http/1.1\x08http/0.9")?;
875 /// # Ok::<(), quiche::Error>(())
876 /// ```
877 pub fn set_application_protos_wire_format(
878 &mut self, protos: &[u8],
879 ) -> Result<()> {
880 let mut b = octets::Octets::with_slice(protos);
881
882 let mut protos_list = Vec::new();
883
884 while let Ok(proto) = b.get_bytes_with_u8_length() {
885 protos_list.push(proto.buf());
886 }
887
888 self.set_application_protos(&protos_list)
889 }
890
    /// Sets the anti-amplification limit factor.
    ///
    /// Before a peer's address is validated, the amount of data sent to it
    /// is limited to `v` times the number of bytes received from it
    /// (the amplification budget: bytes received × factor).
    ///
    /// The default value is `3`.
    pub fn set_max_amplification_factor(&mut self, v: usize) {
        self.max_amplification_factor = v;
    }
897
    /// Sets the send capacity factor.
    ///
    /// NOTE(review): presumably applied as a multiplier when computing the
    /// connection's send capacity (`tx_cap`) — confirm in the send path.
    ///
    /// The default value is `1`.
    pub fn set_send_capacity_factor(&mut self, v: f64) {
        self.tx_cap_factor = v;
    }
904
    /// Sets the connection's initial RTT.
    ///
    /// This is the RTT estimate used before any actual RTT samples are
    /// available.
    ///
    /// The default value is `333` milliseconds.
    pub fn set_initial_rtt(&mut self, v: Duration) {
        self.initial_rtt = v;
    }
911
912 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
913 ///
914 /// The default value is infinite, that is, no timeout is used.
915 pub fn set_max_idle_timeout(&mut self, v: u64) {
916 self.local_transport_params.max_idle_timeout =
917 cmp::min(v, octets::MAX_VAR_INT);
918 }
919
    /// Sets the `max_udp_payload_size` transport parameter.
    ///
    /// The default value is `65527`.
    pub fn set_max_recv_udp_payload_size(&mut self, v: usize) {
        self.local_transport_params.max_udp_payload_size =
            cmp::min(v as u64, octets::MAX_VAR_INT);
    }
927
928 /// Sets the maximum outgoing UDP payload size.
929 ///
930 /// The default and minimum value is `1200`.
931 pub fn set_max_send_udp_payload_size(&mut self, v: usize) {
932 self.max_send_udp_payload_size = cmp::max(v, MAX_SEND_UDP_PAYLOAD_SIZE);
933 }
934
935 /// Sets the `initial_max_data` transport parameter.
936 ///
937 /// When set to a non-zero value quiche will only allow at most `v` bytes of
938 /// incoming stream data to be buffered for the whole connection (that is,
939 /// data that is not yet read by the application) and will allow more data
940 /// to be received as the buffer is consumed by the application.
941 ///
942 /// When set to zero, either explicitly or via the default, quiche will not
943 /// give any flow control to the peer, preventing it from sending any stream
944 /// data.
945 ///
946 /// The default value is `0`.
947 pub fn set_initial_max_data(&mut self, v: u64) {
948 self.local_transport_params.initial_max_data =
949 cmp::min(v, octets::MAX_VAR_INT);
950 }
951
952 /// Sets the `initial_max_stream_data_bidi_local` transport parameter.
953 ///
954 /// When set to a non-zero value quiche will only allow at most `v` bytes
955 /// of incoming stream data to be buffered for each locally-initiated
956 /// bidirectional stream (that is, data that is not yet read by the
957 /// application) and will allow more data to be received as the buffer is
958 /// consumed by the application.
959 ///
960 /// When set to zero, either explicitly or via the default, quiche will not
961 /// give any flow control to the peer, preventing it from sending any stream
962 /// data.
963 ///
964 /// The default value is `0`.
965 pub fn set_initial_max_stream_data_bidi_local(&mut self, v: u64) {
966 self.local_transport_params
967 .initial_max_stream_data_bidi_local =
968 cmp::min(v, octets::MAX_VAR_INT);
969 }
970
971 /// Sets the `initial_max_stream_data_bidi_remote` transport parameter.
972 ///
973 /// When set to a non-zero value quiche will only allow at most `v` bytes
974 /// of incoming stream data to be buffered for each remotely-initiated
975 /// bidirectional stream (that is, data that is not yet read by the
976 /// application) and will allow more data to be received as the buffer is
977 /// consumed by the application.
978 ///
979 /// When set to zero, either explicitly or via the default, quiche will not
980 /// give any flow control to the peer, preventing it from sending any stream
981 /// data.
982 ///
983 /// The default value is `0`.
984 pub fn set_initial_max_stream_data_bidi_remote(&mut self, v: u64) {
985 self.local_transport_params
986 .initial_max_stream_data_bidi_remote =
987 cmp::min(v, octets::MAX_VAR_INT);
988 }
989
990 /// Sets the `initial_max_stream_data_uni` transport parameter.
991 ///
992 /// When set to a non-zero value quiche will only allow at most `v` bytes
993 /// of incoming stream data to be buffered for each unidirectional stream
994 /// (that is, data that is not yet read by the application) and will allow
995 /// more data to be received as the buffer is consumed by the application.
996 ///
997 /// When set to zero, either explicitly or via the default, quiche will not
998 /// give any flow control to the peer, preventing it from sending any stream
999 /// data.
1000 ///
1001 /// The default value is `0`.
1002 pub fn set_initial_max_stream_data_uni(&mut self, v: u64) {
1003 self.local_transport_params.initial_max_stream_data_uni =
1004 cmp::min(v, octets::MAX_VAR_INT);
1005 }
1006
    /// Sets the `initial_max_streams_bidi` transport parameter.
    ///
    /// When set to a non-zero value quiche will only allow `v` number of
    /// concurrent remotely-initiated bidirectional streams to be open at any
    /// given time and will increase the limit automatically as streams are
    /// completed.
    ///
    /// When set to zero, either explicitly or via the default, quiche will
    /// not allow the peer to open any bidirectional streams.
    ///
    /// A bidirectional stream is considered completed when all incoming data
    /// has been read by the application (up to the `fin` offset) or the
    /// stream's read direction has been shutdown, and all outgoing data has
    /// been acked by the peer (up to the `fin` offset) or the stream's write
    /// direction has been shutdown.
    ///
    /// The default value is `0`.
    pub fn set_initial_max_streams_bidi(&mut self, v: u64) {
        self.local_transport_params.initial_max_streams_bidi =
            cmp::min(v, octets::MAX_VAR_INT);
    }
1028
    /// Sets the `initial_max_streams_uni` transport parameter.
    ///
    /// When set to a non-zero value quiche will only allow `v` number of
    /// concurrent remotely-initiated unidirectional streams to be open at any
    /// given time and will increase the limit automatically as streams are
    /// completed.
    ///
    /// When set to zero, either explicitly or via the default, quiche will
    /// not allow the peer to open any unidirectional streams.
    ///
    /// A unidirectional stream is considered completed when all incoming data
    /// has been read by the application (up to the `fin` offset) or the
    /// stream's read direction has been shutdown.
    ///
    /// The default value is `0`.
    pub fn set_initial_max_streams_uni(&mut self, v: u64) {
        self.local_transport_params.initial_max_streams_uni =
            cmp::min(v, octets::MAX_VAR_INT);
    }
1048
1049 /// Sets the `ack_delay_exponent` transport parameter.
1050 ///
1051 /// The default value is `3`.
1052 pub fn set_ack_delay_exponent(&mut self, v: u64) {
1053 self.local_transport_params.ack_delay_exponent =
1054 cmp::min(v, octets::MAX_VAR_INT);
1055 }
1056
1057 /// Sets the `max_ack_delay` transport parameter.
1058 ///
1059 /// The default value is `25`.
1060 pub fn set_max_ack_delay(&mut self, v: u64) {
1061 self.local_transport_params.max_ack_delay =
1062 cmp::min(v, octets::MAX_VAR_INT);
1063 }
1064
1065 /// Sets the `active_connection_id_limit` transport parameter.
1066 ///
1067 /// The default value is `2`. Lower values will be ignored.
1068 pub fn set_active_connection_id_limit(&mut self, v: u64) {
1069 if v >= 2 {
1070 self.local_transport_params.active_conn_id_limit =
1071 cmp::min(v, octets::MAX_VAR_INT);
1072 }
1073 }
1074
    /// Sets the `disable_active_migration` transport parameter.
    ///
    /// The value is advertised to the peer as part of this endpoint's
    /// transport parameters.
    ///
    /// The default value is `false`.
    pub fn set_disable_active_migration(&mut self, v: bool) {
        self.local_transport_params.disable_active_migration = v;
    }
1081
    /// Sets the congestion control algorithm used.
    ///
    /// See also [`set_cc_algorithm_name`](Self::set_cc_algorithm_name) for
    /// selecting the algorithm by string.
    ///
    /// The default value is `CongestionControlAlgorithm::CUBIC`.
    pub fn set_cc_algorithm(&mut self, algo: CongestionControlAlgorithm) {
        self.cc_algorithm = algo;
    }
1088
    /// Sets custom BBR settings.
    ///
    /// This API is experimental and will be removed in the future.
    ///
    /// Currently this only takes effect when `cc_algorithm` is set to
    /// `CongestionControlAlgorithm::Bbr2Gcongestion`.
    ///
    /// The default value is `None`.
    #[cfg(feature = "internal")]
    #[doc(hidden)]
    pub fn set_custom_bbr_params(&mut self, custom_bbr_settings: BbrParams) {
        self.custom_bbr_params = Some(custom_bbr_settings);
    }
1102
1103 /// Sets the congestion control algorithm used by string.
1104 ///
1105 /// The default value is `cubic`. On error `Error::CongestionControl`
1106 /// will be returned.
1107 ///
1108 /// ## Examples:
1109 ///
1110 /// ```
1111 /// # let mut config = quiche::Config::new(0xbabababa)?;
1112 /// config.set_cc_algorithm_name("reno");
1113 /// # Ok::<(), quiche::Error>(())
1114 /// ```
1115 pub fn set_cc_algorithm_name(&mut self, name: &str) -> Result<()> {
1116 self.cc_algorithm = CongestionControlAlgorithm::from_str(name)?;
1117
1118 Ok(())
1119 }
1120
    /// Sets the initial congestion window size in terms of packet count.
    ///
    /// The default value is `10`.
    pub fn set_initial_congestion_window_packets(&mut self, packets: usize) {
        self.initial_congestion_window_packets = packets;
    }
1127
    /// Configures whether to enable relaxed loss detection on spurious loss.
    ///
    /// The default value is `false`.
    pub fn set_enable_relaxed_loss_threshold(&mut self, enable: bool) {
        self.enable_relaxed_loss_threshold = enable;
    }
1134
    /// Configures whether to enable the CUBIC idle restart fix.
    ///
    /// When enabled, the epoch shift on idle restart uses the later of
    /// the last ACK time and last send time, avoiding an inflated delta
    /// when bytes-in-flight transiently hits zero.
    ///
    /// The default value is `true`.
    pub fn set_enable_cubic_idle_restart_fix(&mut self, enable: bool) {
        // Stored on the config; NOTE(review): presumably consumed by the
        // CUBIC congestion controller — confirm in the cc module.
        self.enable_cubic_idle_restart_fix = enable;
    }
1145
    /// Configures whether to enable sending STREAMS_BLOCKED frames.
    ///
    /// STREAMS_BLOCKED frames are an optional advisory signal in the QUIC
    /// protocol which SHOULD be sent when the sender wishes to open a stream
    /// but is unable to do so due to the maximum stream limit set by its peer.
    ///
    /// The default value is `false`.
    pub fn set_enable_send_streams_blocked(&mut self, enable: bool) {
        self.enable_send_streams_blocked = enable;
    }
1156
    /// Configures whether to enable HyStart++.
    ///
    /// The default value is `true`.
    pub fn enable_hystart(&mut self, v: bool) {
        self.hystart = v;
    }
1163
    /// Configures whether to enable packet pacing.
    ///
    /// The default value is `true`.
    pub fn enable_pacing(&mut self, v: bool) {
        self.pacing = v;
    }
1170
    /// Sets the max value for pacing rate.
    ///
    /// NOTE(review): the rate's unit appears to be bytes per second —
    /// confirm against the recovery/pacing module.
    ///
    /// By default pacing rate is not limited.
    pub fn set_max_pacing_rate(&mut self, v: u64) {
        self.max_pacing_rate = Some(v);
    }
1177
1178 /// Configures whether to enable receiving DATAGRAM frames.
1179 ///
1180 /// When enabled, the `max_datagram_frame_size` transport parameter is set
1181 /// to 65536 as recommended by draft-ietf-quic-datagram-01.
1182 ///
1183 /// The default is `false`.
1184 pub fn enable_dgram(
1185 &mut self, enabled: bool, recv_queue_len: usize, send_queue_len: usize,
1186 ) {
1187 self.local_transport_params.max_datagram_frame_size = if enabled {
1188 Some(MAX_DGRAM_FRAME_SIZE)
1189 } else {
1190 None
1191 };
1192 self.dgram_recv_max_queue_len = recv_queue_len;
1193 self.dgram_send_max_queue_len = send_queue_len;
1194 }
1195
    /// Configures the max number of queued received PATH_CHALLENGE frames.
    ///
    /// When an endpoint receives a PATH_CHALLENGE frame and the queue is full,
    /// the frame is discarded.
    ///
    /// The default is `3`.
    pub fn set_path_challenge_recv_max_queue_len(&mut self, queue_len: usize) {
        self.path_challenge_recv_max_queue_len = queue_len;
    }
1205
    /// Sets the maximum size of the connection window, in bytes.
    ///
    /// The default value is `MAX_CONNECTION_WINDOW` (24 MB).
    pub fn set_max_connection_window(&mut self, v: u64) {
        self.max_connection_window = v;
    }
1212
    /// Sets the maximum size of the stream window, in bytes.
    ///
    /// The default value is `MAX_STREAM_WINDOW` (16 MB).
    pub fn set_max_stream_window(&mut self, v: u64) {
        self.max_stream_window = v;
    }
1219
    /// Sets the initial stateless reset token.
    ///
    /// This value is only advertised by servers. Setting a stateless reset
    /// token as a client has no effect on the connection.
    ///
    /// The default value is `None`.
    pub fn set_stateless_reset_token(&mut self, v: Option<u128>) {
        self.local_transport_params.stateless_reset_token = v;
    }
1229
    /// Sets whether the QUIC connection should avoid reusing DCIDs over
    /// different paths.
    ///
    /// When set to `true`, it ensures that a destination Connection ID is
    /// never reused on different paths. Such behaviour may lead to connection
    /// stall if the peer performs a non-voluntary migration (e.g., NAT
    /// rebinding) and does not provide additional destination Connection IDs
    /// to handle such an event.
    ///
    /// The default value is `false`.
    pub fn set_disable_dcid_reuse(&mut self, v: bool) {
        self.disable_dcid_reuse = v;
    }
1243
    /// Enables tracking unknown transport parameters.
    ///
    /// Specify the maximum number of bytes used to track unknown transport
    /// parameters. The size includes the identifier and its value. If storing
    /// a transport parameter would cause the limit to be exceeded, it is
    /// quietly dropped.
    ///
    /// The default is that the feature is disabled.
    pub fn enable_track_unknown_transport_parameters(&mut self, size: usize) {
        self.track_unknown_transport_params = Some(size);
    }
1255
    /// Sets whether the initial max data value should be used as the initial
    /// flow control window.
    ///
    /// If set to `true`, the initial flow control window for streams and the
    /// connection itself will be set to the initial max data value for streams
    /// and the connection respectively. If `false`, the window is set to the
    /// minimum of initial max data and `DEFAULT_STREAM_WINDOW` or
    /// `DEFAULT_CONNECTION_WINDOW`.
    ///
    /// The default is `false`.
    pub fn set_use_initial_max_data_as_flow_control_win(&mut self, v: bool) {
        self.use_initial_max_data_as_flow_control_win = v;
    }
1269}
1270
/// Tracks the health of the `tx_buffered` value.
#[derive(Clone, Copy, Debug, Default, PartialEq)]
pub enum TxBufferTrackingState {
    /// The send buffer accounting is in a good state.
    #[default]
    Ok,
    /// The send buffer accounting is in an inconsistent state, which could
    /// lead to connection stalls or excess buffering due to bugs we haven't
    /// tracked down yet.
    Inconsistent,
}
1282
/// Tracks if the connection hit the peer stream limit and which
/// STREAMS_BLOCKED frames have been sent.
#[derive(Default)]
struct StreamsBlockedState {
    /// The peer's max_streams limit at which we last became blocked on
    /// opening new local streams, if any.
    blocked_at: Option<u64>,

    /// The stream limit carried by the most recently sent STREAMS_BLOCKED
    /// frame. If != to blocked_at, the connection has pending STREAMS_BLOCKED
    /// frames to send.
    blocked_sent: Option<u64>,
}

impl StreamsBlockedState {
    /// Returns true if there is a STREAMS_BLOCKED frame that needs sending.
    fn has_pending_stream_blocked_frame(&self) -> bool {
        // A frame is pending when we are blocked at a limit that has not yet
        // been carried by a sent frame. (Equivalent to the `Option` ordering
        // `blocked_sent < blocked_at`, where `None` sorts below any `Some`.)
        match (self.blocked_sent, self.blocked_at) {
            (_, None) => false,
            (None, Some(_)) => true,
            (Some(sent), Some(at)) => sent < at,
        }
    }

    /// Update the stream blocked limit.
    fn update_at(&mut self, limit: u64) {
        // The blocked limit only ever moves forwards.
        if Some(limit) > self.blocked_at {
            self.blocked_at = Some(limit);
        }
    }

    /// Clear blocked_sent to force retransmission of the most recently sent
    /// STREAMS_BLOCKED frame.
    fn force_retransmit_sent_limit_eq(&mut self, limit: u64) {
        // Only react if the lost frame carried the most recently sent
        // limit; losses of frames with older limits are irrelevant.
        if self.blocked_sent == Some(limit) {
            self.blocked_sent = None;
        }
    }
}
1318
/// A QUIC connection.
pub struct Connection<F = DefaultBufFactory>
where
    F: BufFactory,
{
    /// QUIC wire version used for the connection.
    version: u32,

    /// Connection Identifiers.
    ids: cid::ConnectionIdentifiers,

    /// Unique opaque ID for the connection that can be used for logging.
    trace_id: String,

    /// Packet number spaces.
    pkt_num_spaces: [packet::PktNumSpace; packet::Epoch::count()],

    /// The crypto context.
    crypto_ctx: [packet::CryptoContext; packet::Epoch::count()],

    /// Next packet number.
    next_pkt_num: u64,

    // TODO: combine with `next_pkt_num`.
    /// Track the packet skip context.
    pkt_num_manager: packet::PktNumManager,

    /// Peer's transport parameters.
    peer_transport_params: TransportParams,

    /// If tracking unknown transport parameters from a peer, how much space to
    /// use in bytes.
    peer_transport_params_track_unknown: Option<usize>,

    /// Local transport parameters.
    local_transport_params: TransportParams,

    /// TLS handshake state.
    handshake: tls::Handshake,

    /// Serialized TLS session buffer.
    ///
    /// This field is populated when a new session ticket is processed on the
    /// client. On the server this is empty.
    session: Option<Vec<u8>>,

    /// The configuration for recovery.
    recovery_config: recovery::RecoveryConfig,

    /// The path manager.
    paths: path::PathMap,

    /// PATH_CHALLENGE receive queue max length.
    path_challenge_recv_max_queue_len: usize,

    /// Total number of received PATH_CHALLENGE frames.
    path_challenge_rx_count: u64,

    /// List of supported application protocols.
    application_protos: Vec<Vec<u8>>,

    /// Total number of received packets.
    recv_count: usize,

    /// Total number of sent packets.
    sent_count: usize,

    /// Total number of lost packets.
    lost_count: usize,

    /// Total number of lost packets that were later acked.
    spurious_lost_count: usize,

    /// Total number of packets sent with data retransmitted.
    retrans_count: usize,

    /// Total number of sent DATAGRAM frames.
    dgram_sent_count: usize,

    /// Total number of received DATAGRAM frames.
    dgram_recv_count: usize,

    /// Total number of bytes received from the peer.
    rx_data: u64,

    /// Receiver flow controller.
    flow_control: flowcontrol::FlowControl,

    /// True if there is a pending MAX_DATA frame to send.
    should_send_max_data: bool,

    /// True if there is a pending MAX_STREAMS_BIDI frame to send.
    should_send_max_streams_bidi: bool,

    /// True if there is a pending MAX_STREAMS_UNI frame to send.
    should_send_max_streams_uni: bool,

    /// Number of stream data bytes that can be buffered.
    tx_cap: usize,

    /// The send capacity factor.
    tx_cap_factor: f64,

    /// Number of bytes buffered in the send buffer.
    tx_buffered: usize,

    /// Tracks the health of `tx_buffered`.
    tx_buffered_state: TxBufferTrackingState,

    /// Total number of bytes sent to the peer.
    tx_data: u64,

    /// Peer's flow control limit for the connection.
    max_tx_data: u64,

    /// Last `tx_data` value before running a full send() loop.
    last_tx_data: u64,

    /// Total number of bytes retransmitted over the connection.
    /// This counts only STREAM and CRYPTO data.
    stream_retrans_bytes: u64,

    /// Total number of bytes sent over the connection.
    sent_bytes: u64,

    /// Total number of bytes received over the connection.
    recv_bytes: u64,

    /// Total number of sent bytes acked over the connection.
    acked_bytes: u64,

    /// Total number of sent bytes lost over the connection.
    lost_bytes: u64,

    /// Streams map, indexed by stream ID.
    streams: stream::StreamMap<F>,

    /// Peer's original destination connection ID. Used by the client to
    /// validate the server's transport parameter.
    odcid: Option<ConnectionId<'static>>,

    /// Peer's retry source connection ID. Used by the client during stateless
    /// retry to validate the server's transport parameter.
    rscid: Option<ConnectionId<'static>>,

    /// Received address verification token.
    token: Option<Vec<u8>>,

    /// Error code and reason to be sent to the peer in a CONNECTION_CLOSE
    /// frame.
    local_error: Option<ConnectionError>,

    /// Error code and reason received from the peer in a CONNECTION_CLOSE
    /// frame.
    peer_error: Option<ConnectionError>,

    /// The connection-level limit at which send blocking occurred.
    blocked_limit: Option<u64>,

    /// Idle timeout expiration time.
    idle_timer: Option<Instant>,

    /// Draining timeout expiration time.
    draining_timer: Option<Instant>,

    /// List of raw packets that were received before they could be decrypted.
    undecryptable_pkts: VecDeque<(Vec<u8>, RecvInfo)>,

    /// The negotiated ALPN protocol.
    alpn: Vec<u8>,

    /// Whether this is a server-side connection.
    is_server: bool,

    /// Whether the initial secrets have been derived.
    derived_initial_secrets: bool,

    /// Whether a version negotiation packet has already been received. Only
    /// relevant for client connections.
    did_version_negotiation: bool,

    /// Whether stateless retry has been performed.
    did_retry: bool,

    /// Whether the peer already updated its connection ID.
    got_peer_conn_id: bool,

    /// Whether the peer verified our initial address.
    peer_verified_initial_address: bool,

    /// Whether the peer's transport parameters were parsed.
    parsed_peer_transport_params: bool,

    /// Whether the connection handshake has been completed.
    handshake_completed: bool,

    /// Whether the HANDSHAKE_DONE frame has been sent.
    handshake_done_sent: bool,

    /// Whether the HANDSHAKE_DONE frame has been acked.
    handshake_done_acked: bool,

    /// Whether the connection handshake has been confirmed.
    handshake_confirmed: bool,

    /// Key phase bit used for outgoing protected packets.
    key_phase: bool,

    /// Whether an ack-eliciting packet has been sent since last receiving a
    /// packet.
    ack_eliciting_sent: bool,

    /// Whether the connection is closed.
    closed: bool,

    /// Whether the connection was timed out.
    timed_out: bool,

    /// Whether to send GREASE.
    grease: bool,

    /// Whether to send STREAMS_BLOCKED frames when the bidi or uni stream
    /// quota is exhausted.
    enable_send_streams_blocked: bool,

    /// TLS keylog writer.
    keylog: Option<Box<dyn std::io::Write + Send + Sync>>,

    /// qlog serialization state (only present when the `qlog` feature is
    /// enabled).
    #[cfg(feature = "qlog")]
    qlog: QlogInfo,

    /// DATAGRAM queues (receive and send side).
    dgram_recv_queue: dgram::DatagramQueue<F>,
    dgram_send_queue: dgram::DatagramQueue<F>,

    /// Whether to emit DATAGRAM frames in the next packet.
    emit_dgram: bool,

    /// Whether the connection should prevent from reusing destination
    /// Connection IDs when the peer migrates.
    disable_dcid_reuse: bool,

    /// The number of streams reset by local.
    reset_stream_local_count: u64,

    /// The number of streams stopped by local.
    stopped_stream_local_count: u64,

    /// The number of streams reset by remote.
    reset_stream_remote_count: u64,

    /// The number of streams stopped by remote.
    stopped_stream_remote_count: u64,

    /// The number of DATA_BLOCKED frames sent due to hitting the connection
    /// flow control limit.
    data_blocked_sent_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames sent due to a stream hitting
    /// the stream flow control limit.
    stream_data_blocked_sent_count: u64,

    /// The number of DATA_BLOCKED frames received from the remote endpoint.
    data_blocked_recv_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames received from the remote
    /// endpoint.
    stream_data_blocked_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames received from the remote endpoint
    /// indicating the peer is blocked on opening new bidirectional streams.
    streams_blocked_bidi_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames received from the remote endpoint
    /// indicating the peer is blocked on opening new unidirectional streams.
    streams_blocked_uni_recv_count: u64,

    /// The number of times send() was blocked because the anti-amplification
    /// budget (bytes received × max_amplification_factor) was exhausted.
    amplification_limited_count: u64,

    /// Tracks if the connection hit the peer's bidi or uni stream limit, and
    /// if STREAMS_BLOCKED frames are pending transmission.
    streams_blocked_bidi_state: StreamsBlockedState,
    streams_blocked_uni_state: StreamsBlockedState,

    /// The anti-amplification limit factor.
    max_amplification_factor: usize,
}
1609
/// Creates a new server-side connection.
///
/// The `scid` parameter represents the server's source connection ID, while
/// the optional `odcid` parameter represents the original destination ID the
/// client sent before a Retry packet (this is only required when using the
/// [`retry()`] function). See also the [`accept_with_retry()`] function for
/// more advanced retry cases.
///
/// [`retry()`]: fn.retry.html
///
/// ## Examples:
///
/// ```no_run
/// # let mut config = quiche::Config::new(0xbabababa)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let local = "127.0.0.1:0".parse().unwrap();
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// let conn = quiche::accept(&scid, None, local, peer, &mut config)?;
/// # Ok::<(), quiche::Error>(())
/// ```
#[inline(always)]
pub fn accept(
    scid: &ConnectionId, odcid: Option<&ConnectionId>, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection> {
    // Thin wrapper over accept_with_buf_factory(), pinned to the default
    // buffer factory.
    accept_with_buf_factory(scid, odcid, local, peer, config)
}
1637
/// Creates a new server-side connection, with a custom buffer generation
/// method.
///
/// The buffers generated can be anything that can be dereferenced as a byte
/// slice. See [`accept`] and [`BufFactory`] for more info.
#[inline]
pub fn accept_with_buf_factory<F: BufFactory>(
    scid: &ConnectionId, odcid: Option<&ConnectionId>, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    // For connections with `odcid` set, we historically used `retry_source_cid =
    // scid`. Keep this behavior to preserve backwards compatibility.
    // `accept_with_retry` allows the SCIDs to be specified separately.
    let retry_cids = odcid.map(|odcid| RetryConnectionIds {
        original_destination_cid: odcid,
        retry_source_cid: scid,
    });
    Connection::new(scid, retry_cids, None, local, peer, config, true)
}
1657
/// A wrapper for the connection IDs involved in a Retry exchange, used in
/// [`accept_with_retry`].
pub struct RetryConnectionIds<'a> {
    /// The DCID of the first Initial packet received by the server, which
    /// triggered the Retry packet.
    pub original_destination_cid: &'a ConnectionId<'a>,
    /// The SCID of the Retry packet sent by the server. This can be different
    /// from the new connection's SCID.
    pub retry_source_cid: &'a ConnectionId<'a>,
}
1667
/// Creates a new server-side connection after the client responded to a Retry
/// packet.
///
/// To generate a Retry packet in the first place, use the [`retry()`] function.
///
/// The `scid` parameter represents the server's source connection ID, which can
/// be freshly generated after the application has successfully verified the
/// Retry. `retry_cids` is used to tie the new connection to the Initial + Retry
/// exchange that preceded the connection's creation.
///
/// The DCID of the client's Initial packet is inherently untrusted data. It is
/// safe to use the DCID in the `retry_source_cid` field of the
/// `RetryConnectionIds` provided to this function. However, using the Initial's
/// DCID for the `scid` parameter carries risks. Applications are advised to
/// implement their own DCID validation steps before using the DCID in that
/// manner.
#[inline]
pub fn accept_with_retry<F: BufFactory>(
    scid: &ConnectionId, retry_cids: RetryConnectionIds, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    // Unlike accept(), the retry CIDs are always present here.
    Connection::new(scid, Some(retry_cids), None, local, peer, config, true)
}
1691
1692/// Creates a new client-side connection.
1693///
1694/// The `scid` parameter is used as the connection's source connection ID,
1695/// while the optional `server_name` parameter is used to verify the peer's
1696/// certificate.
1697///
1698/// ## Examples:
1699///
1700/// ```no_run
1701/// # let mut config = quiche::Config::new(0xbabababa)?;
1702/// # let server_name = "quic.tech";
1703/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
1704/// # let local = "127.0.0.1:4321".parse().unwrap();
1705/// # let peer = "127.0.0.1:1234".parse().unwrap();
1706/// let conn =
1707/// quiche::connect(Some(&server_name), &scid, local, peer, &mut config)?;
1708/// # Ok::<(), quiche::Error>(())
1709/// ```
1710#[inline]
1711pub fn connect(
1712 server_name: Option<&str>, scid: &ConnectionId, local: SocketAddr,
1713 peer: SocketAddr, config: &mut Config,
1714) -> Result<Connection> {
1715 let mut conn = Connection::new(scid, None, None, local, peer, config, false)?;
1716
1717 if let Some(server_name) = server_name {
1718 conn.handshake.set_host_name(server_name)?;
1719 }
1720
1721 Ok(conn)
1722}
1723
/// Creates a new client-side connection using the given DCID initially.
///
/// Be aware that [RFC 9000] places requirements for unpredictability and length
/// on the client DCID field. This function is dangerous if these requirements
/// are not satisfied.
///
/// The `scid` parameter is used as the connection's source connection ID, while
/// the optional `server_name` parameter is used to verify the peer's
/// certificate.
///
/// [RFC 9000]: <https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3>
#[cfg(feature = "custom-client-dcid")]
#[cfg_attr(docsrs, doc(cfg(feature = "custom-client-dcid")))]
pub fn connect_with_dcid(
    server_name: Option<&str>, scid: &ConnectionId, dcid: &ConnectionId,
    local: SocketAddr, peer: SocketAddr, config: &mut Config,
) -> Result<Connection> {
    let mut conn =
        Connection::new(scid, None, Some(dcid), local, peer, config, false)?;

    // The host name is only configured on the handshake when one was
    // supplied; any TLS error is propagated to the caller.
    server_name
        .map(|name| conn.handshake.set_host_name(name))
        .transpose()?;

    Ok(conn)
}
1750
1751/// Creates a new client-side connection, with a custom buffer generation
1752/// method.
1753///
1754/// The buffers generated can be anything that can be drereferenced as a byte
1755/// slice. See [`connect`] and [`BufFactory`] for more info.
1756#[inline]
1757pub fn connect_with_buffer_factory<F: BufFactory>(
1758 server_name: Option<&str>, scid: &ConnectionId, local: SocketAddr,
1759 peer: SocketAddr, config: &mut Config,
1760) -> Result<Connection<F>> {
1761 let mut conn = Connection::new(scid, None, None, local, peer, config, false)?;
1762
1763 if let Some(server_name) = server_name {
1764 conn.handshake.set_host_name(server_name)?;
1765 }
1766
1767 Ok(conn)
1768}
1769
/// Creates a new client-side connection, with a custom buffer generation
/// method, using the given DCID initially.
///
/// Be aware that [RFC 9000] places requirements for unpredictability and
/// length on the client DCID field. This function is dangerous if these
/// requirements are not satisfied.
///
/// The buffers generated can be anything that can be dereferenced as a byte
/// slice. See [`connect`] and [`BufFactory`] for more info.
///
/// [RFC 9000]: <https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3>
#[cfg(feature = "custom-client-dcid")]
#[cfg_attr(docsrs, doc(cfg(feature = "custom-client-dcid")))]
pub fn connect_with_dcid_and_buffer_factory<F: BufFactory>(
    server_name: Option<&str>, scid: &ConnectionId, dcid: &ConnectionId,
    local: SocketAddr, peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    let mut conn =
        Connection::new(scid, None, Some(dcid), local, peer, config, false)?;

    // If an SNI value was supplied, register it with the TLS handshake so
    // the peer's certificate can be verified against it.
    if let Some(server_name) = server_name {
        conn.handshake.set_host_name(server_name)?;
    }

    Ok(conn)
}
1793
/// Writes a version negotiation packet.
///
/// The `scid` and `dcid` parameters are the source connection ID and the
/// destination connection ID extracted from the received client's Initial
/// packet that advertises an unsupported version.
///
/// ## Examples:
///
/// ```no_run
/// # let mut buf = [0; 512];
/// # let mut out = [0; 512];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// let (len, src) = socket.recv_from(&mut buf).unwrap();
///
/// let hdr =
///     quiche::Header::from_slice(&mut buf[..len], quiche::MAX_CONN_ID_LEN)?;
///
/// if hdr.version != quiche::PROTOCOL_VERSION {
///     let len = quiche::negotiate_version(&hdr.scid, &hdr.dcid, &mut out)?;
///     socket.send_to(&out[..len], &src).unwrap();
/// }
/// # Ok::<(), quiche::Error>(())
/// ```
#[inline]
pub fn negotiate_version(
    scid: &ConnectionId, dcid: &ConnectionId, out: &mut [u8],
) -> Result<usize> {
    // Thin public wrapper: the actual wire encoding lives in the `packet`
    // module. Returns the number of bytes written into `out`.
    packet::negotiate_version(scid, dcid, out)
}
1823
/// Writes a stateless retry packet.
///
/// The `scid` and `dcid` parameters are the source connection ID and the
/// destination connection ID extracted from the received client's Initial
/// packet, while `new_scid` is the server's new source connection ID and
/// `token` is the address validation token the client needs to echo back.
///
/// The application is responsible for generating the address validation
/// token to be sent to the client, and verifying tokens sent back by the
/// client. The generated token should include the `dcid` parameter, such
/// that it can be later extracted from the token and passed to the
/// [`accept()`] function as its `odcid` parameter.
///
/// [`accept()`]: fn.accept.html
///
/// ## Examples:
///
/// ```no_run
/// # let mut config = quiche::Config::new(0xbabababa)?;
/// # let mut buf = [0; 512];
/// # let mut out = [0; 512];
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let local = socket.local_addr().unwrap();
/// # fn mint_token(hdr: &quiche::Header, src: &std::net::SocketAddr) -> Vec<u8> {
/// #     vec![]
/// # }
/// # fn validate_token<'a>(src: &std::net::SocketAddr, token: &'a [u8]) -> Option<quiche::ConnectionId<'a>> {
/// #     None
/// # }
/// let (len, peer) = socket.recv_from(&mut buf).unwrap();
///
/// let hdr = quiche::Header::from_slice(&mut buf[..len], quiche::MAX_CONN_ID_LEN)?;
///
/// let token = hdr.token.as_ref().unwrap();
///
/// // No token sent by client, create a new one.
/// if token.is_empty() {
///     let new_token = mint_token(&hdr, &peer);
///
///     let len = quiche::retry(
///         &hdr.scid, &hdr.dcid, &scid, &new_token, hdr.version, &mut out,
///     )?;
///
///     socket.send_to(&out[..len], &peer).unwrap();
///     return Ok(());
/// }
///
/// // Client sent token, validate it.
/// let odcid = validate_token(&peer, token);
///
/// if odcid.is_none() {
///     // Invalid address validation token.
///     return Ok(());
/// }
///
/// let conn = quiche::accept(&scid, odcid.as_ref(), local, peer, &mut config)?;
/// # Ok::<(), quiche::Error>(())
/// ```
#[inline]
pub fn retry(
    scid: &ConnectionId, dcid: &ConnectionId, new_scid: &ConnectionId,
    token: &[u8], version: u32, out: &mut [u8],
) -> Result<usize> {
    // Thin public wrapper: the actual Retry packet encoding (including the
    // Retry integrity tag) lives in the `packet` module.
    packet::retry(scid, dcid, new_scid, token, version, out)
}
1890
1891/// Returns true if the given protocol version is supported.
1892#[inline]
1893pub fn version_is_supported(version: u32) -> bool {
1894 matches!(version, PROTOCOL_VERSION_V1)
1895}
1896
/// Pushes a frame to the output packet if there is enough space.
///
/// Returns `true` on success, `false` otherwise. In case of failure it means
/// there is no room to add the frame in the packet. You may retry to add the
/// frame later.
macro_rules! push_frame_to_pkt {
    ($out:expr, $frames:expr, $frame:expr, $left:expr) => {{
        if $frame.wire_len() <= $left {
            // Deduct the frame's encoded size from the remaining packet
            // space before serializing.
            $left -= $frame.wire_len();

            // Serialize into the output buffer; `?` propagates encoding
            // errors out of the enclosing function.
            $frame.to_bytes(&mut $out)?;

            // Keep the frame for retransmission/ack tracking.
            $frames.push($frame);

            true
        } else {
            false
        }
    }};
}
1917
/// Executes the provided body if the qlog feature is enabled, quiche has been
/// configured with a log writer, and the event's importance is within the
/// configured level.
macro_rules! qlog_with_type {
    ($ty:expr, $qlog:expr, $qlog_streamer_ref:ident, $body:block) => {{
        // Compiles to nothing when the qlog feature is disabled.
        #[cfg(feature = "qlog")]
        {
            // Only run the body when the event's importance is covered by
            // the configured level AND a streamer is actually attached.
            if EventImportance::from($ty).is_contained_in(&$qlog.level) {
                if let Some($qlog_streamer_ref) = &mut $qlog.streamer {
                    $body
                }
            }
        }
    }};
}
1933
// Shorthand qlog event-type constants, so emission sites don't repeat the
// full `EventType::QuicEventType(...)` paths.
#[cfg(feature = "qlog")]
const QLOG_PARAMS_SET: EventType =
    EventType::QuicEventType(QuicEventType::ParametersSet);

#[cfg(feature = "qlog")]
const QLOG_PACKET_RX: EventType =
    EventType::QuicEventType(QuicEventType::PacketReceived);

#[cfg(feature = "qlog")]
const QLOG_PACKET_TX: EventType =
    EventType::QuicEventType(QuicEventType::PacketSent);

#[cfg(feature = "qlog")]
const QLOG_DATA_MV: EventType =
    EventType::QuicEventType(QuicEventType::StreamDataMoved);

#[cfg(feature = "qlog")]
const QLOG_METRICS: EventType =
    EventType::QuicEventType(QuicEventType::RecoveryMetricsUpdated);

#[cfg(feature = "qlog")]
const QLOG_CONNECTION_CLOSED: EventType =
    EventType::QuicEventType(QuicEventType::ConnectionClosed);
1957
// Per-connection qlog state: the (optional) output streamer and the
// configured importance filter.
#[cfg(feature = "qlog")]
struct QlogInfo {
    // `None` until one of the `set_qlog*()` methods attaches a writer.
    streamer: Option<qlog::streamer::QlogStreamer>,
    // Whether the peer's transport parameters have been emitted already.
    logged_peer_params: bool,
    // Maximum event importance that gets written out.
    level: EventImportance,
}
1964
#[cfg(feature = "qlog")]
impl Default for QlogInfo {
    // qlog starts disabled (no streamer) at `Base` importance until one of
    // the `set_qlog*()` methods configures it.
    fn default() -> Self {
        Self {
            streamer: None,
            level: EventImportance::Base,
            logged_peer_params: false,
        }
    }
}
1975
1976impl<F: BufFactory> Connection<F> {
1977 fn new(
1978 scid: &ConnectionId, retry_cids: Option<RetryConnectionIds>,
1979 client_dcid: Option<&ConnectionId>, local: SocketAddr, peer: SocketAddr,
1980 config: &mut Config, is_server: bool,
1981 ) -> Result<Connection<F>> {
1982 let tls = config.tls_ctx.new_handshake()?;
1983 Connection::with_tls(
1984 scid,
1985 retry_cids,
1986 client_dcid,
1987 local,
1988 peer,
1989 config,
1990 tls,
1991 is_server,
1992 )
1993 }
1994
    /// Builds a `Connection` around an already-created TLS handshake.
    ///
    /// `retry_cids` carries the server's Retry-related connection IDs, while
    /// `client_dcid` is a caller-chosen initial DCID (only meaningful with
    /// the `custom-client-dcid` feature); the two are mutually exclusive.
    ///
    /// For clients, the Initial packet protection keys are derived here from
    /// either the caller-provided DCID or a freshly generated random one.
    #[allow(clippy::too_many_arguments)]
    fn with_tls(
        scid: &ConnectionId, retry_cids: Option<RetryConnectionIds>,
        client_dcid: Option<&ConnectionId>, local: SocketAddr, peer: SocketAddr,
        config: &Config, tls: tls::Handshake, is_server: bool,
    ) -> Result<Connection<F>> {
        if retry_cids.is_some() && client_dcid.is_some() {
            // These are exclusive, the caller should only specify one or the
            // other.
            return Err(Error::InvalidDcidInitialization);
        }
        #[cfg(feature = "custom-client-dcid")]
        if let Some(client_dcid) = client_dcid {
            // The minimum length for a client-chosen DCID is 8 bytes.
            // See https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3
            if client_dcid.to_vec().len() < 8 {
                return Err(Error::InvalidDcidInitialization);
            }
        }
        // Without the feature, a custom client DCID is never accepted.
        #[cfg(not(feature = "custom-client-dcid"))]
        if client_dcid.is_some() {
            return Err(Error::InvalidDcidInitialization);
        }

        let max_rx_data = config.local_transport_params.initial_max_data;

        // The trace ID is the hex encoding of the initial SCID.
        let scid_as_hex: Vec<String> =
            scid.iter().map(|b| format!("{b:02x}")).collect();

        // Only servers advertise a stateless reset token for the handshake
        // connection ID.
        let reset_token = if is_server {
            config.local_transport_params.stateless_reset_token
        } else {
            None
        };

        let recovery_config = recovery::RecoveryConfig::from_config(config);

        let mut path = path::Path::new(
            local,
            peer,
            &recovery_config,
            config.path_challenge_recv_max_queue_len,
            true,
            Some(config),
        );

        // If we sent a Retry assume the peer's address is verified.
        path.verified_peer_address = retry_cids.is_some();
        // Assume clients validate the server's address implicitly.
        path.peer_verified_local_address = is_server;

        // Do not allocate more than the number of active CIDs.
        let paths = path::PathMap::new(
            path,
            config.local_transport_params.active_conn_id_limit as usize,
            is_server,
        );

        let active_path_id = paths.get_active_path_id()?;

        let ids = cid::ConnectionIdentifiers::new(
            config.local_transport_params.active_conn_id_limit as usize,
            scid,
            active_path_id,
            reset_token,
        );

        // Note: `/ 2 * 3` computes 1.5x with integer arithmetic (the
        // division truncates first).
        let initial_flow_control_window =
            if config.use_initial_max_data_as_flow_control_win {
                max_rx_data
            } else {
                cmp::min(max_rx_data / 2 * 3, DEFAULT_CONNECTION_WINDOW)
            };
        let mut conn = Connection {
            version: config.version,

            ids,

            trace_id: scid_as_hex.join(""),

            // One entry per packet-number space: Initial, Handshake,
            // Application.
            pkt_num_spaces: [
                packet::PktNumSpace::new(),
                packet::PktNumSpace::new(),
                packet::PktNumSpace::new(),
            ],

            // One crypto context per encryption level, matching the packet
            // number spaces above.
            crypto_ctx: [
                packet::CryptoContext::new(),
                packet::CryptoContext::new(),
                packet::CryptoContext::new(),
            ],

            next_pkt_num: 0,

            pkt_num_manager: packet::PktNumManager::new(),

            // Placeholder until the peer's transport parameters arrive in
            // the handshake.
            peer_transport_params: TransportParams::default(),

            peer_transport_params_track_unknown: config
                .track_unknown_transport_params,

            local_transport_params: config.local_transport_params.clone(),

            handshake: tls,

            session: None,

            recovery_config,

            paths,
            path_challenge_recv_max_queue_len: config
                .path_challenge_recv_max_queue_len,
            path_challenge_rx_count: 0,

            application_protos: config.application_protos.clone(),

            recv_count: 0,
            sent_count: 0,
            lost_count: 0,
            spurious_lost_count: 0,
            retrans_count: 0,
            dgram_sent_count: 0,
            dgram_recv_count: 0,
            sent_bytes: 0,
            recv_bytes: 0,
            acked_bytes: 0,
            lost_bytes: 0,

            rx_data: 0,
            flow_control: flowcontrol::FlowControl::new(
                max_rx_data,
                initial_flow_control_window,
                config.max_connection_window,
            ),
            should_send_max_data: false,
            should_send_max_streams_bidi: false,
            should_send_max_streams_uni: false,

            tx_cap: 0,
            tx_cap_factor: config.tx_cap_factor,

            tx_buffered: 0,
            tx_buffered_state: TxBufferTrackingState::Ok,

            tx_data: 0,
            max_tx_data: 0,
            last_tx_data: 0,

            stream_retrans_bytes: 0,

            streams: stream::StreamMap::new(
                config.local_transport_params.initial_max_streams_bidi,
                config.local_transport_params.initial_max_streams_uni,
                config.max_stream_window,
            ),

            odcid: None,

            rscid: None,

            token: None,

            local_error: None,

            peer_error: None,

            blocked_limit: None,

            idle_timer: None,

            draining_timer: None,

            undecryptable_pkts: VecDeque::new(),

            alpn: Vec::new(),

            is_server,

            derived_initial_secrets: false,

            did_version_negotiation: false,

            did_retry: false,

            got_peer_conn_id: false,

            // Assume clients validate the server's address implicitly.
            peer_verified_initial_address: is_server,

            parsed_peer_transport_params: false,

            handshake_completed: false,

            handshake_done_sent: false,
            handshake_done_acked: false,

            handshake_confirmed: false,

            key_phase: false,

            ack_eliciting_sent: false,

            closed: false,

            timed_out: false,

            grease: config.grease,

            enable_send_streams_blocked: config.enable_send_streams_blocked,

            keylog: None,

            #[cfg(feature = "qlog")]
            qlog: Default::default(),

            dgram_recv_queue: dgram::DatagramQueue::new(
                config.dgram_recv_max_queue_len,
            ),

            dgram_send_queue: dgram::DatagramQueue::new(
                config.dgram_send_max_queue_len,
            ),

            emit_dgram: true,

            disable_dcid_reuse: config.disable_dcid_reuse,

            reset_stream_local_count: 0,
            stopped_stream_local_count: 0,
            reset_stream_remote_count: 0,
            stopped_stream_remote_count: 0,

            data_blocked_sent_count: 0,
            stream_data_blocked_sent_count: 0,
            data_blocked_recv_count: 0,
            stream_data_blocked_recv_count: 0,

            streams_blocked_bidi_recv_count: 0,
            streams_blocked_uni_recv_count: 0,

            amplification_limited_count: 0,

            streams_blocked_bidi_state: Default::default(),
            streams_blocked_uni_state: Default::default(),

            max_amplification_factor: config.max_amplification_factor,
        };
        conn.streams.set_use_initial_max_data_as_flow_control_win(
            config.use_initial_max_data_as_flow_control_win,
        );

        // A server that previously sent a Retry must echo the original and
        // Retry connection IDs in its transport parameters so the client can
        // authenticate the Retry.
        if let Some(retry_cids) = retry_cids {
            conn.local_transport_params
                .original_destination_connection_id =
                Some(retry_cids.original_destination_cid.to_vec().into());

            conn.local_transport_params.retry_source_connection_id =
                Some(retry_cids.retry_source_cid.to_vec().into());

            conn.did_retry = true;
        }

        conn.local_transport_params.initial_source_connection_id =
            Some(conn.ids.get_scid(0)?.cid.to_vec().into());

        conn.handshake.init(is_server)?;

        // Versions other than v1 use the legacy transport-parameter
        // codepoint.
        conn.handshake
            .use_legacy_codepoint(config.version != PROTOCOL_VERSION_V1);

        conn.encode_transport_params()?;

        // Derive initial secrets for the client; the server derives them
        // when the first Initial packet arrives.
        if !is_server {
            let dcid = if let Some(client_dcid) = client_dcid {
                // We were already given a DCID to use, so use it.
                client_dcid.to_vec()
            } else {
                // Generate a random destination connection ID; the initial
                // secrets are derived from it below.
                let mut dcid = [0; 16];
                rand::rand_bytes(&mut dcid[..]);
                dcid.to_vec()
            };

            let (aead_open, aead_seal) = crypto::derive_initial_key_material(
                &dcid,
                conn.version,
                conn.is_server,
                false,
            )?;

            let reset_token = conn.peer_transport_params.stateless_reset_token;
            conn.set_initial_dcid(
                dcid.to_vec().into(),
                reset_token,
                active_path_id,
            )?;

            conn.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
            conn.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);

            conn.derived_initial_secrets = true;
        }

        Ok(conn)
    }
2302
    /// Sets keylog output to the designated [`Writer`].
    ///
    /// This needs to be called as soon as the connection is created, to avoid
    /// missing some early logs.
    ///
    /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
    #[inline]
    pub fn set_keylog(&mut self, writer: Box<dyn std::io::Write + Send + Sync>) {
        // Any previously configured writer is dropped and replaced.
        self.keylog = Some(writer);
    }
2313
    /// Sets qlog output to the designated [`Writer`].
    ///
    /// Only events included in `QlogLevel::Base` are written. The serialization
    /// format is JSON-SEQ.
    ///
    /// This needs to be called as soon as the connection is created, to avoid
    /// missing some early logs.
    ///
    /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
    #[cfg(feature = "qlog")]
    #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
    pub fn set_qlog(
        &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
        description: String,
    ) {
        // Convenience wrapper that defaults the level to `Base`.
        self.set_qlog_with_level(writer, title, description, QlogLevel::Base)
    }
2331
2332 /// Sets qlog output to the designated [`Writer`].
2333 ///
2334 /// Only qlog events included in the specified `QlogLevel` are written. The
2335 /// serialization format is JSON-SEQ.
2336 ///
2337 /// This needs to be called as soon as the connection is created, to avoid
2338 /// missing some early logs.
2339 ///
2340 /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
2341 #[cfg(feature = "qlog")]
2342 #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
2343 pub fn set_qlog_with_level(
2344 &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
2345 description: String, qlog_level: QlogLevel,
2346 ) {
2347 use qlog::events::quic::TransportInitiator;
2348 use qlog::events::HTTP3_URI;
2349 use qlog::events::QUIC_URI;
2350 use qlog::CommonFields;
2351 use qlog::ReferenceTime;
2352
2353 let vp = if self.is_server {
2354 qlog::VantagePointType::Server
2355 } else {
2356 qlog::VantagePointType::Client
2357 };
2358
2359 let level = match qlog_level {
2360 QlogLevel::Core => EventImportance::Core,
2361
2362 QlogLevel::Base => EventImportance::Base,
2363
2364 QlogLevel::Extra => EventImportance::Extra,
2365 };
2366
2367 self.qlog.level = level;
2368
2369 // Best effort to get Instant::now() and SystemTime::now() as closely
2370 // together as possible.
2371 let now = Instant::now();
2372 let now_wall_clock = std::time::SystemTime::now();
2373 let common_fields = CommonFields {
2374 reference_time: ReferenceTime::new_monotonic(Some(now_wall_clock)),
2375 ..Default::default()
2376 };
2377 let trace = qlog::TraceSeq::new(
2378 Some(title.to_string()),
2379 Some(description.to_string()),
2380 Some(common_fields),
2381 Some(qlog::VantagePoint {
2382 name: None,
2383 ty: vp,
2384 flow: None,
2385 }),
2386 vec![QUIC_URI.to_string(), HTTP3_URI.to_string()],
2387 );
2388
2389 let mut streamer = qlog::streamer::QlogStreamer::new(
2390 Some(title),
2391 Some(description),
2392 now,
2393 trace,
2394 self.qlog.level,
2395 qlog::streamer::EventTimePrecision::MicroSeconds,
2396 writer,
2397 );
2398
2399 streamer.start_log().ok();
2400
2401 let ev_data = self
2402 .local_transport_params
2403 .to_qlog(TransportInitiator::Local, self.handshake.cipher());
2404
2405 // This event occurs very early, so just mark the relative time as 0.0.
2406 streamer.add_event(Event::with_time(0.0, ev_data)).ok();
2407
2408 self.qlog.streamer = Some(streamer);
2409 }
2410
    /// Returns a mutable reference to the QlogStreamer, if it exists.
    ///
    /// Returns `None` unless one of the `set_qlog*()` methods was called
    /// first to attach a streamer.
    #[cfg(feature = "qlog")]
    #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
    pub fn qlog_streamer(&mut self) -> Option<&mut qlog::streamer::QlogStreamer> {
        self.qlog.streamer.as_mut()
    }
2417
2418 /// Configures the given session for resumption.
2419 ///
2420 /// On the client, this can be used to offer the given serialized session,
2421 /// as returned by [`session()`], for resumption.
2422 ///
2423 /// This must only be called immediately after creating a connection, that
2424 /// is, before any packet is sent or received.
2425 ///
2426 /// [`session()`]: struct.Connection.html#method.session
2427 #[inline]
2428 pub fn set_session(&mut self, session: &[u8]) -> Result<()> {
2429 let mut b = octets::Octets::with_slice(session);
2430
2431 let session_len = b.get_u64()? as usize;
2432 let session_bytes = b.get_bytes(session_len)?;
2433
2434 self.handshake.set_session(session_bytes.as_ref())?;
2435
2436 let raw_params_len = b.get_u64()? as usize;
2437 let raw_params_bytes = b.get_bytes(raw_params_len)?;
2438
2439 let peer_params = TransportParams::decode(
2440 raw_params_bytes.as_ref(),
2441 self.is_server,
2442 self.peer_transport_params_track_unknown,
2443 )?;
2444
2445 self.process_peer_transport_params(peer_params)?;
2446
2447 Ok(())
2448 }
2449
2450 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
2451 ///
2452 /// This must only be called immediately after creating a connection, that
2453 /// is, before any packet is sent or received.
2454 ///
2455 /// The default value is infinite, that is, no timeout is used unless
2456 /// already configured when creating the connection.
2457 pub fn set_max_idle_timeout(&mut self, v: u64) -> Result<()> {
2458 self.local_transport_params.max_idle_timeout =
2459 cmp::min(v, octets::MAX_VAR_INT);
2460
2461 self.encode_transport_params()
2462 }
2463
2464 /// Sets the congestion control algorithm used.
2465 ///
2466 /// This function can only be called inside one of BoringSSL's handshake
2467 /// callbacks, before any packet has been sent. Calling this function any
2468 /// other time will have no effect.
2469 ///
2470 /// See [`Config::set_cc_algorithm()`].
2471 ///
2472 /// [`Config::set_cc_algorithm()`]: struct.Config.html#method.set_cc_algorithm
2473 #[cfg(feature = "boringssl-boring-crate")]
2474 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2475 pub fn set_cc_algorithm_in_handshake(
2476 ssl: &mut boring::ssl::SslRef, algo: CongestionControlAlgorithm,
2477 ) -> Result<()> {
2478 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2479
2480 ex_data.recovery_config.cc_algorithm = algo;
2481
2482 Ok(())
2483 }
2484
2485 /// Sets custom BBR settings.
2486 ///
2487 /// This API is experimental and will be removed in the future.
2488 ///
2489 /// Currently this only applies if cc_algorithm is
2490 /// `CongestionControlAlgorithm::Bbr2Gcongestion` is set.
2491 ///
2492 /// This function can only be called inside one of BoringSSL's handshake
2493 /// callbacks, before any packet has been sent. Calling this function any
2494 /// other time will have no effect.
2495 ///
2496 /// See [`Config::set_custom_bbr_settings()`].
2497 ///
2498 /// [`Config::set_custom_bbr_settings()`]: struct.Config.html#method.set_custom_bbr_settings
2499 #[cfg(all(feature = "boringssl-boring-crate", feature = "internal"))]
2500 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2501 #[doc(hidden)]
2502 pub fn set_custom_bbr_settings_in_handshake(
2503 ssl: &mut boring::ssl::SslRef, custom_bbr_params: BbrParams,
2504 ) -> Result<()> {
2505 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2506
2507 ex_data.recovery_config.custom_bbr_params = Some(custom_bbr_params);
2508
2509 Ok(())
2510 }
2511
2512 /// Sets the congestion control algorithm used by string.
2513 ///
2514 /// This function can only be called inside one of BoringSSL's handshake
2515 /// callbacks, before any packet has been sent. Calling this function any
2516 /// other time will have no effect.
2517 ///
2518 /// See [`Config::set_cc_algorithm_name()`].
2519 ///
2520 /// [`Config::set_cc_algorithm_name()`]: struct.Config.html#method.set_cc_algorithm_name
2521 #[cfg(feature = "boringssl-boring-crate")]
2522 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2523 pub fn set_cc_algorithm_name_in_handshake(
2524 ssl: &mut boring::ssl::SslRef, name: &str,
2525 ) -> Result<()> {
2526 let cc_algo = CongestionControlAlgorithm::from_str(name)?;
2527 Self::set_cc_algorithm_in_handshake(ssl, cc_algo)
2528 }
2529
2530 /// Sets initial congestion window size in terms of packet count.
2531 ///
2532 /// This function can only be called inside one of BoringSSL's handshake
2533 /// callbacks, before any packet has been sent. Calling this function any
2534 /// other time will have no effect.
2535 ///
2536 /// See [`Config::set_initial_congestion_window_packets()`].
2537 ///
2538 /// [`Config::set_initial_congestion_window_packets()`]: struct.Config.html#method.set_initial_congestion_window_packets
2539 #[cfg(feature = "boringssl-boring-crate")]
2540 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2541 pub fn set_initial_congestion_window_packets_in_handshake(
2542 ssl: &mut boring::ssl::SslRef, packets: usize,
2543 ) -> Result<()> {
2544 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2545
2546 ex_data.recovery_config.initial_congestion_window_packets = packets;
2547
2548 Ok(())
2549 }
2550
2551 /// Configure whether to enable relaxed loss detection on spurious loss.
2552 ///
2553 /// This function can only be called inside one of BoringSSL's handshake
2554 /// callbacks, before any packet has been sent. Calling this function any
2555 /// other time will have no effect.
2556 ///
2557 /// See [`Config::set_enable_relaxed_loss_threshold()`].
2558 ///
2559 /// [`Config::set_enable_relaxed_loss_threshold()`]: struct.Config.html#method.set_enable_relaxed_loss_threshold
2560 #[cfg(feature = "boringssl-boring-crate")]
2561 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2562 pub fn set_enable_relaxed_loss_threshold_in_handshake(
2563 ssl: &mut boring::ssl::SslRef, enable: bool,
2564 ) -> Result<()> {
2565 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2566
2567 ex_data.recovery_config.enable_relaxed_loss_threshold = enable;
2568
2569 Ok(())
2570 }
2571
2572 /// Configure whether to enable the CUBIC idle restart fix.
2573 ///
2574 /// This function can only be called inside one of BoringSSL's handshake
2575 /// callbacks, before any packet has been sent. Calling this function any
2576 /// other time will have no effect.
2577 ///
2578 /// See [`Config::set_enable_cubic_idle_restart_fix()`].
2579 ///
2580 /// [`Config::set_enable_cubic_idle_restart_fix()`]: struct.Config.html#method.set_enable_cubic_idle_restart_fix
2581 #[cfg(feature = "boringssl-boring-crate")]
2582 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2583 pub fn set_enable_cubic_idle_restart_fix_in_handshake(
2584 ssl: &mut boring::ssl::SslRef, enable: bool,
2585 ) -> Result<()> {
2586 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2587
2588 ex_data.recovery_config.enable_cubic_idle_restart_fix = enable;
2589
2590 Ok(())
2591 }
2592
2593 /// Configures whether to enable HyStart++.
2594 ///
2595 /// This function can only be called inside one of BoringSSL's handshake
2596 /// callbacks, before any packet has been sent. Calling this function any
2597 /// other time will have no effect.
2598 ///
2599 /// See [`Config::enable_hystart()`].
2600 ///
2601 /// [`Config::enable_hystart()`]: struct.Config.html#method.enable_hystart
2602 #[cfg(feature = "boringssl-boring-crate")]
2603 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2604 pub fn set_hystart_in_handshake(
2605 ssl: &mut boring::ssl::SslRef, v: bool,
2606 ) -> Result<()> {
2607 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2608
2609 ex_data.recovery_config.hystart = v;
2610
2611 Ok(())
2612 }
2613
2614 /// Configures whether to enable pacing.
2615 ///
2616 /// This function can only be called inside one of BoringSSL's handshake
2617 /// callbacks, before any packet has been sent. Calling this function any
2618 /// other time will have no effect.
2619 ///
2620 /// See [`Config::enable_pacing()`].
2621 ///
2622 /// [`Config::enable_pacing()`]: struct.Config.html#method.enable_pacing
2623 #[cfg(feature = "boringssl-boring-crate")]
2624 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2625 pub fn set_pacing_in_handshake(
2626 ssl: &mut boring::ssl::SslRef, v: bool,
2627 ) -> Result<()> {
2628 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2629
2630 ex_data.recovery_config.pacing = v;
2631
2632 Ok(())
2633 }
2634
2635 /// Sets the max value for pacing rate.
2636 ///
2637 /// This function can only be called inside one of BoringSSL's handshake
2638 /// callbacks, before any packet has been sent. Calling this function any
2639 /// other time will have no effect.
2640 ///
2641 /// See [`Config::set_max_pacing_rate()`].
2642 ///
2643 /// [`Config::set_max_pacing_rate()`]: struct.Config.html#method.set_max_pacing_rate
2644 #[cfg(feature = "boringssl-boring-crate")]
2645 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2646 pub fn set_max_pacing_rate_in_handshake(
2647 ssl: &mut boring::ssl::SslRef, v: Option<u64>,
2648 ) -> Result<()> {
2649 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2650
2651 ex_data.recovery_config.max_pacing_rate = v;
2652
2653 Ok(())
2654 }
2655
2656 /// Sets the maximum outgoing UDP payload size.
2657 ///
2658 /// This function can only be called inside one of BoringSSL's handshake
2659 /// callbacks, before any packet has been sent. Calling this function any
2660 /// other time will have no effect.
2661 ///
2662 /// See [`Config::set_max_send_udp_payload_size()`].
2663 ///
2664 /// [`Config::set_max_send_udp_payload_size()`]: struct.Config.html#method.set_max_send_udp_payload_size
2665 #[cfg(feature = "boringssl-boring-crate")]
2666 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2667 pub fn set_max_send_udp_payload_size_in_handshake(
2668 ssl: &mut boring::ssl::SslRef, v: usize,
2669 ) -> Result<()> {
2670 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2671
2672 ex_data.recovery_config.max_send_udp_payload_size = v;
2673
2674 Ok(())
2675 }
2676
    /// Sets the send capacity factor.
    ///
    /// This function can only be called inside one of BoringSSL's handshake
    /// callbacks, before any packet has been sent. Calling this function any
    /// other time will have no effect.
    ///
    /// See [`Config::set_send_capacity_factor()`].
    ///
    /// [`Config::set_send_capacity_factor()`]: struct.Config.html#method.set_send_capacity_factor
    #[cfg(feature = "boringssl-boring-crate")]
    #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
    pub fn set_send_capacity_factor_in_handshake(
        ssl: &mut boring::ssl::SslRef, v: f64,
    ) -> Result<()> {
        // `ExData` is only attached while a quiche handshake is running.
        let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;

        ex_data.tx_cap_factor = v;

        Ok(())
    }
2697
2698 /// Configures whether to do path MTU discovery.
2699 ///
2700 /// This function can only be called inside one of BoringSSL's handshake
2701 /// callbacks, before any packet has been sent. Calling this function any
2702 /// other time will have no effect.
2703 ///
2704 /// See [`Config::discover_pmtu()`].
2705 ///
2706 /// [`Config::discover_pmtu()`]: struct.Config.html#method.discover_pmtu
2707 #[cfg(feature = "boringssl-boring-crate")]
2708 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2709 pub fn set_discover_pmtu_in_handshake(
2710 ssl: &mut boring::ssl::SslRef, discover: bool, max_probes: u8,
2711 ) -> Result<()> {
2712 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2713
2714 ex_data.pmtud = Some((discover, max_probes));
2715
2716 Ok(())
2717 }
2718
2719 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
2720 ///
2721 /// This function can only be called inside one of BoringSSL's handshake
2722 /// callbacks, before any packet has been sent. Calling this function any
2723 /// other time will have no effect.
2724 ///
2725 /// See [`Config::set_max_idle_timeout()`].
2726 ///
2727 /// [`Config::set_max_idle_timeout()`]: struct.Config.html#method.set_max_idle_timeout
2728 #[cfg(feature = "boringssl-boring-crate")]
2729 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2730 pub fn set_max_idle_timeout_in_handshake(
2731 ssl: &mut boring::ssl::SslRef, v: u64,
2732 ) -> Result<()> {
2733 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2734
2735 ex_data.local_transport_params.max_idle_timeout = v;
2736
2737 Self::set_transport_parameters_in_hanshake(
2738 ex_data.local_transport_params.clone(),
2739 ex_data.is_server,
2740 ssl,
2741 )
2742 }
2743
2744 /// Sets the `initial_max_streams_bidi` transport parameter.
2745 ///
2746 /// This function can only be called inside one of BoringSSL's handshake
2747 /// callbacks, before any packet has been sent. Calling this function any
2748 /// other time will have no effect.
2749 ///
2750 /// See [`Config::set_initial_max_streams_bidi()`].
2751 ///
2752 /// [`Config::set_initial_max_streams_bidi()`]: struct.Config.html#method.set_initial_max_streams_bidi
2753 #[cfg(feature = "boringssl-boring-crate")]
2754 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2755 pub fn set_initial_max_streams_bidi_in_handshake(
2756 ssl: &mut boring::ssl::SslRef, v: u64,
2757 ) -> Result<()> {
2758 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2759
2760 ex_data.local_transport_params.initial_max_streams_bidi = v;
2761
2762 Self::set_transport_parameters_in_hanshake(
2763 ex_data.local_transport_params.clone(),
2764 ex_data.is_server,
2765 ssl,
2766 )
2767 }
2768
2769 #[cfg(feature = "boringssl-boring-crate")]
2770 fn set_transport_parameters_in_hanshake(
2771 params: TransportParams, is_server: bool, ssl: &mut boring::ssl::SslRef,
2772 ) -> Result<()> {
2773 use foreign_types_shared::ForeignTypeRef;
2774
2775 // In order to apply the new parameter to the TLS state before TPs are
2776 // written into a TLS message, we need to re-encode all TPs immediately.
2777 //
2778 // Since we don't have direct access to the main `Connection` object, we
2779 // need to re-create the `Handshake` state from the `SslRef`.
2780 //
2781 // SAFETY: the `Handshake` object must not be drop()ed, otherwise it
2782 // would free the underlying BoringSSL structure.
2783 let mut handshake =
2784 unsafe { tls::Handshake::from_ptr(ssl.as_ptr() as _) };
2785 handshake.set_quic_transport_params(¶ms, is_server)?;
2786
2787 // Avoid running `drop(handshake)` as that would free the underlying
2788 // handshake state.
2789 std::mem::forget(handshake);
2790
2791 Ok(())
2792 }
2793
2794 /// Sets the `use_initial_max_data_as_flow_control_win` flag during SSL
2795 /// handshake.
2796 ///
2797 /// This function can only be called inside one of BoringSSL's handshake
2798 /// callbacks, before any packet has been sent. Calling this function any
2799 /// other time will have no effect.
2800 ///
2801 /// See [`Connection::enable_use_initial_max_data_as_flow_control_win()`].
2802 #[cfg(feature = "boringssl-boring-crate")]
2803 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2804 pub fn set_use_initial_max_data_as_flow_control_win_in_handshake(
2805 ssl: &mut boring::ssl::SslRef,
2806 ) -> Result<()> {
2807 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2808
2809 ex_data.use_initial_max_data_as_flow_control_win = true;
2810 Ok(())
2811 }
2812
    /// Processes QUIC packets received from the peer.
    ///
    /// On success the number of bytes processed from the input buffer is
    /// returned. On error the connection will be closed by calling [`close()`]
    /// with the appropriate error code.
    ///
    /// Coalesced packets will be processed as necessary.
    ///
    /// Note that the contents of the input buffer `buf` might be modified by
    /// this function due to, for example, in-place decryption.
    ///
    /// [`close()`]: struct.Connection.html#method.close
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut buf = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// loop {
    ///     let (read, from) = socket.recv_from(&mut buf).unwrap();
    ///
    ///     let recv_info = quiche::RecvInfo {
    ///         from,
    ///         to: local,
    ///     };
    ///
    ///     let read = match conn.recv(&mut buf[..read], recv_info) {
    ///         Ok(v) => v,
    ///
    ///         Err(e) => {
    ///             // An error occurred, handle it.
    ///             break;
    ///         },
    ///     };
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn recv(&mut self, buf: &mut [u8], info: RecvInfo) -> Result<usize> {
        let len = buf.len();

        // An empty datagram is never valid input.
        if len == 0 {
            return Err(Error::BufferTooShort);
        }

        // Look up the path this datagram arrived on from its 4-tuple, if it
        // is already known.
        let recv_pid = self.paths.path_id_from_addrs(&(info.to, info.from));

        if let Some(recv_pid) = recv_pid {
            let recv_path = self.paths.get_mut(recv_pid)?;

            // Keep track of how many bytes we received from the client, so we
            // can limit bytes sent back before address validation, to a
            // multiple of this. The limit needs to be increased early on, so
            // that if there is an error there is enough credit to send a
            // CONNECTION_CLOSE.
            //
            // It doesn't matter if the packets received were valid or not, we
            // only need to track the total amount of bytes received.
            //
            // Note that we also need to limit the number of bytes we sent on a
            // path if we are not the host that initiated its usage.
            if self.is_server && !recv_path.verified_peer_address {
                recv_path.max_send_bytes += len * self.max_amplification_factor;
            }
        } else if !self.is_server {
            // If a client receives packets from an unknown server address,
            // the client MUST discard these packets.
            trace!(
                "{} client received packet from unknown address {:?}, dropping",
                self.trace_id,
                info,
            );

            // Report the whole datagram as consumed so the caller drops it.
            return Ok(len);
        }

        // `done` counts bytes handed to `recv_single()` so far; `left` is the
        // remainder of the datagram still to be processed.
        let mut done = 0;
        let mut left = len;

        // Process coalesced packets.
        while left > 0 {
            let read = match self.recv_single(
                &mut buf[len - left..len],
                &info,
                recv_pid,
            ) {
                Ok(v) => v,

                Err(Error::Done) => {
                    // If the packet can't be processed or decrypted, check if
                    // it's a stateless reset.
                    if self.is_stateless_reset(&buf[len - left..len]) {
                        trace!("{} packet is a stateless reset", self.trace_id);

                        self.mark_closed();
                    }

                    // Consume the rest of the datagram, abandoning any
                    // remaining coalesced packets.
                    left
                },

                Err(e) => {
                    // In case of error processing the incoming packet, close
                    // the connection.
                    self.close(false, e.to_wire(), b"").ok();
                    return Err(e);
                },
            };

            done += read;
            left -= read;
        }

        // Even though the packet was previously "accepted", it
        // should be safe to forward the error, as it also comes
        // from the `recv()` method.
        self.process_undecrypted_0rtt_packets()?;

        Ok(done)
    }
2936
2937 fn process_undecrypted_0rtt_packets(&mut self) -> Result<()> {
2938 // Process previously undecryptable 0-RTT packets if the decryption key
2939 // is now available.
2940 if self.crypto_ctx[packet::Epoch::Application]
2941 .crypto_0rtt_open
2942 .is_some()
2943 {
2944 while let Some((mut pkt, info)) = self.undecryptable_pkts.pop_front()
2945 {
2946 if let Err(e) = self.recv(&mut pkt, info) {
2947 self.undecryptable_pkts.clear();
2948
2949 return Err(e);
2950 }
2951 }
2952 }
2953 Ok(())
2954 }
2955
2956 /// Returns true if a QUIC packet is a stateless reset.
2957 fn is_stateless_reset(&self, buf: &[u8]) -> bool {
2958 // If the packet is too small, then we just throw it away.
2959 let buf_len = buf.len();
2960 if buf_len < 21 {
2961 return false;
2962 }
2963
2964 // TODO: we should iterate over all active destination connection IDs
2965 // and check against their reset token.
2966 match self.peer_transport_params.stateless_reset_token {
2967 Some(token) => {
2968 let token_len = 16;
2969
2970 crypto::verify_slices_are_equal(
2971 &token.to_be_bytes(),
2972 &buf[buf_len - token_len..buf_len],
2973 )
2974 .is_ok()
2975 },
2976
2977 None => false,
2978 }
2979 }
2980
2981 /// Processes a single QUIC packet received from the peer.
2982 ///
2983 /// On success the number of bytes processed from the input buffer is
2984 /// returned. When the [`Done`] error is returned, processing of the
2985 /// remainder of the incoming UDP datagram should be interrupted.
2986 ///
2987 /// Note that a server might observe a new 4-tuple, preventing to
2988 /// know in advance to which path the incoming packet belongs to (`recv_pid`
2989 /// is `None`). As a client, packets from unknown 4-tuple are dropped
2990 /// beforehand (see `recv()`).
2991 ///
2992 /// On error, an error other than [`Done`] is returned.
2993 ///
2994 /// [`Done`]: enum.Error.html#variant.Done
2995 fn recv_single(
2996 &mut self, buf: &mut [u8], info: &RecvInfo, recv_pid: Option<usize>,
2997 ) -> Result<usize> {
2998 let now = Instant::now();
2999
3000 if buf.is_empty() {
3001 return Err(Error::Done);
3002 }
3003
3004 if self.is_closed() || self.is_draining() {
3005 return Err(Error::Done);
3006 }
3007
3008 let is_closing = self.local_error.is_some();
3009
3010 if is_closing {
3011 return Err(Error::Done);
3012 }
3013
3014 let buf_len = buf.len();
3015
3016 let mut b = octets::OctetsMut::with_slice(buf);
3017
3018 let mut hdr = Header::from_bytes(&mut b, self.source_id().len())
3019 .map_err(|e| {
3020 drop_pkt_on_err(
3021 e,
3022 self.recv_count,
3023 self.is_server,
3024 &self.trace_id,
3025 )
3026 })?;
3027
3028 if hdr.ty == Type::VersionNegotiation {
3029 // Version negotiation packets can only be sent by the server.
3030 if self.is_server {
3031 return Err(Error::Done);
3032 }
3033
3034 // Ignore duplicate version negotiation.
3035 if self.did_version_negotiation {
3036 return Err(Error::Done);
3037 }
3038
3039 // Ignore version negotiation if any other packet has already been
3040 // successfully processed.
3041 if self.recv_count > 0 {
3042 return Err(Error::Done);
3043 }
3044
3045 if hdr.dcid != self.source_id() {
3046 return Err(Error::Done);
3047 }
3048
3049 if hdr.scid != self.destination_id() {
3050 return Err(Error::Done);
3051 }
3052
3053 trace!("{} rx pkt {:?}", self.trace_id, hdr);
3054
3055 let versions = hdr.versions.ok_or(Error::Done)?;
3056
3057 // Ignore version negotiation if the version already selected is
3058 // listed.
3059 if versions.contains(&self.version) {
3060 return Err(Error::Done);
3061 }
3062
3063 let supported_versions =
3064 versions.iter().filter(|&&v| version_is_supported(v));
3065
3066 let mut found_version = false;
3067
3068 for &v in supported_versions {
3069 found_version = true;
3070
3071 // The final version takes precedence over draft ones.
3072 if v == PROTOCOL_VERSION_V1 {
3073 self.version = v;
3074 break;
3075 }
3076
3077 self.version = cmp::max(self.version, v);
3078 }
3079
3080 if !found_version {
3081 // We don't support any of the versions offered.
3082 //
3083 // While a man-in-the-middle attacker might be able to
3084 // inject a version negotiation packet that triggers this
3085 // failure, the window of opportunity is very small and
3086 // this error is quite useful for debugging, so don't just
3087 // ignore the packet.
3088 return Err(Error::UnknownVersion);
3089 }
3090
3091 self.did_version_negotiation = true;
3092
3093 // Derive Initial secrets based on the new version.
3094 let (aead_open, aead_seal) = crypto::derive_initial_key_material(
3095 &self.destination_id(),
3096 self.version,
3097 self.is_server,
3098 true,
3099 )?;
3100
3101 // Reset connection state to force sending another Initial packet.
3102 self.drop_epoch_state(packet::Epoch::Initial, now);
3103 self.got_peer_conn_id = false;
3104 self.handshake.clear()?;
3105
3106 self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
3107 self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);
3108
3109 self.handshake
3110 .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);
3111
3112 // Encode transport parameters again, as the new version might be
3113 // using a different format.
3114 self.encode_transport_params()?;
3115
3116 return Err(Error::Done);
3117 }
3118
3119 if hdr.ty == Type::Retry {
3120 // Retry packets can only be sent by the server.
3121 if self.is_server {
3122 return Err(Error::Done);
3123 }
3124
3125 // Ignore duplicate retry.
3126 if self.did_retry {
3127 return Err(Error::Done);
3128 }
3129
3130 // Check if Retry packet is valid.
3131 if packet::verify_retry_integrity(
3132 &b,
3133 &self.destination_id(),
3134 self.version,
3135 )
3136 .is_err()
3137 {
3138 return Err(Error::Done);
3139 }
3140
3141 trace!("{} rx pkt {:?}", self.trace_id, hdr);
3142
3143 self.token = hdr.token;
3144 self.did_retry = true;
3145
3146 // Remember peer's new connection ID.
3147 self.odcid = Some(self.destination_id().into_owned());
3148
3149 self.set_initial_dcid(
3150 hdr.scid.clone(),
3151 None,
3152 self.paths.get_active_path_id()?,
3153 )?;
3154
3155 self.rscid = Some(self.destination_id().into_owned());
3156
3157 // Derive Initial secrets using the new connection ID.
3158 let (aead_open, aead_seal) = crypto::derive_initial_key_material(
3159 &hdr.scid,
3160 self.version,
3161 self.is_server,
3162 true,
3163 )?;
3164
3165 // Reset connection state to force sending another Initial packet.
3166 self.drop_epoch_state(packet::Epoch::Initial, now);
3167 self.got_peer_conn_id = false;
3168 self.handshake.clear()?;
3169
3170 self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
3171 self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);
3172
3173 return Err(Error::Done);
3174 }
3175
3176 if self.is_server && !self.did_version_negotiation {
3177 if !version_is_supported(hdr.version) {
3178 return Err(Error::UnknownVersion);
3179 }
3180
3181 self.version = hdr.version;
3182 self.did_version_negotiation = true;
3183
3184 self.handshake
3185 .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);
3186
3187 // Encode transport parameters again, as the new version might be
3188 // using a different format.
3189 self.encode_transport_params()?;
3190 }
3191
3192 if hdr.ty != Type::Short && hdr.version != self.version {
3193 // At this point version negotiation was already performed, so
3194 // ignore packets that don't match the connection's version.
3195 return Err(Error::Done);
3196 }
3197
3198 // Long header packets have an explicit payload length, but short
3199 // packets don't so just use the remaining capacity in the buffer.
3200 let payload_len = if hdr.ty == Type::Short {
3201 b.cap()
3202 } else {
3203 b.get_varint().map_err(|e| {
3204 drop_pkt_on_err(
3205 e.into(),
3206 self.recv_count,
3207 self.is_server,
3208 &self.trace_id,
3209 )
3210 })? as usize
3211 };
3212
3213 // Make sure the buffer is same or larger than an explicit
3214 // payload length.
3215 if payload_len > b.cap() {
3216 return Err(drop_pkt_on_err(
3217 Error::InvalidPacket,
3218 self.recv_count,
3219 self.is_server,
3220 &self.trace_id,
3221 ));
3222 }
3223
3224 // Derive initial secrets on the server.
3225 if !self.derived_initial_secrets {
3226 let (aead_open, aead_seal) = crypto::derive_initial_key_material(
3227 &hdr.dcid,
3228 self.version,
3229 self.is_server,
3230 false,
3231 )?;
3232
3233 self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
3234 self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);
3235
3236 self.derived_initial_secrets = true;
3237 }
3238
3239 // Select packet number space epoch based on the received packet's type.
3240 let epoch = hdr.ty.to_epoch()?;
3241
3242 // Select AEAD context used to open incoming packet.
3243 let aead = if hdr.ty == Type::ZeroRTT {
3244 // Only use 0-RTT key if incoming packet is 0-RTT.
3245 self.crypto_ctx[epoch].crypto_0rtt_open.as_ref()
3246 } else {
3247 // Otherwise use the packet number space's main key.
3248 self.crypto_ctx[epoch].crypto_open.as_ref()
3249 };
3250
3251 // Finally, discard packet if no usable key is available.
3252 let mut aead = match aead {
3253 Some(v) => v,
3254
3255 None => {
3256 if hdr.ty == Type::ZeroRTT &&
3257 self.undecryptable_pkts.len() < MAX_UNDECRYPTABLE_PACKETS &&
3258 !self.is_established()
3259 {
3260 // Buffer 0-RTT packets when the required read key is not
3261 // available yet, and process them later.
3262 //
3263 // TODO: in the future we might want to buffer other types
3264 // of undecryptable packets as well.
3265 let pkt_len = b.off() + payload_len;
3266 let pkt = (b.buf()[..pkt_len]).to_vec();
3267
3268 self.undecryptable_pkts.push_back((pkt, *info));
3269 return Ok(pkt_len);
3270 }
3271
3272 let e = drop_pkt_on_err(
3273 Error::CryptoFail,
3274 self.recv_count,
3275 self.is_server,
3276 &self.trace_id,
3277 );
3278
3279 return Err(e);
3280 },
3281 };
3282
3283 let aead_tag_len = aead.alg().tag_len();
3284
3285 packet::decrypt_hdr(&mut b, &mut hdr, aead).map_err(|e| {
3286 drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
3287 })?;
3288
3289 let pn = packet::decode_pkt_num(
3290 self.pkt_num_spaces[epoch].largest_rx_pkt_num,
3291 hdr.pkt_num,
3292 hdr.pkt_num_len,
3293 );
3294
3295 let pn_len = hdr.pkt_num_len;
3296
3297 trace!(
3298 "{} rx pkt {:?} len={} pn={} {}",
3299 self.trace_id,
3300 hdr,
3301 payload_len,
3302 pn,
3303 AddrTupleFmt(info.from, info.to)
3304 );
3305
3306 #[cfg(feature = "qlog")]
3307 let mut qlog_frames = vec![];
3308
3309 // Check for key update.
3310 let mut aead_next = None;
3311
3312 if self.handshake_confirmed &&
3313 hdr.ty != Type::ZeroRTT &&
3314 hdr.key_phase != self.key_phase
3315 {
3316 // Check if this packet arrived before key update.
3317 if let Some(key_update) = self.crypto_ctx[epoch]
3318 .key_update
3319 .as_ref()
3320 .and_then(|key_update| {
3321 (pn < key_update.pn_on_update).then_some(key_update)
3322 })
3323 {
3324 aead = &key_update.crypto_open;
3325 } else {
3326 trace!("{} peer-initiated key update", self.trace_id);
3327
3328 aead_next = Some((
3329 self.crypto_ctx[epoch]
3330 .crypto_open
3331 .as_ref()
3332 .unwrap()
3333 .derive_next_packet_key()?,
3334 self.crypto_ctx[epoch]
3335 .crypto_seal
3336 .as_ref()
3337 .unwrap()
3338 .derive_next_packet_key()?,
3339 ));
3340
3341 // `aead_next` is always `Some()` at this point, so the `unwrap()`
3342 // will never fail.
3343 aead = &aead_next.as_ref().unwrap().0;
3344 }
3345 }
3346
3347 let mut payload = packet::decrypt_pkt(
3348 &mut b,
3349 pn,
3350 pn_len,
3351 payload_len,
3352 aead,
3353 )
3354 .map_err(|e| {
3355 drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
3356 })?;
3357
3358 if self.pkt_num_spaces[epoch].recv_pkt_num.contains(pn) {
3359 trace!("{} ignored duplicate packet {}", self.trace_id, pn);
3360 return Err(Error::Done);
3361 }
3362
3363 // Packets with no frames are invalid.
3364 if payload.cap() == 0 {
3365 return Err(Error::InvalidPacket);
3366 }
3367
3368 // Now that we decrypted the packet, let's see if we can map it to an
3369 // existing path.
3370 let recv_pid = if hdr.ty == Type::Short && self.got_peer_conn_id {
3371 let pkt_dcid = ConnectionId::from_ref(&hdr.dcid);
3372 self.get_or_create_recv_path_id(recv_pid, &pkt_dcid, buf_len, info)?
3373 } else {
3374 // During handshake, we are on the initial path.
3375 self.paths.get_active_path_id()?
3376 };
3377
3378 // The key update is verified once a packet is successfully decrypted
3379 // using the new keys.
3380 if let Some((open_next, seal_next)) = aead_next {
3381 if !self.crypto_ctx[epoch]
3382 .key_update
3383 .as_ref()
3384 .is_none_or(|prev| prev.update_acked)
3385 {
3386 // Peer has updated keys twice without awaiting confirmation.
3387 return Err(Error::KeyUpdate);
3388 }
3389
3390 trace!("{} key update verified", self.trace_id);
3391
3392 let _ = self.crypto_ctx[epoch].crypto_seal.replace(seal_next);
3393
3394 let open_prev = self.crypto_ctx[epoch]
3395 .crypto_open
3396 .replace(open_next)
3397 .unwrap();
3398
3399 let recv_path = self.paths.get_mut(recv_pid)?;
3400
3401 self.crypto_ctx[epoch].key_update = Some(packet::KeyUpdate {
3402 crypto_open: open_prev,
3403 pn_on_update: pn,
3404 update_acked: false,
3405 timer: now + (recv_path.recovery.pto() * 3),
3406 });
3407
3408 self.key_phase = !self.key_phase;
3409
3410 qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
3411 let trigger = Some(
3412 qlog::events::quic::KeyUpdateOrRetiredTrigger::RemoteUpdate,
3413 );
3414
3415 let ev_data_client =
3416 EventData::QuicKeyUpdated(qlog::events::quic::KeyUpdated {
3417 key_type: qlog::events::quic::KeyType::Client1RttSecret,
3418 trigger: trigger.clone(),
3419 ..Default::default()
3420 });
3421
3422 q.add_event_data_with_instant(ev_data_client, now).ok();
3423
3424 let ev_data_server =
3425 EventData::QuicKeyUpdated(qlog::events::quic::KeyUpdated {
3426 key_type: qlog::events::quic::KeyType::Server1RttSecret,
3427 trigger,
3428 ..Default::default()
3429 });
3430
3431 q.add_event_data_with_instant(ev_data_server, now).ok();
3432 });
3433 }
3434
3435 if !self.is_server && !self.got_peer_conn_id {
3436 if self.odcid.is_none() {
3437 self.odcid = Some(self.destination_id().into_owned());
3438 }
3439
3440 // Replace the randomly generated destination connection ID with
3441 // the one supplied by the server.
3442 self.set_initial_dcid(
3443 hdr.scid.clone(),
3444 self.peer_transport_params.stateless_reset_token,
3445 recv_pid,
3446 )?;
3447
3448 self.got_peer_conn_id = true;
3449 }
3450
3451 if self.is_server && !self.got_peer_conn_id {
3452 self.set_initial_dcid(hdr.scid.clone(), None, recv_pid)?;
3453
3454 if !self.did_retry {
3455 self.local_transport_params
3456 .original_destination_connection_id =
3457 Some(hdr.dcid.to_vec().into());
3458
3459 self.encode_transport_params()?;
3460 }
3461
3462 self.got_peer_conn_id = true;
3463 }
3464
3465 // To avoid sending an ACK in response to an ACK-only packet, we need
3466 // to keep track of whether this packet contains any frame other than
3467 // ACK and PADDING.
3468 let mut ack_elicited = false;
3469
3470 // Process packet payload. If a frame cannot be processed, store the
3471 // error and stop further packet processing.
3472 let mut frame_processing_err = None;
3473
3474 // To know if the peer migrated the connection, we need to keep track
3475 // whether this is a non-probing packet.
3476 let mut probing = true;
3477
3478 // Process packet payload.
3479 while payload.cap() > 0 {
3480 let frame = frame::Frame::from_bytes(&mut payload, hdr.ty)?;
3481
3482 qlog_with_type!(QLOG_PACKET_RX, self.qlog, _q, {
3483 qlog_frames.push(frame.to_qlog());
3484 });
3485
3486 if frame.ack_eliciting() {
3487 ack_elicited = true;
3488 }
3489
3490 if !frame.probing() {
3491 probing = false;
3492 }
3493
3494 if let Err(e) = self.process_frame(frame, &hdr, recv_pid, epoch, now)
3495 {
3496 frame_processing_err = Some(e);
3497 break;
3498 }
3499 }
3500
3501 qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
3502 let packet_size = b.len();
3503
3504 let qlog_pkt_hdr = qlog::events::quic::PacketHeader::with_type(
3505 hdr.ty.to_qlog(),
3506 Some(pn),
3507 Some(hdr.version),
3508 Some(&hdr.scid),
3509 Some(&hdr.dcid),
3510 );
3511
3512 let qlog_raw_info = RawInfo {
3513 length: Some(packet_size as u64),
3514 payload_length: Some(payload_len as u64),
3515 data: None,
3516 };
3517
3518 let ev_data = EventData::QuicPacketReceived(
3519 qlog::events::quic::PacketReceived {
3520 header: qlog_pkt_hdr,
3521 frames: Some(qlog_frames),
3522 raw: Some(qlog_raw_info),
3523 ..Default::default()
3524 },
3525 );
3526
3527 q.add_event_data_with_instant(ev_data, now).ok();
3528 });
3529
3530 qlog_with_type!(QLOG_METRICS, self.qlog, q, {
3531 let recv_path = self.paths.get_mut(recv_pid)?;
3532 recv_path.recovery.maybe_qlog(q, now);
3533 });
3534
3535 if let Some(e) = frame_processing_err {
3536 // Any frame error is terminal, so now just return.
3537 return Err(e);
3538 }
3539
3540 // Only log the remote transport parameters once the connection is
3541 // established (i.e. after frames have been fully parsed) and only
3542 // once per connection.
3543 if self.is_established() {
3544 qlog_with_type!(QLOG_PARAMS_SET, self.qlog, q, {
3545 if !self.qlog.logged_peer_params {
3546 let ev_data = self.peer_transport_params.to_qlog(
3547 TransportInitiator::Remote,
3548 self.handshake.cipher(),
3549 );
3550
3551 q.add_event_data_with_instant(ev_data, now).ok();
3552
3553 self.qlog.logged_peer_params = true;
3554 }
3555 });
3556 }
3557
3558 // Process acked frames. Note that several packets from several paths
3559 // might have been acked by the received packet.
3560 for (_, p) in self.paths.iter_mut() {
3561 while let Some(acked) = p.recovery.next_acked_frame(epoch) {
3562 match acked {
3563 frame::Frame::Ping {
3564 mtu_probe: Some(mtu_probe),
3565 } => {
3566 if let Some(pmtud) = p.pmtud.as_mut() {
3567 trace!(
3568 "{} pmtud probe acked; probe size {:?}",
3569 self.trace_id,
3570 mtu_probe
3571 );
3572
3573 // Ensure the probe is within the supported MTU range
3574 // before updating the max datagram size
3575 if let Some(current_mtu) =
3576 pmtud.successful_probe(mtu_probe)
3577 {
3578 qlog_with_type!(
3579 EventType::QuicEventType(
3580 QuicEventType::MtuUpdated
3581 ),
3582 self.qlog,
3583 q,
3584 {
3585 let pmtu_data = EventData::QuicMtuUpdated(
3586 qlog::events::quic::MtuUpdated {
3587 old: Some(
3588 p.recovery.max_datagram_size()
3589 as u32,
3590 ),
3591 new: current_mtu as u32,
3592 done: Some(true),
3593 },
3594 );
3595
3596 q.add_event_data_with_instant(
3597 pmtu_data, now,
3598 )
3599 .ok();
3600 }
3601 );
3602
3603 p.recovery
3604 .pmtud_update_max_datagram_size(current_mtu);
3605 }
3606 }
3607 },
3608
3609 frame::Frame::ACK { ranges, .. } => {
3610 // Stop acknowledging packets less than or equal to the
3611 // largest acknowledged in the sent ACK frame that, in
3612 // turn, got acked.
3613 if let Some(largest_acked) = ranges.last() {
3614 self.pkt_num_spaces[epoch]
3615 .recv_pkt_need_ack
3616 .remove_until(largest_acked);
3617 }
3618 },
3619
3620 frame::Frame::CryptoHeader { offset, length } => {
3621 self.crypto_ctx[epoch]
3622 .crypto_stream
3623 .send
3624 .ack_and_drop(offset, length);
3625 },
3626
3627 frame::Frame::StreamHeader {
3628 stream_id,
3629 offset,
3630 length,
3631 ..
3632 } => {
3633 // Update tx_buffered and emit qlog before checking if the
3634 // stream still exists. The client does need to ACK
3635 // frames that were received after the client sends a
3636 // ResetStream.
3637 self.tx_buffered =
3638 self.tx_buffered.saturating_sub(length);
3639
3640 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
3641 let ev_data = EventData::QuicStreamDataMoved(
3642 qlog::events::quic::StreamDataMoved {
3643 stream_id: Some(stream_id),
3644 offset: Some(offset),
3645 raw: Some(RawInfo {
3646 length: Some(length as u64),
3647 ..Default::default()
3648 }),
3649 from: Some(DataRecipient::Transport),
3650 to: Some(DataRecipient::Dropped),
3651 ..Default::default()
3652 },
3653 );
3654
3655 q.add_event_data_with_instant(ev_data, now).ok();
3656 });
3657
3658 let stream = match self.streams.get_mut(stream_id) {
3659 Some(v) => v,
3660
3661 None => continue,
3662 };
3663
3664 stream.send.ack_and_drop(offset, length);
3665
3666 let priority_key = Arc::clone(&stream.priority_key);
3667
3668 // Only collect the stream if it is complete and not
3669 // readable or writable.
3670 //
3671 // If it is readable, it will get collected when
3672 // stream_recv() is next used.
3673 //
3674 // If it is writable, it might mean that the stream
3675 // has been stopped by the peer (i.e. a STOP_SENDING
3676 // frame is received), in which case before collecting
3677 // the stream we will need to propagate the
3678 // `StreamStopped` error to the application. It will
3679 // instead get collected when one of stream_capacity(),
3680 // stream_writable(), stream_send(), ... is next called.
3681 //
3682 // Note that we can't use `is_writable()` here because
3683 // it returns false if the stream is stopped. Instead,
3684 // since the stream is marked as writable when a
3685 // STOP_SENDING frame is received, we check the writable
3686 // queue directly instead.
3687 let is_writable = priority_key.writable.is_linked() &&
3688 // Ensure that the stream is actually stopped.
3689 stream.send.is_stopped();
3690
3691 let is_complete = stream.is_complete();
3692 let is_readable = stream.is_readable();
3693
3694 if is_complete && !is_readable && !is_writable {
3695 let local = stream.local;
3696 self.streams.collect(stream_id, local);
3697 }
3698 },
3699
3700 frame::Frame::HandshakeDone => {
3701 // Explicitly set this to true, so that if the frame was
3702 // already scheduled for retransmission, it is aborted.
3703 self.handshake_done_sent = true;
3704
3705 self.handshake_done_acked = true;
3706 },
3707
3708 frame::Frame::ResetStream { stream_id, .. } => {
3709 let stream = match self.streams.get_mut(stream_id) {
3710 Some(v) => v,
3711
3712 None => continue,
3713 };
3714
3715 let priority_key = Arc::clone(&stream.priority_key);
3716
3717 // Only collect the stream if it is complete and not
3718 // readable or writable.
3719 //
3720 // If it is readable, it will get collected when
3721 // stream_recv() is next used.
3722 //
3723 // If it is writable, it might mean that the stream
3724 // has been stopped by the peer (i.e. a STOP_SENDING
3725 // frame is received), in which case before collecting
3726 // the stream we will need to propagate the
3727 // `StreamStopped` error to the application. It will
3728 // instead get collected when one of stream_capacity(),
3729 // stream_writable(), stream_send(), ... is next called.
3730 //
3731 // Note that we can't use `is_writable()` here because
3732 // it returns false if the stream is stopped. Instead,
3733 // since the stream is marked as writable when a
3734 // STOP_SENDING frame is received, we check the writable
3735 // queue directly instead.
3736 let is_writable = priority_key.writable.is_linked() &&
3737 // Ensure that the stream is actually stopped.
3738 stream.send.is_stopped();
3739
3740 let is_complete = stream.is_complete();
3741 let is_readable = stream.is_readable();
3742
3743 if is_complete && !is_readable && !is_writable {
3744 let local = stream.local;
3745 self.streams.collect(stream_id, local);
3746 }
3747 },
3748
3749 _ => (),
3750 }
3751 }
3752 }
3753
3754 // Now that we processed all the frames, if there is a path that has no
3755 // Destination CID, try to allocate one.
3756 let no_dcid = self
3757 .paths
3758 .iter_mut()
3759 .filter(|(_, p)| p.active_dcid_seq.is_none());
3760
3761 for (pid, p) in no_dcid {
3762 if self.ids.zero_length_dcid() {
3763 p.active_dcid_seq = Some(0);
3764 continue;
3765 }
3766
3767 let dcid_seq = match self.ids.lowest_available_dcid_seq() {
3768 Some(seq) => seq,
3769 None => break,
3770 };
3771
3772 self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
3773
3774 p.active_dcid_seq = Some(dcid_seq);
3775 }
3776
3777 // We only record the time of arrival of the largest packet number
3778 // that still needs to be acked, to be used for ACK delay calculation.
3779 if self.pkt_num_spaces[epoch].recv_pkt_need_ack.last() < Some(pn) {
3780 self.pkt_num_spaces[epoch].largest_rx_pkt_time = now;
3781 }
3782
3783 self.pkt_num_spaces[epoch].recv_pkt_num.insert(pn);
3784
3785 self.pkt_num_spaces[epoch].recv_pkt_need_ack.push_item(pn);
3786
3787 self.pkt_num_spaces[epoch].ack_elicited =
3788 cmp::max(self.pkt_num_spaces[epoch].ack_elicited, ack_elicited);
3789
3790 self.pkt_num_spaces[epoch].largest_rx_pkt_num =
3791 cmp::max(self.pkt_num_spaces[epoch].largest_rx_pkt_num, pn);
3792
3793 if !probing {
3794 self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num = cmp::max(
3795 self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num,
3796 pn,
3797 );
3798
            // Did the peer migrate to another path?
3800 let active_path_id = self.paths.get_active_path_id()?;
3801
3802 if self.is_server &&
3803 recv_pid != active_path_id &&
3804 self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num == pn
3805 {
3806 self.on_peer_migrated(recv_pid, self.disable_dcid_reuse, now)?;
3807 }
3808 }
3809
3810 if let Some(idle_timeout) = self.idle_timeout() {
3811 self.idle_timer = Some(now + idle_timeout);
3812 }
3813
3814 // Update send capacity.
3815 self.update_tx_cap();
3816
3817 self.recv_count += 1;
3818 self.paths.get_mut(recv_pid)?.recv_count += 1;
3819
3820 let read = b.off() + aead_tag_len;
3821
3822 self.recv_bytes += read as u64;
3823 self.paths.get_mut(recv_pid)?.recv_bytes += read as u64;
3824
        // A Handshake packet has been received from the client and has been
        // successfully processed, so we can drop the initial state and consider
        // the client's address to be verified.
3828 if self.is_server && hdr.ty == Type::Handshake {
3829 self.drop_epoch_state(packet::Epoch::Initial, now);
3830
3831 self.paths.get_mut(recv_pid)?.verified_peer_address = true;
3832 }
3833
3834 self.ack_eliciting_sent = false;
3835
3836 Ok(read)
3837 }
3838
3839 /// Writes a single QUIC packet to be sent to the peer.
3840 ///
3841 /// On success the number of bytes written to the output buffer is
3842 /// returned, or [`Done`] if there was nothing to write.
3843 ///
3844 /// The application should call `send()` multiple times until [`Done`] is
3845 /// returned, indicating that there are no more packets to send. It is
3846 /// recommended that `send()` be called in the following cases:
3847 ///
3848 /// * When the application receives QUIC packets from the peer (that is,
3849 /// any time [`recv()`] is also called).
3850 ///
3851 /// * When the connection timer expires (that is, any time [`on_timeout()`]
3852 /// is also called).
3853 ///
3854 /// * When the application sends data to the peer (for example, any time
3855 /// [`stream_send()`] or [`stream_shutdown()`] are called).
3856 ///
3857 /// * When the application receives data from the peer (for example any
3858 /// time [`stream_recv()`] is called).
3859 ///
3860 /// Once [`is_draining()`] returns `true`, it is no longer necessary to call
3861 /// `send()` and all calls will return [`Done`].
3862 ///
3863 /// [`Done`]: enum.Error.html#variant.Done
3864 /// [`recv()`]: struct.Connection.html#method.recv
3865 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
3866 /// [`stream_send()`]: struct.Connection.html#method.stream_send
3867 /// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
3868 /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
3869 /// [`is_draining()`]: struct.Connection.html#method.is_draining
3870 ///
3871 /// ## Examples:
3872 ///
3873 /// ```no_run
3874 /// # let mut out = [0; 512];
3875 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
3876 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
3877 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
3878 /// # let peer = "127.0.0.1:1234".parse().unwrap();
3879 /// # let local = socket.local_addr().unwrap();
3880 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
3881 /// loop {
3882 /// let (write, send_info) = match conn.send(&mut out) {
3883 /// Ok(v) => v,
3884 ///
3885 /// Err(quiche::Error::Done) => {
3886 /// // Done writing.
3887 /// break;
3888 /// },
3889 ///
3890 /// Err(e) => {
3891 /// // An error occurred, handle it.
3892 /// break;
3893 /// },
3894 /// };
3895 ///
3896 /// socket.send_to(&out[..write], &send_info.to).unwrap();
3897 /// }
3898 /// # Ok::<(), quiche::Error>(())
3899 /// ```
    pub fn send(&mut self, out: &mut [u8]) -> Result<(usize, SendInfo)> {
        // Equivalent to `send_on_path()` with no 4-tuple constraints: all
        // available paths are considered when deciding where to send.
        self.send_on_path(out, None, None)
    }
3903
3904 /// Writes a single QUIC packet to be sent to the peer from the specified
3905 /// local address `from` to the destination address `to`.
3906 ///
3907 /// The behavior of this method differs depending on the value of the `from`
3908 /// and `to` parameters:
3909 ///
    /// * If both are `Some`, then the method only considers the 4-tuple
3911 /// (`from`, `to`). Application can monitor the 4-tuple availability,
3912 /// either by monitoring [`path_event_next()`] events or by relying on
3913 /// the [`paths_iter()`] method. If the provided 4-tuple does not exist
3914 /// on the connection (anymore), it returns an [`InvalidState`].
3915 ///
3916 /// * If `from` is `Some` and `to` is `None`, then the method only
3917 /// considers sending packets on paths having `from` as local address.
3918 ///
3919 /// * If `to` is `Some` and `from` is `None`, then the method only
3920 /// considers sending packets on paths having `to` as peer address.
3921 ///
3922 /// * If both are `None`, all available paths are considered.
3923 ///
3924 /// On success the number of bytes written to the output buffer is
3925 /// returned, or [`Done`] if there was nothing to write.
3926 ///
3927 /// The application should call `send_on_path()` multiple times until
3928 /// [`Done`] is returned, indicating that there are no more packets to
3929 /// send. It is recommended that `send_on_path()` be called in the
3930 /// following cases:
3931 ///
3932 /// * When the application receives QUIC packets from the peer (that is,
3933 /// any time [`recv()`] is also called).
3934 ///
3935 /// * When the connection timer expires (that is, any time [`on_timeout()`]
3936 /// is also called).
3937 ///
    /// * When the application sends data to the peer (for example, any time
3939 /// [`stream_send()`] or [`stream_shutdown()`] are called).
3940 ///
3941 /// * When the application receives data from the peer (for example any
3942 /// time [`stream_recv()`] is called).
3943 ///
3944 /// Once [`is_draining()`] returns `true`, it is no longer necessary to call
3945 /// `send_on_path()` and all calls will return [`Done`].
3946 ///
3947 /// [`Done`]: enum.Error.html#variant.Done
    /// [`InvalidState`]: enum.Error.html#variant.InvalidState
3949 /// [`recv()`]: struct.Connection.html#method.recv
3950 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
3951 /// [`stream_send()`]: struct.Connection.html#method.stream_send
3952 /// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
3953 /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
3954 /// [`path_event_next()`]: struct.Connection.html#method.path_event_next
3955 /// [`paths_iter()`]: struct.Connection.html#method.paths_iter
3956 /// [`is_draining()`]: struct.Connection.html#method.is_draining
3957 ///
3958 /// ## Examples:
3959 ///
3960 /// ```no_run
3961 /// # let mut out = [0; 512];
3962 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
3963 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
3964 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
3965 /// # let peer = "127.0.0.1:1234".parse().unwrap();
3966 /// # let local = socket.local_addr().unwrap();
3967 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
3968 /// loop {
3969 /// let (write, send_info) = match conn.send_on_path(&mut out, Some(local), Some(peer)) {
3970 /// Ok(v) => v,
3971 ///
3972 /// Err(quiche::Error::Done) => {
3973 /// // Done writing.
3974 /// break;
3975 /// },
3976 ///
3977 /// Err(e) => {
3978 /// // An error occurred, handle it.
3979 /// break;
3980 /// },
3981 /// };
3982 ///
3983 /// socket.send_to(&out[..write], &send_info.to).unwrap();
3984 /// }
3985 /// # Ok::<(), quiche::Error>(())
3986 /// ```
    pub fn send_on_path(
        &mut self, out: &mut [u8], from: Option<SocketAddr>,
        to: Option<SocketAddr>,
    ) -> Result<(usize, SendInfo)> {
        if out.is_empty() {
            return Err(Error::BufferTooShort);
        }

        // Nothing is ever sent on a closed or draining connection.
        if self.is_closed() || self.is_draining() {
            return Err(Error::Done);
        }

        let now = Instant::now();

        // Keep driving the handshake forward, unless we are already in the
        // process of closing the connection with a local error.
        if self.local_error.is_none() {
            self.do_handshake(now)?;
        }

        // Forwarding the error value here could confuse
        // applications, as they may not expect getting a `recv()`
        // error when calling `send()`.
        //
        // We simply fall-through to sending packets, which should
        // take care of terminating the connection as needed.
        let _ = self.process_undecrypted_0rtt_packets();

        // There's no point in trying to send a packet if the Initial secrets
        // have not been derived yet, so return early.
        if !self.derived_initial_secrets {
            return Err(Error::Done);
        }

        // Whether any of the coalesced packets written so far is an Initial
        // packet (used to decide whether the datagram must be padded).
        let mut has_initial = false;

        // Total number of bytes written into `out` so far.
        let mut done = 0;

        // Limit output packet size to respect the sender and receiver's
        // maximum UDP payload size limit.
        let mut left = cmp::min(out.len(), self.max_send_udp_payload_size());

        // Resolve the path to send on: an explicit 4-tuple must already
        // exist, otherwise fall back to the path selection logic.
        let send_pid = match (from, to) {
            (Some(f), Some(t)) => self
                .paths
                .path_id_from_addrs(&(f, t))
                .ok_or(Error::InvalidState)?,

            _ => self.get_send_path_id(from, to)?,
        };

        let send_path = self.paths.get_mut(send_pid)?;

        // Update max datagram size to allow path MTU discovery probe to be sent.
        if let Some(pmtud) = send_path.pmtud.as_mut() {
            if pmtud.should_probe() {
                // Probe with the candidate size once the handshake has made
                // enough progress, otherwise stick to the current MTU.
                let size = if self.handshake_confirmed || self.handshake_completed
                {
                    pmtud.get_probe_size()
                } else {
                    pmtud.get_current_mtu()
                };

                send_path.recovery.pmtud_update_max_datagram_size(size);

                left =
                    cmp::min(out.len(), send_path.recovery.max_datagram_size());
            }
        }

        // Limit data sent by the server based on the amount of data received
        // from the client before its address is validated.
        if !send_path.verified_peer_address && self.is_server {
            left = cmp::min(left, send_path.max_send_bytes);
        }

        // Generate coalesced packets: keep appending packets to the same UDP
        // datagram until the buffer is full or coalescing must stop.
        while left > 0 {
            let (ty, written) = match self.send_single(
                &mut out[done..done + left],
                send_pid,
                has_initial,
                now,
            ) {
                Ok(v) => v,

                // Both errors mean "no more packets fit / nothing to send";
                // they end coalescing rather than the whole call.
                Err(Error::BufferTooShort) | Err(Error::Done) => break,

                Err(e) => return Err(e),
            };

            done += written;
            left -= written;

            match ty {
                Type::Initial => has_initial = true,

                // No more packets can be coalesced after a 1-RTT.
                Type::Short => break,

                _ => (),
            };

            // When sending multiple PTO probes, don't coalesce them together,
            // so they are sent on separate UDP datagrams.
            if let Ok(epoch) = ty.to_epoch() {
                if self.paths.get_mut(send_pid)?.recovery.loss_probes(epoch) > 0 {
                    break;
                }
            }

            // Don't coalesce packets that must go on different paths.
            if !(from.is_some() && to.is_some()) &&
                self.get_send_path_id(from, to)? != send_pid
            {
                break;
            }
        }

        if done == 0 {
            // NOTE(review): nothing was written; this resets the tx_data
            // watermark — presumably feeding the app-limited check. Confirm.
            self.last_tx_data = self.tx_data;

            return Err(Error::Done);
        }

        // Pad datagrams carrying an Initial packet up to the minimum client
        // Initial size, when the buffer allows it.
        if has_initial && left > 0 && done < MIN_CLIENT_INITIAL_LEN {
            let pad_len = cmp::min(left, MIN_CLIENT_INITIAL_LEN - done);

            // Fill padding area with null bytes, to avoid leaking information
            // in case the application reuses the packet buffer.
            out[done..done + pad_len].fill(0);

            done += pad_len;
        }

        let send_path = self.paths.get(send_pid)?;

        // Report the addresses of the chosen path, and the pacing-controlled
        // time at which the datagram should actually be put on the wire.
        let info = SendInfo {
            from: send_path.local_addr(),
            to: send_path.peer_addr(),

            at: send_path.recovery.get_packet_send_time(now),
        };

        Ok((done, info))
    }
4131
4132 fn send_single(
4133 &mut self, out: &mut [u8], send_pid: usize, has_initial: bool,
4134 now: Instant,
4135 ) -> Result<(Type, usize)> {
4136 if out.is_empty() {
4137 return Err(Error::BufferTooShort);
4138 }
4139
4140 if self.is_draining() {
4141 return Err(Error::Done);
4142 }
4143
4144 let is_closing = self.local_error.is_some();
4145
4146 let out_len = out.len();
4147
4148 let mut b = octets::OctetsMut::with_slice(out);
4149
4150 let pkt_type = self.write_pkt_type(send_pid)?;
4151
4152 let max_dgram_len = if !self.dgram_send_queue.is_empty() {
4153 self.dgram_max_writable_len()
4154 } else {
4155 None
4156 };
4157
4158 let epoch = pkt_type.to_epoch()?;
4159 let pkt_space = &mut self.pkt_num_spaces[epoch];
4160 let crypto_ctx = &mut self.crypto_ctx[epoch];
4161
4162 // Process lost frames. There might be several paths having lost frames.
4163 for (_, p) in self.paths.iter_mut() {
4164 while let Some(lost) = p.recovery.next_lost_frame(epoch) {
4165 match lost {
4166 frame::Frame::CryptoHeader { offset, length } => {
4167 crypto_ctx.crypto_stream.send.retransmit(offset, length);
4168
4169 self.stream_retrans_bytes += length as u64;
4170 p.stream_retrans_bytes += length as u64;
4171
4172 self.retrans_count += 1;
4173 p.retrans_count += 1;
4174 },
4175
4176 frame::Frame::StreamHeader {
4177 stream_id,
4178 offset,
4179 length,
4180 fin,
4181 } => {
4182 let stream = match self.streams.get_mut(stream_id) {
4183 // Only retransmit data if the stream is not closed
4184 // or stopped.
4185 Some(v) if !v.send.is_stopped() => v,
4186
4187 // Data on a closed stream will not be retransmitted
4188 // or acked after it is declared lost, so update
4189 // tx_buffered and qlog.
4190 _ => {
4191 self.tx_buffered =
4192 self.tx_buffered.saturating_sub(length);
4193
4194 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
4195 let ev_data = EventData::QuicStreamDataMoved(
4196 qlog::events::quic::StreamDataMoved {
4197 stream_id: Some(stream_id),
4198 offset: Some(offset),
4199 raw: Some(RawInfo {
4200 length: Some(length as u64),
4201 ..Default::default()
4202 }),
4203 from: Some(DataRecipient::Transport),
4204 to: Some(DataRecipient::Dropped),
4205 ..Default::default()
4206 },
4207 );
4208
4209 q.add_event_data_with_instant(ev_data, now)
4210 .ok();
4211 });
4212
4213 continue;
4214 },
4215 };
4216
4217 let was_flushable = stream.is_flushable();
4218
4219 let empty_fin = length == 0 && fin;
4220
4221 stream.send.retransmit(offset, length);
4222
4223 // If the stream is now flushable push it to the
4224 // flushable queue, but only if it wasn't already
4225 // queued.
4226 //
4227 // Consider the stream flushable also when we are
4228 // sending a zero-length frame that has the fin flag
4229 // set.
4230 if (stream.is_flushable() || empty_fin) && !was_flushable
4231 {
4232 let priority_key = Arc::clone(&stream.priority_key);
4233 self.streams.insert_flushable(&priority_key);
4234 }
4235
4236 self.stream_retrans_bytes += length as u64;
4237 p.stream_retrans_bytes += length as u64;
4238
4239 self.retrans_count += 1;
4240 p.retrans_count += 1;
4241 },
4242
4243 frame::Frame::ACK { .. } => {
4244 pkt_space.ack_elicited = true;
4245 },
4246
4247 frame::Frame::ResetStream {
4248 stream_id,
4249 error_code,
4250 final_size,
4251 } => {
4252 self.streams
4253 .insert_reset(stream_id, error_code, final_size);
4254 },
4255
4256 frame::Frame::StopSending {
4257 stream_id,
4258 error_code,
4259 } =>
4260 // We only need to retransmit the STOP_SENDING frame if
4261 // the stream is still active and not FIN'd. Even if the
4262 // packet was lost, if the application has the final
4263 // size at this point there is no need to retransmit.
4264 if let Some(stream) = self.streams.get(stream_id) {
4265 if !stream.recv.is_fin() {
4266 self.streams
4267 .insert_stopped(stream_id, error_code);
4268 }
4269 },
4270
4271 // Retransmit HANDSHAKE_DONE only if it hasn't been acked at
4272 // least once already.
4273 frame::Frame::HandshakeDone =>
4274 if !self.handshake_done_acked {
4275 self.handshake_done_sent = false;
4276 },
4277
4278 frame::Frame::MaxStreamData { stream_id, .. } => {
4279 if self.streams.get(stream_id).is_some() {
4280 self.streams.insert_almost_full(stream_id);
4281 }
4282 },
4283
4284 frame::Frame::MaxData { .. } => {
4285 self.should_send_max_data = true;
4286 },
4287
4288 frame::Frame::MaxStreamsUni { .. } => {
4289 self.should_send_max_streams_uni = true;
4290 },
4291
4292 frame::Frame::MaxStreamsBidi { .. } => {
4293 self.should_send_max_streams_bidi = true;
4294 },
4295
4296 // Retransmit STREAMS_BLOCKED frames if the frame with the
4297 // most recent limit is lost. These are informational
4298 // signals to the peer, reliably sending them
4299 // ensures the signal is used consistently and helps
4300 // debugging.
4301 frame::Frame::StreamsBlockedBidi { limit } => {
4302 self.streams_blocked_bidi_state
4303 .force_retransmit_sent_limit_eq(limit);
4304 },
4305
4306 frame::Frame::StreamsBlockedUni { limit } => {
4307 self.streams_blocked_uni_state
4308 .force_retransmit_sent_limit_eq(limit);
4309 },
4310
4311 frame::Frame::NewConnectionId { seq_num, .. } => {
4312 self.ids.mark_advertise_new_scid_seq(seq_num, true);
4313 },
4314
4315 frame::Frame::RetireConnectionId { seq_num } => {
4316 self.ids.mark_retire_dcid_seq(seq_num, true)?;
4317 },
4318
4319 frame::Frame::Ping { mtu_probe } => {
4320 // Ping frames are not retransmitted.
4321 if let Some(failed_probe) = mtu_probe {
4322 if let Some(pmtud) = p.pmtud.as_mut() {
4323 trace!("pmtud probe dropped: {failed_probe}");
4324 pmtud.failed_probe(failed_probe);
4325 }
4326 }
4327 },
4328
4329 // Sent as StreamHeader frames. Stream frames are never
4330 // generated by quiche.
4331 frame::Frame::Stream { .. } => {
4332 debug_panic!(
4333 "Unexpected frame lost: Stream. quiche should \
4334 have tracked retransmittable stream data as \
4335 StreamHeader frames."
4336 );
4337 },
4338
4339 // Sent as CryptoHeader frames. Crypto frames are never
4340 // generated by quiche.
4341 frame::Frame::Crypto { .. } => {
4342 debug_panic!(
4343 "Unexpected frame lost: Crypto. quiche should \
4344 have tracked retransmittable crypto data as \
4345 CryptoHeader frames."
4346 );
4347 },
4348
4349 // NewToken frames are never sent by quiche; they are not
4350 // implemented.
4351 frame::Frame::NewToken { .. } => {
4352 debug_panic!(
4353 "Unexpected frame lost: NewToken. quiche used to \
4354 not implement NewToken frames, retransmission of \
4355 these frames is not implemented."
4356 );
4357 },
4358
4359 // Data blocked frames are an optional advisory
4360 // signal. We choose to not retransmit them to
4361 // avoid unnecessary network usage.
4362 frame::Frame::DataBlocked { .. } |
4363 frame::Frame::StreamDataBlocked { .. } => (),
4364
4365 // Path challenge and response have their own
4366 // retry logic. They should not be retransmitted
4367 // normally since according to RFC 9000 Section
4368 // 8.2.2: "An endpoint MUST NOT send more than one
4369 // PATH_RESPONSE frame in response to one
4370 // PATH_CHALLENGE frame".
4371 frame::Frame::PathChallenge { .. } |
4372 frame::Frame::PathResponse { .. } => (),
4373
4374 // From RFC 9000 Section 13.3: CONNECTION_CLOSE
4375 // frames, are not sent again when packet loss is
4376 // detected. Resending these signals is described
4377 // in Section 10.
4378 frame::Frame::ConnectionClose { .. } |
4379 frame::Frame::ApplicationClose { .. } => (),
4380
4381 // Padding doesn't require retransmission.
4382 frame::Frame::Padding { .. } => (),
4383
4384 frame::Frame::DatagramHeader { .. } |
4385 frame::Frame::Datagram { .. } => {
4386 // Datagrams do not require retransmission. Just update
4387 // stats.
4388 p.dgram_lost_count = p.dgram_lost_count.saturating_add(1);
4389 },
4390 // IMPORTANT: Do not add an exhaustive catch
4391 // all. We want to add explicit handling for frame
4392 // types that can be safely ignored when lost.
4393 }
4394 }
4395 }
4396 self.check_tx_buffered_invariant();
4397
4398 let is_app_limited = self.delivery_rate_check_if_app_limited();
4399 let n_paths = self.paths.len();
4400 let path = self.paths.get_mut(send_pid)?;
4401 let flow_control = &mut self.flow_control;
4402 let pkt_space = &mut self.pkt_num_spaces[epoch];
4403 let crypto_ctx = &mut self.crypto_ctx[epoch];
4404 let pkt_num_manager = &mut self.pkt_num_manager;
4405
4406 let mut left = if let Some(pmtud) = path.pmtud.as_mut() {
4407 // Limit output buffer size by estimated path MTU.
4408 cmp::min(pmtud.get_current_mtu(), b.cap())
4409 } else {
4410 b.cap()
4411 };
4412
4413 if pkt_num_manager.should_skip_pn(self.handshake_completed) {
4414 pkt_num_manager.set_skip_pn(Some(self.next_pkt_num));
4415 self.next_pkt_num += 1;
4416 };
4417 let pn = self.next_pkt_num;
4418
4419 let largest_acked_pkt =
4420 path.recovery.get_largest_acked_on_epoch(epoch).unwrap_or(0);
4421 let pn_len = packet::pkt_num_len(pn, largest_acked_pkt);
4422
4423 // The AEAD overhead at the current encryption level.
4424 let crypto_overhead = crypto_ctx.crypto_overhead().ok_or(Error::Done)?;
4425
4426 let dcid_seq = path.active_dcid_seq.ok_or(Error::OutOfIdentifiers)?;
4427
4428 let dcid =
4429 ConnectionId::from_ref(self.ids.get_dcid(dcid_seq)?.cid.as_ref());
4430
4431 let scid = if let Some(scid_seq) = path.active_scid_seq {
4432 ConnectionId::from_ref(self.ids.get_scid(scid_seq)?.cid.as_ref())
4433 } else if pkt_type == Type::Short {
4434 ConnectionId::default()
4435 } else {
4436 return Err(Error::InvalidState);
4437 };
4438
4439 let hdr = Header {
4440 ty: pkt_type,
4441
4442 version: self.version,
4443
4444 dcid,
4445 scid,
4446
4447 pkt_num: 0,
4448 pkt_num_len: pn_len,
4449
4450 // Only clone token for Initial packets, as other packets don't have
4451 // this field (Retry doesn't count, as it's not encoded as part of
4452 // this code path).
4453 token: if pkt_type == Type::Initial {
4454 self.token.clone()
4455 } else {
4456 None
4457 },
4458
4459 versions: None,
4460 key_phase: self.key_phase,
4461 };
4462
4463 hdr.to_bytes(&mut b)?;
4464
4465 let hdr_trace = if log::max_level() == log::LevelFilter::Trace {
4466 Some(format!("{hdr:?}"))
4467 } else {
4468 None
4469 };
4470
4471 let hdr_ty = hdr.ty;
4472
4473 #[cfg(feature = "qlog")]
4474 let qlog_pkt_hdr = self.qlog.streamer.as_ref().map(|_q| {
4475 qlog::events::quic::PacketHeader::with_type(
4476 hdr.ty.to_qlog(),
4477 Some(pn),
4478 Some(hdr.version),
4479 Some(&hdr.scid),
4480 Some(&hdr.dcid),
4481 )
4482 });
4483
4484 // Calculate the space required for the packet, including the header
4485 // the payload length, the packet number and the AEAD overhead.
4486 let mut overhead = b.off() + pn_len + crypto_overhead;
4487
4488 // We assume that the payload length, which is only present in long
4489 // header packets, can always be encoded with a 2-byte varint.
4490 if pkt_type != Type::Short {
4491 overhead += PAYLOAD_LENGTH_LEN;
4492 }
4493
4494 // Make sure we have enough space left for the packet overhead.
4495 match left.checked_sub(overhead) {
4496 Some(v) => left = v,
4497
4498 None => {
4499 // We can't send more because there isn't enough space available
4500 // in the output buffer.
4501 //
4502 // This usually happens when we try to send a new packet but
4503 // failed because cwnd is almost full. In such case app_limited
4504 // is set to false here to make cwnd grow when ACK is received.
4505 path.recovery.update_app_limited(false);
4506 return Err(Error::Done);
4507 },
4508 }
4509
4510 // Make sure there is enough space for the minimum payload length.
4511 if left < PAYLOAD_MIN_LEN {
4512 path.recovery.update_app_limited(false);
4513 return Err(Error::Done);
4514 }
4515
4516 let mut frames: SmallVec<[frame::Frame; 1]> = SmallVec::new();
4517
4518 let mut ack_eliciting = false;
4519 let mut in_flight = false;
4520 let mut is_pmtud_probe = false;
4521 let mut has_data = false;
4522
4523 // Whether or not we should explicitly elicit an ACK via PING frame if we
4524 // implicitly elicit one otherwise.
4525 let ack_elicit_required = path.recovery.should_elicit_ack(epoch);
4526
4527 let header_offset = b.off();
4528
4529 // Reserve space for payload length in advance. Since we don't yet know
4530 // what the final length will be, we reserve 2 bytes in all cases.
4531 //
4532 // Only long header packets have an explicit length field.
4533 if pkt_type != Type::Short {
4534 b.skip(PAYLOAD_LENGTH_LEN)?;
4535 }
4536
4537 packet::encode_pkt_num(pn, pn_len, &mut b)?;
4538
4539 let payload_offset = b.off();
4540
4541 let cwnd_available =
4542 path.recovery.cwnd_available().saturating_sub(overhead);
4543
4544 let left_before_packing_ack_frame = left;
4545
4546 // Create ACK frame.
4547 //
4548 // When we need to explicitly elicit an ACK via PING later, go ahead and
4549 // generate an ACK (if there's anything to ACK) since we're going to
4550 // send a packet with PING anyways, even if we haven't received anything
4551 // ACK eliciting.
4552 if pkt_space.recv_pkt_need_ack.len() > 0 &&
4553 (pkt_space.ack_elicited || ack_elicit_required) &&
4554 (!is_closing ||
4555 (pkt_type == Type::Handshake &&
4556 self.local_error
4557 .as_ref()
4558 .is_some_and(|le| le.is_app))) &&
4559 path.active()
4560 {
4561 #[cfg(not(feature = "fuzzing"))]
4562 let ack_delay = pkt_space.largest_rx_pkt_time.elapsed();
4563
4564 #[cfg(not(feature = "fuzzing"))]
4565 let ack_delay = ack_delay.as_micros() as u64 /
4566 2_u64
4567 .pow(self.local_transport_params.ack_delay_exponent as u32);
4568
4569 // pseudo-random reproducible ack delays when fuzzing
4570 #[cfg(feature = "fuzzing")]
4571 let ack_delay = rand::rand_u8() as u64 + 1;
4572
4573 let frame = frame::Frame::ACK {
4574 ack_delay,
4575 ranges: pkt_space.recv_pkt_need_ack.clone(),
4576 ecn_counts: None, // sending ECN is not supported at this time
4577 };
4578
4579 // When a PING frame needs to be sent, avoid sending the ACK if
4580 // there is not enough cwnd available for both (note that PING
4581 // frames are always 1 byte, so we just need to check that the
4582 // ACK's length is lower than cwnd).
4583 if pkt_space.ack_elicited || frame.wire_len() < cwnd_available {
4584 // ACK-only packets are not congestion controlled so ACKs must
4585 // be bundled considering the buffer capacity only, and not the
4586 // available cwnd.
4587 if push_frame_to_pkt!(b, frames, frame, left) {
4588 pkt_space.ack_elicited = false;
4589 }
4590 }
4591 }
4592
4593 // Limit output packet size by congestion window size.
4594 left = cmp::min(
4595 left,
4596 // Bytes consumed by ACK frames.
4597 cwnd_available.saturating_sub(left_before_packing_ack_frame - left),
4598 );
4599
4600 let mut challenge_data = None;
4601
4602 if pkt_type == Type::Short {
4603 // Create PMTUD probe.
4604 //
4605 // In order to send a PMTUD probe the current `left` value, which was
4606 // already limited by the current PMTU measure, needs to be ignored,
4607 // but the outgoing packet still needs to be limited by
4608 // the output buffer size, as well as the congestion
4609 // window.
4610 //
4611 // In addition, the PMTUD probe is only generated when the handshake
4612 // is confirmed, to avoid interfering with the handshake
4613 // (e.g. due to the anti-amplification limits).
4614 if let Ok(active_path) = self.paths.get_active_mut() {
4615 let should_probe_pmtu = active_path.should_send_pmtu_probe(
4616 self.handshake_confirmed,
4617 self.handshake_completed,
4618 out_len,
4619 is_closing,
4620 frames.is_empty(),
4621 );
4622
4623 if should_probe_pmtu {
4624 if let Some(pmtud) = active_path.pmtud.as_mut() {
4625 let probe_size = pmtud.get_probe_size();
4626 trace!(
4627 "{} sending pmtud probe pmtu_probe={} estimated_pmtu={}",
4628 self.trace_id,
4629 probe_size,
4630 pmtud.get_current_mtu(),
4631 );
4632
4633 left = probe_size;
4634
4635 match left.checked_sub(overhead) {
4636 Some(v) => left = v,
4637
4638 None => {
4639 // We can't send more because there isn't enough
4640 // space available in the output buffer.
4641 //
4642 // This usually happens when we try to send a new
4643 // packet but failed because cwnd is almost full.
4644 //
4645 // In such case app_limited is set to false here
4646 // to make cwnd grow when ACK is received.
4647 active_path.recovery.update_app_limited(false);
4648 return Err(Error::Done);
4649 },
4650 }
4651
4652 let frame = frame::Frame::Padding {
4653 len: probe_size - overhead - 1,
4654 };
4655
4656 if push_frame_to_pkt!(b, frames, frame, left) {
4657 let frame = frame::Frame::Ping {
4658 mtu_probe: Some(probe_size),
4659 };
4660
4661 if push_frame_to_pkt!(b, frames, frame, left) {
4662 ack_eliciting = true;
4663 in_flight = true;
4664 }
4665 }
4666
                    // Mark the probe as in flight, so no duplicate
                    // probes are generated within a single flight.
4669 pmtud.set_in_flight(true);
4670 is_pmtud_probe = true;
4671 }
4672 }
4673 }
4674
4675 let path = self.paths.get_mut(send_pid)?;
4676 // Create PATH_RESPONSE frame if needed.
4677 // We do not try to ensure that these are really sent.
4678 while let Some(challenge) = path.pop_received_challenge() {
4679 let frame = frame::Frame::PathResponse { data: challenge };
4680
4681 if push_frame_to_pkt!(b, frames, frame, left) {
4682 ack_eliciting = true;
4683 in_flight = true;
4684 } else {
4685 // If there are other pending PATH_RESPONSE, don't lose them
4686 // now.
4687 break;
4688 }
4689 }
4690
4691 // Create PATH_CHALLENGE frame if needed.
4692 if path.validation_requested() {
4693 // TODO: ensure that data is unique over paths.
4694 let data = rand::rand_u64().to_be_bytes();
4695
4696 let frame = frame::Frame::PathChallenge { data };
4697
4698 if push_frame_to_pkt!(b, frames, frame, left) {
4699 // Let's notify the path once we know the packet size.
4700 challenge_data = Some(data);
4701
4702 ack_eliciting = true;
4703 in_flight = true;
4704 }
4705 }
4706
4707 if let Some(key_update) = crypto_ctx.key_update.as_mut() {
4708 key_update.update_acked = true;
4709 }
4710 }
4711
4712 let path = self.paths.get_mut(send_pid)?;
4713
4714 if pkt_type == Type::Short && !is_closing {
4715 // Create NEW_CONNECTION_ID frames as needed.
4716 while let Some(seq_num) = self.ids.next_advertise_new_scid_seq() {
4717 let frame = self.ids.get_new_connection_id_frame_for(seq_num)?;
4718
4719 if push_frame_to_pkt!(b, frames, frame, left) {
4720 self.ids.mark_advertise_new_scid_seq(seq_num, false);
4721
4722 ack_eliciting = true;
4723 in_flight = true;
4724 } else {
4725 break;
4726 }
4727 }
4728 }
4729
4730 if pkt_type == Type::Short && !is_closing && path.active() {
4731 // Create HANDSHAKE_DONE frame.
4732 // self.should_send_handshake_done() but without the need to borrow
4733 if self.handshake_completed &&
4734 !self.handshake_done_sent &&
4735 self.is_server
4736 {
4737 let frame = frame::Frame::HandshakeDone;
4738
4739 if push_frame_to_pkt!(b, frames, frame, left) {
4740 self.handshake_done_sent = true;
4741
4742 ack_eliciting = true;
4743 in_flight = true;
4744 }
4745 }
4746
4747 // Create MAX_STREAMS_BIDI frame.
4748 if self.streams.should_update_max_streams_bidi() ||
4749 self.should_send_max_streams_bidi
4750 {
4751 let frame = frame::Frame::MaxStreamsBidi {
4752 max: self.streams.max_streams_bidi_next(),
4753 };
4754
4755 if push_frame_to_pkt!(b, frames, frame, left) {
4756 self.streams.update_max_streams_bidi();
4757 self.should_send_max_streams_bidi = false;
4758
4759 ack_eliciting = true;
4760 in_flight = true;
4761 }
4762 }
4763
4764 // Create MAX_STREAMS_UNI frame.
4765 if self.streams.should_update_max_streams_uni() ||
4766 self.should_send_max_streams_uni
4767 {
4768 let frame = frame::Frame::MaxStreamsUni {
4769 max: self.streams.max_streams_uni_next(),
4770 };
4771
4772 if push_frame_to_pkt!(b, frames, frame, left) {
4773 self.streams.update_max_streams_uni();
4774 self.should_send_max_streams_uni = false;
4775
4776 ack_eliciting = true;
4777 in_flight = true;
4778 }
4779 }
4780
4781 // Create DATA_BLOCKED frame.
4782 if let Some(limit) = self.blocked_limit {
4783 let frame = frame::Frame::DataBlocked { limit };
4784
4785 if push_frame_to_pkt!(b, frames, frame, left) {
4786 self.blocked_limit = None;
4787 self.data_blocked_sent_count =
4788 self.data_blocked_sent_count.saturating_add(1);
4789
4790 ack_eliciting = true;
4791 in_flight = true;
4792 }
4793 }
4794
4795 // Create STREAMS_BLOCKED (bidi) frame when the local endpoint has
4796 // exhausted the peer's bidirectional stream count limit.
4797 if self
4798 .streams_blocked_bidi_state
4799 .has_pending_stream_blocked_frame()
4800 {
4801 if let Some(limit) = self.streams_blocked_bidi_state.blocked_at {
4802 let frame = frame::Frame::StreamsBlockedBidi { limit };
4803
4804 if push_frame_to_pkt!(b, frames, frame, left) {
4805 // Record the limit we just notified the peer about so
4806 // that redundant frames for the same limit are
4807 // suppressed.
4808 self.streams_blocked_bidi_state.blocked_sent =
4809 Some(limit);
4810
4811 ack_eliciting = true;
4812 in_flight = true;
4813 }
4814 }
4815 }
4816
4817 // Create STREAMS_BLOCKED (uni) frame when the local endpoint has
4818 // exhausted the peer's unidirectional stream count limit.
4819 if self
4820 .streams_blocked_uni_state
4821 .has_pending_stream_blocked_frame()
4822 {
4823 if let Some(limit) = self.streams_blocked_uni_state.blocked_at {
4824 let frame = frame::Frame::StreamsBlockedUni { limit };
4825
4826 if push_frame_to_pkt!(b, frames, frame, left) {
4827 // Record the limit we just notified the peer about so
4828 // that redundant frames for the same limit are
4829 // suppressed.
4830 self.streams_blocked_uni_state.blocked_sent = Some(limit);
4831
4832 ack_eliciting = true;
4833 in_flight = true;
4834 }
4835 }
4836 }
4837
4838 // Create MAX_STREAM_DATA frames as needed.
4839 for stream_id in self.streams.almost_full() {
4840 let stream = match self.streams.get_mut(stream_id) {
4841 Some(v) => v,
4842
4843 None => {
4844 // The stream doesn't exist anymore, so remove it from
4845 // the almost full set.
4846 self.streams.remove_almost_full(stream_id);
4847 continue;
4848 },
4849 };
4850
4851 // Autotune the stream window size, but only if this is not a
4852 // retransmission (on a retransmit the stream will be in
4853 // `self.streams.almost_full()` but it's `almost_full()`
4854 // method returns false.
4855 if stream.recv.almost_full() {
4856 stream.recv.autotune_window(now, path.recovery.rtt());
4857 }
4858
4859 let frame = frame::Frame::MaxStreamData {
4860 stream_id,
4861 max: stream.recv.max_data_next(),
4862 };
4863
4864 if push_frame_to_pkt!(b, frames, frame, left) {
4865 let recv_win = stream.recv.window();
4866
4867 stream.recv.update_max_data(now);
4868
4869 self.streams.remove_almost_full(stream_id);
4870
4871 ack_eliciting = true;
4872 in_flight = true;
4873
4874 // Make sure the connection window always has some
4875 // room compared to the stream window.
4876 flow_control.ensure_window_lower_bound(
4877 (recv_win as f64 * CONNECTION_WINDOW_FACTOR) as u64,
4878 );
4879 }
4880 }
4881
4882 // Create MAX_DATA frame as needed.
4883 if flow_control.should_update_max_data() &&
4884 flow_control.max_data() < flow_control.max_data_next()
4885 {
4886 // Autotune the connection window size. We only tune the window
4887 // if we are sending an "organic" update, not on retransmits.
4888 flow_control.autotune_window(now, path.recovery.rtt());
4889 self.should_send_max_data = true;
4890 }
4891
4892 if self.should_send_max_data {
4893 let frame = frame::Frame::MaxData {
4894 max: flow_control.max_data_next(),
4895 };
4896
4897 if push_frame_to_pkt!(b, frames, frame, left) {
4898 self.should_send_max_data = false;
4899
4900 // Commits the new max_rx_data limit.
4901 flow_control.update_max_data(now);
4902
4903 ack_eliciting = true;
4904 in_flight = true;
4905 }
4906 }
4907
4908 // Create STOP_SENDING frames as needed.
4909 for (stream_id, error_code) in self
4910 .streams
4911 .stopped()
4912 .map(|(&k, &v)| (k, v))
4913 .collect::<Vec<(u64, u64)>>()
4914 {
4915 let frame = frame::Frame::StopSending {
4916 stream_id,
4917 error_code,
4918 };
4919
4920 if push_frame_to_pkt!(b, frames, frame, left) {
4921 self.streams.remove_stopped(stream_id);
4922
4923 ack_eliciting = true;
4924 in_flight = true;
4925 }
4926 }
4927
4928 // Create RESET_STREAM frames as needed.
4929 for (stream_id, (error_code, final_size)) in self
4930 .streams
4931 .reset()
4932 .map(|(&k, &v)| (k, v))
4933 .collect::<Vec<(u64, (u64, u64))>>()
4934 {
4935 let frame = frame::Frame::ResetStream {
4936 stream_id,
4937 error_code,
4938 final_size,
4939 };
4940
4941 if push_frame_to_pkt!(b, frames, frame, left) {
4942 self.streams.remove_reset(stream_id);
4943
4944 ack_eliciting = true;
4945 in_flight = true;
4946 }
4947 }
4948
4949 // Create STREAM_DATA_BLOCKED frames as needed.
4950 for (stream_id, limit) in self
4951 .streams
4952 .blocked()
4953 .map(|(&k, &v)| (k, v))
4954 .collect::<Vec<(u64, u64)>>()
4955 {
4956 let frame = frame::Frame::StreamDataBlocked { stream_id, limit };
4957
4958 if push_frame_to_pkt!(b, frames, frame, left) {
4959 self.streams.remove_blocked(stream_id);
4960 self.stream_data_blocked_sent_count =
4961 self.stream_data_blocked_sent_count.saturating_add(1);
4962
4963 ack_eliciting = true;
4964 in_flight = true;
4965 }
4966 }
4967
4968 // Create RETIRE_CONNECTION_ID frames as needed.
4969 let retire_dcid_seqs = self.ids.retire_dcid_seqs();
4970
4971 for seq_num in retire_dcid_seqs {
4972 // The sequence number specified in a RETIRE_CONNECTION_ID frame
4973 // MUST NOT refer to the Destination Connection ID field of the
4974 // packet in which the frame is contained.
4975 let dcid_seq = path.active_dcid_seq.ok_or(Error::InvalidState)?;
4976
4977 if seq_num == dcid_seq {
4978 continue;
4979 }
4980
4981 let frame = frame::Frame::RetireConnectionId { seq_num };
4982
4983 if push_frame_to_pkt!(b, frames, frame, left) {
4984 self.ids.mark_retire_dcid_seq(seq_num, false)?;
4985
4986 ack_eliciting = true;
4987 in_flight = true;
4988 } else {
4989 break;
4990 }
4991 }
4992 }
4993
4994 // Create CONNECTION_CLOSE frame. Try to send this only on the active
4995 // path, unless it is the last one available.
4996 if path.active() || n_paths == 1 {
4997 if let Some(conn_err) = self.local_error.as_ref() {
4998 if conn_err.is_app {
4999 // Create ApplicationClose frame.
5000 if pkt_type == Type::Short {
5001 let frame = frame::Frame::ApplicationClose {
5002 error_code: conn_err.error_code,
5003 reason: conn_err.reason.clone(),
5004 };
5005
5006 if push_frame_to_pkt!(b, frames, frame, left) {
5007 let pto = path.recovery.pto();
5008 self.draining_timer = Some(now + (pto * 3));
5009
5010 ack_eliciting = true;
5011 in_flight = true;
5012 }
5013 }
5014 } else {
5015 // Create ConnectionClose frame.
5016 let frame = frame::Frame::ConnectionClose {
5017 error_code: conn_err.error_code,
5018 frame_type: 0,
5019 reason: conn_err.reason.clone(),
5020 };
5021
5022 if push_frame_to_pkt!(b, frames, frame, left) {
5023 let pto = path.recovery.pto();
5024 self.draining_timer = Some(now + (pto * 3));
5025
5026 ack_eliciting = true;
5027 in_flight = true;
5028 }
5029 }
5030 }
5031 }
5032
5033 // Create CRYPTO frame.
5034 if crypto_ctx.crypto_stream.is_flushable() &&
5035 left > frame::MAX_CRYPTO_OVERHEAD &&
5036 !is_closing &&
5037 path.active()
5038 {
5039 let crypto_off = crypto_ctx.crypto_stream.send.off_front();
5040
5041 // Encode the frame.
5042 //
5043 // Instead of creating a `frame::Frame` object, encode the frame
5044 // directly into the packet buffer.
5045 //
5046 // First we reserve some space in the output buffer for writing the
5047 // frame header (we assume the length field is always a 2-byte
5048 // varint as we don't know the value yet).
5049 //
5050 // Then we emit the data from the crypto stream's send buffer.
5051 //
5052 // Finally we go back and encode the frame header with the now
5053 // available information.
5054 let hdr_off = b.off();
5055 let hdr_len = 1 + // frame type
5056 octets::varint_len(crypto_off) + // offset
5057 2; // length, always encode as 2-byte varint
5058
5059 if let Some(max_len) = left.checked_sub(hdr_len) {
5060 let (mut crypto_hdr, mut crypto_payload) =
5061 b.split_at(hdr_off + hdr_len)?;
5062
5063 // Write stream data into the packet buffer.
5064 let (len, _) = crypto_ctx
5065 .crypto_stream
5066 .send
5067 .emit(&mut crypto_payload.as_mut()[..max_len])?;
5068
5069 // Encode the frame's header.
5070 //
5071 // Due to how `OctetsMut::split_at()` works, `crypto_hdr` starts
5072 // from the initial offset of `b` (rather than the current
5073 // offset), so it needs to be advanced to the
5074 // initial frame offset.
5075 crypto_hdr.skip(hdr_off)?;
5076
5077 frame::encode_crypto_header(
5078 crypto_off,
5079 len as u64,
5080 &mut crypto_hdr,
5081 )?;
5082
5083 // Advance the packet buffer's offset.
5084 b.skip(hdr_len + len)?;
5085
5086 let frame = frame::Frame::CryptoHeader {
5087 offset: crypto_off,
5088 length: len,
5089 };
5090
5091 if push_frame_to_pkt!(b, frames, frame, left) {
5092 ack_eliciting = true;
5093 in_flight = true;
5094 has_data = true;
5095 }
5096 }
5097 }
5098
5099 // The preference of data-bearing frame to include in a packet
5100 // is managed by `self.emit_dgram`. However, whether any frames
5101 // can be sent depends on the state of their buffers. In the case
5102 // where one type is preferred but its buffer is empty, fall back
5103 // to the other type in order not to waste this function call.
5104 let mut dgram_emitted = false;
5105 let dgrams_to_emit = max_dgram_len.is_some();
5106 let stream_to_emit = self.streams.has_flushable();
5107
5108 let mut do_dgram = self.emit_dgram && dgrams_to_emit;
5109 let do_stream = !self.emit_dgram && stream_to_emit;
5110
5111 if !do_stream && dgrams_to_emit {
5112 do_dgram = true;
5113 }
5114
5115 // Create DATAGRAM frame.
5116 if (pkt_type == Type::Short || pkt_type == Type::ZeroRTT) &&
5117 left > frame::MAX_DGRAM_OVERHEAD &&
5118 !is_closing &&
5119 path.active() &&
5120 do_dgram
5121 {
5122 if let Some(max_dgram_payload) = max_dgram_len {
5123 while let Some(len) = self.dgram_send_queue.peek_front_len() {
5124 let hdr_off = b.off();
5125 let hdr_len = 1 + // frame type
5126 2; // length, always encode as 2-byte varint
5127
5128 if (hdr_len + len) <= left {
5129 // Front of the queue fits this packet, send it.
5130 match self.dgram_send_queue.pop() {
5131 Some(data) => {
5132 // Encode the frame.
5133 //
5134 // Instead of creating a `frame::Frame` object,
5135 // encode the frame directly into the packet
5136 // buffer.
5137 //
5138 // First we reserve some space in the output
5139 // buffer for writing the frame header (we
5140 // assume the length field is always a 2-byte
5141 // varint as we don't know the value yet).
5142 //
5143 // Then we emit the data from the DATAGRAM's
5144 // buffer.
5145 //
5146 // Finally we go back and encode the frame
5147 // header with the now available information.
5148 let (mut dgram_hdr, mut dgram_payload) =
5149 b.split_at(hdr_off + hdr_len)?;
5150
5151 dgram_payload.as_mut()[..len]
5152 .copy_from_slice(data.as_ref());
5153
5154 // Encode the frame's header.
5155 //
5156 // Due to how `OctetsMut::split_at()` works,
5157 // `dgram_hdr` starts from the initial offset
5158 // of `b` (rather than the current offset), so
5159 // it needs to be advanced to the initial frame
5160 // offset.
5161 dgram_hdr.skip(hdr_off)?;
5162
5163 frame::encode_dgram_header(
5164 len as u64,
5165 &mut dgram_hdr,
5166 )?;
5167
5168 // Advance the packet buffer's offset.
5169 b.skip(hdr_len + len)?;
5170
5171 let frame =
5172 frame::Frame::DatagramHeader { length: len };
5173
5174 if push_frame_to_pkt!(b, frames, frame, left) {
5175 ack_eliciting = true;
5176 in_flight = true;
5177 dgram_emitted = true;
5178 self.dgram_sent_count =
5179 self.dgram_sent_count.saturating_add(1);
5180 path.dgram_sent_count =
5181 path.dgram_sent_count.saturating_add(1);
5182 }
5183 },
5184
5185 None => continue,
5186 };
5187 } else if len > max_dgram_payload {
5188 // This dgram frame will never fit. Let's purge it.
5189 self.dgram_send_queue.pop();
5190 } else {
5191 break;
5192 }
5193 }
5194 }
5195 }
5196
5197 // Create a single STREAM frame for the first stream that is flushable.
5198 if (pkt_type == Type::Short || pkt_type == Type::ZeroRTT) &&
5199 left > frame::MAX_STREAM_OVERHEAD &&
5200 !is_closing &&
5201 path.active() &&
5202 !dgram_emitted
5203 {
5204 while let Some(priority_key) = self.streams.peek_flushable() {
5205 let stream_id = priority_key.id;
5206 let stream = match self.streams.get_mut(stream_id) {
5207 // Avoid sending frames for streams that were already stopped.
5208 //
5209 // This might happen if stream data was buffered but not yet
5210 // flushed on the wire when a STOP_SENDING frame is received.
5211 Some(v) if !v.send.is_stopped() => v,
5212 _ => {
5213 self.streams.remove_flushable(&priority_key);
5214 continue;
5215 },
5216 };
5217
5218 let stream_off = stream.send.off_front();
5219
5220 // Encode the frame.
5221 //
5222 // Instead of creating a `frame::Frame` object, encode the frame
5223 // directly into the packet buffer.
5224 //
5225 // First we reserve some space in the output buffer for writing
5226 // the frame header (we assume the length field is always a
5227 // 2-byte varint as we don't know the value yet).
5228 //
5229 // Then we emit the data from the stream's send buffer.
5230 //
5231 // Finally we go back and encode the frame header with the now
5232 // available information.
5233 let hdr_off = b.off();
5234 let hdr_len = 1 + // frame type
5235 octets::varint_len(stream_id) + // stream_id
5236 octets::varint_len(stream_off) + // offset
5237 2; // length, always encode as 2-byte varint
5238
5239 let max_len = match left.checked_sub(hdr_len) {
5240 Some(v) => v,
5241 None => {
5242 let priority_key = Arc::clone(&stream.priority_key);
5243 self.streams.remove_flushable(&priority_key);
5244
5245 continue;
5246 },
5247 };
5248
5249 let (mut stream_hdr, mut stream_payload) =
5250 b.split_at(hdr_off + hdr_len)?;
5251
5252 // Write stream data into the packet buffer.
5253 let (len, fin) =
5254 stream.send.emit(&mut stream_payload.as_mut()[..max_len])?;
5255
5256 // Encode the frame's header.
5257 //
5258 // Due to how `OctetsMut::split_at()` works, `stream_hdr` starts
5259 // from the initial offset of `b` (rather than the current
5260 // offset), so it needs to be advanced to the initial frame
5261 // offset.
5262 stream_hdr.skip(hdr_off)?;
5263
5264 frame::encode_stream_header(
5265 stream_id,
5266 stream_off,
5267 len as u64,
5268 fin,
5269 &mut stream_hdr,
5270 )?;
5271
5272 // Advance the packet buffer's offset.
5273 b.skip(hdr_len + len)?;
5274
5275 let frame = frame::Frame::StreamHeader {
5276 stream_id,
5277 offset: stream_off,
5278 length: len,
5279 fin,
5280 };
5281
5282 if push_frame_to_pkt!(b, frames, frame, left) {
5283 ack_eliciting = true;
5284 in_flight = true;
5285 has_data = true;
5286 }
5287
5288 let priority_key = Arc::clone(&stream.priority_key);
5289 // If the stream is no longer flushable, remove it from the queue
5290 if !stream.is_flushable() {
5291 self.streams.remove_flushable(&priority_key);
5292 } else if stream.incremental {
5293 // Shuffle the incremental stream to the back of the
5294 // queue.
5295 self.streams.remove_flushable(&priority_key);
5296 self.streams.insert_flushable(&priority_key);
5297 }
5298
5299 #[cfg(feature = "fuzzing")]
5300 // Coalesce STREAM frames when fuzzing.
5301 if left > frame::MAX_STREAM_OVERHEAD {
5302 continue;
5303 }
5304
5305 break;
5306 }
5307 }
5308
5309 // Alternate trying to send DATAGRAMs next time.
5310 self.emit_dgram = !dgram_emitted;
5311
5312 // If no other ack-eliciting frame is sent, include a PING frame
5313 // - if PTO probe needed; OR
5314 // - if we've sent too many non ack-eliciting packets without having
5315 // sent an ACK eliciting one; OR
5316 // - the application requested an ack-eliciting frame be sent.
5317 if (ack_elicit_required || path.needs_ack_eliciting) &&
5318 !ack_eliciting &&
5319 left >= 1 &&
5320 !is_closing
5321 {
5322 let frame = frame::Frame::Ping { mtu_probe: None };
5323
5324 if push_frame_to_pkt!(b, frames, frame, left) {
5325 ack_eliciting = true;
5326 in_flight = true;
5327 }
5328 }
5329
5330 if ack_eliciting && !is_pmtud_probe {
5331 path.needs_ack_eliciting = false;
5332 path.recovery.ping_sent(epoch);
5333 }
5334
5335 if !has_data &&
5336 !dgram_emitted &&
5337 cwnd_available > frame::MAX_STREAM_OVERHEAD
5338 {
5339 path.recovery.on_app_limited();
5340 }
5341
5342 if frames.is_empty() {
5343 // When we reach this point we are not able to write more, so set
5344 // app_limited to false.
5345 path.recovery.update_app_limited(false);
5346 return Err(Error::Done);
5347 }
5348
5349 // When coalescing a 1-RTT packet, we can't add padding in the UDP
5350 // datagram, so use PADDING frames instead.
5351 //
5352 // This is only needed if
5353 // 1) an Initial packet has already been written to the UDP datagram,
5354 // as Initial always requires padding.
5355 //
5356 // 2) this is a probing packet towards an unvalidated peer address.
5357 if (has_initial || !path.validated()) &&
5358 pkt_type == Type::Short &&
5359 left >= 1
5360 {
5361 let frame = frame::Frame::Padding { len: left };
5362
5363 if push_frame_to_pkt!(b, frames, frame, left) {
5364 in_flight = true;
5365 }
5366 }
5367
5368 // Pad payload so that it's always at least 4 bytes.
5369 if b.off() - payload_offset < PAYLOAD_MIN_LEN {
5370 let payload_len = b.off() - payload_offset;
5371
5372 let frame = frame::Frame::Padding {
5373 len: PAYLOAD_MIN_LEN - payload_len,
5374 };
5375
5376 #[allow(unused_assignments)]
5377 if push_frame_to_pkt!(b, frames, frame, left) {
5378 in_flight = true;
5379 }
5380 }
5381
5382 let payload_len = b.off() - payload_offset;
5383
5384 // Fill in payload length.
5385 if pkt_type != Type::Short {
5386 let len = pn_len + payload_len + crypto_overhead;
5387
5388 let (_, mut payload_with_len) = b.split_at(header_offset)?;
5389 payload_with_len
5390 .put_varint_with_len(len as u64, PAYLOAD_LENGTH_LEN)?;
5391 }
5392
5393 trace!(
5394 "{} tx pkt {} len={} pn={} {}",
5395 self.trace_id,
5396 hdr_trace.unwrap_or_default(),
5397 payload_len,
5398 pn,
5399 AddrTupleFmt(path.local_addr(), path.peer_addr())
5400 );
5401
5402 #[cfg(feature = "qlog")]
5403 let mut qlog_frames: Vec<qlog::events::quic::QuicFrame> =
5404 Vec::with_capacity(frames.len());
5405
5406 for frame in &mut frames {
5407 trace!("{} tx frm {:?}", self.trace_id, frame);
5408
5409 qlog_with_type!(QLOG_PACKET_TX, self.qlog, _q, {
5410 qlog_frames.push(frame.to_qlog());
5411 });
5412 }
5413
5414 qlog_with_type!(QLOG_PACKET_TX, self.qlog, q, {
5415 if let Some(header) = qlog_pkt_hdr {
5416 // Qlog packet raw info described at
5417 // https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema-00#section-5.1
5418 //
5419 // `length` includes packet headers and trailers (AEAD tag).
5420 let length = payload_len + payload_offset + crypto_overhead;
5421 let qlog_raw_info = RawInfo {
5422 length: Some(length as u64),
5423 payload_length: Some(payload_len as u64),
5424 data: None,
5425 };
5426
5427 let send_at_time =
5428 now.duration_since(q.start_time()).as_secs_f64() * 1000.0;
5429
5430 let ev_data =
5431 EventData::QuicPacketSent(qlog::events::quic::PacketSent {
5432 header,
5433 frames: Some(qlog_frames),
5434 raw: Some(qlog_raw_info),
5435 send_at_time: Some(send_at_time),
5436 ..Default::default()
5437 });
5438
5439 q.add_event_data_with_instant(ev_data, now).ok();
5440 }
5441 });
5442
5443 let aead = match crypto_ctx.crypto_seal {
5444 Some(ref mut v) => v,
5445 None => return Err(Error::InvalidState),
5446 };
5447
5448 let written = packet::encrypt_pkt(
5449 &mut b,
5450 pn,
5451 pn_len,
5452 payload_len,
5453 payload_offset,
5454 None,
5455 aead,
5456 )?;
5457
5458 let sent_pkt_has_data = if path.recovery.gcongestion_enabled() {
5459 has_data || dgram_emitted
5460 } else {
5461 has_data
5462 };
5463
5464 let sent_pkt = recovery::Sent {
5465 pkt_num: pn,
5466 frames,
5467 time_sent: now,
5468 time_acked: None,
5469 time_lost: None,
5470 size: if ack_eliciting { written } else { 0 },
5471 ack_eliciting,
5472 in_flight,
5473 delivered: 0,
5474 delivered_time: now,
5475 first_sent_time: now,
5476 is_app_limited: false,
5477 tx_in_flight: 0,
5478 lost: 0,
5479 has_data: sent_pkt_has_data,
5480 is_pmtud_probe,
5481 };
5482
5483 if in_flight && is_app_limited {
5484 path.recovery.delivery_rate_update_app_limited(true);
5485 }
5486
5487 self.next_pkt_num += 1;
5488
5489 let handshake_status = recovery::HandshakeStatus {
5490 has_handshake_keys: self.crypto_ctx[packet::Epoch::Handshake]
5491 .has_keys(),
5492 peer_verified_address: self.peer_verified_initial_address,
5493 completed: self.handshake_completed,
5494 };
5495
5496 self.on_packet_sent(send_pid, sent_pkt, epoch, handshake_status, now)?;
5497
5498 let path = self.paths.get_mut(send_pid)?;
5499 qlog_with_type!(QLOG_METRICS, self.qlog, q, {
5500 path.recovery.maybe_qlog(q, now);
5501 });
5502
5503 // Record sent packet size if we probe the path.
5504 if let Some(data) = challenge_data {
5505 path.add_challenge_sent(data, written, now);
5506 }
5507
5508 self.sent_count += 1;
5509 self.sent_bytes += written as u64;
5510 path.sent_count += 1;
5511 path.sent_bytes += written as u64;
5512
5513 if self.dgram_send_queue.byte_size() > path.recovery.cwnd_available() {
5514 path.recovery.update_app_limited(false);
5515 }
5516
5517 let had_send_budget = path.max_send_bytes > 0;
5518 path.max_send_bytes = path.max_send_bytes.saturating_sub(written);
5519 if self.is_server &&
5520 !path.verified_peer_address &&
5521 had_send_budget &&
5522 path.max_send_bytes == 0
5523 {
5524 self.amplification_limited_count =
5525 self.amplification_limited_count.saturating_add(1);
5526 }
5527
5528 // On the client, drop initial state after sending an Handshake packet.
5529 if !self.is_server && hdr_ty == Type::Handshake {
5530 self.drop_epoch_state(packet::Epoch::Initial, now);
5531 }
5532
5533 // (Re)start the idle timer if we are sending the first ack-eliciting
5534 // packet since last receiving a packet.
5535 if ack_eliciting && !self.ack_eliciting_sent {
5536 if let Some(idle_timeout) = self.idle_timeout() {
5537 self.idle_timer = Some(now + idle_timeout);
5538 }
5539 }
5540
5541 if ack_eliciting {
5542 self.ack_eliciting_sent = true;
5543 }
5544
5545 Ok((pkt_type, written))
5546 }
5547
    /// Records a freshly sent packet in the per-epoch packet number space,
    /// the packet number manager, and the sending path's loss recovery state.
    ///
    /// Returns an error if `send_pid` does not refer to a known path.
    fn on_packet_sent(
        &mut self, send_pid: usize, sent_pkt: recovery::Sent,
        epoch: packet::Epoch, handshake_status: recovery::HandshakeStatus,
        now: Instant,
    ) -> Result<()> {
        let path = self.paths.get_mut(send_pid)?;

        // It's fine to set the skip counter based on a non-active path's values.
        let cwnd = path.recovery.cwnd();
        let max_datagram_size = path.recovery.max_datagram_size();
        self.pkt_num_spaces[epoch].on_packet_sent(&sent_pkt);
        self.pkt_num_manager.on_packet_sent(
            cwnd,
            max_datagram_size,
            self.handshake_completed,
        );

        // Hand the packet metadata to recovery last: this call consumes
        // `sent_pkt`, so it must come after the packet number space borrowed
        // it above.
        path.recovery.on_packet_sent(
            sent_pkt,
            epoch,
            handshake_status,
            now,
            &self.trace_id,
        );

        Ok(())
    }
5575
5576 /// Returns the desired send time for the next packet.
5577 #[inline]
5578 pub fn get_next_release_time(&self) -> Option<ReleaseDecision> {
5579 Some(
5580 self.paths
5581 .get_active()
5582 .ok()?
5583 .recovery
5584 .get_next_release_time(),
5585 )
5586 }
5587
5588 /// Returns whether gcongestion is enabled.
5589 #[inline]
5590 pub fn gcongestion_enabled(&self) -> Option<bool> {
5591 Some(self.paths.get_active().ok()?.recovery.gcongestion_enabled())
5592 }
5593
5594 /// Returns the maximum pacing into the future.
5595 ///
5596 /// Equals 1/8 of the smoothed RTT, but at least 1ms and not greater than
5597 /// 5ms.
5598 pub fn max_release_into_future(&self) -> Duration {
5599 self.paths
5600 .get_active()
5601 .map(|p| p.recovery.rtt().mul_f64(0.125))
5602 .unwrap_or(Duration::from_millis(1))
5603 .max(Duration::from_millis(1))
5604 .min(Duration::from_millis(5))
5605 }
5606
    /// Returns whether pacing is enabled.
    ///
    /// This reflects the connection-wide recovery configuration and does not
    /// depend on any particular path.
    #[inline]
    pub fn pacing_enabled(&self) -> bool {
        self.recovery_config.pacing
    }
5612
5613 /// Returns the size of the send quantum, in bytes.
5614 ///
5615 /// This represents the maximum size of a packet burst as determined by the
5616 /// congestion control algorithm in use.
5617 ///
5618 /// Applications can, for example, use it in conjunction with segmentation
5619 /// offloading mechanisms as the maximum limit for outgoing aggregates of
5620 /// multiple packets.
5621 #[inline]
5622 pub fn send_quantum(&self) -> usize {
5623 match self.paths.get_active() {
5624 Ok(p) => p.recovery.send_quantum(),
5625 _ => 0,
5626 }
5627 }
5628
5629 /// Returns the size of the send quantum over the given 4-tuple, in bytes.
5630 ///
5631 /// This represents the maximum size of a packet burst as determined by the
5632 /// congestion control algorithm in use.
5633 ///
5634 /// Applications can, for example, use it in conjunction with segmentation
5635 /// offloading mechanisms as the maximum limit for outgoing aggregates of
5636 /// multiple packets.
5637 ///
5638 /// If the (`local_addr`, peer_addr`) 4-tuple relates to a non-existing
5639 /// path, this method returns 0.
5640 pub fn send_quantum_on_path(
5641 &self, local_addr: SocketAddr, peer_addr: SocketAddr,
5642 ) -> usize {
5643 self.paths
5644 .path_id_from_addrs(&(local_addr, peer_addr))
5645 .and_then(|pid| self.paths.get(pid).ok())
5646 .map(|path| path.recovery.send_quantum())
5647 .unwrap_or(0)
5648 }
5649
    /// Reads contiguous data from a stream into the provided slice.
    ///
    /// This is a convenience wrapper around [`stream_recv_buf()`] that uses
    /// the mutable slice as the output buffer.
    ///
    /// The slice must be sized by the caller and will be populated up to its
    /// capacity.
    ///
    /// On success the amount of bytes read and a flag indicating the fin state
    /// is returned as a tuple, or [`Done`] if there is no data to read.
    ///
    /// Reading data from a stream may trigger queueing of control messages
    /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`send()`]: struct.Connection.html#method.send
    /// [`stream_recv_buf()`]: struct.Connection.html#method.stream_recv_buf
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut buf = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// # let stream_id = 0;
    /// while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
    ///     println!("Got {} bytes on stream {}", read, stream_id);
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    #[inline]
    pub fn stream_recv(
        &mut self, stream_id: u64, out: &mut [u8],
    ) -> Result<(usize, bool)> {
        self.stream_recv_buf(stream_id, out)
    }
5686
5687 /// Reads contiguous data from a stream into the provided [`bytes::BufMut`].
5688 ///
5689 /// **NOTE**:
5690 /// The BufMut will be populated with all available data up to its capacity.
5691 /// Since some BufMut implementations, e.g., [`Vec<u8>`], dynamically
5692 /// allocate additional memory, the caller may use [`BufMut::limit()`]
5693 /// to limit the maximum amount of data that can be written.
5694 ///
5695 /// On success the amount of bytes read and a flag indicating the fin state
5696 /// is returned as a tuple, or [`Done`] if there is no data to read.
5697 /// [`BufMut::advance_mut()`] will have been called with the same number of
5698 /// total bytes.
5699 ///
5700 /// Reading data from a stream may trigger queueing of control messages
5701 /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
5702 ///
5703 /// [`BufMut::limit()`]: bytes::BufMut::limit
5704 /// [`BufMut::advance_mut()`]: bytes::BufMut::advance_mut
5705 /// [`Done`]: enum.Error.html#variant.Done
5706 /// [`send()`]: struct.Connection.html#method.send
5707 ///
5708 /// ## Examples:
5709 ///
5710 /// ```no_run
5711 /// # use bytes::BufMut as _;
5712 /// # let mut buf = Vec::new().limit(1024); // Read at most 1024 bytes
5713 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
5714 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
5715 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
5716 /// # let peer = "127.0.0.1:1234".parse().unwrap();
5717 /// # let local = socket.local_addr().unwrap();
5718 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
5719 /// # let stream_id = 0;
5720 /// # let mut total_read = 0;
5721 /// while let Ok((read, fin)) = conn.stream_recv_buf(stream_id, &mut buf) {
5722 /// println!("Got {} bytes on stream {}", read, stream_id);
5723 /// total_read += read;
5724 /// assert_eq!(buf.get_ref().len(), total_read);
5725 /// }
5726 /// # Ok::<(), quiche::Error>(())
5727 /// ```
5728 pub fn stream_recv_buf<B: bytes::BufMut>(
5729 &mut self, stream_id: u64, out: B,
5730 ) -> Result<(usize, bool)> {
5731 self.do_stream_recv(stream_id, RecvAction::Emit { out })
5732 }
5733
5734 /// Discard contiguous data from a stream without copying.
5735 ///
5736 /// On success the amount of bytes discarded and a flag indicating the fin
5737 /// state is returned as a tuple, or [`Done`] if there is no data to
5738 /// discard.
5739 ///
5740 /// Discarding data from a stream may trigger queueing of control messages
5741 /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
5742 ///
5743 /// [`Done`]: enum.Error.html#variant.Done
5744 /// [`send()`]: struct.Connection.html#method.send
5745 ///
5746 /// ## Examples:
5747 ///
5748 /// ```no_run
5749 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
5750 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
5751 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
5752 /// # let peer = "127.0.0.1:1234".parse().unwrap();
5753 /// # let local = socket.local_addr().unwrap();
5754 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
5755 /// # let stream_id = 0;
5756 /// while let Ok((read, fin)) = conn.stream_discard(stream_id, 1) {
5757 /// println!("Discarded {} byte(s) on stream {}", read, stream_id);
5758 /// }
5759 /// # Ok::<(), quiche::Error>(())
5760 /// ```
5761 pub fn stream_discard(
5762 &mut self, stream_id: u64, len: usize,
5763 ) -> Result<(usize, bool)> {
5764 // `do_stream_recv()` is generic on the kind of `BufMut` in RecvAction.
5765 // Since we are discarding, it doesn't matter, but the compiler still
5766 // wants to know, so we say `&mut [u8]`.
5767 self.do_stream_recv::<&mut [u8]>(stream_id, RecvAction::Discard { len })
5768 }
5769
5770 // Reads or discards contiguous data from a stream.
5771 //
5772 // Passing an `action` of `StreamRecvAction::Emit` results in a read into
5773 // the provided slice. It must be sized by the caller and will be populated
5774 // up to its capacity.
5775 //
5776 // Passing an `action` of `StreamRecvAction::Discard` results in discard up
5777 // to the indicated length.
5778 //
5779 // On success the amount of bytes read or discarded, and a flag indicating
5780 // the fin state, is returned as a tuple, or [`Done`] if there is no data to
5781 // read or discard.
5782 //
5783 // Reading or discarding data from a stream may trigger queueing of control
5784 // messages (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
5785 //
5786 // [`Done`]: enum.Error.html#variant.Done
5787 // [`send()`]: struct.Connection.html#method.send
    fn do_stream_recv<B: bytes::BufMut>(
        &mut self, stream_id: u64, action: RecvAction<B>,
    ) -> Result<(usize, bool)> {
        // We can't read on our own unidirectional streams.
        if !stream::is_bidi(stream_id) &&
            stream::is_local(stream_id, self.is_server)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        // A stream that doesn't exist (e.g. already collected) is an error,
        // unlike `stream_send()` which would create it on demand.
        let stream = self
            .streams
            .get_mut(stream_id)
            .ok_or(Error::InvalidStreamState(stream_id))?;

        if !stream.is_readable() {
            return Err(Error::Done);
        }

        // Copied out up-front because `stream` can't be borrowed once we
        // start mutating `self.streams` below.
        let local = stream.local;
        let priority_key = Arc::clone(&stream.priority_key);

        // Snapshot the read offset *before* emitting/discarding, so the qlog
        // event below records where the data started.
        #[cfg(feature = "qlog")]
        let offset = stream.recv.off_front();

        #[cfg(feature = "qlog")]
        let to = match action {
            RecvAction::Emit { .. } => Some(DataRecipient::Application),

            RecvAction::Discard { .. } => Some(DataRecipient::Dropped),
        };

        let (read, fin) = match stream.recv.emit_or_discard(action) {
            Ok(v) => v,

            Err(e) => {
                // Collect the stream if it is now complete. This can happen if
                // we got a `StreamReset` error which will now be propagated to
                // the application, so we don't need to keep the stream's state
                // anymore.
                if stream.is_complete() {
                    self.streams.collect(stream_id, local);
                }

                self.streams.remove_readable(&priority_key);
                return Err(e);
            },
        };

        // Data handed to the application (or dropped) counts as consumed for
        // connection-level flow control purposes.
        self.flow_control.add_consumed(read as u64);

        let readable = stream.is_readable();

        let complete = stream.is_complete();

        // If the stream's receive window is nearly exhausted, schedule a
        // flow control update for it (e.g. MAX_STREAM_DATA).
        if stream.recv.almost_full() {
            self.streams.insert_almost_full(stream_id);
        }

        // De-register the stream once all buffered data has been drained, so
        // it isn't reported as readable again until new data arrives.
        if !readable {
            self.streams.remove_readable(&priority_key);
        }

        if complete {
            self.streams.collect(stream_id, local);
        }

        qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
            let ev_data = EventData::QuicStreamDataMoved(
                qlog::events::quic::StreamDataMoved {
                    stream_id: Some(stream_id),
                    offset: Some(offset),
                    raw: Some(RawInfo {
                        length: Some(read as u64),
                        ..Default::default()
                    }),
                    from: Some(DataRecipient::Transport),
                    to,
                    additional_info: fin
                        .then_some(DataMovedAdditionalInfo::FinSet),
                },
            );

            let now = Instant::now();
            q.add_event_data_with_instant(ev_data, now).ok();
        });

        if priority_key.incremental && readable {
            // Shuffle the incremental stream to the back of the queue.
            self.streams.remove_readable(&priority_key);
            self.streams.insert_readable(&priority_key);
        }

        Ok((read, fin))
    }
5883
5884 /// Writes data to a stream.
5885 ///
5886 /// On success the number of bytes written is returned, or [`Done`] if no
5887 /// data was written (e.g. because the stream has no capacity).
5888 ///
5889 /// Applications can provide a 0-length buffer with the fin flag set to
5890 /// true. This will lead to a 0-length FIN STREAM frame being sent at the
5891 /// latest offset. The `Ok(0)` value is only returned when the application
5892 /// provided a 0-length buffer.
5893 ///
5894 /// In addition, if the peer has signalled that it doesn't want to receive
5895 /// any more data from this stream by sending the `STOP_SENDING` frame, the
5896 /// [`StreamStopped`] error will be returned instead of any data.
5897 ///
5898 /// Note that in order to avoid buffering an infinite amount of data in the
5899 /// stream's send buffer, streams are only allowed to buffer outgoing data
5900 /// up to the amount that the peer allows it to send (that is, up to the
5901 /// stream's outgoing flow control capacity).
5902 ///
5903 /// This means that the number of written bytes returned can be lower than
5904 /// the length of the input buffer when the stream doesn't have enough
5905 /// capacity for the operation to complete. The application should retry the
5906 /// operation once the stream is reported as writable again.
5907 ///
5908 /// Applications should call this method only after the handshake is
5909 /// completed (whenever [`is_established()`] returns `true`) or during
5910 /// early data if enabled (whenever [`is_in_early_data()`] returns `true`).
5911 ///
5912 /// [`Done`]: enum.Error.html#variant.Done
5913 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
5914 /// [`is_established()`]: struct.Connection.html#method.is_established
5915 /// [`is_in_early_data()`]: struct.Connection.html#method.is_in_early_data
5916 ///
5917 /// ## Examples:
5918 ///
5919 /// ```no_run
5920 /// # let mut buf = [0; 512];
5921 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
5922 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
5923 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
5924 /// # let peer = "127.0.0.1:1234".parse().unwrap();
5925 /// # let local = "127.0.0.1:4321".parse().unwrap();
5926 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
5927 /// # let stream_id = 0;
5928 /// conn.stream_send(stream_id, b"hello", true)?;
5929 /// # Ok::<(), quiche::Error>(())
5930 /// ```
5931 pub fn stream_send(
5932 &mut self, stream_id: u64, buf: &[u8], fin: bool,
5933 ) -> Result<usize> {
5934 self.stream_do_send(
5935 stream_id,
5936 buf,
5937 fin,
5938 |stream: &mut stream::Stream<F>,
5939 buf: &[u8],
5940 cap: usize,
5941 fin: bool| {
5942 stream.send.write(&buf[..cap], fin).map(|v| (v, v))
5943 },
5944 )
5945 }
5946
5947 /// Writes data to a stream with zero copying, instead, it appends the
5948 /// provided buffer directly to the send queue if the capacity allows
5949 /// it.
5950 ///
5951 /// When a partial write happens (including when [`Error::Done`] is
5952 /// returned) the remaining (unwritten) buffer will also be returned.
5953 /// The application should retry the operation once the stream is
5954 /// reported as writable again.
5955 pub fn stream_send_zc(
5956 &mut self, stream_id: u64, buf: F::Buf, fin: bool,
5957 ) -> Result<(usize, Option<F::Buf>)>
5958 where
5959 F::Buf: BufSplit,
5960 {
5961 self.stream_do_send(
5962 stream_id,
5963 buf,
5964 fin,
5965 |stream: &mut stream::Stream<F>,
5966 buf: F::Buf,
5967 cap: usize,
5968 fin: bool| {
5969 let (sent, remaining) = stream.send.append_buf(buf, cap, fin)?;
5970 Ok((sent, (sent, remaining)))
5971 },
5972 )
5973 }
5974
    // Common implementation of `stream_send()` and `stream_send_zc()`.
    //
    // `write_fn` performs the actual transfer into the stream's send buffer:
    // it receives the stream, the caller's buffer, the number of bytes that
    // may be buffered (`cap`) and the effective fin flag, and returns the
    // number of bytes consumed along with the caller-facing return value.
    fn stream_do_send<B, R, SND>(
        &mut self, stream_id: u64, buf: B, fin: bool, write_fn: SND,
    ) -> Result<R>
    where
        B: AsRef<[u8]>,
        SND: FnOnce(&mut stream::Stream<F>, B, usize, bool) -> Result<(usize, R)>,
    {
        // We can't write on the peer's unidirectional streams.
        if !stream::is_bidi(stream_id) &&
            !stream::is_local(stream_id, self.is_server)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        let len = buf.as_ref().len();

        // Mark the connection as blocked if the connection-level flow control
        // limit doesn't let us buffer all the data.
        //
        // Note that this is separate from "send capacity" as that also takes
        // congestion control into consideration.
        if self.max_tx_data - self.tx_data < len as u64 {
            self.blocked_limit = Some(self.max_tx_data);
        }

        let cap = self.tx_cap;

        // Get existing stream or create a new one.
        let stream = match self.get_or_create_stream(stream_id, true) {
            Ok(v) => v,

            Err(Error::StreamLimit) => {
                // If the local endpoint has exhausted the peer's stream count
                // limit, record the current limit so that a STREAMS_BLOCKED
                // frame can be sent.
                if self.enable_send_streams_blocked &&
                    stream::is_local(stream_id, self.is_server)
                {
                    if stream::is_bidi(stream_id) {
                        let limit = self.streams.peer_max_streams_bidi();
                        self.streams_blocked_bidi_state.update_at(limit);
                    } else {
                        let limit = self.streams.peer_max_streams_uni();
                        self.streams_blocked_uni_state.update_at(limit);
                    }
                }

                return Err(Error::StreamLimit);
            },

            Err(e) => return Err(e),
        };

        // Snapshot the write offset before buffering, for the qlog event
        // emitted below.
        #[cfg(feature = "qlog")]
        let offset = stream.send.off_back();

        let was_writable = stream.is_writable();

        let was_flushable = stream.is_flushable();

        let is_complete = stream.is_complete();
        let is_readable = stream.is_readable();

        let priority_key = Arc::clone(&stream.priority_key);

        // Return early if the stream has been stopped, and collect its state
        // if complete.
        if let Err(Error::StreamStopped(e)) = stream.send.cap() {
            // Only collect the stream if it is complete and not readable.
            // If it is readable, it will get collected when stream_recv()
            // is used.
            //
            // The stream can't be writable if it has been stopped.
            if is_complete && !is_readable {
                let local = stream.local;
                self.streams.collect(stream_id, local);
            }

            return Err(Error::StreamStopped(e));
        };

        // Truncate the input buffer based on the connection's send capacity if
        // necessary.
        //
        // When the cap is zero, the method returns Ok(0) *only* when the passed
        // buffer is empty. We return Error::Done otherwise.
        if cap == 0 && len > 0 {
            if was_writable {
                // When `stream_writable_next()` returns a stream, the writable
                // mark is removed, but because the stream is blocked by the
                // connection-level send capacity it won't be marked as writable
                // again once the capacity increases.
                //
                // Since the stream is writable already, mark it here instead.
                self.streams.insert_writable(&priority_key);
            }

            return Err(Error::Done);
        }

        // If the connection-level capacity can't fit the whole buffer,
        // truncate the write and suppress the fin flag (more data is still
        // pending on the application side).
        let (cap, fin, blocked_by_cap) = if cap < len {
            (cap, false, true)
        } else {
            (len, fin, false)
        };

        let (sent, ret) = match write_fn(stream, buf, cap, fin) {
            Ok(v) => v,

            Err(e) => {
                self.streams.remove_writable(&priority_key);
                return Err(e);
            },
        };

        let incremental = stream.incremental;
        let priority_key = Arc::clone(&stream.priority_key);

        let flushable = stream.is_flushable();

        let writable = stream.is_writable();

        let empty_fin = len == 0 && fin;

        // A short write means the stream-level limit was hit: record the
        // blocked offset so the peer can be notified; otherwise clear any
        // previously recorded blocked state.
        if sent < cap {
            let max_off = stream.send.max_off();

            if stream.send.blocked_at() != Some(max_off) {
                stream.send.update_blocked_at(Some(max_off));
                self.streams.insert_blocked(stream_id, max_off);
            }
        } else {
            stream.send.update_blocked_at(None);
            self.streams.remove_blocked(stream_id);
        }

        // If the stream is now flushable push it to the flushable queue, but
        // only if it wasn't already queued.
        //
        // Consider the stream flushable also when we are sending a zero-length
        // frame that has the fin flag set.
        if (flushable || empty_fin) && !was_flushable {
            self.streams.insert_flushable(&priority_key);
        }

        if !writable {
            self.streams.remove_writable(&priority_key);
        } else if was_writable && blocked_by_cap {
            // When `stream_writable_next()` returns a stream, the writable
            // mark is removed, but because the stream is blocked by the
            // connection-level send capacity it won't be marked as writable
            // again once the capacity increases.
            //
            // Since the stream is writable already, mark it here instead.
            self.streams.insert_writable(&priority_key);
        }

        // Account for the newly buffered data at the connection level.
        self.tx_cap -= sent;

        self.tx_data += sent as u64;

        self.tx_buffered += sent;
        self.check_tx_buffered_invariant();

        qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
            let ev_data = EventData::QuicStreamDataMoved(
                qlog::events::quic::StreamDataMoved {
                    stream_id: Some(stream_id),
                    offset: Some(offset),
                    raw: Some(RawInfo {
                        length: Some(sent as u64),
                        ..Default::default()
                    }),
                    from: Some(DataRecipient::Application),
                    to: Some(DataRecipient::Transport),
                    additional_info: fin
                        .then_some(DataMovedAdditionalInfo::FinSet),
                },
            );

            let now = Instant::now();
            q.add_event_data_with_instant(ev_data, now).ok();
        });

        // `Ok(0)` is reserved for 0-length writes, so report `Done` when
        // nothing could be buffered despite available capacity.
        if sent == 0 && cap > 0 {
            return Err(Error::Done);
        }

        if incremental && writable {
            // Shuffle the incremental stream to the back of the queue.
            self.streams.remove_writable(&priority_key);
            self.streams.insert_writable(&priority_key);
        }

        Ok(ret)
    }
6171
6172 /// Sets the priority for a stream.
6173 ///
6174 /// A stream's priority determines the order in which stream data is sent
6175 /// on the wire (streams with lower priority are sent first). Streams are
6176 /// created with a default priority of `127`.
6177 ///
6178 /// The target stream is created if it did not exist before calling this
6179 /// method.
6180 pub fn stream_priority(
6181 &mut self, stream_id: u64, urgency: u8, incremental: bool,
6182 ) -> Result<()> {
6183 // Get existing stream or create a new one, but if the stream
6184 // has already been closed and collected, ignore the prioritization.
6185 let stream = match self.get_or_create_stream(stream_id, true) {
6186 Ok(v) => v,
6187
6188 Err(Error::Done) => return Ok(()),
6189
6190 Err(e) => return Err(e),
6191 };
6192
6193 if stream.urgency == urgency && stream.incremental == incremental {
6194 return Ok(());
6195 }
6196
6197 stream.urgency = urgency;
6198 stream.incremental = incremental;
6199
6200 let new_priority_key = Arc::new(StreamPriorityKey {
6201 urgency: stream.urgency,
6202 incremental: stream.incremental,
6203 id: stream_id,
6204 ..Default::default()
6205 });
6206
6207 let old_priority_key =
6208 std::mem::replace(&mut stream.priority_key, new_priority_key.clone());
6209
6210 self.streams
6211 .update_priority(&old_priority_key, &new_priority_key);
6212
6213 Ok(())
6214 }
6215
6216 /// Shuts down reading or writing from/to the specified stream.
6217 ///
6218 /// When the `direction` argument is set to [`Shutdown::Read`], outstanding
6219 /// data in the stream's receive buffer is dropped, and no additional data
6220 /// is added to it. Data received after calling this method is still
6221 /// validated and acked but not stored, and [`stream_recv()`] will not
6222 /// return it to the application. In addition, a `STOP_SENDING` frame will
6223 /// be sent to the peer to signal it to stop sending data.
6224 ///
6225 /// When the `direction` argument is set to [`Shutdown::Write`], outstanding
6226 /// data in the stream's send buffer is dropped, and no additional data is
6227 /// added to it. Data passed to [`stream_send()`] after calling this method
6228 /// will be ignored. In addition, a `RESET_STREAM` frame will be sent to the
6229 /// peer to signal the reset.
6230 ///
6231 /// Locally-initiated unidirectional streams can only be closed in the
6232 /// [`Shutdown::Write`] direction. Remotely-initiated unidirectional streams
6233 /// can only be closed in the [`Shutdown::Read`] direction. Using an
6234 /// incorrect direction will return [`InvalidStreamState`].
6235 ///
6236 /// [`Shutdown::Read`]: enum.Shutdown.html#variant.Read
6237 /// [`Shutdown::Write`]: enum.Shutdown.html#variant.Write
6238 /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
6239 /// [`stream_send()`]: struct.Connection.html#method.stream_send
6240 /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
    pub fn stream_shutdown(
        &mut self, stream_id: u64, direction: Shutdown, err: u64,
    ) -> Result<()> {
        // Don't try to stop a local unidirectional stream.
        if direction == Shutdown::Read &&
            stream::is_local(stream_id, self.is_server) &&
            !stream::is_bidi(stream_id)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        // Don't try to reset a remote unidirectional stream.
        if direction == Shutdown::Write &&
            !stream::is_local(stream_id, self.is_server) &&
            !stream::is_bidi(stream_id)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        // Get existing stream.
        let stream = self.streams.get_mut(stream_id).ok_or(Error::Done)?;

        let priority_key = Arc::clone(&stream.priority_key);

        match direction {
            Shutdown::Read => {
                // Dropped receive data still counts as consumed for
                // connection-level flow control purposes.
                let consumed = stream.recv.shutdown()?;
                self.flow_control.add_consumed(consumed);

                // Only ask the peer to stop sending (STOP_SENDING) if it
                // hasn't already finished the stream.
                if !stream.recv.is_fin() {
                    self.streams.insert_stopped(stream_id, err);
                }

                // Once shutdown, the stream is guaranteed to be non-readable.
                self.streams.remove_readable(&priority_key);

                self.stopped_stream_local_count =
                    self.stopped_stream_local_count.saturating_add(1);
            },

            Shutdown::Write => {
                let (final_size, unsent) = stream.send.shutdown()?;

                // Claw back some flow control allowance from data that was
                // buffered but not actually sent before the stream was reset.
                self.tx_data = self.tx_data.saturating_sub(unsent);

                self.tx_buffered =
                    self.tx_buffered.saturating_sub(unsent as usize);

                // These drops in qlog are a bit weird, but the only way to ensure
                // that all bytes that are moved from App to Transport in
                // stream_do_send are eventually moved from Transport to Dropped.
                // Ideally we would add a Transport to Network transition also as
                // a way to indicate when bytes were transmitted vs dropped
                // without ever being sent.
                qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
                    let ev_data = EventData::QuicStreamDataMoved(
                        qlog::events::quic::StreamDataMoved {
                            stream_id: Some(stream_id),
                            offset: Some(final_size),
                            raw: Some(RawInfo {
                                length: Some(unsent),
                                ..Default::default()
                            }),
                            from: Some(DataRecipient::Transport),
                            to: Some(DataRecipient::Dropped),
                            ..Default::default()
                        },
                    );

                    q.add_event_data_with_instant(ev_data, Instant::now()).ok();
                });

                // Update send capacity.
                self.update_tx_cap();

                // Schedule the RESET_STREAM frame carrying the application
                // error code and the stream's final size.
                self.streams.insert_reset(stream_id, err, final_size);

                // Once shutdown, the stream is guaranteed to be non-writable.
                self.streams.remove_writable(&priority_key);

                self.reset_stream_local_count =
                    self.reset_stream_local_count.saturating_add(1);
            },
        }

        Ok(())
    }
6330
6331 /// Returns the stream's send capacity in bytes.
6332 ///
6333 /// The returned capacity takes into account the stream's flow control limit
6334 /// as well as connection level flow and congestion control.
6335 ///
6336 /// If the specified stream doesn't exist (including when it has already
6337 /// been completed and closed), the [`InvalidStreamState`] error will be
6338 /// returned.
6339 ///
6340 /// In addition, if the peer has signalled that it doesn't want to receive
6341 /// any more data from this stream by sending the `STOP_SENDING` frame, the
6342 /// [`StreamStopped`] error will be returned.
6343 ///
6344 /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
6345 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
6346 #[inline]
6347 pub fn stream_capacity(&mut self, stream_id: u64) -> Result<usize> {
6348 if let Some(stream) = self.streams.get(stream_id) {
6349 let stream_cap = match stream.send.cap() {
6350 Ok(v) => v,
6351
6352 Err(Error::StreamStopped(e)) => {
6353 // Only collect the stream if it is complete and not
6354 // readable. If it is readable, it will get collected when
6355 // stream_recv() is used.
6356 if stream.is_complete() && !stream.is_readable() {
6357 let local = stream.local;
6358 self.streams.collect(stream_id, local);
6359 }
6360
6361 return Err(Error::StreamStopped(e));
6362 },
6363
6364 Err(e) => return Err(e),
6365 };
6366
6367 let cap = cmp::min(self.tx_cap, stream_cap);
6368 return Ok(cap);
6369 };
6370
6371 Err(Error::InvalidStreamState(stream_id))
6372 }
6373
6374 /// Returns the next stream that has data to read.
6375 ///
6376 /// Note that once returned by this method, a stream ID will not be returned
6377 /// again until it is "re-armed".
6378 ///
6379 /// The application will need to read all of the pending data on the stream,
6380 /// and new data has to be received before the stream is reported again.
6381 ///
6382 /// This is unlike the [`readable()`] method, that returns the same list of
6383 /// readable streams when called multiple times in succession.
6384 ///
6385 /// [`readable()`]: struct.Connection.html#method.readable
6386 pub fn stream_readable_next(&mut self) -> Option<u64> {
6387 let priority_key = self.streams.readable.front().clone_pointer()?;
6388
6389 self.streams.remove_readable(&priority_key);
6390
6391 Some(priority_key.id)
6392 }
6393
6394 /// Returns true if the stream has data that can be read.
6395 pub fn stream_readable(&self, stream_id: u64) -> bool {
6396 let stream = match self.streams.get(stream_id) {
6397 Some(v) => v,
6398
6399 None => return false,
6400 };
6401
6402 stream.is_readable()
6403 }
6404
6405 /// Returns the next stream that can be written to.
6406 ///
6407 /// Note that once returned by this method, a stream ID will not be returned
6408 /// again until it is "re-armed".
6409 ///
6410 /// This is unlike the [`writable()`] method, that returns the same list of
6411 /// writable streams when called multiple times in succession. It is not
6412 /// advised to use both `stream_writable_next()` and [`writable()`] on the
6413 /// same connection, as it may lead to unexpected results.
6414 ///
6415 /// The [`stream_writable()`] method can also be used to fine-tune when a
6416 /// stream is reported as writable again.
6417 ///
6418 /// [`stream_writable()`]: struct.Connection.html#method.stream_writable
6419 /// [`writable()`]: struct.Connection.html#method.writable
    pub fn stream_writable_next(&mut self) -> Option<u64> {
        // If there is not enough connection-level send capacity, none of the
        // streams are writable.
        if self.tx_cap == 0 {
            return None;
        }

        // Walk the writable list in priority order, returning the first
        // stream with enough capacity. Entries whose stream no longer
        // exists are skipped.
        let mut cursor = self.streams.writable.front();

        while let Some(priority_key) = cursor.clone_pointer() {
            if let Some(stream) = self.streams.get(priority_key.id) {
                let cap = match stream.send.cap() {
                    Ok(v) => v,

                    // Return the stream to the application immediately if it's
                    // stopped.
                    Err(_) =>
                        return {
                            self.streams.remove_writable(&priority_key);

                            Some(priority_key.id)
                        },
                };

                // Only report the stream once its effective capacity reaches
                // the low send watermark (see `stream_writable()`). Returned
                // streams are de-registered until "re-armed".
                if cmp::min(self.tx_cap, cap) >= stream.send_lowat {
                    self.streams.remove_writable(&priority_key);
                    return Some(priority_key.id);
                }
            }

            cursor.move_next();
        }

        None
    }
6455
6456 /// Returns true if the stream has enough send capacity.
6457 ///
6458 /// When `len` more bytes can be buffered into the given stream's send
6459 /// buffer, `true` will be returned, `false` otherwise.
6460 ///
6461 /// In the latter case, if the additional data can't be buffered due to
6462 /// flow control limits, the peer will also be notified, and a "low send
6463 /// watermark" will be set for the stream, such that it is not going to be
6464 /// reported as writable again by [`stream_writable_next()`] until its send
6465 /// capacity reaches `len`.
6466 ///
6467 /// If the specified stream doesn't exist (including when it has already
6468 /// been completed and closed), the [`InvalidStreamState`] error will be
6469 /// returned.
6470 ///
6471 /// In addition, if the peer has signalled that it doesn't want to receive
6472 /// any more data from this stream by sending the `STOP_SENDING` frame, the
6473 /// [`StreamStopped`] error will be returned.
6474 ///
6475 /// [`stream_writable_next()`]: struct.Connection.html#method.stream_writable_next
6476 /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
6477 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
    #[inline]
    pub fn stream_writable(
        &mut self, stream_id: u64, len: usize,
    ) -> Result<bool> {
        // Enough capacity (stream- and connection-level combined) already.
        if self.stream_capacity(stream_id)? >= len {
            return Ok(true);
        }

        let stream = match self.streams.get_mut(stream_id) {
            Some(v) => v,

            None => return Err(Error::InvalidStreamState(stream_id)),
        };

        // Remember the requested length (at least 1) as the stream's low
        // send watermark, so `stream_writable_next()` holds the stream back
        // until that much capacity is available.
        stream.send_lowat = cmp::max(1, len);

        let is_writable = stream.is_writable();

        let priority_key = Arc::clone(&stream.priority_key);

        // Signal connection-level blocking to the peer if the data wouldn't
        // fit within the connection flow control limit.
        if self.max_tx_data - self.tx_data < len as u64 {
            self.blocked_limit = Some(self.max_tx_data);
        }

        // Likewise record stream-level blocking at the current maximum
        // offset if the stream's own limit is what's in the way.
        if stream.send.cap()? < len {
            let max_off = stream.send.max_off();
            if stream.send.blocked_at() != Some(max_off) {
                stream.send.update_blocked_at(Some(max_off));
                self.streams.insert_blocked(stream_id, max_off);
            }
        } else if is_writable {
            // When `stream_writable_next()` returns a stream, the writable
            // mark is removed, but because the stream is blocked by the
            // connection-level send capacity it won't be marked as writable
            // again once the capacity increases.
            //
            // Since the stream is writable already, mark it here instead.
            self.streams.insert_writable(&priority_key);
        }

        Ok(false)
    }
6520
6521 /// Returns true if all the data has been read from the specified stream.
6522 ///
6523 /// This instructs the application that all the data received from the
6524 /// peer on the stream has been read, and there won't be anymore in the
6525 /// future.
6526 ///
6527 /// Basically this returns true when the peer either set the `fin` flag
6528 /// for the stream, or sent `RESET_STREAM`.
6529 #[inline]
6530 pub fn stream_finished(&self, stream_id: u64) -> bool {
6531 let stream = match self.streams.get(stream_id) {
6532 Some(v) => v,
6533
6534 None => return true,
6535 };
6536
6537 stream.recv.is_fin()
6538 }
6539
    /// Returns the number of bidirectional streams that can be created
    /// before the peer's stream count limit is reached.
    ///
    /// This can be useful to know if it's possible to create a bidirectional
    /// stream without trying it first.
    #[inline]
    pub fn peer_streams_left_bidi(&self) -> u64 {
        // Stream count accounting is kept by the stream map.
        self.streams.peer_streams_left_bidi()
    }
6549
    /// Returns the number of unidirectional streams that can be created
    /// before the peer's stream count limit is reached.
    ///
    /// This can be useful to know if it's possible to create a unidirectional
    /// stream without trying it first.
    #[inline]
    pub fn peer_streams_left_uni(&self) -> u64 {
        // Stream count accounting is kept by the stream map.
        self.streams.peer_streams_left_uni()
    }
6559
6560 /// Returns an iterator over streams that have outstanding data to read.
6561 ///
6562 /// Note that the iterator will only include streams that were readable at
6563 /// the time the iterator itself was created (i.e. when `readable()` was
6564 /// called). To account for newly readable streams, the iterator needs to
6565 /// be created again.
6566 ///
6567 /// ## Examples:
6568 ///
6569 /// ```no_run
6570 /// # let mut buf = [0; 512];
6571 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6572 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6573 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6574 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6575 /// # let local = socket.local_addr().unwrap();
6576 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6577 /// // Iterate over readable streams.
6578 /// for stream_id in conn.readable() {
6579 /// // Stream is readable, read until there's no more data.
6580 /// while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
6581 /// println!("Got {} bytes on stream {}", read, stream_id);
6582 /// }
6583 /// }
6584 /// # Ok::<(), quiche::Error>(())
6585 /// ```
    #[inline]
    pub fn readable(&self) -> StreamIter {
        // The iterator is a snapshot of the streams readable at call time.
        self.streams.readable()
    }
6590
6591 /// Returns an iterator over streams that can be written in priority order.
6592 ///
6593 /// The priority order is based on RFC 9218 scheduling recommendations.
6594 /// Stream priority can be controlled using [`stream_priority()`]. In order
6595 /// to support fairness requirements, each time this method is called,
6596 /// internal state is updated. Therefore the iterator ordering can change
6597 /// between calls, even if no streams were added or removed.
6598 ///
6599 /// A "writable" stream is a stream that has enough flow control capacity to
6600 /// send data to the peer. To avoid buffering an infinite amount of data,
6601 /// streams are only allowed to buffer outgoing data up to the amount that
    /// the peer allows it to send.
6603 ///
6604 /// Note that the iterator will only include streams that were writable at
6605 /// the time the iterator itself was created (i.e. when `writable()` was
6606 /// called). To account for newly writable streams, the iterator needs to be
6607 /// created again.
6608 ///
6609 /// ## Examples:
6610 ///
6611 /// ```no_run
6612 /// # let mut buf = [0; 512];
6613 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6614 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6615 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6616 /// # let local = socket.local_addr().unwrap();
6617 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6618 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6619 /// // Iterate over writable streams.
6620 /// for stream_id in conn.writable() {
6621 /// // Stream is writable, write some data.
6622 /// if let Ok(written) = conn.stream_send(stream_id, &buf, false) {
6623 /// println!("Written {} bytes on stream {}", written, stream_id);
6624 /// }
6625 /// }
6626 /// # Ok::<(), quiche::Error>(())
6627 /// ```
6628 /// [`stream_priority()`]: struct.Connection.html#method.stream_priority
6629 #[inline]
6630 pub fn writable(&self) -> StreamIter {
6631 // If there is not enough connection-level send capacity, none of the
6632 // streams are writable, so return an empty iterator.
6633 if self.tx_cap == 0 {
6634 return StreamIter::default();
6635 }
6636
6637 self.streams.writable()
6638 }
6639
6640 /// Returns the maximum possible size of egress UDP payloads.
6641 ///
6642 /// This is the maximum size of UDP payloads that can be sent, and depends
6643 /// on both the configured maximum send payload size of the local endpoint
6644 /// (as configured with [`set_max_send_udp_payload_size()`]), as well as
6645 /// the transport parameter advertised by the remote peer.
6646 ///
6647 /// Note that this value can change during the lifetime of the connection,
6648 /// but should remain stable across consecutive calls to [`send()`].
6649 ///
6650 /// [`set_max_send_udp_payload_size()`]:
6651 /// struct.Config.html#method.set_max_send_udp_payload_size
6652 /// [`send()`]: struct.Connection.html#method.send
6653 pub fn max_send_udp_payload_size(&self) -> usize {
6654 let max_datagram_size = self
6655 .paths
6656 .get_active()
6657 .ok()
6658 .map(|p| p.recovery.max_datagram_size());
6659
6660 if let Some(max_datagram_size) = max_datagram_size {
6661 if self.is_established() {
6662 // We cap the maximum packet size to 16KB or so, so that it can be
6663 // always encoded with a 2-byte varint.
6664 return cmp::min(16383, max_datagram_size);
6665 }
6666 }
6667
6668 // Allow for 1200 bytes (minimum QUIC packet size) during the
6669 // handshake.
6670 MIN_CLIENT_INITIAL_LEN
6671 }
6672
    /// Schedule an ack-eliciting packet on the active path.
    ///
    /// QUIC packets might not contain ack-eliciting frames during normal
    /// operating conditions. If the packet would already contain
    /// ack-eliciting frames, this method does not change any behavior.
    /// However, if the packet would not ordinarily contain ack-eliciting
    /// frames, this method ensures that a PING frame is sent.
    ///
    /// Calling this method multiple times before [`send()`] has no effect.
    ///
    /// [`send()`]: struct.Connection.html#method.send
    pub fn send_ack_eliciting(&mut self) -> Result<()> {
        // A closing or draining connection must not generate new packets,
        // so this is silently a no-op in those states.
        if self.is_closed() || self.is_draining() {
            return Ok(());
        }
        self.paths.get_active_mut()?.needs_ack_eliciting = true;
        Ok(())
    }
6691
6692 /// Schedule an ack-eliciting packet on the specified path.
6693 ///
6694 /// See [`send_ack_eliciting()`] for more detail. [`InvalidState`] is
6695 /// returned if there is no record of the path.
6696 ///
6697 /// [`send_ack_eliciting()`]: struct.Connection.html#method.send_ack_eliciting
6698 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
6699 pub fn send_ack_eliciting_on_path(
6700 &mut self, local: SocketAddr, peer: SocketAddr,
6701 ) -> Result<()> {
6702 if self.is_closed() || self.is_draining() {
6703 return Ok(());
6704 }
6705 let path_id = self
6706 .paths
6707 .path_id_from_addrs(&(local, peer))
6708 .ok_or(Error::InvalidState)?;
6709 self.paths.get_mut(path_id)?.needs_ack_eliciting = true;
6710 Ok(())
6711 }
6712
    /// Reads the first received DATAGRAM.
    ///
    /// On success the DATAGRAM's data is returned along with its size.
    ///
    /// [`Done`] is returned if there is no data to read.
    ///
    /// [`BufferTooShort`] is returned if the provided buffer is too small for
    /// the DATAGRAM. Note that in this case the DATAGRAM has already been
    /// removed from the receive queue, so its data is lost.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut buf = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// let mut dgram_buf = [0; 512];
    /// while let Ok(len) = conn.dgram_recv(&mut dgram_buf) {
    ///     println!("Got {} bytes of DATAGRAM", len);
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    #[inline]
    pub fn dgram_recv(&mut self, buf: &mut [u8]) -> Result<usize> {
        match self.dgram_recv_queue.pop() {
            Some(d) => {
                // The DATAGRAM was dequeued before this check, so an
                // undersized buffer drops it rather than leaving it queued
                // for a retry.
                if d.as_ref().len() > buf.len() {
                    return Err(Error::BufferTooShort);
                }
                let len = d.as_ref().len();

                buf[..len].copy_from_slice(d.as_ref());
                Ok(len)
            },

            None => Err(Error::Done),
        }
    }
6757
6758 /// Reads the first received DATAGRAM.
6759 ///
6760 /// This is the same as [`dgram_recv()`] but returns the DATAGRAM as an
6761 /// owned buffer instead of copying into the provided buffer.
6762 ///
6763 /// [`dgram_recv()`]: struct.Connection.html#method.dgram_recv
6764 #[inline]
6765 pub fn dgram_recv_buf(&mut self) -> Result<F::DgramBuf> {
6766 self.dgram_recv_queue.pop().ok_or(Error::Done)
6767 }
6768
    /// Reads the first received DATAGRAM without removing it from the queue.
    ///
    /// On success the DATAGRAM's data is returned along with the actual number
    /// of bytes peeked. The requested length cannot exceed the DATAGRAM's
    /// actual length.
    ///
    /// [`Done`] is returned if there is no data to read.
    ///
    /// [`BufferTooShort`] is returned if the provided buffer is smaller than
    /// the number of bytes to peek.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
    #[inline]
    pub fn dgram_recv_peek(&self, buf: &mut [u8], len: usize) -> Result<usize> {
        self.dgram_recv_queue.peek_front_bytes(buf, len)
    }
6786
    /// Returns the length of the first stored DATAGRAM.
    ///
    /// `None` is returned when there is no DATAGRAM length to report
    /// (presumably an empty receive queue).
    #[inline]
    pub fn dgram_recv_front_len(&self) -> Option<usize> {
        self.dgram_recv_queue.peek_front_len()
    }
6792
    /// Returns the number of items in the DATAGRAM receive queue.
    ///
    /// This counts whole DATAGRAMs, not bytes; see
    /// [`dgram_recv_queue_byte_size()`] for the cumulative byte count.
    ///
    /// [`dgram_recv_queue_byte_size()`]: struct.Connection.html#method.dgram_recv_queue_byte_size
    #[inline]
    pub fn dgram_recv_queue_len(&self) -> usize {
        self.dgram_recv_queue.len()
    }
6798
    /// Returns the total size of all items in the DATAGRAM receive queue.
    ///
    /// See [`dgram_recv_queue_len()`] for the number of queued items.
    ///
    /// [`dgram_recv_queue_len()`]: struct.Connection.html#method.dgram_recv_queue_len
    #[inline]
    pub fn dgram_recv_queue_byte_size(&self) -> usize {
        self.dgram_recv_queue.byte_size()
    }
6804
    /// Returns the number of items in the DATAGRAM send queue.
    ///
    /// This counts whole DATAGRAMs, not bytes; see
    /// [`dgram_send_queue_byte_size()`] for the cumulative byte count.
    ///
    /// [`dgram_send_queue_byte_size()`]: struct.Connection.html#method.dgram_send_queue_byte_size
    #[inline]
    pub fn dgram_send_queue_len(&self) -> usize {
        self.dgram_send_queue.len()
    }
6810
    /// Returns the total size of all items in the DATAGRAM send queue.
    ///
    /// See [`dgram_send_queue_len()`] for the number of queued items.
    ///
    /// [`dgram_send_queue_len()`]: struct.Connection.html#method.dgram_send_queue_len
    #[inline]
    pub fn dgram_send_queue_byte_size(&self) -> usize {
        self.dgram_send_queue.byte_size()
    }
6816
    /// Returns whether or not the DATAGRAM send queue is full.
    ///
    /// See [`dgram_send_queue_len()`] and [`dgram_send_queue_byte_size()`]
    /// for the queue's current usage.
    ///
    /// [`dgram_send_queue_len()`]: struct.Connection.html#method.dgram_send_queue_len
    /// [`dgram_send_queue_byte_size()`]: struct.Connection.html#method.dgram_send_queue_byte_size
    #[inline]
    pub fn is_dgram_send_queue_full(&self) -> bool {
        self.dgram_send_queue.is_full()
    }
6822
    /// Returns whether or not the DATAGRAM recv queue is full.
    ///
    /// See [`dgram_recv_queue_len()`] and [`dgram_recv_queue_byte_size()`]
    /// for the queue's current usage.
    ///
    /// [`dgram_recv_queue_len()`]: struct.Connection.html#method.dgram_recv_queue_len
    /// [`dgram_recv_queue_byte_size()`]: struct.Connection.html#method.dgram_recv_queue_byte_size
    #[inline]
    pub fn is_dgram_recv_queue_full(&self) -> bool {
        self.dgram_recv_queue.is_full()
    }
6828
    /// Sends data in a DATAGRAM frame.
    ///
    /// [`Done`] is returned if no data was written.
    /// [`InvalidState`] is returned if the peer does not support DATAGRAM.
    /// [`BufferTooShort`] is returned if the DATAGRAM frame length is larger
    /// than peer's supported DATAGRAM frame length. Use
    /// [`dgram_max_writable_len()`] to get the largest supported DATAGRAM
    /// frame length.
    ///
    /// Note that there is no flow control of DATAGRAM frames, so in order to
    /// avoid buffering an infinite amount of frames we apply an internal
    /// limit.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`InvalidState`]: enum.Error.html#variant.InvalidState
    /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
    /// [`dgram_max_writable_len()`]:
    /// struct.Connection.html#method.dgram_max_writable_len
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut buf = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// conn.dgram_send(b"hello")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn dgram_send(&mut self, buf: &[u8]) -> Result<()> {
        // Convert the slice into an owned buffer (this copies the data) and
        // defer to the owned-buffer variant for validation and queueing.
        self.dgram_send_buf(F::dgram_buf_from_slice(buf))
    }
6864
6865 /// Sends data in a DATAGRAM frame.
6866 ///
6867 /// This is the same as [`dgram_send()`] but takes an owned buffer
6868 /// instead of a slice and avoids copying.
6869 ///
6870 /// [`dgram_send()`]: struct.Connection.html#method.dgram_send
6871 pub fn dgram_send_buf(&mut self, buf: F::DgramBuf) -> Result<()> {
6872 let max_payload_len = match self.dgram_max_writable_len() {
6873 Some(v) => v,
6874
6875 None => return Err(Error::InvalidState),
6876 };
6877
6878 if buf.as_ref().len() > max_payload_len {
6879 return Err(Error::BufferTooShort);
6880 }
6881
6882 self.dgram_send_queue.push(buf)?;
6883
6884 let active_path = self.paths.get_active_mut()?;
6885
6886 if self.dgram_send_queue.byte_size() >
6887 active_path.recovery.cwnd_available()
6888 {
6889 active_path.recovery.update_app_limited(false);
6890 }
6891
6892 Ok(())
6893 }
6894
    /// Purges queued outgoing DATAGRAMs matching the predicate.
    ///
    /// In other words, remove all elements `e` such that `f(&e)` returns true.
    ///
    /// ## Examples:
    /// ```no_run
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// conn.dgram_send(b"hello")?;
    /// conn.dgram_purge_outgoing(&|d: &[u8]| -> bool { d[0] == 0 });
    /// # Ok::<(), quiche::Error>(())
    /// ```
    #[inline]
    pub fn dgram_purge_outgoing<FN: Fn(&[u8]) -> bool>(&mut self, f: FN) {
        // The predicate is applied to each queued DATAGRAM's payload bytes.
        self.dgram_send_queue.purge(f);
    }
6915
6916 /// Returns the maximum DATAGRAM payload that can be sent.
6917 ///
6918 /// [`None`] is returned if the peer hasn't advertised a maximum DATAGRAM
6919 /// frame size.
6920 ///
6921 /// ## Examples:
6922 ///
6923 /// ```no_run
6924 /// # let mut buf = [0; 512];
6925 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6926 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6927 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6928 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6929 /// # let local = socket.local_addr().unwrap();
6930 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6931 /// if let Some(payload_size) = conn.dgram_max_writable_len() {
6932 /// if payload_size > 5 {
6933 /// conn.dgram_send(b"hello")?;
6934 /// }
6935 /// }
6936 /// # Ok::<(), quiche::Error>(())
6937 /// ```
6938 #[inline]
6939 pub fn dgram_max_writable_len(&self) -> Option<usize> {
6940 match self.peer_transport_params.max_datagram_frame_size {
6941 None => None,
6942 Some(peer_frame_len) => {
6943 let dcid = self.destination_id();
6944 // Start from the maximum packet size...
6945 let mut max_len = self.max_send_udp_payload_size();
6946 // ...subtract the Short packet header overhead...
6947 // (1 byte of pkt_len + len of dcid)
6948 max_len = max_len.saturating_sub(1 + dcid.len());
6949 // ...subtract the packet number (max len)...
6950 max_len = max_len.saturating_sub(packet::MAX_PKT_NUM_LEN);
6951 // ...subtract the crypto overhead...
6952 max_len = max_len.saturating_sub(
6953 self.crypto_ctx[packet::Epoch::Application]
6954 .crypto_overhead()?,
6955 );
6956 // ...clamp to what peer can support...
6957 max_len = cmp::min(peer_frame_len as usize, max_len);
6958 // ...subtract frame overhead, checked for underflow.
6959 // (1 byte of frame type + len of length )
6960 max_len.checked_sub(1 + frame::MAX_DGRAM_OVERHEAD)
6961 },
6962 }
6963 }
6964
    /// Returns whether the local transport parameters advertise a
    /// `max_datagram_frame_size`, i.e. whether DATAGRAM support is enabled
    /// on this endpoint.
    fn dgram_enabled(&self) -> bool {
        self.local_transport_params
            .max_datagram_frame_size
            .is_some()
    }
6970
6971 /// Returns when the next timeout event will occur.
6972 ///
6973 /// Once the timeout Instant has been reached, the [`on_timeout()`] method
6974 /// should be called. A timeout of `None` means that the timer should be
6975 /// disarmed.
6976 ///
6977 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
6978 pub fn timeout_instant(&self) -> Option<Instant> {
6979 if self.is_closed() {
6980 return None;
6981 }
6982
6983 if self.is_draining() {
6984 // Draining timer takes precedence over all other timers. If it is
6985 // set it means the connection is closing so there's no point in
6986 // processing the other timers.
6987 self.draining_timer
6988 } else {
6989 // Use the lowest timer value (i.e. "sooner") among idle and loss
6990 // detection timers. If they are both unset (i.e. `None`) then the
6991 // result is `None`, but if at least one of them is set then a
6992 // `Some(...)` value is returned.
6993 let path_timer = self
6994 .paths
6995 .iter()
6996 .filter_map(|(_, p)| p.recovery.loss_detection_timer())
6997 .min();
6998
6999 let key_update_timer = self.crypto_ctx[packet::Epoch::Application]
7000 .key_update
7001 .as_ref()
7002 .map(|key_update| key_update.timer);
7003
7004 let timers = [self.idle_timer, path_timer, key_update_timer];
7005
7006 timers.iter().filter_map(|&x| x).min()
7007 }
7008 }
7009
7010 /// Returns the amount of time until the next timeout event.
7011 ///
7012 /// Once the given duration has elapsed, the [`on_timeout()`] method should
7013 /// be called. A timeout of `None` means that the timer should be disarmed.
7014 ///
7015 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
7016 pub fn timeout(&self) -> Option<Duration> {
7017 self.timeout_instant().map(|timeout| {
7018 let now = Instant::now();
7019
7020 if timeout <= now {
7021 Duration::ZERO
7022 } else {
7023 timeout.duration_since(now)
7024 }
7025 })
7026 }
7027
    /// Processes a timeout event.
    ///
    /// If no timeout has occurred it does nothing. Timers are checked in
    /// priority order: draining, idle, key update, then per-path loss
    /// detection.
    pub fn on_timeout(&mut self) {
        let now = Instant::now();

        if let Some(draining_timer) = self.draining_timer {
            if draining_timer <= now {
                trace!("{} draining timeout expired", self.trace_id);

                self.mark_closed();
            }

            // Draining timer takes precedence over all other timers. If it is
            // set it means the connection is closing so there's no point in
            // processing the other timers.
            return;
        }

        // An expired idle timer closes the connection and records that it
        // timed out (as opposed to being closed deliberately).
        if let Some(timer) = self.idle_timer {
            if timer <= now {
                trace!("{} idle timeout expired", self.trace_id);

                self.mark_closed();
                self.timed_out = true;
                return;
            }
        }

        // Drop the previous key material once the key-update timer fires.
        if let Some(timer) = self.crypto_ctx[packet::Epoch::Application]
            .key_update
            .as_ref()
            .map(|key_update| key_update.timer)
        {
            if timer <= now {
                // Discard previous key once key update timer expired.
                let _ = self.crypto_ctx[packet::Epoch::Application]
                    .key_update
                    .take();
            }
        }

        let handshake_status = self.handshake_status();

        // Run loss detection on every path whose timer fired, aggregating
        // the loss counters at the connection level.
        for (_, p) in self.paths.iter_mut() {
            if let Some(timer) = p.recovery.loss_detection_timer() {
                if timer <= now {
                    trace!("{} loss detection timeout expired", self.trace_id);

                    let OnLossDetectionTimeoutOutcome {
                        lost_packets,
                        lost_bytes,
                    } = p.on_loss_detection_timeout(
                        handshake_status,
                        now,
                        self.is_server,
                        &self.trace_id,
                    );

                    self.lost_count += lost_packets;
                    self.lost_bytes += lost_bytes as u64;

                    qlog_with_type!(QLOG_METRICS, self.qlog, q, {
                        p.recovery.maybe_qlog(q, now);
                    });
                }
            }
        }

        // Notify timeout events to the application.
        self.paths.notify_failed_validations();

        // If the active path failed, try to find a new candidate.
        if self.paths.get_active_path_id().is_err() {
            match self.paths.find_candidate_path() {
                Some(pid) => {
                    if self.set_active_path(pid, now).is_err() {
                        // The connection cannot continue.
                        self.mark_closed();
                    }
                },

                // The connection cannot continue.
                None => {
                    self.mark_closed();
                },
            }
        }
    }
7117
    /// Requests the stack to perform path validation of the proposed 4-tuple.
    ///
    /// Probing new paths requires spare Connection IDs at both the host and the
    /// peer sides. If it is not the case, it raises an [`OutOfIdentifiers`].
    ///
    /// The probing of new addresses can only be done by the client. The server
    /// can only probe network paths that were previously advertised by
    /// [`PathEvent::New`]. If the server tries to probe such an unseen network
    /// path, this call raises an [`InvalidState`].
    ///
    /// The caller might also want to probe an existing path. In such case, it
    /// triggers a PATH_CHALLENGE frame, but it does not require spare CIDs.
    ///
    /// A server always probes a new path it observes. Calling this method is
    /// hence not required to validate a new path. However, a server can still
    /// request an additional path validation of the proposed 4-tuple.
    ///
    /// Calling this method several times before calling [`send()`] or
    /// [`send_on_path()`] results in a single probe being generated. An
    /// application wanting to send multiple in-flight probes must call this
    /// method again after having sent packets.
    ///
    /// Returns the Destination Connection ID sequence number associated to that
    /// path.
    ///
    /// [`PathEvent::New`]: enum.PathEvent.html#variant.New
    /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
    /// [`InvalidState`]: enum.Error.html#InvalidState
    /// [`send()`]: struct.Connection.html#method.send
    /// [`send_on_path()`]: struct.Connection.html#method.send_on_path
    pub fn probe_path(
        &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
    ) -> Result<u64> {
        // We may want to probe an existing path. Otherwise the 4-tuple is
        // new and a path record must be created first (client-side only).
        let pid = match self.paths.path_id_from_addrs(&(local_addr, peer_addr)) {
            Some(pid) => pid,
            None => self.create_path_on_client(local_addr, peer_addr)?,
        };

        // Arm the validation request on the (possibly new) path; the actual
        // PATH_CHALLENGE is emitted on a subsequent send.
        let path = self.paths.get_mut(pid)?;
        path.request_validation();

        path.active_dcid_seq.ok_or(Error::InvalidState)
    }
7162
7163 /// Migrates the connection to a new local address `local_addr`.
7164 ///
7165 /// The behavior is similar to [`migrate()`], with the nuance that the
7166 /// connection only changes the local address, but not the peer one.
7167 ///
7168 /// See [`migrate()`] for the full specification of this method.
7169 ///
7170 /// [`migrate()`]: struct.Connection.html#method.migrate
7171 pub fn migrate_source(&mut self, local_addr: SocketAddr) -> Result<u64> {
7172 let peer_addr = self.paths.get_active()?.peer_addr();
7173 self.migrate(local_addr, peer_addr)
7174 }
7175
    /// Migrates the connection over the given network path between `local_addr`
    /// and `peer_addr`.
    ///
    /// Connection migration can only be initiated by the client. Calling this
    /// method as a server returns [`InvalidState`].
    ///
    /// To initiate voluntary migration, there should be enough Connection IDs
    /// at both sides. If this requirement is not satisfied, this call returns
    /// [`OutOfIdentifiers`].
    ///
    /// Returns the Destination Connection ID associated to that migrated path.
    ///
    /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
    /// [`InvalidState`]: enum.Error.html#InvalidState
    pub fn migrate(
        &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
    ) -> Result<u64> {
        // Only clients may initiate migration.
        if self.is_server {
            return Err(Error::InvalidState);
        }

        // If the path already exists, mark it as the active one.
        let (pid, dcid_seq) = if let Some(pid) =
            self.paths.path_id_from_addrs(&(local_addr, peer_addr))
        {
            let path = self.paths.get_mut(pid)?;

            // If it is already active, do nothing.
            if path.active() {
                return path.active_dcid_seq.ok_or(Error::OutOfIdentifiers);
            }

            // Ensures that a Source Connection ID has been dedicated to this
            // path, or a free one is available. This is only required if the
            // host uses non-zero length Source Connection IDs.
            if !self.ids.zero_length_scid() &&
                path.active_scid_seq.is_none() &&
                self.ids.available_scids() == 0
            {
                return Err(Error::OutOfIdentifiers);
            }

            // Ensures that the migrated path has a Destination Connection ID.
            let dcid_seq = if let Some(dcid_seq) = path.active_dcid_seq {
                dcid_seq
            } else {
                // No DCID yet: take the lowest spare one and bind it to this
                // path.
                let dcid_seq = self
                    .ids
                    .lowest_available_dcid_seq()
                    .ok_or(Error::OutOfIdentifiers)?;

                self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
                path.active_dcid_seq = Some(dcid_seq);

                dcid_seq
            };

            (pid, dcid_seq)
        } else {
            // The path does not exist yet: create it and use the DCID it was
            // assigned during creation.
            let pid = self.create_path_on_client(local_addr, peer_addr)?;

            let dcid_seq = self
                .paths
                .get(pid)?
                .active_dcid_seq
                .ok_or(Error::InvalidState)?;

            (pid, dcid_seq)
        };

        // Change the active path.
        self.set_active_path(pid, Instant::now())?;

        Ok(dcid_seq)
    }
7251
    /// Provides additional source Connection IDs that the peer can use to reach
    /// this host.
    ///
    /// This triggers sending NEW_CONNECTION_ID frames if the provided Source
    /// Connection ID is not already present. In the case the caller tries to
    /// reuse a Connection ID with a different reset token, this raises an
    /// `InvalidState`.
    ///
    /// At any time, the peer cannot have more Destination Connection IDs than
    /// the maximum number of active Connection IDs it negotiated. In such case
    /// (i.e., when [`scids_left()`] returns 0), if the host agrees to
    /// request the removal of previous connection IDs, it sets the
    /// `retire_if_needed` parameter. Otherwise, an [`IdLimit`] is returned.
    ///
    /// Note that setting `retire_if_needed` does not prevent this function from
    /// returning an [`IdLimit`] in the case the caller wants to retire still
    /// unannounced Connection IDs.
    ///
    /// The caller is responsible for ensuring that the provided `scid` is not
    /// repeated several times over the connection. quiche ensures that as long
    /// as the provided Connection ID is still in use (i.e., not retired), it
    /// does not assign a different sequence number.
    ///
    /// Note that if the host uses zero-length Source Connection IDs, it cannot
    /// advertise Source Connection IDs and calling this method returns an
    /// [`InvalidState`].
    ///
    /// Returns the sequence number associated to the provided Connection ID.
    ///
    /// [`scids_left()`]: struct.Connection.html#method.scids_left
    /// [`IdLimit`]: enum.Error.html#IdLimit
    /// [`InvalidState`]: enum.Error.html#InvalidState
    pub fn new_scid(
        &mut self, scid: &ConnectionId, reset_token: u128, retire_if_needed: bool,
    ) -> Result<u64> {
        // The CID manager performs the validation described above and
        // assigns the sequence number.
        self.ids.new_scid(
            scid.to_vec().into(),
            Some(reset_token),
            true,
            None,
            retire_if_needed,
        )
    }
7295
    /// Returns the number of source Connection IDs that are active.
    ///
    /// This is only meaningful if the host uses non-zero length Source
    /// Connection IDs.
    pub fn active_scids(&self) -> usize {
        self.ids.active_source_cids()
    }
7301
7302 /// Returns the number of source Connection IDs that should be provided
7303 /// to the peer without exceeding the limit it advertised.
7304 ///
7305 /// This will automatically limit the number of Connection IDs to the
7306 /// minimum between the locally configured active connection ID limit,
7307 /// and the one sent by the peer.
7308 ///
7309 /// To obtain the maximum possible value allowed by the peer an application
7310 /// can instead inspect the [`peer_active_conn_id_limit`] value.
7311 ///
7312 /// [`peer_active_conn_id_limit`]: struct.Stats.html#structfield.peer_active_conn_id_limit
7313 #[inline]
7314 pub fn scids_left(&self) -> usize {
7315 let max_active_source_cids = cmp::min(
7316 self.peer_transport_params.active_conn_id_limit,
7317 self.local_transport_params.active_conn_id_limit,
7318 ) as usize;
7319
7320 max_active_source_cids - self.active_scids()
7321 }
7322
    /// Requests the retirement of the destination Connection ID used by the
    /// host to reach its peer.
    ///
    /// This triggers sending RETIRE_CONNECTION_ID frames.
    ///
    /// If the application tries to retire a non-existing Destination Connection
    /// ID sequence number, or if it uses zero-length Destination Connection ID,
    /// this method returns an [`InvalidState`].
    ///
    /// At any time, the host must have at least one Destination ID. If the
    /// application tries to retire the last one, or if the caller tries to
    /// retire the destination Connection ID used by the current active path
    /// while having neither spare Destination Connection IDs nor validated
    /// network paths, this method returns an [`OutOfIdentifiers`]. This
    /// behavior prevents the caller from stalling the connection due to the
    /// lack of validated path to send non-probing packets.
    ///
    /// [`InvalidState`]: enum.Error.html#InvalidState
    /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
    pub fn retire_dcid(&mut self, dcid_seq: u64) -> Result<()> {
        if self.ids.zero_length_dcid() {
            return Err(Error::InvalidState);
        }

        let active_path_dcid_seq = self
            .paths
            .get_active()?
            .active_dcid_seq
            .ok_or(Error::InvalidState)?;

        let active_path_id = self.paths.get_active_path_id()?;

        // Refuse to retire the active path's DCID when there is neither a
        // spare DCID nor another usable path, as that would stall the
        // connection.
        if active_path_dcid_seq == dcid_seq &&
            self.ids.lowest_available_dcid_seq().is_none() &&
            !self
                .paths
                .iter()
                .any(|(pid, p)| pid != active_path_id && p.usable())
        {
            return Err(Error::OutOfIdentifiers);
        }

        if let Some(pid) = self.ids.retire_dcid(dcid_seq)? {
            // The retired Destination CID was associated to a given path. Let's
            // find an available DCID to associate to that path.
            let path = self.paths.get_mut(pid)?;
            let dcid_seq = self.ids.lowest_available_dcid_seq();

            if let Some(dcid_seq) = dcid_seq {
                self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
            }

            // May be `None` when no spare DCID exists, leaving the path
            // without an active DCID.
            path.active_dcid_seq = dcid_seq;
        }

        Ok(())
    }
7380
    /// Processes path-specific events.
    ///
    /// On success it returns a [`PathEvent`], or `None` when there are no
    /// events to report. Please refer to [`PathEvent`] for the exhaustive event
    /// list.
    ///
    /// Note that all events are edge-triggered, meaning that once reported they
    /// will not be reported again by calling this method again, until the event
    /// is re-armed.
    ///
    /// [`PathEvent`]: enum.PathEvent.html
    pub fn path_event_next(&mut self) -> Option<PathEvent> {
        // Events are queued by the path manager and consumed here one at a
        // time.
        self.paths.pop_event()
    }
7395
    /// Returns the number of source Connection IDs that are retired.
    ///
    /// Retired IDs can be drained one at a time with [`retired_scid_next()`].
    ///
    /// [`retired_scid_next()`]: struct.Connection.html#method.retired_scid_next
    pub fn retired_scids(&self) -> usize {
        self.ids.retired_source_cids()
    }
7400
    /// Returns a source `ConnectionId` that has been retired.
    ///
    /// On success it returns a [`ConnectionId`], or `None` when there are no
    /// more retired connection IDs. Each call removes the returned ID from
    /// the retired set.
    ///
    /// [`ConnectionId`]: struct.ConnectionId.html
    pub fn retired_scid_next(&mut self) -> Option<ConnectionId<'static>> {
        self.ids.pop_retired_scid()
    }
7410
    /// Returns the number of spare Destination Connection IDs, i.e.,
    /// Destination Connection IDs that are still unused.
    ///
    /// Note that this function returns 0 if the host uses zero length
    /// Destination Connection IDs.
    pub fn available_dcids(&self) -> usize {
        self.ids.available_dcids()
    }
7419
7420 /// Returns an iterator over destination `SockAddr`s whose association
7421 /// with `from` forms a known QUIC path on which packets can be sent to.
7422 ///
7423 /// This function is typically used in combination with [`send_on_path()`].
7424 ///
7425 /// Note that the iterator includes all the possible combination of
7426 /// destination `SockAddr`s, even those whose sending is not required now.
7427 /// In other words, this is another way for the application to recall from
7428 /// past [`PathEvent::New`] events.
7429 ///
7430 /// [`PathEvent::New`]: enum.PathEvent.html#variant.New
7431 /// [`send_on_path()`]: struct.Connection.html#method.send_on_path
7432 ///
7433 /// ## Examples:
7434 ///
7435 /// ```no_run
7436 /// # let mut out = [0; 512];
7437 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
7438 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
7439 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
7440 /// # let local = socket.local_addr().unwrap();
7441 /// # let peer = "127.0.0.1:1234".parse().unwrap();
7442 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
7443 /// // Iterate over possible destinations for the given local `SockAddr`.
7444 /// for dest in conn.paths_iter(local) {
7445 /// loop {
7446 /// let (write, send_info) =
7447 /// match conn.send_on_path(&mut out, Some(local), Some(dest)) {
7448 /// Ok(v) => v,
7449 ///
7450 /// Err(quiche::Error::Done) => {
7451 /// // Done writing for this destination.
7452 /// break;
7453 /// },
7454 ///
7455 /// Err(e) => {
7456 /// // An error occurred, handle it.
7457 /// break;
7458 /// },
7459 /// };
7460 ///
7461 /// socket.send_to(&out[..write], &send_info.to).unwrap();
7462 /// }
7463 /// }
7464 /// # Ok::<(), quiche::Error>(())
7465 /// ```
7466 #[inline]
7467 pub fn paths_iter(&self, from: SocketAddr) -> SocketAddrIter {
7468 // Instead of trying to identify whether packets will be sent on the
7469 // given 4-tuple, simply filter paths that cannot be used.
7470 SocketAddrIter {
7471 sockaddrs: self
7472 .paths
7473 .iter()
7474 .filter(|(_, p)| p.active_dcid_seq.is_some())
7475 .filter(|(_, p)| p.usable() || p.probing_required())
7476 .filter(|(_, p)| p.local_addr() == from)
7477 .map(|(_, p)| p.peer_addr())
7478 .collect(),
7479
7480 index: 0,
7481 }
7482 }
7483
7484 /// Closes the connection with the given error and reason.
7485 ///
7486 /// The `app` parameter specifies whether an application close should be
7487 /// sent to the peer. Otherwise a normal connection close is sent.
7488 ///
7489 /// If `app` is true but the connection is not in a state that is safe to
7490 /// send an application error (not established nor in early data), in
7491 /// accordance with [RFC
7492 /// 9000](https://www.rfc-editor.org/rfc/rfc9000.html#section-10.2.3-3), the
7493 /// error code is changed to APPLICATION_ERROR and the reason phrase is
7494 /// cleared.
7495 ///
7496 /// Returns [`Done`] if the connection had already been closed.
7497 ///
7498 /// Note that the connection will not be closed immediately. An application
7499 /// should continue calling the [`recv()`], [`send()`], [`timeout()`] and
7500 /// [`on_timeout()`] methods as normal, until the [`is_closed()`] method
7501 /// returns `true`.
7502 ///
7503 /// [`Done`]: enum.Error.html#variant.Done
7504 /// [`recv()`]: struct.Connection.html#method.recv
7505 /// [`send()`]: struct.Connection.html#method.send
7506 /// [`timeout()`]: struct.Connection.html#method.timeout
7507 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
7508 /// [`is_closed()`]: struct.Connection.html#method.is_closed
7509 pub fn close(&mut self, app: bool, err: u64, reason: &[u8]) -> Result<()> {
7510 if self.is_closed() || self.is_draining() {
7511 return Err(Error::Done);
7512 }
7513
7514 if self.local_error.is_some() {
7515 return Err(Error::Done);
7516 }
7517
7518 let is_safe_to_send_app_data =
7519 self.is_established() || self.is_in_early_data();
7520
7521 if app && !is_safe_to_send_app_data {
7522 // Clear error information.
7523 self.local_error = Some(ConnectionError {
7524 is_app: false,
7525 error_code: 0x0c,
7526 reason: vec![],
7527 });
7528 } else {
7529 self.local_error = Some(ConnectionError {
7530 is_app: app,
7531 error_code: err,
7532 reason: reason.to_vec(),
7533 });
7534 }
7535
7536 // When no packet was successfully processed close connection immediately.
7537 if self.recv_count == 0 {
7538 self.mark_closed();
7539 }
7540
7541 Ok(())
7542 }
7543
    /// Returns a string uniquely representing the connection.
    ///
    /// This can be used for logging purposes to differentiate between multiple
    /// connections. The returned value borrows from the connection.
    #[inline]
    pub fn trace_id(&self) -> &str {
        &self.trace_id
    }
7552
    /// Returns the negotiated ALPN protocol.
    ///
    /// If no protocol has been negotiated, the returned value is empty.
    #[inline]
    pub fn application_proto(&self) -> &[u8] {
        // `alpn` stays empty until the handshake negotiates a protocol.
        self.alpn.as_ref()
    }
7560
    /// Returns the server name requested by the client, as reported by the
    /// TLS handshake.
    #[inline]
    pub fn server_name(&self) -> Option<&str> {
        self.handshake.server_name()
    }
7566
    /// Returns the peer's leaf certificate (if any) as a DER-encoded buffer.
    #[inline]
    pub fn peer_cert(&self) -> Option<&[u8]> {
        // Delegates to the TLS handshake state; the buffer is borrowed, not
        // copied.
        self.handshake.peer_cert()
    }
7572
    /// Returns the peer's certificate chain (if any) as a vector of DER-encoded
    /// buffers.
    ///
    /// The certificate at index 0 is the peer's leaf certificate, the other
    /// certificates (if any) are the chain certificate authorities used to
    /// sign the leaf certificate.
    #[inline]
    pub fn peer_cert_chain(&self) -> Option<Vec<&[u8]>> {
        // Delegates to the TLS handshake state; only the Vec of borrows is
        // allocated, not the certificate data itself.
        self.handshake.peer_cert_chain()
    }
7583
    /// Returns the serialized cryptographic session for the connection.
    ///
    /// This can be used by a client to cache a connection's session, and resume
    /// it later using the [`set_session()`] method.
    ///
    /// [`set_session()`]: struct.Connection.html#method.set_session
    #[inline]
    pub fn session(&self) -> Option<&[u8]> {
        // Borrows the cached session bytes; `None` until the TLS stack has
        // produced a session.
        self.session.as_deref()
    }
7594
7595 /// Returns the source connection ID.
7596 ///
7597 /// When there are multiple IDs, and if there is an active path, the ID used
7598 /// on that path is returned. Otherwise the oldest ID is returned.
7599 ///
7600 /// Note that the value returned can change throughout the connection's
7601 /// lifetime.
7602 #[inline]
7603 pub fn source_id(&self) -> ConnectionId<'_> {
7604 if let Ok(path) = self.paths.get_active() {
7605 if let Some(active_scid_seq) = path.active_scid_seq {
7606 if let Ok(e) = self.ids.get_scid(active_scid_seq) {
7607 return ConnectionId::from_ref(e.cid.as_ref());
7608 }
7609 }
7610 }
7611
7612 let e = self.ids.oldest_scid();
7613 ConnectionId::from_ref(e.cid.as_ref())
7614 }
7615
    /// Returns all active source connection IDs.
    ///
    /// An iterator is returned for all active IDs (i.e. ones that have not
    /// been explicitly retired yet).
    #[inline]
    pub fn source_ids(&self) -> impl Iterator<Item = &ConnectionId<'_>> {
        // Lazily iterates over the IDs tracked by the connection ID manager.
        self.ids.scids_iter()
    }
7624
7625 /// Returns the destination connection ID.
7626 ///
7627 /// Note that the value returned can change throughout the connection's
7628 /// lifetime.
7629 #[inline]
7630 pub fn destination_id(&self) -> ConnectionId<'_> {
7631 if let Ok(path) = self.paths.get_active() {
7632 if let Some(active_dcid_seq) = path.active_dcid_seq {
7633 if let Ok(e) = self.ids.get_dcid(active_dcid_seq) {
7634 return ConnectionId::from_ref(e.cid.as_ref());
7635 }
7636 }
7637 }
7638
7639 let e = self.ids.oldest_dcid();
7640 ConnectionId::from_ref(e.cid.as_ref())
7641 }
7642
7643 /// Returns the PMTU for the active path if it exists.
7644 ///
7645 /// This requires no additonal packets to be sent but simply checks if PMTUD
7646 /// has completed and has found a valid PMTU.
7647 #[inline]
7648 pub fn pmtu(&self) -> Option<usize> {
7649 if let Ok(path) = self.paths.get_active() {
7650 path.pmtud.as_ref().and_then(|pmtud| pmtud.get_pmtu())
7651 } else {
7652 None
7653 }
7654 }
7655
7656 /// Revalidates the PMTU for the active path by sending a new probe packet
7657 /// of PMTU size. If the probe is dropped PMTUD will restart and find a new
7658 /// valid PMTU.
7659 #[inline]
7660 pub fn revalidate_pmtu(&mut self) {
7661 if let Ok(active_path) = self.paths.get_active_mut() {
7662 if let Some(pmtud) = active_path.pmtud.as_mut() {
7663 pmtud.revalidate_pmtu();
7664 }
7665 }
7666 }
7667
    /// Returns true if the connection handshake is complete.
    #[inline]
    pub fn is_established(&self) -> bool {
        // Set in `do_handshake()` once the TLS stack reports completion.
        self.handshake_completed
    }
7673
    /// Returns true if the connection is resumed.
    #[inline]
    pub fn is_resumed(&self) -> bool {
        // Delegates to the TLS handshake state.
        self.handshake.is_resumed()
    }
7679
    /// Returns true if the connection has a pending handshake that has
    /// progressed enough to send or receive early data.
    #[inline]
    pub fn is_in_early_data(&self) -> bool {
        // Delegates to the TLS handshake state.
        self.handshake.is_in_early_data()
    }
7686
    /// Returns the early data reason for the connection.
    ///
    /// This status can be useful for logging and debugging. See [BoringSSL]
    /// documentation for a definition of the reasons.
    ///
    /// [BoringSSL]: https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#ssl_early_data_reason_t
    #[inline]
    pub fn early_data_reason(&self) -> u32 {
        // Raw reason code straight from the TLS stack.
        self.handshake.early_data_reason()
    }
7697
    /// Returns whether there is stream or DATAGRAM data available to read.
    #[inline]
    pub fn is_readable(&self) -> bool {
        // True when any stream has buffered readable data, or at least one
        // DATAGRAM is queued for delivery.
        self.streams.has_readable() || self.dgram_recv_front_len().is_some()
    }
7703
7704 /// Returns whether the network path with local address `from` and remote
7705 /// address `peer` has been validated.
7706 ///
7707 /// If the 4-tuple does not exist over the connection, returns an
7708 /// [`InvalidState`].
7709 ///
7710 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
7711 pub fn is_path_validated(
7712 &self, from: SocketAddr, to: SocketAddr,
7713 ) -> Result<bool> {
7714 let pid = self
7715 .paths
7716 .path_id_from_addrs(&(from, to))
7717 .ok_or(Error::InvalidState)?;
7718
7719 Ok(self.paths.get(pid)?.validated())
7720 }
7721
    /// Returns true if the connection is draining.
    ///
    /// If this returns `true`, the connection object cannot yet be dropped, but
    /// no new application data can be sent or received. An application should
    /// continue calling the [`recv()`], [`timeout()`], and [`on_timeout()`]
    /// methods as normal, until the [`is_closed()`] method returns `true`.
    ///
    /// In contrast, once `is_draining()` returns `true`, calling [`send()`]
    /// is not required because no new outgoing packets will be generated.
    ///
    /// [`recv()`]: struct.Connection.html#method.recv
    /// [`send()`]: struct.Connection.html#method.send
    /// [`timeout()`]: struct.Connection.html#method.timeout
    /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
    /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn is_draining(&self) -> bool {
        // The draining period is tracked by a dedicated timer, armed only
        // while the connection is draining.
        self.draining_timer.is_some()
    }
7741
    /// Returns true if the connection is closed.
    ///
    /// If this returns true, the connection object can be dropped.
    #[inline]
    pub fn is_closed(&self) -> bool {
        // Terminal-state flag; once true the connection does no further work.
        self.closed
    }
7749
    /// Returns true if the connection was closed due to the idle timeout.
    #[inline]
    pub fn is_timed_out(&self) -> bool {
        // Set when the close was triggered by the idle timer rather than an
        // explicit close or peer error.
        self.timed_out
    }
7755
    /// Returns the error received from the peer, if any.
    ///
    /// Note that a `Some` return value does not necessarily imply
    /// [`is_closed()`] or any other connection state.
    ///
    /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn peer_error(&self) -> Option<&ConnectionError> {
        // Borrow of the last error frame received from the peer, if any.
        self.peer_error.as_ref()
    }
7766
    /// Returns the error [`close()`] was called with, or internally
    /// created quiche errors, if any.
    ///
    /// Note that a `Some` return value does not necessarily imply
    /// [`is_closed()`] or any other connection state.
    /// `Some` also does not guarantee that the error has been sent to
    /// or received by the peer.
    ///
    /// [`close()`]: struct.Connection.html#method.close
    /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn local_error(&self) -> Option<&ConnectionError> {
        // Borrow of the locally-recorded error (set by `close()` or
        // internally), if any.
        self.local_error.as_ref()
    }
7781
    /// Collects and returns statistics about the connection.
    #[inline]
    pub fn stats(&self) -> Stats {
        // Point-in-time snapshot of the connection-wide counters. Per-path
        // statistics are available separately via `path_stats()`.
        Stats {
            recv: self.recv_count,
            sent: self.sent_count,
            lost: self.lost_count,
            spurious_lost: self.spurious_lost_count,
            retrans: self.retrans_count,
            sent_bytes: self.sent_bytes,
            recv_bytes: self.recv_bytes,
            acked_bytes: self.acked_bytes,
            lost_bytes: self.lost_bytes,
            stream_retrans_bytes: self.stream_retrans_bytes,
            dgram_recv: self.dgram_recv_count,
            dgram_sent: self.dgram_sent_count,
            paths_count: self.paths.len(),
            reset_stream_count_local: self.reset_stream_local_count,
            stopped_stream_count_local: self.stopped_stream_local_count,
            reset_stream_count_remote: self.reset_stream_remote_count,
            stopped_stream_count_remote: self.stopped_stream_remote_count,
            data_blocked_sent_count: self.data_blocked_sent_count,
            stream_data_blocked_sent_count: self.stream_data_blocked_sent_count,
            data_blocked_recv_count: self.data_blocked_recv_count,
            stream_data_blocked_recv_count: self.stream_data_blocked_recv_count,
            streams_blocked_bidi_recv_count: self.streams_blocked_bidi_recv_count,
            streams_blocked_uni_recv_count: self.streams_blocked_uni_recv_count,
            path_challenge_rx_count: self.path_challenge_rx_count,
            amplification_limited_count: self.amplification_limited_count,
            // Derived on the fly by summing over all known paths.
            bytes_in_flight_duration: self.bytes_in_flight_duration(),
            tx_buffered_state: self.tx_buffered_state,
        }
    }
7815
7816 /// Returns the sum of the durations when each path in the
7817 /// connection was actively sending bytes or waiting for acks.
7818 /// Note that this could result in a duration that is longer than
7819 /// the actual connection duration in cases where multiple paths
7820 /// are active for extended periods of time. In practice only 1
7821 /// path is typically active at a time.
7822 /// TODO revisit computation if in the future multiple paths are
7823 /// often active at the same time.
7824 fn bytes_in_flight_duration(&self) -> Duration {
7825 self.paths.iter().fold(Duration::ZERO, |acc, (_, path)| {
7826 acc + path.bytes_in_flight_duration()
7827 })
7828 }
7829
7830 /// Returns reference to peer's transport parameters. Returns `None` if we
7831 /// have not yet processed the peer's transport parameters.
7832 pub fn peer_transport_params(&self) -> Option<&TransportParams> {
7833 if !self.parsed_peer_transport_params {
7834 return None;
7835 }
7836
7837 Some(&self.peer_transport_params)
7838 }
7839
    /// Collects and returns statistics about each known path for the
    /// connection.
    pub fn path_stats(&self) -> impl Iterator<Item = PathStats> + '_ {
        // Produces one `PathStats` snapshot per known path, lazily.
        self.paths.iter().map(|(_, p)| p.stats())
    }
7845
    /// Returns whether or not this is a server-side connection.
    pub fn is_server(&self) -> bool {
        // Fixed at connection creation; never changes afterwards.
        self.is_server
    }
7850
    /// Encodes the local transport parameters and hands them to the TLS
    /// stack for inclusion in the handshake.
    fn encode_transport_params(&mut self) -> Result<()> {
        self.handshake.set_quic_transport_params(
            &self.local_transport_params,
            self.is_server,
        )
    }
7857
7858 fn parse_peer_transport_params(
7859 &mut self, peer_params: TransportParams,
7860 ) -> Result<()> {
7861 // Validate initial_source_connection_id.
7862 match &peer_params.initial_source_connection_id {
7863 Some(v) if v != &self.destination_id() =>
7864 return Err(Error::InvalidTransportParam),
7865
7866 Some(_) => (),
7867
7868 // initial_source_connection_id must be sent by
7869 // both endpoints.
7870 None => return Err(Error::InvalidTransportParam),
7871 }
7872
7873 // Validate original_destination_connection_id.
7874 if let Some(odcid) = &self.odcid {
7875 match &peer_params.original_destination_connection_id {
7876 Some(v) if v != odcid =>
7877 return Err(Error::InvalidTransportParam),
7878
7879 Some(_) => (),
7880
7881 // original_destination_connection_id must be
7882 // sent by the server.
7883 None if !self.is_server =>
7884 return Err(Error::InvalidTransportParam),
7885
7886 None => (),
7887 }
7888 }
7889
7890 // Validate retry_source_connection_id.
7891 if let Some(rscid) = &self.rscid {
7892 match &peer_params.retry_source_connection_id {
7893 Some(v) if v != rscid =>
7894 return Err(Error::InvalidTransportParam),
7895
7896 Some(_) => (),
7897
7898 // retry_source_connection_id must be sent by
7899 // the server.
7900 None => return Err(Error::InvalidTransportParam),
7901 }
7902 }
7903
7904 self.process_peer_transport_params(peer_params)?;
7905
7906 self.parsed_peer_transport_params = true;
7907
7908 Ok(())
7909 }
7910
    /// Applies the peer's (already validated) transport parameters to the
    /// connection's flow control, stream, recovery and connection ID state.
    fn process_peer_transport_params(
        &mut self, peer_params: TransportParams,
    ) -> Result<()> {
        // Connection-level send limit advertised by the peer.
        self.max_tx_data = peer_params.initial_max_data;

        // Update send capacity.
        self.update_tx_cap();

        self.streams
            .update_peer_max_streams_bidi(peer_params.initial_max_streams_bidi);
        self.streams
            .update_peer_max_streams_uni(peer_params.initial_max_streams_uni);

        let max_ack_delay = Duration::from_millis(peer_params.max_ack_delay);

        self.recovery_config.max_ack_delay = max_ack_delay;

        let active_path = self.paths.get_active_mut()?;

        active_path.recovery.update_max_ack_delay(max_ack_delay);

        // When PMTUD is probing, cap the probe size by the peer's advertised
        // max_udp_payload_size; otherwise just adopt the peer's limit as the
        // max datagram size.
        if active_path
            .pmtud
            .as_ref()
            .map(|pmtud| pmtud.should_probe())
            .unwrap_or(false)
        {
            active_path.recovery.pmtud_update_max_datagram_size(
                active_path
                    .pmtud
                    .as_mut()
                    .expect("PMTUD existence verified above")
                    .get_probe_size()
                    .min(peer_params.max_udp_payload_size as usize),
            );
        } else {
            active_path.recovery.update_max_datagram_size(
                peer_params.max_udp_payload_size as usize,
            );
        }

        // Record the max_active_conn_id parameter advertised by the peer.
        self.ids
            .set_source_conn_id_limit(peer_params.active_conn_id_limit);

        self.peer_transport_params = peer_params;

        Ok(())
    }
7960
    /// Continues the handshake.
    ///
    /// If the connection is already established, it does nothing.
    fn do_handshake(&mut self, now: Instant) -> Result<()> {
        // Bundle the connection state that the TLS callbacks are allowed to
        // read and mutate during handshake processing.
        let mut ex_data = tls::ExData {
            application_protos: &self.application_protos,

            crypto_ctx: &mut self.crypto_ctx,

            session: &mut self.session,

            local_error: &mut self.local_error,

            keylog: self.keylog.as_mut(),

            trace_id: &self.trace_id,

            local_transport_params: self.local_transport_params.clone(),

            recovery_config: self.recovery_config,

            tx_cap_factor: self.tx_cap_factor,

            pmtud: None,

            is_server: self.is_server,

            use_initial_max_data_as_flow_control_win: false,
        };

        if self.handshake_completed {
            return self.handshake.process_post_handshake(&mut ex_data);
        }

        match self.handshake.do_handshake(&mut ex_data) {
            Ok(_) => (),

            Err(Error::Done) => {
                // Apply in-handshake configuration from callbacks if the path's
                // Recovery module can still be reinitialized.
                if self
                    .paths
                    .get_active()
                    .map(|p| p.can_reinit_recovery())
                    .unwrap_or(false)
                {
                    if ex_data.recovery_config != self.recovery_config {
                        if let Ok(path) = self.paths.get_active_mut() {
                            self.recovery_config = ex_data.recovery_config;
                            path.reinit_recovery(&self.recovery_config);
                        }
                    }

                    if ex_data.tx_cap_factor != self.tx_cap_factor {
                        self.tx_cap_factor = ex_data.tx_cap_factor;
                    }

                    if let Some((discover, max_probes)) = ex_data.pmtud {
                        self.paths.set_discover_pmtu_on_existing_paths(
                            discover,
                            self.recovery_config.max_send_udp_payload_size,
                            max_probes,
                        );
                    }

                    if ex_data.local_transport_params !=
                        self.local_transport_params
                    {
                        self.streams.set_max_streams_bidi(
                            ex_data
                                .local_transport_params
                                .initial_max_streams_bidi,
                        );

                        self.local_transport_params =
                            ex_data.local_transport_params;
                    }
                }

                if ex_data.use_initial_max_data_as_flow_control_win {
                    self.enable_use_initial_max_data_as_flow_control_win();
                }

                // Try to parse transport parameters as soon as the first flight
                // of handshake data is processed.
                //
                // This is potentially dangerous as the handshake hasn't been
                // completed yet, though it's required to be able to send data
                // in 0.5 RTT.
                let raw_params = self.handshake.quic_transport_params();

                if !self.parsed_peer_transport_params && !raw_params.is_empty() {
                    let peer_params = TransportParams::decode(
                        raw_params,
                        self.is_server,
                        self.peer_transport_params_track_unknown,
                    )?;

                    self.parse_peer_transport_params(peer_params)?;
                }

                return Ok(());
            },

            Err(e) => return Err(e),
        };

        // The handshake made progress: refresh completion state and the
        // negotiated ALPN protocol.
        self.handshake_completed = self.handshake.is_completed();

        self.alpn = self.handshake.alpn_protocol().to_vec();

        let raw_params = self.handshake.quic_transport_params();

        if !self.parsed_peer_transport_params && !raw_params.is_empty() {
            let peer_params = TransportParams::decode(
                raw_params,
                self.is_server,
                self.peer_transport_params_track_unknown,
            )?;

            self.parse_peer_transport_params(peer_params)?;
        }

        if self.handshake_completed {
            // The handshake is considered confirmed at the server when the
            // handshake completes, at which point we can also drop the
            // handshake epoch.
            if self.is_server {
                self.handshake_confirmed = true;

                self.drop_epoch_state(packet::Epoch::Handshake, now);
            }

            // Once the handshake is completed there's no point in processing
            // 0-RTT packets anymore, so clear the buffer now.
            self.undecryptable_pkts.clear();

            trace!("{} connection established: proto={:?} cipher={:?} curve={:?} sigalg={:?} resumed={} {:?}",
                   &self.trace_id,
                   std::str::from_utf8(self.application_proto()),
                   self.handshake.cipher(),
                   self.handshake.curve(),
                   self.handshake.sigalg(),
                   self.handshake.is_resumed(),
                   self.peer_transport_params);
        }

        Ok(())
    }
8110
    /// Use the value of the initial max_data / initial stream max_data setting
    /// as the initial flow control window for the connection and streams.
    /// The connection-level flow control window will only be changed if it
    /// hasn't been auto tuned yet. For streams: only newly created streams
    /// receive the new setting.
    fn enable_use_initial_max_data_as_flow_control_win(&mut self) {
        self.flow_control.set_window_if_not_tuned_yet(
            self.local_transport_params.initial_max_data,
        );
        self.streams
            .set_use_initial_max_data_as_flow_control_win(true);
    }
8123
    /// Selects the packet type for the next outgoing packet.
    ///
    /// Returns `Err(Error::Done)` when there is nothing to send in any
    /// packet number space.
    fn write_pkt_type(&self, send_pid: usize) -> Result<Type> {
        // On error send packet in the latest epoch available, but only send
        // 1-RTT ones when the handshake is completed.
        if self
            .local_error
            .as_ref()
            .is_some_and(|conn_err| !conn_err.is_app)
        {
            let epoch = match self.handshake.write_level() {
                crypto::Level::Initial => packet::Epoch::Initial,
                // The TLS write level is not expected to be 0-RTT here.
                crypto::Level::ZeroRTT => unreachable!(),
                crypto::Level::Handshake => packet::Epoch::Handshake,
                crypto::Level::OneRTT => packet::Epoch::Application,
            };

            if !self.handshake_confirmed {
                match epoch {
                    // Downgrade the epoch to Handshake as the handshake is not
                    // completed yet.
                    packet::Epoch::Application => return Ok(Type::Handshake),

                    // Downgrade the epoch to Initial as the remote peer might
                    // not be able to decrypt handshake packets yet.
                    packet::Epoch::Handshake
                        if self.crypto_ctx[packet::Epoch::Initial].has_keys() =>
                        return Ok(Type::Initial),

                    _ => (),
                };
            }

            return Ok(Type::from_epoch(epoch));
        }

        // Otherwise pick the lowest epoch (Initial first) that has send keys
        // and pending data, lost frames, or PTO probes.
        for &epoch in packet::Epoch::epochs(
            packet::Epoch::Initial..=packet::Epoch::Application,
        ) {
            let crypto_ctx = &self.crypto_ctx[epoch];
            let pkt_space = &self.pkt_num_spaces[epoch];

            // Only send packets in a space when we have the send keys for it.
            if crypto_ctx.crypto_seal.is_none() {
                continue;
            }

            // We are ready to send data for this packet number space.
            if crypto_ctx.data_available() || pkt_space.ready() {
                return Ok(Type::from_epoch(epoch));
            }

            // There are lost frames in this packet number space.
            for (_, p) in self.paths.iter() {
                if p.recovery.has_lost_frames(epoch) {
                    return Ok(Type::from_epoch(epoch));
                }

                // We need to send PTO probe packets.
                if p.recovery.loss_probes(epoch) > 0 {
                    return Ok(Type::from_epoch(epoch));
                }
            }
        }

        // If there are flushable, almost full or blocked streams, use the
        // Application epoch.
        let send_path = self.paths.get(send_pid)?;
        if (self.is_established() || self.is_in_early_data()) &&
            (self.should_send_handshake_done() ||
                self.flow_control.should_update_max_data() ||
                self.should_send_max_data ||
                self.blocked_limit.is_some() ||
                self.streams_blocked_bidi_state
                    .has_pending_stream_blocked_frame() ||
                self.streams_blocked_uni_state
                    .has_pending_stream_blocked_frame() ||
                self.dgram_send_queue.has_pending() ||
                self.local_error
                    .as_ref()
                    .is_some_and(|conn_err| conn_err.is_app) ||
                self.should_send_max_streams_bidi ||
                self.streams.should_update_max_streams_bidi() ||
                self.should_send_max_streams_uni ||
                self.streams.should_update_max_streams_uni() ||
                self.streams.has_flushable() ||
                self.streams.has_almost_full() ||
                self.streams.has_blocked() ||
                self.streams.has_reset() ||
                self.streams.has_stopped() ||
                self.ids.has_new_scids() ||
                self.ids.has_retire_dcids() ||
                send_path
                    .pmtud
                    .as_ref()
                    .is_some_and(|pmtud| pmtud.should_probe()) ||
                send_path.needs_ack_eliciting ||
                send_path.probing_required())
        {
            // Only clients can send 0-RTT packets.
            if !self.is_server && self.is_in_early_data() {
                return Ok(Type::ZeroRTT);
            }

            return Ok(Type::Short);
        }

        Err(Error::Done)
    }
8232
    /// Returns the mutable stream with the given ID if it exists, or creates
    /// a new one otherwise.
    ///
    /// Stream creation is subject to both endpoints' transport parameters
    /// (stream limits and flow control), which are passed through here.
    fn get_or_create_stream(
        &mut self, id: u64, local: bool,
    ) -> Result<&mut stream::Stream<F>> {
        self.streams.get_or_create(
            id,
            &self.local_transport_params,
            &self.peer_transport_params,
            local,
            self.is_server,
        )
    }
8246
8247 /// Processes an incoming frame.
8248 fn process_frame(
8249 &mut self, frame: frame::Frame, hdr: &Header, recv_path_id: usize,
8250 epoch: packet::Epoch, now: Instant,
8251 ) -> Result<()> {
8252 trace!("{} rx frm {:?}", self.trace_id, frame);
8253
8254 match frame {
8255 frame::Frame::Padding { .. } => (),
8256
8257 frame::Frame::Ping { .. } => (),
8258
8259 frame::Frame::ACK {
8260 ranges, ack_delay, ..
8261 } => {
8262 let ack_delay = ack_delay
8263 .checked_mul(2_u64.pow(
8264 self.peer_transport_params.ack_delay_exponent as u32,
8265 ))
8266 .ok_or(Error::InvalidFrame)?;
8267
8268 if epoch == packet::Epoch::Handshake ||
8269 (epoch == packet::Epoch::Application &&
8270 self.is_established())
8271 {
8272 self.peer_verified_initial_address = true;
8273 }
8274
8275 let handshake_status = self.handshake_status();
8276
8277 let is_app_limited = self.delivery_rate_check_if_app_limited();
8278
8279 let largest_acked = ranges.last().expect(
8280 "ACK frames should always have at least one ack range",
8281 );
8282
8283 for (_, p) in self.paths.iter_mut() {
8284 if self.pkt_num_spaces[epoch]
8285 .largest_tx_pkt_num
8286 .is_some_and(|largest_sent| largest_sent < largest_acked)
8287 {
8288 // https://www.rfc-editor.org/rfc/rfc9000#section-13.1
8289 // An endpoint SHOULD treat receipt of an acknowledgment
8290 // for a packet it did not send as
8291 // a connection error of type PROTOCOL_VIOLATION
8292 return Err(Error::InvalidAckRange);
8293 }
8294
8295 if is_app_limited {
8296 p.recovery.delivery_rate_update_app_limited(true);
8297 }
8298
8299 let OnAckReceivedOutcome {
8300 lost_packets,
8301 lost_bytes,
8302 acked_bytes,
8303 spurious_losses,
8304 } = p.recovery.on_ack_received(
8305 &ranges,
8306 ack_delay,
8307 epoch,
8308 handshake_status,
8309 now,
8310 self.pkt_num_manager.skip_pn(),
8311 &self.trace_id,
8312 )?;
8313
8314 let skip_pn = self.pkt_num_manager.skip_pn();
8315 let largest_acked =
8316 p.recovery.get_largest_acked_on_epoch(epoch);
8317
8318 // Consider the skip_pn validated if the peer has sent an ack
8319 // for a larger pkt number.
8320 if let Some((largest_acked, skip_pn)) =
8321 largest_acked.zip(skip_pn)
8322 {
8323 if largest_acked > skip_pn {
8324 self.pkt_num_manager.set_skip_pn(None);
8325 }
8326 }
8327
8328 self.lost_count += lost_packets;
8329 self.lost_bytes += lost_bytes as u64;
8330 self.acked_bytes += acked_bytes as u64;
8331 self.spurious_lost_count += spurious_losses;
8332 }
8333 },
8334
8335 frame::Frame::ResetStream {
8336 stream_id,
8337 error_code,
8338 final_size,
8339 } => {
8340 // Peer can't send on our unidirectional streams.
8341 if !stream::is_bidi(stream_id) &&
8342 stream::is_local(stream_id, self.is_server)
8343 {
8344 return Err(Error::InvalidStreamState(stream_id));
8345 }
8346
8347 let max_rx_data_left = self.max_rx_data() - self.rx_data;
8348
8349 // Get existing stream or create a new one, but if the stream
8350 // has already been closed and collected, ignore the frame.
8351 //
8352 // This can happen if e.g. an ACK frame is lost, and the peer
8353 // retransmits another frame before it realizes that the stream
8354 // is gone.
8355 //
8356 // Note that it makes it impossible to check if the frame is
8357 // illegal, since we have no state, but since we ignore the
8358 // frame, it should be fine.
8359 let stream = match self.get_or_create_stream(stream_id, false) {
8360 Ok(v) => v,
8361
8362 Err(Error::Done) => return Ok(()),
8363
8364 Err(e) => return Err(e),
8365 };
8366
8367 let was_readable = stream.is_readable();
8368 let priority_key = Arc::clone(&stream.priority_key);
8369
8370 let stream::RecvBufResetReturn {
8371 max_data_delta,
8372 consumed_flowcontrol,
8373 } = stream.recv.reset(error_code, final_size)?;
8374
8375 if max_data_delta > max_rx_data_left {
8376 return Err(Error::FlowControl);
8377 }
8378
8379 if !was_readable && stream.is_readable() {
8380 self.streams.insert_readable(&priority_key);
8381 }
8382
8383 self.rx_data += max_data_delta;
8384 // We dropped the receive buffer, return connection level
8385 // flow-control
8386 self.flow_control.add_consumed(consumed_flowcontrol);
8387
8388 self.reset_stream_remote_count =
8389 self.reset_stream_remote_count.saturating_add(1);
8390 },
8391
8392 frame::Frame::StopSending {
8393 stream_id,
8394 error_code,
8395 } => {
8396 // STOP_SENDING on a receive-only stream is a fatal error.
8397 if !stream::is_local(stream_id, self.is_server) &&
8398 !stream::is_bidi(stream_id)
8399 {
8400 return Err(Error::InvalidStreamState(stream_id));
8401 }
8402
8403 // Get existing stream or create a new one, but if the stream
8404 // has already been closed and collected, ignore the frame.
8405 //
8406 // This can happen if e.g. an ACK frame is lost, and the peer
8407 // retransmits another frame before it realizes that the stream
8408 // is gone.
8409 //
8410 // Note that it makes it impossible to check if the frame is
8411 // illegal, since we have no state, but since we ignore the
8412 // frame, it should be fine.
8413 let stream = match self.get_or_create_stream(stream_id, false) {
8414 Ok(v) => v,
8415
8416 Err(Error::Done) => return Ok(()),
8417
8418 Err(e) => return Err(e),
8419 };
8420
8421 let was_writable = stream.is_writable();
8422
8423 let priority_key = Arc::clone(&stream.priority_key);
8424
8425 // Try stopping the stream.
8426 if let Ok((final_size, unsent)) = stream.send.stop(error_code) {
8427 // Claw back some flow control allowance from data that was
8428 // buffered but not actually sent before the stream was
8429 // reset.
8430 //
8431 // Note that `tx_cap` will be updated later on, so no need
8432 // to touch it here.
8433 self.tx_data = self.tx_data.saturating_sub(unsent);
8434
8435 self.tx_buffered =
8436 self.tx_buffered.saturating_sub(unsent as usize);
8437
8438 // These drops in qlog are a bit weird, but the only way to
8439 // ensure that all bytes that are moved from App to Transport
8440 // in stream_do_send are eventually moved from Transport to
8441 // Dropped. Ideally we would add a Transport to Network
8442 // transition also as a way to indicate when bytes were
8443 // transmitted vs dropped without ever being sent.
8444 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
8445 let ev_data = EventData::QuicStreamDataMoved(
8446 qlog::events::quic::StreamDataMoved {
8447 stream_id: Some(stream_id),
8448 offset: Some(final_size),
8449 raw: Some(RawInfo {
8450 length: Some(unsent),
8451 ..Default::default()
8452 }),
8453 from: Some(DataRecipient::Transport),
8454 to: Some(DataRecipient::Dropped),
8455 ..Default::default()
8456 },
8457 );
8458
8459 q.add_event_data_with_instant(ev_data, now).ok();
8460 });
8461
8462 self.streams.insert_reset(stream_id, error_code, final_size);
8463
8464 if !was_writable {
8465 self.streams.insert_writable(&priority_key);
8466 }
8467
8468 self.stopped_stream_remote_count =
8469 self.stopped_stream_remote_count.saturating_add(1);
8470 self.reset_stream_local_count =
8471 self.reset_stream_local_count.saturating_add(1);
8472 }
8473 },
8474
8475 frame::Frame::Crypto { data } => {
8476 if data.max_off() >= MAX_CRYPTO_STREAM_OFFSET {
8477 return Err(Error::CryptoBufferExceeded);
8478 }
8479
8480 // Push the data to the stream so it can be re-ordered.
8481 self.crypto_ctx[epoch].crypto_stream.recv.write(data)?;
8482
8483 // Feed crypto data to the TLS state, if there's data
8484 // available at the expected offset.
8485 let mut crypto_buf = [0; 512];
8486
8487 let level = crypto::Level::from_epoch(epoch);
8488
8489 let stream = &mut self.crypto_ctx[epoch].crypto_stream;
8490
8491 while let Ok((read, _)) = stream.recv.emit(&mut crypto_buf) {
8492 let recv_buf = &crypto_buf[..read];
8493 self.handshake.provide_data(level, recv_buf)?;
8494 }
8495
8496 self.do_handshake(now)?;
8497 },
8498
8499 frame::Frame::CryptoHeader { .. } => unreachable!(),
8500
8501 // TODO: implement stateless retry
8502 frame::Frame::NewToken { .. } =>
8503 if self.is_server {
8504 return Err(Error::InvalidPacket);
8505 },
8506
8507 frame::Frame::Stream { stream_id, data } => {
8508 // Peer can't send on our unidirectional streams.
8509 if !stream::is_bidi(stream_id) &&
8510 stream::is_local(stream_id, self.is_server)
8511 {
8512 return Err(Error::InvalidStreamState(stream_id));
8513 }
8514
8515 let max_rx_data_left = self.max_rx_data() - self.rx_data;
8516
8517 // Get existing stream or create a new one, but if the stream
8518 // has already been closed and collected, ignore the frame.
8519 //
8520 // This can happen if e.g. an ACK frame is lost, and the peer
8521 // retransmits another frame before it realizes that the stream
8522 // is gone.
8523 //
8524 // Note that it makes it impossible to check if the frame is
8525 // illegal, since we have no state, but since we ignore the
8526 // frame, it should be fine.
8527 let stream = match self.get_or_create_stream(stream_id, false) {
8528 Ok(v) => v,
8529
8530 Err(Error::Done) => return Ok(()),
8531
8532 Err(e) => return Err(e),
8533 };
8534
8535 // Check for the connection-level flow control limit.
8536 let max_off_delta =
8537 data.max_off().saturating_sub(stream.recv.max_off());
8538
8539 if max_off_delta > max_rx_data_left {
8540 return Err(Error::FlowControl);
8541 }
8542
8543 let was_readable = stream.is_readable();
8544 let priority_key = Arc::clone(&stream.priority_key);
8545
8546 let was_draining = stream.recv.is_draining();
8547
8548 stream.recv.write(data)?;
8549
8550 if !was_readable && stream.is_readable() {
8551 self.streams.insert_readable(&priority_key);
8552 }
8553
8554 self.rx_data += max_off_delta;
8555
8556 if was_draining {
8557 // When a stream is in draining state it will not queue
8558 // incoming data for the application to read, so consider
8559 // the received data as consumed, which might trigger a flow
8560 // control update.
8561 self.flow_control.add_consumed(max_off_delta);
8562 }
8563 },
8564
8565 frame::Frame::StreamHeader { .. } => unreachable!(),
8566
8567 frame::Frame::MaxData { max } => {
8568 self.max_tx_data = cmp::max(self.max_tx_data, max);
8569 },
8570
8571 frame::Frame::MaxStreamData { stream_id, max } => {
8572 // Peer can't receive on its own unidirectional streams.
8573 if !stream::is_bidi(stream_id) &&
8574 !stream::is_local(stream_id, self.is_server)
8575 {
8576 return Err(Error::InvalidStreamState(stream_id));
8577 }
8578
8579 // Get existing stream or create a new one, but if the stream
8580 // has already been closed and collected, ignore the frame.
8581 //
8582 // This can happen if e.g. an ACK frame is lost, and the peer
8583 // retransmits another frame before it realizes that the stream
8584 // is gone.
8585 //
8586 // Note that it makes it impossible to check if the frame is
8587 // illegal, since we have no state, but since we ignore the
8588 // frame, it should be fine.
8589 let stream = match self.get_or_create_stream(stream_id, false) {
8590 Ok(v) => v,
8591
8592 Err(Error::Done) => return Ok(()),
8593
8594 Err(e) => return Err(e),
8595 };
8596
8597 let was_flushable = stream.is_flushable();
8598
8599 stream.send.update_max_data(max);
8600
8601 let writable = stream.is_writable();
8602
8603 let priority_key = Arc::clone(&stream.priority_key);
8604
8605 // If the stream is now flushable push it to the flushable queue,
8606 // but only if it wasn't already queued.
8607 if stream.is_flushable() && !was_flushable {
8608 let priority_key = Arc::clone(&stream.priority_key);
8609 self.streams.insert_flushable(&priority_key);
8610 }
8611
8612 if writable {
8613 self.streams.insert_writable(&priority_key);
8614 }
8615 },
8616
8617 frame::Frame::MaxStreamsBidi { max } => {
8618 if max > MAX_STREAM_ID {
8619 return Err(Error::InvalidFrame);
8620 }
8621
8622 self.streams.update_peer_max_streams_bidi(max);
8623 },
8624
8625 frame::Frame::MaxStreamsUni { max } => {
8626 if max > MAX_STREAM_ID {
8627 return Err(Error::InvalidFrame);
8628 }
8629
8630 self.streams.update_peer_max_streams_uni(max);
8631 },
8632
8633 frame::Frame::DataBlocked { .. } => {
8634 self.data_blocked_recv_count =
8635 self.data_blocked_recv_count.saturating_add(1);
8636 },
8637
8638 frame::Frame::StreamDataBlocked { .. } => {
8639 self.stream_data_blocked_recv_count =
8640 self.stream_data_blocked_recv_count.saturating_add(1);
8641 },
8642
8643 frame::Frame::StreamsBlockedBidi { limit } => {
8644 if limit > MAX_STREAM_ID {
8645 return Err(Error::InvalidFrame);
8646 }
8647
8648 self.streams_blocked_bidi_recv_count =
8649 self.streams_blocked_bidi_recv_count.saturating_add(1);
8650 },
8651
8652 frame::Frame::StreamsBlockedUni { limit } => {
8653 if limit > MAX_STREAM_ID {
8654 return Err(Error::InvalidFrame);
8655 }
8656
8657 self.streams_blocked_uni_recv_count =
8658 self.streams_blocked_uni_recv_count.saturating_add(1);
8659 },
8660
8661 frame::Frame::NewConnectionId {
8662 seq_num,
8663 retire_prior_to,
8664 conn_id,
8665 reset_token,
8666 } => {
8667 if self.ids.zero_length_dcid() {
8668 return Err(Error::InvalidState);
8669 }
8670
8671 let mut retired_path_ids = SmallVec::new();
8672
8673 // Retire pending path IDs before propagating the error code to
8674 // make sure retired connection IDs are not in use anymore.
8675 let new_dcid_res = self.ids.new_dcid(
8676 conn_id.into(),
8677 seq_num,
8678 u128::from_be_bytes(reset_token),
8679 retire_prior_to,
8680 &mut retired_path_ids,
8681 );
8682
8683 for (dcid_seq, pid) in retired_path_ids {
8684 let path = self.paths.get_mut(pid)?;
8685
8686 // Maybe the path already switched to another DCID.
8687 if path.active_dcid_seq != Some(dcid_seq) {
8688 continue;
8689 }
8690
8691 if let Some(new_dcid_seq) =
8692 self.ids.lowest_available_dcid_seq()
8693 {
8694 path.active_dcid_seq = Some(new_dcid_seq);
8695
8696 self.ids.link_dcid_to_path_id(new_dcid_seq, pid)?;
8697
8698 trace!(
8699 "{} path ID {} changed DCID: old seq num {} new seq num {}",
8700 self.trace_id, pid, dcid_seq, new_dcid_seq,
8701 );
8702 } else {
8703 // We cannot use this path anymore for now.
8704 path.active_dcid_seq = None;
8705
8706 trace!(
8707 "{} path ID {} cannot be used; DCID seq num {} has been retired",
8708 self.trace_id, pid, dcid_seq,
8709 );
8710 }
8711 }
8712
8713 // Propagate error (if any) now...
8714 new_dcid_res?;
8715 },
8716
8717 frame::Frame::RetireConnectionId { seq_num } => {
8718 if self.ids.zero_length_scid() {
8719 return Err(Error::InvalidState);
8720 }
8721
8722 if let Some(pid) = self.ids.retire_scid(seq_num, &hdr.dcid)? {
8723 let path = self.paths.get_mut(pid)?;
8724
8725 // Maybe we already linked a new SCID to that path.
8726 if path.active_scid_seq == Some(seq_num) {
8727 // XXX: We do not remove unused paths now, we instead
8728 // wait until we need to maintain more paths than the
8729 // host is willing to.
8730 path.active_scid_seq = None;
8731 }
8732 }
8733 },
8734
8735 frame::Frame::PathChallenge { data } => {
8736 self.path_challenge_rx_count += 1;
8737
8738 self.paths
8739 .get_mut(recv_path_id)?
8740 .on_challenge_received(data);
8741 },
8742
8743 frame::Frame::PathResponse { data } => {
8744 self.paths.on_response_received(data)?;
8745 },
8746
8747 frame::Frame::ConnectionClose {
8748 error_code, reason, ..
8749 } => {
8750 self.peer_error = Some(ConnectionError {
8751 is_app: false,
8752 error_code,
8753 reason,
8754 });
8755
8756 let path = self.paths.get_active()?;
8757 self.draining_timer = Some(now + (path.recovery.pto() * 3));
8758 },
8759
8760 frame::Frame::ApplicationClose { error_code, reason } => {
8761 self.peer_error = Some(ConnectionError {
8762 is_app: true,
8763 error_code,
8764 reason,
8765 });
8766
8767 let path = self.paths.get_active()?;
8768 self.draining_timer = Some(now + (path.recovery.pto() * 3));
8769 },
8770
8771 frame::Frame::HandshakeDone => {
8772 if self.is_server {
8773 return Err(Error::InvalidPacket);
8774 }
8775
8776 self.peer_verified_initial_address = true;
8777
8778 self.handshake_confirmed = true;
8779
8780 // Once the handshake is confirmed, we can drop Handshake keys.
8781 self.drop_epoch_state(packet::Epoch::Handshake, now);
8782 },
8783
8784 frame::Frame::Datagram { data } => {
8785 // Close the connection if DATAGRAMs are not enabled.
8786 // quiche always advertises support for 64K sized DATAGRAM
8787 // frames, as recommended by the standard, so we don't need a
8788 // size check.
8789 if !self.dgram_enabled() {
8790 return Err(Error::InvalidState);
8791 }
8792
8793 // If recv queue is full, discard oldest
8794 if self.dgram_recv_queue.is_full() {
8795 self.dgram_recv_queue.pop();
8796 }
8797
8798 self.dgram_recv_queue.push(data.into())?;
8799
8800 self.dgram_recv_count = self.dgram_recv_count.saturating_add(1);
8801
8802 let path = self.paths.get_mut(recv_path_id)?;
8803 path.dgram_recv_count = path.dgram_recv_count.saturating_add(1);
8804 },
8805
8806 frame::Frame::DatagramHeader { .. } => unreachable!(),
8807 }
8808
8809 Ok(())
8810 }
8811
    /// Drops the keys and recovery state for the given epoch.
    ///
    /// Does nothing if the epoch's keys are already gone. Otherwise clears
    /// the epoch's crypto context and packet number space, then notifies
    /// every path's recovery state that the packet number space was
    /// discarded so in-flight accounting for that epoch can be released.
    fn drop_epoch_state(&mut self, epoch: packet::Epoch, now: Instant) {
        let crypto_ctx = &mut self.crypto_ctx[epoch];
        // No opener means the epoch's keys were already dropped; this makes
        // the method idempotent.
        if crypto_ctx.crypto_open.is_none() {
            return;
        }
        crypto_ctx.clear();
        self.pkt_num_spaces[epoch].clear();

        // Snapshot the handshake status once; it is passed to every path.
        let handshake_status = self.handshake_status();
        for (_, p) in self.paths.iter_mut() {
            p.recovery
                .on_pkt_num_space_discarded(epoch, handshake_status, now);
        }

        trace!("{} dropped epoch {} state", self.trace_id, epoch);
    }
8829
8830 /// Returns the connection level flow control limit.
8831 fn max_rx_data(&self) -> u64 {
8832 self.flow_control.max_data()
8833 }
8834
8835 /// Returns true if the HANDSHAKE_DONE frame needs to be sent.
8836 fn should_send_handshake_done(&self) -> bool {
8837 self.is_established() && !self.handshake_done_sent && self.is_server
8838 }
8839
8840 /// Returns the idle timeout value.
8841 ///
8842 /// `None` is returned if both end-points disabled the idle timeout.
8843 fn idle_timeout(&self) -> Option<Duration> {
8844 // If the transport parameter is set to 0, then the respective endpoint
8845 // decided to disable the idle timeout. If both are disabled we should
8846 // not set any timeout.
8847 if self.local_transport_params.max_idle_timeout == 0 &&
8848 self.peer_transport_params.max_idle_timeout == 0
8849 {
8850 return None;
8851 }
8852
8853 // If the local endpoint or the peer disabled the idle timeout, use the
8854 // other peer's value, otherwise use the minimum of the two values.
8855 let idle_timeout = if self.local_transport_params.max_idle_timeout == 0 {
8856 self.peer_transport_params.max_idle_timeout
8857 } else if self.peer_transport_params.max_idle_timeout == 0 {
8858 self.local_transport_params.max_idle_timeout
8859 } else {
8860 cmp::min(
8861 self.local_transport_params.max_idle_timeout,
8862 self.peer_transport_params.max_idle_timeout,
8863 )
8864 };
8865
8866 let path_pto = match self.paths.get_active() {
8867 Ok(p) => p.recovery.pto(),
8868 Err(_) => Duration::ZERO,
8869 };
8870
8871 let idle_timeout = Duration::from_millis(idle_timeout);
8872 let idle_timeout = cmp::max(idle_timeout, 3 * path_pto);
8873
8874 Some(idle_timeout)
8875 }
8876
8877 /// Returns the connection's handshake status for use in loss recovery.
8878 fn handshake_status(&self) -> recovery::HandshakeStatus {
8879 recovery::HandshakeStatus {
8880 has_handshake_keys: self.crypto_ctx[packet::Epoch::Handshake]
8881 .has_keys(),
8882
8883 peer_verified_address: self.peer_verified_initial_address,
8884
8885 completed: self.is_established(),
8886 }
8887 }
8888
    /// Updates send capacity.
    ///
    /// The capacity is the smaller of the active path's available congestion
    /// window and the remaining connection-level flow control credit
    /// (`max_tx_data - tx_data`), scaled by `tx_cap_factor`.
    fn update_tx_cap(&mut self) {
        // Without an active path there is no congestion window to draw from.
        let cwin_available = match self.paths.get_active() {
            Ok(p) => p.recovery.cwnd_available() as u64,
            Err(_) => 0,
        };

        let cap =
            cmp::min(cwin_available, self.max_tx_data - self.tx_data) as usize;
        // Round up so a small non-zero capacity is not truncated to zero by
        // the scaling factor.
        self.tx_cap = (cap as f64 * self.tx_cap_factor).ceil() as usize;
    }
8900
8901 fn delivery_rate_check_if_app_limited(&self) -> bool {
8902 // Enter the app-limited phase of delivery rate when these conditions
8903 // are met:
8904 //
8905 // - The remaining capacity is higher than available bytes in cwnd (there
8906 // is more room to send).
8907 // - New data since the last send() is smaller than available bytes in
8908 // cwnd (we queued less than what we can send).
8909 // - There is room to send more data in cwnd.
8910 //
8911 // In application-limited phases the transmission rate is limited by the
8912 // application rather than the congestion control algorithm.
8913 //
8914 // Note that this is equivalent to CheckIfApplicationLimited() from the
8915 // delivery rate draft. This is also separate from `recovery.app_limited`
8916 // and only applies to delivery rate calculation.
8917 let cwin_available = self
8918 .paths
8919 .iter()
8920 .filter(|&(_, p)| p.active())
8921 .map(|(_, p)| p.recovery.cwnd_available())
8922 .sum();
8923
8924 ((self.tx_buffered + self.dgram_send_queue_byte_size()) < cwin_available) &&
8925 (self.tx_data.saturating_sub(self.last_tx_data)) <
8926 cwin_available as u64 &&
8927 cwin_available > 0
8928 }
8929
8930 fn check_tx_buffered_invariant(&mut self) {
8931 // tx_buffered should track bytes queued in the stream buffers
8932 // and unacked retransmitable bytes in the network.
8933 // If tx_buffered > 0 mark the tx_buffered_state if there are no
8934 // flushable streams and there no inflight bytes.
8935 //
8936 // It is normal to have tx_buffered == 0 while there are inflight bytes
8937 // since not QUIC frames are retransmittable; inflight tracks all bytes
8938 // on the network which are subject to congestion control.
8939 if self.tx_buffered > 0 &&
8940 !self.streams.has_flushable() &&
8941 !self
8942 .paths
8943 .iter()
8944 .any(|(_, p)| p.recovery.bytes_in_flight() > 0)
8945 {
8946 self.tx_buffered_state = TxBufferTrackingState::Inconsistent;
8947 }
8948 }
8949
    /// Sets the initial destination connection ID for the given path.
    ///
    /// Registers `cid` (with its optional stateless reset token) with the
    /// connection ID tracker and makes sequence number 0 the active DCID of
    /// `path_id`.
    ///
    /// Returns an error if `path_id` does not identify a known path.
    fn set_initial_dcid(
        &mut self, cid: ConnectionId<'static>, reset_token: Option<u128>,
        path_id: usize,
    ) -> Result<()> {
        self.ids.set_initial_dcid(cid, reset_token, Some(path_id));
        // The initial DCID always has sequence number 0.
        self.paths.get_mut(path_id)?.active_dcid_seq = Some(0);

        Ok(())
    }
8959
    /// Selects the path that the incoming packet belongs to, or creates a new
    /// one if no existing path matches.
    ///
    /// `recv_pid` is the already-known path for the packet's 4-tuple, if any;
    /// `dcid` is the destination connection ID carried by the packet (one of
    /// our SCIDs); `buf_len` is the received packet size, used to credit the
    /// new path's anti-amplification budget.
    ///
    /// Returns the identifier of the selected (possibly new) path.
    fn get_or_create_recv_path_id(
        &mut self, recv_pid: Option<usize>, dcid: &ConnectionId, buf_len: usize,
        info: &RecvInfo,
    ) -> Result<usize> {
        let ids = &mut self.ids;

        // The packet's DCID must map to one of our registered SCIDs.
        let (in_scid_seq, mut in_scid_pid) =
            ids.find_scid_seq(dcid).ok_or(Error::InvalidState)?;

        if let Some(recv_pid) = recv_pid {
            // If the path observes a change of SCID used, note it.
            let recv_path = self.paths.get_mut(recv_pid)?;

            let cid_entry =
                recv_path.active_scid_seq.and_then(|v| ids.get_scid(v).ok());

            if cid_entry.map(|e| &e.cid) != Some(dcid) {
                let incoming_cid_entry = ids.get_scid(in_scid_seq)?;

                // The path the incoming CID was previously linked to, if any.
                let prev_recv_pid =
                    incoming_cid_entry.path_id.unwrap_or(recv_pid);

                if prev_recv_pid != recv_pid {
                    trace!(
                        "{} peer reused CID {:?} from path {} on path {}",
                        self.trace_id,
                        dcid,
                        prev_recv_pid,
                        recv_pid
                    );

                    // TODO: reset congestion control.
                }

                trace!(
                    "{} path ID {} now see SCID with seq num {}",
                    self.trace_id,
                    recv_pid,
                    in_scid_seq
                );

                // Re-link the path to the SCID it is now observed using.
                recv_path.active_scid_seq = Some(in_scid_seq);
                ids.link_scid_to_path_id(in_scid_seq, recv_pid)?;
            }

            return Ok(recv_pid);
        }

        // This is a new 4-tuple. See if the CID has not been assigned on
        // another path.

        // Ignore this step if we are using zero-length SCID.
        if ids.zero_length_scid() {
            in_scid_pid = None;
        }

        if let Some(in_scid_pid) = in_scid_pid {
            // This CID has been used by another path. If we have the
            // room to do so, create a new `Path` structure holding this
            // new 4-tuple. Otherwise, drop the packet.
            let old_path = self.paths.get_mut(in_scid_pid)?;
            let old_local_addr = old_path.local_addr();
            let old_peer_addr = old_path.peer_addr();

            trace!(
                "{} reused CID seq {} of ({},{}) (path {}) on ({},{})",
                self.trace_id,
                in_scid_seq,
                old_local_addr,
                old_peer_addr,
                in_scid_pid,
                info.to,
                info.from
            );

            // Notify the application.
            self.paths.notify_event(PathEvent::ReusedSourceConnectionId(
                in_scid_seq,
                (old_local_addr, old_peer_addr),
                (info.to, info.from),
            ));
        }

        // This is a new path using an unassigned CID; create it!
        let mut path = path::Path::new(
            info.to,
            info.from,
            &self.recovery_config,
            self.path_challenge_recv_max_queue_len,
            false,
            None,
        );

        // Limit what can be sent on the unvalidated path in proportion to
        // what was received (anti-amplification budget).
        path.max_send_bytes = buf_len * self.max_amplification_factor;
        path.active_scid_seq = Some(in_scid_seq);

        // Automatically probes the new path.
        path.request_validation();

        let pid = self.paths.insert_path(path, self.is_server)?;

        // Do not record path reuse.
        if in_scid_pid.is_none() {
            ids.link_scid_to_path_id(in_scid_seq, pid)?;
        }

        Ok(pid)
    }
9070
9071 /// Selects the path on which the next packet must be sent.
9072 fn get_send_path_id(
9073 &self, from: Option<SocketAddr>, to: Option<SocketAddr>,
9074 ) -> Result<usize> {
9075 // A probing packet must be sent, but only if the connection is fully
9076 // established.
9077 if self.is_established() {
9078 let mut probing = self
9079 .paths
9080 .iter()
9081 .filter(|(_, p)| from.is_none() || Some(p.local_addr()) == from)
9082 .filter(|(_, p)| to.is_none() || Some(p.peer_addr()) == to)
9083 .filter(|(_, p)| p.active_dcid_seq.is_some())
9084 .filter(|(_, p)| p.probing_required())
9085 .map(|(pid, _)| pid);
9086
9087 if let Some(pid) = probing.next() {
9088 return Ok(pid);
9089 }
9090 }
9091
9092 if let Some((pid, p)) = self.paths.get_active_with_pid() {
9093 if from.is_some() && Some(p.local_addr()) != from {
9094 return Err(Error::Done);
9095 }
9096
9097 if to.is_some() && Some(p.peer_addr()) != to {
9098 return Err(Error::Done);
9099 }
9100
9101 return Ok(pid);
9102 };
9103
9104 Err(Error::InvalidState)
9105 }
9106
    /// Sets the path with identifier 'path_id' to be active.
    ///
    /// Before switching, recovery on the previously active path (if any) is
    /// notified of the change for every packet number epoch, and any packets
    /// it declares lost as a result are added to the connection-wide loss
    /// counters.
    fn set_active_path(&mut self, path_id: usize, now: Instant) -> Result<()> {
        if let Ok(old_active_path) = self.paths.get_active_mut() {
            for &e in packet::Epoch::epochs(
                packet::Epoch::Initial..=packet::Epoch::Application,
            ) {
                let (lost_packets, lost_bytes) = old_active_path
                    .recovery
                    .on_path_change(e, now, &self.trace_id);

                self.lost_count += lost_packets;
                self.lost_bytes += lost_bytes as u64;
            }
        }

        self.paths.set_active_path(path_id)
    }
9124
    /// Handles potential connection migration.
    ///
    /// If `new_pid` differs from the currently active path, the active path
    /// is switched to `new_pid`. When the new path has no DCID of its own
    /// and `disable_dcid_reuse` is false, the previous active path's DCID
    /// sequence is reused on the new path.
    fn on_peer_migrated(
        &mut self, new_pid: usize, disable_dcid_reuse: bool, now: Instant,
    ) -> Result<()> {
        let active_path_id = self.paths.get_active_path_id()?;

        // No migration: the packet arrived on the already-active path.
        if active_path_id == new_pid {
            return Ok(());
        }

        self.set_active_path(new_pid, now)?;

        let no_spare_dcid =
            self.paths.get_mut(new_pid)?.active_dcid_seq.is_none();

        if no_spare_dcid && !disable_dcid_reuse {
            // Reuse the previous active path's DCID on the new path.
            self.paths.get_mut(new_pid)?.active_dcid_seq =
                self.paths.get_mut(active_path_id)?.active_dcid_seq;
        }

        Ok(())
    }
9147
    /// Creates a new client-side path.
    ///
    /// Fails with `Error::InvalidState` when called on a server, and with
    /// `Error::OutOfIdentifiers` when no connection ID is available for the
    /// new path.
    ///
    /// Returns the identifier of the newly inserted path.
    fn create_path_on_client(
        &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
    ) -> Result<usize> {
        if self.is_server {
            return Err(Error::InvalidState);
        }

        // If we use zero-length SCID and go over our local active CID limit,
        // the `insert_path()` call will raise an error.
        if !self.ids.zero_length_scid() && self.ids.available_scids() == 0 {
            return Err(Error::OutOfIdentifiers);
        }

        // Do we have a spare DCID? If we are using zero-length DCID, just use
        // the default having sequence 0 (note that if we exceed our local CID
        // limit, the `insert_path()` call will raise an error.)
        let dcid_seq = if self.ids.zero_length_dcid() {
            0
        } else {
            self.ids
                .lowest_available_dcid_seq()
                .ok_or(Error::OutOfIdentifiers)?
        };

        let mut path = path::Path::new(
            local_addr,
            peer_addr,
            &self.recovery_config,
            self.path_challenge_recv_max_queue_len,
            false,
            None,
        );
        path.active_dcid_seq = Some(dcid_seq);

        let pid = self
            .paths
            .insert_path(path, false)
            .map_err(|_| Error::OutOfIdentifiers)?;
        // Record which path consumes this DCID sequence.
        self.ids.link_dcid_to_path_id(dcid_seq, pid)?;

        Ok(pid)
    }
9191
    // Marks the connection as closed and does any related tidyup.
    //
    // When the `qlog` feature is enabled, a `connection_closed` event is
    // emitted describing why the connection ended, and the qlog streamer is
    // shut down afterwards.
    fn mark_closed(&mut self) {
        #[cfg(feature = "qlog")]
        {
            // Pick the qlog payload based on how the connection ended:
            // handshake failure, idle timeout, peer-initiated close,
            // locally-initiated close, or unknown.
            let cc = match (self.is_established(), self.timed_out, &self.peer_error, &self.local_error) {
                // Never established: the handshake failed.
                (false, _, _, _) => qlog::events::quic::ConnectionClosed {
                    initiator: Some(TransportInitiator::Local),
                    connection_error: None,
                    application_error: None,
                    error_code: None,
                    internal_code: None,
                    reason: Some("Failed to establish connection".to_string()),
                    trigger: Some(qlog::events::quic::ConnectionClosedTrigger::HandshakeTimeout)
                },

                // Established but idle for too long.
                (true, true, _, _) => qlog::events::quic::ConnectionClosed {
                    initiator: Some(TransportInitiator::Local),
                    connection_error: None,
                    application_error: None,
                    error_code: None,
                    internal_code: None,
                    reason: Some("Idle timeout".to_string()),
                    trigger: Some(qlog::events::quic::ConnectionClosedTrigger::IdleTimeout)
                },

                // The peer closed the connection.
                (true, false, Some(peer_error), None) => {
                    let (connection_code, application_error, trigger) = if peer_error.is_app {
                        (None, Some(qlog::events::ApplicationError::Unknown), None)
                    } else {
                        let trigger = if peer_error.error_code == WireErrorCode::NoError as u64 {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Clean)
                        } else {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Error)
                        };

                        (Some(qlog::events::ConnectionClosedEventError::TransportError(qlog::events::quic::TransportError::Unknown)), None, trigger)
                    };

                    // TODO: select more appropriate connection_code and application_error than unknown.
                    qlog::events::quic::ConnectionClosed {
                        initiator: Some(TransportInitiator::Remote),
                        connection_error: connection_code,
                        application_error,
                        error_code: Some(peer_error.error_code),
                        internal_code: None,
                        reason: Some(String::from_utf8_lossy(&peer_error.reason).to_string()),
                        trigger,
                    }
                },

                // We closed the connection.
                (true, false, None, Some(local_error)) => {
                    let (connection_code, application_error, trigger) = if local_error.is_app {
                        (None, Some(qlog::events::ApplicationError::Unknown), None)
                    } else {
                        let trigger = if local_error.error_code == WireErrorCode::NoError as u64 {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Clean)
                        } else {
                            Some(qlog::events::quic::ConnectionClosedTrigger::Error)
                        };

                        (Some(qlog::events::ConnectionClosedEventError::TransportError(qlog::events::quic::TransportError::Unknown)), None, trigger)
                    };

                    // TODO: select more appropriate connection_code and application_error than unknown.
                    qlog::events::quic::ConnectionClosed {
                        initiator: Some(TransportInitiator::Local),
                        connection_error: connection_code,
                        application_error,
                        error_code: Some(local_error.error_code),
                        internal_code: None,
                        reason: Some(String::from_utf8_lossy(&local_error.reason).to_string()),
                        trigger,
                    }
                },

                // Unknown close reason (e.g. both sides reported an error).
                _ => qlog::events::quic::ConnectionClosed {
                    initiator: None,
                    connection_error: None,
                    application_error: None,
                    error_code: None,
                    internal_code: None,
                    reason: None,
                    trigger: None,
                },
            };

            qlog_with_type!(QLOG_CONNECTION_CLOSED, self.qlog, q, {
                let ev_data = EventData::QuicConnectionClosed(cc);

                q.add_event_data_now(ev_data).ok();
            });
            // Stop streaming qlog events for this connection.
            self.qlog.streamer = None;
        }
        self.closed = true;
    }
9287}
9288
#[cfg(feature = "boringssl-boring-crate")]
impl<F: BufFactory> AsMut<boring::ssl::SslRef> for Connection<F> {
    /// Returns a mutable reference to the connection's underlying BoringSSL
    /// session handle.
    fn as_mut(&mut self) -> &mut boring::ssl::SslRef {
        self.handshake.ssl_mut()
    }
}
9295
9296/// Maps an `Error` to `Error::Done`, or itself.
9297///
9298/// When a received packet that hasn't yet been authenticated triggers a failure
9299/// it should, in most cases, be ignored, instead of raising a connection error,
9300/// to avoid potential man-in-the-middle and man-on-the-side attacks.
9301///
9302/// However, if no other packet was previously received, the connection should
9303/// indeed be closed as the received packet might just be network background
9304/// noise, and it shouldn't keep resources occupied indefinitely.
9305///
9306/// This function maps an error to `Error::Done` to ignore a packet failure
9307/// without aborting the connection, except when no other packet was previously
9308/// received, in which case the error itself is returned, but only on the
9309/// server-side as the client will already have armed the idle timer.
9310///
9311/// This must only be used for errors preceding packet authentication. Failures
9312/// happening after a packet has been authenticated should still cause the
9313/// connection to be aborted.
9314fn drop_pkt_on_err(
9315 e: Error, recv_count: usize, is_server: bool, trace_id: &str,
9316) -> Error {
9317 // On the server, if no other packet has been successfully processed, abort
9318 // the connection to avoid keeping the connection open when only junk is
9319 // received.
9320 if is_server && recv_count == 0 {
9321 return e;
9322 }
9323
9324 trace!("{trace_id} dropped invalid packet");
9325
9326 // Ignore other invalid packets that haven't been authenticated to prevent
9327 // man-in-the-middle and man-on-the-side attacks.
9328 Error::Done
9329}
9330
/// Display helper for a `(source, destination)` socket address pair.
struct AddrTupleFmt(SocketAddr, SocketAddr);

impl std::fmt::Display for AddrTupleFmt {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // Pairs involving an unspecified address render as the empty string.
        if self.0.ip().is_unspecified() || self.1.ip().is_unspecified() {
            return Ok(());
        }

        write!(f, "src:{} dst:{}", self.0, self.1)
    }
}
9344
/// Statistics about the connection.
///
/// A connection's statistics can be collected using the [`stats()`] method.
///
/// [`stats()`]: struct.Connection.html#method.stats
#[derive(Clone, Default)]
pub struct Stats {
    /// The number of QUIC packets received.
    pub recv: usize,

    /// The number of QUIC packets sent.
    pub sent: usize,

    /// The number of QUIC packets that were lost.
    pub lost: usize,

    /// The number of QUIC packets that were marked as lost but later acked.
    pub spurious_lost: usize,

    /// The number of sent QUIC packets with retransmitted data.
    pub retrans: usize,

    /// The number of sent bytes.
    pub sent_bytes: u64,

    /// The number of received bytes.
    pub recv_bytes: u64,

    /// The number of sent bytes that were acked.
    pub acked_bytes: u64,

    /// The number of sent bytes that were declared lost.
    pub lost_bytes: u64,

    /// The number of stream bytes retransmitted.
    pub stream_retrans_bytes: u64,

    /// The number of DATAGRAM frames received.
    pub dgram_recv: usize,

    /// The number of DATAGRAM frames sent.
    pub dgram_sent: usize,

    /// The number of known paths for the connection.
    pub paths_count: usize,

    /// The number of streams reset by local.
    pub reset_stream_count_local: u64,

    /// The number of streams stopped by local.
    pub stopped_stream_count_local: u64,

    /// The number of streams reset by remote.
    pub reset_stream_count_remote: u64,

    /// The number of streams stopped by remote.
    pub stopped_stream_count_remote: u64,

    /// The number of DATA_BLOCKED frames sent due to hitting the connection
    /// flow control limit.
    pub data_blocked_sent_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames sent due to a stream hitting
    /// the stream flow control limit.
    pub stream_data_blocked_sent_count: u64,

    /// The number of DATA_BLOCKED frames received from the remote.
    pub data_blocked_recv_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames received from the remote.
    pub stream_data_blocked_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames for bidirectional streams received
    /// from the remote, indicating the peer is blocked on opening new
    /// bidirectional streams.
    pub streams_blocked_bidi_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames for unidirectional streams received
    /// from the remote, indicating the peer is blocked on opening new
    /// unidirectional streams.
    pub streams_blocked_uni_recv_count: u64,

    /// The total number of PATH_CHALLENGE frames that were received.
    pub path_challenge_rx_count: u64,

    /// The number of times send() was blocked because the anti-amplification
    /// budget (bytes received × max_amplification_factor) was exhausted.
    pub amplification_limited_count: u64,

    /// Total duration during which this side of the connection was
    /// actively sending bytes or waiting for those bytes to be acked.
    pub bytes_in_flight_duration: Duration,

    /// Health state of the connection's tx_buffered.
    pub tx_buffered_state: TxBufferTrackingState,
}
9441
9442impl std::fmt::Debug for Stats {
9443 #[inline]
9444 fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
9445 write!(
9446 f,
9447 "recv={} sent={} lost={} retrans={}",
9448 self.recv, self.sent, self.lost, self.retrans,
9449 )?;
9450
9451 write!(
9452 f,
9453 " sent_bytes={} recv_bytes={} lost_bytes={}",
9454 self.sent_bytes, self.recv_bytes, self.lost_bytes,
9455 )?;
9456
9457 Ok(())
9458 }
9459}
9460
9461#[doc(hidden)]
9462#[cfg(any(test, feature = "internal"))]
9463pub mod test_utils;
9464
9465#[cfg(test)]
9466mod tests;
9467
9468pub use crate::packet::ConnectionId;
9469pub use crate::packet::Header;
9470pub use crate::packet::Type;
9471
9472pub use crate::path::PathEvent;
9473pub use crate::path::PathStats;
9474pub use crate::path::SocketAddrIter;
9475
9476pub use crate::recovery::BbrBwLoReductionStrategy;
9477pub use crate::recovery::BbrParams;
9478pub use crate::recovery::CongestionControlAlgorithm;
9479pub use crate::recovery::StartupExit;
9480pub use crate::recovery::StartupExitReason;
9481
9482pub use crate::stream::StreamIter;
9483
9484pub use crate::transport_params::TransportParams;
9485pub use crate::transport_params::UnknownTransportParameter;
9486pub use crate::transport_params::UnknownTransportParameterIterator;
9487pub use crate::transport_params::UnknownTransportParameters;
9488
9489pub use crate::buffers::BufFactory;
9490pub use crate::buffers::BufSplit;
9491
9492pub use crate::error::ConnectionError;
9493pub use crate::error::Error;
9494pub use crate::error::Result;
9495pub use crate::error::WireErrorCode;
9496
9497mod buffers;
9498mod cid;
9499mod crypto;
9500mod dgram;
9501mod error;
9502#[cfg(feature = "ffi")]
9503mod ffi;
9504mod flowcontrol;
9505mod frame;
9506pub mod h3;
9507mod minmax;
9508mod packet;
9509mod path;
9510mod pmtud;
9511mod rand;
9512mod range_buf;
9513mod ranges;
9514mod recovery;
9515mod stream;
9516mod tls;
9517mod transport_params;