quiche/lib.rs
1// Copyright (C) 2018-2019, Cloudflare, Inc.
2// All rights reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// * Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// * Redistributions in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
16// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
19// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27//! 🥧 Savoury implementation of the QUIC transport protocol and HTTP/3.
28//!
29//! [quiche] is an implementation of the QUIC transport protocol and HTTP/3 as
30//! specified by the [IETF]. It provides a low level API for processing QUIC
31//! packets and handling connection state. The application is responsible for
32//! providing I/O (e.g. sockets handling) as well as an event loop with support
33//! for timers.
34//!
35//! [quiche]: https://github.com/cloudflare/quiche/
36//! [ietf]: https://quicwg.org/
37//!
38//! ## Configuring connections
39//!
40//! The first step in establishing a QUIC connection using quiche is creating a
41//! [`Config`] object:
42//!
43//! ```
44//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
45//! config.set_application_protos(&[b"example-proto"]);
46//!
47//! // Additional configuration specific to application and use case...
48//! # Ok::<(), quiche::Error>(())
49//! ```
50//!
51//! The [`Config`] object controls important aspects of the QUIC connection such
52//! as QUIC version, ALPN IDs, flow control, congestion control, idle timeout
53//! and other properties or features.
54//!
55//! QUIC is a general-purpose transport protocol and there are several
56//! configuration properties where there is no reasonable default value. For
57//! example, the permitted number of concurrent streams of any particular type
58//! is dependent on the application running over QUIC, and other use-case
59//! specific concerns.
60//!
//! quiche defaults several properties to zero; applications most likely need
62//! to set these to something else to satisfy their needs using the following:
63//!
64//! - [`set_initial_max_streams_bidi()`]
65//! - [`set_initial_max_streams_uni()`]
66//! - [`set_initial_max_data()`]
67//! - [`set_initial_max_stream_data_bidi_local()`]
68//! - [`set_initial_max_stream_data_bidi_remote()`]
69//! - [`set_initial_max_stream_data_uni()`]
70//!
71//! [`Config`] also holds TLS configuration. This can be changed by mutators on
//! an existing object, or by constructing a TLS context manually and
73//! creating a configuration using [`with_boring_ssl_ctx_builder()`].
74//!
75//! A configuration object can be shared among multiple connections.
76//!
77//! ### Connection setup
78//!
79//! On the client-side the [`connect()`] utility function can be used to create
80//! a new connection, while [`accept()`] is for servers:
81//!
82//! ```
83//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
84//! # let server_name = "quic.tech";
85//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
86//! # let peer = "127.0.0.1:1234".parse().unwrap();
87//! # let local = "127.0.0.1:4321".parse().unwrap();
88//! // Client connection.
89//! let conn =
90//! quiche::connect(Some(&server_name), &scid, local, peer, &mut config)?;
91//!
92//! // Server connection.
93//! # let peer = "127.0.0.1:1234".parse().unwrap();
94//! # let local = "127.0.0.1:4321".parse().unwrap();
95//! let conn = quiche::accept(&scid, None, local, peer, &mut config)?;
96//! # Ok::<(), quiche::Error>(())
97//! ```
98//!
99//! In both cases, the application is responsible for generating a new source
100//! connection ID that will be used to identify the new connection.
101//!
//! The application also needs to pass the address of the remote peer of the
103//! connection: in the case of a client that would be the address of the server
104//! it is trying to connect to, and for a server that is the address of the
105//! client that initiated the connection.
106//!
107//! ## Handling incoming packets
108//!
109//! Using the connection's [`recv()`] method the application can process
110//! incoming packets that belong to that connection from the network:
111//!
112//! ```no_run
113//! # let mut buf = [0; 512];
114//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
115//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
116//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
117//! # let peer = "127.0.0.1:1234".parse().unwrap();
118//! # let local = "127.0.0.1:4321".parse().unwrap();
119//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
120//! let to = socket.local_addr().unwrap();
121//!
122//! loop {
123//! let (read, from) = socket.recv_from(&mut buf).unwrap();
124//!
125//! let recv_info = quiche::RecvInfo { from, to };
126//!
127//! let read = match conn.recv(&mut buf[..read], recv_info) {
128//! Ok(v) => v,
129//!
130//! Err(quiche::Error::Done) => {
131//! // Done reading.
132//! break;
133//! },
134//!
135//! Err(e) => {
136//! // An error occurred, handle it.
137//! break;
138//! },
139//! };
140//! }
141//! # Ok::<(), quiche::Error>(())
142//! ```
143//!
144//! The application has to pass a [`RecvInfo`] structure in order to provide
145//! additional information about the received packet (such as the address it
146//! was received from).
147//!
148//! ## Generating outgoing packets
149//!
//! Outgoing packets are generated using the connection's [`send()`] method
151//! instead:
152//!
153//! ```no_run
154//! # let mut out = [0; 512];
155//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
156//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
157//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
158//! # let peer = "127.0.0.1:1234".parse().unwrap();
159//! # let local = "127.0.0.1:4321".parse().unwrap();
160//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
161//! loop {
162//! let (write, send_info) = match conn.send(&mut out) {
163//! Ok(v) => v,
164//!
165//! Err(quiche::Error::Done) => {
166//! // Done writing.
167//! break;
168//! },
169//!
170//! Err(e) => {
171//! // An error occurred, handle it.
172//! break;
173//! },
174//! };
175//!
176//! socket.send_to(&out[..write], &send_info.to).unwrap();
177//! }
178//! # Ok::<(), quiche::Error>(())
179//! ```
180//!
181//! The application will be provided with a [`SendInfo`] structure providing
182//! additional information about the newly created packet (such as the address
183//! the packet should be sent to).
184//!
185//! When packets are sent, the application is responsible for maintaining a
186//! timer to react to time-based connection events. The timer expiration can be
187//! obtained using the connection's [`timeout()`] method.
188//!
189//! ```
190//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
191//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
192//! # let peer = "127.0.0.1:1234".parse().unwrap();
193//! # let local = "127.0.0.1:4321".parse().unwrap();
194//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
195//! let timeout = conn.timeout();
196//! # Ok::<(), quiche::Error>(())
197//! ```
198//!
199//! The application is responsible for providing a timer implementation, which
200//! can be specific to the operating system or networking framework used. When
201//! a timer expires, the connection's [`on_timeout()`] method should be called,
202//! after which additional packets might need to be sent on the network:
203//!
204//! ```no_run
205//! # let mut out = [0; 512];
206//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
207//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
208//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
209//! # let peer = "127.0.0.1:1234".parse().unwrap();
210//! # let local = "127.0.0.1:4321".parse().unwrap();
211//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
212//! // Timeout expired, handle it.
213//! conn.on_timeout();
214//!
215//! // Send more packets as needed after timeout.
216//! loop {
217//! let (write, send_info) = match conn.send(&mut out) {
218//! Ok(v) => v,
219//!
220//! Err(quiche::Error::Done) => {
221//! // Done writing.
222//! break;
223//! },
224//!
225//! Err(e) => {
226//! // An error occurred, handle it.
227//! break;
228//! },
229//! };
230//!
231//! socket.send_to(&out[..write], &send_info.to).unwrap();
232//! }
233//! # Ok::<(), quiche::Error>(())
234//! ```
235//!
236//! ### Pacing
237//!
238//! It is recommended that applications [pace] sending of outgoing packets to
239//! avoid creating packet bursts that could cause short-term congestion and
240//! losses in the network.
241//!
242//! quiche exposes pacing hints for outgoing packets through the [`at`] field
243//! of the [`SendInfo`] structure that is returned by the [`send()`] method.
244//! This field represents the time when a specific packet should be sent into
245//! the network.
246//!
247//! Applications can use these hints by artificially delaying the sending of
248//! packets through platform-specific mechanisms (such as the [`SO_TXTIME`]
249//! socket option on Linux), or custom methods (for example by using user-space
250//! timers).
251//!
252//! [pace]: https://datatracker.ietf.org/doc/html/rfc9002#section-7.7
253//! [`SO_TXTIME`]: https://man7.org/linux/man-pages/man8/tc-etf.8.html
254//!
255//! ## Sending and receiving stream data
256//!
257//! After some back and forth, the connection will complete its handshake and
258//! will be ready for sending or receiving application data.
259//!
260//! Data can be sent on a stream by using the [`stream_send()`] method:
261//!
262//! ```no_run
263//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
264//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
265//! # let peer = "127.0.0.1:1234".parse().unwrap();
266//! # let local = "127.0.0.1:4321".parse().unwrap();
267//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
268//! if conn.is_established() {
269//! // Handshake completed, send some data on stream 0.
270//! conn.stream_send(0, b"hello", true)?;
271//! }
272//! # Ok::<(), quiche::Error>(())
273//! ```
274//!
275//! The application can check whether there are any readable streams by using
276//! the connection's [`readable()`] method, which returns an iterator over all
277//! the streams that have outstanding data to read.
278//!
279//! The [`stream_recv()`] method can then be used to retrieve the application
280//! data from the readable stream:
281//!
282//! ```no_run
283//! # let mut buf = [0; 512];
284//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
285//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
286//! # let peer = "127.0.0.1:1234".parse().unwrap();
287//! # let local = "127.0.0.1:4321".parse().unwrap();
288//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
289//! if conn.is_established() {
290//! // Iterate over readable streams.
291//! for stream_id in conn.readable() {
292//! // Stream is readable, read until there's no more data.
293//! while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
294//! println!("Got {} bytes on stream {}", read, stream_id);
295//! }
296//! }
297//! }
298//! # Ok::<(), quiche::Error>(())
299//! ```
300//!
301//! ## HTTP/3
302//!
303//! The quiche [HTTP/3 module] provides a high level API for sending and
304//! receiving HTTP requests and responses on top of the QUIC transport protocol.
305//!
306//! [`Config`]: https://docs.quic.tech/quiche/struct.Config.html
307//! [`set_initial_max_streams_bidi()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_streams_bidi
308//! [`set_initial_max_streams_uni()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_streams_uni
309//! [`set_initial_max_data()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_data
310//! [`set_initial_max_stream_data_bidi_local()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_bidi_local
311//! [`set_initial_max_stream_data_bidi_remote()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_bidi_remote
312//! [`set_initial_max_stream_data_uni()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_uni
313//! [`with_boring_ssl_ctx_builder()`]: https://docs.quic.tech/quiche/struct.Config.html#method.with_boring_ssl_ctx_builder
314//! [`connect()`]: fn.connect.html
315//! [`accept()`]: fn.accept.html
316//! [`recv()`]: struct.Connection.html#method.recv
317//! [`RecvInfo`]: struct.RecvInfo.html
318//! [`send()`]: struct.Connection.html#method.send
319//! [`SendInfo`]: struct.SendInfo.html
320//! [`at`]: struct.SendInfo.html#structfield.at
321//! [`timeout()`]: struct.Connection.html#method.timeout
322//! [`on_timeout()`]: struct.Connection.html#method.on_timeout
323//! [`stream_send()`]: struct.Connection.html#method.stream_send
324//! [`readable()`]: struct.Connection.html#method.readable
325//! [`stream_recv()`]: struct.Connection.html#method.stream_recv
326//! [HTTP/3 module]: h3/index.html
327//!
328//! ## Congestion Control
329//!
330//! The quiche library provides a high-level API for configuring which
331//! congestion control algorithm to use throughout the QUIC connection.
332//!
333//! When a QUIC connection is created, the application can optionally choose
334//! which CC algorithm to use. See [`CongestionControlAlgorithm`] for currently
335//! available congestion control algorithms.
336//!
337//! For example:
338//!
339//! ```
340//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
341//! config.set_cc_algorithm(quiche::CongestionControlAlgorithm::Reno);
342//! ```
343//!
344//! Alternatively, you can configure the congestion control algorithm to use
345//! by its name.
346//!
347//! ```
348//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
349//! config.set_cc_algorithm_name("reno").unwrap();
350//! ```
351//!
352//! Note that the CC algorithm should be configured before calling [`connect()`]
353//! or [`accept()`]. Otherwise the connection will use a default CC algorithm.
354//!
355//! [`CongestionControlAlgorithm`]: enum.CongestionControlAlgorithm.html
356//!
357//! ## Feature flags
358//!
359//! quiche defines a number of [feature flags] to reduce the amount of compiled
360//! code and dependencies:
361//!
362//! * `boringssl-vendored` (default): Build the vendored BoringSSL library.
363//!
364//! * `boringssl-boring-crate`: Use the BoringSSL library provided by the
365//! [boring] crate. It takes precedence over `boringssl-vendored` if both
366//! features are enabled.
367//!
368//! * `pkg-config-meta`: Generate pkg-config metadata file for libquiche.
369//!
370//! * `ffi`: Build and expose the FFI API.
371//!
372//! * `qlog`: Enable support for the [qlog] logging format.
373//!
374//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section
375//! [boring]: https://crates.io/crates/boring
376//! [qlog]: https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema
377
378#![allow(clippy::upper_case_acronyms)]
379#![warn(missing_docs)]
380#![warn(unused_qualifications)]
381#![cfg_attr(docsrs, feature(doc_cfg))]
382
383#[macro_use]
384extern crate log;
385
386use std::cmp;
387
388use std::collections::VecDeque;
389
390use std::net::SocketAddr;
391
392use std::str::FromStr;
393
394use std::sync::Arc;
395
396use std::time::Duration;
397use std::time::Instant;
398
399#[cfg(feature = "qlog")]
400use qlog::events::connectivity::ConnectivityEventType;
401#[cfg(feature = "qlog")]
402use qlog::events::connectivity::TransportOwner;
403#[cfg(feature = "qlog")]
404use qlog::events::quic::RecoveryEventType;
405#[cfg(feature = "qlog")]
406use qlog::events::quic::TransportEventType;
407#[cfg(feature = "qlog")]
408use qlog::events::DataRecipient;
409#[cfg(feature = "qlog")]
410use qlog::events::Event;
411#[cfg(feature = "qlog")]
412use qlog::events::EventData;
413#[cfg(feature = "qlog")]
414use qlog::events::EventImportance;
415#[cfg(feature = "qlog")]
416use qlog::events::EventType;
417#[cfg(feature = "qlog")]
418use qlog::events::RawInfo;
419
420use smallvec::SmallVec;
421
422use crate::buffers::DefaultBufFactory;
423
424use crate::recovery::OnAckReceivedOutcome;
425use crate::recovery::OnLossDetectionTimeoutOutcome;
426use crate::recovery::RecoveryOps;
427use crate::recovery::ReleaseDecision;
428
429use crate::stream::RecvAction;
430use crate::stream::StreamPriorityKey;
431
/// The current QUIC wire version.
pub const PROTOCOL_VERSION: u32 = PROTOCOL_VERSION_V1;

/// Supported QUIC versions.
const PROTOCOL_VERSION_V1: u32 = 0x0000_0001;

/// The maximum length of a connection ID.
pub const MAX_CONN_ID_LEN: usize = packet::MAX_CID_LEN as usize;

/// The minimum length of Initial packets sent by a client.
pub const MIN_CLIENT_INITIAL_LEN: usize = 1200;

/// The default initial RTT.
///
/// 333 ms is the value recommended by RFC 9002 Section 6.2.2.
const DEFAULT_INITIAL_RTT: Duration = Duration::from_millis(333);

// The minimum packet payload length.
//
// NOTE(review): presumably required so header protection has enough bytes to
// sample — confirm at the use site.
const PAYLOAD_MIN_LEN: usize = 4;

// PATH_CHALLENGE (9 bytes) + AEAD tag (16 bytes).
const MIN_PROBING_SIZE: usize = 25;

// The default anti-amplification limit factor (see RFC 9000 Section 8,
// which specifies a factor of 3 before address validation).
const MAX_AMPLIFICATION_FACTOR: usize = 3;

// The maximum number of tracked packet number ranges that need to be acked.
//
// This represents more or less how many ack blocks can fit in a typical packet.
const MAX_ACK_RANGES: usize = 68;

// The highest possible stream ID allowed.
const MAX_STREAM_ID: u64 = 1 << 60;

// The default max_datagram_size used in congestion control.
const MAX_SEND_UDP_PAYLOAD_SIZE: usize = 1200;

// The default length of DATAGRAM queues.
const DEFAULT_MAX_DGRAM_QUEUE_LEN: usize = 0;

// The default length of PATH_CHALLENGE receive queue.
const DEFAULT_MAX_PATH_CHALLENGE_RX_QUEUE_LEN: usize = 3;

// The DATAGRAM standard recommends either none or 65536 as maximum DATAGRAM
// frames size. We enforce the recommendation for forward compatibility.
const MAX_DGRAM_FRAME_SIZE: u64 = 65536;

// The length of the payload length field.
const PAYLOAD_LENGTH_LEN: usize = 2;

// The number of undecryptable packets that can be buffered.
const MAX_UNDECRYPTABLE_PACKETS: usize = 10;

// Mask of reserved version bit patterns (RFC 9000 Section 15). A version is
// treated as reserved by `is_reserved_version()` when all of its set bits
// fall inside this mask.
const RESERVED_VERSION_MASK: u32 = 0xfafafafa;

// The default size of the receiver connection flow control window.
const DEFAULT_CONNECTION_WINDOW: u64 = 48 * 1024;

// The maximum size of the receiver connection flow control window.
const MAX_CONNECTION_WINDOW: u64 = 24 * 1024 * 1024;

// How much larger the connection flow control window needs to be, relative
// to the stream flow control window.
const CONNECTION_WINDOW_FACTOR: f64 = 1.5;

// How many probing packet timeouts do we tolerate before considering the path
// validation as failed.
const MAX_PROBING_TIMEOUTS: usize = 3;

// The default initial congestion window size in terms of packet count.
const DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS: usize = 10;

// The maximum data offset that can be stored in a crypto stream.
const MAX_CRYPTO_STREAM_OFFSET: u64 = 1 << 16;

// The send capacity factor (1.0 = no scaling of the send capacity).
const TX_CAP_FACTOR: f64 = 1.0;
505
/// Ancillary information about incoming packets.
///
/// This is passed to the connection's `recv()` method alongside the packet
/// bytes so the connection knows which network path the packet arrived on.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct RecvInfo {
    /// The remote address the packet was received from.
    pub from: SocketAddr,

    /// The local address the packet was received on.
    pub to: SocketAddr,
}
515
/// Ancillary information about outgoing packets.
///
/// This is returned by the connection's `send()` method so the application
/// knows where — and, for pacing, when — the packet should be sent.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SendInfo {
    /// The local address the packet should be sent from.
    pub from: SocketAddr,

    /// The remote address the packet should be sent to.
    pub to: SocketAddr,

    /// The time to send the packet out.
    ///
    /// See [Pacing] for more details.
    ///
    /// [Pacing]: index.html#pacing
    pub at: Instant,
}
532
/// The side of the stream to be shut down.
///
/// This should be used when calling [`stream_shutdown()`].
///
/// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
// NOTE(review): `repr(C)` keeps the discriminants stable, presumably for the
// `ffi` feature's C API — confirm.
#[repr(C)]
#[derive(PartialEq, Eq)]
pub enum Shutdown {
    /// Stop receiving stream data.
    Read = 0,

    /// Stop sending stream data.
    Write = 1,
}
547
/// Qlog logging level.
///
/// Levels are cumulative: each level also logs all events of the levels
/// below it.
#[repr(C)]
#[cfg(feature = "qlog")]
#[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
pub enum QlogLevel {
    /// Logs any events of Core importance.
    Core = 0,

    /// Logs any events of Core and Base importance.
    Base = 1,

    /// Logs any events of Core, Base and Extra importance.
    Extra = 2,
}
562
/// Stores configuration shared between multiple connections.
pub struct Config {
    // Transport parameters advertised to peers for connections created from
    // this config.
    local_transport_params: TransportParams,

    // QUIC wire version used when creating connections.
    version: u32,

    // Shared TLS context (certificates, keys, ALPN, session tickets).
    tls_ctx: tls::Context,

    // ALPN protocol list, kept in addition to the copy installed on the TLS
    // context by `set_application_protos()`.
    application_protos: Vec<Vec<u8>>,

    // Whether GREASE values are sent (defaults to true).
    grease: bool,

    cc_algorithm: CongestionControlAlgorithm,
    custom_bbr_params: Option<BbrParams>,
    initial_congestion_window_packets: usize,
    enable_relaxed_loss_threshold: bool,

    // Path MTU discovery toggle and per-size probe budget.
    pmtud: bool,
    pmtud_max_probes: u8,

    hystart: bool,

    pacing: bool,
    /// Send rate limit in Mbps
    max_pacing_rate: Option<u64>,

    // Scales the send capacity; see `set_send_capacity_factor()`.
    tx_cap_factor: f64,

    dgram_recv_max_queue_len: usize,
    dgram_send_max_queue_len: usize,

    path_challenge_recv_max_queue_len: usize,

    max_send_udp_payload_size: usize,

    max_connection_window: u64,
    max_stream_window: u64,

    // Anti-amplification limit factor (defaults to 3).
    max_amplification_factor: usize,

    disable_dcid_reuse: bool,

    // NOTE(review): when `Some(n)`, unknown peer transport parameters appear
    // to be tracked — presumably up to `n` bytes; confirm at the use site.
    track_unknown_transport_params: Option<usize>,

    initial_rtt: Duration,
}
609
610// See https://quicwg.org/base-drafts/rfc9000.html#section-15
611fn is_reserved_version(version: u32) -> bool {
612 version & RESERVED_VERSION_MASK == version
613}
614
615impl Config {
616 /// Creates a config object with the given version.
617 ///
618 /// ## Examples:
619 ///
620 /// ```
621 /// let config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
622 /// # Ok::<(), quiche::Error>(())
623 /// ```
624 pub fn new(version: u32) -> Result<Config> {
625 Self::with_tls_ctx(version, tls::Context::new()?)
626 }
627
628 /// Creates a config object with the given version and
629 /// [`SslContextBuilder`].
630 ///
631 /// This is useful for applications that wish to manually configure
632 /// [`SslContextBuilder`].
633 ///
634 /// [`SslContextBuilder`]: https://docs.rs/boring/latest/boring/ssl/struct.SslContextBuilder.html
635 #[cfg(feature = "boringssl-boring-crate")]
636 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
637 pub fn with_boring_ssl_ctx_builder(
638 version: u32, tls_ctx_builder: boring::ssl::SslContextBuilder,
639 ) -> Result<Config> {
640 Self::with_tls_ctx(version, tls::Context::from_boring(tls_ctx_builder))
641 }
642
643 fn with_tls_ctx(version: u32, tls_ctx: tls::Context) -> Result<Config> {
644 if !is_reserved_version(version) && !version_is_supported(version) {
645 return Err(Error::UnknownVersion);
646 }
647
648 Ok(Config {
649 local_transport_params: TransportParams::default(),
650 version,
651 tls_ctx,
652 application_protos: Vec::new(),
653 grease: true,
654 cc_algorithm: CongestionControlAlgorithm::CUBIC,
655 custom_bbr_params: None,
656 initial_congestion_window_packets:
657 DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS,
658 enable_relaxed_loss_threshold: false,
659 pmtud: false,
660 pmtud_max_probes: pmtud::MAX_PROBES_DEFAULT,
661 hystart: true,
662 pacing: true,
663 max_pacing_rate: None,
664
665 tx_cap_factor: TX_CAP_FACTOR,
666
667 dgram_recv_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,
668 dgram_send_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,
669
670 path_challenge_recv_max_queue_len:
671 DEFAULT_MAX_PATH_CHALLENGE_RX_QUEUE_LEN,
672
673 max_send_udp_payload_size: MAX_SEND_UDP_PAYLOAD_SIZE,
674
675 max_connection_window: MAX_CONNECTION_WINDOW,
676 max_stream_window: stream::MAX_STREAM_WINDOW,
677
678 max_amplification_factor: MAX_AMPLIFICATION_FACTOR,
679
680 disable_dcid_reuse: false,
681
682 track_unknown_transport_params: None,
683 initial_rtt: DEFAULT_INITIAL_RTT,
684 })
685 }
686
    /// Configures the given certificate chain.
    ///
    /// The content of `file` is parsed as a PEM-encoded leaf certificate,
    /// followed by optional intermediate certificates.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_cert_chain_from_pem_file("/path/to/cert.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_cert_chain_from_pem_file(&mut self, file: &str) -> Result<()> {
        // Certificate state lives on the TLS context, which is shared by all
        // connections created from this config.
        self.tls_ctx.use_certificate_chain_file(file)
    }
702
    /// Configures the given private key.
    ///
    /// The content of `file` is parsed as a PEM-encoded private key.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_priv_key_from_pem_file("/path/to/key.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_priv_key_from_pem_file(&mut self, file: &str) -> Result<()> {
        // The key is installed on the shared TLS context.
        self.tls_ctx.use_privkey_file(file)
    }
717
    /// Specifies a file where trusted CA certificates are stored for the
    /// purposes of certificate verification.
    ///
    /// The content of `file` is parsed as a PEM-encoded certificate chain.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_verify_locations_from_file("/path/to/cert.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_verify_locations_from_file(&mut self, file: &str) -> Result<()> {
        // Trusted roots are loaded into the shared TLS context.
        self.tls_ctx.load_verify_locations_from_file(file)
    }
733
    /// Specifies a directory where trusted CA certificates are stored for the
    /// purposes of certificate verification.
    ///
    /// The content of `dir` a set of PEM-encoded certificate chains.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_verify_locations_from_directory("/path/to/certs")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_verify_locations_from_directory(
        &mut self, dir: &str,
    ) -> Result<()> {
        // Trusted roots are loaded into the shared TLS context.
        self.tls_ctx.load_verify_locations_from_directory(dir)
    }
751
    /// Configures whether to verify the peer's certificate.
    ///
    /// This should usually be `true` for client-side connections and `false`
    /// for server-side ones.
    ///
    /// Note that by default, no verification is performed.
    ///
    /// Also note that on the server-side, enabling verification of the peer
    /// will trigger a certificate request and make authentication errors
    /// fatal, but will still allow anonymous clients (i.e. clients that
    /// don't present a certificate at all). Servers can check whether a
    /// client presented a certificate by calling [`peer_cert()`] if they
    /// need to.
    ///
    /// [`peer_cert()`]: struct.Connection.html#method.peer_cert
    pub fn verify_peer(&mut self, verify: bool) {
        // The verification policy is enforced by the shared TLS context.
        self.tls_ctx.set_verify(verify);
    }
770
    /// Configures whether to do path MTU discovery.
    ///
    /// The default value is `false`.
    ///
    /// The number of probe attempts per size can be tuned with
    /// [`set_pmtud_max_probes()`](Self::set_pmtud_max_probes).
    pub fn discover_pmtu(&mut self, discover: bool) {
        self.pmtud = discover;
    }
777
778 /// Configures the maximum number of PMTUD probe attempts before treating
779 /// a probe size as failed.
780 ///
781 /// Defaults to 3 per [RFC 8899 Section 5.1.2](https://datatracker.ietf.org/doc/html/rfc8899#section-5.1.2).
782 /// If 0 is passed, the default value is used.
783 pub fn set_pmtud_max_probes(&mut self, max_probes: u8) {
784 self.pmtud_max_probes = max_probes;
785 }
786
    /// Configures whether to send GREASE values.
    ///
    /// GREASE exercises reserved protocol codepoints to discourage peers
    /// from ossifying on the current wire patterns.
    ///
    /// The default value is `true`.
    pub fn grease(&mut self, grease: bool) {
        self.grease = grease;
    }
793
    /// Enables logging of secrets.
    ///
    /// When logging is enabled, the [`set_keylog()`] method must be called on
    /// the connection for its cryptographic secrets to be logged in the
    /// [keylog] format to the specified writer.
    ///
    /// [`set_keylog()`]: struct.Connection.html#method.set_keylog
    /// [keylog]: https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format
    pub fn log_keys(&mut self) {
        // Keylogging is armed on the shared TLS context; the per-connection
        // writer is supplied later via `set_keylog()`.
        self.tls_ctx.enable_keylog();
    }
805
    /// Configures the session ticket key material.
    ///
    /// On the server this key will be used to encrypt and decrypt session
    /// tickets, used to perform session resumption without server-side state.
    ///
    /// By default a key is generated internally, and rotated regularly, so
    /// applications don't need to call this unless they need to use a
    /// specific key (e.g. in order to support resumption across multiple
    /// servers), in which case the application is also responsible for
    /// rotating the key to provide forward secrecy.
    pub fn set_ticket_key(&mut self, key: &[u8]) -> Result<()> {
        // The key is installed on the shared TLS context, so it applies to
        // every connection created from this config.
        self.tls_ctx.set_ticket_key(key)
    }
819
    /// Enables sending or receiving early data.
    pub fn enable_early_data(&mut self) {
        // Early data (0-RTT) negotiation is handled by the TLS stack.
        self.tls_ctx.set_early_data_enabled(true);
    }
824
825 /// Configures the list of supported application protocols.
826 ///
827 /// On the client this configures the list of protocols to send to the
828 /// server as part of the ALPN extension.
829 ///
830 /// On the server this configures the list of supported protocols to match
831 /// against the client-supplied list.
832 ///
833 /// Applications must set a value, but no default is provided.
834 ///
835 /// ## Examples:
836 ///
837 /// ```
838 /// # let mut config = quiche::Config::new(0xbabababa)?;
839 /// config.set_application_protos(&[b"http/1.1", b"http/0.9"]);
840 /// # Ok::<(), quiche::Error>(())
841 /// ```
842 pub fn set_application_protos(
843 &mut self, protos_list: &[&[u8]],
844 ) -> Result<()> {
845 self.application_protos =
846 protos_list.iter().map(|s| s.to_vec()).collect();
847
848 self.tls_ctx.set_alpn(protos_list)
849 }
850
851 /// Configures the list of supported application protocols using wire
852 /// format.
853 ///
854 /// The list of protocols `protos` must be a series of non-empty, 8-bit
855 /// length-prefixed strings.
856 ///
857 /// See [`set_application_protos`](Self::set_application_protos) for more
858 /// background about application protocols.
859 ///
860 /// ## Examples:
861 ///
862 /// ```
863 /// # let mut config = quiche::Config::new(0xbabababa)?;
864 /// config.set_application_protos_wire_format(b"\x08http/1.1\x08http/0.9")?;
865 /// # Ok::<(), quiche::Error>(())
866 /// ```
867 pub fn set_application_protos_wire_format(
868 &mut self, protos: &[u8],
869 ) -> Result<()> {
870 let mut b = octets::Octets::with_slice(protos);
871
872 let mut protos_list = Vec::new();
873
874 while let Ok(proto) = b.get_bytes_with_u8_length() {
875 protos_list.push(proto.buf());
876 }
877
878 self.set_application_protos(&protos_list)
879 }
880
    /// Sets the anti-amplification limit factor.
    ///
    /// Before a client's address is validated, a QUIC server may only send a
    /// limited multiple of the bytes it has received (RFC 9000 Section 8.1);
    /// this configures that multiple.
    ///
    /// The default value is `3`.
    pub fn set_max_amplification_factor(&mut self, v: usize) {
        self.max_amplification_factor = v;
    }
887
    /// Sets the send capacity factor.
    ///
    /// Stored as `tx_cap_factor`; presumably used to scale the connection's
    /// reported send capacity (`tx_cap`) — confirm against the send path.
    ///
    /// The default value is `1`.
    pub fn set_send_capacity_factor(&mut self, v: f64) {
        self.tx_cap_factor = v;
    }
894
    /// Sets the connection's initial RTT.
    ///
    /// This is the round-trip estimate used before any RTT sample has been
    /// taken for the connection.
    ///
    /// The default value is `333` milliseconds.
    pub fn set_initial_rtt(&mut self, v: Duration) {
        self.initial_rtt = v;
    }
901
902 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
903 ///
904 /// The default value is infinite, that is, no timeout is used.
905 pub fn set_max_idle_timeout(&mut self, v: u64) {
906 self.local_transport_params.max_idle_timeout =
907 cmp::min(v, octets::MAX_VAR_INT);
908 }
909
    /// Sets the `max_udp_payload_size` transport parameter.
    ///
    /// This advertises the maximum size of incoming UDP payloads the local
    /// endpoint is willing to receive.
    ///
    /// The default value is `65527`.
    pub fn set_max_recv_udp_payload_size(&mut self, v: usize) {
        self.local_transport_params.max_udp_payload_size =
            cmp::min(v as u64, octets::MAX_VAR_INT);
    }
917
918 /// Sets the maximum outgoing UDP payload size.
919 ///
920 /// The default and minimum value is `1200`.
921 pub fn set_max_send_udp_payload_size(&mut self, v: usize) {
922 self.max_send_udp_payload_size = cmp::max(v, MAX_SEND_UDP_PAYLOAD_SIZE);
923 }
924
925 /// Sets the `initial_max_data` transport parameter.
926 ///
927 /// When set to a non-zero value quiche will only allow at most `v` bytes of
928 /// incoming stream data to be buffered for the whole connection (that is,
929 /// data that is not yet read by the application) and will allow more data
930 /// to be received as the buffer is consumed by the application.
931 ///
932 /// When set to zero, either explicitly or via the default, quiche will not
933 /// give any flow control to the peer, preventing it from sending any stream
934 /// data.
935 ///
936 /// The default value is `0`.
937 pub fn set_initial_max_data(&mut self, v: u64) {
938 self.local_transport_params.initial_max_data =
939 cmp::min(v, octets::MAX_VAR_INT);
940 }
941
942 /// Sets the `initial_max_stream_data_bidi_local` transport parameter.
943 ///
944 /// When set to a non-zero value quiche will only allow at most `v` bytes
945 /// of incoming stream data to be buffered for each locally-initiated
946 /// bidirectional stream (that is, data that is not yet read by the
947 /// application) and will allow more data to be received as the buffer is
948 /// consumed by the application.
949 ///
950 /// When set to zero, either explicitly or via the default, quiche will not
951 /// give any flow control to the peer, preventing it from sending any stream
952 /// data.
953 ///
954 /// The default value is `0`.
955 pub fn set_initial_max_stream_data_bidi_local(&mut self, v: u64) {
956 self.local_transport_params
957 .initial_max_stream_data_bidi_local =
958 cmp::min(v, octets::MAX_VAR_INT);
959 }
960
961 /// Sets the `initial_max_stream_data_bidi_remote` transport parameter.
962 ///
963 /// When set to a non-zero value quiche will only allow at most `v` bytes
964 /// of incoming stream data to be buffered for each remotely-initiated
965 /// bidirectional stream (that is, data that is not yet read by the
966 /// application) and will allow more data to be received as the buffer is
967 /// consumed by the application.
968 ///
969 /// When set to zero, either explicitly or via the default, quiche will not
970 /// give any flow control to the peer, preventing it from sending any stream
971 /// data.
972 ///
973 /// The default value is `0`.
974 pub fn set_initial_max_stream_data_bidi_remote(&mut self, v: u64) {
975 self.local_transport_params
976 .initial_max_stream_data_bidi_remote =
977 cmp::min(v, octets::MAX_VAR_INT);
978 }
979
980 /// Sets the `initial_max_stream_data_uni` transport parameter.
981 ///
982 /// When set to a non-zero value quiche will only allow at most `v` bytes
983 /// of incoming stream data to be buffered for each unidirectional stream
984 /// (that is, data that is not yet read by the application) and will allow
985 /// more data to be received as the buffer is consumed by the application.
986 ///
987 /// When set to zero, either explicitly or via the default, quiche will not
988 /// give any flow control to the peer, preventing it from sending any stream
989 /// data.
990 ///
991 /// The default value is `0`.
992 pub fn set_initial_max_stream_data_uni(&mut self, v: u64) {
993 self.local_transport_params.initial_max_stream_data_uni =
994 cmp::min(v, octets::MAX_VAR_INT);
995 }
996
    /// Sets the `initial_max_streams_bidi` transport parameter.
    ///
    /// When set to a non-zero value quiche will only allow `v` number of
    /// concurrent remotely-initiated bidirectional streams to be open at any
    /// given time and will increase the limit automatically as streams are
    /// completed.
    ///
    /// When set to zero, either explicitly or via the default, quiche will
    /// not allow the peer to open any bidirectional streams.
    ///
    /// A bidirectional stream is considered completed when all incoming data
    /// has been read by the application (up to the `fin` offset) or the
    /// stream's read direction has been shutdown, and all outgoing data has
    /// been acked by the peer (up to the `fin` offset) or the stream's write
    /// direction has been shutdown.
    ///
    /// The default value is `0`.
    pub fn set_initial_max_streams_bidi(&mut self, v: u64) {
        self.local_transport_params.initial_max_streams_bidi =
            cmp::min(v, octets::MAX_VAR_INT);
    }
1018
    /// Sets the `initial_max_streams_uni` transport parameter.
    ///
    /// When set to a non-zero value quiche will only allow `v` number of
    /// concurrent remotely-initiated unidirectional streams to be open at any
    /// given time and will increase the limit automatically as streams are
    /// completed.
    ///
    /// When set to zero, either explicitly or via the default, quiche will
    /// not allow the peer to open any unidirectional streams.
    ///
    /// A unidirectional stream is considered completed when all incoming data
    /// has been read by the application (up to the `fin` offset) or the
    /// stream's read direction has been shutdown.
    ///
    /// The default value is `0`.
    pub fn set_initial_max_streams_uni(&mut self, v: u64) {
        self.local_transport_params.initial_max_streams_uni =
            cmp::min(v, octets::MAX_VAR_INT);
    }
1038
1039 /// Sets the `ack_delay_exponent` transport parameter.
1040 ///
1041 /// The default value is `3`.
1042 pub fn set_ack_delay_exponent(&mut self, v: u64) {
1043 self.local_transport_params.ack_delay_exponent =
1044 cmp::min(v, octets::MAX_VAR_INT);
1045 }
1046
1047 /// Sets the `max_ack_delay` transport parameter.
1048 ///
1049 /// The default value is `25`.
1050 pub fn set_max_ack_delay(&mut self, v: u64) {
1051 self.local_transport_params.max_ack_delay =
1052 cmp::min(v, octets::MAX_VAR_INT);
1053 }
1054
1055 /// Sets the `active_connection_id_limit` transport parameter.
1056 ///
1057 /// The default value is `2`. Lower values will be ignored.
1058 pub fn set_active_connection_id_limit(&mut self, v: u64) {
1059 if v >= 2 {
1060 self.local_transport_params.active_conn_id_limit =
1061 cmp::min(v, octets::MAX_VAR_INT);
1062 }
1063 }
1064
    /// Sets the `disable_active_migration` transport parameter.
    ///
    /// When set to `true`, the local endpoint advertises that the peer should
    /// not actively migrate the connection to a new address (RFC 9000
    /// Section 9).
    ///
    /// The default value is `false`.
    pub fn set_disable_active_migration(&mut self, v: bool) {
        self.local_transport_params.disable_active_migration = v;
    }
1071
    /// Sets the congestion control algorithm used.
    ///
    /// See [`set_cc_algorithm_name`](Self::set_cc_algorithm_name) to select
    /// an algorithm by its string name instead.
    ///
    /// The default value is `CongestionControlAlgorithm::CUBIC`.
    pub fn set_cc_algorithm(&mut self, algo: CongestionControlAlgorithm) {
        self.cc_algorithm = algo;
    }
1078
    /// Sets custom BBR settings.
    ///
    /// This API is experimental and will be removed in the future.
    ///
    /// Currently this only applies if `cc_algorithm` is set to
    /// `CongestionControlAlgorithm::Bbr2Gcongestion`.
    ///
    /// The default value is `None`.
    #[cfg(feature = "internal")]
    #[doc(hidden)]
    pub fn set_custom_bbr_params(&mut self, custom_bbr_settings: BbrParams) {
        self.custom_bbr_params = Some(custom_bbr_settings);
    }
1092
1093 /// Sets the congestion control algorithm used by string.
1094 ///
1095 /// The default value is `cubic`. On error `Error::CongestionControl`
1096 /// will be returned.
1097 ///
1098 /// ## Examples:
1099 ///
1100 /// ```
1101 /// # let mut config = quiche::Config::new(0xbabababa)?;
1102 /// config.set_cc_algorithm_name("reno");
1103 /// # Ok::<(), quiche::Error>(())
1104 /// ```
1105 pub fn set_cc_algorithm_name(&mut self, name: &str) -> Result<()> {
1106 self.cc_algorithm = CongestionControlAlgorithm::from_str(name)?;
1107
1108 Ok(())
1109 }
1110
    /// Sets the initial congestion window size, expressed as a number of
    /// packets.
    ///
    /// The default value is 10.
    pub fn set_initial_congestion_window_packets(&mut self, packets: usize) {
        self.initial_congestion_window_packets = packets;
    }
1117
    /// Configures whether to enable relaxed loss detection on spurious loss.
    ///
    /// NOTE(review): the exact threshold adjustment is implemented in the
    /// recovery module — see its documentation for details.
    ///
    /// The default value is false.
    pub fn set_enable_relaxed_loss_threshold(&mut self, enable: bool) {
        self.enable_relaxed_loss_threshold = enable;
    }
1124
    /// Configures whether to enable HyStart++ (RFC 9406).
    ///
    /// The default value is `true`.
    pub fn enable_hystart(&mut self, v: bool) {
        self.hystart = v;
    }
1131
    /// Configures whether to enable pacing.
    ///
    /// Pacing spreads packet transmissions over time instead of sending them
    /// in bursts.
    ///
    /// The default value is `true`.
    pub fn enable_pacing(&mut self, v: bool) {
        self.pacing = v;
    }
1138
    /// Sets the max value for pacing rate.
    ///
    /// NOTE(review): units are presumably bytes per second — confirm against
    /// the recovery/pacing implementation.
    ///
    /// By default pacing rate is not limited.
    pub fn set_max_pacing_rate(&mut self, v: u64) {
        self.max_pacing_rate = Some(v);
    }
1145
1146 /// Configures whether to enable receiving DATAGRAM frames.
1147 ///
1148 /// When enabled, the `max_datagram_frame_size` transport parameter is set
1149 /// to 65536 as recommended by draft-ietf-quic-datagram-01.
1150 ///
1151 /// The default is `false`.
1152 pub fn enable_dgram(
1153 &mut self, enabled: bool, recv_queue_len: usize, send_queue_len: usize,
1154 ) {
1155 self.local_transport_params.max_datagram_frame_size = if enabled {
1156 Some(MAX_DGRAM_FRAME_SIZE)
1157 } else {
1158 None
1159 };
1160 self.dgram_recv_max_queue_len = recv_queue_len;
1161 self.dgram_send_max_queue_len = send_queue_len;
1162 }
1163
    /// Configures the max number of queued received PATH_CHALLENGE frames.
    ///
    /// When an endpoint receives a PATH_CHALLENGE frame and the queue is full,
    /// the frame is discarded.
    ///
    /// The default is `3`.
    pub fn set_path_challenge_recv_max_queue_len(&mut self, queue_len: usize) {
        self.path_challenge_recv_max_queue_len = queue_len;
    }
1173
    /// Sets the maximum size of the connection window, in bytes.
    ///
    /// The default value is `MAX_CONNECTION_WINDOW` (24MBytes).
    pub fn set_max_connection_window(&mut self, v: u64) {
        self.max_connection_window = v;
    }
1180
    /// Sets the maximum size of the stream window, in bytes.
    ///
    /// The default value is `MAX_STREAM_WINDOW` (16MBytes).
    pub fn set_max_stream_window(&mut self, v: u64) {
        self.max_stream_window = v;
    }
1187
    /// Sets the initial stateless reset token.
    ///
    /// This value is only advertised by servers. Setting a stateless reset
    /// token as a client has no effect on the connection.
    ///
    /// The default value is `None`.
    pub fn set_stateless_reset_token(&mut self, v: Option<u128>) {
        self.local_transport_params.stateless_reset_token = v;
    }
1197
    /// Sets whether the QUIC connection should avoid reusing DCIDs over
    /// different paths.
    ///
    /// When set to `true`, it ensures that a destination Connection ID is
    /// never reused on different paths. Such behaviour may lead to a
    /// connection stall if the peer performs a non-voluntary migration (e.g.,
    /// NAT rebinding) and does not provide additional destination Connection
    /// IDs to handle such an event.
    ///
    /// The default value is `false`.
    pub fn set_disable_dcid_reuse(&mut self, v: bool) {
        self.disable_dcid_reuse = v;
    }
1211
    /// Enables tracking unknown transport parameters.
    ///
    /// `size` specifies the maximum number of bytes used to track unknown
    /// transport parameters. The size includes the identifier and its value.
    /// If storing a transport parameter would cause the limit to be exceeded,
    /// it is quietly dropped.
    ///
    /// The default is that the feature is disabled.
    pub fn enable_track_unknown_transport_parameters(&mut self, size: usize) {
        self.track_unknown_transport_params = Some(size);
    }
1223}
1224
/// Tracks the health of the `tx_buffered` value.
#[derive(Clone, Copy, Debug, Default, PartialEq)]
pub enum TxBufferTrackingState {
    /// The send buffer is in a good state.
    #[default]
    Ok,
    /// The send buffer is in an inconsistent state, which could lead to
    /// connection stalls or excess buffering due to bugs we haven't
    /// tracked down yet.
    Inconsistent,
}
1236
/// A QUIC connection.
pub struct Connection<F = DefaultBufFactory>
where
    F: BufFactory,
{
    /// QUIC wire version used for the connection.
    version: u32,

    /// Connection Identifiers.
    ids: cid::ConnectionIdentifiers,

    /// Unique opaque ID for the connection that can be used for logging.
    trace_id: String,

    /// Packet number spaces.
    pkt_num_spaces: [packet::PktNumSpace; packet::Epoch::count()],

    /// The crypto context.
    crypto_ctx: [packet::CryptoContext; packet::Epoch::count()],

    /// Next packet number.
    next_pkt_num: u64,

    // TODO
    // combine with `next_pkt_num`
    /// Track the packet skip context.
    pkt_num_manager: packet::PktNumManager,

    /// Peer's transport parameters.
    peer_transport_params: TransportParams,

    /// If tracking unknown transport parameters from a peer, how much space to
    /// use in bytes.
    peer_transport_params_track_unknown: Option<usize>,

    /// Local transport parameters.
    local_transport_params: TransportParams,

    /// TLS handshake state.
    handshake: tls::Handshake,

    /// Serialized TLS session buffer.
    ///
    /// This field is populated when a new session ticket is processed on the
    /// client. On the server this is empty.
    session: Option<Vec<u8>>,

    /// The configuration for recovery.
    recovery_config: recovery::RecoveryConfig,

    /// The path manager.
    paths: path::PathMap,

    /// PATH_CHALLENGE receive queue max length.
    path_challenge_recv_max_queue_len: usize,

    /// Total number of received PATH_CHALLENGE frames.
    path_challenge_rx_count: u64,

    /// List of supported application protocols.
    application_protos: Vec<Vec<u8>>,

    /// Total number of received packets.
    recv_count: usize,

    /// Total number of sent packets.
    sent_count: usize,

    /// Total number of lost packets.
    lost_count: usize,

    /// Total number of lost packets that were later acked.
    spurious_lost_count: usize,

    /// Total number of packets sent with data retransmitted.
    retrans_count: usize,

    /// Total number of sent DATAGRAM frames.
    dgram_sent_count: usize,

    /// Total number of received DATAGRAM frames.
    dgram_recv_count: usize,

    /// Total number of bytes received from the peer.
    rx_data: u64,

    /// Receiver flow controller.
    flow_control: flowcontrol::FlowControl,

    /// Whether we send MAX_DATA frame.
    should_send_max_data: bool,

    /// True if there is a pending MAX_STREAMS_BIDI frame to send.
    should_send_max_streams_bidi: bool,

    /// True if there is a pending MAX_STREAMS_UNI frame to send.
    should_send_max_streams_uni: bool,

    /// Number of stream data bytes that can be buffered.
    tx_cap: usize,

    /// The send capacity factor.
    tx_cap_factor: f64,

    /// Number of bytes buffered in the send buffer.
    tx_buffered: usize,

    /// Tracks the health of `tx_buffered`.
    tx_buffered_state: TxBufferTrackingState,

    /// Total number of bytes sent to the peer.
    tx_data: u64,

    /// Peer's flow control limit for the connection.
    max_tx_data: u64,

    /// Last tx_data before running a full send() loop.
    last_tx_data: u64,

    /// Total number of bytes retransmitted over the connection.
    /// This counts only STREAM and CRYPTO data.
    stream_retrans_bytes: u64,

    /// Total number of bytes sent over the connection.
    sent_bytes: u64,

    /// Total number of bytes received over the connection.
    recv_bytes: u64,

    /// Total number of bytes sent acked over the connection.
    acked_bytes: u64,

    /// Total number of bytes sent lost over the connection.
    lost_bytes: u64,

    /// Streams map, indexed by stream ID.
    streams: stream::StreamMap<F>,

    /// Peer's original destination connection ID. Used by the client to
    /// validate the server's transport parameter.
    odcid: Option<ConnectionId<'static>>,

    /// Peer's retry source connection ID. Used by the client during stateless
    /// retry to validate the server's transport parameter.
    rscid: Option<ConnectionId<'static>>,

    /// Received address verification token.
    token: Option<Vec<u8>>,

    /// Error code and reason to be sent to the peer in a CONNECTION_CLOSE
    /// frame.
    local_error: Option<ConnectionError>,

    /// Error code and reason received from the peer in a CONNECTION_CLOSE
    /// frame.
    peer_error: Option<ConnectionError>,

    /// The connection-level limit at which send blocking occurred.
    blocked_limit: Option<u64>,

    /// Idle timeout expiration time.
    idle_timer: Option<Instant>,

    /// Draining timeout expiration time.
    draining_timer: Option<Instant>,

    /// List of raw packets that were received before they could be decrypted.
    undecryptable_pkts: VecDeque<(Vec<u8>, RecvInfo)>,

    /// The negotiated ALPN protocol.
    alpn: Vec<u8>,

    /// Whether this is a server-side connection.
    is_server: bool,

    /// Whether the initial secrets have been derived.
    derived_initial_secrets: bool,

    /// Whether a version negotiation packet has already been received. Only
    /// relevant for client connections.
    did_version_negotiation: bool,

    /// Whether stateless retry has been performed.
    did_retry: bool,

    /// Whether the peer already updated its connection ID.
    got_peer_conn_id: bool,

    /// Whether the peer verified our initial address.
    peer_verified_initial_address: bool,

    /// Whether the peer's transport parameters were parsed.
    parsed_peer_transport_params: bool,

    /// Whether the connection handshake has been completed.
    handshake_completed: bool,

    /// Whether the HANDSHAKE_DONE frame has been sent.
    handshake_done_sent: bool,

    /// Whether the HANDSHAKE_DONE frame has been acked.
    handshake_done_acked: bool,

    /// Whether the connection handshake has been confirmed.
    handshake_confirmed: bool,

    /// Key phase bit used for outgoing protected packets.
    key_phase: bool,

    /// Whether an ack-eliciting packet has been sent since last receiving a
    /// packet.
    ack_eliciting_sent: bool,

    /// Whether the connection is closed.
    closed: bool,

    /// Whether the connection was timed out.
    timed_out: bool,

    /// Whether to send GREASE.
    grease: bool,

    /// TLS keylog writer.
    keylog: Option<Box<dyn std::io::Write + Send + Sync>>,

    /// Qlog output info (only present when built with the `qlog` feature).
    #[cfg(feature = "qlog")]
    qlog: QlogInfo,

    /// DATAGRAM receive queue.
    dgram_recv_queue: dgram::DatagramQueue,

    /// DATAGRAM send queue.
    dgram_send_queue: dgram::DatagramQueue,

    /// Whether to emit DATAGRAM frames in the next packet.
    emit_dgram: bool,

    /// Whether the connection should prevent from reusing destination
    /// Connection IDs when the peer migrates.
    disable_dcid_reuse: bool,

    /// The number of streams reset by local.
    reset_stream_local_count: u64,

    /// The number of streams stopped by local.
    stopped_stream_local_count: u64,

    /// The number of streams reset by remote.
    reset_stream_remote_count: u64,

    /// The number of streams stopped by remote.
    stopped_stream_remote_count: u64,

    /// The number of DATA_BLOCKED frames sent due to hitting the connection
    /// flow control limit.
    data_blocked_sent_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames sent due to a stream hitting
    /// the stream flow control limit.
    stream_data_blocked_sent_count: u64,

    /// The number of DATA_BLOCKED frames received from the remote endpoint.
    data_blocked_recv_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames received from the remote
    /// endpoint.
    stream_data_blocked_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames received from the remote endpoint
    /// indicating the peer is blocked on opening new bidirectional streams.
    streams_blocked_bidi_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames received from the remote endpoint
    /// indicating the peer is blocked on opening new unidirectional streams.
    streams_blocked_uni_recv_count: u64,

    /// The anti-amplification limit factor.
    max_amplification_factor: usize,
}
1513}
1514
1515/// Creates a new server-side connection.
1516///
1517/// The `scid` parameter represents the server's source connection ID, while
1518/// the optional `odcid` parameter represents the original destination ID the
1519/// client sent before a Retry packet (this is only required when using the
1520/// [`retry()`] function). See also the [`accept_with_retry()`] function for
1521/// more advanced retry cases.
1522///
1523/// [`retry()`]: fn.retry.html
1524///
1525/// ## Examples:
1526///
1527/// ```no_run
1528/// # let mut config = quiche::Config::new(0xbabababa)?;
1529/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
1530/// # let local = "127.0.0.1:0".parse().unwrap();
1531/// # let peer = "127.0.0.1:1234".parse().unwrap();
1532/// let conn = quiche::accept(&scid, None, local, peer, &mut config)?;
1533/// # Ok::<(), quiche::Error>(())
1534/// ```
1535#[inline(always)]
1536pub fn accept(
1537 scid: &ConnectionId, odcid: Option<&ConnectionId>, local: SocketAddr,
1538 peer: SocketAddr, config: &mut Config,
1539) -> Result<Connection> {
1540 accept_with_buf_factory(scid, odcid, local, peer, config)
1541}
1542
/// Creates a new server-side connection, with a custom buffer generation
/// method.
///
/// The buffers generated can be anything that can be dereferenced as a byte
/// slice. See [`accept`] and [`BufFactory`] for more info.
#[inline]
pub fn accept_with_buf_factory<F: BufFactory>(
    scid: &ConnectionId, odcid: Option<&ConnectionId>, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    // For connections with `odcid` set, we historically used `retry_source_cid =
    // scid`. Keep this behavior to preserve backwards compatibility.
    // `accept_with_retry` allows the SCIDs to be specified separately.
    let retry_cids = odcid.map(|odcid| RetryConnectionIds {
        original_destination_cid: odcid,
        retry_source_cid: scid,
    });
    Connection::new(scid, retry_cids, None, local, peer, config, true)
}
1562
/// A wrapper for the connection IDs used in [`accept_with_retry`].
pub struct RetryConnectionIds<'a> {
    /// The DCID of the first Initial packet received by the server, which
    /// triggered the Retry packet.
    pub original_destination_cid: &'a ConnectionId<'a>,
    /// The SCID of the Retry packet sent by the server. This can be different
    /// from the new connection's SCID.
    pub retry_source_cid: &'a ConnectionId<'a>,
}
1572
/// Creates a new server-side connection after the client responded to a Retry
/// packet.
///
/// To generate a Retry packet in the first place, use the [`retry()`] function.
///
/// The `scid` parameter represents the server's source connection ID, which can
/// be freshly generated after the application has successfully verified the
/// Retry. `retry_cids` is used to tie the new connection to the Initial + Retry
/// exchange that preceded the connection's creation.
///
/// The DCID of the client's Initial packet is inherently untrusted data. It is
/// safe to use the DCID in the `retry_source_cid` field of the
/// `RetryConnectionIds` provided to this function. However, using the Initial's
/// DCID for the `scid` parameter carries risks. Applications are advised to
/// implement their own DCID validation steps before using the DCID in that
/// manner.
///
/// [`retry()`]: fn.retry.html
#[inline]
pub fn accept_with_retry<F: BufFactory>(
    scid: &ConnectionId, retry_cids: RetryConnectionIds, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    Connection::new(scid, Some(retry_cids), None, local, peer, config, true)
}
1596
1597/// Creates a new client-side connection.
1598///
1599/// The `scid` parameter is used as the connection's source connection ID,
1600/// while the optional `server_name` parameter is used to verify the peer's
1601/// certificate.
1602///
1603/// ## Examples:
1604///
1605/// ```no_run
1606/// # let mut config = quiche::Config::new(0xbabababa)?;
1607/// # let server_name = "quic.tech";
1608/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
1609/// # let local = "127.0.0.1:4321".parse().unwrap();
1610/// # let peer = "127.0.0.1:1234".parse().unwrap();
1611/// let conn =
1612/// quiche::connect(Some(&server_name), &scid, local, peer, &mut config)?;
1613/// # Ok::<(), quiche::Error>(())
1614/// ```
1615#[inline]
1616pub fn connect(
1617 server_name: Option<&str>, scid: &ConnectionId, local: SocketAddr,
1618 peer: SocketAddr, config: &mut Config,
1619) -> Result<Connection> {
1620 let mut conn = Connection::new(scid, None, None, local, peer, config, false)?;
1621
1622 if let Some(server_name) = server_name {
1623 conn.handshake.set_host_name(server_name)?;
1624 }
1625
1626 Ok(conn)
1627}
1628
/// Creates a new client-side connection using the given DCID initially.
///
/// Be aware that [RFC 9000] places requirements for unpredictability and length
/// on the client DCID field. This function is dangerous if these requirements
/// are not satisfied.
///
/// The `scid` parameter is used as the connection's source connection ID, while
/// the optional `server_name` parameter is used to verify the peer's
/// certificate.
///
/// Only available when built with the `custom-client-dcid` feature.
///
/// [RFC 9000]: <https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3>
#[cfg(feature = "custom-client-dcid")]
#[cfg_attr(docsrs, doc(cfg(feature = "custom-client-dcid")))]
pub fn connect_with_dcid(
    server_name: Option<&str>, scid: &ConnectionId, dcid: &ConnectionId,
    local: SocketAddr, peer: SocketAddr, config: &mut Config,
) -> Result<Connection> {
    let mut conn =
        Connection::new(scid, None, Some(dcid), local, peer, config, false)?;

    if let Some(server_name) = server_name {
        conn.handshake.set_host_name(server_name)?;
    }

    Ok(conn)
}
1655
/// Creates a new client-side connection, with a custom buffer generation
/// method.
///
/// The buffers generated can be anything that can be dereferenced as a byte
/// slice. See [`connect`] and [`BufFactory`] for more info.
#[inline]
pub fn connect_with_buffer_factory<F: BufFactory>(
    server_name: Option<&str>, scid: &ConnectionId, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    let mut conn = Connection::new(scid, None, None, local, peer, config, false)?;

    if let Some(server_name) = server_name {
        conn.handshake.set_host_name(server_name)?;
    }

    Ok(conn)
}
1674
/// Creates a new client-side connection, with a custom buffer generation
/// method, using the given DCID initially.
///
/// Be aware that [RFC 9000] places requirements for unpredictability and
/// length on the client DCID field. This function is dangerous if these
/// requirements are not satisfied.
///
/// The buffers generated can be anything that can be dereferenced as a byte
/// slice. See [`connect`] and [`BufFactory`] for more info.
///
/// [RFC 9000]: <https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3>
#[cfg(feature = "custom-client-dcid")]
#[cfg_attr(docsrs, doc(cfg(feature = "custom-client-dcid")))]
pub fn connect_with_dcid_and_buffer_factory<F: BufFactory>(
    server_name: Option<&str>, scid: &ConnectionId, dcid: &ConnectionId,
    local: SocketAddr, peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    let mut conn =
        Connection::new(scid, None, Some(dcid), local, peer, config, false)?;

    if let Some(server_name) = server_name {
        conn.handshake.set_host_name(server_name)?;
    }

    Ok(conn)
}
1698
/// Writes a version negotiation packet.
///
/// The `scid` and `dcid` parameters are the source connection ID and the
/// destination connection ID extracted from the received client's Initial
/// packet that advertises an unsupported version.
///
/// Returns the number of bytes written to `out` on success.
///
/// ## Examples:
///
/// ```no_run
/// # let mut buf = [0; 512];
/// # let mut out = [0; 512];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// let (len, src) = socket.recv_from(&mut buf).unwrap();
///
/// let hdr =
///     quiche::Header::from_slice(&mut buf[..len], quiche::MAX_CONN_ID_LEN)?;
///
/// if hdr.version != quiche::PROTOCOL_VERSION {
///     let len = quiche::negotiate_version(&hdr.scid, &hdr.dcid, &mut out)?;
///     socket.send_to(&out[..len], &src).unwrap();
/// }
/// # Ok::<(), quiche::Error>(())
/// ```
#[inline]
pub fn negotiate_version(
    scid: &ConnectionId, dcid: &ConnectionId, out: &mut [u8],
) -> Result<usize> {
    packet::negotiate_version(scid, dcid, out)
}
1728
/// Writes a stateless retry packet.
///
/// The `scid` and `dcid` parameters are the source connection ID and the
/// destination connection ID extracted from the received client's Initial
/// packet, while `new_scid` is the server's new source connection ID and
/// `token` is the address validation token the client needs to echo back.
///
/// The application is responsible for generating the address validation
/// token to be sent to the client, and verifying tokens sent back by the
/// client. The generated token should include the `dcid` parameter, such
/// that it can be later extracted from the token and passed to the
/// [`accept()`] function as its `odcid` parameter.
///
/// Returns the number of bytes written to `out` on success.
///
/// [`accept()`]: fn.accept.html
///
/// ## Examples:
///
/// ```no_run
/// # let mut config = quiche::Config::new(0xbabababa)?;
/// # let mut buf = [0; 512];
/// # let mut out = [0; 512];
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let local = socket.local_addr().unwrap();
/// # fn mint_token(hdr: &quiche::Header, src: &std::net::SocketAddr) -> Vec<u8> {
/// #     vec![]
/// # }
/// # fn validate_token<'a>(src: &std::net::SocketAddr, token: &'a [u8]) -> Option<quiche::ConnectionId<'a>> {
/// #     None
/// # }
/// let (len, peer) = socket.recv_from(&mut buf).unwrap();
///
/// let hdr = quiche::Header::from_slice(&mut buf[..len], quiche::MAX_CONN_ID_LEN)?;
///
/// let token = hdr.token.as_ref().unwrap();
///
/// // No token sent by client, create a new one.
/// if token.is_empty() {
///     let new_token = mint_token(&hdr, &peer);
///
///     let len = quiche::retry(
///         &hdr.scid, &hdr.dcid, &scid, &new_token, hdr.version, &mut out,
///     )?;
///
///     socket.send_to(&out[..len], &peer).unwrap();
///     return Ok(());
/// }
///
/// // Client sent token, validate it.
/// let odcid = validate_token(&peer, token);
///
/// if odcid.is_none() {
///     // Invalid address validation token.
///     return Ok(());
/// }
///
/// let conn = quiche::accept(&scid, odcid.as_ref(), local, peer, &mut config)?;
/// # Ok::<(), quiche::Error>(())
/// ```
#[inline]
pub fn retry(
    scid: &ConnectionId, dcid: &ConnectionId, new_scid: &ConnectionId,
    token: &[u8], version: u32, out: &mut [u8],
) -> Result<usize> {
    packet::retry(scid, dcid, new_scid, token, version, out)
}
1795
1796/// Returns true if the given protocol version is supported.
1797#[inline]
1798pub fn version_is_supported(version: u32) -> bool {
1799 matches!(version, PROTOCOL_VERSION_V1)
1800}
1801
/// Pushes a frame to the output packet if there is enough space.
///
/// Returns `true` on success, `false` otherwise. In case of failure it means
/// there is no room to add the frame in the packet. You may retry to add the
/// frame later.
macro_rules! push_frame_to_pkt {
    ($out:expr, $frames:expr, $frame:expr, $left:expr) => {{
        if $frame.wire_len() <= $left {
            // Deduct the frame's wire size from the remaining space before
            // serializing it.
            $left -= $frame.wire_len();

            $frame.to_bytes(&mut $out)?;

            // Record the frame in the caller-provided list.
            $frames.push($frame);

            true
        } else {
            // Not enough room left in the packet for this frame.
            false
        }
    }};
}
1822
/// Executes the provided body if the qlog feature is enabled, quiche has been
/// configured with a log writer, the event's importance is within the
/// configured level.
///
/// When the `qlog` feature is disabled this expands to nothing, so call
/// sites compile away entirely.
macro_rules! qlog_with_type {
    ($ty:expr, $qlog:expr, $qlog_streamer_ref:ident, $body:block) => {{
        #[cfg(feature = "qlog")]
        {
            // Filter by event importance first, then only run the body if a
            // streamer has actually been configured.
            if EventImportance::from($ty).is_contained_in(&$qlog.level) {
                if let Some($qlog_streamer_ref) = &mut $qlog.streamer {
                    $body
                }
            }
        }
    }};
}
1838
// Shorthand constants for the qlog event types emitted by this file, used as
// the `$ty` argument of `qlog_with_type!`.

#[cfg(feature = "qlog")]
const QLOG_PARAMS_SET: EventType =
    EventType::TransportEventType(TransportEventType::ParametersSet);

#[cfg(feature = "qlog")]
const QLOG_PACKET_RX: EventType =
    EventType::TransportEventType(TransportEventType::PacketReceived);

#[cfg(feature = "qlog")]
const QLOG_PACKET_TX: EventType =
    EventType::TransportEventType(TransportEventType::PacketSent);

#[cfg(feature = "qlog")]
const QLOG_DATA_MV: EventType =
    EventType::TransportEventType(TransportEventType::DataMoved);

#[cfg(feature = "qlog")]
const QLOG_METRICS: EventType =
    EventType::RecoveryEventType(RecoveryEventType::MetricsUpdated);

#[cfg(feature = "qlog")]
const QLOG_CONNECTION_CLOSED: EventType =
    EventType::ConnectivityEventType(ConnectivityEventType::ConnectionClosed);
1862
/// Per-connection qlog state.
#[cfg(feature = "qlog")]
struct QlogInfo {
    // Serializes qlog events to the configured writer; `None` until
    // `set_qlog()`/`set_qlog_with_level()` is called.
    streamer: Option<qlog::streamer::QlogStreamer>,
    // Whether the peer's transport parameters have already been logged.
    logged_peer_params: bool,
    // Importance filter applied to events (see `qlog_with_type!`).
    level: EventImportance,
}
1869
#[cfg(feature = "qlog")]
impl Default for QlogInfo {
    /// Starts with no streamer, no peer parameters logged, and the standard
    /// `Base` importance level.
    fn default() -> Self {
        Self {
            streamer: None,
            logged_peer_params: false,
            level: EventImportance::Base,
        }
    }
}
1880
1881impl<F: BufFactory> Connection<F> {
1882 fn new(
1883 scid: &ConnectionId, retry_cids: Option<RetryConnectionIds>,
1884 client_dcid: Option<&ConnectionId>, local: SocketAddr, peer: SocketAddr,
1885 config: &mut Config, is_server: bool,
1886 ) -> Result<Connection<F>> {
1887 let tls = config.tls_ctx.new_handshake()?;
1888 Connection::with_tls(
1889 scid,
1890 retry_cids,
1891 client_dcid,
1892 local,
1893 peer,
1894 config,
1895 tls,
1896 is_server,
1897 )
1898 }
1899
    /// Creates a new connection object driven by the provided TLS handshake.
    ///
    /// At most one of `retry_cids` (server side, after having sent a Retry)
    /// or `client_dcid` (client side, custom initial DCID) may be provided.
    #[allow(clippy::too_many_arguments)]
    fn with_tls(
        scid: &ConnectionId, retry_cids: Option<RetryConnectionIds>,
        client_dcid: Option<&ConnectionId>, local: SocketAddr, peer: SocketAddr,
        config: &Config, tls: tls::Handshake, is_server: bool,
    ) -> Result<Connection<F>> {
        if retry_cids.is_some() && client_dcid.is_some() {
            // These are exclusive, the caller should only specify one or the
            // other.
            return Err(Error::InvalidDcidInitialization);
        }
        #[cfg(feature = "custom-client-dcid")]
        if let Some(client_dcid) = client_dcid {
            // The minimum allowed length is 8 bytes.
            // See https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3
            if client_dcid.to_vec().len() < 8 {
                return Err(Error::InvalidDcidInitialization);
            }
        }
        // Custom client DCIDs are only accepted when the corresponding
        // feature is compiled in.
        #[cfg(not(feature = "custom-client-dcid"))]
        if client_dcid.is_some() {
            return Err(Error::InvalidDcidInitialization);
        }

        let max_rx_data = config.local_transport_params.initial_max_data;

        // Hex-encoded SCID, used as the connection's trace ID below.
        let scid_as_hex: Vec<String> =
            scid.iter().map(|b| format!("{b:02x}")).collect();

        // Only servers advertise a stateless reset token.
        let reset_token = if is_server {
            config.local_transport_params.stateless_reset_token
        } else {
            None
        };

        let recovery_config = recovery::RecoveryConfig::from_config(config);

        let mut path = path::Path::new(
            local,
            peer,
            &recovery_config,
            config.path_challenge_recv_max_queue_len,
            true,
            Some(config),
        );

        // If we sent a Retry assume the peer's address is verified.
        path.verified_peer_address = retry_cids.is_some();
        // Assume clients validate the server's address implicitly.
        path.peer_verified_local_address = is_server;

        // Do not allocate more than the number of active CIDs.
        let paths = path::PathMap::new(
            path,
            config.local_transport_params.active_conn_id_limit as usize,
            is_server,
        );

        let active_path_id = paths.get_active_path_id()?;

        let ids = cid::ConnectionIdentifiers::new(
            config.local_transport_params.active_conn_id_limit as usize,
            scid,
            active_path_id,
            reset_token,
        );

        let mut conn = Connection {
            version: config.version,

            ids,

            trace_id: scid_as_hex.join(""),

            // One entry per packet number space / encryption epoch
            // (Initial, Handshake, Application).
            pkt_num_spaces: [
                packet::PktNumSpace::new(),
                packet::PktNumSpace::new(),
                packet::PktNumSpace::new(),
            ],

            crypto_ctx: [
                packet::CryptoContext::new(),
                packet::CryptoContext::new(),
                packet::CryptoContext::new(),
            ],

            next_pkt_num: 0,

            pkt_num_manager: packet::PktNumManager::new(),

            peer_transport_params: TransportParams::default(),

            peer_transport_params_track_unknown: config
                .track_unknown_transport_params,

            local_transport_params: config.local_transport_params.clone(),

            handshake: tls,

            session: None,

            recovery_config,

            paths,
            path_challenge_recv_max_queue_len: config
                .path_challenge_recv_max_queue_len,
            path_challenge_rx_count: 0,

            application_protos: config.application_protos.clone(),

            recv_count: 0,
            sent_count: 0,
            lost_count: 0,
            spurious_lost_count: 0,
            retrans_count: 0,
            dgram_sent_count: 0,
            dgram_recv_count: 0,
            sent_bytes: 0,
            recv_bytes: 0,
            acked_bytes: 0,
            lost_bytes: 0,

            rx_data: 0,
            flow_control: flowcontrol::FlowControl::new(
                max_rx_data,
                cmp::min(max_rx_data / 2 * 3, DEFAULT_CONNECTION_WINDOW),
                config.max_connection_window,
            ),
            should_send_max_data: false,
            should_send_max_streams_bidi: false,
            should_send_max_streams_uni: false,

            tx_cap: 0,
            tx_cap_factor: config.tx_cap_factor,

            tx_buffered: 0,
            tx_buffered_state: TxBufferTrackingState::Ok,

            tx_data: 0,
            max_tx_data: 0,
            last_tx_data: 0,

            stream_retrans_bytes: 0,

            streams: stream::StreamMap::new(
                config.local_transport_params.initial_max_streams_bidi,
                config.local_transport_params.initial_max_streams_uni,
                config.max_stream_window,
            ),

            odcid: None,

            rscid: None,

            token: None,

            local_error: None,

            peer_error: None,

            blocked_limit: None,

            idle_timer: None,

            draining_timer: None,

            undecryptable_pkts: VecDeque::new(),

            alpn: Vec::new(),

            is_server,

            derived_initial_secrets: false,

            did_version_negotiation: false,

            did_retry: false,

            got_peer_conn_id: false,

            // Assume clients validate the server's address implicitly.
            peer_verified_initial_address: is_server,

            parsed_peer_transport_params: false,

            handshake_completed: false,

            handshake_done_sent: false,
            handshake_done_acked: false,

            handshake_confirmed: false,

            key_phase: false,

            ack_eliciting_sent: false,

            closed: false,

            timed_out: false,

            grease: config.grease,

            keylog: None,

            #[cfg(feature = "qlog")]
            qlog: Default::default(),

            dgram_recv_queue: dgram::DatagramQueue::new(
                config.dgram_recv_max_queue_len,
            ),

            dgram_send_queue: dgram::DatagramQueue::new(
                config.dgram_send_max_queue_len,
            ),

            emit_dgram: true,

            disable_dcid_reuse: config.disable_dcid_reuse,

            reset_stream_local_count: 0,
            stopped_stream_local_count: 0,
            reset_stream_remote_count: 0,
            stopped_stream_remote_count: 0,

            data_blocked_sent_count: 0,
            stream_data_blocked_sent_count: 0,
            data_blocked_recv_count: 0,
            stream_data_blocked_recv_count: 0,

            streams_blocked_bidi_recv_count: 0,
            streams_blocked_uni_recv_count: 0,

            max_amplification_factor: config.max_amplification_factor,
        };

        // After a Retry, advertise the related CIDs in our transport
        // parameters and remember that a Retry took place.
        if let Some(retry_cids) = retry_cids {
            conn.local_transport_params
                .original_destination_connection_id =
                Some(retry_cids.original_destination_cid.to_vec().into());

            conn.local_transport_params.retry_source_connection_id =
                Some(retry_cids.retry_source_cid.to_vec().into());

            conn.did_retry = true;
        }

        conn.local_transport_params.initial_source_connection_id =
            Some(conn.ids.get_scid(0)?.cid.to_vec().into());

        conn.handshake.init(is_server)?;

        conn.handshake
            .use_legacy_codepoint(config.version != PROTOCOL_VERSION_V1);

        conn.encode_transport_params()?;

        if !is_server {
            let dcid = if let Some(client_dcid) = client_dcid {
                // We already had a DCID generated for us, use it.
                client_dcid.to_vec()
            } else {
                // No DCID was provided, generate a random one.
                let mut dcid = [0; 16];
                rand::rand_bytes(&mut dcid[..]);
                dcid.to_vec()
            };

            // Derive initial secrets for the client. We can do this here
            // because the destination connection ID is now known.
            let (aead_open, aead_seal) = crypto::derive_initial_key_material(
                &dcid,
                conn.version,
                conn.is_server,
                false,
            )?;

            let reset_token = conn.peer_transport_params.stateless_reset_token;
            conn.set_initial_dcid(
                dcid.to_vec().into(),
                reset_token,
                active_path_id,
            )?;

            conn.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
            conn.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);

            conn.derived_initial_secrets = true;
        }

        Ok(conn)
    }
2191
    /// Sets keylog output to the designated [`Writer`].
    ///
    /// This needs to be called as soon as the connection is created, to avoid
    /// missing some early logs.
    ///
    /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
    #[inline]
    pub fn set_keylog(&mut self, writer: Box<dyn std::io::Write + Send + Sync>) {
        // Any previously configured keylog writer is replaced.
        self.keylog = Some(writer);
    }
2202
    /// Sets qlog output to the designated [`Writer`].
    ///
    /// Only events included in `QlogLevel::Base` are written. The serialization
    /// format is JSON-SEQ.
    ///
    /// This needs to be called as soon as the connection is created, to avoid
    /// missing some early logs.
    ///
    /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
    #[cfg(feature = "qlog")]
    #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
    pub fn set_qlog(
        &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
        description: String,
    ) {
        // Convenience wrapper that defaults the level to `QlogLevel::Base`.
        self.set_qlog_with_level(writer, title, description, QlogLevel::Base)
    }
2220
2221 /// Sets qlog output to the designated [`Writer`].
2222 ///
2223 /// Only qlog events included in the specified `QlogLevel` are written. The
2224 /// serialization format is JSON-SEQ.
2225 ///
2226 /// This needs to be called as soon as the connection is created, to avoid
2227 /// missing some early logs.
2228 ///
2229 /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
2230 #[cfg(feature = "qlog")]
2231 #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
2232 pub fn set_qlog_with_level(
2233 &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
2234 description: String, qlog_level: QlogLevel,
2235 ) {
2236 let vp = if self.is_server {
2237 qlog::VantagePointType::Server
2238 } else {
2239 qlog::VantagePointType::Client
2240 };
2241
2242 let level = match qlog_level {
2243 QlogLevel::Core => EventImportance::Core,
2244
2245 QlogLevel::Base => EventImportance::Base,
2246
2247 QlogLevel::Extra => EventImportance::Extra,
2248 };
2249
2250 self.qlog.level = level;
2251
2252 let trace = qlog::TraceSeq::new(
2253 qlog::VantagePoint {
2254 name: None,
2255 ty: vp,
2256 flow: None,
2257 },
2258 Some(title.to_string()),
2259 Some(description.to_string()),
2260 Some(qlog::Configuration {
2261 time_offset: Some(0.0),
2262 original_uris: None,
2263 }),
2264 None,
2265 );
2266
2267 let mut streamer = qlog::streamer::QlogStreamer::new(
2268 qlog::QLOG_VERSION.to_string(),
2269 Some(title),
2270 Some(description),
2271 None,
2272 Instant::now(),
2273 trace,
2274 self.qlog.level,
2275 writer,
2276 );
2277
2278 streamer.start_log().ok();
2279
2280 let ev_data = self
2281 .local_transport_params
2282 .to_qlog(TransportOwner::Local, self.handshake.cipher());
2283
2284 // This event occurs very early, so just mark the relative time as 0.0.
2285 streamer.add_event(Event::with_time(0.0, ev_data)).ok();
2286
2287 self.qlog.streamer = Some(streamer);
2288 }
2289
    /// Returns a mutable reference to the QlogStreamer, if it exists.
    ///
    /// Returns `None` when no qlog writer has been configured via
    /// [`set_qlog()`] or [`set_qlog_with_level()`].
    ///
    /// [`set_qlog()`]: struct.Connection.html#method.set_qlog
    /// [`set_qlog_with_level()`]: struct.Connection.html#method.set_qlog_with_level
    #[cfg(feature = "qlog")]
    #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
    pub fn qlog_streamer(&mut self) -> Option<&mut qlog::streamer::QlogStreamer> {
        self.qlog.streamer.as_mut()
    }
2296
2297 /// Configures the given session for resumption.
2298 ///
2299 /// On the client, this can be used to offer the given serialized session,
2300 /// as returned by [`session()`], for resumption.
2301 ///
2302 /// This must only be called immediately after creating a connection, that
2303 /// is, before any packet is sent or received.
2304 ///
2305 /// [`session()`]: struct.Connection.html#method.session
2306 #[inline]
2307 pub fn set_session(&mut self, session: &[u8]) -> Result<()> {
2308 let mut b = octets::Octets::with_slice(session);
2309
2310 let session_len = b.get_u64()? as usize;
2311 let session_bytes = b.get_bytes(session_len)?;
2312
2313 self.handshake.set_session(session_bytes.as_ref())?;
2314
2315 let raw_params_len = b.get_u64()? as usize;
2316 let raw_params_bytes = b.get_bytes(raw_params_len)?;
2317
2318 let peer_params = TransportParams::decode(
2319 raw_params_bytes.as_ref(),
2320 self.is_server,
2321 self.peer_transport_params_track_unknown,
2322 )?;
2323
2324 self.process_peer_transport_params(peer_params)?;
2325
2326 Ok(())
2327 }
2328
2329 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
2330 ///
2331 /// This must only be called immediately after creating a connection, that
2332 /// is, before any packet is sent or received.
2333 ///
2334 /// The default value is infinite, that is, no timeout is used unless
2335 /// already configured when creating the connection.
2336 pub fn set_max_idle_timeout(&mut self, v: u64) -> Result<()> {
2337 self.local_transport_params.max_idle_timeout =
2338 cmp::min(v, octets::MAX_VAR_INT);
2339
2340 self.encode_transport_params()
2341 }
2342
2343 /// Sets the congestion control algorithm used.
2344 ///
2345 /// This function can only be called inside one of BoringSSL's handshake
2346 /// callbacks, before any packet has been sent. Calling this function any
2347 /// other time will have no effect.
2348 ///
2349 /// See [`Config::set_cc_algorithm()`].
2350 ///
2351 /// [`Config::set_cc_algorithm()`]: struct.Config.html#method.set_cc_algorithm
2352 #[cfg(feature = "boringssl-boring-crate")]
2353 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2354 pub fn set_cc_algorithm_in_handshake(
2355 ssl: &mut boring::ssl::SslRef, algo: CongestionControlAlgorithm,
2356 ) -> Result<()> {
2357 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2358
2359 ex_data.recovery_config.cc_algorithm = algo;
2360
2361 Ok(())
2362 }
2363
2364 /// Sets custom BBR settings.
2365 ///
2366 /// This API is experimental and will be removed in the future.
2367 ///
2368 /// Currently this only applies if cc_algorithm is
2369 /// `CongestionControlAlgorithm::Bbr2Gcongestion` is set.
2370 ///
2371 /// This function can only be called inside one of BoringSSL's handshake
2372 /// callbacks, before any packet has been sent. Calling this function any
2373 /// other time will have no effect.
2374 ///
2375 /// See [`Config::set_custom_bbr_settings()`].
2376 ///
2377 /// [`Config::set_custom_bbr_settings()`]: struct.Config.html#method.set_custom_bbr_settings
2378 #[cfg(all(feature = "boringssl-boring-crate", feature = "internal"))]
2379 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2380 #[doc(hidden)]
2381 pub fn set_custom_bbr_settings_in_handshake(
2382 ssl: &mut boring::ssl::SslRef, custom_bbr_params: BbrParams,
2383 ) -> Result<()> {
2384 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2385
2386 ex_data.recovery_config.custom_bbr_params = Some(custom_bbr_params);
2387
2388 Ok(())
2389 }
2390
2391 /// Sets the congestion control algorithm used by string.
2392 ///
2393 /// This function can only be called inside one of BoringSSL's handshake
2394 /// callbacks, before any packet has been sent. Calling this function any
2395 /// other time will have no effect.
2396 ///
2397 /// See [`Config::set_cc_algorithm_name()`].
2398 ///
2399 /// [`Config::set_cc_algorithm_name()`]: struct.Config.html#method.set_cc_algorithm_name
2400 #[cfg(feature = "boringssl-boring-crate")]
2401 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2402 pub fn set_cc_algorithm_name_in_handshake(
2403 ssl: &mut boring::ssl::SslRef, name: &str,
2404 ) -> Result<()> {
2405 let cc_algo = CongestionControlAlgorithm::from_str(name)?;
2406 Self::set_cc_algorithm_in_handshake(ssl, cc_algo)
2407 }
2408
2409 /// Sets initial congestion window size in terms of packet count.
2410 ///
2411 /// This function can only be called inside one of BoringSSL's handshake
2412 /// callbacks, before any packet has been sent. Calling this function any
2413 /// other time will have no effect.
2414 ///
2415 /// See [`Config::set_initial_congestion_window_packets()`].
2416 ///
2417 /// [`Config::set_initial_congestion_window_packets()`]: struct.Config.html#method.set_initial_congestion_window_packets
2418 #[cfg(feature = "boringssl-boring-crate")]
2419 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2420 pub fn set_initial_congestion_window_packets_in_handshake(
2421 ssl: &mut boring::ssl::SslRef, packets: usize,
2422 ) -> Result<()> {
2423 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2424
2425 ex_data.recovery_config.initial_congestion_window_packets = packets;
2426
2427 Ok(())
2428 }
2429
2430 /// Configure whether to enable relaxed loss detection on spurious loss.
2431 ///
2432 /// This function can only be called inside one of BoringSSL's handshake
2433 /// callbacks, before any packet has been sent. Calling this function any
2434 /// other time will have no effect.
2435 ///
2436 /// See [`Config::set_enable_relaxed_loss_threshold()`].
2437 ///
2438 /// [`Config::set_enable_relaxed_loss_threshold()`]: struct.Config.html#method.set_enable_relaxed_loss_threshold
2439 #[cfg(feature = "boringssl-boring-crate")]
2440 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2441 pub fn set_enable_relaxed_loss_threshold_in_handshake(
2442 ssl: &mut boring::ssl::SslRef, enable: bool,
2443 ) -> Result<()> {
2444 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2445
2446 ex_data.recovery_config.enable_relaxed_loss_threshold = enable;
2447
2448 Ok(())
2449 }
2450
2451 /// Configures whether to enable HyStart++.
2452 ///
2453 /// This function can only be called inside one of BoringSSL's handshake
2454 /// callbacks, before any packet has been sent. Calling this function any
2455 /// other time will have no effect.
2456 ///
2457 /// See [`Config::enable_hystart()`].
2458 ///
2459 /// [`Config::enable_hystart()`]: struct.Config.html#method.enable_hystart
2460 #[cfg(feature = "boringssl-boring-crate")]
2461 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2462 pub fn set_hystart_in_handshake(
2463 ssl: &mut boring::ssl::SslRef, v: bool,
2464 ) -> Result<()> {
2465 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2466
2467 ex_data.recovery_config.hystart = v;
2468
2469 Ok(())
2470 }
2471
2472 /// Configures whether to enable pacing.
2473 ///
2474 /// This function can only be called inside one of BoringSSL's handshake
2475 /// callbacks, before any packet has been sent. Calling this function any
2476 /// other time will have no effect.
2477 ///
2478 /// See [`Config::enable_pacing()`].
2479 ///
2480 /// [`Config::enable_pacing()`]: struct.Config.html#method.enable_pacing
2481 #[cfg(feature = "boringssl-boring-crate")]
2482 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2483 pub fn set_pacing_in_handshake(
2484 ssl: &mut boring::ssl::SslRef, v: bool,
2485 ) -> Result<()> {
2486 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2487
2488 ex_data.recovery_config.pacing = v;
2489
2490 Ok(())
2491 }
2492
2493 /// Sets the max value for pacing rate.
2494 ///
2495 /// This function can only be called inside one of BoringSSL's handshake
2496 /// callbacks, before any packet has been sent. Calling this function any
2497 /// other time will have no effect.
2498 ///
2499 /// See [`Config::set_max_pacing_rate()`].
2500 ///
2501 /// [`Config::set_max_pacing_rate()`]: struct.Config.html#method.set_max_pacing_rate
2502 #[cfg(feature = "boringssl-boring-crate")]
2503 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2504 pub fn set_max_pacing_rate_in_handshake(
2505 ssl: &mut boring::ssl::SslRef, v: Option<u64>,
2506 ) -> Result<()> {
2507 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2508
2509 ex_data.recovery_config.max_pacing_rate = v;
2510
2511 Ok(())
2512 }
2513
2514 /// Sets the maximum outgoing UDP payload size.
2515 ///
2516 /// This function can only be called inside one of BoringSSL's handshake
2517 /// callbacks, before any packet has been sent. Calling this function any
2518 /// other time will have no effect.
2519 ///
2520 /// See [`Config::set_max_send_udp_payload_size()`].
2521 ///
2522 /// [`Config::set_max_send_udp_payload_size()`]: struct.Config.html#method.set_max_send_udp_payload_size
2523 #[cfg(feature = "boringssl-boring-crate")]
2524 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2525 pub fn set_max_send_udp_payload_size_in_handshake(
2526 ssl: &mut boring::ssl::SslRef, v: usize,
2527 ) -> Result<()> {
2528 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2529
2530 ex_data.recovery_config.max_send_udp_payload_size = v;
2531
2532 Ok(())
2533 }
2534
    /// Sets the send capacity factor.
    ///
    /// This function can only be called inside one of BoringSSL's handshake
    /// callbacks, before any packet has been sent. Calling this function any
    /// other time will have no effect.
    ///
    /// See [`Config::set_send_capacity_factor()`].
    ///
    /// [`Config::set_send_capacity_factor()`]: struct.Config.html#method.set_send_capacity_factor
    #[cfg(feature = "boringssl-boring-crate")]
    #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
    pub fn set_send_capacity_factor_in_handshake(
        ssl: &mut boring::ssl::SslRef, v: f64,
    ) -> Result<()> {
        let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;

        ex_data.tx_cap_factor = v;

        Ok(())
    }
2555
2556 /// Configures whether to do path MTU discovery.
2557 ///
2558 /// This function can only be called inside one of BoringSSL's handshake
2559 /// callbacks, before any packet has been sent. Calling this function any
2560 /// other time will have no effect.
2561 ///
2562 /// See [`Config::discover_pmtu()`].
2563 ///
2564 /// [`Config::discover_pmtu()`]: struct.Config.html#method.discover_pmtu
2565 #[cfg(feature = "boringssl-boring-crate")]
2566 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2567 pub fn set_discover_pmtu_in_handshake(
2568 ssl: &mut boring::ssl::SslRef, discover: bool, max_probes: u8,
2569 ) -> Result<()> {
2570 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2571
2572 ex_data.pmtud = Some((discover, max_probes));
2573
2574 Ok(())
2575 }
2576
2577 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
2578 ///
2579 /// This function can only be called inside one of BoringSSL's handshake
2580 /// callbacks, before any packet has been sent. Calling this function any
2581 /// other time will have no effect.
2582 ///
2583 /// See [`Config::set_max_idle_timeout()`].
2584 ///
2585 /// [`Config::set_max_idle_timeout()`]: struct.Config.html#method.set_max_idle_timeout
2586 #[cfg(feature = "boringssl-boring-crate")]
2587 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2588 pub fn set_max_idle_timeout_in_handshake(
2589 ssl: &mut boring::ssl::SslRef, v: u64,
2590 ) -> Result<()> {
2591 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2592
2593 ex_data.local_transport_params.max_idle_timeout = v;
2594
2595 Self::set_transport_parameters_in_hanshake(
2596 ex_data.local_transport_params.clone(),
2597 ex_data.is_server,
2598 ssl,
2599 )
2600 }
2601
2602 /// Sets the `initial_max_streams_bidi` transport parameter.
2603 ///
2604 /// This function can only be called inside one of BoringSSL's handshake
2605 /// callbacks, before any packet has been sent. Calling this function any
2606 /// other time will have no effect.
2607 ///
2608 /// See [`Config::set_initial_max_streams_bidi()`].
2609 ///
2610 /// [`Config::set_initial_max_streams_bidi()`]: struct.Config.html#method.set_initial_max_streams_bidi
2611 #[cfg(feature = "boringssl-boring-crate")]
2612 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2613 pub fn set_initial_max_streams_bidi_in_handshake(
2614 ssl: &mut boring::ssl::SslRef, v: u64,
2615 ) -> Result<()> {
2616 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2617
2618 ex_data.local_transport_params.initial_max_streams_bidi = v;
2619
2620 Self::set_transport_parameters_in_hanshake(
2621 ex_data.local_transport_params.clone(),
2622 ex_data.is_server,
2623 ssl,
2624 )
2625 }
2626
    /// Re-encodes `params` into the TLS handshake state referenced by `ssl`.
    ///
    /// NOTE(review): the name contains a typo ("hanshake"); it is private,
    /// but renaming it would require updating all internal call sites.
    #[cfg(feature = "boringssl-boring-crate")]
    fn set_transport_parameters_in_hanshake(
        params: TransportParams, is_server: bool, ssl: &mut boring::ssl::SslRef,
    ) -> Result<()> {
        use foreign_types_shared::ForeignTypeRef;

        // In order to apply the new parameter to the TLS state before TPs are
        // written into a TLS message, we need to re-encode all TPs immediately.
        //
        // Since we don't have direct access to the main `Connection` object, we
        // need to re-create the `Handshake` state from the `SslRef`.
        //
        // SAFETY: the `Handshake` object must not be drop()ed, otherwise it
        // would free the underlying BoringSSL structure, which `ssl` still
        // refers to and does not own.
        let mut handshake =
            unsafe { tls::Handshake::from_ptr(ssl.as_ptr() as _) };
        handshake.set_quic_transport_params(&params, is_server)?;

        // Avoid running `drop(handshake)` as that would free the underlying
        // handshake state.
        std::mem::forget(handshake);

        Ok(())
    }
2651
    /// Processes QUIC packets received from the peer.
    ///
    /// On success the number of bytes processed from the input buffer is
    /// returned. On error the connection will be closed by calling [`close()`]
    /// with the appropriate error code.
    ///
    /// Coalesced packets will be processed as necessary.
    ///
    /// Note that the contents of the input buffer `buf` might be modified by
    /// this function due to, for example, in-place decryption.
    ///
    /// [`close()`]: struct.Connection.html#method.close
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut buf = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// loop {
    ///     let (read, from) = socket.recv_from(&mut buf).unwrap();
    ///
    ///     let recv_info = quiche::RecvInfo {
    ///         from,
    ///         to: local,
    ///     };
    ///
    ///     let read = match conn.recv(&mut buf[..read], recv_info) {
    ///         Ok(v) => v,
    ///
    ///         Err(e) => {
    ///             // An error occurred, handle it.
    ///             break;
    ///         },
    ///     };
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn recv(&mut self, buf: &mut [u8], info: RecvInfo) -> Result<usize> {
        let len = buf.len();

        if len == 0 {
            return Err(Error::BufferTooShort);
        }

        // Look up the path on which the datagram was received, if known.
        let recv_pid = self.paths.path_id_from_addrs(&(info.to, info.from));

        if let Some(recv_pid) = recv_pid {
            let recv_path = self.paths.get_mut(recv_pid)?;

            // Keep track of how many bytes we received from the client, so we
            // can limit bytes sent back before address validation, to a
            // multiple of this. The limit needs to be increased early on, so
            // that if there is an error there is enough credit to send a
            // CONNECTION_CLOSE.
            //
            // It doesn't matter if the packets received were valid or not, we
            // only need to track the total amount of bytes received.
            //
            // Note that we also need to limit the number of bytes we sent on a
            // path if we are not the host that initiated its usage.
            if self.is_server && !recv_path.verified_peer_address {
                recv_path.max_send_bytes += len * self.max_amplification_factor;
            }
        } else if !self.is_server {
            // If a client receives packets from an unknown server address,
            // the client MUST discard these packets.
            trace!(
                "{} client received packet from unknown address {:?}, dropping",
                self.trace_id,
                info,
            );

            // Report the whole datagram as consumed even though it is dropped.
            return Ok(len);
        }

        let mut done = 0;
        let mut left = len;

        // Process coalesced packets.
        while left > 0 {
            let read = match self.recv_single(
                &mut buf[len - left..len],
                &info,
                recv_pid,
            ) {
                Ok(v) => v,

                Err(Error::Done) => {
                    // If the packet can't be processed or decrypted, check if
                    // it's a stateless reset.
                    if self.is_stateless_reset(&buf[len - left..len]) {
                        trace!("{} packet is a stateless reset", self.trace_id);

                        self.mark_closed();
                    }

                    // Consume the rest of the datagram.
                    left
                },

                Err(e) => {
                    // In case of error processing the incoming packet, close
                    // the connection.
                    self.close(false, e.to_wire(), b"").ok();
                    return Err(e);
                },
            };

            done += read;
            left -= read;
        }

        // Even though the packet was previously "accepted", it
        // should be safe to forward the error, as it also comes
        // from the `recv()` method.
        self.process_undecrypted_0rtt_packets()?;

        Ok(done)
    }
2775
2776 fn process_undecrypted_0rtt_packets(&mut self) -> Result<()> {
2777 // Process previously undecryptable 0-RTT packets if the decryption key
2778 // is now available.
2779 if self.crypto_ctx[packet::Epoch::Application]
2780 .crypto_0rtt_open
2781 .is_some()
2782 {
2783 while let Some((mut pkt, info)) = self.undecryptable_pkts.pop_front()
2784 {
2785 if let Err(e) = self.recv(&mut pkt, info) {
2786 self.undecryptable_pkts.clear();
2787
2788 return Err(e);
2789 }
2790 }
2791 }
2792 Ok(())
2793 }
2794
2795 /// Returns true if a QUIC packet is a stateless reset.
2796 fn is_stateless_reset(&self, buf: &[u8]) -> bool {
2797 // If the packet is too small, then we just throw it away.
2798 let buf_len = buf.len();
2799 if buf_len < 21 {
2800 return false;
2801 }
2802
2803 // TODO: we should iterate over all active destination connection IDs
2804 // and check against their reset token.
2805 match self.peer_transport_params.stateless_reset_token {
2806 Some(token) => {
2807 let token_len = 16;
2808
2809 crypto::verify_slices_are_equal(
2810 &token.to_be_bytes(),
2811 &buf[buf_len - token_len..buf_len],
2812 )
2813 .is_ok()
2814 },
2815
2816 None => false,
2817 }
2818 }
2819
2820 /// Processes a single QUIC packet received from the peer.
2821 ///
2822 /// On success the number of bytes processed from the input buffer is
2823 /// returned. When the [`Done`] error is returned, processing of the
2824 /// remainder of the incoming UDP datagram should be interrupted.
2825 ///
    /// Note that a server might observe a new 4-tuple, making it impossible
    /// to know in advance to which path the incoming packet belongs
    /// (`recv_pid` is `None`). As a client, packets from an unknown 4-tuple
    /// are dropped beforehand (see `recv()`).
2830 ///
2831 /// On error, an error other than [`Done`] is returned.
2832 ///
2833 /// [`Done`]: enum.Error.html#variant.Done
    fn recv_single(
        &mut self, buf: &mut [u8], info: &RecvInfo, recv_pid: Option<usize>,
    ) -> Result<usize> {
        // Reception timestamp, reused for recovery qlogging, the key update
        // timer and the idle timeout below.
        let now = Instant::now();

        if buf.is_empty() {
            return Err(Error::Done);
        }

        // No packet processing once the connection is closed or draining.
        if self.is_closed() || self.is_draining() {
            return Err(Error::Done);
        }

        // Similarly, ignore incoming packets while a local close is pending.
        let is_closing = self.local_error.is_some();

        if is_closing {
            return Err(Error::Done);
        }

        // Total datagram size, needed later when mapping the packet to a
        // (possibly new) path.
        let buf_len = buf.len();

        let mut b = octets::OctetsMut::with_slice(buf);

        // Parse the (still header-protected) packet header.
        let mut hdr = Header::from_bytes(&mut b, self.source_id().len())
            .map_err(|e| {
                drop_pkt_on_err(
                    e,
                    self.recv_count,
                    self.is_server,
                    &self.trace_id,
                )
            })?;

        if hdr.ty == Type::VersionNegotiation {
            // Version negotiation packets can only be sent by the server.
            if self.is_server {
                return Err(Error::Done);
            }

            // Ignore duplicate version negotiation.
            if self.did_version_negotiation {
                return Err(Error::Done);
            }

            // Ignore version negotiation if any other packet has already been
            // successfully processed.
            if self.recv_count > 0 {
                return Err(Error::Done);
            }

            if hdr.dcid != self.source_id() {
                return Err(Error::Done);
            }

            if hdr.scid != self.destination_id() {
                return Err(Error::Done);
            }

            trace!("{} rx pkt {:?}", self.trace_id, hdr);

            let versions = hdr.versions.ok_or(Error::Done)?;

            // Ignore version negotiation if the version already selected is
            // listed.
            if versions.contains(&self.version) {
                return Err(Error::Done);
            }

            let supported_versions =
                versions.iter().filter(|&&v| version_is_supported(v));

            let mut found_version = false;

            for &v in supported_versions {
                found_version = true;

                // The final version takes precedence over draft ones.
                if v == PROTOCOL_VERSION_V1 {
                    self.version = v;
                    break;
                }

                self.version = cmp::max(self.version, v);
            }

            if !found_version {
                // We don't support any of the versions offered.
                //
                // While a man-in-the-middle attacker might be able to
                // inject a version negotiation packet that triggers this
                // failure, the window of opportunity is very small and
                // this error is quite useful for debugging, so don't just
                // ignore the packet.
                return Err(Error::UnknownVersion);
            }

            self.did_version_negotiation = true;

            // Derive Initial secrets based on the new version.
            let (aead_open, aead_seal) = crypto::derive_initial_key_material(
                &self.destination_id(),
                self.version,
                self.is_server,
                true,
            )?;

            // Reset connection state to force sending another Initial packet.
            self.drop_epoch_state(packet::Epoch::Initial, now);
            self.got_peer_conn_id = false;
            self.handshake.clear()?;

            self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
            self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);

            self.handshake
                .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);

            // Encode transport parameters again, as the new version might be
            // using a different format.
            self.encode_transport_params()?;

            return Err(Error::Done);
        }

        if hdr.ty == Type::Retry {
            // Retry packets can only be sent by the server.
            if self.is_server {
                return Err(Error::Done);
            }

            // Ignore duplicate retry.
            if self.did_retry {
                return Err(Error::Done);
            }

            // Check if Retry packet is valid.
            if packet::verify_retry_integrity(
                &b,
                &self.destination_id(),
                self.version,
            )
            .is_err()
            {
                return Err(Error::Done);
            }

            trace!("{} rx pkt {:?}", self.trace_id, hdr);

            self.token = hdr.token;
            self.did_retry = true;

            // Remember peer's new connection ID.
            self.odcid = Some(self.destination_id().into_owned());

            self.set_initial_dcid(
                hdr.scid.clone(),
                None,
                self.paths.get_active_path_id()?,
            )?;

            self.rscid = Some(self.destination_id().into_owned());

            // Derive Initial secrets using the new connection ID.
            let (aead_open, aead_seal) = crypto::derive_initial_key_material(
                &hdr.scid,
                self.version,
                self.is_server,
                true,
            )?;

            // Reset connection state to force sending another Initial packet.
            self.drop_epoch_state(packet::Epoch::Initial, now);
            self.got_peer_conn_id = false;
            self.handshake.clear()?;

            self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
            self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);

            return Err(Error::Done);
        }

        // On the server, adopt the client's version on the first packet.
        if self.is_server && !self.did_version_negotiation {
            if !version_is_supported(hdr.version) {
                return Err(Error::UnknownVersion);
            }

            self.version = hdr.version;
            self.did_version_negotiation = true;

            self.handshake
                .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);

            // Encode transport parameters again, as the new version might be
            // using a different format.
            self.encode_transport_params()?;
        }

        if hdr.ty != Type::Short && hdr.version != self.version {
            // At this point version negotiation was already performed, so
            // ignore packets that don't match the connection's version.
            return Err(Error::Done);
        }

        // Long header packets have an explicit payload length, but short
        // packets don't so just use the remaining capacity in the buffer.
        let payload_len = if hdr.ty == Type::Short {
            b.cap()
        } else {
            b.get_varint().map_err(|e| {
                drop_pkt_on_err(
                    e.into(),
                    self.recv_count,
                    self.is_server,
                    &self.trace_id,
                )
            })? as usize
        };

        // Make sure the buffer is the same size or larger than an explicit
        // payload length.
        if payload_len > b.cap() {
            return Err(drop_pkt_on_err(
                Error::InvalidPacket,
                self.recv_count,
                self.is_server,
                &self.trace_id,
            ));
        }

        // Derive initial secrets on the server.
        if !self.derived_initial_secrets {
            let (aead_open, aead_seal) = crypto::derive_initial_key_material(
                &hdr.dcid,
                self.version,
                self.is_server,
                false,
            )?;

            self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
            self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);

            self.derived_initial_secrets = true;
        }

        // Select packet number space epoch based on the received packet's type.
        let epoch = hdr.ty.to_epoch()?;

        // Select AEAD context used to open incoming packet.
        let aead = if hdr.ty == Type::ZeroRTT {
            // Only use 0-RTT key if incoming packet is 0-RTT.
            self.crypto_ctx[epoch].crypto_0rtt_open.as_ref()
        } else {
            // Otherwise use the packet number space's main key.
            self.crypto_ctx[epoch].crypto_open.as_ref()
        };

        // Finally, discard packet if no usable key is available.
        let mut aead = match aead {
            Some(v) => v,

            None => {
                if hdr.ty == Type::ZeroRTT &&
                    self.undecryptable_pkts.len() < MAX_UNDECRYPTABLE_PACKETS &&
                    !self.is_established()
                {
                    // Buffer 0-RTT packets when the required read key is not
                    // available yet, and process them later.
                    //
                    // TODO: in the future we might want to buffer other types
                    // of undecryptable packets as well.
                    let pkt_len = b.off() + payload_len;
                    let pkt = (b.buf()[..pkt_len]).to_vec();

                    self.undecryptable_pkts.push_back((pkt, *info));
                    return Ok(pkt_len);
                }

                let e = drop_pkt_on_err(
                    Error::CryptoFail,
                    self.recv_count,
                    self.is_server,
                    &self.trace_id,
                );

                return Err(e);
            },
        };

        // The AEAD tag is part of the bytes consumed from the buffer, so its
        // length is needed to compute the final `read` count.
        let aead_tag_len = aead.alg().tag_len();

        packet::decrypt_hdr(&mut b, &mut hdr, aead).map_err(|e| {
            drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
        })?;

        let pn = packet::decode_pkt_num(
            self.pkt_num_spaces[epoch].largest_rx_pkt_num,
            hdr.pkt_num,
            hdr.pkt_num_len,
        );

        let pn_len = hdr.pkt_num_len;

        trace!(
            "{} rx pkt {:?} len={} pn={} {}",
            self.trace_id,
            hdr,
            payload_len,
            pn,
            AddrTupleFmt(info.from, info.to)
        );

        #[cfg(feature = "qlog")]
        let mut qlog_frames = vec![];

        // Check for key update.
        let mut aead_next = None;

        if self.handshake_confirmed &&
            hdr.ty != Type::ZeroRTT &&
            hdr.key_phase != self.key_phase
        {
            // Check if this packet arrived before key update.
            if let Some(key_update) = self.crypto_ctx[epoch]
                .key_update
                .as_ref()
                .and_then(|key_update| {
                    (pn < key_update.pn_on_update).then_some(key_update)
                })
            {
                aead = &key_update.crypto_open;
            } else {
                trace!("{} peer-initiated key update", self.trace_id);

                aead_next = Some((
                    self.crypto_ctx[epoch]
                        .crypto_open
                        .as_ref()
                        .unwrap()
                        .derive_next_packet_key()?,
                    self.crypto_ctx[epoch]
                        .crypto_seal
                        .as_ref()
                        .unwrap()
                        .derive_next_packet_key()?,
                ));

                // `aead_next` is always `Some()` at this point, so the `unwrap()`
                // will never fail.
                aead = &aead_next.as_ref().unwrap().0;
            }
        }

        let mut payload = packet::decrypt_pkt(
            &mut b,
            pn,
            pn_len,
            payload_len,
            aead,
        )
        .map_err(|e| {
            drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
        })?;

        if self.pkt_num_spaces[epoch].recv_pkt_num.contains(pn) {
            trace!("{} ignored duplicate packet {}", self.trace_id, pn);
            return Err(Error::Done);
        }

        // Packets with no frames are invalid.
        if payload.cap() == 0 {
            return Err(Error::InvalidPacket);
        }

        // Now that we decrypted the packet, let's see if we can map it to an
        // existing path.
        let recv_pid = if hdr.ty == Type::Short && self.got_peer_conn_id {
            let pkt_dcid = ConnectionId::from_ref(&hdr.dcid);
            self.get_or_create_recv_path_id(recv_pid, &pkt_dcid, buf_len, info)?
        } else {
            // During handshake, we are on the initial path.
            self.paths.get_active_path_id()?
        };

        // The key update is verified once a packet is successfully decrypted
        // using the new keys.
        if let Some((open_next, seal_next)) = aead_next {
            if !self.crypto_ctx[epoch]
                .key_update
                .as_ref()
                .is_none_or(|prev| prev.update_acked)
            {
                // Peer has updated keys twice without awaiting confirmation.
                return Err(Error::KeyUpdate);
            }

            trace!("{} key update verified", self.trace_id);

            let _ = self.crypto_ctx[epoch].crypto_seal.replace(seal_next);

            let open_prev = self.crypto_ctx[epoch]
                .crypto_open
                .replace(open_next)
                .unwrap();

            let recv_path = self.paths.get_mut(recv_pid)?;

            // Keep the previous read key around until the update is
            // confirmed, so reordered packets from the old phase can still
            // be decrypted.
            self.crypto_ctx[epoch].key_update = Some(packet::KeyUpdate {
                crypto_open: open_prev,
                pn_on_update: pn,
                update_acked: false,
                timer: now + (recv_path.recovery.pto() * 3),
            });

            self.key_phase = !self.key_phase;

            qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
                let trigger = Some(
                    qlog::events::security::KeyUpdateOrRetiredTrigger::RemoteUpdate,
                );

                let ev_data_client =
                    EventData::KeyUpdated(qlog::events::security::KeyUpdated {
                        key_type:
                            qlog::events::security::KeyType::Client1RttSecret,
                        trigger: trigger.clone(),
                        ..Default::default()
                    });

                q.add_event_data_with_instant(ev_data_client, now).ok();

                let ev_data_server =
                    EventData::KeyUpdated(qlog::events::security::KeyUpdated {
                        key_type:
                            qlog::events::security::KeyType::Server1RttSecret,
                        trigger,
                        ..Default::default()
                    });

                q.add_event_data_with_instant(ev_data_server, now).ok();
            });
        }

        if !self.is_server && !self.got_peer_conn_id {
            if self.odcid.is_none() {
                self.odcid = Some(self.destination_id().into_owned());
            }

            // Replace the randomly generated destination connection ID with
            // the one supplied by the server.
            self.set_initial_dcid(
                hdr.scid.clone(),
                self.peer_transport_params.stateless_reset_token,
                recv_pid,
            )?;

            self.got_peer_conn_id = true;
        }

        if self.is_server && !self.got_peer_conn_id {
            self.set_initial_dcid(hdr.scid.clone(), None, recv_pid)?;

            if !self.did_retry {
                self.local_transport_params
                    .original_destination_connection_id =
                    Some(hdr.dcid.to_vec().into());

                self.encode_transport_params()?;
            }

            self.got_peer_conn_id = true;
        }

        // To avoid sending an ACK in response to an ACK-only packet, we need
        // to keep track of whether this packet contains any frame other than
        // ACK and PADDING.
        let mut ack_elicited = false;

        // Process packet payload. If a frame cannot be processed, store the
        // error and stop further packet processing.
        let mut frame_processing_err = None;

        // To know if the peer migrated the connection, we need to keep track
        // whether this is a non-probing packet.
        let mut probing = true;

        // Process packet payload.
        while payload.cap() > 0 {
            let frame = frame::Frame::from_bytes(&mut payload, hdr.ty)?;

            qlog_with_type!(QLOG_PACKET_RX, self.qlog, _q, {
                qlog_frames.push(frame.to_qlog());
            });

            if frame.ack_eliciting() {
                ack_elicited = true;
            }

            if !frame.probing() {
                probing = false;
            }

            if let Err(e) = self.process_frame(frame, &hdr, recv_pid, epoch, now)
            {
                frame_processing_err = Some(e);
                break;
            }
        }

        // Log the received packet (and its frames) even when frame
        // processing failed part-way through.
        qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
            let packet_size = b.len();

            let qlog_pkt_hdr = qlog::events::quic::PacketHeader::with_type(
                hdr.ty.to_qlog(),
                Some(pn),
                Some(hdr.version),
                Some(&hdr.scid),
                Some(&hdr.dcid),
            );

            let qlog_raw_info = RawInfo {
                length: Some(packet_size as u64),
                payload_length: Some(payload_len as u64),
                data: None,
            };

            let ev_data =
                EventData::PacketReceived(qlog::events::quic::PacketReceived {
                    header: qlog_pkt_hdr,
                    frames: Some(qlog_frames),
                    raw: Some(qlog_raw_info),
                    ..Default::default()
                });

            q.add_event_data_with_instant(ev_data, now).ok();
        });

        qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
            let recv_path = self.paths.get_mut(recv_pid)?;
            recv_path.recovery.maybe_qlog(q, now);
        });

        if let Some(e) = frame_processing_err {
            // Any frame error is terminal, so now just return.
            return Err(e);
        }

        // Only log the remote transport parameters once the connection is
        // established (i.e. after frames have been fully parsed) and only
        // once per connection.
        if self.is_established() {
            qlog_with_type!(QLOG_PARAMS_SET, self.qlog, q, {
                if !self.qlog.logged_peer_params {
                    let ev_data = self
                        .peer_transport_params
                        .to_qlog(TransportOwner::Remote, self.handshake.cipher());

                    q.add_event_data_with_instant(ev_data, now).ok();

                    self.qlog.logged_peer_params = true;
                }
            });
        }

        // Process acked frames. Note that several packets from several paths
        // might have been acked by the received packet.
        for (_, p) in self.paths.iter_mut() {
            while let Some(acked) = p.recovery.next_acked_frame(epoch) {
                match acked {
                    frame::Frame::Ping {
                        mtu_probe: Some(mtu_probe),
                    } =>
                        if let Some(pmtud) = p.pmtud.as_mut() {
                            trace!(
                                "{} pmtud probe acked; probe size {:?}",
                                self.trace_id,
                                mtu_probe
                            );

                            // Ensure the probe is within the supported MTU range
                            // before updating the max datagram size
                            if let Some(current_mtu) =
                                pmtud.successful_probe(mtu_probe)
                            {
                                qlog_with_type!(
                                    EventType::ConnectivityEventType(
                                        ConnectivityEventType::MtuUpdated
                                    ),
                                    self.qlog,
                                    q,
                                    {
                                        let pmtu_data = EventData::MtuUpdated(
                                            qlog::events::connectivity::MtuUpdated {
                                                old: Some(
                                                    p.recovery.max_datagram_size()
                                                        as u16,
                                                ),
                                                new: current_mtu as u16,
                                                done: Some(true),
                                            },
                                        );

                                        q.add_event_data_with_instant(
                                            pmtu_data, now,
                                        )
                                        .ok();
                                    }
                                );

                                p.recovery
                                    .pmtud_update_max_datagram_size(current_mtu);
                            }
                        },

                    frame::Frame::ACK { ranges, .. } => {
                        // Stop acknowledging packets less than or equal to the
                        // largest acknowledged in the sent ACK frame that, in
                        // turn, got acked.
                        if let Some(largest_acked) = ranges.last() {
                            self.pkt_num_spaces[epoch]
                                .recv_pkt_need_ack
                                .remove_until(largest_acked);
                        }
                    },

                    frame::Frame::CryptoHeader { offset, length } => {
                        self.crypto_ctx[epoch]
                            .crypto_stream
                            .send
                            .ack_and_drop(offset, length);
                    },

                    frame::Frame::StreamHeader {
                        stream_id,
                        offset,
                        length,
                        ..
                    } => {
                        // Update tx_buffered and emit qlog before checking if the
                        // stream still exists. The client does need to ACK
                        // frames that were received after the client sends a
                        // ResetStream.
                        self.tx_buffered =
                            self.tx_buffered.saturating_sub(length);

                        qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
                            let ev_data = EventData::DataMoved(
                                qlog::events::quic::DataMoved {
                                    stream_id: Some(stream_id),
                                    offset: Some(offset),
                                    length: Some(length as u64),
                                    from: Some(DataRecipient::Transport),
                                    to: Some(DataRecipient::Dropped),
                                    ..Default::default()
                                },
                            );

                            q.add_event_data_with_instant(ev_data, now).ok();
                        });

                        let stream = match self.streams.get_mut(stream_id) {
                            Some(v) => v,

                            None => continue,
                        };

                        stream.send.ack_and_drop(offset, length);

                        let priority_key = Arc::clone(&stream.priority_key);

                        // Only collect the stream if it is complete and not
                        // readable or writable.
                        //
                        // If it is readable, it will get collected when
                        // stream_recv() is next used.
                        //
                        // If it is writable, it might mean that the stream
                        // has been stopped by the peer (i.e. a STOP_SENDING
                        // frame is received), in which case before collecting
                        // the stream we will need to propagate the
                        // `StreamStopped` error to the application. It will
                        // instead get collected when one of stream_capacity(),
                        // stream_writable(), stream_send(), ... is next called.
                        //
                        // Note that we can't use `is_writable()` here because
                        // it returns false if the stream is stopped. Instead,
                        // since the stream is marked as writable when a
                        // STOP_SENDING frame is received, we check the writable
                        // queue directly instead.
                        let is_writable = priority_key.writable.is_linked() &&
                            // Ensure that the stream is actually stopped.
                            stream.send.is_stopped();

                        let is_complete = stream.is_complete();
                        let is_readable = stream.is_readable();

                        if is_complete && !is_readable && !is_writable {
                            let local = stream.local;
                            self.streams.collect(stream_id, local);
                        }
                    },

                    frame::Frame::HandshakeDone => {
                        // Explicitly set this to true, so that if the frame was
                        // already scheduled for retransmission, it is aborted.
                        self.handshake_done_sent = true;

                        self.handshake_done_acked = true;
                    },

                    frame::Frame::ResetStream { stream_id, .. } => {
                        let stream = match self.streams.get_mut(stream_id) {
                            Some(v) => v,

                            None => continue,
                        };

                        let priority_key = Arc::clone(&stream.priority_key);

                        // Only collect the stream if it is complete and not
                        // readable or writable.
                        //
                        // If it is readable, it will get collected when
                        // stream_recv() is next used.
                        //
                        // If it is writable, it might mean that the stream
                        // has been stopped by the peer (i.e. a STOP_SENDING
                        // frame is received), in which case before collecting
                        // the stream we will need to propagate the
                        // `StreamStopped` error to the application. It will
                        // instead get collected when one of stream_capacity(),
                        // stream_writable(), stream_send(), ... is next called.
                        //
                        // Note that we can't use `is_writable()` here because
                        // it returns false if the stream is stopped. Instead,
                        // since the stream is marked as writable when a
                        // STOP_SENDING frame is received, we check the writable
                        // queue directly instead.
                        let is_writable = priority_key.writable.is_linked() &&
                            // Ensure that the stream is actually stopped.
                            stream.send.is_stopped();

                        let is_complete = stream.is_complete();
                        let is_readable = stream.is_readable();

                        if is_complete && !is_readable && !is_writable {
                            let local = stream.local;
                            self.streams.collect(stream_id, local);
                        }
                    },

                    _ => (),
                }
            }
        }

        // Now that we processed all the frames, if there is a path that has no
        // Destination CID, try to allocate one.
        let no_dcid = self
            .paths
            .iter_mut()
            .filter(|(_, p)| p.active_dcid_seq.is_none());

        for (pid, p) in no_dcid {
            if self.ids.zero_length_dcid() {
                p.active_dcid_seq = Some(0);
                continue;
            }

            let dcid_seq = match self.ids.lowest_available_dcid_seq() {
                Some(seq) => seq,
                None => break,
            };

            self.ids.link_dcid_to_path_id(dcid_seq, pid)?;

            p.active_dcid_seq = Some(dcid_seq);
        }

        // We only record the time of arrival of the largest packet number
        // that still needs to be acked, to be used for ACK delay calculation.
        if self.pkt_num_spaces[epoch].recv_pkt_need_ack.last() < Some(pn) {
            self.pkt_num_spaces[epoch].largest_rx_pkt_time = now;
        }

        self.pkt_num_spaces[epoch].recv_pkt_num.insert(pn);

        self.pkt_num_spaces[epoch].recv_pkt_need_ack.push_item(pn);

        self.pkt_num_spaces[epoch].ack_elicited =
            cmp::max(self.pkt_num_spaces[epoch].ack_elicited, ack_elicited);

        self.pkt_num_spaces[epoch].largest_rx_pkt_num =
            cmp::max(self.pkt_num_spaces[epoch].largest_rx_pkt_num, pn);

        if !probing {
            self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num = cmp::max(
                self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num,
                pn,
            );

            // Did the peer migrate to another path?
            let active_path_id = self.paths.get_active_path_id()?;

            if self.is_server &&
                recv_pid != active_path_id &&
                self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num == pn
            {
                self.on_peer_migrated(recv_pid, self.disable_dcid_reuse, now)?;
            }
        }

        if let Some(idle_timeout) = self.idle_timeout() {
            self.idle_timer = Some(now + idle_timeout);
        }

        // Update send capacity.
        self.update_tx_cap();

        self.recv_count += 1;
        self.paths.get_mut(recv_pid)?.recv_count += 1;

        let read = b.off() + aead_tag_len;

        self.recv_bytes += read as u64;
        self.paths.get_mut(recv_pid)?.recv_bytes += read as u64;

        // A Handshake packet has been received from the client and has been
        // successfully processed, so we can drop the initial state and consider
        // the client's address to be verified.
        if self.is_server && hdr.ty == Type::Handshake {
            self.drop_epoch_state(packet::Epoch::Initial, now);

            self.paths.get_mut(recv_pid)?.verified_peer_address = true;
        }

        // NOTE: receiving a packet resets the ack-eliciting-sent tracking
        // flag.
        self.ack_eliciting_sent = false;

        Ok(read)
    }
3673
3674 /// Writes a single QUIC packet to be sent to the peer.
3675 ///
3676 /// On success the number of bytes written to the output buffer is
3677 /// returned, or [`Done`] if there was nothing to write.
3678 ///
3679 /// The application should call `send()` multiple times until [`Done`] is
3680 /// returned, indicating that there are no more packets to send. It is
3681 /// recommended that `send()` be called in the following cases:
3682 ///
3683 /// * When the application receives QUIC packets from the peer (that is,
3684 /// any time [`recv()`] is also called).
3685 ///
3686 /// * When the connection timer expires (that is, any time [`on_timeout()`]
3687 /// is also called).
3688 ///
3689 /// * When the application sends data to the peer (for example, any time
3690 /// [`stream_send()`] or [`stream_shutdown()`] are called).
3691 ///
3692 /// * When the application receives data from the peer (for example any
3693 /// time [`stream_recv()`] is called).
3694 ///
3695 /// Once [`is_draining()`] returns `true`, it is no longer necessary to call
3696 /// `send()` and all calls will return [`Done`].
3697 ///
3698 /// [`Done`]: enum.Error.html#variant.Done
3699 /// [`recv()`]: struct.Connection.html#method.recv
3700 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
3701 /// [`stream_send()`]: struct.Connection.html#method.stream_send
3702 /// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
3703 /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
3704 /// [`is_draining()`]: struct.Connection.html#method.is_draining
3705 ///
3706 /// ## Examples:
3707 ///
3708 /// ```no_run
3709 /// # let mut out = [0; 512];
3710 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
3711 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
3712 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
3713 /// # let peer = "127.0.0.1:1234".parse().unwrap();
3714 /// # let local = socket.local_addr().unwrap();
3715 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
3716 /// loop {
3717 /// let (write, send_info) = match conn.send(&mut out) {
3718 /// Ok(v) => v,
3719 ///
3720 /// Err(quiche::Error::Done) => {
3721 /// // Done writing.
3722 /// break;
3723 /// },
3724 ///
3725 /// Err(e) => {
3726 /// // An error occurred, handle it.
3727 /// break;
3728 /// },
3729 /// };
3730 ///
3731 /// socket.send_to(&out[..write], &send_info.to).unwrap();
3732 /// }
3733 /// # Ok::<(), quiche::Error>(())
3734 /// ```
    pub fn send(&mut self, out: &mut [u8]) -> Result<(usize, SendInfo)> {
        // Delegate to `send_on_path()` with no 4-tuple restriction, i.e. all
        // available paths are considered.
        self.send_on_path(out, None, None)
    }
3738
3739 /// Writes a single QUIC packet to be sent to the peer from the specified
3740 /// local address `from` to the destination address `to`.
3741 ///
3742 /// The behavior of this method differs depending on the value of the `from`
3743 /// and `to` parameters:
3744 ///
    /// * If both are `Some`, then the method only considers the 4-tuple
3746 /// (`from`, `to`). Application can monitor the 4-tuple availability,
3747 /// either by monitoring [`path_event_next()`] events or by relying on
3748 /// the [`paths_iter()`] method. If the provided 4-tuple does not exist
3749 /// on the connection (anymore), it returns an [`InvalidState`].
3750 ///
3751 /// * If `from` is `Some` and `to` is `None`, then the method only
3752 /// considers sending packets on paths having `from` as local address.
3753 ///
3754 /// * If `to` is `Some` and `from` is `None`, then the method only
3755 /// considers sending packets on paths having `to` as peer address.
3756 ///
3757 /// * If both are `None`, all available paths are considered.
3758 ///
3759 /// On success the number of bytes written to the output buffer is
3760 /// returned, or [`Done`] if there was nothing to write.
3761 ///
3762 /// The application should call `send_on_path()` multiple times until
3763 /// [`Done`] is returned, indicating that there are no more packets to
3764 /// send. It is recommended that `send_on_path()` be called in the
3765 /// following cases:
3766 ///
3767 /// * When the application receives QUIC packets from the peer (that is,
3768 /// any time [`recv()`] is also called).
3769 ///
3770 /// * When the connection timer expires (that is, any time [`on_timeout()`]
3771 /// is also called).
3772 ///
    /// * When the application sends data to the peer (for example, any time
3774 /// [`stream_send()`] or [`stream_shutdown()`] are called).
3775 ///
3776 /// * When the application receives data from the peer (for example any
3777 /// time [`stream_recv()`] is called).
3778 ///
3779 /// Once [`is_draining()`] returns `true`, it is no longer necessary to call
3780 /// `send_on_path()` and all calls will return [`Done`].
3781 ///
3782 /// [`Done`]: enum.Error.html#variant.Done
    /// [`InvalidState`]: enum.Error.html#variant.InvalidState
3784 /// [`recv()`]: struct.Connection.html#method.recv
3785 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
3786 /// [`stream_send()`]: struct.Connection.html#method.stream_send
3787 /// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
3788 /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
3789 /// [`path_event_next()`]: struct.Connection.html#method.path_event_next
3790 /// [`paths_iter()`]: struct.Connection.html#method.paths_iter
3791 /// [`is_draining()`]: struct.Connection.html#method.is_draining
3792 ///
3793 /// ## Examples:
3794 ///
3795 /// ```no_run
3796 /// # let mut out = [0; 512];
3797 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
3798 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
3799 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
3800 /// # let peer = "127.0.0.1:1234".parse().unwrap();
3801 /// # let local = socket.local_addr().unwrap();
3802 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
3803 /// loop {
3804 /// let (write, send_info) = match conn.send_on_path(&mut out, Some(local), Some(peer)) {
3805 /// Ok(v) => v,
3806 ///
3807 /// Err(quiche::Error::Done) => {
3808 /// // Done writing.
3809 /// break;
3810 /// },
3811 ///
3812 /// Err(e) => {
3813 /// // An error occurred, handle it.
3814 /// break;
3815 /// },
3816 /// };
3817 ///
3818 /// socket.send_to(&out[..write], &send_info.to).unwrap();
3819 /// }
3820 /// # Ok::<(), quiche::Error>(())
3821 /// ```
    pub fn send_on_path(
        &mut self, out: &mut [u8], from: Option<SocketAddr>,
        to: Option<SocketAddr>,
    ) -> Result<(usize, SendInfo)> {
        if out.is_empty() {
            return Err(Error::BufferTooShort);
        }

        // Once closed or draining no more packets may be sent on any path.
        if self.is_closed() || self.is_draining() {
            return Err(Error::Done);
        }

        let now = Instant::now();

        // Only drive the handshake forward if we are not already in the
        // process of reporting a local error to the peer.
        if self.local_error.is_none() {
            self.do_handshake(now)?;
        }

        // Forwarding the error value here could confuse
        // applications, as they may not expect getting a `recv()`
        // error when calling `send()`.
        //
        // We simply fall-through to sending packets, which should
        // take care of terminating the connection as needed.
        let _ = self.process_undecrypted_0rtt_packets();

        // There's no point in trying to send a packet if the Initial secrets
        // have not been derived yet, so return early.
        if !self.derived_initial_secrets {
            return Err(Error::Done);
        }

        // Whether any packet written into `out` so far was an Initial; used
        // below to decide if the datagram needs padding.
        let mut has_initial = false;

        // Total number of bytes written into `out` so far.
        let mut done = 0;

        // Limit output packet size to respect the sender and receiver's
        // maximum UDP payload size limit.
        let mut left = cmp::min(out.len(), self.max_send_udp_payload_size());

        // Resolve the path to send on: an explicit (from, to) 4-tuple must
        // map to an existing path, otherwise let the connection pick one
        // that matches whichever constraints were provided.
        let send_pid = match (from, to) {
            (Some(f), Some(t)) => self
                .paths
                .path_id_from_addrs(&(f, t))
                .ok_or(Error::InvalidState)?,

            _ => self.get_send_path_id(from, to)?,
        };

        let send_path = self.paths.get_mut(send_pid)?;

        // Update max datagram size to allow path MTU discovery probe to be sent.
        if let Some(pmtud) = send_path.pmtud.as_mut() {
            if pmtud.should_probe() {
                // Probe sizes are only used once the handshake has made
                // enough progress; before that, stick to the current MTU.
                let size = if self.handshake_confirmed || self.handshake_completed
                {
                    pmtud.get_probe_size()
                } else {
                    pmtud.get_current_mtu()
                };

                send_path.recovery.pmtud_update_max_datagram_size(size);

                left =
                    cmp::min(out.len(), send_path.recovery.max_datagram_size());
            }
        }

        // Limit data sent by the server based on the amount of data received
        // from the client before its address is validated.
        if !send_path.verified_peer_address && self.is_server {
            left = cmp::min(left, send_path.max_send_bytes);
        }

        // Generate coalesced packets: keep appending packets to `out` until
        // the buffer is full or one of the break conditions below fires.
        while left > 0 {
            let (ty, written) = match self.send_single(
                &mut out[done..done + left],
                send_pid,
                has_initial,
                now,
            ) {
                Ok(v) => v,

                // Out of space or nothing left to send: stop coalescing but
                // still return whatever was already written.
                Err(Error::BufferTooShort) | Err(Error::Done) => break,

                Err(e) => return Err(e),
            };

            done += written;
            left -= written;

            match ty {
                Type::Initial => has_initial = true,

                // No more packets can be coalesced after a 1-RTT.
                Type::Short => break,

                _ => (),
            };

            // When sending multiple PTO probes, don't coalesce them together,
            // so they are sent on separate UDP datagrams.
            if let Ok(epoch) = ty.to_epoch() {
                if self.paths.get_mut(send_pid)?.recovery.loss_probes(epoch) > 0 {
                    break;
                }
            }

            // Don't coalesce packets that must go on different paths.
            if !(from.is_some() && to.is_some()) &&
                self.get_send_path_id(from, to)? != send_pid
            {
                break;
            }
        }

        if done == 0 {
            // Nothing was written: snapshot tx_data, presumably for the
            // app-limited check on the next send — TODO confirm against
            // `delivery_rate_check_if_app_limited()`.
            self.last_tx_data = self.tx_data;

            return Err(Error::Done);
        }

        // Datagrams that carry an Initial packet must be padded up to
        // `MIN_CLIENT_INITIAL_LEN` bytes.
        if has_initial && left > 0 && done < MIN_CLIENT_INITIAL_LEN {
            let pad_len = cmp::min(left, MIN_CLIENT_INITIAL_LEN - done);

            // Fill padding area with null bytes, to avoid leaking information
            // in case the application reuses the packet buffer.
            out[done..done + pad_len].fill(0);

            done += pad_len;
        }

        let send_path = self.paths.get(send_pid)?;

        // Report the addresses actually used, plus the pacing-aware time at
        // which the datagram should be handed to the network.
        let info = SendInfo {
            from: send_path.local_addr(),
            to: send_path.peer_addr(),

            at: send_path.recovery.get_packet_send_time(now),
        };

        Ok((done, info))
    }
3966
3967 fn send_single(
3968 &mut self, out: &mut [u8], send_pid: usize, has_initial: bool,
3969 now: Instant,
3970 ) -> Result<(Type, usize)> {
3971 if out.is_empty() {
3972 return Err(Error::BufferTooShort);
3973 }
3974
3975 if self.is_draining() {
3976 return Err(Error::Done);
3977 }
3978
3979 let is_closing = self.local_error.is_some();
3980
3981 let out_len = out.len();
3982
3983 let mut b = octets::OctetsMut::with_slice(out);
3984
3985 let pkt_type = self.write_pkt_type(send_pid)?;
3986
3987 let max_dgram_len = if !self.dgram_send_queue.is_empty() {
3988 self.dgram_max_writable_len()
3989 } else {
3990 None
3991 };
3992
3993 let epoch = pkt_type.to_epoch()?;
3994 let pkt_space = &mut self.pkt_num_spaces[epoch];
3995 let crypto_ctx = &mut self.crypto_ctx[epoch];
3996
3997 // Process lost frames. There might be several paths having lost frames.
3998 for (_, p) in self.paths.iter_mut() {
3999 while let Some(lost) = p.recovery.next_lost_frame(epoch) {
4000 match lost {
4001 frame::Frame::CryptoHeader { offset, length } => {
4002 crypto_ctx.crypto_stream.send.retransmit(offset, length);
4003
4004 self.stream_retrans_bytes += length as u64;
4005 p.stream_retrans_bytes += length as u64;
4006
4007 self.retrans_count += 1;
4008 p.retrans_count += 1;
4009 },
4010
4011 frame::Frame::StreamHeader {
4012 stream_id,
4013 offset,
4014 length,
4015 fin,
4016 } => {
4017 let stream = match self.streams.get_mut(stream_id) {
4018 // Only retransmit data if the stream is not closed
4019 // or stopped.
4020 Some(v) if !v.send.is_stopped() => v,
4021
4022 // Data on a closed stream will not be retransmitted
4023 // or acked after it is declared lost, so update
4024 // tx_buffered and qlog.
4025 _ => {
4026 self.tx_buffered =
4027 self.tx_buffered.saturating_sub(length);
4028
4029 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
4030 let ev_data = EventData::DataMoved(
4031 qlog::events::quic::DataMoved {
4032 stream_id: Some(stream_id),
4033 offset: Some(offset),
4034 length: Some(length as u64),
4035 from: Some(DataRecipient::Transport),
4036 to: Some(DataRecipient::Dropped),
4037 ..Default::default()
4038 },
4039 );
4040
4041 q.add_event_data_with_instant(ev_data, now)
4042 .ok();
4043 });
4044
4045 continue;
4046 },
4047 };
4048
4049 let was_flushable = stream.is_flushable();
4050
4051 let empty_fin = length == 0 && fin;
4052
4053 stream.send.retransmit(offset, length);
4054
4055 // If the stream is now flushable push it to the
4056 // flushable queue, but only if it wasn't already
4057 // queued.
4058 //
4059 // Consider the stream flushable also when we are
4060 // sending a zero-length frame that has the fin flag
4061 // set.
4062 if (stream.is_flushable() || empty_fin) && !was_flushable
4063 {
4064 let priority_key = Arc::clone(&stream.priority_key);
4065 self.streams.insert_flushable(&priority_key);
4066 }
4067
4068 self.stream_retrans_bytes += length as u64;
4069 p.stream_retrans_bytes += length as u64;
4070
4071 self.retrans_count += 1;
4072 p.retrans_count += 1;
4073 },
4074
4075 frame::Frame::ACK { .. } => {
4076 pkt_space.ack_elicited = true;
4077 },
4078
4079 frame::Frame::ResetStream {
4080 stream_id,
4081 error_code,
4082 final_size,
4083 } => {
4084 self.streams
4085 .insert_reset(stream_id, error_code, final_size);
4086 },
4087
4088 frame::Frame::StopSending {
4089 stream_id,
4090 error_code,
4091 } =>
4092 // We only need to retransmit the STOP_SENDING frame if
4093 // the stream is still active and not FIN'd. Even if the
4094 // packet was lost, if the application has the final
4095 // size at this point there is no need to retransmit.
4096 if let Some(stream) = self.streams.get(stream_id) {
4097 if !stream.recv.is_fin() {
4098 self.streams
4099 .insert_stopped(stream_id, error_code);
4100 }
4101 },
4102
4103 // Retransmit HANDSHAKE_DONE only if it hasn't been acked at
4104 // least once already.
4105 frame::Frame::HandshakeDone if !self.handshake_done_acked => {
4106 self.handshake_done_sent = false;
4107 },
4108
4109 frame::Frame::MaxStreamData { stream_id, .. } => {
4110 if self.streams.get(stream_id).is_some() {
4111 self.streams.insert_almost_full(stream_id);
4112 }
4113 },
4114
4115 frame::Frame::MaxData { .. } => {
4116 self.should_send_max_data = true;
4117 },
4118
4119 frame::Frame::MaxStreamsUni { .. } => {
4120 self.should_send_max_streams_uni = true;
4121 },
4122
4123 frame::Frame::MaxStreamsBidi { .. } => {
4124 self.should_send_max_streams_bidi = true;
4125 },
4126
4127 frame::Frame::NewConnectionId { seq_num, .. } => {
4128 self.ids.mark_advertise_new_scid_seq(seq_num, true);
4129 },
4130
4131 frame::Frame::RetireConnectionId { seq_num } => {
4132 self.ids.mark_retire_dcid_seq(seq_num, true)?;
4133 },
4134
4135 frame::Frame::Ping {
4136 mtu_probe: Some(failed_probe),
4137 } =>
4138 if let Some(pmtud) = p.pmtud.as_mut() {
4139 trace!("pmtud probe dropped: {failed_probe}");
4140 pmtud.failed_probe(failed_probe);
4141 },
4142
4143 _ => (),
4144 }
4145 }
4146 }
4147 self.check_tx_buffered_invariant();
4148
4149 let is_app_limited = self.delivery_rate_check_if_app_limited();
4150 let n_paths = self.paths.len();
4151 let path = self.paths.get_mut(send_pid)?;
4152 let flow_control = &mut self.flow_control;
4153 let pkt_space = &mut self.pkt_num_spaces[epoch];
4154 let crypto_ctx = &mut self.crypto_ctx[epoch];
4155 let pkt_num_manager = &mut self.pkt_num_manager;
4156
4157 let mut left = if let Some(pmtud) = path.pmtud.as_mut() {
4158 // Limit output buffer size by estimated path MTU.
4159 cmp::min(pmtud.get_current_mtu(), b.cap())
4160 } else {
4161 b.cap()
4162 };
4163
4164 if pkt_num_manager.should_skip_pn(self.handshake_completed) {
4165 pkt_num_manager.set_skip_pn(Some(self.next_pkt_num));
4166 self.next_pkt_num += 1;
4167 };
4168 let pn = self.next_pkt_num;
4169
4170 let largest_acked_pkt =
4171 path.recovery.get_largest_acked_on_epoch(epoch).unwrap_or(0);
4172 let pn_len = packet::pkt_num_len(pn, largest_acked_pkt);
4173
4174 // The AEAD overhead at the current encryption level.
4175 let crypto_overhead = crypto_ctx.crypto_overhead().ok_or(Error::Done)?;
4176
4177 let dcid_seq = path.active_dcid_seq.ok_or(Error::OutOfIdentifiers)?;
4178
4179 let dcid =
4180 ConnectionId::from_ref(self.ids.get_dcid(dcid_seq)?.cid.as_ref());
4181
4182 let scid = if let Some(scid_seq) = path.active_scid_seq {
4183 ConnectionId::from_ref(self.ids.get_scid(scid_seq)?.cid.as_ref())
4184 } else if pkt_type == Type::Short {
4185 ConnectionId::default()
4186 } else {
4187 return Err(Error::InvalidState);
4188 };
4189
4190 let hdr = Header {
4191 ty: pkt_type,
4192
4193 version: self.version,
4194
4195 dcid,
4196 scid,
4197
4198 pkt_num: 0,
4199 pkt_num_len: pn_len,
4200
4201 // Only clone token for Initial packets, as other packets don't have
4202 // this field (Retry doesn't count, as it's not encoded as part of
4203 // this code path).
4204 token: if pkt_type == Type::Initial {
4205 self.token.clone()
4206 } else {
4207 None
4208 },
4209
4210 versions: None,
4211 key_phase: self.key_phase,
4212 };
4213
4214 hdr.to_bytes(&mut b)?;
4215
4216 let hdr_trace = if log::max_level() == log::LevelFilter::Trace {
4217 Some(format!("{hdr:?}"))
4218 } else {
4219 None
4220 };
4221
4222 let hdr_ty = hdr.ty;
4223
4224 #[cfg(feature = "qlog")]
4225 let qlog_pkt_hdr = self.qlog.streamer.as_ref().map(|_q| {
4226 qlog::events::quic::PacketHeader::with_type(
4227 hdr.ty.to_qlog(),
4228 Some(pn),
4229 Some(hdr.version),
4230 Some(&hdr.scid),
4231 Some(&hdr.dcid),
4232 )
4233 });
4234
4235 // Calculate the space required for the packet, including the header
4236 // the payload length, the packet number and the AEAD overhead.
4237 let mut overhead = b.off() + pn_len + crypto_overhead;
4238
4239 // We assume that the payload length, which is only present in long
4240 // header packets, can always be encoded with a 2-byte varint.
4241 if pkt_type != Type::Short {
4242 overhead += PAYLOAD_LENGTH_LEN;
4243 }
4244
4245 // Make sure we have enough space left for the packet overhead.
4246 match left.checked_sub(overhead) {
4247 Some(v) => left = v,
4248
4249 None => {
4250 // We can't send more because there isn't enough space available
4251 // in the output buffer.
4252 //
4253 // This usually happens when we try to send a new packet but
4254 // failed because cwnd is almost full. In such case app_limited
4255 // is set to false here to make cwnd grow when ACK is received.
4256 path.recovery.update_app_limited(false);
4257 return Err(Error::Done);
4258 },
4259 }
4260
4261 // Make sure there is enough space for the minimum payload length.
4262 if left < PAYLOAD_MIN_LEN {
4263 path.recovery.update_app_limited(false);
4264 return Err(Error::Done);
4265 }
4266
4267 let mut frames: SmallVec<[frame::Frame; 1]> = SmallVec::new();
4268
4269 let mut ack_eliciting = false;
4270 let mut in_flight = false;
4271 let mut is_pmtud_probe = false;
4272 let mut has_data = false;
4273
4274 // Whether or not we should explicitly elicit an ACK via PING frame if we
4275 // implicitly elicit one otherwise.
4276 let ack_elicit_required = path.recovery.should_elicit_ack(epoch);
4277
4278 let header_offset = b.off();
4279
4280 // Reserve space for payload length in advance. Since we don't yet know
4281 // what the final length will be, we reserve 2 bytes in all cases.
4282 //
4283 // Only long header packets have an explicit length field.
4284 if pkt_type != Type::Short {
4285 b.skip(PAYLOAD_LENGTH_LEN)?;
4286 }
4287
4288 packet::encode_pkt_num(pn, pn_len, &mut b)?;
4289
4290 let payload_offset = b.off();
4291
4292 let cwnd_available =
4293 path.recovery.cwnd_available().saturating_sub(overhead);
4294
4295 let left_before_packing_ack_frame = left;
4296
4297 // Create ACK frame.
4298 //
4299 // When we need to explicitly elicit an ACK via PING later, go ahead and
4300 // generate an ACK (if there's anything to ACK) since we're going to
4301 // send a packet with PING anyways, even if we haven't received anything
4302 // ACK eliciting.
4303 if pkt_space.recv_pkt_need_ack.len() > 0 &&
4304 (pkt_space.ack_elicited || ack_elicit_required) &&
4305 (!is_closing ||
4306 (pkt_type == Type::Handshake &&
4307 self.local_error
4308 .as_ref()
4309 .is_some_and(|le| le.is_app))) &&
4310 path.active()
4311 {
4312 #[cfg(not(feature = "fuzzing"))]
4313 let ack_delay = pkt_space.largest_rx_pkt_time.elapsed();
4314
4315 #[cfg(not(feature = "fuzzing"))]
4316 let ack_delay = ack_delay.as_micros() as u64 /
4317 2_u64
4318 .pow(self.local_transport_params.ack_delay_exponent as u32);
4319
4320 // pseudo-random reproducible ack delays when fuzzing
4321 #[cfg(feature = "fuzzing")]
4322 let ack_delay = rand::rand_u8() as u64 + 1;
4323
4324 let frame = frame::Frame::ACK {
4325 ack_delay,
4326 ranges: pkt_space.recv_pkt_need_ack.clone(),
4327 ecn_counts: None, // sending ECN is not supported at this time
4328 };
4329
4330 // When a PING frame needs to be sent, avoid sending the ACK if
4331 // there is not enough cwnd available for both (note that PING
4332 // frames are always 1 byte, so we just need to check that the
4333 // ACK's length is lower than cwnd).
4334 if pkt_space.ack_elicited || frame.wire_len() < cwnd_available {
4335 // ACK-only packets are not congestion controlled so ACKs must
4336 // be bundled considering the buffer capacity only, and not the
4337 // available cwnd.
4338 if push_frame_to_pkt!(b, frames, frame, left) {
4339 pkt_space.ack_elicited = false;
4340 }
4341 }
4342 }
4343
4344 // Limit output packet size by congestion window size.
4345 left = cmp::min(
4346 left,
4347 // Bytes consumed by ACK frames.
4348 cwnd_available.saturating_sub(left_before_packing_ack_frame - left),
4349 );
4350
4351 let mut challenge_data = None;
4352
4353 if pkt_type == Type::Short {
4354 // Create PMTUD probe.
4355 //
4356 // In order to send a PMTUD probe the current `left` value, which was
4357 // already limited by the current PMTU measure, needs to be ignored,
4358 // but the outgoing packet still needs to be limited by
4359 // the output buffer size, as well as the congestion
4360 // window.
4361 //
4362 // In addition, the PMTUD probe is only generated when the handshake
4363 // is confirmed, to avoid interfering with the handshake
4364 // (e.g. due to the anti-amplification limits).
4365 if let Ok(active_path) = self.paths.get_active_mut() {
4366 let should_probe_pmtu = active_path.should_send_pmtu_probe(
4367 self.handshake_confirmed,
4368 self.handshake_completed,
4369 out_len,
4370 is_closing,
4371 frames.is_empty(),
4372 );
4373
4374 if should_probe_pmtu {
4375 if let Some(pmtud) = active_path.pmtud.as_mut() {
4376 let probe_size = pmtud.get_probe_size();
4377 trace!(
4378 "{} sending pmtud probe pmtu_probe={} estimated_pmtu={}",
4379 self.trace_id,
4380 probe_size,
4381 pmtud.get_current_mtu(),
4382 );
4383
4384 left = probe_size;
4385
4386 match left.checked_sub(overhead) {
4387 Some(v) => left = v,
4388
4389 None => {
4390 // We can't send more because there isn't enough
4391 // space available in the output buffer.
4392 //
4393 // This usually happens when we try to send a new
4394 // packet but failed because cwnd is almost full.
4395 //
4396 // In such case app_limited is set to false here
4397 // to make cwnd grow when ACK is received.
4398 active_path.recovery.update_app_limited(false);
4399 return Err(Error::Done);
4400 },
4401 }
4402
4403 let frame = frame::Frame::Padding {
4404 len: probe_size - overhead - 1,
4405 };
4406
4407 if push_frame_to_pkt!(b, frames, frame, left) {
4408 let frame = frame::Frame::Ping {
4409 mtu_probe: Some(probe_size),
4410 };
4411
4412 if push_frame_to_pkt!(b, frames, frame, left) {
4413 ack_eliciting = true;
4414 in_flight = true;
4415 }
4416 }
4417
4418 // Reset probe flag after sending to prevent duplicate
4419 // probes in a single flight.
4420 pmtud.set_in_flight(true);
4421 is_pmtud_probe = true;
4422 }
4423 }
4424 }
4425
4426 let path = self.paths.get_mut(send_pid)?;
4427 // Create PATH_RESPONSE frame if needed.
4428 // We do not try to ensure that these are really sent.
4429 while let Some(challenge) = path.pop_received_challenge() {
4430 let frame = frame::Frame::PathResponse { data: challenge };
4431
4432 if push_frame_to_pkt!(b, frames, frame, left) {
4433 ack_eliciting = true;
4434 in_flight = true;
4435 } else {
4436 // If there are other pending PATH_RESPONSE, don't lose them
4437 // now.
4438 break;
4439 }
4440 }
4441
4442 // Create PATH_CHALLENGE frame if needed.
4443 if path.validation_requested() {
4444 // TODO: ensure that data is unique over paths.
4445 let data = rand::rand_u64().to_be_bytes();
4446
4447 let frame = frame::Frame::PathChallenge { data };
4448
4449 if push_frame_to_pkt!(b, frames, frame, left) {
4450 // Let's notify the path once we know the packet size.
4451 challenge_data = Some(data);
4452
4453 ack_eliciting = true;
4454 in_flight = true;
4455 }
4456 }
4457
4458 if let Some(key_update) = crypto_ctx.key_update.as_mut() {
4459 key_update.update_acked = true;
4460 }
4461 }
4462
4463 let path = self.paths.get_mut(send_pid)?;
4464
4465 if pkt_type == Type::Short && !is_closing {
4466 // Create NEW_CONNECTION_ID frames as needed.
4467 while let Some(seq_num) = self.ids.next_advertise_new_scid_seq() {
4468 let frame = self.ids.get_new_connection_id_frame_for(seq_num)?;
4469
4470 if push_frame_to_pkt!(b, frames, frame, left) {
4471 self.ids.mark_advertise_new_scid_seq(seq_num, false);
4472
4473 ack_eliciting = true;
4474 in_flight = true;
4475 } else {
4476 break;
4477 }
4478 }
4479 }
4480
4481 if pkt_type == Type::Short && !is_closing && path.active() {
4482 // Create HANDSHAKE_DONE frame.
4483 // self.should_send_handshake_done() but without the need to borrow
4484 if self.handshake_completed &&
4485 !self.handshake_done_sent &&
4486 self.is_server
4487 {
4488 let frame = frame::Frame::HandshakeDone;
4489
4490 if push_frame_to_pkt!(b, frames, frame, left) {
4491 self.handshake_done_sent = true;
4492
4493 ack_eliciting = true;
4494 in_flight = true;
4495 }
4496 }
4497
4498 // Create MAX_STREAMS_BIDI frame.
4499 if self.streams.should_update_max_streams_bidi() ||
4500 self.should_send_max_streams_bidi
4501 {
4502 let frame = frame::Frame::MaxStreamsBidi {
4503 max: self.streams.max_streams_bidi_next(),
4504 };
4505
4506 if push_frame_to_pkt!(b, frames, frame, left) {
4507 self.streams.update_max_streams_bidi();
4508 self.should_send_max_streams_bidi = false;
4509
4510 ack_eliciting = true;
4511 in_flight = true;
4512 }
4513 }
4514
4515 // Create MAX_STREAMS_UNI frame.
4516 if self.streams.should_update_max_streams_uni() ||
4517 self.should_send_max_streams_uni
4518 {
4519 let frame = frame::Frame::MaxStreamsUni {
4520 max: self.streams.max_streams_uni_next(),
4521 };
4522
4523 if push_frame_to_pkt!(b, frames, frame, left) {
4524 self.streams.update_max_streams_uni();
4525 self.should_send_max_streams_uni = false;
4526
4527 ack_eliciting = true;
4528 in_flight = true;
4529 }
4530 }
4531
4532 // Create DATA_BLOCKED frame.
4533 if let Some(limit) = self.blocked_limit {
4534 let frame = frame::Frame::DataBlocked { limit };
4535
4536 if push_frame_to_pkt!(b, frames, frame, left) {
4537 self.blocked_limit = None;
4538 self.data_blocked_sent_count =
4539 self.data_blocked_sent_count.saturating_add(1);
4540
4541 ack_eliciting = true;
4542 in_flight = true;
4543 }
4544 }
4545
4546 // Create MAX_STREAM_DATA frames as needed.
4547 for stream_id in self.streams.almost_full() {
4548 let stream = match self.streams.get_mut(stream_id) {
4549 Some(v) => v,
4550
4551 None => {
4552 // The stream doesn't exist anymore, so remove it from
4553 // the almost full set.
4554 self.streams.remove_almost_full(stream_id);
4555 continue;
4556 },
4557 };
4558
4559 // Autotune the stream window size, but only if this is not a
4560 // retransmission (on a retransmit the stream will be in
4561 // `self.streams.almost_full()` but it's `almost_full()`
4562 // method returns false.
4563 if stream.recv.almost_full() {
4564 stream.recv.autotune_window(now, path.recovery.rtt());
4565 }
4566
4567 let frame = frame::Frame::MaxStreamData {
4568 stream_id,
4569 max: stream.recv.max_data_next(),
4570 };
4571
4572 if push_frame_to_pkt!(b, frames, frame, left) {
4573 let recv_win = stream.recv.window();
4574
4575 stream.recv.update_max_data(now);
4576
4577 self.streams.remove_almost_full(stream_id);
4578
4579 ack_eliciting = true;
4580 in_flight = true;
4581
4582 // Make sure the connection window always has some
4583 // room compared to the stream window.
4584 flow_control.ensure_window_lower_bound(
4585 (recv_win as f64 * CONNECTION_WINDOW_FACTOR) as u64,
4586 );
4587 }
4588 }
4589
4590 // Create MAX_DATA frame as needed.
4591 if flow_control.should_update_max_data() &&
4592 flow_control.max_data() < flow_control.max_data_next()
4593 {
4594 // Autotune the connection window size. We only tune the window
4595 // if we are sending an "organic" update, not on retransmits.
4596 flow_control.autotune_window(now, path.recovery.rtt());
4597 self.should_send_max_data = true;
4598 }
4599
4600 if self.should_send_max_data {
4601 let frame = frame::Frame::MaxData {
4602 max: flow_control.max_data_next(),
4603 };
4604
4605 if push_frame_to_pkt!(b, frames, frame, left) {
4606 self.should_send_max_data = false;
4607
4608 // Commits the new max_rx_data limit.
4609 flow_control.update_max_data(now);
4610
4611 ack_eliciting = true;
4612 in_flight = true;
4613 }
4614 }
4615
4616 // Create STOP_SENDING frames as needed.
4617 for (stream_id, error_code) in self
4618 .streams
4619 .stopped()
4620 .map(|(&k, &v)| (k, v))
4621 .collect::<Vec<(u64, u64)>>()
4622 {
4623 let frame = frame::Frame::StopSending {
4624 stream_id,
4625 error_code,
4626 };
4627
4628 if push_frame_to_pkt!(b, frames, frame, left) {
4629 self.streams.remove_stopped(stream_id);
4630
4631 ack_eliciting = true;
4632 in_flight = true;
4633 }
4634 }
4635
4636 // Create RESET_STREAM frames as needed.
4637 for (stream_id, (error_code, final_size)) in self
4638 .streams
4639 .reset()
4640 .map(|(&k, &v)| (k, v))
4641 .collect::<Vec<(u64, (u64, u64))>>()
4642 {
4643 let frame = frame::Frame::ResetStream {
4644 stream_id,
4645 error_code,
4646 final_size,
4647 };
4648
4649 if push_frame_to_pkt!(b, frames, frame, left) {
4650 self.streams.remove_reset(stream_id);
4651
4652 ack_eliciting = true;
4653 in_flight = true;
4654 }
4655 }
4656
4657 // Create STREAM_DATA_BLOCKED frames as needed.
4658 for (stream_id, limit) in self
4659 .streams
4660 .blocked()
4661 .map(|(&k, &v)| (k, v))
4662 .collect::<Vec<(u64, u64)>>()
4663 {
4664 let frame = frame::Frame::StreamDataBlocked { stream_id, limit };
4665
4666 if push_frame_to_pkt!(b, frames, frame, left) {
4667 self.streams.remove_blocked(stream_id);
4668 self.stream_data_blocked_sent_count =
4669 self.stream_data_blocked_sent_count.saturating_add(1);
4670
4671 ack_eliciting = true;
4672 in_flight = true;
4673 }
4674 }
4675
4676 // Create RETIRE_CONNECTION_ID frames as needed.
4677 let retire_dcid_seqs = self.ids.retire_dcid_seqs();
4678
4679 for seq_num in retire_dcid_seqs {
4680 // The sequence number specified in a RETIRE_CONNECTION_ID frame
4681 // MUST NOT refer to the Destination Connection ID field of the
4682 // packet in which the frame is contained.
4683 let dcid_seq = path.active_dcid_seq.ok_or(Error::InvalidState)?;
4684
4685 if seq_num == dcid_seq {
4686 continue;
4687 }
4688
4689 let frame = frame::Frame::RetireConnectionId { seq_num };
4690
4691 if push_frame_to_pkt!(b, frames, frame, left) {
4692 self.ids.mark_retire_dcid_seq(seq_num, false)?;
4693
4694 ack_eliciting = true;
4695 in_flight = true;
4696 } else {
4697 break;
4698 }
4699 }
4700 }
4701
4702 // Create CONNECTION_CLOSE frame. Try to send this only on the active
4703 // path, unless it is the last one available.
4704 if path.active() || n_paths == 1 {
4705 if let Some(conn_err) = self.local_error.as_ref() {
4706 if conn_err.is_app {
4707 // Create ApplicationClose frame.
4708 if pkt_type == Type::Short {
4709 let frame = frame::Frame::ApplicationClose {
4710 error_code: conn_err.error_code,
4711 reason: conn_err.reason.clone(),
4712 };
4713
4714 if push_frame_to_pkt!(b, frames, frame, left) {
4715 let pto = path.recovery.pto();
4716 self.draining_timer = Some(now + (pto * 3));
4717
4718 ack_eliciting = true;
4719 in_flight = true;
4720 }
4721 }
4722 } else {
4723 // Create ConnectionClose frame.
4724 let frame = frame::Frame::ConnectionClose {
4725 error_code: conn_err.error_code,
4726 frame_type: 0,
4727 reason: conn_err.reason.clone(),
4728 };
4729
4730 if push_frame_to_pkt!(b, frames, frame, left) {
4731 let pto = path.recovery.pto();
4732 self.draining_timer = Some(now + (pto * 3));
4733
4734 ack_eliciting = true;
4735 in_flight = true;
4736 }
4737 }
4738 }
4739 }
4740
4741 // Create CRYPTO frame.
4742 if crypto_ctx.crypto_stream.is_flushable() &&
4743 left > frame::MAX_CRYPTO_OVERHEAD &&
4744 !is_closing &&
4745 path.active()
4746 {
4747 let crypto_off = crypto_ctx.crypto_stream.send.off_front();
4748
4749 // Encode the frame.
4750 //
4751 // Instead of creating a `frame::Frame` object, encode the frame
4752 // directly into the packet buffer.
4753 //
4754 // First we reserve some space in the output buffer for writing the
4755 // frame header (we assume the length field is always a 2-byte
4756 // varint as we don't know the value yet).
4757 //
4758 // Then we emit the data from the crypto stream's send buffer.
4759 //
4760 // Finally we go back and encode the frame header with the now
4761 // available information.
4762 let hdr_off = b.off();
4763 let hdr_len = 1 + // frame type
4764 octets::varint_len(crypto_off) + // offset
4765 2; // length, always encode as 2-byte varint
4766
4767 if let Some(max_len) = left.checked_sub(hdr_len) {
4768 let (mut crypto_hdr, mut crypto_payload) =
4769 b.split_at(hdr_off + hdr_len)?;
4770
4771 // Write stream data into the packet buffer.
4772 let (len, _) = crypto_ctx
4773 .crypto_stream
4774 .send
4775 .emit(&mut crypto_payload.as_mut()[..max_len])?;
4776
4777 // Encode the frame's header.
4778 //
4779 // Due to how `OctetsMut::split_at()` works, `crypto_hdr` starts
4780 // from the initial offset of `b` (rather than the current
4781 // offset), so it needs to be advanced to the
4782 // initial frame offset.
4783 crypto_hdr.skip(hdr_off)?;
4784
4785 frame::encode_crypto_header(
4786 crypto_off,
4787 len as u64,
4788 &mut crypto_hdr,
4789 )?;
4790
4791 // Advance the packet buffer's offset.
4792 b.skip(hdr_len + len)?;
4793
4794 let frame = frame::Frame::CryptoHeader {
4795 offset: crypto_off,
4796 length: len,
4797 };
4798
4799 if push_frame_to_pkt!(b, frames, frame, left) {
4800 ack_eliciting = true;
4801 in_flight = true;
4802 has_data = true;
4803 }
4804 }
4805 }
4806
4807 // The preference of data-bearing frame to include in a packet
4808 // is managed by `self.emit_dgram`. However, whether any frames
4809 // can be sent depends on the state of their buffers. In the case
4810 // where one type is preferred but its buffer is empty, fall back
4811 // to the other type in order not to waste this function call.
4812 let mut dgram_emitted = false;
4813 let dgrams_to_emit = max_dgram_len.is_some();
4814 let stream_to_emit = self.streams.has_flushable();
4815
4816 let mut do_dgram = self.emit_dgram && dgrams_to_emit;
4817 let do_stream = !self.emit_dgram && stream_to_emit;
4818
4819 if !do_stream && dgrams_to_emit {
4820 do_dgram = true;
4821 }
4822
4823 // Create DATAGRAM frame.
4824 if (pkt_type == Type::Short || pkt_type == Type::ZeroRTT) &&
4825 left > frame::MAX_DGRAM_OVERHEAD &&
4826 !is_closing &&
4827 path.active() &&
4828 do_dgram
4829 {
4830 if let Some(max_dgram_payload) = max_dgram_len {
4831 while let Some(len) = self.dgram_send_queue.peek_front_len() {
4832 let hdr_off = b.off();
4833 let hdr_len = 1 + // frame type
4834 2; // length, always encode as 2-byte varint
4835
4836 if (hdr_len + len) <= left {
4837 // Front of the queue fits this packet, send it.
4838 match self.dgram_send_queue.pop() {
4839 Some(data) => {
4840 // Encode the frame.
4841 //
4842 // Instead of creating a `frame::Frame` object,
4843 // encode the frame directly into the packet
4844 // buffer.
4845 //
4846 // First we reserve some space in the output
4847 // buffer for writing the frame header (we
4848 // assume the length field is always a 2-byte
4849 // varint as we don't know the value yet).
4850 //
4851 // Then we emit the data from the DATAGRAM's
4852 // buffer.
4853 //
4854 // Finally we go back and encode the frame
4855 // header with the now available information.
4856 let (mut dgram_hdr, mut dgram_payload) =
4857 b.split_at(hdr_off + hdr_len)?;
4858
4859 dgram_payload.as_mut()[..len]
4860 .copy_from_slice(&data);
4861
4862 // Encode the frame's header.
4863 //
4864 // Due to how `OctetsMut::split_at()` works,
4865 // `dgram_hdr` starts from the initial offset
4866 // of `b` (rather than the current offset), so
4867 // it needs to be advanced to the initial frame
4868 // offset.
4869 dgram_hdr.skip(hdr_off)?;
4870
4871 frame::encode_dgram_header(
4872 len as u64,
4873 &mut dgram_hdr,
4874 )?;
4875
4876 // Advance the packet buffer's offset.
4877 b.skip(hdr_len + len)?;
4878
4879 let frame =
4880 frame::Frame::DatagramHeader { length: len };
4881
4882 if push_frame_to_pkt!(b, frames, frame, left) {
4883 ack_eliciting = true;
4884 in_flight = true;
4885 dgram_emitted = true;
4886 self.dgram_sent_count =
4887 self.dgram_sent_count.saturating_add(1);
4888 path.dgram_sent_count =
4889 path.dgram_sent_count.saturating_add(1);
4890 }
4891 },
4892
4893 None => continue,
4894 };
4895 } else if len > max_dgram_payload {
4896 // This dgram frame will never fit. Let's purge it.
4897 self.dgram_send_queue.pop();
4898 } else {
4899 break;
4900 }
4901 }
4902 }
4903 }
4904
4905 // Create a single STREAM frame for the first stream that is flushable.
4906 if (pkt_type == Type::Short || pkt_type == Type::ZeroRTT) &&
4907 left > frame::MAX_STREAM_OVERHEAD &&
4908 !is_closing &&
4909 path.active() &&
4910 !dgram_emitted
4911 {
4912 while let Some(priority_key) = self.streams.peek_flushable() {
4913 let stream_id = priority_key.id;
4914 let stream = match self.streams.get_mut(stream_id) {
4915 // Avoid sending frames for streams that were already stopped.
4916 //
4917 // This might happen if stream data was buffered but not yet
4918 // flushed on the wire when a STOP_SENDING frame is received.
4919 Some(v) if !v.send.is_stopped() => v,
4920 _ => {
4921 self.streams.remove_flushable(&priority_key);
4922 continue;
4923 },
4924 };
4925
4926 let stream_off = stream.send.off_front();
4927
4928 // Encode the frame.
4929 //
4930 // Instead of creating a `frame::Frame` object, encode the frame
4931 // directly into the packet buffer.
4932 //
4933 // First we reserve some space in the output buffer for writing
4934 // the frame header (we assume the length field is always a
4935 // 2-byte varint as we don't know the value yet).
4936 //
4937 // Then we emit the data from the stream's send buffer.
4938 //
4939 // Finally we go back and encode the frame header with the now
4940 // available information.
4941 let hdr_off = b.off();
4942 let hdr_len = 1 + // frame type
4943 octets::varint_len(stream_id) + // stream_id
4944 octets::varint_len(stream_off) + // offset
4945 2; // length, always encode as 2-byte varint
4946
4947 let max_len = match left.checked_sub(hdr_len) {
4948 Some(v) => v,
4949 None => {
4950 let priority_key = Arc::clone(&stream.priority_key);
4951 self.streams.remove_flushable(&priority_key);
4952
4953 continue;
4954 },
4955 };
4956
4957 let (mut stream_hdr, mut stream_payload) =
4958 b.split_at(hdr_off + hdr_len)?;
4959
4960 // Write stream data into the packet buffer.
4961 let (len, fin) =
4962 stream.send.emit(&mut stream_payload.as_mut()[..max_len])?;
4963
4964 // Encode the frame's header.
4965 //
4966 // Due to how `OctetsMut::split_at()` works, `stream_hdr` starts
4967 // from the initial offset of `b` (rather than the current
4968 // offset), so it needs to be advanced to the initial frame
4969 // offset.
4970 stream_hdr.skip(hdr_off)?;
4971
4972 frame::encode_stream_header(
4973 stream_id,
4974 stream_off,
4975 len as u64,
4976 fin,
4977 &mut stream_hdr,
4978 )?;
4979
4980 // Advance the packet buffer's offset.
4981 b.skip(hdr_len + len)?;
4982
4983 let frame = frame::Frame::StreamHeader {
4984 stream_id,
4985 offset: stream_off,
4986 length: len,
4987 fin,
4988 };
4989
4990 if push_frame_to_pkt!(b, frames, frame, left) {
4991 ack_eliciting = true;
4992 in_flight = true;
4993 has_data = true;
4994 }
4995
4996 let priority_key = Arc::clone(&stream.priority_key);
4997 // If the stream is no longer flushable, remove it from the queue
4998 if !stream.is_flushable() {
4999 self.streams.remove_flushable(&priority_key);
5000 } else if stream.incremental {
5001 // Shuffle the incremental stream to the back of the
5002 // queue.
5003 self.streams.remove_flushable(&priority_key);
5004 self.streams.insert_flushable(&priority_key);
5005 }
5006
5007 #[cfg(feature = "fuzzing")]
5008 // Coalesce STREAM frames when fuzzing.
5009 if left > frame::MAX_STREAM_OVERHEAD {
5010 continue;
5011 }
5012
5013 break;
5014 }
5015 }
5016
5017 // Alternate trying to send DATAGRAMs next time.
5018 self.emit_dgram = !dgram_emitted;
5019
5020 // If no other ack-eliciting frame is sent, include a PING frame
5021 // - if PTO probe needed; OR
5022 // - if we've sent too many non ack-eliciting packets without having
5023 // sent an ACK eliciting one; OR
5024 // - the application requested an ack-eliciting frame be sent.
5025 if (ack_elicit_required || path.needs_ack_eliciting) &&
5026 !ack_eliciting &&
5027 left >= 1 &&
5028 !is_closing
5029 {
5030 let frame = frame::Frame::Ping { mtu_probe: None };
5031
5032 if push_frame_to_pkt!(b, frames, frame, left) {
5033 ack_eliciting = true;
5034 in_flight = true;
5035 }
5036 }
5037
5038 if ack_eliciting && !is_pmtud_probe {
5039 path.needs_ack_eliciting = false;
5040 path.recovery.ping_sent(epoch);
5041 }
5042
5043 if !has_data &&
5044 !dgram_emitted &&
5045 cwnd_available > frame::MAX_STREAM_OVERHEAD
5046 {
5047 path.recovery.on_app_limited();
5048 }
5049
5050 if frames.is_empty() {
5051 // When we reach this point we are not able to write more, so set
5052 // app_limited to false.
5053 path.recovery.update_app_limited(false);
5054 return Err(Error::Done);
5055 }
5056
5057 // When coalescing a 1-RTT packet, we can't add padding in the UDP
5058 // datagram, so use PADDING frames instead.
5059 //
5060 // This is only needed if
5061 // 1) an Initial packet has already been written to the UDP datagram,
5062 // as Initial always requires padding.
5063 //
5064 // 2) this is a probing packet towards an unvalidated peer address.
5065 if (has_initial || !path.validated()) &&
5066 pkt_type == Type::Short &&
5067 left >= 1
5068 {
5069 let frame = frame::Frame::Padding { len: left };
5070
5071 if push_frame_to_pkt!(b, frames, frame, left) {
5072 in_flight = true;
5073 }
5074 }
5075
5076 // Pad payload so that it's always at least 4 bytes.
5077 if b.off() - payload_offset < PAYLOAD_MIN_LEN {
5078 let payload_len = b.off() - payload_offset;
5079
5080 let frame = frame::Frame::Padding {
5081 len: PAYLOAD_MIN_LEN - payload_len,
5082 };
5083
5084 #[allow(unused_assignments)]
5085 if push_frame_to_pkt!(b, frames, frame, left) {
5086 in_flight = true;
5087 }
5088 }
5089
5090 let payload_len = b.off() - payload_offset;
5091
5092 // Fill in payload length.
5093 if pkt_type != Type::Short {
5094 let len = pn_len + payload_len + crypto_overhead;
5095
5096 let (_, mut payload_with_len) = b.split_at(header_offset)?;
5097 payload_with_len
5098 .put_varint_with_len(len as u64, PAYLOAD_LENGTH_LEN)?;
5099 }
5100
5101 trace!(
5102 "{} tx pkt {} len={} pn={} {}",
5103 self.trace_id,
5104 hdr_trace.unwrap_or_default(),
5105 payload_len,
5106 pn,
5107 AddrTupleFmt(path.local_addr(), path.peer_addr())
5108 );
5109
5110 #[cfg(feature = "qlog")]
5111 let mut qlog_frames: SmallVec<
5112 [qlog::events::quic::QuicFrame; 1],
5113 > = SmallVec::with_capacity(frames.len());
5114
5115 for frame in &mut frames {
5116 trace!("{} tx frm {:?}", self.trace_id, frame);
5117
5118 qlog_with_type!(QLOG_PACKET_TX, self.qlog, _q, {
5119 qlog_frames.push(frame.to_qlog());
5120 });
5121 }
5122
5123 qlog_with_type!(QLOG_PACKET_TX, self.qlog, q, {
5124 if let Some(header) = qlog_pkt_hdr {
5125 // Qlog packet raw info described at
5126 // https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema-00#section-5.1
5127 //
5128 // `length` includes packet headers and trailers (AEAD tag).
5129 let length = payload_len + payload_offset + crypto_overhead;
5130 let qlog_raw_info = RawInfo {
5131 length: Some(length as u64),
5132 payload_length: Some(payload_len as u64),
5133 data: None,
5134 };
5135
5136 let send_at_time =
5137 now.duration_since(q.start_time()).as_secs_f64() * 1000.0;
5138
5139 let ev_data =
5140 EventData::PacketSent(qlog::events::quic::PacketSent {
5141 header,
5142 frames: Some(qlog_frames),
5143 raw: Some(qlog_raw_info),
5144 send_at_time: Some(send_at_time),
5145 ..Default::default()
5146 });
5147
5148 q.add_event_data_with_instant(ev_data, now).ok();
5149 }
5150 });
5151
5152 let aead = match crypto_ctx.crypto_seal {
5153 Some(ref mut v) => v,
5154 None => return Err(Error::InvalidState),
5155 };
5156
5157 let written = packet::encrypt_pkt(
5158 &mut b,
5159 pn,
5160 pn_len,
5161 payload_len,
5162 payload_offset,
5163 None,
5164 aead,
5165 )?;
5166
5167 let sent_pkt_has_data = if path.recovery.gcongestion_enabled() {
5168 has_data || dgram_emitted
5169 } else {
5170 has_data
5171 };
5172
5173 let sent_pkt = recovery::Sent {
5174 pkt_num: pn,
5175 frames,
5176 time_sent: now,
5177 time_acked: None,
5178 time_lost: None,
5179 size: if ack_eliciting { written } else { 0 },
5180 ack_eliciting,
5181 in_flight,
5182 delivered: 0,
5183 delivered_time: now,
5184 first_sent_time: now,
5185 is_app_limited: false,
5186 tx_in_flight: 0,
5187 lost: 0,
5188 has_data: sent_pkt_has_data,
5189 is_pmtud_probe,
5190 };
5191
5192 if in_flight && is_app_limited {
5193 path.recovery.delivery_rate_update_app_limited(true);
5194 }
5195
5196 self.next_pkt_num += 1;
5197
5198 let handshake_status = recovery::HandshakeStatus {
5199 has_handshake_keys: self.crypto_ctx[packet::Epoch::Handshake]
5200 .has_keys(),
5201 peer_verified_address: self.peer_verified_initial_address,
5202 completed: self.handshake_completed,
5203 };
5204
5205 self.on_packet_sent(send_pid, sent_pkt, epoch, handshake_status, now)?;
5206
5207 let path = self.paths.get_mut(send_pid)?;
5208 qlog_with_type!(QLOG_METRICS, self.qlog, q, {
5209 path.recovery.maybe_qlog(q, now);
5210 });
5211
5212 // Record sent packet size if we probe the path.
5213 if let Some(data) = challenge_data {
5214 path.add_challenge_sent(data, written, now);
5215 }
5216
5217 self.sent_count += 1;
5218 self.sent_bytes += written as u64;
5219 path.sent_count += 1;
5220 path.sent_bytes += written as u64;
5221
5222 if self.dgram_send_queue.byte_size() > path.recovery.cwnd_available() {
5223 path.recovery.update_app_limited(false);
5224 }
5225
5226 path.max_send_bytes = path.max_send_bytes.saturating_sub(written);
5227
5228 // On the client, drop initial state after sending an Handshake packet.
5229 if !self.is_server && hdr_ty == Type::Handshake {
5230 self.drop_epoch_state(packet::Epoch::Initial, now);
5231 }
5232
5233 // (Re)start the idle timer if we are sending the first ack-eliciting
5234 // packet since last receiving a packet.
5235 if ack_eliciting && !self.ack_eliciting_sent {
5236 if let Some(idle_timeout) = self.idle_timeout() {
5237 self.idle_timer = Some(now + idle_timeout);
5238 }
5239 }
5240
5241 if ack_eliciting {
5242 self.ack_eliciting_sent = true;
5243 }
5244
5245 Ok((pkt_type, written))
5246 }
5247
5248 fn on_packet_sent(
5249 &mut self, send_pid: usize, sent_pkt: recovery::Sent,
5250 epoch: packet::Epoch, handshake_status: recovery::HandshakeStatus,
5251 now: Instant,
5252 ) -> Result<()> {
5253 let path = self.paths.get_mut(send_pid)?;
5254
5255 // It's fine to set the skip counter based on a non-active path's values.
5256 let cwnd = path.recovery.cwnd();
5257 let max_datagram_size = path.recovery.max_datagram_size();
5258 self.pkt_num_spaces[epoch].on_packet_sent(&sent_pkt);
5259 self.pkt_num_manager.on_packet_sent(
5260 cwnd,
5261 max_datagram_size,
5262 self.handshake_completed,
5263 );
5264
5265 path.recovery.on_packet_sent(
5266 sent_pkt,
5267 epoch,
5268 handshake_status,
5269 now,
5270 &self.trace_id,
5271 );
5272
5273 Ok(())
5274 }
5275
5276 /// Returns the desired send time for the next packet.
5277 #[inline]
5278 pub fn get_next_release_time(&self) -> Option<ReleaseDecision> {
5279 Some(
5280 self.paths
5281 .get_active()
5282 .ok()?
5283 .recovery
5284 .get_next_release_time(),
5285 )
5286 }
5287
5288 /// Returns whether gcongestion is enabled.
5289 #[inline]
5290 pub fn gcongestion_enabled(&self) -> Option<bool> {
5291 Some(self.paths.get_active().ok()?.recovery.gcongestion_enabled())
5292 }
5293
5294 /// Returns the maximum pacing into the future.
5295 ///
5296 /// Equals 1/8 of the smoothed RTT, but at least 1ms and not greater than
5297 /// 5ms.
5298 pub fn max_release_into_future(&self) -> Duration {
5299 self.paths
5300 .get_active()
5301 .map(|p| p.recovery.rtt().mul_f64(0.125))
5302 .unwrap_or(Duration::from_millis(1))
5303 .max(Duration::from_millis(1))
5304 .min(Duration::from_millis(5))
5305 }
5306
    /// Returns whether pacing is enabled.
    ///
    /// Pacing is a connection-wide setting read from the recovery
    /// configuration, so no path lookup is needed.
    #[inline]
    pub fn pacing_enabled(&self) -> bool {
        self.recovery_config.pacing
    }
5312
5313 /// Returns the size of the send quantum, in bytes.
5314 ///
5315 /// This represents the maximum size of a packet burst as determined by the
5316 /// congestion control algorithm in use.
5317 ///
5318 /// Applications can, for example, use it in conjunction with segmentation
5319 /// offloading mechanisms as the maximum limit for outgoing aggregates of
5320 /// multiple packets.
5321 #[inline]
5322 pub fn send_quantum(&self) -> usize {
5323 match self.paths.get_active() {
5324 Ok(p) => p.recovery.send_quantum(),
5325 _ => 0,
5326 }
5327 }
5328
5329 /// Returns the size of the send quantum over the given 4-tuple, in bytes.
5330 ///
5331 /// This represents the maximum size of a packet burst as determined by the
5332 /// congestion control algorithm in use.
5333 ///
5334 /// Applications can, for example, use it in conjunction with segmentation
5335 /// offloading mechanisms as the maximum limit for outgoing aggregates of
5336 /// multiple packets.
5337 ///
5338 /// If the (`local_addr`, peer_addr`) 4-tuple relates to a non-existing
5339 /// path, this method returns 0.
5340 pub fn send_quantum_on_path(
5341 &self, local_addr: SocketAddr, peer_addr: SocketAddr,
5342 ) -> usize {
5343 self.paths
5344 .path_id_from_addrs(&(local_addr, peer_addr))
5345 .and_then(|pid| self.paths.get(pid).ok())
5346 .map(|path| path.recovery.send_quantum())
5347 .unwrap_or(0)
5348 }
5349
5350 /// Reads contiguous data from a stream into the provided slice.
5351 ///
5352 /// The slice must be sized by the caller and will be populated up to its
5353 /// capacity.
5354 ///
5355 /// On success the amount of bytes read and a flag indicating the fin state
5356 /// is returned as a tuple, or [`Done`] if there is no data to read.
5357 ///
5358 /// Reading data from a stream may trigger queueing of control messages
5359 /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
5360 ///
5361 /// [`Done`]: enum.Error.html#variant.Done
5362 /// [`send()`]: struct.Connection.html#method.send
5363 ///
5364 /// ## Examples:
5365 ///
5366 /// ```no_run
5367 /// # let mut buf = [0; 512];
5368 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
5369 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
5370 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
5371 /// # let peer = "127.0.0.1:1234".parse().unwrap();
5372 /// # let local = socket.local_addr().unwrap();
5373 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
5374 /// # let stream_id = 0;
5375 /// while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
5376 /// println!("Got {} bytes on stream {}", read, stream_id);
5377 /// }
5378 /// # Ok::<(), quiche::Error>(())
5379 /// ```
5380 pub fn stream_recv(
5381 &mut self, stream_id: u64, out: &mut [u8],
5382 ) -> Result<(usize, bool)> {
5383 self.do_stream_recv(stream_id, RecvAction::Emit { out })
5384 }
5385
5386 /// Discard contiguous data from a stream without copying.
5387 ///
5388 /// On success the amount of bytes discarded and a flag indicating the fin
5389 /// state is returned as a tuple, or [`Done`] if there is no data to
5390 /// discard.
5391 ///
5392 /// Discarding data from a stream may trigger queueing of control messages
5393 /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
5394 ///
5395 /// [`Done`]: enum.Error.html#variant.Done
5396 /// [`send()`]: struct.Connection.html#method.send
5397 ///
5398 /// ## Examples:
5399 ///
5400 /// ```no_run
5401 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
5402 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
5403 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
5404 /// # let peer = "127.0.0.1:1234".parse().unwrap();
5405 /// # let local = socket.local_addr().unwrap();
5406 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
5407 /// # let stream_id = 0;
5408 /// while let Ok((read, fin)) = conn.stream_discard(stream_id, 1) {
5409 /// println!("Discarded {} byte(s) on stream {}", read, stream_id);
5410 /// }
5411 /// # Ok::<(), quiche::Error>(())
5412 /// ```
5413 pub fn stream_discard(
5414 &mut self, stream_id: u64, len: usize,
5415 ) -> Result<(usize, bool)> {
5416 self.do_stream_recv(stream_id, RecvAction::Discard { len })
5417 }
5418
    // Reads or discards contiguous data from a stream.
    //
    // Passing an `action` of `StreamRecvAction::Emit` results in a read into
    // the provided slice. It must be sized by the caller and will be populated
    // up to its capacity.
    //
    // Passing an `action` of `StreamRecvAction::Discard` results in discard up
    // to the indicated length.
    //
    // On success the amount of bytes read or discarded, and a flag indicating
    // the fin state, is returned as a tuple, or [`Done`] if there is no data to
    // read or discard.
    //
    // Reading or discarding data from a stream may trigger queueing of control
    // messages (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
    //
    // [`Done`]: enum.Error.html#variant.Done
    // [`send()`]: struct.Connection.html#method.send
    fn do_stream_recv(
        &mut self, stream_id: u64, action: RecvAction,
    ) -> Result<(usize, bool)> {
        // We can't read on our own unidirectional streams.
        if !stream::is_bidi(stream_id) &&
            stream::is_local(stream_id, self.is_server)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        let stream = self
            .streams
            .get_mut(stream_id)
            .ok_or(Error::InvalidStreamState(stream_id))?;

        if !stream.is_readable() {
            return Err(Error::Done);
        }

        // Snapshot values that are needed after the stream may have been
        // collected or the mutable borrow released.
        let local = stream.local;
        let priority_key = Arc::clone(&stream.priority_key);

        // Record the read offset before emitting, for the qlog event below.
        #[cfg(feature = "qlog")]
        let offset = stream.recv.off_front();

        // Map the action to the qlog `DataMoved` recipient.
        #[cfg(feature = "qlog")]
        let to = match action {
            RecvAction::Emit { .. } => Some(DataRecipient::Application),

            RecvAction::Discard { .. } => Some(DataRecipient::Dropped),
        };

        let (read, fin) = match stream.recv.emit_or_discard(action) {
            Ok(v) => v,

            Err(e) => {
                // Collect the stream if it is now complete. This can happen if
                // we got a `StreamReset` error which will now be propagated to
                // the application, so we don't need to keep the stream's state
                // anymore.
                if stream.is_complete() {
                    self.streams.collect(stream_id, local);
                }

                self.streams.remove_readable(&priority_key);
                return Err(e);
            },
        };

        // Account for the consumed bytes in connection-level flow control.
        self.flow_control.add_consumed(read as u64);

        let readable = stream.is_readable();

        let complete = stream.is_complete();

        // Schedule a flow control update for this stream if its receive
        // buffer is nearly full.
        if stream.recv.almost_full() {
            self.streams.insert_almost_full(stream_id);
        }

        // Drop the stream from the readable queue once drained, and collect
        // its state once it is fully complete.
        if !readable {
            self.streams.remove_readable(&priority_key);
        }

        if complete {
            self.streams.collect(stream_id, local);
        }

        qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
            let ev_data = EventData::DataMoved(qlog::events::quic::DataMoved {
                stream_id: Some(stream_id),
                offset: Some(offset),
                length: Some(read as u64),
                from: Some(DataRecipient::Transport),
                to,
                ..Default::default()
            });

            let now = Instant::now();
            q.add_event_data_with_instant(ev_data, now).ok();
        });

        if priority_key.incremental && readable {
            // Shuffle the incremental stream to the back of the queue.
            self.streams.remove_readable(&priority_key);
            self.streams.insert_readable(&priority_key);
        }

        Ok((read, fin))
    }
5526
5527 /// Writes data to a stream.
5528 ///
5529 /// On success the number of bytes written is returned, or [`Done`] if no
5530 /// data was written (e.g. because the stream has no capacity).
5531 ///
5532 /// Applications can provide a 0-length buffer with the fin flag set to
5533 /// true. This will lead to a 0-length FIN STREAM frame being sent at the
5534 /// latest offset. The `Ok(0)` value is only returned when the application
5535 /// provided a 0-length buffer.
5536 ///
5537 /// In addition, if the peer has signalled that it doesn't want to receive
5538 /// any more data from this stream by sending the `STOP_SENDING` frame, the
5539 /// [`StreamStopped`] error will be returned instead of any data.
5540 ///
5541 /// Note that in order to avoid buffering an infinite amount of data in the
5542 /// stream's send buffer, streams are only allowed to buffer outgoing data
5543 /// up to the amount that the peer allows it to send (that is, up to the
5544 /// stream's outgoing flow control capacity).
5545 ///
5546 /// This means that the number of written bytes returned can be lower than
5547 /// the length of the input buffer when the stream doesn't have enough
5548 /// capacity for the operation to complete. The application should retry the
5549 /// operation once the stream is reported as writable again.
5550 ///
5551 /// Applications should call this method only after the handshake is
5552 /// completed (whenever [`is_established()`] returns `true`) or during
5553 /// early data if enabled (whenever [`is_in_early_data()`] returns `true`).
5554 ///
5555 /// [`Done`]: enum.Error.html#variant.Done
5556 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
5557 /// [`is_established()`]: struct.Connection.html#method.is_established
5558 /// [`is_in_early_data()`]: struct.Connection.html#method.is_in_early_data
5559 ///
5560 /// ## Examples:
5561 ///
5562 /// ```no_run
5563 /// # let mut buf = [0; 512];
5564 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
5565 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
5566 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
5567 /// # let peer = "127.0.0.1:1234".parse().unwrap();
5568 /// # let local = "127.0.0.1:4321".parse().unwrap();
5569 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
5570 /// # let stream_id = 0;
5571 /// conn.stream_send(stream_id, b"hello", true)?;
5572 /// # Ok::<(), quiche::Error>(())
5573 /// ```
5574 pub fn stream_send(
5575 &mut self, stream_id: u64, buf: &[u8], fin: bool,
5576 ) -> Result<usize> {
5577 self.stream_do_send(
5578 stream_id,
5579 buf,
5580 fin,
5581 |stream: &mut stream::Stream<F>,
5582 buf: &[u8],
5583 cap: usize,
5584 fin: bool| {
5585 stream.send.write(&buf[..cap], fin).map(|v| (v, v))
5586 },
5587 )
5588 }
5589
5590 /// Writes data to a stream with zero copying, instead, it appends the
5591 /// provided buffer directly to the send queue if the capacity allows
5592 /// it.
5593 ///
5594 /// When a partial write happens (including when [`Error::Done`] is
5595 /// returned) the remaining (unwritten) buffer will also be returned.
5596 /// The application should retry the operation once the stream is
5597 /// reported as writable again.
5598 pub fn stream_send_zc(
5599 &mut self, stream_id: u64, buf: F::Buf, len: Option<usize>, fin: bool,
5600 ) -> Result<(usize, Option<F::Buf>)>
5601 where
5602 F::Buf: BufSplit,
5603 {
5604 self.stream_do_send(
5605 stream_id,
5606 buf,
5607 fin,
5608 |stream: &mut stream::Stream<F>,
5609 buf: F::Buf,
5610 cap: usize,
5611 fin: bool| {
5612 let len = len.unwrap_or(usize::MAX).min(cap);
5613 let (sent, remaining) = stream.send.append_buf(buf, len, fin)?;
5614 Ok((sent, (sent, remaining)))
5615 },
5616 )
5617 }
5618
    // Common implementation of `stream_send()` and `stream_send_zc()`.
    //
    // `write_fn` performs the actual write into the stream's send buffer: it
    // receives the stream, the input buffer, the write capacity (already
    // truncated by the connection's send capacity) and the possibly-cleared
    // fin flag, and returns the number of bytes buffered together with the
    // caller-specific return value `R`.
    fn stream_do_send<B, R, SND>(
        &mut self, stream_id: u64, buf: B, fin: bool, write_fn: SND,
    ) -> Result<R>
    where
        B: AsRef<[u8]>,
        SND: FnOnce(&mut stream::Stream<F>, B, usize, bool) -> Result<(usize, R)>,
    {
        // We can't write on the peer's unidirectional streams.
        if !stream::is_bidi(stream_id) &&
            !stream::is_local(stream_id, self.is_server)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        let len = buf.as_ref().len();

        // Mark the connection as blocked if the connection-level flow control
        // limit doesn't let us buffer all the data.
        //
        // Note that this is separate from "send capacity" as that also takes
        // congestion control into consideration.
        if self.max_tx_data - self.tx_data < len as u64 {
            self.blocked_limit = Some(self.max_tx_data);
        }

        let cap = self.tx_cap;

        // Get existing stream or create a new one.
        let stream = self.get_or_create_stream(stream_id, true)?;

        // Record the write offset before buffering, for the qlog event below.
        #[cfg(feature = "qlog")]
        let offset = stream.send.off_back();

        // Snapshot the stream's state before the write, so queue membership
        // can be updated based on the transitions afterwards.
        let was_writable = stream.is_writable();

        let was_flushable = stream.is_flushable();

        let is_complete = stream.is_complete();
        let is_readable = stream.is_readable();

        let priority_key = Arc::clone(&stream.priority_key);

        // Return early if the stream has been stopped, and collect its state
        // if complete.
        if let Err(Error::StreamStopped(e)) = stream.send.cap() {
            // Only collect the stream if it is complete and not readable.
            // If it is readable, it will get collected when stream_recv()
            // is used.
            //
            // The stream can't be writable if it has been stopped.
            if is_complete && !is_readable {
                let local = stream.local;
                self.streams.collect(stream_id, local);
            }

            return Err(Error::StreamStopped(e));
        };

        // Truncate the input buffer based on the connection's send capacity if
        // necessary.
        //
        // When the cap is zero, the method returns Ok(0) *only* when the passed
        // buffer is empty. We return Error::Done otherwise.
        if cap == 0 && len > 0 {
            if was_writable {
                // When `stream_writable_next()` returns a stream, the writable
                // mark is removed, but because the stream is blocked by the
                // connection-level send capacity it won't be marked as writable
                // again once the capacity increases.
                //
                // Since the stream is writable already, mark it here instead.
                self.streams.insert_writable(&priority_key);
            }

            return Err(Error::Done);
        }

        // If the connection's capacity can't cover the whole buffer, truncate
        // the write to `cap` and clear the fin flag, remembering that the
        // write was capacity-limited.
        let (cap, fin, blocked_by_cap) = if cap < len {
            (cap, false, true)
        } else {
            (len, fin, false)
        };

        let (sent, ret) = match write_fn(stream, buf, cap, fin) {
            Ok(v) => v,

            Err(e) => {
                self.streams.remove_writable(&priority_key);
                return Err(e);
            },
        };

        let incremental = stream.incremental;
        let priority_key = Arc::clone(&stream.priority_key);

        let flushable = stream.is_flushable();

        let writable = stream.is_writable();

        let empty_fin = len == 0 && fin;

        // A short write means the stream-level limit was hit: record the
        // blocked offset (once) so STREAM_DATA_BLOCKED can be signalled.
        // Otherwise clear any previously recorded blocked state.
        if sent < cap {
            let max_off = stream.send.max_off();

            if stream.send.blocked_at() != Some(max_off) {
                stream.send.update_blocked_at(Some(max_off));
                self.streams.insert_blocked(stream_id, max_off);
            }
        } else {
            stream.send.update_blocked_at(None);
            self.streams.remove_blocked(stream_id);
        }

        // If the stream is now flushable push it to the flushable queue, but
        // only if it wasn't already queued.
        //
        // Consider the stream flushable also when we are sending a zero-length
        // frame that has the fin flag set.
        if (flushable || empty_fin) && !was_flushable {
            self.streams.insert_flushable(&priority_key);
        }

        if !writable {
            self.streams.remove_writable(&priority_key);
        } else if was_writable && blocked_by_cap {
            // When `stream_writable_next()` returns a stream, the writable
            // mark is removed, but because the stream is blocked by the
            // connection-level send capacity it won't be marked as writable
            // again once the capacity increases.
            //
            // Since the stream is writable already, mark it here instead.
            self.streams.insert_writable(&priority_key);
        }

        // Account for the buffered bytes against connection-level send
        // capacity, flow control and the buffered-data counter.
        self.tx_cap -= sent;

        self.tx_data += sent as u64;

        self.tx_buffered += sent;
        self.check_tx_buffered_invariant();

        qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
            let ev_data = EventData::DataMoved(qlog::events::quic::DataMoved {
                stream_id: Some(stream_id),
                offset: Some(offset),
                length: Some(sent as u64),
                from: Some(DataRecipient::Application),
                to: Some(DataRecipient::Transport),
                ..Default::default()
            });

            let now = Instant::now();
            q.add_event_data_with_instant(ev_data, now).ok();
        });

        // Nothing was buffered despite available capacity: report Done so
        // `Ok(0)` is reserved for genuinely empty input buffers.
        if sent == 0 && cap > 0 {
            return Err(Error::Done);
        }

        if incremental && writable {
            // Shuffle the incremental stream to the back of the queue.
            self.streams.remove_writable(&priority_key);
            self.streams.insert_writable(&priority_key);
        }

        Ok(ret)
    }
5786
5787 /// Sets the priority for a stream.
5788 ///
5789 /// A stream's priority determines the order in which stream data is sent
5790 /// on the wire (streams with lower priority are sent first). Streams are
5791 /// created with a default priority of `127`.
5792 ///
5793 /// The target stream is created if it did not exist before calling this
5794 /// method.
5795 pub fn stream_priority(
5796 &mut self, stream_id: u64, urgency: u8, incremental: bool,
5797 ) -> Result<()> {
5798 // Get existing stream or create a new one, but if the stream
5799 // has already been closed and collected, ignore the prioritization.
5800 let stream = match self.get_or_create_stream(stream_id, true) {
5801 Ok(v) => v,
5802
5803 Err(Error::Done) => return Ok(()),
5804
5805 Err(e) => return Err(e),
5806 };
5807
5808 if stream.urgency == urgency && stream.incremental == incremental {
5809 return Ok(());
5810 }
5811
5812 stream.urgency = urgency;
5813 stream.incremental = incremental;
5814
5815 let new_priority_key = Arc::new(StreamPriorityKey {
5816 urgency: stream.urgency,
5817 incremental: stream.incremental,
5818 id: stream_id,
5819 ..Default::default()
5820 });
5821
5822 let old_priority_key =
5823 std::mem::replace(&mut stream.priority_key, new_priority_key.clone());
5824
5825 self.streams
5826 .update_priority(&old_priority_key, &new_priority_key);
5827
5828 Ok(())
5829 }
5830
    /// Shuts down reading or writing from/to the specified stream.
    ///
    /// When the `direction` argument is set to [`Shutdown::Read`], outstanding
    /// data in the stream's receive buffer is dropped, and no additional data
    /// is added to it. Data received after calling this method is still
    /// validated and acked but not stored, and [`stream_recv()`] will not
    /// return it to the application. In addition, a `STOP_SENDING` frame will
    /// be sent to the peer to signal it to stop sending data.
    ///
    /// When the `direction` argument is set to [`Shutdown::Write`], outstanding
    /// data in the stream's send buffer is dropped, and no additional data is
    /// added to it. Data passed to [`stream_send()`] after calling this method
    /// will be ignored. In addition, a `RESET_STREAM` frame will be sent to the
    /// peer to signal the reset.
    ///
    /// Locally-initiated unidirectional streams can only be closed in the
    /// [`Shutdown::Write`] direction. Remotely-initiated unidirectional streams
    /// can only be closed in the [`Shutdown::Read`] direction. Using an
    /// incorrect direction will return [`InvalidStreamState`].
    ///
    /// [`Shutdown::Read`]: enum.Shutdown.html#variant.Read
    /// [`Shutdown::Write`]: enum.Shutdown.html#variant.Write
    /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
    /// [`stream_send()`]: struct.Connection.html#method.stream_send
    /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
    pub fn stream_shutdown(
        &mut self, stream_id: u64, direction: Shutdown, err: u64,
    ) -> Result<()> {
        // Don't try to stop a local unidirectional stream.
        if direction == Shutdown::Read &&
            stream::is_local(stream_id, self.is_server) &&
            !stream::is_bidi(stream_id)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        // Don't try to reset a remote unidirectional stream.
        if direction == Shutdown::Write &&
            !stream::is_local(stream_id, self.is_server) &&
            !stream::is_bidi(stream_id)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        // Get existing stream.
        let stream = self.streams.get_mut(stream_id).ok_or(Error::Done)?;

        // Keep a handle to the priority key so the stream can be removed
        // from the readable/writable queues below.
        let priority_key = Arc::clone(&stream.priority_key);

        match direction {
            Shutdown::Read => {
                // Drop buffered receive data; the dropped bytes still count
                // as consumed for connection-level flow control purposes.
                let consumed = stream.recv.shutdown()?;
                self.flow_control.add_consumed(consumed);

                // Only signal STOP_SENDING when the peer hasn't already
                // finished the stream; there is nothing to stop otherwise.
                if !stream.recv.is_fin() {
                    self.streams.insert_stopped(stream_id, err);
                }

                // Once shutdown, the stream is guaranteed to be non-readable.
                self.streams.remove_readable(&priority_key);

                self.stopped_stream_local_count =
                    self.stopped_stream_local_count.saturating_add(1);
            },

            Shutdown::Write => {
                let (final_size, unsent) = stream.send.shutdown()?;

                // Claw back some flow control allowance from data that was
                // buffered but not actually sent before the stream was reset.
                self.tx_data = self.tx_data.saturating_sub(unsent);

                self.tx_buffered =
                    self.tx_buffered.saturating_sub(unsent as usize);

                // These drops in qlog are a bit weird, but the only way to ensure
                // that all bytes that are moved from App to Transport in
                // stream_do_send are eventually moved from Transport to Dropped.
                // Ideally we would add a Transport to Network transition also as
                // a way to indicate when bytes were transmitted vs dropped
                // without ever being sent.
                qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
                    let ev_data =
                        EventData::DataMoved(qlog::events::quic::DataMoved {
                            stream_id: Some(stream_id),
                            offset: Some(final_size),
                            length: Some(unsent),
                            from: Some(DataRecipient::Transport),
                            to: Some(DataRecipient::Dropped),
                            ..Default::default()
                        });

                    q.add_event_data_with_instant(ev_data, Instant::now()).ok();
                });

                // Update send capacity.
                self.update_tx_cap();

                // Schedule a RESET_STREAM frame carrying the application
                // error code and the stream's final size.
                self.streams.insert_reset(stream_id, err, final_size);

                // Once shutdown, the stream is guaranteed to be non-writable.
                self.streams.remove_writable(&priority_key);

                self.reset_stream_local_count =
                    self.reset_stream_local_count.saturating_add(1);
            },
        }

        Ok(())
    }
5941
5942 /// Returns the stream's send capacity in bytes.
5943 ///
5944 /// If the specified stream doesn't exist (including when it has already
5945 /// been completed and closed), the [`InvalidStreamState`] error will be
5946 /// returned.
5947 ///
5948 /// In addition, if the peer has signalled that it doesn't want to receive
5949 /// any more data from this stream by sending the `STOP_SENDING` frame, the
5950 /// [`StreamStopped`] error will be returned.
5951 ///
5952 /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
5953 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
5954 #[inline]
5955 pub fn stream_capacity(&mut self, stream_id: u64) -> Result<usize> {
5956 if let Some(stream) = self.streams.get(stream_id) {
5957 let stream_cap = match stream.send.cap() {
5958 Ok(v) => v,
5959
5960 Err(Error::StreamStopped(e)) => {
5961 // Only collect the stream if it is complete and not
5962 // readable. If it is readable, it will get collected when
5963 // stream_recv() is used.
5964 if stream.is_complete() && !stream.is_readable() {
5965 let local = stream.local;
5966 self.streams.collect(stream_id, local);
5967 }
5968
5969 return Err(Error::StreamStopped(e));
5970 },
5971
5972 Err(e) => return Err(e),
5973 };
5974
5975 let cap = cmp::min(self.tx_cap, stream_cap);
5976 return Ok(cap);
5977 };
5978
5979 Err(Error::InvalidStreamState(stream_id))
5980 }
5981
5982 /// Returns the next stream that has data to read.
5983 ///
5984 /// Note that once returned by this method, a stream ID will not be returned
5985 /// again until it is "re-armed".
5986 ///
5987 /// The application will need to read all of the pending data on the stream,
5988 /// and new data has to be received before the stream is reported again.
5989 ///
5990 /// This is unlike the [`readable()`] method, that returns the same list of
5991 /// readable streams when called multiple times in succession.
5992 ///
5993 /// [`readable()`]: struct.Connection.html#method.readable
5994 pub fn stream_readable_next(&mut self) -> Option<u64> {
5995 let priority_key = self.streams.readable.front().clone_pointer()?;
5996
5997 self.streams.remove_readable(&priority_key);
5998
5999 Some(priority_key.id)
6000 }
6001
6002 /// Returns true if the stream has data that can be read.
6003 pub fn stream_readable(&self, stream_id: u64) -> bool {
6004 let stream = match self.streams.get(stream_id) {
6005 Some(v) => v,
6006
6007 None => return false,
6008 };
6009
6010 stream.is_readable()
6011 }
6012
    /// Returns the next stream that can be written to.
    ///
    /// Note that once returned by this method, a stream ID will not be returned
    /// again until it is "re-armed".
    ///
    /// This is unlike the [`writable()`] method, that returns the same list of
    /// writable streams when called multiple times in succession. It is not
    /// advised to use both `stream_writable_next()` and [`writable()`] on the
    /// same connection, as it may lead to unexpected results.
    ///
    /// The [`stream_writable()`] method can also be used to fine-tune when a
    /// stream is reported as writable again.
    ///
    /// [`stream_writable()`]: struct.Connection.html#method.stream_writable
    /// [`writable()`]: struct.Connection.html#method.writable
    pub fn stream_writable_next(&mut self) -> Option<u64> {
        // If there is not enough connection-level send capacity, none of the
        // streams are writable.
        if self.tx_cap == 0 {
            return None;
        }

        // Walk the writable queue in order, returning the first stream that
        // either has enough capacity or is stopped.
        let mut cursor = self.streams.writable.front();

        while let Some(priority_key) = cursor.clone_pointer() {
            if let Some(stream) = self.streams.get(priority_key.id) {
                let cap = match stream.send.cap() {
                    Ok(v) => v,

                    // Return the stream to the application immediately if it's
                    // stopped.
                    Err(_) =>
                        return {
                            self.streams.remove_writable(&priority_key);

                            Some(priority_key.id)
                        },
                };

                // Only report the stream when it can take at least
                // `send_lowat` bytes (see `stream_writable()`); de-arm it by
                // removing the writable mark before returning.
                if cmp::min(self.tx_cap, cap) >= stream.send_lowat {
                    self.streams.remove_writable(&priority_key);
                    return Some(priority_key.id);
                }
            }

            cursor.move_next();
        }

        None
    }
6063
    /// Returns true if the stream has enough send capacity.
    ///
    /// When `len` more bytes can be buffered into the given stream's send
    /// buffer, `true` will be returned, `false` otherwise.
    ///
    /// In the latter case, if the additional data can't be buffered due to
    /// flow control limits, the peer will also be notified, and a "low send
    /// watermark" will be set for the stream, such that it is not going to be
    /// reported as writable again by [`stream_writable_next()`] until its send
    /// capacity reaches `len`.
    ///
    /// If the specified stream doesn't exist (including when it has already
    /// been completed and closed), the [`InvalidStreamState`] error will be
    /// returned.
    ///
    /// In addition, if the peer has signalled that it doesn't want to receive
    /// any more data from this stream by sending the `STOP_SENDING` frame, the
    /// [`StreamStopped`] error will be returned.
    ///
    /// [`stream_writable_next()`]: struct.Connection.html#method.stream_writable_next
    /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
    /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
    #[inline]
    pub fn stream_writable(
        &mut self, stream_id: u64, len: usize,
    ) -> Result<bool> {
        // Fast path: combined connection- and stream-level capacity suffices.
        if self.stream_capacity(stream_id)? >= len {
            return Ok(true);
        }

        let stream = match self.streams.get_mut(stream_id) {
            Some(v) => v,

            None => return Err(Error::InvalidStreamState(stream_id)),
        };

        // Remember the requested amount as the stream's low send watermark
        // (minimum 1 byte), so `stream_writable_next()` won't report the
        // stream again until that much capacity is available.
        stream.send_lowat = cmp::max(1, len);

        let is_writable = stream.is_writable();

        let priority_key = Arc::clone(&stream.priority_key);

        // Blocked by connection-level flow control: record the current limit
        // so the peer can be notified.
        if self.max_tx_data - self.tx_data < len as u64 {
            self.blocked_limit = Some(self.max_tx_data);
        }

        if stream.send.cap()? < len {
            // Blocked by stream-level flow control: mark the stream blocked
            // at its current max offset, unless that's already pending.
            let max_off = stream.send.max_off();
            if stream.send.blocked_at() != Some(max_off) {
                stream.send.update_blocked_at(Some(max_off));
                self.streams.insert_blocked(stream_id, max_off);
            }
        } else if is_writable {
            // When `stream_writable_next()` returns a stream, the writable
            // mark is removed, but because the stream is blocked by the
            // connection-level send capacity it won't be marked as writable
            // again once the capacity increases.
            //
            // Since the stream is writable already, mark it here instead.
            self.streams.insert_writable(&priority_key);
        }

        Ok(false)
    }
6128
6129 /// Returns true if all the data has been read from the specified stream.
6130 ///
6131 /// This instructs the application that all the data received from the
6132 /// peer on the stream has been read, and there won't be anymore in the
6133 /// future.
6134 ///
6135 /// Basically this returns true when the peer either set the `fin` flag
6136 /// for the stream, or sent `RESET_STREAM`.
6137 #[inline]
6138 pub fn stream_finished(&self, stream_id: u64) -> bool {
6139 let stream = match self.streams.get(stream_id) {
6140 Some(v) => v,
6141
6142 None => return true,
6143 };
6144
6145 stream.recv.is_fin()
6146 }
6147
    /// Returns the number of bidirectional streams that can be created
    /// before the peer's stream count limit is reached.
    ///
    /// This can be useful to know if it's possible to create a bidirectional
    /// stream without trying it first.
    #[inline]
    pub fn peer_streams_left_bidi(&self) -> u64 {
        // The peer's advertised stream-count limit is tracked by the stream
        // map, which computes the remaining allowance.
        self.streams.peer_streams_left_bidi()
    }
6157
    /// Returns the number of unidirectional streams that can be created
    /// before the peer's stream count limit is reached.
    ///
    /// This can be useful to know if it's possible to create a unidirectional
    /// stream without trying it first.
    #[inline]
    pub fn peer_streams_left_uni(&self) -> u64 {
        // The peer's advertised stream-count limit is tracked by the stream
        // map, which computes the remaining allowance.
        self.streams.peer_streams_left_uni()
    }
6167
    /// Returns an iterator over streams that have outstanding data to read.
    ///
    /// Note that the iterator will only include streams that were readable at
    /// the time the iterator itself was created (i.e. when `readable()` was
    /// called). To account for newly readable streams, the iterator needs to
    /// be created again.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut buf = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// // Iterate over readable streams.
    /// for stream_id in conn.readable() {
    ///     // Stream is readable, read until there's no more data.
    ///     while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
    ///         println!("Got {} bytes on stream {}", read, stream_id);
    ///     }
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    #[inline]
    pub fn readable(&self) -> StreamIter {
        // Snapshot of the currently readable streams; later changes aren't
        // reflected in an already-created iterator.
        self.streams.readable()
    }
6198
6199 /// Returns an iterator over streams that can be written in priority order.
6200 ///
6201 /// The priority order is based on RFC 9218 scheduling recommendations.
6202 /// Stream priority can be controlled using [`stream_priority()`]. In order
6203 /// to support fairness requirements, each time this method is called,
6204 /// internal state is updated. Therefore the iterator ordering can change
6205 /// between calls, even if no streams were added or removed.
6206 ///
6207 /// A "writable" stream is a stream that has enough flow control capacity to
6208 /// send data to the peer. To avoid buffering an infinite amount of data,
6209 /// streams are only allowed to buffer outgoing data up to the amount that
6210 /// the peer allows to send.
6211 ///
6212 /// Note that the iterator will only include streams that were writable at
6213 /// the time the iterator itself was created (i.e. when `writable()` was
6214 /// called). To account for newly writable streams, the iterator needs to be
6215 /// created again.
6216 ///
6217 /// ## Examples:
6218 ///
6219 /// ```no_run
6220 /// # let mut buf = [0; 512];
6221 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6222 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6223 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6224 /// # let local = socket.local_addr().unwrap();
6225 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6226 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6227 /// // Iterate over writable streams.
6228 /// for stream_id in conn.writable() {
6229 /// // Stream is writable, write some data.
6230 /// if let Ok(written) = conn.stream_send(stream_id, &buf, false) {
6231 /// println!("Written {} bytes on stream {}", written, stream_id);
6232 /// }
6233 /// }
6234 /// # Ok::<(), quiche::Error>(())
6235 /// ```
6236 /// [`stream_priority()`]: struct.Connection.html#method.stream_priority
6237 #[inline]
6238 pub fn writable(&self) -> StreamIter {
6239 // If there is not enough connection-level send capacity, none of the
6240 // streams are writable, so return an empty iterator.
6241 if self.tx_cap == 0 {
6242 return StreamIter::default();
6243 }
6244
6245 self.streams.writable()
6246 }
6247
6248 /// Returns the maximum possible size of egress UDP payloads.
6249 ///
6250 /// This is the maximum size of UDP payloads that can be sent, and depends
6251 /// on both the configured maximum send payload size of the local endpoint
6252 /// (as configured with [`set_max_send_udp_payload_size()`]), as well as
6253 /// the transport parameter advertised by the remote peer.
6254 ///
6255 /// Note that this value can change during the lifetime of the connection,
6256 /// but should remain stable across consecutive calls to [`send()`].
6257 ///
6258 /// [`set_max_send_udp_payload_size()`]:
6259 /// struct.Config.html#method.set_max_send_udp_payload_size
6260 /// [`send()`]: struct.Connection.html#method.send
6261 pub fn max_send_udp_payload_size(&self) -> usize {
6262 let max_datagram_size = self
6263 .paths
6264 .get_active()
6265 .ok()
6266 .map(|p| p.recovery.max_datagram_size());
6267
6268 if let Some(max_datagram_size) = max_datagram_size {
6269 if self.is_established() {
6270 // We cap the maximum packet size to 16KB or so, so that it can be
6271 // always encoded with a 2-byte varint.
6272 return cmp::min(16383, max_datagram_size);
6273 }
6274 }
6275
6276 // Allow for 1200 bytes (minimum QUIC packet size) during the
6277 // handshake.
6278 MIN_CLIENT_INITIAL_LEN
6279 }
6280
6281 /// Schedule an ack-eliciting packet on the active path.
6282 ///
6283 /// QUIC packets might not contain ack-eliciting frames during normal
6284 /// operating conditions. If the packet would already contain
6285 /// ack-eliciting frames, this method does not change any behavior.
6286 /// However, if the packet would not ordinarily contain ack-eliciting
6287 /// frames, this method ensures that a PING frame sent.
6288 ///
6289 /// Calling this method multiple times before [`send()`] has no effect.
6290 ///
6291 /// [`send()`]: struct.Connection.html#method.send
6292 pub fn send_ack_eliciting(&mut self) -> Result<()> {
6293 if self.is_closed() || self.is_draining() {
6294 return Ok(());
6295 }
6296 self.paths.get_active_mut()?.needs_ack_eliciting = true;
6297 Ok(())
6298 }
6299
6300 /// Schedule an ack-eliciting packet on the specified path.
6301 ///
6302 /// See [`send_ack_eliciting()`] for more detail. [`InvalidState`] is
6303 /// returned if there is no record of the path.
6304 ///
6305 /// [`send_ack_eliciting()`]: struct.Connection.html#method.send_ack_eliciting
6306 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
6307 pub fn send_ack_eliciting_on_path(
6308 &mut self, local: SocketAddr, peer: SocketAddr,
6309 ) -> Result<()> {
6310 if self.is_closed() || self.is_draining() {
6311 return Ok(());
6312 }
6313 let path_id = self
6314 .paths
6315 .path_id_from_addrs(&(local, peer))
6316 .ok_or(Error::InvalidState)?;
6317 self.paths.get_mut(path_id)?.needs_ack_eliciting = true;
6318 Ok(())
6319 }
6320
6321 /// Reads the first received DATAGRAM.
6322 ///
6323 /// On success the DATAGRAM's data is returned along with its size.
6324 ///
6325 /// [`Done`] is returned if there is no data to read.
6326 ///
6327 /// [`BufferTooShort`] is returned if the provided buffer is too small for
6328 /// the DATAGRAM.
6329 ///
6330 /// [`Done`]: enum.Error.html#variant.Done
6331 /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
6332 ///
6333 /// ## Examples:
6334 ///
6335 /// ```no_run
6336 /// # let mut buf = [0; 512];
6337 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6338 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6339 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6340 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6341 /// # let local = socket.local_addr().unwrap();
6342 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6343 /// let mut dgram_buf = [0; 512];
6344 /// while let Ok((len)) = conn.dgram_recv(&mut dgram_buf) {
6345 /// println!("Got {} bytes of DATAGRAM", len);
6346 /// }
6347 /// # Ok::<(), quiche::Error>(())
6348 /// ```
6349 #[inline]
6350 pub fn dgram_recv(&mut self, buf: &mut [u8]) -> Result<usize> {
6351 match self.dgram_recv_queue.pop() {
6352 Some(d) => {
6353 if d.len() > buf.len() {
6354 return Err(Error::BufferTooShort);
6355 }
6356
6357 buf[..d.len()].copy_from_slice(&d);
6358 Ok(d.len())
6359 },
6360
6361 None => Err(Error::Done),
6362 }
6363 }
6364
6365 /// Reads the first received DATAGRAM.
6366 ///
6367 /// This is the same as [`dgram_recv()`] but returns the DATAGRAM as a
6368 /// `Vec<u8>` instead of copying into the provided buffer.
6369 ///
6370 /// [`dgram_recv()`]: struct.Connection.html#method.dgram_recv
6371 #[inline]
6372 pub fn dgram_recv_vec(&mut self) -> Result<Vec<u8>> {
6373 match self.dgram_recv_queue.pop() {
6374 Some(d) => Ok(d),
6375
6376 None => Err(Error::Done),
6377 }
6378 }
6379
    /// Reads the first received DATAGRAM without removing it from the queue.
    ///
    /// On success the DATAGRAM's data is returned along with the actual number
    /// of bytes peeked. The requested length cannot exceed the DATAGRAM's
    /// actual length.
    ///
    /// [`Done`] is returned if there is no data to read.
    ///
    /// [`BufferTooShort`] is returned if the provided buffer is smaller the
    /// number of bytes to peek.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
    #[inline]
    pub fn dgram_recv_peek(&self, buf: &mut [u8], len: usize) -> Result<usize> {
        // Non-destructive (`&self`): only copies bytes out of the queue head.
        self.dgram_recv_queue.peek_front_bytes(buf, len)
    }
6397
    /// Returns the length of the first stored DATAGRAM.
    ///
    /// Non-destructive (`&self`): the DATAGRAM itself is left in the queue.
    #[inline]
    pub fn dgram_recv_front_len(&self) -> Option<usize> {
        self.dgram_recv_queue.peek_front_len()
    }
6403
    /// Returns the number of items in the DATAGRAM receive queue.
    ///
    /// This is a count of DATAGRAMs, not their total byte size (see
    /// `dgram_recv_queue_byte_size()` for that).
    #[inline]
    pub fn dgram_recv_queue_len(&self) -> usize {
        self.dgram_recv_queue.len()
    }
6409
    /// Returns the total size of all items in the DATAGRAM receive queue.
    ///
    /// This is a total byte size, not a count of DATAGRAMs (see
    /// `dgram_recv_queue_len()` for that).
    #[inline]
    pub fn dgram_recv_queue_byte_size(&self) -> usize {
        self.dgram_recv_queue.byte_size()
    }
6415
    /// Returns the number of items in the DATAGRAM send queue.
    ///
    /// This is a count of DATAGRAMs, not their total byte size (see
    /// `dgram_send_queue_byte_size()` for that).
    #[inline]
    pub fn dgram_send_queue_len(&self) -> usize {
        self.dgram_send_queue.len()
    }
6421
    /// Returns the total size of all items in the DATAGRAM send queue.
    ///
    /// This is a total byte size, not a count of DATAGRAMs (see
    /// `dgram_send_queue_len()` for that).
    #[inline]
    pub fn dgram_send_queue_byte_size(&self) -> usize {
        self.dgram_send_queue.byte_size()
    }
6427
    /// Returns whether or not the DATAGRAM send queue is full.
    ///
    /// The queue's fullness criterion is defined by the send queue itself.
    #[inline]
    pub fn is_dgram_send_queue_full(&self) -> bool {
        self.dgram_send_queue.is_full()
    }
6433
    /// Returns whether or not the DATAGRAM recv queue is full.
    ///
    /// The queue's fullness criterion is defined by the receive queue itself.
    #[inline]
    pub fn is_dgram_recv_queue_full(&self) -> bool {
        self.dgram_recv_queue.is_full()
    }
6439
6440 /// Sends data in a DATAGRAM frame.
6441 ///
6442 /// [`Done`] is returned if no data was written.
6443 /// [`InvalidState`] is returned if the peer does not support DATAGRAM.
6444 /// [`BufferTooShort`] is returned if the DATAGRAM frame length is larger
6445 /// than peer's supported DATAGRAM frame length. Use
6446 /// [`dgram_max_writable_len()`] to get the largest supported DATAGRAM
6447 /// frame length.
6448 ///
6449 /// Note that there is no flow control of DATAGRAM frames, so in order to
6450 /// avoid buffering an infinite amount of frames we apply an internal
6451 /// limit.
6452 ///
6453 /// [`Done`]: enum.Error.html#variant.Done
6454 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
6455 /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
6456 /// [`dgram_max_writable_len()`]:
6457 /// struct.Connection.html#method.dgram_max_writable_len
6458 ///
6459 /// ## Examples:
6460 ///
6461 /// ```no_run
6462 /// # let mut buf = [0; 512];
6463 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6464 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6465 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6466 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6467 /// # let local = socket.local_addr().unwrap();
6468 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6469 /// conn.dgram_send(b"hello")?;
6470 /// # Ok::<(), quiche::Error>(())
6471 /// ```
6472 pub fn dgram_send(&mut self, buf: &[u8]) -> Result<()> {
6473 let max_payload_len = match self.dgram_max_writable_len() {
6474 Some(v) => v,
6475
6476 None => return Err(Error::InvalidState),
6477 };
6478
6479 if buf.len() > max_payload_len {
6480 return Err(Error::BufferTooShort);
6481 }
6482
6483 self.dgram_send_queue.push(buf.to_vec())?;
6484
6485 let active_path = self.paths.get_active_mut()?;
6486
6487 if self.dgram_send_queue.byte_size() >
6488 active_path.recovery.cwnd_available()
6489 {
6490 active_path.recovery.update_app_limited(false);
6491 }
6492
6493 Ok(())
6494 }
6495
6496 /// Sends data in a DATAGRAM frame.
6497 ///
6498 /// This is the same as [`dgram_send()`] but takes a `Vec<u8>` instead of
6499 /// a slice.
6500 ///
6501 /// [`dgram_send()`]: struct.Connection.html#method.dgram_send
6502 pub fn dgram_send_vec(&mut self, buf: Vec<u8>) -> Result<()> {
6503 let max_payload_len = match self.dgram_max_writable_len() {
6504 Some(v) => v,
6505
6506 None => return Err(Error::InvalidState),
6507 };
6508
6509 if buf.len() > max_payload_len {
6510 return Err(Error::BufferTooShort);
6511 }
6512
6513 self.dgram_send_queue.push(buf)?;
6514
6515 let active_path = self.paths.get_active_mut()?;
6516
6517 if self.dgram_send_queue.byte_size() >
6518 active_path.recovery.cwnd_available()
6519 {
6520 active_path.recovery.update_app_limited(false);
6521 }
6522
6523 Ok(())
6524 }
6525
    /// Purges queued outgoing DATAGRAMs matching the predicate.
    ///
    /// In other words, remove all elements `e` such that `f(&e)` returns true.
    ///
    /// ## Examples:
    /// ```no_run
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// conn.dgram_send(b"hello")?;
    /// conn.dgram_purge_outgoing(&|d: &[u8]| -> bool { d[0] == 0 });
    /// # Ok::<(), quiche::Error>(())
    /// ```
    #[inline]
    pub fn dgram_purge_outgoing<FN: Fn(&[u8]) -> bool>(&mut self, f: FN) {
        // Filtering is delegated to the send queue, which drops matching
        // entries in place.
        self.dgram_send_queue.purge(f);
    }
6546
6547 /// Returns the maximum DATAGRAM payload that can be sent.
6548 ///
6549 /// [`None`] is returned if the peer hasn't advertised a maximum DATAGRAM
6550 /// frame size.
6551 ///
6552 /// ## Examples:
6553 ///
6554 /// ```no_run
6555 /// # let mut buf = [0; 512];
6556 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6557 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6558 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6559 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6560 /// # let local = socket.local_addr().unwrap();
6561 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6562 /// if let Some(payload_size) = conn.dgram_max_writable_len() {
6563 /// if payload_size > 5 {
6564 /// conn.dgram_send(b"hello")?;
6565 /// }
6566 /// }
6567 /// # Ok::<(), quiche::Error>(())
6568 /// ```
6569 #[inline]
6570 pub fn dgram_max_writable_len(&self) -> Option<usize> {
6571 match self.peer_transport_params.max_datagram_frame_size {
6572 None => None,
6573 Some(peer_frame_len) => {
6574 let dcid = self.destination_id();
6575 // Start from the maximum packet size...
6576 let mut max_len = self.max_send_udp_payload_size();
6577 // ...subtract the Short packet header overhead...
6578 // (1 byte of pkt_len + len of dcid)
6579 max_len = max_len.saturating_sub(1 + dcid.len());
6580 // ...subtract the packet number (max len)...
6581 max_len = max_len.saturating_sub(packet::MAX_PKT_NUM_LEN);
6582 // ...subtract the crypto overhead...
6583 max_len = max_len.saturating_sub(
6584 self.crypto_ctx[packet::Epoch::Application]
6585 .crypto_overhead()?,
6586 );
6587 // ...clamp to what peer can support...
6588 max_len = cmp::min(peer_frame_len as usize, max_len);
6589 // ...subtract frame overhead, checked for underflow.
6590 // (1 byte of frame type + len of length )
6591 max_len.checked_sub(1 + frame::MAX_DGRAM_OVERHEAD)
6592 },
6593 }
6594 }
6595
    /// Returns whether the local endpoint advertised DATAGRAM support, i.e.
    /// whether a local `max_datagram_frame_size` transport parameter is set.
    fn dgram_enabled(&self) -> bool {
        self.local_transport_params
            .max_datagram_frame_size
            .is_some()
    }
6601
6602 /// Returns when the next timeout event will occur.
6603 ///
6604 /// Once the timeout Instant has been reached, the [`on_timeout()`] method
6605 /// should be called. A timeout of `None` means that the timer should be
6606 /// disarmed.
6607 ///
6608 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
6609 pub fn timeout_instant(&self) -> Option<Instant> {
6610 if self.is_closed() {
6611 return None;
6612 }
6613
6614 if self.is_draining() {
6615 // Draining timer takes precedence over all other timers. If it is
6616 // set it means the connection is closing so there's no point in
6617 // processing the other timers.
6618 self.draining_timer
6619 } else {
6620 // Use the lowest timer value (i.e. "sooner") among idle and loss
6621 // detection timers. If they are both unset (i.e. `None`) then the
6622 // result is `None`, but if at least one of them is set then a
6623 // `Some(...)` value is returned.
6624 let path_timer = self
6625 .paths
6626 .iter()
6627 .filter_map(|(_, p)| p.recovery.loss_detection_timer())
6628 .min();
6629
6630 let key_update_timer = self.crypto_ctx[packet::Epoch::Application]
6631 .key_update
6632 .as_ref()
6633 .map(|key_update| key_update.timer);
6634
6635 let timers = [self.idle_timer, path_timer, key_update_timer];
6636
6637 timers.iter().filter_map(|&x| x).min()
6638 }
6639 }
6640
6641 /// Returns the amount of time until the next timeout event.
6642 ///
6643 /// Once the given duration has elapsed, the [`on_timeout()`] method should
6644 /// be called. A timeout of `None` means that the timer should be disarmed.
6645 ///
6646 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
6647 pub fn timeout(&self) -> Option<Duration> {
6648 self.timeout_instant().map(|timeout| {
6649 let now = Instant::now();
6650
6651 if timeout <= now {
6652 Duration::ZERO
6653 } else {
6654 timeout.duration_since(now)
6655 }
6656 })
6657 }
6658
    /// Processes a timeout event.
    ///
    /// If no timeout has occurred it does nothing.
    pub fn on_timeout(&mut self) {
        let now = Instant::now();

        if let Some(draining_timer) = self.draining_timer {
            if draining_timer <= now {
                trace!("{} draining timeout expired", self.trace_id);

                // The draining period is over; the connection is now closed.
                self.mark_closed();
            }

            // Draining timer takes precedence over all other timers. If it is
            // set it means the connection is closing so there's no point in
            // processing the other timers.
            return;
        }

        if let Some(timer) = self.idle_timer {
            if timer <= now {
                trace!("{} idle timeout expired", self.trace_id);

                // Idle timeout closes the connection; the `timed_out` flag
                // lets the application distinguish this from other closures.
                self.mark_closed();
                self.timed_out = true;
                return;
            }
        }

        if let Some(timer) = self.crypto_ctx[packet::Epoch::Application]
            .key_update
            .as_ref()
            .map(|key_update| key_update.timer)
        {
            if timer <= now {
                // Discard previous key once key update timer expired.
                let _ = self.crypto_ctx[packet::Epoch::Application]
                    .key_update
                    .take();
            }
        }

        let handshake_status = self.handshake_status();

        // Run loss detection on every path whose timer fired, accumulating
        // the resulting loss statistics on the connection.
        for (_, p) in self.paths.iter_mut() {
            if let Some(timer) = p.recovery.loss_detection_timer() {
                if timer <= now {
                    trace!("{} loss detection timeout expired", self.trace_id);

                    let OnLossDetectionTimeoutOutcome {
                        lost_packets,
                        lost_bytes,
                    } = p.on_loss_detection_timeout(
                        handshake_status,
                        now,
                        self.is_server,
                        &self.trace_id,
                    );

                    self.lost_count += lost_packets;
                    self.lost_bytes += lost_bytes as u64;

                    qlog_with_type!(QLOG_METRICS, self.qlog, q, {
                        p.recovery.maybe_qlog(q, now);
                    });
                }
            }
        }

        // Notify timeout events to the application.
        self.paths.notify_failed_validations();

        // If the active path failed, try to find a new candidate.
        if self.paths.get_active_path_id().is_err() {
            match self.paths.find_candidate_path() {
                Some(pid) => {
                    if self.set_active_path(pid, now).is_err() {
                        // The connection cannot continue.
                        self.mark_closed();
                    }
                },

                // The connection cannot continue.
                None => {
                    self.mark_closed();
                },
            }
        }
    }
6748
6749 /// Requests the stack to perform path validation of the proposed 4-tuple.
6750 ///
6751 /// Probing new paths requires spare Connection IDs at both the host and the
6752 /// peer sides. If it is not the case, it raises an [`OutOfIdentifiers`].
6753 ///
6754 /// The probing of new addresses can only be done by the client. The server
6755 /// can only probe network paths that were previously advertised by
6756 /// [`PathEvent::New`]. If the server tries to probe such an unseen network
6757 /// path, this call raises an [`InvalidState`].
6758 ///
6759 /// The caller might also want to probe an existing path. In such case, it
6760 /// triggers a PATH_CHALLENGE frame, but it does not require spare CIDs.
6761 ///
6762 /// A server always probes a new path it observes. Calling this method is
6763 /// hence not required to validate a new path. However, a server can still
6764 /// request an additional path validation of the proposed 4-tuple.
6765 ///
6766 /// Calling this method several times before calling [`send()`] or
6767 /// [`send_on_path()`] results in a single probe being generated. An
6768 /// application wanting to send multiple in-flight probes must call this
6769 /// method again after having sent packets.
6770 ///
6771 /// Returns the Destination Connection ID sequence number associated to that
6772 /// path.
6773 ///
6774 /// [`PathEvent::New`]: enum.PathEvent.html#variant.New
6775 /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
6776 /// [`InvalidState`]: enum.Error.html#InvalidState
6777 /// [`send()`]: struct.Connection.html#method.send
6778 /// [`send_on_path()`]: struct.Connection.html#method.send_on_path
6779 pub fn probe_path(
6780 &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
6781 ) -> Result<u64> {
6782 // We may want to probe an existing path.
6783 let pid = match self.paths.path_id_from_addrs(&(local_addr, peer_addr)) {
6784 Some(pid) => pid,
6785 None => self.create_path_on_client(local_addr, peer_addr)?,
6786 };
6787
6788 let path = self.paths.get_mut(pid)?;
6789 path.request_validation();
6790
6791 path.active_dcid_seq.ok_or(Error::InvalidState)
6792 }
6793
6794 /// Migrates the connection to a new local address `local_addr`.
6795 ///
6796 /// The behavior is similar to [`migrate()`], with the nuance that the
6797 /// connection only changes the local address, but not the peer one.
6798 ///
6799 /// See [`migrate()`] for the full specification of this method.
6800 ///
6801 /// [`migrate()`]: struct.Connection.html#method.migrate
6802 pub fn migrate_source(&mut self, local_addr: SocketAddr) -> Result<u64> {
6803 let peer_addr = self.paths.get_active()?.peer_addr();
6804 self.migrate(local_addr, peer_addr)
6805 }
6806
6807 /// Migrates the connection over the given network path between `local_addr`
6808 /// and `peer_addr`.
6809 ///
6810 /// Connection migration can only be initiated by the client. Calling this
6811 /// method as a server returns [`InvalidState`].
6812 ///
6813 /// To initiate voluntary migration, there should be enough Connection IDs
6814 /// at both sides. If this requirement is not satisfied, this call returns
6815 /// [`OutOfIdentifiers`].
6816 ///
6817 /// Returns the Destination Connection ID associated to that migrated path.
6818 ///
6819 /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
6820 /// [`InvalidState`]: enum.Error.html#InvalidState
    pub fn migrate(
        &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
    ) -> Result<u64> {
        // Only clients may initiate voluntary migration.
        if self.is_server {
            return Err(Error::InvalidState);
        }

        // If the path already exists, mark it as the active one.
        let (pid, dcid_seq) = if let Some(pid) =
            self.paths.path_id_from_addrs(&(local_addr, peer_addr))
        {
            let path = self.paths.get_mut(pid)?;

            // If it is already active, do nothing.
            if path.active() {
                return path.active_dcid_seq.ok_or(Error::OutOfIdentifiers);
            }

            // Ensures that a Source Connection ID has been dedicated to this
            // path, or a free one is available. This is only required if the
            // host uses non-zero length Source Connection IDs.
            if !self.ids.zero_length_scid() &&
                path.active_scid_seq.is_none() &&
                self.ids.available_scids() == 0
            {
                return Err(Error::OutOfIdentifiers);
            }

            // Ensures that the migrated path has a Destination Connection ID.
            let dcid_seq = if let Some(dcid_seq) = path.active_dcid_seq {
                dcid_seq
            } else {
                // Pick the lowest-numbered unused DCID and bind it to this
                // path so packets sent on it use a fresh CID.
                let dcid_seq = self
                    .ids
                    .lowest_available_dcid_seq()
                    .ok_or(Error::OutOfIdentifiers)?;

                self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
                path.active_dcid_seq = Some(dcid_seq);

                dcid_seq
            };

            (pid, dcid_seq)
        } else {
            // Unknown 4-tuple: create a new path. `create_path_on_client`
            // is expected to assign a DCID; its absence is an invalid state.
            let pid = self.create_path_on_client(local_addr, peer_addr)?;

            let dcid_seq = self
                .paths
                .get(pid)?
                .active_dcid_seq
                .ok_or(Error::InvalidState)?;

            (pid, dcid_seq)
        };

        // Change the active path.
        self.set_active_path(pid, Instant::now())?;

        Ok(dcid_seq)
    }
6882
6883 /// Provides additional source Connection IDs that the peer can use to reach
6884 /// this host.
6885 ///
6886 /// This triggers sending NEW_CONNECTION_ID frames if the provided Source
6887 /// Connection ID is not already present. In the case the caller tries to
6888 /// reuse a Connection ID with a different reset token, this raises an
6889 /// `InvalidState`.
6890 ///
6891 /// At any time, the peer cannot have more Destination Connection IDs than
6892 /// the maximum number of active Connection IDs it negotiated. In such case
6893 /// (i.e., when [`scids_left()`] returns 0), if the host agrees to
6894 /// request the removal of previous connection IDs, it sets the
6895 /// `retire_if_needed` parameter. Otherwise, an [`IdLimit`] is returned.
6896 ///
6897 /// Note that setting `retire_if_needed` does not prevent this function from
6898 /// returning an [`IdLimit`] in the case the caller wants to retire still
6899 /// unannounced Connection IDs.
6900 ///
6901 /// The caller is responsible for ensuring that the provided `scid` is not
6902 /// repeated several times over the connection. quiche ensures that as long
6903 /// as the provided Connection ID is still in use (i.e., not retired), it
6904 /// does not assign a different sequence number.
6905 ///
6906 /// Note that if the host uses zero-length Source Connection IDs, it cannot
6907 /// advertise Source Connection IDs and calling this method returns an
6908 /// [`InvalidState`].
6909 ///
6910 /// Returns the sequence number associated to the provided Connection ID.
6911 ///
6912 /// [`scids_left()`]: struct.Connection.html#method.scids_left
6913 /// [`IdLimit`]: enum.Error.html#IdLimit
6914 /// [`InvalidState`]: enum.Error.html#InvalidState
    pub fn new_scid(
        &mut self, scid: &ConnectionId, reset_token: u128, retire_if_needed: bool,
    ) -> Result<u64> {
        // NOTE(review): the `true`/`None` arguments presumably mean
        // "advertise via NEW_CONNECTION_ID" (per the doc above) and "no
        // path pre-assigned" — confirm against `cid::new_scid`'s signature.
        self.ids.new_scid(
            scid.to_vec().into(),
            Some(reset_token),
            true,
            None,
            retire_if_needed,
        )
    }
6926
6927 /// Returns the number of source Connection IDs that are active. This is
6928 /// only meaningful if the host uses non-zero length Source Connection IDs.
    pub fn active_scids(&self) -> usize {
        // Straight delegation to the connection-ID tracker.
        self.ids.active_source_cids()
    }
6932
6933 /// Returns the number of source Connection IDs that should be provided
6934 /// to the peer without exceeding the limit it advertised.
6935 ///
6936 /// This will automatically limit the number of Connection IDs to the
6937 /// minimum between the locally configured active connection ID limit,
6938 /// and the one sent by the peer.
6939 ///
6940 /// To obtain the maximum possible value allowed by the peer an application
6941 /// can instead inspect the [`peer_active_conn_id_limit`] value.
6942 ///
6943 /// [`peer_active_conn_id_limit`]: struct.Stats.html#structfield.peer_active_conn_id_limit
6944 #[inline]
6945 pub fn scids_left(&self) -> usize {
6946 let max_active_source_cids = cmp::min(
6947 self.peer_transport_params.active_conn_id_limit,
6948 self.local_transport_params.active_conn_id_limit,
6949 ) as usize;
6950
6951 max_active_source_cids - self.active_scids()
6952 }
6953
6954 /// Requests the retirement of the destination Connection ID used by the
6955 /// host to reach its peer.
6956 ///
6957 /// This triggers sending RETIRE_CONNECTION_ID frames.
6958 ///
6959 /// If the application tries to retire a non-existing Destination Connection
6960 /// ID sequence number, or if it uses zero-length Destination Connection ID,
6961 /// this method returns an [`InvalidState`].
6962 ///
6963 /// At any time, the host must have at least one Destination ID. If the
6964 /// application tries to retire the last one, or if the caller tries to
6965 /// retire the destination Connection ID used by the current active path
6966 /// while having neither spare Destination Connection IDs nor validated
6967 /// network paths, this method returns an [`OutOfIdentifiers`]. This
6968 /// behavior prevents the caller from stalling the connection due to the
6969 /// lack of validated path to send non-probing packets.
6970 ///
6971 /// [`InvalidState`]: enum.Error.html#InvalidState
6972 /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
    pub fn retire_dcid(&mut self, dcid_seq: u64) -> Result<()> {
        // Zero-length DCIDs cannot be retired (there is nothing to rotate).
        if self.ids.zero_length_dcid() {
            return Err(Error::InvalidState);
        }

        let active_path_dcid_seq = self
            .paths
            .get_active()?
            .active_dcid_seq
            .ok_or(Error::InvalidState)?;

        let active_path_id = self.paths.get_active_path_id()?;

        // Refuse to retire the active path's DCID when no spare DCID exists
        // and no other usable path could take over — doing so would stall
        // the connection (see doc comment above).
        if active_path_dcid_seq == dcid_seq &&
            self.ids.lowest_available_dcid_seq().is_none() &&
            !self
                .paths
                .iter()
                .any(|(pid, p)| pid != active_path_id && p.usable())
        {
            return Err(Error::OutOfIdentifiers);
        }

        if let Some(pid) = self.ids.retire_dcid(dcid_seq)? {
            // The retired Destination CID was associated to a given path. Let's
            // find an available DCID to associate to that path.
            let path = self.paths.get_mut(pid)?;
            let dcid_seq = self.ids.lowest_available_dcid_seq();

            if let Some(dcid_seq) = dcid_seq {
                self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
            }

            // May be `None` if no replacement DCID is available yet.
            path.active_dcid_seq = dcid_seq;
        }

        Ok(())
    }
7011
7012 /// Processes path-specific events.
7013 ///
7014 /// On success it returns a [`PathEvent`], or `None` when there are no
7015 /// events to report. Please refer to [`PathEvent`] for the exhaustive event
7016 /// list.
7017 ///
7018 /// Note that all events are edge-triggered, meaning that once reported they
7019 /// will not be reported again by calling this method again, until the event
7020 /// is re-armed.
7021 ///
7022 /// [`PathEvent`]: enum.PathEvent.html
    pub fn path_event_next(&mut self) -> Option<PathEvent> {
        // Events are queued by the path manager; popping consumes them
        // (edge-triggered, see doc comment above).
        self.paths.pop_event()
    }
7026
7027 /// Returns the number of source Connection IDs that are retired.
    pub fn retired_scids(&self) -> usize {
        // Count of retired SCIDs still queued for the application to drain
        // via `retired_scid_next()`.
        self.ids.retired_source_cids()
    }
7031
7032 /// Returns a source `ConnectionId` that has been retired.
7033 ///
7034 /// On success it returns a [`ConnectionId`], or `None` when there are no
7035 /// more retired connection IDs.
7036 ///
7037 /// [`ConnectionId`]: struct.ConnectionId.html
    pub fn retired_scid_next(&mut self) -> Option<ConnectionId<'static>> {
        // Pops (and therefore consumes) one retired SCID from the queue.
        self.ids.pop_retired_scid()
    }
7041
7042 /// Returns the number of spare Destination Connection IDs, i.e.,
7043 /// Destination Connection IDs that are still unused.
7044 ///
7045 /// Note that this function returns 0 if the host uses zero length
7046 /// Destination Connection IDs.
    pub fn available_dcids(&self) -> usize {
        // Straight delegation; see doc comment above for the zero-length
        // DCID caveat.
        self.ids.available_dcids()
    }
7050
7051 /// Returns an iterator over destination `SockAddr`s whose association
7052 /// with `from` forms a known QUIC path on which packets can be sent to.
7053 ///
7054 /// This function is typically used in combination with [`send_on_path()`].
7055 ///
7056 /// Note that the iterator includes all the possible combination of
7057 /// destination `SockAddr`s, even those whose sending is not required now.
7058 /// In other words, this is another way for the application to recall from
7059 /// past [`PathEvent::New`] events.
7060 ///
7061 /// [`PathEvent::New`]: enum.PathEvent.html#variant.New
7062 /// [`send_on_path()`]: struct.Connection.html#method.send_on_path
7063 ///
7064 /// ## Examples:
7065 ///
7066 /// ```no_run
7067 /// # let mut out = [0; 512];
7068 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
7069 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
7070 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
7071 /// # let local = socket.local_addr().unwrap();
7072 /// # let peer = "127.0.0.1:1234".parse().unwrap();
7073 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
7074 /// // Iterate over possible destinations for the given local `SockAddr`.
7075 /// for dest in conn.paths_iter(local) {
7076 /// loop {
7077 /// let (write, send_info) =
7078 /// match conn.send_on_path(&mut out, Some(local), Some(dest)) {
7079 /// Ok(v) => v,
7080 ///
7081 /// Err(quiche::Error::Done) => {
7082 /// // Done writing for this destination.
7083 /// break;
7084 /// },
7085 ///
7086 /// Err(e) => {
7087 /// // An error occurred, handle it.
7088 /// break;
7089 /// },
7090 /// };
7091 ///
7092 /// socket.send_to(&out[..write], &send_info.to).unwrap();
7093 /// }
7094 /// }
7095 /// # Ok::<(), quiche::Error>(())
7096 /// ```
7097 #[inline]
7098 pub fn paths_iter(&self, from: SocketAddr) -> SocketAddrIter {
7099 // Instead of trying to identify whether packets will be sent on the
7100 // given 4-tuple, simply filter paths that cannot be used.
7101 SocketAddrIter {
7102 sockaddrs: self
7103 .paths
7104 .iter()
7105 .filter(|(_, p)| p.active_dcid_seq.is_some())
7106 .filter(|(_, p)| p.usable() || p.probing_required())
7107 .filter(|(_, p)| p.local_addr() == from)
7108 .map(|(_, p)| p.peer_addr())
7109 .collect(),
7110
7111 index: 0,
7112 }
7113 }
7114
7115 /// Closes the connection with the given error and reason.
7116 ///
7117 /// The `app` parameter specifies whether an application close should be
7118 /// sent to the peer. Otherwise a normal connection close is sent.
7119 ///
7120 /// If `app` is true but the connection is not in a state that is safe to
7121 /// send an application error (not established nor in early data), in
7122 /// accordance with [RFC
7123 /// 9000](https://www.rfc-editor.org/rfc/rfc9000.html#section-10.2.3-3), the
7124 /// error code is changed to APPLICATION_ERROR and the reason phrase is
7125 /// cleared.
7126 ///
7127 /// Returns [`Done`] if the connection had already been closed.
7128 ///
7129 /// Note that the connection will not be closed immediately. An application
7130 /// should continue calling the [`recv()`], [`send()`], [`timeout()`] and
7131 /// [`on_timeout()`] methods as normal, until the [`is_closed()`] method
7132 /// returns `true`.
7133 ///
7134 /// [`Done`]: enum.Error.html#variant.Done
7135 /// [`recv()`]: struct.Connection.html#method.recv
7136 /// [`send()`]: struct.Connection.html#method.send
7137 /// [`timeout()`]: struct.Connection.html#method.timeout
7138 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
7139 /// [`is_closed()`]: struct.Connection.html#method.is_closed
7140 pub fn close(&mut self, app: bool, err: u64, reason: &[u8]) -> Result<()> {
7141 if self.is_closed() || self.is_draining() {
7142 return Err(Error::Done);
7143 }
7144
7145 if self.local_error.is_some() {
7146 return Err(Error::Done);
7147 }
7148
7149 let is_safe_to_send_app_data =
7150 self.is_established() || self.is_in_early_data();
7151
7152 if app && !is_safe_to_send_app_data {
7153 // Clear error information.
7154 self.local_error = Some(ConnectionError {
7155 is_app: false,
7156 error_code: 0x0c,
7157 reason: vec![],
7158 });
7159 } else {
7160 self.local_error = Some(ConnectionError {
7161 is_app: app,
7162 error_code: err,
7163 reason: reason.to_vec(),
7164 });
7165 }
7166
7167 // When no packet was successfully processed close connection immediately.
7168 if self.recv_count == 0 {
7169 self.mark_closed();
7170 }
7171
7172 Ok(())
7173 }
7174
7175 /// Returns a string uniquely representing the connection.
7176 ///
7177 /// This can be used for logging purposes to differentiate between multiple
7178 /// connections.
    #[inline]
    pub fn trace_id(&self) -> &str {
        // Borrow of the stored identifier; stable for logging.
        &self.trace_id
    }
7183
7184 /// Returns the negotiated ALPN protocol.
7185 ///
7186 /// If no protocol has been negotiated, the returned value is empty.
    #[inline]
    pub fn application_proto(&self) -> &[u8] {
        // Empty slice until ALPN negotiation completes (see doc above).
        self.alpn.as_ref()
    }
7191
7192 /// Returns the server name requested by the client.
    #[inline]
    pub fn server_name(&self) -> Option<&str> {
        // SNI as reported by the TLS handshake layer.
        self.handshake.server_name()
    }
7197
7198 /// Returns the peer's leaf certificate (if any) as a DER-encoded buffer.
    #[inline]
    pub fn peer_cert(&self) -> Option<&[u8]> {
        // Delegates to the TLS handshake state.
        self.handshake.peer_cert()
    }
7203
7204 /// Returns the peer's certificate chain (if any) as a vector of DER-encoded
7205 /// buffers.
7206 ///
7207 /// The certificate at index 0 is the peer's leaf certificate, the other
7208 /// certificates (if any) are the chain certificate authorities used to
7209 /// sign the leaf certificate.
    #[inline]
    pub fn peer_cert_chain(&self) -> Option<Vec<&[u8]>> {
        // Delegates to the TLS handshake state; ordering is documented above.
        self.handshake.peer_cert_chain()
    }
7214
7215 /// Returns the serialized cryptographic session for the connection.
7216 ///
7217 /// This can be used by a client to cache a connection's session, and resume
7218 /// it later using the [`set_session()`] method.
7219 ///
7220 /// [`set_session()`]: struct.Connection.html#method.set_session
    #[inline]
    pub fn session(&self) -> Option<&[u8]> {
        // Borrow the cached serialized session, if one has been captured.
        self.session.as_deref()
    }
7225
7226 /// Returns the source connection ID.
7227 ///
7228 /// When there are multiple IDs, and if there is an active path, the ID used
7229 /// on that path is returned. Otherwise the oldest ID is returned.
7230 ///
7231 /// Note that the value returned can change throughout the connection's
7232 /// lifetime.
7233 #[inline]
7234 pub fn source_id(&self) -> ConnectionId<'_> {
7235 if let Ok(path) = self.paths.get_active() {
7236 if let Some(active_scid_seq) = path.active_scid_seq {
7237 if let Ok(e) = self.ids.get_scid(active_scid_seq) {
7238 return ConnectionId::from_ref(e.cid.as_ref());
7239 }
7240 }
7241 }
7242
7243 let e = self.ids.oldest_scid();
7244 ConnectionId::from_ref(e.cid.as_ref())
7245 }
7246
7247 /// Returns all active source connection IDs.
7248 ///
7249 /// An iterator is returned for all active IDs (i.e. ones that have not
7250 /// been explicitly retired yet).
    #[inline]
    pub fn source_ids(&self) -> impl Iterator<Item = &ConnectionId<'_>> {
        // Iterates active (non-retired) SCIDs; see doc comment above.
        self.ids.scids_iter()
    }
7255
7256 /// Returns the destination connection ID.
7257 ///
7258 /// Note that the value returned can change throughout the connection's
7259 /// lifetime.
7260 #[inline]
7261 pub fn destination_id(&self) -> ConnectionId<'_> {
7262 if let Ok(path) = self.paths.get_active() {
7263 if let Some(active_dcid_seq) = path.active_dcid_seq {
7264 if let Ok(e) = self.ids.get_dcid(active_dcid_seq) {
7265 return ConnectionId::from_ref(e.cid.as_ref());
7266 }
7267 }
7268 }
7269
7270 let e = self.ids.oldest_dcid();
7271 ConnectionId::from_ref(e.cid.as_ref())
7272 }
7273
7274 /// Returns the PMTU for the active path if it exists.
7275 ///
7276 /// This requires no additonal packets to be sent but simply checks if PMTUD
7277 /// has completed and has found a valid PMTU.
7278 #[inline]
7279 pub fn pmtu(&self) -> Option<usize> {
7280 if let Ok(path) = self.paths.get_active() {
7281 path.pmtud.as_ref().and_then(|pmtud| pmtud.get_pmtu())
7282 } else {
7283 None
7284 }
7285 }
7286
7287 /// Revalidates the PMTU for the active path by sending a new probe packet
7288 /// of PMTU size. If the probe is dropped PMTUD will restart and find a new
7289 /// valid PMTU.
7290 #[inline]
7291 pub fn revalidate_pmtu(&mut self) {
7292 if let Ok(active_path) = self.paths.get_active_mut() {
7293 if let Some(pmtud) = active_path.pmtud.as_mut() {
7294 pmtud.revalidate_pmtu();
7295 }
7296 }
7297 }
7298
7299 /// Returns true if the connection handshake is complete.
    #[inline]
    pub fn is_established(&self) -> bool {
        // Flag is set once the TLS handshake reports completion.
        self.handshake_completed
    }
7304
7305 /// Returns true if the connection is resumed.
    #[inline]
    pub fn is_resumed(&self) -> bool {
        // Delegates to the TLS layer (session resumption).
        self.handshake.is_resumed()
    }
7310
7311 /// Returns true if the connection has a pending handshake that has
7312 /// progressed enough to send or receive early data.
    #[inline]
    pub fn is_in_early_data(&self) -> bool {
        // Delegates to the TLS layer (0-RTT readiness).
        self.handshake.is_in_early_data()
    }
7317
7318 /// Returns the early data reason for the connection.
7319 ///
7320 /// This status can be useful for logging and debugging. See [BoringSSL]
7321 /// documentation for a definition of the reasons.
7322 ///
7323 /// [BoringSSL]: https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#ssl_early_data_reason_t
    #[inline]
    pub fn early_data_reason(&self) -> u32 {
        // Raw BoringSSL reason code; see the doc link above for meanings.
        self.handshake.early_data_reason()
    }
7328
7329 /// Returns whether there is stream or DATAGRAM data available to read.
    #[inline]
    pub fn is_readable(&self) -> bool {
        // Readable if any stream has data, or a DATAGRAM is queued.
        self.streams.has_readable() || self.dgram_recv_front_len().is_some()
    }
7334
7335 /// Returns whether the network path with local address `from` and remote
7336 /// address `peer` has been validated.
7337 ///
7338 /// If the 4-tuple does not exist over the connection, returns an
7339 /// [`InvalidState`].
7340 ///
7341 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
7342 pub fn is_path_validated(
7343 &self, from: SocketAddr, to: SocketAddr,
7344 ) -> Result<bool> {
7345 let pid = self
7346 .paths
7347 .path_id_from_addrs(&(from, to))
7348 .ok_or(Error::InvalidState)?;
7349
7350 Ok(self.paths.get(pid)?.validated())
7351 }
7352
7353 /// Returns true if the connection is draining.
7354 ///
7355 /// If this returns `true`, the connection object cannot yet be dropped, but
7356 /// no new application data can be sent or received. An application should
7357 /// continue calling the [`recv()`], [`timeout()`], and [`on_timeout()`]
7358 /// methods as normal, until the [`is_closed()`] method returns `true`.
7359 ///
7360 /// In contrast, once `is_draining()` returns `true`, calling [`send()`]
7361 /// is not required because no new outgoing packets will be generated.
7362 ///
7363 /// [`recv()`]: struct.Connection.html#method.recv
7364 /// [`send()`]: struct.Connection.html#method.send
7365 /// [`timeout()`]: struct.Connection.html#method.timeout
7366 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
7367 /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn is_draining(&self) -> bool {
        // Draining state is represented by an armed draining timer.
        self.draining_timer.is_some()
    }
7372
7373 /// Returns true if the connection is closed.
7374 ///
7375 /// If this returns true, the connection object can be dropped.
    #[inline]
    pub fn is_closed(&self) -> bool {
        // Set by `mark_closed()`; terminal state.
        self.closed
    }
7380
7381 /// Returns true if the connection was closed due to the idle timeout.
    #[inline]
    pub fn is_timed_out(&self) -> bool {
        // Set only on the idle-timeout path of `on_timeout()`.
        self.timed_out
    }
7386
7387 /// Returns the error received from the peer, if any.
7388 ///
7389 /// Note that a `Some` return value does not necessarily imply
7390 /// [`is_closed()`] or any other connection state.
7391 ///
7392 /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn peer_error(&self) -> Option<&ConnectionError> {
        // Borrow of the stored peer-supplied close information, if any.
        self.peer_error.as_ref()
    }
7397
7398 /// Returns the error [`close()`] was called with, or internally
7399 /// created quiche errors, if any.
7400 ///
7401 /// Note that a `Some` return value does not necessarily imply
7402 /// [`is_closed()`] or any other connection state.
7403 /// `Some` also does not guarantee that the error has been sent to
7404 /// or received by the peer.
7405 ///
7406 /// [`close()`]: struct.Connection.html#method.close
7407 /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn local_error(&self) -> Option<&ConnectionError> {
        // Borrow of the locally recorded close information, if any.
        self.local_error.as_ref()
    }
7412
7413 /// Collects and returns statistics about the connection.
    #[inline]
    pub fn stats(&self) -> Stats {
        // Snapshot of cumulative connection-wide counters; per-path metrics
        // are available separately via `path_stats()`.
        Stats {
            recv: self.recv_count,
            sent: self.sent_count,
            lost: self.lost_count,
            spurious_lost: self.spurious_lost_count,
            retrans: self.retrans_count,
            sent_bytes: self.sent_bytes,
            recv_bytes: self.recv_bytes,
            acked_bytes: self.acked_bytes,
            lost_bytes: self.lost_bytes,
            stream_retrans_bytes: self.stream_retrans_bytes,
            dgram_recv: self.dgram_recv_count,
            dgram_sent: self.dgram_sent_count,
            paths_count: self.paths.len(),
            reset_stream_count_local: self.reset_stream_local_count,
            stopped_stream_count_local: self.stopped_stream_local_count,
            reset_stream_count_remote: self.reset_stream_remote_count,
            stopped_stream_count_remote: self.stopped_stream_remote_count,
            data_blocked_sent_count: self.data_blocked_sent_count,
            stream_data_blocked_sent_count: self.stream_data_blocked_sent_count,
            data_blocked_recv_count: self.data_blocked_recv_count,
            stream_data_blocked_recv_count: self.stream_data_blocked_recv_count,
            streams_blocked_bidi_recv_count: self.streams_blocked_bidi_recv_count,
            streams_blocked_uni_recv_count: self.streams_blocked_uni_recv_count,
            path_challenge_rx_count: self.path_challenge_rx_count,
            // Derived value, aggregated over all paths.
            bytes_in_flight_duration: self.bytes_in_flight_duration(),
            tx_buffered_state: self.tx_buffered_state,
        }
    }
7445
7446 /// Returns the sum of the durations when each path in the
7447 /// connection was actively sending bytes or waiting for acks.
7448 /// Note that this could result in a duration that is longer than
7449 /// the actual connection duration in cases where multiple paths
7450 /// are active for extended periods of time. In practice only 1
7451 /// path is typically active at a time.
7452 /// TODO revisit computation if in the future multiple paths are
7453 /// often active at the same time.
7454 fn bytes_in_flight_duration(&self) -> Duration {
7455 self.paths.iter().fold(Duration::ZERO, |acc, (_, path)| {
7456 acc + path.bytes_in_flight_duration()
7457 })
7458 }
7459
7460 /// Returns reference to peer's transport parameters. Returns `None` if we
7461 /// have not yet processed the peer's transport parameters.
7462 pub fn peer_transport_params(&self) -> Option<&TransportParams> {
7463 if !self.parsed_peer_transport_params {
7464 return None;
7465 }
7466
7467 Some(&self.peer_transport_params)
7468 }
7469
7470 /// Collects and returns statistics about each known path for the
7471 /// connection.
    pub fn path_stats(&self) -> impl Iterator<Item = PathStats> + '_ {
        // One `PathStats` snapshot per known path.
        self.paths.iter().map(|(_, p)| p.stats())
    }
7475
7476 /// Returns whether or not this is a server-side connection.
    pub fn is_server(&self) -> bool {
        // Role is fixed at connection creation time.
        self.is_server
    }
7480
    // Pushes the local transport parameters into the TLS handshake so they
    // are carried in the quic_transport_parameters extension.
    fn encode_transport_params(&mut self) -> Result<()> {
        self.handshake.set_quic_transport_params(
            &self.local_transport_params,
            self.is_server,
        )
    }
7487
    // Validates the peer's transport parameters against RFC 9000's
    // authentication rules for connection IDs, then applies them.
    fn parse_peer_transport_params(
        &mut self, peer_params: TransportParams,
    ) -> Result<()> {
        // Validate initial_source_connection_id.
        match &peer_params.initial_source_connection_id {
            // Must match the DCID we are currently sending to.
            Some(v) if v != &self.destination_id() =>
                return Err(Error::InvalidTransportParam),

            Some(_) => (),

            // initial_source_connection_id must be sent by
            // both endpoints.
            None => return Err(Error::InvalidTransportParam),
        }

        // Validate original_destination_connection_id.
        if let Some(odcid) = &self.odcid {
            match &peer_params.original_destination_connection_id {
                Some(v) if v != odcid =>
                    return Err(Error::InvalidTransportParam),

                Some(_) => (),

                // original_destination_connection_id must be
                // sent by the server.
                None if !self.is_server =>
                    return Err(Error::InvalidTransportParam),

                None => (),
            }
        }

        // Validate retry_source_connection_id.
        if let Some(rscid) = &self.rscid {
            match &peer_params.retry_source_connection_id {
                Some(v) if v != rscid =>
                    return Err(Error::InvalidTransportParam),

                Some(_) => (),

                // retry_source_connection_id must be sent by
                // the server.
                None => return Err(Error::InvalidTransportParam),
            }
        }

        // All checks passed: apply the parameters to connection state.
        self.process_peer_transport_params(peer_params)?;

        self.parsed_peer_transport_params = true;

        Ok(())
    }
7540
    // Applies already-validated peer transport parameters to flow control,
    // stream limits, recovery and connection-ID state.
    fn process_peer_transport_params(
        &mut self, peer_params: TransportParams,
    ) -> Result<()> {
        self.max_tx_data = peer_params.initial_max_data;

        // Update send capacity.
        self.update_tx_cap();

        self.streams
            .update_peer_max_streams_bidi(peer_params.initial_max_streams_bidi);
        self.streams
            .update_peer_max_streams_uni(peer_params.initial_max_streams_uni);

        // max_ack_delay is carried in milliseconds on the wire.
        let max_ack_delay = Duration::from_millis(peer_params.max_ack_delay);

        self.recovery_config.max_ack_delay = max_ack_delay;

        let active_path = self.paths.get_active_mut()?;

        active_path.recovery.update_max_ack_delay(max_ack_delay);

        // If PMTUD wants to probe, cap the probe size at the peer's
        // advertised max_udp_payload_size; otherwise just adopt the peer's
        // limit as the datagram size.
        if active_path
            .pmtud
            .as_ref()
            .map(|pmtud| pmtud.should_probe())
            .unwrap_or(false)
        {
            active_path.recovery.pmtud_update_max_datagram_size(
                active_path
                    .pmtud
                    .as_mut()
                    .expect("PMTUD existence verified above")
                    .get_probe_size()
                    .min(peer_params.max_udp_payload_size as usize),
            );
        } else {
            active_path.recovery.update_max_datagram_size(
                peer_params.max_udp_payload_size as usize,
            );
        }

        // Record the max_active_conn_id parameter advertised by the peer.
        self.ids
            .set_source_conn_id_limit(peer_params.active_conn_id_limit);

        self.peer_transport_params = peer_params;

        Ok(())
    }
7590
    /// Continues the handshake.
    ///
    /// If the connection is already established, it does nothing.
    fn do_handshake(&mut self, now: Instant) -> Result<()> {
        // Bundle the mutable connection state that the TLS callbacks need
        // into a single context attached to the handshake for its duration.
        let mut ex_data = tls::ExData {
            application_protos: &self.application_protos,

            crypto_ctx: &mut self.crypto_ctx,

            session: &mut self.session,

            local_error: &mut self.local_error,

            keylog: self.keylog.as_mut(),

            trace_id: &self.trace_id,

            local_transport_params: self.local_transport_params.clone(),

            recovery_config: self.recovery_config,

            tx_cap_factor: self.tx_cap_factor,

            pmtud: None,

            is_server: self.is_server,
        };

        // Once the handshake has completed only post-handshake messages
        // (e.g. session tickets) remain to be processed.
        if self.handshake_completed {
            return self.handshake.process_post_handshake(&mut ex_data);
        }

        match self.handshake.do_handshake(&mut ex_data) {
            Ok(_) => (),

            // `Done` means the handshake is still in progress (more flights
            // are needed), not an error.
            Err(Error::Done) => {
                // Apply in-handshake configuration from callbacks if the path's
                // Recovery module can still be reinitialized.
                if self
                    .paths
                    .get_active()
                    .map(|p| p.can_reinit_recovery())
                    .unwrap_or(false)
                {
                    if ex_data.recovery_config != self.recovery_config {
                        if let Ok(path) = self.paths.get_active_mut() {
                            self.recovery_config = ex_data.recovery_config;
                            path.reinit_recovery(&self.recovery_config);
                        }
                    }

                    if ex_data.tx_cap_factor != self.tx_cap_factor {
                        self.tx_cap_factor = ex_data.tx_cap_factor;
                    }

                    if let Some((discover, max_probes)) = ex_data.pmtud {
                        self.paths.set_discover_pmtu_on_existing_paths(
                            discover,
                            self.recovery_config.max_send_udp_payload_size,
                            max_probes,
                        );
                    }

                    if ex_data.local_transport_params !=
                        self.local_transport_params
                    {
                        self.streams.set_max_streams_bidi(
                            ex_data
                                .local_transport_params
                                .initial_max_streams_bidi,
                        );

                        self.local_transport_params =
                            ex_data.local_transport_params;
                    }
                }

                // Try to parse transport parameters as soon as the first flight
                // of handshake data is processed.
                //
                // This is potentially dangerous as the handshake hasn't been
                // completed yet, though it's required to be able to send data
                // in 0.5 RTT.
                let raw_params = self.handshake.quic_transport_params();

                if !self.parsed_peer_transport_params && !raw_params.is_empty() {
                    let peer_params = TransportParams::decode(
                        raw_params,
                        self.is_server,
                        self.peer_transport_params_track_unknown,
                    )?;

                    self.parse_peer_transport_params(peer_params)?;
                }

                return Ok(());
            },

            Err(e) => return Err(e),
        };

        self.handshake_completed = self.handshake.is_completed();

        self.alpn = self.handshake.alpn_protocol().to_vec();

        // Parse the peer's transport parameters if that hasn't happened
        // during an earlier flight already.
        let raw_params = self.handshake.quic_transport_params();

        if !self.parsed_peer_transport_params && !raw_params.is_empty() {
            let peer_params = TransportParams::decode(
                raw_params,
                self.is_server,
                self.peer_transport_params_track_unknown,
            )?;

            self.parse_peer_transport_params(peer_params)?;
        }

        if self.handshake_completed {
            // The handshake is considered confirmed at the server when the
            // handshake completes, at which point we can also drop the
            // handshake epoch.
            if self.is_server {
                self.handshake_confirmed = true;

                self.drop_epoch_state(packet::Epoch::Handshake, now);
            }

            // Once the handshake is completed there's no point in processing
            // 0-RTT packets anymore, so clear the buffer now.
            self.undecryptable_pkts.clear();

            trace!("{} connection established: proto={:?} cipher={:?} curve={:?} sigalg={:?} resumed={} {:?}",
                   &self.trace_id,
                   std::str::from_utf8(self.application_proto()),
                   self.handshake.cipher(),
                   self.handshake.curve(),
                   self.handshake.sigalg(),
                   self.handshake.is_resumed(),
                   self.peer_transport_params);
        }

        Ok(())
    }
7734
    /// Selects the packet type for the next outgoing packet.
    ///
    /// Returns `Error::Done` when there is nothing to send in any packet
    /// number space for the path identified by `send_pid`.
    fn write_pkt_type(&self, send_pid: usize) -> Result<Type> {
        // On error send packet in the latest epoch available, but only send
        // 1-RTT ones when the handshake is completed.
        if self
            .local_error
            .as_ref()
            .is_some_and(|conn_err| !conn_err.is_app)
        {
            let epoch = match self.handshake.write_level() {
                crypto::Level::Initial => packet::Epoch::Initial,
                crypto::Level::ZeroRTT => unreachable!(),
                crypto::Level::Handshake => packet::Epoch::Handshake,
                crypto::Level::OneRTT => packet::Epoch::Application,
            };

            if !self.handshake_confirmed {
                match epoch {
                    // Downgrade the epoch to Handshake as the handshake is not
                    // completed yet.
                    packet::Epoch::Application => return Ok(Type::Handshake),

                    // Downgrade the epoch to Initial as the remote peer might
                    // not be able to decrypt handshake packets yet.
                    packet::Epoch::Handshake
                        if self.crypto_ctx[packet::Epoch::Initial].has_keys() =>
                        return Ok(Type::Initial),

                    _ => (),
                };
            }

            return Ok(Type::from_epoch(epoch));
        }

        // Walk the epochs in order (Initial first) so earlier handshake data
        // is flushed before later spaces.
        for &epoch in packet::Epoch::epochs(
            packet::Epoch::Initial..=packet::Epoch::Application,
        ) {
            let crypto_ctx = &self.crypto_ctx[epoch];
            let pkt_space = &self.pkt_num_spaces[epoch];

            // Only send packets in a space when we have the send keys for it.
            if crypto_ctx.crypto_seal.is_none() {
                continue;
            }

            // We are ready to send data for this packet number space.
            if crypto_ctx.data_available() || pkt_space.ready() {
                return Ok(Type::from_epoch(epoch));
            }

            // There are lost frames in this packet number space.
            for (_, p) in self.paths.iter() {
                if p.recovery.has_lost_frames(epoch) {
                    return Ok(Type::from_epoch(epoch));
                }

                // We need to send PTO probe packets.
                if p.recovery.loss_probes(epoch) > 0 {
                    return Ok(Type::from_epoch(epoch));
                }
            }
        }

        // If there are flushable, almost full or blocked streams, use the
        // Application epoch.
        let send_path = self.paths.get(send_pid)?;
        if (self.is_established() || self.is_in_early_data()) &&
            (self.should_send_handshake_done() ||
                self.flow_control.should_update_max_data() ||
                self.should_send_max_data ||
                self.blocked_limit.is_some() ||
                self.dgram_send_queue.has_pending() ||
                self.local_error
                    .as_ref()
                    .is_some_and(|conn_err| conn_err.is_app) ||
                self.should_send_max_streams_bidi ||
                self.streams.should_update_max_streams_bidi() ||
                self.should_send_max_streams_uni ||
                self.streams.should_update_max_streams_uni() ||
                self.streams.has_flushable() ||
                self.streams.has_almost_full() ||
                self.streams.has_blocked() ||
                self.streams.has_reset() ||
                self.streams.has_stopped() ||
                self.ids.has_new_scids() ||
                self.ids.has_retire_dcids() ||
                send_path
                    .pmtud
                    .as_ref()
                    .is_some_and(|pmtud| pmtud.should_probe()) ||
                send_path.needs_ack_eliciting ||
                send_path.probing_required())
        {
            // Only clients can send 0-RTT packets.
            if !self.is_server && self.is_in_early_data() {
                return Ok(Type::ZeroRTT);
            }

            return Ok(Type::Short);
        }

        Err(Error::Done)
    }
7839
    /// Returns the mutable stream with the given ID if it exists, or creates
    /// a new one otherwise.
    ///
    /// Creation is delegated to the stream map, which is given both the
    /// local and the peer transport parameters so it can apply the
    /// appropriate initial limits; `local` indicates whether the stream is
    /// being opened by this endpoint.
    fn get_or_create_stream(
        &mut self, id: u64, local: bool,
    ) -> Result<&mut stream::Stream<F>> {
        self.streams.get_or_create(
            id,
            &self.local_transport_params,
            &self.peer_transport_params,
            local,
            self.is_server,
        )
    }
7853
    /// Processes an incoming frame.
    ///
    /// `hdr` is the header of the packet that carried the frame,
    /// `recv_path_id` identifies the path the packet arrived on, and `epoch`
    /// is the packet number space it belongs to. Returning an error here
    /// closes the connection with that error.
    fn process_frame(
        &mut self, frame: frame::Frame, hdr: &Header, recv_path_id: usize,
        epoch: packet::Epoch, now: Instant,
    ) -> Result<()> {
        trace!("{} rx frm {:?}", self.trace_id, frame);

        match frame {
            frame::Frame::Padding { .. } => (),

            frame::Frame::Ping { .. } => (),

            frame::Frame::ACK {
                ranges, ack_delay, ..
            } => {
                // Scale the wire-encoded delay by the peer's advertised
                // ack_delay_exponent; overflow means the frame is bogus.
                let ack_delay = ack_delay
                    .checked_mul(2_u64.pow(
                        self.peer_transport_params.ack_delay_exponent as u32,
                    ))
                    .ok_or(Error::InvalidFrame)?;

                // An ACK received in the Handshake space, or in the
                // Application space once established, proves the peer can
                // read our packets, so its address is considered verified.
                if epoch == packet::Epoch::Handshake ||
                    (epoch == packet::Epoch::Application &&
                        self.is_established())
                {
                    self.peer_verified_initial_address = true;
                }

                let handshake_status = self.handshake_status();

                let is_app_limited = self.delivery_rate_check_if_app_limited();

                let largest_acked = ranges.last().expect(
                    "ACK frames should always have at least one ack range",
                );

                for (_, p) in self.paths.iter_mut() {
                    if self.pkt_num_spaces[epoch]
                        .largest_tx_pkt_num
                        .is_some_and(|largest_sent| largest_sent < largest_acked)
                    {
                        // https://www.rfc-editor.org/rfc/rfc9000#section-13.1
                        // An endpoint SHOULD treat receipt of an acknowledgment
                        // for a packet it did not send as
                        // a connection error of type PROTOCOL_VIOLATION
                        return Err(Error::InvalidAckRange);
                    }

                    if is_app_limited {
                        p.recovery.delivery_rate_update_app_limited(true);
                    }

                    let OnAckReceivedOutcome {
                        lost_packets,
                        lost_bytes,
                        acked_bytes,
                        spurious_losses,
                    } = p.recovery.on_ack_received(
                        &ranges,
                        ack_delay,
                        epoch,
                        handshake_status,
                        now,
                        self.pkt_num_manager.skip_pn(),
                        &self.trace_id,
                    )?;

                    let skip_pn = self.pkt_num_manager.skip_pn();
                    let largest_acked =
                        p.recovery.get_largest_acked_on_epoch(epoch);

                    // Consider the skip_pn validated if the peer has sent an ack
                    // for a larger pkt number.
                    if let Some((largest_acked, skip_pn)) =
                        largest_acked.zip(skip_pn)
                    {
                        if largest_acked > skip_pn {
                            self.pkt_num_manager.set_skip_pn(None);
                        }
                    }

                    // Fold this path's recovery outcome into the
                    // connection-wide counters.
                    self.lost_count += lost_packets;
                    self.lost_bytes += lost_bytes as u64;
                    self.acked_bytes += acked_bytes as u64;
                    self.spurious_lost_count += spurious_losses;
                }
            },

            frame::Frame::ResetStream {
                stream_id,
                error_code,
                final_size,
            } => {
                // Peer can't send on our unidirectional streams.
                if !stream::is_bidi(stream_id) &&
                    stream::is_local(stream_id, self.is_server)
                {
                    return Err(Error::InvalidStreamState(stream_id));
                }

                let max_rx_data_left = self.max_rx_data() - self.rx_data;

                // Get existing stream or create a new one, but if the stream
                // has already been closed and collected, ignore the frame.
                //
                // This can happen if e.g. an ACK frame is lost, and the peer
                // retransmits another frame before it realizes that the stream
                // is gone.
                //
                // Note that it makes it impossible to check if the frame is
                // illegal, since we have no state, but since we ignore the
                // frame, it should be fine.
                let stream = match self.get_or_create_stream(stream_id, false) {
                    Ok(v) => v,

                    Err(Error::Done) => return Ok(()),

                    Err(e) => return Err(e),
                };

                let was_readable = stream.is_readable();
                let priority_key = Arc::clone(&stream.priority_key);

                let stream::RecvBufResetReturn {
                    max_data_delta,
                    consumed_flowcontrol,
                } = stream.recv.reset(error_code, final_size)?;

                // The final size advances connection-level flow control, so
                // check it against the remaining connection-level window.
                if max_data_delta > max_rx_data_left {
                    return Err(Error::FlowControl);
                }

                if !was_readable && stream.is_readable() {
                    self.streams.insert_readable(&priority_key);
                }

                self.rx_data += max_data_delta;
                // We dropped the receive buffer, return connection level
                // flow-control
                self.flow_control.add_consumed(consumed_flowcontrol);

                self.reset_stream_remote_count =
                    self.reset_stream_remote_count.saturating_add(1);
            },

            frame::Frame::StopSending {
                stream_id,
                error_code,
            } => {
                // STOP_SENDING on a receive-only stream is a fatal error.
                if !stream::is_local(stream_id, self.is_server) &&
                    !stream::is_bidi(stream_id)
                {
                    return Err(Error::InvalidStreamState(stream_id));
                }

                // Get existing stream or create a new one, but if the stream
                // has already been closed and collected, ignore the frame.
                //
                // This can happen if e.g. an ACK frame is lost, and the peer
                // retransmits another frame before it realizes that the stream
                // is gone.
                //
                // Note that it makes it impossible to check if the frame is
                // illegal, since we have no state, but since we ignore the
                // frame, it should be fine.
                let stream = match self.get_or_create_stream(stream_id, false) {
                    Ok(v) => v,

                    Err(Error::Done) => return Ok(()),

                    Err(e) => return Err(e),
                };

                let was_writable = stream.is_writable();

                let priority_key = Arc::clone(&stream.priority_key);

                // Try stopping the stream.
                if let Ok((final_size, unsent)) = stream.send.stop(error_code) {
                    // Claw back some flow control allowance from data that was
                    // buffered but not actually sent before the stream was
                    // reset.
                    //
                    // Note that `tx_cap` will be updated later on, so no need
                    // to touch it here.
                    self.tx_data = self.tx_data.saturating_sub(unsent);

                    self.tx_buffered =
                        self.tx_buffered.saturating_sub(unsent as usize);

                    // These drops in qlog are a bit weird, but the only way to
                    // ensure that all bytes that are moved from App to Transport
                    // in stream_do_send are eventually moved from Transport to
                    // Dropped. Ideally we would add a Transport to Network
                    // transition also as a way to indicate when bytes were
                    // transmitted vs dropped without ever being sent.
                    qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
                        let ev_data =
                            EventData::DataMoved(qlog::events::quic::DataMoved {
                                stream_id: Some(stream_id),
                                offset: Some(final_size),
                                length: Some(unsent),
                                from: Some(DataRecipient::Transport),
                                to: Some(DataRecipient::Dropped),
                                ..Default::default()
                            });

                        q.add_event_data_with_instant(ev_data, now).ok();
                    });

                    // Queue a RESET_STREAM frame in response, as required by
                    // the peer's STOP_SENDING.
                    self.streams.insert_reset(stream_id, error_code, final_size);

                    if !was_writable {
                        self.streams.insert_writable(&priority_key);
                    }

                    self.stopped_stream_remote_count =
                        self.stopped_stream_remote_count.saturating_add(1);
                    self.reset_stream_local_count =
                        self.reset_stream_local_count.saturating_add(1);
                }
            },

            frame::Frame::Crypto { data } => {
                if data.max_off() >= MAX_CRYPTO_STREAM_OFFSET {
                    return Err(Error::CryptoBufferExceeded);
                }

                // Push the data to the stream so it can be re-ordered.
                self.crypto_ctx[epoch].crypto_stream.recv.write(data)?;

                // Feed crypto data to the TLS state, if there's data
                // available at the expected offset.
                let mut crypto_buf = [0; 512];

                let level = crypto::Level::from_epoch(epoch);

                let stream = &mut self.crypto_ctx[epoch].crypto_stream;

                while let Ok((read, _)) = stream.recv.emit(&mut crypto_buf) {
                    let recv_buf = &crypto_buf[..read];
                    self.handshake.provide_data(level, recv_buf)?;
                }

                self.do_handshake(now)?;
            },

            frame::Frame::CryptoHeader { .. } => unreachable!(),

            // TODO: implement stateless retry
            frame::Frame::NewToken { .. } =>
                if self.is_server {
                    return Err(Error::InvalidPacket);
                },

            frame::Frame::Stream { stream_id, data } => {
                // Peer can't send on our unidirectional streams.
                if !stream::is_bidi(stream_id) &&
                    stream::is_local(stream_id, self.is_server)
                {
                    return Err(Error::InvalidStreamState(stream_id));
                }

                let max_rx_data_left = self.max_rx_data() - self.rx_data;

                // Get existing stream or create a new one, but if the stream
                // has already been closed and collected, ignore the frame.
                //
                // This can happen if e.g. an ACK frame is lost, and the peer
                // retransmits another frame before it realizes that the stream
                // is gone.
                //
                // Note that it makes it impossible to check if the frame is
                // illegal, since we have no state, but since we ignore the
                // frame, it should be fine.
                let stream = match self.get_or_create_stream(stream_id, false) {
                    Ok(v) => v,

                    Err(Error::Done) => return Ok(()),

                    Err(e) => return Err(e),
                };

                // Check for the connection-level flow control limit.
                let max_off_delta =
                    data.max_off().saturating_sub(stream.recv.max_off());

                if max_off_delta > max_rx_data_left {
                    return Err(Error::FlowControl);
                }

                let was_readable = stream.is_readable();
                let priority_key = Arc::clone(&stream.priority_key);

                let was_draining = stream.recv.is_draining();

                stream.recv.write(data)?;

                if !was_readable && stream.is_readable() {
                    self.streams.insert_readable(&priority_key);
                }

                self.rx_data += max_off_delta;

                if was_draining {
                    // When a stream is in draining state it will not queue
                    // incoming data for the application to read, so consider
                    // the received data as consumed, which might trigger a flow
                    // control update.
                    self.flow_control.add_consumed(max_off_delta);
                }
            },

            frame::Frame::StreamHeader { .. } => unreachable!(),

            frame::Frame::MaxData { max } => {
                // The limit may only move forward; stale (smaller) values
                // from reordered frames are ignored.
                self.max_tx_data = cmp::max(self.max_tx_data, max);
            },

            frame::Frame::MaxStreamData { stream_id, max } => {
                // Peer can't receive on its own unidirectional streams.
                if !stream::is_bidi(stream_id) &&
                    !stream::is_local(stream_id, self.is_server)
                {
                    return Err(Error::InvalidStreamState(stream_id));
                }

                // Get existing stream or create a new one, but if the stream
                // has already been closed and collected, ignore the frame.
                //
                // This can happen if e.g. an ACK frame is lost, and the peer
                // retransmits another frame before it realizes that the stream
                // is gone.
                //
                // Note that it makes it impossible to check if the frame is
                // illegal, since we have no state, but since we ignore the
                // frame, it should be fine.
                let stream = match self.get_or_create_stream(stream_id, false) {
                    Ok(v) => v,

                    Err(Error::Done) => return Ok(()),

                    Err(e) => return Err(e),
                };

                let was_flushable = stream.is_flushable();

                stream.send.update_max_data(max);

                let writable = stream.is_writable();

                let priority_key = Arc::clone(&stream.priority_key);

                // If the stream is now flushable push it to the flushable queue,
                // but only if it wasn't already queued.
                if stream.is_flushable() && !was_flushable {
                    let priority_key = Arc::clone(&stream.priority_key);
                    self.streams.insert_flushable(&priority_key);
                }

                if writable {
                    self.streams.insert_writable(&priority_key);
                }
            },

            frame::Frame::MaxStreamsBidi { max } => {
                if max > MAX_STREAM_ID {
                    return Err(Error::InvalidFrame);
                }

                self.streams.update_peer_max_streams_bidi(max);
            },

            frame::Frame::MaxStreamsUni { max } => {
                if max > MAX_STREAM_ID {
                    return Err(Error::InvalidFrame);
                }

                self.streams.update_peer_max_streams_uni(max);
            },

            frame::Frame::DataBlocked { .. } => {
                self.data_blocked_recv_count =
                    self.data_blocked_recv_count.saturating_add(1);
            },

            frame::Frame::StreamDataBlocked { .. } => {
                self.stream_data_blocked_recv_count =
                    self.stream_data_blocked_recv_count.saturating_add(1);
            },

            frame::Frame::StreamsBlockedBidi { limit } => {
                if limit > MAX_STREAM_ID {
                    return Err(Error::InvalidFrame);
                }

                self.streams_blocked_bidi_recv_count =
                    self.streams_blocked_bidi_recv_count.saturating_add(1);
            },

            frame::Frame::StreamsBlockedUni { limit } => {
                if limit > MAX_STREAM_ID {
                    return Err(Error::InvalidFrame);
                }

                self.streams_blocked_uni_recv_count =
                    self.streams_blocked_uni_recv_count.saturating_add(1);
            },

            frame::Frame::NewConnectionId {
                seq_num,
                retire_prior_to,
                conn_id,
                reset_token,
            } => {
                // NEW_CONNECTION_ID is forbidden when we use zero-length
                // DCIDs.
                if self.ids.zero_length_dcid() {
                    return Err(Error::InvalidState);
                }

                let mut retired_path_ids = SmallVec::new();

                // Retire pending path IDs before propagating the error code to
                // make sure retired connection IDs are not in use anymore.
                let new_dcid_res = self.ids.new_dcid(
                    conn_id.into(),
                    seq_num,
                    u128::from_be_bytes(reset_token),
                    retire_prior_to,
                    &mut retired_path_ids,
                );

                for (dcid_seq, pid) in retired_path_ids {
                    let path = self.paths.get_mut(pid)?;

                    // Maybe the path already switched to another DCID.
                    if path.active_dcid_seq != Some(dcid_seq) {
                        continue;
                    }

                    if let Some(new_dcid_seq) =
                        self.ids.lowest_available_dcid_seq()
                    {
                        path.active_dcid_seq = Some(new_dcid_seq);

                        self.ids.link_dcid_to_path_id(new_dcid_seq, pid)?;

                        trace!(
                            "{} path ID {} changed DCID: old seq num {} new seq num {}",
                            self.trace_id, pid, dcid_seq, new_dcid_seq,
                        );
                    } else {
                        // We cannot use this path anymore for now.
                        path.active_dcid_seq = None;

                        trace!(
                            "{} path ID {} cannot be used; DCID seq num {} has been retired",
                            self.trace_id, pid, dcid_seq,
                        );
                    }
                }

                // Propagate error (if any) now...
                new_dcid_res?;
            },

            frame::Frame::RetireConnectionId { seq_num } => {
                // RETIRE_CONNECTION_ID is forbidden when we use zero-length
                // SCIDs.
                if self.ids.zero_length_scid() {
                    return Err(Error::InvalidState);
                }

                if let Some(pid) = self.ids.retire_scid(seq_num, &hdr.dcid)? {
                    let path = self.paths.get_mut(pid)?;

                    // Maybe we already linked a new SCID to that path.
                    if path.active_scid_seq == Some(seq_num) {
                        // XXX: We do not remove unused paths now, we instead
                        // wait until we need to maintain more paths than the
                        // host is willing to.
                        path.active_scid_seq = None;
                    }
                }
            },

            frame::Frame::PathChallenge { data } => {
                self.path_challenge_rx_count += 1;

                self.paths
                    .get_mut(recv_path_id)?
                    .on_challenge_received(data);
            },

            frame::Frame::PathResponse { data } => {
                self.paths.on_response_received(data)?;
            },

            frame::Frame::ConnectionClose {
                error_code, reason, ..
            } => {
                self.peer_error = Some(ConnectionError {
                    is_app: false,
                    error_code,
                    reason,
                });

                // Enter the draining period for 3 PTOs before the connection
                // is finally torn down.
                let path = self.paths.get_active()?;
                self.draining_timer = Some(now + (path.recovery.pto() * 3));
            },

            frame::Frame::ApplicationClose { error_code, reason } => {
                self.peer_error = Some(ConnectionError {
                    is_app: true,
                    error_code,
                    reason,
                });

                // Enter the draining period for 3 PTOs before the connection
                // is finally torn down.
                let path = self.paths.get_active()?;
                self.draining_timer = Some(now + (path.recovery.pto() * 3));
            },

            frame::Frame::HandshakeDone => {
                // Only servers send HANDSHAKE_DONE.
                if self.is_server {
                    return Err(Error::InvalidPacket);
                }

                self.peer_verified_initial_address = true;

                self.handshake_confirmed = true;

                // Once the handshake is confirmed, we can drop Handshake keys.
                self.drop_epoch_state(packet::Epoch::Handshake, now);
            },

            frame::Frame::Datagram { data } => {
                // Close the connection if DATAGRAMs are not enabled.
                // quiche always advertises support for 64K sized DATAGRAM
                // frames, as recommended by the standard, so we don't need a
                // size check.
                if !self.dgram_enabled() {
                    return Err(Error::InvalidState);
                }

                // If recv queue is full, discard oldest
                if self.dgram_recv_queue.is_full() {
                    self.dgram_recv_queue.pop();
                }

                self.dgram_recv_queue.push(data)?;

                self.dgram_recv_count = self.dgram_recv_count.saturating_add(1);

                let path = self.paths.get_mut(recv_path_id)?;
                path.dgram_recv_count = path.dgram_recv_count.saturating_add(1);
            },

            frame::Frame::DatagramHeader { .. } => unreachable!(),
        }

        Ok(())
    }
8414
8415 /// Drops the keys and recovery state for the given epoch.
8416 fn drop_epoch_state(&mut self, epoch: packet::Epoch, now: Instant) {
8417 let crypto_ctx = &mut self.crypto_ctx[epoch];
8418 if crypto_ctx.crypto_open.is_none() {
8419 return;
8420 }
8421 crypto_ctx.clear();
8422 self.pkt_num_spaces[epoch].clear();
8423
8424 let handshake_status = self.handshake_status();
8425 for (_, p) in self.paths.iter_mut() {
8426 p.recovery
8427 .on_pkt_num_space_discarded(epoch, handshake_status, now);
8428 }
8429
8430 trace!("{} dropped epoch {} state", self.trace_id, epoch);
8431 }
8432
    /// Returns the connection level flow control limit.
    ///
    /// This is the total amount of data the peer is currently allowed to
    /// send to us across all streams, as tracked by the connection-level
    /// flow control state.
    fn max_rx_data(&self) -> u64 {
        self.flow_control.max_data()
    }
8437
8438 /// Returns true if the HANDSHAKE_DONE frame needs to be sent.
8439 fn should_send_handshake_done(&self) -> bool {
8440 self.is_established() && !self.handshake_done_sent && self.is_server
8441 }
8442
8443 /// Returns the idle timeout value.
8444 ///
8445 /// `None` is returned if both end-points disabled the idle timeout.
8446 fn idle_timeout(&self) -> Option<Duration> {
8447 // If the transport parameter is set to 0, then the respective endpoint
8448 // decided to disable the idle timeout. If both are disabled we should
8449 // not set any timeout.
8450 if self.local_transport_params.max_idle_timeout == 0 &&
8451 self.peer_transport_params.max_idle_timeout == 0
8452 {
8453 return None;
8454 }
8455
8456 // If the local endpoint or the peer disabled the idle timeout, use the
8457 // other peer's value, otherwise use the minimum of the two values.
8458 let idle_timeout = if self.local_transport_params.max_idle_timeout == 0 {
8459 self.peer_transport_params.max_idle_timeout
8460 } else if self.peer_transport_params.max_idle_timeout == 0 {
8461 self.local_transport_params.max_idle_timeout
8462 } else {
8463 cmp::min(
8464 self.local_transport_params.max_idle_timeout,
8465 self.peer_transport_params.max_idle_timeout,
8466 )
8467 };
8468
8469 let path_pto = match self.paths.get_active() {
8470 Ok(p) => p.recovery.pto(),
8471 Err(_) => Duration::ZERO,
8472 };
8473
8474 let idle_timeout = Duration::from_millis(idle_timeout);
8475 let idle_timeout = cmp::max(idle_timeout, 3 * path_pto);
8476
8477 Some(idle_timeout)
8478 }
8479
    /// Returns the connection's handshake status for use in loss recovery.
    ///
    /// The status snapshot captures whether Handshake keys are available,
    /// whether the peer's initial address has been verified, and whether the
    /// handshake has completed.
    fn handshake_status(&self) -> recovery::HandshakeStatus {
        recovery::HandshakeStatus {
            has_handshake_keys: self.crypto_ctx[packet::Epoch::Handshake]
                .has_keys(),

            peer_verified_address: self.peer_verified_initial_address,

            completed: self.is_established(),
        }
    }
8491
8492 /// Updates send capacity.
8493 fn update_tx_cap(&mut self) {
8494 let cwin_available = match self.paths.get_active() {
8495 Ok(p) => p.recovery.cwnd_available() as u64,
8496 Err(_) => 0,
8497 };
8498
8499 let cap =
8500 cmp::min(cwin_available, self.max_tx_data - self.tx_data) as usize;
8501 self.tx_cap = (cap as f64 * self.tx_cap_factor).ceil() as usize;
8502 }
8503
    /// Returns true when delivery rate sampling should treat the connection
    /// as application-limited.
    fn delivery_rate_check_if_app_limited(&self) -> bool {
        // Enter the app-limited phase of delivery rate when these conditions
        // are met:
        //
        // - The remaining capacity is higher than available bytes in cwnd (there
        //   is more room to send).
        // - New data since the last send() is smaller than available bytes in
        //   cwnd (we queued less than what we can send).
        // - There is room to send more data in cwnd.
        //
        // In application-limited phases the transmission rate is limited by the
        // application rather than the congestion control algorithm.
        //
        // Note that this is equivalent to CheckIfApplicationLimited() from the
        // delivery rate draft. This is also separate from `recovery.app_limited`
        // and only applies to delivery rate calculation.
        //
        // Available cwnd is summed across all currently active paths.
        let cwin_available = self
            .paths
            .iter()
            .filter(|&(_, p)| p.active())
            .map(|(_, p)| p.recovery.cwnd_available())
            .sum();

        ((self.tx_buffered + self.dgram_send_queue_byte_size()) < cwin_available) &&
            (self.tx_data.saturating_sub(self.last_tx_data)) <
                cwin_available as u64 &&
            cwin_available > 0
    }
8532
    /// Checks the `tx_buffered` accounting invariant and flags the tracking
    /// state as inconsistent when it is violated.
    fn check_tx_buffered_invariant(&mut self) {
        // tx_buffered should track bytes queued in the stream buffers
        // and unacked retransmittable bytes in the network.
        // If tx_buffered > 0, mark the tx_buffered_state as inconsistent if
        // there are no flushable streams and there are no inflight bytes.
        //
        // It is normal to have tx_buffered == 0 while there are inflight bytes
        // since not all QUIC frames are retransmittable; inflight tracks all
        // bytes on the network which are subject to congestion control.
        if self.tx_buffered > 0 &&
            !self.streams.has_flushable() &&
            !self
                .paths
                .iter()
                .any(|(_, p)| p.recovery.bytes_in_flight() > 0)
        {
            self.tx_buffered_state = TxBufferTrackingState::Inconsistent;
        }
    }
8552
8553 fn set_initial_dcid(
8554 &mut self, cid: ConnectionId<'static>, reset_token: Option<u128>,
8555 path_id: usize,
8556 ) -> Result<()> {
8557 self.ids.set_initial_dcid(cid, reset_token, Some(path_id));
8558 self.paths.get_mut(path_id)?.active_dcid_seq = Some(0);
8559
8560 Ok(())
8561 }
8562
    /// Selects the path that the incoming packet belongs to, or creates a new
    /// one if no existing path matches.
    ///
    /// `recv_pid` is the path matching the packet's 4-tuple, if one was
    /// found; `dcid` is the destination CID from the packet header (one of
    /// our SCIDs); `buf_len` is the received packet size, used to seed the
    /// anti-amplification budget of a newly created path.
    fn get_or_create_recv_path_id(
        &mut self, recv_pid: Option<usize>, dcid: &ConnectionId, buf_len: usize,
        info: &RecvInfo,
    ) -> Result<usize> {
        let ids = &mut self.ids;

        // The DCID must map to one of our source CIDs, otherwise the packet
        // can't belong to this connection.
        let (in_scid_seq, mut in_scid_pid) =
            ids.find_scid_seq(dcid).ok_or(Error::InvalidState)?;

        if let Some(recv_pid) = recv_pid {
            // If the path observes a change of SCID used, note it.
            let recv_path = self.paths.get_mut(recv_pid)?;

            let cid_entry =
                recv_path.active_scid_seq.and_then(|v| ids.get_scid(v).ok());

            if cid_entry.map(|e| &e.cid) != Some(dcid) {
                let incoming_cid_entry = ids.get_scid(in_scid_seq)?;

                let prev_recv_pid =
                    incoming_cid_entry.path_id.unwrap_or(recv_pid);

                if prev_recv_pid != recv_pid {
                    trace!(
                        "{} peer reused CID {:?} from path {} on path {}",
                        self.trace_id,
                        dcid,
                        prev_recv_pid,
                        recv_pid
                    );

                    // TODO: reset congestion control.
                }

                trace!(
                    "{} path ID {} now see SCID with seq num {}",
                    self.trace_id,
                    recv_pid,
                    in_scid_seq
                );

                recv_path.active_scid_seq = Some(in_scid_seq);
                ids.link_scid_to_path_id(in_scid_seq, recv_pid)?;
            }

            return Ok(recv_pid);
        }

        // This is a new 4-tuple. See if the CID has not been assigned on
        // another path.

        // Ignore this step if are using zero-length SCID.
        if ids.zero_length_scid() {
            in_scid_pid = None;
        }

        if let Some(in_scid_pid) = in_scid_pid {
            // This CID has been used by another path. If we have the
            // room to do so, create a new `Path` structure holding this
            // new 4-tuple. Otherwise, drop the packet.
            let old_path = self.paths.get_mut(in_scid_pid)?;
            let old_local_addr = old_path.local_addr();
            let old_peer_addr = old_path.peer_addr();

            trace!(
                "{} reused CID seq {} of ({},{}) (path {}) on ({},{})",
                self.trace_id,
                in_scid_seq,
                old_local_addr,
                old_peer_addr,
                in_scid_pid,
                info.to,
                info.from
            );

            // Notify the application.
            self.paths.notify_event(PathEvent::ReusedSourceConnectionId(
                in_scid_seq,
                (old_local_addr, old_peer_addr),
                (info.to, info.from),
            ));
        }

        // This is a new path using an unassigned CID; create it!
        let mut path = path::Path::new(
            info.to,
            info.from,
            &self.recovery_config,
            self.path_challenge_recv_max_queue_len,
            false,
            None,
        );

        // Limit what can be sent on the new path until its peer address is
        // validated (anti-amplification).
        path.max_send_bytes = buf_len * self.max_amplification_factor;
        path.active_scid_seq = Some(in_scid_seq);

        // Automatically probes the new path.
        path.request_validation();

        let pid = self.paths.insert_path(path, self.is_server)?;

        // Do not record path reuse.
        if in_scid_pid.is_none() {
            ids.link_scid_to_path_id(in_scid_seq, pid)?;
        }

        Ok(pid)
    }
8673
8674 /// Selects the path on which the next packet must be sent.
8675 fn get_send_path_id(
8676 &self, from: Option<SocketAddr>, to: Option<SocketAddr>,
8677 ) -> Result<usize> {
8678 // A probing packet must be sent, but only if the connection is fully
8679 // established.
8680 if self.is_established() {
8681 let mut probing = self
8682 .paths
8683 .iter()
8684 .filter(|(_, p)| from.is_none() || Some(p.local_addr()) == from)
8685 .filter(|(_, p)| to.is_none() || Some(p.peer_addr()) == to)
8686 .filter(|(_, p)| p.active_dcid_seq.is_some())
8687 .filter(|(_, p)| p.probing_required())
8688 .map(|(pid, _)| pid);
8689
8690 if let Some(pid) = probing.next() {
8691 return Ok(pid);
8692 }
8693 }
8694
8695 if let Some((pid, p)) = self.paths.get_active_with_pid() {
8696 if from.is_some() && Some(p.local_addr()) != from {
8697 return Err(Error::Done);
8698 }
8699
8700 if to.is_some() && Some(p.peer_addr()) != to {
8701 return Err(Error::Done);
8702 }
8703
8704 return Ok(pid);
8705 };
8706
8707 Err(Error::InvalidState)
8708 }
8709
8710 /// Sets the path with identifier 'path_id' to be active.
8711 fn set_active_path(&mut self, path_id: usize, now: Instant) -> Result<()> {
8712 if let Ok(old_active_path) = self.paths.get_active_mut() {
8713 for &e in packet::Epoch::epochs(
8714 packet::Epoch::Initial..=packet::Epoch::Application,
8715 ) {
8716 let (lost_packets, lost_bytes) = old_active_path
8717 .recovery
8718 .on_path_change(e, now, &self.trace_id);
8719
8720 self.lost_count += lost_packets;
8721 self.lost_bytes += lost_bytes as u64;
8722 }
8723 }
8724
8725 self.paths.set_active_path(path_id)
8726 }
8727
8728 /// Handles potential connection migration.
8729 fn on_peer_migrated(
8730 &mut self, new_pid: usize, disable_dcid_reuse: bool, now: Instant,
8731 ) -> Result<()> {
8732 let active_path_id = self.paths.get_active_path_id()?;
8733
8734 if active_path_id == new_pid {
8735 return Ok(());
8736 }
8737
8738 self.set_active_path(new_pid, now)?;
8739
8740 let no_spare_dcid =
8741 self.paths.get_mut(new_pid)?.active_dcid_seq.is_none();
8742
8743 if no_spare_dcid && !disable_dcid_reuse {
8744 self.paths.get_mut(new_pid)?.active_dcid_seq =
8745 self.paths.get_mut(active_path_id)?.active_dcid_seq;
8746 }
8747
8748 Ok(())
8749 }
8750
8751 /// Creates a new client-side path.
8752 fn create_path_on_client(
8753 &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
8754 ) -> Result<usize> {
8755 if self.is_server {
8756 return Err(Error::InvalidState);
8757 }
8758
8759 // If we use zero-length SCID and go over our local active CID limit,
8760 // the `insert_path()` call will raise an error.
8761 if !self.ids.zero_length_scid() && self.ids.available_scids() == 0 {
8762 return Err(Error::OutOfIdentifiers);
8763 }
8764
8765 // Do we have a spare DCID? If we are using zero-length DCID, just use
8766 // the default having sequence 0 (note that if we exceed our local CID
8767 // limit, the `insert_path()` call will raise an error.
8768 let dcid_seq = if self.ids.zero_length_dcid() {
8769 0
8770 } else {
8771 self.ids
8772 .lowest_available_dcid_seq()
8773 .ok_or(Error::OutOfIdentifiers)?
8774 };
8775
8776 let mut path = path::Path::new(
8777 local_addr,
8778 peer_addr,
8779 &self.recovery_config,
8780 self.path_challenge_recv_max_queue_len,
8781 false,
8782 None,
8783 );
8784 path.active_dcid_seq = Some(dcid_seq);
8785
8786 let pid = self
8787 .paths
8788 .insert_path(path, false)
8789 .map_err(|_| Error::OutOfIdentifiers)?;
8790 self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
8791
8792 Ok(pid)
8793 }
8794
    // Marks the connection as closed and does any related tidyup.
    //
    // When the `qlog` feature is enabled, this also emits a
    // `ConnectionClosed` qlog event describing how the connection ended,
    // then drops the qlog streamer so no further events are written.
    fn mark_closed(&mut self) {
        #[cfg(feature = "qlog")]
        {
            // Derive the close details from the connection's final state:
            // (established?, timed out?, peer error, local error).
            let cc = match (self.is_established(), self.timed_out, &self.peer_error, &self.local_error) {
                // Never established: report a handshake failure.
                (false, _, _, _) => qlog::events::connectivity::ConnectionClosed {
                    owner: Some(TransportOwner::Local),
                    connection_code: None,
                    application_code: None,
                    internal_code: None,
                    reason: Some("Failed to establish connection".to_string()),
                    trigger: Some(qlog::events::connectivity::ConnectionClosedTrigger::HandshakeTimeout)
                },

                // Established but timed out: idle timeout.
                (true, true, _, _) => qlog::events::connectivity::ConnectionClosed {
                    owner: Some(TransportOwner::Local),
                    connection_code: None,
                    application_code: None,
                    internal_code: None,
                    reason: Some("Idle timeout".to_string()),
                    trigger: Some(qlog::events::connectivity::ConnectionClosedTrigger::IdleTimeout)
                },

                // Closed by the peer: report the peer's error code, either as
                // an application code or a transport code. A transport-level
                // NO_ERROR counts as a clean close.
                (true, false, Some(peer_error), None) => {
                    let (connection_code, application_code, trigger) = if peer_error.is_app {
                        (None, Some(qlog::events::ApplicationErrorCode::Value(peer_error.error_code)), None)
                    } else {
                        let trigger = if peer_error.error_code == WireErrorCode::NoError as u64 {
                            Some(qlog::events::connectivity::ConnectionClosedTrigger::Clean)
                        } else {
                            Some(qlog::events::connectivity::ConnectionClosedTrigger::Error)
                        };

                        (Some(qlog::events::ConnectionErrorCode::Value(peer_error.error_code)), None, trigger)
                    };

                    qlog::events::connectivity::ConnectionClosed {
                        owner: Some(TransportOwner::Remote),
                        connection_code,
                        application_code,
                        internal_code: None,
                        reason: Some(String::from_utf8_lossy(&peer_error.reason).to_string()),
                        trigger,
                    }
                },

                // Closed locally: same mapping as above, but owned by the
                // local endpoint.
                (true, false, None, Some(local_error)) => {
                    let (connection_code, application_code, trigger) = if local_error.is_app {
                        (None, Some(qlog::events::ApplicationErrorCode::Value(local_error.error_code)), None)
                    } else {
                        let trigger = if local_error.error_code == WireErrorCode::NoError as u64 {
                            Some(qlog::events::connectivity::ConnectionClosedTrigger::Clean)
                        } else {
                            Some(qlog::events::connectivity::ConnectionClosedTrigger::Error)
                        };

                        (Some(qlog::events::ConnectionErrorCode::Value(local_error.error_code)), None, trigger)
                    };

                    qlog::events::connectivity::ConnectionClosed {
                        owner: Some(TransportOwner::Local),
                        connection_code,
                        application_code,
                        internal_code: None,
                        reason: Some(String::from_utf8_lossy(&local_error.reason).to_string()),
                        trigger,
                    }
                },

                // Any other combination (e.g. both a peer and a local error):
                // emit an event with no details.
                _ => qlog::events::connectivity::ConnectionClosed {
                    owner: None,
                    connection_code: None,
                    application_code: None,
                    internal_code: None,
                    reason: None,
                    trigger: None,
                },
            };

            qlog_with_type!(QLOG_CONNECTION_CLOSED, self.qlog, q, {
                let ev_data = EventData::ConnectionClosed(cc);

                q.add_event_data_now(ev_data).ok();
            });
            // Stop qlog output for this connection.
            self.qlog.streamer = None;
        }
        self.closed = true;
    }
8883}
8884
#[cfg(feature = "boringssl-boring-crate")]
impl<F: BufFactory> AsMut<boring::ssl::SslRef> for Connection<F> {
    // Returns a mutable reference to the BoringSSL `SslRef` backing this
    // connection's TLS handshake state.
    fn as_mut(&mut self) -> &mut boring::ssl::SslRef {
        self.handshake.ssl_mut()
    }
}
8891
8892/// Maps an `Error` to `Error::Done`, or itself.
8893///
8894/// When a received packet that hasn't yet been authenticated triggers a failure
8895/// it should, in most cases, be ignored, instead of raising a connection error,
8896/// to avoid potential man-in-the-middle and man-on-the-side attacks.
8897///
8898/// However, if no other packet was previously received, the connection should
8899/// indeed be closed as the received packet might just be network background
8900/// noise, and it shouldn't keep resources occupied indefinitely.
8901///
8902/// This function maps an error to `Error::Done` to ignore a packet failure
8903/// without aborting the connection, except when no other packet was previously
8904/// received, in which case the error itself is returned, but only on the
8905/// server-side as the client will already have armed the idle timer.
8906///
8907/// This must only be used for errors preceding packet authentication. Failures
8908/// happening after a packet has been authenticated should still cause the
8909/// connection to be aborted.
8910fn drop_pkt_on_err(
8911 e: Error, recv_count: usize, is_server: bool, trace_id: &str,
8912) -> Error {
8913 // On the server, if no other packet has been successfully processed, abort
8914 // the connection to avoid keeping the connection open when only junk is
8915 // received.
8916 if is_server && recv_count == 0 {
8917 return e;
8918 }
8919
8920 trace!("{trace_id} dropped invalid packet");
8921
8922 // Ignore other invalid packets that haven't been authenticated to prevent
8923 // man-in-the-middle and man-on-the-side attacks.
8924 Error::Done
8925}
8926
/// Formats a `(source, destination)` address pair as `src:... dst:...`,
/// producing no output when either IP address is unspecified.
struct AddrTupleFmt(SocketAddr, SocketAddr);

impl std::fmt::Display for AddrTupleFmt {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let src = &self.0;
        let dst = &self.1;

        // Unspecified addresses (e.g. 0.0.0.0) carry no useful information,
        // so emit nothing at all.
        if src.ip().is_unspecified() || dst.ip().is_unspecified() {
            return Ok(());
        }

        write!(f, "src:{src} dst:{dst}")
    }
}
8940
/// Statistics about the connection.
///
/// A connection's statistics can be collected using the [`stats()`] method.
///
/// [`stats()`]: struct.Connection.html#method.stats
#[derive(Clone, Default)]
pub struct Stats {
    /// The number of QUIC packets received.
    pub recv: usize,

    /// The number of QUIC packets sent.
    pub sent: usize,

    /// The number of QUIC packets that were lost.
    pub lost: usize,

    /// The number of QUIC packets that were marked as lost but later acked.
    pub spurious_lost: usize,

    /// The number of sent QUIC packets with retransmitted data.
    pub retrans: usize,

    /// The number of sent bytes.
    pub sent_bytes: u64,

    /// The number of received bytes.
    pub recv_bytes: u64,

    /// The number of sent bytes that were acked.
    pub acked_bytes: u64,

    /// The number of sent bytes that were declared lost.
    pub lost_bytes: u64,

    /// The number of stream bytes retransmitted.
    pub stream_retrans_bytes: u64,

    /// The number of DATAGRAM frames received.
    pub dgram_recv: usize,

    /// The number of DATAGRAM frames sent.
    pub dgram_sent: usize,

    /// The number of known paths for the connection.
    pub paths_count: usize,

    /// The number of streams reset by local.
    pub reset_stream_count_local: u64,

    /// The number of streams stopped by local.
    pub stopped_stream_count_local: u64,

    /// The number of streams reset by remote.
    pub reset_stream_count_remote: u64,

    /// The number of streams stopped by remote.
    pub stopped_stream_count_remote: u64,

    /// The number of DATA_BLOCKED frames sent due to hitting the connection
    /// flow control limit.
    pub data_blocked_sent_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames sent due to a stream hitting
    /// the stream flow control limit.
    pub stream_data_blocked_sent_count: u64,

    /// The number of DATA_BLOCKED frames received from the remote.
    pub data_blocked_recv_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames received from the remote.
    pub stream_data_blocked_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames for bidirectional streams received
    /// from the remote, indicating the peer is blocked on opening new
    /// bidirectional streams.
    pub streams_blocked_bidi_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames for unidirectional streams received
    /// from the remote, indicating the peer is blocked on opening new
    /// unidirectional streams.
    pub streams_blocked_uni_recv_count: u64,

    /// The total number of PATH_CHALLENGE frames that were received.
    pub path_challenge_rx_count: u64,

    /// Total duration during which this side of the connection was
    /// actively sending bytes or waiting for those bytes to be acked.
    pub bytes_in_flight_duration: Duration,

    /// Health state of the connection's tx_buffered.
    pub tx_buffered_state: TxBufferTrackingState,
}
9033
9034impl std::fmt::Debug for Stats {
9035 #[inline]
9036 fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
9037 write!(
9038 f,
9039 "recv={} sent={} lost={} retrans={}",
9040 self.recv, self.sent, self.lost, self.retrans,
9041 )?;
9042
9043 write!(
9044 f,
9045 " sent_bytes={} recv_bytes={} lost_bytes={}",
9046 self.sent_bytes, self.recv_bytes, self.lost_bytes,
9047 )?;
9048
9049 Ok(())
9050 }
9051}
9052
9053#[doc(hidden)]
9054#[cfg(any(test, feature = "internal"))]
9055pub mod test_utils;
9056
9057#[cfg(test)]
9058mod tests;
9059
9060pub use crate::packet::ConnectionId;
9061pub use crate::packet::Header;
9062pub use crate::packet::Type;
9063
9064pub use crate::path::PathEvent;
9065pub use crate::path::PathStats;
9066pub use crate::path::SocketAddrIter;
9067
9068pub use crate::recovery::BbrBwLoReductionStrategy;
9069pub use crate::recovery::BbrParams;
9070pub use crate::recovery::CongestionControlAlgorithm;
9071pub use crate::recovery::StartupExit;
9072pub use crate::recovery::StartupExitReason;
9073
9074pub use crate::stream::StreamIter;
9075
9076pub use crate::transport_params::TransportParams;
9077pub use crate::transport_params::UnknownTransportParameter;
9078pub use crate::transport_params::UnknownTransportParameterIterator;
9079pub use crate::transport_params::UnknownTransportParameters;
9080
9081pub use crate::buffers::BufFactory;
9082pub use crate::buffers::BufSplit;
9083
9084pub use crate::error::ConnectionError;
9085pub use crate::error::Error;
9086pub use crate::error::Result;
9087pub use crate::error::WireErrorCode;
9088
9089mod buffers;
9090mod cid;
9091mod crypto;
9092mod dgram;
9093mod error;
9094#[cfg(feature = "ffi")]
9095mod ffi;
9096mod flowcontrol;
9097mod frame;
9098pub mod h3;
9099mod minmax;
9100mod packet;
9101mod path;
9102mod pmtud;
9103mod rand;
9104mod range_buf;
9105mod ranges;
9106mod recovery;
9107mod stream;
9108mod tls;
9109mod transport_params;