quiche/lib.rs
1// Copyright (C) 2018-2019, Cloudflare, Inc.
2// All rights reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// * Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// * Redistributions in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
16// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
19// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27//! 🥧 Savoury implementation of the QUIC transport protocol and HTTP/3.
28//!
29//! [quiche] is an implementation of the QUIC transport protocol and HTTP/3 as
30//! specified by the [IETF]. It provides a low level API for processing QUIC
31//! packets and handling connection state. The application is responsible for
32//! providing I/O (e.g. sockets handling) as well as an event loop with support
33//! for timers.
34//!
35//! [quiche]: https://github.com/cloudflare/quiche/
36//! [ietf]: https://quicwg.org/
37//!
38//! ## Configuring connections
39//!
40//! The first step in establishing a QUIC connection using quiche is creating a
41//! [`Config`] object:
42//!
43//! ```
44//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
//! config.set_application_protos(&[b"example-proto"])?;
46//!
47//! // Additional configuration specific to application and use case...
48//! # Ok::<(), quiche::Error>(())
49//! ```
50//!
51//! The [`Config`] object controls important aspects of the QUIC connection such
52//! as QUIC version, ALPN IDs, flow control, congestion control, idle timeout
53//! and other properties or features.
54//!
55//! QUIC is a general-purpose transport protocol and there are several
56//! configuration properties where there is no reasonable default value. For
57//! example, the permitted number of concurrent streams of any particular type
58//! is dependent on the application running over QUIC, and other use-case
59//! specific concerns.
60//!
//! quiche defaults several properties to zero; applications most likely need
//! to set these to non-zero values that satisfy their needs, using the
//! following setters (a configuration sketch follows the list):
63//!
64//! - [`set_initial_max_streams_bidi()`]
65//! - [`set_initial_max_streams_uni()`]
66//! - [`set_initial_max_data()`]
67//! - [`set_initial_max_stream_data_bidi_local()`]
68//! - [`set_initial_max_stream_data_bidi_remote()`]
69//! - [`set_initial_max_stream_data_uni()`]
70//!
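//! A minimal configuration sketch (the limits below are purely illustrative,
//! not recommendations; pick values that match the application's needs):
//!
//! ```
//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
//! config.set_initial_max_streams_bidi(16);
//! config.set_initial_max_streams_uni(16);
//! config.set_initial_max_data(10_000_000);
//! config.set_initial_max_stream_data_bidi_local(1_000_000);
//! config.set_initial_max_stream_data_bidi_remote(1_000_000);
//! config.set_initial_max_stream_data_uni(1_000_000);
//! # Ok::<(), quiche::Error>(())
//! ```
//!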
//! [`Config`] also holds TLS configuration. This can be changed by mutators on
//! an existing object, or by constructing a TLS context manually and
//! creating a configuration using [`with_boring_ssl_ctx_builder()`].
74//!
75//! A configuration object can be shared among multiple connections.
76//!
77//! ### Connection setup
78//!
79//! On the client-side the [`connect()`] utility function can be used to create
80//! a new connection, while [`accept()`] is for servers:
81//!
82//! ```
83//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
84//! # let server_name = "quic.tech";
85//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
86//! # let peer = "127.0.0.1:1234".parse().unwrap();
87//! # let local = "127.0.0.1:4321".parse().unwrap();
88//! // Client connection.
89//! let conn =
//!     quiche::connect(Some(&server_name), &scid, local, peer, &mut config)?;
91//!
92//! // Server connection.
93//! # let peer = "127.0.0.1:1234".parse().unwrap();
94//! # let local = "127.0.0.1:4321".parse().unwrap();
95//! let conn = quiche::accept(&scid, None, local, peer, &mut config)?;
96//! # Ok::<(), quiche::Error>(())
97//! ```
98//!
99//! In both cases, the application is responsible for generating a new source
100//! connection ID that will be used to identify the new connection.
101//!
//! The application also needs to pass the address of the remote peer of the
//! connection: in the case of a client that is the address of the server it
//! is trying to connect to, and for a server that is the address of the
//! client that initiated the connection.
106//!
107//! ## Handling incoming packets
108//!
109//! Using the connection's [`recv()`] method the application can process
110//! incoming packets that belong to that connection from the network:
111//!
112//! ```no_run
113//! # let mut buf = [0; 512];
114//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
115//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
116//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
117//! # let peer = "127.0.0.1:1234".parse().unwrap();
118//! # let local = "127.0.0.1:4321".parse().unwrap();
119//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
120//! let to = socket.local_addr().unwrap();
121//!
//! loop {
//!     let (read, from) = socket.recv_from(&mut buf).unwrap();
//!
//!     let recv_info = quiche::RecvInfo { from, to };
//!
//!     let read = match conn.recv(&mut buf[..read], recv_info) {
//!         Ok(v) => v,
//!
//!         Err(quiche::Error::Done) => {
//!             // Done reading.
//!             break;
//!         },
//!
//!         Err(e) => {
//!             // An error occurred, handle it.
//!             break;
//!         },
//!     };
//! }
141//! # Ok::<(), quiche::Error>(())
142//! ```
143//!
144//! The application has to pass a [`RecvInfo`] structure in order to provide
145//! additional information about the received packet (such as the address it
146//! was received from).
147//!
148//! ## Generating outgoing packets
149//!
//! Outgoing packets are generated using the connection's [`send()`] method
151//! instead:
152//!
153//! ```no_run
154//! # let mut out = [0; 512];
155//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
156//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
157//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
158//! # let peer = "127.0.0.1:1234".parse().unwrap();
159//! # let local = "127.0.0.1:4321".parse().unwrap();
160//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
//! loop {
//!     let (write, send_info) = match conn.send(&mut out) {
//!         Ok(v) => v,
//!
//!         Err(quiche::Error::Done) => {
//!             // Done writing.
//!             break;
//!         },
//!
//!         Err(e) => {
//!             // An error occurred, handle it.
//!             break;
//!         },
//!     };
//!
//!     socket.send_to(&out[..write], &send_info.to).unwrap();
//! }
178//! # Ok::<(), quiche::Error>(())
179//! ```
180//!
//! The [`send()`] method also returns a [`SendInfo`] structure containing
//! additional information about the newly created packet (such as the address
//! the packet should be sent to).
184//!
185//! When packets are sent, the application is responsible for maintaining a
186//! timer to react to time-based connection events. The timer expiration can be
187//! obtained using the connection's [`timeout()`] method.
188//!
189//! ```
190//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
191//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
192//! # let peer = "127.0.0.1:1234".parse().unwrap();
193//! # let local = "127.0.0.1:4321".parse().unwrap();
194//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
195//! let timeout = conn.timeout();
196//! # Ok::<(), quiche::Error>(())
197//! ```
198//!
199//! The application is responsible for providing a timer implementation, which
200//! can be specific to the operating system or networking framework used. When
201//! a timer expires, the connection's [`on_timeout()`] method should be called,
202//! after which additional packets might need to be sent on the network:
203//!
204//! ```no_run
205//! # let mut out = [0; 512];
206//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
207//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
208//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
209//! # let peer = "127.0.0.1:1234".parse().unwrap();
210//! # let local = "127.0.0.1:4321".parse().unwrap();
211//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
212//! // Timeout expired, handle it.
213//! conn.on_timeout();
214//!
215//! // Send more packets as needed after timeout.
//! loop {
//!     let (write, send_info) = match conn.send(&mut out) {
//!         Ok(v) => v,
//!
//!         Err(quiche::Error::Done) => {
//!             // Done writing.
//!             break;
//!         },
//!
//!         Err(e) => {
//!             // An error occurred, handle it.
//!             break;
//!         },
//!     };
//!
//!     socket.send_to(&out[..write], &send_info.to).unwrap();
//! }
233//! # Ok::<(), quiche::Error>(())
234//! ```
235//!
236//! ### Pacing
237//!
238//! It is recommended that applications [pace] sending of outgoing packets to
239//! avoid creating packet bursts that could cause short-term congestion and
240//! losses in the network.
241//!
242//! quiche exposes pacing hints for outgoing packets through the [`at`] field
243//! of the [`SendInfo`] structure that is returned by the [`send()`] method.
244//! This field represents the time when a specific packet should be sent into
245//! the network.
246//!
247//! Applications can use these hints by artificially delaying the sending of
248//! packets through platform-specific mechanisms (such as the [`SO_TXTIME`]
249//! socket option on Linux), or custom methods (for example by using user-space
250//! timers).
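//!
//! For example, a simple (blocking) user-space approach is to sleep until the
//! packet's `at` time before writing it to the socket. This is only a sketch;
//! a real application would typically arm a timer in its event loop instead
//! of blocking the thread:
//!
//! ```no_run
//! # use std::time::Instant;
//! # let mut out = [0; 512];
//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
//! # let peer = "127.0.0.1:1234".parse().unwrap();
//! # let local = "127.0.0.1:4321".parse().unwrap();
//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
//! if let Ok((write, send_info)) = conn.send(&mut out) {
//!     // Delay sending until the pacing hint says the packet should go out.
//!     let now = Instant::now();
//!     if send_info.at > now {
//!         std::thread::sleep(send_info.at - now);
//!     }
//!
//!     socket.send_to(&out[..write], &send_info.to).unwrap();
//! }
//! # Ok::<(), quiche::Error>(())
//! ```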
251//!
252//! [pace]: https://datatracker.ietf.org/doc/html/rfc9002#section-7.7
253//! [`SO_TXTIME`]: https://man7.org/linux/man-pages/man8/tc-etf.8.html
254//!
255//! ## Sending and receiving stream data
256//!
257//! After some back and forth, the connection will complete its handshake and
258//! will be ready for sending or receiving application data.
259//!
260//! Data can be sent on a stream by using the [`stream_send()`] method:
261//!
262//! ```no_run
263//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
264//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
265//! # let peer = "127.0.0.1:1234".parse().unwrap();
266//! # let local = "127.0.0.1:4321".parse().unwrap();
267//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
//! if conn.is_established() {
//!     // Handshake completed, send some data on stream 0.
//!     conn.stream_send(0, b"hello", true)?;
//! }
272//! # Ok::<(), quiche::Error>(())
273//! ```
274//!
275//! The application can check whether there are any readable streams by using
276//! the connection's [`readable()`] method, which returns an iterator over all
277//! the streams that have outstanding data to read.
278//!
279//! The [`stream_recv()`] method can then be used to retrieve the application
280//! data from the readable stream:
281//!
282//! ```no_run
283//! # let mut buf = [0; 512];
284//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
285//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
286//! # let peer = "127.0.0.1:1234".parse().unwrap();
287//! # let local = "127.0.0.1:4321".parse().unwrap();
288//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
//! if conn.is_established() {
//!     // Iterate over readable streams.
//!     for stream_id in conn.readable() {
//!         // Stream is readable, read until there's no more data.
//!         while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
//!             println!("Got {} bytes on stream {}", read, stream_id);
//!         }
//!     }
//! }
298//! # Ok::<(), quiche::Error>(())
299//! ```
300//!
301//! ## HTTP/3
302//!
303//! The quiche [HTTP/3 module] provides a high level API for sending and
304//! receiving HTTP requests and responses on top of the QUIC transport protocol.
305//!
306//! [`Config`]: https://docs.quic.tech/quiche/struct.Config.html
307//! [`set_initial_max_streams_bidi()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_streams_bidi
308//! [`set_initial_max_streams_uni()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_streams_uni
309//! [`set_initial_max_data()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_data
310//! [`set_initial_max_stream_data_bidi_local()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_bidi_local
311//! [`set_initial_max_stream_data_bidi_remote()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_bidi_remote
312//! [`set_initial_max_stream_data_uni()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_uni
313//! [`with_boring_ssl_ctx_builder()`]: https://docs.quic.tech/quiche/struct.Config.html#method.with_boring_ssl_ctx_builder
314//! [`connect()`]: fn.connect.html
315//! [`accept()`]: fn.accept.html
316//! [`recv()`]: struct.Connection.html#method.recv
317//! [`RecvInfo`]: struct.RecvInfo.html
318//! [`send()`]: struct.Connection.html#method.send
319//! [`SendInfo`]: struct.SendInfo.html
320//! [`at`]: struct.SendInfo.html#structfield.at
321//! [`timeout()`]: struct.Connection.html#method.timeout
322//! [`on_timeout()`]: struct.Connection.html#method.on_timeout
323//! [`stream_send()`]: struct.Connection.html#method.stream_send
324//! [`readable()`]: struct.Connection.html#method.readable
325//! [`stream_recv()`]: struct.Connection.html#method.stream_recv
326//! [HTTP/3 module]: h3/index.html
327//!
328//! ## Congestion Control
329//!
330//! The quiche library provides a high-level API for configuring which
331//! congestion control algorithm to use throughout the QUIC connection.
332//!
333//! When a QUIC connection is created, the application can optionally choose
334//! which CC algorithm to use. See [`CongestionControlAlgorithm`] for currently
335//! available congestion control algorithms.
336//!
337//! For example:
338//!
339//! ```
340//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
341//! config.set_cc_algorithm(quiche::CongestionControlAlgorithm::Reno);
342//! ```
343//!
//! Alternatively, the congestion control algorithm to use can be configured
//! by name:
346//!
347//! ```
348//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
349//! config.set_cc_algorithm_name("reno").unwrap();
350//! ```
351//!
352//! Note that the CC algorithm should be configured before calling [`connect()`]
353//! or [`accept()`]. Otherwise the connection will use a default CC algorithm.
354//!
355//! [`CongestionControlAlgorithm`]: enum.CongestionControlAlgorithm.html
356//!
357//! ## Feature flags
358//!
359//! quiche defines a number of [feature flags] to reduce the amount of compiled
360//! code and dependencies:
361//!
362//! * `boringssl-vendored` (default): Build the vendored BoringSSL library.
363//!
364//! * `boringssl-boring-crate`: Use the BoringSSL library provided by the
365//! [boring] crate. It takes precedence over `boringssl-vendored` if both
366//! features are enabled.
367//!
368//! * `pkg-config-meta`: Generate pkg-config metadata file for libquiche.
369//!
370//! * `ffi`: Build and expose the FFI API.
371//!
372//! * `qlog`: Enable support for the [qlog] logging format.
373//!
374//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section
375//! [boring]: https://crates.io/crates/boring
376//! [qlog]: https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema
377
378#![allow(clippy::upper_case_acronyms)]
379#![warn(missing_docs)]
380#![warn(unused_qualifications)]
381#![cfg_attr(docsrs, feature(doc_cfg))]
382
383#[macro_use]
384extern crate log;
385
386use std::cmp;
387
388use std::collections::VecDeque;
389
390use std::net::SocketAddr;
391
392use std::str::FromStr;
393
394use std::sync::Arc;
395
396use std::time::Duration;
397use std::time::Instant;
398
399#[cfg(feature = "qlog")]
400use qlog::events::connectivity::ConnectivityEventType;
401#[cfg(feature = "qlog")]
402use qlog::events::connectivity::TransportOwner;
403#[cfg(feature = "qlog")]
404use qlog::events::quic::RecoveryEventType;
405#[cfg(feature = "qlog")]
406use qlog::events::quic::TransportEventType;
407#[cfg(feature = "qlog")]
408use qlog::events::DataRecipient;
409#[cfg(feature = "qlog")]
410use qlog::events::Event;
411#[cfg(feature = "qlog")]
412use qlog::events::EventData;
413#[cfg(feature = "qlog")]
414use qlog::events::EventImportance;
415#[cfg(feature = "qlog")]
416use qlog::events::EventType;
417#[cfg(feature = "qlog")]
418use qlog::events::RawInfo;
419
420use smallvec::SmallVec;
421
422use crate::range_buf::DefaultBufFactory;
423
424use crate::recovery::OnAckReceivedOutcome;
425use crate::recovery::OnLossDetectionTimeoutOutcome;
426use crate::recovery::RecoveryOps;
427use crate::recovery::ReleaseDecision;
428
429use crate::stream::RecvAction;
430use crate::stream::StreamPriorityKey;
431
432/// The current QUIC wire version.
433pub const PROTOCOL_VERSION: u32 = PROTOCOL_VERSION_V1;
434
435/// Supported QUIC versions.
436const PROTOCOL_VERSION_V1: u32 = 0x0000_0001;
437
438/// The maximum length of a connection ID.
439pub const MAX_CONN_ID_LEN: usize = packet::MAX_CID_LEN as usize;
440
441/// The minimum length of Initial packets sent by a client.
442pub const MIN_CLIENT_INITIAL_LEN: usize = 1200;
443
444/// The default initial RTT.
445const DEFAULT_INITIAL_RTT: Duration = Duration::from_millis(333);
446
447const PAYLOAD_MIN_LEN: usize = 4;
448
449// PATH_CHALLENGE (9 bytes) + AEAD tag (16 bytes).
450const MIN_PROBING_SIZE: usize = 25;
451
452const MAX_AMPLIFICATION_FACTOR: usize = 3;
453
454// The maximum number of tracked packet number ranges that need to be acked.
455//
456// This represents more or less how many ack blocks can fit in a typical packet.
457const MAX_ACK_RANGES: usize = 68;
458
459// The highest possible stream ID allowed.
460const MAX_STREAM_ID: u64 = 1 << 60;
461
462// The default max_datagram_size used in congestion control.
463const MAX_SEND_UDP_PAYLOAD_SIZE: usize = 1200;
464
465// The default length of DATAGRAM queues.
466const DEFAULT_MAX_DGRAM_QUEUE_LEN: usize = 0;
467
468// The default length of PATH_CHALLENGE receive queue.
469const DEFAULT_MAX_PATH_CHALLENGE_RX_QUEUE_LEN: usize = 3;
470
// The DATAGRAM standard recommends either none or 65536 as the maximum
// DATAGRAM frame size. We enforce the recommendation for forward compatibility.
473const MAX_DGRAM_FRAME_SIZE: u64 = 65536;
474
475// The length of the payload length field.
476const PAYLOAD_LENGTH_LEN: usize = 2;
477
// The number of undecryptable packets that can be buffered.
479const MAX_UNDECRYPTABLE_PACKETS: usize = 10;
480
481const RESERVED_VERSION_MASK: u32 = 0xfafafafa;
482
483// The default size of the receiver connection flow control window.
484const DEFAULT_CONNECTION_WINDOW: u64 = 48 * 1024;
485
486// The maximum size of the receiver connection flow control window.
487const MAX_CONNECTION_WINDOW: u64 = 24 * 1024 * 1024;
488
// How much larger the connection flow control window needs to be than the
// stream flow control window.
491const CONNECTION_WINDOW_FACTOR: f64 = 1.5;
492
// How many probing packet timeouts we tolerate before considering path
// validation to have failed.
495const MAX_PROBING_TIMEOUTS: usize = 3;
496
497// The default initial congestion window size in terms of packet count.
498const DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS: usize = 10;
499
500// The maximum data offset that can be stored in a crypto stream.
501const MAX_CRYPTO_STREAM_OFFSET: u64 = 1 << 16;
502
503// The send capacity factor.
504const TX_CAP_FACTOR: f64 = 1.0;
505
506/// Ancillary information about incoming packets.
507#[derive(Clone, Copy, Debug, PartialEq, Eq)]
508pub struct RecvInfo {
509 /// The remote address the packet was received from.
510 pub from: SocketAddr,
511
512 /// The local address the packet was received on.
513 pub to: SocketAddr,
514}
515
516/// Ancillary information about outgoing packets.
517#[derive(Clone, Copy, Debug, PartialEq, Eq)]
518pub struct SendInfo {
519 /// The local address the packet should be sent from.
520 pub from: SocketAddr,
521
522 /// The remote address the packet should be sent to.
523 pub to: SocketAddr,
524
525 /// The time to send the packet out.
526 ///
527 /// See [Pacing] for more details.
528 ///
529 /// [Pacing]: index.html#pacing
530 pub at: Instant,
531}
532
533/// The side of the stream to be shut down.
534///
535/// This should be used when calling [`stream_shutdown()`].
536///
537/// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
538#[repr(C)]
539#[derive(PartialEq, Eq)]
540pub enum Shutdown {
541 /// Stop receiving stream data.
542 Read = 0,
543
544 /// Stop sending stream data.
545 Write = 1,
546}
547
548/// Qlog logging level.
549#[repr(C)]
550#[cfg(feature = "qlog")]
551#[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
552pub enum QlogLevel {
553 /// Logs any events of Core importance.
554 Core = 0,
555
556 /// Logs any events of Core and Base importance.
557 Base = 1,
558
559 /// Logs any events of Core, Base and Extra importance
560 Extra = 2,
561}
562
563/// Stores configuration shared between multiple connections.
564pub struct Config {
565 local_transport_params: TransportParams,
566
567 version: u32,
568
569 tls_ctx: tls::Context,
570
571 application_protos: Vec<Vec<u8>>,
572
573 grease: bool,
574
575 cc_algorithm: CongestionControlAlgorithm,
576 custom_bbr_params: Option<BbrParams>,
577 initial_congestion_window_packets: usize,
578 enable_relaxed_loss_threshold: bool,
579
580 pmtud: bool,
581 pmtud_max_probes: u8,
582
583 hystart: bool,
584
585 pacing: bool,
586 /// Send rate limit in Mbps
587 max_pacing_rate: Option<u64>,
588
589 tx_cap_factor: f64,
590
591 dgram_recv_max_queue_len: usize,
592 dgram_send_max_queue_len: usize,
593
594 path_challenge_recv_max_queue_len: usize,
595
596 max_send_udp_payload_size: usize,
597
598 max_connection_window: u64,
599 max_stream_window: u64,
600
601 max_amplification_factor: usize,
602
603 disable_dcid_reuse: bool,
604
605 track_unknown_transport_params: Option<usize>,
606
607 initial_rtt: Duration,
608}
609
610// See https://quicwg.org/base-drafts/rfc9000.html#section-15
611fn is_reserved_version(version: u32) -> bool {
612 version & RESERVED_VERSION_MASK == version
613}
614
615impl Config {
616 /// Creates a config object with the given version.
617 ///
618 /// ## Examples:
619 ///
620 /// ```
621 /// let config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
622 /// # Ok::<(), quiche::Error>(())
623 /// ```
624 pub fn new(version: u32) -> Result<Config> {
625 Self::with_tls_ctx(version, tls::Context::new()?)
626 }
627
628 /// Creates a config object with the given version and
629 /// [`SslContextBuilder`].
630 ///
631 /// This is useful for applications that wish to manually configure
632 /// [`SslContextBuilder`].
633 ///
634 /// [`SslContextBuilder`]: https://docs.rs/boring/latest/boring/ssl/struct.SslContextBuilder.html
635 #[cfg(feature = "boringssl-boring-crate")]
636 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
637 pub fn with_boring_ssl_ctx_builder(
638 version: u32, tls_ctx_builder: boring::ssl::SslContextBuilder,
639 ) -> Result<Config> {
640 Self::with_tls_ctx(version, tls::Context::from_boring(tls_ctx_builder))
641 }
642
643 fn with_tls_ctx(version: u32, tls_ctx: tls::Context) -> Result<Config> {
644 if !is_reserved_version(version) && !version_is_supported(version) {
645 return Err(Error::UnknownVersion);
646 }
647
648 Ok(Config {
649 local_transport_params: TransportParams::default(),
650 version,
651 tls_ctx,
652 application_protos: Vec::new(),
653 grease: true,
654 cc_algorithm: CongestionControlAlgorithm::CUBIC,
655 custom_bbr_params: None,
656 initial_congestion_window_packets:
657 DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS,
658 enable_relaxed_loss_threshold: false,
659 pmtud: false,
660 pmtud_max_probes: pmtud::MAX_PROBES_DEFAULT,
661 hystart: true,
662 pacing: true,
663 max_pacing_rate: None,
664
665 tx_cap_factor: TX_CAP_FACTOR,
666
667 dgram_recv_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,
668 dgram_send_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,
669
670 path_challenge_recv_max_queue_len:
671 DEFAULT_MAX_PATH_CHALLENGE_RX_QUEUE_LEN,
672
673 max_send_udp_payload_size: MAX_SEND_UDP_PAYLOAD_SIZE,
674
675 max_connection_window: MAX_CONNECTION_WINDOW,
676 max_stream_window: stream::MAX_STREAM_WINDOW,
677
678 max_amplification_factor: MAX_AMPLIFICATION_FACTOR,
679
680 disable_dcid_reuse: false,
681
682 track_unknown_transport_params: None,
683 initial_rtt: DEFAULT_INITIAL_RTT,
684 })
685 }
686
687 /// Configures the given certificate chain.
688 ///
689 /// The content of `file` is parsed as a PEM-encoded leaf certificate,
690 /// followed by optional intermediate certificates.
691 ///
692 /// ## Examples:
693 ///
694 /// ```no_run
695 /// # let mut config = quiche::Config::new(0xbabababa)?;
696 /// config.load_cert_chain_from_pem_file("/path/to/cert.pem")?;
697 /// # Ok::<(), quiche::Error>(())
698 /// ```
699 pub fn load_cert_chain_from_pem_file(&mut self, file: &str) -> Result<()> {
700 self.tls_ctx.use_certificate_chain_file(file)
701 }
702
703 /// Configures the given private key.
704 ///
705 /// The content of `file` is parsed as a PEM-encoded private key.
706 ///
707 /// ## Examples:
708 ///
709 /// ```no_run
710 /// # let mut config = quiche::Config::new(0xbabababa)?;
711 /// config.load_priv_key_from_pem_file("/path/to/key.pem")?;
712 /// # Ok::<(), quiche::Error>(())
713 /// ```
714 pub fn load_priv_key_from_pem_file(&mut self, file: &str) -> Result<()> {
715 self.tls_ctx.use_privkey_file(file)
716 }
717
718 /// Specifies a file where trusted CA certificates are stored for the
719 /// purposes of certificate verification.
720 ///
721 /// The content of `file` is parsed as a PEM-encoded certificate chain.
722 ///
723 /// ## Examples:
724 ///
725 /// ```no_run
726 /// # let mut config = quiche::Config::new(0xbabababa)?;
727 /// config.load_verify_locations_from_file("/path/to/cert.pem")?;
728 /// # Ok::<(), quiche::Error>(())
729 /// ```
730 pub fn load_verify_locations_from_file(&mut self, file: &str) -> Result<()> {
731 self.tls_ctx.load_verify_locations_from_file(file)
732 }
733
734 /// Specifies a directory where trusted CA certificates are stored for the
735 /// purposes of certificate verification.
736 ///
 /// The content of `dir` is a set of PEM-encoded certificate chains.
738 ///
739 /// ## Examples:
740 ///
741 /// ```no_run
742 /// # let mut config = quiche::Config::new(0xbabababa)?;
743 /// config.load_verify_locations_from_directory("/path/to/certs")?;
744 /// # Ok::<(), quiche::Error>(())
745 /// ```
746 pub fn load_verify_locations_from_directory(
747 &mut self, dir: &str,
748 ) -> Result<()> {
749 self.tls_ctx.load_verify_locations_from_directory(dir)
750 }
751
752 /// Configures whether to verify the peer's certificate.
753 ///
754 /// This should usually be `true` for client-side connections and `false`
755 /// for server-side ones.
756 ///
757 /// Note that by default, no verification is performed.
758 ///
759 /// Also note that on the server-side, enabling verification of the peer
760 /// will trigger a certificate request and make authentication errors
761 /// fatal, but will still allow anonymous clients (i.e. clients that
762 /// don't present a certificate at all). Servers can check whether a
763 /// client presented a certificate by calling [`peer_cert()`] if they
764 /// need to.
765 ///
766 /// [`peer_cert()`]: struct.Connection.html#method.peer_cert
767 pub fn verify_peer(&mut self, verify: bool) {
768 self.tls_ctx.set_verify(verify);
769 }
770
771 /// Configures whether to do path MTU discovery.
772 ///
773 /// The default value is `false`.
774 pub fn discover_pmtu(&mut self, discover: bool) {
775 self.pmtud = discover;
776 }
777
778 /// Configures the maximum number of PMTUD probe attempts before treating
779 /// a probe size as failed.
780 ///
781 /// Defaults to 3 per [RFC 8899 Section 5.1.2](https://datatracker.ietf.org/doc/html/rfc8899#section-5.1.2).
782 /// If 0 is passed, the default value is used.
783 pub fn set_pmtud_max_probes(&mut self, max_probes: u8) {
784 self.pmtud_max_probes = max_probes;
785 }
786
787 /// Configures whether to send GREASE values.
788 ///
789 /// The default value is `true`.
790 pub fn grease(&mut self, grease: bool) {
791 self.grease = grease;
792 }
793
794 /// Enables logging of secrets.
795 ///
796 /// When logging is enabled, the [`set_keylog()`] method must be called on
797 /// the connection for its cryptographic secrets to be logged in the
798 /// [keylog] format to the specified writer.
799 ///
800 /// [`set_keylog()`]: struct.Connection.html#method.set_keylog
801 /// [keylog]: https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format
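 ///
 /// ## Examples:
 ///
 /// A minimal sketch; the keylog file path is illustrative:
 ///
 /// ```no_run
 /// # let mut config = quiche::Config::new(0xbabababa)?;
 /// config.log_keys();
 ///
 /// // Later, for each connection created from this config:
 /// // let file = std::fs::File::create("/path/to/keylog.txt").unwrap();
 /// // conn.set_keylog(Box::new(file));
 /// # Ok::<(), quiche::Error>(())
 /// ```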
802 pub fn log_keys(&mut self) {
803 self.tls_ctx.enable_keylog();
804 }
805
806 /// Configures the session ticket key material.
807 ///
808 /// On the server this key will be used to encrypt and decrypt session
809 /// tickets, used to perform session resumption without server-side state.
810 ///
811 /// By default a key is generated internally, and rotated regularly, so
812 /// applications don't need to call this unless they need to use a
813 /// specific key (e.g. in order to support resumption across multiple
814 /// servers), in which case the application is also responsible for
815 /// rotating the key to provide forward secrecy.
816 pub fn set_ticket_key(&mut self, key: &[u8]) -> Result<()> {
817 self.tls_ctx.set_ticket_key(key)
818 }
819
820 /// Enables sending or receiving early data.
821 pub fn enable_early_data(&mut self) {
822 self.tls_ctx.set_early_data_enabled(true);
823 }
824
825 /// Configures the list of supported application protocols.
826 ///
827 /// On the client this configures the list of protocols to send to the
828 /// server as part of the ALPN extension.
829 ///
830 /// On the server this configures the list of supported protocols to match
831 /// against the client-supplied list.
832 ///
833 /// Applications must set a value, but no default is provided.
834 ///
835 /// ## Examples:
836 ///
837 /// ```
838 /// # let mut config = quiche::Config::new(0xbabababa)?;
839 /// config.set_application_protos(&[b"http/1.1", b"http/0.9"]);
840 /// # Ok::<(), quiche::Error>(())
841 /// ```
842 pub fn set_application_protos(
843 &mut self, protos_list: &[&[u8]],
844 ) -> Result<()> {
845 self.application_protos =
846 protos_list.iter().map(|s| s.to_vec()).collect();
847
848 self.tls_ctx.set_alpn(protos_list)
849 }
850
851 /// Configures the list of supported application protocols using wire
852 /// format.
853 ///
854 /// The list of protocols `protos` must be a series of non-empty, 8-bit
855 /// length-prefixed strings.
856 ///
857 /// See [`set_application_protos`](Self::set_application_protos) for more
858 /// background about application protocols.
859 ///
860 /// ## Examples:
861 ///
862 /// ```
863 /// # let mut config = quiche::Config::new(0xbabababa)?;
864 /// config.set_application_protos_wire_format(b"\x08http/1.1\x08http/0.9")?;
865 /// # Ok::<(), quiche::Error>(())
866 /// ```
867 pub fn set_application_protos_wire_format(
868 &mut self, protos: &[u8],
869 ) -> Result<()> {
870 let mut b = octets::Octets::with_slice(protos);
871
872 let mut protos_list = Vec::new();
873
874 while let Ok(proto) = b.get_bytes_with_u8_length() {
875 protos_list.push(proto.buf());
876 }
877
878 self.set_application_protos(&protos_list)
879 }
880
881 /// Sets the anti-amplification limit factor.
882 ///
883 /// The default value is `3`.
884 pub fn set_max_amplification_factor(&mut self, v: usize) {
885 self.max_amplification_factor = v;
886 }
887
888 /// Sets the send capacity factor.
889 ///
890 /// The default value is `1`.
891 pub fn set_send_capacity_factor(&mut self, v: f64) {
892 self.tx_cap_factor = v;
893 }
894
895 /// Sets the connection's initial RTT.
896 ///
 /// The default value is `333` milliseconds.
898 pub fn set_initial_rtt(&mut self, v: Duration) {
899 self.initial_rtt = v;
900 }
901
902 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
903 ///
904 /// The default value is infinite, that is, no timeout is used.
905 pub fn set_max_idle_timeout(&mut self, v: u64) {
906 self.local_transport_params.max_idle_timeout =
907 cmp::min(v, octets::MAX_VAR_INT);
908 }
909
 /// Sets the `max_udp_payload_size` transport parameter.
911 ///
912 /// The default value is `65527`.
913 pub fn set_max_recv_udp_payload_size(&mut self, v: usize) {
914 self.local_transport_params.max_udp_payload_size =
915 cmp::min(v as u64, octets::MAX_VAR_INT);
916 }
917
918 /// Sets the maximum outgoing UDP payload size.
919 ///
920 /// The default and minimum value is `1200`.
921 pub fn set_max_send_udp_payload_size(&mut self, v: usize) {
922 self.max_send_udp_payload_size = cmp::max(v, MAX_SEND_UDP_PAYLOAD_SIZE);
923 }
924
925 /// Sets the `initial_max_data` transport parameter.
926 ///
927 /// When set to a non-zero value quiche will only allow at most `v` bytes of
928 /// incoming stream data to be buffered for the whole connection (that is,
929 /// data that is not yet read by the application) and will allow more data
930 /// to be received as the buffer is consumed by the application.
931 ///
932 /// When set to zero, either explicitly or via the default, quiche will not
933 /// give any flow control to the peer, preventing it from sending any stream
934 /// data.
935 ///
936 /// The default value is `0`.
937 pub fn set_initial_max_data(&mut self, v: u64) {
938 self.local_transport_params.initial_max_data =
939 cmp::min(v, octets::MAX_VAR_INT);
940 }
941
942 /// Sets the `initial_max_stream_data_bidi_local` transport parameter.
943 ///
944 /// When set to a non-zero value quiche will only allow at most `v` bytes
945 /// of incoming stream data to be buffered for each locally-initiated
946 /// bidirectional stream (that is, data that is not yet read by the
947 /// application) and will allow more data to be received as the buffer is
948 /// consumed by the application.
949 ///
950 /// When set to zero, either explicitly or via the default, quiche will not
951 /// give any flow control to the peer, preventing it from sending any stream
952 /// data.
953 ///
954 /// The default value is `0`.
955 pub fn set_initial_max_stream_data_bidi_local(&mut self, v: u64) {
956 self.local_transport_params
957 .initial_max_stream_data_bidi_local =
958 cmp::min(v, octets::MAX_VAR_INT);
959 }
960
961 /// Sets the `initial_max_stream_data_bidi_remote` transport parameter.
962 ///
963 /// When set to a non-zero value quiche will only allow at most `v` bytes
964 /// of incoming stream data to be buffered for each remotely-initiated
965 /// bidirectional stream (that is, data that is not yet read by the
966 /// application) and will allow more data to be received as the buffer is
967 /// consumed by the application.
968 ///
969 /// When set to zero, either explicitly or via the default, quiche will not
970 /// give any flow control to the peer, preventing it from sending any stream
971 /// data.
972 ///
973 /// The default value is `0`.
974 pub fn set_initial_max_stream_data_bidi_remote(&mut self, v: u64) {
975 self.local_transport_params
976 .initial_max_stream_data_bidi_remote =
977 cmp::min(v, octets::MAX_VAR_INT);
978 }
979
980 /// Sets the `initial_max_stream_data_uni` transport parameter.
981 ///
982 /// When set to a non-zero value quiche will only allow at most `v` bytes
983 /// of incoming stream data to be buffered for each unidirectional stream
984 /// (that is, data that is not yet read by the application) and will allow
985 /// more data to be received as the buffer is consumed by the application.
986 ///
987 /// When set to zero, either explicitly or via the default, quiche will not
988 /// give any flow control to the peer, preventing it from sending any stream
989 /// data.
990 ///
991 /// The default value is `0`.
992 pub fn set_initial_max_stream_data_uni(&mut self, v: u64) {
993 self.local_transport_params.initial_max_stream_data_uni =
994 cmp::min(v, octets::MAX_VAR_INT);
995 }
996
997 /// Sets the `initial_max_streams_bidi` transport parameter.
998 ///
999 /// When set to a non-zero value quiche will only allow `v` number of
1000 /// concurrent remotely-initiated bidirectional streams to be open at any
1001 /// given time and will increase the limit automatically as streams are
1002 /// completed.
1003 ///
 /// When set to zero, either explicitly or via the default, quiche will not
 /// allow the peer to open any bidirectional streams.
1006 ///
1007 /// A bidirectional stream is considered completed when all incoming data
1008 /// has been read by the application (up to the `fin` offset) or the
1009 /// stream's read direction has been shutdown, and all outgoing data has
1010 /// been acked by the peer (up to the `fin` offset) or the stream's write
1011 /// direction has been shutdown.
1012 ///
1013 /// The default value is `0`.
1014 pub fn set_initial_max_streams_bidi(&mut self, v: u64) {
1015 self.local_transport_params.initial_max_streams_bidi =
1016 cmp::min(v, octets::MAX_VAR_INT);
1017 }
1018
1019 /// Sets the `initial_max_streams_uni` transport parameter.
1020 ///
1021 /// When set to a non-zero value quiche will only allow `v` number of
1022 /// concurrent remotely-initiated unidirectional streams to be open at any
1023 /// given time and will increase the limit automatically as streams are
1024 /// completed.
1025 ///
 /// When set to zero, either explicitly or via the default, quiche will not
 /// allow the peer to open any unidirectional streams.
1028 ///
1029 /// A unidirectional stream is considered completed when all incoming data
1030 /// has been read by the application (up to the `fin` offset) or the
1031 /// stream's read direction has been shutdown.
1032 ///
1033 /// The default value is `0`.
1034 pub fn set_initial_max_streams_uni(&mut self, v: u64) {
1035 self.local_transport_params.initial_max_streams_uni =
1036 cmp::min(v, octets::MAX_VAR_INT);
1037 }
1038
1039 /// Sets the `ack_delay_exponent` transport parameter.
1040 ///
1041 /// The default value is `3`.
1042 pub fn set_ack_delay_exponent(&mut self, v: u64) {
1043 self.local_transport_params.ack_delay_exponent =
1044 cmp::min(v, octets::MAX_VAR_INT);
1045 }
1046
1047 /// Sets the `max_ack_delay` transport parameter.
1048 ///
 /// The default value is `25` milliseconds.
1050 pub fn set_max_ack_delay(&mut self, v: u64) {
1051 self.local_transport_params.max_ack_delay =
1052 cmp::min(v, octets::MAX_VAR_INT);
1053 }
1054
1055 /// Sets the `active_connection_id_limit` transport parameter.
1056 ///
1057 /// The default value is `2`. Lower values will be ignored.
1058 pub fn set_active_connection_id_limit(&mut self, v: u64) {
1059 if v >= 2 {
1060 self.local_transport_params.active_conn_id_limit =
1061 cmp::min(v, octets::MAX_VAR_INT);
1062 }
1063 }
1064
1065 /// Sets the `disable_active_migration` transport parameter.
1066 ///
1067 /// The default value is `false`.
1068 pub fn set_disable_active_migration(&mut self, v: bool) {
1069 self.local_transport_params.disable_active_migration = v;
1070 }
1071
1072 /// Sets the congestion control algorithm used.
1073 ///
1074 /// The default value is `CongestionControlAlgorithm::CUBIC`.
1075 pub fn set_cc_algorithm(&mut self, algo: CongestionControlAlgorithm) {
1076 self.cc_algorithm = algo;
1077 }
1078
1079 /// Sets custom BBR settings.
1080 ///
1081 /// This API is experimental and will be removed in the future.
1082 ///
 /// Currently this only applies if `cc_algorithm` is set to
 /// `CongestionControlAlgorithm::Bbr2Gcongestion`.
1085 ///
1086 /// The default value is `None`.
1087 #[cfg(feature = "internal")]
1088 #[doc(hidden)]
1089 pub fn set_custom_bbr_params(&mut self, custom_bbr_settings: BbrParams) {
1090 self.custom_bbr_params = Some(custom_bbr_settings);
1091 }
1092
1093 /// Sets the congestion control algorithm used by string.
1094 ///
1095 /// The default value is `cubic`. On error `Error::CongestionControl`
1096 /// will be returned.
1097 ///
1098 /// ## Examples:
1099 ///
1100 /// ```
1101 /// # let mut config = quiche::Config::new(0xbabababa)?;
 /// config.set_cc_algorithm_name("reno")?;
1103 /// # Ok::<(), quiche::Error>(())
1104 /// ```
1105 pub fn set_cc_algorithm_name(&mut self, name: &str) -> Result<()> {
1106 self.cc_algorithm = CongestionControlAlgorithm::from_str(name)?;
1107
1108 Ok(())
1109 }
1110
1111 /// Sets initial congestion window size in terms of packet count.
1112 ///
1113 /// The default value is 10.
1114 pub fn set_initial_congestion_window_packets(&mut self, packets: usize) {
1115 self.initial_congestion_window_packets = packets;
1116 }
1117
 /// Configures whether to enable relaxed loss detection on spurious loss.
1119 ///
1120 /// The default value is false.
1121 pub fn set_enable_relaxed_loss_threshold(&mut self, enable: bool) {
1122 self.enable_relaxed_loss_threshold = enable;
1123 }
1124
1125 /// Configures whether to enable HyStart++.
1126 ///
1127 /// The default value is `true`.
1128 pub fn enable_hystart(&mut self, v: bool) {
1129 self.hystart = v;
1130 }
1131
1132 /// Configures whether to enable pacing.
1133 ///
1134 /// The default value is `true`.
1135 pub fn enable_pacing(&mut self, v: bool) {
1136 self.pacing = v;
1137 }
1138
1139 /// Sets the max value for pacing rate.
1140 ///
1141 /// By default pacing rate is not limited.
1142 pub fn set_max_pacing_rate(&mut self, v: u64) {
1143 self.max_pacing_rate = Some(v);
1144 }
1145
1146 /// Configures whether to enable receiving DATAGRAM frames.
1147 ///
1148 /// When enabled, the `max_datagram_frame_size` transport parameter is set
1149 /// to 65536 as recommended by draft-ietf-quic-datagram-01.
1150 ///
1151 /// The default is `false`.
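 ///
 /// ## Examples:
 ///
 /// A minimal sketch; the queue lengths below are illustrative:
 ///
 /// ```
 /// # let mut config = quiche::Config::new(0xbabababa)?;
 /// config.enable_dgram(true, 1000, 1000);
 /// # Ok::<(), quiche::Error>(())
 /// ```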
1152 pub fn enable_dgram(
1153 &mut self, enabled: bool, recv_queue_len: usize, send_queue_len: usize,
1154 ) {
1155 self.local_transport_params.max_datagram_frame_size = if enabled {
1156 Some(MAX_DGRAM_FRAME_SIZE)
1157 } else {
1158 None
1159 };
1160 self.dgram_recv_max_queue_len = recv_queue_len;
1161 self.dgram_send_max_queue_len = send_queue_len;
1162 }
1163
1164 /// Configures the max number of queued received PATH_CHALLENGE frames.
1165 ///
1166 /// When an endpoint receives a PATH_CHALLENGE frame and the queue is full,
1167 /// the frame is discarded.
1168 ///
1169 /// The default is 3.
1170 pub fn set_path_challenge_recv_max_queue_len(&mut self, queue_len: usize) {
1171 self.path_challenge_recv_max_queue_len = queue_len;
1172 }
1173
1174 /// Sets the maximum size of the connection window.
1175 ///
1176 /// The default value is MAX_CONNECTION_WINDOW (24MBytes).
1177 pub fn set_max_connection_window(&mut self, v: u64) {
1178 self.max_connection_window = v;
1179 }
1180
1181 /// Sets the maximum size of the stream window.
1182 ///
1183 /// The default value is MAX_STREAM_WINDOW (16MBytes).
1184 pub fn set_max_stream_window(&mut self, v: u64) {
1185 self.max_stream_window = v;
1186 }
1187
1188 /// Sets the initial stateless reset token.
1189 ///
 /// This value is only advertised by servers. Setting a stateless reset
 /// token as a client has no effect on the connection.
1192 ///
1193 /// The default value is `None`.
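 ///
 /// ## Examples:
 ///
 /// A minimal sketch; the token value is illustrative and should be derived
 /// from a secret in real deployments:
 ///
 /// ```
 /// # let mut config = quiche::Config::new(0xbabababa)?;
 /// config.set_stateless_reset_token(Some(0x0123_4567_89ab_cdef_0123_4567_89ab_cdef));
 /// # Ok::<(), quiche::Error>(())
 /// ```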
1194 pub fn set_stateless_reset_token(&mut self, v: Option<u128>) {
1195 self.local_transport_params.stateless_reset_token = v;
1196 }
1197
1198 /// Sets whether the QUIC connection should avoid reusing DCIDs over
1199 /// different paths.
1200 ///
1201 /// When set to `true`, it ensures that a destination Connection ID is never
 /// reused on different paths. Such behaviour may lead to a connection stall
 /// if the peer performs a non-voluntary migration (e.g., NAT rebinding) and
 /// does not provide additional destination Connection IDs to handle such an
 /// event.
1206 ///
1207 /// The default value is `false`.
1208 pub fn set_disable_dcid_reuse(&mut self, v: bool) {
1209 self.disable_dcid_reuse = v;
1210 }
1211
1212 /// Enables tracking unknown transport parameters.
1213 ///
 /// `size` specifies the maximum number of bytes used to track unknown
 /// transport parameters. The size includes the identifier and its value. If
 /// storing a transport parameter would cause the limit to be exceeded, it is
 /// quietly dropped.
1218 ///
1219 /// The default is that the feature is disabled.
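 ///
 /// ## Examples:
 ///
 /// A minimal sketch; the byte limit is illustrative:
 ///
 /// ```
 /// # let mut config = quiche::Config::new(0xbabababa)?;
 /// config.enable_track_unknown_transport_parameters(256);
 /// # Ok::<(), quiche::Error>(())
 /// ```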
1220 pub fn enable_track_unknown_transport_parameters(&mut self, size: usize) {
1221 self.track_unknown_transport_params = Some(size);
1222 }
1223}
1224
1225/// Tracks the health of the tx_buffered value.
1226#[derive(Clone, Copy, Debug, Default, PartialEq)]
1227pub enum TxBufferTrackingState {
1228 /// The send buffer is in a good state
1229 #[default]
1230 Ok,
1231 /// The send buffer is in an inconsistent state, which could lead to
1232 /// connection stalls or excess buffering due to bugs we haven't
1233 /// tracked down yet.
1234 Inconsistent,
1235}
1236
1237/// A QUIC connection.
1238pub struct Connection<F = DefaultBufFactory>
1239where
1240 F: BufFactory,
1241{
1242 /// QUIC wire version used for the connection.
1243 version: u32,
1244
1245 /// Connection Identifiers.
1246 ids: cid::ConnectionIdentifiers,
1247
1248 /// Unique opaque ID for the connection that can be used for logging.
1249 trace_id: String,
1250
1251 /// Packet number spaces.
1252 pkt_num_spaces: [packet::PktNumSpace; packet::Epoch::count()],
1253
1254 /// The crypto context.
1255 crypto_ctx: [packet::CryptoContext; packet::Epoch::count()],
1256
1257 /// Next packet number.
1258 next_pkt_num: u64,
1259
1260 // TODO
1261 // combine with `next_pkt_num`
1262 /// Track the packet skip context
1263 pkt_num_manager: packet::PktNumManager,
1264
1265 /// Peer's transport parameters.
1266 peer_transport_params: TransportParams,
1267
1268 /// If tracking unknown transport parameters from a peer, how much space to
1269 /// use in bytes.
1270 peer_transport_params_track_unknown: Option<usize>,
1271
1272 /// Local transport parameters.
1273 local_transport_params: TransportParams,
1274
1275 /// TLS handshake state.
1276 handshake: tls::Handshake,
1277
1278 /// Serialized TLS session buffer.
1279 ///
1280 /// This field is populated when a new session ticket is processed on the
1281 /// client. On the server this is empty.
1282 session: Option<Vec<u8>>,
1283
1284 /// The configuration for recovery.
1285 recovery_config: recovery::RecoveryConfig,
1286
1287 /// The path manager.
1288 paths: path::PathMap,
1289
1290 /// PATH_CHALLENGE receive queue max length.
1291 path_challenge_recv_max_queue_len: usize,
1292
1293 /// Total number of received PATH_CHALLENGE frames.
1294 path_challenge_rx_count: u64,
1295
1296 /// List of supported application protocols.
1297 application_protos: Vec<Vec<u8>>,
1298
1299 /// Total number of received packets.
1300 recv_count: usize,
1301
1302 /// Total number of sent packets.
1303 sent_count: usize,
1304
1305 /// Total number of lost packets.
1306 lost_count: usize,
1307
1308 /// Total number of lost packets that were later acked.
1309 spurious_lost_count: usize,
1310
1311 /// Total number of packets sent with data retransmitted.
1312 retrans_count: usize,
1313
1314 /// Total number of sent DATAGRAM frames.
1315 dgram_sent_count: usize,
1316
1317 /// Total number of received DATAGRAM frames.
1318 dgram_recv_count: usize,
1319
1320 /// Total number of bytes received from the peer.
1321 rx_data: u64,
1322
1323 /// Receiver flow controller.
1324 flow_control: flowcontrol::FlowControl,
1325
1326 /// Whether we send MAX_DATA frame.
1327 should_send_max_data: bool,
1328
1329 /// Number of stream data bytes that can be buffered.
1330 tx_cap: usize,
1331
1332 /// The send capacity factor.
1333 tx_cap_factor: f64,
1334
1335 /// Number of bytes buffered in the send buffer.
1336 tx_buffered: usize,
1337
1338 /// Tracks the health of tx_buffered.
1339 tx_buffered_state: TxBufferTrackingState,
1340
1341 /// Total number of bytes sent to the peer.
1342 tx_data: u64,
1343
1344 /// Peer's flow control limit for the connection.
1345 max_tx_data: u64,
1346
1347 /// Last tx_data before running a full send() loop.
1348 last_tx_data: u64,
1349
1350 /// Total number of bytes retransmitted over the connection.
1351 /// This counts only STREAM and CRYPTO data.
1352 stream_retrans_bytes: u64,
1353
1354 /// Total number of bytes sent over the connection.
1355 sent_bytes: u64,
1356
1357 /// Total number of bytes received over the connection.
1358 recv_bytes: u64,
1359
1360 /// Total number of bytes sent acked over the connection.
1361 acked_bytes: u64,
1362
1363 /// Total number of bytes sent lost over the connection.
1364 lost_bytes: u64,
1365
1366 /// Streams map, indexed by stream ID.
1367 streams: stream::StreamMap<F>,
1368
1369 /// Peer's original destination connection ID. Used by the client to
1370 /// validate the server's transport parameter.
1371 odcid: Option<ConnectionId<'static>>,
1372
1373 /// Peer's retry source connection ID. Used by the client during stateless
1374 /// retry to validate the server's transport parameter.
1375 rscid: Option<ConnectionId<'static>>,
1376
1377 /// Received address verification token.
1378 token: Option<Vec<u8>>,
1379
1380 /// Error code and reason to be sent to the peer in a CONNECTION_CLOSE
1381 /// frame.
1382 local_error: Option<ConnectionError>,
1383
1384 /// Error code and reason received from the peer in a CONNECTION_CLOSE
1385 /// frame.
1386 peer_error: Option<ConnectionError>,
1387
1388 /// The connection-level limit at which send blocking occurred.
1389 blocked_limit: Option<u64>,
1390
1391 /// Idle timeout expiration time.
1392 idle_timer: Option<Instant>,
1393
1394 /// Draining timeout expiration time.
1395 draining_timer: Option<Instant>,
1396
1397 /// List of raw packets that were received before they could be decrypted.
1398 undecryptable_pkts: VecDeque<(Vec<u8>, RecvInfo)>,
1399
1400 /// The negotiated ALPN protocol.
1401 alpn: Vec<u8>,
1402
1403 /// Whether this is a server-side connection.
1404 is_server: bool,
1405
1406 /// Whether the initial secrets have been derived.
1407 derived_initial_secrets: bool,
1408
1409 /// Whether a version negotiation packet has already been received. Only
1410 /// relevant for client connections.
1411 did_version_negotiation: bool,
1412
1413 /// Whether stateless retry has been performed.
1414 did_retry: bool,
1415
1416 /// Whether the peer already updated its connection ID.
1417 got_peer_conn_id: bool,
1418
1419 /// Whether the peer verified our initial address.
1420 peer_verified_initial_address: bool,
1421
1422 /// Whether the peer's transport parameters were parsed.
1423 parsed_peer_transport_params: bool,
1424
1425 /// Whether the connection handshake has been completed.
1426 handshake_completed: bool,
1427
1428 /// Whether the HANDSHAKE_DONE frame has been sent.
1429 handshake_done_sent: bool,
1430
1431 /// Whether the HANDSHAKE_DONE frame has been acked.
1432 handshake_done_acked: bool,
1433
1434 /// Whether the connection handshake has been confirmed.
1435 handshake_confirmed: bool,
1436
1437 /// Key phase bit used for outgoing protected packets.
1438 key_phase: bool,
1439
1440 /// Whether an ack-eliciting packet has been sent since last receiving a
1441 /// packet.
1442 ack_eliciting_sent: bool,
1443
1444 /// Whether the connection is closed.
1445 closed: bool,
1446
1447 /// Whether the connection was timed out.
1448 timed_out: bool,
1449
1450 /// Whether to send GREASE.
1451 grease: bool,
1452
1453 /// TLS keylog writer.
1454 keylog: Option<Box<dyn std::io::Write + Send + Sync>>,
1455
1456 #[cfg(feature = "qlog")]
1457 qlog: QlogInfo,
1458
1459 /// DATAGRAM queues.
1460 dgram_recv_queue: dgram::DatagramQueue,
1461 dgram_send_queue: dgram::DatagramQueue,
1462
1463 /// Whether to emit DATAGRAM frames in the next packet.
1464 emit_dgram: bool,
1465
 /// Whether the connection should avoid reusing destination Connection IDs
 /// when the peer migrates.
1468 disable_dcid_reuse: bool,
1469
1470 /// The number of streams reset by local.
1471 reset_stream_local_count: u64,
1472
1473 /// The number of streams stopped by local.
1474 stopped_stream_local_count: u64,
1475
1476 /// The number of streams reset by remote.
1477 reset_stream_remote_count: u64,
1478
1479 /// The number of streams stopped by remote.
1480 stopped_stream_remote_count: u64,
1481
1482 /// The number of DATA_BLOCKED frames sent due to hitting the connection
1483 /// flow control limit.
1484 data_blocked_sent_count: u64,
1485
1486 /// The number of STREAM_DATA_BLOCKED frames sent due to a stream hitting
1487 /// the stream flow control limit.
1488 stream_data_blocked_sent_count: u64,
1489
1490 /// The number of DATA_BLOCKED frames received from the remote endpoint.
1491 data_blocked_recv_count: u64,
1492
1493 /// The number of STREAM_DATA_BLOCKED frames received from the remote
1494 /// endpoint.
1495 stream_data_blocked_recv_count: u64,
1496
1497 /// The anti-amplification limit factor.
1498 max_amplification_factor: usize,
1499}
1500
1501/// Creates a new server-side connection.
1502///
1503/// The `scid` parameter represents the server's source connection ID, while
1504/// the optional `odcid` parameter represents the original destination ID the
1505/// client sent before a Retry packet (this is only required when using the
1506/// [`retry()`] function). See also the [`accept_with_retry()`] function for
1507/// more advanced retry cases.
1508///
/// [`retry()`]: fn.retry.html
/// [`accept_with_retry()`]: fn.accept_with_retry.html
1510///
1511/// ## Examples:
1512///
1513/// ```no_run
1514/// # let mut config = quiche::Config::new(0xbabababa)?;
1515/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
1516/// # let local = "127.0.0.1:0".parse().unwrap();
1517/// # let peer = "127.0.0.1:1234".parse().unwrap();
1518/// let conn = quiche::accept(&scid, None, local, peer, &mut config)?;
1519/// # Ok::<(), quiche::Error>(())
1520/// ```
1521#[inline(always)]
1522pub fn accept(
1523 scid: &ConnectionId, odcid: Option<&ConnectionId>, local: SocketAddr,
1524 peer: SocketAddr, config: &mut Config,
1525) -> Result<Connection> {
1526 accept_with_buf_factory(scid, odcid, local, peer, config)
1527}
1528
1529/// Creates a new server-side connection, with a custom buffer generation
1530/// method.
1531///
/// The buffers generated can be anything that can be dereferenced as a byte
1533/// slice. See [`accept`] and [`BufFactory`] for more info.
1534#[inline]
1535pub fn accept_with_buf_factory<F: BufFactory>(
1536 scid: &ConnectionId, odcid: Option<&ConnectionId>, local: SocketAddr,
1537 peer: SocketAddr, config: &mut Config,
1538) -> Result<Connection<F>> {
1539 // For connections with `odcid` set, we historically used `retry_source_cid =
1540 // scid`. Keep this behavior to preserve backwards compatibility.
1541 // `accept_with_retry` allows the SCIDs to be specified separately.
1542 let retry_cids = odcid.map(|odcid| RetryConnectionIds {
1543 original_destination_cid: odcid,
1544 retry_source_cid: scid,
1545 });
1546 Connection::new(scid, retry_cids, None, local, peer, config, true)
1547}
1548
1549/// A wrapper for connection IDs used in [`accept_with_retry`].
1550pub struct RetryConnectionIds<'a> {
1551 /// The DCID of the first Initial packet received by the server, which
1552 /// triggered the Retry packet.
1553 pub original_destination_cid: &'a ConnectionId<'a>,
1554 /// The SCID of the Retry packet sent by the server. This can be different
1555 /// from the new connection's SCID.
1556 pub retry_source_cid: &'a ConnectionId<'a>,
1557}
1558
1559/// Creates a new server-side connection after the client responded to a Retry
1560/// packet.
1561///
1562/// To generate a Retry packet in the first place, use the [`retry()`] function.
1563///
1564/// The `scid` parameter represents the server's source connection ID, which can
1565/// be freshly generated after the application has successfully verified the
1566/// Retry. `retry_cids` is used to tie the new connection to the Initial + Retry
1567/// exchange that preceded the connection's creation.
1568///
1569/// The DCID of the client's Initial packet is inherently untrusted data. It is
1570/// safe to use the DCID in the `retry_source_cid` field of the
1571/// `RetryConnectionIds` provided to this function. However, using the Initial's
1572/// DCID for the `scid` parameter carries risks. Applications are advised to
1573/// implement their own DCID validation steps before using the DCID in that
1574/// manner.
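///
/// ## Examples:
///
/// A minimal sketch; a real server would recover `original_destination_cid`
/// from its address validation token and take `retry_source_cid` from the
/// DCID of the client's post-Retry Initial packet:
///
/// ```no_run
/// # let mut config = quiche::Config::new(0xbabababa)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let odcid = quiche::ConnectionId::from_ref(&[0xaa; 16]);
/// # let retry_scid = quiche::ConnectionId::from_ref(&[0xbb; 16]);
/// # let local = "127.0.0.1:0".parse().unwrap();
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// let retry_cids = quiche::RetryConnectionIds {
///     original_destination_cid: &odcid,
///     retry_source_cid: &retry_scid,
/// };
///
/// let conn: quiche::Connection =
///     quiche::accept_with_retry(&scid, retry_cids, local, peer, &mut config)?;
/// # Ok::<(), quiche::Error>(())
/// ```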
1575#[inline]
1576pub fn accept_with_retry<F: BufFactory>(
1577 scid: &ConnectionId, retry_cids: RetryConnectionIds, local: SocketAddr,
1578 peer: SocketAddr, config: &mut Config,
1579) -> Result<Connection<F>> {
1580 Connection::new(scid, Some(retry_cids), None, local, peer, config, true)
1581}
1582
1583/// Creates a new client-side connection.
1584///
1585/// The `scid` parameter is used as the connection's source connection ID,
1586/// while the optional `server_name` parameter is used to verify the peer's
1587/// certificate.
1588///
1589/// ## Examples:
1590///
1591/// ```no_run
1592/// # let mut config = quiche::Config::new(0xbabababa)?;
1593/// # let server_name = "quic.tech";
1594/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
1595/// # let local = "127.0.0.1:4321".parse().unwrap();
1596/// # let peer = "127.0.0.1:1234".parse().unwrap();
1597/// let conn =
1598/// quiche::connect(Some(&server_name), &scid, local, peer, &mut config)?;
1599/// # Ok::<(), quiche::Error>(())
1600/// ```
1601#[inline]
1602pub fn connect(
1603 server_name: Option<&str>, scid: &ConnectionId, local: SocketAddr,
1604 peer: SocketAddr, config: &mut Config,
1605) -> Result<Connection> {
1606 let mut conn = Connection::new(scid, None, None, local, peer, config, false)?;
1607
1608 if let Some(server_name) = server_name {
1609 conn.handshake.set_host_name(server_name)?;
1610 }
1611
1612 Ok(conn)
1613}
1614
1615/// Creates a new client-side connection using the given DCID initially.
1616///
1617/// Be aware that [RFC 9000] places requirements for unpredictability and length
1618/// on the client DCID field. This function is dangerous if these requirements
1619/// are not satisfied.
1620///
1621/// The `scid` parameter is used as the connection's source connection ID, while
1622/// the optional `server_name` parameter is used to verify the peer's
1623/// certificate.
1624///
1625/// [RFC 9000]: <https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3>
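///
/// ## Examples:
///
/// A minimal sketch; the fixed 16-byte DCID below is for illustration only;
/// a real client must use an unpredictable value of sufficient length:
///
/// ```no_run
/// # let mut config = quiche::Config::new(0xbabababa)?;
/// # let server_name = "quic.tech";
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let dcid = quiche::ConnectionId::from_ref(&[0xcd; 16]);
/// # let local = "127.0.0.1:4321".parse().unwrap();
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// let conn = quiche::connect_with_dcid(
///     Some(&server_name), &scid, &dcid, local, peer, &mut config,
/// )?;
/// # Ok::<(), quiche::Error>(())
/// ```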
1626#[cfg(feature = "custom-client-dcid")]
1627#[cfg_attr(docsrs, doc(cfg(feature = "custom-client-dcid")))]
1628pub fn connect_with_dcid(
1629 server_name: Option<&str>, scid: &ConnectionId, dcid: &ConnectionId,
1630 local: SocketAddr, peer: SocketAddr, config: &mut Config,
1631) -> Result<Connection> {
1632 let mut conn =
1633 Connection::new(scid, None, Some(dcid), local, peer, config, false)?;
1634
1635 if let Some(server_name) = server_name {
1636 conn.handshake.set_host_name(server_name)?;
1637 }
1638
1639 Ok(conn)
1640}
1641
1642/// Creates a new client-side connection, with a custom buffer generation
1643/// method.
1644///
1645 /// The buffers generated can be anything that can be dereferenced as a byte
1646 /// slice. See [`connect`] and [`BufFactory`] for more info.
1647#[inline]
1648pub fn connect_with_buffer_factory<F: BufFactory>(
1649 server_name: Option<&str>, scid: &ConnectionId, local: SocketAddr,
1650 peer: SocketAddr, config: &mut Config,
1651) -> Result<Connection<F>> {
1652 let mut conn = Connection::new(scid, None, None, local, peer, config, false)?;
1653
1654 if let Some(server_name) = server_name {
1655 conn.handshake.set_host_name(server_name)?;
1656 }
1657
1658 Ok(conn)
1659}
1660
1661/// Creates a new client-side connection, with a custom buffer generation
1662/// method, using the given DCID initially. Be aware that [RFC 9000] places
1663/// requirements for unpredictability and length on the client DCID field.
1664///
1665/// The buffers generated can be anything that can be dereferenced as a byte
1666/// slice. See [`connect`] and [`BufFactory`] for more info.
1667///
1668/// [RFC 9000]: <https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3>
1669#[cfg(feature = "custom-client-dcid")]
1670#[cfg_attr(docsrs, doc(cfg(feature = "custom-client-dcid")))]
1671pub fn connect_with_dcid_and_buffer_factory<F: BufFactory>(
1672 server_name: Option<&str>, scid: &ConnectionId, dcid: &ConnectionId,
1673 local: SocketAddr, peer: SocketAddr, config: &mut Config,
1674) -> Result<Connection<F>> {
1675 let mut conn =
1676 Connection::new(scid, None, Some(dcid), local, peer, config, false)?;
1677
1678 if let Some(server_name) = server_name {
1679 conn.handshake.set_host_name(server_name)?;
1680 }
1681
1682 Ok(conn)
1683}
1684
1685/// Writes a version negotiation packet.
1686///
1687/// The `scid` and `dcid` parameters are the source connection ID and the
1688/// destination connection ID extracted from the received client's Initial
1689/// packet that advertises an unsupported version.
1690///
1691/// ## Examples:
1692///
1693/// ```no_run
1694/// # let mut buf = [0; 512];
1695/// # let mut out = [0; 512];
1696/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
1697/// let (len, src) = socket.recv_from(&mut buf).unwrap();
1698///
1699/// let hdr =
1700/// quiche::Header::from_slice(&mut buf[..len], quiche::MAX_CONN_ID_LEN)?;
1701///
1702/// if hdr.version != quiche::PROTOCOL_VERSION {
1703/// let len = quiche::negotiate_version(&hdr.scid, &hdr.dcid, &mut out)?;
1704/// socket.send_to(&out[..len], &src).unwrap();
1705/// }
1706/// # Ok::<(), quiche::Error>(())
1707/// ```
1708#[inline]
1709pub fn negotiate_version(
1710 scid: &ConnectionId, dcid: &ConnectionId, out: &mut [u8],
1711) -> Result<usize> {
1712 packet::negotiate_version(scid, dcid, out)
1713}
1714
1715/// Writes a stateless retry packet.
1716///
1717/// The `scid` and `dcid` parameters are the source connection ID and the
1718/// destination connection ID extracted from the received client's Initial
1719/// packet, while `new_scid` is the server's new source connection ID and
1720/// `token` is the address validation token the client needs to echo back.
1721///
1722/// The application is responsible for generating the address validation
1723/// token to be sent to the client, and verifying tokens sent back by the
1724/// client. The generated token should include the `dcid` parameter, such
1725/// that it can be later extracted from the token and passed to the
1726/// [`accept()`] function as its `odcid` parameter.
1727///
1728/// [`accept()`]: fn.accept.html
1729///
1730/// ## Examples:
1731///
1732/// ```no_run
1733/// # let mut config = quiche::Config::new(0xbabababa)?;
1734/// # let mut buf = [0; 512];
1735/// # let mut out = [0; 512];
1736/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
1737/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
1738/// # let local = socket.local_addr().unwrap();
1739/// # fn mint_token(hdr: &quiche::Header, src: &std::net::SocketAddr) -> Vec<u8> {
1740/// # vec![]
1741/// # }
1742/// # fn validate_token<'a>(src: &std::net::SocketAddr, token: &'a [u8]) -> Option<quiche::ConnectionId<'a>> {
1743/// # None
1744/// # }
1745/// let (len, peer) = socket.recv_from(&mut buf).unwrap();
1746///
1747/// let hdr = quiche::Header::from_slice(&mut buf[..len], quiche::MAX_CONN_ID_LEN)?;
1748///
1749/// let token = hdr.token.as_ref().unwrap();
1750///
1751/// // No token sent by client, create a new one.
1752/// if token.is_empty() {
1753/// let new_token = mint_token(&hdr, &peer);
1754///
1755/// let len = quiche::retry(
1756/// &hdr.scid, &hdr.dcid, &scid, &new_token, hdr.version, &mut out,
1757/// )?;
1758///
1759/// socket.send_to(&out[..len], &peer).unwrap();
1760/// return Ok(());
1761/// }
1762///
1763/// // Client sent token, validate it.
1764/// let odcid = validate_token(&peer, token);
1765///
1766/// if odcid.is_none() {
1767/// // Invalid address validation token.
1768/// return Ok(());
1769/// }
1770///
1771/// let conn = quiche::accept(&scid, odcid.as_ref(), local, peer, &mut config)?;
1772/// # Ok::<(), quiche::Error>(())
1773/// ```
1774#[inline]
1775pub fn retry(
1776 scid: &ConnectionId, dcid: &ConnectionId, new_scid: &ConnectionId,
1777 token: &[u8], version: u32, out: &mut [u8],
1778) -> Result<usize> {
1779 packet::retry(scid, dcid, new_scid, token, version, out)
1780}
1781
1782/// Returns true if the given protocol version is supported.
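///
/// ## Examples:
///
/// The crate's current wire version is always supported, while an arbitrary
/// unknown version (such as the `0xbabababa` placeholder used throughout
/// these docs) is not:
///
/// ```
/// assert!(quiche::version_is_supported(quiche::PROTOCOL_VERSION));
/// assert!(!quiche::version_is_supported(0xbabababa));
/// ```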
1783#[inline]
1784pub fn version_is_supported(version: u32) -> bool {
1785 matches!(version, PROTOCOL_VERSION_V1)
1786}
1787
1788/// Pushes a frame to the output packet if there is enough space.
1789///
1790/// Returns `true` on success, `false` otherwise. A failure means there is no
1791/// room to add the frame to the packet; the caller may retry adding the
1792/// frame later.
1793macro_rules! push_frame_to_pkt {
1794 ($out:expr, $frames:expr, $frame:expr, $left:expr) => {{
1795 if $frame.wire_len() <= $left {
1796 $left -= $frame.wire_len();
1797
1798 $frame.to_bytes(&mut $out)?;
1799
1800 $frames.push($frame);
1801
1802 true
1803 } else {
1804 false
1805 }
1806 }};
1807}
1808
1809/// Executes the provided body if the qlog feature is enabled, quiche has been
1810/// configured with a log writer, and the event's importance is within the
1811/// configured level.
1812macro_rules! qlog_with_type {
1813 ($ty:expr, $qlog:expr, $qlog_streamer_ref:ident, $body:block) => {{
1814 #[cfg(feature = "qlog")]
1815 {
1816 if EventImportance::from($ty).is_contained_in(&$qlog.level) {
1817 if let Some($qlog_streamer_ref) = &mut $qlog.streamer {
1818 $body
1819 }
1820 }
1821 }
1822 }};
1823}
1824
1825#[cfg(feature = "qlog")]
1826const QLOG_PARAMS_SET: EventType =
1827 EventType::TransportEventType(TransportEventType::ParametersSet);
1828
1829#[cfg(feature = "qlog")]
1830const QLOG_PACKET_RX: EventType =
1831 EventType::TransportEventType(TransportEventType::PacketReceived);
1832
1833#[cfg(feature = "qlog")]
1834const QLOG_PACKET_TX: EventType =
1835 EventType::TransportEventType(TransportEventType::PacketSent);
1836
1837#[cfg(feature = "qlog")]
1838const QLOG_DATA_MV: EventType =
1839 EventType::TransportEventType(TransportEventType::DataMoved);
1840
1841#[cfg(feature = "qlog")]
1842const QLOG_METRICS: EventType =
1843 EventType::RecoveryEventType(RecoveryEventType::MetricsUpdated);
1844
1845#[cfg(feature = "qlog")]
1846const QLOG_CONNECTION_CLOSED: EventType =
1847 EventType::ConnectivityEventType(ConnectivityEventType::ConnectionClosed);
1848
1849#[cfg(feature = "qlog")]
1850struct QlogInfo {
1851 streamer: Option<qlog::streamer::QlogStreamer>,
1852 logged_peer_params: bool,
1853 level: EventImportance,
1854}
1855
1856#[cfg(feature = "qlog")]
1857impl Default for QlogInfo {
1858 fn default() -> Self {
1859 QlogInfo {
1860 streamer: None,
1861 logged_peer_params: false,
1862 level: EventImportance::Base,
1863 }
1864 }
1865}
1866
1867impl<F: BufFactory> Connection<F> {
1868 fn new(
1869 scid: &ConnectionId, retry_cids: Option<RetryConnectionIds>,
1870 client_dcid: Option<&ConnectionId>, local: SocketAddr, peer: SocketAddr,
1871 config: &mut Config, is_server: bool,
1872 ) -> Result<Connection<F>> {
1873 let tls = config.tls_ctx.new_handshake()?;
1874 Connection::with_tls(
1875 scid,
1876 retry_cids,
1877 client_dcid,
1878 local,
1879 peer,
1880 config,
1881 tls,
1882 is_server,
1883 )
1884 }
1885
1886 #[allow(clippy::too_many_arguments)]
1887 fn with_tls(
1888 scid: &ConnectionId, retry_cids: Option<RetryConnectionIds>,
1889 client_dcid: Option<&ConnectionId>, local: SocketAddr, peer: SocketAddr,
1890 config: &Config, tls: tls::Handshake, is_server: bool,
1891 ) -> Result<Connection<F>> {
1892 if retry_cids.is_some() && client_dcid.is_some() {
1893 // These are exclusive, the caller should only specify one or the
1894 // other.
1895 return Err(Error::InvalidDcidInitialization);
1896 }
1897 #[cfg(feature = "custom-client-dcid")]
1898 if let Some(client_dcid) = client_dcid {
1899            // The minimum length is 8.
1900 // See https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3
1901 if client_dcid.to_vec().len() < 8 {
1902 return Err(Error::InvalidDcidInitialization);
1903 }
1904 }
1905 #[cfg(not(feature = "custom-client-dcid"))]
1906 if client_dcid.is_some() {
1907 return Err(Error::InvalidDcidInitialization);
1908 }
1909
1910 let max_rx_data = config.local_transport_params.initial_max_data;
1911
1912 let scid_as_hex: Vec<String> =
1913 scid.iter().map(|b| format!("{b:02x}")).collect();
1914
1915 let reset_token = if is_server {
1916 config.local_transport_params.stateless_reset_token
1917 } else {
1918 None
1919 };
1920
1921 let recovery_config = recovery::RecoveryConfig::from_config(config);
1922
1923 let mut path = path::Path::new(
1924 local,
1925 peer,
1926 &recovery_config,
1927 config.path_challenge_recv_max_queue_len,
1928 true,
1929 Some(config),
1930 );
1931
1932        // If we sent a Retry, assume the peer's address is verified.
1933 path.verified_peer_address = retry_cids.is_some();
1934 // Assume clients validate the server's address implicitly.
1935 path.peer_verified_local_address = is_server;
1936
1937 // Do not allocate more than the number of active CIDs.
1938 let paths = path::PathMap::new(
1939 path,
1940 config.local_transport_params.active_conn_id_limit as usize,
1941 is_server,
1942 );
1943
1944 let active_path_id = paths.get_active_path_id()?;
1945
1946 let ids = cid::ConnectionIdentifiers::new(
1947 config.local_transport_params.active_conn_id_limit as usize,
1948 scid,
1949 active_path_id,
1950 reset_token,
1951 );
1952
1953 let mut conn = Connection {
1954 version: config.version,
1955
1956 ids,
1957
1958 trace_id: scid_as_hex.join(""),
1959
1960 pkt_num_spaces: [
1961 packet::PktNumSpace::new(),
1962 packet::PktNumSpace::new(),
1963 packet::PktNumSpace::new(),
1964 ],
1965
1966 crypto_ctx: [
1967 packet::CryptoContext::new(),
1968 packet::CryptoContext::new(),
1969 packet::CryptoContext::new(),
1970 ],
1971
1972 next_pkt_num: 0,
1973
1974 pkt_num_manager: packet::PktNumManager::new(),
1975
1976 peer_transport_params: TransportParams::default(),
1977
1978 peer_transport_params_track_unknown: config
1979 .track_unknown_transport_params,
1980
1981 local_transport_params: config.local_transport_params.clone(),
1982
1983 handshake: tls,
1984
1985 session: None,
1986
1987 recovery_config,
1988
1989 paths,
1990 path_challenge_recv_max_queue_len: config
1991 .path_challenge_recv_max_queue_len,
1992 path_challenge_rx_count: 0,
1993
1994 application_protos: config.application_protos.clone(),
1995
1996 recv_count: 0,
1997 sent_count: 0,
1998 lost_count: 0,
1999 spurious_lost_count: 0,
2000 retrans_count: 0,
2001 dgram_sent_count: 0,
2002 dgram_recv_count: 0,
2003 sent_bytes: 0,
2004 recv_bytes: 0,
2005 acked_bytes: 0,
2006 lost_bytes: 0,
2007
2008 rx_data: 0,
2009 flow_control: flowcontrol::FlowControl::new(
2010 max_rx_data,
2011 cmp::min(max_rx_data / 2 * 3, DEFAULT_CONNECTION_WINDOW),
2012 config.max_connection_window,
2013 ),
2014 should_send_max_data: false,
2015
2016 tx_cap: 0,
2017 tx_cap_factor: config.tx_cap_factor,
2018
2019 tx_buffered: 0,
2020 tx_buffered_state: TxBufferTrackingState::Ok,
2021
2022 tx_data: 0,
2023 max_tx_data: 0,
2024 last_tx_data: 0,
2025
2026 stream_retrans_bytes: 0,
2027
2028 streams: stream::StreamMap::new(
2029 config.local_transport_params.initial_max_streams_bidi,
2030 config.local_transport_params.initial_max_streams_uni,
2031 config.max_stream_window,
2032 ),
2033
2034 odcid: None,
2035
2036 rscid: None,
2037
2038 token: None,
2039
2040 local_error: None,
2041
2042 peer_error: None,
2043
2044 blocked_limit: None,
2045
2046 idle_timer: None,
2047
2048 draining_timer: None,
2049
2050 undecryptable_pkts: VecDeque::new(),
2051
2052 alpn: Vec::new(),
2053
2054 is_server,
2055
2056 derived_initial_secrets: false,
2057
2058 did_version_negotiation: false,
2059
2060 did_retry: false,
2061
2062 got_peer_conn_id: false,
2063
2064 // Assume clients validate the server's address implicitly.
2065 peer_verified_initial_address: is_server,
2066
2067 parsed_peer_transport_params: false,
2068
2069 handshake_completed: false,
2070
2071 handshake_done_sent: false,
2072 handshake_done_acked: false,
2073
2074 handshake_confirmed: false,
2075
2076 key_phase: false,
2077
2078 ack_eliciting_sent: false,
2079
2080 closed: false,
2081
2082 timed_out: false,
2083
2084 grease: config.grease,
2085
2086 keylog: None,
2087
2088 #[cfg(feature = "qlog")]
2089 qlog: Default::default(),
2090
2091 dgram_recv_queue: dgram::DatagramQueue::new(
2092 config.dgram_recv_max_queue_len,
2093 ),
2094
2095 dgram_send_queue: dgram::DatagramQueue::new(
2096 config.dgram_send_max_queue_len,
2097 ),
2098
2099 emit_dgram: true,
2100
2101 disable_dcid_reuse: config.disable_dcid_reuse,
2102
2103 reset_stream_local_count: 0,
2104 stopped_stream_local_count: 0,
2105 reset_stream_remote_count: 0,
2106 stopped_stream_remote_count: 0,
2107
2108 data_blocked_sent_count: 0,
2109 stream_data_blocked_sent_count: 0,
2110 data_blocked_recv_count: 0,
2111 stream_data_blocked_recv_count: 0,
2112
2113 max_amplification_factor: config.max_amplification_factor,
2114 };
2115
2116 if let Some(retry_cids) = retry_cids {
2117 conn.local_transport_params
2118 .original_destination_connection_id =
2119 Some(retry_cids.original_destination_cid.to_vec().into());
2120
2121 conn.local_transport_params.retry_source_connection_id =
2122 Some(retry_cids.retry_source_cid.to_vec().into());
2123
2124 conn.did_retry = true;
2125 }
2126
2127 conn.local_transport_params.initial_source_connection_id =
2128 Some(conn.ids.get_scid(0)?.cid.to_vec().into());
2129
2130 conn.handshake.init(is_server)?;
2131
2132 conn.handshake
2133 .use_legacy_codepoint(config.version != PROTOCOL_VERSION_V1);
2134
2135 conn.encode_transport_params()?;
2136
2137 if !is_server {
2138 let dcid = if let Some(client_dcid) = client_dcid {
2139                // We already had a DCID generated for us, use it.
2140 client_dcid.to_vec()
2141 } else {
2142                // No DCID was provided, so generate a random destination
2143                // connection ID; the client's Initial secrets are derived
2144                // from it below.
2145 let mut dcid = [0; 16];
2146 rand::rand_bytes(&mut dcid[..]);
2147 dcid.to_vec()
2148 };
2149
2150 let (aead_open, aead_seal) = crypto::derive_initial_key_material(
2151 &dcid,
2152 conn.version,
2153 conn.is_server,
2154 false,
2155 )?;
2156
2157 let reset_token = conn.peer_transport_params.stateless_reset_token;
2158 conn.set_initial_dcid(
2159 dcid.to_vec().into(),
2160 reset_token,
2161 active_path_id,
2162 )?;
2163
2164 conn.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
2165 conn.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);
2166
2167 conn.derived_initial_secrets = true;
2168 }
2169
2170 Ok(conn)
2171 }
2172
2173 /// Sets keylog output to the designated [`Writer`].
2174 ///
2175 /// This needs to be called as soon as the connection is created, to avoid
2176 /// missing some early logs.
2177 ///
2178 /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
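    ///
    /// ## Examples:
    ///
    /// A minimal sketch that attaches a file-backed writer right after the
    /// connection is created (the `keys.log` path is just an example):
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let local = "127.0.0.1:4321".parse().unwrap();
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// let mut conn = quiche::connect(None, &scid, local, peer, &mut config)?;
    ///
    /// let keylog = std::fs::File::create("keys.log").unwrap();
    /// conn.set_keylog(Box::new(keylog));
    /// # Ok::<(), quiche::Error>(())
    /// ```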
2179 #[inline]
2180 pub fn set_keylog(&mut self, writer: Box<dyn std::io::Write + Send + Sync>) {
2181 self.keylog = Some(writer);
2182 }
2183
2184 /// Sets qlog output to the designated [`Writer`].
2185 ///
2186 /// Only events included in `QlogLevel::Base` are written. The serialization
2187 /// format is JSON-SEQ.
2188 ///
2189 /// This needs to be called as soon as the connection is created, to avoid
2190 /// missing some early logs.
2191 ///
2192 /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
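    ///
    /// ## Examples:
    ///
    /// A minimal sketch that streams the qlog output to a file; the path,
    /// title and description strings are arbitrary:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let local = "127.0.0.1:4321".parse().unwrap();
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// let mut conn = quiche::connect(None, &scid, local, peer, &mut config)?;
    ///
    /// let writer = std::fs::File::create("conn.sqlog").unwrap();
    /// conn.set_qlog(
    ///     Box::new(writer),
    ///     "quiche qlog".to_string(),
    ///     "example qlog trace".to_string(),
    /// );
    /// # Ok::<(), quiche::Error>(())
    /// ```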
2193 #[cfg(feature = "qlog")]
2194 #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
2195 pub fn set_qlog(
2196 &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
2197 description: String,
2198 ) {
2199 self.set_qlog_with_level(writer, title, description, QlogLevel::Base)
2200 }
2201
2202 /// Sets qlog output to the designated [`Writer`].
2203 ///
2204 /// Only qlog events included in the specified `QlogLevel` are written. The
2205 /// serialization format is JSON-SEQ.
2206 ///
2207 /// This needs to be called as soon as the connection is created, to avoid
2208 /// missing some early logs.
2209 ///
2210 /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
2211 #[cfg(feature = "qlog")]
2212 #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
2213 pub fn set_qlog_with_level(
2214 &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
2215 description: String, qlog_level: QlogLevel,
2216 ) {
2217 let vp = if self.is_server {
2218 qlog::VantagePointType::Server
2219 } else {
2220 qlog::VantagePointType::Client
2221 };
2222
2223 let level = match qlog_level {
2224 QlogLevel::Core => EventImportance::Core,
2225
2226 QlogLevel::Base => EventImportance::Base,
2227
2228 QlogLevel::Extra => EventImportance::Extra,
2229 };
2230
2231 self.qlog.level = level;
2232
2233 let trace = qlog::TraceSeq::new(
2234 qlog::VantagePoint {
2235 name: None,
2236 ty: vp,
2237 flow: None,
2238 },
2239 Some(title.to_string()),
2240 Some(description.to_string()),
2241 Some(qlog::Configuration {
2242 time_offset: Some(0.0),
2243 original_uris: None,
2244 }),
2245 None,
2246 );
2247
2248 let mut streamer = qlog::streamer::QlogStreamer::new(
2249 qlog::QLOG_VERSION.to_string(),
2250 Some(title),
2251 Some(description),
2252 None,
2253 Instant::now(),
2254 trace,
2255 self.qlog.level,
2256 writer,
2257 );
2258
2259 streamer.start_log().ok();
2260
2261 let ev_data = self
2262 .local_transport_params
2263 .to_qlog(TransportOwner::Local, self.handshake.cipher());
2264
2265 // This event occurs very early, so just mark the relative time as 0.0.
2266 streamer.add_event(Event::with_time(0.0, ev_data)).ok();
2267
2268 self.qlog.streamer = Some(streamer);
2269 }
2270
2271 /// Returns a mutable reference to the QlogStreamer, if it exists.
2272 #[cfg(feature = "qlog")]
2273 #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
2274 pub fn qlog_streamer(&mut self) -> Option<&mut qlog::streamer::QlogStreamer> {
2275 self.qlog.streamer.as_mut()
2276 }
2277
2278 /// Configures the given session for resumption.
2279 ///
2280 /// On the client, this can be used to offer the given serialized session,
2281 /// as returned by [`session()`], for resumption.
2282 ///
2283 /// This must only be called immediately after creating a connection, that
2284 /// is, before any packet is sent or received.
2285 ///
2286 /// [`session()`]: struct.Connection.html#method.session
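    ///
    /// ## Examples:
    ///
    /// A minimal sketch; `stored_session` stands in for bytes previously
    /// returned by [`session()`] and saved by the application:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let local = "127.0.0.1:4321".parse().unwrap();
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let stored_session: Vec<u8> = Vec::new();
    /// let mut conn =
    ///     quiche::connect(Some("quic.tech"), &scid, local, peer, &mut config)?;
    ///
    /// // Offer the stored session before any packet is sent or received.
    /// conn.set_session(&stored_session)?;
    /// # Ok::<(), quiche::Error>(())
    /// ```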
2287 #[inline]
2288 pub fn set_session(&mut self, session: &[u8]) -> Result<()> {
2289 let mut b = octets::Octets::with_slice(session);
2290
2291 let session_len = b.get_u64()? as usize;
2292 let session_bytes = b.get_bytes(session_len)?;
2293
2294 self.handshake.set_session(session_bytes.as_ref())?;
2295
2296 let raw_params_len = b.get_u64()? as usize;
2297 let raw_params_bytes = b.get_bytes(raw_params_len)?;
2298
2299 let peer_params = TransportParams::decode(
2300 raw_params_bytes.as_ref(),
2301 self.is_server,
2302 self.peer_transport_params_track_unknown,
2303 )?;
2304
2305 self.process_peer_transport_params(peer_params)?;
2306
2307 Ok(())
2308 }
2309
2310 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
2311 ///
2312 /// This must only be called immediately after creating a connection, that
2313 /// is, before any packet is sent or received.
2314 ///
2315 /// The default value is infinite, that is, no timeout is used unless
2316 /// already configured when creating the connection.
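    ///
    /// ## Examples:
    ///
    /// A minimal sketch that sets a 30 second idle timeout right after the
    /// connection is created:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let local = "127.0.0.1:4321".parse().unwrap();
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// let mut conn = quiche::connect(None, &scid, local, peer, &mut config)?;
    ///
    /// // 30 000 milliseconds, set before any packet is sent or received.
    /// conn.set_max_idle_timeout(30_000)?;
    /// # Ok::<(), quiche::Error>(())
    /// ```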
2317 pub fn set_max_idle_timeout(&mut self, v: u64) -> Result<()> {
2318 self.local_transport_params.max_idle_timeout =
2319 cmp::min(v, octets::MAX_VAR_INT);
2320
2321 self.encode_transport_params()
2322 }
2323
2324 /// Sets the congestion control algorithm used.
2325 ///
2326 /// This function can only be called inside one of BoringSSL's handshake
2327 /// callbacks, before any packet has been sent. Calling this function any
2328 /// other time will have no effect.
2329 ///
2330 /// See [`Config::set_cc_algorithm()`].
2331 ///
2332 /// [`Config::set_cc_algorithm()`]: struct.Config.html#method.set_cc_algorithm
2333 #[cfg(feature = "boringssl-boring-crate")]
2334 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2335 pub fn set_cc_algorithm_in_handshake(
2336 ssl: &mut boring::ssl::SslRef, algo: CongestionControlAlgorithm,
2337 ) -> Result<()> {
2338 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2339
2340 ex_data.recovery_config.cc_algorithm = algo;
2341
2342 Ok(())
2343 }
2344
2345 /// Sets custom BBR settings.
2346 ///
2347 /// This API is experimental and will be removed in the future.
2348 ///
2349    /// Currently this only applies if `cc_algorithm` is set to
2350    /// `CongestionControlAlgorithm::Bbr2Gcongestion`.
2351 ///
2352 /// This function can only be called inside one of BoringSSL's handshake
2353 /// callbacks, before any packet has been sent. Calling this function any
2354 /// other time will have no effect.
2355 ///
2356 /// See [`Config::set_custom_bbr_settings()`].
2357 ///
2358 /// [`Config::set_custom_bbr_settings()`]: struct.Config.html#method.set_custom_bbr_settings
2359 #[cfg(all(feature = "boringssl-boring-crate", feature = "internal"))]
2360 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2361 #[doc(hidden)]
2362 pub fn set_custom_bbr_settings_in_handshake(
2363 ssl: &mut boring::ssl::SslRef, custom_bbr_params: BbrParams,
2364 ) -> Result<()> {
2365 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2366
2367 ex_data.recovery_config.custom_bbr_params = Some(custom_bbr_params);
2368
2369 Ok(())
2370 }
2371
2372 /// Sets the congestion control algorithm used by string.
2373 ///
2374 /// This function can only be called inside one of BoringSSL's handshake
2375 /// callbacks, before any packet has been sent. Calling this function any
2376 /// other time will have no effect.
2377 ///
2378 /// See [`Config::set_cc_algorithm_name()`].
2379 ///
2380 /// [`Config::set_cc_algorithm_name()`]: struct.Config.html#method.set_cc_algorithm_name
2381 #[cfg(feature = "boringssl-boring-crate")]
2382 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2383 pub fn set_cc_algorithm_name_in_handshake(
2384 ssl: &mut boring::ssl::SslRef, name: &str,
2385 ) -> Result<()> {
2386 let cc_algo = CongestionControlAlgorithm::from_str(name)?;
2387 Self::set_cc_algorithm_in_handshake(ssl, cc_algo)
2388 }
2389
2390 /// Sets initial congestion window size in terms of packet count.
2391 ///
2392 /// This function can only be called inside one of BoringSSL's handshake
2393 /// callbacks, before any packet has been sent. Calling this function any
2394 /// other time will have no effect.
2395 ///
2396 /// See [`Config::set_initial_congestion_window_packets()`].
2397 ///
2398 /// [`Config::set_initial_congestion_window_packets()`]: struct.Config.html#method.set_initial_congestion_window_packets
2399 #[cfg(feature = "boringssl-boring-crate")]
2400 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2401 pub fn set_initial_congestion_window_packets_in_handshake(
2402 ssl: &mut boring::ssl::SslRef, packets: usize,
2403 ) -> Result<()> {
2404 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2405
2406 ex_data.recovery_config.initial_congestion_window_packets = packets;
2407
2408 Ok(())
2409 }
2410
2411    /// Configures whether to enable relaxed loss detection on spurious loss.
2412 ///
2413 /// This function can only be called inside one of BoringSSL's handshake
2414 /// callbacks, before any packet has been sent. Calling this function any
2415 /// other time will have no effect.
2416 ///
2417 /// See [`Config::set_enable_relaxed_loss_threshold()`].
2418 ///
2419 /// [`Config::set_enable_relaxed_loss_threshold()`]: struct.Config.html#method.set_enable_relaxed_loss_threshold
2420 #[cfg(feature = "boringssl-boring-crate")]
2421 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2422 pub fn set_enable_relaxed_loss_threshold_in_handshake(
2423 ssl: &mut boring::ssl::SslRef, enable: bool,
2424 ) -> Result<()> {
2425 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2426
2427 ex_data.recovery_config.enable_relaxed_loss_threshold = enable;
2428
2429 Ok(())
2430 }
2431
2432 /// Configures whether to enable HyStart++.
2433 ///
2434 /// This function can only be called inside one of BoringSSL's handshake
2435 /// callbacks, before any packet has been sent. Calling this function any
2436 /// other time will have no effect.
2437 ///
2438 /// See [`Config::enable_hystart()`].
2439 ///
2440 /// [`Config::enable_hystart()`]: struct.Config.html#method.enable_hystart
2441 #[cfg(feature = "boringssl-boring-crate")]
2442 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2443 pub fn set_hystart_in_handshake(
2444 ssl: &mut boring::ssl::SslRef, v: bool,
2445 ) -> Result<()> {
2446 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2447
2448 ex_data.recovery_config.hystart = v;
2449
2450 Ok(())
2451 }
2452
2453 /// Configures whether to enable pacing.
2454 ///
2455 /// This function can only be called inside one of BoringSSL's handshake
2456 /// callbacks, before any packet has been sent. Calling this function any
2457 /// other time will have no effect.
2458 ///
2459 /// See [`Config::enable_pacing()`].
2460 ///
2461 /// [`Config::enable_pacing()`]: struct.Config.html#method.enable_pacing
2462 #[cfg(feature = "boringssl-boring-crate")]
2463 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2464 pub fn set_pacing_in_handshake(
2465 ssl: &mut boring::ssl::SslRef, v: bool,
2466 ) -> Result<()> {
2467 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2468
2469 ex_data.recovery_config.pacing = v;
2470
2471 Ok(())
2472 }
2473
2474 /// Sets the max value for pacing rate.
2475 ///
2476 /// This function can only be called inside one of BoringSSL's handshake
2477 /// callbacks, before any packet has been sent. Calling this function any
2478 /// other time will have no effect.
2479 ///
2480 /// See [`Config::set_max_pacing_rate()`].
2481 ///
2482 /// [`Config::set_max_pacing_rate()`]: struct.Config.html#method.set_max_pacing_rate
2483 #[cfg(feature = "boringssl-boring-crate")]
2484 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2485 pub fn set_max_pacing_rate_in_handshake(
2486 ssl: &mut boring::ssl::SslRef, v: Option<u64>,
2487 ) -> Result<()> {
2488 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2489
2490 ex_data.recovery_config.max_pacing_rate = v;
2491
2492 Ok(())
2493 }
2494
2495 /// Sets the maximum outgoing UDP payload size.
2496 ///
2497 /// This function can only be called inside one of BoringSSL's handshake
2498 /// callbacks, before any packet has been sent. Calling this function any
2499 /// other time will have no effect.
2500 ///
2501 /// See [`Config::set_max_send_udp_payload_size()`].
2502 ///
2503 /// [`Config::set_max_send_udp_payload_size()`]: struct.Config.html#method.set_max_send_udp_payload_size
2504 #[cfg(feature = "boringssl-boring-crate")]
2505 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2506 pub fn set_max_send_udp_payload_size_in_handshake(
2507 ssl: &mut boring::ssl::SslRef, v: usize,
2508 ) -> Result<()> {
2509 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2510
2511 ex_data.recovery_config.max_send_udp_payload_size = v;
2512
2513 Ok(())
2514 }
2515
2516 /// Sets the send capacity factor.
2517 ///
2518 /// This function can only be called inside one of BoringSSL's handshake
2519 /// callbacks, before any packet has been sent. Calling this function any
2520 /// other time will have no effect.
2521 ///
2522 /// See [`Config::set_send_capacity_factor()`].
2523 ///
2524    /// [`Config::set_send_capacity_factor()`]: struct.Config.html#method.set_send_capacity_factor
2525 #[cfg(feature = "boringssl-boring-crate")]
2526 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2527 pub fn set_send_capacity_factor_in_handshake(
2528 ssl: &mut boring::ssl::SslRef, v: f64,
2529 ) -> Result<()> {
2530 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2531
2532 ex_data.tx_cap_factor = v;
2533
2534 Ok(())
2535 }
2536
2537 /// Configures whether to do path MTU discovery.
2538 ///
2539 /// This function can only be called inside one of BoringSSL's handshake
2540 /// callbacks, before any packet has been sent. Calling this function any
2541 /// other time will have no effect.
2542 ///
2543 /// See [`Config::discover_pmtu()`].
2544 ///
2545 /// [`Config::discover_pmtu()`]: struct.Config.html#method.discover_pmtu
2546 #[cfg(feature = "boringssl-boring-crate")]
2547 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2548 pub fn set_discover_pmtu_in_handshake(
2549 ssl: &mut boring::ssl::SslRef, discover: bool, max_probes: u8,
2550 ) -> Result<()> {
2551 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2552
2553 ex_data.pmtud = Some((discover, max_probes));
2554
2555 Ok(())
2556 }
2557
2558 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
2559 ///
2560 /// This function can only be called inside one of BoringSSL's handshake
2561 /// callbacks, before any packet has been sent. Calling this function any
2562 /// other time will have no effect.
2563 ///
2564 /// See [`Config::set_max_idle_timeout()`].
2565 ///
2566 /// [`Config::set_max_idle_timeout()`]: struct.Config.html#method.set_max_idle_timeout
2567 #[cfg(feature = "boringssl-boring-crate")]
2568 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2569 pub fn set_max_idle_timeout_in_handshake(
2570 ssl: &mut boring::ssl::SslRef, v: u64,
2571 ) -> Result<()> {
2572 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2573
2574 ex_data.local_transport_params.max_idle_timeout = v;
2575
2576 Self::set_transport_parameters_in_hanshake(
2577 ex_data.local_transport_params.clone(),
2578 ex_data.is_server,
2579 ssl,
2580 )
2581 }
2582
2583 /// Sets the `initial_max_streams_bidi` transport parameter.
2584 ///
2585 /// This function can only be called inside one of BoringSSL's handshake
2586 /// callbacks, before any packet has been sent. Calling this function any
2587 /// other time will have no effect.
2588 ///
2589 /// See [`Config::set_initial_max_streams_bidi()`].
2590 ///
2591 /// [`Config::set_initial_max_streams_bidi()`]: struct.Config.html#method.set_initial_max_streams_bidi
2592 #[cfg(feature = "boringssl-boring-crate")]
2593 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2594 pub fn set_initial_max_streams_bidi_in_handshake(
2595 ssl: &mut boring::ssl::SslRef, v: u64,
2596 ) -> Result<()> {
2597 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2598
2599 ex_data.local_transport_params.initial_max_streams_bidi = v;
2600
2601 Self::set_transport_parameters_in_hanshake(
2602 ex_data.local_transport_params.clone(),
2603 ex_data.is_server,
2604 ssl,
2605 )
2606 }
2607
2608 #[cfg(feature = "boringssl-boring-crate")]
2609 fn set_transport_parameters_in_hanshake(
2610 params: TransportParams, is_server: bool, ssl: &mut boring::ssl::SslRef,
2611 ) -> Result<()> {
2612 use foreign_types_shared::ForeignTypeRef;
2613
2614 // In order to apply the new parameter to the TLS state before TPs are
2615 // written into a TLS message, we need to re-encode all TPs immediately.
2616 //
2617 // Since we don't have direct access to the main `Connection` object, we
2618 // need to re-create the `Handshake` state from the `SslRef`.
2619 //
2620 // SAFETY: the `Handshake` object must not be drop()ed, otherwise it
2621 // would free the underlying BoringSSL structure.
2622 let mut handshake =
2623 unsafe { tls::Handshake::from_ptr(ssl.as_ptr() as _) };
2624 handshake.set_quic_transport_params(¶ms, is_server)?;
2625
2626 // Avoid running `drop(handshake)` as that would free the underlying
2627 // handshake state.
2628 std::mem::forget(handshake);
2629
2630 Ok(())
2631 }
2632
2633 /// Processes QUIC packets received from the peer.
2634 ///
2635 /// On success the number of bytes processed from the input buffer is
2636 /// returned. On error the connection will be closed by calling [`close()`]
2637 /// with the appropriate error code.
2638 ///
2639 /// Coalesced packets will be processed as necessary.
2640 ///
2641 /// Note that the contents of the input buffer `buf` might be modified by
2642 /// this function due to, for example, in-place decryption.
2643 ///
2644 /// [`close()`]: struct.Connection.html#method.close
2645 ///
2646 /// ## Examples:
2647 ///
2648 /// ```no_run
2649 /// # let mut buf = [0; 512];
2650 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
2651 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
2652 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
2653 /// # let peer = "127.0.0.1:1234".parse().unwrap();
2654 /// # let local = socket.local_addr().unwrap();
2655 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
2656 /// loop {
2657 /// let (read, from) = socket.recv_from(&mut buf).unwrap();
2658 ///
2659 /// let recv_info = quiche::RecvInfo {
2660 /// from,
2661 /// to: local,
2662 /// };
2663 ///
2664 /// let read = match conn.recv(&mut buf[..read], recv_info) {
2665 /// Ok(v) => v,
2666 ///
2667 /// Err(e) => {
2668 /// // An error occurred, handle it.
2669 /// break;
2670 /// },
2671 /// };
2672 /// }
2673 /// # Ok::<(), quiche::Error>(())
2674 /// ```
2675 pub fn recv(&mut self, buf: &mut [u8], info: RecvInfo) -> Result<usize> {
2676 let len = buf.len();
2677
2678 if len == 0 {
2679 return Err(Error::BufferTooShort);
2680 }
2681
2682 let recv_pid = self.paths.path_id_from_addrs(&(info.to, info.from));
2683
2684 if let Some(recv_pid) = recv_pid {
2685 let recv_path = self.paths.get_mut(recv_pid)?;
2686
2687 // Keep track of how many bytes we received from the client, so we
2688            // can limit the bytes sent back before address validation to a
2689 // multiple of this. The limit needs to be increased early on, so
2690 // that if there is an error there is enough credit to send a
2691 // CONNECTION_CLOSE.
2692 //
2693            // It doesn't matter if the packets received were valid or not; we
2694 // only need to track the total amount of bytes received.
2695 //
2696            // Note that we also need to limit the number of bytes we send on a
2697 // path if we are not the host that initiated its usage.
2698 if self.is_server && !recv_path.verified_peer_address {
2699 recv_path.max_send_bytes += len * self.max_amplification_factor;
2700 }
2701 } else if !self.is_server {
2702 // If a client receives packets from an unknown server address,
2703 // the client MUST discard these packets.
2704 trace!(
2705 "{} client received packet from unknown address {:?}, dropping",
2706 self.trace_id,
2707 info,
2708 );
2709
2710 return Ok(len);
2711 }
2712
2713 let mut done = 0;
2714 let mut left = len;
2715
2716 // Process coalesced packets.
2717 while left > 0 {
2718 let read = match self.recv_single(
2719 &mut buf[len - left..len],
2720 &info,
2721 recv_pid,
2722 ) {
2723 Ok(v) => v,
2724
2725 Err(Error::Done) => {
2726 // If the packet can't be processed or decrypted, check if
2727 // it's a stateless reset.
2728 if self.is_stateless_reset(&buf[len - left..len]) {
2729 trace!("{} packet is a stateless reset", self.trace_id);
2730
2731 self.mark_closed();
2732 }
2733
2734 left
2735 },
2736
2737 Err(e) => {
2738 // In case of error processing the incoming packet, close
2739 // the connection.
2740 self.close(false, e.to_wire(), b"").ok();
2741 return Err(e);
2742 },
2743 };
2744
2745 done += read;
2746 left -= read;
2747 }
2748
2749 // Even though the packet was previously "accepted", it
2750 // should be safe to forward the error, as it also comes
2751 // from the `recv()` method.
2752 self.process_undecrypted_0rtt_packets()?;
2753
2754 Ok(done)
2755 }
2756
2757 fn process_undecrypted_0rtt_packets(&mut self) -> Result<()> {
2758 // Process previously undecryptable 0-RTT packets if the decryption key
2759 // is now available.
2760 if self.crypto_ctx[packet::Epoch::Application]
2761 .crypto_0rtt_open
2762 .is_some()
2763 {
2764 while let Some((mut pkt, info)) = self.undecryptable_pkts.pop_front()
2765 {
2766 if let Err(e) = self.recv(&mut pkt, info) {
2767 self.undecryptable_pkts.clear();
2768
2769 return Err(e);
2770 }
2771 }
2772 }
2773 Ok(())
2774 }
2775
2776 /// Returns true if a QUIC packet is a stateless reset.
2777 fn is_stateless_reset(&self, buf: &[u8]) -> bool {
2778 // If the packet is too small, then we just throw it away.
2779 let buf_len = buf.len();
2780 if buf_len < 21 {
2781 return false;
2782 }
2783
2784 // TODO: we should iterate over all active destination connection IDs
2785 // and check against their reset token.
2786 match self.peer_transport_params.stateless_reset_token {
2787 Some(token) => {
2788 let token_len = 16;
2789
2790 crypto::verify_slices_are_equal(
2791 &token.to_be_bytes(),
2792 &buf[buf_len - token_len..buf_len],
2793 )
2794 .is_ok()
2795 },
2796
2797 None => false,
2798 }
2799 }
2800
2801 /// Processes a single QUIC packet received from the peer.
2802 ///
2803 /// On success the number of bytes processed from the input buffer is
2804 /// returned. When the [`Done`] error is returned, processing of the
2805 /// remainder of the incoming UDP datagram should be interrupted.
2806 ///
2807    /// Note that a server might observe a new 4-tuple, preventing it from
2808    /// knowing in advance which path the incoming packet belongs to
2809    /// (`recv_pid` is `None`). As a client, packets from an unknown 4-tuple
2810    /// are dropped beforehand (see `recv()`).
2811 ///
2812 /// On error, an error other than [`Done`] is returned.
2813 ///
2814 /// [`Done`]: enum.Error.html#variant.Done
2815 fn recv_single(
2816 &mut self, buf: &mut [u8], info: &RecvInfo, recv_pid: Option<usize>,
2817 ) -> Result<usize> {
2818 let now = Instant::now();
2819
2820 if buf.is_empty() {
2821 return Err(Error::Done);
2822 }
2823
2824 if self.is_closed() || self.is_draining() {
2825 return Err(Error::Done);
2826 }
2827
2828 let is_closing = self.local_error.is_some();
2829
2830 if is_closing {
2831 return Err(Error::Done);
2832 }
2833
2834 let buf_len = buf.len();
2835
2836 let mut b = octets::OctetsMut::with_slice(buf);
2837
2838 let mut hdr = Header::from_bytes(&mut b, self.source_id().len())
2839 .map_err(|e| {
2840 drop_pkt_on_err(
2841 e,
2842 self.recv_count,
2843 self.is_server,
2844 &self.trace_id,
2845 )
2846 })?;
2847
2848 if hdr.ty == Type::VersionNegotiation {
2849 // Version negotiation packets can only be sent by the server.
2850 if self.is_server {
2851 return Err(Error::Done);
2852 }
2853
2854 // Ignore duplicate version negotiation.
2855 if self.did_version_negotiation {
2856 return Err(Error::Done);
2857 }
2858
2859 // Ignore version negotiation if any other packet has already been
2860 // successfully processed.
2861 if self.recv_count > 0 {
2862 return Err(Error::Done);
2863 }
2864
2865 if hdr.dcid != self.source_id() {
2866 return Err(Error::Done);
2867 }
2868
2869 if hdr.scid != self.destination_id() {
2870 return Err(Error::Done);
2871 }
2872
2873 trace!("{} rx pkt {:?}", self.trace_id, hdr);
2874
2875 let versions = hdr.versions.ok_or(Error::Done)?;
2876
2877 // Ignore version negotiation if the version already selected is
2878 // listed.
2879 if versions.contains(&self.version) {
2880 return Err(Error::Done);
2881 }
2882
2883 let supported_versions =
2884 versions.iter().filter(|&&v| version_is_supported(v));
2885
2886 let mut found_version = false;
2887
2888 for &v in supported_versions {
2889 found_version = true;
2890
2891 // The final version takes precedence over draft ones.
2892 if v == PROTOCOL_VERSION_V1 {
2893 self.version = v;
2894 break;
2895 }
2896
2897 self.version = cmp::max(self.version, v);
2898 }
2899
2900 if !found_version {
2901 // We don't support any of the versions offered.
2902 //
2903 // While a man-in-the-middle attacker might be able to
2904 // inject a version negotiation packet that triggers this
2905 // failure, the window of opportunity is very small and
2906 // this error is quite useful for debugging, so don't just
2907 // ignore the packet.
2908 return Err(Error::UnknownVersion);
2909 }
2910
2911 self.did_version_negotiation = true;
2912
2913 // Derive Initial secrets based on the new version.
2914 let (aead_open, aead_seal) = crypto::derive_initial_key_material(
2915 &self.destination_id(),
2916 self.version,
2917 self.is_server,
2918 true,
2919 )?;
2920
2921 // Reset connection state to force sending another Initial packet.
2922 self.drop_epoch_state(packet::Epoch::Initial, now);
2923 self.got_peer_conn_id = false;
2924 self.handshake.clear()?;
2925
2926 self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
2927 self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);
2928
2929 self.handshake
2930 .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);
2931
2932 // Encode transport parameters again, as the new version might be
2933 // using a different format.
2934 self.encode_transport_params()?;
2935
2936 return Err(Error::Done);
2937 }
2938
2939 if hdr.ty == Type::Retry {
2940 // Retry packets can only be sent by the server.
2941 if self.is_server {
2942 return Err(Error::Done);
2943 }
2944
2945 // Ignore duplicate retry.
2946 if self.did_retry {
2947 return Err(Error::Done);
2948 }
2949
2950 // Check if Retry packet is valid.
2951 if packet::verify_retry_integrity(
2952 &b,
2953 &self.destination_id(),
2954 self.version,
2955 )
2956 .is_err()
2957 {
2958 return Err(Error::Done);
2959 }
2960
2961 trace!("{} rx pkt {:?}", self.trace_id, hdr);
2962
2963 self.token = hdr.token;
2964 self.did_retry = true;
2965
2966 // Remember peer's new connection ID.
2967 self.odcid = Some(self.destination_id().into_owned());
2968
2969 self.set_initial_dcid(
2970 hdr.scid.clone(),
2971 None,
2972 self.paths.get_active_path_id()?,
2973 )?;
2974
2975 self.rscid = Some(self.destination_id().into_owned());
2976
2977 // Derive Initial secrets using the new connection ID.
2978 let (aead_open, aead_seal) = crypto::derive_initial_key_material(
2979 &hdr.scid,
2980 self.version,
2981 self.is_server,
2982 true,
2983 )?;
2984
2985 // Reset connection state to force sending another Initial packet.
2986 self.drop_epoch_state(packet::Epoch::Initial, now);
2987 self.got_peer_conn_id = false;
2988 self.handshake.clear()?;
2989
2990 self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
2991 self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);
2992
2993 return Err(Error::Done);
2994 }
2995
2996 if self.is_server && !self.did_version_negotiation {
2997 if !version_is_supported(hdr.version) {
2998 return Err(Error::UnknownVersion);
2999 }
3000
3001 self.version = hdr.version;
3002 self.did_version_negotiation = true;
3003
3004 self.handshake
3005 .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);
3006
3007 // Encode transport parameters again, as the new version might be
3008 // using a different format.
3009 self.encode_transport_params()?;
3010 }
3011
3012 if hdr.ty != Type::Short && hdr.version != self.version {
3013 // At this point version negotiation was already performed, so
3014 // ignore packets that don't match the connection's version.
3015 return Err(Error::Done);
3016 }
3017
3018 // Long header packets have an explicit payload length, but short
3019        // packets don't, so just use the remaining capacity in the buffer.
3020 let payload_len = if hdr.ty == Type::Short {
3021 b.cap()
3022 } else {
3023 b.get_varint().map_err(|e| {
3024 drop_pkt_on_err(
3025 e.into(),
3026 self.recv_count,
3027 self.is_server,
3028 &self.trace_id,
3029 )
3030 })? as usize
3031 };
3032
3033        // Make sure the buffer is the same size as, or larger than, the
3034        // explicit payload length.
3035 if payload_len > b.cap() {
3036 return Err(drop_pkt_on_err(
3037 Error::InvalidPacket,
3038 self.recv_count,
3039 self.is_server,
3040 &self.trace_id,
3041 ));
3042 }
3043
3044 // Derive initial secrets on the server.
3045 if !self.derived_initial_secrets {
3046 let (aead_open, aead_seal) = crypto::derive_initial_key_material(
3047 &hdr.dcid,
3048 self.version,
3049 self.is_server,
3050 false,
3051 )?;
3052
3053 self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
3054 self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);
3055
3056 self.derived_initial_secrets = true;
3057 }
3058
3059 // Select packet number space epoch based on the received packet's type.
3060 let epoch = hdr.ty.to_epoch()?;
3061
3062 // Select AEAD context used to open incoming packet.
3063 let aead = if hdr.ty == Type::ZeroRTT {
3064 // Only use 0-RTT key if incoming packet is 0-RTT.
3065 self.crypto_ctx[epoch].crypto_0rtt_open.as_ref()
3066 } else {
3067 // Otherwise use the packet number space's main key.
3068 self.crypto_ctx[epoch].crypto_open.as_ref()
3069 };
3070
3071 // Finally, discard packet if no usable key is available.
3072 let mut aead = match aead {
3073 Some(v) => v,
3074
3075 None => {
3076 if hdr.ty == Type::ZeroRTT &&
3077 self.undecryptable_pkts.len() < MAX_UNDECRYPTABLE_PACKETS &&
3078 !self.is_established()
3079 {
3080 // Buffer 0-RTT packets when the required read key is not
3081 // available yet, and process them later.
3082 //
3083 // TODO: in the future we might want to buffer other types
3084 // of undecryptable packets as well.
3085 let pkt_len = b.off() + payload_len;
3086 let pkt = (b.buf()[..pkt_len]).to_vec();
3087
3088 self.undecryptable_pkts.push_back((pkt, *info));
3089 return Ok(pkt_len);
3090 }
3091
3092 let e = drop_pkt_on_err(
3093 Error::CryptoFail,
3094 self.recv_count,
3095 self.is_server,
3096 &self.trace_id,
3097 );
3098
3099 return Err(e);
3100 },
3101 };
3102
3103 let aead_tag_len = aead.alg().tag_len();
3104
3105 packet::decrypt_hdr(&mut b, &mut hdr, aead).map_err(|e| {
3106 drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
3107 })?;
3108
3109 let pn = packet::decode_pkt_num(
3110 self.pkt_num_spaces[epoch].largest_rx_pkt_num,
3111 hdr.pkt_num,
3112 hdr.pkt_num_len,
3113 );
3114
3115 let pn_len = hdr.pkt_num_len;
3116
3117 trace!(
3118 "{} rx pkt {:?} len={} pn={} {}",
3119 self.trace_id,
3120 hdr,
3121 payload_len,
3122 pn,
3123 AddrTupleFmt(info.from, info.to)
3124 );
3125
3126 #[cfg(feature = "qlog")]
3127 let mut qlog_frames = vec![];
3128
3129 // Check for key update.
3130 let mut aead_next = None;
3131
3132 if self.handshake_confirmed &&
3133 hdr.ty != Type::ZeroRTT &&
3134 hdr.key_phase != self.key_phase
3135 {
3136 // Check if this packet arrived before key update.
3137 if let Some(key_update) = self.crypto_ctx[epoch]
3138 .key_update
3139 .as_ref()
3140 .and_then(|key_update| {
3141 (pn < key_update.pn_on_update).then_some(key_update)
3142 })
3143 {
3144 aead = &key_update.crypto_open;
3145 } else {
3146 trace!("{} peer-initiated key update", self.trace_id);
3147
3148 aead_next = Some((
3149 self.crypto_ctx[epoch]
3150 .crypto_open
3151 .as_ref()
3152 .unwrap()
3153 .derive_next_packet_key()?,
3154 self.crypto_ctx[epoch]
3155 .crypto_seal
3156 .as_ref()
3157 .unwrap()
3158 .derive_next_packet_key()?,
3159 ));
3160
3161 // `aead_next` is always `Some()` at this point, so the `unwrap()`
3162 // will never fail.
3163 aead = &aead_next.as_ref().unwrap().0;
3164 }
3165 }
3166
3167 let mut payload = packet::decrypt_pkt(
3168 &mut b,
3169 pn,
3170 pn_len,
3171 payload_len,
3172 aead,
3173 )
3174 .map_err(|e| {
3175 drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
3176 })?;
3177
3178 if self.pkt_num_spaces[epoch].recv_pkt_num.contains(pn) {
3179 trace!("{} ignored duplicate packet {}", self.trace_id, pn);
3180 return Err(Error::Done);
3181 }
3182
3183 // Packets with no frames are invalid.
3184 if payload.cap() == 0 {
3185 return Err(Error::InvalidPacket);
3186 }
3187
3188 // Now that we decrypted the packet, let's see if we can map it to an
3189 // existing path.
3190 let recv_pid = if hdr.ty == Type::Short && self.got_peer_conn_id {
3191 let pkt_dcid = ConnectionId::from_ref(&hdr.dcid);
3192 self.get_or_create_recv_path_id(recv_pid, &pkt_dcid, buf_len, info)?
3193 } else {
3194 // During handshake, we are on the initial path.
3195 self.paths.get_active_path_id()?
3196 };
3197
3198 // The key update is verified once a packet is successfully decrypted
3199 // using the new keys.
3200 if let Some((open_next, seal_next)) = aead_next {
3201 if !self.crypto_ctx[epoch]
3202 .key_update
3203 .as_ref()
3204 .is_none_or(|prev| prev.update_acked)
3205 {
3206 // Peer has updated keys twice without awaiting confirmation.
3207 return Err(Error::KeyUpdate);
3208 }
3209
3210 trace!("{} key update verified", self.trace_id);
3211
3212 let _ = self.crypto_ctx[epoch].crypto_seal.replace(seal_next);
3213
3214 let open_prev = self.crypto_ctx[epoch]
3215 .crypto_open
3216 .replace(open_next)
3217 .unwrap();
3218
3219 let recv_path = self.paths.get_mut(recv_pid)?;
3220
3221 self.crypto_ctx[epoch].key_update = Some(packet::KeyUpdate {
3222 crypto_open: open_prev,
3223 pn_on_update: pn,
3224 update_acked: false,
3225 timer: now + (recv_path.recovery.pto() * 3),
3226 });
3227
3228 self.key_phase = !self.key_phase;
3229
3230 qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
3231 let trigger = Some(
3232 qlog::events::security::KeyUpdateOrRetiredTrigger::RemoteUpdate,
3233 );
3234
3235 let ev_data_client =
3236 EventData::KeyUpdated(qlog::events::security::KeyUpdated {
3237 key_type:
3238 qlog::events::security::KeyType::Client1RttSecret,
3239 trigger: trigger.clone(),
3240 ..Default::default()
3241 });
3242
3243 q.add_event_data_with_instant(ev_data_client, now).ok();
3244
3245 let ev_data_server =
3246 EventData::KeyUpdated(qlog::events::security::KeyUpdated {
3247 key_type:
3248 qlog::events::security::KeyType::Server1RttSecret,
3249 trigger,
3250 ..Default::default()
3251 });
3252
3253 q.add_event_data_with_instant(ev_data_server, now).ok();
3254 });
3255 }
3256
3257 if !self.is_server && !self.got_peer_conn_id {
3258 if self.odcid.is_none() {
3259 self.odcid = Some(self.destination_id().into_owned());
3260 }
3261
3262 // Replace the randomly generated destination connection ID with
3263 // the one supplied by the server.
3264 self.set_initial_dcid(
3265 hdr.scid.clone(),
3266 self.peer_transport_params.stateless_reset_token,
3267 recv_pid,
3268 )?;
3269
3270 self.got_peer_conn_id = true;
3271 }
3272
3273 if self.is_server && !self.got_peer_conn_id {
3274 self.set_initial_dcid(hdr.scid.clone(), None, recv_pid)?;
3275
3276 if !self.did_retry {
3277 self.local_transport_params
3278 .original_destination_connection_id =
3279 Some(hdr.dcid.to_vec().into());
3280
3281 self.encode_transport_params()?;
3282 }
3283
3284 self.got_peer_conn_id = true;
3285 }
3286
3287 // To avoid sending an ACK in response to an ACK-only packet, we need
3288 // to keep track of whether this packet contains any frame other than
3289 // ACK and PADDING.
3290 let mut ack_elicited = false;
3291
3292 // Process packet payload. If a frame cannot be processed, store the
3293 // error and stop further packet processing.
3294 let mut frame_processing_err = None;
3295
3296 // To know if the peer migrated the connection, we need to keep track
3297        // of whether this is a non-probing packet.
3298 let mut probing = true;
3299
3300 // Process packet payload.
3301 while payload.cap() > 0 {
3302 let frame = frame::Frame::from_bytes(&mut payload, hdr.ty)?;
3303
3304 qlog_with_type!(QLOG_PACKET_RX, self.qlog, _q, {
3305 qlog_frames.push(frame.to_qlog());
3306 });
3307
3308 if frame.ack_eliciting() {
3309 ack_elicited = true;
3310 }
3311
3312 if !frame.probing() {
3313 probing = false;
3314 }
3315
3316 if let Err(e) = self.process_frame(frame, &hdr, recv_pid, epoch, now)
3317 {
3318 frame_processing_err = Some(e);
3319 break;
3320 }
3321 }
3322
3323 qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
3324 let packet_size = b.len();
3325
3326 let qlog_pkt_hdr = qlog::events::quic::PacketHeader::with_type(
3327 hdr.ty.to_qlog(),
3328 Some(pn),
3329 Some(hdr.version),
3330 Some(&hdr.scid),
3331 Some(&hdr.dcid),
3332 );
3333
3334 let qlog_raw_info = RawInfo {
3335 length: Some(packet_size as u64),
3336 payload_length: Some(payload_len as u64),
3337 data: None,
3338 };
3339
3340 let ev_data =
3341 EventData::PacketReceived(qlog::events::quic::PacketReceived {
3342 header: qlog_pkt_hdr,
3343 frames: Some(qlog_frames),
3344 raw: Some(qlog_raw_info),
3345 ..Default::default()
3346 });
3347
3348 q.add_event_data_with_instant(ev_data, now).ok();
3349 });
3350
3351 qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
3352 let recv_path = self.paths.get_mut(recv_pid)?;
3353 recv_path.recovery.maybe_qlog(q, now);
3354 });
3355
3356 if let Some(e) = frame_processing_err {
3357 // Any frame error is terminal, so now just return.
3358 return Err(e);
3359 }
3360
3361 // Only log the remote transport parameters once the connection is
3362 // established (i.e. after frames have been fully parsed) and only
3363 // once per connection.
3364 if self.is_established() {
3365 qlog_with_type!(QLOG_PARAMS_SET, self.qlog, q, {
3366 if !self.qlog.logged_peer_params {
3367 let ev_data = self
3368 .peer_transport_params
3369 .to_qlog(TransportOwner::Remote, self.handshake.cipher());
3370
3371 q.add_event_data_with_instant(ev_data, now).ok();
3372
3373 self.qlog.logged_peer_params = true;
3374 }
3375 });
3376 }
3377
3378 // Process acked frames. Note that several packets from several paths
3379 // might have been acked by the received packet.
3380 for (_, p) in self.paths.iter_mut() {
3381 while let Some(acked) = p.recovery.next_acked_frame(epoch) {
3382 match acked {
3383 frame::Frame::Ping {
3384 mtu_probe: Some(mtu_probe),
3385 } =>
3386 if let Some(pmtud) = p.pmtud.as_mut() {
3387 trace!(
3388 "{} pmtud probe acked; probe size {:?}",
3389 self.trace_id,
3390 mtu_probe
3391 );
3392
3393 // Ensure the probe is within the supported MTU range
3394 // before updating the max datagram size
3395 if let Some(current_mtu) =
3396 pmtud.successful_probe(mtu_probe)
3397 {
3398 qlog_with_type!(
3399 EventType::ConnectivityEventType(
3400 ConnectivityEventType::MtuUpdated
3401 ),
3402 self.qlog,
3403 q,
3404 {
3405 let pmtu_data = EventData::MtuUpdated(
3406 qlog::events::connectivity::MtuUpdated {
3407 old: Some(
3408 p.recovery.max_datagram_size()
3409 as u16,
3410 ),
3411 new: current_mtu as u16,
3412 done: Some(true),
3413 },
3414 );
3415
3416 q.add_event_data_with_instant(
3417 pmtu_data, now,
3418 )
3419 .ok();
3420 }
3421 );
3422
3423 p.recovery
3424 .pmtud_update_max_datagram_size(current_mtu);
3425 }
3426 },
3427
3428 frame::Frame::ACK { ranges, .. } => {
3429 // Stop acknowledging packets less than or equal to the
3430 // largest acknowledged in the sent ACK frame that, in
3431 // turn, got acked.
3432 if let Some(largest_acked) = ranges.last() {
3433 self.pkt_num_spaces[epoch]
3434 .recv_pkt_need_ack
3435 .remove_until(largest_acked);
3436 }
3437 },
3438
3439 frame::Frame::CryptoHeader { offset, length } => {
3440 self.crypto_ctx[epoch]
3441 .crypto_stream
3442 .send
3443 .ack_and_drop(offset, length);
3444 },
3445
3446 frame::Frame::StreamHeader {
3447 stream_id,
3448 offset,
3449 length,
3450 ..
3451 } => {
3452 // Update tx_buffered and emit qlog before checking if the
3453 // stream still exists. The client does need to ACK
3454 // frames that were received after the client sends a
3455 // ResetStream.
3456 self.tx_buffered =
3457 self.tx_buffered.saturating_sub(length);
3458
3459 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
3460 let ev_data = EventData::DataMoved(
3461 qlog::events::quic::DataMoved {
3462 stream_id: Some(stream_id),
3463 offset: Some(offset),
3464 length: Some(length as u64),
3465 from: Some(DataRecipient::Transport),
3466 to: Some(DataRecipient::Dropped),
3467 ..Default::default()
3468 },
3469 );
3470
3471 q.add_event_data_with_instant(ev_data, now).ok();
3472 });
3473
3474 let stream = match self.streams.get_mut(stream_id) {
3475 Some(v) => v,
3476
3477 None => continue,
3478 };
3479
3480 stream.send.ack_and_drop(offset, length);
3481
3482 let priority_key = Arc::clone(&stream.priority_key);
3483
3484 // Only collect the stream if it is complete and not
3485 // readable or writable.
3486 //
3487 // If it is readable, it will get collected when
3488 // stream_recv() is next used.
3489 //
3490 // If it is writable, it might mean that the stream
3491 // has been stopped by the peer (i.e. a STOP_SENDING
3492 // frame is received), in which case before collecting
3493 // the stream we will need to propagate the
3494 // `StreamStopped` error to the application. It will
3495 // instead get collected when one of stream_capacity(),
3496 // stream_writable(), stream_send(), ... is next called.
3497 //
                        // Note that we can't use `is_writable()` here because
                        // it returns false if the stream is stopped. Instead,
                        // since the stream is marked as writable when a
                        // STOP_SENDING frame is received, we check the writable
                        // queue directly.
3503 let is_writable = priority_key.writable.is_linked() &&
3504 // Ensure that the stream is actually stopped.
3505 stream.send.is_stopped();
3506
3507 let is_complete = stream.is_complete();
3508 let is_readable = stream.is_readable();
3509
3510 if is_complete && !is_readable && !is_writable {
3511 let local = stream.local;
3512 self.streams.collect(stream_id, local);
3513 }
3514 },
3515
3516 frame::Frame::HandshakeDone => {
3517 // Explicitly set this to true, so that if the frame was
3518 // already scheduled for retransmission, it is aborted.
3519 self.handshake_done_sent = true;
3520
3521 self.handshake_done_acked = true;
3522 },
3523
3524 frame::Frame::ResetStream { stream_id, .. } => {
3525 let stream = match self.streams.get_mut(stream_id) {
3526 Some(v) => v,
3527
3528 None => continue,
3529 };
3530
3531 let priority_key = Arc::clone(&stream.priority_key);
3532
3533 // Only collect the stream if it is complete and not
3534 // readable or writable.
3535 //
3536 // If it is readable, it will get collected when
3537 // stream_recv() is next used.
3538 //
3539 // If it is writable, it might mean that the stream
3540 // has been stopped by the peer (i.e. a STOP_SENDING
3541 // frame is received), in which case before collecting
3542 // the stream we will need to propagate the
3543 // `StreamStopped` error to the application. It will
3544 // instead get collected when one of stream_capacity(),
3545 // stream_writable(), stream_send(), ... is next called.
3546 //
                        // Note that we can't use `is_writable()` here because
                        // it returns false if the stream is stopped. Instead,
                        // since the stream is marked as writable when a
                        // STOP_SENDING frame is received, we check the writable
                        // queue directly.
3552 let is_writable = priority_key.writable.is_linked() &&
3553 // Ensure that the stream is actually stopped.
3554 stream.send.is_stopped();
3555
3556 let is_complete = stream.is_complete();
3557 let is_readable = stream.is_readable();
3558
3559 if is_complete && !is_readable && !is_writable {
3560 let local = stream.local;
3561 self.streams.collect(stream_id, local);
3562 }
3563 },
3564
3565 _ => (),
3566 }
3567 }
3568 }
3569
3570 // Now that we processed all the frames, if there is a path that has no
3571 // Destination CID, try to allocate one.
3572 let no_dcid = self
3573 .paths
3574 .iter_mut()
3575 .filter(|(_, p)| p.active_dcid_seq.is_none());
3576
3577 for (pid, p) in no_dcid {
3578 if self.ids.zero_length_dcid() {
3579 p.active_dcid_seq = Some(0);
3580 continue;
3581 }
3582
3583 let dcid_seq = match self.ids.lowest_available_dcid_seq() {
3584 Some(seq) => seq,
3585 None => break,
3586 };
3587
3588 self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
3589
3590 p.active_dcid_seq = Some(dcid_seq);
3591 }
3592
3593 // We only record the time of arrival of the largest packet number
3594 // that still needs to be acked, to be used for ACK delay calculation.
3595 if self.pkt_num_spaces[epoch].recv_pkt_need_ack.last() < Some(pn) {
3596 self.pkt_num_spaces[epoch].largest_rx_pkt_time = now;
3597 }
3598
3599 self.pkt_num_spaces[epoch].recv_pkt_num.insert(pn);
3600
3601 self.pkt_num_spaces[epoch].recv_pkt_need_ack.push_item(pn);
3602
3603 self.pkt_num_spaces[epoch].ack_elicited =
3604 cmp::max(self.pkt_num_spaces[epoch].ack_elicited, ack_elicited);
3605
3606 self.pkt_num_spaces[epoch].largest_rx_pkt_num =
3607 cmp::max(self.pkt_num_spaces[epoch].largest_rx_pkt_num, pn);
3608
3609 if !probing {
3610 self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num = cmp::max(
3611 self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num,
3612 pn,
3613 );
3614
            // Did the peer migrate to another path?
3616 let active_path_id = self.paths.get_active_path_id()?;
3617
3618 if self.is_server &&
3619 recv_pid != active_path_id &&
3620 self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num == pn
3621 {
3622 self.on_peer_migrated(recv_pid, self.disable_dcid_reuse, now)?;
3623 }
3624 }
3625
3626 if let Some(idle_timeout) = self.idle_timeout() {
3627 self.idle_timer = Some(now + idle_timeout);
3628 }
3629
3630 // Update send capacity.
3631 self.update_tx_cap();
3632
3633 self.recv_count += 1;
3634 self.paths.get_mut(recv_pid)?.recv_count += 1;
3635
3636 let read = b.off() + aead_tag_len;
3637
3638 self.recv_bytes += read as u64;
3639 self.paths.get_mut(recv_pid)?.recv_bytes += read as u64;
3640
        // A Handshake packet has been received from the client and has been
3642 // successfully processed, so we can drop the initial state and consider
3643 // the client's address to be verified.
3644 if self.is_server && hdr.ty == Type::Handshake {
3645 self.drop_epoch_state(packet::Epoch::Initial, now);
3646
3647 self.paths.get_mut(recv_pid)?.verified_peer_address = true;
3648 }
3649
3650 self.ack_eliciting_sent = false;
3651
3652 Ok(read)
3653 }
3654
3655 /// Writes a single QUIC packet to be sent to the peer.
3656 ///
3657 /// On success the number of bytes written to the output buffer is
3658 /// returned, or [`Done`] if there was nothing to write.
3659 ///
3660 /// The application should call `send()` multiple times until [`Done`] is
3661 /// returned, indicating that there are no more packets to send. It is
3662 /// recommended that `send()` be called in the following cases:
3663 ///
3664 /// * When the application receives QUIC packets from the peer (that is,
3665 /// any time [`recv()`] is also called).
3666 ///
3667 /// * When the connection timer expires (that is, any time [`on_timeout()`]
3668 /// is also called).
3669 ///
3670 /// * When the application sends data to the peer (for example, any time
3671 /// [`stream_send()`] or [`stream_shutdown()`] are called).
3672 ///
    /// * When the application receives data from the peer (for example, any
3674 /// time [`stream_recv()`] is called).
3675 ///
3676 /// Once [`is_draining()`] returns `true`, it is no longer necessary to call
3677 /// `send()` and all calls will return [`Done`].
3678 ///
3679 /// [`Done`]: enum.Error.html#variant.Done
3680 /// [`recv()`]: struct.Connection.html#method.recv
3681 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
3682 /// [`stream_send()`]: struct.Connection.html#method.stream_send
3683 /// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
3684 /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
3685 /// [`is_draining()`]: struct.Connection.html#method.is_draining
3686 ///
3687 /// ## Examples:
3688 ///
3689 /// ```no_run
3690 /// # let mut out = [0; 512];
3691 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
3692 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
3693 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
3694 /// # let peer = "127.0.0.1:1234".parse().unwrap();
3695 /// # let local = socket.local_addr().unwrap();
3696 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
3697 /// loop {
3698 /// let (write, send_info) = match conn.send(&mut out) {
3699 /// Ok(v) => v,
3700 ///
3701 /// Err(quiche::Error::Done) => {
3702 /// // Done writing.
3703 /// break;
3704 /// },
3705 ///
3706 /// Err(e) => {
3707 /// // An error occurred, handle it.
3708 /// break;
3709 /// },
3710 /// };
3711 ///
3712 /// socket.send_to(&out[..write], &send_info.to).unwrap();
3713 /// }
3714 /// # Ok::<(), quiche::Error>(())
3715 /// ```
3716 pub fn send(&mut self, out: &mut [u8]) -> Result<(usize, SendInfo)> {
3717 self.send_on_path(out, None, None)
3718 }
3719
3720 /// Writes a single QUIC packet to be sent to the peer from the specified
3721 /// local address `from` to the destination address `to`.
3722 ///
3723 /// The behavior of this method differs depending on the value of the `from`
3724 /// and `to` parameters:
3725 ///
    /// * If both are `Some`, then the method only considers the 4-tuple
    ///   (`from`, `to`). The application can monitor the availability of the
    ///   4-tuple either by monitoring [`path_event_next()`] events or by
    ///   relying on the [`paths_iter()`] method. If the provided 4-tuple does
    ///   not exist on the connection (anymore), it returns an [`InvalidState`].
3731 ///
    /// * If `from` is `Some` and `to` is `None`, then the method only
    ///   considers sending packets on paths having `from` as the local
    ///   address.
    ///
    /// * If `to` is `Some` and `from` is `None`, then the method only
    ///   considers sending packets on paths having `to` as the peer address.
3737 ///
3738 /// * If both are `None`, all available paths are considered.
3739 ///
3740 /// On success the number of bytes written to the output buffer is
3741 /// returned, or [`Done`] if there was nothing to write.
3742 ///
3743 /// The application should call `send_on_path()` multiple times until
3744 /// [`Done`] is returned, indicating that there are no more packets to
3745 /// send. It is recommended that `send_on_path()` be called in the
3746 /// following cases:
3747 ///
3748 /// * When the application receives QUIC packets from the peer (that is,
3749 /// any time [`recv()`] is also called).
3750 ///
3751 /// * When the connection timer expires (that is, any time [`on_timeout()`]
3752 /// is also called).
3753 ///
    /// * When the application sends data to the peer (for example, any time
3755 /// [`stream_send()`] or [`stream_shutdown()`] are called).
3756 ///
    /// * When the application receives data from the peer (for example, any
3758 /// time [`stream_recv()`] is called).
3759 ///
3760 /// Once [`is_draining()`] returns `true`, it is no longer necessary to call
3761 /// `send_on_path()` and all calls will return [`Done`].
3762 ///
3763 /// [`Done`]: enum.Error.html#variant.Done
    /// [`InvalidState`]: enum.Error.html#variant.InvalidState
3765 /// [`recv()`]: struct.Connection.html#method.recv
3766 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
3767 /// [`stream_send()`]: struct.Connection.html#method.stream_send
3768 /// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
3769 /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
3770 /// [`path_event_next()`]: struct.Connection.html#method.path_event_next
3771 /// [`paths_iter()`]: struct.Connection.html#method.paths_iter
3772 /// [`is_draining()`]: struct.Connection.html#method.is_draining
3773 ///
3774 /// ## Examples:
3775 ///
3776 /// ```no_run
3777 /// # let mut out = [0; 512];
3778 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
3779 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
3780 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
3781 /// # let peer = "127.0.0.1:1234".parse().unwrap();
3782 /// # let local = socket.local_addr().unwrap();
3783 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
3784 /// loop {
3785 /// let (write, send_info) = match conn.send_on_path(&mut out, Some(local), Some(peer)) {
3786 /// Ok(v) => v,
3787 ///
3788 /// Err(quiche::Error::Done) => {
3789 /// // Done writing.
3790 /// break;
3791 /// },
3792 ///
3793 /// Err(e) => {
3794 /// // An error occurred, handle it.
3795 /// break;
3796 /// },
3797 /// };
3798 ///
3799 /// socket.send_to(&out[..write], &send_info.to).unwrap();
3800 /// }
3801 /// # Ok::<(), quiche::Error>(())
3802 /// ```
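    ///
    /// To restrict sending to a subset of paths, pass `Some` for only one of
    /// the addresses: for instance, `conn.send_on_path(&mut out, Some(local),
    /// None)` considers all paths that use `local` as the local address.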
3803 pub fn send_on_path(
3804 &mut self, out: &mut [u8], from: Option<SocketAddr>,
3805 to: Option<SocketAddr>,
3806 ) -> Result<(usize, SendInfo)> {
3807 if out.is_empty() {
3808 return Err(Error::BufferTooShort);
3809 }
3810
3811 if self.is_closed() || self.is_draining() {
3812 return Err(Error::Done);
3813 }
3814
3815 let now = Instant::now();
3816
3817 if self.local_error.is_none() {
3818 self.do_handshake(now)?;
3819 }
3820
        // Forwarding the error value here could confuse
        // applications, as they may not expect to get a `recv()`
        // error when calling `send()`.
3824 //
3825 // We simply fall-through to sending packets, which should
3826 // take care of terminating the connection as needed.
3827 let _ = self.process_undecrypted_0rtt_packets();
3828
3829 // There's no point in trying to send a packet if the Initial secrets
3830 // have not been derived yet, so return early.
3831 if !self.derived_initial_secrets {
3832 return Err(Error::Done);
3833 }
3834
3835 let mut has_initial = false;
3836
3837 let mut done = 0;
3838
3839 // Limit output packet size to respect the sender and receiver's
3840 // maximum UDP payload size limit.
3841 let mut left = cmp::min(out.len(), self.max_send_udp_payload_size());
3842
3843 let send_pid = match (from, to) {
3844 (Some(f), Some(t)) => self
3845 .paths
3846 .path_id_from_addrs(&(f, t))
3847 .ok_or(Error::InvalidState)?,
3848
3849 _ => self.get_send_path_id(from, to)?,
3850 };
3851
3852 let send_path = self.paths.get_mut(send_pid)?;
3853
3854 // Update max datagram size to allow path MTU discovery probe to be sent.
3855 if let Some(pmtud) = send_path.pmtud.as_mut() {
3856 if pmtud.should_probe() {
3857 let size = if self.handshake_confirmed || self.handshake_completed
3858 {
3859 pmtud.get_probe_size()
3860 } else {
3861 pmtud.get_current_mtu()
3862 };
3863
3864 send_path.recovery.pmtud_update_max_datagram_size(size);
3865
3866 left =
3867 cmp::min(out.len(), send_path.recovery.max_datagram_size());
3868 }
3869 }
3870
3871 // Limit data sent by the server based on the amount of data received
3872 // from the client before its address is validated.
3873 if !send_path.verified_peer_address && self.is_server {
3874 left = cmp::min(left, send_path.max_send_bytes);
3875 }
3876
3877 // Generate coalesced packets.
3878 while left > 0 {
3879 let (ty, written) = match self.send_single(
3880 &mut out[done..done + left],
3881 send_pid,
3882 has_initial,
3883 now,
3884 ) {
3885 Ok(v) => v,
3886
3887 Err(Error::BufferTooShort) | Err(Error::Done) => break,
3888
3889 Err(e) => return Err(e),
3890 };
3891
3892 done += written;
3893 left -= written;
3894
3895 match ty {
3896 Type::Initial => has_initial = true,
3897
                // No more packets can be coalesced after a 1-RTT packet.
3899 Type::Short => break,
3900
3901 _ => (),
3902 };
3903
3904 // When sending multiple PTO probes, don't coalesce them together,
3905 // so they are sent on separate UDP datagrams.
3906 if let Ok(epoch) = ty.to_epoch() {
3907 if self.paths.get_mut(send_pid)?.recovery.loss_probes(epoch) > 0 {
3908 break;
3909 }
3910 }
3911
3912 // Don't coalesce packets that must go on different paths.
3913 if !(from.is_some() && to.is_some()) &&
3914 self.get_send_path_id(from, to)? != send_pid
3915 {
3916 break;
3917 }
3918 }
3919
3920 if done == 0 {
3921 self.last_tx_data = self.tx_data;
3922
3923 return Err(Error::Done);
3924 }
3925
3926 if has_initial && left > 0 && done < MIN_CLIENT_INITIAL_LEN {
3927 let pad_len = cmp::min(left, MIN_CLIENT_INITIAL_LEN - done);
3928
3929 // Fill padding area with null bytes, to avoid leaking information
3930 // in case the application reuses the packet buffer.
3931 out[done..done + pad_len].fill(0);
3932
3933 done += pad_len;
3934 }
3935
3936 let send_path = self.paths.get(send_pid)?;
3937
3938 let info = SendInfo {
3939 from: send_path.local_addr(),
3940 to: send_path.peer_addr(),
3941
3942 at: send_path.recovery.get_packet_send_time(now),
3943 };
3944
3945 Ok((done, info))
3946 }
3947
3948 fn send_single(
3949 &mut self, out: &mut [u8], send_pid: usize, has_initial: bool,
3950 now: Instant,
3951 ) -> Result<(Type, usize)> {
3952 if out.is_empty() {
3953 return Err(Error::BufferTooShort);
3954 }
3955
3956 if self.is_draining() {
3957 return Err(Error::Done);
3958 }
3959
3960 let is_closing = self.local_error.is_some();
3961
3962 let out_len = out.len();
3963
3964 let mut b = octets::OctetsMut::with_slice(out);
3965
3966 let pkt_type = self.write_pkt_type(send_pid)?;
3967
3968 let max_dgram_len = if !self.dgram_send_queue.is_empty() {
3969 self.dgram_max_writable_len()
3970 } else {
3971 None
3972 };
3973
3974 let epoch = pkt_type.to_epoch()?;
3975 let pkt_space = &mut self.pkt_num_spaces[epoch];
3976 let crypto_ctx = &mut self.crypto_ctx[epoch];
3977
3978 // Process lost frames. There might be several paths having lost frames.
3979 for (_, p) in self.paths.iter_mut() {
3980 while let Some(lost) = p.recovery.next_lost_frame(epoch) {
3981 match lost {
3982 frame::Frame::CryptoHeader { offset, length } => {
3983 crypto_ctx.crypto_stream.send.retransmit(offset, length);
3984
3985 self.stream_retrans_bytes += length as u64;
3986 p.stream_retrans_bytes += length as u64;
3987
3988 self.retrans_count += 1;
3989 p.retrans_count += 1;
3990 },
3991
3992 frame::Frame::StreamHeader {
3993 stream_id,
3994 offset,
3995 length,
3996 fin,
3997 } => {
3998 let stream = match self.streams.get_mut(stream_id) {
3999 // Only retransmit data if the stream is not closed
4000 // or stopped.
4001 Some(v) if !v.send.is_stopped() => v,
4002
4003 // Data on a closed stream will not be retransmitted
4004 // or acked after it is declared lost, so update
4005 // tx_buffered and qlog.
4006 _ => {
4007 self.tx_buffered =
4008 self.tx_buffered.saturating_sub(length);
4009
4010 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
4011 let ev_data = EventData::DataMoved(
4012 qlog::events::quic::DataMoved {
4013 stream_id: Some(stream_id),
4014 offset: Some(offset),
4015 length: Some(length as u64),
4016 from: Some(DataRecipient::Transport),
4017 to: Some(DataRecipient::Dropped),
4018 ..Default::default()
4019 },
4020 );
4021
4022 q.add_event_data_with_instant(ev_data, now)
4023 .ok();
4024 });
4025
4026 continue;
4027 },
4028 };
4029
4030 let was_flushable = stream.is_flushable();
4031
4032 let empty_fin = length == 0 && fin;
4033
4034 stream.send.retransmit(offset, length);
4035
                        // If the stream is now flushable, push it to the
4037 // flushable queue, but only if it wasn't already
4038 // queued.
4039 //
4040 // Consider the stream flushable also when we are
4041 // sending a zero-length frame that has the fin flag
4042 // set.
4043 if (stream.is_flushable() || empty_fin) && !was_flushable
4044 {
4045 let priority_key = Arc::clone(&stream.priority_key);
4046 self.streams.insert_flushable(&priority_key);
4047 }
4048
4049 self.stream_retrans_bytes += length as u64;
4050 p.stream_retrans_bytes += length as u64;
4051
4052 self.retrans_count += 1;
4053 p.retrans_count += 1;
4054 },
4055
4056 frame::Frame::ACK { .. } => {
4057 pkt_space.ack_elicited = true;
4058 },
4059
4060 frame::Frame::ResetStream {
4061 stream_id,
4062 error_code,
4063 final_size,
4064 } => {
4065 self.streams
4066 .insert_reset(stream_id, error_code, final_size);
4067 },
4068
4069 frame::Frame::StopSending {
4070 stream_id,
4071 error_code,
4072 } =>
                    // We only need to retransmit the STOP_SENDING frame if
                    // the stream is still active and not FIN'd. Even if the
                    // packet was lost, there is no need to retransmit when the
                    // application already has the final size at this point.
4077 if let Some(stream) = self.streams.get(stream_id) {
4078 if !stream.recv.is_fin() {
4079 self.streams
4080 .insert_stopped(stream_id, error_code);
4081 }
4082 },
4083
4084 // Retransmit HANDSHAKE_DONE only if it hasn't been acked at
4085 // least once already.
4086 frame::Frame::HandshakeDone if !self.handshake_done_acked => {
4087 self.handshake_done_sent = false;
4088 },
4089
4090 frame::Frame::MaxStreamData { stream_id, .. } => {
4091 if self.streams.get(stream_id).is_some() {
4092 self.streams.insert_almost_full(stream_id);
4093 }
4094 },
4095
4096 frame::Frame::MaxData { .. } => {
4097 self.should_send_max_data = true;
4098 },
4099
4100 frame::Frame::NewConnectionId { seq_num, .. } => {
4101 self.ids.mark_advertise_new_scid_seq(seq_num, true);
4102 },
4103
4104 frame::Frame::RetireConnectionId { seq_num } => {
4105 self.ids.mark_retire_dcid_seq(seq_num, true)?;
4106 },
4107
4108 frame::Frame::Ping {
4109 mtu_probe: Some(failed_probe),
4110 } =>
4111 if let Some(pmtud) = p.pmtud.as_mut() {
4112 trace!("pmtud probe dropped: {failed_probe}");
4113 pmtud.failed_probe(failed_probe);
4114 },
4115
4116 _ => (),
4117 }
4118 }
4119 }
4120 self.check_tx_buffered_invariant();
4121
4122 let is_app_limited = self.delivery_rate_check_if_app_limited();
4123 let n_paths = self.paths.len();
4124 let path = self.paths.get_mut(send_pid)?;
4125 let flow_control = &mut self.flow_control;
4126 let pkt_space = &mut self.pkt_num_spaces[epoch];
4127 let crypto_ctx = &mut self.crypto_ctx[epoch];
4128 let pkt_num_manager = &mut self.pkt_num_manager;
4129
4130 let mut left = if let Some(pmtud) = path.pmtud.as_mut() {
4131 // Limit output buffer size by estimated path MTU.
4132 cmp::min(pmtud.get_current_mtu(), b.cap())
4133 } else {
4134 b.cap()
4135 };
4136
4137 if pkt_num_manager.should_skip_pn(self.handshake_completed) {
4138 pkt_num_manager.set_skip_pn(Some(self.next_pkt_num));
4139 self.next_pkt_num += 1;
4140 };
4141 let pn = self.next_pkt_num;
4142
4143 let largest_acked_pkt =
4144 path.recovery.get_largest_acked_on_epoch(epoch).unwrap_or(0);
4145 let pn_len = packet::pkt_num_len(pn, largest_acked_pkt);
4146
4147 // The AEAD overhead at the current encryption level.
4148 let crypto_overhead = crypto_ctx.crypto_overhead().ok_or(Error::Done)?;
4149
4150 let dcid_seq = path.active_dcid_seq.ok_or(Error::OutOfIdentifiers)?;
4151
4152 let dcid =
4153 ConnectionId::from_ref(self.ids.get_dcid(dcid_seq)?.cid.as_ref());
4154
4155 let scid = if let Some(scid_seq) = path.active_scid_seq {
4156 ConnectionId::from_ref(self.ids.get_scid(scid_seq)?.cid.as_ref())
4157 } else if pkt_type == Type::Short {
4158 ConnectionId::default()
4159 } else {
4160 return Err(Error::InvalidState);
4161 };
4162
4163 let hdr = Header {
4164 ty: pkt_type,
4165
4166 version: self.version,
4167
4168 dcid,
4169 scid,
4170
4171 pkt_num: 0,
4172 pkt_num_len: pn_len,
4173
4174 // Only clone token for Initial packets, as other packets don't have
4175 // this field (Retry doesn't count, as it's not encoded as part of
4176 // this code path).
4177 token: if pkt_type == Type::Initial {
4178 self.token.clone()
4179 } else {
4180 None
4181 },
4182
4183 versions: None,
4184 key_phase: self.key_phase,
4185 };
4186
4187 hdr.to_bytes(&mut b)?;
4188
4189 let hdr_trace = if log::max_level() == log::LevelFilter::Trace {
4190 Some(format!("{hdr:?}"))
4191 } else {
4192 None
4193 };
4194
4195 let hdr_ty = hdr.ty;
4196
4197 #[cfg(feature = "qlog")]
4198 let qlog_pkt_hdr = self.qlog.streamer.as_ref().map(|_q| {
4199 qlog::events::quic::PacketHeader::with_type(
4200 hdr.ty.to_qlog(),
4201 Some(pn),
4202 Some(hdr.version),
4203 Some(&hdr.scid),
4204 Some(&hdr.dcid),
4205 )
4206 });
4207
        // Calculate the space required for the packet, including the header,
4209 // the payload length, the packet number and the AEAD overhead.
4210 let mut overhead = b.off() + pn_len + crypto_overhead;
4211
4212 // We assume that the payload length, which is only present in long
4213 // header packets, can always be encoded with a 2-byte varint.
4214 if pkt_type != Type::Short {
4215 overhead += PAYLOAD_LENGTH_LEN;
4216 }
4217
4218 // Make sure we have enough space left for the packet overhead.
4219 match left.checked_sub(overhead) {
4220 Some(v) => left = v,
4221
4222 None => {
4223 // We can't send more because there isn't enough space available
4224 // in the output buffer.
4225 //
                // This usually happens when we try to send a new packet but
                // fail because cwnd is almost full. In that case app_limited
                // is set to false here to make cwnd grow when an ACK is received.
4229 path.recovery.update_app_limited(false);
4230 return Err(Error::Done);
4231 },
4232 }
4233
4234 // Make sure there is enough space for the minimum payload length.
4235 if left < PAYLOAD_MIN_LEN {
4236 path.recovery.update_app_limited(false);
4237 return Err(Error::Done);
4238 }
4239
4240 let mut frames: SmallVec<[frame::Frame; 1]> = SmallVec::new();
4241
4242 let mut ack_eliciting = false;
4243 let mut in_flight = false;
4244 let mut is_pmtud_probe = false;
4245 let mut has_data = false;
4246
        // Whether or not we should explicitly elicit an ACK via a PING frame,
        // in case we don't otherwise elicit one implicitly.
4249 let ack_elicit_required = path.recovery.should_elicit_ack(epoch);
4250
4251 let header_offset = b.off();
4252
4253 // Reserve space for payload length in advance. Since we don't yet know
4254 // what the final length will be, we reserve 2 bytes in all cases.
4255 //
4256 // Only long header packets have an explicit length field.
4257 if pkt_type != Type::Short {
4258 b.skip(PAYLOAD_LENGTH_LEN)?;
4259 }
4260
4261 packet::encode_pkt_num(pn, pn_len, &mut b)?;
4262
4263 let payload_offset = b.off();
4264
4265 let cwnd_available =
4266 path.recovery.cwnd_available().saturating_sub(overhead);
4267
4268 let left_before_packing_ack_frame = left;
4269
4270 // Create ACK frame.
4271 //
4272 // When we need to explicitly elicit an ACK via PING later, go ahead and
4273 // generate an ACK (if there's anything to ACK) since we're going to
        // send a packet with PING anyway, even if we haven't received anything
4275 // ACK eliciting.
4276 if pkt_space.recv_pkt_need_ack.len() > 0 &&
4277 (pkt_space.ack_elicited || ack_elicit_required) &&
4278 (!is_closing ||
4279 (pkt_type == Type::Handshake &&
4280 self.local_error
4281 .as_ref()
4282 .is_some_and(|le| le.is_app))) &&
4283 path.active()
4284 {
4285 #[cfg(not(feature = "fuzzing"))]
4286 let ack_delay = pkt_space.largest_rx_pkt_time.elapsed();
4287
4288 #[cfg(not(feature = "fuzzing"))]
4289 let ack_delay = ack_delay.as_micros() as u64 /
4290 2_u64
4291 .pow(self.local_transport_params.ack_delay_exponent as u32);
4292
4293 // pseudo-random reproducible ack delays when fuzzing
4294 #[cfg(feature = "fuzzing")]
4295 let ack_delay = rand::rand_u8() as u64 + 1;
4296
4297 let frame = frame::Frame::ACK {
4298 ack_delay,
4299 ranges: pkt_space.recv_pkt_need_ack.clone(),
4300 ecn_counts: None, // sending ECN is not supported at this time
4301 };
4302
4303 // When a PING frame needs to be sent, avoid sending the ACK if
4304 // there is not enough cwnd available for both (note that PING
4305 // frames are always 1 byte, so we just need to check that the
4306 // ACK's length is lower than cwnd).
4307 if pkt_space.ack_elicited || frame.wire_len() < cwnd_available {
4308 // ACK-only packets are not congestion controlled so ACKs must
4309 // be bundled considering the buffer capacity only, and not the
4310 // available cwnd.
4311 if push_frame_to_pkt!(b, frames, frame, left) {
4312 pkt_space.ack_elicited = false;
4313 }
4314 }
4315 }
4316
4317 // Limit output packet size by congestion window size.
4318 left = cmp::min(
4319 left,
4320 // Bytes consumed by ACK frames.
4321 cwnd_available.saturating_sub(left_before_packing_ack_frame - left),
4322 );
4323
4324 let mut challenge_data = None;
4325
4326 let active_path = self.paths.get_active_mut()?;
4327
4328 if pkt_type == Type::Short {
4329 // Create PMTUD probe.
4330 //
4331 // In order to send a PMTUD probe the current `left` value, which was
4332 // already limited by the current PMTU measure, needs to be ignored,
4333 // but the outgoing packet still needs to be limited by
4334 // the output buffer size, as well as the congestion
4335 // window.
4336 //
4337 // In addition, the PMTUD probe is only generated when the handshake
4338 // is confirmed, to avoid interfering with the handshake
4339 // (e.g. due to the anti-amplification limits).
4340 let should_probe_pmtu = active_path.should_send_pmtu_probe(
4341 self.handshake_confirmed,
4342 self.handshake_completed,
4343 out_len,
4344 is_closing,
4345 frames.is_empty(),
4346 );
4347
4348 if should_probe_pmtu {
4349 if let Some(pmtud) = active_path.pmtud.as_mut() {
4350 let probe_size = pmtud.get_probe_size();
4351 trace!(
4352 "{} sending pmtud probe pmtu_probe={} estimated_pmtu={}",
4353 self.trace_id,
4354 probe_size,
4355 pmtud.get_current_mtu(),
4356 );
4357
4358 left = probe_size;
4359
4360 match left.checked_sub(overhead) {
4361 Some(v) => left = v,
4362
4363 None => {
4364 // We can't send more because there isn't enough space
4365 // available in the output buffer.
4366 //
4367 // This usually happens when we try to send a new
4368 // packet but failed
4369 // because cwnd is almost full.
4370 //
4371 // In such case app_limited is set to false here to
4372 // make cwnd grow when ACK
4373 // is received.
4374 active_path.recovery.update_app_limited(false);
4375 return Err(Error::Done);
4376 },
4377 }
4378
4379 let frame = frame::Frame::Padding {
4380 len: probe_size - overhead - 1,
4381 };
4382
4383 if push_frame_to_pkt!(b, frames, frame, left) {
4384 let frame = frame::Frame::Ping {
4385 mtu_probe: Some(probe_size),
4386 };
4387
4388 if push_frame_to_pkt!(b, frames, frame, left) {
4389 ack_eliciting = true;
4390 in_flight = true;
4391 }
4392 }
4393
4394 // Reset probe flag after sending to prevent duplicate probes
4395 // in a single flight.
4396 pmtud.set_in_flight(true);
4397 is_pmtud_probe = true;
4398 }
4399 }
4400
4401 let path = self.paths.get_mut(send_pid)?;
4402 // Create PATH_RESPONSE frame if needed.
4403 // We do not try to ensure that these are really sent.
4404 while let Some(challenge) = path.pop_received_challenge() {
4405 let frame = frame::Frame::PathResponse { data: challenge };
4406
4407 if push_frame_to_pkt!(b, frames, frame, left) {
4408 ack_eliciting = true;
4409 in_flight = true;
4410 } else {
4411 // If there are other pending PATH_RESPONSE, don't lose them
4412 // now.
4413 break;
4414 }
4415 }
4416
4417 // Create PATH_CHALLENGE frame if needed.
4418 if path.validation_requested() {
4419 // TODO: ensure that data is unique over paths.
4420 let data = rand::rand_u64().to_be_bytes();
4421
4422 let frame = frame::Frame::PathChallenge { data };
4423
4424 if push_frame_to_pkt!(b, frames, frame, left) {
4425 // Let's notify the path once we know the packet size.
4426 challenge_data = Some(data);
4427
4428 ack_eliciting = true;
4429 in_flight = true;
4430 }
4431 }
4432
4433 if let Some(key_update) = crypto_ctx.key_update.as_mut() {
4434 key_update.update_acked = true;
4435 }
4436 }
4437
4438 let path = self.paths.get_mut(send_pid)?;
4439
4440 if pkt_type == Type::Short && !is_closing {
4441 // Create NEW_CONNECTION_ID frames as needed.
4442 while let Some(seq_num) = self.ids.next_advertise_new_scid_seq() {
4443 let frame = self.ids.get_new_connection_id_frame_for(seq_num)?;
4444
4445 if push_frame_to_pkt!(b, frames, frame, left) {
4446 self.ids.mark_advertise_new_scid_seq(seq_num, false);
4447
4448 ack_eliciting = true;
4449 in_flight = true;
4450 } else {
4451 break;
4452 }
4453 }
4454 }
4455
4456 if pkt_type == Type::Short && !is_closing && path.active() {
4457 // Create HANDSHAKE_DONE frame.
4458 // self.should_send_handshake_done() but without the need to borrow
4459 if self.handshake_completed &&
4460 !self.handshake_done_sent &&
4461 self.is_server
4462 {
4463 let frame = frame::Frame::HandshakeDone;
4464
4465 if push_frame_to_pkt!(b, frames, frame, left) {
4466 self.handshake_done_sent = true;
4467
4468 ack_eliciting = true;
4469 in_flight = true;
4470 }
4471 }
4472
4473 // Create MAX_STREAMS_BIDI frame.
4474 if self.streams.should_update_max_streams_bidi() {
4475 let frame = frame::Frame::MaxStreamsBidi {
4476 max: self.streams.max_streams_bidi_next(),
4477 };
4478
4479 if push_frame_to_pkt!(b, frames, frame, left) {
4480 self.streams.update_max_streams_bidi();
4481
4482 ack_eliciting = true;
4483 in_flight = true;
4484 }
4485 }
4486
4487 // Create MAX_STREAMS_UNI frame.
4488 if self.streams.should_update_max_streams_uni() {
4489 let frame = frame::Frame::MaxStreamsUni {
4490 max: self.streams.max_streams_uni_next(),
4491 };
4492
4493 if push_frame_to_pkt!(b, frames, frame, left) {
4494 self.streams.update_max_streams_uni();
4495
4496 ack_eliciting = true;
4497 in_flight = true;
4498 }
4499 }
4500
4501 // Create DATA_BLOCKED frame.
4502 if let Some(limit) = self.blocked_limit {
4503 let frame = frame::Frame::DataBlocked { limit };
4504
4505 if push_frame_to_pkt!(b, frames, frame, left) {
4506 self.blocked_limit = None;
4507 self.data_blocked_sent_count =
4508 self.data_blocked_sent_count.saturating_add(1);
4509
4510 ack_eliciting = true;
4511 in_flight = true;
4512 }
4513 }
4514
4515 // Create MAX_STREAM_DATA frames as needed.
4516 for stream_id in self.streams.almost_full() {
4517 let stream = match self.streams.get_mut(stream_id) {
4518 Some(v) => v,
4519
4520 None => {
4521 // The stream doesn't exist anymore, so remove it from
4522 // the almost full set.
4523 self.streams.remove_almost_full(stream_id);
4524 continue;
4525 },
4526 };
4527
4528 // Autotune the stream window size, but only if this is not a
4529 // retransmission (on a retransmit the stream will be in
                // `self.streams.almost_full()` but its `almost_full()`
                // method returns false).
4532 if stream.recv.almost_full() {
4533 stream.recv.autotune_window(now, path.recovery.rtt());
4534 }
4535
4536 let frame = frame::Frame::MaxStreamData {
4537 stream_id,
4538 max: stream.recv.max_data_next(),
4539 };
4540
4541 if push_frame_to_pkt!(b, frames, frame, left) {
4542 let recv_win = stream.recv.window();
4543
4544 stream.recv.update_max_data(now);
4545
4546 self.streams.remove_almost_full(stream_id);
4547
4548 ack_eliciting = true;
4549 in_flight = true;
4550
4551 // Make sure the connection window always has some
4552 // room compared to the stream window.
4553 flow_control.ensure_window_lower_bound(
4554 (recv_win as f64 * CONNECTION_WINDOW_FACTOR) as u64,
4555 );
4556 }
4557 }
4558
4559 // Create MAX_DATA frame as needed.
4560 if flow_control.should_update_max_data() &&
4561 flow_control.max_data() < flow_control.max_data_next()
4562 {
4563 // Autotune the connection window size. We only tune the window
4564 // if we are sending an "organic" update, not on retransmits.
4565 flow_control.autotune_window(now, path.recovery.rtt());
4566 self.should_send_max_data = true;
4567 }
4568
4569 if self.should_send_max_data {
4570 let frame = frame::Frame::MaxData {
4571 max: flow_control.max_data_next(),
4572 };
4573
4574 if push_frame_to_pkt!(b, frames, frame, left) {
4575 self.should_send_max_data = false;
4576
4577 // Commits the new max_rx_data limit.
4578 flow_control.update_max_data(now);
4579
4580 ack_eliciting = true;
4581 in_flight = true;
4582 }
4583 }
4584
4585 // Create STOP_SENDING frames as needed.
4586 for (stream_id, error_code) in self
4587 .streams
4588 .stopped()
4589 .map(|(&k, &v)| (k, v))
4590 .collect::<Vec<(u64, u64)>>()
4591 {
4592 let frame = frame::Frame::StopSending {
4593 stream_id,
4594 error_code,
4595 };
4596
4597 if push_frame_to_pkt!(b, frames, frame, left) {
4598 self.streams.remove_stopped(stream_id);
4599
4600 ack_eliciting = true;
4601 in_flight = true;
4602 }
4603 }
4604
4605 // Create RESET_STREAM frames as needed.
4606 for (stream_id, (error_code, final_size)) in self
4607 .streams
4608 .reset()
4609 .map(|(&k, &v)| (k, v))
4610 .collect::<Vec<(u64, (u64, u64))>>()
4611 {
4612 let frame = frame::Frame::ResetStream {
4613 stream_id,
4614 error_code,
4615 final_size,
4616 };
4617
4618 if push_frame_to_pkt!(b, frames, frame, left) {
4619 self.streams.remove_reset(stream_id);
4620
4621 ack_eliciting = true;
4622 in_flight = true;
4623 }
4624 }
4625
4626 // Create STREAM_DATA_BLOCKED frames as needed.
4627 for (stream_id, limit) in self
4628 .streams
4629 .blocked()
4630 .map(|(&k, &v)| (k, v))
4631 .collect::<Vec<(u64, u64)>>()
4632 {
4633 let frame = frame::Frame::StreamDataBlocked { stream_id, limit };
4634
4635 if push_frame_to_pkt!(b, frames, frame, left) {
4636 self.streams.remove_blocked(stream_id);
4637 self.stream_data_blocked_sent_count =
4638 self.stream_data_blocked_sent_count.saturating_add(1);
4639
4640 ack_eliciting = true;
4641 in_flight = true;
4642 }
4643 }
4644
4645 // Create RETIRE_CONNECTION_ID frames as needed.
4646 let retire_dcid_seqs = self.ids.retire_dcid_seqs();
4647
4648 for seq_num in retire_dcid_seqs {
4649 // The sequence number specified in a RETIRE_CONNECTION_ID frame
4650 // MUST NOT refer to the Destination Connection ID field of the
4651 // packet in which the frame is contained.
4652 let dcid_seq = path.active_dcid_seq.ok_or(Error::InvalidState)?;
4653
4654 if seq_num == dcid_seq {
4655 continue;
4656 }
4657
4658 let frame = frame::Frame::RetireConnectionId { seq_num };
4659
4660 if push_frame_to_pkt!(b, frames, frame, left) {
4661 self.ids.mark_retire_dcid_seq(seq_num, false)?;
4662
4663 ack_eliciting = true;
4664 in_flight = true;
4665 } else {
4666 break;
4667 }
4668 }
4669 }
4670
4671 // Create CONNECTION_CLOSE frame. Try to send this only on the active
4672 // path, unless it is the last one available.
4673 if path.active() || n_paths == 1 {
4674 if let Some(conn_err) = self.local_error.as_ref() {
4675 if conn_err.is_app {
4676 // Create ApplicationClose frame.
4677 if pkt_type == Type::Short {
4678 let frame = frame::Frame::ApplicationClose {
4679 error_code: conn_err.error_code,
4680 reason: conn_err.reason.clone(),
4681 };
4682
4683 if push_frame_to_pkt!(b, frames, frame, left) {
4684 let pto = path.recovery.pto();
4685 self.draining_timer = Some(now + (pto * 3));
4686
4687 ack_eliciting = true;
4688 in_flight = true;
4689 }
4690 }
4691 } else {
4692 // Create ConnectionClose frame.
4693 let frame = frame::Frame::ConnectionClose {
4694 error_code: conn_err.error_code,
4695 frame_type: 0,
4696 reason: conn_err.reason.clone(),
4697 };
4698
4699 if push_frame_to_pkt!(b, frames, frame, left) {
4700 let pto = path.recovery.pto();
4701 self.draining_timer = Some(now + (pto * 3));
4702
4703 ack_eliciting = true;
4704 in_flight = true;
4705 }
4706 }
4707 }
4708 }
4709
4710 // Create CRYPTO frame.
4711 if crypto_ctx.crypto_stream.is_flushable() &&
4712 left > frame::MAX_CRYPTO_OVERHEAD &&
4713 !is_closing &&
4714 path.active()
4715 {
4716 let crypto_off = crypto_ctx.crypto_stream.send.off_front();
4717
4718 // Encode the frame.
4719 //
4720 // Instead of creating a `frame::Frame` object, encode the frame
4721 // directly into the packet buffer.
4722 //
4723 // First we reserve some space in the output buffer for writing the
4724 // frame header (we assume the length field is always a 2-byte
4725 // varint as we don't know the value yet).
4726 //
4727 // Then we emit the data from the crypto stream's send buffer.
4728 //
4729 // Finally we go back and encode the frame header with the now
4730 // available information.
4731 let hdr_off = b.off();
4732 let hdr_len = 1 + // frame type
4733 octets::varint_len(crypto_off) + // offset
4734 2; // length, always encode as 2-byte varint
4735
4736 if let Some(max_len) = left.checked_sub(hdr_len) {
4737 let (mut crypto_hdr, mut crypto_payload) =
4738 b.split_at(hdr_off + hdr_len)?;
4739
4740 // Write stream data into the packet buffer.
4741 let (len, _) = crypto_ctx
4742 .crypto_stream
4743 .send
4744 .emit(&mut crypto_payload.as_mut()[..max_len])?;
4745
4746 // Encode the frame's header.
4747 //
4748 // Due to how `OctetsMut::split_at()` works, `crypto_hdr` starts
4749 // from the initial offset of `b` (rather than the current
4750 // offset), so it needs to be advanced to the
4751 // initial frame offset.
4752 crypto_hdr.skip(hdr_off)?;
4753
4754 frame::encode_crypto_header(
4755 crypto_off,
4756 len as u64,
4757 &mut crypto_hdr,
4758 )?;
4759
4760 // Advance the packet buffer's offset.
4761 b.skip(hdr_len + len)?;
4762
4763 let frame = frame::Frame::CryptoHeader {
4764 offset: crypto_off,
4765 length: len,
4766 };
4767
4768 if push_frame_to_pkt!(b, frames, frame, left) {
4769 ack_eliciting = true;
4770 in_flight = true;
4771 has_data = true;
4772 }
4773 }
4774 }
4775
        // The preferred type of data-bearing frame to include in a packet
4777 // is managed by `self.emit_dgram`. However, whether any frames
4778 // can be sent depends on the state of their buffers. In the case
4779 // where one type is preferred but its buffer is empty, fall back
4780 // to the other type in order not to waste this function call.
4781 let mut dgram_emitted = false;
4782 let dgrams_to_emit = max_dgram_len.is_some();
4783 let stream_to_emit = self.streams.has_flushable();
4784
4785 let mut do_dgram = self.emit_dgram && dgrams_to_emit;
4786 let do_stream = !self.emit_dgram && stream_to_emit;
4787
4788 if !do_stream && dgrams_to_emit {
4789 do_dgram = true;
4790 }
4791
4792 // Create DATAGRAM frame.
4793 if (pkt_type == Type::Short || pkt_type == Type::ZeroRTT) &&
4794 left > frame::MAX_DGRAM_OVERHEAD &&
4795 !is_closing &&
4796 path.active() &&
4797 do_dgram
4798 {
4799 if let Some(max_dgram_payload) = max_dgram_len {
4800 while let Some(len) = self.dgram_send_queue.peek_front_len() {
4801 let hdr_off = b.off();
4802 let hdr_len = 1 + // frame type
4803 2; // length, always encode as 2-byte varint
4804
4805 if (hdr_len + len) <= left {
4806 // Front of the queue fits this packet, send it.
4807 match self.dgram_send_queue.pop() {
4808 Some(data) => {
4809 // Encode the frame.
4810 //
4811 // Instead of creating a `frame::Frame` object,
4812 // encode the frame directly into the packet
4813 // buffer.
4814 //
4815 // First we reserve some space in the output
4816 // buffer for writing the frame header (we
4817 // assume the length field is always a 2-byte
4818 // varint as we don't know the value yet).
4819 //
4820 // Then we emit the data from the DATAGRAM's
4821 // buffer.
4822 //
4823 // Finally we go back and encode the frame
4824 // header with the now available information.
4825 let (mut dgram_hdr, mut dgram_payload) =
4826 b.split_at(hdr_off + hdr_len)?;
4827
4828 dgram_payload.as_mut()[..len]
4829 .copy_from_slice(&data);
4830
4831 // Encode the frame's header.
4832 //
4833 // Due to how `OctetsMut::split_at()` works,
4834 // `dgram_hdr` starts from the initial offset
4835 // of `b` (rather than the current offset), so
4836 // it needs to be advanced to the initial frame
4837 // offset.
4838 dgram_hdr.skip(hdr_off)?;
4839
4840 frame::encode_dgram_header(
4841 len as u64,
4842 &mut dgram_hdr,
4843 )?;
4844
4845 // Advance the packet buffer's offset.
4846 b.skip(hdr_len + len)?;
4847
4848 let frame =
4849 frame::Frame::DatagramHeader { length: len };
4850
4851 if push_frame_to_pkt!(b, frames, frame, left) {
4852 ack_eliciting = true;
4853 in_flight = true;
4854 dgram_emitted = true;
4855 self.dgram_sent_count =
4856 self.dgram_sent_count.saturating_add(1);
4857 path.dgram_sent_count =
4858 path.dgram_sent_count.saturating_add(1);
4859 }
4860 },
4861
4862 None => continue,
4863 };
4864 } else if len > max_dgram_payload {
4865 // This dgram frame will never fit. Let's purge it.
4866 self.dgram_send_queue.pop();
4867 } else {
4868 break;
4869 }
4870 }
4871 }
4872 }
4873
4874 // Create a single STREAM frame for the first stream that is flushable.
4875 if (pkt_type == Type::Short || pkt_type == Type::ZeroRTT) &&
4876 left > frame::MAX_STREAM_OVERHEAD &&
4877 !is_closing &&
4878 path.active() &&
4879 !dgram_emitted
4880 {
4881 while let Some(priority_key) = self.streams.peek_flushable() {
4882 let stream_id = priority_key.id;
4883 let stream = match self.streams.get_mut(stream_id) {
4884 // Avoid sending frames for streams that were already stopped.
4885 //
4886 // This might happen if stream data was buffered but not yet
4887 // flushed on the wire when a STOP_SENDING frame is received.
4888 Some(v) if !v.send.is_stopped() => v,
4889 _ => {
4890 self.streams.remove_flushable(&priority_key);
4891 continue;
4892 },
4893 };
4894
4895 let stream_off = stream.send.off_front();
4896
4897 // Encode the frame.
4898 //
4899 // Instead of creating a `frame::Frame` object, encode the frame
4900 // directly into the packet buffer.
4901 //
4902 // First we reserve some space in the output buffer for writing
4903 // the frame header (we assume the length field is always a
4904 // 2-byte varint as we don't know the value yet).
4905 //
4906 // Then we emit the data from the stream's send buffer.
4907 //
4908 // Finally we go back and encode the frame header with the now
4909 // available information.
4910 let hdr_off = b.off();
4911 let hdr_len = 1 + // frame type
4912 octets::varint_len(stream_id) + // stream_id
4913 octets::varint_len(stream_off) + // offset
4914 2; // length, always encode as 2-byte varint
4915
4916 let max_len = match left.checked_sub(hdr_len) {
4917 Some(v) => v,
4918 None => {
4919 let priority_key = Arc::clone(&stream.priority_key);
4920 self.streams.remove_flushable(&priority_key);
4921
4922 continue;
4923 },
4924 };
4925
4926 let (mut stream_hdr, mut stream_payload) =
4927 b.split_at(hdr_off + hdr_len)?;
4928
4929 // Write stream data into the packet buffer.
4930 let (len, fin) =
4931 stream.send.emit(&mut stream_payload.as_mut()[..max_len])?;
4932
4933 // Encode the frame's header.
4934 //
4935 // Due to how `OctetsMut::split_at()` works, `stream_hdr` starts
4936 // from the initial offset of `b` (rather than the current
4937 // offset), so it needs to be advanced to the initial frame
4938 // offset.
4939 stream_hdr.skip(hdr_off)?;
4940
4941 frame::encode_stream_header(
4942 stream_id,
4943 stream_off,
4944 len as u64,
4945 fin,
4946 &mut stream_hdr,
4947 )?;
4948
4949 // Advance the packet buffer's offset.
4950 b.skip(hdr_len + len)?;
4951
4952 let frame = frame::Frame::StreamHeader {
4953 stream_id,
4954 offset: stream_off,
4955 length: len,
4956 fin,
4957 };
4958
4959 if push_frame_to_pkt!(b, frames, frame, left) {
4960 ack_eliciting = true;
4961 in_flight = true;
4962 has_data = true;
4963 }
4964
4965 let priority_key = Arc::clone(&stream.priority_key);
4966 // If the stream is no longer flushable, remove it from the queue
4967 if !stream.is_flushable() {
4968 self.streams.remove_flushable(&priority_key);
4969 } else if stream.incremental {
4970 // Shuffle the incremental stream to the back of the
4971 // queue.
4972 self.streams.remove_flushable(&priority_key);
4973 self.streams.insert_flushable(&priority_key);
4974 }
4975
4976 #[cfg(feature = "fuzzing")]
4977 // Coalesce STREAM frames when fuzzing.
4978 if left > frame::MAX_STREAM_OVERHEAD {
4979 continue;
4980 }
4981
4982 break;
4983 }
4984 }
4985
4986 // Alternate trying to send DATAGRAMs next time.
4987 self.emit_dgram = !dgram_emitted;
4988
        // If no other ack-eliciting frame is sent, include a PING frame:
        // - if a PTO probe is needed; OR
        // - if we've sent too many non-ack-eliciting packets without having
        //   sent an ack-eliciting one; OR
        // - if the application requested that an ack-eliciting frame be sent.
4994 if (ack_elicit_required || path.needs_ack_eliciting) &&
4995 !ack_eliciting &&
4996 left >= 1 &&
4997 !is_closing
4998 {
4999 let frame = frame::Frame::Ping { mtu_probe: None };
5000
5001 if push_frame_to_pkt!(b, frames, frame, left) {
5002 ack_eliciting = true;
5003 in_flight = true;
5004 }
5005 }
5006
5007 if ack_eliciting && !is_pmtud_probe {
5008 path.needs_ack_eliciting = false;
5009 path.recovery.ping_sent(epoch);
5010 }
5011
5012 if !has_data &&
5013 !dgram_emitted &&
5014 cwnd_available > frame::MAX_STREAM_OVERHEAD
5015 {
5016 path.recovery.on_app_limited();
5017 }
5018
5019 if frames.is_empty() {
5020 // When we reach this point we are not able to write more, so set
5021 // app_limited to false.
5022 path.recovery.update_app_limited(false);
5023 return Err(Error::Done);
5024 }
5025
5026 // When coalescing a 1-RTT packet, we can't add padding in the UDP
5027 // datagram, so use PADDING frames instead.
5028 //
5029 // This is only needed if
5030 // 1) an Initial packet has already been written to the UDP datagram,
5031 // as Initial always requires padding.
5032 //
5033 // 2) this is a probing packet towards an unvalidated peer address.
5034 if (has_initial || !path.validated()) &&
5035 pkt_type == Type::Short &&
5036 left >= 1
5037 {
5038 let frame = frame::Frame::Padding { len: left };
5039
5040 if push_frame_to_pkt!(b, frames, frame, left) {
5041 in_flight = true;
5042 }
5043 }
5044
5045 // Pad payload so that it's always at least 4 bytes.
5046 if b.off() - payload_offset < PAYLOAD_MIN_LEN {
5047 let payload_len = b.off() - payload_offset;
5048
5049 let frame = frame::Frame::Padding {
5050 len: PAYLOAD_MIN_LEN - payload_len,
5051 };
5052
5053 #[allow(unused_assignments)]
5054 if push_frame_to_pkt!(b, frames, frame, left) {
5055 in_flight = true;
5056 }
5057 }
5058
5059 let payload_len = b.off() - payload_offset;
5060
5061 // Fill in payload length.
5062 if pkt_type != Type::Short {
5063 let len = pn_len + payload_len + crypto_overhead;
5064
5065 let (_, mut payload_with_len) = b.split_at(header_offset)?;
5066 payload_with_len
5067 .put_varint_with_len(len as u64, PAYLOAD_LENGTH_LEN)?;
5068 }
5069
5070 trace!(
5071 "{} tx pkt {} len={} pn={} {}",
5072 self.trace_id,
5073 hdr_trace.unwrap_or_default(),
5074 payload_len,
5075 pn,
5076 AddrTupleFmt(path.local_addr(), path.peer_addr())
5077 );
5078
5079 #[cfg(feature = "qlog")]
5080 let mut qlog_frames: SmallVec<
5081 [qlog::events::quic::QuicFrame; 1],
5082 > = SmallVec::with_capacity(frames.len());
5083
5084 for frame in &mut frames {
5085 trace!("{} tx frm {:?}", self.trace_id, frame);
5086
5087 qlog_with_type!(QLOG_PACKET_TX, self.qlog, _q, {
5088 qlog_frames.push(frame.to_qlog());
5089 });
5090 }
5091
5092 qlog_with_type!(QLOG_PACKET_TX, self.qlog, q, {
5093 if let Some(header) = qlog_pkt_hdr {
5094 // Qlog packet raw info described at
5095 // https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema-00#section-5.1
5096 //
5097 // `length` includes packet headers and trailers (AEAD tag).
5098 let length = payload_len + payload_offset + crypto_overhead;
5099 let qlog_raw_info = RawInfo {
5100 length: Some(length as u64),
5101 payload_length: Some(payload_len as u64),
5102 data: None,
5103 };
5104
5105 let send_at_time =
5106 now.duration_since(q.start_time()).as_secs_f32() * 1000.0;
5107
5108 let ev_data =
5109 EventData::PacketSent(qlog::events::quic::PacketSent {
5110 header,
5111 frames: Some(qlog_frames),
5112 raw: Some(qlog_raw_info),
5113 send_at_time: Some(send_at_time),
5114 ..Default::default()
5115 });
5116
5117 q.add_event_data_with_instant(ev_data, now).ok();
5118 }
5119 });
5120
5121 let aead = match crypto_ctx.crypto_seal {
5122 Some(ref v) => v,
5123 None => return Err(Error::InvalidState),
5124 };
5125
5126 let written = packet::encrypt_pkt(
5127 &mut b,
5128 pn,
5129 pn_len,
5130 payload_len,
5131 payload_offset,
5132 None,
5133 aead,
5134 )?;
5135
5136 let sent_pkt_has_data = if path.recovery.gcongestion_enabled() {
5137 has_data || dgram_emitted
5138 } else {
5139 has_data
5140 };
5141
5142 let sent_pkt = recovery::Sent {
5143 pkt_num: pn,
5144 frames,
5145 time_sent: now,
5146 time_acked: None,
5147 time_lost: None,
5148 size: if ack_eliciting { written } else { 0 },
5149 ack_eliciting,
5150 in_flight,
5151 delivered: 0,
5152 delivered_time: now,
5153 first_sent_time: now,
5154 is_app_limited: false,
5155 tx_in_flight: 0,
5156 lost: 0,
5157 has_data: sent_pkt_has_data,
5158 is_pmtud_probe,
5159 };
5160
5161 if in_flight && is_app_limited {
5162 path.recovery.delivery_rate_update_app_limited(true);
5163 }
5164
5165 self.next_pkt_num += 1;
5166
5167 let handshake_status = recovery::HandshakeStatus {
5168 has_handshake_keys: self.crypto_ctx[packet::Epoch::Handshake]
5169 .has_keys(),
5170 peer_verified_address: self.peer_verified_initial_address,
5171 completed: self.handshake_completed,
5172 };
5173
5174 self.on_packet_sent(send_pid, sent_pkt, epoch, handshake_status, now)?;
5175
5176 let path = self.paths.get_mut(send_pid)?;
5177 qlog_with_type!(QLOG_METRICS, self.qlog, q, {
5178 path.recovery.maybe_qlog(q, now);
5179 });
5180
5181 // Record sent packet size if we probe the path.
5182 if let Some(data) = challenge_data {
5183 path.add_challenge_sent(data, written, now);
5184 }
5185
5186 self.sent_count += 1;
5187 self.sent_bytes += written as u64;
5188 path.sent_count += 1;
5189 path.sent_bytes += written as u64;
5190
5191 if self.dgram_send_queue.byte_size() > path.recovery.cwnd_available() {
5192 path.recovery.update_app_limited(false);
5193 }
5194
5195 path.max_send_bytes = path.max_send_bytes.saturating_sub(written);
5196
        // On the client, drop initial state after sending a Handshake packet.
5198 if !self.is_server && hdr_ty == Type::Handshake {
5199 self.drop_epoch_state(packet::Epoch::Initial, now);
5200 }
5201
5202 // (Re)start the idle timer if we are sending the first ack-eliciting
5203 // packet since last receiving a packet.
5204 if ack_eliciting && !self.ack_eliciting_sent {
5205 if let Some(idle_timeout) = self.idle_timeout() {
5206 self.idle_timer = Some(now + idle_timeout);
5207 }
5208 }
5209
5210 if ack_eliciting {
5211 self.ack_eliciting_sent = true;
5212 }
5213
5214 Ok((pkt_type, written))
5215 }
5216
5217 fn on_packet_sent(
5218 &mut self, send_pid: usize, sent_pkt: recovery::Sent,
5219 epoch: packet::Epoch, handshake_status: recovery::HandshakeStatus,
5220 now: Instant,
5221 ) -> Result<()> {
5222 let path = self.paths.get_mut(send_pid)?;
5223
5224 // It's fine to set the skip counter based on a non-active path's values.
5225 let cwnd = path.recovery.cwnd();
5226 let max_datagram_size = path.recovery.max_datagram_size();
5227 self.pkt_num_spaces[epoch].on_packet_sent(&sent_pkt);
5228 self.pkt_num_manager.on_packet_sent(
5229 cwnd,
5230 max_datagram_size,
5231 self.handshake_completed,
5232 );
5233
5234 path.recovery.on_packet_sent(
5235 sent_pkt,
5236 epoch,
5237 handshake_status,
5238 now,
5239 &self.trace_id,
5240 );
5241
5242 Ok(())
5243 }
5244
5245 /// Returns the desired send time for the next packet.
5246 #[inline]
5247 pub fn get_next_release_time(&self) -> Option<ReleaseDecision> {
5248 Some(
5249 self.paths
5250 .get_active()
5251 .ok()?
5252 .recovery
5253 .get_next_release_time(),
5254 )
5255 }
5256
5257 /// Returns whether gcongestion is enabled.
5258 #[inline]
5259 pub fn gcongestion_enabled(&self) -> Option<bool> {
5260 Some(self.paths.get_active().ok()?.recovery.gcongestion_enabled())
5261 }
5262
5263 /// Returns the maximum pacing into the future.
5264 ///
5265 /// Equals 1/8 of the smoothed RTT, but at least 1ms and not greater than
5266 /// 5ms.
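///
/// For example, a smoothed RTT of 32ms yields 4ms, while RTTs below 8ms or
/// above 40ms are clamped to 1ms and 5ms respectively.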
5267 pub fn max_release_into_future(&self) -> Duration {
5268 self.paths
5269 .get_active()
5270 .map(|p| p.recovery.rtt().mul_f64(0.125))
5271 .unwrap_or(Duration::from_millis(1))
5272 .max(Duration::from_millis(1))
5273 .min(Duration::from_millis(5))
5274 }
5275
5276 /// Returns whether pacing is enabled.
5277 #[inline]
5278 pub fn pacing_enabled(&self) -> bool {
5279 self.recovery_config.pacing
5280 }
5281
5282 /// Returns the size of the send quantum, in bytes.
5283 ///
5284 /// This represents the maximum size of a packet burst as determined by the
5285 /// congestion control algorithm in use.
5286 ///
5287 /// Applications can, for example, use it in conjunction with segmentation
5288 /// offloading mechanisms as the maximum limit for outgoing aggregates of
5289 /// multiple packets.
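///
/// ## Examples:
///
/// A minimal sketch of capping a burst of outgoing packets to the send
/// quantum; handing the produced datagrams to the socket (e.g. as a single
/// segmentation-offload batch) is left to the application:
///
/// ```no_run
/// # let mut out = [0; 1350];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let local = socket.local_addr().unwrap();
/// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
/// let quantum = conn.send_quantum();
/// let mut burst = 0;
///
/// while burst < quantum {
///     match conn.send(&mut out) {
///         Ok((written, _send_info)) => {
///             // Queue out[..written] for transmission here.
///             burst += written;
///         },
///
///         // No more packets to send in this burst (or an error occurred).
///         Err(_) => break,
///     }
/// }
/// # Ok::<(), quiche::Error>(())
/// ```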
5290 #[inline]
5291 pub fn send_quantum(&self) -> usize {
5292 match self.paths.get_active() {
5293 Ok(p) => p.recovery.send_quantum(),
5294 _ => 0,
5295 }
5296 }
5297
5298 /// Returns the size of the send quantum over the given 4-tuple, in bytes.
5299 ///
5300 /// This represents the maximum size of a packet burst as determined by the
5301 /// congestion control algorithm in use.
5302 ///
5303 /// Applications can, for example, use it in conjunction with segmentation
5304 /// offloading mechanisms as the maximum limit for outgoing aggregates of
5305 /// multiple packets.
5306 ///
5307 /// If the (`local_addr`, `peer_addr`) 4-tuple relates to a non-existing
5308 /// path, this method returns 0.
5309 pub fn send_quantum_on_path(
5310 &self, local_addr: SocketAddr, peer_addr: SocketAddr,
5311 ) -> usize {
5312 self.paths
5313 .path_id_from_addrs(&(local_addr, peer_addr))
5314 .and_then(|pid| self.paths.get(pid).ok())
5315 .map(|path| path.recovery.send_quantum())
5316 .unwrap_or(0)
5317 }
5318
5319 /// Reads contiguous data from a stream into the provided slice.
5320 ///
5321 /// The slice must be sized by the caller and will be populated up to its
5322 /// capacity.
5323 ///
5324 /// On success the amount of bytes read and a flag indicating the fin state
5325 /// is returned as a tuple, or [`Done`] if there is no data to read.
5326 ///
5327 /// Reading data from a stream may trigger queueing of control messages
5328 /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
5329 ///
5330 /// [`Done`]: enum.Error.html#variant.Done
5331 /// [`send()`]: struct.Connection.html#method.send
5332 ///
5333 /// ## Examples:
5334 ///
5335 /// ```no_run
5336 /// # let mut buf = [0; 512];
5337 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
5338 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
5339 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
5340 /// # let peer = "127.0.0.1:1234".parse().unwrap();
5341 /// # let local = socket.local_addr().unwrap();
5342 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
5343 /// # let stream_id = 0;
5344 /// while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
5345 /// println!("Got {} bytes on stream {}", read, stream_id);
5346 /// }
5347 /// # Ok::<(), quiche::Error>(())
5348 /// ```
5349 pub fn stream_recv(
5350 &mut self, stream_id: u64, out: &mut [u8],
5351 ) -> Result<(usize, bool)> {
5352 self.do_stream_recv(stream_id, RecvAction::Emit { out })
5353 }
5354
5355 /// Discards contiguous data from a stream without copying.
5356 ///
5357 /// On success the amount of bytes discarded and a flag indicating the fin
5358 /// state is returned as a tuple, or [`Done`] if there is no data to
5359 /// discard.
5360 ///
5361 /// Discarding data from a stream may trigger queueing of control messages
5362 /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
5363 ///
5364 /// [`Done`]: enum.Error.html#variant.Done
5365 /// [`send()`]: struct.Connection.html#method.send
5366 ///
5367 /// ## Examples:
5368 ///
5369 /// ```no_run
5370 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
5371 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
5372 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
5373 /// # let peer = "127.0.0.1:1234".parse().unwrap();
5374 /// # let local = socket.local_addr().unwrap();
5375 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
5376 /// # let stream_id = 0;
5377 /// while let Ok((read, fin)) = conn.stream_discard(stream_id, 1) {
5378 /// println!("Discarded {} byte(s) on stream {}", read, stream_id);
5379 /// }
5380 /// # Ok::<(), quiche::Error>(())
5381 /// ```
5382 pub fn stream_discard(
5383 &mut self, stream_id: u64, len: usize,
5384 ) -> Result<(usize, bool)> {
5385 self.do_stream_recv(stream_id, RecvAction::Discard { len })
5386 }
5387
5388 // Reads or discards contiguous data from a stream.
5389 //
5390 // Passing an `action` of `RecvAction::Emit` results in a read into
5391 // the provided slice. It must be sized by the caller and will be populated
5392 // up to its capacity.
5393 //
5394 // Passing an `action` of `RecvAction::Discard` results in discarding up
5395 // to the indicated length.
5396 //
5397 // On success the amount of bytes read or discarded, and a flag indicating
5398 // the fin state, is returned as a tuple, or [`Done`] if there is no data to
5399 // read or discard.
5400 //
5401 // Reading or discarding data from a stream may trigger queueing of control
5402 // messages (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
5403 //
5404 // [`Done`]: enum.Error.html#variant.Done
5405 // [`send()`]: struct.Connection.html#method.send
5406 fn do_stream_recv(
5407 &mut self, stream_id: u64, action: RecvAction,
5408 ) -> Result<(usize, bool)> {
5409 // We can't read on our own unidirectional streams.
5410 if !stream::is_bidi(stream_id) &&
5411 stream::is_local(stream_id, self.is_server)
5412 {
5413 return Err(Error::InvalidStreamState(stream_id));
5414 }
5415
5416 let stream = self
5417 .streams
5418 .get_mut(stream_id)
5419 .ok_or(Error::InvalidStreamState(stream_id))?;
5420
5421 if !stream.is_readable() {
5422 return Err(Error::Done);
5423 }
5424
5425 let local = stream.local;
5426 let priority_key = Arc::clone(&stream.priority_key);
5427
5428 #[cfg(feature = "qlog")]
5429 let offset = stream.recv.off_front();
5430
5431 #[cfg(feature = "qlog")]
5432 let to = match action {
5433 RecvAction::Emit { .. } => Some(DataRecipient::Application),
5434
5435 RecvAction::Discard { .. } => Some(DataRecipient::Dropped),
5436 };
5437
5438 let (read, fin) = match stream.recv.emit_or_discard(action) {
5439 Ok(v) => v,
5440
5441 Err(e) => {
5442 // Collect the stream if it is now complete. This can happen if
5443 // we got a `StreamReset` error which will now be propagated to
5444 // the application, so we don't need to keep the stream's state
5445 // anymore.
5446 if stream.is_complete() {
5447 self.streams.collect(stream_id, local);
5448 }
5449
5450 self.streams.remove_readable(&priority_key);
5451 return Err(e);
5452 },
5453 };
5454
5455 self.flow_control.add_consumed(read as u64);
5456
5457 let readable = stream.is_readable();
5458
5459 let complete = stream.is_complete();
5460
5461 if stream.recv.almost_full() {
5462 self.streams.insert_almost_full(stream_id);
5463 }
5464
5465 if !readable {
5466 self.streams.remove_readable(&priority_key);
5467 }
5468
5469 if complete {
5470 self.streams.collect(stream_id, local);
5471 }
5472
5473 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
5474 let ev_data = EventData::DataMoved(qlog::events::quic::DataMoved {
5475 stream_id: Some(stream_id),
5476 offset: Some(offset),
5477 length: Some(read as u64),
5478 from: Some(DataRecipient::Transport),
5479 to,
5480 ..Default::default()
5481 });
5482
5483 let now = Instant::now();
5484 q.add_event_data_with_instant(ev_data, now).ok();
5485 });
5486
5487 if priority_key.incremental && readable {
5488 // Shuffle the incremental stream to the back of the queue.
5489 self.streams.remove_readable(&priority_key);
5490 self.streams.insert_readable(&priority_key);
5491 }
5492
5493 Ok((read, fin))
5494 }
5495
5496 /// Writes data to a stream.
5497 ///
5498 /// On success the number of bytes written is returned, or [`Done`] if no
5499 /// data was written (e.g. because the stream has no capacity).
5500 ///
5501 /// Applications can provide a 0-length buffer with the fin flag set to
5502 /// true. This will lead to a 0-length FIN STREAM frame being sent at the
5503 /// latest offset. The `Ok(0)` value is only returned when the application
5504 /// provided a 0-length buffer.
5505 ///
5506 /// In addition, if the peer has signalled that it doesn't want to receive
5507 /// any more data from this stream by sending the `STOP_SENDING` frame, the
5508 /// [`StreamStopped`] error will be returned instead of any data.
5509 ///
5510 /// Note that in order to avoid buffering an infinite amount of data in the
5511 /// stream's send buffer, streams are only allowed to buffer outgoing data
5512 /// up to the amount that the peer allows it to send (that is, up to the
5513 /// stream's outgoing flow control capacity).
5514 ///
5515 /// This means that the number of written bytes returned can be lower than
5516 /// the length of the input buffer when the stream doesn't have enough
5517 /// capacity for the operation to complete. The application should retry the
5518 /// operation once the stream is reported as writable again.
5519 ///
5520 /// Applications should call this method only after the handshake is
5521 /// completed (whenever [`is_established()`] returns `true`) or during
5522 /// early data if enabled (whenever [`is_in_early_data()`] returns `true`).
5523 ///
5524 /// [`Done`]: enum.Error.html#variant.Done
5525 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
5526 /// [`is_established()`]: struct.Connection.html#method.is_established
5527 /// [`is_in_early_data()`]: struct.Connection.html#method.is_in_early_data
5528 ///
5529 /// ## Examples:
5530 ///
5531 /// ```no_run
5532 /// # let mut buf = [0; 512];
5533 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
5534 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
5535 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
5536 /// # let peer = "127.0.0.1:1234".parse().unwrap();
5537 /// # let local = "127.0.0.1:4321".parse().unwrap();
5538 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
5539 /// # let stream_id = 0;
5540 /// conn.stream_send(stream_id, b"hello", true)?;
5541 /// # Ok::<(), quiche::Error>(())
5542 /// ```
5543 pub fn stream_send(
5544 &mut self, stream_id: u64, buf: &[u8], fin: bool,
5545 ) -> Result<usize> {
5546 self.stream_do_send(
5547 stream_id,
5548 buf,
5549 fin,
5550 |stream: &mut stream::Stream<F>,
5551 buf: &[u8],
5552 cap: usize,
5553 fin: bool| {
5554 stream.send.write(&buf[..cap], fin).map(|v| (v, v))
5555 },
5556 )
5557 }
5558
5559 /// Writes data to a stream with zero copying: instead, it appends the
5560 /// provided buffer directly to the send queue if the capacity allows
5561 /// it.
5562 ///
5563 /// When a partial write happens (including when [`Error::Done`] is
5564 /// returned) the remaining (unwritten) buffer will also be returned.
5565 /// The application should retry the operation once the stream is
5566 /// reported as writable again.
5567 pub fn stream_send_zc(
5568 &mut self, stream_id: u64, buf: F::Buf, len: Option<usize>, fin: bool,
5569 ) -> Result<(usize, Option<F::Buf>)>
5570 where
5571 F::Buf: BufSplit,
5572 {
5573 self.stream_do_send(
5574 stream_id,
5575 buf,
5576 fin,
5577 |stream: &mut stream::Stream<F>,
5578 buf: F::Buf,
5579 cap: usize,
5580 fin: bool| {
5581 let len = len.unwrap_or(usize::MAX).min(cap);
5582 let (sent, remaining) = stream.send.append_buf(buf, len, fin)?;
5583 Ok((sent, (sent, remaining)))
5584 },
5585 )
5586 }
5587
5588 fn stream_do_send<B, R, SND>(
5589 &mut self, stream_id: u64, buf: B, fin: bool, write_fn: SND,
5590 ) -> Result<R>
5591 where
5592 B: AsRef<[u8]>,
5593 SND: FnOnce(&mut stream::Stream<F>, B, usize, bool) -> Result<(usize, R)>,
5594 {
5595 // We can't write on the peer's unidirectional streams.
5596 if !stream::is_bidi(stream_id) &&
5597 !stream::is_local(stream_id, self.is_server)
5598 {
5599 return Err(Error::InvalidStreamState(stream_id));
5600 }
5601
5602 let len = buf.as_ref().len();
5603
5604 // Mark the connection as blocked if the connection-level flow control
5605 // limit doesn't let us buffer all the data.
5606 //
5607 // Note that this is separate from "send capacity" as that also takes
5608 // congestion control into consideration.
5609 if self.max_tx_data - self.tx_data < len as u64 {
5610 self.blocked_limit = Some(self.max_tx_data);
5611 }
5612
5613 let cap = self.tx_cap;
5614
5615 // Get existing stream or create a new one.
5616 let stream = self.get_or_create_stream(stream_id, true)?;
5617
5618 #[cfg(feature = "qlog")]
5619 let offset = stream.send.off_back();
5620
5621 let was_writable = stream.is_writable();
5622
5623 let was_flushable = stream.is_flushable();
5624
5625 let is_complete = stream.is_complete();
5626 let is_readable = stream.is_readable();
5627
5628 let priority_key = Arc::clone(&stream.priority_key);
5629
5630 // Return early if the stream has been stopped, and collect its state
5631 // if complete.
5632 if let Err(Error::StreamStopped(e)) = stream.send.cap() {
5633 // Only collect the stream if it is complete and not readable.
5634 // If it is readable, it will get collected when stream_recv()
5635 // is used.
5636 //
5637 // The stream can't be writable if it has been stopped.
5638 if is_complete && !is_readable {
5639 let local = stream.local;
5640 self.streams.collect(stream_id, local);
5641 }
5642
5643 return Err(Error::StreamStopped(e));
5644 };
5645
5646 // Truncate the input buffer based on the connection's send capacity if
5647 // necessary.
5648 //
5649 // When the cap is zero, the method returns Ok(0) *only* when the passed
5650 // buffer is empty. We return Error::Done otherwise.
5651 if cap == 0 && len > 0 {
5652 if was_writable {
5653 // When `stream_writable_next()` returns a stream, the writable
5654 // mark is removed, but because the stream is blocked by the
5655 // connection-level send capacity it won't be marked as writable
5656 // again once the capacity increases.
5657 //
5658 // Since the stream is writable already, mark it here instead.
5659 self.streams.insert_writable(&priority_key);
5660 }
5661
5662 return Err(Error::Done);
5663 }
5664
5665 let (cap, fin, blocked_by_cap) = if cap < len {
5666 (cap, false, true)
5667 } else {
5668 (len, fin, false)
5669 };
5670
5671 let (sent, ret) = match write_fn(stream, buf, cap, fin) {
5672 Ok(v) => v,
5673
5674 Err(e) => {
5675 self.streams.remove_writable(&priority_key);
5676 return Err(e);
5677 },
5678 };
5679
5680 let incremental = stream.incremental;
5681 let priority_key = Arc::clone(&stream.priority_key);
5682
5683 let flushable = stream.is_flushable();
5684
5685 let writable = stream.is_writable();
5686
5687 let empty_fin = len == 0 && fin;
5688
5689 if sent < cap {
5690 let max_off = stream.send.max_off();
5691
5692 if stream.send.blocked_at() != Some(max_off) {
5693 stream.send.update_blocked_at(Some(max_off));
5694 self.streams.insert_blocked(stream_id, max_off);
5695 }
5696 } else {
5697 stream.send.update_blocked_at(None);
5698 self.streams.remove_blocked(stream_id);
5699 }
5700
5701 // If the stream is now flushable push it to the flushable queue, but
5702 // only if it wasn't already queued.
5703 //
5704 // Consider the stream flushable also when we are sending a zero-length
5705 // frame that has the fin flag set.
5706 if (flushable || empty_fin) && !was_flushable {
5707 self.streams.insert_flushable(&priority_key);
5708 }
5709
5710 if !writable {
5711 self.streams.remove_writable(&priority_key);
5712 } else if was_writable && blocked_by_cap {
5713 // When `stream_writable_next()` returns a stream, the writable
5714 // mark is removed, but because the stream is blocked by the
5715 // connection-level send capacity it won't be marked as writable
5716 // again once the capacity increases.
5717 //
5718 // Since the stream is writable already, mark it here instead.
5719 self.streams.insert_writable(&priority_key);
5720 }
5721
5722 self.tx_cap -= sent;
5723
5724 self.tx_data += sent as u64;
5725
5726 self.tx_buffered += sent;
5727 self.check_tx_buffered_invariant();
5728
5729 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
5730 let ev_data = EventData::DataMoved(qlog::events::quic::DataMoved {
5731 stream_id: Some(stream_id),
5732 offset: Some(offset),
5733 length: Some(sent as u64),
5734 from: Some(DataRecipient::Application),
5735 to: Some(DataRecipient::Transport),
5736 ..Default::default()
5737 });
5738
5739 let now = Instant::now();
5740 q.add_event_data_with_instant(ev_data, now).ok();
5741 });
5742
5743 if sent == 0 && cap > 0 {
5744 return Err(Error::Done);
5745 }
5746
5747 if incremental && writable {
5748 // Shuffle the incremental stream to the back of the queue.
5749 self.streams.remove_writable(&priority_key);
5750 self.streams.insert_writable(&priority_key);
5751 }
5752
5753 Ok(ret)
5754 }
5755
5756 /// Sets the priority for a stream.
5757 ///
5758 /// A stream's priority determines the order in which stream data is sent
5759 /// on the wire (streams with a lower priority value are sent first). Streams are
5760 /// created with a default priority of `127`.
5761 ///
5762 /// The target stream is created if it did not exist before calling this
5763 /// method.
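///
/// ## Examples:
///
/// A small sketch (connection setup elided, stream IDs used as
/// placeholders) that makes one stream more urgent than the default and
/// marks another as incremental:
///
/// ```no_run
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let local = socket.local_addr().unwrap();
/// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
/// // Urgency 0 is the most urgent; data on stream 8 is sent before data
/// // on streams that keep the default priority of 127.
/// conn.stream_priority(8, 0, false)?;
///
/// // Stream 4 keeps the default urgency but is scheduled incrementally.
/// conn.stream_priority(4, 127, true)?;
/// # Ok::<(), quiche::Error>(())
/// ```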
5764 pub fn stream_priority(
5765 &mut self, stream_id: u64, urgency: u8, incremental: bool,
5766 ) -> Result<()> {
5767 // Get existing stream or create a new one, but if the stream
5768 // has already been closed and collected, ignore the prioritization.
5769 let stream = match self.get_or_create_stream(stream_id, true) {
5770 Ok(v) => v,
5771
5772 Err(Error::Done) => return Ok(()),
5773
5774 Err(e) => return Err(e),
5775 };
5776
5777 if stream.urgency == urgency && stream.incremental == incremental {
5778 return Ok(());
5779 }
5780
5781 stream.urgency = urgency;
5782 stream.incremental = incremental;
5783
5784 let new_priority_key = Arc::new(StreamPriorityKey {
5785 urgency: stream.urgency,
5786 incremental: stream.incremental,
5787 id: stream_id,
5788 ..Default::default()
5789 });
5790
5791 let old_priority_key =
5792 std::mem::replace(&mut stream.priority_key, new_priority_key.clone());
5793
5794 self.streams
5795 .update_priority(&old_priority_key, &new_priority_key);
5796
5797 Ok(())
5798 }
5799
5800 /// Shuts down reading or writing from/to the specified stream.
5801 ///
5802 /// When the `direction` argument is set to [`Shutdown::Read`], outstanding
5803 /// data in the stream's receive buffer is dropped, and no additional data
5804 /// is added to it. Data received after calling this method is still
5805 /// validated and acked but not stored, and [`stream_recv()`] will not
5806 /// return it to the application. In addition, a `STOP_SENDING` frame will
5807 /// be sent to the peer to signal it to stop sending data.
5808 ///
5809 /// When the `direction` argument is set to [`Shutdown::Write`], outstanding
5810 /// data in the stream's send buffer is dropped, and no additional data is
5811 /// added to it. Data passed to [`stream_send()`] after calling this method
5812 /// will be ignored. In addition, a `RESET_STREAM` frame will be sent to the
5813 /// peer to signal the reset.
5814 ///
5815 /// Locally-initiated unidirectional streams can only be closed in the
5816 /// [`Shutdown::Write`] direction. Remotely-initiated unidirectional streams
5817 /// can only be closed in the [`Shutdown::Read`] direction. Using an
5818 /// incorrect direction will return [`InvalidStreamState`].
5819 ///
5820 /// [`Shutdown::Read`]: enum.Shutdown.html#variant.Read
5821 /// [`Shutdown::Write`]: enum.Shutdown.html#variant.Write
5822 /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
5823 /// [`stream_send()`]: struct.Connection.html#method.stream_send
5824 /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
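///
/// ## Examples:
///
/// A minimal sketch (connection setup elided) that tells the peer to stop
/// sending on a stream; the error code `42` is an application-defined
/// placeholder:
///
/// ```no_run
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let local = socket.local_addr().unwrap();
/// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
/// # let stream_id = 0;
/// // Stop reading from the stream; a STOP_SENDING frame will be sent.
/// conn.stream_shutdown(stream_id, quiche::Shutdown::Read, 42)?;
/// # Ok::<(), quiche::Error>(())
/// ```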
5825 pub fn stream_shutdown(
5826 &mut self, stream_id: u64, direction: Shutdown, err: u64,
5827 ) -> Result<()> {
5828 // Don't try to stop a local unidirectional stream.
5829 if direction == Shutdown::Read &&
5830 stream::is_local(stream_id, self.is_server) &&
5831 !stream::is_bidi(stream_id)
5832 {
5833 return Err(Error::InvalidStreamState(stream_id));
5834 }
5835
5836 // Don't try to reset a remote unidirectional stream.
5837 if direction == Shutdown::Write &&
5838 !stream::is_local(stream_id, self.is_server) &&
5839 !stream::is_bidi(stream_id)
5840 {
5841 return Err(Error::InvalidStreamState(stream_id));
5842 }
5843
5844 // Get existing stream.
5845 let stream = self.streams.get_mut(stream_id).ok_or(Error::Done)?;
5846
5847 let priority_key = Arc::clone(&stream.priority_key);
5848
5849 match direction {
5850 Shutdown::Read => {
5851 let consumed = stream.recv.shutdown()?;
5852 self.flow_control.add_consumed(consumed);
5853
5854 if !stream.recv.is_fin() {
5855 self.streams.insert_stopped(stream_id, err);
5856 }
5857
5858 // Once shutdown, the stream is guaranteed to be non-readable.
5859 self.streams.remove_readable(&priority_key);
5860
5861 self.stopped_stream_local_count =
5862 self.stopped_stream_local_count.saturating_add(1);
5863 },
5864
5865 Shutdown::Write => {
5866 let (final_size, unsent) = stream.send.shutdown()?;
5867
5868 // Claw back some flow control allowance from data that was
5869 // buffered but not actually sent before the stream was reset.
5870 self.tx_data = self.tx_data.saturating_sub(unsent);
5871
5872 self.tx_buffered =
5873 self.tx_buffered.saturating_sub(unsent as usize);
5874
5875 // These drops in qlog are a bit weird, but they are the only way to
5876 // ensure that all bytes that are moved from App to Transport in
5877 // stream_do_send are eventually moved from Transport to Dropped.
5878 // Ideally we would add a Transport to Network transition also as
5879 // a way to indicate when bytes were transmitted vs dropped
5880 // without ever being sent.
5881 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
5882 let ev_data =
5883 EventData::DataMoved(qlog::events::quic::DataMoved {
5884 stream_id: Some(stream_id),
5885 offset: Some(final_size),
5886 length: Some(unsent),
5887 from: Some(DataRecipient::Transport),
5888 to: Some(DataRecipient::Dropped),
5889 ..Default::default()
5890 });
5891
5892 q.add_event_data_with_instant(ev_data, Instant::now()).ok();
5893 });
5894
5895 // Update send capacity.
5896 self.update_tx_cap();
5897
5898 self.streams.insert_reset(stream_id, err, final_size);
5899
5900 // Once shutdown, the stream is guaranteed to be non-writable.
5901 self.streams.remove_writable(&priority_key);
5902
5903 self.reset_stream_local_count =
5904 self.reset_stream_local_count.saturating_add(1);
5905 },
5906 }
5907
5908 Ok(())
5909 }
5910
5911 /// Returns the stream's send capacity in bytes.
5912 ///
5913 /// If the specified stream doesn't exist (including when it has already
5914 /// been completed and closed), the [`InvalidStreamState`] error will be
5915 /// returned.
5916 ///
5917 /// In addition, if the peer has signalled that it doesn't want to receive
5918 /// any more data from this stream by sending the `STOP_SENDING` frame, the
5919 /// [`StreamStopped`] error will be returned.
5920 ///
5921 /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
5922 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
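///
/// ## Examples:
///
/// A sketch (connection setup elided) of sizing a write to the reported
/// capacity so that `stream_send()` does not perform a partial write:
///
/// ```no_run
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let local = socket.local_addr().unwrap();
/// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
/// # let stream_id = 0;
/// # let data = [0; 1024];
/// let cap = conn.stream_capacity(stream_id)?;
/// let len = std::cmp::min(cap, data.len());
/// conn.stream_send(stream_id, &data[..len], false)?;
/// # Ok::<(), quiche::Error>(())
/// ```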
5923 #[inline]
5924 pub fn stream_capacity(&mut self, stream_id: u64) -> Result<usize> {
5925 if let Some(stream) = self.streams.get(stream_id) {
5926 let stream_cap = match stream.send.cap() {
5927 Ok(v) => v,
5928
5929 Err(Error::StreamStopped(e)) => {
5930 // Only collect the stream if it is complete and not
5931 // readable. If it is readable, it will get collected when
5932 // stream_recv() is used.
5933 if stream.is_complete() && !stream.is_readable() {
5934 let local = stream.local;
5935 self.streams.collect(stream_id, local);
5936 }
5937
5938 return Err(Error::StreamStopped(e));
5939 },
5940
5941 Err(e) => return Err(e),
5942 };
5943
5944 let cap = cmp::min(self.tx_cap, stream_cap);
5945 return Ok(cap);
5946 };
5947
5948 Err(Error::InvalidStreamState(stream_id))
5949 }
5950
5951 /// Returns the next stream that has data to read.
5952 ///
5953 /// Note that once returned by this method, a stream ID will not be returned
5954 /// again until it is "re-armed".
5955 ///
5956 /// The application will need to read all of the pending data on the stream,
5957 /// and new data has to be received before the stream is reported again.
5958 ///
5959 /// This is unlike the [`readable()`] method, which returns the same list of
5960 /// readable streams when called multiple times in succession.
5961 ///
5962 /// [`readable()`]: struct.Connection.html#method.readable
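///
/// ## Examples:
///
/// A minimal sketch (connection setup elided) of draining every stream
/// reported by this method:
///
/// ```no_run
/// # let mut buf = [0; 512];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let local = socket.local_addr().unwrap();
/// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
/// while let Some(stream_id) = conn.stream_readable_next() {
///     // Read until the stream has no more contiguous data.
///     while let Ok((read, _fin)) = conn.stream_recv(stream_id, &mut buf) {
///         println!("Got {} bytes on stream {}", read, stream_id);
///     }
/// }
/// # Ok::<(), quiche::Error>(())
/// ```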
5963 pub fn stream_readable_next(&mut self) -> Option<u64> {
5964 let priority_key = self.streams.readable.front().clone_pointer()?;
5965
5966 self.streams.remove_readable(&priority_key);
5967
5968 Some(priority_key.id)
5969 }
5970
5971 /// Returns true if the stream has data that can be read.
5972 pub fn stream_readable(&self, stream_id: u64) -> bool {
5973 let stream = match self.streams.get(stream_id) {
5974 Some(v) => v,
5975
5976 None => return false,
5977 };
5978
5979 stream.is_readable()
5980 }
5981
5982 /// Returns the next stream that can be written to.
5983 ///
5984 /// Note that once returned by this method, a stream ID will not be returned
5985 /// again until it is "re-armed".
5986 ///
5987 /// This is unlike the [`writable()`] method, which returns the same list of
5988 /// writable streams when called multiple times in succession. It is not
5989 /// advised to use both `stream_writable_next()` and [`writable()`] on the
5990 /// same connection, as it may lead to unexpected results.
5991 ///
5992 /// The [`stream_writable()`] method can also be used to fine-tune when a
5993 /// stream is reported as writable again.
5994 ///
5995 /// [`stream_writable()`]: struct.Connection.html#method.stream_writable
5996 /// [`writable()`]: struct.Connection.html#method.writable
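///
/// ## Examples:
///
/// A minimal sketch (connection setup elided) of writing on every stream
/// reported by this method:
///
/// ```no_run
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let local = socket.local_addr().unwrap();
/// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
/// while let Some(stream_id) = conn.stream_writable_next() {
///     if let Ok(written) = conn.stream_send(stream_id, b"hello", false) {
///         println!("Written {} bytes on stream {}", written, stream_id);
///     }
/// }
/// # Ok::<(), quiche::Error>(())
/// ```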
5997 pub fn stream_writable_next(&mut self) -> Option<u64> {
5998 // If there is not enough connection-level send capacity, none of the
5999 // streams are writable.
6000 if self.tx_cap == 0 {
6001 return None;
6002 }
6003
6004 let mut cursor = self.streams.writable.front();
6005
6006 while let Some(priority_key) = cursor.clone_pointer() {
6007 if let Some(stream) = self.streams.get(priority_key.id) {
6008 let cap = match stream.send.cap() {
6009 Ok(v) => v,
6010
6011 // Return the stream to the application immediately if it's
6012 // stopped.
6013 Err(_) =>
6014 return {
6015 self.streams.remove_writable(&priority_key);
6016
6017 Some(priority_key.id)
6018 },
6019 };
6020
6021 if cmp::min(self.tx_cap, cap) >= stream.send_lowat {
6022 self.streams.remove_writable(&priority_key);
6023 return Some(priority_key.id);
6024 }
6025 }
6026
6027 cursor.move_next();
6028 }
6029
6030 None
6031 }
6032
6033 /// Returns true if the stream has enough send capacity.
6034 ///
6035 /// When `len` more bytes can be buffered into the given stream's send
6036 /// buffer, `true` will be returned, `false` otherwise.
6037 ///
6038 /// In the latter case, if the additional data can't be buffered due to
6039 /// flow control limits, the peer will also be notified, and a "low send
6040 /// watermark" will be set for the stream, such that it is not going to be
6041 /// reported as writable again by [`stream_writable_next()`] until its send
6042 /// capacity reaches `len`.
6043 ///
6044 /// If the specified stream doesn't exist (including when it has already
6045 /// been completed and closed), the [`InvalidStreamState`] error will be
6046 /// returned.
6047 ///
6048 /// In addition, if the peer has signalled that it doesn't want to receive
6049 /// any more data from this stream by sending the `STOP_SENDING` frame, the
6050 /// [`StreamStopped`] error will be returned.
6051 ///
6052 /// [`stream_writable_next()`]: struct.Connection.html#method.stream_writable_next
6053 /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
6054 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
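///
/// ## Examples:
///
/// A small sketch (connection setup elided) that only writes once the
/// stream can take the whole message:
///
/// ```no_run
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let local = socket.local_addr().unwrap();
/// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
/// # let stream_id = 0;
/// let msg = b"hello";
///
/// if conn.stream_writable(stream_id, msg.len())? {
///     conn.stream_send(stream_id, msg, false)?;
/// }
/// # Ok::<(), quiche::Error>(())
/// ```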
6055 #[inline]
6056 pub fn stream_writable(
6057 &mut self, stream_id: u64, len: usize,
6058 ) -> Result<bool> {
6059 if self.stream_capacity(stream_id)? >= len {
6060 return Ok(true);
6061 }
6062
6063 let stream = match self.streams.get_mut(stream_id) {
6064 Some(v) => v,
6065
6066 None => return Err(Error::InvalidStreamState(stream_id)),
6067 };
6068
6069 stream.send_lowat = cmp::max(1, len);
6070
6071 let is_writable = stream.is_writable();
6072
6073 let priority_key = Arc::clone(&stream.priority_key);
6074
6075 if self.max_tx_data - self.tx_data < len as u64 {
6076 self.blocked_limit = Some(self.max_tx_data);
6077 }
6078
6079 if stream.send.cap()? < len {
6080 let max_off = stream.send.max_off();
6081 if stream.send.blocked_at() != Some(max_off) {
6082 stream.send.update_blocked_at(Some(max_off));
6083 self.streams.insert_blocked(stream_id, max_off);
6084 }
6085 } else if is_writable {
6086 // When `stream_writable_next()` returns a stream, the writable
6087 // mark is removed, but because the stream is blocked by the
6088 // connection-level send capacity it won't be marked as writable
6089 // again once the capacity increases.
6090 //
6091 // Since the stream is writable already, mark it here instead.
6092 self.streams.insert_writable(&priority_key);
6093 }
6094
6095 Ok(false)
6096 }
6097
6098 /// Returns true if all the data has been read from the specified stream.
6099 ///
6100 /// This instructs the application that all the data received from the
6101 /// peer on the stream has been read, and there won't be anymore in the
6102 /// future.
6103 ///
6104 /// Basically this returns true when the peer either set the `fin` flag
6105 /// for the stream, or sent `RESET_STREAM`.
6106 #[inline]
6107 pub fn stream_finished(&self, stream_id: u64) -> bool {
6108 let stream = match self.streams.get(stream_id) {
6109 Some(v) => v,
6110
6111 None => return true,
6112 };
6113
6114 stream.recv.is_fin()
6115 }
6116
6117 /// Returns the number of bidirectional streams that can be created
6118 /// before the peer's stream count limit is reached.
6119 ///
6120 /// This can be useful to know if it's possible to create a bidirectional
6121 /// stream without trying it first.
6122 #[inline]
6123 pub fn peer_streams_left_bidi(&self) -> u64 {
6124 self.streams.peer_streams_left_bidi()
6125 }
6126
6127 /// Returns the number of unidirectional streams that can be created
6128 /// before the peer's stream count limit is reached.
6129 ///
6130 /// This can be useful to know if it's possible to create a unidirectional
6131 /// stream without trying it first.
6132 #[inline]
6133 pub fn peer_streams_left_uni(&self) -> u64 {
6134 self.streams.peer_streams_left_uni()
6135 }
6136
6137 /// Returns an iterator over streams that have outstanding data to read.
6138 ///
6139 /// Note that the iterator will only include streams that were readable at
6140 /// the time the iterator itself was created (i.e. when `readable()` was
6141 /// called). To account for newly readable streams, the iterator needs to
6142 /// be created again.
6143 ///
6144 /// ## Examples:
6145 ///
6146 /// ```no_run
6147 /// # let mut buf = [0; 512];
6148 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6149 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6150 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6151 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6152 /// # let local = socket.local_addr().unwrap();
6153 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6154 /// // Iterate over readable streams.
6155 /// for stream_id in conn.readable() {
6156 /// // Stream is readable, read until there's no more data.
6157 /// while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
6158 /// println!("Got {} bytes on stream {}", read, stream_id);
6159 /// }
6160 /// }
6161 /// # Ok::<(), quiche::Error>(())
6162 /// ```
6163 #[inline]
6164 pub fn readable(&self) -> StreamIter {
6165 self.streams.readable()
6166 }
6167
6168 /// Returns an iterator over streams that can be written in priority order.
6169 ///
6170 /// The priority order is based on RFC 9218 scheduling recommendations.
6171 /// Stream priority can be controlled using [`stream_priority()`]. In order
6172 /// to support fairness requirements, each time this method is called,
6173 /// internal state is updated. Therefore the iterator ordering can change
6174 /// between calls, even if no streams were added or removed.
6175 ///
6176 /// A "writable" stream is a stream that has enough flow control capacity to
6177 /// send data to the peer. To avoid buffering an infinite amount of data,
6178 /// streams are only allowed to buffer outgoing data up to the amount that
6179 /// the peer allows it to send.
6180 ///
6181 /// Note that the iterator will only include streams that were writable at
6182 /// the time the iterator itself was created (i.e. when `writable()` was
6183 /// called). To account for newly writable streams, the iterator needs to be
6184 /// created again.
6185 ///
6186 /// ## Examples:
6187 ///
6188 /// ```no_run
6189 /// # let mut buf = [0; 512];
6190 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6191 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6192 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6193 /// # let local = socket.local_addr().unwrap();
6194 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6195 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6196 /// // Iterate over writable streams.
6197 /// for stream_id in conn.writable() {
6198 /// // Stream is writable, write some data.
6199 /// if let Ok(written) = conn.stream_send(stream_id, &buf, false) {
6200 /// println!("Written {} bytes on stream {}", written, stream_id);
6201 /// }
6202 /// }
6203 /// # Ok::<(), quiche::Error>(())
6204 /// ```
6205 /// [`stream_priority()`]: struct.Connection.html#method.stream_priority
6206 #[inline]
6207 pub fn writable(&self) -> StreamIter {
6208 // If there is not enough connection-level send capacity, none of the
6209 // streams are writable, so return an empty iterator.
6210 if self.tx_cap == 0 {
6211 return StreamIter::default();
6212 }
6213
6214 self.streams.writable()
6215 }
6216
6217 /// Returns the maximum possible size of egress UDP payloads.
6218 ///
6219 /// This is the maximum size of UDP payloads that can be sent, and depends
6220 /// on both the configured maximum send payload size of the local endpoint
6221 /// (as configured with [`set_max_send_udp_payload_size()`]), as well as
6222 /// the transport parameter advertised by the remote peer.
6223 ///
6224 /// Note that this value can change during the lifetime of the connection,
6225 /// but should remain stable across consecutive calls to [`send()`].
6226 ///
6227 /// [`set_max_send_udp_payload_size()`]:
6228 /// struct.Config.html#method.set_max_send_udp_payload_size
6229 /// [`send()`]: struct.Connection.html#method.send
6230 pub fn max_send_udp_payload_size(&self) -> usize {
6231 let max_datagram_size = self
6232 .paths
6233 .get_active()
6234 .ok()
6235 .map(|p| p.recovery.max_datagram_size());
6236
6237 if let Some(max_datagram_size) = max_datagram_size {
6238 if self.is_established() {
6239 // We cap the maximum packet size to 16KB or so, so that it can
6240 // always be encoded with a 2-byte varint.
6241 return cmp::min(16383, max_datagram_size);
6242 }
6243 }
6244
6245 // Allow for 1200 bytes (minimum QUIC packet size) during the
6246 // handshake.
6247 MIN_CLIENT_INITIAL_LEN
6248 }
6249
6250 /// Schedule an ack-eliciting packet on the active path.
6251 ///
6252 /// QUIC packets might not contain ack-eliciting frames during normal
6253 /// operating conditions. If the packet would already contain
6254 /// ack-eliciting frames, this method does not change any behavior.
6255 /// However, if the packet would not ordinarily contain ack-eliciting
6256 /// frames, this method ensures that a PING frame is sent.
6257 ///
6258 /// Calling this method multiple times before [`send()`] has no effect.
6259 ///
6260 /// [`send()`]: struct.Connection.html#method.send
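///
/// ## Examples:
///
/// A small sketch (connection setup and socket I/O elided) of requesting
/// that the next outgoing packet elicits an acknowledgment from the peer:
///
/// ```no_run
/// # let mut out = [0; 1350];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let local = socket.local_addr().unwrap();
/// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
/// conn.send_ack_eliciting()?;
///
/// // The next packet produced by send() carries at least a PING frame if
/// // it would not otherwise be ack-eliciting.
/// let (_written, _send_info) = conn.send(&mut out)?;
/// # Ok::<(), quiche::Error>(())
/// ```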
6261 pub fn send_ack_eliciting(&mut self) -> Result<()> {
6262 if self.is_closed() || self.is_draining() {
6263 return Ok(());
6264 }
6265 self.paths.get_active_mut()?.needs_ack_eliciting = true;
6266 Ok(())
6267 }
6268
6269 /// Schedule an ack-eliciting packet on the specified path.
6270 ///
6271 /// See [`send_ack_eliciting()`] for more detail. [`InvalidState`] is
6272 /// returned if there is no record of the path.
6273 ///
6274 /// [`send_ack_eliciting()`]: struct.Connection.html#method.send_ack_eliciting
6275 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
6276 pub fn send_ack_eliciting_on_path(
6277 &mut self, local: SocketAddr, peer: SocketAddr,
6278 ) -> Result<()> {
6279 if self.is_closed() || self.is_draining() {
6280 return Ok(());
6281 }
6282 let path_id = self
6283 .paths
6284 .path_id_from_addrs(&(local, peer))
6285 .ok_or(Error::InvalidState)?;
6286 self.paths.get_mut(path_id)?.needs_ack_eliciting = true;
6287 Ok(())
6288 }
6289
6290 /// Reads the first received DATAGRAM.
6291 ///
6292 /// On success the DATAGRAM's data is returned along with its size.
6293 ///
6294 /// [`Done`] is returned if there is no data to read.
6295 ///
6296 /// [`BufferTooShort`] is returned if the provided buffer is too small for
6297 /// the DATAGRAM.
6298 ///
6299 /// [`Done`]: enum.Error.html#variant.Done
6300 /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
6301 ///
6302 /// ## Examples:
6303 ///
6304 /// ```no_run
6305 /// # let mut buf = [0; 512];
6306 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6307 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6308 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6309 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6310 /// # let local = socket.local_addr().unwrap();
6311 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6312 /// let mut dgram_buf = [0; 512];
6313 /// while let Ok(len) = conn.dgram_recv(&mut dgram_buf) {
6314 /// println!("Got {} bytes of DATAGRAM", len);
6315 /// }
6316 /// # Ok::<(), quiche::Error>(())
6317 /// ```
6318 #[inline]
6319 pub fn dgram_recv(&mut self, buf: &mut [u8]) -> Result<usize> {
6320 match self.dgram_recv_queue.pop() {
6321 Some(d) => {
6322 if d.len() > buf.len() {
6323 return Err(Error::BufferTooShort);
6324 }
6325
6326 buf[..d.len()].copy_from_slice(&d);
6327 Ok(d.len())
6328 },
6329
6330 None => Err(Error::Done),
6331 }
6332 }
6333
6334 /// Reads the first received DATAGRAM.
6335 ///
6336 /// This is the same as [`dgram_recv()`] but returns the DATAGRAM as a
6337 /// `Vec<u8>` instead of copying into the provided buffer.
6338 ///
6339 /// [`dgram_recv()`]: struct.Connection.html#method.dgram_recv
6340 #[inline]
6341 pub fn dgram_recv_vec(&mut self) -> Result<Vec<u8>> {
6342 match self.dgram_recv_queue.pop() {
6343 Some(d) => Ok(d),
6344
6345 None => Err(Error::Done),
6346 }
6347 }
6348
6349 /// Reads the first received DATAGRAM without removing it from the queue.
6350 ///
6351 /// On success the DATAGRAM's data is returned along with the actual number
6352 /// of bytes peeked. The requested length cannot exceed the DATAGRAM's
6353 /// actual length.
6354 ///
6355 /// [`Done`] is returned if there is no data to read.
6356 ///
6357 /// [`BufferTooShort`] is returned if the provided buffer is smaller than
6358 /// the number of bytes to peek.
6359 ///
6360 /// [`Done`]: enum.Error.html#variant.Done
6361 /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
6362 #[inline]
6363 pub fn dgram_recv_peek(&self, buf: &mut [u8], len: usize) -> Result<usize> {
6364 self.dgram_recv_queue.peek_front_bytes(buf, len)
6365 }
6366
6367 /// Returns the length of the first stored DATAGRAM.
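///
/// ## Examples:
///
/// A small sketch (connection setup elided) of sizing the receive buffer
/// to the first queued DATAGRAM before reading it:
///
/// ```no_run
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let local = socket.local_addr().unwrap();
/// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
/// if let Some(len) = conn.dgram_recv_front_len() {
///     let mut dgram_buf = vec![0; len];
///     let read = conn.dgram_recv(&mut dgram_buf)?;
///     println!("Got {} bytes of DATAGRAM", read);
/// }
/// # Ok::<(), quiche::Error>(())
/// ```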
6368 #[inline]
6369 pub fn dgram_recv_front_len(&self) -> Option<usize> {
6370 self.dgram_recv_queue.peek_front_len()
6371 }
6372
6373 /// Returns the number of items in the DATAGRAM receive queue.
6374 #[inline]
6375 pub fn dgram_recv_queue_len(&self) -> usize {
6376 self.dgram_recv_queue.len()
6377 }
6378
6379 /// Returns the total size of all items in the DATAGRAM receive queue.
6380 #[inline]
6381 pub fn dgram_recv_queue_byte_size(&self) -> usize {
6382 self.dgram_recv_queue.byte_size()
6383 }
6384
6385 /// Returns the number of items in the DATAGRAM send queue.
6386 #[inline]
6387 pub fn dgram_send_queue_len(&self) -> usize {
6388 self.dgram_send_queue.len()
6389 }
6390
6391 /// Returns the total size of all items in the DATAGRAM send queue.
6392 #[inline]
6393 pub fn dgram_send_queue_byte_size(&self) -> usize {
6394 self.dgram_send_queue.byte_size()
6395 }
6396
6397 /// Returns whether or not the DATAGRAM send queue is full.
6398 #[inline]
6399 pub fn is_dgram_send_queue_full(&self) -> bool {
6400 self.dgram_send_queue.is_full()
6401 }
6402
6403 /// Returns whether or not the DATAGRAM recv queue is full.
6404 #[inline]
6405 pub fn is_dgram_recv_queue_full(&self) -> bool {
6406 self.dgram_recv_queue.is_full()
6407 }
6408
6409 /// Sends data in a DATAGRAM frame.
6410 ///
6411 /// [`Done`] is returned if no data was written.
6412 /// [`InvalidState`] is returned if the peer does not support DATAGRAM.
6413 /// [`BufferTooShort`] is returned if the DATAGRAM frame length is larger
6414 /// than the peer's supported DATAGRAM frame length. Use
6415 /// [`dgram_max_writable_len()`] to get the largest supported DATAGRAM
6416 /// frame length.
6417 ///
6418 /// Note that there is no flow control of DATAGRAM frames, so in order to
6419 /// avoid buffering an infinite amount of frames we apply an internal
6420 /// limit.
6421 ///
6422 /// [`Done`]: enum.Error.html#variant.Done
6423 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
6424 /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
6425 /// [`dgram_max_writable_len()`]:
6426 /// struct.Connection.html#method.dgram_max_writable_len
6427 ///
6428 /// ## Examples:
6429 ///
6430 /// ```no_run
6431 /// # let mut buf = [0; 512];
6432 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6433 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6434 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6435 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6436 /// # let local = socket.local_addr().unwrap();
6437 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6438 /// conn.dgram_send(b"hello")?;
6439 /// # Ok::<(), quiche::Error>(())
6440 /// ```
6441 pub fn dgram_send(&mut self, buf: &[u8]) -> Result<()> {
6442 let max_payload_len = match self.dgram_max_writable_len() {
6443 Some(v) => v,
6444
6445 None => return Err(Error::InvalidState),
6446 };
6447
6448 if buf.len() > max_payload_len {
6449 return Err(Error::BufferTooShort);
6450 }
6451
6452 self.dgram_send_queue.push(buf.to_vec())?;
6453
6454 let active_path = self.paths.get_active_mut()?;
6455
6456 if self.dgram_send_queue.byte_size() >
6457 active_path.recovery.cwnd_available()
6458 {
6459 active_path.recovery.update_app_limited(false);
6460 }
6461
6462 Ok(())
6463 }
6464
6465 /// Sends data in a DATAGRAM frame.
6466 ///
6467 /// This is the same as [`dgram_send()`] but takes a `Vec<u8>` instead of
6468 /// a slice.
6469 ///
6470 /// [`dgram_send()`]: struct.Connection.html#method.dgram_send
6471 pub fn dgram_send_vec(&mut self, buf: Vec<u8>) -> Result<()> {
6472 let max_payload_len = match self.dgram_max_writable_len() {
6473 Some(v) => v,
6474
6475 None => return Err(Error::InvalidState),
6476 };
6477
6478 if buf.len() > max_payload_len {
6479 return Err(Error::BufferTooShort);
6480 }
6481
6482 self.dgram_send_queue.push(buf)?;
6483
6484 let active_path = self.paths.get_active_mut()?;
6485
6486 if self.dgram_send_queue.byte_size() >
6487 active_path.recovery.cwnd_available()
6488 {
6489 active_path.recovery.update_app_limited(false);
6490 }
6491
6492 Ok(())
6493 }
6494
6495 /// Purges queued outgoing DATAGRAMs matching the predicate.
6496 ///
6497 /// In other words, remove all elements `e` such that `f(&e)` returns true.
6498 ///
6499 /// ## Examples:
6500 /// ```no_run
6501 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6502 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6503 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6504 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6505 /// # let local = socket.local_addr().unwrap();
6506 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6507 /// conn.dgram_send(b"hello")?;
6508 /// conn.dgram_purge_outgoing(&|d: &[u8]| -> bool { d[0] == 0 });
6509 /// # Ok::<(), quiche::Error>(())
6510 /// ```
6511 #[inline]
6512 pub fn dgram_purge_outgoing<FN: Fn(&[u8]) -> bool>(&mut self, f: FN) {
6513 self.dgram_send_queue.purge(f);
6514 }
6515
6516 /// Returns the maximum DATAGRAM payload that can be sent.
6517 ///
6518 /// [`None`] is returned if the peer hasn't advertised a maximum DATAGRAM
6519 /// frame size.
6520 ///
6521 /// ## Examples:
6522 ///
6523 /// ```no_run
6524 /// # let mut buf = [0; 512];
6525 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6526 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6527 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6528 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6529 /// # let local = socket.local_addr().unwrap();
6530 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6531 /// if let Some(payload_size) = conn.dgram_max_writable_len() {
6532 /// if payload_size > 5 {
6533 /// conn.dgram_send(b"hello")?;
6534 /// }
6535 /// }
6536 /// # Ok::<(), quiche::Error>(())
6537 /// ```
6538 #[inline]
6539 pub fn dgram_max_writable_len(&self) -> Option<usize> {
6540 match self.peer_transport_params.max_datagram_frame_size {
6541 None => None,
6542 Some(peer_frame_len) => {
6543 let dcid = self.destination_id();
6544 // Start from the maximum packet size...
6545 let mut max_len = self.max_send_udp_payload_size();
6546 // ...subtract the Short packet header overhead...
6547 // (1 byte of flags + len of dcid)
6548 max_len = max_len.saturating_sub(1 + dcid.len());
6549 // ...subtract the packet number (max len)...
6550 max_len = max_len.saturating_sub(packet::MAX_PKT_NUM_LEN);
6551 // ...subtract the crypto overhead...
6552 max_len = max_len.saturating_sub(
6553 self.crypto_ctx[packet::Epoch::Application]
6554 .crypto_overhead()?,
6555 );
6556 // ...clamp to what peer can support...
6557 max_len = cmp::min(peer_frame_len as usize, max_len);
6558 // ...subtract frame overhead, checked for underflow.
6559 // (1 byte of frame type + len of length)
6560 max_len.checked_sub(1 + frame::MAX_DGRAM_OVERHEAD)
6561 },
6562 }
6563 }
6564
6565 fn dgram_enabled(&self) -> bool {
6566 self.local_transport_params
6567 .max_datagram_frame_size
6568 .is_some()
6569 }
6570
6571 /// Returns when the next timeout event will occur.
6572 ///
6573 /// Once the timeout Instant has been reached, the [`on_timeout()`] method
6574 /// should be called. A timeout of `None` means that the timer should be
6575 /// disarmed.
6576 ///
6577 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
6578 pub fn timeout_instant(&self) -> Option<Instant> {
6579 if self.is_closed() {
6580 return None;
6581 }
6582
6583 if self.is_draining() {
6584 // Draining timer takes precedence over all other timers. If it is
6585 // set it means the connection is closing so there's no point in
6586 // processing the other timers.
6587 self.draining_timer
6588 } else {
6589 // Use the lowest timer value (i.e. "sooner") among idle and loss
6590 // detection timers. If they are both unset (i.e. `None`) then the
6591 // result is `None`, but if at least one of them is set then a
6592 // `Some(...)` value is returned.
6593 let path_timer = self
6594 .paths
6595 .iter()
6596 .filter_map(|(_, p)| p.recovery.loss_detection_timer())
6597 .min();
6598
6599 let key_update_timer = self.crypto_ctx[packet::Epoch::Application]
6600 .key_update
6601 .as_ref()
6602 .map(|key_update| key_update.timer);
6603
6604 let timers = [self.idle_timer, path_timer, key_update_timer];
6605
6606 timers.iter().filter_map(|&x| x).min()
6607 }
6608 }
6609
6610 /// Returns the amount of time until the next timeout event.
6611 ///
6612 /// Once the given duration has elapsed, the [`on_timeout()`] method should
6613 /// be called. A timeout of `None` means that the timer should be disarmed.
6614 ///
6615 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
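///
/// ## Examples:
///
/// A minimal sketch (connection setup and the rest of the event loop
/// elided) of wiring the connection timer to an application timer:
///
/// ```no_run
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let local = socket.local_addr().unwrap();
/// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
/// // Arm the application's timer with the connection's next timeout (a
/// // `None` value means the timer should be disarmed)...
/// let _timeout = conn.timeout();
///
/// // ...and once that amount of time has elapsed without other events,
/// // let the connection process it.
/// conn.on_timeout();
/// # Ok::<(), quiche::Error>(())
/// ```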
6616 pub fn timeout(&self) -> Option<Duration> {
6617 self.timeout_instant().map(|timeout| {
6618 let now = Instant::now();
6619
6620 if timeout <= now {
6621 Duration::ZERO
6622 } else {
6623 timeout.duration_since(now)
6624 }
6625 })
6626 }
6627
6628 /// Processes a timeout event.
6629 ///
6630 /// If no timeout has occurred it does nothing.
6631 pub fn on_timeout(&mut self) {
6632 let now = Instant::now();
6633
6634 if let Some(draining_timer) = self.draining_timer {
6635 if draining_timer <= now {
6636 trace!("{} draining timeout expired", self.trace_id);
6637
6638 self.mark_closed();
6639 }
6640
6641 // Draining timer takes precedence over all other timers. If it is
6642 // set it means the connection is closing so there's no point in
6643 // processing the other timers.
6644 return;
6645 }
6646
6647 if let Some(timer) = self.idle_timer {
6648 if timer <= now {
6649 trace!("{} idle timeout expired", self.trace_id);
6650
6651 self.mark_closed();
6652 self.timed_out = true;
6653 return;
6654 }
6655 }
6656
6657 if let Some(timer) = self.crypto_ctx[packet::Epoch::Application]
6658 .key_update
6659 .as_ref()
6660 .map(|key_update| key_update.timer)
6661 {
6662 if timer <= now {
6663 // Discard previous key once key update timer expired.
6664 let _ = self.crypto_ctx[packet::Epoch::Application]
6665 .key_update
6666 .take();
6667 }
6668 }
6669
6670 let handshake_status = self.handshake_status();
6671
6672 for (_, p) in self.paths.iter_mut() {
6673 if let Some(timer) = p.recovery.loss_detection_timer() {
6674 if timer <= now {
6675 trace!("{} loss detection timeout expired", self.trace_id);
6676
6677 let OnLossDetectionTimeoutOutcome {
6678 lost_packets,
6679 lost_bytes,
6680 } = p.on_loss_detection_timeout(
6681 handshake_status,
6682 now,
6683 self.is_server,
6684 &self.trace_id,
6685 );
6686
6687 self.lost_count += lost_packets;
6688 self.lost_bytes += lost_bytes as u64;
6689
6690 qlog_with_type!(QLOG_METRICS, self.qlog, q, {
6691 p.recovery.maybe_qlog(q, now);
6692 });
6693 }
6694 }
6695 }
6696
6697 // Notify timeout events to the application.
6698 self.paths.notify_failed_validations();
6699
6700 // If the active path failed, try to find a new candidate.
6701 if self.paths.get_active_path_id().is_err() {
6702 match self.paths.find_candidate_path() {
6703 Some(pid) => {
6704 if self.set_active_path(pid, now).is_err() {
6705 // The connection cannot continue.
6706 self.mark_closed();
6707 }
6708 },
6709
6710 // The connection cannot continue.
6711 None => {
6712 self.mark_closed();
6713 },
6714 }
6715 }
6716 }
6717
6718 /// Requests the stack to perform path validation of the proposed 4-tuple.
6719 ///
6720 /// Probing new paths requires spare Connection IDs at both the host and the
6721 /// peer sides. If that is not the case, it raises an [`OutOfIdentifiers`].
6722 ///
6723 /// The probing of new addresses can only be done by the client. The server
6724 /// can only probe network paths that were previously advertised by
6725 /// [`PathEvent::New`]. If the server tries to probe such an unseen network
6726 /// path, this call raises an [`InvalidState`].
6727 ///
6728 /// The caller might also want to probe an existing path. In such case, it
6729 /// triggers a PATH_CHALLENGE frame, but it does not require spare CIDs.
6730 ///
6731 /// A server always probes a new path it observes. Calling this method is
6732 /// hence not required to validate a new path. However, a server can still
6733 /// request an additional path validation of the proposed 4-tuple.
6734 ///
6735 /// Calling this method several times before calling [`send()`] or
6736 /// [`send_on_path()`] results in a single probe being generated. An
6737 /// application wanting to send multiple in-flight probes must call this
6738 /// method again after having sent packets.
6739 ///
6740 /// Returns the Destination Connection ID sequence number associated to that
6741 /// path.
6742 ///
6743 /// [`PathEvent::New`]: enum.PathEvent.html#variant.New
6744 /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
6745 /// [`InvalidState`]: enum.Error.html#InvalidState
6746 /// [`send()`]: struct.Connection.html#method.send
6747 /// [`send_on_path()`]: struct.Connection.html#method.send_on_path
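///
/// ## Examples:
///
/// A client-side sketch (connection setup elided) that probes the same
/// peer from a different local address; the address used here is only a
/// placeholder and the corresponding socket is assumed to be bound by the
/// application:
///
/// ```no_run
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let local = socket.local_addr().unwrap();
/// # let mut conn = quiche::connect(None, &scid, local, peer, &mut config)?;
/// let new_local = "127.0.0.2:5678".parse().unwrap();
///
/// // Request validation of the (new_local, peer) 4-tuple; the probe is
/// // emitted by a later call to send() or send_on_path().
/// let dcid_seq = conn.probe_path(new_local, peer)?;
/// # Ok::<(), quiche::Error>(())
/// ```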
6748 pub fn probe_path(
6749 &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
6750 ) -> Result<u64> {
6751 // We may want to probe an existing path.
6752 let pid = match self.paths.path_id_from_addrs(&(local_addr, peer_addr)) {
6753 Some(pid) => pid,
6754 None => self.create_path_on_client(local_addr, peer_addr)?,
6755 };
6756
6757 let path = self.paths.get_mut(pid)?;
6758 path.request_validation();
6759
6760 path.active_dcid_seq.ok_or(Error::InvalidState)
6761 }
6762
6763 /// Migrates the connection to a new local address `local_addr`.
6764 ///
6765 /// The behavior is similar to [`migrate()`], with the nuance that the
6766 /// connection only changes the local address, but not the peer one.
6767 ///
6768 /// See [`migrate()`] for the full specification of this method.
6769 ///
6770 /// [`migrate()`]: struct.Connection.html#method.migrate
6771 pub fn migrate_source(&mut self, local_addr: SocketAddr) -> Result<u64> {
6772 let peer_addr = self.paths.get_active()?.peer_addr();
6773 self.migrate(local_addr, peer_addr)
6774 }
6775
6776 /// Migrates the connection over the given network path between `local_addr`
6777 /// and `peer_addr`.
6778 ///
6779 /// Connection migration can only be initiated by the client. Calling this
6780 /// method as a server returns [`InvalidState`].
6781 ///
6782 /// To initiate voluntary migration, there should be enough Connection IDs
6783 /// at both sides. If this requirement is not satisfied, this call returns
6784 /// [`OutOfIdentifiers`].
6785 ///
6786 /// Returns the Destination Connection ID sequence number associated to the migrated path.
6787 ///
6788 /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
6789 /// [`InvalidState`]: enum.Error.html#InvalidState
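///
/// ## Examples:
///
/// A client-side sketch (connection setup elided) that moves the
/// connection to a different local address; the address is a placeholder
/// and the corresponding socket is assumed to be managed by the
/// application:
///
/// ```no_run
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let local = socket.local_addr().unwrap();
/// # let mut conn = quiche::connect(None, &scid, local, peer, &mut config)?;
/// let new_local = "127.0.0.2:5678".parse().unwrap();
/// let dcid_seq = conn.migrate(new_local, peer)?;
/// # Ok::<(), quiche::Error>(())
/// ```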
6790 pub fn migrate(
6791 &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
6792 ) -> Result<u64> {
6793 if self.is_server {
6794 return Err(Error::InvalidState);
6795 }
6796
6797 // If the path already exists, mark it as the active one.
6798 let (pid, dcid_seq) = if let Some(pid) =
6799 self.paths.path_id_from_addrs(&(local_addr, peer_addr))
6800 {
6801 let path = self.paths.get_mut(pid)?;
6802
6803 // If it is already active, do nothing.
6804 if path.active() {
6805 return path.active_dcid_seq.ok_or(Error::OutOfIdentifiers);
6806 }
6807
6808 // Ensures that a Source Connection ID has been dedicated to this
6809 // path, or a free one is available. This is only required if the
6810 // host uses non-zero length Source Connection IDs.
6811 if !self.ids.zero_length_scid() &&
6812 path.active_scid_seq.is_none() &&
6813 self.ids.available_scids() == 0
6814 {
6815 return Err(Error::OutOfIdentifiers);
6816 }
6817
6818 // Ensures that the migrated path has a Destination Connection ID.
6819 let dcid_seq = if let Some(dcid_seq) = path.active_dcid_seq {
6820 dcid_seq
6821 } else {
6822 let dcid_seq = self
6823 .ids
6824 .lowest_available_dcid_seq()
6825 .ok_or(Error::OutOfIdentifiers)?;
6826
6827 self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
6828 path.active_dcid_seq = Some(dcid_seq);
6829
6830 dcid_seq
6831 };
6832
6833 (pid, dcid_seq)
6834 } else {
6835 let pid = self.create_path_on_client(local_addr, peer_addr)?;
6836
6837 let dcid_seq = self
6838 .paths
6839 .get(pid)?
6840 .active_dcid_seq
6841 .ok_or(Error::InvalidState)?;
6842
6843 (pid, dcid_seq)
6844 };
6845
6846 // Change the active path.
6847 self.set_active_path(pid, Instant::now())?;
6848
6849 Ok(dcid_seq)
6850 }
6851
6852 /// Provides additional source Connection IDs that the peer can use to reach
6853 /// this host.
6854 ///
6855 /// This triggers sending NEW_CONNECTION_ID frames if the provided Source
6856 /// Connection ID is not already present. If the caller tries to reuse a
6857 /// Connection ID with a different reset token, this raises an
6858 /// `InvalidState`.
6859 ///
6860 /// At any time, the peer cannot hold more Destination Connection IDs than
6861 /// the maximum number of active Connection IDs it negotiated. When that
6862 /// limit is reached (i.e., when [`scids_left()`] returns 0), the host can
6863 /// set the `retire_if_needed` parameter to request the removal of
6864 /// previously issued Connection IDs. Otherwise, an [`IdLimit`] is returned.
6865 ///
6866 /// Note that setting `retire_if_needed` does not prevent this function from
6867 /// returning an [`IdLimit`] when the Connection IDs that would need to be
6868 /// retired have not been announced to the peer yet.
6869 ///
6870 /// The caller is responsible for ensuring that the provided `scid` is not
6871 /// repeated several times over the connection. quiche ensures that as long
6872 /// as the provided Connection ID is still in use (i.e., not retired), it
6873 /// does not assign a different sequence number.
6874 ///
6875 /// Note that if the host uses zero-length Source Connection IDs, it cannot
6876 /// advertise Source Connection IDs and calling this method returns an
6877 /// [`InvalidState`].
6878 ///
6879 /// Returns the sequence number associated to the provided Connection ID.
6880 ///
6881 /// [`scids_left()`]: struct.Connection.html#method.scids_left
6882 /// [`IdLimit`]: enum.Error.html#variant.IdLimit
6883 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
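///
/// ## Examples:
///
/// A minimal sketch of advertising one extra Source Connection ID, assuming
/// the peer's active Connection ID limit still leaves room for it; the CID
/// bytes and reset token are placeholders:
///
/// ```no_run
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let local = "127.0.0.1:4321".parse().unwrap();
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let mut conn = quiche::connect(None, &scid, local, peer, &mut config)?;
/// if conn.scids_left() > 0 {
///     // Provide a new Source Connection ID along with its reset token.
///     let new_scid = quiche::ConnectionId::from_ref(&[0xbb; 16]);
///     let reset_token = 0xc0ffee_u128;
///
///     let seq = conn.new_scid(&new_scid, reset_token, false)?;
///     println!("advertised SCID with sequence number {}", seq);
/// }
/// # Ok::<(), quiche::Error>(())
/// ```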
6884 pub fn new_scid(
6885 &mut self, scid: &ConnectionId, reset_token: u128, retire_if_needed: bool,
6886 ) -> Result<u64> {
6887 self.ids.new_scid(
6888 scid.to_vec().into(),
6889 Some(reset_token),
6890 true,
6891 None,
6892 retire_if_needed,
6893 )
6894 }
6895
6896 /// Returns the number of source Connection IDs that are active. This is
6897 /// only meaningful if the host uses non-zero length Source Connection IDs.
6898 pub fn active_scids(&self) -> usize {
6899 self.ids.active_source_cids()
6900 }
6901
6902 /// Returns the number of source Connection IDs that should be provided
6903 /// to the peer without exceeding the limit it advertised.
6904 ///
6905 /// This will automatically limit the number of Connection IDs to the
6906 /// minimum between the locally configured active connection ID limit,
6907 /// and the one sent by the peer.
6908 ///
6909 /// To obtain the maximum possible value allowed by the peer, an application
6910 /// can instead inspect the [`peer_active_conn_id_limit`] value.
6911 ///
6912 /// [`peer_active_conn_id_limit`]: struct.Stats.html#structfield.peer_active_conn_id_limit
6913 #[inline]
6914 pub fn scids_left(&self) -> usize {
6915 let max_active_source_cids = cmp::min(
6916 self.peer_transport_params.active_conn_id_limit,
6917 self.local_transport_params.active_conn_id_limit,
6918 ) as usize;
6919
6920 max_active_source_cids - self.active_scids()
6921 }
6922
6923 /// Requests the retirement of the destination Connection ID used by the
6924 /// host to reach its peer.
6925 ///
6926 /// This triggers sending RETIRE_CONNECTION_ID frames.
6927 ///
6928 /// If the application tries to retire a non-existing Destination Connection
6929 /// ID sequence number, or if it uses zero-length Destination Connection IDs,
6930 /// this method returns an [`InvalidState`].
6931 ///
6932 /// At any time, the host must have at least one Destination Connection ID.
6933 /// If the application tries to retire the last one, or tries to retire the
6934 /// Destination Connection ID used by the current active path while having
6935 /// neither spare Destination Connection IDs nor validated network paths,
6936 /// this method returns an [`OutOfIdentifiers`]. This behavior prevents the
6937 /// caller from stalling the connection due to the lack of a validated path
6938 /// on which to send non-probing packets.
6939 ///
6940 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
6941 /// [`OutOfIdentifiers`]: enum.Error.html#variant.OutOfIdentifiers
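///
/// ## Examples:
///
/// A minimal sketch of retiring a Destination Connection ID, assuming the
/// peer already provided a spare one to switch to; the sequence number `0`
/// is a placeholder:
///
/// ```no_run
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let local = "127.0.0.1:4321".parse().unwrap();
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let mut conn = quiche::connect(None, &scid, local, peer, &mut config)?;
/// // Only retire when a spare Destination Connection ID is available.
/// if conn.available_dcids() > 0 {
///     conn.retire_dcid(0)?;
/// }
/// # Ok::<(), quiche::Error>(())
/// ```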
6942 pub fn retire_dcid(&mut self, dcid_seq: u64) -> Result<()> {
6943 if self.ids.zero_length_dcid() {
6944 return Err(Error::InvalidState);
6945 }
6946
6947 let active_path_dcid_seq = self
6948 .paths
6949 .get_active()?
6950 .active_dcid_seq
6951 .ok_or(Error::InvalidState)?;
6952
6953 let active_path_id = self.paths.get_active_path_id()?;
6954
6955 if active_path_dcid_seq == dcid_seq &&
6956 self.ids.lowest_available_dcid_seq().is_none() &&
6957 !self
6958 .paths
6959 .iter()
6960 .any(|(pid, p)| pid != active_path_id && p.usable())
6961 {
6962 return Err(Error::OutOfIdentifiers);
6963 }
6964
6965 if let Some(pid) = self.ids.retire_dcid(dcid_seq)? {
6966 // The retired Destination CID was associated to a given path. Let's
6967 // find an available DCID to associate to that path.
6968 let path = self.paths.get_mut(pid)?;
6969 let dcid_seq = self.ids.lowest_available_dcid_seq();
6970
6971 if let Some(dcid_seq) = dcid_seq {
6972 self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
6973 }
6974
6975 path.active_dcid_seq = dcid_seq;
6976 }
6977
6978 Ok(())
6979 }
6980
6981 /// Processes path-specific events.
6982 ///
6983 /// On success it returns a [`PathEvent`], or `None` when there are no
6984 /// events to report. Please refer to [`PathEvent`] for the exhaustive event
6985 /// list.
6986 ///
6987 /// Note that all events are edge-triggered, meaning that once reported they
6988 /// will not be reported again by calling this method again, until the event
6989 /// is re-armed.
6990 ///
6991 /// [`PathEvent`]: enum.PathEvent.html
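///
/// ## Examples:
///
/// A minimal sketch of draining path events after processing incoming
/// packets; only the `Validated` variant is handled explicitly here:
///
/// ```no_run
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let local = "127.0.0.1:4321".parse().unwrap();
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let mut conn = quiche::connect(None, &scid, local, peer, &mut config)?;
/// while let Some(ev) = conn.path_event_next() {
///     match ev {
///         quiche::PathEvent::Validated(local, peer) => {
///             // Non-probing packets can now be sent on this path.
///             println!("path ({}, {}) validated", local, peer);
///         },
///
///         // Other events (New, Closed, FailedValidation, ...) would be
///         // handled here.
///         _ => (),
///     }
/// }
/// # Ok::<(), quiche::Error>(())
/// ```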
6992 pub fn path_event_next(&mut self) -> Option<PathEvent> {
6993 self.paths.pop_event()
6994 }
6995
6996 /// Returns the number of source Connection IDs that are retired.
6997 pub fn retired_scids(&self) -> usize {
6998 self.ids.retired_source_cids()
6999 }
7000
7001 /// Returns a source `ConnectionId` that has been retired.
7002 ///
7003 /// On success it returns a [`ConnectionId`], or `None` when there are no
7004 /// more retired connection IDs.
7005 ///
7006 /// [`ConnectionId`]: struct.ConnectionId.html
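///
/// ## Examples:
///
/// A minimal sketch of draining the retired Source Connection IDs, e.g. to
/// remove them from an application-side routing table:
///
/// ```no_run
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let local = "127.0.0.1:4321".parse().unwrap();
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let mut conn = quiche::connect(None, &scid, local, peer, &mut config)?;
/// while let Some(retired_scid) = conn.retired_scid_next() {
///     // The peer will not send packets with this Connection ID anymore.
///     println!("retired SCID {:?}", retired_scid);
/// }
/// # Ok::<(), quiche::Error>(())
/// ```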
7007 pub fn retired_scid_next(&mut self) -> Option<ConnectionId<'static>> {
7008 self.ids.pop_retired_scid()
7009 }
7010
7011 /// Returns the number of spare Destination Connection IDs, i.e.,
7012 /// Destination Connection IDs that are still unused.
7013 ///
7014 /// Note that this function returns 0 if the host uses zero-length
7015 /// Destination Connection IDs.
7016 pub fn available_dcids(&self) -> usize {
7017 self.ids.available_dcids()
7018 }
7019
7020 /// Returns an iterator over the destination `SocketAddr`s whose association
7021 /// with `from` forms a known QUIC path on which packets can be sent.
7022 ///
7023 /// This function is typically used in combination with [`send_on_path()`].
7024 ///
7025 /// Note that the iterator includes all the possible combinations of
7026 /// destination `SocketAddr`s, even those that do not currently require
7027 /// sending packets. In other words, this is another way for the application
7028 /// to recall the network paths reported by past [`PathEvent::New`] events.
7029 ///
7030 /// [`PathEvent::New`]: enum.PathEvent.html#variant.New
7031 /// [`send_on_path()`]: struct.Connection.html#method.send_on_path
7032 ///
7033 /// ## Examples:
7034 ///
7035 /// ```no_run
7036 /// # let mut out = [0; 512];
7037 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
7038 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
7039 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
7040 /// # let local = socket.local_addr().unwrap();
7041 /// # let peer = "127.0.0.1:1234".parse().unwrap();
7042 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
7043 /// // Iterate over possible destinations for the given local `SockAddr`.
7044 /// for dest in conn.paths_iter(local) {
7045 /// loop {
7046 /// let (write, send_info) =
7047 /// match conn.send_on_path(&mut out, Some(local), Some(dest)) {
7048 /// Ok(v) => v,
7049 ///
7050 /// Err(quiche::Error::Done) => {
7051 /// // Done writing for this destination.
7052 /// break;
7053 /// },
7054 ///
7055 /// Err(e) => {
7056 /// // An error occurred, handle it.
7057 /// break;
7058 /// },
7059 /// };
7060 ///
7061 /// socket.send_to(&out[..write], &send_info.to).unwrap();
7062 /// }
7063 /// }
7064 /// # Ok::<(), quiche::Error>(())
7065 /// ```
7066 #[inline]
7067 pub fn paths_iter(&self, from: SocketAddr) -> SocketAddrIter {
7068 // Instead of trying to identify whether packets will be sent on the
7069 // given 4-tuple, simply filter paths that cannot be used.
7070 SocketAddrIter {
7071 sockaddrs: self
7072 .paths
7073 .iter()
7074 .filter(|(_, p)| p.active_dcid_seq.is_some())
7075 .filter(|(_, p)| p.usable() || p.probing_required())
7076 .filter(|(_, p)| p.local_addr() == from)
7077 .map(|(_, p)| p.peer_addr())
7078 .collect(),
7079
7080 index: 0,
7081 }
7082 }
7083
7084 /// Closes the connection with the given error and reason.
7085 ///
7086 /// The `app` parameter specifies whether an application close should be
7087 /// sent to the peer. Otherwise a normal connection close is sent.
7088 ///
7089 /// If `app` is true but the connection is not in a state where it is safe to
7090 /// send an application error (i.e., neither established nor in early data), in
7091 /// accordance with [RFC
7092 /// 9000](https://www.rfc-editor.org/rfc/rfc9000.html#section-10.2.3-3), the
7093 /// error code is changed to APPLICATION_ERROR and the reason phrase is
7094 /// cleared.
7095 ///
7096 /// Returns [`Done`] if the connection had already been closed.
7097 ///
7098 /// Note that the connection will not be closed immediately. An application
7099 /// should continue calling the [`recv()`], [`send()`], [`timeout()`] and
7100 /// [`on_timeout()`] methods as normal, until the [`is_closed()`] method
7101 /// returns `true`.
7102 ///
7103 /// [`Done`]: enum.Error.html#variant.Done
7104 /// [`recv()`]: struct.Connection.html#method.recv
7105 /// [`send()`]: struct.Connection.html#method.send
7106 /// [`timeout()`]: struct.Connection.html#method.timeout
7107 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
7108 /// [`is_closed()`]: struct.Connection.html#method.is_closed
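///
/// ## Examples:
///
/// A minimal sketch of closing a connection with a placeholder application
/// error code and reason:
///
/// ```no_run
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let local = "127.0.0.1:4321".parse().unwrap();
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let mut conn = quiche::connect(None, &scid, local, peer, &mut config)?;
/// // Signal an application-level close; keep driving the connection with
/// // recv()/send()/on_timeout() until is_closed() returns true.
/// conn.close(true, 0x42, b"shutting down")?;
/// # Ok::<(), quiche::Error>(())
/// ```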
7109 pub fn close(&mut self, app: bool, err: u64, reason: &[u8]) -> Result<()> {
7110 if self.is_closed() || self.is_draining() {
7111 return Err(Error::Done);
7112 }
7113
7114 if self.local_error.is_some() {
7115 return Err(Error::Done);
7116 }
7117
7118 let is_safe_to_send_app_data =
7119 self.is_established() || self.is_in_early_data();
7120
7121 if app && !is_safe_to_send_app_data {
7122 // Clear error information.
7123 self.local_error = Some(ConnectionError {
7124 is_app: false,
7125 error_code: 0x0c,
7126 reason: vec![],
7127 });
7128 } else {
7129 self.local_error = Some(ConnectionError {
7130 is_app: app,
7131 error_code: err,
7132 reason: reason.to_vec(),
7133 });
7134 }
7135
7136 // When no packet was successfully processed, close the connection immediately.
7137 if self.recv_count == 0 {
7138 self.mark_closed();
7139 }
7140
7141 Ok(())
7142 }
7143
7144 /// Returns a string uniquely representing the connection.
7145 ///
7146 /// This can be used for logging purposes to differentiate between multiple
7147 /// connections.
7148 #[inline]
7149 pub fn trace_id(&self) -> &str {
7150 &self.trace_id
7151 }
7152
7153 /// Returns the negotiated ALPN protocol.
7154 ///
7155 /// If no protocol has been negotiated, the returned value is empty.
7156 #[inline]
7157 pub fn application_proto(&self) -> &[u8] {
7158 self.alpn.as_ref()
7159 }
7160
7161 /// Returns the server name requested by the client.
7162 #[inline]
7163 pub fn server_name(&self) -> Option<&str> {
7164 self.handshake.server_name()
7165 }
7166
7167 /// Returns the peer's leaf certificate (if any) as a DER-encoded buffer.
7168 #[inline]
7169 pub fn peer_cert(&self) -> Option<&[u8]> {
7170 self.handshake.peer_cert()
7171 }
7172
7173 /// Returns the peer's certificate chain (if any) as a vector of DER-encoded
7174 /// buffers.
7175 ///
7176 /// The certificate at index 0 is the peer's leaf certificate, the other
7177 /// certificates (if any) are the chain certificate authorities used to
7178 /// sign the leaf certificate.
7179 #[inline]
7180 pub fn peer_cert_chain(&self) -> Option<Vec<&[u8]>> {
7181 self.handshake.peer_cert_chain()
7182 }
7183
7184 /// Returns the serialized cryptographic session for the connection.
7185 ///
7186 /// This can be used by a client to cache a connection's session, and resume
7187 /// it later using the [`set_session()`] method.
7188 ///
7189 /// [`set_session()`]: struct.Connection.html#method.set_session
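///
/// ## Examples:
///
/// A minimal sketch of caching the session once the handshake completes; how
/// the bytes are persisted (here simply copied into a `Vec`) is up to the
/// application:
///
/// ```no_run
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let local = "127.0.0.1:4321".parse().unwrap();
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let conn = quiche::connect(None, &scid, local, peer, &mut config)?;
/// if conn.is_established() {
///     if let Some(session) = conn.session() {
///         // Store the serialized session so a future connection can call
///         // set_session() with it and attempt resumption.
///         let cached_session = session.to_vec();
///         println!("cached {} bytes of session state", cached_session.len());
///     }
/// }
/// # Ok::<(), quiche::Error>(())
/// ```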
7190 #[inline]
7191 pub fn session(&self) -> Option<&[u8]> {
7192 self.session.as_deref()
7193 }
7194
7195 /// Returns the source connection ID.
7196 ///
7197 /// When there are multiple IDs, and if there is an active path, the ID used
7198 /// on that path is returned. Otherwise the oldest ID is returned.
7199 ///
7200 /// Note that the value returned can change throughout the connection's
7201 /// lifetime.
7202 #[inline]
7203 pub fn source_id(&self) -> ConnectionId<'_> {
7204 if let Ok(path) = self.paths.get_active() {
7205 if let Some(active_scid_seq) = path.active_scid_seq {
7206 if let Ok(e) = self.ids.get_scid(active_scid_seq) {
7207 return ConnectionId::from_ref(e.cid.as_ref());
7208 }
7209 }
7210 }
7211
7212 let e = self.ids.oldest_scid();
7213 ConnectionId::from_ref(e.cid.as_ref())
7214 }
7215
7216 /// Returns all active source connection IDs.
7217 ///
7218 /// An iterator is returned for all active IDs (i.e. ones that have not
7219 /// been explicitly retired yet).
7220 #[inline]
7221 pub fn source_ids(&self) -> impl Iterator<Item = &ConnectionId<'_>> {
7222 self.ids.scids_iter()
7223 }
7224
7225 /// Returns the destination connection ID.
7226 ///
7227 /// Note that the value returned can change throughout the connection's
7228 /// lifetime.
7229 #[inline]
7230 pub fn destination_id(&self) -> ConnectionId<'_> {
7231 if let Ok(path) = self.paths.get_active() {
7232 if let Some(active_dcid_seq) = path.active_dcid_seq {
7233 if let Ok(e) = self.ids.get_dcid(active_dcid_seq) {
7234 return ConnectionId::from_ref(e.cid.as_ref());
7235 }
7236 }
7237 }
7238
7239 let e = self.ids.oldest_dcid();
7240 ConnectionId::from_ref(e.cid.as_ref())
7241 }
7242
7243 /// Returns the PMTU for the active path if it exists.
7244 ///
7245 /// This requires no additional packets to be sent, but simply checks if
7246 /// PMTUD has completed and has found a valid PMTU.
7247 #[inline]
7248 pub fn pmtu(&self) -> Option<usize> {
7249 if let Ok(path) = self.paths.get_active() {
7250 path.pmtud.as_ref().and_then(|pmtud| pmtud.get_pmtu())
7251 } else {
7252 None
7253 }
7254 }
7255
7256 /// Revalidates the PMTU for the active path by sending a new probe packet
7257 /// of PMTU size. If the probe is dropped, PMTUD will restart and find a
7258 /// new valid PMTU.
7259 #[inline]
7260 pub fn revalidate_pmtu(&mut self) {
7261 if let Ok(active_path) = self.paths.get_active_mut() {
7262 if let Some(pmtud) = active_path.pmtud.as_mut() {
7263 pmtud.revalidate_pmtu();
7264 }
7265 }
7266 }
7267
7268 /// Returns true if the connection handshake is complete.
7269 #[inline]
7270 pub fn is_established(&self) -> bool {
7271 self.handshake_completed
7272 }
7273
7274 /// Returns true if the connection is resumed.
7275 #[inline]
7276 pub fn is_resumed(&self) -> bool {
7277 self.handshake.is_resumed()
7278 }
7279
7280 /// Returns true if the connection has a pending handshake that has
7281 /// progressed enough to send or receive early data.
7282 #[inline]
7283 pub fn is_in_early_data(&self) -> bool {
7284 self.handshake.is_in_early_data()
7285 }
7286
7287 /// Returns the early data reason for the connection.
7288 ///
7289 /// This status can be useful for logging and debugging. See [BoringSSL]
7290 /// documentation for a definition of the reasons.
7291 ///
7292 /// [BoringSSL]: https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#ssl_early_data_reason_t
7293 #[inline]
7294 pub fn early_data_reason(&self) -> u32 {
7295 self.handshake.early_data_reason()
7296 }
7297
7298 /// Returns whether there is stream or DATAGRAM data available to read.
7299 #[inline]
7300 pub fn is_readable(&self) -> bool {
7301 self.streams.has_readable() || self.dgram_recv_front_len().is_some()
7302 }
7303
7304 /// Returns whether the network path with local address `from` and remote
7305 /// address `to` has been validated.
7306 ///
7307 /// If the 4-tuple does not exist over the connection, returns an
7308 /// [`InvalidState`].
7309 ///
7310 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
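///
/// ## Examples:
///
/// A minimal sketch of checking a path before sending non-probing packets on
/// it; `local` and `peer` are placeholder addresses of an already known
/// 4-tuple:
///
/// ```no_run
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let local = "127.0.0.1:4321".parse().unwrap();
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let conn = quiche::connect(None, &scid, local, peer, &mut config)?;
/// if conn.is_path_validated(local, peer)? {
///     // The path has been validated, so non-probing packets can be sent on
///     // it, e.g. by migrating to it or by using send_on_path().
///     println!("path ({}, {}) is validated", local, peer);
/// }
/// # Ok::<(), quiche::Error>(())
/// ```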
7311 pub fn is_path_validated(
7312 &self, from: SocketAddr, to: SocketAddr,
7313 ) -> Result<bool> {
7314 let pid = self
7315 .paths
7316 .path_id_from_addrs(&(from, to))
7317 .ok_or(Error::InvalidState)?;
7318
7319 Ok(self.paths.get(pid)?.validated())
7320 }
7321
7322 /// Returns true if the connection is draining.
7323 ///
7324 /// If this returns `true`, the connection object cannot yet be dropped, but
7325 /// no new application data can be sent or received. An application should
7326 /// continue calling the [`recv()`], [`timeout()`], and [`on_timeout()`]
7327 /// methods as normal, until the [`is_closed()`] method returns `true`.
7328 ///
7329 /// In contrast, once `is_draining()` returns `true`, calling [`send()`]
7330 /// is not required because no new outgoing packets will be generated.
7331 ///
7332 /// [`recv()`]: struct.Connection.html#method.recv
7333 /// [`send()`]: struct.Connection.html#method.send
7334 /// [`timeout()`]: struct.Connection.html#method.timeout
7335 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
7336 /// [`is_closed()`]: struct.Connection.html#method.is_closed
7337 #[inline]
7338 pub fn is_draining(&self) -> bool {
7339 self.draining_timer.is_some()
7340 }
7341
7342 /// Returns true if the connection is closed.
7343 ///
7344 /// If this returns true, the connection object can be dropped.
7345 #[inline]
7346 pub fn is_closed(&self) -> bool {
7347 self.closed
7348 }
7349
7350 /// Returns true if the connection was closed due to the idle timeout.
7351 #[inline]
7352 pub fn is_timed_out(&self) -> bool {
7353 self.timed_out
7354 }
7355
7356 /// Returns the error received from the peer, if any.
7357 ///
7358 /// Note that a `Some` return value does not necessarily imply
7359 /// [`is_closed()`] or any other connection state.
7360 ///
7361 /// [`is_closed()`]: struct.Connection.html#method.is_closed
7362 #[inline]
7363 pub fn peer_error(&self) -> Option<&ConnectionError> {
7364 self.peer_error.as_ref()
7365 }
7366
7367 /// Returns the error [`close()`] was called with, or internally
7368 /// created quiche errors, if any.
7369 ///
7370 /// Note that a `Some` return value does not necessarily imply
7371 /// [`is_closed()`] or any other connection state.
7372 /// `Some` also does not guarantee that the error has been sent to
7373 /// or received by the peer.
7374 ///
7375 /// [`close()`]: struct.Connection.html#method.close
7376 /// [`is_closed()`]: struct.Connection.html#method.is_closed
7377 #[inline]
7378 pub fn local_error(&self) -> Option<&ConnectionError> {
7379 self.local_error.as_ref()
7380 }
7381
7382 /// Collects and returns statistics about the connection.
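///
/// ## Examples:
///
/// A minimal sketch of logging a few of the returned counters:
///
/// ```no_run
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let local = "127.0.0.1:4321".parse().unwrap();
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let conn = quiche::connect(None, &scid, local, peer, &mut config)?;
/// let stats = conn.stats();
///
/// println!(
///     "packets: sent={} recv={} lost={}",
///     stats.sent, stats.recv, stats.lost
/// );
/// # Ok::<(), quiche::Error>(())
/// ```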
7383 #[inline]
7384 pub fn stats(&self) -> Stats {
7385 Stats {
7386 recv: self.recv_count,
7387 sent: self.sent_count,
7388 lost: self.lost_count,
7389 spurious_lost: self.spurious_lost_count,
7390 retrans: self.retrans_count,
7391 sent_bytes: self.sent_bytes,
7392 recv_bytes: self.recv_bytes,
7393 acked_bytes: self.acked_bytes,
7394 lost_bytes: self.lost_bytes,
7395 stream_retrans_bytes: self.stream_retrans_bytes,
7396 dgram_recv: self.dgram_recv_count,
7397 dgram_sent: self.dgram_sent_count,
7398 paths_count: self.paths.len(),
7399 reset_stream_count_local: self.reset_stream_local_count,
7400 stopped_stream_count_local: self.stopped_stream_local_count,
7401 reset_stream_count_remote: self.reset_stream_remote_count,
7402 stopped_stream_count_remote: self.stopped_stream_remote_count,
7403 data_blocked_sent_count: self.data_blocked_sent_count,
7404 stream_data_blocked_sent_count: self.stream_data_blocked_sent_count,
7405 data_blocked_recv_count: self.data_blocked_recv_count,
7406 stream_data_blocked_recv_count: self.stream_data_blocked_recv_count,
7407 path_challenge_rx_count: self.path_challenge_rx_count,
7408 bytes_in_flight_duration: self.bytes_in_flight_duration(),
7409 tx_buffered_state: self.tx_buffered_state,
7410 }
7411 }
7412
7413 /// Returns the sum of the durations when each path in the
7414 /// connection was actively sending bytes or waiting for acks.
7415 /// Note that this could result in a duration that is longer than
7416 /// the actual connection duration in cases where multiple paths
7417 /// are active for extended periods of time. In practice, only one
7418 /// path is typically active at a time.
7419 /// TODO revisit computation if in the future multiple paths are
7420 /// often active at the same time.
7421 fn bytes_in_flight_duration(&self) -> Duration {
7422 self.paths.iter().fold(Duration::ZERO, |acc, (_, path)| {
7423 acc + path.bytes_in_flight_duration()
7424 })
7425 }
7426
7427 /// Returns a reference to the peer's transport parameters. Returns `None`
7428 /// if we have not yet processed the peer's transport parameters.
7429 pub fn peer_transport_params(&self) -> Option<&TransportParams> {
7430 if !self.parsed_peer_transport_params {
7431 return None;
7432 }
7433
7434 Some(&self.peer_transport_params)
7435 }
7436
7437 /// Collects and returns statistics about each known path for the
7438 /// connection.
7439 pub fn path_stats(&self) -> impl Iterator<Item = PathStats> + '_ {
7440 self.paths.iter().map(|(_, p)| p.stats())
7441 }
7442
7443 /// Returns whether or not this is a server-side connection.
7444 pub fn is_server(&self) -> bool {
7445 self.is_server
7446 }
7447
7448 fn encode_transport_params(&mut self) -> Result<()> {
7449 self.handshake.set_quic_transport_params(
7450 &self.local_transport_params,
7451 self.is_server,
7452 )
7453 }
7454
7455 fn parse_peer_transport_params(
7456 &mut self, peer_params: TransportParams,
7457 ) -> Result<()> {
7458 // Validate initial_source_connection_id.
7459 match &peer_params.initial_source_connection_id {
7460 Some(v) if v != &self.destination_id() =>
7461 return Err(Error::InvalidTransportParam),
7462
7463 Some(_) => (),
7464
7465 // initial_source_connection_id must be sent by
7466 // both endpoints.
7467 None => return Err(Error::InvalidTransportParam),
7468 }
7469
7470 // Validate original_destination_connection_id.
7471 if let Some(odcid) = &self.odcid {
7472 match &peer_params.original_destination_connection_id {
7473 Some(v) if v != odcid =>
7474 return Err(Error::InvalidTransportParam),
7475
7476 Some(_) => (),
7477
7478 // original_destination_connection_id must be
7479 // sent by the server.
7480 None if !self.is_server =>
7481 return Err(Error::InvalidTransportParam),
7482
7483 None => (),
7484 }
7485 }
7486
7487 // Validate retry_source_connection_id.
7488 if let Some(rscid) = &self.rscid {
7489 match &peer_params.retry_source_connection_id {
7490 Some(v) if v != rscid =>
7491 return Err(Error::InvalidTransportParam),
7492
7493 Some(_) => (),
7494
7495 // retry_source_connection_id must be sent by
7496 // the server.
7497 None => return Err(Error::InvalidTransportParam),
7498 }
7499 }
7500
7501 self.process_peer_transport_params(peer_params)?;
7502
7503 self.parsed_peer_transport_params = true;
7504
7505 Ok(())
7506 }
7507
7508 fn process_peer_transport_params(
7509 &mut self, peer_params: TransportParams,
7510 ) -> Result<()> {
7511 self.max_tx_data = peer_params.initial_max_data;
7512
7513 // Update send capacity.
7514 self.update_tx_cap();
7515
7516 self.streams
7517 .update_peer_max_streams_bidi(peer_params.initial_max_streams_bidi);
7518 self.streams
7519 .update_peer_max_streams_uni(peer_params.initial_max_streams_uni);
7520
7521 let max_ack_delay = Duration::from_millis(peer_params.max_ack_delay);
7522
7523 self.recovery_config.max_ack_delay = max_ack_delay;
7524
7525 let active_path = self.paths.get_active_mut()?;
7526
7527 active_path.recovery.update_max_ack_delay(max_ack_delay);
7528
7529 if active_path
7530 .pmtud
7531 .as_ref()
7532 .map(|pmtud| pmtud.should_probe())
7533 .unwrap_or(false)
7534 {
7535 active_path.recovery.pmtud_update_max_datagram_size(
7536 active_path
7537 .pmtud
7538 .as_mut()
7539 .expect("PMTUD existence verified above")
7540 .get_probe_size()
7541 .min(peer_params.max_udp_payload_size as usize),
7542 );
7543 } else {
7544 active_path.recovery.update_max_datagram_size(
7545 peer_params.max_udp_payload_size as usize,
7546 );
7547 }
7548
7549 // Record the active_connection_id_limit parameter advertised by the peer.
7550 self.ids
7551 .set_source_conn_id_limit(peer_params.active_conn_id_limit);
7552
7553 self.peer_transport_params = peer_params;
7554
7555 Ok(())
7556 }
7557
7558 /// Continues the handshake.
7559 ///
7560 /// If the connection is already established, it does nothing.
7561 fn do_handshake(&mut self, now: Instant) -> Result<()> {
7562 let mut ex_data = tls::ExData {
7563 application_protos: &self.application_protos,
7564
7565 crypto_ctx: &mut self.crypto_ctx,
7566
7567 session: &mut self.session,
7568
7569 local_error: &mut self.local_error,
7570
7571 keylog: self.keylog.as_mut(),
7572
7573 trace_id: &self.trace_id,
7574
7575 local_transport_params: self.local_transport_params.clone(),
7576
7577 recovery_config: self.recovery_config,
7578
7579 tx_cap_factor: self.tx_cap_factor,
7580
7581 pmtud: None,
7582
7583 is_server: self.is_server,
7584 };
7585
7586 if self.handshake_completed {
7587 return self.handshake.process_post_handshake(&mut ex_data);
7588 }
7589
7590 match self.handshake.do_handshake(&mut ex_data) {
7591 Ok(_) => (),
7592
7593 Err(Error::Done) => {
7594 // Apply in-handshake configuration from callbacks if the path's
7595 // Recovery module can still be reinitialized.
7596 if self
7597 .paths
7598 .get_active()
7599 .map(|p| p.can_reinit_recovery())
7600 .unwrap_or(false)
7601 {
7602 if ex_data.recovery_config != self.recovery_config {
7603 if let Ok(path) = self.paths.get_active_mut() {
7604 self.recovery_config = ex_data.recovery_config;
7605 path.reinit_recovery(&self.recovery_config);
7606 }
7607 }
7608
7609 if ex_data.tx_cap_factor != self.tx_cap_factor {
7610 self.tx_cap_factor = ex_data.tx_cap_factor;
7611 }
7612
7613 if let Some((discover, max_probes)) = ex_data.pmtud {
7614 self.paths.set_discover_pmtu_on_existing_paths(
7615 discover,
7616 self.recovery_config.max_send_udp_payload_size,
7617 max_probes,
7618 );
7619 }
7620
7621 if ex_data.local_transport_params !=
7622 self.local_transport_params
7623 {
7624 self.streams.set_max_streams_bidi(
7625 ex_data
7626 .local_transport_params
7627 .initial_max_streams_bidi,
7628 );
7629
7630 self.local_transport_params =
7631 ex_data.local_transport_params;
7632 }
7633 }
7634
7635 // Try to parse transport parameters as soon as the first flight
7636 // of handshake data is processed.
7637 //
7638 // This is potentially dangerous as the handshake hasn't been
7639 // completed yet, though it's required to be able to send data
7640 // in 0.5 RTT.
7641 let raw_params = self.handshake.quic_transport_params();
7642
7643 if !self.parsed_peer_transport_params && !raw_params.is_empty() {
7644 let peer_params = TransportParams::decode(
7645 raw_params,
7646 self.is_server,
7647 self.peer_transport_params_track_unknown,
7648 )?;
7649
7650 self.parse_peer_transport_params(peer_params)?;
7651 }
7652
7653 return Ok(());
7654 },
7655
7656 Err(e) => return Err(e),
7657 };
7658
7659 self.handshake_completed = self.handshake.is_completed();
7660
7661 self.alpn = self.handshake.alpn_protocol().to_vec();
7662
7663 let raw_params = self.handshake.quic_transport_params();
7664
7665 if !self.parsed_peer_transport_params && !raw_params.is_empty() {
7666 let peer_params = TransportParams::decode(
7667 raw_params,
7668 self.is_server,
7669 self.peer_transport_params_track_unknown,
7670 )?;
7671
7672 self.parse_peer_transport_params(peer_params)?;
7673 }
7674
7675 if self.handshake_completed {
7676 // The handshake is considered confirmed at the server when the
7677 // handshake completes, at which point we can also drop the
7678 // handshake epoch.
7679 if self.is_server {
7680 self.handshake_confirmed = true;
7681
7682 self.drop_epoch_state(packet::Epoch::Handshake, now);
7683 }
7684
7685 // Once the handshake is completed there's no point in processing
7686 // 0-RTT packets anymore, so clear the buffer now.
7687 self.undecryptable_pkts.clear();
7688
7689 trace!("{} connection established: proto={:?} cipher={:?} curve={:?} sigalg={:?} resumed={} {:?}",
7690 &self.trace_id,
7691 std::str::from_utf8(self.application_proto()),
7692 self.handshake.cipher(),
7693 self.handshake.curve(),
7694 self.handshake.sigalg(),
7695 self.handshake.is_resumed(),
7696 self.peer_transport_params);
7697 }
7698
7699 Ok(())
7700 }
7701
7702 /// Selects the packet type for the next outgoing packet.
7703 fn write_pkt_type(&self, send_pid: usize) -> Result<Type> {
7704 // On error, send the packet in the latest epoch available, but only
7705 // send 1-RTT ones when the handshake is completed.
7706 if self
7707 .local_error
7708 .as_ref()
7709 .is_some_and(|conn_err| !conn_err.is_app)
7710 {
7711 let epoch = match self.handshake.write_level() {
7712 crypto::Level::Initial => packet::Epoch::Initial,
7713 crypto::Level::ZeroRTT => unreachable!(),
7714 crypto::Level::Handshake => packet::Epoch::Handshake,
7715 crypto::Level::OneRTT => packet::Epoch::Application,
7716 };
7717
7718 if !self.handshake_confirmed {
7719 match epoch {
7720 // Downgrade the epoch to Handshake as the handshake is not
7721 // completed yet.
7722 packet::Epoch::Application => return Ok(Type::Handshake),
7723
7724 // Downgrade the epoch to Initial as the remote peer might
7725 // not be able to decrypt handshake packets yet.
7726 packet::Epoch::Handshake
7727 if self.crypto_ctx[packet::Epoch::Initial].has_keys() =>
7728 return Ok(Type::Initial),
7729
7730 _ => (),
7731 };
7732 }
7733
7734 return Ok(Type::from_epoch(epoch));
7735 }
7736
7737 for &epoch in packet::Epoch::epochs(
7738 packet::Epoch::Initial..=packet::Epoch::Application,
7739 ) {
7740 let crypto_ctx = &self.crypto_ctx[epoch];
7741 let pkt_space = &self.pkt_num_spaces[epoch];
7742
7743 // Only send packets in a space when we have the send keys for it.
7744 if crypto_ctx.crypto_seal.is_none() {
7745 continue;
7746 }
7747
7748 // We are ready to send data for this packet number space.
7749 if crypto_ctx.data_available() || pkt_space.ready() {
7750 return Ok(Type::from_epoch(epoch));
7751 }
7752
7753 // There are lost frames in this packet number space.
7754 for (_, p) in self.paths.iter() {
7755 if p.recovery.has_lost_frames(epoch) {
7756 return Ok(Type::from_epoch(epoch));
7757 }
7758
7759 // We need to send PTO probe packets.
7760 if p.recovery.loss_probes(epoch) > 0 {
7761 return Ok(Type::from_epoch(epoch));
7762 }
7763 }
7764 }
7765
7766 // If there are flushable, almost full or blocked streams, use the
7767 // Application epoch.
7768 let send_path = self.paths.get(send_pid)?;
7769 if (self.is_established() || self.is_in_early_data()) &&
7770 (self.should_send_handshake_done() ||
7771 self.flow_control.should_update_max_data() ||
7772 self.should_send_max_data ||
7773 self.blocked_limit.is_some() ||
7774 self.dgram_send_queue.has_pending() ||
7775 self.local_error
7776 .as_ref()
7777 .is_some_and(|conn_err| conn_err.is_app) ||
7778 self.streams.should_update_max_streams_bidi() ||
7779 self.streams.should_update_max_streams_uni() ||
7780 self.streams.has_flushable() ||
7781 self.streams.has_almost_full() ||
7782 self.streams.has_blocked() ||
7783 self.streams.has_reset() ||
7784 self.streams.has_stopped() ||
7785 self.ids.has_new_scids() ||
7786 self.ids.has_retire_dcids() ||
7787 send_path
7788 .pmtud
7789 .as_ref()
7790 .is_some_and(|pmtud| pmtud.should_probe()) ||
7791 send_path.needs_ack_eliciting ||
7792 send_path.probing_required())
7793 {
7794 // Only clients can send 0-RTT packets.
7795 if !self.is_server && self.is_in_early_data() {
7796 return Ok(Type::ZeroRTT);
7797 }
7798
7799 return Ok(Type::Short);
7800 }
7801
7802 Err(Error::Done)
7803 }
7804
7805 /// Returns the mutable stream with the given ID if it exists, or creates
7806 /// a new one otherwise.
7807 fn get_or_create_stream(
7808 &mut self, id: u64, local: bool,
7809 ) -> Result<&mut stream::Stream<F>> {
7810 self.streams.get_or_create(
7811 id,
7812 &self.local_transport_params,
7813 &self.peer_transport_params,
7814 local,
7815 self.is_server,
7816 )
7817 }
7818
7819 /// Processes an incoming frame.
7820 fn process_frame(
7821 &mut self, frame: frame::Frame, hdr: &Header, recv_path_id: usize,
7822 epoch: packet::Epoch, now: Instant,
7823 ) -> Result<()> {
7824 trace!("{} rx frm {:?}", self.trace_id, frame);
7825
7826 match frame {
7827 frame::Frame::Padding { .. } => (),
7828
7829 frame::Frame::Ping { .. } => (),
7830
7831 frame::Frame::ACK {
7832 ranges, ack_delay, ..
7833 } => {
7834 let ack_delay = ack_delay
7835 .checked_mul(2_u64.pow(
7836 self.peer_transport_params.ack_delay_exponent as u32,
7837 ))
7838 .ok_or(Error::InvalidFrame)?;
7839
7840 if epoch == packet::Epoch::Handshake ||
7841 (epoch == packet::Epoch::Application &&
7842 self.is_established())
7843 {
7844 self.peer_verified_initial_address = true;
7845 }
7846
7847 let handshake_status = self.handshake_status();
7848
7849 let is_app_limited = self.delivery_rate_check_if_app_limited();
7850
7851 let largest_acked = ranges.last().expect(
7852 "ACK frames should always have at least one ack range",
7853 );
7854
7855 for (_, p) in self.paths.iter_mut() {
7856 if self.pkt_num_spaces[epoch]
7857 .largest_tx_pkt_num
7858 .is_some_and(|largest_sent| largest_sent < largest_acked)
7859 {
7860 // https://www.rfc-editor.org/rfc/rfc9000#section-13.1
7861 // An endpoint SHOULD treat receipt of an acknowledgment
7862 // for a packet it did not send as
7863 // a connection error of type PROTOCOL_VIOLATION
7864 return Err(Error::InvalidAckRange);
7865 }
7866
7867 if is_app_limited {
7868 p.recovery.delivery_rate_update_app_limited(true);
7869 }
7870
7871 let OnAckReceivedOutcome {
7872 lost_packets,
7873 lost_bytes,
7874 acked_bytes,
7875 spurious_losses,
7876 } = p.recovery.on_ack_received(
7877 &ranges,
7878 ack_delay,
7879 epoch,
7880 handshake_status,
7881 now,
7882 self.pkt_num_manager.skip_pn(),
7883 &self.trace_id,
7884 )?;
7885
7886 let skip_pn = self.pkt_num_manager.skip_pn();
7887 let largest_acked =
7888 p.recovery.get_largest_acked_on_epoch(epoch);
7889
7890 // Consider the skip_pn validated if the peer has sent an ack
7891 // for a larger pkt number.
7892 if let Some((largest_acked, skip_pn)) =
7893 largest_acked.zip(skip_pn)
7894 {
7895 if largest_acked > skip_pn {
7896 self.pkt_num_manager.set_skip_pn(None);
7897 }
7898 }
7899
7900 self.lost_count += lost_packets;
7901 self.lost_bytes += lost_bytes as u64;
7902 self.acked_bytes += acked_bytes as u64;
7903 self.spurious_lost_count += spurious_losses;
7904 }
7905 },
7906
7907 frame::Frame::ResetStream {
7908 stream_id,
7909 error_code,
7910 final_size,
7911 } => {
7912 // Peer can't send on our unidirectional streams.
7913 if !stream::is_bidi(stream_id) &&
7914 stream::is_local(stream_id, self.is_server)
7915 {
7916 return Err(Error::InvalidStreamState(stream_id));
7917 }
7918
7919 let max_rx_data_left = self.max_rx_data() - self.rx_data;
7920
7921 // Get existing stream or create a new one, but if the stream
7922 // has already been closed and collected, ignore the frame.
7923 //
7924 // This can happen if e.g. an ACK frame is lost, and the peer
7925 // retransmits another frame before it realizes that the stream
7926 // is gone.
7927 //
7928 // Note that it makes it impossible to check if the frame is
7929 // illegal, since we have no state, but since we ignore the
7930 // frame, it should be fine.
7931 let stream = match self.get_or_create_stream(stream_id, false) {
7932 Ok(v) => v,
7933
7934 Err(Error::Done) => return Ok(()),
7935
7936 Err(e) => return Err(e),
7937 };
7938
7939 let was_readable = stream.is_readable();
7940 let priority_key = Arc::clone(&stream.priority_key);
7941
7942 let stream::RecvBufResetReturn {
7943 max_data_delta,
7944 consumed_flowcontrol,
7945 } = stream.recv.reset(error_code, final_size)?;
7946
7947 if max_data_delta > max_rx_data_left {
7948 return Err(Error::FlowControl);
7949 }
7950
7951 if !was_readable && stream.is_readable() {
7952 self.streams.insert_readable(&priority_key);
7953 }
7954
7955 self.rx_data += max_data_delta;
7956 // We dropped the receive buffer, return connection level
7957 // flow-control
7958 self.flow_control.add_consumed(consumed_flowcontrol);
7959
7960 self.reset_stream_remote_count =
7961 self.reset_stream_remote_count.saturating_add(1);
7962 },
7963
7964 frame::Frame::StopSending {
7965 stream_id,
7966 error_code,
7967 } => {
7968 // STOP_SENDING on a receive-only stream is a fatal error.
7969 if !stream::is_local(stream_id, self.is_server) &&
7970 !stream::is_bidi(stream_id)
7971 {
7972 return Err(Error::InvalidStreamState(stream_id));
7973 }
7974
7975 // Get existing stream or create a new one, but if the stream
7976 // has already been closed and collected, ignore the frame.
7977 //
7978 // This can happen if e.g. an ACK frame is lost, and the peer
7979 // retransmits another frame before it realizes that the stream
7980 // is gone.
7981 //
7982 // Note that it makes it impossible to check if the frame is
7983 // illegal, since we have no state, but since we ignore the
7984 // frame, it should be fine.
7985 let stream = match self.get_or_create_stream(stream_id, false) {
7986 Ok(v) => v,
7987
7988 Err(Error::Done) => return Ok(()),
7989
7990 Err(e) => return Err(e),
7991 };
7992
7993 let was_writable = stream.is_writable();
7994
7995 let priority_key = Arc::clone(&stream.priority_key);
7996
7997 // Try stopping the stream.
7998 if let Ok((final_size, unsent)) = stream.send.stop(error_code) {
7999 // Claw back some flow control allowance from data that was
8000 // buffered but not actually sent before the stream was
8001 // reset.
8002 //
8003 // Note that `tx_cap` will be updated later on, so no need
8004 // to touch it here.
8005 self.tx_data = self.tx_data.saturating_sub(unsent);
8006
8007 self.tx_buffered =
8008 self.tx_buffered.saturating_sub(unsent as usize);
8009
8010 // These drops in qlog are a bit weird, but they are the only way to
8011 // ensure that all bytes that are moved from App to Transport
8012 // in stream_do_send are eventually moved from Transport to
8013 // Dropped. Ideally we would add a Transport to Network
8014 // transition also as a way to indicate when bytes were
8015 // transmitted vs dropped without ever being sent.
8016 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
8017 let ev_data =
8018 EventData::DataMoved(qlog::events::quic::DataMoved {
8019 stream_id: Some(stream_id),
8020 offset: Some(final_size),
8021 length: Some(unsent),
8022 from: Some(DataRecipient::Transport),
8023 to: Some(DataRecipient::Dropped),
8024 ..Default::default()
8025 });
8026
8027 q.add_event_data_with_instant(ev_data, now).ok();
8028 });
8029
8030 self.streams.insert_reset(stream_id, error_code, final_size);
8031
8032 if !was_writable {
8033 self.streams.insert_writable(&priority_key);
8034 }
8035
8036 self.stopped_stream_remote_count =
8037 self.stopped_stream_remote_count.saturating_add(1);
8038 self.reset_stream_local_count =
8039 self.reset_stream_local_count.saturating_add(1);
8040 }
8041 },
8042
8043 frame::Frame::Crypto { data } => {
8044 if data.max_off() >= MAX_CRYPTO_STREAM_OFFSET {
8045 return Err(Error::CryptoBufferExceeded);
8046 }
8047
8048 // Push the data to the stream so it can be re-ordered.
8049 self.crypto_ctx[epoch].crypto_stream.recv.write(data)?;
8050
8051 // Feed crypto data to the TLS state, if there's data
8052 // available at the expected offset.
8053 let mut crypto_buf = [0; 512];
8054
8055 let level = crypto::Level::from_epoch(epoch);
8056
8057 let stream = &mut self.crypto_ctx[epoch].crypto_stream;
8058
8059 while let Ok((read, _)) = stream.recv.emit(&mut crypto_buf) {
8060 let recv_buf = &crypto_buf[..read];
8061 self.handshake.provide_data(level, recv_buf)?;
8062 }
8063
8064 self.do_handshake(now)?;
8065 },
8066
8067 frame::Frame::CryptoHeader { .. } => unreachable!(),
8068
8069 // TODO: implement stateless retry
8070 frame::Frame::NewToken { .. } =>
8071 if self.is_server {
8072 return Err(Error::InvalidPacket);
8073 },
8074
8075 frame::Frame::Stream { stream_id, data } => {
8076 // Peer can't send on our unidirectional streams.
8077 if !stream::is_bidi(stream_id) &&
8078 stream::is_local(stream_id, self.is_server)
8079 {
8080 return Err(Error::InvalidStreamState(stream_id));
8081 }
8082
8083 let max_rx_data_left = self.max_rx_data() - self.rx_data;
8084
8085 // Get existing stream or create a new one, but if the stream
8086 // has already been closed and collected, ignore the frame.
8087 //
8088 // This can happen if e.g. an ACK frame is lost, and the peer
8089 // retransmits another frame before it realizes that the stream
8090 // is gone.
8091 //
8092 // Note that it makes it impossible to check if the frame is
8093 // illegal, since we have no state, but since we ignore the
8094 // frame, it should be fine.
8095 let stream = match self.get_or_create_stream(stream_id, false) {
8096 Ok(v) => v,
8097
8098 Err(Error::Done) => return Ok(()),
8099
8100 Err(e) => return Err(e),
8101 };
8102
8103 // Check for the connection-level flow control limit.
8104 let max_off_delta =
8105 data.max_off().saturating_sub(stream.recv.max_off());
8106
8107 if max_off_delta > max_rx_data_left {
8108 return Err(Error::FlowControl);
8109 }
8110
8111 let was_readable = stream.is_readable();
8112 let priority_key = Arc::clone(&stream.priority_key);
8113
8114 let was_draining = stream.recv.is_draining();
8115
8116 stream.recv.write(data)?;
8117
8118 if !was_readable && stream.is_readable() {
8119 self.streams.insert_readable(&priority_key);
8120 }
8121
8122 self.rx_data += max_off_delta;
8123
8124 if was_draining {
8125 // When a stream is in draining state it will not queue
8126 // incoming data for the application to read, so consider
8127 // the received data as consumed, which might trigger a flow
8128 // control update.
8129 self.flow_control.add_consumed(max_off_delta);
8130 }
8131 },
8132
8133 frame::Frame::StreamHeader { .. } => unreachable!(),
8134
8135 frame::Frame::MaxData { max } => {
8136 self.max_tx_data = cmp::max(self.max_tx_data, max);
8137 },
8138
8139 frame::Frame::MaxStreamData { stream_id, max } => {
8140 // Peer can't receive on its own unidirectional streams.
8141 if !stream::is_bidi(stream_id) &&
8142 !stream::is_local(stream_id, self.is_server)
8143 {
8144 return Err(Error::InvalidStreamState(stream_id));
8145 }
8146
8147 // Get existing stream or create a new one, but if the stream
8148 // has already been closed and collected, ignore the frame.
8149 //
8150 // This can happen if e.g. an ACK frame is lost, and the peer
8151 // retransmits another frame before it realizes that the stream
8152 // is gone.
8153 //
8154 // Note that it makes it impossible to check if the frame is
8155 // illegal, since we have no state, but since we ignore the
8156 // frame, it should be fine.
8157 let stream = match self.get_or_create_stream(stream_id, false) {
8158 Ok(v) => v,
8159
8160 Err(Error::Done) => return Ok(()),
8161
8162 Err(e) => return Err(e),
8163 };
8164
8165 let was_flushable = stream.is_flushable();
8166
8167 stream.send.update_max_data(max);
8168
8169 let writable = stream.is_writable();
8170
8171 let priority_key = Arc::clone(&stream.priority_key);
8172
8173 // If the stream is now flushable push it to the flushable queue,
8174 // but only if it wasn't already queued.
8175 if stream.is_flushable() && !was_flushable {
8176 let priority_key = Arc::clone(&stream.priority_key);
8177 self.streams.insert_flushable(&priority_key);
8178 }
8179
8180 if writable {
8181 self.streams.insert_writable(&priority_key);
8182 }
8183 },
8184
8185 frame::Frame::MaxStreamsBidi { max } => {
8186 if max > MAX_STREAM_ID {
8187 return Err(Error::InvalidFrame);
8188 }
8189
8190 self.streams.update_peer_max_streams_bidi(max);
8191 },
8192
8193 frame::Frame::MaxStreamsUni { max } => {
8194 if max > MAX_STREAM_ID {
8195 return Err(Error::InvalidFrame);
8196 }
8197
8198 self.streams.update_peer_max_streams_uni(max);
8199 },
8200
8201 frame::Frame::DataBlocked { .. } => {
8202 self.data_blocked_recv_count =
8203 self.data_blocked_recv_count.saturating_add(1);
8204 },
8205
8206 frame::Frame::StreamDataBlocked { .. } => {
8207 self.stream_data_blocked_recv_count =
8208 self.stream_data_blocked_recv_count.saturating_add(1);
8209 },
8210
8211 frame::Frame::StreamsBlockedBidi { limit } => {
8212 if limit > MAX_STREAM_ID {
8213 return Err(Error::InvalidFrame);
8214 }
8215 },
8216
8217 frame::Frame::StreamsBlockedUni { limit } => {
8218 if limit > MAX_STREAM_ID {
8219 return Err(Error::InvalidFrame);
8220 }
8221 },
8222
8223 frame::Frame::NewConnectionId {
8224 seq_num,
8225 retire_prior_to,
8226 conn_id,
8227 reset_token,
8228 } => {
8229 if self.ids.zero_length_dcid() {
8230 return Err(Error::InvalidState);
8231 }
8232
8233 let mut retired_path_ids = SmallVec::new();
8234
8235 // Retire pending path IDs before propagating the error code to
8236 // make sure retired connection IDs are not in use anymore.
8237 let new_dcid_res = self.ids.new_dcid(
8238 conn_id.into(),
8239 seq_num,
8240 u128::from_be_bytes(reset_token),
8241 retire_prior_to,
8242 &mut retired_path_ids,
8243 );
8244
8245 for (dcid_seq, pid) in retired_path_ids {
8246 let path = self.paths.get_mut(pid)?;
8247
8248 // Maybe the path already switched to another DCID.
8249 if path.active_dcid_seq != Some(dcid_seq) {
8250 continue;
8251 }
8252
8253 if let Some(new_dcid_seq) =
8254 self.ids.lowest_available_dcid_seq()
8255 {
8256 path.active_dcid_seq = Some(new_dcid_seq);
8257
8258 self.ids.link_dcid_to_path_id(new_dcid_seq, pid)?;
8259
8260 trace!(
8261 "{} path ID {} changed DCID: old seq num {} new seq num {}",
8262 self.trace_id, pid, dcid_seq, new_dcid_seq,
8263 );
8264 } else {
8265 // We cannot use this path anymore for now.
8266 path.active_dcid_seq = None;
8267
8268 trace!(
8269 "{} path ID {} cannot be used; DCID seq num {} has been retired",
8270 self.trace_id, pid, dcid_seq,
8271 );
8272 }
8273 }
8274
8275 // Propagate error (if any) now...
8276 new_dcid_res?;
8277 },
8278
8279 frame::Frame::RetireConnectionId { seq_num } => {
8280 if self.ids.zero_length_scid() {
8281 return Err(Error::InvalidState);
8282 }
8283
8284 if let Some(pid) = self.ids.retire_scid(seq_num, &hdr.dcid)? {
8285 let path = self.paths.get_mut(pid)?;
8286
8287 // Maybe we already linked a new SCID to that path.
8288 if path.active_scid_seq == Some(seq_num) {
8289 // XXX: We do not remove unused paths now, we instead
8290 // wait until we need to maintain more paths than the
8291 // host is willing to support.
8292 path.active_scid_seq = None;
8293 }
8294 }
8295 },
8296
8297 frame::Frame::PathChallenge { data } => {
8298 self.path_challenge_rx_count += 1;
8299
8300 self.paths
8301 .get_mut(recv_path_id)?
8302 .on_challenge_received(data);
8303 },
8304
8305 frame::Frame::PathResponse { data } => {
8306 self.paths.on_response_received(data)?;
8307 },
8308
8309 frame::Frame::ConnectionClose {
8310 error_code, reason, ..
8311 } => {
8312 self.peer_error = Some(ConnectionError {
8313 is_app: false,
8314 error_code,
8315 reason,
8316 });
8317
8318 let path = self.paths.get_active()?;
8319 self.draining_timer = Some(now + (path.recovery.pto() * 3));
8320 },
8321
8322 frame::Frame::ApplicationClose { error_code, reason } => {
8323 self.peer_error = Some(ConnectionError {
8324 is_app: true,
8325 error_code,
8326 reason,
8327 });
8328
8329 let path = self.paths.get_active()?;
8330 self.draining_timer = Some(now + (path.recovery.pto() * 3));
8331 },
8332
8333 frame::Frame::HandshakeDone => {
8334 if self.is_server {
8335 return Err(Error::InvalidPacket);
8336 }
8337
8338 self.peer_verified_initial_address = true;
8339
8340 self.handshake_confirmed = true;
8341
8342 // Once the handshake is confirmed, we can drop Handshake keys.
8343 self.drop_epoch_state(packet::Epoch::Handshake, now);
8344 },
8345
8346 frame::Frame::Datagram { data } => {
8347 // Close the connection if DATAGRAMs are not enabled.
8348 // quiche always advertises support for 64K sized DATAGRAM
8349 // frames, as recommended by the standard, so we don't need a
8350 // size check.
8351 if !self.dgram_enabled() {
8352 return Err(Error::InvalidState);
8353 }
8354
8355 // If recv queue is full, discard oldest
8356 if self.dgram_recv_queue.is_full() {
8357 self.dgram_recv_queue.pop();
8358 }
8359
8360 self.dgram_recv_queue.push(data)?;
8361
8362 self.dgram_recv_count = self.dgram_recv_count.saturating_add(1);
8363
8364 let path = self.paths.get_mut(recv_path_id)?;
8365 path.dgram_recv_count = path.dgram_recv_count.saturating_add(1);
8366 },
8367
8368 frame::Frame::DatagramHeader { .. } => unreachable!(),
8369 }
8370
8371 Ok(())
8372 }
8373
8374 /// Drops the keys and recovery state for the given epoch.
8375 fn drop_epoch_state(&mut self, epoch: packet::Epoch, now: Instant) {
8376 let crypto_ctx = &mut self.crypto_ctx[epoch];
8377 if crypto_ctx.crypto_open.is_none() {
8378 return;
8379 }
8380 crypto_ctx.clear();
8381 self.pkt_num_spaces[epoch].clear();
8382
8383 let handshake_status = self.handshake_status();
8384 for (_, p) in self.paths.iter_mut() {
8385 p.recovery
8386 .on_pkt_num_space_discarded(epoch, handshake_status, now);
8387 }
8388
8389 trace!("{} dropped epoch {} state", self.trace_id, epoch);
8390 }
8391
8392 /// Returns the connection level flow control limit.
8393 fn max_rx_data(&self) -> u64 {
8394 self.flow_control.max_data()
8395 }
8396
8397 /// Returns true if the HANDSHAKE_DONE frame needs to be sent.
8398 fn should_send_handshake_done(&self) -> bool {
8399 self.is_established() && !self.handshake_done_sent && self.is_server
8400 }
8401
8402 /// Returns the idle timeout value.
8403 ///
8404 /// `None` is returned if both end-points disabled the idle timeout.
8405 fn idle_timeout(&self) -> Option<Duration> {
8406 // If the transport parameter is set to 0, then the respective endpoint
8407 // decided to disable the idle timeout. If both are disabled we should
8408 // not set any timeout.
8409 if self.local_transport_params.max_idle_timeout == 0 &&
8410 self.peer_transport_params.max_idle_timeout == 0
8411 {
8412 return None;
8413 }
8414
8415 // If the local endpoint or the peer disabled the idle timeout, use the
8416 // other peer's value, otherwise use the minimum of the two values.
8417 let idle_timeout = if self.local_transport_params.max_idle_timeout == 0 {
8418 self.peer_transport_params.max_idle_timeout
8419 } else if self.peer_transport_params.max_idle_timeout == 0 {
8420 self.local_transport_params.max_idle_timeout
8421 } else {
8422 cmp::min(
8423 self.local_transport_params.max_idle_timeout,
8424 self.peer_transport_params.max_idle_timeout,
8425 )
8426 };
8427
8428 let path_pto = match self.paths.get_active() {
8429 Ok(p) => p.recovery.pto(),
8430 Err(_) => Duration::ZERO,
8431 };
8432
8433 let idle_timeout = Duration::from_millis(idle_timeout);
8434 let idle_timeout = cmp::max(idle_timeout, 3 * path_pto);
8435
8436 Some(idle_timeout)
8437 }
8438
8439 /// Returns the connection's handshake status for use in loss recovery.
8440 fn handshake_status(&self) -> recovery::HandshakeStatus {
8441 recovery::HandshakeStatus {
8442 has_handshake_keys: self.crypto_ctx[packet::Epoch::Handshake]
8443 .has_keys(),
8444
8445 peer_verified_address: self.peer_verified_initial_address,
8446
8447 completed: self.is_established(),
8448 }
8449 }
8450
8451 /// Updates send capacity.
8452 fn update_tx_cap(&mut self) {
8453 let cwin_available = match self.paths.get_active() {
8454 Ok(p) => p.recovery.cwnd_available() as u64,
8455 Err(_) => 0,
8456 };
8457
8458 let cap =
8459 cmp::min(cwin_available, self.max_tx_data - self.tx_data) as usize;
8460 self.tx_cap = (cap as f64 * self.tx_cap_factor).ceil() as usize;
8461 }
8462
8463 fn delivery_rate_check_if_app_limited(&self) -> bool {
8464 // Enter the app-limited phase of delivery rate when these conditions
8465 // are met:
8466 //
8467 // - The remaining capacity is higher than available bytes in cwnd (there
8468 // is more room to send).
8469 // - New data since the last send() is smaller than available bytes in
8470 // cwnd (we queued less than what we can send).
8471 // - There is room to send more data in cwnd.
8472 //
8473 // In application-limited phases the transmission rate is limited by the
8474 // application rather than the congestion control algorithm.
8475 //
8476 // Note that this is equivalent to CheckIfApplicationLimited() from the
8477 // delivery rate draft. This is also separate from `recovery.app_limited`
8478 // and only applies to delivery rate calculation.
8479 let cwin_available = self
8480 .paths
8481 .iter()
8482 .filter(|&(_, p)| p.active())
8483 .map(|(_, p)| p.recovery.cwnd_available())
8484 .sum();
8485
8486 ((self.tx_buffered + self.dgram_send_queue_byte_size()) < cwin_available) &&
8487 (self.tx_data.saturating_sub(self.last_tx_data)) <
8488 cwin_available as u64 &&
8489 cwin_available > 0
8490 }
8491
8492 fn check_tx_buffered_invariant(&mut self) {
8493 // tx_buffered should track bytes queued in the stream buffers
8494 // and unacked retransmittable bytes in the network.
8495 // If tx_buffered > 0 while there are no flushable streams and no
8496 // inflight bytes, mark the tx_buffered_state as inconsistent.
8497 //
8498 // It is normal to have tx_buffered == 0 while there are inflight bytes,
8499 // since not all QUIC frames are retransmittable; inflight tracks all
8500 // bytes on the network which are subject to congestion control.
8501 if self.tx_buffered > 0 &&
8502 !self.streams.has_flushable() &&
8503 !self
8504 .paths
8505 .iter()
8506 .any(|(_, p)| p.recovery.bytes_in_flight() > 0)
8507 {
8508 self.tx_buffered_state = TxBufferTrackingState::Inconsistent;
8509 }
8510 }
8511
8512 fn set_initial_dcid(
8513 &mut self, cid: ConnectionId<'static>, reset_token: Option<u128>,
8514 path_id: usize,
8515 ) -> Result<()> {
8516 self.ids.set_initial_dcid(cid, reset_token, Some(path_id));
8517 self.paths.get_mut(path_id)?.active_dcid_seq = Some(0);
8518
8519 Ok(())
8520 }
8521
8522 /// Selects the path that the incoming packet belongs to, or creates a new
8523 /// one if no existing path matches.
8524 fn get_or_create_recv_path_id(
8525 &mut self, recv_pid: Option<usize>, dcid: &ConnectionId, buf_len: usize,
8526 info: &RecvInfo,
8527 ) -> Result<usize> {
8528 let ids = &mut self.ids;
8529
8530 let (in_scid_seq, mut in_scid_pid) =
8531 ids.find_scid_seq(dcid).ok_or(Error::InvalidState)?;
8532
8533 if let Some(recv_pid) = recv_pid {
8534 // If the path observes a change of SCID used, note it.
8535 let recv_path = self.paths.get_mut(recv_pid)?;
8536
8537 let cid_entry =
8538 recv_path.active_scid_seq.and_then(|v| ids.get_scid(v).ok());
8539
8540 if cid_entry.map(|e| &e.cid) != Some(dcid) {
8541 let incoming_cid_entry = ids.get_scid(in_scid_seq)?;
8542
8543 let prev_recv_pid =
8544 incoming_cid_entry.path_id.unwrap_or(recv_pid);
8545
8546 if prev_recv_pid != recv_pid {
8547 trace!(
8548 "{} peer reused CID {:?} from path {} on path {}",
8549 self.trace_id,
8550 dcid,
8551 prev_recv_pid,
8552 recv_pid
8553 );
8554
8555 // TODO: reset congestion control.
8556 }
8557
8558 trace!(
                    "{} path ID {} now sees SCID with seq num {}",
8560 self.trace_id,
8561 recv_pid,
8562 in_scid_seq
8563 );
8564
8565 recv_path.active_scid_seq = Some(in_scid_seq);
8566 ids.link_scid_to_path_id(in_scid_seq, recv_pid)?;
8567 }
8568
8569 return Ok(recv_pid);
8570 }
8571
        // This is a new 4-tuple. Check whether the CID has already been
        // assigned to another path.
8574
        // Ignore this step if we are using zero-length SCID.
8576 if ids.zero_length_scid() {
8577 in_scid_pid = None;
8578 }
8579
8580 if let Some(in_scid_pid) = in_scid_pid {
8581 // This CID has been used by another path. If we have the
8582 // room to do so, create a new `Path` structure holding this
8583 // new 4-tuple. Otherwise, drop the packet.
8584 let old_path = self.paths.get_mut(in_scid_pid)?;
8585 let old_local_addr = old_path.local_addr();
8586 let old_peer_addr = old_path.peer_addr();
8587
8588 trace!(
8589 "{} reused CID seq {} of ({},{}) (path {}) on ({},{})",
8590 self.trace_id,
8591 in_scid_seq,
8592 old_local_addr,
8593 old_peer_addr,
8594 in_scid_pid,
8595 info.to,
8596 info.from
8597 );
8598
8599 // Notify the application.
8600 self.paths.notify_event(PathEvent::ReusedSourceConnectionId(
8601 in_scid_seq,
8602 (old_local_addr, old_peer_addr),
8603 (info.to, info.from),
8604 ));
8605 }
8606
8607 // This is a new path using an unassigned CID; create it!
8608 let mut path = path::Path::new(
8609 info.to,
8610 info.from,
8611 &self.recovery_config,
8612 self.path_challenge_recv_max_queue_len,
8613 false,
8614 None,
8615 );
8616
8617 path.max_send_bytes = buf_len * self.max_amplification_factor;
8618 path.active_scid_seq = Some(in_scid_seq);
8619
8620 // Automatically probes the new path.
8621 path.request_validation();
8622
8623 let pid = self.paths.insert_path(path, self.is_server)?;
8624
        // Only link the SCID to the new path if it was not already assigned
        // to another path (i.e., do not record path reuse).
8626 if in_scid_pid.is_none() {
8627 ids.link_scid_to_path_id(in_scid_seq, pid)?;
8628 }
8629
8630 Ok(pid)
8631 }
8632
8633 /// Selects the path on which the next packet must be sent.
8634 fn get_send_path_id(
8635 &self, from: Option<SocketAddr>, to: Option<SocketAddr>,
8636 ) -> Result<usize> {
8637 // A probing packet must be sent, but only if the connection is fully
8638 // established.
8639 if self.is_established() {
8640 let mut probing = self
8641 .paths
8642 .iter()
8643 .filter(|(_, p)| from.is_none() || Some(p.local_addr()) == from)
8644 .filter(|(_, p)| to.is_none() || Some(p.peer_addr()) == to)
8645 .filter(|(_, p)| p.active_dcid_seq.is_some())
8646 .filter(|(_, p)| p.probing_required())
8647 .map(|(pid, _)| pid);
8648
8649 if let Some(pid) = probing.next() {
8650 return Ok(pid);
8651 }
8652 }
8653
8654 if let Some((pid, p)) = self.paths.get_active_with_pid() {
8655 if from.is_some() && Some(p.local_addr()) != from {
8656 return Err(Error::Done);
8657 }
8658
8659 if to.is_some() && Some(p.peer_addr()) != to {
8660 return Err(Error::Done);
8661 }
8662
8663 return Ok(pid);
8664 };
8665
8666 Err(Error::InvalidState)
8667 }
8668
8669 /// Sets the path with identifier 'path_id' to be active.
8670 fn set_active_path(&mut self, path_id: usize, now: Instant) -> Result<()> {
8671 if let Ok(old_active_path) = self.paths.get_active_mut() {
8672 for &e in packet::Epoch::epochs(
8673 packet::Epoch::Initial..=packet::Epoch::Application,
8674 ) {
8675 let (lost_packets, lost_bytes) = old_active_path
8676 .recovery
8677 .on_path_change(e, now, &self.trace_id);
8678
8679 self.lost_count += lost_packets;
8680 self.lost_bytes += lost_bytes as u64;
8681 }
8682 }
8683
8684 self.paths.set_active_path(path_id)
8685 }
8686
8687 /// Handles potential connection migration.
8688 fn on_peer_migrated(
8689 &mut self, new_pid: usize, disable_dcid_reuse: bool, now: Instant,
8690 ) -> Result<()> {
8691 let active_path_id = self.paths.get_active_path_id()?;
8692
8693 if active_path_id == new_pid {
8694 return Ok(());
8695 }
8696
8697 self.set_active_path(new_pid, now)?;
8698
8699 let no_spare_dcid =
8700 self.paths.get_mut(new_pid)?.active_dcid_seq.is_none();
8701
8702 if no_spare_dcid && !disable_dcid_reuse {
8703 self.paths.get_mut(new_pid)?.active_dcid_seq =
8704 self.paths.get_mut(active_path_id)?.active_dcid_seq;
8705 }
8706
8707 Ok(())
8708 }
8709
8710 /// Creates a new client-side path.
8711 fn create_path_on_client(
8712 &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
8713 ) -> Result<usize> {
8714 if self.is_server {
8715 return Err(Error::InvalidState);
8716 }
8717
8718 // If we use zero-length SCID and go over our local active CID limit,
8719 // the `insert_path()` call will raise an error.
8720 if !self.ids.zero_length_scid() && self.ids.available_scids() == 0 {
8721 return Err(Error::OutOfIdentifiers);
8722 }
8723
8724 // Do we have a spare DCID? If we are using zero-length DCID, just use
8725 // the default having sequence 0 (note that if we exceed our local CID
        // limit, the `insert_path()` call will raise an error).
8727 let dcid_seq = if self.ids.zero_length_dcid() {
8728 0
8729 } else {
8730 self.ids
8731 .lowest_available_dcid_seq()
8732 .ok_or(Error::OutOfIdentifiers)?
8733 };
8734
8735 let mut path = path::Path::new(
8736 local_addr,
8737 peer_addr,
8738 &self.recovery_config,
8739 self.path_challenge_recv_max_queue_len,
8740 false,
8741 None,
8742 );
8743 path.active_dcid_seq = Some(dcid_seq);
8744
8745 let pid = self
8746 .paths
8747 .insert_path(path, false)
8748 .map_err(|_| Error::OutOfIdentifiers)?;
8749 self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
8750
8751 Ok(pid)
8752 }
8753
    // Marks the connection as closed and performs any related cleanup.
8755 fn mark_closed(&mut self) {
8756 #[cfg(feature = "qlog")]
8757 {
            use qlog::events::connectivity::ConnectionClosed;
            use qlog::events::connectivity::ConnectionClosedTrigger;
            use qlog::events::ApplicationErrorCode;
            use qlog::events::ConnectionErrorCode;

            let cc = match (
                self.is_established(),
                self.timed_out,
                &self.peer_error,
                &self.local_error,
            ) {
                (false, _, _, _) => ConnectionClosed {
                    owner: Some(TransportOwner::Local),
                    connection_code: None,
                    application_code: None,
                    internal_code: None,
                    reason: Some("Failed to establish connection".to_string()),
                    trigger: Some(ConnectionClosedTrigger::HandshakeTimeout),
                },

                (true, true, _, _) => ConnectionClosed {
                    owner: Some(TransportOwner::Local),
                    connection_code: None,
                    application_code: None,
                    internal_code: None,
                    reason: Some("Idle timeout".to_string()),
                    trigger: Some(ConnectionClosedTrigger::IdleTimeout),
                },

                (true, false, Some(peer_error), None) => {
                    let (connection_code, application_code, trigger) =
                        if peer_error.is_app {
                            (
                                None,
                                Some(ApplicationErrorCode::Value(
                                    peer_error.error_code,
                                )),
                                None,
                            )
                        } else {
                            let trigger = if peer_error.error_code ==
                                WireErrorCode::NoError as u64
                            {
                                Some(ConnectionClosedTrigger::Clean)
                            } else {
                                Some(ConnectionClosedTrigger::Error)
                            };

                            (
                                Some(ConnectionErrorCode::Value(
                                    peer_error.error_code,
                                )),
                                None,
                                trigger,
                            )
                        };

                    ConnectionClosed {
                        owner: Some(TransportOwner::Remote),
                        connection_code,
                        application_code,
                        internal_code: None,
                        reason: Some(
                            String::from_utf8_lossy(&peer_error.reason)
                                .to_string(),
                        ),
                        trigger,
                    }
                },

                (true, false, None, Some(local_error)) => {
                    let (connection_code, application_code, trigger) =
                        if local_error.is_app {
                            (
                                None,
                                Some(ApplicationErrorCode::Value(
                                    local_error.error_code,
                                )),
                                None,
                            )
                        } else {
                            let trigger = if local_error.error_code ==
                                WireErrorCode::NoError as u64
                            {
                                Some(ConnectionClosedTrigger::Clean)
                            } else {
                                Some(ConnectionClosedTrigger::Error)
                            };

                            (
                                Some(ConnectionErrorCode::Value(
                                    local_error.error_code,
                                )),
                                None,
                                trigger,
                            )
                        };

                    ConnectionClosed {
                        owner: Some(TransportOwner::Local),
                        connection_code,
                        application_code,
                        internal_code: None,
                        reason: Some(
                            String::from_utf8_lossy(&local_error.reason)
                                .to_string(),
                        ),
                        trigger,
                    }
                },

                _ => ConnectionClosed {
                    owner: None,
                    connection_code: None,
                    application_code: None,
                    internal_code: None,
                    reason: None,
                    trigger: None,
                },
            };
8832
8833 qlog_with_type!(QLOG_CONNECTION_CLOSED, self.qlog, q, {
8834 let ev_data = EventData::ConnectionClosed(cc);
8835
8836 q.add_event_data_now(ev_data).ok();
8837 });
8838 self.qlog.streamer = None;
8839 }
8840 self.closed = true;
8841 }
8842}
8843
8844#[cfg(feature = "boringssl-boring-crate")]
8845impl<F: BufFactory> AsMut<boring::ssl::SslRef> for Connection<F> {
8846 fn as_mut(&mut self) -> &mut boring::ssl::SslRef {
8847 self.handshake.ssl_mut()
8848 }
8849}
8850
8851/// Maps an `Error` to `Error::Done`, or itself.
8852///
8853/// When a received packet that hasn't yet been authenticated triggers a failure
8854/// it should, in most cases, be ignored, instead of raising a connection error,
8855/// to avoid potential man-in-the-middle and man-on-the-side attacks.
8856///
8857/// However, if no other packet was previously received, the connection should
8858/// indeed be closed as the received packet might just be network background
8859/// noise, and it shouldn't keep resources occupied indefinitely.
8860///
8861/// This function maps an error to `Error::Done` to ignore a packet failure
8862/// without aborting the connection, except when no other packet was previously
8863/// received, in which case the error itself is returned, but only on the
8864/// server-side as the client will already have armed the idle timer.
8865///
8866/// This must only be used for errors preceding packet authentication. Failures
8867/// happening after a packet has been authenticated should still cause the
8868/// connection to be aborted.
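///
/// A sketch of the intended call-site pattern (the parsing step shown is just
/// a placeholder, not a function in this module):
///
/// ```text
/// parse_step(buf).map_err(|e| {
///     drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
/// })?;
/// ```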
8869fn drop_pkt_on_err(
8870 e: Error, recv_count: usize, is_server: bool, trace_id: &str,
8871) -> Error {
8872 // On the server, if no other packet has been successfully processed, abort
8873 // the connection to avoid keeping the connection open when only junk is
8874 // received.
8875 if is_server && recv_count == 0 {
8876 return e;
8877 }
8878
8879 trace!("{trace_id} dropped invalid packet");
8880
8881 // Ignore other invalid packets that haven't been authenticated to prevent
8882 // man-in-the-middle and man-on-the-side attacks.
8883 Error::Done
8884}
8885
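/// Formats a source/destination address pair for logging, e.g.
/// `src:192.0.2.1:443 dst:198.51.100.2:51000`; renders as an empty string if
/// either IP address is unspecified.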
8886struct AddrTupleFmt(SocketAddr, SocketAddr);
8887
8888impl std::fmt::Display for AddrTupleFmt {
8889 fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
8890 let AddrTupleFmt(src, dst) = &self;
8891
8892 if src.ip().is_unspecified() || dst.ip().is_unspecified() {
8893 return Ok(());
8894 }
8895
8896 f.write_fmt(format_args!("src:{src} dst:{dst}"))
8897 }
8898}
8899
8900/// Statistics about the connection.
8901///
8902/// A connection's statistics can be collected using the [`stats()`] method.
8903///
8904/// [`stats()`]: struct.Connection.html#method.stats
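///
/// ## Examples:
///
/// A minimal sketch of reading a few counters from a connection (the
/// connection setup shown is illustrative):
///
/// ```no_run
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let local = "127.0.0.1:4321".parse().unwrap();
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let conn = quiche::accept(&scid, None, local, peer, &mut config)?;
/// let stats = conn.stats();
/// println!("sent={} recv={} lost={}", stats.sent, stats.recv, stats.lost);
/// # Ok::<(), quiche::Error>(())
/// ```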
8905#[derive(Clone, Default)]
8906pub struct Stats {
8907 /// The number of QUIC packets received.
8908 pub recv: usize,
8909
8910 /// The number of QUIC packets sent.
8911 pub sent: usize,
8912
8913 /// The number of QUIC packets that were lost.
8914 pub lost: usize,
8915
8916 /// The number of QUIC packets that were marked as lost but later acked.
8917 pub spurious_lost: usize,
8918
8919 /// The number of sent QUIC packets with retransmitted data.
8920 pub retrans: usize,
8921
8922 /// The number of sent bytes.
8923 pub sent_bytes: u64,
8924
8925 /// The number of received bytes.
8926 pub recv_bytes: u64,
8927
    /// The number of sent bytes that were acked.
8929 pub acked_bytes: u64,
8930
    /// The number of sent bytes that were declared lost.
8932 pub lost_bytes: u64,
8933
8934 /// The number of stream bytes retransmitted.
8935 pub stream_retrans_bytes: u64,
8936
8937 /// The number of DATAGRAM frames received.
8938 pub dgram_recv: usize,
8939
8940 /// The number of DATAGRAM frames sent.
8941 pub dgram_sent: usize,
8942
8943 /// The number of known paths for the connection.
8944 pub paths_count: usize,
8945
    /// The number of streams reset by the local endpoint.
8947 pub reset_stream_count_local: u64,
8948
    /// The number of streams stopped by the local endpoint.
8950 pub stopped_stream_count_local: u64,
8951
    /// The number of streams reset by the remote endpoint.
8953 pub reset_stream_count_remote: u64,
8954
    /// The number of streams stopped by the remote endpoint.
8956 pub stopped_stream_count_remote: u64,
8957
8958 /// The number of DATA_BLOCKED frames sent due to hitting the connection
8959 /// flow control limit.
8960 pub data_blocked_sent_count: u64,
8961
8962 /// The number of STREAM_DATA_BLOCKED frames sent due to a stream hitting
8963 /// the stream flow control limit.
8964 pub stream_data_blocked_sent_count: u64,
8965
8966 /// The number of DATA_BLOCKED frames received from the remote.
8967 pub data_blocked_recv_count: u64,
8968
8969 /// The number of STREAM_DATA_BLOCKED frames received from the remote.
8970 pub stream_data_blocked_recv_count: u64,
8971
8972 /// The total number of PATH_CHALLENGE frames that were received.
8973 pub path_challenge_rx_count: u64,
8974
8975 /// Total duration during which this side of the connection was
8976 /// actively sending bytes or waiting for those bytes to be acked.
8977 pub bytes_in_flight_duration: Duration,
8978
8979 /// Health state of the connection's tx_buffered.
8980 pub tx_buffered_state: TxBufferTrackingState,
8981}
8982
8983impl std::fmt::Debug for Stats {
8984 #[inline]
8985 fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
8986 write!(
8987 f,
8988 "recv={} sent={} lost={} retrans={}",
8989 self.recv, self.sent, self.lost, self.retrans,
8990 )?;
8991
8992 write!(
8993 f,
8994 " sent_bytes={} recv_bytes={} lost_bytes={}",
8995 self.sent_bytes, self.recv_bytes, self.lost_bytes,
8996 )?;
8997
8998 Ok(())
8999 }
9000}
9001
9002#[doc(hidden)]
9003#[cfg(any(test, feature = "internal"))]
9004pub mod test_utils;
9005
9006#[cfg(test)]
9007mod tests;
9008
9009pub use crate::packet::ConnectionId;
9010pub use crate::packet::Header;
9011pub use crate::packet::Type;
9012
9013pub use crate::path::PathEvent;
9014pub use crate::path::PathStats;
9015pub use crate::path::SocketAddrIter;
9016
9017pub use crate::recovery::BbrBwLoReductionStrategy;
9018pub use crate::recovery::BbrParams;
9019pub use crate::recovery::CongestionControlAlgorithm;
9020pub use crate::recovery::StartupExit;
9021pub use crate::recovery::StartupExitReason;
9022
9023pub use crate::stream::StreamIter;
9024
9025pub use crate::transport_params::TransportParams;
9026pub use crate::transport_params::UnknownTransportParameter;
9027pub use crate::transport_params::UnknownTransportParameterIterator;
9028pub use crate::transport_params::UnknownTransportParameters;
9029
9030pub use crate::range_buf::BufFactory;
9031pub use crate::range_buf::BufSplit;
9032
9033pub use crate::error::ConnectionError;
9034pub use crate::error::Error;
9035pub use crate::error::Result;
9036pub use crate::error::WireErrorCode;
9037
9038mod cid;
9039mod crypto;
9040mod dgram;
9041mod error;
9042#[cfg(feature = "ffi")]
9043mod ffi;
9044mod flowcontrol;
9045mod frame;
9046pub mod h3;
9047mod minmax;
9048mod packet;
9049mod path;
9050mod pmtud;
9051mod rand;
9052mod range_buf;
9053mod ranges;
9054mod recovery;
9055mod stream;
9056mod tls;
9057mod transport_params;