// quiche/lib.rs
1// Copyright (C) 2018-2019, Cloudflare, Inc.
2// All rights reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// * Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// * Redistributions in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
16// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
19// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27//! 🥧 Savoury implementation of the QUIC transport protocol and HTTP/3.
28//!
29//! [quiche] is an implementation of the QUIC transport protocol and HTTP/3 as
30//! specified by the [IETF]. It provides a low level API for processing QUIC
31//! packets and handling connection state. The application is responsible for
32//! providing I/O (e.g. sockets handling) as well as an event loop with support
33//! for timers.
34//!
35//! [quiche]: https://github.com/cloudflare/quiche/
36//! [ietf]: https://quicwg.org/
37//!
38//! ## Configuring connections
39//!
40//! The first step in establishing a QUIC connection using quiche is creating a
41//! [`Config`] object:
42//!
43//! ```
44//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
45//! config.set_application_protos(&[b"example-proto"]);
46//!
47//! // Additional configuration specific to application and use case...
48//! # Ok::<(), quiche::Error>(())
49//! ```
50//!
51//! The [`Config`] object controls important aspects of the QUIC connection such
52//! as QUIC version, ALPN IDs, flow control, congestion control, idle timeout
53//! and other properties or features.
54//!
55//! QUIC is a general-purpose transport protocol and there are several
56//! configuration properties where there is no reasonable default value. For
57//! example, the permitted number of concurrent streams of any particular type
58//! is dependent on the application running over QUIC, and other use-case
59//! specific concerns.
60//!
//! quiche defaults several properties to zero; applications most likely need
62//! to set these to something else to satisfy their needs using the following:
63//!
64//! - [`set_initial_max_streams_bidi()`]
65//! - [`set_initial_max_streams_uni()`]
66//! - [`set_initial_max_data()`]
67//! - [`set_initial_max_stream_data_bidi_local()`]
68//! - [`set_initial_max_stream_data_bidi_remote()`]
69//! - [`set_initial_max_stream_data_uni()`]
70//!
71//! [`Config`] also holds TLS configuration. This can be changed by mutators on
//! an existing object, or by constructing a TLS context manually and
73//! creating a configuration using [`with_boring_ssl_ctx_builder()`].
74//!
75//! A configuration object can be shared among multiple connections.
76//!
77//! ### Connection setup
78//!
79//! On the client-side the [`connect()`] utility function can be used to create
80//! a new connection, while [`accept()`] is for servers:
81//!
82//! ```
83//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
84//! # let server_name = "quic.tech";
85//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
86//! # let peer = "127.0.0.1:1234".parse().unwrap();
87//! # let local = "127.0.0.1:4321".parse().unwrap();
88//! // Client connection.
89//! let conn =
90//! quiche::connect(Some(&server_name), &scid, local, peer, &mut config)?;
91//!
92//! // Server connection.
93//! # let peer = "127.0.0.1:1234".parse().unwrap();
94//! # let local = "127.0.0.1:4321".parse().unwrap();
95//! let conn = quiche::accept(&scid, None, local, peer, &mut config)?;
96//! # Ok::<(), quiche::Error>(())
97//! ```
98//!
99//! In both cases, the application is responsible for generating a new source
100//! connection ID that will be used to identify the new connection.
101//!
//! The application also needs to pass the address of the remote peer of the
103//! connection: in the case of a client that would be the address of the server
104//! it is trying to connect to, and for a server that is the address of the
105//! client that initiated the connection.
106//!
107//! ## Handling incoming packets
108//!
109//! Using the connection's [`recv()`] method the application can process
110//! incoming packets that belong to that connection from the network:
111//!
112//! ```no_run
113//! # let mut buf = [0; 512];
114//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
115//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
116//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
117//! # let peer = "127.0.0.1:1234".parse().unwrap();
118//! # let local = "127.0.0.1:4321".parse().unwrap();
119//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
120//! let to = socket.local_addr().unwrap();
121//!
122//! loop {
123//! let (read, from) = socket.recv_from(&mut buf).unwrap();
124//!
125//! let recv_info = quiche::RecvInfo { from, to };
126//!
127//! let read = match conn.recv(&mut buf[..read], recv_info) {
128//! Ok(v) => v,
129//!
130//! Err(quiche::Error::Done) => {
131//! // Done reading.
132//! break;
133//! },
134//!
135//! Err(e) => {
136//! // An error occurred, handle it.
137//! break;
138//! },
139//! };
140//! }
141//! # Ok::<(), quiche::Error>(())
142//! ```
143//!
144//! The application has to pass a [`RecvInfo`] structure in order to provide
145//! additional information about the received packet (such as the address it
146//! was received from).
147//!
148//! ## Generating outgoing packets
149//!
//! Outgoing packets are generated using the connection's [`send()`] method
151//! instead:
152//!
153//! ```no_run
154//! # let mut out = [0; 512];
155//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
156//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
157//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
158//! # let peer = "127.0.0.1:1234".parse().unwrap();
159//! # let local = "127.0.0.1:4321".parse().unwrap();
160//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
161//! loop {
162//! let (write, send_info) = match conn.send(&mut out) {
163//! Ok(v) => v,
164//!
165//! Err(quiche::Error::Done) => {
166//! // Done writing.
167//! break;
168//! },
169//!
170//! Err(e) => {
171//! // An error occurred, handle it.
172//! break;
173//! },
174//! };
175//!
176//! socket.send_to(&out[..write], &send_info.to).unwrap();
177//! }
178//! # Ok::<(), quiche::Error>(())
179//! ```
180//!
181//! The application will be provided with a [`SendInfo`] structure providing
182//! additional information about the newly created packet (such as the address
183//! the packet should be sent to).
184//!
185//! When packets are sent, the application is responsible for maintaining a
186//! timer to react to time-based connection events. The timer expiration can be
187//! obtained using the connection's [`timeout()`] method.
188//!
189//! ```
190//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
191//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
192//! # let peer = "127.0.0.1:1234".parse().unwrap();
193//! # let local = "127.0.0.1:4321".parse().unwrap();
194//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
195//! let timeout = conn.timeout();
196//! # Ok::<(), quiche::Error>(())
197//! ```
198//!
199//! The application is responsible for providing a timer implementation, which
200//! can be specific to the operating system or networking framework used. When
201//! a timer expires, the connection's [`on_timeout()`] method should be called,
202//! after which additional packets might need to be sent on the network:
203//!
204//! ```no_run
205//! # let mut out = [0; 512];
206//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
207//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
208//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
209//! # let peer = "127.0.0.1:1234".parse().unwrap();
210//! # let local = "127.0.0.1:4321".parse().unwrap();
211//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
212//! // Timeout expired, handle it.
213//! conn.on_timeout();
214//!
215//! // Send more packets as needed after timeout.
216//! loop {
217//! let (write, send_info) = match conn.send(&mut out) {
218//! Ok(v) => v,
219//!
220//! Err(quiche::Error::Done) => {
221//! // Done writing.
222//! break;
223//! },
224//!
225//! Err(e) => {
226//! // An error occurred, handle it.
227//! break;
228//! },
229//! };
230//!
231//! socket.send_to(&out[..write], &send_info.to).unwrap();
232//! }
233//! # Ok::<(), quiche::Error>(())
234//! ```
235//!
236//! ### Pacing
237//!
238//! It is recommended that applications [pace] sending of outgoing packets to
239//! avoid creating packet bursts that could cause short-term congestion and
240//! losses in the network.
241//!
242//! quiche exposes pacing hints for outgoing packets through the [`at`] field
243//! of the [`SendInfo`] structure that is returned by the [`send()`] method.
244//! This field represents the time when a specific packet should be sent into
245//! the network.
246//!
247//! Applications can use these hints by artificially delaying the sending of
248//! packets through platform-specific mechanisms (such as the [`SO_TXTIME`]
249//! socket option on Linux), or custom methods (for example by using user-space
250//! timers).
251//!
252//! [pace]: https://datatracker.ietf.org/doc/html/rfc9002#section-7.7
253//! [`SO_TXTIME`]: https://man7.org/linux/man-pages/man8/tc-etf.8.html
254//!
255//! ## Sending and receiving stream data
256//!
257//! After some back and forth, the connection will complete its handshake and
258//! will be ready for sending or receiving application data.
259//!
260//! Data can be sent on a stream by using the [`stream_send()`] method:
261//!
262//! ```no_run
263//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
264//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
265//! # let peer = "127.0.0.1:1234".parse().unwrap();
266//! # let local = "127.0.0.1:4321".parse().unwrap();
267//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
268//! if conn.is_established() {
269//! // Handshake completed, send some data on stream 0.
270//! conn.stream_send(0, b"hello", true)?;
271//! }
272//! # Ok::<(), quiche::Error>(())
273//! ```
274//!
275//! The application can check whether there are any readable streams by using
276//! the connection's [`readable()`] method, which returns an iterator over all
277//! the streams that have outstanding data to read.
278//!
279//! The [`stream_recv()`] method can then be used to retrieve the application
280//! data from the readable stream:
281//!
282//! ```no_run
283//! # let mut buf = [0; 512];
284//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
285//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
286//! # let peer = "127.0.0.1:1234".parse().unwrap();
287//! # let local = "127.0.0.1:4321".parse().unwrap();
288//! # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
289//! if conn.is_established() {
290//! // Iterate over readable streams.
291//! for stream_id in conn.readable() {
292//! // Stream is readable, read until there's no more data.
293//! while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
294//! println!("Got {} bytes on stream {}", read, stream_id);
295//! }
296//! }
297//! }
298//! # Ok::<(), quiche::Error>(())
299//! ```
300//!
301//! ## HTTP/3
302//!
303//! The quiche [HTTP/3 module] provides a high level API for sending and
304//! receiving HTTP requests and responses on top of the QUIC transport protocol.
305//!
306//! [`Config`]: https://docs.quic.tech/quiche/struct.Config.html
307//! [`set_initial_max_streams_bidi()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_streams_bidi
308//! [`set_initial_max_streams_uni()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_streams_uni
309//! [`set_initial_max_data()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_data
310//! [`set_initial_max_stream_data_bidi_local()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_bidi_local
311//! [`set_initial_max_stream_data_bidi_remote()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_bidi_remote
312//! [`set_initial_max_stream_data_uni()`]: https://docs.rs/quiche/latest/quiche/struct.Config.html#method.set_initial_max_stream_data_uni
313//! [`with_boring_ssl_ctx_builder()`]: https://docs.quic.tech/quiche/struct.Config.html#method.with_boring_ssl_ctx_builder
314//! [`connect()`]: fn.connect.html
315//! [`accept()`]: fn.accept.html
316//! [`recv()`]: struct.Connection.html#method.recv
317//! [`RecvInfo`]: struct.RecvInfo.html
318//! [`send()`]: struct.Connection.html#method.send
319//! [`SendInfo`]: struct.SendInfo.html
320//! [`at`]: struct.SendInfo.html#structfield.at
321//! [`timeout()`]: struct.Connection.html#method.timeout
322//! [`on_timeout()`]: struct.Connection.html#method.on_timeout
323//! [`stream_send()`]: struct.Connection.html#method.stream_send
324//! [`readable()`]: struct.Connection.html#method.readable
325//! [`stream_recv()`]: struct.Connection.html#method.stream_recv
326//! [HTTP/3 module]: h3/index.html
327//!
328//! ## Congestion Control
329//!
330//! The quiche library provides a high-level API for configuring which
331//! congestion control algorithm to use throughout the QUIC connection.
332//!
333//! When a QUIC connection is created, the application can optionally choose
334//! which CC algorithm to use. See [`CongestionControlAlgorithm`] for currently
335//! available congestion control algorithms.
336//!
337//! For example:
338//!
339//! ```
340//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
341//! config.set_cc_algorithm(quiche::CongestionControlAlgorithm::Reno);
342//! ```
343//!
344//! Alternatively, you can configure the congestion control algorithm to use
345//! by its name.
346//!
347//! ```
348//! let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
349//! config.set_cc_algorithm_name("reno").unwrap();
350//! ```
351//!
352//! Note that the CC algorithm should be configured before calling [`connect()`]
353//! or [`accept()`]. Otherwise the connection will use a default CC algorithm.
354//!
355//! [`CongestionControlAlgorithm`]: enum.CongestionControlAlgorithm.html
356//!
357//! ## Feature flags
358//!
359//! quiche defines a number of [feature flags] to reduce the amount of compiled
360//! code and dependencies:
361//!
362//! * `boringssl-vendored` (default): Build the vendored BoringSSL library.
363//!
364//! * `boringssl-boring-crate`: Use the BoringSSL library provided by the
365//! [boring] crate. It takes precedence over `boringssl-vendored` if both
366//! features are enabled.
367//!
368//! * `pkg-config-meta`: Generate pkg-config metadata file for libquiche.
369//!
370//! * `ffi`: Build and expose the FFI API.
371//!
372//! * `qlog`: Enable support for the [qlog] logging format.
373//!
374//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section
375//! [boring]: https://crates.io/crates/boring
376//! [qlog]: https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema
377
378#![allow(clippy::upper_case_acronyms)]
379#![warn(missing_docs)]
380#![warn(unused_qualifications)]
381#![cfg_attr(docsrs, feature(doc_cfg))]
382
383#[macro_use]
384extern crate log;
385
386use std::cmp;
387
388use std::collections::VecDeque;
389
390use std::net::SocketAddr;
391
392use std::str::FromStr;
393
394use std::sync::Arc;
395
396use std::time::Duration;
397use std::time::Instant;
398
399#[cfg(feature = "qlog")]
400use qlog::events::connectivity::ConnectivityEventType;
401#[cfg(feature = "qlog")]
402use qlog::events::connectivity::TransportOwner;
403#[cfg(feature = "qlog")]
404use qlog::events::quic::RecoveryEventType;
405#[cfg(feature = "qlog")]
406use qlog::events::quic::TransportEventType;
407#[cfg(feature = "qlog")]
408use qlog::events::DataRecipient;
409#[cfg(feature = "qlog")]
410use qlog::events::Event;
411#[cfg(feature = "qlog")]
412use qlog::events::EventData;
413#[cfg(feature = "qlog")]
414use qlog::events::EventImportance;
415#[cfg(feature = "qlog")]
416use qlog::events::EventType;
417#[cfg(feature = "qlog")]
418use qlog::events::RawInfo;
419
420use smallvec::SmallVec;
421
422use crate::buffers::DefaultBufFactory;
423
424use crate::recovery::OnAckReceivedOutcome;
425use crate::recovery::OnLossDetectionTimeoutOutcome;
426use crate::recovery::RecoveryOps;
427use crate::recovery::ReleaseDecision;
428
429use crate::stream::RecvAction;
430use crate::stream::StreamPriorityKey;
431
/// The current QUIC wire version.
pub const PROTOCOL_VERSION: u32 = PROTOCOL_VERSION_V1;

/// Supported QUIC versions.
const PROTOCOL_VERSION_V1: u32 = 0x0000_0001;

/// The maximum length of a connection ID.
pub const MAX_CONN_ID_LEN: usize = packet::MAX_CID_LEN as usize;

/// The minimum length of Initial packets sent by a client.
pub const MIN_CLIENT_INITIAL_LEN: usize = 1200;

/// The default initial RTT (can be overridden with
/// `Config::set_initial_rtt()`).
const DEFAULT_INITIAL_RTT: Duration = Duration::from_millis(333);

// The minimum length of a packet payload, in bytes.
const PAYLOAD_MIN_LEN: usize = 4;

// PATH_CHALLENGE (9 bytes) + AEAD tag (16 bytes).
const MIN_PROBING_SIZE: usize = 25;

// The default anti-amplification limit factor (can be overridden with
// `Config::set_max_amplification_factor()`).
const MAX_AMPLIFICATION_FACTOR: usize = 3;

// The maximum number of tracked packet number ranges that need to be acked.
//
// This represents more or less how many ack blocks can fit in a typical packet.
const MAX_ACK_RANGES: usize = 68;

// The highest possible stream ID allowed.
const MAX_STREAM_ID: u64 = 1 << 60;

// The default max_datagram_size used in congestion control.
const MAX_SEND_UDP_PAYLOAD_SIZE: usize = 1200;

// The default length of DATAGRAM queues.
const DEFAULT_MAX_DGRAM_QUEUE_LEN: usize = 0;

// The default length of PATH_CHALLENGE receive queue.
const DEFAULT_MAX_PATH_CHALLENGE_RX_QUEUE_LEN: usize = 3;

// The DATAGRAM standard recommends either none or 65536 as maximum DATAGRAM
// frames size. We enforce the recommendation for forward compatibility.
const MAX_DGRAM_FRAME_SIZE: u64 = 65536;

// The length of the payload length field.
const PAYLOAD_LENGTH_LEN: usize = 2;

// The number of undecryptable packets that can be buffered.
const MAX_UNDECRYPTABLE_PACKETS: usize = 10;

// Bit mask used to detect reserved versions (see RFC 9000 Section 15): a
// version is treated as reserved when all of its set bits fall within this
// mask (see `is_reserved_version()`).
const RESERVED_VERSION_MASK: u32 = 0xfafafafa;

// The default size of the receiver connection flow control window.
const DEFAULT_CONNECTION_WINDOW: u64 = 48 * 1024;

// The maximum size of the receiver connection flow control window.
const MAX_CONNECTION_WINDOW: u64 = 24 * 1024 * 1024;

// How much larger the connection flow control window needs to be than the
// stream flow control window.
const CONNECTION_WINDOW_FACTOR: f64 = 1.5;

// How many probing packet timeouts do we tolerate before considering the path
// validation as failed.
const MAX_PROBING_TIMEOUTS: usize = 3;

// The default initial congestion window size in terms of packet count.
const DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS: usize = 10;

// The maximum data offset that can be stored in a crypto stream.
const MAX_CRYPTO_STREAM_OFFSET: u64 = 1 << 16;

// The send capacity factor (can be overridden with
// `Config::set_send_capacity_factor()`).
const TX_CAP_FACTOR: f64 = 1.0;
505
/// Ancillary information about incoming packets.
///
/// Passed alongside the packet buffer to the connection's `recv()` method to
/// describe where the packet came from and which local address it arrived on.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct RecvInfo {
    /// The remote address the packet was received from.
    pub from: SocketAddr,

    /// The local address the packet was received on.
    pub to: SocketAddr,
}
515
/// Ancillary information about outgoing packets.
///
/// Returned by the connection's `send()` method to tell the application
/// where, and when, the freshly generated packet should be transmitted.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SendInfo {
    /// The local address the packet should be sent from.
    pub from: SocketAddr,

    /// The remote address the packet should be sent to.
    pub to: SocketAddr,

    /// The time to send the packet out.
    ///
    /// See [Pacing] for more details.
    ///
    /// [Pacing]: index.html#pacing
    pub at: Instant,
}
532
/// The side of the stream to be shut down.
///
/// This should be used when calling [`stream_shutdown()`].
///
/// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
// `repr(C)` with explicit discriminants — presumably so the values can cross
// the FFI boundary unchanged.
#[repr(C)]
#[derive(PartialEq, Eq)]
pub enum Shutdown {
    /// Stop receiving stream data.
    Read = 0,

    /// Stop sending stream data.
    Write = 1,
}
547
/// Qlog logging level.
///
/// Levels are cumulative: each level also logs all events of the levels
/// before it.
#[repr(C)]
#[cfg(feature = "qlog")]
#[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
pub enum QlogLevel {
    /// Logs any events of Core importance.
    Core = 0,

    /// Logs any events of Core and Base importance.
    Base = 1,

    /// Logs any events of Core, Base and Extra importance.
    Extra = 2,
}
562
/// Stores configuration shared between multiple connections.
pub struct Config {
    // Transport parameters advertised to peers for new connections.
    local_transport_params: TransportParams,

    // QUIC wire version selected at construction time.
    version: u32,

    // Shared TLS context used when creating connections.
    tls_ctx: tls::Context,

    // ALPN protocols, in preference order (see `set_application_protos()`).
    application_protos: Vec<Vec<u8>>,

    // Whether to send GREASE values (default `true`).
    grease: bool,

    // Congestion control configuration.
    cc_algorithm: CongestionControlAlgorithm,
    custom_bbr_params: Option<BbrParams>,
    initial_congestion_window_packets: usize,
    enable_relaxed_loss_threshold: bool,
    enable_cubic_idle_restart_fix: bool,
    enable_send_streams_blocked: bool,

    // Path MTU discovery toggle and per-size probe budget.
    pmtud: bool,
    pmtud_max_probes: u8,

    // Whether HyStart is enabled.
    hystart: bool,

    // Whether packet pacing is enabled (see the module-level "Pacing" docs).
    pacing: bool,
    /// Send rate limit in Mbps
    max_pacing_rate: Option<u64>,

    // Factor applied to the send capacity (default 1.0).
    tx_cap_factor: f64,

    // Maximum lengths of the DATAGRAM receive and send queues.
    dgram_recv_max_queue_len: usize,
    dgram_send_max_queue_len: usize,

    // Maximum length of the PATH_CHALLENGE receive queue.
    path_challenge_recv_max_queue_len: usize,

    // Maximum size of outgoing UDP payloads (minimum 1200).
    max_send_udp_payload_size: usize,

    // Upper bounds for the connection and stream flow control windows.
    max_connection_window: u64,
    max_stream_window: u64,

    // Anti-amplification limit factor.
    max_amplification_factor: usize,

    // NOTE(review): no setters for the two fields below are visible in this
    // chunk; semantics inferred from their names — confirm against the rest
    // of the file.
    disable_dcid_reuse: bool,

    track_unknown_transport_params: Option<usize>,

    // Initial RTT used for new connections (default 333ms).
    initial_rtt: Duration,
}
611
612// See https://quicwg.org/base-drafts/rfc9000.html#section-15
613fn is_reserved_version(version: u32) -> bool {
614 version & RESERVED_VERSION_MASK == version
615}
616
617impl Config {
618 /// Creates a config object with the given version.
619 ///
620 /// ## Examples:
621 ///
622 /// ```
623 /// let config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
624 /// # Ok::<(), quiche::Error>(())
625 /// ```
626 pub fn new(version: u32) -> Result<Config> {
627 Self::with_tls_ctx(version, tls::Context::new()?)
628 }
629
630 /// Creates a config object with the given version and
631 /// [`SslContextBuilder`].
632 ///
633 /// This is useful for applications that wish to manually configure
634 /// [`SslContextBuilder`].
635 ///
636 /// [`SslContextBuilder`]: https://docs.rs/boring/latest/boring/ssl/struct.SslContextBuilder.html
637 #[cfg(feature = "boringssl-boring-crate")]
638 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
639 pub fn with_boring_ssl_ctx_builder(
640 version: u32, tls_ctx_builder: boring::ssl::SslContextBuilder,
641 ) -> Result<Config> {
642 Self::with_tls_ctx(version, tls::Context::from_boring(tls_ctx_builder))
643 }
644
645 fn with_tls_ctx(version: u32, tls_ctx: tls::Context) -> Result<Config> {
646 if !is_reserved_version(version) && !version_is_supported(version) {
647 return Err(Error::UnknownVersion);
648 }
649
650 Ok(Config {
651 local_transport_params: TransportParams::default(),
652 version,
653 tls_ctx,
654 application_protos: Vec::new(),
655 grease: true,
656 cc_algorithm: CongestionControlAlgorithm::CUBIC,
657 custom_bbr_params: None,
658 initial_congestion_window_packets:
659 DEFAULT_INITIAL_CONGESTION_WINDOW_PACKETS,
660 enable_relaxed_loss_threshold: false,
661 enable_cubic_idle_restart_fix: true,
662 enable_send_streams_blocked: false,
663 pmtud: false,
664 pmtud_max_probes: pmtud::MAX_PROBES_DEFAULT,
665 hystart: true,
666 pacing: true,
667 max_pacing_rate: None,
668
669 tx_cap_factor: TX_CAP_FACTOR,
670
671 dgram_recv_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,
672 dgram_send_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,
673
674 path_challenge_recv_max_queue_len:
675 DEFAULT_MAX_PATH_CHALLENGE_RX_QUEUE_LEN,
676
677 max_send_udp_payload_size: MAX_SEND_UDP_PAYLOAD_SIZE,
678
679 max_connection_window: MAX_CONNECTION_WINDOW,
680 max_stream_window: stream::MAX_STREAM_WINDOW,
681
682 max_amplification_factor: MAX_AMPLIFICATION_FACTOR,
683
684 disable_dcid_reuse: false,
685
686 track_unknown_transport_params: None,
687 initial_rtt: DEFAULT_INITIAL_RTT,
688 })
689 }
690
    /// Configures the given certificate chain.
    ///
    /// The content of `file` is parsed as a PEM-encoded leaf certificate,
    /// followed by optional intermediate certificates.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_cert_chain_from_pem_file("/path/to/cert.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_cert_chain_from_pem_file(&mut self, file: &str) -> Result<()> {
        // Delegates file loading and parsing to the shared TLS context.
        self.tls_ctx.use_certificate_chain_file(file)
    }
706
    /// Configures the given private key.
    ///
    /// The content of `file` is parsed as a PEM-encoded private key.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_priv_key_from_pem_file("/path/to/key.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_priv_key_from_pem_file(&mut self, file: &str) -> Result<()> {
        // Delegates file loading and parsing to the shared TLS context.
        self.tls_ctx.use_privkey_file(file)
    }
721
    /// Specifies a file where trusted CA certificates are stored for the
    /// purposes of certificate verification.
    ///
    /// The content of `file` is parsed as a PEM-encoded certificate chain.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_verify_locations_from_file("/path/to/cert.pem")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_verify_locations_from_file(&mut self, file: &str) -> Result<()> {
        // Delegates file loading and parsing to the shared TLS context.
        self.tls_ctx.load_verify_locations_from_file(file)
    }
737
    /// Specifies a directory where trusted CA certificates are stored for the
    /// purposes of certificate verification.
    ///
    /// The content of `dir` is a set of PEM-encoded certificate chains.
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut config = quiche::Config::new(0xbabababa)?;
    /// config.load_verify_locations_from_directory("/path/to/certs")?;
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn load_verify_locations_from_directory(
        &mut self, dir: &str,
    ) -> Result<()> {
        // Delegates directory loading and parsing to the shared TLS context.
        self.tls_ctx.load_verify_locations_from_directory(dir)
    }
755
    /// Configures whether to verify the peer's certificate.
    ///
    /// This should usually be `true` for client-side connections and `false`
    /// for server-side ones.
    ///
    /// Note that by default, no verification is performed.
    ///
    /// Also note that on the server-side, enabling verification of the peer
    /// will trigger a certificate request and make authentication errors
    /// fatal, but will still allow anonymous clients (i.e. clients that
    /// don't present a certificate at all). Servers can check whether a
    /// client presented a certificate by calling [`peer_cert()`] if they
    /// need to.
    ///
    /// [`peer_cert()`]: struct.Connection.html#method.peer_cert
    pub fn verify_peer(&mut self, verify: bool) {
        // Verification itself is enforced by the TLS context.
        self.tls_ctx.set_verify(verify);
    }
774
    /// Configures whether to do path MTU discovery.
    ///
    /// The default value is `false`.
    ///
    /// See [`set_pmtud_max_probes()`](Self::set_pmtud_max_probes) to tune the
    /// number of probe attempts per probed size.
    pub fn discover_pmtu(&mut self, discover: bool) {
        self.pmtud = discover;
    }
781
782 /// Configures the maximum number of PMTUD probe attempts before treating
783 /// a probe size as failed.
784 ///
785 /// Defaults to 3 per [RFC 8899 Section 5.1.2](https://datatracker.ietf.org/doc/html/rfc8899#section-5.1.2).
786 /// If 0 is passed, the default value is used.
787 pub fn set_pmtud_max_probes(&mut self, max_probes: u8) {
788 self.pmtud_max_probes = max_probes;
789 }
790
    /// Configures whether to send GREASE values.
    ///
    /// The default value is `true`.
    ///
    /// GREASE exercises reserved codepoints to help prevent protocol
    /// ossification.
    pub fn grease(&mut self, grease: bool) {
        self.grease = grease;
    }
797
    /// Enables logging of secrets.
    ///
    /// When logging is enabled, the [`set_keylog()`] method must be called on
    /// the connection for its cryptographic secrets to be logged in the
    /// [keylog] format to the specified writer.
    ///
    /// [`set_keylog()`]: struct.Connection.html#method.set_keylog
    /// [keylog]: https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format
    pub fn log_keys(&mut self) {
        // Flips the keylog switch on the shared TLS context; the output
        // destination is supplied per-connection via `set_keylog()`.
        self.tls_ctx.enable_keylog();
    }
809
    /// Configures the session ticket key material.
    ///
    /// On the server this key will be used to encrypt and decrypt session
    /// tickets, used to perform session resumption without server-side state.
    ///
    /// By default a key is generated internally, and rotated regularly, so
    /// applications don't need to call this unless they need to use a
    /// specific key (e.g. in order to support resumption across multiple
    /// servers), in which case the application is also responsible for
    /// rotating the key to provide forward secrecy.
    pub fn set_ticket_key(&mut self, key: &[u8]) -> Result<()> {
        // The TLS context validates and installs the key material.
        self.tls_ctx.set_ticket_key(key)
    }
823
    /// Enables sending or receiving early data.
    ///
    /// "Early data" refers to TLS 1.3 0-RTT data.
    pub fn enable_early_data(&mut self) {
        // One-way enable on the shared TLS context; no disable counterpart
        // is visible in this API.
        self.tls_ctx.set_early_data_enabled(true);
    }
828
829 /// Configures the list of supported application protocols.
830 ///
831 /// On the client this configures the list of protocols to send to the
832 /// server as part of the ALPN extension.
833 ///
834 /// On the server this configures the list of supported protocols to match
835 /// against the client-supplied list.
836 ///
837 /// Applications must set a value, but no default is provided.
838 ///
839 /// ## Examples:
840 ///
841 /// ```
842 /// # let mut config = quiche::Config::new(0xbabababa)?;
843 /// config.set_application_protos(&[b"http/1.1", b"http/0.9"]);
844 /// # Ok::<(), quiche::Error>(())
845 /// ```
846 pub fn set_application_protos(
847 &mut self, protos_list: &[&[u8]],
848 ) -> Result<()> {
849 self.application_protos =
850 protos_list.iter().map(|s| s.to_vec()).collect();
851
852 self.tls_ctx.set_alpn(protos_list)
853 }
854
855 /// Configures the list of supported application protocols using wire
856 /// format.
857 ///
858 /// The list of protocols `protos` must be a series of non-empty, 8-bit
859 /// length-prefixed strings.
860 ///
861 /// See [`set_application_protos`](Self::set_application_protos) for more
862 /// background about application protocols.
863 ///
864 /// ## Examples:
865 ///
866 /// ```
867 /// # let mut config = quiche::Config::new(0xbabababa)?;
868 /// config.set_application_protos_wire_format(b"\x08http/1.1\x08http/0.9")?;
869 /// # Ok::<(), quiche::Error>(())
870 /// ```
871 pub fn set_application_protos_wire_format(
872 &mut self, protos: &[u8],
873 ) -> Result<()> {
874 let mut b = octets::Octets::with_slice(protos);
875
876 let mut protos_list = Vec::new();
877
878 while let Ok(proto) = b.get_bytes_with_u8_length() {
879 protos_list.push(proto.buf());
880 }
881
882 self.set_application_protos(&protos_list)
883 }
884
    /// Sets the anti-amplification limit factor.
    ///
    /// Before address validation, a server may send at most this many times
    /// the number of bytes it has received from the client.
    ///
    /// The default value is `3`.
    pub fn set_max_amplification_factor(&mut self, v: usize) {
        self.max_amplification_factor = v;
    }
891
    /// Sets the send capacity factor.
    ///
    /// Scales the connection's computed send capacity (`tx_cap`).
    ///
    /// The default value is `1`.
    pub fn set_send_capacity_factor(&mut self, v: f64) {
        self.tx_cap_factor = v;
    }
898
    /// Sets the connection's initial RTT.
    ///
    /// The initial RTT is the value used before any RTT sample is taken.
    ///
    /// The default value is `333` milliseconds, per RFC 9002.
    pub fn set_initial_rtt(&mut self, v: Duration) {
        self.initial_rtt = v;
    }
905
906 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
907 ///
908 /// The default value is infinite, that is, no timeout is used.
909 pub fn set_max_idle_timeout(&mut self, v: u64) {
910 self.local_transport_params.max_idle_timeout =
911 cmp::min(v, octets::MAX_VAR_INT);
912 }
913
914 /// Sets the `max_udp_payload_size transport` parameter.
915 ///
916 /// The default value is `65527`.
917 pub fn set_max_recv_udp_payload_size(&mut self, v: usize) {
918 self.local_transport_params.max_udp_payload_size =
919 cmp::min(v as u64, octets::MAX_VAR_INT);
920 }
921
922 /// Sets the maximum outgoing UDP payload size.
923 ///
924 /// The default and minimum value is `1200`.
925 pub fn set_max_send_udp_payload_size(&mut self, v: usize) {
926 self.max_send_udp_payload_size = cmp::max(v, MAX_SEND_UDP_PAYLOAD_SIZE);
927 }
928
929 /// Sets the `initial_max_data` transport parameter.
930 ///
931 /// When set to a non-zero value quiche will only allow at most `v` bytes of
932 /// incoming stream data to be buffered for the whole connection (that is,
933 /// data that is not yet read by the application) and will allow more data
934 /// to be received as the buffer is consumed by the application.
935 ///
936 /// When set to zero, either explicitly or via the default, quiche will not
937 /// give any flow control to the peer, preventing it from sending any stream
938 /// data.
939 ///
940 /// The default value is `0`.
941 pub fn set_initial_max_data(&mut self, v: u64) {
942 self.local_transport_params.initial_max_data =
943 cmp::min(v, octets::MAX_VAR_INT);
944 }
945
946 /// Sets the `initial_max_stream_data_bidi_local` transport parameter.
947 ///
948 /// When set to a non-zero value quiche will only allow at most `v` bytes
949 /// of incoming stream data to be buffered for each locally-initiated
950 /// bidirectional stream (that is, data that is not yet read by the
951 /// application) and will allow more data to be received as the buffer is
952 /// consumed by the application.
953 ///
954 /// When set to zero, either explicitly or via the default, quiche will not
955 /// give any flow control to the peer, preventing it from sending any stream
956 /// data.
957 ///
958 /// The default value is `0`.
959 pub fn set_initial_max_stream_data_bidi_local(&mut self, v: u64) {
960 self.local_transport_params
961 .initial_max_stream_data_bidi_local =
962 cmp::min(v, octets::MAX_VAR_INT);
963 }
964
965 /// Sets the `initial_max_stream_data_bidi_remote` transport parameter.
966 ///
967 /// When set to a non-zero value quiche will only allow at most `v` bytes
968 /// of incoming stream data to be buffered for each remotely-initiated
969 /// bidirectional stream (that is, data that is not yet read by the
970 /// application) and will allow more data to be received as the buffer is
971 /// consumed by the application.
972 ///
973 /// When set to zero, either explicitly or via the default, quiche will not
974 /// give any flow control to the peer, preventing it from sending any stream
975 /// data.
976 ///
977 /// The default value is `0`.
978 pub fn set_initial_max_stream_data_bidi_remote(&mut self, v: u64) {
979 self.local_transport_params
980 .initial_max_stream_data_bidi_remote =
981 cmp::min(v, octets::MAX_VAR_INT);
982 }
983
984 /// Sets the `initial_max_stream_data_uni` transport parameter.
985 ///
986 /// When set to a non-zero value quiche will only allow at most `v` bytes
987 /// of incoming stream data to be buffered for each unidirectional stream
988 /// (that is, data that is not yet read by the application) and will allow
989 /// more data to be received as the buffer is consumed by the application.
990 ///
991 /// When set to zero, either explicitly or via the default, quiche will not
992 /// give any flow control to the peer, preventing it from sending any stream
993 /// data.
994 ///
995 /// The default value is `0`.
996 pub fn set_initial_max_stream_data_uni(&mut self, v: u64) {
997 self.local_transport_params.initial_max_stream_data_uni =
998 cmp::min(v, octets::MAX_VAR_INT);
999 }
1000
1001 /// Sets the `initial_max_streams_bidi` transport parameter.
1002 ///
1003 /// When set to a non-zero value quiche will only allow `v` number of
1004 /// concurrent remotely-initiated bidirectional streams to be open at any
1005 /// given time and will increase the limit automatically as streams are
1006 /// completed.
1007 ///
1008 /// When set to zero, either explicitly or via the default, quiche will not
1009 /// not allow the peer to open any bidirectional streams.
1010 ///
1011 /// A bidirectional stream is considered completed when all incoming data
1012 /// has been read by the application (up to the `fin` offset) or the
1013 /// stream's read direction has been shutdown, and all outgoing data has
1014 /// been acked by the peer (up to the `fin` offset) or the stream's write
1015 /// direction has been shutdown.
1016 ///
1017 /// The default value is `0`.
1018 pub fn set_initial_max_streams_bidi(&mut self, v: u64) {
1019 self.local_transport_params.initial_max_streams_bidi =
1020 cmp::min(v, octets::MAX_VAR_INT);
1021 }
1022
1023 /// Sets the `initial_max_streams_uni` transport parameter.
1024 ///
1025 /// When set to a non-zero value quiche will only allow `v` number of
1026 /// concurrent remotely-initiated unidirectional streams to be open at any
1027 /// given time and will increase the limit automatically as streams are
1028 /// completed.
1029 ///
1030 /// When set to zero, either explicitly or via the default, quiche will not
1031 /// not allow the peer to open any unidirectional streams.
1032 ///
1033 /// A unidirectional stream is considered completed when all incoming data
1034 /// has been read by the application (up to the `fin` offset) or the
1035 /// stream's read direction has been shutdown.
1036 ///
1037 /// The default value is `0`.
1038 pub fn set_initial_max_streams_uni(&mut self, v: u64) {
1039 self.local_transport_params.initial_max_streams_uni =
1040 cmp::min(v, octets::MAX_VAR_INT);
1041 }
1042
1043 /// Sets the `ack_delay_exponent` transport parameter.
1044 ///
1045 /// The default value is `3`.
1046 pub fn set_ack_delay_exponent(&mut self, v: u64) {
1047 self.local_transport_params.ack_delay_exponent =
1048 cmp::min(v, octets::MAX_VAR_INT);
1049 }
1050
1051 /// Sets the `max_ack_delay` transport parameter.
1052 ///
1053 /// The default value is `25`.
1054 pub fn set_max_ack_delay(&mut self, v: u64) {
1055 self.local_transport_params.max_ack_delay =
1056 cmp::min(v, octets::MAX_VAR_INT);
1057 }
1058
1059 /// Sets the `active_connection_id_limit` transport parameter.
1060 ///
1061 /// The default value is `2`. Lower values will be ignored.
1062 pub fn set_active_connection_id_limit(&mut self, v: u64) {
1063 if v >= 2 {
1064 self.local_transport_params.active_conn_id_limit =
1065 cmp::min(v, octets::MAX_VAR_INT);
1066 }
1067 }
1068
    /// Sets the `disable_active_migration` transport parameter.
    ///
    /// The default value is `false`.
    pub fn set_disable_active_migration(&mut self, v: bool) {
        self.local_transport_params.disable_active_migration = v;
    }
1075
    /// Sets the congestion control algorithm used.
    ///
    /// The default value is `CongestionControlAlgorithm::CUBIC`.
    pub fn set_cc_algorithm(&mut self, algo: CongestionControlAlgorithm) {
        self.cc_algorithm = algo;
    }
1082
    /// Sets custom BBR settings.
    ///
    /// This API is experimental and will be removed in the future.
    ///
    /// Currently this only applies when `cc_algorithm` is set to
    /// `CongestionControlAlgorithm::Bbr2Gcongestion`.
    ///
    /// The default value is `None`.
    #[cfg(feature = "internal")]
    #[doc(hidden)]
    pub fn set_custom_bbr_params(&mut self, custom_bbr_settings: BbrParams) {
        self.custom_bbr_params = Some(custom_bbr_settings);
    }
1096
1097 /// Sets the congestion control algorithm used by string.
1098 ///
1099 /// The default value is `cubic`. On error `Error::CongestionControl`
1100 /// will be returned.
1101 ///
1102 /// ## Examples:
1103 ///
1104 /// ```
1105 /// # let mut config = quiche::Config::new(0xbabababa)?;
1106 /// config.set_cc_algorithm_name("reno");
1107 /// # Ok::<(), quiche::Error>(())
1108 /// ```
1109 pub fn set_cc_algorithm_name(&mut self, name: &str) -> Result<()> {
1110 self.cc_algorithm = CongestionControlAlgorithm::from_str(name)?;
1111
1112 Ok(())
1113 }
1114
    /// Sets initial congestion window size in terms of packet count.
    ///
    /// The default value is 10.
    pub fn set_initial_congestion_window_packets(&mut self, packets: usize) {
        self.initial_congestion_window_packets = packets;
    }
1121
    /// Configures whether to enable relaxed loss detection on spurious loss.
    ///
    /// The default value is `false`.
    pub fn set_enable_relaxed_loss_threshold(&mut self, enable: bool) {
        self.enable_relaxed_loss_threshold = enable;
    }
1128
    /// Configures whether to enable the CUBIC idle restart fix.
    ///
    /// When enabled, the epoch shift on idle restart uses the later of
    /// the last ACK time and last send time, avoiding an inflated delta
    /// when bytes-in-flight transiently hits zero.
    ///
    /// The default value is `true`.
    pub fn set_enable_cubic_idle_restart_fix(&mut self, enable: bool) {
        self.enable_cubic_idle_restart_fix = enable;
    }
1139
    /// Configures whether to enable sending STREAMS_BLOCKED frames.
    ///
    /// STREAMS_BLOCKED frames are an optional advisory signal in the QUIC
    /// protocol which SHOULD be sent when the sender wishes to open a stream
    /// but is unable to do so due to the maximum stream limit set by its peer.
    ///
    /// The default value is `false`.
    pub fn set_enable_send_streams_blocked(&mut self, enable: bool) {
        self.enable_send_streams_blocked = enable;
    }
1150
    /// Configures whether to enable HyStart++.
    ///
    /// The default value is `true`.
    pub fn enable_hystart(&mut self, v: bool) {
        self.hystart = v;
    }
1157
    /// Configures whether to enable pacing.
    ///
    /// The default value is `true`.
    pub fn enable_pacing(&mut self, v: bool) {
        self.pacing = v;
    }
1164
    /// Sets the max value for pacing rate.
    ///
    /// By default pacing rate is not limited.
    // NOTE(review): the unit of `v` is not stated here — presumably bytes per
    // second; confirm against the recovery module before relying on it.
    pub fn set_max_pacing_rate(&mut self, v: u64) {
        self.max_pacing_rate = Some(v);
    }
1171
1172 /// Configures whether to enable receiving DATAGRAM frames.
1173 ///
1174 /// When enabled, the `max_datagram_frame_size` transport parameter is set
1175 /// to 65536 as recommended by draft-ietf-quic-datagram-01.
1176 ///
1177 /// The default is `false`.
1178 pub fn enable_dgram(
1179 &mut self, enabled: bool, recv_queue_len: usize, send_queue_len: usize,
1180 ) {
1181 self.local_transport_params.max_datagram_frame_size = if enabled {
1182 Some(MAX_DGRAM_FRAME_SIZE)
1183 } else {
1184 None
1185 };
1186 self.dgram_recv_max_queue_len = recv_queue_len;
1187 self.dgram_send_max_queue_len = send_queue_len;
1188 }
1189
    /// Configures the max number of queued received PATH_CHALLENGE frames.
    ///
    /// When an endpoint receives a PATH_CHALLENGE frame and the queue is full,
    /// the frame is discarded.
    ///
    /// The default is 3.
    pub fn set_path_challenge_recv_max_queue_len(&mut self, queue_len: usize) {
        self.path_challenge_recv_max_queue_len = queue_len;
    }
1199
    /// Sets the maximum size of the connection window, in bytes.
    ///
    /// The default value is MAX_CONNECTION_WINDOW (24MBytes).
    pub fn set_max_connection_window(&mut self, v: u64) {
        self.max_connection_window = v;
    }
1206
    /// Sets the maximum size of the stream window, in bytes.
    ///
    /// The default value is MAX_STREAM_WINDOW (16MBytes).
    pub fn set_max_stream_window(&mut self, v: u64) {
        self.max_stream_window = v;
    }
1213
    /// Sets the initial stateless reset token.
    ///
    /// This value is only advertised by servers. Setting a stateless reset
    /// token as a client has no effect on the connection.
    ///
    /// The default value is `None`.
    pub fn set_stateless_reset_token(&mut self, v: Option<u128>) {
        self.local_transport_params.stateless_reset_token = v;
    }
1223
    /// Sets whether the QUIC connection should avoid reusing DCIDs over
    /// different paths.
    ///
    /// When set to `true`, it ensures that a destination Connection ID is never
    /// reused on different paths. Such behaviour may lead to connection stall
    /// if the peer performs a non-voluntary migration (e.g., NAT rebinding) and
    /// does not provide additional destination Connection IDs to handle such
    /// event.
    ///
    /// The default value is `false`.
    pub fn set_disable_dcid_reuse(&mut self, v: bool) {
        self.disable_dcid_reuse = v;
    }
1237
    /// Enables tracking unknown transport parameters.
    ///
    /// Specify the maximum number of bytes used to track unknown transport
    /// parameters. The size includes the identifier and its value. If storing a
    /// transport parameter would cause the limit to be exceeded, it is quietly
    /// dropped.
    ///
    /// The default is that the feature is disabled.
    pub fn enable_track_unknown_transport_parameters(&mut self, size: usize) {
        self.track_unknown_transport_params = Some(size);
    }
1249}
1250
/// Tracks the health of the `tx_buffered` value.
#[derive(Clone, Copy, Debug, Default, PartialEq)]
pub enum TxBufferTrackingState {
    /// The send buffer is in a good state.
    #[default]
    Ok,
    /// The send buffer is in an inconsistent state, which could lead to
    /// connection stalls or excess buffering due to bugs we haven't
    /// tracked down yet.
    Inconsistent,
}
1262
/// Tracks if the connection hit the peer stream limit and which
/// STREAMS_BLOCKED frames have been sent.
#[derive(Default)]
struct StreamsBlockedState {
    /// The peer's max_streams limit at which we last became blocked on
    /// opening new local streams, if any.
    blocked_at: Option<u64>,

    /// The stream limit sent on the most recently sent STREAMS_BLOCKED
    /// frame. If != to `blocked_at`, the connection has pending
    /// STREAMS_BLOCKED frames to send.
    blocked_sent: Option<u64>,
}
1276
1277impl StreamsBlockedState {
1278 /// Returns true if there is a STREAMS_BLOCKED frame that needs sending.
1279 fn has_pending_stream_blocked_frame(&self) -> bool {
1280 self.blocked_sent < self.blocked_at
1281 }
1282
1283 /// Update the stream blocked limit.
1284 fn update_at(&mut self, limit: u64) {
1285 self.blocked_at = self.blocked_at.max(Some(limit));
1286 }
1287
1288 /// Clear blocked_sent to force retransmission of the most recently sent
1289 /// STREAMS_BLOCKED frame.
1290 fn force_retransmit_sent_limit_eq(&mut self, limit: u64) {
1291 // Only clear blocked_sent if the lost frame had the most recently sent
1292 // limit.
1293 if self.blocked_sent == Some(limit) {
1294 self.blocked_sent = None;
1295 }
1296 }
1297}
1298
/// A QUIC connection.
pub struct Connection<F = DefaultBufFactory>
where
    F: BufFactory,
{
    /// QUIC wire version used for the connection.
    version: u32,

    /// Connection Identifiers.
    ids: cid::ConnectionIdentifiers,

    /// Unique opaque ID for the connection that can be used for logging.
    trace_id: String,

    /// Packet number spaces.
    pkt_num_spaces: [packet::PktNumSpace; packet::Epoch::count()],

    /// The crypto context.
    crypto_ctx: [packet::CryptoContext; packet::Epoch::count()],

    /// Next packet number.
    next_pkt_num: u64,

    // TODO
    // combine with `next_pkt_num`
    /// Tracks the packet skip context.
    pkt_num_manager: packet::PktNumManager,

    /// Peer's transport parameters.
    peer_transport_params: TransportParams,

    /// If tracking unknown transport parameters from a peer, how much space to
    /// use in bytes.
    peer_transport_params_track_unknown: Option<usize>,

    /// Local transport parameters.
    local_transport_params: TransportParams,

    /// TLS handshake state.
    handshake: tls::Handshake,

    /// Serialized TLS session buffer.
    ///
    /// This field is populated when a new session ticket is processed on the
    /// client. On the server this is empty.
    session: Option<Vec<u8>>,

    /// The configuration for recovery.
    recovery_config: recovery::RecoveryConfig,

    /// The path manager.
    paths: path::PathMap,

    /// PATH_CHALLENGE receive queue max length.
    path_challenge_recv_max_queue_len: usize,

    /// Total number of received PATH_CHALLENGE frames.
    path_challenge_rx_count: u64,

    /// List of supported application protocols.
    application_protos: Vec<Vec<u8>>,

    /// Total number of received packets.
    recv_count: usize,

    /// Total number of sent packets.
    sent_count: usize,

    /// Total number of lost packets.
    lost_count: usize,

    /// Total number of lost packets that were later acked.
    spurious_lost_count: usize,

    /// Total number of packets sent with data retransmitted.
    retrans_count: usize,

    /// Total number of sent DATAGRAM frames.
    dgram_sent_count: usize,

    /// Total number of received DATAGRAM frames.
    dgram_recv_count: usize,

    /// Total number of bytes received from the peer.
    rx_data: u64,

    /// Receiver flow controller.
    flow_control: flowcontrol::FlowControl,

    /// True if there is a pending MAX_DATA frame to send.
    should_send_max_data: bool,

    /// True if there is a pending MAX_STREAMS_BIDI frame to send.
    should_send_max_streams_bidi: bool,

    /// True if there is a pending MAX_STREAMS_UNI frame to send.
    should_send_max_streams_uni: bool,

    /// Number of stream data bytes that can be buffered.
    tx_cap: usize,

    /// The send capacity factor.
    tx_cap_factor: f64,

    /// Number of bytes buffered in the send buffer.
    tx_buffered: usize,

    /// Tracks the health of `tx_buffered`.
    tx_buffered_state: TxBufferTrackingState,

    /// Total number of bytes sent to the peer.
    tx_data: u64,

    /// Peer's flow control limit for the connection.
    max_tx_data: u64,

    /// Last `tx_data` before running a full send() loop.
    last_tx_data: u64,

    /// Total number of bytes retransmitted over the connection.
    /// This counts only STREAM and CRYPTO data.
    stream_retrans_bytes: u64,

    /// Total number of bytes sent over the connection.
    sent_bytes: u64,

    /// Total number of bytes received over the connection.
    recv_bytes: u64,

    /// Total number of sent bytes acked over the connection.
    acked_bytes: u64,

    /// Total number of sent bytes lost over the connection.
    lost_bytes: u64,

    /// Streams map, indexed by stream ID.
    streams: stream::StreamMap<F>,

    /// Peer's original destination connection ID. Used by the client to
    /// validate the server's transport parameter.
    odcid: Option<ConnectionId<'static>>,

    /// Peer's retry source connection ID. Used by the client during stateless
    /// retry to validate the server's transport parameter.
    rscid: Option<ConnectionId<'static>>,

    /// Received address verification token.
    token: Option<Vec<u8>>,

    /// Error code and reason to be sent to the peer in a CONNECTION_CLOSE
    /// frame.
    local_error: Option<ConnectionError>,

    /// Error code and reason received from the peer in a CONNECTION_CLOSE
    /// frame.
    peer_error: Option<ConnectionError>,

    /// The connection-level limit at which send blocking occurred.
    blocked_limit: Option<u64>,

    /// Idle timeout expiration time.
    idle_timer: Option<Instant>,

    /// Draining timeout expiration time.
    draining_timer: Option<Instant>,

    /// List of raw packets that were received before they could be decrypted.
    undecryptable_pkts: VecDeque<(Vec<u8>, RecvInfo)>,

    /// The negotiated ALPN protocol.
    alpn: Vec<u8>,

    /// Whether this is a server-side connection.
    is_server: bool,

    /// Whether the initial secrets have been derived.
    derived_initial_secrets: bool,

    /// Whether a version negotiation packet has already been received. Only
    /// relevant for client connections.
    did_version_negotiation: bool,

    /// Whether stateless retry has been performed.
    did_retry: bool,

    /// Whether the peer already updated its connection ID.
    got_peer_conn_id: bool,

    /// Whether the peer verified our initial address.
    peer_verified_initial_address: bool,

    /// Whether the peer's transport parameters were parsed.
    parsed_peer_transport_params: bool,

    /// Whether the connection handshake has been completed.
    handshake_completed: bool,

    /// Whether the HANDSHAKE_DONE frame has been sent.
    handshake_done_sent: bool,

    /// Whether the HANDSHAKE_DONE frame has been acked.
    handshake_done_acked: bool,

    /// Whether the connection handshake has been confirmed.
    handshake_confirmed: bool,

    /// Key phase bit used for outgoing protected packets.
    key_phase: bool,

    /// Whether an ack-eliciting packet has been sent since last receiving a
    /// packet.
    ack_eliciting_sent: bool,

    /// Whether the connection is closed.
    closed: bool,

    /// Whether the connection was timed out.
    timed_out: bool,

    /// Whether to send GREASE.
    grease: bool,

    /// Whether to send STREAMS_BLOCKED frames when bidi or uni stream quota
    /// exhausted.
    enable_send_streams_blocked: bool,

    /// TLS keylog writer.
    keylog: Option<Box<dyn std::io::Write + Send + Sync>>,

    #[cfg(feature = "qlog")]
    qlog: QlogInfo,

    /// DATAGRAM receive and send queues.
    dgram_recv_queue: dgram::DatagramQueue,
    dgram_send_queue: dgram::DatagramQueue,

    /// Whether to emit DATAGRAM frames in the next packet.
    emit_dgram: bool,

    /// Whether the connection should prevent from reusing destination
    /// Connection IDs when the peer migrates.
    disable_dcid_reuse: bool,

    /// The number of streams reset by local.
    reset_stream_local_count: u64,

    /// The number of streams stopped by local.
    stopped_stream_local_count: u64,

    /// The number of streams reset by remote.
    reset_stream_remote_count: u64,

    /// The number of streams stopped by remote.
    stopped_stream_remote_count: u64,

    /// The number of DATA_BLOCKED frames sent due to hitting the connection
    /// flow control limit.
    data_blocked_sent_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames sent due to a stream hitting
    /// the stream flow control limit.
    stream_data_blocked_sent_count: u64,

    /// The number of DATA_BLOCKED frames received from the remote endpoint.
    data_blocked_recv_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames received from the remote
    /// endpoint.
    stream_data_blocked_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames received from the remote endpoint
    /// indicating the peer is blocked on opening new bidirectional streams.
    streams_blocked_bidi_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames received from the remote endpoint
    /// indicating the peer is blocked on opening new unidirectional streams.
    streams_blocked_uni_recv_count: u64,

    /// Tracks if the connection hit the peer's bidi or uni stream limit, and if
    /// STREAMS_BLOCKED frames are pending transmission.
    streams_blocked_bidi_state: StreamsBlockedState,
    streams_blocked_uni_state: StreamsBlockedState,

    /// The anti-amplification limit factor.
    max_amplification_factor: usize,
}
1585
/// Creates a new server-side connection.
///
/// The `scid` parameter represents the server's source connection ID, while
/// the optional `odcid` parameter represents the original destination ID the
/// client sent before a Retry packet (this is only required when using the
/// [`retry()`] function). See also the [`accept_with_retry()`] function for
/// more advanced retry cases.
///
/// [`retry()`]: fn.retry.html
/// [`accept_with_retry()`]: fn.accept_with_retry.html
///
/// ## Examples:
///
/// ```no_run
/// # let mut config = quiche::Config::new(0xbabababa)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let local = "127.0.0.1:0".parse().unwrap();
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// let conn = quiche::accept(&scid, None, local, peer, &mut config)?;
/// # Ok::<(), quiche::Error>(())
/// ```
#[inline(always)]
pub fn accept(
    scid: &ConnectionId, odcid: Option<&ConnectionId>, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection> {
    // Thin wrapper over the generic constructor with the default buffer
    // factory.
    accept_with_buf_factory(scid, odcid, local, peer, config)
}
1613
1614/// Creates a new server-side connection, with a custom buffer generation
1615/// method.
1616///
1617/// The buffers generated can be anything that can be drereferenced as a byte
1618/// slice. See [`accept`] and [`BufFactory`] for more info.
1619#[inline]
1620pub fn accept_with_buf_factory<F: BufFactory>(
1621 scid: &ConnectionId, odcid: Option<&ConnectionId>, local: SocketAddr,
1622 peer: SocketAddr, config: &mut Config,
1623) -> Result<Connection<F>> {
1624 // For connections with `odcid` set, we historically used `retry_source_cid =
1625 // scid`. Keep this behavior to preserve backwards compatibility.
1626 // `accept_with_retry` allows the SCIDs to be specified separately.
1627 let retry_cids = odcid.map(|odcid| RetryConnectionIds {
1628 original_destination_cid: odcid,
1629 retry_source_cid: scid,
1630 });
1631 Connection::new(scid, retry_cids, None, local, peer, config, true)
1632}
1633
/// A wrapper for connection IDs used in [`accept_with_retry`].
pub struct RetryConnectionIds<'a> {
    /// The DCID of the first Initial packet received by the server, which
    /// triggered the Retry packet.
    pub original_destination_cid: &'a ConnectionId<'a>,
    /// The SCID of the Retry packet sent by the server. This can be different
    /// from the new connection's SCID.
    pub retry_source_cid: &'a ConnectionId<'a>,
}
1643
/// Creates a new server-side connection after the client responded to a Retry
/// packet.
///
/// To generate a Retry packet in the first place, use the [`retry()`] function.
///
/// The `scid` parameter represents the server's source connection ID, which can
/// be freshly generated after the application has successfully verified the
/// Retry. `retry_cids` is used to tie the new connection to the Initial + Retry
/// exchange that preceded the connection's creation.
///
/// The DCID of the client's Initial packet is inherently untrusted data. It is
/// safe to use the DCID in the `retry_source_cid` field of the
/// `RetryConnectionIds` provided to this function. However, using the Initial's
/// DCID for the `scid` parameter carries risks. Applications are advised to
/// implement their own DCID validation steps before using the DCID in that
/// manner.
///
/// [`retry()`]: fn.retry.html
#[inline]
pub fn accept_with_retry<F: BufFactory>(
    scid: &ConnectionId, retry_cids: RetryConnectionIds, local: SocketAddr,
    peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    Connection::new(scid, Some(retry_cids), None, local, peer, config, true)
}
1667
1668/// Creates a new client-side connection.
1669///
1670/// The `scid` parameter is used as the connection's source connection ID,
1671/// while the optional `server_name` parameter is used to verify the peer's
1672/// certificate.
1673///
1674/// ## Examples:
1675///
1676/// ```no_run
1677/// # let mut config = quiche::Config::new(0xbabababa)?;
1678/// # let server_name = "quic.tech";
1679/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
1680/// # let local = "127.0.0.1:4321".parse().unwrap();
1681/// # let peer = "127.0.0.1:1234".parse().unwrap();
1682/// let conn =
1683/// quiche::connect(Some(&server_name), &scid, local, peer, &mut config)?;
1684/// # Ok::<(), quiche::Error>(())
1685/// ```
1686#[inline]
1687pub fn connect(
1688 server_name: Option<&str>, scid: &ConnectionId, local: SocketAddr,
1689 peer: SocketAddr, config: &mut Config,
1690) -> Result<Connection> {
1691 let mut conn = Connection::new(scid, None, None, local, peer, config, false)?;
1692
1693 if let Some(server_name) = server_name {
1694 conn.handshake.set_host_name(server_name)?;
1695 }
1696
1697 Ok(conn)
1698}
1699
/// Creates a new client-side connection using the given DCID initially.
///
/// Be aware that [RFC 9000] places requirements for unpredictability and length
/// on the client DCID field. This function is dangerous if these requirements
/// are not satisfied.
///
/// The `scid` parameter is used as the connection's source connection ID, while
/// the optional `server_name` parameter is used to verify the peer's
/// certificate.
///
/// [RFC 9000]: <https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3>
#[cfg(feature = "custom-client-dcid")]
#[cfg_attr(docsrs, doc(cfg(feature = "custom-client-dcid")))]
pub fn connect_with_dcid(
    server_name: Option<&str>, scid: &ConnectionId, dcid: &ConnectionId,
    local: SocketAddr, peer: SocketAddr, config: &mut Config,
) -> Result<Connection> {
    let mut conn =
        Connection::new(scid, None, Some(dcid), local, peer, config, false)?;

    // An SNI is only configured when a server name was supplied.
    match server_name {
        Some(name) => conn.handshake.set_host_name(name)?,
        None => (),
    }

    Ok(conn)
}
1726
1727/// Creates a new client-side connection, with a custom buffer generation
1728/// method.
1729///
1730/// The buffers generated can be anything that can be drereferenced as a byte
1731/// slice. See [`connect`] and [`BufFactory`] for more info.
1732#[inline]
1733pub fn connect_with_buffer_factory<F: BufFactory>(
1734 server_name: Option<&str>, scid: &ConnectionId, local: SocketAddr,
1735 peer: SocketAddr, config: &mut Config,
1736) -> Result<Connection<F>> {
1737 let mut conn = Connection::new(scid, None, None, local, peer, config, false)?;
1738
1739 if let Some(server_name) = server_name {
1740 conn.handshake.set_host_name(server_name)?;
1741 }
1742
1743 Ok(conn)
1744}
1745
/// Creates a new client-side connection, with a custom buffer generation
/// method, using the given DCID initially.
///
/// Be aware that [RFC 9000] places requirements for unpredictability and
/// length on the client DCID field. This function is dangerous if these
/// requirements are not satisfied.
///
/// The buffers generated can be anything that can be dereferenced as a byte
/// slice. See [`connect`] and [`BufFactory`] for more info.
///
/// [RFC 9000]: <https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3>
#[cfg(feature = "custom-client-dcid")]
#[cfg_attr(docsrs, doc(cfg(feature = "custom-client-dcid")))]
pub fn connect_with_dcid_and_buffer_factory<F: BufFactory>(
    server_name: Option<&str>, scid: &ConnectionId, dcid: &ConnectionId,
    local: SocketAddr, peer: SocketAddr, config: &mut Config,
) -> Result<Connection<F>> {
    let mut connection =
        Connection::new(scid, None, Some(dcid), local, peer, config, false)?;

    // SNI is only configured when the caller actually provides a name.
    if let Some(name) = server_name {
        connection.handshake.set_host_name(name)?;
    }

    Ok(connection)
}
1769
/// Writes a version negotiation packet.
///
/// The `scid` and `dcid` parameters are the source connection ID and the
/// destination connection ID extracted from the received client's Initial
/// packet that advertises an unsupported version.
///
/// ## Examples:
///
/// ```no_run
/// # let mut buf = [0; 512];
/// # let mut out = [0; 512];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// let (len, src) = socket.recv_from(&mut buf).unwrap();
///
/// let hdr =
///     quiche::Header::from_slice(&mut buf[..len], quiche::MAX_CONN_ID_LEN)?;
///
/// if hdr.version != quiche::PROTOCOL_VERSION {
///     let len = quiche::negotiate_version(&hdr.scid, &hdr.dcid, &mut out)?;
///     socket.send_to(&out[..len], &src).unwrap();
/// }
/// # Ok::<(), quiche::Error>(())
/// ```
#[inline]
pub fn negotiate_version(
    scid: &ConnectionId, dcid: &ConnectionId, out: &mut [u8],
) -> Result<usize> {
    // Thin public wrapper over the packet module's implementation.
    packet::negotiate_version(scid, dcid, out)
}
1799
/// Writes a stateless retry packet.
///
/// The `scid` and `dcid` parameters are the source connection ID and the
/// destination connection ID extracted from the received client's Initial
/// packet, while `new_scid` is the server's new source connection ID and
/// `token` is the address validation token the client needs to echo back.
///
/// The application is responsible for generating the address validation
/// token to be sent to the client, and verifying tokens sent back by the
/// client. The generated token should include the `dcid` parameter, such
/// that it can be later extracted from the token and passed to the
/// [`accept()`] function as its `odcid` parameter.
///
/// [`accept()`]: fn.accept.html
///
/// ## Examples:
///
/// ```no_run
/// # let mut config = quiche::Config::new(0xbabababa)?;
/// # let mut buf = [0; 512];
/// # let mut out = [0; 512];
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let local = socket.local_addr().unwrap();
/// # fn mint_token(hdr: &quiche::Header, src: &std::net::SocketAddr) -> Vec<u8> {
/// #     vec![]
/// # }
/// # fn validate_token<'a>(src: &std::net::SocketAddr, token: &'a [u8]) -> Option<quiche::ConnectionId<'a>> {
/// #     None
/// # }
/// let (len, peer) = socket.recv_from(&mut buf).unwrap();
///
/// let hdr = quiche::Header::from_slice(&mut buf[..len], quiche::MAX_CONN_ID_LEN)?;
///
/// let token = hdr.token.as_ref().unwrap();
///
/// // No token sent by client, create a new one.
/// if token.is_empty() {
///     let new_token = mint_token(&hdr, &peer);
///
///     let len = quiche::retry(
///         &hdr.scid, &hdr.dcid, &scid, &new_token, hdr.version, &mut out,
///     )?;
///
///     socket.send_to(&out[..len], &peer).unwrap();
///     return Ok(());
/// }
///
/// // Client sent token, validate it.
/// let odcid = validate_token(&peer, token);
///
/// if odcid.is_none() {
///     // Invalid address validation token.
///     return Ok(());
/// }
///
/// let conn = quiche::accept(&scid, odcid.as_ref(), local, peer, &mut config)?;
/// # Ok::<(), quiche::Error>(())
/// ```
#[inline]
pub fn retry(
    scid: &ConnectionId, dcid: &ConnectionId, new_scid: &ConnectionId,
    token: &[u8], version: u32, out: &mut [u8],
) -> Result<usize> {
    // Thin public wrapper over the packet module's implementation.
    packet::retry(scid, dcid, new_scid, token, version, out)
}
1866
1867/// Returns true if the given protocol version is supported.
1868#[inline]
1869pub fn version_is_supported(version: u32) -> bool {
1870 matches!(version, PROTOCOL_VERSION_V1)
1871}
1872
/// Pushes a frame to the output packet if there is enough space.
///
/// Returns `true` on success, `false` otherwise. In case of failure it means
/// there is no room to add the frame in the packet. You may retry to add the
/// frame later.
macro_rules! push_frame_to_pkt {
    ($out:expr, $frames:expr, $frame:expr, $left:expr) => {{
        // Compute the wire length once instead of twice on the success path.
        let wire_len = $frame.wire_len();

        if wire_len <= $left {
            $left -= wire_len;

            $frame.to_bytes(&mut $out)?;

            $frames.push($frame);

            true
        } else {
            false
        }
    }};
}
1893
/// Executes the provided body if the qlog feature is enabled, quiche has been
/// configured with a log writer, and the event's importance is within the
/// configured level.
macro_rules! qlog_with_type {
    ($ty:expr, $qlog:expr, $qlog_streamer_ref:ident, $body:block) => {{
        // Without the `qlog` feature this expands to nothing.
        #[cfg(feature = "qlog")]
        {
            // Importance is checked first so the body is skipped cheaply when
            // the event is filtered out by the configured level.
            if EventImportance::from($ty).is_contained_in(&$qlog.level) {
                // The body only runs when a streamer has been attached via
                // `set_qlog()`/`set_qlog_with_level()`.
                if let Some($qlog_streamer_ref) = &mut $qlog.streamer {
                    $body
                }
            }
        }
    }};
}
1909
// Shorthand constants for the qlog event types this module emits.
#[cfg(feature = "qlog")]
const QLOG_PARAMS_SET: EventType =
    EventType::TransportEventType(TransportEventType::ParametersSet);

#[cfg(feature = "qlog")]
const QLOG_PACKET_RX: EventType =
    EventType::TransportEventType(TransportEventType::PacketReceived);

#[cfg(feature = "qlog")]
const QLOG_PACKET_TX: EventType =
    EventType::TransportEventType(TransportEventType::PacketSent);

#[cfg(feature = "qlog")]
const QLOG_DATA_MV: EventType =
    EventType::TransportEventType(TransportEventType::DataMoved);

#[cfg(feature = "qlog")]
const QLOG_METRICS: EventType =
    EventType::RecoveryEventType(RecoveryEventType::MetricsUpdated);

#[cfg(feature = "qlog")]
const QLOG_CONNECTION_CLOSED: EventType =
    EventType::ConnectivityEventType(ConnectivityEventType::ConnectionClosed);
1933
// Per-connection qlog state.
#[cfg(feature = "qlog")]
struct QlogInfo {
    // Active streamer, set when qlog output is configured via
    // `set_qlog()`/`set_qlog_with_level()`; `None` otherwise.
    streamer: Option<qlog::streamer::QlogStreamer>,
    // Whether the peer's transport parameters have already been logged.
    logged_peer_params: bool,
    // Minimum event importance that gets written out.
    level: EventImportance,
}
1940
#[cfg(feature = "qlog")]
impl Default for QlogInfo {
    fn default() -> Self {
        // No streamer is attached until qlog output is explicitly
        // configured; events default to the `Base` importance level.
        Self {
            streamer: None,
            logged_peer_params: false,
            level: EventImportance::Base,
        }
    }
}
1951
1952impl<F: BufFactory> Connection<F> {
1953 fn new(
1954 scid: &ConnectionId, retry_cids: Option<RetryConnectionIds>,
1955 client_dcid: Option<&ConnectionId>, local: SocketAddr, peer: SocketAddr,
1956 config: &mut Config, is_server: bool,
1957 ) -> Result<Connection<F>> {
1958 let tls = config.tls_ctx.new_handshake()?;
1959 Connection::with_tls(
1960 scid,
1961 retry_cids,
1962 client_dcid,
1963 local,
1964 peer,
1965 config,
1966 tls,
1967 is_server,
1968 )
1969 }
1970
1971 #[allow(clippy::too_many_arguments)]
1972 fn with_tls(
1973 scid: &ConnectionId, retry_cids: Option<RetryConnectionIds>,
1974 client_dcid: Option<&ConnectionId>, local: SocketAddr, peer: SocketAddr,
1975 config: &Config, tls: tls::Handshake, is_server: bool,
1976 ) -> Result<Connection<F>> {
1977 if retry_cids.is_some() && client_dcid.is_some() {
1978 // These are exclusive, the caller should only specify one or the
1979 // other.
1980 return Err(Error::InvalidDcidInitialization);
1981 }
1982 #[cfg(feature = "custom-client-dcid")]
1983 if let Some(client_dcid) = client_dcid {
1984 // The Minimum length is 8.
1985 // See https://datatracker.ietf.org/doc/html/rfc9000#section-7.2-3
1986 if client_dcid.to_vec().len() < 8 {
1987 return Err(Error::InvalidDcidInitialization);
1988 }
1989 }
1990 #[cfg(not(feature = "custom-client-dcid"))]
1991 if client_dcid.is_some() {
1992 return Err(Error::InvalidDcidInitialization);
1993 }
1994
1995 let max_rx_data = config.local_transport_params.initial_max_data;
1996
1997 let scid_as_hex: Vec<String> =
1998 scid.iter().map(|b| format!("{b:02x}")).collect();
1999
2000 let reset_token = if is_server {
2001 config.local_transport_params.stateless_reset_token
2002 } else {
2003 None
2004 };
2005
2006 let recovery_config = recovery::RecoveryConfig::from_config(config);
2007
2008 let mut path = path::Path::new(
2009 local,
2010 peer,
2011 &recovery_config,
2012 config.path_challenge_recv_max_queue_len,
2013 true,
2014 Some(config),
2015 );
2016
2017 // If we sent a Retry assume the peer's address is verified.
2018 path.verified_peer_address = retry_cids.is_some();
2019 // Assume clients validate the server's address implicitly.
2020 path.peer_verified_local_address = is_server;
2021
2022 // Do not allocate more than the number of active CIDs.
2023 let paths = path::PathMap::new(
2024 path,
2025 config.local_transport_params.active_conn_id_limit as usize,
2026 is_server,
2027 );
2028
2029 let active_path_id = paths.get_active_path_id()?;
2030
2031 let ids = cid::ConnectionIdentifiers::new(
2032 config.local_transport_params.active_conn_id_limit as usize,
2033 scid,
2034 active_path_id,
2035 reset_token,
2036 );
2037
2038 let mut conn = Connection {
2039 version: config.version,
2040
2041 ids,
2042
2043 trace_id: scid_as_hex.join(""),
2044
2045 pkt_num_spaces: [
2046 packet::PktNumSpace::new(),
2047 packet::PktNumSpace::new(),
2048 packet::PktNumSpace::new(),
2049 ],
2050
2051 crypto_ctx: [
2052 packet::CryptoContext::new(),
2053 packet::CryptoContext::new(),
2054 packet::CryptoContext::new(),
2055 ],
2056
2057 next_pkt_num: 0,
2058
2059 pkt_num_manager: packet::PktNumManager::new(),
2060
2061 peer_transport_params: TransportParams::default(),
2062
2063 peer_transport_params_track_unknown: config
2064 .track_unknown_transport_params,
2065
2066 local_transport_params: config.local_transport_params.clone(),
2067
2068 handshake: tls,
2069
2070 session: None,
2071
2072 recovery_config,
2073
2074 paths,
2075 path_challenge_recv_max_queue_len: config
2076 .path_challenge_recv_max_queue_len,
2077 path_challenge_rx_count: 0,
2078
2079 application_protos: config.application_protos.clone(),
2080
2081 recv_count: 0,
2082 sent_count: 0,
2083 lost_count: 0,
2084 spurious_lost_count: 0,
2085 retrans_count: 0,
2086 dgram_sent_count: 0,
2087 dgram_recv_count: 0,
2088 sent_bytes: 0,
2089 recv_bytes: 0,
2090 acked_bytes: 0,
2091 lost_bytes: 0,
2092
2093 rx_data: 0,
2094 flow_control: flowcontrol::FlowControl::new(
2095 max_rx_data,
2096 cmp::min(max_rx_data / 2 * 3, DEFAULT_CONNECTION_WINDOW),
2097 config.max_connection_window,
2098 ),
2099 should_send_max_data: false,
2100 should_send_max_streams_bidi: false,
2101 should_send_max_streams_uni: false,
2102
2103 tx_cap: 0,
2104 tx_cap_factor: config.tx_cap_factor,
2105
2106 tx_buffered: 0,
2107 tx_buffered_state: TxBufferTrackingState::Ok,
2108
2109 tx_data: 0,
2110 max_tx_data: 0,
2111 last_tx_data: 0,
2112
2113 stream_retrans_bytes: 0,
2114
2115 streams: stream::StreamMap::new(
2116 config.local_transport_params.initial_max_streams_bidi,
2117 config.local_transport_params.initial_max_streams_uni,
2118 config.max_stream_window,
2119 ),
2120
2121 odcid: None,
2122
2123 rscid: None,
2124
2125 token: None,
2126
2127 local_error: None,
2128
2129 peer_error: None,
2130
2131 blocked_limit: None,
2132
2133 idle_timer: None,
2134
2135 draining_timer: None,
2136
2137 undecryptable_pkts: VecDeque::new(),
2138
2139 alpn: Vec::new(),
2140
2141 is_server,
2142
2143 derived_initial_secrets: false,
2144
2145 did_version_negotiation: false,
2146
2147 did_retry: false,
2148
2149 got_peer_conn_id: false,
2150
2151 // Assume clients validate the server's address implicitly.
2152 peer_verified_initial_address: is_server,
2153
2154 parsed_peer_transport_params: false,
2155
2156 handshake_completed: false,
2157
2158 handshake_done_sent: false,
2159 handshake_done_acked: false,
2160
2161 handshake_confirmed: false,
2162
2163 key_phase: false,
2164
2165 ack_eliciting_sent: false,
2166
2167 closed: false,
2168
2169 timed_out: false,
2170
2171 grease: config.grease,
2172
2173 enable_send_streams_blocked: config.enable_send_streams_blocked,
2174
2175 keylog: None,
2176
2177 #[cfg(feature = "qlog")]
2178 qlog: Default::default(),
2179
2180 dgram_recv_queue: dgram::DatagramQueue::new(
2181 config.dgram_recv_max_queue_len,
2182 ),
2183
2184 dgram_send_queue: dgram::DatagramQueue::new(
2185 config.dgram_send_max_queue_len,
2186 ),
2187
2188 emit_dgram: true,
2189
2190 disable_dcid_reuse: config.disable_dcid_reuse,
2191
2192 reset_stream_local_count: 0,
2193 stopped_stream_local_count: 0,
2194 reset_stream_remote_count: 0,
2195 stopped_stream_remote_count: 0,
2196
2197 data_blocked_sent_count: 0,
2198 stream_data_blocked_sent_count: 0,
2199 data_blocked_recv_count: 0,
2200 stream_data_blocked_recv_count: 0,
2201
2202 streams_blocked_bidi_recv_count: 0,
2203 streams_blocked_uni_recv_count: 0,
2204
2205 streams_blocked_bidi_state: Default::default(),
2206 streams_blocked_uni_state: Default::default(),
2207
2208 max_amplification_factor: config.max_amplification_factor,
2209 };
2210
2211 if let Some(retry_cids) = retry_cids {
2212 conn.local_transport_params
2213 .original_destination_connection_id =
2214 Some(retry_cids.original_destination_cid.to_vec().into());
2215
2216 conn.local_transport_params.retry_source_connection_id =
2217 Some(retry_cids.retry_source_cid.to_vec().into());
2218
2219 conn.did_retry = true;
2220 }
2221
2222 conn.local_transport_params.initial_source_connection_id =
2223 Some(conn.ids.get_scid(0)?.cid.to_vec().into());
2224
2225 conn.handshake.init(is_server)?;
2226
2227 conn.handshake
2228 .use_legacy_codepoint(config.version != PROTOCOL_VERSION_V1);
2229
2230 conn.encode_transport_params()?;
2231
2232 if !is_server {
2233 let dcid = if let Some(client_dcid) = client_dcid {
2234 // We already had an dcid generated for us, use it.
2235 client_dcid.to_vec()
2236 } else {
2237 // Derive initial secrets for the client. We can do this here
2238 // because we already generated the random
2239 // destination connection ID.
2240 let mut dcid = [0; 16];
2241 rand::rand_bytes(&mut dcid[..]);
2242 dcid.to_vec()
2243 };
2244
2245 let (aead_open, aead_seal) = crypto::derive_initial_key_material(
2246 &dcid,
2247 conn.version,
2248 conn.is_server,
2249 false,
2250 )?;
2251
2252 let reset_token = conn.peer_transport_params.stateless_reset_token;
2253 conn.set_initial_dcid(
2254 dcid.to_vec().into(),
2255 reset_token,
2256 active_path_id,
2257 )?;
2258
2259 conn.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
2260 conn.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);
2261
2262 conn.derived_initial_secrets = true;
2263 }
2264
2265 Ok(conn)
2266 }
2267
2268 /// Sets keylog output to the designated [`Writer`].
2269 ///
2270 /// This needs to be called as soon as the connection is created, to avoid
2271 /// missing some early logs.
2272 ///
2273 /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
2274 #[inline]
2275 pub fn set_keylog(&mut self, writer: Box<dyn std::io::Write + Send + Sync>) {
2276 self.keylog = Some(writer);
2277 }
2278
    /// Sets qlog output to the designated [`Writer`].
    ///
    /// Only events included in `QlogLevel::Base` are written. The serialization
    /// format is JSON-SEQ.
    ///
    /// This needs to be called as soon as the connection is created, to avoid
    /// missing some early logs.
    ///
    /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
    #[cfg(feature = "qlog")]
    #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
    pub fn set_qlog(
        &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
        description: String,
    ) {
        // Convenience wrapper that defaults the level to `QlogLevel::Base`.
        self.set_qlog_with_level(writer, title, description, QlogLevel::Base)
    }
2296
2297 /// Sets qlog output to the designated [`Writer`].
2298 ///
2299 /// Only qlog events included in the specified `QlogLevel` are written. The
2300 /// serialization format is JSON-SEQ.
2301 ///
2302 /// This needs to be called as soon as the connection is created, to avoid
2303 /// missing some early logs.
2304 ///
2305 /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
2306 #[cfg(feature = "qlog")]
2307 #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
2308 pub fn set_qlog_with_level(
2309 &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
2310 description: String, qlog_level: QlogLevel,
2311 ) {
2312 let vp = if self.is_server {
2313 qlog::VantagePointType::Server
2314 } else {
2315 qlog::VantagePointType::Client
2316 };
2317
2318 let level = match qlog_level {
2319 QlogLevel::Core => EventImportance::Core,
2320
2321 QlogLevel::Base => EventImportance::Base,
2322
2323 QlogLevel::Extra => EventImportance::Extra,
2324 };
2325
2326 self.qlog.level = level;
2327
2328 let trace = qlog::TraceSeq::new(
2329 qlog::VantagePoint {
2330 name: None,
2331 ty: vp,
2332 flow: None,
2333 },
2334 Some(title.to_string()),
2335 Some(description.to_string()),
2336 Some(qlog::Configuration {
2337 time_offset: Some(0.0),
2338 original_uris: None,
2339 }),
2340 None,
2341 );
2342
2343 let mut streamer = qlog::streamer::QlogStreamer::new(
2344 qlog::QLOG_VERSION.to_string(),
2345 Some(title),
2346 Some(description),
2347 None,
2348 Instant::now(),
2349 trace,
2350 self.qlog.level,
2351 writer,
2352 );
2353
2354 streamer.start_log().ok();
2355
2356 let ev_data = self
2357 .local_transport_params
2358 .to_qlog(TransportOwner::Local, self.handshake.cipher());
2359
2360 // This event occurs very early, so just mark the relative time as 0.0.
2361 streamer.add_event(Event::with_time(0.0, ev_data)).ok();
2362
2363 self.qlog.streamer = Some(streamer);
2364 }
2365
    /// Returns a mutable reference to the QlogStreamer, if it exists.
    ///
    /// `None` is returned when qlog output has not been configured via
    /// [`set_qlog()`] or [`set_qlog_with_level()`].
    ///
    /// [`set_qlog()`]: struct.Connection.html#method.set_qlog
    /// [`set_qlog_with_level()`]: struct.Connection.html#method.set_qlog_with_level
    #[cfg(feature = "qlog")]
    #[cfg_attr(docsrs, doc(cfg(feature = "qlog")))]
    pub fn qlog_streamer(&mut self) -> Option<&mut qlog::streamer::QlogStreamer> {
        self.qlog.streamer.as_mut()
    }
2372
2373 /// Configures the given session for resumption.
2374 ///
2375 /// On the client, this can be used to offer the given serialized session,
2376 /// as returned by [`session()`], for resumption.
2377 ///
2378 /// This must only be called immediately after creating a connection, that
2379 /// is, before any packet is sent or received.
2380 ///
2381 /// [`session()`]: struct.Connection.html#method.session
2382 #[inline]
2383 pub fn set_session(&mut self, session: &[u8]) -> Result<()> {
2384 let mut b = octets::Octets::with_slice(session);
2385
2386 let session_len = b.get_u64()? as usize;
2387 let session_bytes = b.get_bytes(session_len)?;
2388
2389 self.handshake.set_session(session_bytes.as_ref())?;
2390
2391 let raw_params_len = b.get_u64()? as usize;
2392 let raw_params_bytes = b.get_bytes(raw_params_len)?;
2393
2394 let peer_params = TransportParams::decode(
2395 raw_params_bytes.as_ref(),
2396 self.is_server,
2397 self.peer_transport_params_track_unknown,
2398 )?;
2399
2400 self.process_peer_transport_params(peer_params)?;
2401
2402 Ok(())
2403 }
2404
2405 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
2406 ///
2407 /// This must only be called immediately after creating a connection, that
2408 /// is, before any packet is sent or received.
2409 ///
2410 /// The default value is infinite, that is, no timeout is used unless
2411 /// already configured when creating the connection.
2412 pub fn set_max_idle_timeout(&mut self, v: u64) -> Result<()> {
2413 self.local_transport_params.max_idle_timeout =
2414 cmp::min(v, octets::MAX_VAR_INT);
2415
2416 self.encode_transport_params()
2417 }
2418
2419 /// Sets the congestion control algorithm used.
2420 ///
2421 /// This function can only be called inside one of BoringSSL's handshake
2422 /// callbacks, before any packet has been sent. Calling this function any
2423 /// other time will have no effect.
2424 ///
2425 /// See [`Config::set_cc_algorithm()`].
2426 ///
2427 /// [`Config::set_cc_algorithm()`]: struct.Config.html#method.set_cc_algorithm
2428 #[cfg(feature = "boringssl-boring-crate")]
2429 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2430 pub fn set_cc_algorithm_in_handshake(
2431 ssl: &mut boring::ssl::SslRef, algo: CongestionControlAlgorithm,
2432 ) -> Result<()> {
2433 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2434
2435 ex_data.recovery_config.cc_algorithm = algo;
2436
2437 Ok(())
2438 }
2439
2440 /// Sets custom BBR settings.
2441 ///
2442 /// This API is experimental and will be removed in the future.
2443 ///
2444 /// Currently this only applies if cc_algorithm is
2445 /// `CongestionControlAlgorithm::Bbr2Gcongestion` is set.
2446 ///
2447 /// This function can only be called inside one of BoringSSL's handshake
2448 /// callbacks, before any packet has been sent. Calling this function any
2449 /// other time will have no effect.
2450 ///
2451 /// See [`Config::set_custom_bbr_settings()`].
2452 ///
2453 /// [`Config::set_custom_bbr_settings()`]: struct.Config.html#method.set_custom_bbr_settings
2454 #[cfg(all(feature = "boringssl-boring-crate", feature = "internal"))]
2455 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2456 #[doc(hidden)]
2457 pub fn set_custom_bbr_settings_in_handshake(
2458 ssl: &mut boring::ssl::SslRef, custom_bbr_params: BbrParams,
2459 ) -> Result<()> {
2460 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2461
2462 ex_data.recovery_config.custom_bbr_params = Some(custom_bbr_params);
2463
2464 Ok(())
2465 }
2466
2467 /// Sets the congestion control algorithm used by string.
2468 ///
2469 /// This function can only be called inside one of BoringSSL's handshake
2470 /// callbacks, before any packet has been sent. Calling this function any
2471 /// other time will have no effect.
2472 ///
2473 /// See [`Config::set_cc_algorithm_name()`].
2474 ///
2475 /// [`Config::set_cc_algorithm_name()`]: struct.Config.html#method.set_cc_algorithm_name
2476 #[cfg(feature = "boringssl-boring-crate")]
2477 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2478 pub fn set_cc_algorithm_name_in_handshake(
2479 ssl: &mut boring::ssl::SslRef, name: &str,
2480 ) -> Result<()> {
2481 let cc_algo = CongestionControlAlgorithm::from_str(name)?;
2482 Self::set_cc_algorithm_in_handshake(ssl, cc_algo)
2483 }
2484
2485 /// Sets initial congestion window size in terms of packet count.
2486 ///
2487 /// This function can only be called inside one of BoringSSL's handshake
2488 /// callbacks, before any packet has been sent. Calling this function any
2489 /// other time will have no effect.
2490 ///
2491 /// See [`Config::set_initial_congestion_window_packets()`].
2492 ///
2493 /// [`Config::set_initial_congestion_window_packets()`]: struct.Config.html#method.set_initial_congestion_window_packets
2494 #[cfg(feature = "boringssl-boring-crate")]
2495 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2496 pub fn set_initial_congestion_window_packets_in_handshake(
2497 ssl: &mut boring::ssl::SslRef, packets: usize,
2498 ) -> Result<()> {
2499 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2500
2501 ex_data.recovery_config.initial_congestion_window_packets = packets;
2502
2503 Ok(())
2504 }
2505
2506 /// Configure whether to enable relaxed loss detection on spurious loss.
2507 ///
2508 /// This function can only be called inside one of BoringSSL's handshake
2509 /// callbacks, before any packet has been sent. Calling this function any
2510 /// other time will have no effect.
2511 ///
2512 /// See [`Config::set_enable_relaxed_loss_threshold()`].
2513 ///
2514 /// [`Config::set_enable_relaxed_loss_threshold()`]: struct.Config.html#method.set_enable_relaxed_loss_threshold
2515 #[cfg(feature = "boringssl-boring-crate")]
2516 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2517 pub fn set_enable_relaxed_loss_threshold_in_handshake(
2518 ssl: &mut boring::ssl::SslRef, enable: bool,
2519 ) -> Result<()> {
2520 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2521
2522 ex_data.recovery_config.enable_relaxed_loss_threshold = enable;
2523
2524 Ok(())
2525 }
2526
2527 /// Configure whether to enable the CUBIC idle restart fix.
2528 ///
2529 /// This function can only be called inside one of BoringSSL's handshake
2530 /// callbacks, before any packet has been sent. Calling this function any
2531 /// other time will have no effect.
2532 ///
2533 /// See [`Config::set_enable_cubic_idle_restart_fix()`].
2534 ///
2535 /// [`Config::set_enable_cubic_idle_restart_fix()`]: struct.Config.html#method.set_enable_cubic_idle_restart_fix
2536 #[cfg(feature = "boringssl-boring-crate")]
2537 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2538 pub fn set_enable_cubic_idle_restart_fix_in_handshake(
2539 ssl: &mut boring::ssl::SslRef, enable: bool,
2540 ) -> Result<()> {
2541 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2542
2543 ex_data.recovery_config.enable_cubic_idle_restart_fix = enable;
2544
2545 Ok(())
2546 }
2547
2548 /// Configures whether to enable HyStart++.
2549 ///
2550 /// This function can only be called inside one of BoringSSL's handshake
2551 /// callbacks, before any packet has been sent. Calling this function any
2552 /// other time will have no effect.
2553 ///
2554 /// See [`Config::enable_hystart()`].
2555 ///
2556 /// [`Config::enable_hystart()`]: struct.Config.html#method.enable_hystart
2557 #[cfg(feature = "boringssl-boring-crate")]
2558 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2559 pub fn set_hystart_in_handshake(
2560 ssl: &mut boring::ssl::SslRef, v: bool,
2561 ) -> Result<()> {
2562 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2563
2564 ex_data.recovery_config.hystart = v;
2565
2566 Ok(())
2567 }
2568
2569 /// Configures whether to enable pacing.
2570 ///
2571 /// This function can only be called inside one of BoringSSL's handshake
2572 /// callbacks, before any packet has been sent. Calling this function any
2573 /// other time will have no effect.
2574 ///
2575 /// See [`Config::enable_pacing()`].
2576 ///
2577 /// [`Config::enable_pacing()`]: struct.Config.html#method.enable_pacing
2578 #[cfg(feature = "boringssl-boring-crate")]
2579 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2580 pub fn set_pacing_in_handshake(
2581 ssl: &mut boring::ssl::SslRef, v: bool,
2582 ) -> Result<()> {
2583 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2584
2585 ex_data.recovery_config.pacing = v;
2586
2587 Ok(())
2588 }
2589
2590 /// Sets the max value for pacing rate.
2591 ///
2592 /// This function can only be called inside one of BoringSSL's handshake
2593 /// callbacks, before any packet has been sent. Calling this function any
2594 /// other time will have no effect.
2595 ///
2596 /// See [`Config::set_max_pacing_rate()`].
2597 ///
2598 /// [`Config::set_max_pacing_rate()`]: struct.Config.html#method.set_max_pacing_rate
2599 #[cfg(feature = "boringssl-boring-crate")]
2600 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2601 pub fn set_max_pacing_rate_in_handshake(
2602 ssl: &mut boring::ssl::SslRef, v: Option<u64>,
2603 ) -> Result<()> {
2604 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2605
2606 ex_data.recovery_config.max_pacing_rate = v;
2607
2608 Ok(())
2609 }
2610
2611 /// Sets the maximum outgoing UDP payload size.
2612 ///
2613 /// This function can only be called inside one of BoringSSL's handshake
2614 /// callbacks, before any packet has been sent. Calling this function any
2615 /// other time will have no effect.
2616 ///
2617 /// See [`Config::set_max_send_udp_payload_size()`].
2618 ///
2619 /// [`Config::set_max_send_udp_payload_size()`]: struct.Config.html#method.set_max_send_udp_payload_size
2620 #[cfg(feature = "boringssl-boring-crate")]
2621 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2622 pub fn set_max_send_udp_payload_size_in_handshake(
2623 ssl: &mut boring::ssl::SslRef, v: usize,
2624 ) -> Result<()> {
2625 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2626
2627 ex_data.recovery_config.max_send_udp_payload_size = v;
2628
2629 Ok(())
2630 }
2631
2632 /// Sets the send capacity factor.
2633 ///
2634 /// This function can only be called inside one of BoringSSL's handshake
2635 /// callbacks, before any packet has been sent. Calling this function any
2636 /// other time will have no effect.
2637 ///
2638 /// See [`Config::set_send_capacity_factor()`].
2639 ///
2640 /// [`Config::set_max_send_udp_payload_size()`]: struct.Config.html#method.set_send_capacity_factor
2641 #[cfg(feature = "boringssl-boring-crate")]
2642 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2643 pub fn set_send_capacity_factor_in_handshake(
2644 ssl: &mut boring::ssl::SslRef, v: f64,
2645 ) -> Result<()> {
2646 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2647
2648 ex_data.tx_cap_factor = v;
2649
2650 Ok(())
2651 }
2652
2653 /// Configures whether to do path MTU discovery.
2654 ///
2655 /// This function can only be called inside one of BoringSSL's handshake
2656 /// callbacks, before any packet has been sent. Calling this function any
2657 /// other time will have no effect.
2658 ///
2659 /// See [`Config::discover_pmtu()`].
2660 ///
2661 /// [`Config::discover_pmtu()`]: struct.Config.html#method.discover_pmtu
2662 #[cfg(feature = "boringssl-boring-crate")]
2663 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2664 pub fn set_discover_pmtu_in_handshake(
2665 ssl: &mut boring::ssl::SslRef, discover: bool, max_probes: u8,
2666 ) -> Result<()> {
2667 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2668
2669 ex_data.pmtud = Some((discover, max_probes));
2670
2671 Ok(())
2672 }
2673
2674 /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
2675 ///
2676 /// This function can only be called inside one of BoringSSL's handshake
2677 /// callbacks, before any packet has been sent. Calling this function any
2678 /// other time will have no effect.
2679 ///
2680 /// See [`Config::set_max_idle_timeout()`].
2681 ///
2682 /// [`Config::set_max_idle_timeout()`]: struct.Config.html#method.set_max_idle_timeout
2683 #[cfg(feature = "boringssl-boring-crate")]
2684 #[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
2685 pub fn set_max_idle_timeout_in_handshake(
2686 ssl: &mut boring::ssl::SslRef, v: u64,
2687 ) -> Result<()> {
2688 let ex_data = tls::ExData::from_ssl_ref(ssl).ok_or(Error::TlsFail)?;
2689
2690 ex_data.local_transport_params.max_idle_timeout = v;
2691
2692 Self::set_transport_parameters_in_hanshake(
2693 ex_data.local_transport_params.clone(),
2694 ex_data.is_server,
2695 ssl,
2696 )
2697 }
2698
/// Sets the `initial_max_streams_bidi` transport parameter.
///
/// This function can only be called inside one of BoringSSL's handshake
/// callbacks, before any packet has been sent. Calling this function any
/// other time will have no effect.
///
/// See [`Config::set_initial_max_streams_bidi()`].
///
/// [`Config::set_initial_max_streams_bidi()`]: struct.Config.html#method.set_initial_max_streams_bidi
#[cfg(feature = "boringssl-boring-crate")]
#[cfg_attr(docsrs, doc(cfg(feature = "boringssl-boring-crate")))]
pub fn set_initial_max_streams_bidi_in_handshake(
    ssl: &mut boring::ssl::SslRef, v: u64,
) -> Result<()> {
    // Outside of a handshake callback there is no extra data attached to
    // the SSL object, so fail with a TLS error.
    let Some(ex_data) = tls::ExData::from_ssl_ref(ssl) else {
        return Err(Error::TlsFail);
    };

    ex_data.local_transport_params.initial_max_streams_bidi = v;

    // Copy out what the re-encoding needs so the `ex_data` borrow ends
    // before `ssl` is handed over again.
    let params = ex_data.local_transport_params.clone();
    let is_server = ex_data.is_server;

    Self::set_transport_parameters_in_hanshake(params, is_server, ssl)
}
2723
/// Re-encodes and applies the given transport parameters to the TLS state.
///
/// Used by the `*_in_handshake()` setters above to make a parameter change
/// visible before the TPs are written into a TLS message.
#[cfg(feature = "boringssl-boring-crate")]
fn set_transport_parameters_in_hanshake(
    params: TransportParams, is_server: bool, ssl: &mut boring::ssl::SslRef,
) -> Result<()> {
    use foreign_types_shared::ForeignTypeRef;

    // In order to apply the new parameter to the TLS state before TPs are
    // written into a TLS message, we need to re-encode all TPs immediately.
    //
    // Since we don't have direct access to the main `Connection` object, we
    // need to re-create the `Handshake` state from the `SslRef`.
    //
    // SAFETY: the `Handshake` object must not be drop()ed, otherwise it
    // would free the underlying BoringSSL structure. Wrapping it in
    // `ManuallyDrop` guarantees the destructor is skipped on *every*
    // return path. (The previous `std::mem::forget()` approach was
    // bypassed when `set_quic_transport_params()` failed: the `?` early
    // return dropped the `Handshake` and freed the BoringSSL state still
    // owned by `ssl`.)
    let mut handshake = std::mem::ManuallyDrop::new(unsafe {
        tls::Handshake::from_ptr(ssl.as_ptr() as _)
    });

    handshake.set_quic_transport_params(&params, is_server)
}
2748
/// Processes QUIC packets received from the peer.
///
/// On success the number of bytes processed from the input buffer is
/// returned. On error the connection will be closed by calling [`close()`]
/// with the appropriate error code.
///
/// Coalesced packets will be processed as necessary.
///
/// Note that the contents of the input buffer `buf` might be modified by
/// this function due to, for example, in-place decryption.
///
/// [`close()`]: struct.Connection.html#method.close
///
/// ## Examples:
///
/// ```no_run
/// # let mut buf = [0; 512];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let peer = "127.0.0.1:1234".parse().unwrap();
/// # let local = socket.local_addr().unwrap();
/// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
/// loop {
///     let (read, from) = socket.recv_from(&mut buf).unwrap();
///
///     let recv_info = quiche::RecvInfo {
///         from,
///         to: local,
///     };
///
///     let read = match conn.recv(&mut buf[..read], recv_info) {
///         Ok(v) => v,
///
///         Err(e) => {
///             // An error occurred, handle it.
///             break;
///         },
///     };
/// }
/// # Ok::<(), quiche::Error>(())
/// ```
pub fn recv(&mut self, buf: &mut [u8], info: RecvInfo) -> Result<usize> {
    let len = buf.len();

    // An empty datagram carries nothing to process.
    if len == 0 {
        return Err(Error::BufferTooShort);
    }

    // Map the datagram's 4-tuple to a known path, if any.
    let recv_pid = self.paths.path_id_from_addrs(&(info.to, info.from));

    if let Some(recv_pid) = recv_pid {
        let recv_path = self.paths.get_mut(recv_pid)?;

        // Keep track of how many bytes we received from the client, so we
        // can limit bytes sent back before address validation, to a
        // multiple of this. The limit needs to be increased early on, so
        // that if there is an error there is enough credit to send a
        // CONNECTION_CLOSE.
        //
        // It doesn't matter if the packets received were valid or not, we
        // only need to track the total amount of bytes received.
        //
        // Note that we also need to limit the number of bytes we sent on a
        // path if we are not the host that initiated its usage.
        if self.is_server && !recv_path.verified_peer_address {
            recv_path.max_send_bytes += len * self.max_amplification_factor;
        }
    } else if !self.is_server {
        // If a client receives packets from an unknown server address,
        // the client MUST discard these packets.
        trace!(
            "{} client received packet from unknown address {:?}, dropping",
            self.trace_id,
            info,
        );

        // Report the whole datagram as consumed so the caller moves on.
        return Ok(len);
    }

    // `done` counts bytes consumed so far, `left` the bytes remaining.
    let mut done = 0;
    let mut left = len;

    // Process coalesced packets.
    while left > 0 {
        let read = match self.recv_single(
            &mut buf[len - left..len],
            &info,
            recv_pid,
        ) {
            Ok(v) => v,

            Err(Error::Done) => {
                // If the packet can't be processed or decrypted, check if
                // it's a stateless reset.
                if self.is_stateless_reset(&buf[len - left..len]) {
                    trace!("{} packet is a stateless reset", self.trace_id);

                    self.mark_closed();
                }

                // Treat the rest of the datagram as consumed, ending the
                // loop.
                left
            },

            Err(e) => {
                // In case of error processing the incoming packet, close
                // the connection.
                self.close(false, e.to_wire(), b"").ok();
                return Err(e);
            },
        };

        done += read;
        left -= read;
    }

    // Even though the packet was previously "accepted", it
    // should be safe to forward the error, as it also comes
    // from the `recv()` method.
    self.process_undecrypted_0rtt_packets()?;

    Ok(done)
}
2872
2873 fn process_undecrypted_0rtt_packets(&mut self) -> Result<()> {
2874 // Process previously undecryptable 0-RTT packets if the decryption key
2875 // is now available.
2876 if self.crypto_ctx[packet::Epoch::Application]
2877 .crypto_0rtt_open
2878 .is_some()
2879 {
2880 while let Some((mut pkt, info)) = self.undecryptable_pkts.pop_front()
2881 {
2882 if let Err(e) = self.recv(&mut pkt, info) {
2883 self.undecryptable_pkts.clear();
2884
2885 return Err(e);
2886 }
2887 }
2888 }
2889 Ok(())
2890 }
2891
2892 /// Returns true if a QUIC packet is a stateless reset.
2893 fn is_stateless_reset(&self, buf: &[u8]) -> bool {
2894 // If the packet is too small, then we just throw it away.
2895 let buf_len = buf.len();
2896 if buf_len < 21 {
2897 return false;
2898 }
2899
2900 // TODO: we should iterate over all active destination connection IDs
2901 // and check against their reset token.
2902 match self.peer_transport_params.stateless_reset_token {
2903 Some(token) => {
2904 let token_len = 16;
2905
2906 crypto::verify_slices_are_equal(
2907 &token.to_be_bytes(),
2908 &buf[buf_len - token_len..buf_len],
2909 )
2910 .is_ok()
2911 },
2912
2913 None => false,
2914 }
2915 }
2916
/// Processes a single QUIC packet received from the peer.
///
/// On success the number of bytes processed from the input buffer is
/// returned. When the [`Done`] error is returned, processing of the
/// remainder of the incoming UDP datagram should be interrupted.
///
/// Note that a server might observe a new 4-tuple, preventing to
/// know in advance to which path the incoming packet belongs to (`recv_pid`
/// is `None`). As a client, packets from unknown 4-tuple are dropped
/// beforehand (see `recv()`).
///
/// On error, an error other than [`Done`] is returned.
///
/// [`Done`]: enum.Error.html#variant.Done
fn recv_single(
    &mut self, buf: &mut [u8], info: &RecvInfo, recv_pid: Option<usize>,
) -> Result<usize> {
    let now = Instant::now();

    if buf.is_empty() {
        return Err(Error::Done);
    }

    // No packet is processed once the connection is closed or draining.
    if self.is_closed() || self.is_draining() {
        return Err(Error::Done);
    }

    // A pending local error means we are already closing; ignore input.
    let is_closing = self.local_error.is_some();

    if is_closing {
        return Err(Error::Done);
    }

    let buf_len = buf.len();

    let mut b = octets::OctetsMut::with_slice(buf);

    let mut hdr = Header::from_bytes(&mut b, self.source_id().len())
        .map_err(|e| {
            drop_pkt_on_err(
                e,
                self.recv_count,
                self.is_server,
                &self.trace_id,
            )
        })?;

    if hdr.ty == Type::VersionNegotiation {
        // Version negotiation packets can only be sent by the server.
        if self.is_server {
            return Err(Error::Done);
        }

        // Ignore duplicate version negotiation.
        if self.did_version_negotiation {
            return Err(Error::Done);
        }

        // Ignore version negotiation if any other packet has already been
        // successfully processed.
        if self.recv_count > 0 {
            return Err(Error::Done);
        }

        // The packet must echo our connection IDs to be authentic.
        if hdr.dcid != self.source_id() {
            return Err(Error::Done);
        }

        if hdr.scid != self.destination_id() {
            return Err(Error::Done);
        }

        trace!("{} rx pkt {:?}", self.trace_id, hdr);

        let versions = hdr.versions.ok_or(Error::Done)?;

        // Ignore version negotiation if the version already selected is
        // listed.
        if versions.contains(&self.version) {
            return Err(Error::Done);
        }

        let supported_versions =
            versions.iter().filter(|&&v| version_is_supported(v));

        let mut found_version = false;

        for &v in supported_versions {
            found_version = true;

            // The final version takes precedence over draft ones.
            if v == PROTOCOL_VERSION_V1 {
                self.version = v;
                break;
            }

            // Otherwise pick the highest mutually supported version.
            self.version = cmp::max(self.version, v);
        }

        if !found_version {
            // We don't support any of the versions offered.
            //
            // While a man-in-the-middle attacker might be able to
            // inject a version negotiation packet that triggers this
            // failure, the window of opportunity is very small and
            // this error is quite useful for debugging, so don't just
            // ignore the packet.
            return Err(Error::UnknownVersion);
        }

        self.did_version_negotiation = true;

        // Derive Initial secrets based on the new version.
        let (aead_open, aead_seal) = crypto::derive_initial_key_material(
            &self.destination_id(),
            self.version,
            self.is_server,
            true,
        )?;

        // Reset connection state to force sending another Initial packet.
        self.drop_epoch_state(packet::Epoch::Initial, now);
        self.got_peer_conn_id = false;
        self.handshake.clear()?;

        self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
        self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);

        self.handshake
            .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);

        // Encode transport parameters again, as the new version might be
        // using a different format.
        self.encode_transport_params()?;

        return Err(Error::Done);
    }

    if hdr.ty == Type::Retry {
        // Retry packets can only be sent by the server.
        if self.is_server {
            return Err(Error::Done);
        }

        // Ignore duplicate retry.
        if self.did_retry {
            return Err(Error::Done);
        }

        // Check if Retry packet is valid.
        if packet::verify_retry_integrity(
            &b,
            &self.destination_id(),
            self.version,
        )
        .is_err()
        {
            return Err(Error::Done);
        }

        trace!("{} rx pkt {:?}", self.trace_id, hdr);

        // The Retry token must be echoed in the next Initial packet.
        self.token = hdr.token;
        self.did_retry = true;

        // Remember peer's new connection ID.
        self.odcid = Some(self.destination_id().into_owned());

        self.set_initial_dcid(
            hdr.scid.clone(),
            None,
            self.paths.get_active_path_id()?,
        )?;

        self.rscid = Some(self.destination_id().into_owned());

        // Derive Initial secrets using the new connection ID.
        let (aead_open, aead_seal) = crypto::derive_initial_key_material(
            &hdr.scid,
            self.version,
            self.is_server,
            true,
        )?;

        // Reset connection state to force sending another Initial packet.
        self.drop_epoch_state(packet::Epoch::Initial, now);
        self.got_peer_conn_id = false;
        self.handshake.clear()?;

        self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
        self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);

        return Err(Error::Done);
    }

    // A server adopts the version of the client's first packet.
    if self.is_server && !self.did_version_negotiation {
        if !version_is_supported(hdr.version) {
            return Err(Error::UnknownVersion);
        }

        self.version = hdr.version;
        self.did_version_negotiation = true;

        self.handshake
            .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);

        // Encode transport parameters again, as the new version might be
        // using a different format.
        self.encode_transport_params()?;
    }

    if hdr.ty != Type::Short && hdr.version != self.version {
        // At this point version negotiation was already performed, so
        // ignore packets that don't match the connection's version.
        return Err(Error::Done);
    }

    // Long header packets have an explicit payload length, but short
    // packets don't so just use the remaining capacity in the buffer.
    let payload_len = if hdr.ty == Type::Short {
        b.cap()
    } else {
        b.get_varint().map_err(|e| {
            drop_pkt_on_err(
                e.into(),
                self.recv_count,
                self.is_server,
                &self.trace_id,
            )
        })? as usize
    };

    // Make sure the buffer is same or larger than an explicit
    // payload length.
    if payload_len > b.cap() {
        return Err(drop_pkt_on_err(
            Error::InvalidPacket,
            self.recv_count,
            self.is_server,
            &self.trace_id,
        ));
    }

    // Derive initial secrets on the server.
    if !self.derived_initial_secrets {
        let (aead_open, aead_seal) = crypto::derive_initial_key_material(
            &hdr.dcid,
            self.version,
            self.is_server,
            false,
        )?;

        self.crypto_ctx[packet::Epoch::Initial].crypto_open = Some(aead_open);
        self.crypto_ctx[packet::Epoch::Initial].crypto_seal = Some(aead_seal);

        self.derived_initial_secrets = true;
    }

    // Select packet number space epoch based on the received packet's type.
    let epoch = hdr.ty.to_epoch()?;

    // Select AEAD context used to open incoming packet.
    let aead = if hdr.ty == Type::ZeroRTT {
        // Only use 0-RTT key if incoming packet is 0-RTT.
        self.crypto_ctx[epoch].crypto_0rtt_open.as_ref()
    } else {
        // Otherwise use the packet number space's main key.
        self.crypto_ctx[epoch].crypto_open.as_ref()
    };

    // Finally, discard packet if no usable key is available.
    let mut aead = match aead {
        Some(v) => v,

        None => {
            if hdr.ty == Type::ZeroRTT &&
                self.undecryptable_pkts.len() < MAX_UNDECRYPTABLE_PACKETS &&
                !self.is_established()
            {
                // Buffer 0-RTT packets when the required read key is not
                // available yet, and process them later.
                //
                // TODO: in the future we might want to buffer other types
                // of undecryptable packets as well.
                let pkt_len = b.off() + payload_len;
                let pkt = (b.buf()[..pkt_len]).to_vec();

                self.undecryptable_pkts.push_back((pkt, *info));
                return Ok(pkt_len);
            }

            let e = drop_pkt_on_err(
                Error::CryptoFail,
                self.recv_count,
                self.is_server,
                &self.trace_id,
            );

            return Err(e);
        },
    };

    // The AEAD tag trails the payload; it is accounted for in the total
    // number of bytes read at the end of this function.
    let aead_tag_len = aead.alg().tag_len();

    packet::decrypt_hdr(&mut b, &mut hdr, aead).map_err(|e| {
        drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
    })?;

    // Reconstruct the full packet number from its truncated wire form.
    let pn = packet::decode_pkt_num(
        self.pkt_num_spaces[epoch].largest_rx_pkt_num,
        hdr.pkt_num,
        hdr.pkt_num_len,
    );

    let pn_len = hdr.pkt_num_len;

    trace!(
        "{} rx pkt {:?} len={} pn={} {}",
        self.trace_id,
        hdr,
        payload_len,
        pn,
        AddrTupleFmt(info.from, info.to)
    );

    #[cfg(feature = "qlog")]
    let mut qlog_frames = vec![];

    // Check for key update.
    let mut aead_next = None;

    if self.handshake_confirmed &&
        hdr.ty != Type::ZeroRTT &&
        hdr.key_phase != self.key_phase
    {
        // Check if this packet arrived before key update.
        if let Some(key_update) = self.crypto_ctx[epoch]
            .key_update
            .as_ref()
            .and_then(|key_update| {
                (pn < key_update.pn_on_update).then_some(key_update)
            })
        {
            // Old packet: open it with the previous-generation key.
            aead = &key_update.crypto_open;
        } else {
            trace!("{} peer-initiated key update", self.trace_id);

            aead_next = Some((
                self.crypto_ctx[epoch]
                    .crypto_open
                    .as_ref()
                    .unwrap()
                    .derive_next_packet_key()?,
                self.crypto_ctx[epoch]
                    .crypto_seal
                    .as_ref()
                    .unwrap()
                    .derive_next_packet_key()?,
            ));

            // `aead_next` is always `Some()` at this point, so the `unwrap()`
            // will never fail.
            aead = &aead_next.as_ref().unwrap().0;
        }
    }

    let mut payload = packet::decrypt_pkt(
        &mut b,
        pn,
        pn_len,
        payload_len,
        aead,
    )
    .map_err(|e| {
        drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
    })?;

    if self.pkt_num_spaces[epoch].recv_pkt_num.contains(pn) {
        trace!("{} ignored duplicate packet {}", self.trace_id, pn);
        return Err(Error::Done);
    }

    // Packets with no frames are invalid.
    if payload.cap() == 0 {
        return Err(Error::InvalidPacket);
    }

    // Now that we decrypted the packet, let's see if we can map it to an
    // existing path.
    let recv_pid = if hdr.ty == Type::Short && self.got_peer_conn_id {
        let pkt_dcid = ConnectionId::from_ref(&hdr.dcid);
        self.get_or_create_recv_path_id(recv_pid, &pkt_dcid, buf_len, info)?
    } else {
        // During handshake, we are on the initial path.
        self.paths.get_active_path_id()?
    };

    // The key update is verified once a packet is successfully decrypted
    // using the new keys.
    if let Some((open_next, seal_next)) = aead_next {
        if !self.crypto_ctx[epoch]
            .key_update
            .as_ref()
            .is_none_or(|prev| prev.update_acked)
        {
            // Peer has updated keys twice without awaiting confirmation.
            return Err(Error::KeyUpdate);
        }

        trace!("{} key update verified", self.trace_id);

        // Install the next-generation keys, retaining the previous read
        // key so reordered old-phase packets can still be opened.
        let _ = self.crypto_ctx[epoch].crypto_seal.replace(seal_next);

        let open_prev = self.crypto_ctx[epoch]
            .crypto_open
            .replace(open_next)
            .unwrap();

        let recv_path = self.paths.get_mut(recv_pid)?;

        self.crypto_ctx[epoch].key_update = Some(packet::KeyUpdate {
            crypto_open: open_prev,
            pn_on_update: pn,
            update_acked: false,
            timer: now + (recv_path.recovery.pto() * 3),
        });

        self.key_phase = !self.key_phase;

        qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
            let trigger = Some(
                qlog::events::security::KeyUpdateOrRetiredTrigger::RemoteUpdate,
            );

            let ev_data_client =
                EventData::KeyUpdated(qlog::events::security::KeyUpdated {
                    key_type:
                        qlog::events::security::KeyType::Client1RttSecret,
                    trigger: trigger.clone(),
                    ..Default::default()
                });

            q.add_event_data_with_instant(ev_data_client, now).ok();

            let ev_data_server =
                EventData::KeyUpdated(qlog::events::security::KeyUpdated {
                    key_type:
                        qlog::events::security::KeyType::Server1RttSecret,
                    trigger,
                    ..Default::default()
                });

            q.add_event_data_with_instant(ev_data_server, now).ok();
        });
    }

    if !self.is_server && !self.got_peer_conn_id {
        if self.odcid.is_none() {
            self.odcid = Some(self.destination_id().into_owned());
        }

        // Replace the randomly generated destination connection ID with
        // the one supplied by the server.
        self.set_initial_dcid(
            hdr.scid.clone(),
            self.peer_transport_params.stateless_reset_token,
            recv_pid,
        )?;

        self.got_peer_conn_id = true;
    }

    if self.is_server && !self.got_peer_conn_id {
        self.set_initial_dcid(hdr.scid.clone(), None, recv_pid)?;

        if !self.did_retry {
            self.local_transport_params
                .original_destination_connection_id =
                Some(hdr.dcid.to_vec().into());

            self.encode_transport_params()?;
        }

        self.got_peer_conn_id = true;
    }

    // To avoid sending an ACK in response to an ACK-only packet, we need
    // to keep track of whether this packet contains any frame other than
    // ACK and PADDING.
    let mut ack_elicited = false;

    // Process packet payload. If a frame cannot be processed, store the
    // error and stop further packet processing.
    let mut frame_processing_err = None;

    // To know if the peer migrated the connection, we need to keep track
    // whether this is a non-probing packet.
    let mut probing = true;

    // Process packet payload.
    while payload.cap() > 0 {
        let frame = frame::Frame::from_bytes(&mut payload, hdr.ty)?;

        qlog_with_type!(QLOG_PACKET_RX, self.qlog, _q, {
            qlog_frames.push(frame.to_qlog());
        });

        if frame.ack_eliciting() {
            ack_elicited = true;
        }

        if !frame.probing() {
            probing = false;
        }

        if let Err(e) = self.process_frame(frame, &hdr, recv_pid, epoch, now)
        {
            frame_processing_err = Some(e);
            break;
        }
    }

    // Emit the qlog packet-received event even when frame processing
    // failed, so the qlog trace stays complete.
    qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
        let packet_size = b.len();

        let qlog_pkt_hdr = qlog::events::quic::PacketHeader::with_type(
            hdr.ty.to_qlog(),
            Some(pn),
            Some(hdr.version),
            Some(&hdr.scid),
            Some(&hdr.dcid),
        );

        let qlog_raw_info = RawInfo {
            length: Some(packet_size as u64),
            payload_length: Some(payload_len as u64),
            data: None,
        };

        let ev_data =
            EventData::PacketReceived(qlog::events::quic::PacketReceived {
                header: qlog_pkt_hdr,
                frames: Some(qlog_frames),
                raw: Some(qlog_raw_info),
                ..Default::default()
            });

        q.add_event_data_with_instant(ev_data, now).ok();
    });

    qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
        let recv_path = self.paths.get_mut(recv_pid)?;
        recv_path.recovery.maybe_qlog(q, now);
    });

    if let Some(e) = frame_processing_err {
        // Any frame error is terminal, so now just return.
        return Err(e);
    }

    // Only log the remote transport parameters once the connection is
    // established (i.e. after frames have been fully parsed) and only
    // once per connection.
    if self.is_established() {
        qlog_with_type!(QLOG_PARAMS_SET, self.qlog, q, {
            if !self.qlog.logged_peer_params {
                let ev_data = self
                    .peer_transport_params
                    .to_qlog(TransportOwner::Remote, self.handshake.cipher());

                q.add_event_data_with_instant(ev_data, now).ok();

                self.qlog.logged_peer_params = true;
            }
        });
    }

    // Process acked frames. Note that several packets from several paths
    // might have been acked by the received packet.
    for (_, p) in self.paths.iter_mut() {
        while let Some(acked) = p.recovery.next_acked_frame(epoch) {
            match acked {
                frame::Frame::Ping {
                    mtu_probe: Some(mtu_probe),
                } =>
                    if let Some(pmtud) = p.pmtud.as_mut() {
                        trace!(
                            "{} pmtud probe acked; probe size {:?}",
                            self.trace_id,
                            mtu_probe
                        );

                        // Ensure the probe is within the supported MTU range
                        // before updating the max datagram size
                        if let Some(current_mtu) =
                            pmtud.successful_probe(mtu_probe)
                        {
                            qlog_with_type!(
                                EventType::ConnectivityEventType(
                                    ConnectivityEventType::MtuUpdated
                                ),
                                self.qlog,
                                q,
                                {
                                    let pmtu_data = EventData::MtuUpdated(
                                        qlog::events::connectivity::MtuUpdated {
                                            old: Some(
                                                p.recovery.max_datagram_size()
                                                    as u16,
                                            ),
                                            new: current_mtu as u16,
                                            done: Some(true),
                                        },
                                    );

                                    q.add_event_data_with_instant(
                                        pmtu_data, now,
                                    )
                                    .ok();
                                }
                            );

                            p.recovery
                                .pmtud_update_max_datagram_size(current_mtu);
                        }
                    },

                frame::Frame::ACK { ranges, .. } => {
                    // Stop acknowledging packets less than or equal to the
                    // largest acknowledged in the sent ACK frame that, in
                    // turn, got acked.
                    if let Some(largest_acked) = ranges.last() {
                        self.pkt_num_spaces[epoch]
                            .recv_pkt_need_ack
                            .remove_until(largest_acked);
                    }
                },

                frame::Frame::CryptoHeader { offset, length } => {
                    self.crypto_ctx[epoch]
                        .crypto_stream
                        .send
                        .ack_and_drop(offset, length);
                },

                frame::Frame::StreamHeader {
                    stream_id,
                    offset,
                    length,
                    ..
                } => {
                    // Update tx_buffered and emit qlog before checking if the
                    // stream still exists. The client does need to ACK
                    // frames that were received after the client sends a
                    // ResetStream.
                    self.tx_buffered =
                        self.tx_buffered.saturating_sub(length);

                    qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
                        let ev_data = EventData::DataMoved(
                            qlog::events::quic::DataMoved {
                                stream_id: Some(stream_id),
                                offset: Some(offset),
                                length: Some(length as u64),
                                from: Some(DataRecipient::Transport),
                                to: Some(DataRecipient::Dropped),
                                ..Default::default()
                            },
                        );

                        q.add_event_data_with_instant(ev_data, now).ok();
                    });

                    let stream = match self.streams.get_mut(stream_id) {
                        Some(v) => v,

                        None => continue,
                    };

                    stream.send.ack_and_drop(offset, length);

                    let priority_key = Arc::clone(&stream.priority_key);

                    // Only collect the stream if it is complete and not
                    // readable or writable.
                    //
                    // If it is readable, it will get collected when
                    // stream_recv() is next used.
                    //
                    // If it is writable, it might mean that the stream
                    // has been stopped by the peer (i.e. a STOP_SENDING
                    // frame is received), in which case before collecting
                    // the stream we will need to propagate the
                    // `StreamStopped` error to the application. It will
                    // instead get collected when one of stream_capacity(),
                    // stream_writable(), stream_send(), ... is next called.
                    //
                    // Note that we can't use `is_writable()` here because
                    // it returns false if the stream is stopped. Instead,
                    // since the stream is marked as writable when a
                    // STOP_SENDING frame is received, we check the writable
                    // queue directly instead.
                    let is_writable = priority_key.writable.is_linked() &&
                        // Ensure that the stream is actually stopped.
                        stream.send.is_stopped();

                    let is_complete = stream.is_complete();
                    let is_readable = stream.is_readable();

                    if is_complete && !is_readable && !is_writable {
                        let local = stream.local;
                        self.streams.collect(stream_id, local);
                    }
                },

                frame::Frame::HandshakeDone => {
                    // Explicitly set this to true, so that if the frame was
                    // already scheduled for retransmission, it is aborted.
                    self.handshake_done_sent = true;

                    self.handshake_done_acked = true;
                },

                frame::Frame::ResetStream { stream_id, .. } => {
                    let stream = match self.streams.get_mut(stream_id) {
                        Some(v) => v,

                        None => continue,
                    };

                    let priority_key = Arc::clone(&stream.priority_key);

                    // Only collect the stream if it is complete and not
                    // readable or writable.
                    //
                    // If it is readable, it will get collected when
                    // stream_recv() is next used.
                    //
                    // If it is writable, it might mean that the stream
                    // has been stopped by the peer (i.e. a STOP_SENDING
                    // frame is received), in which case before collecting
                    // the stream we will need to propagate the
                    // `StreamStopped` error to the application. It will
                    // instead get collected when one of stream_capacity(),
                    // stream_writable(), stream_send(), ... is next called.
                    //
                    // Note that we can't use `is_writable()` here because
                    // it returns false if the stream is stopped. Instead,
                    // since the stream is marked as writable when a
                    // STOP_SENDING frame is received, we check the writable
                    // queue directly instead.
                    let is_writable = priority_key.writable.is_linked() &&
                        // Ensure that the stream is actually stopped.
                        stream.send.is_stopped();

                    let is_complete = stream.is_complete();
                    let is_readable = stream.is_readable();

                    if is_complete && !is_readable && !is_writable {
                        let local = stream.local;
                        self.streams.collect(stream_id, local);
                    }
                },

                _ => (),
            }
        }
    }

    // Now that we processed all the frames, if there is a path that has no
    // Destination CID, try to allocate one.
    let no_dcid = self
        .paths
        .iter_mut()
        .filter(|(_, p)| p.active_dcid_seq.is_none());

    for (pid, p) in no_dcid {
        if self.ids.zero_length_dcid() {
            p.active_dcid_seq = Some(0);
            continue;
        }

        let dcid_seq = match self.ids.lowest_available_dcid_seq() {
            Some(seq) => seq,
            None => break,
        };

        self.ids.link_dcid_to_path_id(dcid_seq, pid)?;

        p.active_dcid_seq = Some(dcid_seq);
    }

    // We only record the time of arrival of the largest packet number
    // that still needs to be acked, to be used for ACK delay calculation.
    if self.pkt_num_spaces[epoch].recv_pkt_need_ack.last() < Some(pn) {
        self.pkt_num_spaces[epoch].largest_rx_pkt_time = now;
    }

    self.pkt_num_spaces[epoch].recv_pkt_num.insert(pn);

    self.pkt_num_spaces[epoch].recv_pkt_need_ack.push_item(pn);

    self.pkt_num_spaces[epoch].ack_elicited =
        cmp::max(self.pkt_num_spaces[epoch].ack_elicited, ack_elicited);

    self.pkt_num_spaces[epoch].largest_rx_pkt_num =
        cmp::max(self.pkt_num_spaces[epoch].largest_rx_pkt_num, pn);

    if !probing {
        self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num = cmp::max(
            self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num,
            pn,
        );

        // Did the peer migrate to another path?
        let active_path_id = self.paths.get_active_path_id()?;

        if self.is_server &&
            recv_pid != active_path_id &&
            self.pkt_num_spaces[epoch].largest_rx_non_probing_pkt_num == pn
        {
            self.on_peer_migrated(recv_pid, self.disable_dcid_reuse, now)?;
        }
    }

    // Receiving a valid packet restarts the idle timer.
    if let Some(idle_timeout) = self.idle_timeout() {
        self.idle_timer = Some(now + idle_timeout);
    }

    // Update send capacity.
    self.update_tx_cap();

    self.recv_count += 1;
    self.paths.get_mut(recv_pid)?.recv_count += 1;

    // Total bytes consumed: header + payload offset plus the AEAD tag.
    let read = b.off() + aead_tag_len;

    self.recv_bytes += read as u64;
    self.paths.get_mut(recv_pid)?.recv_bytes += read as u64;

    // A Handshake packet has been received from the client and has been
    // successfully processed, so we can drop the initial state and consider
    // the client's address to be verified.
    if self.is_server && hdr.ty == Type::Handshake {
        self.drop_epoch_state(packet::Epoch::Initial, now);

        self.paths.get_mut(recv_pid)?.verified_peer_address = true;
    }

    // NOTE(review): cleared on every successfully processed packet;
    // presumably re-arms sending of ack-eliciting packets — confirm
    // against the send path.
    self.ack_eliciting_sent = false;

    Ok(read)
}
3770
    /// Writes a single QUIC packet to be sent to the peer.
    ///
    /// On success the number of bytes written to the output buffer is
    /// returned, or [`Done`] if there was nothing to write.
    ///
    /// The application should call `send()` multiple times until [`Done`] is
    /// returned, indicating that there are no more packets to send. It is
    /// recommended that `send()` be called in the following cases:
    ///
    /// * When the application receives QUIC packets from the peer (that is,
    ///   any time [`recv()`] is also called).
    ///
    /// * When the connection timer expires (that is, any time [`on_timeout()`]
    ///   is also called).
    ///
    /// * When the application sends data to the peer (for example, any time
    ///   [`stream_send()`] or [`stream_shutdown()`] are called).
    ///
    /// * When the application receives data from the peer (for example any
    ///   time [`stream_recv()`] is called).
    ///
    /// Once [`is_draining()`] returns `true`, it is no longer necessary to call
    /// `send()` and all calls will return [`Done`].
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`recv()`]: struct.Connection.html#method.recv
    /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
    /// [`stream_send()`]: struct.Connection.html#method.stream_send
    /// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
    /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
    /// [`is_draining()`]: struct.Connection.html#method.is_draining
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut out = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// loop {
    ///     let (write, send_info) = match conn.send(&mut out) {
    ///         Ok(v) => v,
    ///
    ///         Err(quiche::Error::Done) => {
    ///             // Done writing.
    ///             break;
    ///         },
    ///
    ///         Err(e) => {
    ///             // An error occurred, handle it.
    ///             break;
    ///         },
    ///     };
    ///
    ///     socket.send_to(&out[..write], &send_info.to).unwrap();
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn send(&mut self, out: &mut [u8]) -> Result<(usize, SendInfo)> {
        // Equivalent to `send_on_path()` with no restriction on the local or
        // peer address: any available path may be selected.
        self.send_on_path(out, None, None)
    }
3835
    /// Writes a single QUIC packet to be sent to the peer from the specified
    /// local address `from` to the destination address `to`.
    ///
    /// The behavior of this method differs depending on the value of the `from`
    /// and `to` parameters:
    ///
    /// * If both are `Some`, then the method only considers the 4-tuple
    ///   (`from`, `to`). Application can monitor the 4-tuple availability,
    ///   either by monitoring [`path_event_next()`] events or by relying on
    ///   the [`paths_iter()`] method. If the provided 4-tuple does not exist
    ///   on the connection (anymore), it returns an [`InvalidState`].
    ///
    /// * If `from` is `Some` and `to` is `None`, then the method only
    ///   considers sending packets on paths having `from` as local address.
    ///
    /// * If `to` is `Some` and `from` is `None`, then the method only
    ///   considers sending packets on paths having `to` as peer address.
    ///
    /// * If both are `None`, all available paths are considered.
    ///
    /// On success the number of bytes written to the output buffer is
    /// returned, or [`Done`] if there was nothing to write.
    ///
    /// The application should call `send_on_path()` multiple times until
    /// [`Done`] is returned, indicating that there are no more packets to
    /// send. It is recommended that `send_on_path()` be called in the
    /// following cases:
    ///
    /// * When the application receives QUIC packets from the peer (that is,
    ///   any time [`recv()`] is also called).
    ///
    /// * When the connection timer expires (that is, any time [`on_timeout()`]
    ///   is also called).
    ///
    /// * When the application sends data to the peer (for example, any time
    ///   [`stream_send()`] or [`stream_shutdown()`] are called).
    ///
    /// * When the application receives data from the peer (for example any
    ///   time [`stream_recv()`] is called).
    ///
    /// Once [`is_draining()`] returns `true`, it is no longer necessary to call
    /// `send_on_path()` and all calls will return [`Done`].
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`InvalidState`]: enum.Error.html#variant.InvalidState
    /// [`recv()`]: struct.Connection.html#method.recv
    /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
    /// [`stream_send()`]: struct.Connection.html#method.stream_send
    /// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
    /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
    /// [`path_event_next()`]: struct.Connection.html#method.path_event_next
    /// [`paths_iter()`]: struct.Connection.html#method.paths_iter
    /// [`is_draining()`]: struct.Connection.html#method.is_draining
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut out = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// loop {
    ///     let (write, send_info) = match conn.send_on_path(&mut out, Some(local), Some(peer)) {
    ///         Ok(v) => v,
    ///
    ///         Err(quiche::Error::Done) => {
    ///             // Done writing.
    ///             break;
    ///         },
    ///
    ///         Err(e) => {
    ///             // An error occurred, handle it.
    ///             break;
    ///         },
    ///     };
    ///
    ///     socket.send_to(&out[..write], &send_info.to).unwrap();
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn send_on_path(
        &mut self, out: &mut [u8], from: Option<SocketAddr>,
        to: Option<SocketAddr>,
    ) -> Result<(usize, SendInfo)> {
        if out.is_empty() {
            return Err(Error::BufferTooShort);
        }

        if self.is_closed() || self.is_draining() {
            return Err(Error::Done);
        }

        let now = Instant::now();

        // Only progress the handshake while no local error is pending, so a
        // close in progress isn't disturbed by further handshake work.
        if self.local_error.is_none() {
            self.do_handshake(now)?;
        }

        // Forwarding the error value here could confuse
        // applications, as they may not expect to get a `recv()`
        // error when calling `send()`.
        //
        // We simply fall-through to sending packets, which should
        // take care of terminating the connection as needed.
        let _ = self.process_undecrypted_0rtt_packets();

        // There's no point in trying to send a packet if the Initial secrets
        // have not been derived yet, so return early.
        if !self.derived_initial_secrets {
            return Err(Error::Done);
        }

        // Whether an Initial packet has been written into the datagram so
        // far; such datagrams may need padding (see below).
        let mut has_initial = false;

        // Total bytes written to `out` across all coalesced packets.
        let mut done = 0;

        // Limit output packet size to respect the sender and receiver's
        // maximum UDP payload size limit.
        let mut left = cmp::min(out.len(), self.max_send_udp_payload_size());

        let send_pid = match (from, to) {
            // Both addresses given: the exact 4-tuple must already exist on
            // the connection, otherwise it's an application error.
            (Some(f), Some(t)) => self
                .paths
                .path_id_from_addrs(&(f, t))
                .ok_or(Error::InvalidState)?,

            _ => self.get_send_path_id(from, to)?,
        };

        let send_path = self.paths.get_mut(send_pid)?;

        // Update max datagram size to allow path MTU discovery probe to be sent.
        if let Some(pmtud) = send_path.pmtud.as_mut() {
            if pmtud.should_probe() {
                // Probe-sized datagrams are only used once the handshake has
                // progressed; before that, stick to the current MTU estimate.
                let size = if self.handshake_confirmed || self.handshake_completed
                {
                    pmtud.get_probe_size()
                } else {
                    pmtud.get_current_mtu()
                };

                send_path.recovery.pmtud_update_max_datagram_size(size);

                left =
                    cmp::min(out.len(), send_path.recovery.max_datagram_size());
            }
        }

        // Limit data sent by the server based on the amount of data received
        // from the client before its address is validated
        // (anti-amplification limit).
        if !send_path.verified_peer_address && self.is_server {
            left = cmp::min(left, send_path.max_send_bytes);
        }

        // Generate coalesced packets: keep appending packets to the datagram
        // until the buffer is full or there is nothing more to send.
        while left > 0 {
            let (ty, written) = match self.send_single(
                &mut out[done..done + left],
                send_pid,
                has_initial,
                now,
            ) {
                Ok(v) => v,

                Err(Error::BufferTooShort) | Err(Error::Done) => break,

                Err(e) => return Err(e),
            };

            done += written;
            left -= written;

            match ty {
                Type::Initial => has_initial = true,

                // No more packets can be coalesced after a 1-RTT.
                Type::Short => break,

                _ => (),
            };

            // When sending multiple PTO probes, don't coalesce them together,
            // so they are sent on separate UDP datagrams.
            if let Ok(epoch) = ty.to_epoch() {
                if self.paths.get_mut(send_pid)?.recovery.loss_probes(epoch) > 0 {
                    break;
                }
            }

            // Don't coalesce packets that must go on different paths.
            if !(from.is_some() && to.is_some()) &&
                self.get_send_path_id(from, to)? != send_pid
            {
                break;
            }
        }

        if done == 0 {
            self.last_tx_data = self.tx_data;

            return Err(Error::Done);
        }

        // Datagrams containing an Initial packet are padded up to the
        // minimum size of `MIN_CLIENT_INITIAL_LEN` bytes.
        if has_initial && left > 0 && done < MIN_CLIENT_INITIAL_LEN {
            let pad_len = cmp::min(left, MIN_CLIENT_INITIAL_LEN - done);

            // Fill padding area with null bytes, to avoid leaking information
            // in case the application reuses the packet buffer.
            out[done..done + pad_len].fill(0);

            done += pad_len;
        }

        let send_path = self.paths.get(send_pid)?;

        let info = SendInfo {
            from: send_path.local_addr(),
            to: send_path.peer_addr(),

            // The pacer-determined release time for this datagram.
            at: send_path.recovery.get_packet_send_time(now),
        };

        Ok((done, info))
    }
4063
4064 fn send_single(
4065 &mut self, out: &mut [u8], send_pid: usize, has_initial: bool,
4066 now: Instant,
4067 ) -> Result<(Type, usize)> {
4068 if out.is_empty() {
4069 return Err(Error::BufferTooShort);
4070 }
4071
4072 if self.is_draining() {
4073 return Err(Error::Done);
4074 }
4075
4076 let is_closing = self.local_error.is_some();
4077
4078 let out_len = out.len();
4079
4080 let mut b = octets::OctetsMut::with_slice(out);
4081
4082 let pkt_type = self.write_pkt_type(send_pid)?;
4083
4084 let max_dgram_len = if !self.dgram_send_queue.is_empty() {
4085 self.dgram_max_writable_len()
4086 } else {
4087 None
4088 };
4089
4090 let epoch = pkt_type.to_epoch()?;
4091 let pkt_space = &mut self.pkt_num_spaces[epoch];
4092 let crypto_ctx = &mut self.crypto_ctx[epoch];
4093
4094 // Process lost frames. There might be several paths having lost frames.
4095 for (_, p) in self.paths.iter_mut() {
4096 while let Some(lost) = p.recovery.next_lost_frame(epoch) {
4097 match lost {
4098 frame::Frame::CryptoHeader { offset, length } => {
4099 crypto_ctx.crypto_stream.send.retransmit(offset, length);
4100
4101 self.stream_retrans_bytes += length as u64;
4102 p.stream_retrans_bytes += length as u64;
4103
4104 self.retrans_count += 1;
4105 p.retrans_count += 1;
4106 },
4107
4108 frame::Frame::StreamHeader {
4109 stream_id,
4110 offset,
4111 length,
4112 fin,
4113 } => {
4114 let stream = match self.streams.get_mut(stream_id) {
4115 // Only retransmit data if the stream is not closed
4116 // or stopped.
4117 Some(v) if !v.send.is_stopped() => v,
4118
4119 // Data on a closed stream will not be retransmitted
4120 // or acked after it is declared lost, so update
4121 // tx_buffered and qlog.
4122 _ => {
4123 self.tx_buffered =
4124 self.tx_buffered.saturating_sub(length);
4125
4126 qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
4127 let ev_data = EventData::DataMoved(
4128 qlog::events::quic::DataMoved {
4129 stream_id: Some(stream_id),
4130 offset: Some(offset),
4131 length: Some(length as u64),
4132 from: Some(DataRecipient::Transport),
4133 to: Some(DataRecipient::Dropped),
4134 ..Default::default()
4135 },
4136 );
4137
4138 q.add_event_data_with_instant(ev_data, now)
4139 .ok();
4140 });
4141
4142 continue;
4143 },
4144 };
4145
4146 let was_flushable = stream.is_flushable();
4147
4148 let empty_fin = length == 0 && fin;
4149
4150 stream.send.retransmit(offset, length);
4151
4152 // If the stream is now flushable push it to the
4153 // flushable queue, but only if it wasn't already
4154 // queued.
4155 //
4156 // Consider the stream flushable also when we are
4157 // sending a zero-length frame that has the fin flag
4158 // set.
4159 if (stream.is_flushable() || empty_fin) && !was_flushable
4160 {
4161 let priority_key = Arc::clone(&stream.priority_key);
4162 self.streams.insert_flushable(&priority_key);
4163 }
4164
4165 self.stream_retrans_bytes += length as u64;
4166 p.stream_retrans_bytes += length as u64;
4167
4168 self.retrans_count += 1;
4169 p.retrans_count += 1;
4170 },
4171
4172 frame::Frame::ACK { .. } => {
4173 pkt_space.ack_elicited = true;
4174 },
4175
4176 frame::Frame::ResetStream {
4177 stream_id,
4178 error_code,
4179 final_size,
4180 } => {
4181 self.streams
4182 .insert_reset(stream_id, error_code, final_size);
4183 },
4184
4185 frame::Frame::StopSending {
4186 stream_id,
4187 error_code,
4188 } =>
4189 // We only need to retransmit the STOP_SENDING frame if
4190 // the stream is still active and not FIN'd. Even if the
4191 // packet was lost, if the application has the final
4192 // size at this point there is no need to retransmit.
4193 if let Some(stream) = self.streams.get(stream_id) {
4194 if !stream.recv.is_fin() {
4195 self.streams
4196 .insert_stopped(stream_id, error_code);
4197 }
4198 },
4199
4200 // Retransmit HANDSHAKE_DONE only if it hasn't been acked at
4201 // least once already.
4202 frame::Frame::HandshakeDone if !self.handshake_done_acked => {
4203 self.handshake_done_sent = false;
4204 },
4205
4206 frame::Frame::MaxStreamData { stream_id, .. } => {
4207 if self.streams.get(stream_id).is_some() {
4208 self.streams.insert_almost_full(stream_id);
4209 }
4210 },
4211
4212 frame::Frame::MaxData { .. } => {
4213 self.should_send_max_data = true;
4214 },
4215
4216 frame::Frame::MaxStreamsUni { .. } => {
4217 self.should_send_max_streams_uni = true;
4218 },
4219
4220 frame::Frame::MaxStreamsBidi { .. } => {
4221 self.should_send_max_streams_bidi = true;
4222 },
4223
4224 // Retransmit STREAMS_BLOCKED frames if the frame with the
4225 // most recent limit is lost. These are informational
4226 // signals to the peer, reliably sending them
4227 // ensures the signal is used consistently and helps
4228 // debugging.
4229 frame::Frame::StreamsBlockedBidi { limit } => {
4230 self.streams_blocked_bidi_state
4231 .force_retransmit_sent_limit_eq(limit);
4232 },
4233
4234 frame::Frame::StreamsBlockedUni { limit } => {
4235 self.streams_blocked_uni_state
4236 .force_retransmit_sent_limit_eq(limit);
4237 },
4238
4239 frame::Frame::NewConnectionId { seq_num, .. } => {
4240 self.ids.mark_advertise_new_scid_seq(seq_num, true);
4241 },
4242
4243 frame::Frame::RetireConnectionId { seq_num } => {
4244 self.ids.mark_retire_dcid_seq(seq_num, true)?;
4245 },
4246
4247 frame::Frame::Ping {
4248 mtu_probe: Some(failed_probe),
4249 } =>
4250 if let Some(pmtud) = p.pmtud.as_mut() {
4251 trace!("pmtud probe dropped: {failed_probe}");
4252 pmtud.failed_probe(failed_probe);
4253 },
4254
4255 _ => (),
4256 }
4257 }
4258 }
4259 self.check_tx_buffered_invariant();
4260
4261 let is_app_limited = self.delivery_rate_check_if_app_limited();
4262 let n_paths = self.paths.len();
4263 let path = self.paths.get_mut(send_pid)?;
4264 let flow_control = &mut self.flow_control;
4265 let pkt_space = &mut self.pkt_num_spaces[epoch];
4266 let crypto_ctx = &mut self.crypto_ctx[epoch];
4267 let pkt_num_manager = &mut self.pkt_num_manager;
4268
4269 let mut left = if let Some(pmtud) = path.pmtud.as_mut() {
4270 // Limit output buffer size by estimated path MTU.
4271 cmp::min(pmtud.get_current_mtu(), b.cap())
4272 } else {
4273 b.cap()
4274 };
4275
4276 if pkt_num_manager.should_skip_pn(self.handshake_completed) {
4277 pkt_num_manager.set_skip_pn(Some(self.next_pkt_num));
4278 self.next_pkt_num += 1;
4279 };
4280 let pn = self.next_pkt_num;
4281
4282 let largest_acked_pkt =
4283 path.recovery.get_largest_acked_on_epoch(epoch).unwrap_or(0);
4284 let pn_len = packet::pkt_num_len(pn, largest_acked_pkt);
4285
4286 // The AEAD overhead at the current encryption level.
4287 let crypto_overhead = crypto_ctx.crypto_overhead().ok_or(Error::Done)?;
4288
4289 let dcid_seq = path.active_dcid_seq.ok_or(Error::OutOfIdentifiers)?;
4290
4291 let dcid =
4292 ConnectionId::from_ref(self.ids.get_dcid(dcid_seq)?.cid.as_ref());
4293
4294 let scid = if let Some(scid_seq) = path.active_scid_seq {
4295 ConnectionId::from_ref(self.ids.get_scid(scid_seq)?.cid.as_ref())
4296 } else if pkt_type == Type::Short {
4297 ConnectionId::default()
4298 } else {
4299 return Err(Error::InvalidState);
4300 };
4301
4302 let hdr = Header {
4303 ty: pkt_type,
4304
4305 version: self.version,
4306
4307 dcid,
4308 scid,
4309
4310 pkt_num: 0,
4311 pkt_num_len: pn_len,
4312
4313 // Only clone token for Initial packets, as other packets don't have
4314 // this field (Retry doesn't count, as it's not encoded as part of
4315 // this code path).
4316 token: if pkt_type == Type::Initial {
4317 self.token.clone()
4318 } else {
4319 None
4320 },
4321
4322 versions: None,
4323 key_phase: self.key_phase,
4324 };
4325
4326 hdr.to_bytes(&mut b)?;
4327
4328 let hdr_trace = if log::max_level() == log::LevelFilter::Trace {
4329 Some(format!("{hdr:?}"))
4330 } else {
4331 None
4332 };
4333
4334 let hdr_ty = hdr.ty;
4335
4336 #[cfg(feature = "qlog")]
4337 let qlog_pkt_hdr = self.qlog.streamer.as_ref().map(|_q| {
4338 qlog::events::quic::PacketHeader::with_type(
4339 hdr.ty.to_qlog(),
4340 Some(pn),
4341 Some(hdr.version),
4342 Some(&hdr.scid),
4343 Some(&hdr.dcid),
4344 )
4345 });
4346
4347 // Calculate the space required for the packet, including the header
4348 // the payload length, the packet number and the AEAD overhead.
4349 let mut overhead = b.off() + pn_len + crypto_overhead;
4350
4351 // We assume that the payload length, which is only present in long
4352 // header packets, can always be encoded with a 2-byte varint.
4353 if pkt_type != Type::Short {
4354 overhead += PAYLOAD_LENGTH_LEN;
4355 }
4356
4357 // Make sure we have enough space left for the packet overhead.
4358 match left.checked_sub(overhead) {
4359 Some(v) => left = v,
4360
4361 None => {
4362 // We can't send more because there isn't enough space available
4363 // in the output buffer.
4364 //
4365 // This usually happens when we try to send a new packet but
4366 // failed because cwnd is almost full. In such case app_limited
4367 // is set to false here to make cwnd grow when ACK is received.
4368 path.recovery.update_app_limited(false);
4369 return Err(Error::Done);
4370 },
4371 }
4372
4373 // Make sure there is enough space for the minimum payload length.
4374 if left < PAYLOAD_MIN_LEN {
4375 path.recovery.update_app_limited(false);
4376 return Err(Error::Done);
4377 }
4378
4379 let mut frames: SmallVec<[frame::Frame; 1]> = SmallVec::new();
4380
4381 let mut ack_eliciting = false;
4382 let mut in_flight = false;
4383 let mut is_pmtud_probe = false;
4384 let mut has_data = false;
4385
4386 // Whether or not we should explicitly elicit an ACK via PING frame if we
4387 // implicitly elicit one otherwise.
4388 let ack_elicit_required = path.recovery.should_elicit_ack(epoch);
4389
4390 let header_offset = b.off();
4391
4392 // Reserve space for payload length in advance. Since we don't yet know
4393 // what the final length will be, we reserve 2 bytes in all cases.
4394 //
4395 // Only long header packets have an explicit length field.
4396 if pkt_type != Type::Short {
4397 b.skip(PAYLOAD_LENGTH_LEN)?;
4398 }
4399
4400 packet::encode_pkt_num(pn, pn_len, &mut b)?;
4401
4402 let payload_offset = b.off();
4403
4404 let cwnd_available =
4405 path.recovery.cwnd_available().saturating_sub(overhead);
4406
4407 let left_before_packing_ack_frame = left;
4408
4409 // Create ACK frame.
4410 //
4411 // When we need to explicitly elicit an ACK via PING later, go ahead and
4412 // generate an ACK (if there's anything to ACK) since we're going to
4413 // send a packet with PING anyways, even if we haven't received anything
4414 // ACK eliciting.
4415 if pkt_space.recv_pkt_need_ack.len() > 0 &&
4416 (pkt_space.ack_elicited || ack_elicit_required) &&
4417 (!is_closing ||
4418 (pkt_type == Type::Handshake &&
4419 self.local_error
4420 .as_ref()
4421 .is_some_and(|le| le.is_app))) &&
4422 path.active()
4423 {
4424 #[cfg(not(feature = "fuzzing"))]
4425 let ack_delay = pkt_space.largest_rx_pkt_time.elapsed();
4426
4427 #[cfg(not(feature = "fuzzing"))]
4428 let ack_delay = ack_delay.as_micros() as u64 /
4429 2_u64
4430 .pow(self.local_transport_params.ack_delay_exponent as u32);
4431
4432 // pseudo-random reproducible ack delays when fuzzing
4433 #[cfg(feature = "fuzzing")]
4434 let ack_delay = rand::rand_u8() as u64 + 1;
4435
4436 let frame = frame::Frame::ACK {
4437 ack_delay,
4438 ranges: pkt_space.recv_pkt_need_ack.clone(),
4439 ecn_counts: None, // sending ECN is not supported at this time
4440 };
4441
4442 // When a PING frame needs to be sent, avoid sending the ACK if
4443 // there is not enough cwnd available for both (note that PING
4444 // frames are always 1 byte, so we just need to check that the
4445 // ACK's length is lower than cwnd).
4446 if pkt_space.ack_elicited || frame.wire_len() < cwnd_available {
4447 // ACK-only packets are not congestion controlled so ACKs must
4448 // be bundled considering the buffer capacity only, and not the
4449 // available cwnd.
4450 if push_frame_to_pkt!(b, frames, frame, left) {
4451 pkt_space.ack_elicited = false;
4452 }
4453 }
4454 }
4455
4456 // Limit output packet size by congestion window size.
4457 left = cmp::min(
4458 left,
4459 // Bytes consumed by ACK frames.
4460 cwnd_available.saturating_sub(left_before_packing_ack_frame - left),
4461 );
4462
4463 let mut challenge_data = None;
4464
4465 if pkt_type == Type::Short {
4466 // Create PMTUD probe.
4467 //
4468 // In order to send a PMTUD probe the current `left` value, which was
4469 // already limited by the current PMTU measure, needs to be ignored,
4470 // but the outgoing packet still needs to be limited by
4471 // the output buffer size, as well as the congestion
4472 // window.
4473 //
4474 // In addition, the PMTUD probe is only generated when the handshake
4475 // is confirmed, to avoid interfering with the handshake
4476 // (e.g. due to the anti-amplification limits).
4477 if let Ok(active_path) = self.paths.get_active_mut() {
4478 let should_probe_pmtu = active_path.should_send_pmtu_probe(
4479 self.handshake_confirmed,
4480 self.handshake_completed,
4481 out_len,
4482 is_closing,
4483 frames.is_empty(),
4484 );
4485
4486 if should_probe_pmtu {
4487 if let Some(pmtud) = active_path.pmtud.as_mut() {
4488 let probe_size = pmtud.get_probe_size();
4489 trace!(
4490 "{} sending pmtud probe pmtu_probe={} estimated_pmtu={}",
4491 self.trace_id,
4492 probe_size,
4493 pmtud.get_current_mtu(),
4494 );
4495
4496 left = probe_size;
4497
4498 match left.checked_sub(overhead) {
4499 Some(v) => left = v,
4500
4501 None => {
4502 // We can't send more because there isn't enough
4503 // space available in the output buffer.
4504 //
4505 // This usually happens when we try to send a new
4506 // packet but failed because cwnd is almost full.
4507 //
4508 // In such case app_limited is set to false here
4509 // to make cwnd grow when ACK is received.
4510 active_path.recovery.update_app_limited(false);
4511 return Err(Error::Done);
4512 },
4513 }
4514
4515 let frame = frame::Frame::Padding {
4516 len: probe_size - overhead - 1,
4517 };
4518
4519 if push_frame_to_pkt!(b, frames, frame, left) {
4520 let frame = frame::Frame::Ping {
4521 mtu_probe: Some(probe_size),
4522 };
4523
4524 if push_frame_to_pkt!(b, frames, frame, left) {
4525 ack_eliciting = true;
4526 in_flight = true;
4527 }
4528 }
4529
4530 // Reset probe flag after sending to prevent duplicate
4531 // probes in a single flight.
4532 pmtud.set_in_flight(true);
4533 is_pmtud_probe = true;
4534 }
4535 }
4536 }
4537
4538 let path = self.paths.get_mut(send_pid)?;
4539 // Create PATH_RESPONSE frame if needed.
4540 // We do not try to ensure that these are really sent.
4541 while let Some(challenge) = path.pop_received_challenge() {
4542 let frame = frame::Frame::PathResponse { data: challenge };
4543
4544 if push_frame_to_pkt!(b, frames, frame, left) {
4545 ack_eliciting = true;
4546 in_flight = true;
4547 } else {
4548 // If there are other pending PATH_RESPONSE, don't lose them
4549 // now.
4550 break;
4551 }
4552 }
4553
4554 // Create PATH_CHALLENGE frame if needed.
4555 if path.validation_requested() {
4556 // TODO: ensure that data is unique over paths.
4557 let data = rand::rand_u64().to_be_bytes();
4558
4559 let frame = frame::Frame::PathChallenge { data };
4560
4561 if push_frame_to_pkt!(b, frames, frame, left) {
4562 // Let's notify the path once we know the packet size.
4563 challenge_data = Some(data);
4564
4565 ack_eliciting = true;
4566 in_flight = true;
4567 }
4568 }
4569
4570 if let Some(key_update) = crypto_ctx.key_update.as_mut() {
4571 key_update.update_acked = true;
4572 }
4573 }
4574
4575 let path = self.paths.get_mut(send_pid)?;
4576
4577 if pkt_type == Type::Short && !is_closing {
4578 // Create NEW_CONNECTION_ID frames as needed.
4579 while let Some(seq_num) = self.ids.next_advertise_new_scid_seq() {
4580 let frame = self.ids.get_new_connection_id_frame_for(seq_num)?;
4581
4582 if push_frame_to_pkt!(b, frames, frame, left) {
4583 self.ids.mark_advertise_new_scid_seq(seq_num, false);
4584
4585 ack_eliciting = true;
4586 in_flight = true;
4587 } else {
4588 break;
4589 }
4590 }
4591 }
4592
4593 if pkt_type == Type::Short && !is_closing && path.active() {
4594 // Create HANDSHAKE_DONE frame.
4595 // self.should_send_handshake_done() but without the need to borrow
4596 if self.handshake_completed &&
4597 !self.handshake_done_sent &&
4598 self.is_server
4599 {
4600 let frame = frame::Frame::HandshakeDone;
4601
4602 if push_frame_to_pkt!(b, frames, frame, left) {
4603 self.handshake_done_sent = true;
4604
4605 ack_eliciting = true;
4606 in_flight = true;
4607 }
4608 }
4609
4610 // Create MAX_STREAMS_BIDI frame.
4611 if self.streams.should_update_max_streams_bidi() ||
4612 self.should_send_max_streams_bidi
4613 {
4614 let frame = frame::Frame::MaxStreamsBidi {
4615 max: self.streams.max_streams_bidi_next(),
4616 };
4617
4618 if push_frame_to_pkt!(b, frames, frame, left) {
4619 self.streams.update_max_streams_bidi();
4620 self.should_send_max_streams_bidi = false;
4621
4622 ack_eliciting = true;
4623 in_flight = true;
4624 }
4625 }
4626
4627 // Create MAX_STREAMS_UNI frame.
4628 if self.streams.should_update_max_streams_uni() ||
4629 self.should_send_max_streams_uni
4630 {
4631 let frame = frame::Frame::MaxStreamsUni {
4632 max: self.streams.max_streams_uni_next(),
4633 };
4634
4635 if push_frame_to_pkt!(b, frames, frame, left) {
4636 self.streams.update_max_streams_uni();
4637 self.should_send_max_streams_uni = false;
4638
4639 ack_eliciting = true;
4640 in_flight = true;
4641 }
4642 }
4643
4644 // Create DATA_BLOCKED frame.
4645 if let Some(limit) = self.blocked_limit {
4646 let frame = frame::Frame::DataBlocked { limit };
4647
4648 if push_frame_to_pkt!(b, frames, frame, left) {
4649 self.blocked_limit = None;
4650 self.data_blocked_sent_count =
4651 self.data_blocked_sent_count.saturating_add(1);
4652
4653 ack_eliciting = true;
4654 in_flight = true;
4655 }
4656 }
4657
4658 // Create STREAMS_BLOCKED (bidi) frame when the local endpoint has
4659 // exhausted the peer's bidirectional stream count limit.
4660 if self
4661 .streams_blocked_bidi_state
4662 .has_pending_stream_blocked_frame()
4663 {
4664 if let Some(limit) = self.streams_blocked_bidi_state.blocked_at {
4665 let frame = frame::Frame::StreamsBlockedBidi { limit };
4666
4667 if push_frame_to_pkt!(b, frames, frame, left) {
4668 // Record the limit we just notified the peer about so
4669 // that redundant frames for the same limit are
4670 // suppressed.
4671 self.streams_blocked_bidi_state.blocked_sent =
4672 Some(limit);
4673
4674 ack_eliciting = true;
4675 in_flight = true;
4676 }
4677 }
4678 }
4679
4680 // Create STREAMS_BLOCKED (uni) frame when the local endpoint has
4681 // exhausted the peer's unidirectional stream count limit.
4682 if self
4683 .streams_blocked_uni_state
4684 .has_pending_stream_blocked_frame()
4685 {
4686 if let Some(limit) = self.streams_blocked_uni_state.blocked_at {
4687 let frame = frame::Frame::StreamsBlockedUni { limit };
4688
4689 if push_frame_to_pkt!(b, frames, frame, left) {
4690 // Record the limit we just notified the peer about so
4691 // that redundant frames for the same limit are
4692 // suppressed.
4693 self.streams_blocked_uni_state.blocked_sent = Some(limit);
4694
4695 ack_eliciting = true;
4696 in_flight = true;
4697 }
4698 }
4699 }
4700
4701 // Create MAX_STREAM_DATA frames as needed.
4702 for stream_id in self.streams.almost_full() {
4703 let stream = match self.streams.get_mut(stream_id) {
4704 Some(v) => v,
4705
4706 None => {
4707 // The stream doesn't exist anymore, so remove it from
4708 // the almost full set.
4709 self.streams.remove_almost_full(stream_id);
4710 continue;
4711 },
4712 };
4713
                // Autotune the stream window size, but only if this is not a
                // retransmission (on a retransmit the stream will be in
                // `self.streams.almost_full()` but its `almost_full()`
                // method returns false).
4718 if stream.recv.almost_full() {
4719 stream.recv.autotune_window(now, path.recovery.rtt());
4720 }
4721
4722 let frame = frame::Frame::MaxStreamData {
4723 stream_id,
4724 max: stream.recv.max_data_next(),
4725 };
4726
4727 if push_frame_to_pkt!(b, frames, frame, left) {
4728 let recv_win = stream.recv.window();
4729
4730 stream.recv.update_max_data(now);
4731
4732 self.streams.remove_almost_full(stream_id);
4733
4734 ack_eliciting = true;
4735 in_flight = true;
4736
4737 // Make sure the connection window always has some
4738 // room compared to the stream window.
4739 flow_control.ensure_window_lower_bound(
4740 (recv_win as f64 * CONNECTION_WINDOW_FACTOR) as u64,
4741 );
4742 }
4743 }
4744
4745 // Create MAX_DATA frame as needed.
4746 if flow_control.should_update_max_data() &&
4747 flow_control.max_data() < flow_control.max_data_next()
4748 {
4749 // Autotune the connection window size. We only tune the window
4750 // if we are sending an "organic" update, not on retransmits.
4751 flow_control.autotune_window(now, path.recovery.rtt());
4752 self.should_send_max_data = true;
4753 }
4754
4755 if self.should_send_max_data {
4756 let frame = frame::Frame::MaxData {
4757 max: flow_control.max_data_next(),
4758 };
4759
4760 if push_frame_to_pkt!(b, frames, frame, left) {
4761 self.should_send_max_data = false;
4762
4763 // Commits the new max_rx_data limit.
4764 flow_control.update_max_data(now);
4765
4766 ack_eliciting = true;
4767 in_flight = true;
4768 }
4769 }
4770
4771 // Create STOP_SENDING frames as needed.
4772 for (stream_id, error_code) in self
4773 .streams
4774 .stopped()
4775 .map(|(&k, &v)| (k, v))
4776 .collect::<Vec<(u64, u64)>>()
4777 {
4778 let frame = frame::Frame::StopSending {
4779 stream_id,
4780 error_code,
4781 };
4782
4783 if push_frame_to_pkt!(b, frames, frame, left) {
4784 self.streams.remove_stopped(stream_id);
4785
4786 ack_eliciting = true;
4787 in_flight = true;
4788 }
4789 }
4790
4791 // Create RESET_STREAM frames as needed.
4792 for (stream_id, (error_code, final_size)) in self
4793 .streams
4794 .reset()
4795 .map(|(&k, &v)| (k, v))
4796 .collect::<Vec<(u64, (u64, u64))>>()
4797 {
4798 let frame = frame::Frame::ResetStream {
4799 stream_id,
4800 error_code,
4801 final_size,
4802 };
4803
4804 if push_frame_to_pkt!(b, frames, frame, left) {
4805 self.streams.remove_reset(stream_id);
4806
4807 ack_eliciting = true;
4808 in_flight = true;
4809 }
4810 }
4811
4812 // Create STREAM_DATA_BLOCKED frames as needed.
4813 for (stream_id, limit) in self
4814 .streams
4815 .blocked()
4816 .map(|(&k, &v)| (k, v))
4817 .collect::<Vec<(u64, u64)>>()
4818 {
4819 let frame = frame::Frame::StreamDataBlocked { stream_id, limit };
4820
4821 if push_frame_to_pkt!(b, frames, frame, left) {
4822 self.streams.remove_blocked(stream_id);
4823 self.stream_data_blocked_sent_count =
4824 self.stream_data_blocked_sent_count.saturating_add(1);
4825
4826 ack_eliciting = true;
4827 in_flight = true;
4828 }
4829 }
4830
4831 // Create RETIRE_CONNECTION_ID frames as needed.
4832 let retire_dcid_seqs = self.ids.retire_dcid_seqs();
4833
4834 for seq_num in retire_dcid_seqs {
4835 // The sequence number specified in a RETIRE_CONNECTION_ID frame
4836 // MUST NOT refer to the Destination Connection ID field of the
4837 // packet in which the frame is contained.
4838 let dcid_seq = path.active_dcid_seq.ok_or(Error::InvalidState)?;
4839
4840 if seq_num == dcid_seq {
4841 continue;
4842 }
4843
4844 let frame = frame::Frame::RetireConnectionId { seq_num };
4845
4846 if push_frame_to_pkt!(b, frames, frame, left) {
4847 self.ids.mark_retire_dcid_seq(seq_num, false)?;
4848
4849 ack_eliciting = true;
4850 in_flight = true;
4851 } else {
4852 break;
4853 }
4854 }
4855 }
4856
4857 // Create CONNECTION_CLOSE frame. Try to send this only on the active
4858 // path, unless it is the last one available.
4859 if path.active() || n_paths == 1 {
4860 if let Some(conn_err) = self.local_error.as_ref() {
4861 if conn_err.is_app {
4862 // Create ApplicationClose frame.
4863 if pkt_type == Type::Short {
4864 let frame = frame::Frame::ApplicationClose {
4865 error_code: conn_err.error_code,
4866 reason: conn_err.reason.clone(),
4867 };
4868
4869 if push_frame_to_pkt!(b, frames, frame, left) {
4870 let pto = path.recovery.pto();
4871 self.draining_timer = Some(now + (pto * 3));
4872
4873 ack_eliciting = true;
4874 in_flight = true;
4875 }
4876 }
4877 } else {
4878 // Create ConnectionClose frame.
4879 let frame = frame::Frame::ConnectionClose {
4880 error_code: conn_err.error_code,
4881 frame_type: 0,
4882 reason: conn_err.reason.clone(),
4883 };
4884
4885 if push_frame_to_pkt!(b, frames, frame, left) {
4886 let pto = path.recovery.pto();
4887 self.draining_timer = Some(now + (pto * 3));
4888
4889 ack_eliciting = true;
4890 in_flight = true;
4891 }
4892 }
4893 }
4894 }
4895
4896 // Create CRYPTO frame.
4897 if crypto_ctx.crypto_stream.is_flushable() &&
4898 left > frame::MAX_CRYPTO_OVERHEAD &&
4899 !is_closing &&
4900 path.active()
4901 {
4902 let crypto_off = crypto_ctx.crypto_stream.send.off_front();
4903
4904 // Encode the frame.
4905 //
4906 // Instead of creating a `frame::Frame` object, encode the frame
4907 // directly into the packet buffer.
4908 //
4909 // First we reserve some space in the output buffer for writing the
4910 // frame header (we assume the length field is always a 2-byte
4911 // varint as we don't know the value yet).
4912 //
4913 // Then we emit the data from the crypto stream's send buffer.
4914 //
4915 // Finally we go back and encode the frame header with the now
4916 // available information.
4917 let hdr_off = b.off();
4918 let hdr_len = 1 + // frame type
4919 octets::varint_len(crypto_off) + // offset
4920 2; // length, always encode as 2-byte varint
4921
4922 if let Some(max_len) = left.checked_sub(hdr_len) {
4923 let (mut crypto_hdr, mut crypto_payload) =
4924 b.split_at(hdr_off + hdr_len)?;
4925
4926 // Write stream data into the packet buffer.
4927 let (len, _) = crypto_ctx
4928 .crypto_stream
4929 .send
4930 .emit(&mut crypto_payload.as_mut()[..max_len])?;
4931
4932 // Encode the frame's header.
4933 //
4934 // Due to how `OctetsMut::split_at()` works, `crypto_hdr` starts
4935 // from the initial offset of `b` (rather than the current
4936 // offset), so it needs to be advanced to the
4937 // initial frame offset.
4938 crypto_hdr.skip(hdr_off)?;
4939
4940 frame::encode_crypto_header(
4941 crypto_off,
4942 len as u64,
4943 &mut crypto_hdr,
4944 )?;
4945
4946 // Advance the packet buffer's offset.
4947 b.skip(hdr_len + len)?;
4948
4949 let frame = frame::Frame::CryptoHeader {
4950 offset: crypto_off,
4951 length: len,
4952 };
4953
4954 if push_frame_to_pkt!(b, frames, frame, left) {
4955 ack_eliciting = true;
4956 in_flight = true;
4957 has_data = true;
4958 }
4959 }
4960 }
4961
4962 // The preference of data-bearing frame to include in a packet
4963 // is managed by `self.emit_dgram`. However, whether any frames
4964 // can be sent depends on the state of their buffers. In the case
4965 // where one type is preferred but its buffer is empty, fall back
4966 // to the other type in order not to waste this function call.
4967 let mut dgram_emitted = false;
4968 let dgrams_to_emit = max_dgram_len.is_some();
4969 let stream_to_emit = self.streams.has_flushable();
4970
4971 let mut do_dgram = self.emit_dgram && dgrams_to_emit;
4972 let do_stream = !self.emit_dgram && stream_to_emit;
4973
4974 if !do_stream && dgrams_to_emit {
4975 do_dgram = true;
4976 }
4977
4978 // Create DATAGRAM frame.
4979 if (pkt_type == Type::Short || pkt_type == Type::ZeroRTT) &&
4980 left > frame::MAX_DGRAM_OVERHEAD &&
4981 !is_closing &&
4982 path.active() &&
4983 do_dgram
4984 {
4985 if let Some(max_dgram_payload) = max_dgram_len {
4986 while let Some(len) = self.dgram_send_queue.peek_front_len() {
4987 let hdr_off = b.off();
4988 let hdr_len = 1 + // frame type
4989 2; // length, always encode as 2-byte varint
4990
4991 if (hdr_len + len) <= left {
4992 // Front of the queue fits this packet, send it.
4993 match self.dgram_send_queue.pop() {
4994 Some(data) => {
4995 // Encode the frame.
4996 //
4997 // Instead of creating a `frame::Frame` object,
4998 // encode the frame directly into the packet
4999 // buffer.
5000 //
5001 // First we reserve some space in the output
5002 // buffer for writing the frame header (we
5003 // assume the length field is always a 2-byte
5004 // varint as we don't know the value yet).
5005 //
5006 // Then we emit the data from the DATAGRAM's
5007 // buffer.
5008 //
5009 // Finally we go back and encode the frame
5010 // header with the now available information.
5011 let (mut dgram_hdr, mut dgram_payload) =
5012 b.split_at(hdr_off + hdr_len)?;
5013
5014 dgram_payload.as_mut()[..len]
5015 .copy_from_slice(&data);
5016
5017 // Encode the frame's header.
5018 //
5019 // Due to how `OctetsMut::split_at()` works,
5020 // `dgram_hdr` starts from the initial offset
5021 // of `b` (rather than the current offset), so
5022 // it needs to be advanced to the initial frame
5023 // offset.
5024 dgram_hdr.skip(hdr_off)?;
5025
5026 frame::encode_dgram_header(
5027 len as u64,
5028 &mut dgram_hdr,
5029 )?;
5030
5031 // Advance the packet buffer's offset.
5032 b.skip(hdr_len + len)?;
5033
5034 let frame =
5035 frame::Frame::DatagramHeader { length: len };
5036
5037 if push_frame_to_pkt!(b, frames, frame, left) {
5038 ack_eliciting = true;
5039 in_flight = true;
5040 dgram_emitted = true;
5041 self.dgram_sent_count =
5042 self.dgram_sent_count.saturating_add(1);
5043 path.dgram_sent_count =
5044 path.dgram_sent_count.saturating_add(1);
5045 }
5046 },
5047
5048 None => continue,
5049 };
5050 } else if len > max_dgram_payload {
5051 // This dgram frame will never fit. Let's purge it.
5052 self.dgram_send_queue.pop();
5053 } else {
5054 break;
5055 }
5056 }
5057 }
5058 }
5059
5060 // Create a single STREAM frame for the first stream that is flushable.
5061 if (pkt_type == Type::Short || pkt_type == Type::ZeroRTT) &&
5062 left > frame::MAX_STREAM_OVERHEAD &&
5063 !is_closing &&
5064 path.active() &&
5065 !dgram_emitted
5066 {
5067 while let Some(priority_key) = self.streams.peek_flushable() {
5068 let stream_id = priority_key.id;
5069 let stream = match self.streams.get_mut(stream_id) {
5070 // Avoid sending frames for streams that were already stopped.
5071 //
5072 // This might happen if stream data was buffered but not yet
5073 // flushed on the wire when a STOP_SENDING frame is received.
5074 Some(v) if !v.send.is_stopped() => v,
5075 _ => {
5076 self.streams.remove_flushable(&priority_key);
5077 continue;
5078 },
5079 };
5080
5081 let stream_off = stream.send.off_front();
5082
5083 // Encode the frame.
5084 //
5085 // Instead of creating a `frame::Frame` object, encode the frame
5086 // directly into the packet buffer.
5087 //
5088 // First we reserve some space in the output buffer for writing
5089 // the frame header (we assume the length field is always a
5090 // 2-byte varint as we don't know the value yet).
5091 //
5092 // Then we emit the data from the stream's send buffer.
5093 //
5094 // Finally we go back and encode the frame header with the now
5095 // available information.
5096 let hdr_off = b.off();
5097 let hdr_len = 1 + // frame type
5098 octets::varint_len(stream_id) + // stream_id
5099 octets::varint_len(stream_off) + // offset
5100 2; // length, always encode as 2-byte varint
5101
5102 let max_len = match left.checked_sub(hdr_len) {
5103 Some(v) => v,
5104 None => {
5105 let priority_key = Arc::clone(&stream.priority_key);
5106 self.streams.remove_flushable(&priority_key);
5107
5108 continue;
5109 },
5110 };
5111
5112 let (mut stream_hdr, mut stream_payload) =
5113 b.split_at(hdr_off + hdr_len)?;
5114
5115 // Write stream data into the packet buffer.
5116 let (len, fin) =
5117 stream.send.emit(&mut stream_payload.as_mut()[..max_len])?;
5118
5119 // Encode the frame's header.
5120 //
5121 // Due to how `OctetsMut::split_at()` works, `stream_hdr` starts
5122 // from the initial offset of `b` (rather than the current
5123 // offset), so it needs to be advanced to the initial frame
5124 // offset.
5125 stream_hdr.skip(hdr_off)?;
5126
5127 frame::encode_stream_header(
5128 stream_id,
5129 stream_off,
5130 len as u64,
5131 fin,
5132 &mut stream_hdr,
5133 )?;
5134
5135 // Advance the packet buffer's offset.
5136 b.skip(hdr_len + len)?;
5137
5138 let frame = frame::Frame::StreamHeader {
5139 stream_id,
5140 offset: stream_off,
5141 length: len,
5142 fin,
5143 };
5144
5145 if push_frame_to_pkt!(b, frames, frame, left) {
5146 ack_eliciting = true;
5147 in_flight = true;
5148 has_data = true;
5149 }
5150
5151 let priority_key = Arc::clone(&stream.priority_key);
5152 // If the stream is no longer flushable, remove it from the queue
5153 if !stream.is_flushable() {
5154 self.streams.remove_flushable(&priority_key);
5155 } else if stream.incremental {
5156 // Shuffle the incremental stream to the back of the
5157 // queue.
5158 self.streams.remove_flushable(&priority_key);
5159 self.streams.insert_flushable(&priority_key);
5160 }
5161
5162 #[cfg(feature = "fuzzing")]
5163 // Coalesce STREAM frames when fuzzing.
5164 if left > frame::MAX_STREAM_OVERHEAD {
5165 continue;
5166 }
5167
5168 break;
5169 }
5170 }
5171
5172 // Alternate trying to send DATAGRAMs next time.
5173 self.emit_dgram = !dgram_emitted;
5174
5175 // If no other ack-eliciting frame is sent, include a PING frame
5176 // - if PTO probe needed; OR
5177 // - if we've sent too many non ack-eliciting packets without having
5178 // sent an ACK eliciting one; OR
5179 // - the application requested an ack-eliciting frame be sent.
5180 if (ack_elicit_required || path.needs_ack_eliciting) &&
5181 !ack_eliciting &&
5182 left >= 1 &&
5183 !is_closing
5184 {
5185 let frame = frame::Frame::Ping { mtu_probe: None };
5186
5187 if push_frame_to_pkt!(b, frames, frame, left) {
5188 ack_eliciting = true;
5189 in_flight = true;
5190 }
5191 }
5192
5193 if ack_eliciting && !is_pmtud_probe {
5194 path.needs_ack_eliciting = false;
5195 path.recovery.ping_sent(epoch);
5196 }
5197
5198 if !has_data &&
5199 !dgram_emitted &&
5200 cwnd_available > frame::MAX_STREAM_OVERHEAD
5201 {
5202 path.recovery.on_app_limited();
5203 }
5204
5205 if frames.is_empty() {
5206 // When we reach this point we are not able to write more, so set
5207 // app_limited to false.
5208 path.recovery.update_app_limited(false);
5209 return Err(Error::Done);
5210 }
5211
5212 // When coalescing a 1-RTT packet, we can't add padding in the UDP
5213 // datagram, so use PADDING frames instead.
5214 //
5215 // This is only needed if
5216 // 1) an Initial packet has already been written to the UDP datagram,
5217 // as Initial always requires padding.
5218 //
5219 // 2) this is a probing packet towards an unvalidated peer address.
5220 if (has_initial || !path.validated()) &&
5221 pkt_type == Type::Short &&
5222 left >= 1
5223 {
5224 let frame = frame::Frame::Padding { len: left };
5225
5226 if push_frame_to_pkt!(b, frames, frame, left) {
5227 in_flight = true;
5228 }
5229 }
5230
5231 // Pad payload so that it's always at least 4 bytes.
5232 if b.off() - payload_offset < PAYLOAD_MIN_LEN {
5233 let payload_len = b.off() - payload_offset;
5234
5235 let frame = frame::Frame::Padding {
5236 len: PAYLOAD_MIN_LEN - payload_len,
5237 };
5238
5239 #[allow(unused_assignments)]
5240 if push_frame_to_pkt!(b, frames, frame, left) {
5241 in_flight = true;
5242 }
5243 }
5244
5245 let payload_len = b.off() - payload_offset;
5246
5247 // Fill in payload length.
5248 if pkt_type != Type::Short {
5249 let len = pn_len + payload_len + crypto_overhead;
5250
5251 let (_, mut payload_with_len) = b.split_at(header_offset)?;
5252 payload_with_len
5253 .put_varint_with_len(len as u64, PAYLOAD_LENGTH_LEN)?;
5254 }
5255
5256 trace!(
5257 "{} tx pkt {} len={} pn={} {}",
5258 self.trace_id,
5259 hdr_trace.unwrap_or_default(),
5260 payload_len,
5261 pn,
5262 AddrTupleFmt(path.local_addr(), path.peer_addr())
5263 );
5264
5265 #[cfg(feature = "qlog")]
5266 let mut qlog_frames: SmallVec<
5267 [qlog::events::quic::QuicFrame; 1],
5268 > = SmallVec::with_capacity(frames.len());
5269
5270 for frame in &mut frames {
5271 trace!("{} tx frm {:?}", self.trace_id, frame);
5272
5273 qlog_with_type!(QLOG_PACKET_TX, self.qlog, _q, {
5274 qlog_frames.push(frame.to_qlog());
5275 });
5276 }
5277
5278 qlog_with_type!(QLOG_PACKET_TX, self.qlog, q, {
5279 if let Some(header) = qlog_pkt_hdr {
5280 // Qlog packet raw info described at
5281 // https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema-00#section-5.1
5282 //
5283 // `length` includes packet headers and trailers (AEAD tag).
5284 let length = payload_len + payload_offset + crypto_overhead;
5285 let qlog_raw_info = RawInfo {
5286 length: Some(length as u64),
5287 payload_length: Some(payload_len as u64),
5288 data: None,
5289 };
5290
5291 let send_at_time =
5292 now.duration_since(q.start_time()).as_secs_f64() * 1000.0;
5293
5294 let ev_data =
5295 EventData::PacketSent(qlog::events::quic::PacketSent {
5296 header,
5297 frames: Some(qlog_frames),
5298 raw: Some(qlog_raw_info),
5299 send_at_time: Some(send_at_time),
5300 ..Default::default()
5301 });
5302
5303 q.add_event_data_with_instant(ev_data, now).ok();
5304 }
5305 });
5306
5307 let aead = match crypto_ctx.crypto_seal {
5308 Some(ref mut v) => v,
5309 None => return Err(Error::InvalidState),
5310 };
5311
5312 let written = packet::encrypt_pkt(
5313 &mut b,
5314 pn,
5315 pn_len,
5316 payload_len,
5317 payload_offset,
5318 None,
5319 aead,
5320 )?;
5321
5322 let sent_pkt_has_data = if path.recovery.gcongestion_enabled() {
5323 has_data || dgram_emitted
5324 } else {
5325 has_data
5326 };
5327
5328 let sent_pkt = recovery::Sent {
5329 pkt_num: pn,
5330 frames,
5331 time_sent: now,
5332 time_acked: None,
5333 time_lost: None,
5334 size: if ack_eliciting { written } else { 0 },
5335 ack_eliciting,
5336 in_flight,
5337 delivered: 0,
5338 delivered_time: now,
5339 first_sent_time: now,
5340 is_app_limited: false,
5341 tx_in_flight: 0,
5342 lost: 0,
5343 has_data: sent_pkt_has_data,
5344 is_pmtud_probe,
5345 };
5346
5347 if in_flight && is_app_limited {
5348 path.recovery.delivery_rate_update_app_limited(true);
5349 }
5350
5351 self.next_pkt_num += 1;
5352
5353 let handshake_status = recovery::HandshakeStatus {
5354 has_handshake_keys: self.crypto_ctx[packet::Epoch::Handshake]
5355 .has_keys(),
5356 peer_verified_address: self.peer_verified_initial_address,
5357 completed: self.handshake_completed,
5358 };
5359
5360 self.on_packet_sent(send_pid, sent_pkt, epoch, handshake_status, now)?;
5361
5362 let path = self.paths.get_mut(send_pid)?;
5363 qlog_with_type!(QLOG_METRICS, self.qlog, q, {
5364 path.recovery.maybe_qlog(q, now);
5365 });
5366
5367 // Record sent packet size if we probe the path.
5368 if let Some(data) = challenge_data {
5369 path.add_challenge_sent(data, written, now);
5370 }
5371
5372 self.sent_count += 1;
5373 self.sent_bytes += written as u64;
5374 path.sent_count += 1;
5375 path.sent_bytes += written as u64;
5376
5377 if self.dgram_send_queue.byte_size() > path.recovery.cwnd_available() {
5378 path.recovery.update_app_limited(false);
5379 }
5380
5381 path.max_send_bytes = path.max_send_bytes.saturating_sub(written);
5382
5383 // On the client, drop initial state after sending an Handshake packet.
5384 if !self.is_server && hdr_ty == Type::Handshake {
5385 self.drop_epoch_state(packet::Epoch::Initial, now);
5386 }
5387
5388 // (Re)start the idle timer if we are sending the first ack-eliciting
5389 // packet since last receiving a packet.
5390 if ack_eliciting && !self.ack_eliciting_sent {
5391 if let Some(idle_timeout) = self.idle_timeout() {
5392 self.idle_timer = Some(now + idle_timeout);
5393 }
5394 }
5395
5396 if ack_eliciting {
5397 self.ack_eliciting_sent = true;
5398 }
5399
5400 Ok((pkt_type, written))
5401 }
5402
5403 fn on_packet_sent(
5404 &mut self, send_pid: usize, sent_pkt: recovery::Sent,
5405 epoch: packet::Epoch, handshake_status: recovery::HandshakeStatus,
5406 now: Instant,
5407 ) -> Result<()> {
5408 let path = self.paths.get_mut(send_pid)?;
5409
5410 // It's fine to set the skip counter based on a non-active path's values.
5411 let cwnd = path.recovery.cwnd();
5412 let max_datagram_size = path.recovery.max_datagram_size();
5413 self.pkt_num_spaces[epoch].on_packet_sent(&sent_pkt);
5414 self.pkt_num_manager.on_packet_sent(
5415 cwnd,
5416 max_datagram_size,
5417 self.handshake_completed,
5418 );
5419
5420 path.recovery.on_packet_sent(
5421 sent_pkt,
5422 epoch,
5423 handshake_status,
5424 now,
5425 &self.trace_id,
5426 );
5427
5428 Ok(())
5429 }
5430
5431 /// Returns the desired send time for the next packet.
5432 #[inline]
5433 pub fn get_next_release_time(&self) -> Option<ReleaseDecision> {
5434 Some(
5435 self.paths
5436 .get_active()
5437 .ok()?
5438 .recovery
5439 .get_next_release_time(),
5440 )
5441 }
5442
5443 /// Returns whether gcongestion is enabled.
5444 #[inline]
5445 pub fn gcongestion_enabled(&self) -> Option<bool> {
5446 Some(self.paths.get_active().ok()?.recovery.gcongestion_enabled())
5447 }
5448
5449 /// Returns the maximum pacing into the future.
5450 ///
5451 /// Equals 1/8 of the smoothed RTT, but at least 1ms and not greater than
5452 /// 5ms.
5453 pub fn max_release_into_future(&self) -> Duration {
5454 self.paths
5455 .get_active()
5456 .map(|p| p.recovery.rtt().mul_f64(0.125))
5457 .unwrap_or(Duration::from_millis(1))
5458 .max(Duration::from_millis(1))
5459 .min(Duration::from_millis(5))
5460 }
5461
    /// Returns whether pacing is enabled.
    ///
    /// This reflects the connection-level recovery configuration rather than
    /// the state of any individual path.
    #[inline]
    pub fn pacing_enabled(&self) -> bool {
        self.recovery_config.pacing
    }
5467
5468 /// Returns the size of the send quantum, in bytes.
5469 ///
5470 /// This represents the maximum size of a packet burst as determined by the
5471 /// congestion control algorithm in use.
5472 ///
5473 /// Applications can, for example, use it in conjunction with segmentation
5474 /// offloading mechanisms as the maximum limit for outgoing aggregates of
5475 /// multiple packets.
5476 #[inline]
5477 pub fn send_quantum(&self) -> usize {
5478 match self.paths.get_active() {
5479 Ok(p) => p.recovery.send_quantum(),
5480 _ => 0,
5481 }
5482 }
5483
5484 /// Returns the size of the send quantum over the given 4-tuple, in bytes.
5485 ///
5486 /// This represents the maximum size of a packet burst as determined by the
5487 /// congestion control algorithm in use.
5488 ///
5489 /// Applications can, for example, use it in conjunction with segmentation
5490 /// offloading mechanisms as the maximum limit for outgoing aggregates of
5491 /// multiple packets.
5492 ///
5493 /// If the (`local_addr`, peer_addr`) 4-tuple relates to a non-existing
5494 /// path, this method returns 0.
5495 pub fn send_quantum_on_path(
5496 &self, local_addr: SocketAddr, peer_addr: SocketAddr,
5497 ) -> usize {
5498 self.paths
5499 .path_id_from_addrs(&(local_addr, peer_addr))
5500 .and_then(|pid| self.paths.get(pid).ok())
5501 .map(|path| path.recovery.send_quantum())
5502 .unwrap_or(0)
5503 }
5504
    /// Reads contiguous data from a stream into the provided slice.
    ///
    /// The slice must be sized by the caller and will be populated up to its
    /// capacity.
    ///
    /// On success the amount of bytes read and a flag indicating the fin state
    /// is returned as a tuple, or [`Done`] if there is no data to read.
    ///
    /// Reading data from a stream may trigger queueing of control messages
    /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`send()`]: struct.Connection.html#method.send
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut buf = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// # let stream_id = 0;
    /// while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
    ///     println!("Got {} bytes on stream {}", read, stream_id);
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn stream_recv(
        &mut self, stream_id: u64, out: &mut [u8],
    ) -> Result<(usize, bool)> {
        // Delegate to the shared receive path, copying data into `out`.
        self.do_stream_recv(stream_id, RecvAction::Emit { out })
    }
5540
    /// Discard contiguous data from a stream without copying.
    ///
    /// On success the amount of bytes discarded and a flag indicating the fin
    /// state is returned as a tuple, or [`Done`] if there is no data to
    /// discard.
    ///
    /// Discarding data from a stream may trigger queueing of control messages
    /// (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`send()`]: struct.Connection.html#method.send
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// # let stream_id = 0;
    /// while let Ok((read, fin)) = conn.stream_discard(stream_id, 1) {
    ///     println!("Discarded {} byte(s) on stream {}", read, stream_id);
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    pub fn stream_discard(
        &mut self, stream_id: u64, len: usize,
    ) -> Result<(usize, bool)> {
        // Delegate to the shared receive path, dropping the data instead of
        // copying it out.
        self.do_stream_recv(stream_id, RecvAction::Discard { len })
    }
5573
    // Reads or discards contiguous data from a stream.
    //
    // Passing an `action` of `StreamRecvAction::Emit` results in a read into
    // the provided slice. It must be sized by the caller and will be populated
    // up to its capacity.
    //
    // Passing an `action` of `StreamRecvAction::Discard` results in discard up
    // to the indicated length.
    //
    // On success the amount of bytes read or discarded, and a flag indicating
    // the fin state, is returned as a tuple, or [`Done`] if there is no data to
    // read or discard.
    //
    // Reading or discarding data from a stream may trigger queueing of control
    // messages (e.g. MAX_STREAM_DATA). [`send()`] should be called afterwards.
    //
    // [`Done`]: enum.Error.html#variant.Done
    // [`send()`]: struct.Connection.html#method.send
    fn do_stream_recv(
        &mut self, stream_id: u64, action: RecvAction,
    ) -> Result<(usize, bool)> {
        // We can't read on our own unidirectional streams.
        if !stream::is_bidi(stream_id) &&
            stream::is_local(stream_id, self.is_server)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        // Operating on a stream that doesn't exist is an error, not `Done`.
        let stream = self
            .streams
            .get_mut(stream_id)
            .ok_or(Error::InvalidStreamState(stream_id))?;

        if !stream.is_readable() {
            return Err(Error::Done);
        }

        // Captured before `emit_or_discard()` so they are available on the
        // error path as well.
        let local = stream.local;
        let priority_key = Arc::clone(&stream.priority_key);

        // Record the pre-read offset for the qlog DataMoved event emitted
        // below.
        #[cfg(feature = "qlog")]
        let offset = stream.recv.off_front();

        // Where the data ends up, for qlog purposes: delivered to the
        // application on `Emit`, dropped on `Discard`.
        #[cfg(feature = "qlog")]
        let to = match action {
            RecvAction::Emit { .. } => Some(DataRecipient::Application),

            RecvAction::Discard { .. } => Some(DataRecipient::Dropped),
        };

        let (read, fin) = match stream.recv.emit_or_discard(action) {
            Ok(v) => v,

            Err(e) => {
                // Collect the stream if it is now complete. This can happen if
                // we got a `StreamReset` error which will now be propagated to
                // the application, so we don't need to keep the stream's state
                // anymore.
                if stream.is_complete() {
                    self.streams.collect(stream_id, local);
                }

                self.streams.remove_readable(&priority_key);
                return Err(e);
            },
        };

        // Account for the consumed bytes at the connection-level flow
        // control.
        self.flow_control.add_consumed(read as u64);

        // Evaluate the stream's state *after* the read, before updating the
        // bookkeeping collections below.
        let readable = stream.is_readable();

        let complete = stream.is_complete();

        // Consuming data may open enough receive window that a
        // MAX_STREAM_DATA frame should be scheduled.
        if stream.recv.almost_full() {
            self.streams.insert_almost_full(stream_id);
        }

        if !readable {
            self.streams.remove_readable(&priority_key);
        }

        if complete {
            self.streams.collect(stream_id, local);
        }

        qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
            let ev_data = EventData::DataMoved(qlog::events::quic::DataMoved {
                stream_id: Some(stream_id),
                offset: Some(offset),
                length: Some(read as u64),
                from: Some(DataRecipient::Transport),
                to,
                ..Default::default()
            });

            let now = Instant::now();
            q.add_event_data_with_instant(ev_data, now).ok();
        });

        if priority_key.incremental && readable {
            // Shuffle the incremental stream to the back of the queue.
            self.streams.remove_readable(&priority_key);
            self.streams.insert_readable(&priority_key);
        }

        Ok((read, fin))
    }
5681
5682 /// Writes data to a stream.
5683 ///
5684 /// On success the number of bytes written is returned, or [`Done`] if no
5685 /// data was written (e.g. because the stream has no capacity).
5686 ///
5687 /// Applications can provide a 0-length buffer with the fin flag set to
5688 /// true. This will lead to a 0-length FIN STREAM frame being sent at the
5689 /// latest offset. The `Ok(0)` value is only returned when the application
5690 /// provided a 0-length buffer.
5691 ///
5692 /// In addition, if the peer has signalled that it doesn't want to receive
5693 /// any more data from this stream by sending the `STOP_SENDING` frame, the
5694 /// [`StreamStopped`] error will be returned instead of any data.
5695 ///
5696 /// Note that in order to avoid buffering an infinite amount of data in the
5697 /// stream's send buffer, streams are only allowed to buffer outgoing data
5698 /// up to the amount that the peer allows it to send (that is, up to the
5699 /// stream's outgoing flow control capacity).
5700 ///
5701 /// This means that the number of written bytes returned can be lower than
5702 /// the length of the input buffer when the stream doesn't have enough
5703 /// capacity for the operation to complete. The application should retry the
5704 /// operation once the stream is reported as writable again.
5705 ///
5706 /// Applications should call this method only after the handshake is
5707 /// completed (whenever [`is_established()`] returns `true`) or during
5708 /// early data if enabled (whenever [`is_in_early_data()`] returns `true`).
5709 ///
5710 /// [`Done`]: enum.Error.html#variant.Done
5711 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
5712 /// [`is_established()`]: struct.Connection.html#method.is_established
5713 /// [`is_in_early_data()`]: struct.Connection.html#method.is_in_early_data
5714 ///
5715 /// ## Examples:
5716 ///
5717 /// ```no_run
5718 /// # let mut buf = [0; 512];
5719 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
5720 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
5721 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
5722 /// # let peer = "127.0.0.1:1234".parse().unwrap();
5723 /// # let local = "127.0.0.1:4321".parse().unwrap();
5724 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
5725 /// # let stream_id = 0;
5726 /// conn.stream_send(stream_id, b"hello", true)?;
5727 /// # Ok::<(), quiche::Error>(())
5728 /// ```
5729 pub fn stream_send(
5730 &mut self, stream_id: u64, buf: &[u8], fin: bool,
5731 ) -> Result<usize> {
5732 self.stream_do_send(
5733 stream_id,
5734 buf,
5735 fin,
5736 |stream: &mut stream::Stream<F>,
5737 buf: &[u8],
5738 cap: usize,
5739 fin: bool| {
5740 stream.send.write(&buf[..cap], fin).map(|v| (v, v))
5741 },
5742 )
5743 }
5744
5745 /// Writes data to a stream with zero copying, instead, it appends the
5746 /// provided buffer directly to the send queue if the capacity allows
5747 /// it.
5748 ///
5749 /// When a partial write happens (including when [`Error::Done`] is
5750 /// returned) the remaining (unwritten) buffer will also be returned.
5751 /// The application should retry the operation once the stream is
5752 /// reported as writable again.
5753 pub fn stream_send_zc(
5754 &mut self, stream_id: u64, buf: F::Buf, len: Option<usize>, fin: bool,
5755 ) -> Result<(usize, Option<F::Buf>)>
5756 where
5757 F::Buf: BufSplit,
5758 {
5759 self.stream_do_send(
5760 stream_id,
5761 buf,
5762 fin,
5763 |stream: &mut stream::Stream<F>,
5764 buf: F::Buf,
5765 cap: usize,
5766 fin: bool| {
5767 let len = len.unwrap_or(usize::MAX).min(cap);
5768 let (sent, remaining) = stream.send.append_buf(buf, len, fin)?;
5769 Ok((sent, (sent, remaining)))
5770 },
5771 )
5772 }
5773
    /// Common implementation backing the public stream-send methods.
    ///
    /// Performs all connection- and stream-level bookkeeping around a write:
    /// stream lookup/creation, flow-control accounting, blocked / writable /
    /// flushable state tracking and qlog reporting. The actual insertion of
    /// data into the stream's send buffer is delegated to `write_fn`, which
    /// receives the target stream, the caller's buffer, the capped write
    /// length and the (possibly cleared) fin flag, and returns the number of
    /// bytes buffered together with the caller-visible result value.
    fn stream_do_send<B, R, SND>(
        &mut self, stream_id: u64, buf: B, fin: bool, write_fn: SND,
    ) -> Result<R>
    where
        B: AsRef<[u8]>,
        SND: FnOnce(&mut stream::Stream<F>, B, usize, bool) -> Result<(usize, R)>,
    {
        // We can't write on the peer's unidirectional streams.
        if !stream::is_bidi(stream_id) &&
            !stream::is_local(stream_id, self.is_server)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        let len = buf.as_ref().len();

        // Mark the connection as blocked if the connection-level flow control
        // limit doesn't let us buffer all the data.
        //
        // Note that this is separate from "send capacity" as that also takes
        // congestion control into consideration.
        if self.max_tx_data - self.tx_data < len as u64 {
            self.blocked_limit = Some(self.max_tx_data);
        }

        let cap = self.tx_cap;

        // Get existing stream or create a new one.
        let stream = match self.get_or_create_stream(stream_id, true) {
            Ok(v) => v,

            Err(Error::StreamLimit) => {
                // If the local endpoint has exhausted the peer's stream count
                // limit, record the current limit so that a STREAMS_BLOCKED
                // frame can be sent.
                if self.enable_send_streams_blocked &&
                    stream::is_local(stream_id, self.is_server)
                {
                    if stream::is_bidi(stream_id) {
                        let limit = self.streams.peer_max_streams_bidi();
                        self.streams_blocked_bidi_state.update_at(limit);
                    } else {
                        let limit = self.streams.peer_max_streams_uni();
                        self.streams_blocked_uni_state.update_at(limit);
                    }
                }

                return Err(Error::StreamLimit);
            },

            Err(e) => return Err(e),
        };

        // Offset of the first newly buffered byte, for the qlog DataMoved
        // event emitted below.
        #[cfg(feature = "qlog")]
        let offset = stream.send.off_back();

        let was_writable = stream.is_writable();

        let was_flushable = stream.is_flushable();

        let is_complete = stream.is_complete();
        let is_readable = stream.is_readable();

        let priority_key = Arc::clone(&stream.priority_key);

        // Return early if the stream has been stopped, and collect its state
        // if complete.
        if let Err(Error::StreamStopped(e)) = stream.send.cap() {
            // Only collect the stream if it is complete and not readable.
            // If it is readable, it will get collected when stream_recv()
            // is used.
            //
            // The stream can't be writable if it has been stopped.
            if is_complete && !is_readable {
                let local = stream.local;
                self.streams.collect(stream_id, local);
            }

            return Err(Error::StreamStopped(e));
        };

        // Truncate the input buffer based on the connection's send capacity if
        // necessary.
        //
        // When the cap is zero, the method returns Ok(0) *only* when the passed
        // buffer is empty. We return Error::Done otherwise.
        if cap == 0 && len > 0 {
            if was_writable {
                // When `stream_writable_next()` returns a stream, the writable
                // mark is removed, but because the stream is blocked by the
                // connection-level send capacity it won't be marked as writable
                // again once the capacity increases.
                //
                // Since the stream is writable already, mark it here instead.
                self.streams.insert_writable(&priority_key);
            }

            return Err(Error::Done);
        }

        // If the whole buffer doesn't fit in the connection-level capacity,
        // clear the fin flag so the stream isn't finished by a partial write.
        let (cap, fin, blocked_by_cap) = if cap < len {
            (cap, false, true)
        } else {
            (len, fin, false)
        };

        let (sent, ret) = match write_fn(stream, buf, cap, fin) {
            Ok(v) => v,

            Err(e) => {
                self.streams.remove_writable(&priority_key);
                return Err(e);
            },
        };

        let incremental = stream.incremental;
        let priority_key = Arc::clone(&stream.priority_key);

        let flushable = stream.is_flushable();

        let writable = stream.is_writable();

        let empty_fin = len == 0 && fin;

        // A short write means the stream-level limit was hit, so record the
        // stream as blocked at its current maximum offset.
        if sent < cap {
            let max_off = stream.send.max_off();

            if stream.send.blocked_at() != Some(max_off) {
                stream.send.update_blocked_at(Some(max_off));
                self.streams.insert_blocked(stream_id, max_off);
            }
        } else {
            stream.send.update_blocked_at(None);
            self.streams.remove_blocked(stream_id);
        }

        // If the stream is now flushable push it to the flushable queue, but
        // only if it wasn't already queued.
        //
        // Consider the stream flushable also when we are sending a zero-length
        // frame that has the fin flag set.
        if (flushable || empty_fin) && !was_flushable {
            self.streams.insert_flushable(&priority_key);
        }

        if !writable {
            self.streams.remove_writable(&priority_key);
        } else if was_writable && blocked_by_cap {
            // When `stream_writable_next()` returns a stream, the writable
            // mark is removed, but because the stream is blocked by the
            // connection-level send capacity it won't be marked as writable
            // again once the capacity increases.
            //
            // Since the stream is writable already, mark it here instead.
            self.streams.insert_writable(&priority_key);
        }

        // Account for the newly buffered bytes in the connection-level send
        // capacity and flow control state.
        self.tx_cap -= sent;

        self.tx_data += sent as u64;

        self.tx_buffered += sent;
        self.check_tx_buffered_invariant();

        qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
            let ev_data = EventData::DataMoved(qlog::events::quic::DataMoved {
                stream_id: Some(stream_id),
                offset: Some(offset),
                length: Some(sent as u64),
                from: Some(DataRecipient::Application),
                to: Some(DataRecipient::Transport),
                ..Default::default()
            });

            let now = Instant::now();
            q.add_event_data_with_instant(ev_data, now).ok();
        });

        if sent == 0 && cap > 0 {
            return Err(Error::Done);
        }

        if incremental && writable {
            // Shuffle the incremental stream to the back of the queue.
            self.streams.remove_writable(&priority_key);
            self.streams.insert_writable(&priority_key);
        }

        Ok(ret)
    }
5964
5965 /// Sets the priority for a stream.
5966 ///
5967 /// A stream's priority determines the order in which stream data is sent
5968 /// on the wire (streams with lower priority are sent first). Streams are
5969 /// created with a default priority of `127`.
5970 ///
5971 /// The target stream is created if it did not exist before calling this
5972 /// method.
5973 pub fn stream_priority(
5974 &mut self, stream_id: u64, urgency: u8, incremental: bool,
5975 ) -> Result<()> {
5976 // Get existing stream or create a new one, but if the stream
5977 // has already been closed and collected, ignore the prioritization.
5978 let stream = match self.get_or_create_stream(stream_id, true) {
5979 Ok(v) => v,
5980
5981 Err(Error::Done) => return Ok(()),
5982
5983 Err(e) => return Err(e),
5984 };
5985
5986 if stream.urgency == urgency && stream.incremental == incremental {
5987 return Ok(());
5988 }
5989
5990 stream.urgency = urgency;
5991 stream.incremental = incremental;
5992
5993 let new_priority_key = Arc::new(StreamPriorityKey {
5994 urgency: stream.urgency,
5995 incremental: stream.incremental,
5996 id: stream_id,
5997 ..Default::default()
5998 });
5999
6000 let old_priority_key =
6001 std::mem::replace(&mut stream.priority_key, new_priority_key.clone());
6002
6003 self.streams
6004 .update_priority(&old_priority_key, &new_priority_key);
6005
6006 Ok(())
6007 }
6008
    /// Shuts down reading or writing from/to the specified stream.
    ///
    /// When the `direction` argument is set to [`Shutdown::Read`], outstanding
    /// data in the stream's receive buffer is dropped, and no additional data
    /// is added to it. Data received after calling this method is still
    /// validated and acked but not stored, and [`stream_recv()`] will not
    /// return it to the application. In addition, a `STOP_SENDING` frame will
    /// be sent to the peer to signal it to stop sending data.
    ///
    /// When the `direction` argument is set to [`Shutdown::Write`], outstanding
    /// data in the stream's send buffer is dropped, and no additional data is
    /// added to it. Data passed to [`stream_send()`] after calling this method
    /// will be ignored. In addition, a `RESET_STREAM` frame will be sent to the
    /// peer to signal the reset.
    ///
    /// Locally-initiated unidirectional streams can only be closed in the
    /// [`Shutdown::Write`] direction. Remotely-initiated unidirectional streams
    /// can only be closed in the [`Shutdown::Read`] direction. Using an
    /// incorrect direction will return [`InvalidStreamState`].
    ///
    /// [`Shutdown::Read`]: enum.Shutdown.html#variant.Read
    /// [`Shutdown::Write`]: enum.Shutdown.html#variant.Write
    /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
    /// [`stream_send()`]: struct.Connection.html#method.stream_send
    /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
    pub fn stream_shutdown(
        &mut self, stream_id: u64, direction: Shutdown, err: u64,
    ) -> Result<()> {
        // Don't try to stop a local unidirectional stream.
        if direction == Shutdown::Read &&
            stream::is_local(stream_id, self.is_server) &&
            !stream::is_bidi(stream_id)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        // Don't try to reset a remote unidirectional stream.
        if direction == Shutdown::Write &&
            !stream::is_local(stream_id, self.is_server) &&
            !stream::is_bidi(stream_id)
        {
            return Err(Error::InvalidStreamState(stream_id));
        }

        // Get existing stream.
        let stream = self.streams.get_mut(stream_id).ok_or(Error::Done)?;

        let priority_key = Arc::clone(&stream.priority_key);

        match direction {
            Shutdown::Read => {
                // Account the data dropped from the receive buffer as
                // consumed at the connection level.
                let consumed = stream.recv.shutdown()?;
                self.flow_control.add_consumed(consumed);

                // Ask the peer to stop sending (via STOP_SENDING) unless all
                // of the stream's data was already received.
                if !stream.recv.is_fin() {
                    self.streams.insert_stopped(stream_id, err);
                }

                // Once shutdown, the stream is guaranteed to be non-readable.
                self.streams.remove_readable(&priority_key);

                self.stopped_stream_local_count =
                    self.stopped_stream_local_count.saturating_add(1);
            },

            Shutdown::Write => {
                let (final_size, unsent) = stream.send.shutdown()?;

                // Claw back some flow control allowance from data that was
                // buffered but not actually sent before the stream was reset.
                self.tx_data = self.tx_data.saturating_sub(unsent);

                self.tx_buffered =
                    self.tx_buffered.saturating_sub(unsent as usize);

                // These drops in qlog are a bit weird, but the only way to ensure
                // that all bytes that are moved from App to Transport in
                // stream_do_send are eventually moved from Transport to Dropped.
                // Ideally we would add a Transport to Network transition also as
                // a way to indicate when bytes were transmitted vs dropped
                // without ever being sent.
                qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
                    let ev_data =
                        EventData::DataMoved(qlog::events::quic::DataMoved {
                            stream_id: Some(stream_id),
                            offset: Some(final_size),
                            length: Some(unsent),
                            from: Some(DataRecipient::Transport),
                            to: Some(DataRecipient::Dropped),
                            ..Default::default()
                        });

                    q.add_event_data_with_instant(ev_data, Instant::now()).ok();
                });

                // Update send capacity.
                self.update_tx_cap();

                // Schedule a RESET_STREAM frame carrying the application
                // error code and the stream's final size.
                self.streams.insert_reset(stream_id, err, final_size);

                // Once shutdown, the stream is guaranteed to be non-writable.
                self.streams.remove_writable(&priority_key);

                self.reset_stream_local_count =
                    self.reset_stream_local_count.saturating_add(1);
            },
        }

        Ok(())
    }
6119
6120 /// Returns the stream's send capacity in bytes.
6121 ///
6122 /// If the specified stream doesn't exist (including when it has already
6123 /// been completed and closed), the [`InvalidStreamState`] error will be
6124 /// returned.
6125 ///
6126 /// In addition, if the peer has signalled that it doesn't want to receive
6127 /// any more data from this stream by sending the `STOP_SENDING` frame, the
6128 /// [`StreamStopped`] error will be returned.
6129 ///
6130 /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
6131 /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
6132 #[inline]
6133 pub fn stream_capacity(&mut self, stream_id: u64) -> Result<usize> {
6134 if let Some(stream) = self.streams.get(stream_id) {
6135 let stream_cap = match stream.send.cap() {
6136 Ok(v) => v,
6137
6138 Err(Error::StreamStopped(e)) => {
6139 // Only collect the stream if it is complete and not
6140 // readable. If it is readable, it will get collected when
6141 // stream_recv() is used.
6142 if stream.is_complete() && !stream.is_readable() {
6143 let local = stream.local;
6144 self.streams.collect(stream_id, local);
6145 }
6146
6147 return Err(Error::StreamStopped(e));
6148 },
6149
6150 Err(e) => return Err(e),
6151 };
6152
6153 let cap = cmp::min(self.tx_cap, stream_cap);
6154 return Ok(cap);
6155 };
6156
6157 Err(Error::InvalidStreamState(stream_id))
6158 }
6159
6160 /// Returns the next stream that has data to read.
6161 ///
6162 /// Note that once returned by this method, a stream ID will not be returned
6163 /// again until it is "re-armed".
6164 ///
6165 /// The application will need to read all of the pending data on the stream,
6166 /// and new data has to be received before the stream is reported again.
6167 ///
6168 /// This is unlike the [`readable()`] method, that returns the same list of
6169 /// readable streams when called multiple times in succession.
6170 ///
6171 /// [`readable()`]: struct.Connection.html#method.readable
6172 pub fn stream_readable_next(&mut self) -> Option<u64> {
6173 let priority_key = self.streams.readable.front().clone_pointer()?;
6174
6175 self.streams.remove_readable(&priority_key);
6176
6177 Some(priority_key.id)
6178 }
6179
6180 /// Returns true if the stream has data that can be read.
6181 pub fn stream_readable(&self, stream_id: u64) -> bool {
6182 let stream = match self.streams.get(stream_id) {
6183 Some(v) => v,
6184
6185 None => return false,
6186 };
6187
6188 stream.is_readable()
6189 }
6190
6191 /// Returns the next stream that can be written to.
6192 ///
6193 /// Note that once returned by this method, a stream ID will not be returned
6194 /// again until it is "re-armed".
6195 ///
6196 /// This is unlike the [`writable()`] method, that returns the same list of
6197 /// writable streams when called multiple times in succession. It is not
6198 /// advised to use both `stream_writable_next()` and [`writable()`] on the
6199 /// same connection, as it may lead to unexpected results.
6200 ///
6201 /// The [`stream_writable()`] method can also be used to fine-tune when a
6202 /// stream is reported as writable again.
6203 ///
6204 /// [`stream_writable()`]: struct.Connection.html#method.stream_writable
6205 /// [`writable()`]: struct.Connection.html#method.writable
6206 pub fn stream_writable_next(&mut self) -> Option<u64> {
6207 // If there is not enough connection-level send capacity, none of the
6208 // streams are writable.
6209 if self.tx_cap == 0 {
6210 return None;
6211 }
6212
6213 let mut cursor = self.streams.writable.front();
6214
6215 while let Some(priority_key) = cursor.clone_pointer() {
6216 if let Some(stream) = self.streams.get(priority_key.id) {
6217 let cap = match stream.send.cap() {
6218 Ok(v) => v,
6219
6220 // Return the stream to the application immediately if it's
6221 // stopped.
6222 Err(_) =>
6223 return {
6224 self.streams.remove_writable(&priority_key);
6225
6226 Some(priority_key.id)
6227 },
6228 };
6229
6230 if cmp::min(self.tx_cap, cap) >= stream.send_lowat {
6231 self.streams.remove_writable(&priority_key);
6232 return Some(priority_key.id);
6233 }
6234 }
6235
6236 cursor.move_next();
6237 }
6238
6239 None
6240 }
6241
    /// Returns true if the stream has enough send capacity.
    ///
    /// When `len` more bytes can be buffered into the given stream's send
    /// buffer, `true` will be returned, `false` otherwise.
    ///
    /// In the latter case, if the additional data can't be buffered due to
    /// flow control limits, the peer will also be notified, and a "low send
    /// watermark" will be set for the stream, such that it is not going to be
    /// reported as writable again by [`stream_writable_next()`] until its send
    /// capacity reaches `len`.
    ///
    /// If the specified stream doesn't exist (including when it has already
    /// been completed and closed), the [`InvalidStreamState`] error will be
    /// returned.
    ///
    /// In addition, if the peer has signalled that it doesn't want to receive
    /// any more data from this stream by sending the `STOP_SENDING` frame, the
    /// [`StreamStopped`] error will be returned.
    ///
    /// [`stream_writable_next()`]: struct.Connection.html#method.stream_writable_next
    /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
    /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
    #[inline]
    pub fn stream_writable(
        &mut self, stream_id: u64, len: usize,
    ) -> Result<bool> {
        // Fast path: the stream can already fit `len` more bytes.
        if self.stream_capacity(stream_id)? >= len {
            return Ok(true);
        }

        let stream = match self.streams.get_mut(stream_id) {
            Some(v) => v,

            None => return Err(Error::InvalidStreamState(stream_id)),
        };

        // Raise the stream's low watermark so `stream_writable_next()` only
        // reports the stream again once `len` bytes can be buffered.
        stream.send_lowat = cmp::max(1, len);

        let is_writable = stream.is_writable();

        let priority_key = Arc::clone(&stream.priority_key);

        // Mark the connection as blocked if the connection-level flow control
        // limit doesn't allow buffering `len` more bytes.
        if self.max_tx_data - self.tx_data < len as u64 {
            self.blocked_limit = Some(self.max_tx_data);
        }

        // Record the stream as blocked when the stream-level limit is the
        // constraint, so the peer can be notified.
        if stream.send.cap()? < len {
            let max_off = stream.send.max_off();
            if stream.send.blocked_at() != Some(max_off) {
                stream.send.update_blocked_at(Some(max_off));
                self.streams.insert_blocked(stream_id, max_off);
            }
        } else if is_writable {
            // When `stream_writable_next()` returns a stream, the writable
            // mark is removed, but because the stream is blocked by the
            // connection-level send capacity it won't be marked as writable
            // again once the capacity increases.
            //
            // Since the stream is writable already, mark it here instead.
            self.streams.insert_writable(&priority_key);
        }

        Ok(false)
    }
6306
6307 /// Returns true if all the data has been read from the specified stream.
6308 ///
6309 /// This instructs the application that all the data received from the
6310 /// peer on the stream has been read, and there won't be anymore in the
6311 /// future.
6312 ///
6313 /// Basically this returns true when the peer either set the `fin` flag
6314 /// for the stream, or sent `RESET_STREAM`.
6315 #[inline]
6316 pub fn stream_finished(&self, stream_id: u64) -> bool {
6317 let stream = match self.streams.get(stream_id) {
6318 Some(v) => v,
6319
6320 None => return true,
6321 };
6322
6323 stream.recv.is_fin()
6324 }
6325
    /// Returns the number of bidirectional streams that can be created
    /// before the peer's stream count limit is reached.
    ///
    /// This can be useful to know if it's possible to create a bidirectional
    /// stream without trying it first.
    ///
    /// See [`peer_streams_left_uni()`] for the unidirectional equivalent.
    ///
    /// [`peer_streams_left_uni()`]: struct.Connection.html#method.peer_streams_left_uni
    #[inline]
    pub fn peer_streams_left_bidi(&self) -> u64 {
        // Delegate to the stream collection's per-peer accounting.
        self.streams.peer_streams_left_bidi()
    }
6335
    /// Returns the number of unidirectional streams that can be created
    /// before the peer's stream count limit is reached.
    ///
    /// This can be useful to know if it's possible to create a unidirectional
    /// stream without trying it first.
    ///
    /// See [`peer_streams_left_bidi()`] for the bidirectional equivalent.
    ///
    /// [`peer_streams_left_bidi()`]: struct.Connection.html#method.peer_streams_left_bidi
    #[inline]
    pub fn peer_streams_left_uni(&self) -> u64 {
        // Delegate to the stream collection's per-peer accounting.
        self.streams.peer_streams_left_uni()
    }
6345
    /// Returns an iterator over streams that have outstanding data to read.
    ///
    /// Note that the iterator will only include streams that were readable at
    /// the time the iterator itself was created (i.e. when `readable()` was
    /// called). To account for newly readable streams, the iterator needs to
    /// be created again.
    ///
    /// See also [`stream_readable_next()`], which instead returns one stream
    /// at a time and "re-arms" it only when new data arrives.
    ///
    /// [`stream_readable_next()`]: struct.Connection.html#method.stream_readable_next
    ///
    /// ## Examples:
    ///
    /// ```no_run
    /// # let mut buf = [0; 512];
    /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
    /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
    /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
    /// # let peer = "127.0.0.1:1234".parse().unwrap();
    /// # let local = socket.local_addr().unwrap();
    /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
    /// // Iterate over readable streams.
    /// for stream_id in conn.readable() {
    ///     // Stream is readable, read until there's no more data.
    ///     while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
    ///         println!("Got {} bytes on stream {}", read, stream_id);
    ///     }
    /// }
    /// # Ok::<(), quiche::Error>(())
    /// ```
    #[inline]
    pub fn readable(&self) -> StreamIter {
        self.streams.readable()
    }
6376
6377 /// Returns an iterator over streams that can be written in priority order.
6378 ///
6379 /// The priority order is based on RFC 9218 scheduling recommendations.
6380 /// Stream priority can be controlled using [`stream_priority()`]. In order
6381 /// to support fairness requirements, each time this method is called,
6382 /// internal state is updated. Therefore the iterator ordering can change
6383 /// between calls, even if no streams were added or removed.
6384 ///
6385 /// A "writable" stream is a stream that has enough flow control capacity to
6386 /// send data to the peer. To avoid buffering an infinite amount of data,
6387 /// streams are only allowed to buffer outgoing data up to the amount that
6388 /// the peer allows to send.
6389 ///
6390 /// Note that the iterator will only include streams that were writable at
6391 /// the time the iterator itself was created (i.e. when `writable()` was
6392 /// called). To account for newly writable streams, the iterator needs to be
6393 /// created again.
6394 ///
6395 /// ## Examples:
6396 ///
6397 /// ```no_run
6398 /// # let mut buf = [0; 512];
6399 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6400 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6401 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6402 /// # let local = socket.local_addr().unwrap();
6403 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6404 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6405 /// // Iterate over writable streams.
6406 /// for stream_id in conn.writable() {
6407 /// // Stream is writable, write some data.
6408 /// if let Ok(written) = conn.stream_send(stream_id, &buf, false) {
6409 /// println!("Written {} bytes on stream {}", written, stream_id);
6410 /// }
6411 /// }
6412 /// # Ok::<(), quiche::Error>(())
6413 /// ```
6414 /// [`stream_priority()`]: struct.Connection.html#method.stream_priority
6415 #[inline]
6416 pub fn writable(&self) -> StreamIter {
6417 // If there is not enough connection-level send capacity, none of the
6418 // streams are writable, so return an empty iterator.
6419 if self.tx_cap == 0 {
6420 return StreamIter::default();
6421 }
6422
6423 self.streams.writable()
6424 }
6425
6426 /// Returns the maximum possible size of egress UDP payloads.
6427 ///
6428 /// This is the maximum size of UDP payloads that can be sent, and depends
6429 /// on both the configured maximum send payload size of the local endpoint
6430 /// (as configured with [`set_max_send_udp_payload_size()`]), as well as
6431 /// the transport parameter advertised by the remote peer.
6432 ///
6433 /// Note that this value can change during the lifetime of the connection,
6434 /// but should remain stable across consecutive calls to [`send()`].
6435 ///
6436 /// [`set_max_send_udp_payload_size()`]:
6437 /// struct.Config.html#method.set_max_send_udp_payload_size
6438 /// [`send()`]: struct.Connection.html#method.send
6439 pub fn max_send_udp_payload_size(&self) -> usize {
6440 let max_datagram_size = self
6441 .paths
6442 .get_active()
6443 .ok()
6444 .map(|p| p.recovery.max_datagram_size());
6445
6446 if let Some(max_datagram_size) = max_datagram_size {
6447 if self.is_established() {
6448 // We cap the maximum packet size to 16KB or so, so that it can be
6449 // always encoded with a 2-byte varint.
6450 return cmp::min(16383, max_datagram_size);
6451 }
6452 }
6453
6454 // Allow for 1200 bytes (minimum QUIC packet size) during the
6455 // handshake.
6456 MIN_CLIENT_INITIAL_LEN
6457 }
6458
6459 /// Schedule an ack-eliciting packet on the active path.
6460 ///
6461 /// QUIC packets might not contain ack-eliciting frames during normal
6462 /// operating conditions. If the packet would already contain
6463 /// ack-eliciting frames, this method does not change any behavior.
6464 /// However, if the packet would not ordinarily contain ack-eliciting
6465 /// frames, this method ensures that a PING frame sent.
6466 ///
6467 /// Calling this method multiple times before [`send()`] has no effect.
6468 ///
6469 /// [`send()`]: struct.Connection.html#method.send
6470 pub fn send_ack_eliciting(&mut self) -> Result<()> {
6471 if self.is_closed() || self.is_draining() {
6472 return Ok(());
6473 }
6474 self.paths.get_active_mut()?.needs_ack_eliciting = true;
6475 Ok(())
6476 }
6477
6478 /// Schedule an ack-eliciting packet on the specified path.
6479 ///
6480 /// See [`send_ack_eliciting()`] for more detail. [`InvalidState`] is
6481 /// returned if there is no record of the path.
6482 ///
6483 /// [`send_ack_eliciting()`]: struct.Connection.html#method.send_ack_eliciting
6484 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
6485 pub fn send_ack_eliciting_on_path(
6486 &mut self, local: SocketAddr, peer: SocketAddr,
6487 ) -> Result<()> {
6488 if self.is_closed() || self.is_draining() {
6489 return Ok(());
6490 }
6491 let path_id = self
6492 .paths
6493 .path_id_from_addrs(&(local, peer))
6494 .ok_or(Error::InvalidState)?;
6495 self.paths.get_mut(path_id)?.needs_ack_eliciting = true;
6496 Ok(())
6497 }
6498
6499 /// Reads the first received DATAGRAM.
6500 ///
6501 /// On success the DATAGRAM's data is returned along with its size.
6502 ///
6503 /// [`Done`] is returned if there is no data to read.
6504 ///
6505 /// [`BufferTooShort`] is returned if the provided buffer is too small for
6506 /// the DATAGRAM.
6507 ///
6508 /// [`Done`]: enum.Error.html#variant.Done
6509 /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
6510 ///
6511 /// ## Examples:
6512 ///
6513 /// ```no_run
6514 /// # let mut buf = [0; 512];
6515 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6516 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6517 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6518 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6519 /// # let local = socket.local_addr().unwrap();
6520 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6521 /// let mut dgram_buf = [0; 512];
6522 /// while let Ok((len)) = conn.dgram_recv(&mut dgram_buf) {
6523 /// println!("Got {} bytes of DATAGRAM", len);
6524 /// }
6525 /// # Ok::<(), quiche::Error>(())
6526 /// ```
6527 #[inline]
6528 pub fn dgram_recv(&mut self, buf: &mut [u8]) -> Result<usize> {
6529 match self.dgram_recv_queue.pop() {
6530 Some(d) => {
6531 if d.len() > buf.len() {
6532 return Err(Error::BufferTooShort);
6533 }
6534
6535 buf[..d.len()].copy_from_slice(&d);
6536 Ok(d.len())
6537 },
6538
6539 None => Err(Error::Done),
6540 }
6541 }
6542
6543 /// Reads the first received DATAGRAM.
6544 ///
6545 /// This is the same as [`dgram_recv()`] but returns the DATAGRAM as a
6546 /// `Vec<u8>` instead of copying into the provided buffer.
6547 ///
6548 /// [`dgram_recv()`]: struct.Connection.html#method.dgram_recv
6549 #[inline]
6550 pub fn dgram_recv_vec(&mut self) -> Result<Vec<u8>> {
6551 match self.dgram_recv_queue.pop() {
6552 Some(d) => Ok(d),
6553
6554 None => Err(Error::Done),
6555 }
6556 }
6557
    /// Reads the first received DATAGRAM without removing it from the queue.
    ///
    /// On success the DATAGRAM's data is returned along with the actual number
    /// of bytes peeked. The requested length cannot exceed the DATAGRAM's
    /// actual length.
    ///
    /// [`Done`] is returned if there is no data to read.
    ///
    /// [`BufferTooShort`] is returned if the provided buffer is smaller than
    /// the number of bytes to peek.
    ///
    /// [`Done`]: enum.Error.html#variant.Done
    /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
    #[inline]
    pub fn dgram_recv_peek(&self, buf: &mut [u8], len: usize) -> Result<usize> {
        self.dgram_recv_queue.peek_front_bytes(buf, len)
    }
6575
    /// Returns the length of the first stored DATAGRAM.
    ///
    /// `None` is returned when the receive queue is empty.
    #[inline]
    pub fn dgram_recv_front_len(&self) -> Option<usize> {
        self.dgram_recv_queue.peek_front_len()
    }
6581
    /// Returns the number of items in the DATAGRAM receive queue.
    ///
    /// See [`dgram_recv_queue_byte_size()`] for the total size in bytes.
    ///
    /// [`dgram_recv_queue_byte_size()`]: struct.Connection.html#method.dgram_recv_queue_byte_size
    #[inline]
    pub fn dgram_recv_queue_len(&self) -> usize {
        self.dgram_recv_queue.len()
    }
6587
    /// Returns the total size of all items in the DATAGRAM receive queue.
    ///
    /// See [`dgram_recv_queue_len()`] for the number of queued items.
    ///
    /// [`dgram_recv_queue_len()`]: struct.Connection.html#method.dgram_recv_queue_len
    #[inline]
    pub fn dgram_recv_queue_byte_size(&self) -> usize {
        self.dgram_recv_queue.byte_size()
    }
6593
    /// Returns the number of items in the DATAGRAM send queue.
    ///
    /// See [`dgram_send_queue_byte_size()`] for the total size in bytes.
    ///
    /// [`dgram_send_queue_byte_size()`]: struct.Connection.html#method.dgram_send_queue_byte_size
    #[inline]
    pub fn dgram_send_queue_len(&self) -> usize {
        self.dgram_send_queue.len()
    }
6599
    /// Returns the total size of all items in the DATAGRAM send queue.
    ///
    /// See [`dgram_send_queue_len()`] for the number of queued items.
    ///
    /// [`dgram_send_queue_len()`]: struct.Connection.html#method.dgram_send_queue_len
    #[inline]
    pub fn dgram_send_queue_byte_size(&self) -> usize {
        self.dgram_send_queue.byte_size()
    }
6605
    /// Returns whether or not the DATAGRAM send queue is full.
    ///
    /// The queue is filled by [`dgram_send()`] and is bounded by the internal
    /// limit mentioned there.
    ///
    /// [`dgram_send()`]: struct.Connection.html#method.dgram_send
    #[inline]
    pub fn is_dgram_send_queue_full(&self) -> bool {
        self.dgram_send_queue.is_full()
    }
6611
    /// Returns whether or not the DATAGRAM recv queue is full.
    ///
    /// Queued DATAGRAMs are consumed via [`dgram_recv()`].
    ///
    /// [`dgram_recv()`]: struct.Connection.html#method.dgram_recv
    #[inline]
    pub fn is_dgram_recv_queue_full(&self) -> bool {
        self.dgram_recv_queue.is_full()
    }
6617
6618 /// Sends data in a DATAGRAM frame.
6619 ///
6620 /// [`Done`] is returned if no data was written.
6621 /// [`InvalidState`] is returned if the peer does not support DATAGRAM.
6622 /// [`BufferTooShort`] is returned if the DATAGRAM frame length is larger
6623 /// than peer's supported DATAGRAM frame length. Use
6624 /// [`dgram_max_writable_len()`] to get the largest supported DATAGRAM
6625 /// frame length.
6626 ///
6627 /// Note that there is no flow control of DATAGRAM frames, so in order to
6628 /// avoid buffering an infinite amount of frames we apply an internal
6629 /// limit.
6630 ///
6631 /// [`Done`]: enum.Error.html#variant.Done
6632 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
6633 /// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
6634 /// [`dgram_max_writable_len()`]:
6635 /// struct.Connection.html#method.dgram_max_writable_len
6636 ///
6637 /// ## Examples:
6638 ///
6639 /// ```no_run
6640 /// # let mut buf = [0; 512];
6641 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6642 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6643 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6644 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6645 /// # let local = socket.local_addr().unwrap();
6646 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6647 /// conn.dgram_send(b"hello")?;
6648 /// # Ok::<(), quiche::Error>(())
6649 /// ```
6650 pub fn dgram_send(&mut self, buf: &[u8]) -> Result<()> {
6651 let max_payload_len = match self.dgram_max_writable_len() {
6652 Some(v) => v,
6653
6654 None => return Err(Error::InvalidState),
6655 };
6656
6657 if buf.len() > max_payload_len {
6658 return Err(Error::BufferTooShort);
6659 }
6660
6661 self.dgram_send_queue.push(buf.to_vec())?;
6662
6663 let active_path = self.paths.get_active_mut()?;
6664
6665 if self.dgram_send_queue.byte_size() >
6666 active_path.recovery.cwnd_available()
6667 {
6668 active_path.recovery.update_app_limited(false);
6669 }
6670
6671 Ok(())
6672 }
6673
6674 /// Sends data in a DATAGRAM frame.
6675 ///
6676 /// This is the same as [`dgram_send()`] but takes a `Vec<u8>` instead of
6677 /// a slice.
6678 ///
6679 /// [`dgram_send()`]: struct.Connection.html#method.dgram_send
6680 pub fn dgram_send_vec(&mut self, buf: Vec<u8>) -> Result<()> {
6681 let max_payload_len = match self.dgram_max_writable_len() {
6682 Some(v) => v,
6683
6684 None => return Err(Error::InvalidState),
6685 };
6686
6687 if buf.len() > max_payload_len {
6688 return Err(Error::BufferTooShort);
6689 }
6690
6691 self.dgram_send_queue.push(buf)?;
6692
6693 let active_path = self.paths.get_active_mut()?;
6694
6695 if self.dgram_send_queue.byte_size() >
6696 active_path.recovery.cwnd_available()
6697 {
6698 active_path.recovery.update_app_limited(false);
6699 }
6700
6701 Ok(())
6702 }
6703
6704 /// Purges queued outgoing DATAGRAMs matching the predicate.
6705 ///
6706 /// In other words, remove all elements `e` such that `f(&e)` returns true.
6707 ///
6708 /// ## Examples:
6709 /// ```no_run
6710 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6711 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6712 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6713 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6714 /// # let local = socket.local_addr().unwrap();
6715 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6716 /// conn.dgram_send(b"hello")?;
6717 /// conn.dgram_purge_outgoing(&|d: &[u8]| -> bool { d[0] == 0 });
6718 /// # Ok::<(), quiche::Error>(())
6719 /// ```
6720 #[inline]
6721 pub fn dgram_purge_outgoing<FN: Fn(&[u8]) -> bool>(&mut self, f: FN) {
6722 self.dgram_send_queue.purge(f);
6723 }
6724
6725 /// Returns the maximum DATAGRAM payload that can be sent.
6726 ///
6727 /// [`None`] is returned if the peer hasn't advertised a maximum DATAGRAM
6728 /// frame size.
6729 ///
6730 /// ## Examples:
6731 ///
6732 /// ```no_run
6733 /// # let mut buf = [0; 512];
6734 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
6735 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
6736 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
6737 /// # let peer = "127.0.0.1:1234".parse().unwrap();
6738 /// # let local = socket.local_addr().unwrap();
6739 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
6740 /// if let Some(payload_size) = conn.dgram_max_writable_len() {
6741 /// if payload_size > 5 {
6742 /// conn.dgram_send(b"hello")?;
6743 /// }
6744 /// }
6745 /// # Ok::<(), quiche::Error>(())
6746 /// ```
6747 #[inline]
6748 pub fn dgram_max_writable_len(&self) -> Option<usize> {
6749 match self.peer_transport_params.max_datagram_frame_size {
6750 None => None,
6751 Some(peer_frame_len) => {
6752 let dcid = self.destination_id();
6753 // Start from the maximum packet size...
6754 let mut max_len = self.max_send_udp_payload_size();
6755 // ...subtract the Short packet header overhead...
6756 // (1 byte of pkt_len + len of dcid)
6757 max_len = max_len.saturating_sub(1 + dcid.len());
6758 // ...subtract the packet number (max len)...
6759 max_len = max_len.saturating_sub(packet::MAX_PKT_NUM_LEN);
6760 // ...subtract the crypto overhead...
6761 max_len = max_len.saturating_sub(
6762 self.crypto_ctx[packet::Epoch::Application]
6763 .crypto_overhead()?,
6764 );
6765 // ...clamp to what peer can support...
6766 max_len = cmp::min(peer_frame_len as usize, max_len);
6767 // ...subtract frame overhead, checked for underflow.
6768 // (1 byte of frame type + len of length )
6769 max_len.checked_sub(1 + frame::MAX_DGRAM_OVERHEAD)
6770 },
6771 }
6772 }
6773
6774 fn dgram_enabled(&self) -> bool {
6775 self.local_transport_params
6776 .max_datagram_frame_size
6777 .is_some()
6778 }
6779
6780 /// Returns when the next timeout event will occur.
6781 ///
6782 /// Once the timeout Instant has been reached, the [`on_timeout()`] method
6783 /// should be called. A timeout of `None` means that the timer should be
6784 /// disarmed.
6785 ///
6786 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
6787 pub fn timeout_instant(&self) -> Option<Instant> {
6788 if self.is_closed() {
6789 return None;
6790 }
6791
6792 if self.is_draining() {
6793 // Draining timer takes precedence over all other timers. If it is
6794 // set it means the connection is closing so there's no point in
6795 // processing the other timers.
6796 self.draining_timer
6797 } else {
6798 // Use the lowest timer value (i.e. "sooner") among idle and loss
6799 // detection timers. If they are both unset (i.e. `None`) then the
6800 // result is `None`, but if at least one of them is set then a
6801 // `Some(...)` value is returned.
6802 let path_timer = self
6803 .paths
6804 .iter()
6805 .filter_map(|(_, p)| p.recovery.loss_detection_timer())
6806 .min();
6807
6808 let key_update_timer = self.crypto_ctx[packet::Epoch::Application]
6809 .key_update
6810 .as_ref()
6811 .map(|key_update| key_update.timer);
6812
6813 let timers = [self.idle_timer, path_timer, key_update_timer];
6814
6815 timers.iter().filter_map(|&x| x).min()
6816 }
6817 }
6818
6819 /// Returns the amount of time until the next timeout event.
6820 ///
6821 /// Once the given duration has elapsed, the [`on_timeout()`] method should
6822 /// be called. A timeout of `None` means that the timer should be disarmed.
6823 ///
6824 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
6825 pub fn timeout(&self) -> Option<Duration> {
6826 self.timeout_instant().map(|timeout| {
6827 let now = Instant::now();
6828
6829 if timeout <= now {
6830 Duration::ZERO
6831 } else {
6832 timeout.duration_since(now)
6833 }
6834 })
6835 }
6836
    /// Processes a timeout event.
    ///
    /// If no timeout has occurred it does nothing.
    ///
    /// Timers are checked in order of precedence: draining, idle, key update
    /// and per-path loss detection. Finally, if the active path has failed, a
    /// candidate replacement path is elected (or the connection is closed).
    pub fn on_timeout(&mut self) {
        let now = Instant::now();

        if let Some(draining_timer) = self.draining_timer {
            if draining_timer <= now {
                trace!("{} draining timeout expired", self.trace_id);

                self.mark_closed();
            }

            // Draining timer takes precedence over all other timers. If it is
            // set it means the connection is closing so there's no point in
            // processing the other timers.
            return;
        }

        if let Some(timer) = self.idle_timer {
            if timer <= now {
                trace!("{} idle timeout expired", self.trace_id);

                // Idle timeout closes the connection and records it as a
                // timeout (see `is_timed_out()`); no further timers run.
                self.mark_closed();
                self.timed_out = true;
                return;
            }
        }

        if let Some(timer) = self.crypto_ctx[packet::Epoch::Application]
            .key_update
            .as_ref()
            .map(|key_update| key_update.timer)
        {
            if timer <= now {
                // Discard previous key once key update timer expired.
                let _ = self.crypto_ctx[packet::Epoch::Application]
                    .key_update
                    .take();
            }
        }

        let handshake_status = self.handshake_status();

        // Run loss detection independently on every path, accumulating the
        // lost packet/byte counts into the connection-wide totals.
        for (_, p) in self.paths.iter_mut() {
            if let Some(timer) = p.recovery.loss_detection_timer() {
                if timer <= now {
                    trace!("{} loss detection timeout expired", self.trace_id);

                    let OnLossDetectionTimeoutOutcome {
                        lost_packets,
                        lost_bytes,
                    } = p.on_loss_detection_timeout(
                        handshake_status,
                        now,
                        self.is_server,
                        &self.trace_id,
                    );

                    self.lost_count += lost_packets;
                    self.lost_bytes += lost_bytes as u64;

                    qlog_with_type!(QLOG_METRICS, self.qlog, q, {
                        p.recovery.maybe_qlog(q, now);
                    });
                }
            }
        }

        // Notify timeout events to the application.
        self.paths.notify_failed_validations();

        // If the active path failed, try to find a new candidate.
        if self.paths.get_active_path_id().is_err() {
            match self.paths.find_candidate_path() {
                Some(pid) => {
                    if self.set_active_path(pid, now).is_err() {
                        // The connection cannot continue.
                        self.mark_closed();
                    }
                },

                // The connection cannot continue.
                None => {
                    self.mark_closed();
                },
            }
        }
    }
6926
6927 /// Requests the stack to perform path validation of the proposed 4-tuple.
6928 ///
6929 /// Probing new paths requires spare Connection IDs at both the host and the
6930 /// peer sides. If it is not the case, it raises an [`OutOfIdentifiers`].
6931 ///
6932 /// The probing of new addresses can only be done by the client. The server
6933 /// can only probe network paths that were previously advertised by
6934 /// [`PathEvent::New`]. If the server tries to probe such an unseen network
6935 /// path, this call raises an [`InvalidState`].
6936 ///
6937 /// The caller might also want to probe an existing path. In such case, it
6938 /// triggers a PATH_CHALLENGE frame, but it does not require spare CIDs.
6939 ///
6940 /// A server always probes a new path it observes. Calling this method is
6941 /// hence not required to validate a new path. However, a server can still
6942 /// request an additional path validation of the proposed 4-tuple.
6943 ///
6944 /// Calling this method several times before calling [`send()`] or
6945 /// [`send_on_path()`] results in a single probe being generated. An
6946 /// application wanting to send multiple in-flight probes must call this
6947 /// method again after having sent packets.
6948 ///
6949 /// Returns the Destination Connection ID sequence number associated to that
6950 /// path.
6951 ///
6952 /// [`PathEvent::New`]: enum.PathEvent.html#variant.New
6953 /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
6954 /// [`InvalidState`]: enum.Error.html#InvalidState
6955 /// [`send()`]: struct.Connection.html#method.send
6956 /// [`send_on_path()`]: struct.Connection.html#method.send_on_path
6957 pub fn probe_path(
6958 &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
6959 ) -> Result<u64> {
6960 // We may want to probe an existing path.
6961 let pid = match self.paths.path_id_from_addrs(&(local_addr, peer_addr)) {
6962 Some(pid) => pid,
6963 None => self.create_path_on_client(local_addr, peer_addr)?,
6964 };
6965
6966 let path = self.paths.get_mut(pid)?;
6967 path.request_validation();
6968
6969 path.active_dcid_seq.ok_or(Error::InvalidState)
6970 }
6971
6972 /// Migrates the connection to a new local address `local_addr`.
6973 ///
6974 /// The behavior is similar to [`migrate()`], with the nuance that the
6975 /// connection only changes the local address, but not the peer one.
6976 ///
6977 /// See [`migrate()`] for the full specification of this method.
6978 ///
6979 /// [`migrate()`]: struct.Connection.html#method.migrate
6980 pub fn migrate_source(&mut self, local_addr: SocketAddr) -> Result<u64> {
6981 let peer_addr = self.paths.get_active()?.peer_addr();
6982 self.migrate(local_addr, peer_addr)
6983 }
6984
    /// Migrates the connection over the given network path between `local_addr`
    /// and `peer_addr`.
    ///
    /// Connection migration can only be initiated by the client. Calling this
    /// method as a server returns [`InvalidState`].
    ///
    /// To initiate voluntary migration, there should be enough Connection IDs
    /// at both sides. If this requirement is not satisfied, this call returns
    /// [`OutOfIdentifiers`].
    ///
    /// Returns the Destination Connection ID associated to that migrated path.
    ///
    /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
    /// [`InvalidState`]: enum.Error.html#InvalidState
    pub fn migrate(
        &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
    ) -> Result<u64> {
        // Only clients may initiate migration.
        if self.is_server {
            return Err(Error::InvalidState);
        }

        // If the path already exists, mark it as the active one.
        let (pid, dcid_seq) = if let Some(pid) =
            self.paths.path_id_from_addrs(&(local_addr, peer_addr))
        {
            let path = self.paths.get_mut(pid)?;

            // If it is already active, do nothing.
            if path.active() {
                return path.active_dcid_seq.ok_or(Error::OutOfIdentifiers);
            }

            // Ensures that a Source Connection ID has been dedicated to this
            // path, or a free one is available. This is only required if the
            // host uses non-zero length Source Connection IDs.
            if !self.ids.zero_length_scid() &&
                path.active_scid_seq.is_none() &&
                self.ids.available_scids() == 0
            {
                return Err(Error::OutOfIdentifiers);
            }

            // Ensures that the migrated path has a Destination Connection ID.
            let dcid_seq = if let Some(dcid_seq) = path.active_dcid_seq {
                dcid_seq
            } else {
                // No DCID assigned yet: take the lowest spare one and link it
                // to this path before activating it.
                let dcid_seq = self
                    .ids
                    .lowest_available_dcid_seq()
                    .ok_or(Error::OutOfIdentifiers)?;

                self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
                path.active_dcid_seq = Some(dcid_seq);

                dcid_seq
            };

            (pid, dcid_seq)
        } else {
            // Unknown 4-tuple: create a brand new path (client side), which
            // is expected to come with a DCID already assigned.
            let pid = self.create_path_on_client(local_addr, peer_addr)?;

            let dcid_seq = self
                .paths
                .get(pid)?
                .active_dcid_seq
                .ok_or(Error::InvalidState)?;

            (pid, dcid_seq)
        };

        // Change the active path.
        self.set_active_path(pid, Instant::now())?;

        Ok(dcid_seq)
    }
7060
7061 /// Provides additional source Connection IDs that the peer can use to reach
7062 /// this host.
7063 ///
7064 /// This triggers sending NEW_CONNECTION_ID frames if the provided Source
7065 /// Connection ID is not already present. In the case the caller tries to
7066 /// reuse a Connection ID with a different reset token, this raises an
7067 /// `InvalidState`.
7068 ///
7069 /// At any time, the peer cannot have more Destination Connection IDs than
7070 /// the maximum number of active Connection IDs it negotiated. In such case
7071 /// (i.e., when [`scids_left()`] returns 0), if the host agrees to
7072 /// request the removal of previous connection IDs, it sets the
7073 /// `retire_if_needed` parameter. Otherwise, an [`IdLimit`] is returned.
7074 ///
7075 /// Note that setting `retire_if_needed` does not prevent this function from
7076 /// returning an [`IdLimit`] in the case the caller wants to retire still
7077 /// unannounced Connection IDs.
7078 ///
7079 /// The caller is responsible for ensuring that the provided `scid` is not
7080 /// repeated several times over the connection. quiche ensures that as long
7081 /// as the provided Connection ID is still in use (i.e., not retired), it
7082 /// does not assign a different sequence number.
7083 ///
7084 /// Note that if the host uses zero-length Source Connection IDs, it cannot
7085 /// advertise Source Connection IDs and calling this method returns an
7086 /// [`InvalidState`].
7087 ///
7088 /// Returns the sequence number associated to the provided Connection ID.
7089 ///
7090 /// [`scids_left()`]: struct.Connection.html#method.scids_left
7091 /// [`IdLimit`]: enum.Error.html#IdLimit
7092 /// [`InvalidState`]: enum.Error.html#InvalidState
7093 pub fn new_scid(
7094 &mut self, scid: &ConnectionId, reset_token: u128, retire_if_needed: bool,
7095 ) -> Result<u64> {
7096 self.ids.new_scid(
7097 scid.to_vec().into(),
7098 Some(reset_token),
7099 true,
7100 None,
7101 retire_if_needed,
7102 )
7103 }
7104
    /// Returns the number of source Connection IDs that are active. This is
    /// only meaningful if the host uses non-zero length Source Connection IDs.
    ///
    /// Active IDs are those that have not been retired yet.
    pub fn active_scids(&self) -> usize {
        self.ids.active_source_cids()
    }
7110
7111 /// Returns the number of source Connection IDs that should be provided
7112 /// to the peer without exceeding the limit it advertised.
7113 ///
7114 /// This will automatically limit the number of Connection IDs to the
7115 /// minimum between the locally configured active connection ID limit,
7116 /// and the one sent by the peer.
7117 ///
7118 /// To obtain the maximum possible value allowed by the peer an application
7119 /// can instead inspect the [`peer_active_conn_id_limit`] value.
7120 ///
7121 /// [`peer_active_conn_id_limit`]: struct.Stats.html#structfield.peer_active_conn_id_limit
7122 #[inline]
7123 pub fn scids_left(&self) -> usize {
7124 let max_active_source_cids = cmp::min(
7125 self.peer_transport_params.active_conn_id_limit,
7126 self.local_transport_params.active_conn_id_limit,
7127 ) as usize;
7128
7129 max_active_source_cids - self.active_scids()
7130 }
7131
    /// Requests the retirement of the destination Connection ID used by the
    /// host to reach its peer.
    ///
    /// This triggers sending RETIRE_CONNECTION_ID frames.
    ///
    /// If the application tries to retire a non-existing Destination Connection
    /// ID sequence number, or if it uses zero-length Destination Connection ID,
    /// this method returns an [`InvalidState`].
    ///
    /// At any time, the host must have at least one Destination ID. If the
    /// application tries to retire the last one, or if the caller tries to
    /// retire the destination Connection ID used by the current active path
    /// while having neither spare Destination Connection IDs nor validated
    /// network paths, this method returns an [`OutOfIdentifiers`]. This
    /// behavior prevents the caller from stalling the connection due to the
    /// lack of validated path to send non-probing packets.
    ///
    /// [`InvalidState`]: enum.Error.html#InvalidState
    /// [`OutOfIdentifiers`]: enum.Error.html#OutOfIdentifiers
    pub fn retire_dcid(&mut self, dcid_seq: u64) -> Result<()> {
        // Zero-length DCIDs cannot be retired at all.
        if self.ids.zero_length_dcid() {
            return Err(Error::InvalidState);
        }

        let active_path_dcid_seq = self
            .paths
            .get_active()?
            .active_dcid_seq
            .ok_or(Error::InvalidState)?;

        let active_path_id = self.paths.get_active_path_id()?;

        // Refuse to retire the DCID of the active path when there is neither
        // a spare DCID nor another usable path, as that would stall the
        // connection.
        if active_path_dcid_seq == dcid_seq &&
            self.ids.lowest_available_dcid_seq().is_none() &&
            !self
                .paths
                .iter()
                .any(|(pid, p)| pid != active_path_id && p.usable())
        {
            return Err(Error::OutOfIdentifiers);
        }

        if let Some(pid) = self.ids.retire_dcid(dcid_seq)? {
            // The retired Destination CID was associated to a given path. Let's
            // find an available DCID to associate to that path.
            let path = self.paths.get_mut(pid)?;
            let dcid_seq = self.ids.lowest_available_dcid_seq();

            if let Some(dcid_seq) = dcid_seq {
                self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
            }

            // May be `None` if no replacement DCID was available.
            path.active_dcid_seq = dcid_seq;
        }

        Ok(())
    }
7189
    /// Processes path-specific events.
    ///
    /// On success it returns a [`PathEvent`], or `None` when there are no
    /// events to report. Please refer to [`PathEvent`] for the exhaustive event
    /// list.
    ///
    /// Note that all events are edge-triggered, meaning that once reported they
    /// will not be reported again by calling this method again, until the event
    /// is re-armed.
    ///
    /// [`PathEvent`]: enum.PathEvent.html
    pub fn path_event_next(&mut self) -> Option<PathEvent> {
        // Events are drained from the paths' internal queue.
        self.paths.pop_event()
    }
7204
    /// Returns the number of source Connection IDs that are retired.
    ///
    /// Retired IDs can be drained with [`retired_scid_next()`].
    ///
    /// [`retired_scid_next()`]: struct.Connection.html#method.retired_scid_next
    pub fn retired_scids(&self) -> usize {
        self.ids.retired_source_cids()
    }
7209
    /// Returns a source `ConnectionId` that has been retired.
    ///
    /// On success it returns a [`ConnectionId`], or `None` when there are no
    /// more retired connection IDs.
    ///
    /// [`ConnectionId`]: struct.ConnectionId.html
    pub fn retired_scid_next(&mut self) -> Option<ConnectionId<'static>> {
        // Pops one entry from the registry's retired-SCID queue.
        self.ids.pop_retired_scid()
    }
7219
    /// Returns the number of spare Destination Connection IDs, i.e.,
    /// Destination Connection IDs that are still unused.
    ///
    /// Note that this function returns 0 if the host uses zero length
    /// Destination Connection IDs.
    pub fn available_dcids(&self) -> usize {
        // Delegates to the identifiers registry.
        self.ids.available_dcids()
    }
7228
7229 /// Returns an iterator over destination `SockAddr`s whose association
7230 /// with `from` forms a known QUIC path on which packets can be sent to.
7231 ///
7232 /// This function is typically used in combination with [`send_on_path()`].
7233 ///
7234 /// Note that the iterator includes all the possible combination of
7235 /// destination `SockAddr`s, even those whose sending is not required now.
7236 /// In other words, this is another way for the application to recall from
7237 /// past [`PathEvent::New`] events.
7238 ///
7239 /// [`PathEvent::New`]: enum.PathEvent.html#variant.New
7240 /// [`send_on_path()`]: struct.Connection.html#method.send_on_path
7241 ///
7242 /// ## Examples:
7243 ///
7244 /// ```no_run
7245 /// # let mut out = [0; 512];
7246 /// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
7247 /// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
7248 /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
7249 /// # let local = socket.local_addr().unwrap();
7250 /// # let peer = "127.0.0.1:1234".parse().unwrap();
7251 /// # let mut conn = quiche::accept(&scid, None, local, peer, &mut config)?;
7252 /// // Iterate over possible destinations for the given local `SockAddr`.
7253 /// for dest in conn.paths_iter(local) {
7254 /// loop {
7255 /// let (write, send_info) =
7256 /// match conn.send_on_path(&mut out, Some(local), Some(dest)) {
7257 /// Ok(v) => v,
7258 ///
7259 /// Err(quiche::Error::Done) => {
7260 /// // Done writing for this destination.
7261 /// break;
7262 /// },
7263 ///
7264 /// Err(e) => {
7265 /// // An error occurred, handle it.
7266 /// break;
7267 /// },
7268 /// };
7269 ///
7270 /// socket.send_to(&out[..write], &send_info.to).unwrap();
7271 /// }
7272 /// }
7273 /// # Ok::<(), quiche::Error>(())
7274 /// ```
7275 #[inline]
7276 pub fn paths_iter(&self, from: SocketAddr) -> SocketAddrIter {
7277 // Instead of trying to identify whether packets will be sent on the
7278 // given 4-tuple, simply filter paths that cannot be used.
7279 SocketAddrIter {
7280 sockaddrs: self
7281 .paths
7282 .iter()
7283 .filter(|(_, p)| p.active_dcid_seq.is_some())
7284 .filter(|(_, p)| p.usable() || p.probing_required())
7285 .filter(|(_, p)| p.local_addr() == from)
7286 .map(|(_, p)| p.peer_addr())
7287 .collect(),
7288
7289 index: 0,
7290 }
7291 }
7292
7293 /// Closes the connection with the given error and reason.
7294 ///
7295 /// The `app` parameter specifies whether an application close should be
7296 /// sent to the peer. Otherwise a normal connection close is sent.
7297 ///
7298 /// If `app` is true but the connection is not in a state that is safe to
7299 /// send an application error (not established nor in early data), in
7300 /// accordance with [RFC
7301 /// 9000](https://www.rfc-editor.org/rfc/rfc9000.html#section-10.2.3-3), the
7302 /// error code is changed to APPLICATION_ERROR and the reason phrase is
7303 /// cleared.
7304 ///
7305 /// Returns [`Done`] if the connection had already been closed.
7306 ///
7307 /// Note that the connection will not be closed immediately. An application
7308 /// should continue calling the [`recv()`], [`send()`], [`timeout()`] and
7309 /// [`on_timeout()`] methods as normal, until the [`is_closed()`] method
7310 /// returns `true`.
7311 ///
7312 /// [`Done`]: enum.Error.html#variant.Done
7313 /// [`recv()`]: struct.Connection.html#method.recv
7314 /// [`send()`]: struct.Connection.html#method.send
7315 /// [`timeout()`]: struct.Connection.html#method.timeout
7316 /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
7317 /// [`is_closed()`]: struct.Connection.html#method.is_closed
7318 pub fn close(&mut self, app: bool, err: u64, reason: &[u8]) -> Result<()> {
7319 if self.is_closed() || self.is_draining() {
7320 return Err(Error::Done);
7321 }
7322
7323 if self.local_error.is_some() {
7324 return Err(Error::Done);
7325 }
7326
7327 let is_safe_to_send_app_data =
7328 self.is_established() || self.is_in_early_data();
7329
7330 if app && !is_safe_to_send_app_data {
7331 // Clear error information.
7332 self.local_error = Some(ConnectionError {
7333 is_app: false,
7334 error_code: 0x0c,
7335 reason: vec![],
7336 });
7337 } else {
7338 self.local_error = Some(ConnectionError {
7339 is_app: app,
7340 error_code: err,
7341 reason: reason.to_vec(),
7342 });
7343 }
7344
7345 // When no packet was successfully processed close connection immediately.
7346 if self.recv_count == 0 {
7347 self.mark_closed();
7348 }
7349
7350 Ok(())
7351 }
7352
    /// Returns a string uniquely representing the connection.
    ///
    /// This can be used for logging purposes to differentiate between multiple
    /// connections.
    #[inline]
    pub fn trace_id(&self) -> &str {
        // Borrow of the internal identifier; no allocation.
        &self.trace_id
    }
7361
    /// Returns the negotiated ALPN protocol.
    ///
    /// If no protocol has been negotiated, the returned value is empty.
    #[inline]
    pub fn application_proto(&self) -> &[u8] {
        // `alpn` remains empty until negotiation completes.
        self.alpn.as_ref()
    }
7369
    /// Returns the server name requested by the client.
    ///
    /// Returns `None` when no server name was provided.
    #[inline]
    pub fn server_name(&self) -> Option<&str> {
        // Provided by the TLS handshake layer.
        self.handshake.server_name()
    }
7375
    /// Returns the peer's leaf certificate (if any) as a DER-encoded buffer.
    ///
    /// Returns `None` when the handshake has no peer certificate available.
    #[inline]
    pub fn peer_cert(&self) -> Option<&[u8]> {
        self.handshake.peer_cert()
    }
7381
    /// Returns the peer's certificate chain (if any) as a vector of DER-encoded
    /// buffers.
    ///
    /// The certificate at index 0 is the peer's leaf certificate, the other
    /// certificates (if any) are the chain certificate authorities used to
    /// sign the leaf certificate.
    #[inline]
    pub fn peer_cert_chain(&self) -> Option<Vec<&[u8]>> {
        // Provided by the TLS handshake layer.
        self.handshake.peer_cert_chain()
    }
7392
    /// Returns the serialized cryptographic session for the connection.
    ///
    /// This can be used by a client to cache a connection's session, and resume
    /// it later using the [`set_session()`] method.
    ///
    /// [`set_session()`]: struct.Connection.html#method.set_session
    #[inline]
    pub fn session(&self) -> Option<&[u8]> {
        // Borrows the serialized session bytes, if any are stored.
        self.session.as_deref()
    }
7403
7404 /// Returns the source connection ID.
7405 ///
7406 /// When there are multiple IDs, and if there is an active path, the ID used
7407 /// on that path is returned. Otherwise the oldest ID is returned.
7408 ///
7409 /// Note that the value returned can change throughout the connection's
7410 /// lifetime.
7411 #[inline]
7412 pub fn source_id(&self) -> ConnectionId<'_> {
7413 if let Ok(path) = self.paths.get_active() {
7414 if let Some(active_scid_seq) = path.active_scid_seq {
7415 if let Ok(e) = self.ids.get_scid(active_scid_seq) {
7416 return ConnectionId::from_ref(e.cid.as_ref());
7417 }
7418 }
7419 }
7420
7421 let e = self.ids.oldest_scid();
7422 ConnectionId::from_ref(e.cid.as_ref())
7423 }
7424
    /// Returns all active source connection IDs.
    ///
    /// An iterator is returned for all active IDs (i.e. ones that have not
    /// been explicitly retired yet).
    #[inline]
    pub fn source_ids(&self) -> impl Iterator<Item = &ConnectionId<'_>> {
        // Iteration is delegated to the identifiers registry.
        self.ids.scids_iter()
    }
7433
7434 /// Returns the destination connection ID.
7435 ///
7436 /// Note that the value returned can change throughout the connection's
7437 /// lifetime.
7438 #[inline]
7439 pub fn destination_id(&self) -> ConnectionId<'_> {
7440 if let Ok(path) = self.paths.get_active() {
7441 if let Some(active_dcid_seq) = path.active_dcid_seq {
7442 if let Ok(e) = self.ids.get_dcid(active_dcid_seq) {
7443 return ConnectionId::from_ref(e.cid.as_ref());
7444 }
7445 }
7446 }
7447
7448 let e = self.ids.oldest_dcid();
7449 ConnectionId::from_ref(e.cid.as_ref())
7450 }
7451
7452 /// Returns the PMTU for the active path if it exists.
7453 ///
7454 /// This requires no additonal packets to be sent but simply checks if PMTUD
7455 /// has completed and has found a valid PMTU.
7456 #[inline]
7457 pub fn pmtu(&self) -> Option<usize> {
7458 if let Ok(path) = self.paths.get_active() {
7459 path.pmtud.as_ref().and_then(|pmtud| pmtud.get_pmtu())
7460 } else {
7461 None
7462 }
7463 }
7464
7465 /// Revalidates the PMTU for the active path by sending a new probe packet
7466 /// of PMTU size. If the probe is dropped PMTUD will restart and find a new
7467 /// valid PMTU.
7468 #[inline]
7469 pub fn revalidate_pmtu(&mut self) {
7470 if let Ok(active_path) = self.paths.get_active_mut() {
7471 if let Some(pmtud) = active_path.pmtud.as_mut() {
7472 pmtud.revalidate_pmtu();
7473 }
7474 }
7475 }
7476
    /// Returns true if the connection handshake is complete.
    #[inline]
    pub fn is_established(&self) -> bool {
        // Flag set once the handshake completes.
        self.handshake_completed
    }
7482
    /// Returns true if the connection is resumed.
    ///
    /// Reported by the TLS handshake layer.
    #[inline]
    pub fn is_resumed(&self) -> bool {
        self.handshake.is_resumed()
    }
7488
    /// Returns true if the connection has a pending handshake that has
    /// progressed enough to send or receive early data.
    #[inline]
    pub fn is_in_early_data(&self) -> bool {
        // Reported by the TLS handshake layer.
        self.handshake.is_in_early_data()
    }
7495
    /// Returns the early data reason for the connection.
    ///
    /// This status can be useful for logging and debugging. See [BoringSSL]
    /// documentation for a definition of the reasons.
    ///
    /// [BoringSSL]: https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#ssl_early_data_reason_t
    #[inline]
    pub fn early_data_reason(&self) -> u32 {
        // Raw reason code reported by the TLS handshake layer.
        self.handshake.early_data_reason()
    }
7506
7507 /// Returns whether there is stream or DATAGRAM data available to read.
7508 #[inline]
7509 pub fn is_readable(&self) -> bool {
7510 self.streams.has_readable() || self.dgram_recv_front_len().is_some()
7511 }
7512
7513 /// Returns whether the network path with local address `from` and remote
7514 /// address `peer` has been validated.
7515 ///
7516 /// If the 4-tuple does not exist over the connection, returns an
7517 /// [`InvalidState`].
7518 ///
7519 /// [`InvalidState`]: enum.Error.html#variant.InvalidState
7520 pub fn is_path_validated(
7521 &self, from: SocketAddr, to: SocketAddr,
7522 ) -> Result<bool> {
7523 let pid = self
7524 .paths
7525 .path_id_from_addrs(&(from, to))
7526 .ok_or(Error::InvalidState)?;
7527
7528 Ok(self.paths.get(pid)?.validated())
7529 }
7530
    /// Returns true if the connection is draining.
    ///
    /// If this returns `true`, the connection object cannot yet be dropped, but
    /// no new application data can be sent or received. An application should
    /// continue calling the [`recv()`], [`timeout()`], and [`on_timeout()`]
    /// methods as normal, until the [`is_closed()`] method returns `true`.
    ///
    /// In contrast, once `is_draining()` returns `true`, calling [`send()`]
    /// is not required because no new outgoing packets will be generated.
    ///
    /// [`recv()`]: struct.Connection.html#method.recv
    /// [`send()`]: struct.Connection.html#method.send
    /// [`timeout()`]: struct.Connection.html#method.timeout
    /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
    /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn is_draining(&self) -> bool {
        // Draining state is signalled by an armed draining timer.
        self.draining_timer.is_some()
    }
7550
    /// Returns true if the connection is closed.
    ///
    /// If this returns true, the connection object can be dropped.
    #[inline]
    pub fn is_closed(&self) -> bool {
        self.closed
    }
7558
    /// Returns true if the connection was closed due to the idle timeout.
    ///
    /// The flag is set when the idle timer expires in `on_timeout()`.
    #[inline]
    pub fn is_timed_out(&self) -> bool {
        self.timed_out
    }
7564
    /// Returns the error received from the peer, if any.
    ///
    /// Note that a `Some` return value does not necessarily imply
    /// [`is_closed()`] or any other connection state.
    ///
    /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn peer_error(&self) -> Option<&ConnectionError> {
        self.peer_error.as_ref()
    }
7575
    /// Returns the error [`close()`] was called with, or internally
    /// created quiche errors, if any.
    ///
    /// Note that a `Some` return value does not necessarily imply
    /// [`is_closed()`] or any other connection state.
    /// `Some` also does not guarantee that the error has been sent to
    /// or received by the peer.
    ///
    /// [`close()`]: struct.Connection.html#method.close
    /// [`is_closed()`]: struct.Connection.html#method.is_closed
    #[inline]
    pub fn local_error(&self) -> Option<&ConnectionError> {
        self.local_error.as_ref()
    }
7590
    /// Collects and returns statistics about the connection.
    ///
    /// This builds a snapshot of the connection-wide counters; calling it does
    /// not modify any state.
    #[inline]
    pub fn stats(&self) -> Stats {
        Stats {
            recv: self.recv_count,
            sent: self.sent_count,
            lost: self.lost_count,
            spurious_lost: self.spurious_lost_count,
            retrans: self.retrans_count,
            sent_bytes: self.sent_bytes,
            recv_bytes: self.recv_bytes,
            acked_bytes: self.acked_bytes,
            lost_bytes: self.lost_bytes,
            stream_retrans_bytes: self.stream_retrans_bytes,
            dgram_recv: self.dgram_recv_count,
            dgram_sent: self.dgram_sent_count,
            paths_count: self.paths.len(),
            reset_stream_count_local: self.reset_stream_local_count,
            stopped_stream_count_local: self.stopped_stream_local_count,
            reset_stream_count_remote: self.reset_stream_remote_count,
            stopped_stream_count_remote: self.stopped_stream_remote_count,
            data_blocked_sent_count: self.data_blocked_sent_count,
            stream_data_blocked_sent_count: self.stream_data_blocked_sent_count,
            data_blocked_recv_count: self.data_blocked_recv_count,
            stream_data_blocked_recv_count: self.stream_data_blocked_recv_count,
            streams_blocked_bidi_recv_count: self.streams_blocked_bidi_recv_count,
            streams_blocked_uni_recv_count: self.streams_blocked_uni_recv_count,
            path_challenge_rx_count: self.path_challenge_rx_count,
            // Derived from per-path durations, see bytes_in_flight_duration().
            bytes_in_flight_duration: self.bytes_in_flight_duration(),
            tx_buffered_state: self.tx_buffered_state,
        }
    }
7623
7624 /// Returns the sum of the durations when each path in the
7625 /// connection was actively sending bytes or waiting for acks.
7626 /// Note that this could result in a duration that is longer than
7627 /// the actual connection duration in cases where multiple paths
7628 /// are active for extended periods of time. In practice only 1
7629 /// path is typically active at a time.
7630 /// TODO revisit computation if in the future multiple paths are
7631 /// often active at the same time.
7632 fn bytes_in_flight_duration(&self) -> Duration {
7633 self.paths.iter().fold(Duration::ZERO, |acc, (_, path)| {
7634 acc + path.bytes_in_flight_duration()
7635 })
7636 }
7637
7638 /// Returns reference to peer's transport parameters. Returns `None` if we
7639 /// have not yet processed the peer's transport parameters.
7640 pub fn peer_transport_params(&self) -> Option<&TransportParams> {
7641 if !self.parsed_peer_transport_params {
7642 return None;
7643 }
7644
7645 Some(&self.peer_transport_params)
7646 }
7647
7648 /// Collects and returns statistics about each known path for the
7649 /// connection.
7650 pub fn path_stats(&self) -> impl Iterator<Item = PathStats> + '_ {
7651 self.paths.iter().map(|(_, p)| p.stats())
7652 }
7653
    /// Returns whether or not this is a server-side connection.
    pub fn is_server(&self) -> bool {
        // Simple accessor for the connection's role flag.
        self.is_server
    }
7658
    /// Encodes the local transport parameters and hands them to the TLS
    /// handshake state so they can be carried in the handshake.
    fn encode_transport_params(&mut self) -> Result<()> {
        self.handshake.set_quic_transport_params(
            &self.local_transport_params,
            self.is_server,
        )
    }
7665
    /// Validates the peer's transport parameters and applies them to the
    /// connection.
    ///
    /// The connection-ID related parameters are authenticated against the
    /// values observed during the handshake (as required by RFC 9000
    /// Section 7.3) before any parameter takes effect.
    fn parse_peer_transport_params(
        &mut self, peer_params: TransportParams,
    ) -> Result<()> {
        // Validate initial_source_connection_id: it must match the
        // destination connection ID we are currently sending to.
        match &peer_params.initial_source_connection_id {
            Some(v) if v != &self.destination_id() =>
                return Err(Error::InvalidTransportParam),

            Some(_) => (),

            // initial_source_connection_id must be sent by
            // both endpoints.
            None => return Err(Error::InvalidTransportParam),
        }

        // Validate original_destination_connection_id: only checked when
        // the original DCID was recorded on this connection.
        if let Some(odcid) = &self.odcid {
            match &peer_params.original_destination_connection_id {
                Some(v) if v != odcid =>
                    return Err(Error::InvalidTransportParam),

                Some(_) => (),

                // original_destination_connection_id must be
                // sent by the server.
                None if !self.is_server =>
                    return Err(Error::InvalidTransportParam),

                None => (),
            }
        }

        // Validate retry_source_connection_id: only checked when a Retry
        // source CID was recorded (i.e. a Retry took place).
        if let Some(rscid) = &self.rscid {
            match &peer_params.retry_source_connection_id {
                Some(v) if v != rscid =>
                    return Err(Error::InvalidTransportParam),

                Some(_) => (),

                // retry_source_connection_id must be sent by
                // the server.
                None => return Err(Error::InvalidTransportParam),
            }
        }

        // All checks passed: apply the parameters to the connection state.
        self.process_peer_transport_params(peer_params)?;

        self.parsed_peer_transport_params = true;

        Ok(())
    }
7718
    /// Applies previously validated peer transport parameters to the
    /// connection state.
    ///
    /// Updates connection/stream flow control limits, the peer's max ack
    /// delay, the maximum datagram size used by recovery (and PMTUD, when
    /// probing), the source connection ID limit, and finally stores the
    /// parameters themselves.
    fn process_peer_transport_params(
        &mut self, peer_params: TransportParams,
    ) -> Result<()> {
        self.max_tx_data = peer_params.initial_max_data;

        // Update send capacity.
        self.update_tx_cap();

        self.streams
            .update_peer_max_streams_bidi(peer_params.initial_max_streams_bidi);
        self.streams
            .update_peer_max_streams_uni(peer_params.initial_max_streams_uni);

        let max_ack_delay = Duration::from_millis(peer_params.max_ack_delay);

        self.recovery_config.max_ack_delay = max_ack_delay;

        let active_path = self.paths.get_active_mut()?;

        active_path.recovery.update_max_ack_delay(max_ack_delay);

        // When a PMTUD probe is pending, cap the probe size by the peer's
        // advertised max_udp_payload_size; otherwise just adopt the peer's
        // limit as the maximum datagram size.
        if active_path
            .pmtud
            .as_ref()
            .map(|pmtud| pmtud.should_probe())
            .unwrap_or(false)
        {
            active_path.recovery.pmtud_update_max_datagram_size(
                active_path
                    .pmtud
                    .as_mut()
                    .expect("PMTUD existence verified above")
                    .get_probe_size()
                    .min(peer_params.max_udp_payload_size as usize),
            );
        } else {
            active_path.recovery.update_max_datagram_size(
                peer_params.max_udp_payload_size as usize,
            );
        }

        // Record the max_active_conn_id parameter advertised by the peer.
        self.ids
            .set_source_conn_id_limit(peer_params.active_conn_id_limit);

        self.peer_transport_params = peer_params;

        Ok(())
    }
7768
7769 /// Continues the handshake.
7770 ///
7771 /// If the connection is already established, it does nothing.
7772 fn do_handshake(&mut self, now: Instant) -> Result<()> {
7773 let mut ex_data = tls::ExData {
7774 application_protos: &self.application_protos,
7775
7776 crypto_ctx: &mut self.crypto_ctx,
7777
7778 session: &mut self.session,
7779
7780 local_error: &mut self.local_error,
7781
7782 keylog: self.keylog.as_mut(),
7783
7784 trace_id: &self.trace_id,
7785
7786 local_transport_params: self.local_transport_params.clone(),
7787
7788 recovery_config: self.recovery_config,
7789
7790 tx_cap_factor: self.tx_cap_factor,
7791
7792 pmtud: None,
7793
7794 is_server: self.is_server,
7795 };
7796
7797 if self.handshake_completed {
7798 return self.handshake.process_post_handshake(&mut ex_data);
7799 }
7800
7801 match self.handshake.do_handshake(&mut ex_data) {
7802 Ok(_) => (),
7803
7804 Err(Error::Done) => {
7805 // Apply in-handshake configuration from callbacks if the path's
7806 // Recovery module can still be reinitilized.
7807 if self
7808 .paths
7809 .get_active()
7810 .map(|p| p.can_reinit_recovery())
7811 .unwrap_or(false)
7812 {
7813 if ex_data.recovery_config != self.recovery_config {
7814 if let Ok(path) = self.paths.get_active_mut() {
7815 self.recovery_config = ex_data.recovery_config;
7816 path.reinit_recovery(&self.recovery_config);
7817 }
7818 }
7819
7820 if ex_data.tx_cap_factor != self.tx_cap_factor {
7821 self.tx_cap_factor = ex_data.tx_cap_factor;
7822 }
7823
7824 if let Some((discover, max_probes)) = ex_data.pmtud {
7825 self.paths.set_discover_pmtu_on_existing_paths(
7826 discover,
7827 self.recovery_config.max_send_udp_payload_size,
7828 max_probes,
7829 );
7830 }
7831
7832 if ex_data.local_transport_params !=
7833 self.local_transport_params
7834 {
7835 self.streams.set_max_streams_bidi(
7836 ex_data
7837 .local_transport_params
7838 .initial_max_streams_bidi,
7839 );
7840
7841 self.local_transport_params =
7842 ex_data.local_transport_params;
7843 }
7844 }
7845
7846 // Try to parse transport parameters as soon as the first flight
7847 // of handshake data is processed.
7848 //
7849 // This is potentially dangerous as the handshake hasn't been
7850 // completed yet, though it's required to be able to send data
7851 // in 0.5 RTT.
7852 let raw_params = self.handshake.quic_transport_params();
7853
7854 if !self.parsed_peer_transport_params && !raw_params.is_empty() {
7855 let peer_params = TransportParams::decode(
7856 raw_params,
7857 self.is_server,
7858 self.peer_transport_params_track_unknown,
7859 )?;
7860
7861 self.parse_peer_transport_params(peer_params)?;
7862 }
7863
7864 return Ok(());
7865 },
7866
7867 Err(e) => return Err(e),
7868 };
7869
7870 self.handshake_completed = self.handshake.is_completed();
7871
7872 self.alpn = self.handshake.alpn_protocol().to_vec();
7873
7874 let raw_params = self.handshake.quic_transport_params();
7875
7876 if !self.parsed_peer_transport_params && !raw_params.is_empty() {
7877 let peer_params = TransportParams::decode(
7878 raw_params,
7879 self.is_server,
7880 self.peer_transport_params_track_unknown,
7881 )?;
7882
7883 self.parse_peer_transport_params(peer_params)?;
7884 }
7885
7886 if self.handshake_completed {
7887 // The handshake is considered confirmed at the server when the
7888 // handshake completes, at which point we can also drop the
7889 // handshake epoch.
7890 if self.is_server {
7891 self.handshake_confirmed = true;
7892
7893 self.drop_epoch_state(packet::Epoch::Handshake, now);
7894 }
7895
7896 // Once the handshake is completed there's no point in processing
7897 // 0-RTT packets anymore, so clear the buffer now.
7898 self.undecryptable_pkts.clear();
7899
7900 trace!("{} connection established: proto={:?} cipher={:?} curve={:?} sigalg={:?} resumed={} {:?}",
7901 &self.trace_id,
7902 std::str::from_utf8(self.application_proto()),
7903 self.handshake.cipher(),
7904 self.handshake.curve(),
7905 self.handshake.sigalg(),
7906 self.handshake.is_resumed(),
7907 self.peer_transport_params);
7908 }
7909
7910 Ok(())
7911 }
7912
    /// Selects the packet type for the next outgoing packet.
    ///
    /// `send_pid` identifies the path the packet would be sent on.
    /// Returns `Err(Error::Done)` when there is nothing to send.
    fn write_pkt_type(&self, send_pid: usize) -> Result<Type> {
        // On error send packet in the latest epoch available, but only send
        // 1-RTT ones when the handshake is completed.
        if self
            .local_error
            .as_ref()
            .is_some_and(|conn_err| !conn_err.is_app)
        {
            let epoch = match self.handshake.write_level() {
                crypto::Level::Initial => packet::Epoch::Initial,
                crypto::Level::ZeroRTT => unreachable!(),
                crypto::Level::Handshake => packet::Epoch::Handshake,
                crypto::Level::OneRTT => packet::Epoch::Application,
            };

            if !self.handshake_confirmed {
                match epoch {
                    // Downgrade the epoch to Handshake as the handshake is not
                    // completed yet.
                    packet::Epoch::Application => return Ok(Type::Handshake),

                    // Downgrade the epoch to Initial as the remote peer might
                    // not be able to decrypt handshake packets yet.
                    packet::Epoch::Handshake
                        if self.crypto_ctx[packet::Epoch::Initial].has_keys() =>
                        return Ok(Type::Initial),

                    _ => (),
                };
            }

            return Ok(Type::from_epoch(epoch));
        }

        // Prefer the earliest epoch that has pending work: handshake data,
        // ACK-ready spaces, lost frames or PTO probes.
        for &epoch in packet::Epoch::epochs(
            packet::Epoch::Initial..=packet::Epoch::Application,
        ) {
            let crypto_ctx = &self.crypto_ctx[epoch];
            let pkt_space = &self.pkt_num_spaces[epoch];

            // Only send packets in a space when we have the send keys for it.
            if crypto_ctx.crypto_seal.is_none() {
                continue;
            }

            // We are ready to send data for this packet number space.
            if crypto_ctx.data_available() || pkt_space.ready() {
                return Ok(Type::from_epoch(epoch));
            }

            // There are lost frames in this packet number space.
            for (_, p) in self.paths.iter() {
                if p.recovery.has_lost_frames(epoch) {
                    return Ok(Type::from_epoch(epoch));
                }

                // We need to send PTO probe packets.
                if p.recovery.loss_probes(epoch) > 0 {
                    return Ok(Type::from_epoch(epoch));
                }
            }
        }

        // If there are flushable, almost full or blocked streams, use the
        // Application epoch.
        let send_path = self.paths.get(send_pid)?;
        if (self.is_established() || self.is_in_early_data()) &&
            (self.should_send_handshake_done() ||
                self.flow_control.should_update_max_data() ||
                self.should_send_max_data ||
                self.blocked_limit.is_some() ||
                self.streams_blocked_bidi_state
                    .has_pending_stream_blocked_frame() ||
                self.streams_blocked_uni_state
                    .has_pending_stream_blocked_frame() ||
                self.dgram_send_queue.has_pending() ||
                self.local_error
                    .as_ref()
                    .is_some_and(|conn_err| conn_err.is_app) ||
                self.should_send_max_streams_bidi ||
                self.streams.should_update_max_streams_bidi() ||
                self.should_send_max_streams_uni ||
                self.streams.should_update_max_streams_uni() ||
                self.streams.has_flushable() ||
                self.streams.has_almost_full() ||
                self.streams.has_blocked() ||
                self.streams.has_reset() ||
                self.streams.has_stopped() ||
                self.ids.has_new_scids() ||
                self.ids.has_retire_dcids() ||
                send_path
                    .pmtud
                    .as_ref()
                    .is_some_and(|pmtud| pmtud.should_probe()) ||
                send_path.needs_ack_eliciting ||
                send_path.probing_required())
        {
            // Only clients can send 0-RTT packets.
            if !self.is_server && self.is_in_early_data() {
                return Ok(Type::ZeroRTT);
            }

            return Ok(Type::Short);
        }

        Err(Error::Done)
    }
8021
    /// Returns the mutable stream with the given ID if it exists, or creates
    /// a new one otherwise.
    ///
    /// `local` indicates whether the stream is being opened by the local
    /// endpoint. Both endpoints' transport parameters are passed through
    /// so a newly created stream can pick up the configured limits.
    fn get_or_create_stream(
        &mut self, id: u64, local: bool,
    ) -> Result<&mut stream::Stream<F>> {
        self.streams.get_or_create(
            id,
            &self.local_transport_params,
            &self.peer_transport_params,
            local,
            self.is_server,
        )
    }
8035
    /// Processes an incoming frame.
    ///
    /// `hdr` is the header of the packet that carried the frame,
    /// `recv_path_id` identifies the path the packet arrived on, and
    /// `epoch` is the packet number space the packet belongs to.
    fn process_frame(
        &mut self, frame: frame::Frame, hdr: &Header, recv_path_id: usize,
        epoch: packet::Epoch, now: Instant,
    ) -> Result<()> {
        trace!("{} rx frm {:?}", self.trace_id, frame);

        match frame {
            // PADDING and PING carry no data and require no state changes.
            frame::Frame::Padding { .. } => (),

            frame::Frame::Ping { .. } => (),

            frame::Frame::ACK {
                ranges, ack_delay, ..
            } => {
                // The ack_delay field is encoded scaled down by
                // 2^ack_delay_exponent; rescale it, rejecting overflow.
                let ack_delay = ack_delay
                    .checked_mul(2_u64.pow(
                        self.peer_transport_params.ack_delay_exponent as u32,
                    ))
                    .ok_or(Error::InvalidFrame)?;

                // An ACK in the Handshake space (or in the Application
                // space once established) proves the peer received data
                // from us, validating its address.
                if epoch == packet::Epoch::Handshake ||
                    (epoch == packet::Epoch::Application &&
                        self.is_established())
                {
                    self.peer_verified_initial_address = true;
                }

                let handshake_status = self.handshake_status();

                let is_app_limited = self.delivery_rate_check_if_app_limited();

                let largest_acked = ranges.last().expect(
                    "ACK frames should always have at least one ack range",
                );

                for (_, p) in self.paths.iter_mut() {
                    if self.pkt_num_spaces[epoch]
                        .largest_tx_pkt_num
                        .is_some_and(|largest_sent| largest_sent < largest_acked)
                    {
                        // https://www.rfc-editor.org/rfc/rfc9000#section-13.1
                        // An endpoint SHOULD treat receipt of an acknowledgment
                        // for a packet it did not send as
                        // a connection error of type PROTOCOL_VIOLATION
                        return Err(Error::InvalidAckRange);
                    }

                    if is_app_limited {
                        p.recovery.delivery_rate_update_app_limited(true);
                    }

                    let OnAckReceivedOutcome {
                        lost_packets,
                        lost_bytes,
                        acked_bytes,
                        spurious_losses,
                    } = p.recovery.on_ack_received(
                        &ranges,
                        ack_delay,
                        epoch,
                        handshake_status,
                        now,
                        self.pkt_num_manager.skip_pn(),
                        &self.trace_id,
                    )?;

                    let skip_pn = self.pkt_num_manager.skip_pn();
                    let largest_acked =
                        p.recovery.get_largest_acked_on_epoch(epoch);

                    // Consider the skip_pn validated if the peer has sent an ack
                    // for a larger pkt number.
                    if let Some((largest_acked, skip_pn)) =
                        largest_acked.zip(skip_pn)
                    {
                        if largest_acked > skip_pn {
                            self.pkt_num_manager.set_skip_pn(None);
                        }
                    }

                    self.lost_count += lost_packets;
                    self.lost_bytes += lost_bytes as u64;
                    self.acked_bytes += acked_bytes as u64;
                    self.spurious_lost_count += spurious_losses;
                }
            },

            frame::Frame::ResetStream {
                stream_id,
                error_code,
                final_size,
            } => {
                // Peer can't send on our unidirectional streams.
                if !stream::is_bidi(stream_id) &&
                    stream::is_local(stream_id, self.is_server)
                {
                    return Err(Error::InvalidStreamState(stream_id));
                }

                // Remaining connection-level flow control credit.
                let max_rx_data_left = self.max_rx_data() - self.rx_data;

                // Get existing stream or create a new one, but if the stream
                // has already been closed and collected, ignore the frame.
                //
                // This can happen if e.g. an ACK frame is lost, and the peer
                // retransmits another frame before it realizes that the stream
                // is gone.
                //
                // Note that it makes it impossible to check if the frame is
                // illegal, since we have no state, but since we ignore the
                // frame, it should be fine.
                let stream = match self.get_or_create_stream(stream_id, false) {
                    Ok(v) => v,

                    Err(Error::Done) => return Ok(()),

                    Err(e) => return Err(e),
                };

                let was_readable = stream.is_readable();
                let priority_key = Arc::clone(&stream.priority_key);

                let stream::RecvBufResetReturn {
                    max_data_delta,
                    consumed_flowcontrol,
                } = stream.recv.reset(error_code, final_size)?;

                // The final size advances the stream's data accounting; it
                // must still fit in the connection-level limit.
                if max_data_delta > max_rx_data_left {
                    return Err(Error::FlowControl);
                }

                if !was_readable && stream.is_readable() {
                    self.streams.insert_readable(&priority_key);
                }

                self.rx_data += max_data_delta;
                // We dropped the receive buffer, return connection level
                // flow-control
                self.flow_control.add_consumed(consumed_flowcontrol);

                self.reset_stream_remote_count =
                    self.reset_stream_remote_count.saturating_add(1);
            },

            frame::Frame::StopSending {
                stream_id,
                error_code,
            } => {
                // STOP_SENDING on a receive-only stream is a fatal error.
                if !stream::is_local(stream_id, self.is_server) &&
                    !stream::is_bidi(stream_id)
                {
                    return Err(Error::InvalidStreamState(stream_id));
                }

                // Get existing stream or create a new one, but if the stream
                // has already been closed and collected, ignore the frame.
                //
                // This can happen if e.g. an ACK frame is lost, and the peer
                // retransmits another frame before it realizes that the stream
                // is gone.
                //
                // Note that it makes it impossible to check if the frame is
                // illegal, since we have no state, but since we ignore the
                // frame, it should be fine.
                let stream = match self.get_or_create_stream(stream_id, false) {
                    Ok(v) => v,

                    Err(Error::Done) => return Ok(()),

                    Err(e) => return Err(e),
                };

                let was_writable = stream.is_writable();

                let priority_key = Arc::clone(&stream.priority_key);

                // Try stopping the stream.
                if let Ok((final_size, unsent)) = stream.send.stop(error_code) {
                    // Claw back some flow control allowance from data that was
                    // buffered but not actually sent before the stream was
                    // reset.
                    //
                    // Note that `tx_cap` will be updated later on, so no need
                    // to touch it here.
                    self.tx_data = self.tx_data.saturating_sub(unsent);

                    self.tx_buffered =
                        self.tx_buffered.saturating_sub(unsent as usize);

                    // These drops in qlog are a bit weird, but the only way to
                    // ensure that all bytes that are moved from App to Transport
                    // in stream_do_send are eventually moved from Transport to
                    // Dropped. Ideally we would add a Transport to Network
                    // transition also as a way to indicate when bytes were
                    // transmitted vs dropped without ever being sent.
                    qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
                        let ev_data =
                            EventData::DataMoved(qlog::events::quic::DataMoved {
                                stream_id: Some(stream_id),
                                offset: Some(final_size),
                                length: Some(unsent),
                                from: Some(DataRecipient::Transport),
                                to: Some(DataRecipient::Dropped),
                                ..Default::default()
                            });

                        q.add_event_data_with_instant(ev_data, now).ok();
                    });

                    self.streams.insert_reset(stream_id, error_code, final_size);

                    if !was_writable {
                        self.streams.insert_writable(&priority_key);
                    }

                    self.stopped_stream_remote_count =
                        self.stopped_stream_remote_count.saturating_add(1);
                    self.reset_stream_local_count =
                        self.reset_stream_local_count.saturating_add(1);
                }
            },

            frame::Frame::Crypto { data } => {
                if data.max_off() >= MAX_CRYPTO_STREAM_OFFSET {
                    return Err(Error::CryptoBufferExceeded);
                }

                // Push the data to the stream so it can be re-ordered.
                self.crypto_ctx[epoch].crypto_stream.recv.write(data)?;

                // Feed crypto data to the TLS state, if there's data
                // available at the expected offset.
                let mut crypto_buf = [0; 512];

                let level = crypto::Level::from_epoch(epoch);

                let stream = &mut self.crypto_ctx[epoch].crypto_stream;

                while let Ok((read, _)) = stream.recv.emit(&mut crypto_buf) {
                    let recv_buf = &crypto_buf[..read];
                    self.handshake.provide_data(level, recv_buf)?;
                }

                self.do_handshake(now)?;
            },

            frame::Frame::CryptoHeader { .. } => unreachable!(),

            // TODO: implement stateless retry
            frame::Frame::NewToken { .. } =>
                if self.is_server {
                    return Err(Error::InvalidPacket);
                },

            frame::Frame::Stream { stream_id, data } => {
                // Peer can't send on our unidirectional streams.
                if !stream::is_bidi(stream_id) &&
                    stream::is_local(stream_id, self.is_server)
                {
                    return Err(Error::InvalidStreamState(stream_id));
                }

                // Remaining connection-level flow control credit.
                let max_rx_data_left = self.max_rx_data() - self.rx_data;

                // Get existing stream or create a new one, but if the stream
                // has already been closed and collected, ignore the frame.
                //
                // This can happen if e.g. an ACK frame is lost, and the peer
                // retransmits another frame before it realizes that the stream
                // is gone.
                //
                // Note that it makes it impossible to check if the frame is
                // illegal, since we have no state, but since we ignore the
                // frame, it should be fine.
                let stream = match self.get_or_create_stream(stream_id, false) {
                    Ok(v) => v,

                    Err(Error::Done) => return Ok(()),

                    Err(e) => return Err(e),
                };

                // Check for the connection-level flow control limit.
                let max_off_delta =
                    data.max_off().saturating_sub(stream.recv.max_off());

                if max_off_delta > max_rx_data_left {
                    return Err(Error::FlowControl);
                }

                let was_readable = stream.is_readable();
                let priority_key = Arc::clone(&stream.priority_key);

                let was_draining = stream.recv.is_draining();

                stream.recv.write(data)?;

                if !was_readable && stream.is_readable() {
                    self.streams.insert_readable(&priority_key);
                }

                self.rx_data += max_off_delta;

                if was_draining {
                    // When a stream is in draining state it will not queue
                    // incoming data for the application to read, so consider
                    // the received data as consumed, which might trigger a flow
                    // control update.
                    self.flow_control.add_consumed(max_off_delta);
                }
            },

            frame::Frame::StreamHeader { .. } => unreachable!(),

            // MAX_DATA is cumulative: never lower the current limit.
            frame::Frame::MaxData { max } => {
                self.max_tx_data = cmp::max(self.max_tx_data, max);
            },

            frame::Frame::MaxStreamData { stream_id, max } => {
                // Peer can't receive on its own unidirectional streams.
                if !stream::is_bidi(stream_id) &&
                    !stream::is_local(stream_id, self.is_server)
                {
                    return Err(Error::InvalidStreamState(stream_id));
                }

                // Get existing stream or create a new one, but if the stream
                // has already been closed and collected, ignore the frame.
                //
                // This can happen if e.g. an ACK frame is lost, and the peer
                // retransmits another frame before it realizes that the stream
                // is gone.
                //
                // Note that it makes it impossible to check if the frame is
                // illegal, since we have no state, but since we ignore the
                // frame, it should be fine.
                let stream = match self.get_or_create_stream(stream_id, false) {
                    Ok(v) => v,

                    Err(Error::Done) => return Ok(()),

                    Err(e) => return Err(e),
                };

                let was_flushable = stream.is_flushable();

                stream.send.update_max_data(max);

                let writable = stream.is_writable();

                let priority_key = Arc::clone(&stream.priority_key);

                // If the stream is now flushable push it to the flushable queue,
                // but only if it wasn't already queued.
                if stream.is_flushable() && !was_flushable {
                    let priority_key = Arc::clone(&stream.priority_key);
                    self.streams.insert_flushable(&priority_key);
                }

                if writable {
                    self.streams.insert_writable(&priority_key);
                }
            },

            frame::Frame::MaxStreamsBidi { max } => {
                if max > MAX_STREAM_ID {
                    return Err(Error::InvalidFrame);
                }

                self.streams.update_peer_max_streams_bidi(max);
            },

            frame::Frame::MaxStreamsUni { max } => {
                if max > MAX_STREAM_ID {
                    return Err(Error::InvalidFrame);
                }

                self.streams.update_peer_max_streams_uni(max);
            },

            // The *_BLOCKED frames below are informational; only their
            // occurrence is counted here.
            frame::Frame::DataBlocked { .. } => {
                self.data_blocked_recv_count =
                    self.data_blocked_recv_count.saturating_add(1);
            },

            frame::Frame::StreamDataBlocked { .. } => {
                self.stream_data_blocked_recv_count =
                    self.stream_data_blocked_recv_count.saturating_add(1);
            },

            frame::Frame::StreamsBlockedBidi { limit } => {
                if limit > MAX_STREAM_ID {
                    return Err(Error::InvalidFrame);
                }

                self.streams_blocked_bidi_recv_count =
                    self.streams_blocked_bidi_recv_count.saturating_add(1);
            },

            frame::Frame::StreamsBlockedUni { limit } => {
                if limit > MAX_STREAM_ID {
                    return Err(Error::InvalidFrame);
                }

                self.streams_blocked_uni_recv_count =
                    self.streams_blocked_uni_recv_count.saturating_add(1);
            },

            frame::Frame::NewConnectionId {
                seq_num,
                retire_prior_to,
                conn_id,
                reset_token,
            } => {
                if self.ids.zero_length_dcid() {
                    return Err(Error::InvalidState);
                }

                let mut retired_path_ids = SmallVec::new();

                // Retire pending path IDs before propagating the error code to
                // make sure retired connection IDs are not in use anymore.
                let new_dcid_res = self.ids.new_dcid(
                    conn_id.into(),
                    seq_num,
                    u128::from_be_bytes(reset_token),
                    retire_prior_to,
                    &mut retired_path_ids,
                );

                for (dcid_seq, pid) in retired_path_ids {
                    let path = self.paths.get_mut(pid)?;

                    // Maybe the path already switched to another DCID.
                    if path.active_dcid_seq != Some(dcid_seq) {
                        continue;
                    }

                    if let Some(new_dcid_seq) =
                        self.ids.lowest_available_dcid_seq()
                    {
                        path.active_dcid_seq = Some(new_dcid_seq);

                        self.ids.link_dcid_to_path_id(new_dcid_seq, pid)?;

                        trace!(
                            "{} path ID {} changed DCID: old seq num {} new seq num {}",
                            self.trace_id, pid, dcid_seq, new_dcid_seq,
                        );
                    } else {
                        // We cannot use this path anymore for now.
                        path.active_dcid_seq = None;

                        trace!(
                            "{} path ID {} cannot be used; DCID seq num {} has been retired",
                            self.trace_id, pid, dcid_seq,
                        );
                    }
                }

                // Propagate error (if any) now...
                new_dcid_res?;
            },

            frame::Frame::RetireConnectionId { seq_num } => {
                if self.ids.zero_length_scid() {
                    return Err(Error::InvalidState);
                }

                if let Some(pid) = self.ids.retire_scid(seq_num, &hdr.dcid)? {
                    let path = self.paths.get_mut(pid)?;

                    // Maybe we already linked a new SCID to that path.
                    if path.active_scid_seq == Some(seq_num) {
                        // XXX: We do not remove unused paths now, we instead
                        // wait until we need to maintain more paths than the
                        // host is willing to.
                        path.active_scid_seq = None;
                    }
                }
            },

            frame::Frame::PathChallenge { data } => {
                self.path_challenge_rx_count += 1;

                self.paths
                    .get_mut(recv_path_id)?
                    .on_challenge_received(data);
            },

            frame::Frame::PathResponse { data } => {
                self.paths.on_response_received(data)?;
            },

            frame::Frame::ConnectionClose {
                error_code, reason, ..
            } => {
                self.peer_error = Some(ConnectionError {
                    is_app: false,
                    error_code,
                    reason,
                });

                // Enter the draining state for three PTOs.
                let path = self.paths.get_active()?;
                self.draining_timer = Some(now + (path.recovery.pto() * 3));
            },

            frame::Frame::ApplicationClose { error_code, reason } => {
                self.peer_error = Some(ConnectionError {
                    is_app: true,
                    error_code,
                    reason,
                });

                // Enter the draining state for three PTOs.
                let path = self.paths.get_active()?;
                self.draining_timer = Some(now + (path.recovery.pto() * 3));
            },

            frame::Frame::HandshakeDone => {
                // Only servers send HANDSHAKE_DONE.
                if self.is_server {
                    return Err(Error::InvalidPacket);
                }

                self.peer_verified_initial_address = true;

                self.handshake_confirmed = true;

                // Once the handshake is confirmed, we can drop Handshake keys.
                self.drop_epoch_state(packet::Epoch::Handshake, now);
            },

            frame::Frame::Datagram { data } => {
                // Close the connection if DATAGRAMs are not enabled.
                // quiche always advertises support for 64K sized DATAGRAM
                // frames, as recommended by the standard, so we don't need a
                // size check.
                if !self.dgram_enabled() {
                    return Err(Error::InvalidState);
                }

                // If recv queue is full, discard oldest
                if self.dgram_recv_queue.is_full() {
                    self.dgram_recv_queue.pop();
                }

                self.dgram_recv_queue.push(data)?;

                self.dgram_recv_count = self.dgram_recv_count.saturating_add(1);

                let path = self.paths.get_mut(recv_path_id)?;
                path.dgram_recv_count = path.dgram_recv_count.saturating_add(1);
            },

            frame::Frame::DatagramHeader { .. } => unreachable!(),
        }

        Ok(())
    }
8596
    /// Drops the keys and recovery state for the given epoch.
    ///
    /// Does nothing when the epoch's receive keys are already gone.
    fn drop_epoch_state(&mut self, epoch: packet::Epoch, now: Instant) {
        let crypto_ctx = &mut self.crypto_ctx[epoch];
        // Already dropped (or never installed): nothing to do.
        if crypto_ctx.crypto_open.is_none() {
            return;
        }
        crypto_ctx.clear();
        self.pkt_num_spaces[epoch].clear();

        // Notify each path's recovery state that the packet number space
        // was discarded.
        let handshake_status = self.handshake_status();
        for (_, p) in self.paths.iter_mut() {
            p.recovery
                .on_pkt_num_space_discarded(epoch, handshake_status, now);
        }

        trace!("{} dropped epoch {} state", self.trace_id, epoch);
    }
8614
    /// Returns the connection level flow control limit.
    fn max_rx_data(&self) -> u64 {
        // The limit is owned by the connection-level flow control state.
        self.flow_control.max_data()
    }
8619
8620 /// Returns true if the HANDSHAKE_DONE frame needs to be sent.
8621 fn should_send_handshake_done(&self) -> bool {
8622 self.is_established() && !self.handshake_done_sent && self.is_server
8623 }
8624
8625 /// Returns the idle timeout value.
8626 ///
8627 /// `None` is returned if both end-points disabled the idle timeout.
8628 fn idle_timeout(&self) -> Option<Duration> {
8629 // If the transport parameter is set to 0, then the respective endpoint
8630 // decided to disable the idle timeout. If both are disabled we should
8631 // not set any timeout.
8632 if self.local_transport_params.max_idle_timeout == 0 &&
8633 self.peer_transport_params.max_idle_timeout == 0
8634 {
8635 return None;
8636 }
8637
8638 // If the local endpoint or the peer disabled the idle timeout, use the
8639 // other peer's value, otherwise use the minimum of the two values.
8640 let idle_timeout = if self.local_transport_params.max_idle_timeout == 0 {
8641 self.peer_transport_params.max_idle_timeout
8642 } else if self.peer_transport_params.max_idle_timeout == 0 {
8643 self.local_transport_params.max_idle_timeout
8644 } else {
8645 cmp::min(
8646 self.local_transport_params.max_idle_timeout,
8647 self.peer_transport_params.max_idle_timeout,
8648 )
8649 };
8650
8651 let path_pto = match self.paths.get_active() {
8652 Ok(p) => p.recovery.pto(),
8653 Err(_) => Duration::ZERO,
8654 };
8655
8656 let idle_timeout = Duration::from_millis(idle_timeout);
8657 let idle_timeout = cmp::max(idle_timeout, 3 * path_pto);
8658
8659 Some(idle_timeout)
8660 }
8661
8662 /// Returns the connection's handshake status for use in loss recovery.
8663 fn handshake_status(&self) -> recovery::HandshakeStatus {
8664 recovery::HandshakeStatus {
8665 has_handshake_keys: self.crypto_ctx[packet::Epoch::Handshake]
8666 .has_keys(),
8667
8668 peer_verified_address: self.peer_verified_initial_address,
8669
8670 completed: self.is_established(),
8671 }
8672 }
8673
8674 /// Updates send capacity.
8675 fn update_tx_cap(&mut self) {
8676 let cwin_available = match self.paths.get_active() {
8677 Ok(p) => p.recovery.cwnd_available() as u64,
8678 Err(_) => 0,
8679 };
8680
8681 let cap =
8682 cmp::min(cwin_available, self.max_tx_data - self.tx_data) as usize;
8683 self.tx_cap = (cap as f64 * self.tx_cap_factor).ceil() as usize;
8684 }
8685
8686 fn delivery_rate_check_if_app_limited(&self) -> bool {
8687 // Enter the app-limited phase of delivery rate when these conditions
8688 // are met:
8689 //
8690 // - The remaining capacity is higher than available bytes in cwnd (there
8691 // is more room to send).
8692 // - New data since the last send() is smaller than available bytes in
8693 // cwnd (we queued less than what we can send).
8694 // - There is room to send more data in cwnd.
8695 //
8696 // In application-limited phases the transmission rate is limited by the
8697 // application rather than the congestion control algorithm.
8698 //
8699 // Note that this is equivalent to CheckIfApplicationLimited() from the
8700 // delivery rate draft. This is also separate from `recovery.app_limited`
8701 // and only applies to delivery rate calculation.
8702 let cwin_available = self
8703 .paths
8704 .iter()
8705 .filter(|&(_, p)| p.active())
8706 .map(|(_, p)| p.recovery.cwnd_available())
8707 .sum();
8708
8709 ((self.tx_buffered + self.dgram_send_queue_byte_size()) < cwin_available) &&
8710 (self.tx_data.saturating_sub(self.last_tx_data)) <
8711 cwin_available as u64 &&
8712 cwin_available > 0
8713 }
8714
8715 fn check_tx_buffered_invariant(&mut self) {
8716 // tx_buffered should track bytes queued in the stream buffers
8717 // and unacked retransmitable bytes in the network.
8718 // If tx_buffered > 0 mark the tx_buffered_state if there are no
8719 // flushable streams and there no inflight bytes.
8720 //
8721 // It is normal to have tx_buffered == 0 while there are inflight bytes
8722 // since not QUIC frames are retransmittable; inflight tracks all bytes
8723 // on the network which are subject to congestion control.
8724 if self.tx_buffered > 0 &&
8725 !self.streams.has_flushable() &&
8726 !self
8727 .paths
8728 .iter()
8729 .any(|(_, p)| p.recovery.bytes_in_flight() > 0)
8730 {
8731 self.tx_buffered_state = TxBufferTrackingState::Inconsistent;
8732 }
8733 }
8734
8735 fn set_initial_dcid(
8736 &mut self, cid: ConnectionId<'static>, reset_token: Option<u128>,
8737 path_id: usize,
8738 ) -> Result<()> {
8739 self.ids.set_initial_dcid(cid, reset_token, Some(path_id));
8740 self.paths.get_mut(path_id)?.active_dcid_seq = Some(0);
8741
8742 Ok(())
8743 }
8744
    /// Selects the path that the incoming packet belongs to, or creates a new
    /// one if no existing path matches.
    ///
    /// `recv_pid` is the already-known path for the packet's 4-tuple (if
    /// any), `dcid` is the DCID carried by the packet, `buf_len` the size
    /// of the received datagram (used to seed the new path's
    /// anti-amplification budget) and `info` holds the addresses the
    /// packet was received on.
    ///
    /// Returns the identifier of the selected (possibly newly created)
    /// path.
    fn get_or_create_recv_path_id(
        &mut self, recv_pid: Option<usize>, dcid: &ConnectionId, buf_len: usize,
        info: &RecvInfo,
    ) -> Result<usize> {
        let ids = &mut self.ids;

        // The packet's DCID must map to one of our source CIDs, otherwise
        // it cannot belong to this connection.
        let (in_scid_seq, mut in_scid_pid) =
            ids.find_scid_seq(dcid).ok_or(Error::InvalidState)?;

        if let Some(recv_pid) = recv_pid {
            // If the path observes a change of SCID used, note it.
            let recv_path = self.paths.get_mut(recv_pid)?;

            let cid_entry =
                recv_path.active_scid_seq.and_then(|v| ids.get_scid(v).ok());

            if cid_entry.map(|e| &e.cid) != Some(dcid) {
                let incoming_cid_entry = ids.get_scid(in_scid_seq)?;

                // Path the incoming CID was previously linked to, if any.
                let prev_recv_pid =
                    incoming_cid_entry.path_id.unwrap_or(recv_pid);

                if prev_recv_pid != recv_pid {
                    trace!(
                        "{} peer reused CID {:?} from path {} on path {}",
                        self.trace_id,
                        dcid,
                        prev_recv_pid,
                        recv_pid
                    );

                    // TODO: reset congestion control.
                }

                trace!(
                    "{} path ID {} now see SCID with seq num {}",
                    self.trace_id,
                    recv_pid,
                    in_scid_seq
                );

                // Record the newly observed SCID on the existing path.
                recv_path.active_scid_seq = Some(in_scid_seq);
                ids.link_scid_to_path_id(in_scid_seq, recv_pid)?;
            }

            return Ok(recv_pid);
        }

        // This is a new 4-tuple. See if the CID has not been assigned on
        // another path.

        // Ignore this step if we are using zero-length SCIDs.
        if ids.zero_length_scid() {
            in_scid_pid = None;
        }

        if let Some(in_scid_pid) = in_scid_pid {
            // This CID has been used by another path. If we have the
            // room to do so, create a new `Path` structure holding this
            // new 4-tuple. Otherwise, drop the packet.
            let old_path = self.paths.get_mut(in_scid_pid)?;
            let old_local_addr = old_path.local_addr();
            let old_peer_addr = old_path.peer_addr();

            trace!(
                "{} reused CID seq {} of ({},{}) (path {}) on ({},{})",
                self.trace_id,
                in_scid_seq,
                old_local_addr,
                old_peer_addr,
                in_scid_pid,
                info.to,
                info.from
            );

            // Notify the application.
            self.paths.notify_event(PathEvent::ReusedSourceConnectionId(
                in_scid_seq,
                (old_local_addr, old_peer_addr),
                (info.to, info.from),
            ));
        }

        // This is a new path using an unassigned CID; create it!
        let mut path = path::Path::new(
            info.to,
            info.from,
            &self.recovery_config,
            self.path_challenge_recv_max_queue_len,
            false,
            None,
        );

        // Limit what can be sent on the new path until validation
        // completes (anti-amplification).
        path.max_send_bytes = buf_len * self.max_amplification_factor;
        path.active_scid_seq = Some(in_scid_seq);

        // Automatically probes the new path.
        path.request_validation();

        let pid = self.paths.insert_path(path, self.is_server)?;

        // Do not record path reuse.
        if in_scid_pid.is_none() {
            ids.link_scid_to_path_id(in_scid_seq, pid)?;
        }

        Ok(pid)
    }
8855
8856 /// Selects the path on which the next packet must be sent.
8857 fn get_send_path_id(
8858 &self, from: Option<SocketAddr>, to: Option<SocketAddr>,
8859 ) -> Result<usize> {
8860 // A probing packet must be sent, but only if the connection is fully
8861 // established.
8862 if self.is_established() {
8863 let mut probing = self
8864 .paths
8865 .iter()
8866 .filter(|(_, p)| from.is_none() || Some(p.local_addr()) == from)
8867 .filter(|(_, p)| to.is_none() || Some(p.peer_addr()) == to)
8868 .filter(|(_, p)| p.active_dcid_seq.is_some())
8869 .filter(|(_, p)| p.probing_required())
8870 .map(|(pid, _)| pid);
8871
8872 if let Some(pid) = probing.next() {
8873 return Ok(pid);
8874 }
8875 }
8876
8877 if let Some((pid, p)) = self.paths.get_active_with_pid() {
8878 if from.is_some() && Some(p.local_addr()) != from {
8879 return Err(Error::Done);
8880 }
8881
8882 if to.is_some() && Some(p.peer_addr()) != to {
8883 return Err(Error::Done);
8884 }
8885
8886 return Ok(pid);
8887 };
8888
8889 Err(Error::InvalidState)
8890 }
8891
8892 /// Sets the path with identifier 'path_id' to be active.
8893 fn set_active_path(&mut self, path_id: usize, now: Instant) -> Result<()> {
8894 if let Ok(old_active_path) = self.paths.get_active_mut() {
8895 for &e in packet::Epoch::epochs(
8896 packet::Epoch::Initial..=packet::Epoch::Application,
8897 ) {
8898 let (lost_packets, lost_bytes) = old_active_path
8899 .recovery
8900 .on_path_change(e, now, &self.trace_id);
8901
8902 self.lost_count += lost_packets;
8903 self.lost_bytes += lost_bytes as u64;
8904 }
8905 }
8906
8907 self.paths.set_active_path(path_id)
8908 }
8909
8910 /// Handles potential connection migration.
8911 fn on_peer_migrated(
8912 &mut self, new_pid: usize, disable_dcid_reuse: bool, now: Instant,
8913 ) -> Result<()> {
8914 let active_path_id = self.paths.get_active_path_id()?;
8915
8916 if active_path_id == new_pid {
8917 return Ok(());
8918 }
8919
8920 self.set_active_path(new_pid, now)?;
8921
8922 let no_spare_dcid =
8923 self.paths.get_mut(new_pid)?.active_dcid_seq.is_none();
8924
8925 if no_spare_dcid && !disable_dcid_reuse {
8926 self.paths.get_mut(new_pid)?.active_dcid_seq =
8927 self.paths.get_mut(active_path_id)?.active_dcid_seq;
8928 }
8929
8930 Ok(())
8931 }
8932
8933 /// Creates a new client-side path.
8934 fn create_path_on_client(
8935 &mut self, local_addr: SocketAddr, peer_addr: SocketAddr,
8936 ) -> Result<usize> {
8937 if self.is_server {
8938 return Err(Error::InvalidState);
8939 }
8940
8941 // If we use zero-length SCID and go over our local active CID limit,
8942 // the `insert_path()` call will raise an error.
8943 if !self.ids.zero_length_scid() && self.ids.available_scids() == 0 {
8944 return Err(Error::OutOfIdentifiers);
8945 }
8946
8947 // Do we have a spare DCID? If we are using zero-length DCID, just use
8948 // the default having sequence 0 (note that if we exceed our local CID
8949 // limit, the `insert_path()` call will raise an error.
8950 let dcid_seq = if self.ids.zero_length_dcid() {
8951 0
8952 } else {
8953 self.ids
8954 .lowest_available_dcid_seq()
8955 .ok_or(Error::OutOfIdentifiers)?
8956 };
8957
8958 let mut path = path::Path::new(
8959 local_addr,
8960 peer_addr,
8961 &self.recovery_config,
8962 self.path_challenge_recv_max_queue_len,
8963 false,
8964 None,
8965 );
8966 path.active_dcid_seq = Some(dcid_seq);
8967
8968 let pid = self
8969 .paths
8970 .insert_path(path, false)
8971 .map_err(|_| Error::OutOfIdentifiers)?;
8972 self.ids.link_dcid_to_path_id(dcid_seq, pid)?;
8973
8974 Ok(pid)
8975 }
8976
    /// Marks the connection as closed and does any related tidyup.
    fn mark_closed(&mut self) {
        #[cfg(feature = "qlog")]
        {
            // Build the qlog ConnectionClosed event payload according to
            // how the connection ended: handshake never completed, idle
            // timeout, peer-initiated close, or locally-initiated close.
            let cc = match (self.is_established(), self.timed_out, &self.peer_error, &self.local_error) {
                // Handshake never completed.
                (false, _, _, _) => qlog::events::connectivity::ConnectionClosed {
                    owner: Some(TransportOwner::Local),
                    connection_code: None,
                    application_code: None,
                    internal_code: None,
                    reason: Some("Failed to establish connection".to_string()),
                    trigger: Some(qlog::events::connectivity::ConnectionClosedTrigger::HandshakeTimeout)
                },

                // Established, but the idle timer fired.
                (true, true, _, _) => qlog::events::connectivity::ConnectionClosed {
                    owner: Some(TransportOwner::Local),
                    connection_code: None,
                    application_code: None,
                    internal_code: None,
                    reason: Some("Idle timeout".to_string()),
                    trigger: Some(qlog::events::connectivity::ConnectionClosedTrigger::IdleTimeout)
                },

                // Close initiated by the peer.
                (true, false, Some(peer_error), None) => {
                    // Application-level closes carry an application error
                    // code and no trigger; transport-level closes carry a
                    // connection error code plus a Clean/Error trigger.
                    let (connection_code, application_code, trigger) = if peer_error.is_app {
                        (None, Some(qlog::events::ApplicationErrorCode::Value(peer_error.error_code)), None)
                    } else {
                        let trigger = if peer_error.error_code == WireErrorCode::NoError as u64 {
                            Some(qlog::events::connectivity::ConnectionClosedTrigger::Clean)
                        } else {
                            Some(qlog::events::connectivity::ConnectionClosedTrigger::Error)
                        };

                        (Some(qlog::events::ConnectionErrorCode::Value(peer_error.error_code)), None, trigger)
                    };

                    qlog::events::connectivity::ConnectionClosed {
                        owner: Some(TransportOwner::Remote),
                        connection_code,
                        application_code,
                        internal_code: None,
                        reason: Some(String::from_utf8_lossy(&peer_error.reason).to_string()),
                        trigger,
                    }
                },

                // Close initiated locally; same error-code mapping as
                // above, but attributed to the local endpoint.
                (true, false, None, Some(local_error)) => {
                    let (connection_code, application_code, trigger) = if local_error.is_app {
                        (None, Some(qlog::events::ApplicationErrorCode::Value(local_error.error_code)), None)
                    } else {
                        let trigger = if local_error.error_code == WireErrorCode::NoError as u64 {
                            Some(qlog::events::connectivity::ConnectionClosedTrigger::Clean)
                        } else {
                            Some(qlog::events::connectivity::ConnectionClosedTrigger::Error)
                        };

                        (Some(qlog::events::ConnectionErrorCode::Value(local_error.error_code)), None, trigger)
                    };

                    qlog::events::connectivity::ConnectionClosed {
                        owner: Some(TransportOwner::Local),
                        connection_code,
                        application_code,
                        internal_code: None,
                        reason: Some(String::from_utf8_lossy(&local_error.reason).to_string()),
                        trigger,
                    }
                },

                // Any other combination (e.g. both a peer and a local
                // error recorded): emit an event with no details.
                _ => qlog::events::connectivity::ConnectionClosed {
                    owner: None,
                    connection_code: None,
                    application_code: None,
                    internal_code: None,
                    reason: None,
                    trigger: None,
                },
            };

            qlog_with_type!(QLOG_CONNECTION_CLOSED, self.qlog, q, {
                let ev_data = EventData::ConnectionClosed(cc);

                q.add_event_data_now(ev_data).ok();
            });

            // Stop qlog output for this connection.
            self.qlog.streamer = None;
        }
        self.closed = true;
    }
9065}
9066
#[cfg(feature = "boringssl-boring-crate")]
impl<F: BufFactory> AsMut<boring::ssl::SslRef> for Connection<F> {
    /// Returns a mutable reference to the underlying BoringSSL `SslRef`,
    /// allowing callers to configure TLS details directly on the
    /// connection's handshake state.
    fn as_mut(&mut self) -> &mut boring::ssl::SslRef {
        self.handshake.ssl_mut()
    }
}
9073
9074/// Maps an `Error` to `Error::Done`, or itself.
9075///
9076/// When a received packet that hasn't yet been authenticated triggers a failure
9077/// it should, in most cases, be ignored, instead of raising a connection error,
9078/// to avoid potential man-in-the-middle and man-on-the-side attacks.
9079///
9080/// However, if no other packet was previously received, the connection should
9081/// indeed be closed as the received packet might just be network background
9082/// noise, and it shouldn't keep resources occupied indefinitely.
9083///
9084/// This function maps an error to `Error::Done` to ignore a packet failure
9085/// without aborting the connection, except when no other packet was previously
9086/// received, in which case the error itself is returned, but only on the
9087/// server-side as the client will already have armed the idle timer.
9088///
9089/// This must only be used for errors preceding packet authentication. Failures
9090/// happening after a packet has been authenticated should still cause the
9091/// connection to be aborted.
9092fn drop_pkt_on_err(
9093 e: Error, recv_count: usize, is_server: bool, trace_id: &str,
9094) -> Error {
9095 // On the server, if no other packet has been successfully processed, abort
9096 // the connection to avoid keeping the connection open when only junk is
9097 // received.
9098 if is_server && recv_count == 0 {
9099 return e;
9100 }
9101
9102 trace!("{trace_id} dropped invalid packet");
9103
9104 // Ignore other invalid packets that haven't been authenticated to prevent
9105 // man-in-the-middle and man-on-the-side attacks.
9106 Error::Done
9107}
9108
/// Helper that formats a `(source, destination)` address pair.
///
/// Renders as `"src:<src> dst:<dst>"`, or as the empty string when either
/// address is the unspecified wildcard (e.g. `0.0.0.0`).
struct AddrTupleFmt(SocketAddr, SocketAddr);

impl std::fmt::Display for AddrTupleFmt {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let AddrTupleFmt(src, dst) = self;

        // A wildcard address carries no useful information, so emit
        // nothing at all in that case.
        if src.ip().is_unspecified() || dst.ip().is_unspecified() {
            return Ok(());
        }

        write!(f, "src:{src} dst:{dst}")
    }
}
9122
/// Statistics about the connection.
///
/// A connection's statistics can be collected using the [`stats()`] method.
///
/// [`stats()`]: struct.Connection.html#method.stats
#[derive(Clone, Default)]
pub struct Stats {
    /// The number of QUIC packets received.
    pub recv: usize,

    /// The number of QUIC packets sent.
    pub sent: usize,

    /// The number of QUIC packets that were lost.
    pub lost: usize,

    /// The number of QUIC packets that were marked as lost but later acked.
    pub spurious_lost: usize,

    /// The number of sent QUIC packets with retransmitted data.
    pub retrans: usize,

    /// The number of sent bytes.
    pub sent_bytes: u64,

    /// The number of received bytes.
    pub recv_bytes: u64,

    /// The number of sent bytes that have been acked.
    pub acked_bytes: u64,

    /// The number of sent bytes that were declared lost.
    pub lost_bytes: u64,

    /// The number of stream bytes retransmitted.
    pub stream_retrans_bytes: u64,

    /// The number of DATAGRAM frames received.
    pub dgram_recv: usize,

    /// The number of DATAGRAM frames sent.
    pub dgram_sent: usize,

    /// The number of known paths for the connection.
    pub paths_count: usize,

    /// The number of streams reset by local.
    pub reset_stream_count_local: u64,

    /// The number of streams stopped by local.
    pub stopped_stream_count_local: u64,

    /// The number of streams reset by remote.
    pub reset_stream_count_remote: u64,

    /// The number of streams stopped by remote.
    pub stopped_stream_count_remote: u64,

    /// The number of DATA_BLOCKED frames sent due to hitting the connection
    /// flow control limit.
    pub data_blocked_sent_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames sent due to a stream hitting
    /// the stream flow control limit.
    pub stream_data_blocked_sent_count: u64,

    /// The number of DATA_BLOCKED frames received from the remote.
    pub data_blocked_recv_count: u64,

    /// The number of STREAM_DATA_BLOCKED frames received from the remote.
    pub stream_data_blocked_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames for bidirectional streams received
    /// from the remote, indicating the peer is blocked on opening new
    /// bidirectional streams.
    pub streams_blocked_bidi_recv_count: u64,

    /// The number of STREAMS_BLOCKED frames for unidirectional streams received
    /// from the remote, indicating the peer is blocked on opening new
    /// unidirectional streams.
    pub streams_blocked_uni_recv_count: u64,

    /// The total number of PATH_CHALLENGE frames that were received.
    pub path_challenge_rx_count: u64,

    /// Total duration during which this side of the connection was
    /// actively sending bytes or waiting for those bytes to be acked.
    pub bytes_in_flight_duration: Duration,

    /// Health state of the connection's `tx_buffered` accounting.
    pub tx_buffered_state: TxBufferTrackingState,
}
9215
9216impl std::fmt::Debug for Stats {
9217 #[inline]
9218 fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
9219 write!(
9220 f,
9221 "recv={} sent={} lost={} retrans={}",
9222 self.recv, self.sent, self.lost, self.retrans,
9223 )?;
9224
9225 write!(
9226 f,
9227 " sent_bytes={} recv_bytes={} lost_bytes={}",
9228 self.sent_bytes, self.recv_bytes, self.lost_bytes,
9229 )?;
9230
9231 Ok(())
9232 }
9233}
9234
9235#[doc(hidden)]
9236#[cfg(any(test, feature = "internal"))]
9237pub mod test_utils;
9238
9239#[cfg(test)]
9240mod tests;
9241
9242pub use crate::packet::ConnectionId;
9243pub use crate::packet::Header;
9244pub use crate::packet::Type;
9245
9246pub use crate::path::PathEvent;
9247pub use crate::path::PathStats;
9248pub use crate::path::SocketAddrIter;
9249
9250pub use crate::recovery::BbrBwLoReductionStrategy;
9251pub use crate::recovery::BbrParams;
9252pub use crate::recovery::CongestionControlAlgorithm;
9253pub use crate::recovery::StartupExit;
9254pub use crate::recovery::StartupExitReason;
9255
9256pub use crate::stream::StreamIter;
9257
9258pub use crate::transport_params::TransportParams;
9259pub use crate::transport_params::UnknownTransportParameter;
9260pub use crate::transport_params::UnknownTransportParameterIterator;
9261pub use crate::transport_params::UnknownTransportParameters;
9262
9263pub use crate::buffers::BufFactory;
9264pub use crate::buffers::BufSplit;
9265
9266pub use crate::error::ConnectionError;
9267pub use crate::error::Error;
9268pub use crate::error::Result;
9269pub use crate::error::WireErrorCode;
9270
9271mod buffers;
9272mod cid;
9273mod crypto;
9274mod dgram;
9275mod error;
9276#[cfg(feature = "ffi")]
9277mod ffi;
9278mod flowcontrol;
9279mod frame;
9280pub mod h3;
9281mod minmax;
9282mod packet;
9283mod path;
9284mod pmtud;
9285mod rand;
9286mod range_buf;
9287mod ranges;
9288mod recovery;
9289mod stream;
9290mod tls;
9291mod transport_params;