// tokio_quiche/http3/driver/mod.rs
1// Copyright (C) 2025, Cloudflare, Inc.
2// All rights reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// * Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// * Redistributions in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
16// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
19// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27mod client;
28/// Wrapper for running HTTP/3 connections.
29pub mod connection;
30mod datagram;
31// `DriverHooks` must stay private to prevent users from creating their own
32// H3Drivers.
33mod hooks;
34mod server;
35mod streams;
36#[cfg(test)]
37pub mod test_utils;
38#[cfg(test)]
39mod tests;
40
41use std::collections::BTreeMap;
42use std::error::Error;
43use std::fmt;
44use std::marker::PhantomData;
45use std::sync::Arc;
46use std::time::Instant;
47
48use bytes::BufMut as _;
49use bytes::Bytes;
50use bytes::BytesMut;
51use datagram_socket::DgramBuffer;
52use datagram_socket::StreamClosureKind;
53use foundations::telemetry::log;
54use futures::FutureExt;
55use futures_util::stream::FuturesUnordered;
56use quiche::h3;
57use quiche::h3::WireErrorCode;
58use tokio::select;
59use tokio::sync::mpsc;
60use tokio::sync::mpsc::error::TryRecvError;
61use tokio::sync::mpsc::error::TrySendError;
62use tokio::sync::mpsc::UnboundedReceiver;
63use tokio::sync::mpsc::UnboundedSender;
64use tokio_stream::StreamExt;
65use tokio_util::sync::PollSender;
66
67use self::hooks::DriverHooks;
68use self::hooks::InboundHeaders;
69use self::streams::FlowCtx;
70use self::streams::HaveUpstreamCapacity;
71use self::streams::ReceivedDownstreamData;
72use self::streams::StreamCtx;
73use self::streams::StreamReady;
74use self::streams::WaitForDownstreamData;
75use self::streams::WaitForStream;
76use self::streams::WaitForUpstreamCapacity;
77use crate::buf_factory::BufFactory;
78use crate::http3::settings::Http3Settings;
79use crate::http3::H3AuditStats;
80use crate::metrics::Metrics;
81use crate::quic::HandshakeInfo;
82use crate::quic::QuicCommand;
83use crate::quic::QuicheConnection;
84use crate::ApplicationOverQuic;
85use crate::QuicResult;
86
87pub use self::client::ClientEventStream;
88pub use self::client::ClientH3Command;
89pub use self::client::ClientH3Controller;
90pub use self::client::ClientH3Driver;
91pub use self::client::ClientH3Event;
92pub use self::client::ClientRequestSender;
93pub use self::client::NewClientRequest;
94pub use self::server::IsInEarlyData;
95pub use self::server::RawPriorityValue;
96pub use self::server::ServerEventStream;
97pub use self::server::ServerH3Command;
98pub use self::server::ServerH3Controller;
99pub use self::server::ServerH3Driver;
100pub use self::server::ServerH3Event;
101
// The default priority for HTTP/3 responses if the application didn't provide
// one. Urgency 3 with the incremental flag set.
const DEFAULT_PRIO: h3::Priority = h3::Priority::new(3, true);

// For a stream use a channel with 16 entries, which works out to 16 * 64KB =
// 1MB of max buffered data.
// (NOTE(review): the 64KB figure presumably matches `BufFactory::MAX_BUF_SIZE`
// — confirm if that constant changes.)
#[cfg(not(any(test, debug_assertions)))]
const STREAM_CAPACITY: usize = 16;
#[cfg(any(test, debug_assertions))]
const STREAM_CAPACITY: usize = 1; // Set to 1 to stress write_pending under test conditions

// For *all* flows use a shared channel with 2048 entries, which works out
// to 3MB of max buffered data at 1500 bytes per datagram.
const FLOW_CAPACITY: usize = 2048;

/// Used by a local task to send [`OutboundFrame`]s to a peer on the
/// stream or flow associated with this channel.
pub type OutboundFrameSender = PollSender<OutboundFrame>;

/// Used internally to receive [`OutboundFrame`]s which should be sent to a peer
/// on the stream or flow associated with this channel.
type OutboundFrameStream = mpsc::Receiver<OutboundFrame>;

/// Used internally to send [`InboundFrame`]s (data) from the peer to a local
/// task on the stream or flow associated with this channel.
type InboundFrameSender = PollSender<InboundFrame>;

/// Used by a local task to receive [`InboundFrame`]s (data) on the stream or
/// flow associated with this channel.
pub type InboundFrameStream = mpsc::Receiver<InboundFrame>;
132
/// The error type used internally in [H3Driver].
///
/// Note that [`ApplicationOverQuic`] errors are not exposed to users at this
/// time. The type is public to document the failure modes in [H3Driver].
#[derive(Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum H3ConnectionError {
    /// The controller task was shut down and is no longer listening.
    ControllerWentAway,
    /// Other error at the connection, but not stream level.
    H3(h3::Error),
    /// Received a GOAWAY frame from the peer.
    GoAway,
    /// Received data for a stream that was closed or never opened.
    NonexistentStream,
    /// The server's post-accept timeout was hit.
    /// The timeout can be configured in [`Http3Settings`].
    PostAcceptTimeout,
}
152
153impl From<h3::Error> for H3ConnectionError {
154 fn from(err: h3::Error) -> Self {
155 H3ConnectionError::H3(err)
156 }
157}
158
159impl From<quiche::Error> for H3ConnectionError {
160 fn from(err: quiche::Error) -> Self {
161 H3ConnectionError::H3(h3::Error::TransportError(err))
162 }
163}
164
// Marker impl: `Debug` + `Display` satisfy the trait; no `source()` override.
impl Error for H3ConnectionError {}
166
167impl fmt::Display for H3ConnectionError {
168 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
169 let s: &dyn fmt::Display = match self {
170 Self::ControllerWentAway => &"controller went away",
171 Self::H3(e) => e,
172 Self::GoAway => &"goaway",
173 Self::NonexistentStream => &"nonexistent stream",
174 Self::PostAcceptTimeout => &"post accept timeout hit",
175 };
176
177 write!(f, "H3ConnectionError: {s}")
178 }
179}
180
/// Internal shorthand for results whose error type is [`H3ConnectionError`].
type H3ConnectionResult<T> = Result<T, H3ConnectionError>;
182
/// HTTP/3 headers that were received on a stream.
///
/// `recv` is used to read the message body, while `send` is used to transmit
/// data back to the peer.
pub struct IncomingH3Headers {
    /// Stream ID of the frame.
    pub stream_id: u64,
    /// The actual [`h3::Header`]s which were received.
    pub headers: Vec<h3::Header>,
    /// An [`OutboundFrameSender`] for streaming body data to the peer. For
    /// [ClientH3Driver], note that the request body can also be passed a
    /// cloned sender via [`NewClientRequest`].
    pub send: OutboundFrameSender,
    /// An [`InboundFrameStream`] of body data received from the peer.
    pub recv: InboundFrameStream,
    /// Whether there is a body associated with the incoming headers.
    /// (NOTE(review): the name suggests the opposite — that the stream FIN was
    /// already read, i.e. *no* body follows. Confirm against the hook that
    /// populates this field.)
    pub read_fin: bool,
    /// Handle to the [`H3AuditStats`] for the message's stream.
    pub h3_audit_stats: Arc<H3AuditStats>,
}
203
204impl fmt::Debug for IncomingH3Headers {
205 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
206 f.debug_struct("IncomingH3Headers")
207 .field("stream_id", &self.stream_id)
208 .field("headers", &self.headers)
209 .field("read_fin", &self.read_fin)
210 .field("h3_audit_stats", &self.h3_audit_stats)
211 .finish()
212 }
213}
214
/// [`H3Event`]s are produced by an [H3Driver] to describe HTTP/3 state updates.
///
/// Both [ServerH3Driver] and [ClientH3Driver] may extend this enum with
/// endpoint-specific variants. The events must be consumed by users of the
/// drivers, like a higher-level `Server` or `Client` controller.
#[derive(Debug)]
pub enum H3Event {
    /// A SETTINGS frame was received.
    IncomingSettings {
        /// Raw HTTP/3 setting pairs, in the order received from the peer.
        settings: Vec<(u64, u64)>,
    },

    /// A HEADERS frame was received on the given stream. This is either a
    /// request or a response depending on the perspective of the [`H3Event`]
    /// receiver.
    IncomingHeaders(IncomingH3Headers),

    /// A DATAGRAM flow was created and associated with the given `flow_id`.
    /// This event is fired before a HEADERS event for CONNECT[-UDP] requests.
    NewFlow {
        /// Flow ID of the new flow.
        flow_id: u64,
        /// An [`OutboundFrameSender`] for transmitting datagrams to the peer.
        send: OutboundFrameSender,
        /// An [`InboundFrameStream`] for receiving datagrams from the peer.
        recv: InboundFrameStream,
    },
    /// A RST_STREAM frame was seen on the given `stream_id`. The user of the
    /// driver should clean up any state allocated for this stream.
    ResetStream { stream_id: u64 },
    /// The connection has irrecoverably errored and is shutting down.
    ConnectionError(h3::Error),
    /// The connection has been shutdown, optionally due to an
    /// [`H3ConnectionError`].
    ConnectionShutdown(Option<H3ConnectionError>),
    /// Body data has been received over a stream.
    BodyBytesReceived {
        /// Stream ID of the body data.
        stream_id: u64,
        /// Number of bytes received.
        num_bytes: u64,
        /// Whether the stream is finished and won't yield any more data.
        fin: bool,
    },
    /// The stream has been closed. This is used to signal stream closures that
    /// don't result from RST_STREAM frames, unlike the
    /// [`H3Event::ResetStream`] variant.
    StreamClosed { stream_id: u64 },
}
265
266impl H3Event {
267 /// Generates an event from an applicable [`H3ConnectionError`].
268 fn from_error(err: &H3ConnectionError) -> Option<Self> {
269 Some(match err {
270 H3ConnectionError::H3(e) => Self::ConnectionError(*e),
271 H3ConnectionError::PostAcceptTimeout => Self::ConnectionShutdown(
272 Some(H3ConnectionError::PostAcceptTimeout),
273 ),
274 _ => return None,
275 })
276 }
277}
278
/// An [`OutboundFrame`] is a data frame that should be sent from a local task
/// to a peer over a [`quiche::h3::Connection`].
///
/// This is used, for example, to send response body data to a peer, or proxied
/// UDP datagrams.
#[derive(Debug)]
pub enum OutboundFrame {
    /// Response headers to be sent to the peer, with optional priority.
    Headers(Vec<h3::Header>, Option<quiche::h3::Priority>),
    /// Response body/CONNECT downstream data plus FIN flag.
    Body(Bytes, bool),
    /// CONNECT-UDP (DATAGRAM) downstream data plus flow ID.
    Datagram(DgramBuffer, u64),
    /// Close the stream with trailers, with optional priority.
    Trailers(Vec<h3::Header>, Option<quiche::h3::Priority>),
    /// An error encountered when serving the request. Stream should be closed.
    PeerStreamError,
    /// DATAGRAM flow explicitly closed.
    FlowShutdown { flow_id: u64, stream_id: u64 },
}
299
/// An [`InboundFrame`] is a data frame that was received from the peer over a
/// [`quiche::h3::Connection`]. This is used by peers to send body or datagrams
/// to the local task.
#[derive(Debug)]
pub enum InboundFrame {
    /// Request body/CONNECT upstream data plus FIN flag.
    Body(BytesMut, bool),
    /// CONNECT-UDP (DATAGRAM) upstream data.
    Datagram(DgramBuffer),
}
310
/// A ready-made [`ApplicationOverQuic`] which can handle HTTP/3 and MASQUE.
/// Depending on the `DriverHooks` in use, it powers either a client or a
/// server.
///
/// Use the [ClientH3Driver] and [ServerH3Driver] aliases to access the
/// respective driver types. The driver is passed into an I/O loop and
/// communicates with the driver's user (e.g., an HTTP client or a server) via
/// its associated [H3Controller]. The controller allows the application to both
/// listen for [`H3Event`]s of note and send [`H3Command`]s into the I/O loop.
pub struct H3Driver<H: DriverHooks> {
    /// Configuration used to initialize `conn`. Created from [`Http3Settings`]
    /// in the constructor.
    h3_config: h3::Config,
    /// The underlying HTTP/3 connection. Initialized in
    /// `ApplicationOverQuic::on_conn_established`.
    conn: Option<h3::Connection>,
    /// State required by the client/server hooks.
    hooks: H,
    /// Sends [`H3Event`]s to the [H3Controller] paired with this driver.
    h3_event_sender: mpsc::UnboundedSender<H::Event>,
    /// Receives [`H3Command`]s from the [H3Controller] paired with this driver.
    cmd_recv: mpsc::UnboundedReceiver<H::Command>,
    /// A sender that feeds back into `cmd_recv`. Used by hooks that need to
    /// re-queue commands (e.g. retrying blocked requests) without access to
    /// the [H3Controller]'s copy of the sender.
    cmd_sender: mpsc::UnboundedSender<H::Command>,

    /// A map of stream IDs to their [StreamCtx]. This is mainly used to
    /// retrieve the internal Tokio channels associated with the stream.
    stream_map: BTreeMap<u64, StreamCtx>,
    /// A map of flow IDs to their [FlowCtx]. This is mainly used to retrieve
    /// the internal Tokio channels associated with the flow.
    flow_map: BTreeMap<u64, FlowCtx>,
    /// Set of [`WaitForStream`] futures. A stream is added to this set if
    /// we need to send to it and its channel is at capacity, or if we need
    /// data from its channel and the channel is empty.
    waiting_streams: FuturesUnordered<WaitForStream>,

    /// Receives [`OutboundFrame`]s from all datagram flows on the connection.
    dgram_recv: OutboundFrameStream,
    /// Keeps the datagram channel open such that datagram flows can be created.
    dgram_send: OutboundFrameSender,
    /// A buffer to receive H3 body data from quiche. We initialize a large
    /// buffer and then `split()` off filled parts until we need to reallocate.
    body_recv_buf: bytes::buf::Limit<BytesMut>,

    /// The buffer used to interact with the underlying IoWorker.
    io_worker_buf: Vec<u8>,
    /// The maximum HTTP/3 stream ID seen on this connection.
    max_stream_seen: u64,

    /// Tracks whether we have forwarded the HTTP/3 SETTINGS frame
    /// to the [H3Controller] once.
    settings_received_and_forwarded: bool,
}
366
367impl<H: DriverHooks> H3Driver<H> {
368 /// Builds a new [H3Driver] and an associated [H3Controller].
369 ///
370 /// The driver should then be passed to
371 /// [`InitialQuicConnection`](crate::InitialQuicConnection)'s `start`
372 /// method.
373 pub fn new(http3_settings: Http3Settings) -> (Self, H3Controller<H>) {
374 let (dgram_send, dgram_recv) = mpsc::channel(FLOW_CAPACITY);
375 let (cmd_sender, cmd_recv) = mpsc::unbounded_channel();
376 let (h3_event_sender, h3_event_recv) = mpsc::unbounded_channel();
377
378 (
379 H3Driver {
380 h3_config: (&http3_settings).into(),
381 conn: None,
382 hooks: H::new(&http3_settings),
383 h3_event_sender,
384 cmd_recv,
385 cmd_sender: cmd_sender.clone(),
386
387 stream_map: BTreeMap::new(),
388 flow_map: BTreeMap::new(),
389
390 dgram_recv,
391 dgram_send: PollSender::new(dgram_send),
392 max_stream_seen: 0,
393 body_recv_buf: BytesMut::with_capacity(BufFactory::MAX_BUF_SIZE)
394 .limit(BufFactory::MAX_BUF_SIZE),
395 io_worker_buf: vec![0u8; BufFactory::MAX_BUF_SIZE],
396
397 waiting_streams: FuturesUnordered::new(),
398
399 settings_received_and_forwarded: false,
400 },
401 H3Controller {
402 cmd_sender,
403 h3_event_recv: Some(h3_event_recv),
404 },
405 )
406 }
407
    /// Returns a sender that feeds back into this driver's own `cmd_recv`.
    ///
    /// Hooks that need to re-queue commands (e.g. retrying a request that
    /// was temporarily blocked) can use this sender without needing access
    /// to the paired [H3Controller].
    pub(crate) fn self_cmd_sender(&self) -> &mpsc::UnboundedSender<H::Command> {
        &self.cmd_sender
    }
416
417 /// Retrieve the [FlowCtx] associated with the given `flow_id`. If no
418 /// context is found, a new one will be created.
419 fn get_or_insert_flow(
420 &mut self, flow_id: u64,
421 ) -> H3ConnectionResult<&mut FlowCtx> {
422 use std::collections::btree_map::Entry;
423 Ok(match self.flow_map.entry(flow_id) {
424 Entry::Vacant(e) => {
425 // This is a datagram for a new flow we haven't seen before
426 let (flow, recv) = FlowCtx::new(FLOW_CAPACITY);
427 let flow_req = H3Event::NewFlow {
428 flow_id,
429 recv,
430 send: self.dgram_send.clone(),
431 };
432 self.h3_event_sender
433 .send(flow_req.into())
434 .map_err(|_| H3ConnectionError::ControllerWentAway)?;
435 e.insert(flow)
436 },
437 Entry::Occupied(e) => e.into_mut(),
438 })
439 }
440
441 /// Adds a [StreamCtx] to the stream map with the given `stream_id`.
442 fn insert_stream(&mut self, stream_id: u64, ctx: StreamCtx) {
443 self.stream_map.insert(stream_id, ctx);
444 self.max_stream_seen = self.max_stream_seen.max(stream_id);
445 }
446
    /// Fetches body chunks from the [`quiche::h3::Connection`] and forwards
    /// them to the stream's associated [`InboundFrameStream`].
    ///
    /// Reads in a loop until the stream has no more data, the channel to the
    /// application is full (the stream is then parked in `waiting_streams`),
    /// the stream was reset, or a fin was delivered. May remove the stream
    /// from the stream map via `cleanup_stream` once both directions are done.
    fn process_h3_data(
        &mut self, qconn: &mut QuicheConnection, stream_id: u64,
    ) -> H3ConnectionResult<()> {
        // Split self borrow between conn and stream_map
        let conn = self.conn.as_mut().ok_or(Self::connection_not_present())?;
        let ctx = self
            .stream_map
            .get_mut(&stream_id)
            .ok_or(H3ConnectionError::NonexistentStream)?;

        // Local summary of how the read loop ended; acted upon below.
        enum StreamStatus {
            // No more work; `close` requests full stream cleanup.
            Done { close: bool },
            // Peer reset the stream with the given wire error code.
            Reset { wire_err_code: u64 },
            // Out of channel capacity while data is still pending; park the
            // stream until the application drains the channel.
            Blocked,
        }

        let status = loop {
            let Some(sender) = ctx.send.as_ref().and_then(PollSender::get_ref)
            else {
                // already waiting for capacity
                break StreamStatus::Done { close: false };
            };

            // Reserve a slot in the channel up front so the chunk we read
            // from quiche below can always be delivered.
            let try_reserve_result = sender.try_reserve();
            let permit = match try_reserve_result {
                Ok(permit) => permit,
                Err(TrySendError::Closed(())) => {
                    // The channel has closed before we delivered a fin or reset
                    // to the application.
                    if !ctx.fin_or_reset_recv &&
                        ctx.associated_dgram_flow_id.is_none()
                    // The channel might be closed if the stream was used to
                    // initiate a datagram exchange.
                    // TODO: ideally, the application would still shut down the
                    // stream properly. Once applications code
                    // is fixed, we can remove this check.
                    {
                        let err = h3::WireErrorCode::RequestCancelled as u64;
                        let _ = qconn.stream_shutdown(
                            stream_id,
                            quiche::Shutdown::Read,
                            err,
                        );
                        drop(try_reserve_result); // needed to drop the borrow on ctx.
                        ctx.handle_sent_stop_sending(err);
                        // TODO: should we send an H3Event event to
                        // h3_event_sender? We can only get here if the app
                        // actively closed or dropped
                        // the channel so any event we send would be more for
                        // logging or auditing
                    }
                    break StreamStatus::Done {
                        close: ctx.both_directions_done(),
                    };
                },
                Err(TrySendError::Full(())) => {
                    // Only park the stream if there is actually more to
                    // deliver; otherwise just stop for now.
                    if ctx.fin_or_reset_recv || qconn.stream_readable(stream_id) {
                        break StreamStatus::Blocked;
                    }
                    break StreamStatus::Done { close: false };
                },
            };

            if ctx.fin_or_reset_recv {
                // Signal end-of-body to upstream
                permit.send(InboundFrame::Body(Default::default(), true));
                break StreamStatus::Done {
                    close: ctx.fin_or_reset_sent,
                };
            }

            // NOTE: `self.body_recv_buf` is `Limit<BytesMut>` so
            // `has_remaining_mut()` will indicate if the buffer
            // has space available until the *limit* is
            // reached. (A plain `BytesMut` can reallocate and would always
            // return true)
            if !self.body_recv_buf.has_remaining_mut() {
                self.body_recv_buf =
                    BytesMut::with_capacity(BufFactory::MAX_BUF_SIZE)
                        .limit(BufFactory::MAX_BUF_SIZE)
            };
            match conn.recv_body_buf(qconn, stream_id, &mut self.body_recv_buf) {
                Ok(n) => {
                    ctx.audit_stats.add_downstream_bytes_recvd(n as u64);
                    let event = H3Event::BodyBytesReceived {
                        stream_id,
                        num_bytes: n as u64,
                        fin: false,
                    };
                    let _ = self.h3_event_sender.send(event.into());
                    // Take the filled part, leave the remaining capacity
                    let filled_body = self.body_recv_buf.get_mut().split();
                    // Sanity check: the remaining spare capacity should equal
                    // the limit.
                    debug_assert_eq!(
                        self.body_recv_buf.get_mut().spare_capacity_mut().len(),
                        self.body_recv_buf.remaining_mut()
                    );
                    permit.send(InboundFrame::Body(filled_body, false));
                },
                Err(h3::Error::Done) =>
                    break StreamStatus::Done { close: false },
                Err(h3::Error::TransportError(quiche::Error::StreamReset(
                    code,
                ))) => {
                    break StreamStatus::Reset {
                        wire_err_code: code,
                    };
                },
                Err(_) => break StreamStatus::Done { close: true },
            }
        };

        match status {
            StreamStatus::Done { close } => {
                if close {
                    return self.cleanup_stream(qconn, stream_id);
                }

                // The QUIC stream is finished, manually invoke `process_h3_fin`
                // in case `h3::poll()` is never called again.
                //
                // Note that this case will not conflict with StreamStatus::Done
                // being returned due to the body channel being
                // blocked. qconn.stream_finished() will guarantee
                // that we've fully parsed the body as it only returns true
                // if we've seen a Fin for the read half of the stream.
                if !ctx.fin_or_reset_recv && qconn.stream_finished(stream_id) {
                    return self.process_h3_fin(qconn, stream_id);
                }
            },
            StreamStatus::Reset { wire_err_code } => {
                debug_assert!(ctx.send.is_some());
                ctx.handle_recvd_reset(wire_err_code);
                self.h3_event_sender
                    .send(H3Event::ResetStream { stream_id }.into())
                    .map_err(|_| H3ConnectionError::ControllerWentAway)?;
                if ctx.both_directions_done() {
                    return self.cleanup_stream(qconn, stream_id);
                }
            },
            StreamStatus::Blocked => {
                self.waiting_streams.push(ctx.wait_for_send(stream_id));
            },
        }

        Ok(())
    }
597
598 /// Processes an end-of-stream event from the [`quiche::h3::Connection`].
599 fn process_h3_fin(
600 &mut self, qconn: &mut QuicheConnection, stream_id: u64,
601 ) -> H3ConnectionResult<()> {
602 let ctx = self
603 .stream_map
604 .get_mut(&stream_id)
605 .filter(|c| !c.fin_or_reset_recv);
606 let Some(ctx) = ctx else {
607 // Stream is already finished, nothing to do
608 return Ok(());
609 };
610
611 ctx.fin_or_reset_recv = true;
612 ctx.audit_stats
613 .set_recvd_stream_fin(StreamClosureKind::Explicit);
614
615 // It's important to send this H3Event before process_h3_data so that
616 // a server can (potentially) generate the control response before the
617 // corresponding receiver drops.
618 let event = H3Event::BodyBytesReceived {
619 stream_id,
620 num_bytes: 0,
621 fin: true,
622 };
623 let _ = self.h3_event_sender.send(event.into());
624
625 // Communicate fin to upstream. Since `ctx.fin_recv` is true now,
626 // there can't be a recursive loop.
627 self.process_h3_data(qconn, stream_id)
628 }
629
    /// Processes a single [`quiche::h3::Event`] received from the underlying
    /// [`quiche::h3::Connection`]. Some events are dispatched to helper
    /// methods.
    fn process_read_event(
        &mut self, qconn: &mut QuicheConnection, stream_id: u64, event: h3::Event,
    ) -> H3ConnectionResult<()> {
        // SETTINGS may become visible at any point; forward them once.
        self.forward_settings()?;

        match event {
            // Requests/responses are exclusively handled by hooks.
            h3::Event::Headers { list, more_frames } =>
                H::headers_received(self, qconn, InboundHeaders {
                    stream_id,
                    headers: list,
                    has_body: more_frames,
                }),

            h3::Event::Data => self.process_h3_data(qconn, stream_id),
            h3::Event::Finished => self.process_h3_fin(qconn, stream_id),

            h3::Event::Reset(code) => {
                if let Some(ctx) = self.stream_map.get_mut(&stream_id) {
                    ctx.handle_recvd_reset(code);
                    // See if we are waiting on this stream and close the channel
                    // if we are. If we are not waiting, `handle_recvd_reset()`
                    // will have taken care of closing.
                    for pending in self.waiting_streams.iter_mut() {
                        match pending {
                            WaitForStream::Upstream(
                                WaitForUpstreamCapacity {
                                    stream_id: id,
                                    chan: Some(chan),
                                },
                            ) if stream_id == *id => {
                                chan.close();
                            },
                            _ => {},
                        }
                    }

                    self.h3_event_sender
                        .send(H3Event::ResetStream { stream_id }.into())
                        .map_err(|_| H3ConnectionError::ControllerWentAway)?;
                    if ctx.both_directions_done() {
                        return self.cleanup_stream(qconn, stream_id);
                    }
                }

                // TODO: if we don't have the stream in our map: should we
                // send the H3Event::ResetStream?
                Ok(())
            },

            // Priority updates are currently ignored.
            h3::Event::PriorityUpdate => Ok(()),
            // GOAWAY tears down the whole connection.
            h3::Event::GoAway => Err(H3ConnectionError::GoAway),
        }
    }
687
688 /// The SETTINGS frame can be received at any point, so we
689 /// need to check `peer_settings_raw` to decide if we've received it.
690 ///
691 /// Settings should only be sent once, so we generate a single event
692 /// when `peer_settings_raw` transitions from None to Some.
693 fn forward_settings(&mut self) -> H3ConnectionResult<()> {
694 if self.settings_received_and_forwarded {
695 return Ok(());
696 }
697
698 // capture the peer settings and forward it
699 if let Some(settings) = self.conn_mut()?.peer_settings_raw() {
700 let incoming_settings = H3Event::IncomingSettings {
701 settings: settings.to_vec(),
702 };
703
704 self.h3_event_sender
705 .send(incoming_settings.into())
706 .map_err(|_| H3ConnectionError::ControllerWentAway)?;
707
708 self.settings_received_and_forwarded = true;
709 }
710 Ok(())
711 }
712
    /// Send an individual frame to the underlying [`quiche::h3::Connection`] to
    /// be flushed at a later time.
    ///
    /// `Self::process_writes` will iterate over all writable streams and call
    /// this method in a loop for each stream to send all writable packets.
    ///
    /// Returns `Err(h3::Error::StreamBlocked)` when the transport cannot
    /// currently accept the frame (it stays queued in `ctx.queued_frame`),
    /// and a `TransportError` from `on_fin_sent` when both directions are
    /// done and the caller should clean up the stream.
    fn process_write_frame(
        conn: &mut h3::Connection, qconn: &mut QuicheConnection,
        ctx: &mut StreamCtx,
    ) -> h3::Result<()> {
        // No queued frame means there is nothing to flush for this stream.
        let Some(frame) = &mut ctx.queued_frame else {
            return Ok(());
        };

        let audit_stats = &ctx.audit_stats;
        let stream_id = audit_stats.stream_id();

        match frame {
            OutboundFrame::Headers(headers, priority) => {
                let prio = priority.as_ref().unwrap_or(&DEFAULT_PRIO);

                let res = if ctx.initial_headers_sent {
                    // Initial headers were already sent, send additional
                    // headers now.
                    conn.send_additional_headers_with_priority(
                        qconn, stream_id, headers, prio, false, false,
                    )
                } else {
                    // Send initial headers.
                    conn.send_response_with_priority(
                        qconn, stream_id, headers, prio, false,
                    )
                    .inspect(|_| ctx.initial_headers_sent = true)
                };

                // Track how long a blocked HEADERS frame takes to flush:
                // record the first failure time, then report the elapsed
                // duration once the flush finally succeeds.
                if let Err(h3::Error::StreamBlocked) = res {
                    ctx.first_full_headers_flush_fail_time
                        .get_or_insert(Instant::now());
                }

                if res.is_ok() {
                    if let Some(first) =
                        ctx.first_full_headers_flush_fail_time.take()
                    {
                        ctx.audit_stats.add_header_flush_duration(
                            Instant::now().duration_since(first),
                        );
                    }
                }

                res
            },

            OutboundFrame::Body(body, fin) => {
                let len = body.len();
                if len == 0 && !*fin {
                    // quiche doesn't allow sending an empty body when the fin
                    // flag is not set
                    return Ok(());
                }
                if *fin {
                    // If this is the last body frame, drop the receiver in the
                    // stream map to signal that we shouldn't receive any more
                    // frames. NOTE: we can't use `mpsc::Receiver::close()`
                    // due to an inconsistency in how tokio handles reading
                    // from a closed mpsc channel https://github.com/tokio-rs/tokio/issues/7631
                    ctx.recv = None;
                }
                let n = conn.send_body_zc(qconn, stream_id, body, *fin)?;

                audit_stats.add_downstream_bytes_sent(n as _);
                if n != len {
                    // Couldn't write the entire body, `send_body_zc` will
                    // have trimmed `body` accordingly. The driver keeps
                    // the remainder of the body to send in the future.
                    debug_assert_eq!(
                        n + body.len(),
                        len,
                        "send_body_zc() should have trimmed body but did not"
                    );
                    Err(h3::Error::StreamBlocked)
                } else {
                    if *fin {
                        Self::on_fin_sent(ctx)?;
                    }
                    Ok(())
                }
            },

            OutboundFrame::Trailers(headers, priority) => {
                let prio = priority.as_ref().unwrap_or(&DEFAULT_PRIO);

                // trailers always set fin=true
                let res = conn.send_additional_headers_with_priority(
                    qconn, stream_id, headers, prio, true, true,
                );

                if res.is_ok() {
                    Self::on_fin_sent(ctx)?;
                }
                res
            },

            OutboundFrame::PeerStreamError => Err(h3::Error::MessageError),

            OutboundFrame::FlowShutdown { .. } => {
                unreachable!("Only flows send shutdowns")
            },

            OutboundFrame::Datagram(..) => {
                unreachable!("Only flows send datagrams")
            },
        }
    }
826
827 fn on_fin_sent(ctx: &mut StreamCtx) -> h3::Result<()> {
828 ctx.recv = None;
829 ctx.fin_or_reset_sent = true;
830 ctx.audit_stats
831 .set_sent_stream_fin(StreamClosureKind::Explicit);
832 if ctx.fin_or_reset_recv {
833 // Return a TransportError to trigger stream cleanup
834 // instead of h3::Error::Done
835 Err(h3::Error::TransportError(quiche::Error::Done))
836 } else {
837 Ok(())
838 }
839 }
840
841 /// Resumes reads or writes to the connection when a stream channel becomes
842 /// unblocked.
843 ///
844 /// If we were waiting for more data from a channel, we resume writing to
845 /// the connection. Otherwise, we were blocked on channel capacity and
846 /// continue reading from the connection. `Upstream` in this context is
847 /// the consumer of the stream.
848 fn upstream_ready(
849 &mut self, qconn: &mut QuicheConnection, ready: StreamReady,
850 ) -> H3ConnectionResult<()> {
851 match ready {
852 StreamReady::Downstream(r) => self.upstream_read_ready(qconn, r),
853 StreamReady::Upstream(r) => self.upstream_write_ready(qconn, r),
854 }
855 }
856
857 fn upstream_read_ready(
858 &mut self, qconn: &mut QuicheConnection,
859 read_ready: ReceivedDownstreamData,
860 ) -> H3ConnectionResult<()> {
861 let ReceivedDownstreamData {
862 stream_id,
863 chan,
864 data,
865 } = read_ready;
866
867 match self.stream_map.get_mut(&stream_id) {
868 None => Ok(()),
869 Some(stream) => {
870 stream.recv = Some(chan);
871 stream.queued_frame = data;
872 self.process_writable_stream(qconn, stream_id)
873 },
874 }
875 }
876
877 fn upstream_write_ready(
878 &mut self, qconn: &mut QuicheConnection,
879 write_ready: HaveUpstreamCapacity,
880 ) -> H3ConnectionResult<()> {
881 let HaveUpstreamCapacity {
882 stream_id,
883 mut chan,
884 } = write_ready;
885
886 match self.stream_map.get_mut(&stream_id) {
887 None => Ok(()),
888 Some(stream) => {
889 chan.abort_send(); // Have to do it to release the associated permit
890 stream.send = Some(chan);
891 self.process_h3_data(qconn, stream_id)
892 },
893 }
894 }
895
896 /// Processes all queued outbound datagrams from the `dgram_recv` channel.
897 fn dgram_ready(
898 &mut self, qconn: &mut QuicheConnection, frame: OutboundFrame,
899 ) -> H3ConnectionResult<()> {
900 let mut frame = Ok(frame);
901
902 loop {
903 match frame {
904 Ok(OutboundFrame::Datagram(dgram, flow_id)) => {
905 // Drop datagrams if there is no capacity
906 let _ = datagram::send_h3_dgram(qconn, flow_id, dgram);
907 },
908 Ok(OutboundFrame::FlowShutdown { flow_id, stream_id }) => {
909 self.shutdown_stream(
910 qconn,
911 stream_id,
912 StreamShutdown::Both {
913 read_error_code: WireErrorCode::NoError as u64,
914 write_error_code: WireErrorCode::NoError as u64,
915 },
916 )?;
917 self.flow_map.remove(&flow_id);
918 break;
919 },
920 Ok(_) => unreachable!("Flows can't send frame of other types"),
921 Err(TryRecvError::Empty) => break,
922 Err(TryRecvError::Disconnected) =>
923 return Err(H3ConnectionError::ControllerWentAway),
924 }
925
926 frame = self.dgram_recv.try_recv();
927 }
928
929 Ok(())
930 }
931
932 /// Return a mutable reference to the driver's HTTP/3 connection.
933 ///
934 /// If the connection doesn't exist yet, this function returns
935 /// a `Self::connection_not_present()` error.
936 fn conn_mut(&mut self) -> H3ConnectionResult<&mut h3::Connection> {
937 self.conn.as_mut().ok_or(Self::connection_not_present())
938 }
939
    /// Alias for [`quiche::Error::TlsFail`], which is used in the case where
    /// this driver doesn't have an established HTTP/3 connection attached
    /// to it yet.
    ///
    /// NOTE(review): `TlsFail` appears to be reused here as a generic
    /// "no connection yet" marker rather than indicating an actual TLS
    /// failure — confirm before matching on this error elsewhere.
    const fn connection_not_present() -> H3ConnectionError {
        H3ConnectionError::H3(h3::Error::TransportError(quiche::Error::TlsFail))
    }
946
947 /// Cleans up internal state for the indicated HTTP/3 stream.
948 ///
949 /// This function removes the stream from the stream map, closes any pending
950 /// futures, removes associated DATAGRAM flows, and sends a
951 /// [`H3Event::StreamClosed`] event (for servers).
952 fn cleanup_stream(
953 &mut self, qconn: &mut QuicheConnection, stream_id: u64,
954 ) -> H3ConnectionResult<()> {
955 let Some(stream_ctx) = self.stream_map.remove(&stream_id) else {
956 return Ok(());
957 };
958
959 // Find if the stream also has any pending futures associated with it
960 for pending in self.waiting_streams.iter_mut() {
961 match pending {
962 WaitForStream::Downstream(WaitForDownstreamData {
963 stream_id: id,
964 chan: Some(chan),
965 }) if stream_id == *id => {
966 chan.close();
967 },
968 WaitForStream::Upstream(WaitForUpstreamCapacity {
969 stream_id: id,
970 chan: Some(chan),
971 }) if stream_id == *id => {
972 chan.close();
973 },
974 _ => {},
975 }
976 }
977
978 // Close any DATAGRAM-proxying channels when we close the stream, if they
979 // exist
980 if let Some(mapped_flow_id) = stream_ctx.associated_dgram_flow_id {
981 self.flow_map.remove(&mapped_flow_id);
982 }
983
984 if qconn.is_server() {
985 // Signal the server to remove the stream from its map
986 let _ = self
987 .h3_event_sender
988 .send(H3Event::StreamClosed { stream_id }.into());
989 }
990
991 Ok(())
992 }
993
994 /// Shuts down the indicated HTTP/3 stream by sending frames and cleaning
995 /// up then cleans up internal state by calling
996 /// [`Self::cleanup_stream`].
997 fn shutdown_stream(
998 &mut self, qconn: &mut QuicheConnection, stream_id: u64,
999 shutdown: StreamShutdown,
1000 ) -> H3ConnectionResult<()> {
1001 let Some(stream_ctx) = self.stream_map.get(&stream_id) else {
1002 return Ok(());
1003 };
1004
1005 let audit_stats = &stream_ctx.audit_stats;
1006
1007 match shutdown {
1008 StreamShutdown::Read { error_code } => {
1009 audit_stats.set_sent_stop_sending_error_code(error_code as _);
1010 let _ = qconn.stream_shutdown(
1011 stream_id,
1012 quiche::Shutdown::Read,
1013 error_code,
1014 );
1015 },
1016 StreamShutdown::Write { error_code } => {
1017 audit_stats.set_sent_reset_stream_error_code(error_code as _);
1018 let _ = qconn.stream_shutdown(
1019 stream_id,
1020 quiche::Shutdown::Write,
1021 error_code,
1022 );
1023 },
1024 StreamShutdown::Both {
1025 read_error_code,
1026 write_error_code,
1027 } => {
1028 audit_stats
1029 .set_sent_stop_sending_error_code(read_error_code as _);
1030 let _ = qconn.stream_shutdown(
1031 stream_id,
1032 quiche::Shutdown::Read,
1033 read_error_code,
1034 );
1035 audit_stats
1036 .set_sent_reset_stream_error_code(write_error_code as _);
1037 let _ = qconn.stream_shutdown(
1038 stream_id,
1039 quiche::Shutdown::Write,
1040 write_error_code,
1041 );
1042 },
1043 }
1044
1045 self.cleanup_stream(qconn, stream_id)
1046 }
1047
1048 /// Handles a regular [`H3Command`]. May be called internally by
1049 /// [DriverHooks] for non-endpoint-specific [`H3Command`]s.
1050 fn handle_core_command(
1051 &mut self, qconn: &mut QuicheConnection, cmd: H3Command,
1052 ) -> H3ConnectionResult<()> {
1053 match cmd {
1054 H3Command::QuicCmd(cmd) => cmd.execute(qconn),
1055 H3Command::GoAway => {
1056 let max_id = self.max_stream_seen;
1057 self.conn_mut()
1058 .expect("connection should be established")
1059 .send_goaway(qconn, max_id)?;
1060 },
1061 H3Command::ShutdownStream {
1062 stream_id,
1063 shutdown,
1064 } => {
1065 self.shutdown_stream(qconn, stream_id, shutdown)?;
1066 },
1067 }
1068 Ok(())
1069 }
1070}
1071
impl<H: DriverHooks> H3Driver<H> {
    /// Reads all buffered datagrams out of `qconn` and distributes them to
    /// their flow channels.
    ///
    /// Returns once quiche reports no further datagrams
    /// ([`quiche::Error::Done`]); any other transport error is propagated.
    fn process_available_dgrams(
        &mut self, qconn: &mut QuicheConnection,
    ) -> H3ConnectionResult<()> {
        loop {
            match datagram::receive_h3_dgram(qconn) {
                Ok((flow_id, dgram)) => {
                    // Forwarding is best-effort; see `send_best_effort` for
                    // the drop policy when the flow channel has no capacity.
                    self.get_or_insert_flow(flow_id)?.send_best_effort(dgram);
                },
                Err(quiche::Error::Done) => return Ok(()),
                Err(err) => return Err(H3ConnectionError::from(err)),
            }
        }
    }

    /// Flushes any queued-up frames for `stream_id` into `qconn` until either
    /// there is no more capacity in `qconn` or no more frames to send.
    ///
    /// Write errors either tear down the stream (`MessageError`, unexpected
    /// errors) or record a received stop/invalid-state condition before
    /// cleaning up; see the match arms below.
    fn process_writable_stream(
        &mut self, qconn: &mut QuicheConnection, stream_id: u64,
    ) -> H3ConnectionResult<()> {
        // Split self borrow between conn and stream_map
        let conn = self.conn.as_mut().ok_or(Self::connection_not_present())?;
        let Some(ctx) = self.stream_map.get_mut(&stream_id) else {
            return Ok(()); // Unknown stream_id
        };

        loop {
            // Process each writable frame, queue the next frame for processing
            // and shut down any errored streams.
            match Self::process_write_frame(conn, qconn, ctx) {
                Ok(()) => ctx.queued_frame = None,
                // Stream blocked or nothing to write right now; retry when
                // quiche reports the stream writable again.
                Err(h3::Error::StreamBlocked | h3::Error::Done) => break,
                // Malformed message: reset both directions with
                // `MessageError` and drop the stream state.
                Err(h3::Error::MessageError) => {
                    return self.shutdown_stream(
                        qconn,
                        stream_id,
                        StreamShutdown::Both {
                            read_error_code: WireErrorCode::MessageError as u64,
                            write_error_code: WireErrorCode::MessageError as u64,
                        },
                    );
                },
                Err(h3::Error::TransportError(quiche::Error::StreamStopped(
                    e,
                ))) => {
                    // The peer stopped this stream; record it and only clean
                    // up once both directions are finished.
                    ctx.handle_recvd_stop_sending(e);
                    if ctx.both_directions_done() {
                        return self.cleanup_stream(qconn, stream_id);
                    } else {
                        return Ok(());
                    }
                },
                Err(h3::Error::TransportError(
                    quiche::Error::InvalidStreamState(stream),
                )) => {
                    // quiche names the offending stream; clean up that one
                    // (it may differ from `stream_id`).
                    return self.cleanup_stream(qconn, stream);
                },
                Err(_) => {
                    // Any other error: drop all local state for the stream.
                    return self.cleanup_stream(qconn, stream_id);
                },
            }

            let Some(recv) = ctx.recv.as_mut() else {
                // This stream is already waiting for data or we wrote a fin and
                // closed the channel.
                debug_assert!(
                    ctx.queued_frame.is_none(),
                    "We MUST NOT have a queued frame if we are already waiting on
                     more data from the channel"
                );
                return Ok(());
            };

            // Attempt to queue the next frame for processing. The corresponding
            // sender is created at the same time as the `StreamCtx`
            // and ultimately ends up in an `H3Body`. The body then
            // determines which frames to send to the peer via
            // this processing loop.
            match recv.try_recv() {
                Ok(frame) => ctx.queued_frame = Some(frame),
                Err(TryRecvError::Disconnected) => {
                    if !ctx.fin_or_reset_sent &&
                        ctx.associated_dgram_flow_id.is_none()
                    // The channel might be closed if the stream was used to
                    // initiate a datagram exchange.
                    // TODO: ideally, the application would still shut down the
                    // stream properly. Once applications code
                    // is fixed, we can remove this check.
                    {
                        // The channel closed without having written a fin. Send a
                        // RESET_STREAM to indicate we won't be writing anything
                        // else
                        let err = h3::WireErrorCode::RequestCancelled as u64;
                        let _ = qconn.stream_shutdown(
                            stream_id,
                            quiche::Shutdown::Write,
                            err,
                        );
                        ctx.handle_sent_reset(err);
                        if ctx.both_directions_done() {
                            return self.cleanup_stream(qconn, stream_id);
                        }
                    }
                    break;
                },
                Err(TryRecvError::Empty) => {
                    // No frame ready yet: park a future that resolves when
                    // the channel has data, and stop writing for now.
                    self.waiting_streams.push(ctx.wait_for_recv(stream_id));
                    break;
                },
            }
        }

        Ok(())
    }

    /// Tests `qconn` for either a local or peer error and increments
    /// the associated HTTP/3 or QUIC error counter.
    ///
    /// Does nothing when neither a local nor a peer error is recorded on the
    /// connection.
    fn record_quiche_error(qconn: &mut QuicheConnection, metrics: &impl Metrics) {
        // split metrics between local/peer and QUIC/HTTP/3 level errors
        if let Some(err) = qconn.local_error() {
            // `is_app` distinguishes application (HTTP/3) close codes from
            // transport (QUIC) close codes.
            if err.is_app {
                metrics.local_h3_conn_close_error_count(err.error_code.into())
            } else {
                metrics.local_quic_conn_close_error_count(err.error_code.into())
            }
            .inc();
        } else if let Some(err) = qconn.peer_error() {
            if err.is_app {
                metrics.peer_h3_conn_close_error_count(err.error_code.into())
            } else {
                metrics.peer_quic_conn_close_error_count(err.error_code.into())
            }
            .inc();
        }
    }
}
1210
impl<H: DriverHooks> ApplicationOverQuic for H3Driver<H> {
    fn on_conn_established(
        &mut self, quiche_conn: &mut QuicheConnection,
        handshake_info: &HandshakeInfo,
    ) -> QuicResult<()> {
        // Create the HTTP/3 layer on top of the now-established QUIC
        // transport, then run the endpoint-specific setup hook.
        let conn = h3::Connection::with_transport(quiche_conn, &self.h3_config)?;
        self.conn = Some(conn);

        H::conn_established(self, quiche_conn, handshake_info)?;
        Ok(())
    }

    #[inline]
    fn should_act(&self) -> bool {
        // The driver only acts once the HTTP/3 connection exists.
        self.conn.is_some()
    }

    #[inline]
    fn buffer(&mut self) -> &mut [u8] {
        &mut self.io_worker_buf
    }

    /// Poll the underlying [`quiche::h3::Connection`] for
    /// [`quiche::h3::Event`]s and DATAGRAMs, delegating processing to
    /// `Self::process_read_event`.
    ///
    /// If a DATAGRAM is found, it is sent to the receiver on its channel.
    fn process_reads(&mut self, qconn: &mut QuicheConnection) -> QuicResult<()> {
        loop {
            match self.conn_mut()?.poll(qconn) {
                Ok((stream_id, event)) =>
                    self.process_read_event(qconn, stream_id, event)?,
                Err(h3::Error::Done) => break,
                Err(err) => {
                    // Don't bubble error up, instead keep the worker loop going
                    // until quiche reports the connection is
                    // closed.
                    log::debug!("connection closed due to h3 protocol error"; "error"=>?err);
                    return Ok(());
                },
            };
        }

        // After the H3 events are drained, distribute buffered DATAGRAMs.
        self.process_available_dgrams(qconn)?;
        Ok(())
    }

    /// Write as much data as possible into the [`quiche::h3::Connection`] from
    /// all sources. This will attempt to write any queued frames into their
    /// respective streams, if writable.
    fn process_writes(&mut self, qconn: &mut QuicheConnection) -> QuicResult<()> {
        while let Some(stream_id) = qconn.stream_writable_next() {
            self.process_writable_stream(qconn, stream_id)?;
        }

        // Also optimistically check for any ready streams
        // (`now_or_never` polls once without waiting).
        while let Some(Some(ready)) = self.waiting_streams.next().now_or_never() {
            self.upstream_ready(qconn, ready)?;
        }

        Ok(())
    }

    /// Reports connection-level error metrics and forwards
    /// IOWorker errors to the associated [H3Controller].
    fn on_conn_close<M: Metrics>(
        &mut self, quiche_conn: &mut QuicheConnection, metrics: &M,
        work_loop_result: &QuicResult<()>,
    ) {
        let max_stream_seen = self.max_stream_seen;
        metrics
            .maximum_writable_streams()
            .observe(max_stream_seen as f64);

        // Only errors need further reporting.
        let Err(work_loop_error) = work_loop_result else {
            return;
        };

        Self::record_quiche_error(quiche_conn, metrics);

        let Some(h3_err) = work_loop_error.downcast_ref::<H3ConnectionError>()
        else {
            log::error!("Found non-H3ConnectionError"; "error" => %work_loop_error);
            return;
        };

        if matches!(h3_err, H3ConnectionError::ControllerWentAway) {
            // Inform client that we won't (can't) respond anymore
            let _ = quiche_conn.close(true, WireErrorCode::NoError as u64, &[]);
            return;
        }

        // Forward the error to the controller as an event, if it maps to one.
        if let Some(ev) = H3Event::from_error(h3_err) {
            let _ = self.h3_event_sender.send(ev.into());
            #[expect(clippy::needless_return)]
            return; // avoid accidental fallthrough in the future
        }
    }

    /// Wait for incoming data from the [H3Controller]. The next iteration of
    /// the I/O loop commences when one of the `select!`ed futures triggers.
    #[inline]
    async fn wait_for_data(
        &mut self, qconn: &mut QuicheConnection,
    ) -> QuicResult<()> {
        // `biased` makes `select!` poll the branches in the order written,
        // prioritizing ready streams over datagrams and commands.
        select! {
            biased;
            Some(ready) = self.waiting_streams.next() => self.upstream_ready(qconn, ready),
            Some(dgram) = self.dgram_recv.recv() => self.dgram_ready(qconn, dgram),
            Some(cmd) = self.cmd_recv.recv() => H::conn_command(self, qconn, cmd),
            r = self.hooks.wait_for_action(qconn), if H::has_wait_action(self) => r,
        }?;

        // Make sure controller is not starved, but also not prioritized in the
        // biased select. So poll it last, however also perform a try_recv
        // each iteration.
        if let Ok(cmd) = self.cmd_recv.try_recv() {
            H::conn_command(self, qconn, cmd)?;
        }

        Ok(())
    }
}
1334
1335impl<H: DriverHooks> Drop for H3Driver<H> {
1336 fn drop(&mut self) {
1337 for stream in self.stream_map.values() {
1338 stream
1339 .audit_stats
1340 .set_recvd_stream_fin(StreamClosureKind::Implicit);
1341 }
1342 }
1343}
1344
/// [`H3Command`]s are sent by the [H3Controller] to alter the [H3Driver]'s
/// state.
///
/// Both [ServerH3Driver] and [ClientH3Driver] may extend this enum with
/// endpoint-specific variants.
#[derive(Debug)]
pub enum H3Command {
    /// A connection-level command that executes directly on the
    /// [`quiche::Connection`].
    QuicCmd(QuicCommand),
    /// Send a GOAWAY frame to the peer to initiate a graceful connection
    /// shutdown.
    ///
    /// The GOAWAY advertises the highest stream ID the driver has seen so
    /// far.
    GoAway,
    /// Shuts down a stream in the specified direction(s) and removes it from
    /// local state.
    ///
    /// This removes the stream from local state and sends a `RESET_STREAM`
    /// frame (for write direction) and/or a `STOP_SENDING` frame (for read
    /// direction) to the peer. See [`quiche::Connection::stream_shutdown`]
    /// for details.
    ShutdownStream {
        /// ID of the stream to shut down.
        stream_id: u64,
        /// Direction(s) to shut down and the error code(s) to signal.
        shutdown: StreamShutdown,
    },
}
1370
/// Specifies which direction(s) of a stream to shut down.
///
/// Used with [`H3Controller::shutdown_stream`] and the internal
/// `shutdown_stream` function to control whether to send a `STOP_SENDING` frame
/// (read direction), and/or a `RESET_STREAM` frame (write direction)
///
/// Note: Despite its name, "shutdown" here refers to signaling the peer about
/// stream termination, not sending a FIN flag. `STOP_SENDING` asks the peer to
/// stop sending data, while `RESET_STREAM` abruptly terminates the write side.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum StreamShutdown {
    /// Shut down only the read direction (sends `STOP_SENDING` frame with the
    /// given error code).
    Read { error_code: u64 },
    /// Shut down only the write direction (sends `RESET_STREAM` frame with the
    /// given error code).
    Write { error_code: u64 },
    /// Shut down both directions (sends both `STOP_SENDING` and `RESET_STREAM`
    /// frames).
    Both {
        /// Error code carried in the `STOP_SENDING` frame (read direction).
        read_error_code: u64,
        /// Error code carried in the `RESET_STREAM` frame (write direction).
        write_error_code: u64,
    },
}
1395
/// Sends [`H3Command`]s to an [H3Driver]. The sender is typed and internally
/// wraps instances of `T` in the appropriate `H3Command` variant.
pub struct RequestSender<C, T> {
    sender: UnboundedSender<C>,
    // Required to work around dangling type parameter
    // (`T` appears only in method signatures, not in any field; `fn() -> T`
    // keeps the struct covariant in `T` without owning one).
    _r: PhantomData<fn() -> T>,
}
1403
1404impl<C, T: Into<C>> RequestSender<C, T> {
1405 /// Send a request to the [H3Driver]. This can only fail if the driver is
1406 /// gone.
1407 #[inline(always)]
1408 pub fn send(&self, v: T) -> Result<(), mpsc::error::SendError<C>> {
1409 self.sender.send(v.into())
1410 }
1411}
1412
1413impl<C, T> Clone for RequestSender<C, T> {
1414 fn clone(&self) -> Self {
1415 Self {
1416 sender: self.sender.clone(),
1417 _r: Default::default(),
1418 }
1419 }
1420}
1421
/// Interface to communicate with a paired [H3Driver].
///
/// An [H3Controller] receives [`H3Event`]s from its driver, which must be
/// consumed by the application built on top of the driver to react to incoming
/// events. The controller also allows the application to send ad-hoc
/// [`H3Command`]s to the driver, which will be processed when the driver waits
/// for incoming data.
pub struct H3Controller<H: DriverHooks> {
    /// Sends [`H3Command`]s to the [H3Driver], like [`QuicCommand`]s or
    /// outbound HTTP requests.
    cmd_sender: UnboundedSender<H::Command>,
    /// Receives [`H3Event`]s from the [H3Driver]. Can be extracted and
    /// used independently of the [H3Controller].
    ///
    /// `None` after [`H3Controller::take_event_receiver`] has been called.
    h3_event_recv: Option<UnboundedReceiver<H::Event>>,
}
1437
1438impl<H: DriverHooks> H3Controller<H> {
1439 /// Gets a mut reference to the [`H3Event`] receiver for the paired
1440 /// [H3Driver].
1441 pub fn event_receiver_mut(&mut self) -> &mut UnboundedReceiver<H::Event> {
1442 self.h3_event_recv
1443 .as_mut()
1444 .expect("No event receiver on H3Controller")
1445 }
1446
1447 /// Takes the [`H3Event`] receiver for the paired [H3Driver].
1448 pub fn take_event_receiver(&mut self) -> UnboundedReceiver<H::Event> {
1449 self.h3_event_recv
1450 .take()
1451 .expect("No event receiver on H3Controller")
1452 }
1453
1454 /// Creates a [`QuicCommand`] sender for the paired [H3Driver].
1455 pub fn cmd_sender(&self) -> RequestSender<H::Command, QuicCommand> {
1456 RequestSender {
1457 sender: self.cmd_sender.clone(),
1458 _r: Default::default(),
1459 }
1460 }
1461
1462 /// Sends a GOAWAY frame to initiate a graceful connection shutdown.
1463 pub fn send_goaway(&self) {
1464 let _ = self.cmd_sender.send(H3Command::GoAway.into());
1465 }
1466
1467 /// Creates an [`H3Command`] sender for the paired [H3Driver].
1468 pub fn h3_cmd_sender(&self) -> RequestSender<H::Command, H3Command> {
1469 RequestSender {
1470 sender: self.cmd_sender.clone(),
1471 _r: Default::default(),
1472 }
1473 }
1474
1475 /// Shuts down a stream in the specified direction(s) and removes it from
1476 /// local state.
1477 ///
1478 /// This removes the stream from local state and sends a `RESET_STREAM`
1479 /// frame (for write direction) and/or a `STOP_SENDING` frame (for read
1480 /// direction) to the peer, depending on the [`StreamShutdown`] variant.
1481 pub fn shutdown_stream(&self, stream_id: u64, shutdown: StreamShutdown) {
1482 let _ = self.cmd_sender.send(
1483 H3Command::ShutdownStream {
1484 stream_id,
1485 shutdown,
1486 }
1487 .into(),
1488 );
1489 }
1490}