diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
index 1247cac3..07e23562 100644
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -51,6 +51,9 @@ jobs:
       - name: Check with unstable flag
         run: cargo check --features unstable
 
+      - name: Check with tracing feature
+        run: cargo check --features tracing
+
       - name: Run lib tests and doc tests
         run: cargo test
 
diff --git a/Cargo.toml b/Cargo.toml
index cf0b1d70..2910908c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -23,6 +23,9 @@ rust-version = "1.63"
 # Enables `futures::Stream` implementations for various types.
 stream = []
 
+# Enables tracing.
+tracing = ["dep:tracing"]
+
 # Enables **unstable** APIs. Any API exposed by this feature has no backwards
 # compatibility guarantees. In other words, you should not use this feature for
 # anything besides experimentation. Definitely **do not** publish a crate that
@@ -46,12 +49,14 @@ tokio-util = { version = "0.7.1", features = ["codec", "io"] }
 tokio = { version = "1", features = ["io-util"] }
 bytes = "1"
 http = "1"
-tracing = { version = "0.1.35", default-features = false, features = ["std"] }
+tracing = { version = "0.1.35", default-features = false, features = ["std"], optional = true }
 fnv = "1.0.5"
 slab = "0.4.2"
 indexmap = { version = "2", features = ["std"] }
 
 [dev-dependencies]
+# Test
+tracing = { version = "0.1.35", default-features = false, features = ["std"] }
 # Fuzzing
 quickcheck = { version = "1.0.3", default-features = false }
 
diff --git a/src/client.rs b/src/client.rs
index 86fef1ec..ad48db06 100644
--- a/src/client.rs
+++ b/src/client.rs
@@ -141,6 +141,8 @@ use crate::frame::{Headers, Pseudo, Reason, Settings, StreamId};
 use crate::proto::{self, Error};
 use crate::{FlowControl, PingPong, RecvStream, SendStream};
 
+#[cfg(feature = "tracing")]
+use ::tracing::Instrument;
 use bytes::{Buf, Bytes};
 use http::{uri, HeaderMap, Method, Request, Response, Version};
 use std::fmt;
@@ -150,7 +152,6 @@ use std::task::{Context, Poll};
 use std::time::Duration;
 use std::usize;
 use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
-use tracing::Instrument;
 
 /// Initializes new HTTP/2 streams on a connection by sending a request.
/// @@ -1276,10 +1277,15 @@ where T: AsyncRead + AsyncWrite + Unpin, { let builder = Builder::new(); - builder + + #[cfg(feature = "tracing")] + return builder .handshake(io) - .instrument(tracing::trace_span!("client_handshake")) - .await + .instrument(::tracing::trace_span!("client_handshake")) + .await; + + #[cfg(not(feature = "tracing"))] + return builder.handshake(io).await; } // ===== impl Connection ===== @@ -1288,12 +1294,12 @@ async fn bind_connection(io: &mut T) -> Result<(), crate::Error> where T: AsyncRead + AsyncWrite + Unpin, { - tracing::debug!("binding client connection"); + debug!("binding client connection"); let msg: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; io.write_all(msg).await.map_err(crate::Error::from_io)?; - tracing::debug!("client connection bound"); + debug!("client connection bound"); Ok(()) } @@ -1440,7 +1446,7 @@ where self.inner.maybe_close_connection_if_no_streams(); let result = self.inner.poll(cx).map_err(Into::into); if result.is_pending() && !self.inner.has_streams_or_other_references() { - tracing::trace!("last stream closed during poll, wake again"); + trace!("last stream closed during poll, wake again"); cx.waker().wake_by_ref(); } result diff --git a/src/codec/framed_read.rs b/src/codec/framed_read.rs index 9270a863..2193339d 100644 --- a/src/codec/framed_read.rs +++ b/src/codec/framed_read.rs @@ -126,10 +126,9 @@ fn decode_frame( partial_inout: &mut Option, mut bytes: BytesMut, ) -> Result, Error> { - let span = tracing::trace_span!("FramedRead::decode_frame", offset = bytes.len()); - let _e = span.enter(); + let _span = trace_span!("FramedRead::decode_frame", offset = bytes.len()); - tracing::trace!("decoding frame from {}B", bytes.len()); + trace!("decoding frame from {}B", bytes.len()); // Parse the head let head = frame::Head::parse(&bytes); @@ -141,7 +140,7 @@ fn decode_frame( let kind = head.kind(); - tracing::trace!(frame.kind = ?kind); + trace!(frame.kind = ?kind); macro_rules! header_block { ($frame:ident, $head:ident, $bytes:ident) => ({ @@ -159,8 +158,8 @@ fn decode_frame( // `PROTOCOL_ERROR`. return Err(Error::library_reset($head.stream_id(), Reason::PROTOCOL_ERROR)); }, - Err(e) => { - proto_err!(conn: "failed to load frame; err={:?}", e); + Err(_e) => { + proto_err!(conn: "failed to load frame; err={:?}", _e); return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } }; @@ -176,8 +175,8 @@ fn decode_frame( proto_err!(stream: "malformed header block; stream={:?}", id); return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR)); }, - Err(e) => { - proto_err!(conn: "failed HPACK decoding; err={:?}", e); + Err(_e) => { + proto_err!(conn: "failed HPACK decoding; err={:?}", _e); return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } } @@ -185,7 +184,7 @@ fn decode_frame( if is_end_headers { frame.into() } else { - tracing::trace!("loaded partial header block"); + trace!("loaded partial header block"); // Defer returning the frame *partial_inout = Some(Partial { frame: Continuable::$frame(frame), @@ -202,8 +201,8 @@ fn decode_frame( Kind::Settings => { let res = frame::Settings::load(head, &bytes[frame::HEADER_LEN..]); - res.map_err(|e| { - proto_err!(conn: "failed to load SETTINGS frame; err={:?}", e); + res.map_err(|_e| { + proto_err!(conn: "failed to load SETTINGS frame; err={:?}", _e); Error::library_go_away(Reason::PROTOCOL_ERROR) })? 
.into() @@ -211,8 +210,8 @@ fn decode_frame( Kind::Ping => { let res = frame::Ping::load(head, &bytes[frame::HEADER_LEN..]); - res.map_err(|e| { - proto_err!(conn: "failed to load PING frame; err={:?}", e); + res.map_err(|_e| { + proto_err!(conn: "failed to load PING frame; err={:?}", _e); Error::library_go_away(Reason::PROTOCOL_ERROR) })? .into() @@ -220,8 +219,8 @@ fn decode_frame( Kind::WindowUpdate => { let res = frame::WindowUpdate::load(head, &bytes[frame::HEADER_LEN..]); - res.map_err(|e| { - proto_err!(conn: "failed to load WINDOW_UPDATE frame; err={:?}", e); + res.map_err(|_e| { + proto_err!(conn: "failed to load WINDOW_UPDATE frame; err={:?}", _e); Error::library_go_away(Reason::PROTOCOL_ERROR) })? .into() @@ -231,8 +230,8 @@ fn decode_frame( let res = frame::Data::load(head, bytes.freeze()); // TODO: Should this always be connection level? Probably not... - res.map_err(|e| { - proto_err!(conn: "failed to load DATA frame; err={:?}", e); + res.map_err(|_e| { + proto_err!(conn: "failed to load DATA frame; err={:?}", _e); Error::library_go_away(Reason::PROTOCOL_ERROR) })? .into() @@ -240,16 +239,16 @@ fn decode_frame( Kind::Headers => header_block!(Headers, head, bytes), Kind::Reset => { let res = frame::Reset::load(head, &bytes[frame::HEADER_LEN..]); - res.map_err(|e| { - proto_err!(conn: "failed to load RESET frame; err={:?}", e); + res.map_err(|_e| { + proto_err!(conn: "failed to load RESET frame; err={:?}", _e); Error::library_go_away(Reason::PROTOCOL_ERROR) })? .into() } Kind::GoAway => { let res = frame::GoAway::load(&bytes[frame::HEADER_LEN..]); - res.map_err(|e| { - proto_err!(conn: "failed to load GO_AWAY frame; err={:?}", e); + res.map_err(|_e| { + proto_err!(conn: "failed to load GO_AWAY frame; err={:?}", _e); Error::library_go_away(Reason::PROTOCOL_ERROR) })? 
.into() @@ -272,8 +271,8 @@ fn decode_frame( proto_err!(stream: "PRIORITY invalid dependency ID; stream={:?}", id); return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR)); } - Err(e) => { - proto_err!(conn: "failed to load PRIORITY frame; err={:?};", e); + Err(_e) => { + proto_err!(conn: "failed to load PRIORITY frame; err={:?};", _e); return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } } @@ -301,7 +300,7 @@ fn decode_frame( } else { let cnt = partial.continuation_frames_count + 1; if cnt > max_continuation_frames { - tracing::debug!("too_many_continuations, max = {}", max_continuation_frames); + debug!("too_many_continuations, max = {}", max_continuation_frames); return Err(Error::library_go_away_data( Reason::ENHANCE_YOUR_CALM, "too_many_continuations", @@ -348,8 +347,8 @@ fn decode_frame( proto_err!(stream: "malformed CONTINUATION frame; stream={:?}", id); return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR)); } - Err(e) => { - proto_err!(conn: "failed HPACK decoding; err={:?}", e); + Err(_e) => { + proto_err!(conn: "failed HPACK decoding; err={:?}", _e); return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } } @@ -377,17 +376,16 @@ where type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let span = tracing::trace_span!("FramedRead::poll_next"); - let _e = span.enter(); + let _span = trace_span!("FramedRead::poll_next"); loop { - tracing::trace!("poll"); + trace!("poll"); let bytes = match ready!(Pin::new(&mut self.inner).poll_next(cx)) { Some(Ok(bytes)) => bytes, Some(Err(e)) => return Poll::Ready(Some(Err(map_err(e)))), None => return Poll::Ready(None), }; - tracing::trace!(read.bytes = bytes.len()); + trace!(read.bytes = bytes.len()); let Self { ref mut hpack, max_header_list_size, @@ -402,7 +400,7 @@ where partial, bytes, )? { - tracing::debug!(?frame, "received"); + debug!(?frame, "received"); return Poll::Ready(Some(Ok(frame))); } } diff --git a/src/codec/framed_write.rs b/src/codec/framed_write.rs index c88af02d..20bda81e 100644 --- a/src/codec/framed_write.rs +++ b/src/codec/framed_write.rs @@ -128,19 +128,18 @@ where /// Flush buffered data to the wire pub fn flush(&mut self, cx: &mut Context) -> Poll> { - let span = tracing::trace_span!("FramedWrite::flush"); - let _e = span.enter(); + let _span = trace_span!("FramedWrite::flush"); loop { while !self.encoder.is_empty() { match self.encoder.next { Some(Next::Data(ref mut frame)) => { - tracing::trace!(queued_data_frame = true); + trace!(queued_data_frame = true); let mut buf = (&mut self.encoder.buf).chain(frame.payload_mut()); ready!(poll_write_buf(Pin::new(&mut self.inner), cx, &mut buf))? } _ => { - tracing::trace!(queued_data_frame = false); + trace!(queued_data_frame = false); ready!(poll_write_buf( Pin::new(&mut self.inner), cx, @@ -156,7 +155,7 @@ where } } - tracing::trace!("flushing buffer"); + trace!("flushing buffer"); // Flush the upstream ready!(Pin::new(&mut self.inner).poll_flush(cx))?; @@ -207,10 +206,9 @@ where fn buffer(&mut self, item: Frame) -> Result<(), UserError> { // Ensure that we have enough capacity to accept the write. 
assert!(self.has_capacity()); - let span = tracing::trace_span!("FramedWrite::buffer", frame = ?item); - let _e = span.enter(); + let _span = trace_span!("FramedWrite::buffer", frame = ?item); - tracing::debug!(frame = ?item, "send"); + debug!(frame = ?item, "send"); match item { Frame::Data(mut v) => { @@ -259,31 +257,31 @@ where } Frame::Settings(v) => { v.encode(self.buf.get_mut()); - tracing::trace!(rem = self.buf.remaining(), "encoded settings"); + trace!(rem = self.buf.remaining(), "encoded settings"); } Frame::GoAway(v) => { v.encode(self.buf.get_mut()); - tracing::trace!(rem = self.buf.remaining(), "encoded go_away"); + trace!(rem = self.buf.remaining(), "encoded go_away"); } Frame::Ping(v) => { v.encode(self.buf.get_mut()); - tracing::trace!(rem = self.buf.remaining(), "encoded ping"); + trace!(rem = self.buf.remaining(), "encoded ping"); } Frame::WindowUpdate(v) => { v.encode(self.buf.get_mut()); - tracing::trace!(rem = self.buf.remaining(), "encoded window_update"); + trace!(rem = self.buf.remaining(), "encoded window_update"); } Frame::Priority(_) => { /* - v.encode(self.buf.get_mut()); - tracing::trace!("encoded priority; rem={:?}", self.buf.remaining()); - */ + v.encode(self.buf.get_mut()); + trace!("encoded priority; rem={:?}", self.buf.remaining()); + */ unimplemented!(); } Frame::Reset(v) => { v.encode(self.buf.get_mut()); - tracing::trace!(rem = self.buf.remaining(), "encoded reset"); + trace!(rem = self.buf.remaining(), "encoded reset"); } } diff --git a/src/frame/go_away.rs b/src/frame/go_away.rs index 99330e98..b4ccd253 100644 --- a/src/frame/go_away.rs +++ b/src/frame/go_away.rs @@ -57,7 +57,7 @@ impl GoAway { } pub fn encode(&self, dst: &mut B) { - tracing::trace!("encoding GO_AWAY; code={:?}", self.error_code); + trace!("encoding GO_AWAY; code={:?}", self.error_code); let head = Head::new(Kind::GoAway, 0, StreamId::zero()); head.encode(8 + self.debug_data.len(), dst); dst.put_u32(self.last_stream_id.into()); diff --git a/src/frame/headers.rs b/src/frame/headers.rs index bb36cd4f..9d3b6e7a 100644 --- a/src/frame/headers.rs +++ b/src/frame/headers.rs @@ -152,7 +152,7 @@ impl Headers { let flags = HeadersFlag(head.flag()); let mut pad = 0; - tracing::trace!("loading headers; flags={:?}", flags); + trace!("loading headers; flags={:?}", flags); if head.stream_id().is_zero() { return Err(Error::InvalidStreamId); @@ -855,10 +855,10 @@ impl HeaderBlock { macro_rules! 
set_pseudo { ($field:ident, $val:expr) => {{ if reg { - tracing::trace!("load_hpack; header malformed -- pseudo not at head of block"); + trace!("load_hpack; header malformed -- pseudo not at head of block"); malformed = true; } else if self.pseudo.$field.is_some() { - tracing::trace!("load_hpack; header malformed -- repeated pseudo"); + trace!("load_hpack; header malformed -- repeated pseudo"); malformed = true; } else { let __val = $val; @@ -867,7 +867,7 @@ impl HeaderBlock { if headers_size < max_header_list_size { self.pseudo.$field = Some(__val); } else if !self.is_over_size { - tracing::trace!("load_hpack; header list size over max"); + trace!("load_hpack; header list size over max"); self.is_over_size = true; } } @@ -894,13 +894,10 @@ impl HeaderBlock { || name == "keep-alive" || name == "proxy-connection" { - tracing::trace!("load_hpack; connection level header"); + trace!("load_hpack; connection level header"); malformed = true; } else if name == header::TE && value != "trailers" { - tracing::trace!( - "load_hpack; TE header not set to trailers; val={:?}", - value - ); + trace!("load_hpack; TE header not set to trailers; val={:?}", value); malformed = true; } else { reg = true; @@ -911,7 +908,7 @@ impl HeaderBlock { decoded_header_size(name.as_str().len(), value.len()); self.fields.append(name, value); } else if !self.is_over_size { - tracing::trace!("load_hpack; header list size over max"); + trace!("load_hpack; header list size over max"); self.is_over_size = true; } } @@ -926,12 +923,12 @@ impl HeaderBlock { }); if let Err(e) = res { - tracing::trace!("hpack decoding error; err={:?}", e); + trace!("hpack decoding error; err={:?}", e); return Err(e.into()); } if malformed { - tracing::trace!("malformed message"); + trace!("malformed message"); return Err(Error::MalformedMessage); } diff --git a/src/frame/ping.rs b/src/frame/ping.rs index 241d06ea..92212a43 100644 --- a/src/frame/ping.rs +++ b/src/frame/ping.rs @@ -85,7 +85,7 @@ impl Ping { pub fn encode(&self, dst: &mut B) { let sz = self.payload.len(); - tracing::trace!("encoding PING; ack={} len={}", self.ack, sz); + trace!("encoding PING; ack={} len={}", self.ack, sz); let flags = if self.ack { ACK_FLAG } else { 0 }; let head = Head::new(Kind::Ping, flags, StreamId::zero()); diff --git a/src/frame/reset.rs b/src/frame/reset.rs index 39f6ac20..06c6aa53 100644 --- a/src/frame/reset.rs +++ b/src/frame/reset.rs @@ -38,7 +38,7 @@ impl Reset { } pub fn encode(&self, dst: &mut B) { - tracing::trace!( + trace!( "encoding RESET; id={:?} code={:?}", self.stream_id, self.error_code diff --git a/src/frame/settings.rs b/src/frame/settings.rs index 484498a9..4e434a3a 100644 --- a/src/frame/settings.rs +++ b/src/frame/settings.rs @@ -149,7 +149,7 @@ impl Settings { // Ensure the payload length is correct, each setting is 6 bytes long. 
if payload.len() % 6 != 0 { - tracing::debug!("invalid settings payload length; len={:?}", payload.len()); + debug!("invalid settings payload length; len={:?}", payload.len()); return Err(Error::InvalidPayloadAckSettings); } @@ -215,13 +215,13 @@ impl Settings { let head = Head::new(Kind::Settings, self.flags.into(), StreamId::zero()); let payload_len = self.payload_len(); - tracing::trace!("encoding SETTINGS; len={}", payload_len); + trace!("encoding SETTINGS; len={}", payload_len); head.encode(payload_len, dst); // Encode the settings self.for_each(|setting| { - tracing::trace!("encoding setting; val={:?}", setting); + trace!("encoding setting; val={:?}", setting); setting.encode(dst) }); } diff --git a/src/frame/window_update.rs b/src/frame/window_update.rs index eed2ce17..f690eb18 100644 --- a/src/frame/window_update.rs +++ b/src/frame/window_update.rs @@ -48,7 +48,7 @@ impl WindowUpdate { } pub fn encode(&self, dst: &mut B) { - tracing::trace!("encoding WINDOW_UPDATE; id={:?}", self.stream_id); + trace!("encoding WINDOW_UPDATE; id={:?}", self.stream_id); let head = Head::new(Kind::WindowUpdate, 0, self.stream_id); head.encode(4, dst); dst.put_u32(self.size_increment); diff --git a/src/hpack/decoder.rs b/src/hpack/decoder.rs index e48976c3..4beb2e16 100644 --- a/src/hpack/decoder.rs +++ b/src/hpack/decoder.rs @@ -189,10 +189,9 @@ impl Decoder { self.last_max_update = size; } - let span = tracing::trace_span!("hpack::decode"); - let _e = span.enter(); + let _span = trace_span!("hpack::decode"); - tracing::trace!("decode"); + trace!("decode"); while let Some(ty) = peek_u8(src) { // At this point we are always at the beginning of the next block @@ -200,14 +199,14 @@ impl Decoder { // determined from the first byte. match Representation::load(ty)? { Indexed => { - tracing::trace!(rem = src.remaining(), kind = %"Indexed"); + trace!(rem = src.remaining(), kind = %"Indexed"); can_resize = false; let entry = self.decode_indexed(src)?; consume(src); f(entry); } LiteralWithIndexing => { - tracing::trace!(rem = src.remaining(), kind = %"LiteralWithIndexing"); + trace!(rem = src.remaining(), kind = %"LiteralWithIndexing"); can_resize = false; let entry = self.decode_literal(src, true)?; @@ -218,14 +217,14 @@ impl Decoder { f(entry); } LiteralWithoutIndexing => { - tracing::trace!(rem = src.remaining(), kind = %"LiteralWithoutIndexing"); + trace!(rem = src.remaining(), kind = %"LiteralWithoutIndexing"); can_resize = false; let entry = self.decode_literal(src, false)?; consume(src); f(entry); } LiteralNeverIndexed => { - tracing::trace!(rem = src.remaining(), kind = %"LiteralNeverIndexed"); + trace!(rem = src.remaining(), kind = %"LiteralNeverIndexed"); can_resize = false; let entry = self.decode_literal(src, false)?; consume(src); @@ -235,7 +234,7 @@ impl Decoder { f(entry); } SizeUpdate => { - tracing::trace!(rem = src.remaining(), kind = %"SizeUpdate"); + trace!(rem = src.remaining(), kind = %"SizeUpdate"); if !can_resize { return Err(DecoderError::InvalidMaxDynamicSize); } @@ -257,7 +256,7 @@ impl Decoder { return Err(DecoderError::InvalidMaxDynamicSize); } - tracing::debug!( + debug!( from = self.table.size(), to = new_size, "Decoder changed max table size" @@ -318,7 +317,7 @@ impl Decoder { let len = decode_int(buf, 7)?; if len > buf.remaining() { - tracing::trace!(len, remaining = buf.remaining(), "decode_string underflow",); + trace!(len, remaining = buf.remaining(), "decode_string underflow",); return Err(DecoderError::NeedMore(NeedMore::StringUnderflow)); } @@ -494,6 +493,7 @@ impl Table { 
} } + #[cfg(any(feature = "tracing", test))] fn size(&self) -> usize { self.size } diff --git a/src/hpack/encoder.rs b/src/hpack/encoder.rs index bd49056f..79189e7a 100644 --- a/src/hpack/encoder.rs +++ b/src/hpack/encoder.rs @@ -62,8 +62,7 @@ impl Encoder { where I: IntoIterator>>, { - let span = tracing::trace_span!("hpack::encode"); - let _e = span.enter(); + let _span = trace_span!("hpack::encode"); self.encode_size_updates(dst); diff --git a/src/lib.rs b/src/lib.rs index 1120e106..8a1b2b45 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -88,12 +88,15 @@ #![cfg_attr(not(h2_internal_check_unexpected_cfgs), allow(unexpected_cfgs))] #![cfg_attr(test, deny(warnings))] +#[macro_use] +mod tracing; + macro_rules! proto_err { (conn: $($msg:tt)+) => { - tracing::debug!("connection error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) + debug!("connection error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) }; (stream: $($msg:tt)+) => { - tracing::debug!("stream error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) + debug!("stream error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) }; } diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 5589fabc..136d42e4 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -55,7 +55,8 @@ where streams: Streams, /// A `tracing` span tracking the lifetime of the connection. - span: tracing::Span, + #[cfg(feature = "tracing")] + span: ::tracing::Span, /// Client or server _phantom: PhantomData
<P>
, @@ -135,7 +136,8 @@ where ping_pong: PingPong::new(), settings: Settings::new(config.settings), streams, - span: tracing::debug_span!("Connection", peer = %P::NAME), + #[cfg(feature = "tracing")] + span: ::tracing::debug_span!("Connection", peer = %P::NAME), _phantom: PhantomData, }, } @@ -184,9 +186,9 @@ where /// Returns `Error` as this may raise errors that are caused by delayed /// processing of received frames. fn poll_ready(&mut self, cx: &mut Context) -> Poll> { + #[cfg(feature = "tracing")] let _e = self.inner.span.enter(); - let span = tracing::trace_span!("poll_ready"); - let _e = span.enter(); + let _span = trace_span!("poll_ready"); // The order of these calls don't really matter too much ready!(self.inner.ping_pong.send_pending_pong(cx, &mut self.codec))?; ready!(self.inner.ping_pong.send_pending_ping(cx, &mut self.codec))?; @@ -259,13 +261,12 @@ where // order to placate the borrow checker — `self` is mutably borrowed by // `poll2`, which means that we can't borrow `self.span` to enter it. // The clone is just an atomic ref bump. - let span = self.inner.span.clone(); - let _e = span.enter(); - let span = tracing::trace_span!("poll"); - let _e = span.enter(); + #[cfg(feature = "tracing")] + let _span1 = self.inner.span.clone().entered(); + let _span2 = trace_span!("poll"); loop { - tracing::trace!(connection.state = ?self.inner.state); + trace!(connection.state = ?self.inner.state); // TODO: probably clean up this glob of code match self.inner.state { // When open, continue to poll a frame @@ -294,7 +295,7 @@ where self.inner.as_dyn().handle_poll2_result(result)? } State::Closing(reason, initiator) => { - tracing::trace!("connection closing after flush"); + trace!("connection closing after flush"); // Flush/shutdown the codec ready!(self.codec.shutdown(cx))?; @@ -432,7 +433,7 @@ where // terminating the connection. Err(Error::GoAway(debug_data, reason, initiator)) => { let e = Error::GoAway(debug_data.clone(), reason, initiator); - tracing::debug!(error = ?e, "Connection::poll; connection error"); + debug!(error = ?e, "Connection::poll; connection error"); // We may have already sent a GOAWAY for this error, // if so, don't send another, just flush and close up. @@ -441,7 +442,7 @@ where .going_away() .map_or(false, |frame| frame.reason() == reason) { - tracing::trace!(" -> already going away"); + trace!(" -> already going away"); *self.state = State::Closing(reason, initiator); return Ok(()); } @@ -456,7 +457,7 @@ where // another frame. Err(Error::Reset(id, reason, initiator)) => { debug_assert_eq!(initiator, Initiator::Library); - tracing::trace!(?id, ?reason, "stream error"); + trace!(?id, ?reason, "stream error"); self.streams.send_reset(id, reason); Ok(()) } @@ -465,7 +466,7 @@ where // // TODO: Are I/O errors recoverable? 
Err(Error::Io(kind, inner)) => { - tracing::debug!(error = ?kind, "Connection::poll; IO error"); + debug!(error = ?kind, "Connection::poll; IO error"); let e = Error::Io(kind, inner); // Reset all active streams @@ -495,27 +496,27 @@ where use crate::frame::Frame::*; match frame { Some(Headers(frame)) => { - tracing::trace!(?frame, "recv HEADERS"); + trace!(?frame, "recv HEADERS"); self.streams.recv_headers(frame)?; } Some(Data(frame)) => { - tracing::trace!(?frame, "recv DATA"); + trace!(?frame, "recv DATA"); self.streams.recv_data(frame)?; } Some(Reset(frame)) => { - tracing::trace!(?frame, "recv RST_STREAM"); + trace!(?frame, "recv RST_STREAM"); self.streams.recv_reset(frame)?; } Some(PushPromise(frame)) => { - tracing::trace!(?frame, "recv PUSH_PROMISE"); + trace!(?frame, "recv PUSH_PROMISE"); self.streams.recv_push_promise(frame)?; } Some(Settings(frame)) => { - tracing::trace!(?frame, "recv SETTINGS"); + trace!(?frame, "recv SETTINGS"); return Ok(ReceivedFrame::Settings(frame)); } Some(GoAway(frame)) => { - tracing::trace!(?frame, "recv GOAWAY"); + trace!(?frame, "recv GOAWAY"); // This should prevent starting new streams, // but should allow continuing to process current streams // until they are all EOS. Once they are, State should @@ -524,7 +525,7 @@ where *self.error = Some(frame); } Some(Ping(frame)) => { - tracing::trace!(?frame, "recv PING"); + trace!(?frame, "recv PING"); let status = self.ping_pong.recv_ping(frame); if status.is_shutdown() { assert!( @@ -537,15 +538,15 @@ where } } Some(WindowUpdate(frame)) => { - tracing::trace!(?frame, "recv WINDOW_UPDATE"); + trace!(?frame, "recv WINDOW_UPDATE"); self.streams.recv_window_update(frame)?; } - Some(Priority(frame)) => { - tracing::trace!(?frame, "recv PRIORITY"); + Some(Priority(_frame)) => { + trace!(?_frame, "recv PRIORITY"); // TODO: handle } None => { - tracing::trace!("codec closed"); + trace!("codec closed"); self.streams.recv_eof(false).expect("mutex poisoned"); return Ok(ReceivedFrame::Done); } diff --git a/src/proto/ping_pong.rs b/src/proto/ping_pong.rs index 692c0bae..36dfa59f 100644 --- a/src/proto/ping_pong.rs +++ b/src/proto/ping_pong.rs @@ -107,7 +107,7 @@ impl PingPong { &Ping::SHUTDOWN, "pending_ping should be for shutdown", ); - tracing::trace!("recv PING SHUTDOWN ack"); + trace!("recv PING SHUTDOWN ack"); return ReceivedPing::Shutdown; } @@ -117,7 +117,7 @@ impl PingPong { if let Some(ref users) = self.user_pings { if ping.payload() == &Ping::USER && users.receive_pong() { - tracing::trace!("recv PING USER ack"); + trace!("recv PING USER ack"); return ReceivedPing::Unknown; } } @@ -125,7 +125,7 @@ impl PingPong { // else we were acked a ping we didn't send? // The spec doesn't require us to do anything about this, // so for resiliency, just ignore it for now. - tracing::warn!("recv PING ack that we never sent: {:?}", ping); + warn!("recv PING ack that we never sent: {:?}", ping); ReceivedPing::Unknown } else { // Save the ping's payload to be sent as an acknowledgement. diff --git a/src/proto/settings.rs b/src/proto/settings.rs index d6155fc3..27173db9 100644 --- a/src/proto/settings.rs +++ b/src/proto/settings.rs @@ -53,7 +53,7 @@ impl Settings { if frame.is_ack() { match &self.local { Local::WaitingAck(local) => { - tracing::debug!("received settings ACK; applying {:?}", local); + debug!("received settings ACK; applying {:?}", local); if let Some(max) = local.max_frame_size() { codec.set_max_recv_frame_size(max as usize); @@ -92,7 +92,7 @@ impl Settings { match &self.local { Local::ToSend(..) 
| Local::WaitingAck(..) => Err(UserError::SendSettingsWhilePending), Local::Synced => { - tracing::trace!("queue to send local settings: {:?}", frame); + trace!("queue to send local settings: {:?}", frame); self.local = Local::ToSend(frame); Ok(()) } @@ -131,7 +131,7 @@ impl Settings { // Buffer the settings frame dst.buffer(frame.into()).expect("invalid settings frame"); - tracing::trace!("ACK sent; applying settings"); + trace!("ACK sent; applying settings"); let is_initial = self.mark_remote_initial_settings_as_received(); streams.apply_remote_settings(&settings, is_initial)?; @@ -156,7 +156,7 @@ impl Settings { // Buffer the settings frame dst.buffer(settings.clone().into()) .expect("invalid settings frame"); - tracing::trace!("local settings sent; waiting for ack: {:?}", settings); + trace!("local settings sent; waiting for ack: {:?}", settings); self.local = Local::WaitingAck(settings.clone()); } diff --git a/src/proto/streams/counts.rs b/src/proto/streams/counts.rs index fdb07f1c..c160f3d8 100644 --- a/src/proto/streams/counts.rs +++ b/src/proto/streams/counts.rs @@ -94,6 +94,7 @@ impl Counts { self.num_local_error_reset_streams += 1; } + #[cfg(feature = "tracing")] pub(crate) fn max_local_error_resets(&self) -> Option { self.max_local_error_reset_streams } @@ -152,6 +153,7 @@ impl Counts { self.num_local_reset_streams += 1; } + #[cfg(feature = "tracing")] pub(crate) fn max_remote_reset_streams(&self) -> usize { self.max_remote_reset_streams } @@ -210,7 +212,7 @@ impl Counts { // TODO: move this to macro? pub fn transition_after(&mut self, mut stream: store::Ptr, is_reset_counted: bool) { - tracing::trace!( + trace!( "transition_after; stream={:?}; state={:?}; is_closed={:?}; \ pending_send_empty={:?}; buffered_send_data={}; \ num_recv={}; num_send={}", @@ -232,7 +234,7 @@ impl Counts { } if stream.is_counted { - tracing::trace!("dec_num_streams; stream={:?}", stream.id); + trace!("dec_num_streams; stream={:?}", stream.id); // Decrement the number of active streams. self.dec_num_streams(&mut stream); } diff --git a/src/proto/streams/flow_control.rs b/src/proto/streams/flow_control.rs index 57a93582..8b90b124 100644 --- a/src/proto/streams/flow_control.rs +++ b/src/proto/streams/flow_control.rs @@ -121,7 +121,7 @@ impl FlowControl { return Err(Reason::FLOW_CONTROL_ERROR); } - tracing::trace!( + trace!( "inc_window; sz={}; old={}; new={}", sz, self.window_size, @@ -137,7 +137,7 @@ impl FlowControl { /// This is called after receiving a SETTINGS frame with a lower /// INITIAL_WINDOW_SIZE value. pub fn dec_send_window(&mut self, sz: WindowSize) -> Result<(), Reason> { - tracing::trace!( + trace!( "dec_window; sz={}; window={}, available={}", sz, self.window_size, @@ -153,7 +153,7 @@ impl FlowControl { /// This is called after receiving a SETTINGS ACK frame with a lower /// INITIAL_WINDOW_SIZE value. pub fn dec_recv_window(&mut self, sz: WindowSize) -> Result<(), Reason> { - tracing::trace!( + trace!( "dec_recv_window; sz={}; window={}, available={}", sz, self.window_size, @@ -168,7 +168,7 @@ impl FlowControl { /// Decrements the window reflecting data has actually been sent. The caller /// must ensure that the window has capacity. 
pub fn send_data(&mut self, sz: WindowSize) -> Result<(), Reason> { - tracing::trace!( + trace!( "send_data; sz={}; window={}; available={}", sz, self.window_size, diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index 14b37e22..cd46640d 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -91,7 +91,7 @@ impl Prioritize { let _res = flow.assign_capacity(config.remote_init_window_sz); debug_assert!(_res.is_ok()); - tracing::trace!("Prioritize::new; flow={:?}", flow); + trace!("Prioritize::new; flow={:?}", flow); Prioritize { pending_send: store::Queue::new(), @@ -116,8 +116,7 @@ impl Prioritize { stream: &mut store::Ptr, task: &mut Option, ) { - let span = tracing::trace_span!("Prioritize::queue_frame", ?stream.id); - let _e = span.enter(); + let _span = trace_span!("Prioritize::queue_frame", ?stream.id); // Queue the frame in the buffer stream.pending_send.push_back(buffer, frame); self.schedule_send(stream, task); @@ -126,7 +125,7 @@ impl Prioritize { pub fn schedule_send(&mut self, stream: &mut store::Ptr, task: &mut Option) { // If the stream is waiting to be opened, nothing more to do. if stream.is_send_ready() { - tracing::trace!(?stream.id, "schedule_send"); + trace!(?stream.id, "schedule_send"); // Queue the stream self.pending_send.push(stream); @@ -172,10 +171,8 @@ impl Prioritize { // Update the buffered data counter stream.buffered_send_data += sz as usize; - let span = - tracing::trace_span!("send_data", sz, requested = stream.requested_send_capacity); - let _e = span.enter(); - tracing::trace!(buffered = stream.buffered_send_data); + let _span = trace_span!("send_data", sz, requested = stream.requested_send_capacity); + trace!(buffered = stream.buffered_send_data); // Implicitly request more send capacity if not enough has been // requested yet. @@ -200,7 +197,7 @@ impl Prioritize { self.reserve_capacity(0, stream, counts); } - tracing::trace!( + trace!( available = %stream.send_flow.available(), buffered = stream.buffered_send_data, ); @@ -234,14 +231,13 @@ impl Prioritize { stream: &mut store::Ptr, counts: &mut Counts, ) { - let span = tracing::trace_span!( + let _span = trace_span!( "reserve_capacity", ?stream.id, requested = capacity, effective = (capacity as usize) + stream.buffered_send_data, curr = stream.requested_send_capacity ); - let _e = span.enter(); // Actual capacity is `capacity` + the current amount of buffered data. // If it were less, then we could never send out the buffered data. @@ -294,14 +290,13 @@ impl Prioritize { inc: WindowSize, stream: &mut store::Ptr, ) -> Result<(), Reason> { - let span = tracing::trace_span!( + let _span = trace_span!( "recv_stream_window_update", ?stream.id, ?stream.state, inc, flow = ?stream.send_flow ); - let _e = span.enter(); if stream.state.is_send_closed() && stream.buffered_send_data == 0 { // We can't send any data, so don't bother doing anything else. 
@@ -359,11 +354,10 @@ impl Prioritize { } pub fn clear_pending_capacity(&mut self, store: &mut Store, counts: &mut Counts) { - let span = tracing::trace_span!("clear_pending_capacity"); - let _e = span.enter(); + let _span = trace_span!("clear_pending_capacity"); while let Some(stream) = self.pending_capacity.pop(store) { - counts.transition(stream, |_, stream| { - tracing::trace!(?stream.id, "clear_pending_capacity"); + counts.transition(stream, |_, _stream| { + trace!(?_stream.id, "clear_pending_capacity"); }) } } @@ -376,8 +370,7 @@ impl Prioritize { ) where R: Resolve, { - let span = tracing::trace_span!("assign_connection_capacity", inc); - let _e = span.enter(); + let _span = trace_span!("assign_connection_capacity", inc); // TODO: proper error handling let _res = self.flow.assign_capacity(inc); @@ -422,9 +415,8 @@ impl Prioritize { // Can't assign more than what is available stream.send_flow.window_size() - stream.send_flow.available().as_size(), ); - let span = tracing::trace_span!("try_assign_capacity", ?stream.id); - let _e = span.enter(); - tracing::trace!( + let _span = trace_span!("try_assign_capacity", ?stream.id); + trace!( requested = total_requested, additional, buffered = stream.buffered_send_data, @@ -455,7 +447,7 @@ impl Prioritize { // TODO: Should prioritization factor into this? let assign = cmp::min(conn_available, additional); - tracing::trace!(capacity = assign, "assigning"); + trace!(capacity = assign, "assigning"); // Assign the capacity to the stream stream.assign_capacity(assign, self.max_buffer_size); @@ -466,7 +458,7 @@ impl Prioritize { debug_assert!(_res.is_ok()); } - tracing::trace!( + trace!( available = %stream.send_flow.available(), requested = stream.requested_send_capacity, buffered = stream.buffered_send_data, @@ -525,7 +517,7 @@ impl Prioritize { // The max frame length let max_frame_len = dst.max_send_frame_size(); - tracing::trace!("poll_complete"); + trace!("poll_complete"); loop { if let Some(mut stream) = self.pop_pending_open(store, counts) { @@ -535,7 +527,7 @@ impl Prioritize { match self.pop_frame(buffer, store, max_frame_len, counts) { Some(frame) => { - tracing::trace!(?frame, "writing"); + trace!(?frame, "writing"); debug_assert_eq!(self.in_flight_data_frame, InFlightData::Nothing); if let Frame::Data(ref frame) = frame { @@ -581,8 +573,7 @@ impl Prioritize { where B: Buf, { - let span = tracing::trace_span!("try_reclaim_frame"); - let _e = span.enter(); + let _span = trace_span!("try_reclaim_frame"); // First check if there are any data chunks to take back if let Some(frame) = dst.take_last_data_frame() { @@ -601,7 +592,7 @@ impl Prioritize { where B: Buf, { - tracing::trace!( + trace!( ?frame, sz = frame.payload().inner.get_ref().remaining(), "reclaimed" @@ -613,7 +604,7 @@ impl Prioritize { match mem::replace(&mut self.in_flight_data_frame, InFlightData::Nothing) { InFlightData::Nothing => panic!("wasn't expecting a frame to reclaim"), InFlightData::Drop => { - tracing::trace!("not reclaiming frame for cancelled stream"); + trace!("not reclaiming frame for cancelled stream"); return false; } InFlightData::DataFrame(k) => { @@ -661,12 +652,11 @@ impl Prioritize { } pub fn clear_queue(&mut self, buffer: &mut Buffer>, stream: &mut store::Ptr) { - let span = tracing::trace_span!("clear_queue", ?stream.id); - let _e = span.enter(); + let _span = trace_span!("clear_queue", ?stream.id); // TODO: make this more efficient? 
- while let Some(frame) = stream.pending_send.pop_front(buffer) { - tracing::trace!(?frame, "dropping"); + while let Some(_frame) = stream.pending_send.pop_front(buffer) { + trace!(?_frame, "dropping"); } stream.buffered_send_data = 0; @@ -703,14 +693,12 @@ impl Prioritize { where B: Buf, { - let span = tracing::trace_span!("pop_frame"); - let _e = span.enter(); + let _span = trace_span!("pop_frame"); loop { match self.pending_send.pop(store) { Some(mut stream) => { - let span = tracing::trace_span!("popped", ?stream.id, ?stream.state); - let _e = span.enter(); + let _span = trace_span!("popped", ?stream.id, ?stream.state); // It's possible that this stream, besides having data to send, // is also queued to send a reset, and thus is already in the queue @@ -719,7 +707,7 @@ impl Prioritize { // To be safe, we just always ask the stream. let is_pending_reset = stream.is_pending_reset_expiration(); - tracing::trace!(is_pending_reset); + trace!(is_pending_reset); let frame = match stream.pending_send.pop_front(buffer) { Some(Frame::Data(mut frame)) => { @@ -728,7 +716,7 @@ impl Prioritize { let stream_capacity = stream.send_flow.available(); let sz = frame.payload().remaining(); - tracing::trace!( + trace!( sz, eos = frame.is_end_stream(), window = %stream_capacity, @@ -741,7 +729,7 @@ impl Prioritize { // Zero length data frames always have capacity to // be sent. if sz > 0 && stream_capacity == 0 { - tracing::trace!("stream capacity is 0"); + trace!("stream capacity is 0"); // Ensure that the stream is waiting for // connection level capacity @@ -777,10 +765,12 @@ impl Prioritize { continue; } - tracing::trace!(len, "sending data frame"); + trace!(len, "sending data frame"); // Update the flow control - tracing::trace_span!("updating stream flow").in_scope(|| { + { + let _span = trace_span!("updating stream flow"); + stream.send_data(len, self.max_buffer_size); // Assign the capacity back to the connection that @@ -789,25 +779,26 @@ impl Prioritize { // TODO: proper error handling let _res = self.flow.assign_capacity(len); debug_assert!(_res.is_ok()); - }); + } - let (eos, len) = tracing::trace_span!("updating connection flow") - .in_scope(|| { - // TODO: proper error handling - let _res = self.flow.send_data(len); - debug_assert!(_res.is_ok()); + let (eos, len) = { + let _span = trace_span!("updating connection flow"); - // Wrap the frame's data payload to ensure that the - // correct amount of data gets written. + // TODO: proper error handling + let _res = self.flow.send_data(len); + debug_assert!(_res.is_ok()); + + // Wrap the frame's data payload to ensure that the + // correct amount of data gets written. - let eos = frame.is_end_stream(); - let len = len as usize; + let eos = frame.is_end_stream(); + let len = len as usize; - if frame.payload().remaining() > len { - frame.set_end_stream(false); - } - (eos, len) - }); + if frame.payload().remaining() > len { + frame.set_end_stream(false); + } + (eos, len) + }; Frame::Data(frame.map(|buf| Prioritized { inner: buf.take(len), @@ -851,7 +842,7 @@ impl Prioritize { // had data buffered to be sent, but all the frames are cleared // in clear_queue(). Instead of doing O(N) traversal through queue // to remove, lets just ignore the stream here. - tracing::trace!("removing dangling stream from pending_send"); + trace!("removing dangling stream from pending_send"); // Since this should only happen as a consequence of `clear_queue`, // we must be in a closed state of some kind. 
debug_assert!(stream.state.is_closed()); @@ -861,7 +852,7 @@ impl Prioritize { } }; - tracing::trace!("pop_frame; frame={:?}", frame); + trace!("pop_frame; frame={:?}", frame); if cfg!(debug_assertions) && stream.state.is_idle() { debug_assert!(stream.id > self.last_opened_id); @@ -890,11 +881,11 @@ impl Prioritize { store: &'s mut Store, counts: &mut Counts, ) -> Option> { - tracing::trace!("schedule_pending_open"); + trace!("schedule_pending_open"); // check for any pending open streams if counts.can_inc_num_send_streams() { if let Some(mut stream) = self.pending_open.pop(store) { - tracing::trace!("schedule_pending_open; stream={:?}", stream.id); + trace!("schedule_pending_open; stream={:?}", stream.id); counts.inc_num_send_streams(&mut stream); stream.notify_send(); diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 46cb87cd..bf83b1ce 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -158,7 +158,7 @@ impl Recv { stream: &mut store::Ptr, counts: &mut Counts, ) -> Result<(), RecvHeaderBlockError>> { - tracing::trace!("opening stream; init_window={}", self.init_window_sz); + trace!("opening stream; init_window={}", self.init_window_sz); let is_initial = stream.state.recv_open(&frame)?; if is_initial { @@ -200,7 +200,7 @@ impl Recv { // So, if peer is a server, we'll send a 431. In either case, // an error is recorded, which will send a REFUSED_STREAM, // since we don't want any of the data frames either. - tracing::debug!( + debug!( "stream error REQUEST_HEADER_FIELDS_TOO_LARGE -- \ recv_headers: frame is over size; stream={:?}", stream.id @@ -359,7 +359,7 @@ impl Recv { /// Releases capacity of the connection pub fn release_connection_capacity(&mut self, capacity: WindowSize, task: &mut Option) { - tracing::trace!( + trace!( "release_connection_capacity; size={}, connection in_flight_data={}", capacity, self.in_flight_data, @@ -387,7 +387,7 @@ impl Recv { stream: &mut store::Ptr, task: &mut Option, ) -> Result<(), UserError> { - tracing::trace!("release_capacity; size={}", capacity); + trace!("release_capacity; size={}", capacity); if capacity > stream.in_flight_recv_data { return Err(UserError::ReleaseCapacityTooBig); @@ -423,7 +423,7 @@ impl Recv { return; } - tracing::trace!( + trace!( "auto-release closed stream ({:?}) capacity: {:?}", stream.id, stream.in_flight_recv_data, @@ -452,7 +452,7 @@ impl Recv { target: WindowSize, task: &mut Option, ) -> Result<(), Reason> { - tracing::trace!( + trace!( "set_target_connection_window; target={}; available={}, reserved={}", target, self.flow.available(), @@ -499,7 +499,7 @@ impl Recv { let old_sz = self.init_window_sz; self.init_window_sz = target; - tracing::trace!("update_initial_window_size; new={}; old={}", target, old_sz,); + trace!("update_initial_window_size; new={}; old={}", target, old_sz,); // Per RFC 7540 §6.9.2: // @@ -521,7 +521,7 @@ impl Recv { Ordering::Less => { // We must decrease the (local) window on every open stream. let dec = old_sz - target; - tracing::trace!("decrementing all windows; dec={}", dec); + trace!("decrementing all windows; dec={}", dec); store.try_for_each(|mut stream| { stream @@ -534,7 +534,7 @@ impl Recv { Ordering::Greater => { // We must increase the (local) window on every open stream. let inc = target - old_sz; - tracing::trace!("incrementing all windows; inc={}", inc); + trace!("incrementing all windows; inc={}", inc); store.try_for_each(|mut stream| { // XXX: Shouldn't the peer have already noticed our // overflow and sent us a GOAWAY? 
@@ -585,7 +585,7 @@ impl Recv { return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } - tracing::trace!( + trace!( "recv_data; size={}; connection={}; stream={}", sz, self.flow.window_size(), @@ -593,7 +593,7 @@ impl Recv { ); if is_ignoring_frame { - tracing::trace!( + trace!( "recv_data; frame ignored on locally reset {:?} for some time", stream.id, ); @@ -643,7 +643,7 @@ impl Recv { // Received a frame, but no one cared about it. fix issue#648 if !stream.is_recv { - tracing::trace!( + trace!( "recv_data; frame ignored on stream release {:?} for some time", stream.id, ); @@ -687,7 +687,7 @@ impl Recv { pub fn consume_connection_window(&mut self, sz: WindowSize) -> Result<(), Error> { if self.flow.window_size() < sz { - tracing::debug!( + debug!( "connection error FLOW_CONTROL_ERROR -- window_size ({:?}) < sz ({:?});", self.flow.window_size(), sz, @@ -721,7 +721,7 @@ impl Recv { // So, if peer is a server, we'll send a 431. In either case, // an error is recorded, which will send a REFUSED_STREAM, // since we don't want any of the data frames either. - tracing::debug!( + debug!( "stream error REFUSED_STREAM -- recv_push_promise: \ headers frame is over size; promised_id={:?};", frame.promised_id(), @@ -745,10 +745,10 @@ impl Recv { req.method(), promised_id, ), - InvalidContentLength(e) => proto_err!( + InvalidContentLength(_e) => proto_err!( stream: "recv_push_promise; promised request has invalid content-length {:?}; promised_id={:?}", - e, + _e, promised_id, ), } @@ -767,7 +767,7 @@ impl Recv { pub fn ensure_not_idle(&self, id: StreamId) -> Result<(), Reason> { if let Ok(next) = self.next_stream_id { if id >= next { - tracing::debug!( + debug!( "stream ID implicitly closed, PROTOCOL_ERROR; stream={:?}", id ); @@ -798,7 +798,7 @@ impl Recv { if counts.can_inc_num_remote_reset_streams() { counts.inc_num_remote_reset_streams(); } else { - tracing::warn!( + warn!( "recv_reset; remotely-reset pending-accept streams reached limit ({:?})", counts.max_remote_reset_streams(), ); @@ -896,7 +896,7 @@ impl Recv { return; } - tracing::trace!("enqueue_reset_expiration; {:?}", stream.id); + trace!("enqueue_reset_expiration; {:?}", stream.id); if counts.can_inc_num_reset_streams() { counts.inc_num_reset_streams(); @@ -961,8 +961,8 @@ impl Recv { fn clear_stream_window_update_queue(&mut self, store: &mut Store, counts: &mut Counts) { while let Some(stream) = self.pending_window_updates.pop(store) { - counts.transition(stream, |_, stream| { - tracing::trace!("clear_stream_window_update_queue; stream={:?}", stream.id); + counts.transition(stream, |_, _stream| { + trace!("clear_stream_window_update_queue; stream={:?}", _stream.id); }) } } @@ -1052,7 +1052,7 @@ impl Recv { }; counts.transition(stream, |_, stream| { - tracing::trace!("pending_window_updates -- pop; stream={:?}", stream.id); + trace!("pending_window_updates -- pop; stream={:?}", stream.id); debug_assert!(!stream.is_pending_window_update); if !stream.state.is_recv_streaming() { diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index 626e61a3..78975947 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -85,11 +85,11 @@ impl Send { || fields.contains_key("keep-alive") || fields.contains_key("proxy-connection") { - tracing::debug!("illegal connection-specific headers found"); + debug!("illegal connection-specific headers found"); return Err(UserError::MalformedHeaders); } else if let Some(te) = fields.get(http::header::TE) { if te != "trailers" { - tracing::debug!("illegal connection-specific 
headers found"); + debug!("illegal connection-specific headers found"); return Err(UserError::MalformedHeaders); } } @@ -107,7 +107,7 @@ impl Send { return Err(UserError::PeerDisabledServerPush); } - tracing::trace!( + trace!( "send_push_promise; frame={:?}; init_window={:?}", frame, self.init_window_sz @@ -130,7 +130,7 @@ impl Send { counts: &mut Counts, task: &mut Option, ) -> Result<(), UserError> { - tracing::trace!( + trace!( "send_headers; frame={:?}; init_window={:?}", frame, self.init_window_sz @@ -182,7 +182,7 @@ impl Send { let is_empty = stream.pending_send.is_empty(); let stream_id = stream.id; - tracing::trace!( + trace!( "send_reset(..., reason={:?}, initiator={:?}, stream={:?}, ..., \ is_reset={:?}; is_closed={:?}; pending_send.is_empty={:?}; \ state={:?} \ @@ -198,7 +198,7 @@ impl Send { if is_reset { // Don't double reset - tracing::trace!( + trace!( " -> not sending RST_STREAM ({:?} is already reset)", stream_id ); @@ -211,7 +211,7 @@ impl Send { // If closed AND the send queue is flushed, then the stream cannot be // reset explicitly, either. Implicit resets can still be queued. if is_closed && is_empty { - tracing::trace!( + trace!( " -> not sending explicit RST_STREAM ({:?} was closed \ and send queue was flushed)", stream_id @@ -227,7 +227,7 @@ impl Send { let frame = frame::Reset::new(stream.id, reason); - tracing::trace!("send_reset -- queueing; frame={:?}", frame); + trace!("send_reset -- queueing; frame={:?}", frame); self.prioritize .queue_frame(frame.into(), buffer, stream, task); self.prioritize.reclaim_all_capacity(stream, counts); @@ -281,7 +281,7 @@ impl Send { stream.state.send_close(); - tracing::trace!("send_trailers -- queuing; frame={:?}", frame); + trace!("send_trailers -- queuing; frame={:?}", frame); self.prioritize .queue_frame(frame.into(), buffer, stream, task); @@ -375,7 +375,7 @@ impl Send { task: &mut Option, ) -> Result<(), Reason> { if let Err(e) = self.prioritize.recv_stream_window_update(sz, stream) { - tracing::debug!("recv_stream_window_update !!; err={:?}", e); + debug!("recv_stream_window_update !!; err={:?}", e); self.send_reset( Reason::FLOW_CONTROL_ERROR, @@ -460,13 +460,13 @@ impl Send { Ordering::Less => { // We must decrease the (remote) window on every open stream. let dec = old_val - val; - tracing::trace!("decrementing all windows; dec={}", dec); + trace!("decrementing all windows; dec={}", dec); let mut total_reclaimed = 0; store.try_for_each(|mut stream| { let stream = &mut *stream; - tracing::trace!( + trace!( "decrementing stream window; id={:?}; decr={}; flow={:?}", stream.id, dec, @@ -487,7 +487,7 @@ impl Send { // and reassign it to other streams. let window_size = stream.send_flow.window_size(); let available = stream.send_flow.available().as_size(); - let reclaimed = if available > window_size { + let _reclaimed = if available > window_size { // Drop down to `window_size`. 
let reclaim = available - window_size; stream @@ -500,11 +500,11 @@ impl Send { 0 }; - tracing::trace!( + trace!( "decremented stream window; id={:?}; decr={}; reclaimed={}; flow={:?}", stream.id, dec, - reclaimed, + _reclaimed, stream.send_flow ); diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index 5256f09c..9a13bbd6 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -145,7 +145,7 @@ impl State { Open { local: AwaitingHeaders, remote: if frame.is_informational() { - tracing::trace!("skipping 1xx response headers"); + trace!("skipping 1xx response headers"); AwaitingHeaders } else { Streaming @@ -159,7 +159,7 @@ impl State { if eos { Closed(Cause::EndStream) } else if frame.is_informational() { - tracing::trace!("skipping 1xx response headers"); + trace!("skipping 1xx response headers"); ReservedRemote } else { HalfClosedLocal(Streaming) @@ -175,7 +175,7 @@ impl State { Open { local, remote: if frame.is_informational() { - tracing::trace!("skipping 1xx response headers"); + trace!("skipping 1xx response headers"); AwaitingHeaders } else { Streaming @@ -187,15 +187,15 @@ impl State { if eos { Closed(Cause::EndStream) } else if frame.is_informational() { - tracing::trace!("skipping 1xx response headers"); + trace!("skipping 1xx response headers"); HalfClosedLocal(AwaitingHeaders) } else { HalfClosedLocal(Streaming) } } - ref state => { + ref _state => { // All other transitions result in a protocol error - proto_err!(conn: "recv_open: in unexpected state {:?}", state); + proto_err!(conn: "recv_open: in unexpected state {:?}", _state); return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } }; @@ -210,8 +210,8 @@ impl State { self.inner = ReservedRemote; Ok(()) } - ref state => { - proto_err!(conn: "reserve_remote: in unexpected state {:?}", state); + ref _state => { + proto_err!(conn: "reserve_remote: in unexpected state {:?}", _state); Err(Error::library_go_away(Reason::PROTOCOL_ERROR)) } } @@ -233,17 +233,17 @@ impl State { match self.inner { Open { local, .. } => { // The remote side will continue to receive data. - tracing::trace!("recv_close: Open => HalfClosedRemote({:?})", local); + trace!("recv_close: Open => HalfClosedRemote({:?})", local); self.inner = HalfClosedRemote(local); Ok(()) } HalfClosedLocal(..) => { - tracing::trace!("recv_close: HalfClosedLocal => Closed"); + trace!("recv_close: HalfClosedLocal => Closed"); self.inner = Closed(Cause::EndStream); Ok(()) } - ref state => { - proto_err!(conn: "recv_close: in unexpected state {:?}", state); + ref _state => { + proto_err!(conn: "recv_close: in unexpected state {:?}", _state); Err(Error::library_go_away(Reason::PROTOCOL_ERROR)) } } @@ -273,11 +273,11 @@ impl State { // In either of these cases, we want to overwrite the stream's // previous state with the received RST_STREAM, so that the queue // will be cleared by `Prioritize::pop_frame`. - ref state => { - tracing::trace!( + ref _state => { + trace!( "recv_reset; frame={:?}; state={:?}; queued={:?}", frame, - state, + _state, queued ); self.inner = Closed(Cause::Error(Error::remote_reset( @@ -293,7 +293,7 @@ impl State { match self.inner { Closed(..) => {} _ => { - tracing::trace!("handle_error; err={:?}", err); + trace!("handle_error; err={:?}", err); self.inner = Closed(Cause::Error(err.clone())); } } @@ -302,8 +302,8 @@ impl State { pub fn recv_eof(&mut self) { match self.inner { Closed(..) 
=> {} - ref state => { - tracing::trace!("recv_eof; state={:?}", state); + ref _state => { + trace!("recv_eof; state={:?}", _state); self.inner = Closed(Cause::Error( io::Error::new( io::ErrorKind::BrokenPipe, @@ -320,11 +320,11 @@ impl State { match self.inner { Open { remote, .. } => { // The remote side will continue to receive data. - tracing::trace!("send_close: Open => HalfClosedLocal({:?})", remote); + trace!("send_close: Open => HalfClosedLocal({:?})", remote); self.inner = HalfClosedLocal(remote); } HalfClosedRemote(..) => { - tracing::trace!("send_close: HalfClosedRemote => Closed"); + trace!("send_close: HalfClosedRemote => Closed"); self.inner = Closed(Cause::EndStream); } ref state => panic!("send_close: unexpected state {:?}", state), diff --git a/src/proto/streams/store.rs b/src/proto/streams/store.rs index 35fd6f25..d39b0de6 100644 --- a/src/proto/streams/store.rs +++ b/src/proto/streams/store.rs @@ -257,10 +257,10 @@ where /// /// If the stream is already contained by the list, return `false`. pub fn push(&mut self, stream: &mut store::Ptr) -> bool { - tracing::trace!("Queue::push_back"); + trace!("Queue::push_back"); if N::is_queued(stream) { - tracing::trace!(" -> already queued"); + trace!(" -> already queued"); return false; } @@ -272,7 +272,7 @@ where // Queue the stream match self.indices { Some(ref mut idxs) => { - tracing::trace!(" -> existing entries"); + trace!(" -> existing entries"); // Update the current tail node to point to `stream` let key = stream.key(); @@ -282,7 +282,7 @@ where idxs.tail = stream.key(); } None => { - tracing::trace!(" -> first entry"); + trace!(" -> first entry"); self.indices = Some(store::Indices { head: stream.key(), tail: stream.key(), @@ -297,10 +297,10 @@ where /// /// If the stream is already contained by the list, return `false`. pub fn push_front(&mut self, stream: &mut store::Ptr) -> bool { - tracing::trace!("Queue::push_front"); + trace!("Queue::push_front"); if N::is_queued(stream) { - tracing::trace!(" -> already queued"); + trace!(" -> already queued"); return false; } @@ -312,7 +312,7 @@ where // Queue the stream match self.indices { Some(ref mut idxs) => { - tracing::trace!(" -> existing entries"); + trace!(" -> existing entries"); // Update the provided stream to point to the head node let head_key = stream.resolve(idxs.head).key(); @@ -322,7 +322,7 @@ where idxs.head = stream.key(); } None => { - tracing::trace!(" -> first entry"); + trace!(" -> first entry"); self.indices = Some(store::Indices { head: stream.key(), tail: stream.key(), diff --git a/src/proto/streams/stream.rs b/src/proto/streams/stream.rs index 43e31364..232c734d 100644 --- a/src/proto/streams/stream.rs +++ b/src/proto/streams/stream.rs @@ -281,7 +281,7 @@ impl Stream { let _res = self.send_flow.assign_capacity(capacity); debug_assert!(_res.is_ok()); - tracing::trace!( + trace!( " assigned capacity to stream; available={}; buffered={}; id={:?}; max_buffer_size={} prev={}", self.send_flow.available(), self.buffered_send_data, @@ -307,7 +307,7 @@ impl Stream { self.buffered_send_data -= len as usize; self.requested_send_capacity -= len; - tracing::trace!( + trace!( " sent stream data; available={}; buffered={}; id={:?}; max_buffer_size={} prev={}", self.send_flow.available(), self.buffered_send_data, @@ -325,7 +325,7 @@ impl Stream { /// then consider waking the send task again... 
    pub fn notify_capacity(&mut self) {
        self.send_capacity_inc = true;
-        tracing::trace!(" notifying task");
+        trace!(" notifying task");
        self.notify_send();
    }
diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs
index e6c9ed8a..1a3b1816 100644
--- a/src/proto/streams/streams.rs
+++ b/src/proto/streams/streams.rs
@@ -132,7 +132,7 @@ where
        let me = &mut *me;
        me.actions.recv.next_incoming(&mut me.store).map(|key| {
            let stream = &mut me.store.resolve(key);
-            tracing::trace!(
+            trace!(
                "next_incoming; id={:?}, state={:?}",
                stream.id,
                stream.state
@@ -425,7 +425,7 @@ impl Inner {
        // The GOAWAY process has begun. All streams with a greater ID than
        // specified as part of GOAWAY should be ignored.
        if id > self.actions.recv.max_stream_id() {
-            tracing::trace!(
+            trace!(
                "id ({:?}) > max_stream_id ({:?}), ignoring HEADERS",
                id,
                self.actions.recv.max_stream_id()
@@ -445,7 +445,7 @@ impl Inner {
        // This may be response headers for a stream we've already
        // forgotten about...
        if self.actions.may_have_forgotten_stream(peer, id) {
-            tracing::debug!(
+            debug!(
                "recv_headers for old stream={:?}, sending STREAM_CLOSED",
                id,
            );
@@ -478,7 +478,7 @@ impl Inner {
            // Locally reset streams must ignore frames "for some time".
            // This is because the remote may have sent trailers before
            // receiving the RST_STREAM frame.
-            tracing::trace!("recv_headers; ignoring trailers on {:?}", stream.id);
+            trace!("recv_headers; ignoring trailers on {:?}", stream.id);
            return Ok(());
        }
@@ -487,7 +487,7 @@ impl Inner {
        let send_buffer = &mut *send_buffer;

        self.counts.transition(stream, |counts, stream| {
-            tracing::trace!(
+            trace!(
                "recv_headers; stream={:?}; state={:?}",
                stream.id,
                stream.state
@@ -546,7 +546,7 @@ impl Inner {
        // The GOAWAY process has begun. All streams with a greater ID
        // than specified as part of GOAWAY should be ignored.
        if id > self.actions.recv.max_stream_id() {
-            tracing::trace!(
+            trace!(
                "id ({:?}) > max_stream_id ({:?}), ignoring DATA",
                id,
                self.actions.recv.max_stream_id()
@@ -555,7 +555,7 @@ impl Inner {
        }

        if self.actions.may_have_forgotten_stream(peer, id) {
-            tracing::debug!("recv_data for old stream={:?}, sending STREAM_CLOSED", id,);
+            debug!("recv_data for old stream={:?}, sending STREAM_CLOSED", id,);

            let sz = frame.payload().len();
            // This should have been enforced at the codec::FramedRead layer, so
@@ -607,7 +607,7 @@ impl Inner {
        // The GOAWAY process has begun. All streams with a greater ID than
        // specified as part of GOAWAY should be ignored.
        if id > self.actions.recv.max_stream_id() {
-            tracing::trace!(
+            trace!(
                "id ({:?}) > max_stream_id ({:?}), ignoring RST_STREAM",
                id,
                self.actions.recv.max_stream_id()
@@ -743,7 +743,7 @@ impl Inner {
        // The GOAWAY process has begun. All streams with a greater ID
        // than specified as part of GOAWAY should be ignored.
        if id > self.actions.recv.max_stream_id() {
-            tracing::trace!(
+            trace!(
                "id ({:?}) > max_stream_id ({:?}), ignoring PUSH_PROMISE",
                id,
                self.actions.recv.max_stream_id()
@@ -851,7 +851,7 @@ impl Inner {
            );
        }

-        tracing::trace!("Streams::recv_eof");
+        trace!("Streams::recv_eof");

        self.store.for_each(|stream| {
            counts.transition(stream, |counts, stream| {
@@ -966,7 +966,7 @@ where
        if let Some(pending) = pending {
            let mut stream = me.store.resolve(pending.key);
-            tracing::trace!("poll_pending_open; stream = {:?}", stream.is_pending_open);
+            trace!("poll_pending_open; stream = {:?}", stream.is_pending_open);
            if stream.is_pending_open {
                stream.wait_send(cx);
                return Poll::Pending;
@@ -1449,7 +1449,7 @@ fn drop_stream_ref(inner: &Mutex<Inner>, key: store::Key) {
        Ok(inner) => inner,
        Err(_) => {
            if ::std::thread::panicking() {
-                tracing::trace!("StreamRef::drop; mutex poisoned");
+                trace!("StreamRef::drop; mutex poisoned");
                return;
            } else {
                panic!("StreamRef::drop; mutex poisoned");
@@ -1461,7 +1461,7 @@ fn drop_stream_ref(inner: &Mutex<Inner>, key: store::Key) {
    me.refs -= 1;
    let mut stream = me.store.resolve(key);

-    tracing::trace!("drop_stream_ref; stream={:?}", stream);
+    trace!("drop_stream_ref; stream={:?}", stream);

    // decrement the stream's ref count by 1.
    stream.ref_dec();
@@ -1578,7 +1578,7 @@ impl Actions {
                .send_reset(reason, initiator, buffer, stream, counts, &mut self.task);
            Ok(())
        } else {
-            tracing::warn!(
+            warn!(
                "reset_on_recv_stream_err; locally-reset streams reached limit ({:?})",
                counts.max_local_error_resets().unwrap(),
            );
diff --git a/src/server.rs b/src/server.rs
index 27f89d41..98675dc7 100644
--- a/src/server.rs
+++ b/src/server.rs
@@ -120,6 +120,8 @@ use crate::frame::{self, Pseudo, PushPromiseHeaderError, Reason, Settings, Strea
 use crate::proto::{self, Config, Error, Prioritized};
 use crate::{FlowControl, PingPong, RecvStream, SendStream};

+#[cfg(feature = "tracing")]
+use ::tracing::instrument::{Instrument, Instrumented};
 use bytes::{Buf, Bytes};
 use http::{HeaderMap, Method, Request, Response};
 use std::future::Future;
@@ -128,7 +130,6 @@ use std::task::{Context, Poll};
 use std::time::Duration;
 use std::{fmt, io};
 use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
-use tracing::instrument::{Instrument, Instrumented};

 /// In progress HTTP/2 connection handshake future.
 ///
@@ -151,7 +152,8 @@ pub struct Handshake<T, B: Buf = Bytes> {
     /// The current state of the handshake.
     state: Handshaking<T, B>,
     /// Span tracking the handshake
-    span: tracing::Span,
+    #[cfg(feature = "tracing")]
+    span: ::tracing::Span,
 }

 /// Accepts inbound HTTP/2 streams on a connection.
@@ -308,9 +310,17 @@ impl<B> fmt::Debug for SendPushedResponse<B> {
 /// Stages of an in-progress handshake.
 enum Handshaking<T, B: Buf> {
     /// State 1. Connection is flushing pending SETTINGS frame.
+    #[cfg(feature = "tracing")]
     Flushing(Instrumented<Flush<T, Prioritized<B>>>),
+    #[cfg(not(feature = "tracing"))]
+    Flushing(Flush<T, Prioritized<B>>),
+    /// State 2. Connection is waiting for the client preface.
+    #[cfg(feature = "tracing")]
     ReadingPreface(Instrumented<ReadPreface<T, Prioritized<B>>>),
+    #[cfg(not(feature = "tracing"))]
+    ReadingPreface(ReadPreface<T, Prioritized<B>>),
+    /// State 3. Handshake is done, polling again would panic.
     Done,
 }
@@ -377,7 +387,9 @@ where
     B: Buf,
 {
     fn handshake2(io: T, builder: Builder) -> Handshake<T, B> {
-        let span = tracing::trace_span!("server_handshake");
+        #[cfg(feature = "tracing")]
+        let span = ::tracing::trace_span!("server_handshake");
+        #[cfg(feature = "tracing")]
         let entered = span.enter();

         // Create the codec.
@@ -397,14 +409,19 @@ where
            .expect("invalid SETTINGS frame");

        // Create the handshake future.
+        #[cfg(feature = "tracing")]
         let state =
-            Handshaking::Flushing(Flush::new(codec).instrument(tracing::trace_span!("flush")));
+            Handshaking::Flushing(Flush::new(codec).instrument(::tracing::trace_span!("flush")));
+        #[cfg(not(feature = "tracing"))]
+        let state = Handshaking::Flushing(Flush::new(codec));
+        #[cfg(feature = "tracing")]
         drop(entered);

         Handshake {
             builder,
             state,
+            #[cfg(feature = "tracing")]
             span,
         }
     }
@@ -430,7 +447,7 @@ where
        }

        if let Some(inner) = self.connection.next_incoming() {
-            tracing::trace!("received incoming");
+            trace!("received incoming");
            let (head, _) = inner.take_request().into_parts();
            let body = RecvStream::new(FlowControl::new(inner.clone_to_opaque()));

@@ -1342,9 +1359,9 @@ where
    type Output = Result<Connection<T, B>, crate::Error>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
-        let span = self.span.clone(); // XXX(eliza): T_T
-        let _e = span.enter();
-        tracing::trace!(state = ?self.state);
+        #[cfg(feature = "tracing")]
+        let _span = self.span.clone().entered(); // XXX(eliza): T_T
+        trace!(state = ?self.state);

        loop {
            match &mut self.state {
@@ -1354,16 +1371,19 @@ where
                // for the client preface.
                let codec = match Pin::new(flush).poll(cx)? {
                    Poll::Pending => {
-                        tracing::trace!(flush.poll = %"Pending");
+                        trace!(flush.poll = %"Pending");
                        return Poll::Pending;
                    }
                    Poll::Ready(flushed) => {
-                        tracing::trace!(flush.poll = %"Ready");
+                        trace!(flush.poll = %"Ready");
                        flushed
                    }
                };
                self.state = Handshaking::ReadingPreface(
-                    ReadPreface::new(codec).instrument(tracing::trace_span!("read_preface")),
+                    #[cfg(feature = "tracing")]
+                    ReadPreface::new(codec).instrument(::tracing::trace_span!("read_preface")),
+                    #[cfg(not(feature = "tracing"))]
+                    ReadPreface::new(codec),
                );
            }
            Handshaking::ReadingPreface(read) => {
@@ -1388,7 +1408,7 @@ where
                },
            );

-            tracing::trace!("connection established!");
+            trace!("connection established!");
            let mut c = Connection { connection };
            if let Some(sz) = self.builder.initial_target_connection_window_size {
                c.set_target_window_size(sz);
@@ -1454,15 +1474,14 @@ impl Peer {
        if let Err(e) = frame::PushPromise::validate_request(&request) {
            use PushPromiseHeaderError::*;
            match e {
-                NotSafeAndCacheable => tracing::debug!(
+                NotSafeAndCacheable => debug!(
                    ?promised_id,
                    "convert_push_message: method {} is not safe and cacheable",
                    request.method(),
                ),
-                InvalidContentLength(e) => tracing::debug!(
+                InvalidContentLength(_e) => debug!(
                    ?promised_id,
-                    "convert_push_message; promised request has invalid content-length {:?}",
-                    e,
+                    "convert_push_message; promised request has invalid content-length {:?}", _e,
                ),
            }
            return Err(UserError::MalformedHeaders);
@@ -1516,7 +1535,7 @@ impl proto::Peer for Peer {
        macro_rules! malformed {
            ($($arg:tt)*) => {{
-                tracing::debug!($($arg)*);
+                debug!($($arg)*);
                return Err(Error::library_reset(stream_id, Reason::PROTOCOL_ERROR));
            }}
        }
@@ -1552,11 +1571,11 @@ impl proto::Peer for Peer {
        // header
        if let Some(authority) = pseudo.authority {
            let maybe_authority = uri::Authority::from_maybe_shared(authority.clone().into_inner());
-            parts.authority = Some(maybe_authority.or_else(|why| {
+            parts.authority = Some(maybe_authority.or_else(|_why| {
                malformed!(
                    "malformed headers: malformed authority ({:?}): {}",
                    authority,
-                    why,
+                    _why,
                )
            })?);
        }
@@ -1567,11 +1586,11 @@ impl proto::Peer for Peer {
                malformed!("malformed headers: :scheme in CONNECT");
            }
            let maybe_scheme = scheme.parse();
-            let scheme = maybe_scheme.or_else(|why| {
+            let scheme = maybe_scheme.or_else(|_why| {
                malformed!(
                    "malformed headers: malformed scheme ({:?}): {}",
                    scheme,
-                    why,
+                    _why,
                )
            })?;

@@ -1596,8 +1615,8 @@ impl proto::Peer for Peer {
            }

            let maybe_path = uri::PathAndQuery::from_maybe_shared(path.clone().into_inner());
-            parts.path_and_query = Some(maybe_path.or_else(|why| {
-                malformed!("malformed headers: malformed path ({:?}): {}", path, why,)
+            parts.path_and_query = Some(maybe_path.or_else(|_why| {
+                malformed!("malformed headers: malformed path ({:?}): {}", path, _why,)
            })?);
        } else if is_connect && has_protocol {
            malformed!("malformed headers: missing path in extended CONNECT");
@@ -1607,10 +1626,10 @@ impl proto::Peer for Peer {
        let mut request = match b.body(()) {
            Ok(request) => request,
-            Err(e) => {
+            Err(_e) => {
                // TODO: Should there be more specialized handling for different
                // kinds of errors
-                proto_err!(stream: "error building request: {}; stream={:?}", e, stream_id);
+                proto_err!(stream: "error building request: {}; stream={:?}", _e, stream_id);
                return Err(Error::library_reset(stream_id, Reason::PROTOCOL_ERROR));
            }
        };
diff --git a/src/tracing.rs b/src/tracing.rs
new file mode 100644
index 00000000..c87575e4
--- /dev/null
+++ b/src/tracing.rs
@@ -0,0 +1,44 @@
+macro_rules! debug {
+    ($($arg:tt)+) => {
+        {
+            #[cfg(feature = "tracing")]
+            {
+                ::tracing::debug!($($arg)+);
+            }
+        }
+    }
+}
+
+macro_rules! trace {
+    ($($arg:tt)*) => {
+        {
+            #[cfg(feature = "tracing")]
+            {
+                ::tracing::trace!($($arg)*);
+            }
+        }
+    }
+}
+
+macro_rules! trace_span {
+    ($($arg:tt)*) => {
+        {
+            #[cfg(feature = "tracing")]
+            {
+                let _span = ::tracing::trace_span!($($arg)*);
+                _span.entered()
+            }
+        }
+    }
+}
+
+macro_rules! warn {
+    ($($arg:tt)*) => {
+        {
+            #[cfg(feature = "tracing")]
+            {
+                ::tracing::warn!($($arg)*);
+            }
+        }
+    }
+}
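Note on the new `src/tracing.rs` shims: each macro expands to a block that contains the real `::tracing` call only when the `tracing` feature is enabled; with the feature disabled the block is empty, so the format arguments are never evaluated. `trace_span!` yields an entered span guard when the feature is on and `()` otherwise, so it is meant to be bound to a local (`let _span = ...`) that keeps the guard alive for the enclosing scope. Below is a minimal, self-contained sketch of the same pattern in isolation, not the crate's code: it assumes a crate whose Cargo.toml declares `tracing` as an optional dependency exposed through a feature also named `tracing`, and it omits the `#[macro_use]` wiring that a real crate would need, since that change is not part of this hunk.

```rust
// Standalone sketch (assumptions: optional `tracing` dependency behind a
// feature named "tracing"; names below are illustrative only).
macro_rules! trace {
    ($($arg:tt)*) => {
        {
            // With the feature disabled this outer block stays empty, so the
            // arguments passed to the macro are never evaluated.
            #[cfg(feature = "tracing")]
            {
                ::tracing::trace!($($arg)*);
            }
        }
    };
}

#[allow(dead_code)] // only referenced when the `tracing` feature is enabled
fn expensive_repr() -> String {
    "lots of formatting work".to_string()
}

fn main() {
    // Compiles to a no-op without the feature, and forwards to
    // `tracing::trace!` when built with `--features tracing`.
    trace!("frame size = {}", expensive_repr());
}
```

One behavioral difference from an always-on `tracing` dependency: because disabled calls are removed at compile time rather than filtered at runtime, any side effects inside log arguments disappear as well, so call sites should keep their arguments side-effect free.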